/drivers/include/drm/drm_global.h |
---|
File deleted |
/drivers/include/drm/drmP.h |
---|
60,7 → 60,6 |
//#include <linux/file.h> |
#include <linux/pci.h> |
#include <linux/jiffies.h> |
#include <linux/irqreturn.h> |
//#include <linux/smp_lock.h> /* For (un)lock_kernel */ |
//#include <linux/dma-mapping.h> |
//#include <linux/mm.h> |
171,7 → 170,7 |
/** \name Begin the DRM... */ |
/*@{*/ |
#define DRM_DEBUG_CODE 0 /**< Include debugging code if > 1, then |
#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then |
also include looping detection. */ |
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ |
966,15 → 965,6 |
#endif |
#define DRM_IRQ_ARGS int irq, void *arg |
struct drm_driver { |
irqreturn_t (*irq_handler) (DRM_IRQ_ARGS); |
void (*irq_preinstall) (struct drm_device *dev); |
int (*irq_postinstall) (struct drm_device *dev); |
}; |
#define DRM_MINOR_UNASSIGNED 0 |
#define DRM_MINOR_LEGACY 1 |
#define DRM_MINOR_CONTROL 2 |
1182,7 → 1172,7 |
// struct drm_sigdata sigdata; /**< For block_all_signals */ |
// sigset_t sigmask; |
struct drm_driver *driver; |
// struct drm_driver *driver; |
// struct drm_local_map *agp_buffer_map; |
// unsigned int agp_buffer_token; |
// struct drm_minor *control; /**< Control node for card */ |
/drivers/include/drm/intel-gtt.h |
---|
19,7 → 19,6 |
unsigned int do_idle_maps : 1; |
/* Share the scratch page dma with ppgtts. */ |
dma_addr_t scratch_page_dma; |
struct page *scratch_page; |
/* for ppgtt PDE access */ |
u32 __iomem *gtt; |
/* needed for ioremap in drm/i915 */ |
33,8 → 32,7 |
bool intel_enable_gtt(void); |
void intel_gtt_chipset_flush(void); |
void intel_gtt_insert_sg_entries(struct sg_table *st, |
unsigned int pg_start, |
void intel_gtt_insert_sg_entries(struct pagelist *st, unsigned int pg_start, |
unsigned int flags); |
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); |
/drivers/include/linux/mm.h |
---|
File deleted |
/drivers/include/linux/err.h |
---|
File deleted |
/drivers/include/linux/rwlock.h |
---|
File deleted |
/drivers/include/linux/rbtree.h |
---|
File deleted |
/drivers/include/linux/scatterlist.h |
---|
File deleted |
/drivers/include/linux/i2c.h |
---|
172,7 → 172,6 |
* @platform_data: stored in i2c_client.dev.platform_data |
* @archdata: copied into i2c_client.dev.archdata |
* @of_node: pointer to OpenFirmware device node |
* @acpi_node: ACPI device node |
* @irq: stored in i2c_client.irq |
* |
* I2C doesn't actually support hardware probing, although controllers and |
/drivers/include/linux/lockdep.h |
---|
498,17 → 498,14 |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) |
# else |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) |
# endif |
# define rwsem_release(l, n, i) lock_release(l, n, i) |
#else |
# define rwsem_acquire(l, s, t, i) do { } while (0) |
# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0) |
# define rwsem_acquire_read(l, s, t, i) do { } while (0) |
# define rwsem_release(l, n, i) do { } while (0) |
#endif |
/drivers/include/linux/kernel.h |
---|
331,15 → 331,47 |
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
struct scatterlist { |
unsigned long page_link; |
unsigned int offset; |
unsigned int length; |
dma_addr_t dma_address; |
unsigned int dma_length; |
}; |
struct sg_table { |
struct scatterlist *sgl; /* the list */ |
unsigned int nents; /* number of mapped entries */ |
unsigned int orig_nents; /* original size of list */ |
}; |
#define SG_MAX_SINGLE_ALLOC (4096 / sizeof(struct scatterlist)) |
struct scatterlist *sg_next(struct scatterlist *sg); |
#define sg_dma_address(sg) ((sg)->dma_address) |
#define sg_dma_len(sg) ((sg)->length) |
#define sg_is_chain(sg) ((sg)->page_link & 0x01) |
#define sg_is_last(sg) ((sg)->page_link & 0x02) |
#define sg_chain_ptr(sg) \ |
((struct scatterlist *) ((sg)->page_link & ~0x03)) |
static inline addr_t sg_page(struct scatterlist *sg) |
{ |
return (addr_t)((sg)->page_link & ~0x3); |
} |
#define for_each_sg(sglist, sg, nr, __i) \ |
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg)) |
struct page |
{ |
unsigned int addr; |
}; |
#define page_to_phys(page) ((dma_addr_t)(page)) |
struct vm_fault { |
unsigned int flags; /* FAULT_FLAG_xxx flags */ |
358,9 → 390,5 |
unsigned int nents; |
}; |
#define page_cache_release(page) FreePage((addr_t)(page)) |
#define alloc_page(gfp_mask) (struct page*)AllocPage() |
#endif |
/drivers/include/linux/asm/scatterlist.h |
---|
File deleted |
/drivers/include/linux/compiler-gcc4.h |
---|
63,13 → 63,3 |
#define __compiletime_warning(message) __attribute__((warning(message))) |
#define __compiletime_error(message) __attribute__((error(message))) |
#endif |
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP |
#if __GNUC_MINOR__ >= 4 |
#define __HAVE_BUILTIN_BSWAP32__ |
#define __HAVE_BUILTIN_BSWAP64__ |
#endif |
#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6) |
#define __HAVE_BUILTIN_BSWAP16__ |
#endif |
#endif |
/drivers/include/linux/compiler.h |
---|
10,7 → 10,6 |
# define __force __attribute__((force)) |
# define __nocast __attribute__((nocast)) |
# define __iomem __attribute__((noderef, address_space(2))) |
# define __must_hold(x) __attribute__((context(x,1,1))) |
# define __acquires(x) __attribute__((context(x,0,1))) |
# define __releases(x) __attribute__((context(x,1,0))) |
# define __acquire(x) __context__(x,1) |
34,7 → 33,6 |
# define __chk_user_ptr(x) (void)0 |
# define __chk_io_ptr(x) (void)0 |
# define __builtin_warning(x, y...) (1) |
# define __must_hold(x) |
# define __acquires(x) |
# define __releases(x) |
# define __acquire(x) (void)0 |
44,10 → 42,6 |
# define __rcu |
#endif |
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ |
#define ___PASTE(a,b) a##b |
#define __PASTE(a,b) ___PASTE(a,b) |
#ifdef __KERNEL__ |
#ifdef __GNUC__ |
/drivers/include/linux/module.h |
---|
9,7 → 9,6 |
#include <linux/list.h> |
#include <linux/compiler.h> |
#include <linux/kernel.h> |
#include <linux/export.h> |
/drivers/include/linux/slab.h |
---|
1,3 → 1,2 |
#include <errno.h> |
// stub |
/drivers/include/linux/spinlock_types.h |
---|
17,7 → 17,7 |
#include <linux/lockdep.h> |
typedef struct spinlock { |
typedef struct { |
raw_spinlock_t raw_lock; |
#ifdef CONFIG_GENERIC_LOCKBREAK |
unsigned int break_lock; |
/drivers/include/syscall.h |
---|
516,11 → 516,11 |
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
//static void init_rwsem(struct rw_semaphore *sem) |
//{ |
// sem->count = RWSEM_UNLOCKED_VALUE; |
// spin_lock_init(&sem->wait_lock); |
// INIT_LIST_HEAD(&sem->wait_list); |
//} |
static void init_rwsem(struct rw_semaphore *sem) |
{ |
sem->count = RWSEM_UNLOCKED_VALUE; |
spin_lock_init(&sem->wait_lock); |
INIT_LIST_HEAD(&sem->wait_list); |
} |
#endif |
/drivers/video/drm/drm_crtc_helper.c |
---|
135,10 → 135,8 |
if (connector->funcs->force) |
connector->funcs->force(connector); |
} else { |
// dbgprintf("call detect funcs %p ", connector->funcs); |
// dbgprintf("detect %p\n", connector->funcs->detect); |
connector->status = connector->funcs->detect(connector, true); |
// dbgprintf("status %x\n", connector->status); |
// drm_kms_helper_poll_enable(dev); |
} |
if (connector->status == connector_status_disconnected) { |
298,6 → 296,7 |
crtc->fb = NULL; |
} |
} |
} |
EXPORT_SYMBOL(drm_helper_disable_unused_functions); |
/drivers/video/drm/drm_fb_helper.c |
---|
61,6 → 61,8 |
struct drm_connector *connector; |
int i; |
ENTER(); |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
struct drm_fb_helper_connector *fb_helper_connector; |
71,6 → 73,7 |
fb_helper_connector->connector = connector; |
fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector; |
} |
LEAVE(); |
return 0; |
fail: |
for (i = 0; i < fb_helper->connector_count; i++) { |
78,6 → 81,7 |
fb_helper->connector_info[i] = NULL; |
} |
fb_helper->connector_count = 0; |
FAIL(); |
return -ENOMEM; |
} |
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); |
190,6 → 194,10 |
struct drm_crtc *crtc; |
int i; |
ENTER(); |
dbgprintf("crtc_count %d max_conn_count %d\n", crtc_count, max_conn_count); |
fb_helper->dev = dev; |
INIT_LIST_HEAD(&fb_helper->kernel_fb_list); |
196,12 → 204,16 |
fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); |
if (!fb_helper->crtc_info) |
{ |
FAIL(); |
return -ENOMEM; |
}; |
fb_helper->crtc_count = crtc_count; |
fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL); |
if (!fb_helper->connector_info) { |
kfree(fb_helper->crtc_info); |
FAIL(); |
return -ENOMEM; |
} |
fb_helper->connector_count = 0; |
223,9 → 235,11 |
i++; |
} |
LEAVE(); |
return 0; |
out_free: |
drm_fb_helper_crtc_free(fb_helper); |
FAIL(); |
return -ENOMEM; |
} |
EXPORT_SYMBOL(drm_fb_helper_init); |
585,8 → 599,8 |
if (new_fb) { |
info->var.pixclock = 0; |
dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n", |
info->node, info->fix.id); |
printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, |
info->fix.id); |
} else { |
drm_fb_helper_set_par(info); |
982,7 → 996,10 |
{ |
struct drm_device *dev = fb_helper->dev; |
int count = 0; |
bool ret; |
ENTER(); |
/* disable all the possible outputs/crtcs before entering KMS mode */ |
drm_helper_disable_unused_functions(fb_helper->dev); |
999,7 → 1016,8 |
drm_setup_crtcs(fb_helper); |
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); |
ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); |
LEAVE(); |
} |
EXPORT_SYMBOL(drm_fb_helper_initial_config); |
/drivers/video/drm/i915/i915_drv.c |
---|
33,6 → 33,7 |
#include "i915_drv.h" |
#include "intel_drv.h" |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/mod_devicetable.h> |
383,36 → 384,26 |
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); |
if (pch) { |
if (pch->vendor == PCI_VENDOR_ID_INTEL) { |
unsigned short id; |
int id; |
id = pch->device & INTEL_PCH_DEVICE_ID_MASK; |
dev_priv->pch_id = id; |
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { |
dev_priv->pch_type = PCH_IBX; |
dev_priv->num_pch_pll = 2; |
DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); |
WARN_ON(!IS_GEN5(dev)); |
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { |
dev_priv->pch_type = PCH_CPT; |
dev_priv->num_pch_pll = 2; |
DRM_DEBUG_KMS("Found CougarPoint PCH\n"); |
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); |
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { |
/* PantherPoint is CPT compatible */ |
dev_priv->pch_type = PCH_CPT; |
dev_priv->num_pch_pll = 2; |
DRM_DEBUG_KMS("Found PatherPoint PCH\n"); |
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); |
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { |
dev_priv->pch_type = PCH_LPT; |
dev_priv->num_pch_pll = 0; |
DRM_DEBUG_KMS("Found LynxPoint PCH\n"); |
WARN_ON(!IS_HASWELL(dev)); |
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { |
dev_priv->pch_type = PCH_LPT; |
dev_priv->num_pch_pll = 0; |
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); |
WARN_ON(!IS_HASWELL(dev)); |
} |
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); |
} |
458,7 → 449,7 |
struct intel_device_info *intel_info = |
(struct intel_device_info *) ent->driver_data; |
if (intel_info->is_valleyview) |
if (intel_info->is_haswell || intel_info->is_valleyview) |
if(!i915_preliminary_hw_support) { |
DRM_ERROR("Preliminary hardware support disabled\n"); |
return -ENODEV; |
482,8 → 473,6 |
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent) |
{ |
struct drm_device *dev; |
static struct drm_driver driver; |
int ret; |
dev = kzalloc(sizeof(*dev), 0); |
514,8 → 503,6 |
mutex_init(&dev->struct_mutex); |
mutex_init(&dev->ctxlist_mutex); |
dev->driver = &driver; |
ret = i915_driver_load(dev, ent->driver_data ); |
if (ret) |
622,40 → 609,12 |
if (reg == GEN6_GDRST) |
return false; |
switch (reg) { |
case _3D_CHICKEN3: |
case IVB_CHICKEN3: |
case GEN7_COMMON_SLICE_CHICKEN1: |
case GEN7_L3CNTLREG1: |
case GEN7_L3_CHICKEN_MODE_REGISTER: |
case GEN7_ROW_CHICKEN2: |
case GEN7_L3SQCREG4: |
case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG: |
case GEN7_HALF_SLICE_CHICKEN1: |
case GEN6_MBCTL: |
case GEN6_UCGCTL2: |
return false; |
default: |
break; |
} |
return true; |
} |
static void |
ilk_dummy_write(struct drm_i915_private *dev_priv) |
{ |
/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the |
* chip from rc6 before touching it for real. MI_MODE is masked, hence |
* harmless to write 0 into. */ |
I915_WRITE_NOTRACE(MI_MODE, 0); |
} |
#define __i915_read(x, y) \ |
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
u##x val = 0; \ |
if (IS_GEN5(dev_priv->dev)) \ |
ilk_dummy_write(dev_priv); \ |
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
unsigned long irqflags; \ |
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ |
686,12 → 645,6 |
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
} \ |
if (IS_GEN5(dev_priv->dev)) \ |
ilk_dummy_write(dev_priv); \ |
if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \ |
DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \ |
I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \ |
} \ |
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ |
write##y(val, dev_priv->regs + reg + 0x180000); \ |
} else { \ |
/drivers/video/drm/i915/intel_bios.c |
---|
735,8 → 735,7 |
struct drm_i915_private *dev_priv = dev->dev_private; |
/* Set the Panel Power On/Off timings if uninitialized. */ |
if (!HAS_PCH_SPLIT(dev) && |
I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) { |
if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { |
/* Set T2 to 40ms and T5 to 200ms */ |
I915_WRITE(PP_ON_DELAYS, 0x019007d0); |
/drivers/video/drm/i915/intel_crt.c |
---|
197,11 → 197,6 |
if (mode->clock > max_clock) |
return MODE_CLOCK_HIGH; |
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */ |
if (HAS_PCH_LPT(dev) && |
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2)) |
return MODE_CLOCK_HIGH; |
return MODE_OK; |
} |
225,11 → 220,7 |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 adpa; |
if (HAS_PCH_SPLIT(dev)) |
adpa = ADPA_HOTPLUG_BITS; |
else |
adpa = 0; |
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
adpa |= ADPA_HSYNC_ACTIVE_HIGH; |
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
236,9 → 227,7 |
adpa |= ADPA_VSYNC_ACTIVE_HIGH; |
/* For CPT allow 3 pipe config, for others just use A or B */ |
if (HAS_PCH_LPT(dev)) |
; /* Those bits don't exist here */ |
else if (HAS_PCH_CPT(dev)) |
if (HAS_PCH_CPT(dev)) |
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); |
else if (intel_crtc->pipe == 0) |
adpa |= ADPA_PIPE_A_SELECT; |
411,16 → 400,12 |
struct i2c_adapter *adapter) |
{ |
struct edid *edid; |
int ret; |
edid = intel_crt_get_edid(connector, adapter); |
if (!edid) |
return 0; |
ret = intel_connector_update_modes(connector, edid); |
kfree(edid); |
return ret; |
return intel_connector_update_modes(connector, edid); |
} |
static bool intel_crt_detect_ddc(struct drm_connector *connector) |
658,24 → 643,12 |
static void intel_crt_reset(struct drm_connector *connector) |
{ |
struct drm_device *dev = connector->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crt *crt = intel_attached_crt(connector); |
if (HAS_PCH_SPLIT(dev)) { |
u32 adpa; |
adpa = I915_READ(PCH_ADPA); |
adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
adpa |= ADPA_HOTPLUG_BITS; |
I915_WRITE(PCH_ADPA, adpa); |
POSTING_READ(PCH_ADPA); |
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); |
if (HAS_PCH_SPLIT(dev)) |
crt->force_hotplug_required = 1; |
} |
} |
/* |
* Routines for controlling stuff on the analog port |
*/ |
733,7 → 706,7 |
crt->base.type = INTEL_OUTPUT_ANALOG; |
crt->base.cloneable = true; |
if (IS_I830(dev)) |
if (IS_HASWELL(dev) || IS_I830(dev)) |
crt->base.crtc_mask = (1 << 0); |
else |
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
753,9 → 726,6 |
crt->base.disable = intel_disable_crt; |
crt->base.enable = intel_enable_crt; |
if (IS_HASWELL(dev)) |
crt->base.get_hw_state = intel_ddi_get_hw_state; |
else |
crt->base.get_hw_state = intel_crt_get_hw_state; |
intel_connector->get_hw_state = intel_connector_get_hw_state; |
773,14 → 743,18 |
* Configure the automatic hotplug detection stuff |
*/ |
crt->force_hotplug_required = 0; |
if (HAS_PCH_SPLIT(dev)) { |
u32 adpa; |
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; |
adpa = I915_READ(PCH_ADPA); |
adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
adpa |= ADPA_HOTPLUG_BITS; |
I915_WRITE(PCH_ADPA, adpa); |
POSTING_READ(PCH_ADPA); |
/* |
* TODO: find a proper way to discover whether we need to set the |
* polarity reversal bit or not, instead of relying on the BIOS. |
*/ |
if (HAS_PCH_LPT(dev)) |
dev_priv->fdi_rx_polarity_reversed = |
!!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT); |
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); |
crt->force_hotplug_required = 1; |
} |
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; |
} |
/drivers/video/drm/i915/intel_display.c |
---|
51,11 → 51,24 |
#define MAX_ERRNO 4095 |
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) |
static inline long IS_ERR(const void *ptr) |
{ |
return IS_ERR_VALUE((unsigned long)ptr); |
} |
static inline void *ERR_PTR(long error) |
{ |
return (void *) error; |
} |
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) |
bool intel_pipe_has_type(struct drm_crtc *crtc, int type); |
static void intel_increase_pllclock(struct drm_crtc *crtc); |
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); |
//static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); |
typedef struct { |
/* given values */ |
90,16 → 103,6 |
/* FDI */ |
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ |
int |
intel_pch_rawclk(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
WARN_ON(!HAS_PCH_SPLIT(dev)); |
return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; |
} |
static bool |
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
int target, int refclk, intel_clock_t *match_clock, |
400,7 → 403,7 |
static const intel_limit_t intel_limits_vlv_hdmi = { |
.dot = { .min = 20000, .max = 165000 }, |
.vco = { .min = 4000000, .max = 5994000}, |
.vco = { .min = 5994000, .max = 4000000 }, |
.n = { .min = 1, .max = 7 }, |
.m = { .min = 60, .max = 300 }, /* guess */ |
.m1 = { .min = 2, .max = 3 }, |
413,10 → 416,10 |
}; |
static const intel_limit_t intel_limits_vlv_dp = { |
.dot = { .min = 25000, .max = 270000 }, |
.vco = { .min = 4000000, .max = 6000000 }, |
.dot = { .min = 162000, .max = 270000 }, |
.vco = { .min = 5994000, .max = 4000000 }, |
.n = { .min = 1, .max = 7 }, |
.m = { .min = 22, .max = 450 }, |
.m = { .min = 60, .max = 300 }, /* guess */ |
.m1 = { .min = 2, .max = 3 }, |
.m2 = { .min = 11, .max = 156 }, |
.p = { .min = 10, .max = 30 }, |
551,7 → 554,7 |
limit = &intel_limits_ironlake_single_lvds; |
} |
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || |
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) |
HAS_eDP) |
limit = &intel_limits_ironlake_display_port; |
else |
limit = &intel_limits_ironlake_dac; |
947,15 → 950,6 |
return true; |
} |
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, |
enum pipe pipe) |
{ |
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
return intel_crtc->cpu_transcoder; |
} |
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
1028,11 → 1022,9 |
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
pipe); |
if (INTEL_INFO(dev)->gen >= 4) { |
int reg = PIPECONF(cpu_transcoder); |
int reg = PIPECONF(pipe); |
/* Wait for the Pipe State to go off */ |
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, |
1134,14 → 1126,12 |
int reg; |
u32 val; |
bool cur_state; |
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
pipe); |
if (IS_HASWELL(dev_priv->dev)) { |
/* On Haswell, DDI is used instead of FDI_TX_CTL */ |
reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); |
reg = DDI_FUNC_CTL(pipe); |
val = I915_READ(reg); |
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); |
cur_state = !!(val & PIPE_DDI_FUNC_ENABLE); |
} else { |
reg = FDI_TX_CTL(pipe); |
val = I915_READ(reg); |
1161,9 → 1151,14 |
u32 val; |
bool cur_state; |
if (IS_HASWELL(dev_priv->dev) && pipe > 0) { |
DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n"); |
return; |
} else { |
reg = FDI_RX_CTL(pipe); |
val = I915_READ(reg); |
cur_state = !!(val & FDI_RX_ENABLE); |
} |
WARN(cur_state != state, |
"FDI RX state assertion failure (expected %s, current %s)\n", |
state_string(state), state_string(cur_state)); |
1196,6 → 1191,10 |
int reg; |
u32 val; |
if (IS_HASWELL(dev_priv->dev) && pipe > 0) { |
DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n"); |
return; |
} |
reg = FDI_RX_CTL(pipe); |
val = I915_READ(reg); |
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); |
1236,14 → 1235,12 |
int reg; |
u32 val; |
bool cur_state; |
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
pipe); |
/* if we need the pipe A quirk it must be always on */ |
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) |
state = true; |
reg = PIPECONF(cpu_transcoder); |
reg = PIPECONF(pipe); |
val = I915_READ(reg); |
cur_state = !!(val & PIPECONF_ENABLE); |
WARN(cur_state != state, |
1518,27 → 1515,25 |
/* SBI access */ |
static void |
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, |
enum intel_sbi_destination destination) |
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value) |
{ |
unsigned long flags; |
u32 tmp; |
spin_lock_irqsave(&dev_priv->dpio_lock, flags); |
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { |
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, |
100)) { |
DRM_ERROR("timeout waiting for SBI to become ready\n"); |
goto out_unlock; |
} |
I915_WRITE(SBI_ADDR, (reg << 16)); |
I915_WRITE(SBI_DATA, value); |
I915_WRITE(SBI_ADDR, |
(reg << 16)); |
I915_WRITE(SBI_DATA, |
value); |
I915_WRITE(SBI_CTL_STAT, |
SBI_BUSY | |
SBI_CTL_OP_CRWR); |
if (destination == SBI_ICLK) |
tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR; |
else |
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR; |
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp); |
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, |
100)) { |
DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); |
1550,26 → 1545,24 |
} |
static u32 |
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, |
enum intel_sbi_destination destination) |
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) |
{ |
unsigned long flags; |
u32 value = 0; |
spin_lock_irqsave(&dev_priv->dpio_lock, flags); |
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { |
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, |
100)) { |
DRM_ERROR("timeout waiting for SBI to become ready\n"); |
goto out_unlock; |
} |
I915_WRITE(SBI_ADDR, (reg << 16)); |
I915_WRITE(SBI_ADDR, |
(reg << 16)); |
I915_WRITE(SBI_CTL_STAT, |
SBI_BUSY | |
SBI_CTL_OP_CRRD); |
if (destination == SBI_ICLK) |
value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD; |
else |
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; |
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY); |
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, |
100)) { |
DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); |
1584,7 → 1577,7 |
} |
/** |
* ironlake_enable_pch_pll - enable PCH PLL |
* intel_enable_pch_pll - enable PCH PLL |
* @dev_priv: i915 private structure |
* @pipe: pipe PLL to enable |
* |
1591,7 → 1584,7 |
* The PCH PLL needs to be enabled before the PCH transcoder, since it |
* drives the transcoder clock. |
*/ |
static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc) |
static void intel_enable_pch_pll(struct intel_crtc *intel_crtc) |
{ |
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; |
struct intel_pch_pll *pll; |
1675,12 → 1668,12 |
pll->on = false; |
} |
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, |
static void intel_enable_transcoder(struct drm_i915_private *dev_priv, |
enum pipe pipe) |
{ |
struct drm_device *dev = dev_priv->dev; |
int reg; |
u32 val, pipeconf_val; |
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
uint32_t reg, val, pipeconf_val; |
/* PCH only available on ILK+ */ |
BUG_ON(dev_priv->info->gen < 5); |
1694,15 → 1687,10 |
assert_fdi_tx_enabled(dev_priv, pipe); |
assert_fdi_rx_enabled(dev_priv, pipe); |
if (HAS_PCH_CPT(dev)) { |
/* Workaround: Set the timing override bit before enabling the |
* pch transcoder. */ |
reg = TRANS_CHICKEN2(pipe); |
val = I915_READ(reg); |
val |= TRANS_CHICKEN2_TIMING_OVERRIDE; |
I915_WRITE(reg, val); |
if (IS_HASWELL(dev_priv->dev) && pipe > 0) { |
DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n"); |
return; |
} |
reg = TRANSCONF(pipe); |
val = I915_READ(reg); |
pipeconf_val = I915_READ(PIPECONF(pipe)); |
1731,42 → 1719,11 |
DRM_ERROR("failed to enable transcoder %d\n", pipe); |
} |
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, |
enum transcoder cpu_transcoder) |
{ |
u32 val, pipeconf_val; |
/* PCH only available on ILK+ */ |
BUG_ON(dev_priv->info->gen < 5); |
/* FDI must be feeding us bits for PCH ports */ |
assert_fdi_tx_enabled(dev_priv, cpu_transcoder); |
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); |
/* Workaround: set timing override bit. */ |
val = I915_READ(_TRANSA_CHICKEN2); |
val |= TRANS_CHICKEN2_TIMING_OVERRIDE; |
I915_WRITE(_TRANSA_CHICKEN2, val); |
val = TRANS_ENABLE; |
pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); |
if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == |
PIPECONF_INTERLACED_ILK) |
val |= TRANS_INTERLACED; |
else |
val |= TRANS_PROGRESSIVE; |
I915_WRITE(TRANSCONF(TRANSCODER_A), val); |
if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100)) |
DRM_ERROR("Failed to enable PCH transcoder\n"); |
} |
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, |
static void intel_disable_transcoder(struct drm_i915_private *dev_priv, |
enum pipe pipe) |
{ |
struct drm_device *dev = dev_priv->dev; |
uint32_t reg, val; |
int reg; |
u32 val; |
/* FDI relies on the transcoder */ |
assert_fdi_tx_disabled(dev_priv, pipe); |
1782,33 → 1739,8 |
/* wait for PCH transcoder off, transcoder state */ |
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) |
DRM_ERROR("failed to disable transcoder %d\n", pipe); |
if (!HAS_PCH_IBX(dev)) { |
/* Workaround: Clear the timing override chicken bit again. */ |
reg = TRANS_CHICKEN2(pipe); |
val = I915_READ(reg); |
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; |
I915_WRITE(reg, val); |
} |
} |
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) |
{ |
u32 val; |
val = I915_READ(_TRANSACONF); |
val &= ~TRANS_ENABLE; |
I915_WRITE(_TRANSACONF, val); |
/* wait for PCH transcoder off, transcoder state */ |
if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50)) |
DRM_ERROR("Failed to disable PCH transcoder\n"); |
/* Workaround: clear timing override bit. */ |
val = I915_READ(_TRANSA_CHICKEN2); |
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; |
I915_WRITE(_TRANSA_CHICKEN2, val); |
} |
/** |
* intel_enable_pipe - enable a pipe, asserting requirements |
* @dev_priv: i915 private structure |
1826,17 → 1758,9 |
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, |
bool pch_port) |
{ |
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
pipe); |
enum transcoder pch_transcoder; |
int reg; |
u32 val; |
if (IS_HASWELL(dev_priv->dev)) |
pch_transcoder = TRANSCODER_A; |
else |
pch_transcoder = pipe; |
/* |
* A pipe without a PLL won't actually be able to drive bits from |
* a plane. On ILK+ the pipe PLLs are integrated, so we don't |
1847,13 → 1771,13 |
else { |
if (pch_port) { |
/* if driving the PCH, we need FDI enabled */ |
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); |
assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder); |
assert_fdi_rx_pll_enabled(dev_priv, pipe); |
assert_fdi_tx_pll_enabled(dev_priv, pipe); |
} |
/* FIXME: assert CPU port conditions for SNB+ */ |
} |
reg = PIPECONF(cpu_transcoder); |
reg = PIPECONF(pipe); |
val = I915_READ(reg); |
if (val & PIPECONF_ENABLE) |
return; |
1877,8 → 1801,6 |
static void intel_disable_pipe(struct drm_i915_private *dev_priv, |
enum pipe pipe) |
{ |
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
pipe); |
int reg; |
u32 val; |
1892,7 → 1814,7 |
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) |
return; |
reg = PIPECONF(cpu_transcoder); |
reg = PIPECONF(pipe); |
val = I915_READ(reg); |
if ((val & PIPECONF_ENABLE) == 0) |
return; |
1908,10 → 1830,8 |
void intel_flush_display_plane(struct drm_i915_private *dev_priv, |
enum plane plane) |
{ |
if (dev_priv->info->gen >= 4) |
I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); |
I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); |
else |
I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); |
} |
/** |
2005,6 → 1925,11 |
* framebuffer compression. For simplicity, we always install |
* a fence as the cost is not that onerous. |
*/ |
// if (obj->tiling_mode != I915_TILING_NONE) { |
// ret = i915_gem_object_get_fence(obj, pipelined); |
// if (ret) |
// goto err_unpin; |
// } |
dev_priv->mm.interruptible = true; |
return 0; |
2024,7 → 1949,7 |
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel |
* is assumed to be a power-of-two. */ |
unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, |
static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y, |
unsigned int bpp, |
unsigned int pitch) |
{ |
2067,38 → 1992,24 |
dspcntr = I915_READ(reg); |
/* Mask out pixel format bits in case we change it */ |
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; |
switch (fb->pixel_format) { |
case DRM_FORMAT_C8: |
switch (fb->bits_per_pixel) { |
case 8: |
dspcntr |= DISPPLANE_8BPP; |
break; |
case DRM_FORMAT_XRGB1555: |
case DRM_FORMAT_ARGB1555: |
dspcntr |= DISPPLANE_BGRX555; |
case 16: |
if (fb->depth == 15) |
dspcntr |= DISPPLANE_15_16BPP; |
else |
dspcntr |= DISPPLANE_16BPP; |
break; |
case DRM_FORMAT_RGB565: |
dspcntr |= DISPPLANE_BGRX565; |
case 24: |
case 32: |
dspcntr |= DISPPLANE_32BPP_NO_ALPHA; |
break; |
case DRM_FORMAT_XRGB8888: |
case DRM_FORMAT_ARGB8888: |
dspcntr |= DISPPLANE_BGRX888; |
break; |
case DRM_FORMAT_XBGR8888: |
case DRM_FORMAT_ABGR8888: |
dspcntr |= DISPPLANE_RGBX888; |
break; |
case DRM_FORMAT_XRGB2101010: |
case DRM_FORMAT_ARGB2101010: |
dspcntr |= DISPPLANE_BGRX101010; |
break; |
case DRM_FORMAT_XBGR2101010: |
case DRM_FORMAT_ABGR2101010: |
dspcntr |= DISPPLANE_RGBX101010; |
break; |
default: |
DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); |
DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); |
return -EINVAL; |
} |
if (INTEL_INFO(dev)->gen >= 4) { |
if (obj->tiling_mode != I915_TILING_NONE) |
dspcntr |= DISPPLANE_TILED; |
2112,7 → 2023,7 |
if (INTEL_INFO(dev)->gen >= 4) { |
intel_crtc->dspaddr_offset = |
intel_gen4_compute_offset_xtiled(&x, &y, |
gen4_compute_dspaddr_offset_xtiled(&x, &y, |
fb->bits_per_pixel / 8, |
fb->pitches[0]); |
linear_offset -= intel_crtc->dspaddr_offset; |
2165,31 → 2076,27 |
dspcntr = I915_READ(reg); |
/* Mask out pixel format bits in case we change it */ |
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; |
switch (fb->pixel_format) { |
case DRM_FORMAT_C8: |
switch (fb->bits_per_pixel) { |
case 8: |
dspcntr |= DISPPLANE_8BPP; |
break; |
case DRM_FORMAT_RGB565: |
dspcntr |= DISPPLANE_BGRX565; |
case 16: |
if (fb->depth != 16) |
return -EINVAL; |
dspcntr |= DISPPLANE_16BPP; |
break; |
case DRM_FORMAT_XRGB8888: |
case DRM_FORMAT_ARGB8888: |
dspcntr |= DISPPLANE_BGRX888; |
case 24: |
case 32: |
if (fb->depth == 24) |
dspcntr |= DISPPLANE_32BPP_NO_ALPHA; |
else if (fb->depth == 30) |
dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; |
else |
return -EINVAL; |
break; |
case DRM_FORMAT_XBGR8888: |
case DRM_FORMAT_ABGR8888: |
dspcntr |= DISPPLANE_RGBX888; |
break; |
case DRM_FORMAT_XRGB2101010: |
case DRM_FORMAT_ARGB2101010: |
dspcntr |= DISPPLANE_BGRX101010; |
break; |
case DRM_FORMAT_XBGR2101010: |
case DRM_FORMAT_ABGR2101010: |
dspcntr |= DISPPLANE_RGBX101010; |
break; |
default: |
DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); |
DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); |
return -EINVAL; |
} |
2205,7 → 2112,7 |
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); |
intel_crtc->dspaddr_offset = |
intel_gen4_compute_offset_xtiled(&x, &y, |
gen4_compute_dspaddr_offset_xtiled(&x, &y, |
fb->bits_per_pixel / 8, |
fb->pitches[0]); |
linear_offset -= intel_crtc->dspaddr_offset; |
2215,12 → 2122,8 |
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
I915_MODIFY_DISPBASE(DSPSURF(plane), |
obj->gtt_offset + intel_crtc->dspaddr_offset); |
if (IS_HASWELL(dev)) { |
I915_WRITE(DSPOFFSET(plane), (y << 16) | x); |
} else { |
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); |
I915_WRITE(DSPLINOFF(plane), linear_offset); |
} |
POSTING_READ(reg); |
return 0; |
2276,6 → 2179,7 |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_master_private *master_priv; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct drm_framebuffer *old_fb; |
int ret; |
2408,28 → 2312,17 |
FDI_FE_ERRC_ENABLE); |
} |
static void ivb_modeset_global_resources(struct drm_device *dev) |
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *pipe_B_crtc = |
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); |
struct intel_crtc *pipe_C_crtc = |
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); |
uint32_t temp; |
u32 flags = I915_READ(SOUTH_CHICKEN1); |
/* When everything is off disable fdi C so that we could enable fdi B |
* with all lanes. XXX: This misses the case where a pipe is not using |
* any pch resources and so doesn't need any fdi lanes. */ |
if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) { |
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); |
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); |
temp = I915_READ(SOUTH_CHICKEN1); |
temp &= ~FDI_BC_BIFURCATION_SELECT; |
DRM_DEBUG_KMS("disabling fdi C rx\n"); |
I915_WRITE(SOUTH_CHICKEN1, temp); |
flags |= FDI_PHASE_SYNC_OVR(pipe); |
I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ |
flags |= FDI_PHASE_SYNC_EN(pipe); |
I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ |
POSTING_READ(SOUTH_CHICKEN1); |
} |
} |
/* The FDI link training functions for ILK/Ibexpeak. */ |
static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
2474,9 → 2367,11 |
udelay(150); |
/* Ironlake workaround, enable clock pointer after FDI enable*/ |
if (HAS_PCH_IBX(dev)) { |
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); |
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | |
FDI_RX_PHASE_SYNC_POINTER_EN); |
} |
reg = FDI_RX_IIR(pipe); |
for (tries = 0; tries < 5; tries++) { |
2565,9 → 2460,6 |
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
I915_WRITE(reg, temp | FDI_TX_ENABLE); |
I915_WRITE(FDI_RX_MISC(pipe), |
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); |
reg = FDI_RX_CTL(pipe); |
temp = I915_READ(reg); |
if (HAS_PCH_CPT(dev)) { |
2582,6 → 2474,9 |
POSTING_READ(reg); |
udelay(150); |
if (HAS_PCH_CPT(dev)) |
cpt_phase_pointer_enable(dev, pipe); |
for (i = 0; i < 4; i++) { |
reg = FDI_TX_CTL(pipe); |
temp = I915_READ(reg); |
2685,9 → 2580,6 |
POSTING_READ(reg); |
udelay(150); |
DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", |
I915_READ(FDI_RX_IIR(pipe))); |
/* enable CPU FDI TX and PCH FDI RX */ |
reg = FDI_TX_CTL(pipe); |
temp = I915_READ(reg); |
2700,9 → 2592,6 |
temp |= FDI_COMPOSITE_SYNC; |
I915_WRITE(reg, temp | FDI_TX_ENABLE); |
I915_WRITE(FDI_RX_MISC(pipe), |
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); |
reg = FDI_RX_CTL(pipe); |
temp = I915_READ(reg); |
temp &= ~FDI_LINK_TRAIN_AUTO; |
2714,6 → 2603,9 |
POSTING_READ(reg); |
udelay(150); |
if (HAS_PCH_CPT(dev)) |
cpt_phase_pointer_enable(dev, pipe); |
for (i = 0; i < 4; i++) { |
reg = FDI_TX_CTL(pipe); |
temp = I915_READ(reg); |
2731,7 → 2623,7 |
if (temp & FDI_RX_BIT_LOCK || |
(I915_READ(reg) & FDI_RX_BIT_LOCK)) { |
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i); |
DRM_DEBUG_KMS("FDI train 1 done.\n"); |
break; |
} |
} |
2772,7 → 2664,7 |
if (temp & FDI_RX_SYMBOL_LOCK) { |
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); |
DRM_DEBUG_KMS("FDI train 2 done.\n"); |
break; |
} |
} |
2789,6 → 2681,9 |
int pipe = intel_crtc->pipe; |
u32 reg, temp; |
/* Write the TU size bits so error detection works */ |
I915_WRITE(FDI_RX_TUSIZE1(pipe), |
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); |
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
reg = FDI_RX_CTL(pipe); |
2852,6 → 2747,17 |
udelay(100); |
} |
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 flags = I915_READ(SOUTH_CHICKEN1); |
flags &= ~(FDI_PHASE_SYNC_EN(pipe)); |
I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */ |
flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); |
I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ |
POSTING_READ(SOUTH_CHICKEN1); |
} |
static void ironlake_fdi_disable(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
2878,6 → 2784,11 |
/* Ironlake workaround, disable clock pointer after downing FDI */ |
if (HAS_PCH_IBX(dev)) { |
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); |
I915_WRITE(FDI_RX_CHICKEN(pipe), |
I915_READ(FDI_RX_CHICKEN(pipe) & |
~FDI_RX_PHASE_SYNC_POINTER_EN)); |
} else if (HAS_PCH_CPT(dev)) { |
cpt_phase_pointer_disable(dev, pipe); |
} |
/* still set train pattern 1 */ |
2940,7 → 2851,7 |
} |
#endif |
static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc) |
static bool intel_crtc_driving_pch(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
struct intel_encoder *intel_encoder; |
2950,6 → 2861,23 |
* must be driven by its own crtc; no sharing is possible. |
*/ |
for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell |
* CPU handles all others */ |
if (IS_HASWELL(dev)) { |
/* It is still unclear how this will work on PPT, so throw up a warning */ |
WARN_ON(!HAS_PCH_LPT(dev)); |
if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { |
DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n"); |
return true; |
} else { |
DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n", |
intel_encoder->type); |
return false; |
} |
} |
switch (intel_encoder->type) { |
case INTEL_OUTPUT_EDP: |
if (!intel_encoder_is_pch_edp(&intel_encoder->base)) |
2961,11 → 2889,6 |
return true; |
} |
static bool haswell_crtc_driving_pch(struct drm_crtc *crtc) |
{ |
return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG); |
} |
/* Program iCLKIP clock to the desired frequency */ |
static void lpt_program_iclkip(struct drm_crtc *crtc) |
{ |
2981,9 → 2904,8 |
/* Disable SSCCTL */ |
intel_sbi_write(dev_priv, SBI_SSCCTL6, |
intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) | |
SBI_SSCCTL_DISABLE, |
SBI_ICLK); |
intel_sbi_read(dev_priv, SBI_SSCCTL6) | |
SBI_SSCCTL_DISABLE); |
/* 20MHz is a corner case which is out of range for the 7-bit divisor */ |
if (crtc->mode.clock == 20000) { |
3024,7 → 2946,7 |
phaseinc); |
/* Program SSCDIVINTPHASE6 */ |
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); |
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6); |
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; |
temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); |
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; |
3031,18 → 2953,26 |
temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); |
temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); |
temp |= SBI_SSCDIVINTPHASE_PROPAGATE; |
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); |
intel_sbi_write(dev_priv, |
SBI_SSCDIVINTPHASE6, |
temp); |
/* Program SSCAUXDIV */ |
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); |
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6); |
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); |
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); |
intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); |
intel_sbi_write(dev_priv, |
SBI_SSCAUXDIV6, |
temp); |
/* Enable modulator and associated divider */ |
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); |
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6); |
temp &= ~SBI_SSCCTL_DISABLE; |
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); |
intel_sbi_write(dev_priv, |
SBI_SSCCTL6, |
temp); |
/* Wait for initialization time */ |
udelay(24); |
3068,24 → 2998,15 |
assert_transcoder_disabled(dev_priv, pipe); |
/* Write the TU size bits before fdi link training, so that error |
* detection works. */ |
I915_WRITE(FDI_RX_TUSIZE1(pipe), |
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); |
/* For PCH output, training FDI link */ |
dev_priv->display.fdi_link_train(crtc); |
/* XXX: pch pll's can be enabled any time before we enable the PCH |
* transcoder, and we actually should do this to not upset any PCH |
* transcoder that already use the clock when we share it. |
* |
* Note that enable_pch_pll tries to do the right thing, but get_pch_pll |
* unconditionally resets the pll - we need that to have the right LVDS |
* enable sequence. */ |
ironlake_enable_pch_pll(intel_crtc); |
intel_enable_pch_pll(intel_crtc); |
if (HAS_PCH_CPT(dev)) { |
if (HAS_PCH_LPT(dev)) { |
DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n"); |
lpt_program_iclkip(crtc); |
} else if (HAS_PCH_CPT(dev)) { |
u32 sel; |
temp = I915_READ(PCH_DPLL_SEL); |
3122,6 → 3043,7 |
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); |
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); |
if (!IS_HASWELL(dev)) |
intel_fdi_normal_train(crtc); |
/* For PCH DP, enable TRANS_DP_CTL */ |
3154,39 → 3076,17 |
temp |= TRANS_DP_PORT_SEL_D; |
break; |
default: |
BUG(); |
DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); |
temp |= TRANS_DP_PORT_SEL_B; |
break; |
} |
I915_WRITE(reg, temp); |
} |
ironlake_enable_pch_transcoder(dev_priv, pipe); |
intel_enable_transcoder(dev_priv, pipe); |
} |
static void lpt_pch_enable(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
assert_transcoder_disabled(dev_priv, TRANSCODER_A); |
lpt_program_iclkip(crtc); |
/* Set transcoder timing. */ |
I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder))); |
I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder))); |
I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder))); |
I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder))); |
I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder))); |
I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder))); |
I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder))); |
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); |
} |
static void intel_put_pch_pll(struct intel_crtc *intel_crtc) |
{ |
struct intel_pch_pll *pll = intel_crtc->pch_pll; |
3277,12 → 3177,16 |
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int dslreg = PIPEDSL(pipe); |
int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe); |
u32 temp; |
temp = I915_READ(dslreg); |
udelay(500); |
if (wait_for(I915_READ(dslreg) != temp, 5)) { |
/* Without this, mode sets may fail silently on FDI */ |
I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS); |
udelay(250); |
I915_WRITE(tc2reg, 0); |
if (wait_for(I915_READ(dslreg) != temp, 5)) |
DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); |
} |
3313,12 → 3217,9 |
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); |
} |
is_pch_port = ironlake_crtc_driving_pch(crtc); |
is_pch_port = intel_crtc_driving_pch(crtc); |
if (is_pch_port) { |
/* Note: FDI PLL enabling _must_ be done before we enable the |
* cpu pipes, hence this is separate from all the other fdi/pch |
* enabling. */ |
ironlake_fdi_pll_enable(intel_crtc); |
} else { |
assert_fdi_tx_disabled(dev_priv, pipe); |
3331,16 → 3232,11 |
/* Enable panel fitting for LVDS */ |
if (dev_priv->pch_pf_size && |
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || |
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { |
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { |
/* Force use of hard-coded filter coefficients |
* as some pre-programmed values are broken, |
* e.g. x201. |
*/ |
if (IS_IVYBRIDGE(dev)) |
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | |
PF_PIPE_SEL_IVB(pipe)); |
else |
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); |
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); |
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); |
3381,83 → 3277,6 |
intel_wait_for_vblank(dev, intel_crtc->pipe); |
} |
static void haswell_crtc_enable(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_encoder *encoder; |
int pipe = intel_crtc->pipe; |
int plane = intel_crtc->plane; |
bool is_pch_port; |
WARN_ON(!crtc->enabled); |
if (intel_crtc->active) |
return; |
intel_crtc->active = true; |
intel_update_watermarks(dev); |
is_pch_port = haswell_crtc_driving_pch(crtc); |
if (is_pch_port) |
dev_priv->display.fdi_link_train(crtc); |
for_each_encoder_on_crtc(dev, crtc, encoder) |
if (encoder->pre_enable) |
encoder->pre_enable(encoder); |
intel_ddi_enable_pipe_clock(intel_crtc); |
/* Enable panel fitting for eDP */ |
if (dev_priv->pch_pf_size && |
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { |
/* Force use of hard-coded filter coefficients |
* as some pre-programmed values are broken, |
* e.g. x201. |
*/ |
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | |
PF_PIPE_SEL_IVB(pipe)); |
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); |
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); |
} |
/* |
* On ILK+ LUT must be loaded before the pipe is running but with |
* clocks enabled |
*/ |
intel_crtc_load_lut(crtc); |
intel_ddi_set_pipe_settings(crtc); |
intel_ddi_enable_pipe_func(crtc); |
intel_enable_pipe(dev_priv, pipe, is_pch_port); |
intel_enable_plane(dev_priv, plane, pipe); |
if (is_pch_port) |
lpt_pch_enable(crtc); |
mutex_lock(&dev->struct_mutex); |
intel_update_fbc(dev); |
mutex_unlock(&dev->struct_mutex); |
// intel_crtc_update_cursor(crtc, true); |
for_each_encoder_on_crtc(dev, crtc, encoder) |
encoder->enable(encoder); |
/* |
* There seems to be a race in PCH platform hw (at least on some |
* outputs) where an enabled pipe still completes any pageflip right |
* away (as if the pipe is off) instead of waiting for vblank. As soon |
* as the first vblank happend, everything works as expected. Hence just |
* wait for one vblank before returning to avoid strange things |
* happening. |
*/ |
intel_wait_for_vblank(dev, intel_crtc->pipe); |
} |
static void ironlake_crtc_disable(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
3496,7 → 3315,7 |
ironlake_fdi_disable(crtc); |
ironlake_disable_pch_transcoder(dev_priv, pipe); |
intel_disable_transcoder(dev_priv, pipe); |
if (HAS_PCH_CPT(dev)) { |
/* disable TRANS_DP_CTL */ |
3538,58 → 3357,6 |
mutex_unlock(&dev->struct_mutex); |
} |
static void haswell_crtc_disable(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_encoder *encoder; |
int pipe = intel_crtc->pipe; |
int plane = intel_crtc->plane; |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
bool is_pch_port; |
if (!intel_crtc->active) |
return; |
is_pch_port = haswell_crtc_driving_pch(crtc); |
for_each_encoder_on_crtc(dev, crtc, encoder) |
encoder->disable(encoder); |
intel_disable_plane(dev_priv, plane, pipe); |
if (dev_priv->cfb_plane == plane) |
intel_disable_fbc(dev); |
intel_disable_pipe(dev_priv, pipe); |
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); |
/* Disable PF */ |
I915_WRITE(PF_CTL(pipe), 0); |
I915_WRITE(PF_WIN_SZ(pipe), 0); |
intel_ddi_disable_pipe_clock(intel_crtc); |
for_each_encoder_on_crtc(dev, crtc, encoder) |
if (encoder->post_disable) |
encoder->post_disable(encoder); |
if (is_pch_port) { |
lpt_disable_pch_transcoder(dev_priv); |
intel_ddi_fdi_disable(crtc); |
} |
intel_crtc->active = false; |
intel_update_watermarks(dev); |
mutex_lock(&dev->struct_mutex); |
intel_update_fbc(dev); |
mutex_unlock(&dev->struct_mutex); |
} |
static void ironlake_crtc_off(struct drm_crtc *crtc) |
{ |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3596,17 → 3363,6 |
intel_put_pch_pll(intel_crtc); |
} |
static void haswell_crtc_off(struct drm_crtc *crtc) |
{ |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
/* Stop saying we're using TRANSCODER_EDP because some other CRTC might |
* start using it. */ |
intel_crtc->cpu_transcoder = intel_crtc->pipe; |
intel_ddi_put_crtc_pll(crtc); |
} |
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) |
{ |
if (!enable && intel_crtc->overlay) { |
4105,7 → 3861,7 |
/* Use VBT settings if we have an eDP panel */ |
unsigned int edp_bpc = dev_priv->edp.bpp / 3; |
if (edp_bpc && edp_bpc < display_bpc) { |
if (edp_bpc < display_bpc) { |
DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); |
display_bpc = edp_bpc; |
} |
4321,7 → 4077,7 |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, |
intel_clock_t *clock, intel_clock_t *reduced_clock, |
int num_connectors) |
int refclk, int num_connectors) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
4329,20 → 4085,10 |
int pipe = intel_crtc->pipe; |
u32 dpll, mdiv, pdiv; |
u32 bestn, bestm1, bestm2, bestp1, bestp2; |
bool is_sdvo; |
u32 temp; |
bool is_hdmi; |
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || |
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); |
is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); |
dpll = DPLL_VGA_MODE_DIS; |
dpll |= DPLL_EXT_BUFFER_ENABLE_VLV; |
dpll |= DPLL_REFA_CLK_ENABLE_VLV; |
dpll |= DPLL_INTEGRATED_CLOCK_VLV; |
I915_WRITE(DPLL(pipe), dpll); |
POSTING_READ(DPLL(pipe)); |
bestn = clock->n; |
bestm1 = clock->m1; |
bestm2 = clock->m2; |
4349,10 → 4095,12 |
bestp1 = clock->p1; |
bestp2 = clock->p2; |
/* |
* In Valleyview PLL and program lane counter registers are exposed |
* through DPIO interface |
*/ |
/* Enable DPIO clock input */ |
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; |
I915_WRITE(DPLL(pipe), dpll); |
POSTING_READ(DPLL(pipe)); |
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); |
mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); |
mdiv |= ((bestn << DPIO_N_SHIFT)); |
4363,13 → 4111,12 |
intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000); |
pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) | |
pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) | |
(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) | |
(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) | |
(5 << DPIO_CLK_BIAS_CTL_SHIFT); |
(8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT); |
intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv); |
intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b); |
intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051); |
dpll |= DPLL_VCO_ENABLE; |
I915_WRITE(DPLL(pipe), dpll); |
4377,45 → 4124,20 |
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) |
DRM_ERROR("DPLL %d failed to lock\n", pipe); |
intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620); |
if (is_hdmi) { |
u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode); |
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) |
intel_dp_set_m_n(crtc, mode, adjusted_mode); |
I915_WRITE(DPLL(pipe), dpll); |
/* Wait for the clocks to stabilize. */ |
POSTING_READ(DPLL(pipe)); |
udelay(150); |
temp = 0; |
if (is_sdvo) { |
temp = intel_mode_get_pixel_multiplier(adjusted_mode); |
if (temp > 1) |
temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; |
else |
temp = 0; |
} |
I915_WRITE(DPLL_MD(pipe), temp); |
POSTING_READ(DPLL_MD(pipe)); |
} |
/* Now program lane control registers */ |
if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) |
|| intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) |
{ |
temp = 0x1000C4; |
if(pipe == 1) |
temp |= (1 << 21); |
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp); |
intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */ |
} |
if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP)) |
{ |
temp = 0x1000C4; |
if(pipe == 1) |
temp |= (1 << 21); |
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); |
} |
} |
static void i9xx_update_pll(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
4430,8 → 4152,6 |
u32 dpll; |
bool is_sdvo; |
i9xx_update_pll_dividers(crtc, clock, reduced_clock); |
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || |
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); |
4532,7 → 4252,7 |
static void i8xx_update_pll(struct drm_crtc *crtc, |
struct drm_display_mode *adjusted_mode, |
intel_clock_t *clock, intel_clock_t *reduced_clock, |
intel_clock_t *clock, |
int num_connectors) |
{ |
struct drm_device *dev = crtc->dev; |
4541,8 → 4261,6 |
int pipe = intel_crtc->pipe; |
u32 dpll; |
i9xx_update_pll_dividers(crtc, clock, reduced_clock); |
dpll = DPLL_VGA_MODE_DIS; |
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
4592,64 → 4310,6 |
I915_WRITE(DPLL(pipe), dpll); |
} |
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
struct drm_device *dev = intel_crtc->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
enum pipe pipe = intel_crtc->pipe; |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
uint32_t vsyncshift; |
if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
/* the chip adds 2 halflines automatically */ |
adjusted_mode->crtc_vtotal -= 1; |
adjusted_mode->crtc_vblank_end -= 1; |
vsyncshift = adjusted_mode->crtc_hsync_start |
- adjusted_mode->crtc_htotal / 2; |
} else { |
vsyncshift = 0; |
} |
if (INTEL_INFO(dev)->gen > 3) |
I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); |
I915_WRITE(HTOTAL(cpu_transcoder), |
(adjusted_mode->crtc_hdisplay - 1) | |
((adjusted_mode->crtc_htotal - 1) << 16)); |
I915_WRITE(HBLANK(cpu_transcoder), |
(adjusted_mode->crtc_hblank_start - 1) | |
((adjusted_mode->crtc_hblank_end - 1) << 16)); |
I915_WRITE(HSYNC(cpu_transcoder), |
(adjusted_mode->crtc_hsync_start - 1) | |
((adjusted_mode->crtc_hsync_end - 1) << 16)); |
I915_WRITE(VTOTAL(cpu_transcoder), |
(adjusted_mode->crtc_vdisplay - 1) | |
((adjusted_mode->crtc_vtotal - 1) << 16)); |
I915_WRITE(VBLANK(cpu_transcoder), |
(adjusted_mode->crtc_vblank_start - 1) | |
((adjusted_mode->crtc_vblank_end - 1) << 16)); |
I915_WRITE(VSYNC(cpu_transcoder), |
(adjusted_mode->crtc_vsync_start - 1) | |
((adjusted_mode->crtc_vsync_end - 1) << 16)); |
/* Workaround: when the EDP input selection is B, the VTOTAL_B must be |
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is |
* documented on the DDI_FUNC_CTL register description, EDP Input Select |
* bits. */ |
if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && |
(pipe == PIPE_B || pipe == PIPE_C)) |
I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); |
/* pipesrc controls the size that is scaled from, which should |
* always be the user's requested size. |
*/ |
I915_WRITE(PIPESRC(pipe), |
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); |
} |
static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, |
4663,7 → 4323,7 |
int plane = intel_crtc->plane; |
int refclk, num_connectors = 0; |
intel_clock_t clock, reduced_clock; |
u32 dspcntr, pipeconf; |
u32 dspcntr, pipeconf, vsyncshift; |
bool ok, has_reduced_clock = false, is_sdvo = false; |
bool is_lvds = false, is_tv = false, is_dp = false; |
struct intel_encoder *encoder; |
4727,14 → 4387,14 |
if (is_sdvo && is_tv) |
i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); |
i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ? |
&reduced_clock : NULL); |
if (IS_GEN2(dev)) |
i8xx_update_pll(crtc, adjusted_mode, &clock, |
has_reduced_clock ? &reduced_clock : NULL, |
num_connectors); |
i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors); |
else if (IS_VALLEYVIEW(dev)) |
vlv_update_pll(crtc, mode, adjusted_mode, &clock, |
has_reduced_clock ? &reduced_clock : NULL, |
num_connectors); |
vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL, |
refclk, num_connectors); |
else |
i9xx_update_pll(crtc, mode, adjusted_mode, &clock, |
has_reduced_clock ? &reduced_clock : NULL, |
4775,14 → 4435,6 |
} |
} |
if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { |
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
pipeconf |= PIPECONF_BPP_6 | |
PIPECONF_ENABLE | |
I965_PIPECONF_ACTIVE; |
} |
} |
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
drm_mode_debug_printmodeline(mode); |
4798,13 → 4450,41 |
pipeconf &= ~PIPECONF_INTERLACE_MASK; |
if (!IS_GEN2(dev) && |
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) |
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; |
else |
/* the chip adds 2 halflines automatically */ |
adjusted_mode->crtc_vtotal -= 1; |
adjusted_mode->crtc_vblank_end -= 1; |
vsyncshift = adjusted_mode->crtc_hsync_start |
- adjusted_mode->crtc_htotal/2; |
} else { |
pipeconf |= PIPECONF_PROGRESSIVE; |
vsyncshift = 0; |
} |
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
if (!IS_GEN3(dev)) |
I915_WRITE(VSYNCSHIFT(pipe), vsyncshift); |
I915_WRITE(HTOTAL(pipe), |
(adjusted_mode->crtc_hdisplay - 1) | |
((adjusted_mode->crtc_htotal - 1) << 16)); |
I915_WRITE(HBLANK(pipe), |
(adjusted_mode->crtc_hblank_start - 1) | |
((adjusted_mode->crtc_hblank_end - 1) << 16)); |
I915_WRITE(HSYNC(pipe), |
(adjusted_mode->crtc_hsync_start - 1) | |
((adjusted_mode->crtc_hsync_end - 1) << 16)); |
I915_WRITE(VTOTAL(pipe), |
(adjusted_mode->crtc_vdisplay - 1) | |
((adjusted_mode->crtc_vtotal - 1) << 16)); |
I915_WRITE(VBLANK(pipe), |
(adjusted_mode->crtc_vblank_start - 1) | |
((adjusted_mode->crtc_vblank_end - 1) << 16)); |
I915_WRITE(VSYNC(pipe), |
(adjusted_mode->crtc_vsync_start - 1) | |
((adjusted_mode->crtc_vsync_end - 1) << 16)); |
/* pipesrc and dspsize control the size that is scaled from, |
* which should always be the user's requested size. |
*/ |
4812,6 → 4492,8 |
((mode->vdisplay - 1) << 16) | |
(mode->hdisplay - 1)); |
I915_WRITE(DSPPOS(plane), 0); |
I915_WRITE(PIPESRC(pipe), |
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); |
I915_WRITE(PIPECONF(pipe), pipeconf); |
POSTING_READ(PIPECONF(pipe)); |
4829,7 → 4511,10 |
return ret; |
} |
static void ironlake_init_pch_refclk(struct drm_device *dev) |
/* |
* Initialize reference clocks when the driver loads |
*/ |
void ironlake_init_pch_refclk(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_mode_config *mode_config = &dev->mode_config; |
4943,182 → 4628,6 |
} |
} |
/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	bool has_vga = false;
	bool is_sdv = false;
	u32 tmp;

	/* CLKOUT_DP is only needed when an analog (VGA) encoder is present;
	 * bail out early otherwise. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		}
	}

	if (!has_vga)
		return;

	/* XXX: Rip out SDV support once Haswell ships for real. */
	if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
		is_sdv = true;

	/* Enable the SSC clock path but keep it parked (PATHALT set)
	 * while it spins up. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	/* Un-park the clock path after the settle delay. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	if (!is_sdv) {
		/* Pulse the FDI mPHY reset: assert, wait for status... */
		tmp = I915_READ(SOUTH_CHICKEN2);
		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
		I915_WRITE(SOUTH_CHICKEN2, tmp);

		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
			DRM_ERROR("FDI mPHY reset assert timeout\n");

		/* ...then de-assert and wait for the status bit to clear. */
		tmp = I915_READ(SOUTH_CHICKEN2);
		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
		I915_WRITE(SOUTH_CHICKEN2, tmp);

		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
					FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
				       100))
			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
	}

	/* The writes below program mPHY tuning registers over the sideband
	 * interface. NOTE(review): the 0x2008..0x21EC offsets and the values
	 * are magic taken from the platform programming sequence — confirm
	 * against the hardware workaround documentation before changing. */
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	if (!is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
		tmp &= ~(0x3 << 6);
		tmp |= (1 << 6) | (1 << 0);
		intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
	}

	if (is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
		tmp |= 0x7FFF;
		intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
	}

	/* Each register pair (0x2xxx / 0x21xx) programs the two FDI links
	 * with identical settings. */
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	if (is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
		intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
		intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
		tmp |= (0x3F << 8);
		intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
		tmp |= (0x3F << 8);
		intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
	}

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	if (!is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
		tmp &= ~(7 << 13);
		tmp |= (5 << 13);
		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
		tmp &= ~(7 << 13);
		tmp |= (5 << 13);
		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
	}

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	if (!is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
		tmp |= (1 << 27);
		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
		tmp |= (1 << 27);
		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
		tmp &= ~(0xF << 28);
		tmp |= (4 << 28);
		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
		tmp &= ~(0xF << 28);
		tmp |= (4 << 28);
		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
	}

	/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
	tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
	tmp |= SBI_DBUFF0_ENABLE;
	intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
}
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	/* Dispatch to the refclk init sequence for the detected PCH type;
	 * unknown PCH types are left untouched. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		ironlake_init_pch_refclk(dev);
		return;
	}

	if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
static int ironlake_get_refclk(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
5175,8 → 4684,8 |
val |= PIPE_12BPC; |
break; |
default: |
/* Case prevented by intel_choose_pipe_bpp_dither. */ |
BUG(); |
val |= PIPE_8BPC; |
break; |
} |
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); |
5193,31 → 4702,6 |
POSTING_READ(PIPECONF(pipe)); |
} |
static void haswell_set_pipeconf(struct drm_crtc *crtc, |
struct drm_display_mode *adjusted_mode, |
bool dither) |
{ |
struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
uint32_t val; |
val = I915_READ(PIPECONF(cpu_transcoder)); |
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); |
if (dither) |
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); |
val &= ~PIPECONF_INTERLACE_MASK_HSW; |
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) |
val |= PIPECONF_INTERLACED_ILK; |
else |
val |= PIPECONF_PROGRESSIVE; |
I915_WRITE(PIPECONF(cpu_transcoder), val); |
POSTING_READ(PIPECONF(cpu_transcoder)); |
} |
static bool ironlake_compute_clocks(struct drm_crtc *crtc, |
struct drm_display_mode *adjusted_mode, |
intel_clock_t *clock, |
5281,126 → 4765,74 |
return true; |
} |
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t temp; |
temp = I915_READ(SOUTH_CHICKEN1); |
if (temp & FDI_BC_BIFURCATION_SELECT) |
return; |
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); |
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); |
temp |= FDI_BC_BIFURCATION_SELECT; |
DRM_DEBUG_KMS("enabling fdi C rx\n"); |
I915_WRITE(SOUTH_CHICKEN1, temp); |
POSTING_READ(SOUTH_CHICKEN1); |
} |
/* Validate the FDI lane count requested for this CRTC against the
 * per-pipe lane-sharing constraints, enabling B/C bifurcation where
 * needed. On an invalid configuration the lane count is clamped (so the
 * hardware is never programmed with bogus values) and false is returned.
 *
 * Side effects: may rewrite intel_crtc->fdi_lanes and may call
 * cpt_enable_fdi_bc_bifurcation(). */
static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);

	DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
		      intel_crtc->pipe, intel_crtc->fdi_lanes);
	/* 4 lanes is the hardware maximum for any single pipe. */
	if (intel_crtc->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
			      intel_crtc->pipe, intel_crtc->fdi_lanes);
		/* Clamp lanes to avoid programming the hw with bogus values. */
		intel_crtc->fdi_lanes = 4;

		return false;
	}

	/* With only two pipes there is no B/C lane sharing to worry about. */
	if (dev_priv->num_pipe == 2)
		return true;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		/* Pipe A has its own lanes; always fine. */
		return true;
	case PIPE_B:
		/* B may only use more than 2 lanes while C is disabled. */
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
		    intel_crtc->fdi_lanes > 2) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
				      intel_crtc->pipe, intel_crtc->fdi_lanes);
			/* Clamp lanes to avoid programming the hw with bogus values. */
			intel_crtc->fdi_lanes = 2;

			return false;
		}

		/* NOTE(review): >2 lanes means B owns C's lanes, so
		 * bifurcation must be off (WARN if set); otherwise lane
		 * sharing is enabled — confirm against bspec. */
		if (intel_crtc->fdi_lanes > 2)
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
		else
			cpt_enable_fdi_bc_bifurcation(dev);

		return true;
	case PIPE_C:
		/* C can only run when B leaves it lanes: B disabled or <=2. */
		if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
			if (intel_crtc->fdi_lanes > 2) {
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
					      intel_crtc->pipe, intel_crtc->fdi_lanes);
				/* Clamp lanes to avoid programming the hw with bogus values. */
				intel_crtc->fdi_lanes = 2;

				return false;
			}
		} else {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return false;
		}

		cpt_enable_fdi_bc_bifurcation(dev);

		return true;
	default:
		BUG();
	}
}
/* Compute how many FDI lanes are needed to carry target_clock pixels
 * at bpp bits per pixel over a link running at link_bw. */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	unsigned int padded_bps = target_clock * bpp * 21 / 20;
	unsigned int per_lane = link_bw * 8;

	return padded_bps / per_lane + 1;
}
static void ironlake_set_m_n(struct drm_crtc *crtc, |
static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
struct drm_display_mode *adjusted_mode, |
int x, int y, |
struct drm_framebuffer *fb) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
struct intel_encoder *intel_encoder, *edp_encoder = NULL; |
int pipe = intel_crtc->pipe; |
int plane = intel_crtc->plane; |
int num_connectors = 0; |
intel_clock_t clock, reduced_clock; |
u32 dpll, fp = 0, fp2 = 0; |
bool ok, has_reduced_clock = false, is_sdvo = false; |
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
struct intel_encoder *encoder, *edp_encoder = NULL; |
int ret; |
struct fdi_m_n m_n = {0}; |
int target_clock, pixel_multiplier, lane, link_bw; |
bool is_dp = false, is_cpu_edp = false; |
u32 temp; |
int target_clock, pixel_multiplier, lane, link_bw, factor; |
unsigned int pipe_bpp; |
bool dither; |
bool is_cpu_edp = false, is_pch_edp = false; |
for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
switch (intel_encoder->type) { |
for_each_encoder_on_crtc(dev, crtc, encoder) { |
switch (encoder->type) { |
case INTEL_OUTPUT_LVDS: |
is_lvds = true; |
break; |
case INTEL_OUTPUT_SDVO: |
case INTEL_OUTPUT_HDMI: |
is_sdvo = true; |
if (encoder->needs_tv_clock) |
is_tv = true; |
break; |
case INTEL_OUTPUT_TVOUT: |
is_tv = true; |
break; |
case INTEL_OUTPUT_ANALOG: |
is_crt = true; |
break; |
case INTEL_OUTPUT_DISPLAYPORT: |
is_dp = true; |
break; |
case INTEL_OUTPUT_EDP: |
is_dp = true; |
if (!intel_encoder_is_pch_edp(&intel_encoder->base)) |
if (intel_encoder_is_pch_edp(&encoder->base)) |
is_pch_edp = true; |
else |
is_cpu_edp = true; |
edp_encoder = intel_encoder; |
edp_encoder = encoder; |
break; |
} |
num_connectors++; |
} |
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, |
&has_reduced_clock, &reduced_clock); |
if (!ok) { |
DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
return -EINVAL; |
} |
/* Ensure that the cursor is valid for the new mode before changing... */ |
// intel_crtc_update_cursor(crtc, true); |
/* FDI link */ |
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
lane = 0; |
5427,10 → 4859,30 |
else |
target_clock = adjusted_mode->clock; |
if (!lane) |
lane = ironlake_get_lanes_required(target_clock, link_bw, |
intel_crtc->bpp); |
/* determine panel color depth */ |
dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, |
adjusted_mode); |
if (is_lvds && dev_priv->lvds_dither) |
dither = true; |
if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 && |
pipe_bpp != 36) { |
WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", |
pipe_bpp); |
pipe_bpp = 24; |
} |
intel_crtc->bpp = pipe_bpp; |
if (!lane) { |
/* |
* Account for spread spectrum to avoid |
* oversubscribing the link. Max center spread |
* is 2.5%; use 5% for safety's sake. |
*/ |
u32 bps = target_clock * intel_crtc->bpp * 21 / 20; |
lane = bps / (link_bw * 8) + 1; |
} |
intel_crtc->fdi_lanes = lane; |
if (pixel_multiplier > 1) |
5438,52 → 4890,11 |
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, |
&m_n); |
I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m); |
I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); |
I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); |
I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); |
} |
fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
if (has_reduced_clock) |
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
reduced_clock.m2; |
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, |
struct drm_display_mode *adjusted_mode, |
intel_clock_t *clock, u32 fp) |
{ |
struct drm_crtc *crtc = &intel_crtc->base; |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_encoder *intel_encoder; |
uint32_t dpll; |
int factor, pixel_multiplier, num_connectors = 0; |
bool is_lvds = false, is_sdvo = false, is_tv = false; |
bool is_dp = false, is_cpu_edp = false; |
for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
switch (intel_encoder->type) { |
case INTEL_OUTPUT_LVDS: |
is_lvds = true; |
break; |
case INTEL_OUTPUT_SDVO: |
case INTEL_OUTPUT_HDMI: |
is_sdvo = true; |
if (intel_encoder->needs_tv_clock) |
is_tv = true; |
break; |
case INTEL_OUTPUT_TVOUT: |
is_tv = true; |
break; |
case INTEL_OUTPUT_DISPLAYPORT: |
is_dp = true; |
break; |
case INTEL_OUTPUT_EDP: |
is_dp = true; |
if (!intel_encoder_is_pch_edp(&intel_encoder->base)) |
is_cpu_edp = true; |
break; |
} |
num_connectors++; |
} |
/* Enable autotuning of the PLL clock (if permissible) */ |
factor = 21; |
if (is_lvds) { |
5494,7 → 4905,7 |
} else if (is_sdvo && is_tv) |
factor = 20; |
if (clock->m < factor * clock->n) |
if (clock.m < factor * clock.n) |
fp |= FP_CB_TUNE; |
dpll = 0; |
5504,7 → 4915,7 |
else |
dpll |= DPLLB_MODE_DAC_SERIAL; |
if (is_sdvo) { |
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
if (pixel_multiplier > 1) { |
dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; |
} |
5514,11 → 4925,11 |
dpll |= DPLL_DVO_HIGH_SPEED; |
/* compute bitmask from p1 value */ |
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
/* also FPA1 */ |
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
switch (clock->p2) { |
switch (clock.p2) { |
case 5: |
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; |
break; |
5544,79 → 4955,15 |
else |
dpll |= PLL_REF_INPUT_DREFCLK; |
return dpll; |
} |
static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, |
int x, int y, |
struct drm_framebuffer *fb) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
int pipe = intel_crtc->pipe; |
int plane = intel_crtc->plane; |
int num_connectors = 0; |
intel_clock_t clock, reduced_clock; |
u32 dpll, fp = 0, fp2 = 0; |
bool ok, has_reduced_clock = false; |
bool is_lvds = false, is_dp = false, is_cpu_edp = false; |
struct intel_encoder *encoder; |
u32 temp; |
int ret; |
bool dither, fdi_config_ok; |
for_each_encoder_on_crtc(dev, crtc, encoder) { |
switch (encoder->type) { |
case INTEL_OUTPUT_LVDS: |
is_lvds = true; |
break; |
case INTEL_OUTPUT_DISPLAYPORT: |
is_dp = true; |
break; |
case INTEL_OUTPUT_EDP: |
is_dp = true; |
if (!intel_encoder_is_pch_edp(&encoder->base)) |
is_cpu_edp = true; |
break; |
} |
num_connectors++; |
} |
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), |
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); |
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, |
&has_reduced_clock, &reduced_clock); |
if (!ok) { |
DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
return -EINVAL; |
} |
/* Ensure that the cursor is valid for the new mode before changing... */ |
// intel_crtc_update_cursor(crtc, true); |
/* determine panel color depth */ |
dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, |
adjusted_mode); |
if (is_lvds && dev_priv->lvds_dither) |
dither = true; |
fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
if (has_reduced_clock) |
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
reduced_clock.m2; |
dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp); |
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
drm_mode_debug_printmodeline(mode); |
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ |
if (!is_cpu_edp) { |
/* CPU eDP is the only output that doesn't need a PCH PLL of its own on |
* pre-Haswell/LPT generation */ |
if (HAS_PCH_LPT(dev)) { |
DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n", |
pipe); |
} else if (!is_cpu_edp) { |
struct intel_pch_pll *pll; |
pll = intel_get_pch_pll(intel_crtc, dpll, fp); |
5702,232 → 5049,55 |
} |
} |
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
/* Note, this also computes intel_crtc->fdi_lanes which is used below in |
* ironlake_check_fdi_lanes. */ |
ironlake_set_m_n(crtc, mode, adjusted_mode); |
fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); |
if (is_cpu_edp) |
ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
ironlake_set_pipeconf(crtc, adjusted_mode, dither); |
intel_wait_for_vblank(dev, pipe); |
/* Set up the display plane register */ |
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); |
POSTING_READ(DSPCNTR(plane)); |
ret = intel_pipe_set_base(crtc, x, y, fb); |
intel_update_watermarks(dev); |
intel_update_linetime_watermarks(dev, pipe, adjusted_mode); |
return fdi_config_ok ? ret : -EINVAL; |
} |
static int haswell_crtc_mode_set(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, |
int x, int y, |
struct drm_framebuffer *fb) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
int pipe = intel_crtc->pipe; |
int plane = intel_crtc->plane; |
int num_connectors = 0; |
intel_clock_t clock, reduced_clock; |
u32 dpll = 0, fp = 0, fp2 = 0; |
bool ok, has_reduced_clock = false; |
bool is_lvds = false, is_dp = false, is_cpu_edp = false; |
struct intel_encoder *encoder; |
u32 temp; |
int ret; |
bool dither; |
for_each_encoder_on_crtc(dev, crtc, encoder) { |
switch (encoder->type) { |
case INTEL_OUTPUT_LVDS: |
is_lvds = true; |
break; |
case INTEL_OUTPUT_DISPLAYPORT: |
is_dp = true; |
break; |
case INTEL_OUTPUT_EDP: |
is_dp = true; |
if (!intel_encoder_is_pch_edp(&encoder->base)) |
is_cpu_edp = true; |
break; |
} |
num_connectors++; |
} |
if (is_cpu_edp) |
intel_crtc->cpu_transcoder = TRANSCODER_EDP; |
else |
intel_crtc->cpu_transcoder = pipe; |
/* We are not sure yet this won't happen. */ |
WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", |
INTEL_PCH_TYPE(dev)); |
WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", |
num_connectors, pipe_name(pipe)); |
WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) & |
(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE)); |
WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE); |
if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) |
return -EINVAL; |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { |
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, |
&has_reduced_clock, |
&reduced_clock); |
if (!ok) { |
DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
return -EINVAL; |
} |
} |
/* Ensure that the cursor is valid for the new mode before changing... */ |
// intel_crtc_update_cursor(crtc, true); |
/* determine panel color depth */ |
dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, |
adjusted_mode); |
if (is_lvds && dev_priv->lvds_dither) |
dither = true; |
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
drm_mode_debug_printmodeline(mode); |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { |
fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
if (has_reduced_clock) |
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
reduced_clock.m2; |
dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, |
fp); |
/* CPU eDP is the only output that doesn't need a PCH PLL of its |
* own on pre-Haswell/LPT generation */ |
if (!is_cpu_edp) { |
struct intel_pch_pll *pll; |
pll = intel_get_pch_pll(intel_crtc, dpll, fp); |
if (pll == NULL) { |
DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", |
pipe); |
return -EINVAL; |
} |
} else |
intel_put_pch_pll(intel_crtc); |
/* The LVDS pin pair needs to be on before the DPLLs are |
* enabled. This is an exception to the general rule that |
* mode_set doesn't turn things on. |
*/ |
if (is_lvds) { |
temp = I915_READ(PCH_LVDS); |
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
if (HAS_PCH_CPT(dev)) { |
temp &= ~PORT_TRANS_SEL_MASK; |
temp |= PORT_TRANS_SEL_CPT(pipe); |
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
/* the chip adds 2 halflines automatically */ |
adjusted_mode->crtc_vtotal -= 1; |
adjusted_mode->crtc_vblank_end -= 1; |
I915_WRITE(VSYNCSHIFT(pipe), |
adjusted_mode->crtc_hsync_start |
- adjusted_mode->crtc_htotal/2); |
} else { |
if (pipe == 1) |
temp |= LVDS_PIPEB_SELECT; |
else |
temp &= ~LVDS_PIPEB_SELECT; |
I915_WRITE(VSYNCSHIFT(pipe), 0); |
} |
/* set the corresponsding LVDS_BORDER bit */ |
temp |= dev_priv->lvds_border_bits; |
/* Set the B0-B3 data pairs corresponding to whether |
* we're going to set the DPLLs for dual-channel mode or |
* not. |
*/ |
if (clock.p2 == 7) |
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
else |
temp &= ~(LVDS_B0B3_POWER_UP | |
LVDS_CLKB_POWER_UP); |
I915_WRITE(HTOTAL(pipe), |
(adjusted_mode->crtc_hdisplay - 1) | |
((adjusted_mode->crtc_htotal - 1) << 16)); |
I915_WRITE(HBLANK(pipe), |
(adjusted_mode->crtc_hblank_start - 1) | |
((adjusted_mode->crtc_hblank_end - 1) << 16)); |
I915_WRITE(HSYNC(pipe), |
(adjusted_mode->crtc_hsync_start - 1) | |
((adjusted_mode->crtc_hsync_end - 1) << 16)); |
/* It would be nice to set 24 vs 18-bit mode |
* (LVDS_A3_POWER_UP) appropriately here, but we need to |
* look more thoroughly into how panels behave in the |
* two modes. |
*/ |
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); |
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
temp |= LVDS_HSYNC_POLARITY; |
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) |
temp |= LVDS_VSYNC_POLARITY; |
I915_WRITE(PCH_LVDS, temp); |
} |
} |
I915_WRITE(VTOTAL(pipe), |
(adjusted_mode->crtc_vdisplay - 1) | |
((adjusted_mode->crtc_vtotal - 1) << 16)); |
I915_WRITE(VBLANK(pipe), |
(adjusted_mode->crtc_vblank_start - 1) | |
((adjusted_mode->crtc_vblank_end - 1) << 16)); |
I915_WRITE(VSYNC(pipe), |
(adjusted_mode->crtc_vsync_start - 1) | |
((adjusted_mode->crtc_vsync_end - 1) << 16)); |
if (is_dp && !is_cpu_edp) { |
intel_dp_set_m_n(crtc, mode, adjusted_mode); |
} else { |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { |
/* For non-DP output, clear any trans DP clock recovery |
* setting.*/ |
I915_WRITE(TRANSDATA_M1(pipe), 0); |
I915_WRITE(TRANSDATA_N1(pipe), 0); |
I915_WRITE(TRANSDPLINK_M1(pipe), 0); |
I915_WRITE(TRANSDPLINK_N1(pipe), 0); |
} |
} |
intel_crtc->lowfreq_avail = false; |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { |
if (intel_crtc->pch_pll) { |
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); |
/* Wait for the clocks to stabilize. */ |
POSTING_READ(intel_crtc->pch_pll->pll_reg); |
udelay(150); |
/* The pixel multiplier can only be updated once the |
* DPLL is enabled and the clocks are stable. |
* |
* So write it again. |
/* pipesrc controls the size that is scaled from, which should |
* always be the user's requested size. |
*/ |
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); |
} |
I915_WRITE(PIPESRC(pipe), |
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); |
if (intel_crtc->pch_pll) { |
if (is_lvds && has_reduced_clock && i915_powersave) { |
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); |
intel_crtc->lowfreq_avail = true; |
} else { |
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); |
} |
} |
} |
I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); |
I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); |
I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); |
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
if (!is_dp || is_cpu_edp) |
ironlake_set_m_n(crtc, mode, adjusted_mode); |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) |
if (is_cpu_edp) |
ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
haswell_set_pipeconf(crtc, adjusted_mode, dither); |
ironlake_set_pipeconf(crtc, adjusted_mode, dither); |
intel_wait_for_vblank(dev, pipe); |
/* Set up the display plane register */ |
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); |
POSTING_READ(DSPCNTR(plane)); |
5949,8 → 5119,6 |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_encoder_helper_funcs *encoder_funcs; |
struct intel_encoder *encoder; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
int pipe = intel_crtc->pipe; |
int ret; |
5961,21 → 5129,9 |
x, y, fb); |
drm_vblank_post_modeset(dev, pipe); |
if (ret != 0) |
return ret; |
for_each_encoder_on_crtc(dev, crtc, encoder) { |
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", |
encoder->base.base.id, |
drm_get_encoder_name(&encoder->base), |
mode->base.id, mode->name); |
encoder_funcs = encoder->base.helper_private; |
encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode); |
} |
return 0; |
} |
static bool intel_eld_uptodate(struct drm_connector *connector, |
int reg_eldv, uint32_t bits_eldv, |
int reg_elda, uint32_t bits_elda, |
6611,7 → 5767,7 |
int depth, int bpp) |
{ |
struct drm_i915_gem_object *obj; |
struct drm_mode_fb_cmd2 mode_cmd = { 0 }; |
struct drm_mode_fb_cmd2 mode_cmd; |
// obj = i915_gem_alloc_object(dev, |
// intel_framebuffer_size_for_mode(mode, bpp)); |
6741,7 → 5897,7 |
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); |
if (IS_ERR(fb)) { |
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); |
return false; |
goto fail; |
} |
if (!intel_set_mode(crtc, mode, 0, 0, fb)) { |
6748,12 → 5904,17 |
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); |
if (old->release_fb) |
old->release_fb->funcs->destroy(old->release_fb); |
return false; |
goto fail; |
} |
/* let the connector get through one full cycle before testing */ |
intel_wait_for_vblank(dev, intel_crtc->pipe); |
return true; |
fail: |
connector->encoder = NULL; |
encoder->crtc = NULL; |
return false; |
} |
void intel_release_load_detect_pipe(struct drm_connector *connector, |
6878,12 → 6039,12 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
int pipe = intel_crtc->pipe; |
struct drm_display_mode *mode; |
int htot = I915_READ(HTOTAL(cpu_transcoder)); |
int hsync = I915_READ(HSYNC(cpu_transcoder)); |
int vtot = I915_READ(VTOTAL(cpu_transcoder)); |
int vsync = I915_READ(VSYNC(cpu_transcoder)); |
int htot = I915_READ(HTOTAL(pipe)); |
int hsync = I915_READ(HSYNC(pipe)); |
int vtot = I915_READ(VTOTAL(pipe)); |
int vsync = I915_READ(VSYNC(pipe)); |
mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
if (!mode) |
7041,19 → 6202,14 |
{ |
struct intel_unpin_work *work = |
container_of(__work, struct intel_unpin_work, work); |
struct drm_device *dev = work->crtc->dev; |
mutex_lock(&dev->struct_mutex); |
mutex_lock(&work->dev->struct_mutex); |
intel_unpin_fb_obj(work->old_fb_obj); |
drm_gem_object_unreference(&work->pending_flip_obj->base); |
drm_gem_object_unreference(&work->old_fb_obj->base); |
intel_update_fbc(dev); |
mutex_unlock(&dev->struct_mutex); |
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); |
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); |
intel_update_fbc(work->dev); |
mutex_unlock(&work->dev->struct_mutex); |
kfree(work); |
} |
7064,6 → 6220,8 |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_unpin_work *work; |
struct drm_i915_gem_object *obj; |
struct drm_pending_vblank_event *e; |
struct timeval tvbl; |
unsigned long flags; |
/* Ignore early vblank irqs */ |
7072,23 → 6230,25 |
spin_lock_irqsave(&dev->event_lock, flags); |
work = intel_crtc->unpin_work; |
/* Ensure we don't miss a work->pending update ... */ |
smp_rmb(); |
if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { |
if (work == NULL || !work->pending) { |
spin_unlock_irqrestore(&dev->event_lock, flags); |
return; |
} |
/* and that the unpin work is consistent wrt ->pending. */ |
smp_rmb(); |
intel_crtc->unpin_work = NULL; |
if (work->event) |
drm_send_vblank_event(dev, intel_crtc->pipe, work->event); |
if (work->event) { |
e = work->event; |
e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); |
e->event.tv_sec = tvbl.tv_sec; |
e->event.tv_usec = tvbl.tv_usec; |
list_add_tail(&e->base.link, |
&e->base.file_priv->event_list); |
wake_up_interruptible(&e->base.file_priv->event_wait); |
} |
drm_vblank_put(dev, intel_crtc->pipe); |
spin_unlock_irqrestore(&dev->event_lock, flags); |
7097,10 → 6257,10 |
atomic_clear_mask(1 << intel_crtc->plane, |
&obj->pending_flip.counter); |
wake_up(&dev_priv->pending_flip_queue); |
schedule_work(&work->work); |
queue_work(dev_priv->wq, &work->work); |
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); |
} |
7127,25 → 6287,16 |
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); |
unsigned long flags; |
/* NB: An MMIO update of the plane base pointer will also |
* generate a page-flip completion irq, i.e. every modeset |
* is also accompanied by a spurious intel_prepare_page_flip(). |
*/ |
spin_lock_irqsave(&dev->event_lock, flags); |
if (intel_crtc->unpin_work) |
atomic_inc_not_zero(&intel_crtc->unpin_work->pending); |
if (intel_crtc->unpin_work) { |
if ((++intel_crtc->unpin_work->pending) > 1) |
DRM_ERROR("Prepared flip multiple times\n"); |
} else { |
DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); |
} |
spin_unlock_irqrestore(&dev->event_lock, flags); |
} |
inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) |
{ |
/* Ensure that the work item is consistent when activating it ... */ |
smp_wmb(); |
atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); |
/* and that it is marked active as soon as the irq could fire. */ |
smp_wmb(); |
} |
static int intel_gen2_queue_flip(struct drm_device *dev, |
struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
7179,8 → 6330,6 |
intel_ring_emit(ring, fb->pitches[0]); |
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); |
intel_ring_emit(ring, 0); /* aux display base address, unused */ |
intel_mark_page_flip_active(intel_crtc); |
intel_ring_advance(ring); |
return 0; |
7221,7 → 6370,6 |
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); |
intel_ring_emit(ring, MI_NOOP); |
intel_mark_page_flip_active(intel_crtc); |
intel_ring_advance(ring); |
return 0; |
7268,8 → 6416,6 |
pf = 0; |
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; |
intel_ring_emit(ring, pf | pipesrc); |
intel_mark_page_flip_active(intel_crtc); |
intel_ring_advance(ring); |
return 0; |
7312,8 → 6458,6 |
pf = 0; |
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; |
intel_ring_emit(ring, pf | pipesrc); |
intel_mark_page_flip_active(intel_crtc); |
intel_ring_advance(ring); |
return 0; |
7368,8 → 6512,6 |
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); |
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); |
intel_ring_emit(ring, (MI_NOOP)); |
intel_mark_page_flip_active(intel_crtc); |
intel_ring_advance(ring); |
return 0; |
7418,7 → 6560,7 |
return -ENOMEM; |
work->event = event; |
work->crtc = crtc; |
work->dev = crtc->dev; |
intel_fb = to_intel_framebuffer(crtc->fb); |
work->old_fb_obj = intel_fb->obj; |
INIT_WORK(&work->work, intel_unpin_work_fn); |
7443,9 → 6585,6 |
intel_fb = to_intel_framebuffer(fb); |
obj = intel_fb->obj; |
if (atomic_read(&intel_crtc->unpin_work_count) >= 2) |
flush_workqueue(dev_priv->wq); |
ret = i915_mutex_lock_interruptible(dev); |
if (ret) |
goto cleanup; |
7464,7 → 6603,6 |
* the flip occurs and the object is no longer visible. |
*/ |
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); |
atomic_inc(&intel_crtc->unpin_work_count); |
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); |
if (ret) |
7479,7 → 6617,6 |
return 0; |
cleanup_pending: |
atomic_dec(&intel_crtc->unpin_work_count); |
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); |
drm_gem_object_unreference(&work->old_fb_obj->base); |
drm_gem_object_unreference(&obj->base); |
7777,7 → 6914,7 |
dev->mode_config.dpms_property; |
connector->dpms = DRM_MODE_DPMS_ON; |
drm_object_property_set_value(&connector->base, |
drm_connector_property_set_value(connector, |
dpms_property, |
DRM_MODE_DPMS_ON); |
7899,6 → 7036,8 |
struct drm_device *dev = crtc->dev; |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; |
struct drm_encoder_helper_funcs *encoder_funcs; |
struct drm_encoder *encoder; |
struct intel_crtc *intel_crtc; |
unsigned disable_pipes, prepare_pipes, modeset_pipes; |
bool ret = true; |
7943,9 → 7082,6 |
* update the the output configuration. */ |
intel_modeset_update_state(dev, prepare_pipes); |
if (dev_priv->display.modeset_global_resources) |
dev_priv->display.modeset_global_resources(dev); |
/* Set up the DPLL and any encoders state that needs to adjust or depend |
* on the DPLL. |
*/ |
7955,7 → 7091,19 |
x, y, fb); |
if (!ret) |
goto done; |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
if (encoder->crtc != &intel_crtc->base) |
continue; |
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", |
encoder->base.id, drm_get_encoder_name(encoder), |
mode->base.id, mode->name); |
encoder_funcs = encoder->helper_private; |
encoder_funcs->mode_set(encoder, mode, adjusted_mode); |
} |
} |
/* Now enable the clocks, plane, pipe, and connectors that we set up. */ |
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) |
8132,6 → 7280,10 |
DRM_DEBUG_KMS("encoder changed, full mode switch\n"); |
config->mode_changed = true; |
} |
/* Disable all disconnected encoders. */ |
if (connector->base.status == connector_status_disconnected) |
connector->new_encoder = NULL; |
} |
/* connector->new_encoder is now updated for all connectors. */ |
8289,12 → 7441,6 |
// .page_flip = intel_crtc_page_flip, |
}; |
static void intel_cpu_pll_init(struct drm_device *dev) |
{ |
if (IS_HASWELL(dev)) |
intel_ddi_pll_init(dev); |
} |
static void intel_pch_pll_init(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
8334,7 → 7480,6 |
/* Swap pipes & planes for FBC on pre-965 */ |
intel_crtc->pipe = pipe; |
intel_crtc->plane = pipe; |
intel_crtc->cpu_transcoder = pipe; |
if (IS_MOBILE(dev) && IS_GEN3(dev)) { |
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); |
intel_crtc->plane = !pipe; |
8348,6 → 7493,11 |
intel_crtc->bpp = 24; /* default for pre-Ironlake */ |
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
DRM_DEBUG_KMS("CRTC %d mode %x FB %x enable %d\n", |
intel_crtc->base.base.id, intel_crtc->base.mode, |
intel_crtc->base.fb, intel_crtc->base.enabled); |
} |
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
8424,8 → 7574,16 |
I915_WRITE(PFIT_CONTROL, 0); |
} |
if (!(IS_HASWELL(dev) && |
(I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) |
if (HAS_PCH_SPLIT(dev)) { |
dpd_is_edp = intel_dpd_is_edp(dev); |
if (has_edp_a(dev)) |
intel_dp_init(dev, DP_A, PORT_A); |
if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
intel_dp_init(dev, PCH_DP_D, PORT_D); |
} |
intel_crt_init(dev); |
if (IS_HASWELL(dev)) { |
8449,11 → 7607,7 |
intel_ddi_init(dev, PORT_D); |
} else if (HAS_PCH_SPLIT(dev)) { |
int found; |
dpd_is_edp = intel_dpd_is_edp(dev); |
if (has_edp_a(dev)) |
intel_dp_init(dev, DP_A, PORT_A); |
if (I915_READ(HDMIB) & PORT_DETECTED) { |
/* PCH SDVOB multiplex with HDMIB */ |
found = intel_sdvo_init(dev, PCH_SDVOB, true); |
8472,15 → 7626,11 |
if (I915_READ(PCH_DP_C) & DP_DETECTED) |
intel_dp_init(dev, PCH_DP_C, PORT_C); |
if (I915_READ(PCH_DP_D) & DP_DETECTED) |
if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
intel_dp_init(dev, PCH_DP_D, PORT_D); |
} else if (IS_VALLEYVIEW(dev)) { |
int found; |
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ |
if (I915_READ(DP_C) & DP_DETECTED) |
intel_dp_init(dev, DP_C, PORT_C); |
if (I915_READ(SDVOB) & PORT_DETECTED) { |
/* SDVOB multiplex with HDMIB */ |
found = intel_sdvo_init(dev, SDVOB, true); |
8493,6 → 7643,9 |
if (I915_READ(SDVOC) & PORT_DETECTED) |
intel_hdmi_init(dev, SDVOC, PORT_C); |
/* Shares lanes with HDMI on SDVOC */ |
if (I915_READ(DP_C) & DP_DETECTED) |
intel_dp_init(dev, DP_C, PORT_C); |
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
bool found = false; |
8546,9 → 7699,8 |
intel_encoder_clones(encoder); |
} |
intel_init_pch_refclk(dev); |
drm_helper_move_panel_connectors_to_head(dev); |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) |
ironlake_init_pch_refclk(dev); |
} |
8565,74 → 7717,33 |
{ |
int ret; |
if (obj->tiling_mode == I915_TILING_Y) { |
DRM_DEBUG("hardware does not support tiling Y\n"); |
if (obj->tiling_mode == I915_TILING_Y) |
return -EINVAL; |
} |
if (mode_cmd->pitches[0] & 63) { |
DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", |
mode_cmd->pitches[0]); |
if (mode_cmd->pitches[0] & 63) |
return -EINVAL; |
} |
/* FIXME <= Gen4 stride limits are bit unclear */ |
if (mode_cmd->pitches[0] > 32768) { |
DRM_DEBUG("pitch (%d) must be at less than 32768\n", |
mode_cmd->pitches[0]); |
return -EINVAL; |
} |
if (obj->tiling_mode != I915_TILING_NONE && |
mode_cmd->pitches[0] != obj->stride) { |
DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", |
mode_cmd->pitches[0], obj->stride); |
return -EINVAL; |
} |
/* Reject formats not supported by any plane early. */ |
switch (mode_cmd->pixel_format) { |
case DRM_FORMAT_C8: |
case DRM_FORMAT_RGB332: |
case DRM_FORMAT_RGB565: |
case DRM_FORMAT_XRGB8888: |
case DRM_FORMAT_XBGR8888: |
case DRM_FORMAT_ARGB8888: |
break; |
case DRM_FORMAT_XRGB1555: |
case DRM_FORMAT_ARGB1555: |
if (INTEL_INFO(dev)->gen > 3) { |
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); |
return -EINVAL; |
} |
break; |
case DRM_FORMAT_XBGR8888: |
case DRM_FORMAT_ABGR8888: |
case DRM_FORMAT_XRGB2101010: |
case DRM_FORMAT_ARGB2101010: |
case DRM_FORMAT_XBGR2101010: |
case DRM_FORMAT_ABGR2101010: |
if (INTEL_INFO(dev)->gen < 4) { |
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); |
return -EINVAL; |
} |
/* RGB formats are common across chipsets */ |
break; |
case DRM_FORMAT_YUYV: |
case DRM_FORMAT_UYVY: |
case DRM_FORMAT_YVYU: |
case DRM_FORMAT_VYUY: |
if (INTEL_INFO(dev)->gen < 5) { |
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); |
return -EINVAL; |
} |
break; |
default: |
DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); |
DRM_DEBUG_KMS("unsupported pixel format %u\n", |
mode_cmd->pixel_format); |
return -EINVAL; |
} |
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */ |
if (mode_cmd->offsets[0] != 0) |
return -EINVAL; |
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
if (ret) { |
DRM_ERROR("framebuffer init failed %d\n", ret); |
8656,13 → 7767,7 |
struct drm_i915_private *dev_priv = dev->dev_private; |
/* We always want a DPMS function */ |
if (IS_HASWELL(dev)) { |
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; |
dev_priv->display.crtc_enable = haswell_crtc_enable; |
dev_priv->display.crtc_disable = haswell_crtc_disable; |
dev_priv->display.off = haswell_crtc_off; |
dev_priv->display.update_plane = ironlake_update_plane; |
} else if (HAS_PCH_SPLIT(dev)) { |
if (HAS_PCH_SPLIT(dev)) { |
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; |
dev_priv->display.crtc_enable = ironlake_crtc_enable; |
dev_priv->display.crtc_disable = ironlake_crtc_disable; |
8713,8 → 7818,6 |
/* FIXME: detect B0+ stepping and use auto training */ |
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; |
dev_priv->display.write_eld = ironlake_write_eld; |
dev_priv->display.modeset_global_resources = |
ivb_modeset_global_resources; |
} else if (IS_HASWELL(dev)) { |
dev_priv->display.fdi_link_train = hsw_fdi_link_train; |
dev_priv->display.write_eld = haswell_write_eld; |
8928,7 → 8031,6 |
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); |
} |
intel_cpu_pll_init(dev); |
intel_pch_pll_init(dev); |
/* Just disable it once at startup */ |
8998,7 → 8100,7 |
u32 reg; |
/* Clear any frame start delays used for debugging left by the BIOS */ |
reg = PIPECONF(crtc->cpu_transcoder); |
reg = PIPECONF(crtc->pipe); |
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); |
/* We need to sanitize the plane -> pipe mapping first because this will |
9117,8 → 8219,7 |
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm |
* and i915 state tracking structures. */ |
void intel_modeset_setup_hw_state(struct drm_device *dev, |
bool force_restore) |
void intel_modeset_setup_hw_state(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
enum pipe pipe; |
9127,35 → 8228,10 |
struct intel_encoder *encoder; |
struct intel_connector *connector; |
if (IS_HASWELL(dev)) { |
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); |
if (tmp & TRANS_DDI_FUNC_ENABLE) { |
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { |
case TRANS_DDI_EDP_INPUT_A_ON: |
case TRANS_DDI_EDP_INPUT_A_ONOFF: |
pipe = PIPE_A; |
break; |
case TRANS_DDI_EDP_INPUT_B_ONOFF: |
pipe = PIPE_B; |
break; |
case TRANS_DDI_EDP_INPUT_C_ONOFF: |
pipe = PIPE_C; |
break; |
} |
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
crtc->cpu_transcoder = TRANSCODER_EDP; |
DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n", |
pipe_name(pipe)); |
} |
} |
for_each_pipe(pipe) { |
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
tmp = I915_READ(PIPECONF(crtc->cpu_transcoder)); |
tmp = I915_READ(PIPECONF(pipe)); |
if (tmp & PIPECONF_ENABLE) |
crtc->active = true; |
else |
9168,9 → 8244,6 |
crtc->active ? "enabled" : "disabled"); |
} |
if (IS_HASWELL(dev)) |
intel_ddi_setup_hw_pll_state(dev); |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
base.head) { |
pipe = 0; |
9217,21 → 8290,9 |
intel_sanitize_crtc(crtc); |
} |
if (force_restore) { |
for_each_pipe(pipe) { |
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
intel_set_mode(&crtc->base, &crtc->base.mode, |
crtc->base.x, crtc->base.y, crtc->base.fb); |
} |
// i915_redisable_vga(dev); |
} else { |
intel_modeset_update_staged_output_state(dev); |
} |
intel_modeset_check_state(dev); |
drm_mode_config_reset(dev); |
} |
void intel_modeset_gem_init(struct drm_device *dev) |
9240,7 → 8301,7 |
// intel_setup_overlay(dev); |
intel_modeset_setup_hw_state(dev, false); |
intel_modeset_setup_hw_state(dev); |
} |
void intel_modeset_cleanup(struct drm_device *dev) |
9361,7 → 8422,6 |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct intel_display_error_state *error; |
enum transcoder cpu_transcoder; |
int i; |
error = kmalloc(sizeof(*error), GFP_ATOMIC); |
9369,8 → 8429,6 |
return NULL; |
for_each_pipe(i) { |
cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); |
error->cursor[i].control = I915_READ(CURCNTR(i)); |
error->cursor[i].position = I915_READ(CURPOS(i)); |
error->cursor[i].base = I915_READ(CURBASE(i)); |
9385,14 → 8443,14 |
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); |
} |
error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); |
error->pipe[i].conf = I915_READ(PIPECONF(i)); |
error->pipe[i].source = I915_READ(PIPESRC(i)); |
error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); |
error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); |
error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); |
error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); |
error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); |
error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); |
error->pipe[i].htotal = I915_READ(HTOTAL(i)); |
error->pipe[i].hblank = I915_READ(HBLANK(i)); |
error->pipe[i].hsync = I915_READ(HSYNC(i)); |
error->pipe[i].vtotal = I915_READ(VTOTAL(i)); |
error->pipe[i].vblank = I915_READ(VBLANK(i)); |
error->pipe[i].vsync = I915_READ(VSYNC(i)); |
} |
return error; |
/drivers/video/drm/i915/intel_pm.c |
---|
63,14 → 63,6 |
* i915.i915_enable_fbc parameter |
*/ |
static bool intel_crtc_active(struct drm_crtc *crtc) |
{ |
/* Be paranoid as we can arrive here with only partial |
* state retrieved from the hardware during setup. |
*/ |
return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock; |
} |
static void i8xx_disable_fbc(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
436,8 → 428,9 |
* - going to an unsupported config (interlace, pixel multiply, etc.) |
*/ |
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { |
if (intel_crtc_active(tmp_crtc) && |
!to_intel_crtc(tmp_crtc)->primary_disabled) { |
if (tmp_crtc->enabled && |
!to_intel_crtc(tmp_crtc)->primary_disabled && |
tmp_crtc->fb) { |
if (crtc) { |
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); |
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; |
1022,7 → 1015,7 |
struct drm_crtc *crtc, *enabled = NULL; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
if (intel_crtc_active(crtc)) { |
if (crtc->enabled && crtc->fb) { |
if (enabled) |
return NULL; |
enabled = crtc; |
1116,7 → 1109,9 |
int entries, tlb_miss; |
crtc = intel_get_crtc_for_plane(dev, plane); |
if (!intel_crtc_active(crtc)) { |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
if (crtc->fb == NULL || !crtc->enabled || !intel_crtc->active) { |
*cursor_wm = cursor->guard_size; |
*plane_wm = display->guard_size; |
return false; |
1245,7 → 1240,7 |
int entries; |
crtc = intel_get_crtc_for_plane(dev, plane); |
if (!intel_crtc_active(crtc)) |
if (crtc->fb == NULL || !crtc->enabled) |
return false; |
clock = crtc->mode.clock; /* VESA DOT Clock */ |
1316,7 → 1311,6 |
struct drm_i915_private *dev_priv = dev->dev_private; |
int planea_wm, planeb_wm, cursora_wm, cursorb_wm; |
int plane_sr, cursor_sr; |
int ignore_plane_sr, ignore_cursor_sr; |
unsigned int enabled = 0; |
vlv_update_drain_latency(dev); |
1333,23 → 1327,17 |
&planeb_wm, &cursorb_wm)) |
enabled |= 2; |
plane_sr = cursor_sr = 0; |
if (single_plane_enabled(enabled) && |
g4x_compute_srwm(dev, ffs(enabled) - 1, |
sr_latency_ns, |
&valleyview_wm_info, |
&valleyview_cursor_wm_info, |
&plane_sr, &ignore_cursor_sr) && |
g4x_compute_srwm(dev, ffs(enabled) - 1, |
2*sr_latency_ns, |
&valleyview_wm_info, |
&valleyview_cursor_wm_info, |
&ignore_plane_sr, &cursor_sr)) { |
&plane_sr, &cursor_sr)) |
I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); |
} else { |
else |
I915_WRITE(FW_BLC_SELF_VLV, |
I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); |
plane_sr = cursor_sr = 0; |
} |
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", |
planea_wm, cursora_wm, |
1362,11 → 1350,10 |
(planeb_wm << DSPFW_PLANEB_SHIFT) | |
planea_wm); |
I915_WRITE(DSPFW2, |
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | |
(I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | |
(cursora_wm << DSPFW_CURSORA_SHIFT)); |
I915_WRITE(DSPFW3, |
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT)); |
(I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT))); |
} |
static void g4x_update_wm(struct drm_device *dev) |
1389,18 → 1376,17 |
&planeb_wm, &cursorb_wm)) |
enabled |= 2; |
plane_sr = cursor_sr = 0; |
if (single_plane_enabled(enabled) && |
g4x_compute_srwm(dev, ffs(enabled) - 1, |
sr_latency_ns, |
&g4x_wm_info, |
&g4x_cursor_wm_info, |
&plane_sr, &cursor_sr)) { |
&plane_sr, &cursor_sr)) |
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
} else { |
else |
I915_WRITE(FW_BLC_SELF, |
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); |
plane_sr = cursor_sr = 0; |
} |
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", |
planea_wm, cursora_wm, |
1413,11 → 1399,11 |
(planeb_wm << DSPFW_PLANEB_SHIFT) | |
planea_wm); |
I915_WRITE(DSPFW2, |
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | |
(I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | |
(cursora_wm << DSPFW_CURSORA_SHIFT)); |
/* HPLL off in SR has some issues on G4x... disable it */ |
I915_WRITE(DSPFW3, |
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) | |
(I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT)); |
} |
1506,13 → 1492,10 |
fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
crtc = intel_get_crtc_for_plane(dev, 0); |
if (intel_crtc_active(crtc)) { |
int cpp = crtc->fb->bits_per_pixel / 8; |
if (IS_GEN2(dev)) |
cpp = 4; |
if (crtc->enabled && crtc->fb) { |
planea_wm = intel_calculate_wm(crtc->mode.clock, |
wm_info, fifo_size, cpp, |
wm_info, fifo_size, |
crtc->fb->bits_per_pixel / 8, |
latency_ns); |
enabled = crtc; |
} else |
1520,13 → 1503,10 |
fifo_size = dev_priv->display.get_fifo_size(dev, 1); |
crtc = intel_get_crtc_for_plane(dev, 1); |
if (intel_crtc_active(crtc)) { |
int cpp = crtc->fb->bits_per_pixel / 8; |
if (IS_GEN2(dev)) |
cpp = 4; |
if (crtc->enabled && crtc->fb) { |
planeb_wm = intel_calculate_wm(crtc->mode.clock, |
wm_info, fifo_size, cpp, |
wm_info, fifo_size, |
crtc->fb->bits_per_pixel / 8, |
latency_ns); |
if (enabled == NULL) |
enabled = crtc; |
1616,7 → 1596,8 |
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, |
dev_priv->display.get_fifo_size(dev, 0), |
4, latency_ns); |
crtc->fb->bits_per_pixel / 8, |
latency_ns); |
fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
fwater_lo |= (3<<8) | planea_wm; |
1849,113 → 1830,11 |
enabled |= 2; |
} |
/* |
* Calculate and update the self-refresh watermark only when one |
* display plane is used. |
* |
* SNB support 3 levels of watermark. |
* |
* WM1/WM2/WM2 watermarks have to be enabled in the ascending order, |
* and disabled in the descending order |
* |
*/ |
I915_WRITE(WM3_LP_ILK, 0); |
I915_WRITE(WM2_LP_ILK, 0); |
I915_WRITE(WM1_LP_ILK, 0); |
if (!single_plane_enabled(enabled) || |
dev_priv->sprite_scaling_enabled) |
return; |
enabled = ffs(enabled) - 1; |
/* WM1 */ |
if (!ironlake_compute_srwm(dev, 1, enabled, |
SNB_READ_WM1_LATENCY() * 500, |
&sandybridge_display_srwm_info, |
&sandybridge_cursor_srwm_info, |
&fbc_wm, &plane_wm, &cursor_wm)) |
return; |
I915_WRITE(WM1_LP_ILK, |
WM1_LP_SR_EN | |
(SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
(fbc_wm << WM1_LP_FBC_SHIFT) | |
(plane_wm << WM1_LP_SR_SHIFT) | |
cursor_wm); |
/* WM2 */ |
if (!ironlake_compute_srwm(dev, 2, enabled, |
SNB_READ_WM2_LATENCY() * 500, |
&sandybridge_display_srwm_info, |
&sandybridge_cursor_srwm_info, |
&fbc_wm, &plane_wm, &cursor_wm)) |
return; |
I915_WRITE(WM2_LP_ILK, |
WM2_LP_EN | |
(SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
(fbc_wm << WM1_LP_FBC_SHIFT) | |
(plane_wm << WM1_LP_SR_SHIFT) | |
cursor_wm); |
/* WM3 */ |
if (!ironlake_compute_srwm(dev, 3, enabled, |
SNB_READ_WM3_LATENCY() * 500, |
&sandybridge_display_srwm_info, |
&sandybridge_cursor_srwm_info, |
&fbc_wm, &plane_wm, &cursor_wm)) |
return; |
I915_WRITE(WM3_LP_ILK, |
WM3_LP_EN | |
(SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
(fbc_wm << WM1_LP_FBC_SHIFT) | |
(plane_wm << WM1_LP_SR_SHIFT) | |
cursor_wm); |
} |
static void ivybridge_update_wm(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
u32 val; |
int fbc_wm, plane_wm, cursor_wm; |
int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm; |
unsigned int enabled; |
enabled = 0; |
if (g4x_compute_wm0(dev, 0, |
if ((dev_priv->num_pipe == 3) && |
g4x_compute_wm0(dev, 2, |
&sandybridge_display_wm_info, latency, |
&sandybridge_cursor_wm_info, latency, |
&plane_wm, &cursor_wm)) { |
val = I915_READ(WM0_PIPEA_ILK); |
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
I915_WRITE(WM0_PIPEA_ILK, val | |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); |
DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
" plane %d, " "cursor: %d\n", |
plane_wm, cursor_wm); |
enabled |= 1; |
} |
if (g4x_compute_wm0(dev, 1, |
&sandybridge_display_wm_info, latency, |
&sandybridge_cursor_wm_info, latency, |
&plane_wm, &cursor_wm)) { |
val = I915_READ(WM0_PIPEB_ILK); |
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
I915_WRITE(WM0_PIPEB_ILK, val | |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); |
DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
" plane %d, cursor: %d\n", |
plane_wm, cursor_wm); |
enabled |= 2; |
} |
if (g4x_compute_wm0(dev, 2, |
&sandybridge_display_wm_info, latency, |
&sandybridge_cursor_wm_info, latency, |
&plane_wm, &cursor_wm)) { |
val = I915_READ(WM0_PIPEC_IVB); |
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
I915_WRITE(WM0_PIPEC_IVB, val | |
2015,17 → 1894,12 |
(plane_wm << WM1_LP_SR_SHIFT) | |
cursor_wm); |
/* WM3, note we have to correct the cursor latency */ |
/* WM3 */ |
if (!ironlake_compute_srwm(dev, 3, enabled, |
SNB_READ_WM3_LATENCY() * 500, |
&sandybridge_display_srwm_info, |
&sandybridge_cursor_srwm_info, |
&fbc_wm, &plane_wm, &ignore_cursor_wm) || |
!ironlake_compute_srwm(dev, 3, enabled, |
2 * SNB_READ_WM3_LATENCY() * 500, |
&sandybridge_display_srwm_info, |
&sandybridge_cursor_srwm_info, |
&ignore_fbc_wm, &ignore_plane_wm, &cursor_wm)) |
&fbc_wm, &plane_wm, &cursor_wm)) |
return; |
I915_WRITE(WM3_LP_ILK, |
2074,7 → 1948,7 |
int entries, tlb_miss; |
crtc = intel_get_crtc_for_plane(dev, plane); |
if (!intel_crtc_active(crtc)) { |
if (crtc->fb == NULL || !crtc->enabled) { |
*sprite_wm = display->guard_size; |
return false; |
} |
2474,7 → 2348,7 |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 limits = gen6_rps_limits(dev_priv, &val); |
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
WARN_ON(val > dev_priv->rps.max_delay); |
WARN_ON(val < dev_priv->rps.min_delay); |
2549,12 → 2423,12 |
struct intel_ring_buffer *ring; |
u32 rp_state_cap; |
u32 gt_perf_status; |
u32 rc6vids, pcu_mbox, rc6_mask = 0; |
u32 pcu_mbox, rc6_mask = 0; |
u32 gtfifodbg; |
int rc6_mode; |
int i, ret; |
int i; |
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
/* Here begins a magic sequence of register writes to enable |
* auto-downclocking. |
2648,17 → 2522,31 |
GEN6_RP_UP_BUSY_AVG | |
(IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); |
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); |
if (!ret) { |
pcu_mbox = 0; |
ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); |
if (ret && pcu_mbox & (1<<31)) { /* OC supported */ |
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
500)) |
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); |
I915_WRITE(GEN6_PCODE_DATA, 0); |
I915_WRITE(GEN6_PCODE_MAILBOX, |
GEN6_PCODE_READY | |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE); |
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
500)) |
DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); |
/* Check for overclock support */ |
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
500)) |
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); |
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); |
pcu_mbox = I915_READ(GEN6_PCODE_DATA); |
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
500)) |
DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); |
if (pcu_mbox & (1<<31)) { /* OC supported */ |
dev_priv->rps.max_delay = pcu_mbox & 0xff; |
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); |
} |
} else { |
DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); |
} |
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); |
2671,20 → 2559,6 |
/* enable all PM interrupts */ |
I915_WRITE(GEN6_PMINTRMSK, 0); |
rc6vids = 0; |
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); |
if (IS_GEN6(dev) && ret) { |
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); |
} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { |
DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", |
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); |
rc6vids &= 0xffff00; |
rc6vids |= GEN6_ENCODE_RC6_VID(450); |
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); |
if (ret) |
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); |
} |
gen6_gt_force_wake_put(dev_priv); |
} |
2693,11 → 2567,10 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int min_freq = 15; |
int gpu_freq; |
unsigned int ia_freq, max_ia_freq; |
int gpu_freq, ia_freq, max_ia_freq; |
int scaling_factor = 180; |
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
max_ia_freq = cpufreq_quick_get_max(0); |
/* |
2728,13 → 2601,19 |
else |
ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); |
ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); |
ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT; |
sandybridge_pcode_write(dev_priv, |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE, |
ia_freq | gpu_freq); |
I915_WRITE(GEN6_PCODE_DATA, |
(ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | |
gpu_freq); |
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE); |
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & |
GEN6_PCODE_READY) == 0, 10)) { |
DRM_ERROR("pcode write of freq table timed out\n"); |
continue; |
} |
} |
} |
#endif |
void ironlake_teardown_rc6(struct drm_device *dev) |
2741,16 → 2620,16 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (dev_priv->ips.renderctx) { |
i915_gem_object_unpin(dev_priv->ips.renderctx); |
drm_gem_object_unreference(&dev_priv->ips.renderctx->base); |
dev_priv->ips.renderctx = NULL; |
if (dev_priv->renderctx) { |
i915_gem_object_unpin(dev_priv->renderctx); |
drm_gem_object_unreference(&dev_priv->renderctx->base); |
dev_priv->renderctx = NULL; |
} |
if (dev_priv->ips.pwrctx) { |
i915_gem_object_unpin(dev_priv->ips.pwrctx); |
drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); |
dev_priv->ips.pwrctx = NULL; |
if (dev_priv->pwrctx) { |
i915_gem_object_unpin(dev_priv->pwrctx); |
drm_gem_object_unreference(&dev_priv->pwrctx->base); |
dev_priv->pwrctx = NULL; |
} |
} |
2776,14 → 2655,14 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (dev_priv->ips.renderctx == NULL) |
dev_priv->ips.renderctx = intel_alloc_context_page(dev); |
if (!dev_priv->ips.renderctx) |
if (dev_priv->renderctx == NULL) |
dev_priv->renderctx = intel_alloc_context_page(dev); |
if (!dev_priv->renderctx) |
return -ENOMEM; |
if (dev_priv->ips.pwrctx == NULL) |
dev_priv->ips.pwrctx = intel_alloc_context_page(dev); |
if (!dev_priv->ips.pwrctx) { |
if (dev_priv->pwrctx == NULL) |
dev_priv->pwrctx = intel_alloc_context_page(dev); |
if (!dev_priv->pwrctx) { |
ironlake_teardown_rc6(dev); |
return -ENOMEM; |
} |
2795,7 → 2674,6 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; |
bool was_interruptible; |
int ret; |
/* rc6 disabled by default due to repeated reports of hanging during |
2810,9 → 2688,6 |
if (ret) |
return; |
was_interruptible = dev_priv->mm.interruptible; |
dev_priv->mm.interruptible = false; |
/* |
* GPU can automatically power down the render unit if given a page |
* to save state. |
2820,13 → 2695,12 |
ret = intel_ring_begin(ring, 6); |
if (ret) { |
ironlake_teardown_rc6(dev); |
dev_priv->mm.interruptible = was_interruptible; |
return; |
} |
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); |
intel_ring_emit(ring, MI_SET_CONTEXT); |
intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset | |
intel_ring_emit(ring, dev_priv->renderctx->gtt_offset | |
MI_MM_SPACE_GTT | |
MI_SAVE_EXT_STATE_EN | |
MI_RESTORE_EXT_STATE_EN | |
2841,8 → 2715,7 |
* does an implicit flush, combined with MI_FLUSH above, it should be |
* safe to assume that renderctx is valid |
*/ |
ret = intel_ring_idle(ring); |
dev_priv->mm.interruptible = was_interruptible; |
ret = intel_wait_ring_idle(ring); |
if (ret) { |
DRM_ERROR("failed to enable ironlake power power savings\n"); |
ironlake_teardown_rc6(dev); |
2849,7 → 2722,7 |
return; |
} |
I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN); |
I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); |
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
} |
3458,8 → 3331,6 |
void intel_disable_gt_powersave(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (IS_IRONLAKE_M(dev)) { |
ironlake_disable_drps(dev); |
ironlake_disable_rc6(dev); |
3470,44 → 3341,27 |
void intel_enable_gt_powersave(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (IS_IRONLAKE_M(dev)) { |
ironlake_enable_drps(dev); |
ironlake_enable_rc6(dev); |
intel_init_emon(dev); |
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { |
/* |
* PCU communication is slow and this doesn't need to be |
* done at any specific time, so do this out of our fast path |
* to make resume and init faster. |
*/ |
// schedule_delayed_work(&dev_priv->rps.delayed_resume_work, |
// round_jiffies_up_relative(HZ)); |
// gen6_enable_rps(dev); |
// gen6_update_ring_freq(dev); |
} |
} |
static void ibx_init_clock_gating(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
/* |
* On Ibex Peak and Cougar Point, we need to disable clock |
* gating for the panel power sequencer or it will fail to |
* start up when no ports are active. |
*/ |
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); |
} |
static void ironlake_init_clock_gating(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; |
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; |
/* Required for FBC */ |
dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | |
ILK_DPFCUNIT_CLOCK_GATE_DISABLE | |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE; |
dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | |
DPFCRUNIT_CLOCK_GATE_DISABLE | |
DPFDUNIT_CLOCK_GATE_DISABLE; |
/* Required for CxSR */ |
dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; |
I915_WRITE(PCH_3DCGDIS0, |
MARIUNIT_CLOCK_GATE_DISABLE | |
3515,6 → 3369,8 |
I915_WRITE(PCH_3DCGDIS1, |
VFMUNIT_CLOCK_GATE_DISABLE); |
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
/* |
* According to the spec the following bits should be set in |
* order to enable memory self-refresh |
3525,7 → 3381,9 |
I915_WRITE(ILK_DISPLAY_CHICKEN2, |
(I915_READ(ILK_DISPLAY_CHICKEN2) | |
ILK_DPARB_GATE | ILK_VSDPFD_FULL)); |
dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; |
I915_WRITE(ILK_DSPCLK_GATE, |
(I915_READ(ILK_DSPCLK_GATE) | |
ILK_DPARB_CLK_GATE)); |
I915_WRITE(DISP_ARB_CTL, |
(I915_READ(DISP_ARB_CTL) | |
DISP_FBC_WM_DIS)); |
3547,10 → 3405,13 |
I915_WRITE(ILK_DISPLAY_CHICKEN2, |
I915_READ(ILK_DISPLAY_CHICKEN2) | |
ILK_DPARB_GATE); |
I915_WRITE(ILK_DSPCLK_GATE, |
I915_READ(ILK_DSPCLK_GATE) | |
ILK_DPFC_DIS1 | |
ILK_DPFC_DIS2 | |
ILK_CLK_FBC); |
} |
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); |
I915_WRITE(ILK_DISPLAY_CHICKEN2, |
I915_READ(ILK_DISPLAY_CHICKEN2) | |
ILK_ELPIN_409_SELECT); |
3557,60 → 3418,20 |
I915_WRITE(_3D_CHICKEN2, |
_3D_CHICKEN2_WM_READ_PIPELINED << 16 | |
_3D_CHICKEN2_WM_READ_PIPELINED); |
/* WaDisableRenderCachePipelinedFlush */ |
I915_WRITE(CACHE_MODE_0, |
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); |
ibx_init_clock_gating(dev); |
} |
static void cpt_init_clock_gating(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int pipe; |
/* |
* On Ibex Peak and Cougar Point, we need to disable clock |
* gating for the panel power sequencer or it will fail to |
* start up when no ports are active. |
*/ |
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); |
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | |
DPLS_EDP_PPS_FIX_DIS); |
/* The below fixes the weird display corruption, a few pixels shifted |
* downward, on (only) LVDS of some HP laptops with IVY. |
*/ |
for_each_pipe(pipe) |
I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE); |
/* WADP0ClockGatingDisable */ |
for_each_pipe(pipe) { |
I915_WRITE(TRANS_CHICKEN1(pipe), |
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); |
} |
} |
static void gen6_init_clock_gating(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int pipe; |
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; |
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; |
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); |
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
I915_WRITE(ILK_DISPLAY_CHICKEN2, |
I915_READ(ILK_DISPLAY_CHICKEN2) | |
ILK_ELPIN_409_SELECT); |
/* WaDisableHiZPlanesWhenMSAAEnabled */ |
I915_WRITE(_3D_CHICKEN, |
_MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); |
/* WaSetupGtModeTdRowDispatch */ |
if (IS_SNB_GT1(dev)) |
I915_WRITE(GEN6_GT_MODE, |
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); |
I915_WRITE(WM3_LP_ILK, 0); |
I915_WRITE(WM2_LP_ILK, 0); |
I915_WRITE(WM1_LP_ILK, 0); |
3660,12 → 3481,11 |
I915_WRITE(ILK_DISPLAY_CHICKEN2, |
I915_READ(ILK_DISPLAY_CHICKEN2) | |
ILK_DPARB_GATE | ILK_VSDPFD_FULL); |
I915_WRITE(ILK_DSPCLK_GATE_D, |
I915_READ(ILK_DSPCLK_GATE_D) | |
ILK_DPARBUNIT_CLOCK_GATE_ENABLE | |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE); |
I915_WRITE(ILK_DSPCLK_GATE, |
I915_READ(ILK_DSPCLK_GATE) | |
ILK_DPARB_CLK_GATE | |
ILK_DPFD_CLK_GATE); |
/* WaMbcDriverBootEnable */ |
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | |
GEN6_MBCTL_ENABLE_BOOT_FETCH); |
3680,8 → 3500,6 |
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ |
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); |
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); |
cpt_init_clock_gating(dev); |
} |
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) |
3696,25 → 3514,14 |
I915_WRITE(GEN7_FF_THREAD_MODE, reg); |
} |
/*
 * Lynx Point (LPT) PCH clock-gating setup. Only the low-power (LP)
 * PCH variant needs the partition-level gating disabled here.
 */
static void lpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/*
 * TODO: this bit should only be enabled when really needed, then
 * disabled when not needed anymore in order to save power.
 */
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
I915_WRITE(SOUTH_DSPCLK_GATE_D,
I915_READ(SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
}
static void haswell_init_clock_gating(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int pipe; |
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; |
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
I915_WRITE(WM3_LP_ILK, 0); |
I915_WRITE(WM2_LP_ILK, 0); |
I915_WRITE(WM1_LP_ILK, 0); |
3724,6 → 3531,12 |
*/ |
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); |
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); |
I915_WRITE(IVB_CHICKEN3, |
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | |
CHICKEN3_DGMG_DONE_FIX_DISABLE); |
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ |
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, |
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); |
3752,10 → 3565,6 |
I915_WRITE(CACHE_MODE_1, |
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); |
/* WaMbcDriverBootEnable */ |
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | |
GEN6_MBCTL_ENABLE_BOOT_FETCH); |
/* XXX: This is a workaround for early silicon revisions and should be |
* removed later. |
*/ |
3765,7 → 3574,6 |
WM_DBG_DISALLOW_SPRITE | |
WM_DBG_DISALLOW_MAXFIFO); |
lpt_init_clock_gating(dev); |
} |
static void ivybridge_init_clock_gating(struct drm_device *dev) |
3772,31 → 3580,21 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int pipe; |
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; |
uint32_t snpcr; |
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
I915_WRITE(WM3_LP_ILK, 0); |
I915_WRITE(WM2_LP_ILK, 0); |
I915_WRITE(WM1_LP_ILK, 0); |
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); |
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); |
/* WaDisableEarlyCull */ |
I915_WRITE(_3D_CHICKEN3, |
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); |
/* WaDisableBackToBackFlipFix */ |
I915_WRITE(IVB_CHICKEN3, |
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | |
CHICKEN3_DGMG_DONE_FIX_DISABLE); |
/* WaDisablePSDDualDispatchEnable */ |
if (IS_IVB_GT1(dev)) |
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, |
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); |
else |
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2, |
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); |
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ |
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, |
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); |
3806,18 → 3604,7 |
GEN7_WA_FOR_GEN7_L3_CONTROL); |
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, |
GEN7_WA_L3_CHICKEN_MODE); |
if (IS_IVB_GT1(dev)) |
I915_WRITE(GEN7_ROW_CHICKEN2, |
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); |
else |
I915_WRITE(GEN7_ROW_CHICKEN2_GT2, |
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); |
/* WaForceL3Serialization */ |
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & |
~L3SQ_URB_READ_CAM_MATCH_DISABLE); |
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock |
* gating disable must be set. Failure to set it results in |
* flickering pixels due to Z write ordering failures after |
3847,7 → 3634,6 |
intel_flush_display_plane(dev_priv, pipe); |
} |
/* WaMbcDriverBootEnable */ |
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | |
GEN6_MBCTL_ENABLE_BOOT_FETCH); |
3861,8 → 3647,6 |
snpcr &= ~GEN6_MBC_SNPCR_MASK; |
snpcr |= GEN6_MBC_SNPCR_MED; |
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); |
cpt_init_clock_gating(dev); |
} |
static void valleyview_init_clock_gating(struct drm_device *dev) |
3869,51 → 3653,33 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int pipe; |
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; |
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
I915_WRITE(WM3_LP_ILK, 0); |
I915_WRITE(WM2_LP_ILK, 0); |
I915_WRITE(WM1_LP_ILK, 0); |
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); |
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); |
/* WaDisableEarlyCull */ |
I915_WRITE(_3D_CHICKEN3, |
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); |
/* WaDisableBackToBackFlipFix */ |
I915_WRITE(IVB_CHICKEN3, |
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | |
CHICKEN3_DGMG_DONE_FIX_DISABLE); |
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, |
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); |
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ |
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, |
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); |
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ |
I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS); |
I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); |
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); |
/* WaForceL3Serialization */ |
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & |
~L3SQ_URB_READ_CAM_MATCH_DISABLE); |
/* WaDisableDopClockGating */ |
I915_WRITE(GEN7_ROW_CHICKEN2, |
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); |
/* WaForceL3Serialization */ |
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & |
~L3SQ_URB_READ_CAM_MATCH_DISABLE); |
/* This is required by WaCatErrorRejectionIssue */ |
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, |
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); |
/* WaMbcDriverBootEnable */ |
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | |
GEN6_MBCTL_ENABLE_BOOT_FETCH); |
3965,13 → 3731,6 |
PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN | |
SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN | |
PLANEA_FLIPDONE_INT_EN); |
/* |
* WaDisableVLVClockGating_VBIIssue |
* Disable clock gating on th GCFG unit to prevent a delay |
* in the reporting of vblank events. |
*/ |
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); |
} |
static void g4x_init_clock_gating(struct drm_device *dev) |
3990,10 → 3749,6 |
if (IS_GM45(dev)) |
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; |
I915_WRITE(DSPCLK_GATE_D, dspclk_gate); |
/* WaDisableRenderCachePipelinedFlush */ |
I915_WRITE(CACHE_MODE_0, |
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); |
} |
static void crestline_init_clock_gating(struct drm_device *dev) |
4049,11 → 3804,44 |
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); |
} |
/*
 * Ibex Peak (PCH) clock-gating setup: keep the panel-power-sequencer
 * unit ungated, otherwise it fails to start with no active ports.
 */
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/*
 * On Ibex Peak and Cougar Point, we need to disable clock
 * gating for the panel power sequencer or it will fail to
 * start up when no ports are active.
 */
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
/*
 * Cougar Point (PCH) clock-gating setup. Besides the panel-power-sequencer
 * fix shared with Ibex Peak, disables FDI auto-train stall generation
 * on each pipe so mode sets do not fail silently.
 */
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
/*
 * On Ibex Peak and Cougar Point, we need to disable clock
 * gating for the panel power sequencer or it will fail to
 * start up when no ports are active.
 */
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* Without this, mode sets may fail silently on FDI */
for_each_pipe(pipe)
I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
/*
 * Dispatch to the platform-specific clock-gating setup hooks.
 * The GPU-side hook is mandatory; the PCH-side hook is optional
 * (only set on platforms with an IBX/CPT south bridge).
 */
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->display.init_clock_gating(dev);
if (dev_priv->display.init_pch_clock_gating)
dev_priv->display.init_pch_clock_gating(dev);
}
/* Starting with Haswell, we have different power wells for |
4079,7 → 3867,7 |
if ((well & HSW_PWR_WELL_STATE) == 0) { |
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); |
if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20)) |
if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20)) |
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); |
} |
} |
4117,6 → 3905,11 |
/* For FIFO watermark updates */ |
if (HAS_PCH_SPLIT(dev)) { |
if (HAS_PCH_IBX(dev)) |
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; |
else if (HAS_PCH_CPT(dev)) |
dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; |
if (IS_GEN5(dev)) { |
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) |
dev_priv->display.update_wm = ironlake_update_wm; |
4139,7 → 3932,7 |
} else if (IS_IVYBRIDGE(dev)) { |
/* FIXME: detect B0+ stepping and use auto training */ |
if (SNB_READ_WM0_LATENCY()) { |
dev_priv->display.update_wm = ivybridge_update_wm; |
dev_priv->display.update_wm = sandybridge_update_wm; |
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; |
} else { |
DRM_DEBUG_KMS("Failed to read display plane latency. " |
4227,12 → 4020,6 |
DRM_ERROR("GT thread status wait timed out\n"); |
} |
/*
 * Clear all FORCEWAKE request bits (gen6 single-threaded variant),
 * allowing the GT to drop back into its power-saving state.
 */
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
}
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
{ |
u32 forcewake_ack; |
4246,7 → 4033,7 |
FORCEWAKE_ACK_TIMEOUT_MS)) |
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); |
I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL); |
I915_WRITE_NOTRACE(FORCEWAKE, 1); |
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ |
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), |
4256,13 → 4043,6 |
__gen6_gt_wait_for_thread_c0(dev_priv); |
} |
/*
 * Clear all FORCEWAKE_MT request bits (multi-threaded forcewake
 * variant used on IVB/HSW); the masked-bit write releases every
 * outstanding wake request at once.
 */
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_MT */
POSTING_READ(ECOBUS);
}
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) |
{ |
u32 forcewake_ack; |
4276,9 → 4056,8 |
FORCEWAKE_ACK_TIMEOUT_MS)) |
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); |
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); |
/* something from same cacheline, but !FORCEWAKE_MT */ |
POSTING_READ(ECOBUS); |
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); |
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ |
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), |
FORCEWAKE_ACK_TIMEOUT_MS)) |
4315,16 → 4094,14 |
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
{ |
I915_WRITE_NOTRACE(FORCEWAKE, 0); |
/* something from same cacheline, but !FORCEWAKE */ |
POSTING_READ(ECOBUS); |
/* gen6_gt_check_fifodbg doubles as the POSTING_READ */ |
gen6_gt_check_fifodbg(dev_priv); |
} |
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) |
{ |
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
/* something from same cacheline, but !FORCEWAKE_MT */ |
POSTING_READ(ECOBUS); |
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); |
/* gen6_gt_check_fifodbg doubles as the POSTING_READ */ |
gen6_gt_check_fifodbg(dev_priv); |
} |
4361,13 → 4138,6 |
return ret; |
} |
/*
 * Clear all FORCEWAKE_VLV request bits (ValleyView variant). The
 * posting read on the ACK register flushes the write without
 * touching the FORCEWAKE register's own cacheline.
 */
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_VLV */
POSTING_READ(FORCEWAKE_ACK_VLV);
}
static void vlv_force_wake_get(struct drm_i915_private *dev_priv) |
{ |
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, |
4374,7 → 4144,7 |
FORCEWAKE_ACK_TIMEOUT_MS)) |
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); |
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); |
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1)); |
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), |
FORCEWAKE_ACK_TIMEOUT_MS)) |
4385,25 → 4155,11 |
static void vlv_force_wake_put(struct drm_i915_private *dev_priv) |
{ |
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
/* something from same cacheline, but !FORCEWAKE_VLV */ |
POSTING_READ(FORCEWAKE_ACK_VLV); |
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1)); |
/* The below doubles as a POSTING_READ */ |
gen6_gt_check_fifodbg(dev_priv); |
} |
/*
 * Reset forcewake state to a known-clean baseline, selecting the
 * platform-appropriate variant: VLV, plain gen6+, and additionally
 * the multi-threaded registers on IVB/HSW (which have both sets).
 */
void intel_gt_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_VALLEYVIEW(dev)) {
vlv_force_wake_reset(dev_priv);
} else if (INTEL_INFO(dev)->gen >= 6) {
__gen6_gt_force_wake_reset(dev_priv);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
}
void intel_gt_init(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
4410,63 → 4166,38 |
spin_lock_init(&dev_priv->gt_lock); |
intel_gt_reset(dev); |
if (IS_VALLEYVIEW(dev)) { |
dev_priv->gt.force_wake_get = vlv_force_wake_get; |
dev_priv->gt.force_wake_put = vlv_force_wake_put; |
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { |
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; |
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; |
} else if (IS_GEN6(dev)) { |
} else if (INTEL_INFO(dev)->gen >= 6) { |
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; |
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; |
} |
} |
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) |
{ |
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
/* IVB configs may use multi-threaded forcewake */ |
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { |
u32 ecobus; |
if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { |
DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); |
return -EAGAIN; |
} |
/* A small trick here - if the bios hasn't configured |
* MT forcewake, and if the device is in RC6, then |
* force_wake_mt_get will not wake the device and the |
* ECOBUS read will return zero. Which will be |
* (correctly) interpreted by the test below as MT |
* forcewake being disabled. |
*/ |
mutex_lock(&dev->struct_mutex); |
__gen6_gt_force_wake_mt_get(dev_priv); |
ecobus = I915_READ_NOTRACE(ECOBUS); |
__gen6_gt_force_wake_mt_put(dev_priv); |
mutex_unlock(&dev->struct_mutex); |
I915_WRITE(GEN6_PCODE_DATA, *val); |
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); |
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
500)) { |
DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); |
return -ETIMEDOUT; |
if (ecobus & FORCEWAKE_MT_ENABLE) { |
DRM_DEBUG_KMS("Using MT version of forcewake\n"); |
dev_priv->gt.force_wake_get = |
__gen6_gt_force_wake_mt_get; |
dev_priv->gt.force_wake_put = |
__gen6_gt_force_wake_mt_put; |
} |
*val = I915_READ(GEN6_PCODE_DATA); |
I915_WRITE(GEN6_PCODE_DATA, 0); |
return 0; |
} |
/*
 * Write @val to pcode mailbox @mbox (SNB+ punit firmware interface).
 *
 * Caller must hold rps.hw_lock. Returns 0 on success, -EAGAIN if the
 * mailbox is still busy from a previous command, or -ETIMEDOUT if the
 * firmware does not clear the READY bit within 500 ms.
 */
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
/* A READY bit still set means the previous command never completed. */
if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
return -EAGAIN;
}
I915_WRITE(GEN6_PCODE_DATA, val);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
/* Firmware clears READY when it has consumed the command. */
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500)) {
DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
I915_WRITE(GEN6_PCODE_DATA, 0);
return 0;
}
/drivers/video/drm/i915/intel_sdvo.c |
---|
27,7 → 27,7 |
*/ |
#include <linux/i2c.h> |
#include <linux/slab.h> |
#include <linux/delay.h> |
//#include <linux/delay.h> |
#include <linux/export.h> |
#include <drm/drmP.h> |
#include <drm/drm_crtc.h> |
518,7 → 518,7 |
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, |
void *response, int response_len) |
{ |
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */ |
u8 retry = 5; |
u8 status; |
int i; |
531,15 → 531,6 |
* command to be complete. |
* |
* Check 5 times in case the hardware failed to read the docs. |
* |
* Also beware that the first response by many devices is to |
* reply PENDING and stall for time. TVs are notorious for |
* requiring longer than specified to complete their replies. |
* Originally (in the DDX long ago), the delay was only ever 15ms |
* with an additional delay of 30ms applied for TVs added later after |
* many experiments. To accommodate both sets of delays, we do a |
* sequence of slow checks if the device is falling behind and fails |
* to reply within 5*15µs. |
*/ |
if (!intel_sdvo_read_byte(intel_sdvo, |
SDVO_I2C_CMD_STATUS, |
546,12 → 537,8 |
&status)) |
goto log_fail; |
while (status == SDVO_CMD_STATUS_PENDING && --retry) { |
if (retry < 10) |
msleep(15); |
else |
while (status == SDVO_CMD_STATUS_PENDING && retry--) { |
udelay(15); |
if (!intel_sdvo_read_byte(intel_sdvo, |
SDVO_I2C_CMD_STATUS, |
&status)) |
1250,30 → 1237,6 |
temp = I915_READ(intel_sdvo->sdvo_reg); |
if ((temp & SDVO_ENABLE) != 0) { |
/* HW workaround for IBX, we need to move the port to |
* transcoder A before disabling it. */ |
if (HAS_PCH_IBX(encoder->base.dev)) { |
struct drm_crtc *crtc = encoder->base.crtc; |
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; |
if (temp & SDVO_PIPE_B_SELECT) { |
temp &= ~SDVO_PIPE_B_SELECT; |
I915_WRITE(intel_sdvo->sdvo_reg, temp); |
POSTING_READ(intel_sdvo->sdvo_reg); |
/* Again we need to write this twice. */ |
I915_WRITE(intel_sdvo->sdvo_reg, temp); |
POSTING_READ(intel_sdvo->sdvo_reg); |
/* Transcoder selection bits only update |
* effectively on vblank. */ |
if (crtc) |
intel_wait_for_vblank(encoder->base.dev, pipe); |
else |
msleep(50); |
} |
} |
intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); |
} |
} |
1290,20 → 1253,8 |
u8 status; |
temp = I915_READ(intel_sdvo->sdvo_reg); |
if ((temp & SDVO_ENABLE) == 0) { |
/* HW workaround for IBX, we need to move the port |
* to transcoder A before disabling it. */ |
if (HAS_PCH_IBX(dev)) { |
struct drm_crtc *crtc = encoder->base.crtc; |
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; |
/* Restore the transcoder select bit. */ |
if (pipe == PIPE_B) |
temp |= SDVO_PIPE_B_SELECT; |
} |
if ((temp & SDVO_ENABLE) == 0) |
intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); |
} |
for (i = 0; i < 2; i++) |
intel_wait_for_vblank(dev, intel_crtc->pipe); |
1557,11 → 1508,17 |
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
enum drm_connector_status ret; |
if (!intel_sdvo_get_value(intel_sdvo, |
SDVO_CMD_GET_ATTACHED_DISPLAYS, |
&response, 2)) |
if (!intel_sdvo_write_cmd(intel_sdvo, |
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) |
return connector_status_unknown; |
/* add 30ms delay when the output type might be TV */ |
if (intel_sdvo->caps.output_flags & SDVO_TV_MASK) |
msleep(30); |
if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) |
return connector_status_unknown; |
DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", |
response & 0xff, response >> 8, |
intel_sdvo_connector->output_flag); |
1848,7 → 1805,7 |
intel_sdvo_destroy_enhance_property(connector); |
drm_sysfs_connector_remove(connector); |
drm_connector_cleanup(connector); |
kfree(intel_sdvo_connector); |
kfree(connector); |
} |
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) |
1880,7 → 1837,7 |
uint8_t cmd; |
int ret; |
ret = drm_object_property_set_value(&connector->base, property, val); |
ret = drm_connector_property_set_value(connector, property, val); |
if (ret) |
return ret; |
1937,7 → 1894,7 |
} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { |
temp_value = val; |
if (intel_sdvo_connector->left == property) { |
drm_object_property_set_value(&connector->base, |
drm_connector_property_set_value(connector, |
intel_sdvo_connector->right, val); |
if (intel_sdvo_connector->left_margin == temp_value) |
return 0; |
1949,7 → 1906,7 |
cmd = SDVO_CMD_SET_OVERSCAN_H; |
goto set_value; |
} else if (intel_sdvo_connector->right == property) { |
drm_object_property_set_value(&connector->base, |
drm_connector_property_set_value(connector, |
intel_sdvo_connector->left, val); |
if (intel_sdvo_connector->right_margin == temp_value) |
return 0; |
1961,7 → 1918,7 |
cmd = SDVO_CMD_SET_OVERSCAN_H; |
goto set_value; |
} else if (intel_sdvo_connector->top == property) { |
drm_object_property_set_value(&connector->base, |
drm_connector_property_set_value(connector, |
intel_sdvo_connector->bottom, val); |
if (intel_sdvo_connector->top_margin == temp_value) |
return 0; |
1973,7 → 1930,7 |
cmd = SDVO_CMD_SET_OVERSCAN_V; |
goto set_value; |
} else if (intel_sdvo_connector->bottom == property) { |
drm_object_property_set_value(&connector->base, |
drm_connector_property_set_value(connector, |
intel_sdvo_connector->top, val); |
if (intel_sdvo_connector->bottom_margin == temp_value) |
return 0; |
2046,7 → 2003,7 |
drm_mode_destroy(encoder->dev, |
intel_sdvo->sdvo_lvds_fixed_mode); |
i2c_del_adapter(&intel_sdvo->ddc); |
// i2c_del_adapter(&intel_sdvo->ddc); |
intel_encoder_destroy(encoder); |
} |
2126,24 → 2083,17 |
else |
mapping = &dev_priv->sdvo_mappings[1]; |
if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin)) |
pin = GMBUS_PORT_DPB; |
if (mapping->initialized) |
pin = mapping->i2c_pin; |
else |
pin = GMBUS_PORT_DPB; |
if (intel_gmbus_is_port_valid(pin)) { |
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); |
/* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow |
* our code totally fails once we start using gmbus. Hence fall back to |
* bit banging for now. */ |
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); |
intel_gmbus_force_bit(sdvo->i2c, true); |
} else { |
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); |
} |
/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */ |
static void |
intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo) |
{ |
intel_gmbus_force_bit(sdvo->i2c, false); |
} |
static bool |
2488,7 → 2438,7 |
i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); |
intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; |
drm_object_attach_property(&intel_sdvo_connector->base.base.base, |
drm_connector_attach_property(&intel_sdvo_connector->base.base, |
intel_sdvo_connector->tv_format, 0); |
return true; |
2504,7 → 2454,7 |
intel_sdvo_connector->name = \ |
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ |
if (!intel_sdvo_connector->name) return false; \ |
drm_object_attach_property(&connector->base, \ |
drm_connector_attach_property(connector, \ |
intel_sdvo_connector->name, \ |
intel_sdvo_connector->cur_##name); \ |
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ |
2541,7 → 2491,7 |
if (!intel_sdvo_connector->left) |
return false; |
drm_object_attach_property(&connector->base, |
drm_connector_attach_property(connector, |
intel_sdvo_connector->left, |
intel_sdvo_connector->left_margin); |
2550,7 → 2500,7 |
if (!intel_sdvo_connector->right) |
return false; |
drm_object_attach_property(&connector->base, |
drm_connector_attach_property(connector, |
intel_sdvo_connector->right, |
intel_sdvo_connector->right_margin); |
DRM_DEBUG_KMS("h_overscan: max %d, " |
2578,7 → 2528,7 |
if (!intel_sdvo_connector->top) |
return false; |
drm_object_attach_property(&connector->base, |
drm_connector_attach_property(connector, |
intel_sdvo_connector->top, |
intel_sdvo_connector->top_margin); |
2588,7 → 2538,7 |
if (!intel_sdvo_connector->bottom) |
return false; |
drm_object_attach_property(&connector->base, |
drm_connector_attach_property(connector, |
intel_sdvo_connector->bottom, |
intel_sdvo_connector->bottom_margin); |
DRM_DEBUG_KMS("v_overscan: max %d, " |
2620,7 → 2570,7 |
if (!intel_sdvo_connector->dot_crawl) |
return false; |
drm_object_attach_property(&connector->base, |
drm_connector_attach_property(connector, |
intel_sdvo_connector->dot_crawl, |
intel_sdvo_connector->cur_dot_crawl); |
DRM_DEBUG_KMS("dot crawl: current %d\n", response); |
2705,7 → 2655,7 |
sdvo->ddc.algo_data = sdvo; |
sdvo->ddc.algo = &intel_sdvo_ddc_proxy; |
return i2c_add_adapter(&sdvo->ddc) == 0; |
return 1; //i2c_add_adapter(&sdvo->ddc) == 0; |
} |
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) |
2724,8 → 2674,10 |
intel_sdvo->is_sdvob = is_sdvob; |
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; |
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); |
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) |
goto err_i2c_bus; |
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { |
kfree(intel_sdvo); |
return false; |
} |
/* encoder type will be decided later */ |
intel_encoder = &intel_sdvo->base; |
2823,9 → 2775,7 |
err: |
drm_encoder_cleanup(&intel_encoder->base); |
i2c_del_adapter(&intel_sdvo->ddc); |
err_i2c_bus: |
intel_sdvo_unselect_i2c_bus(intel_sdvo); |
// i2c_del_adapter(&intel_sdvo->ddc); |
kfree(intel_sdvo); |
return false; |
/drivers/video/drm/i915/kms_display.c |
---|
368,9 → 368,7 |
main_device = dev; |
#ifdef __HWA__ |
err = init_bitmaps(); |
#endif |
return 0; |
}; |
626,8 → 624,8 |
#ifdef __HWA__ |
extern struct hmm bm_mm; |
1314,18 → 1312,18 |
#endif |
#endif |
void __stdcall run_workqueue(struct workqueue_struct *cwq) |
{ |
unsigned long irqflags; |
dbgprintf("wq: %x head %x, next %x\n", |
cwq, &cwq->worklist, cwq->worklist.next); |
// dbgprintf("wq: %x head %x, next %x\n", |
// cwq, &cwq->worklist, cwq->worklist.next); |
spin_lock_irqsave(&cwq->lock, irqflags); |
1335,8 → 1333,8 |
struct work_struct, entry); |
work_func_t f = work->func; |
list_del_init(cwq->worklist.next); |
dbgprintf("head %x, next %x\n", |
&cwq->worklist, cwq->worklist.next); |
// dbgprintf("head %x, next %x\n", |
// &cwq->worklist, cwq->worklist.next); |
spin_unlock_irqrestore(&cwq->lock, irqflags); |
f(work); |
1353,8 → 1351,8 |
{ |
unsigned long flags; |
dbgprintf("wq: %x, work: %x\n", |
wq, work ); |
// dbgprintf("wq: %x, work: %x\n", |
// wq, work ); |
if(!list_empty(&work->entry)) |
return 0; |
1367,8 → 1365,8 |
list_add_tail(&work->entry, &wq->worklist); |
spin_unlock_irqrestore(&wq->lock, flags); |
dbgprintf("wq: %x head %x, next %x\n", |
wq, &wq->worklist, wq->worklist.next); |
// dbgprintf("wq: %x head %x, next %x\n", |
// wq, &wq->worklist, wq->worklist.next); |
return 1; |
}; |
1378,8 → 1376,8 |
struct delayed_work *dwork = (struct delayed_work *)__data; |
struct workqueue_struct *wq = dwork->work.data; |
dbgprintf("wq: %x, work: %x\n", |
wq, &dwork->work ); |
// dbgprintf("wq: %x, work: %x\n", |
// wq, &dwork->work ); |
__queue_work(wq, &dwork->work); |
} |
1400,8 → 1398,8 |
{ |
u32 flags; |
dbgprintf("wq: %x, work: %x\n", |
wq, &dwork->work ); |
// dbgprintf("wq: %x, work: %x\n", |
// wq, &dwork->work ); |
if (delay == 0) |
return __queue_work(wq, &dwork->work); |
/drivers/video/drm/i915/main.c |
---|
53,8 → 53,8 |
if(!dbg_open(log)) |
{ |
// strcpy(log, "/tmp1/1/i915.log"); |
strcpy(log, "/RD/1/DRIVERS/i915.log"); |
// strcpy(log, "/BD1/2/i915.log"); |
if(!dbg_open(log)) |
{ |
62,7 → 62,7 |
return 0; |
}; |
} |
dbgprintf("i915 RC 10\n cmdline: %s\n", cmdline); |
dbgprintf("i915 preview #08\n cmdline: %s\n", cmdline); |
cpu_detect(); |
dbgprintf("\ncache line size %d\n", x86_clflush_size); |
153,20 → 153,20 |
case SRV_CREATE_SURFACE: |
// check_input(8); |
// retval = create_surface(main_device, (struct io_call_10*)inp); |
retval = create_surface(main_device, (struct io_call_10*)inp); |
break; |
case SRV_LOCK_SURFACE: |
// retval = lock_surface((struct io_call_12*)inp); |
retval = lock_surface((struct io_call_12*)inp); |
break; |
case SRV_RESIZE_SURFACE: |
// retval = resize_surface((struct io_call_14*)inp); |
retval = resize_surface((struct io_call_14*)inp); |
break; |
// case SRV_BLIT_BITMAP: |
// srv_blit_bitmap( inp[0], inp[1], inp[2], |
// inp[3], inp[4], inp[5], inp[6]); |
case SRV_BLIT_BITMAP: |
srv_blit_bitmap( inp[0], inp[1], inp[2], |
inp[3], inp[4], inp[5], inp[6]); |
// blit_tex( inp[0], inp[1], inp[2], |
// inp[3], inp[4], inp[5], inp[6]); |
279,26 → 279,3 |
} |
} |
int get_driver_caps(hwcaps_t *caps) |
{ |
int ret = 0; |
switch(caps->idx) |
{ |
case 0: |
caps->opt[0] = 0; |
caps->opt[1] = 0; |
break; |
case 1: |
caps->cap1.max_tex_width = 4096; |
caps->cap1.max_tex_height = 4096; |
break; |
default: |
ret = 1; |
}; |
caps->idx = 1; |
return ret; |
} |
/drivers/video/drm/i915/bitmap.c |
---|
6,9 → 6,7 |
#include "hmm.h" |
#include "bitmap.h" |
//#define DRIVER_CAPS_0 HW_BIT_BLIT; |
#define DRIVER_CAPS_0 0 |
#define DRIVER_CAPS_0 HW_BIT_BLIT; |
#define DRIVER_CAPS_1 0 |
struct context *context_map[256]; |
/drivers/video/drm/i915/i915_irq.c |
---|
28,6 → 28,7 |
#define pr_fmt(fmt) ": " fmt |
#include <linux/irqreturn.h> |
#include <linux/slab.h> |
#include <drm/drmP.h> |
#include <drm/i915_drm.h> |
39,7 → 40,16 |
#define pr_err(fmt, ...) \ |
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) |
#define DRM_IRQ_ARGS void *arg |
/*
 * Minimal stand-in for the Linux drm_driver IRQ hooks: this port keeps
 * only the handler/preinstall/postinstall callbacks as a file-local
 * singleton, accessed through the 'driver' pointer below.
 */
static struct drm_driver {
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev);
}drm_driver;
static struct drm_driver *driver = &drm_driver;
#define DRM_WAKEUP( queue ) wake_up( queue ) |
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue ) |
160,10 → 170,7 |
i915_pipe_enabled(struct drm_device *dev, int pipe) |
{ |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
pipe); |
return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; |
return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; |
} |
/* Called from drm generic code, passed a 'crtc', which |
253,7 → 260,7 |
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) |
return; |
mutex_lock(&dev_priv->rps.hw_lock); |
mutex_lock(&dev_priv->dev->struct_mutex); |
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) |
new_delay = dev_priv->rps.cur_delay + 1; |
268,7 → 275,7 |
gen6_set_rps(dev_priv->dev, new_delay); |
} |
mutex_unlock(&dev_priv->rps.hw_lock); |
mutex_unlock(&dev_priv->dev->struct_mutex); |
} |
284,7 → 291,7 |
static void ivybridge_parity_work(struct work_struct *work) |
{ |
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
l3_parity.error_work); |
parity_error_work); |
u32 error_status, row, bank, subbank; |
char *parity_event[5]; |
uint32_t misccpctl; |
348,7 → 355,7 |
I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); |
queue_work(dev_priv->wq, &dev_priv->parity_error_work); |
} |
#endif |
357,7 → 364,6 |
struct drm_i915_private *dev_priv, |
u32 gt_iir) |
{ |
printf("%s\n", __FUNCTION__); |
if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | |
GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) |
399,10 → 405,10 |
POSTING_READ(GEN6_PMIMR); |
spin_unlock_irqrestore(&dev_priv->rps.lock, flags); |
// queue_work(dev_priv->wq, &dev_priv->rps.work); |
queue_work(dev_priv->wq, &dev_priv->rps.work); |
} |
static irqreturn_t valleyview_irq_handler(int irq, void *arg) |
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) |
{ |
struct drm_device *dev = (struct drm_device *) arg; |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
413,8 → 419,6 |
u32 pipe_stats[I915_MAX_PIPES]; |
bool blc_event; |
printf("%s\n", __FUNCTION__); |
atomic_inc(&dev_priv->irq_received); |
while (true) { |
475,8 → 479,8 |
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) |
blc_event = true; |
if (pm_iir & GEN6_PM_DEFERRED_EVENTS) |
gen6_queue_rps_work(dev_priv, pm_iir); |
// if (pm_iir & GEN6_PM_DEFERRED_EVENTS) |
// gen6_queue_rps_work(dev_priv, pm_iir); |
I915_WRITE(GTIIR, gt_iir); |
I915_WRITE(GEN6_PMIIR, pm_iir); |
492,8 → 496,6 |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
int pipe; |
printf("%s\n", __FUNCTION__); |
if (pch_iir & SDE_AUDIO_POWER_MASK) |
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
(pch_iir & SDE_AUDIO_POWER_MASK) >> |
558,7 → 560,7 |
I915_READ(FDI_RX_IIR(pipe))); |
} |
static irqreturn_t ivybridge_irq_handler(int irq, void *arg) |
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) |
{ |
struct drm_device *dev = (struct drm_device *) arg; |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
566,8 → 568,6 |
irqreturn_t ret = IRQ_NONE; |
int i; |
printf("%s\n", __FUNCTION__); |
atomic_inc(&dev_priv->irq_received); |
/* disable master interrupt before clearing iir */ |
636,15 → 636,14 |
notify_ring(dev, &dev_priv->ring[VCS]); |
} |
static irqreturn_t ironlake_irq_handler(int irq, void *arg) |
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) |
{ |
struct drm_device *dev = (struct drm_device *) arg; |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
int ret = IRQ_NONE; |
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; |
u32 hotplug_mask; |
printf("%s\n", __FUNCTION__); |
atomic_inc(&dev_priv->irq_received); |
/* disable master interrupt before clearing iir */ |
661,6 → 660,11 |
(!IS_GEN6(dev) || pm_iir == 0)) |
goto done; |
if (HAS_PCH_CPT(dev)) |
hotplug_mask = SDE_HOTPLUG_MASK_CPT; |
else |
hotplug_mask = SDE_HOTPLUG_MASK; |
ret = IRQ_HANDLED; |
if (IS_GEN5(dev)) |
982,8 → 986,6 |
= I915_READ(RING_SYNC_0(ring->mmio_base)); |
error->semaphore_mboxes[ring->id][1] |
= I915_READ(RING_SYNC_1(ring->mmio_base)); |
error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; |
error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; |
} |
if (INTEL_INFO(dev)->gen >= 4) { |
1007,7 → 1009,6 |
error->acthd[ring->id] = intel_ring_get_active_head(ring); |
error->head[ring->id] = I915_READ_HEAD(ring); |
error->tail[ring->id] = I915_READ_TAIL(ring); |
error->ctl[ring->id] = I915_READ_CTL(ring); |
error->cpu_ring_head[ring->id] = ring->head; |
error->cpu_ring_tail[ring->id] = ring->tail; |
1102,16 → 1103,6 |
else |
error->ier = I915_READ(IER); |
if (INTEL_INFO(dev)->gen >= 6) |
error->derrmr = I915_READ(DERRMR); |
if (IS_VALLEYVIEW(dev)) |
error->forcewake = I915_READ(FORCEWAKE_VLV); |
else if (INTEL_INFO(dev)->gen >= 7) |
error->forcewake = I915_READ(FORCEWAKE_MT); |
else if (INTEL_INFO(dev)->gen == 6) |
error->forcewake = I915_READ(FORCEWAKE); |
for_each_pipe(pipe) |
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); |
1342,9 → 1333,7 |
spin_lock_irqsave(&dev->event_lock, flags); |
work = intel_crtc->unpin_work; |
if (work == NULL || |
atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || |
!work->enable_stall_check) { |
if (work == NULL || work->pending || !work->enable_stall_check) { |
/* Either the pending flip IRQ arrived, or we're too early. Don't check */ |
spin_unlock_irqrestore(&dev->event_lock, flags); |
return; |
1659,7 → 1648,7 |
/* Clear & enable PCU event interrupts */ |
I915_WRITE(DEIIR, DE_PCU_EVENT); |
I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); |
// ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
} |
return 0; |
1721,7 → 1710,6 |
u32 enable_mask; |
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; |
u32 render_irqs; |
u16 msid; |
enable_mask = I915_DISPLAY_PORT_INTERRUPT; |
1742,11 → 1730,11 |
dev_priv->pipestat[1] = 0; |
/* Hack for broken MSIs on VLV */ |
// pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); |
// pci_read_config_word(dev->pdev, 0x98, &msid); |
// msid &= 0xff; /* mask out delivery bits */ |
// msid |= (1<<14); |
// pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); |
pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); |
pci_read_config_word(dev->pdev, 0x98, &msid); |
msid &= 0xff; /* mask out delivery bits */ |
msid |= (1<<14); |
pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); |
I915_WRITE(VLV_IMR, dev_priv->irq_mask); |
I915_WRITE(VLV_IER, enable_mask); |
1761,12 → 1749,21 |
I915_WRITE(VLV_IIR, 0xffffffff); |
I915_WRITE(VLV_IIR, 0xffffffff); |
dev_priv->gt_irq_mask = ~0; |
I915_WRITE(GTIIR, I915_READ(GTIIR)); |
I915_WRITE(GTIIR, I915_READ(GTIIR)); |
I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | |
GEN6_BLITTER_USER_INTERRUPT; |
I915_WRITE(GTIER, render_irqs); |
I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | |
GT_GEN6_BLT_CS_ERROR_INTERRUPT | |
GT_GEN6_BLT_USER_INTERRUPT | |
GT_GEN6_BSD_USER_INTERRUPT | |
GT_GEN6_BSD_CS_ERROR_INTERRUPT | |
GT_GEN7_L3_PARITY_ERROR_INTERRUPT | |
GT_PIPE_NOTIFY | |
GT_RENDER_CS_ERROR_INTERRUPT | |
GT_SYNC_STATUS | |
GT_USER_INTERRUPT); |
POSTING_READ(GTIER); |
/* ack & enable invalid PTE error interrupts */ |
1784,9 → 1781,9 |
hotplug_en |= HDMIC_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) |
hotplug_en |= HDMID_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) |
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) |
hotplug_en |= SDVOC_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) |
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) |
hotplug_en |= SDVOB_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { |
hotplug_en |= CRT_HOTPLUG_INT_EN; |
1799,6 → 1796,7 |
return 0; |
} |
static void valleyview_irq_uninstall(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1888,7 → 1886,8 |
return 0; |
} |
static irqreturn_t i8xx_irq_handler(int irq, void *arg) |
static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS) |
{ |
struct drm_device *dev = (struct drm_device *) arg; |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2069,7 → 2068,7 |
return 0; |
} |
static irqreturn_t i915_irq_handler(int irq, void *arg) |
static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS) |
{ |
struct drm_device *dev = (struct drm_device *) arg; |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2308,7 → 2307,7 |
return 0; |
} |
static irqreturn_t i965_irq_handler(int irq, void *arg) |
static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS) |
{ |
struct drm_device *dev = (struct drm_device *) arg; |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2452,49 → 2451,38 |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (IS_VALLEYVIEW(dev)) { |
dev->driver->irq_handler = valleyview_irq_handler; |
dev->driver->irq_preinstall = valleyview_irq_preinstall; |
dev->driver->irq_postinstall = valleyview_irq_postinstall; |
driver->irq_handler = valleyview_irq_handler; |
driver->irq_preinstall = valleyview_irq_preinstall; |
driver->irq_postinstall = valleyview_irq_postinstall; |
} else if (IS_IVYBRIDGE(dev)) { |
/* Share pre & uninstall handlers with ILK/SNB */ |
dev->driver->irq_handler = ivybridge_irq_handler; |
dev->driver->irq_preinstall = ironlake_irq_preinstall; |
dev->driver->irq_postinstall = ivybridge_irq_postinstall; |
driver->irq_handler = ivybridge_irq_handler; |
driver->irq_preinstall = ironlake_irq_preinstall; |
driver->irq_postinstall = ivybridge_irq_postinstall; |
} else if (IS_HASWELL(dev)) { |
/* Share interrupts handling with IVB */ |
dev->driver->irq_handler = ivybridge_irq_handler; |
dev->driver->irq_preinstall = ironlake_irq_preinstall; |
dev->driver->irq_postinstall = ivybridge_irq_postinstall; |
driver->irq_handler = ivybridge_irq_handler; |
driver->irq_preinstall = ironlake_irq_preinstall; |
driver->irq_postinstall = ivybridge_irq_postinstall; |
} else if (HAS_PCH_SPLIT(dev)) { |
dev->driver->irq_handler = ironlake_irq_handler; |
dev->driver->irq_preinstall = ironlake_irq_preinstall; |
dev->driver->irq_postinstall = ironlake_irq_postinstall; |
driver->irq_handler = ironlake_irq_handler; |
driver->irq_preinstall = ironlake_irq_preinstall; |
driver->irq_postinstall = ironlake_irq_postinstall; |
} else { |
if (INTEL_INFO(dev)->gen == 2) { |
} else if (INTEL_INFO(dev)->gen == 3) { |
dev->driver->irq_preinstall = i915_irq_preinstall; |
dev->driver->irq_postinstall = i915_irq_postinstall; |
dev->driver->irq_handler = i915_irq_handler; |
driver->irq_handler = i915_irq_handler; |
driver->irq_preinstall = i915_irq_preinstall; |
driver->irq_postinstall = i915_irq_postinstall; |
} else { |
dev->driver->irq_preinstall = i965_irq_preinstall; |
dev->driver->irq_postinstall = i965_irq_postinstall; |
dev->driver->irq_handler = i965_irq_handler; |
driver->irq_handler = i965_irq_handler; |
driver->irq_preinstall = i965_irq_preinstall; |
driver->irq_postinstall = i965_irq_postinstall; |
} |
} |
printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ; |
} |
irqreturn_t intel_irq_handler(struct drm_device *dev) |
{ |
printf("i915 irq\n"); |
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ; |
return dev->driver->irq_handler(0, dev); |
} |
int drm_irq_install(struct drm_device *dev) |
{ |
unsigned long sh_flags = 0; |
2523,14 → 2511,14 |
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); |
/* Before installing handler */ |
if (dev->driver->irq_preinstall) |
dev->driver->irq_preinstall(dev); |
if (driver->irq_preinstall) |
driver->irq_preinstall(dev); |
ret = AttachIntHandler(irq_line, intel_irq_handler, (u32)dev); |
ret = AttachIntHandler(irq_line, driver->irq_handler, (u32)dev); |
/* After installing handler */ |
if (dev->driver->irq_postinstall) |
ret = dev->driver->irq_postinstall(dev); |
if (driver->irq_postinstall) |
ret = driver->irq_postinstall(dev); |
if (ret < 0) { |
DRM_ERROR(__FUNCTION__); |
/drivers/video/drm/i915/i915_gem.c |
---|
30,6 → 30,7 |
#include "i915_drv.h" |
#include "i915_trace.h" |
#include "intel_drv.h" |
//#include <linux/shmem_fs.h> |
#include <linux/slab.h> |
//#include <linux/swap.h> |
#include <linux/pci.h> |
52,6 → 53,21 |
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) |
static inline long IS_ERR(const void *ptr) |
{ |
return IS_ERR_VALUE((unsigned long)ptr); |
} |
static inline void *ERR_PTR(long error) |
{ |
return (void *) error; |
} |
static inline long PTR_ERR(const void *ptr) |
{ |
return (long) ptr; |
} |
void |
drm_gem_object_free(struct kref *kref) |
{ |
905,12 → 921,12 |
* domain anymore. */ |
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
i915_gem_clflush_object(obj); |
i915_gem_chipset_flush(dev); |
intel_gtt_chipset_flush(); |
} |
} |
if (needs_clflush_after) |
i915_gem_chipset_flush(dev); |
intel_gtt_chipset_flush(); |
return ret; |
} |
1373,8 → 1389,6 |
static void |
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) |
{ |
int page_count = obj->base.size / PAGE_SIZE; |
struct scatterlist *sg; |
int ret, i; |
BUG_ON(obj->madv == __I915_MADV_PURGED); |
1392,18 → 1406,12 |
if (obj->madv == I915_MADV_DONTNEED) |
obj->dirty = 0; |
for_each_sg(obj->pages->sgl, sg, page_count, i) { |
struct page *page = sg_page(sg); |
for (i = 0; i < obj->pages.nents; i++) |
FreePage(obj->pages.page[i]); |
page_cache_release(page); |
} |
//DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, page_count); |
DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, obj->pages.nents); |
obj->dirty = 0; |
sg_free_table(obj->pages); |
kfree(obj->pages); |
kfree(obj->pages.page); |
} |
static int |
1411,7 → 1419,10 |
{ |
const struct drm_i915_gem_object_ops *ops = obj->ops; |
if (obj->pages == NULL) |
// printf("page %x pin count %d\n", |
// obj->pages.page, obj->pages_pin_count ); |
if (obj->pages.page == NULL) |
return 0; |
BUG_ON(obj->gtt_space); |
1419,14 → 1430,10 |
if (obj->pages_pin_count) |
return -EBUSY; |
/* ->put_pages might need to allocate memory for the bit17 swizzle |
* array, hence protect them from being reaped by removing them from gtt |
* lists early. */ |
list_del(&obj->gtt_list); |
ops->put_pages(obj); |
obj->pages = NULL; |
obj->pages.page = NULL; |
list_del(&obj->gtt_list); |
if (i915_gem_object_is_purgeable(obj)) |
i915_gem_object_truncate(obj); |
1443,55 → 1450,43 |
static int |
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) |
{ |
dma_addr_t page; |
int page_count, i; |
struct sg_table *st; |
struct scatterlist *sg; |
struct page *page; |
gfp_t gfp; |
/* Assert that the object is not currently in any GPU domain. As it |
* wasn't in the GTT, there shouldn't be any way it could have been in |
* a GPU cache |
/* Get the list of pages out of our struct file. They'll be pinned |
* at this point until we release them. |
*/ |
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); |
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); |
st = kmalloc(sizeof(*st), GFP_KERNEL); |
if (st == NULL) |
return -ENOMEM; |
page_count = obj->base.size / PAGE_SIZE; |
if (sg_alloc_table(st, page_count, GFP_KERNEL)) { |
sg_free_table(st); |
kfree(st); |
BUG_ON(obj->pages.page != NULL); |
obj->pages.page = malloc(page_count * sizeof(dma_addr_t)); |
if (obj->pages.page == NULL) |
return -ENOMEM; |
} |
/* Get the list of pages out of our struct file. They'll be pinned |
* at this point until we release them. |
* |
* Fail silently without starting the shrinker |
*/ |
for_each_sg(st->sgl, sg, page_count, i) { |
for (i = 0; i < page_count; i++) { |
page = AllocPage(); // oh-oh |
if ( page == 0 ) |
goto err_pages; |
sg_set_page(sg, page, PAGE_SIZE, 0); |
} |
obj->pages.page[i] = page; |
}; |
DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count); |
obj->pages.nents = page_count; |
obj->pages = st; |
// DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count); |
// if (obj->tiling_mode != I915_TILING_NONE) |
// i915_gem_object_do_bit_17_swizzle(obj); |
return 0; |
err_pages: |
for_each_sg(st->sgl, sg, i, page_count) |
page_cache_release(sg_page(sg)); |
sg_free_table(st); |
kfree(st); |
return PTR_ERR(page); |
while (i--) |
FreePage(obj->pages.page[i]); |
free(obj->pages.page); |
obj->pages.page = NULL; |
obj->pages.nents = 0; |
return -ENOMEM; |
} |
/* Ensure that the associated pages are gathered from the backing storage |
1508,7 → 1503,7 |
const struct drm_i915_gem_object_ops *ops = obj->ops; |
int ret; |
if (obj->pages) |
if (obj->pages.page) |
return 0; |
BUG_ON(obj->pages_pin_count); |
1523,11 → 1518,11 |
void |
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
struct intel_ring_buffer *ring) |
struct intel_ring_buffer *ring, |
u32 seqno) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 seqno = intel_ring_get_seqno(ring); |
BUG_ON(ring == NULL); |
obj->ring = ring; |
1588,56 → 1583,28 |
WARN_ON(i915_verify_lists(dev)); |
} |
static int |
i915_gem_handle_seqno_wrap(struct drm_device *dev) |
static u32 |
i915_gem_get_seqno(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_ring_buffer *ring; |
int ret, i, j; |
drm_i915_private_t *dev_priv = dev->dev_private; |
u32 seqno = dev_priv->next_seqno; |
/* The hardware uses various monotonic 32-bit counters, if we |
* detect that they will wraparound we need to idle the GPU |
* and reset those counters. |
*/ |
ret = 0; |
for_each_ring(ring, dev_priv, i) { |
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) |
ret |= ring->sync_seqno[j] != 0; |
} |
if (ret == 0) |
return ret; |
/* reserve 0 for non-seqno */ |
if (++dev_priv->next_seqno == 0) |
dev_priv->next_seqno = 1; |
ret = i915_gpu_idle(dev); |
if (ret) |
return ret; |
i915_gem_retire_requests(dev); |
for_each_ring(ring, dev_priv, i) { |
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) |
ring->sync_seqno[j] = 0; |
return seqno; |
} |
return 0; |
} |
int |
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) |
u32 |
i915_gem_next_request_seqno(struct intel_ring_buffer *ring) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (ring->outstanding_lazy_request == 0) |
ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev); |
/* reserve 0 for non-seqno */ |
if (dev_priv->next_seqno == 0) { |
int ret = i915_gem_handle_seqno_wrap(dev); |
if (ret) |
return ret; |
dev_priv->next_seqno = 1; |
return ring->outstanding_lazy_request; |
} |
*seqno = dev_priv->next_seqno++; |
return 0; |
} |
int |
i915_add_request(struct intel_ring_buffer *ring, |
struct drm_file *file, |
1646,6 → 1613,7 |
drm_i915_private_t *dev_priv = ring->dev->dev_private; |
struct drm_i915_gem_request *request; |
u32 request_ring_position; |
u32 seqno; |
int was_empty; |
int ret; |
1664,6 → 1632,7 |
if (request == NULL) |
return -ENOMEM; |
seqno = i915_gem_next_request_seqno(ring); |
/* Record the position of the start of the request so that |
* should we detect the updated seqno part-way through the |
1672,13 → 1641,15 |
*/ |
request_ring_position = intel_ring_get_tail(ring); |
ret = ring->add_request(ring); |
ret = ring->add_request(ring, &seqno); |
if (ret) { |
kfree(request); |
return ret; |
} |
request->seqno = intel_ring_get_seqno(ring); |
trace_i915_gem_request_add(ring, seqno); |
request->seqno = seqno; |
request->ring = ring; |
request->tail = request_ring_position; |
request->emitted_jiffies = GetTimerTicks(); |
1703,7 → 1674,7 |
} |
if (out_seqno) |
*out_seqno = request->seqno; |
*out_seqno = seqno; |
return 0; |
} |
1788,6 → 1759,7 |
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) |
{ |
uint32_t seqno; |
int i; |
if (list_empty(&ring->request_list)) |
return; |
1796,6 → 1768,10 |
seqno = ring->get_seqno(ring, true); |
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) |
if (seqno >= ring->sync_seqno[i]) |
ring->sync_seqno[i] = 0; |
while (!list_empty(&ring->request_list)) { |
struct drm_i915_gem_request *request; |
1915,28 → 1891,6 |
return 0; |
} |
/** |
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT |
* @DRM_IOCTL_ARGS: standard ioctl arguments |
* |
* Returns 0 if successful, else an error is returned with the remaining time in |
* the timeout parameter. |
* -ETIME: object is still busy after timeout |
* -ERESTARTSYS: signal interrupted the wait |
* -ENONENT: object doesn't exist |
* Also possible, but rare: |
* -EAGAIN: GPU wedged |
* -ENOMEM: damn |
* -ENODEV: Internal IRQ fail |
* -E?: The add request failed |
* |
* The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any |
* non-zero timeout parameter the wait ioctl will wait for the given number of |
* nanoseconds on an object becoming unbusy. Since the wait itself does so |
* without holding struct_mutex the object may become re-busied before this |
* function completes. A similar but shorter * race condition exists in the busy |
* ioctl |
*/ |
1946,11 → 1900,6 |
/** |
* i915_gem_object_sync - sync an object to a ring. |
* |
1989,11 → 1938,7 |
ret = to->sync_to(to, from, seqno); |
if (!ret) |
/* We use last_read_seqno because sync_to() |
* might have just caused seqno wrap under |
* the radar. |
*/ |
from->sync_seqno[idx] = obj->last_read_seqno; |
from->sync_seqno[idx] = seqno; |
return ret; |
} |
2037,7 → 1982,7 |
if (obj->pin_count) |
return -EBUSY; |
BUG_ON(obj->pages == NULL); |
BUG_ON(obj->pages.page == NULL); |
ret = i915_gem_object_finish_gpu(obj); |
if (ret) |
2076,6 → 2021,14 |
return 0; |
} |
static int i915_ring_idle(struct intel_ring_buffer *ring) |
{ |
if (list_empty(&ring->active_list)) |
return 0; |
return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring)); |
} |
int i915_gpu_idle(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
2088,7 → 2041,7 |
if (ret) |
return ret; |
ret = intel_ring_idle(ring); |
ret = i915_ring_idle(ring); |
if (ret) |
return ret; |
} |
2478,7 → 2431,7 |
{ |
struct drm_device *dev = obj->base.dev; |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct drm_mm_node *node; |
struct drm_mm_node *free_space; |
u32 size, fence_size, fence_alignment, unfenced_alignment; |
bool mappable, fenceable; |
int ret; |
2522,50 → 2475,66 |
if (ret) |
return ret; |
i915_gem_object_pin_pages(obj); |
node = kzalloc(sizeof(*node), GFP_KERNEL); |
if (node == NULL) { |
i915_gem_object_unpin_pages(obj); |
return -ENOMEM; |
} |
search_free: |
if (map_and_fenceable) |
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, |
free_space = |
drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space, |
size, alignment, obj->cache_level, |
0, dev_priv->mm.gtt_mappable_end); |
0, dev_priv->mm.gtt_mappable_end, |
false); |
else |
ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node, |
size, alignment, obj->cache_level); |
if (ret) { |
free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space, |
size, alignment, obj->cache_level, |
false); |
i915_gem_object_unpin_pages(obj); |
kfree(node); |
if (free_space != NULL) { |
if (map_and_fenceable) |
obj->gtt_space = |
drm_mm_get_block_range_generic(free_space, |
size, alignment, obj->cache_level, |
0, dev_priv->mm.gtt_mappable_end, |
false); |
else |
obj->gtt_space = |
drm_mm_get_block_generic(free_space, |
size, alignment, obj->cache_level, |
false); |
} |
if (obj->gtt_space == NULL) { |
ret = 1; //i915_gem_evict_something(dev, size, alignment, |
// map_and_fenceable); |
if (ret) |
return ret; |
goto search_free; |
} |
if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) { |
i915_gem_object_unpin_pages(obj); |
drm_mm_put_block(node); |
if (WARN_ON(!i915_gem_valid_gtt_space(dev, |
obj->gtt_space, |
obj->cache_level))) { |
drm_mm_put_block(obj->gtt_space); |
obj->gtt_space = NULL; |
return -EINVAL; |
} |
ret = i915_gem_gtt_prepare_object(obj); |
if (ret) { |
i915_gem_object_unpin_pages(obj); |
drm_mm_put_block(node); |
drm_mm_put_block(obj->gtt_space); |
obj->gtt_space = NULL; |
return ret; |
} |
if (!dev_priv->mm.aliasing_ppgtt) |
i915_gem_gtt_bind_object(obj, obj->cache_level); |
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); |
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
obj->gtt_space = node; |
obj->gtt_offset = node->start; |
obj->gtt_offset = obj->gtt_space->start; |
fenceable = |
node->size == fence_size && |
(node->start & (fence_alignment - 1)) == 0; |
obj->gtt_space->size == fence_size && |
(obj->gtt_space->start & (fence_alignment - 1)) == 0; |
mappable = |
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; |
2572,7 → 2541,6 |
obj->map_and_fenceable = mappable && fenceable; |
i915_gem_object_unpin_pages(obj); |
trace_i915_gem_object_bind(obj, map_and_fenceable); |
i915_gem_verify_gtt(dev); |
return 0; |
2585,7 → 2553,7 |
* to GPU, and we can ignore the cache flush because it'll happen |
* again at bind time. |
*/ |
if (obj->pages == NULL) |
if (obj->pages.page == NULL) |
return; |
/* If the GPU is snooping the contents of the CPU cache, |
2598,7 → 2566,7 |
*/ |
if (obj->cache_level != I915_CACHE_NONE) |
return; |
#if 0 |
if(obj->mapped != NULL) |
{ |
uint8_t *page_virtual; |
2645,8 → 2613,6 |
"mfence"); |
} |
} |
#endif |
} |
/** Flushes the GTT write domain for the object if it's dirty. */ |
2686,7 → 2652,7 |
return; |
i915_gem_clflush_object(obj); |
i915_gem_chipset_flush(obj->base.dev); |
intel_gtt_chipset_flush(); |
old_write_domain = obj->base.write_domain; |
obj->base.write_domain = 0; |
3023,16 → 2989,11 |
#endif |
if (obj->gtt_space == NULL) { |
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
ret = i915_gem_object_bind_to_gtt(obj, alignment, |
map_and_fenceable, |
nonblocking); |
if (ret) |
return ret; |
if (!dev_priv->mm.aliasing_ppgtt) |
i915_gem_gtt_bind_object(obj, obj->cache_level); |
} |
if (!obj->has_global_gtt_mapping && map_and_fenceable) |
3086,15 → 3047,14 |
goto out; |
} |
if (obj->user_pin_count == 0) { |
obj->user_pin_count++; |
obj->pin_filp = file; |
if (obj->user_pin_count == 1) { |
ret = i915_gem_object_pin(obj, args->alignment, true, false); |
if (ret) |
goto out; |
} |
obj->user_pin_count++; |
obj->pin_filp = file; |
/* XXX - flush the CPU caches for pinned objects |
* as the X server doesn't manage domains yet |
*/ |
3335,7 → 3295,7 |
i915_gem_object_put_pages(obj); |
// i915_gem_object_free_mmap_offset(obj); |
BUG_ON(obj->pages); |
BUG_ON(obj->pages.page); |
// if (obj->base.import_attach) |
// drm_prime_gem_destroy(&obj->base, NULL); |
3398,7 → 3358,7 |
if (!IS_IVYBRIDGE(dev)) |
return; |
if (!dev_priv->l3_parity.remap_info) |
if (!dev_priv->mm.l3_remap_info) |
return; |
misccpctl = I915_READ(GEN7_MISCCPCTL); |
3407,12 → 3367,12 |
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { |
u32 remap = I915_READ(GEN7_L3LOG_BASE + i); |
if (remap && remap != dev_priv->l3_parity.remap_info[i/4]) |
if (remap && remap != dev_priv->mm.l3_remap_info[i/4]) |
DRM_DEBUG("0x%x was already programmed to %x\n", |
GEN7_L3LOG_BASE + i, remap); |
if (remap && !dev_priv->l3_parity.remap_info[i/4]) |
if (remap && !dev_priv->mm.l3_remap_info[i/4]) |
DRM_DEBUG_DRIVER("Clearing remapped register\n"); |
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]); |
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]); |
} |
/* Make sure all the writes land before disabling dop clock gating */ |
3442,6 → 3402,68 |
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); |
} |
void i915_gem_init_ppgtt(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
uint32_t pd_offset; |
struct intel_ring_buffer *ring; |
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
uint32_t __iomem *pd_addr; |
uint32_t pd_entry; |
int i; |
if (!dev_priv->mm.aliasing_ppgtt) |
return; |
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); |
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
dma_addr_t pt_addr; |
if (dev_priv->mm.gtt->needs_dmar) |
pt_addr = ppgtt->pt_dma_addr[i]; |
else |
pt_addr = ppgtt->pt_pages[i]; |
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); |
pd_entry |= GEN6_PDE_VALID; |
writel(pd_entry, pd_addr + i); |
} |
readl(pd_addr); |
pd_offset = ppgtt->pd_offset; |
pd_offset /= 64; /* in cachelines, */ |
pd_offset <<= 16; |
if (INTEL_INFO(dev)->gen == 6) { |
uint32_t ecochk, gab_ctl, ecobits; |
ecobits = I915_READ(GAC_ECO_BITS); |
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); |
gab_ctl = I915_READ(GAB_CTL); |
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); |
ecochk = I915_READ(GAM_ECOCHK); |
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | |
ECOCHK_PPGTT_CACHE64B); |
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
} else if (INTEL_INFO(dev)->gen >= 7) { |
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); |
/* GFX_MODE is per-ring on gen7+ */ |
} |
for_each_ring(ring, dev_priv, i) { |
if (INTEL_INFO(dev)->gen >= 7) |
I915_WRITE(RING_MODE_GEN7(ring), |
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); |
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); |
} |
} |
static bool |
intel_enable_blt(struct drm_device *dev) |
{ |
3464,7 → 3486,7 |
drm_i915_private_t *dev_priv = dev->dev_private; |
int ret; |
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
if (!intel_enable_gtt()) |
return -EIO; |
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) |
/drivers/video/drm/i915/Gtt/intel-gtt.c |
---|
20,8 → 20,6 |
#include <linux/pci.h> |
#include <linux/kernel.h> |
#include <linux/export.h> |
#include <linux/scatterlist.h> |
//#include <linux/pagemap.h> |
//#include <linux/agp_backend.h> |
//#include <asm/smp.h> |
28,7 → 26,7 |
#include <linux/spinlock.h> |
#include "agp.h" |
#include "intel-agp.h" |
#include <drm/intel-gtt.h> |
#include "intel-gtt.h" |
#include <syscall.h> |
112,15 → 110,14 |
static int intel_gtt_setup_scratch_page(void) |
{ |
struct page *page; |
dma_addr_t dma_addr; |
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
if (page == NULL) |
dma_addr = AllocPage(); |
if (dma_addr == 0) |
return -ENOMEM; |
intel_private.base.scratch_page_dma = page_to_phys(page); |
intel_private.scratch_page = page; |
intel_private.base.scratch_page_dma = dma_addr; |
intel_private.scratch_page = NULL; |
return 0; |
} |
161,6 → 158,62 |
stolen_size = 0; |
break; |
} |
} else if (INTEL_GTT_GEN == 6) { |
/* |
* SandyBridge has new memory control reg at 0x50.w |
*/ |
u16 snb_gmch_ctl; |
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { |
case SNB_GMCH_GMS_STOLEN_32M: |
stolen_size = MB(32); |
break; |
case SNB_GMCH_GMS_STOLEN_64M: |
stolen_size = MB(64); |
break; |
case SNB_GMCH_GMS_STOLEN_96M: |
stolen_size = MB(96); |
break; |
case SNB_GMCH_GMS_STOLEN_128M: |
stolen_size = MB(128); |
break; |
case SNB_GMCH_GMS_STOLEN_160M: |
stolen_size = MB(160); |
break; |
case SNB_GMCH_GMS_STOLEN_192M: |
stolen_size = MB(192); |
break; |
case SNB_GMCH_GMS_STOLEN_224M: |
stolen_size = MB(224); |
break; |
case SNB_GMCH_GMS_STOLEN_256M: |
stolen_size = MB(256); |
break; |
case SNB_GMCH_GMS_STOLEN_288M: |
stolen_size = MB(288); |
break; |
case SNB_GMCH_GMS_STOLEN_320M: |
stolen_size = MB(320); |
break; |
case SNB_GMCH_GMS_STOLEN_352M: |
stolen_size = MB(352); |
break; |
case SNB_GMCH_GMS_STOLEN_384M: |
stolen_size = MB(384); |
break; |
case SNB_GMCH_GMS_STOLEN_416M: |
stolen_size = MB(416); |
break; |
case SNB_GMCH_GMS_STOLEN_448M: |
stolen_size = MB(448); |
break; |
case SNB_GMCH_GMS_STOLEN_480M: |
stolen_size = MB(480); |
break; |
case SNB_GMCH_GMS_STOLEN_512M: |
stolen_size = MB(512); |
break; |
} |
} else { |
switch (gmch_ctrl & I855_GMCH_GMS_MASK) { |
case I855_GMCH_GMS_STOLEN_1M: |
294,9 → 347,29 |
static unsigned int intel_gtt_total_entries(void) |
{ |
int size; |
if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) |
return i965_gtt_total_entries(); |
else { |
else if (INTEL_GTT_GEN == 6) { |
u16 snb_gmch_ctl; |
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { |
default: |
case SNB_GTT_SIZE_0M: |
printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); |
size = MB(0); |
break; |
case SNB_GTT_SIZE_1M: |
size = MB(1); |
break; |
case SNB_GTT_SIZE_2M: |
size = MB(2); |
break; |
} |
return size/4; |
} else { |
/* On previous hardware, the GTT size was just what was |
* required to map the aperture. |
*/ |
360,8 → 433,11 |
ret = intel_private.driver->setup(); |
if (ret != 0) |
{ |
return ret; |
}; |
intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); |
intel_private.base.gtt_total_entries = intel_gtt_total_entries(); |
381,6 → 457,9 |
gtt_map_size = intel_private.base.gtt_total_entries * 4; |
intel_private.gtt = NULL; |
// if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2) |
// intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr, |
// gtt_map_size); |
if (intel_private.gtt == NULL) |
intel_private.gtt = ioremap(intel_private.gtt_bus_addr, |
gtt_map_size); |
430,6 → 509,9 |
{ |
u8 __iomem *reg; |
if (INTEL_GTT_GEN >= 6) |
return true; |
if (INTEL_GTT_GEN == 2) { |
u16 gmch_ctrl; |
483,39 → 565,33 |
return false; |
} |
void intel_gtt_insert_sg_entries(struct sg_table *st, |
void intel_gtt_insert_sg_entries(struct pagelist *st, |
unsigned int pg_start, |
unsigned int flags) |
{ |
struct scatterlist *sg; |
unsigned int len, m; |
int i, j; |
j = pg_start; |
/* sg may merge pages, but we have to separate |
* per-page addr for GTT */ |
for_each_sg(st->sgl, sg, st->nents, i) { |
len = sg_dma_len(sg) >> PAGE_SHIFT; |
for (m = 0; m < len; m++) { |
dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); |
for(i = 0; i < st->nents; i++) |
{ |
dma_addr_t addr = st->page[i]; |
intel_private.driver->write_entry(addr, j, flags); |
j++; |
} |
} |
}; |
readl(intel_private.gtt+j-1); |
} |
EXPORT_SYMBOL(intel_gtt_insert_sg_entries); |
static void intel_gtt_insert_pages(unsigned int first_entry, |
unsigned int num_entries, |
struct page **pages, |
dma_addr_t *pages, |
unsigned int flags) |
{ |
int i, j; |
for (i = 0, j = first_entry; i < num_entries; i++, j++) { |
dma_addr_t addr = page_to_phys(pages[i]); |
dma_addr_t addr = pages[i]; |
intel_private.driver->write_entry(addr, |
j, flags); |
} |
594,6 → 670,85 |
writel(addr | pte_flags, intel_private.gtt + entry); |
} |
/*
 * PTE flag validation hook for the gen6+ GTT drivers (.check_flags).
 * Every AGP_USER_* flag combination is accepted on these chipsets, so
 * this unconditionally reports the flags as valid.
 */
static bool gen6_check_flags(unsigned int flags)
{
	(void)flags; /* all caching/GFDT combinations are allowed */
	return true;
}
/*
 * Program one Haswell GTT PTE.
 *
 * addr:  bus address of the page to map (may carry bits above 32).
 * entry: PTE index within the mapped GTT.
 * flags: AGP_USER_* cache type, optionally ORed with
 *        AGP_USER_CACHED_MEMORY_GFDT.
 *
 * Identical to gen6_write_entry() except for the uncached encoding:
 * Haswell uses HSW_PTE_UNCACHED (0) instead of GEN6_PTE_UNCACHED.
 */
static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
/* Separate the cache-type selector from the GFDT modifier bit. */
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
if (type_mask == AGP_USER_MEMORY)
pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
} else { /* set 'normal'/'cached' to LLC by default */
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
}
/*
 * Program one Sandybridge/Ivybridge GTT PTE.
 *
 * addr:  bus address of the page to map (may carry bits above 32).
 * entry: PTE index within the mapped GTT.
 * flags: AGP_USER_* cache type, optionally ORed with
 *        AGP_USER_CACHED_MEMORY_GFDT.
 *
 * Unknown/default cache types fall through to the LLC encoding.
 */
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
/* Separate the cache-type selector from the GFDT modifier bit. */
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
if (type_mask == AGP_USER_MEMORY)
pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
} else { /* set 'normal'/'cached' to LLC by default */
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
}
/*
 * Program one ValleyView GTT PTE.
 *
 * Unlike gen6/Haswell, VLV only distinguishes uncached vs LLC here
 * (no LLC_MLC encoding), and each PTE write is followed by a poke of
 * the GFX_FLSH_CNTL_VLV register — presumably to flush/post the write;
 * NOTE(review): confirm whether a per-entry flush is required or a
 * single flush after a batch of writes would suffice.
 */
static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
/* Separate the cache-type selector from the GFDT modifier bit. */
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
if (type_mask == AGP_USER_MEMORY)
pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
else {
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
}
/* No gen6-specific teardown is needed; this placeholder exists only to
 * populate the .cleanup hook of the gen6+ intel_gtt_driver tables. */
static void gen6_cleanup(void)
{
}
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
615,7 → 770,7 |
static int i9xx_setup(void) |
{ |
u32 reg_addr, gtt_addr; |
u32 reg_addr; |
int size = KB(512); |
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, ®_addr); |
622,23 → 777,35 |
reg_addr &= 0xfff80000; |
if (INTEL_GTT_GEN >= 7) |
size = MB(2); |
intel_private.registers = ioremap(reg_addr, size); |
if (!intel_private.registers) |
return -ENOMEM; |
switch (INTEL_GTT_GEN) { |
case 3: |
if (INTEL_GTT_GEN == 3) { |
u32 gtt_addr; |
pci_read_config_dword(intel_private.pcidev, |
I915_PTEADDR, >t_addr); |
intel_private.gtt_bus_addr = gtt_addr; |
break; |
} else { |
u32 gtt_offset; |
switch (INTEL_GTT_GEN) { |
case 5: |
intel_private.gtt_bus_addr = reg_addr + MB(2); |
case 6: |
case 7: |
gtt_offset = MB(2); |
break; |
case 4: |
default: |
intel_private.gtt_bus_addr = reg_addr + KB(512); |
gtt_offset = KB(512); |
break; |
} |
intel_private.gtt_bus_addr = reg_addr + gtt_offset; |
} |
if (needs_idle_maps()) |
intel_private.base.do_idle_maps = 1; |
708,6 → 875,32 |
.check_flags = i830_check_flags, |
.chipset_flush = i9xx_chipset_flush, |
}; |
/* GTT driver vtable for Sandybridge; also used by the Ivybridge
 * entries in the chipset table below. */
static const struct intel_gtt_driver sandybridge_gtt_driver = {
.gen = 6,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = gen6_write_entry,
.dma_mask_size = 40, /* 40-bit DMA addressing (PTE carries addr bits 39:32) */
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
/* GTT driver vtable for Haswell: same as sandybridge_gtt_driver except
 * for the PTE writer (Haswell uncached encoding differs).
 * NOTE(review): .gen stays 6 here even though Haswell is newer —
 * confirm this matches how INTEL_GTT_GEN is consumed elsewhere. */
static const struct intel_gtt_driver haswell_gtt_driver = {
.gen = 6,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = haswell_write_entry,
.dma_mask_size = 40, /* 40-bit DMA addressing */
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
/* GTT driver vtable for ValleyView.
 * NOTE(review): unlike the sandybridge/haswell tables above, no
 * .chipset_flush is set (valleyview_write_entry pokes
 * GFX_FLSH_CNTL_VLV on every PTE write) — verify that all callers of
 * the chipset_flush hook tolerate a NULL pointer. */
static const struct intel_gtt_driver valleyview_gtt_driver = {
.gen = 7,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = valleyview_write_entry,
.dma_mask_size = 40, /* 40-bit DMA addressing */
.check_flags = gen6_check_flags,
};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of |
* driver and gmch_driver must be non-null, and find_gmch will determine |
770,6 → 963,106 |
"HD Graphics", &ironlake_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
"HD Graphics", &ironlake_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, |
"Sandybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, |
"Sandybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, |
"Sandybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, |
"Sandybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, |
"Sandybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, |
"Sandybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, |
"Sandybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG, |
"Ivybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG, |
"Ivybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG, |
"Ivybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG, |
"Ivybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG, |
"Ivybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG, |
"Ivybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG, |
"ValleyView", &valleyview_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG, |
"Haswell", &haswell_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG, |
"Haswell", &haswell_gtt_driver }, |
{ 0, NULL, NULL } |
}; |
814,7 → 1107,7 |
intel_private.bridge_dev = bridge_pdev; |
dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); |
dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name); |
mask = intel_private.driver->dma_mask_size; |
// if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) |
834,7 → 1127,7 |
} |
EXPORT_SYMBOL(intel_gmch_probe); |
struct intel_gtt *intel_gtt_get(void) |
const struct intel_gtt *intel_gtt_get(void) |
{ |
return &intel_private.base; |
} |
848,5 → 1141,7 |
EXPORT_SYMBOL(intel_gtt_chipset_flush); |
MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); |
MODULE_LICENSE("GPL and additional rights"); |
//phys_addr_t get_bus_addr(void) |
//{ |
// return intel_private.gma_bus_addr; |
//}; |
/drivers/video/drm/i915/Gtt/intel-agp.h |
---|
62,6 → 62,12 |
#define I810_PTE_LOCAL 0x00000002 |
#define I810_PTE_VALID 0x00000001 |
#define I830_PTE_SYSTEM_CACHED 0x00000006 |
/* GT PTE cache control fields */ |
#define GEN6_PTE_UNCACHED 0x00000002 |
#define HSW_PTE_UNCACHED 0x00000000 |
#define GEN6_PTE_LLC 0x00000004 |
#define GEN6_PTE_LLC_MLC 0x00000006 |
#define GEN6_PTE_GFDT 0x00000008 |
#define I810_SMRAM_MISCC 0x70 |
#define I810_GFX_MEM_WIN_SIZE 0x00010000 |
91,6 → 97,7 |
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN) |
#define GFX_FLSH_CNTL 0x2170 /* 915+ */ |
#define GFX_FLSH_CNTL_VLV 0x101008 |
#define I810_DRAM_CTL 0x3000 |
#define I810_DRAM_ROW_0 0x00000001 |
141,6 → 148,29 |
#define INTEL_I7505_AGPCTRL 0x70 |
#define INTEL_I7505_MCHCFG 0x50 |
#define SNB_GMCH_CTRL 0x50 |
#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 |
#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) |
#define SNB_GMCH_GMS_STOLEN_64M (2 << 3) |
#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) |
#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) |
#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) |
#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) |
#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) |
#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) |
#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) |
#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) |
#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) |
#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) |
#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) |
#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) |
#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) |
#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) |
#define SNB_GTT_SIZE_0M (0 << 8) |
#define SNB_GTT_SIZE_1M (1 << 8) |
#define SNB_GTT_SIZE_2M (2 << 8) |
#define SNB_GTT_SIZE_MASK (3 << 8) |
/* pci devices ids */ |
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 |
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a |
189,5 → 219,66 |
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 |
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a |
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 |
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */ |
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102 |
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112 |
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122 |
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */ |
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106 |
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116 |
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126 |
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */ |
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A |
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */ |
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152 |
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162 |
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */ |
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156 |
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166 |
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */ |
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A |
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A |
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */ |
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30 |
#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ |
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402 |
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412 |
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422 |
#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ |
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406 |
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416 |
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426 |
#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ |
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a |
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a |
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a |
#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 |
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02 |
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12 |
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22 |
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06 |
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16 |
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26 |
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A |
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A |
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A |
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02 |
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12 |
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22 |
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06 |
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16 |
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26 |
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A |
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A |
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A |
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12 |
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22 |
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32 |
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16 |
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26 |
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36 |
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A |
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A |
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A |
#endif |
/drivers/video/drm/i915/i915_dma.c |
---|
106,6 → 106,32 |
} |
/** |
* Sets up the hardware status page for devices that need a physical address |
* in the register. |
*/ |
/*
 * Allocate and program the physical hardware status page (HWS) for
 * devices that take a physical address in the HWS_PGA register.
 *
 * Returns 0 on success, -ENOMEM if the DMA-coherent page cannot be
 * allocated.  The allocation is freed by the HWS teardown path.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
/* Zero the page before handing its address to the hardware.
 * NOTE(review): the __force __iomem cast feeds plain memset() with an
 * iomem-annotated pointer — confirm memset_io() isn't required here. */
memset((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
0, PAGE_SIZE);
/* Write the page's bus address into the HWS_PGA register. */
i915_write_hws_pga(dev);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
/** |
* Frees the hardware status page, whether it's a physical address or a virtual |
* address set up by the X Server. |
*/ |
145,7 → 171,7 |
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; |
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE); |
ring->space = ring->head - (ring->tail + 8); |
if (ring->space < 0) |
ring->space += ring->size; |
429,16 → 455,16 |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
dev_priv->dri1.counter++; |
if (dev_priv->dri1.counter > 0x7FFFFFFFUL) |
dev_priv->dri1.counter = 0; |
dev_priv->counter++; |
if (dev_priv->counter > 0x7FFFFFFFUL) |
dev_priv->counter = 0; |
if (master_priv->sarea_priv) |
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; |
master_priv->sarea_priv->last_enqueue = dev_priv->counter; |
if (BEGIN_LP_RING(4) == 0) { |
OUT_RING(MI_STORE_DWORD_INDEX); |
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
OUT_RING(dev_priv->dri1.counter); |
OUT_RING(dev_priv->counter); |
OUT_RING(0); |
ADVANCE_LP_RING(); |
} |
580,12 → 606,12 |
ADVANCE_LP_RING(); |
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++; |
master_priv->sarea_priv->last_enqueue = dev_priv->counter++; |
if (BEGIN_LP_RING(4) == 0) { |
OUT_RING(MI_STORE_DWORD_INDEX); |
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
OUT_RING(dev_priv->dri1.counter); |
OUT_RING(dev_priv->counter); |
OUT_RING(0); |
ADVANCE_LP_RING(); |
} |
596,8 → 622,10 |
static int i915_quiescent(struct drm_device *dev) |
{ |
struct intel_ring_buffer *ring = LP_RING(dev->dev_private); |
i915_kernel_lost_context(dev); |
return intel_ring_idle(LP_RING(dev->dev_private)); |
return intel_wait_ring_idle(ring); |
} |
static int i915_flush_ioctl(struct drm_device *dev, void *data, |
751,21 → 779,21 |
DRM_DEBUG_DRIVER("\n"); |
dev_priv->dri1.counter++; |
if (dev_priv->dri1.counter > 0x7FFFFFFFUL) |
dev_priv->dri1.counter = 1; |
dev_priv->counter++; |
if (dev_priv->counter > 0x7FFFFFFFUL) |
dev_priv->counter = 1; |
if (master_priv->sarea_priv) |
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; |
master_priv->sarea_priv->last_enqueue = dev_priv->counter; |
if (BEGIN_LP_RING(4) == 0) { |
OUT_RING(MI_STORE_DWORD_INDEX); |
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
OUT_RING(dev_priv->dri1.counter); |
OUT_RING(dev_priv->counter); |
OUT_RING(MI_USER_INTERRUPT); |
ADVANCE_LP_RING(); |
} |
return dev_priv->dri1.counter; |
return dev_priv->counter; |
} |
static int i915_wait_irq(struct drm_device * dev, int irq_nr) |
796,7 → 824,7 |
if (ret == -EBUSY) { |
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", |
READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter); |
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); |
} |
return ret; |
990,12 → 1018,6 |
case I915_PARAM_HAS_PRIME_VMAP_FLUSH: |
value = 1; |
break; |
case I915_PARAM_HAS_SECURE_BATCHES: |
value = capable(CAP_SYS_ADMIN); |
break; |
case I915_PARAM_HAS_PINNED_BATCHES: |
value = 1; |
break; |
default: |
DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
param->param); |
1052,7 → 1074,7 |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
drm_i915_hws_addr_t *hws = data; |
struct intel_ring_buffer *ring; |
struct intel_ring_buffer *ring = LP_RING(dev_priv); |
if (drm_core_check_feature(dev, DRIVER_MODESET)) |
return -ENODEV; |
1072,7 → 1094,6 |
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); |
ring = LP_RING(dev_priv); |
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); |
dev_priv->dri1.gfx_hws_cpu_addr = |
1278,7 → 1299,19 |
info = (struct intel_device_info *) flags; |
#if 0 |
/* Refuse to load on gen6+ without kms enabled. */ |
if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) |
return -ENODEV; |
/* i915 has 4 more counters */ |
dev->counters += 4; |
dev->types[6] = _DRM_STAT_IRQ; |
dev->types[7] = _DRM_STAT_PRIMARY; |
dev->types[8] = _DRM_STAT_SECONDARY; |
dev->types[9] = _DRM_STAT_DMA; |
#endif |
dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); |
if (dev_priv == NULL) |
return -ENOMEM; |
1294,14 → 1327,26 |
goto free_priv; |
} |
ret = i915_gem_gtt_init(dev); |
if (ret) |
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); |
if (!ret) { |
DRM_ERROR("failed to set up gmch\n"); |
ret = -EIO; |
goto put_bridge; |
} |
dev_priv->mm.gtt = intel_gtt_get(); |
if (!dev_priv->mm.gtt) { |
DRM_ERROR("Failed to initialize GTT\n"); |
ret = -ENODEV; |
goto put_gmch; |
} |
pci_set_master(dev->pdev); |
/* overlay on gen2 is broken and can't address above 1G */ |
// if (IS_GEN2(dev)) |
// dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
/* 965GM sometimes incorrectly writes to hardware status page (HWS) |
* using 32bit addressing, overwriting memory if HWS is located |
1311,6 → 1356,8 |
* behaviour if any general state is accessed within a page above 4GB, |
* which also needs to be handled carefully. |
*/ |
// if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
// dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); |
mmio_bar = IS_GEN2(dev) ? 1 : 0; |
/* Before gen4, the registers and the GTT are behind different BARs. |
1335,7 → 1382,11 |
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr; |
DRM_INFO("gtt_base_addr %x aperture_size %d\n", |
dev_priv->mm.gtt_base_addr, aperture_size ); |
// i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, |
// aperture_size); |
/* The i915 workqueue is primarily used for batched retirement of |
* requests (and thus managing bo) once the task has been completed |
1368,10 → 1419,18 |
intel_setup_gmbus(dev); |
intel_opregion_setup(dev); |
/* Make sure the bios did its job and set up vital registers */ |
intel_setup_bios(dev); |
i915_gem_load(dev); |
/* Init HWS */ |
if (!I915_NEED_GFX_HWS(dev)) { |
ret = i915_init_phys_hws(dev); |
if (ret) |
goto out_gem_unload; |
} |
/* On the 945G/GM, the chipset reports the MSI capability on the |
* integrated graphics even though the support isn't actually there |
* according to the published specs. It doesn't appear to function |
1389,8 → 1448,6 |
spin_lock_init(&dev_priv->rps.lock); |
spin_lock_init(&dev_priv->dpio_lock); |
mutex_init(&dev_priv->rps.hw_lock); |
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
dev_priv->num_pipe = 3; |
else if (IS_MOBILE(dev) || !IS_GEN2(dev)) |
1412,8 → 1469,13 |
} |
/* Must be done after probing outputs */ |
// intel_opregion_init(dev); |
// acpi_video_register(); |
// setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, |
// (unsigned long) dev); |
if (IS_GEN5(dev)) |
intel_gpu_ips_init(dev_priv); |
1485,7 → 1547,6 |
if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
intel_fbdev_fini(dev); |
intel_modeset_cleanup(dev); |
cancel_work_sync(&dev_priv->console_resume_work); |
/* |
* free the memory space allocated for the child device |
/drivers/video/drm/i915/intel_i2c.c |
---|
1,4 → 1,4 |
/* |
/* |
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie> |
* Copyright © 2006-2008,2010 Intel Corporation |
* Jesse Barnes <jesse.barnes@intel.com> |
432,7 → 432,7 |
I915_WRITE(GMBUS0 + reg_offset, 0); |
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ |
bus->force_bit = 1; |
bus->force_bit = true; |
ret = i2c_bit_algo.master_xfer(adapter, msgs, num); |
out: |
491,13 → 491,10 |
/* gmbus seems to be broken on i830 */ |
if (IS_I830(dev)) |
bus->force_bit = 1; |
bus->force_bit = true; |
intel_gpio_setup(bus, port); |
ret = i2c_add_adapter(&bus->adapter); |
if (ret) |
goto err; |
} |
intel_i2c_reset(dev_priv->dev); |
505,10 → 502,10 |
return 0; |
err: |
while (--i) { |
struct intel_gmbus *bus = &dev_priv->gmbus[i]; |
i2c_del_adapter(&bus->adapter); |
} |
// while (--i) { |
// struct intel_gmbus *bus = &dev_priv->gmbus[i]; |
// i2c_del_adapter(&bus->adapter); |
// } |
return ret; |
} |
532,10 → 529,7 |
{ |
struct intel_gmbus *bus = to_intel_gmbus(adapter); |
bus->force_bit += force_bit ? 1 : -1; |
DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n", |
force_bit ? "en" : "dis", adapter->name, |
bus->force_bit); |
bus->force_bit = force_bit; |
} |
void intel_teardown_gmbus(struct drm_device *dev) |
545,6 → 539,6 |
for (i = 0; i < GMBUS_NUM_PORTS; i++) { |
struct intel_gmbus *bus = &dev_priv->gmbus[i]; |
i2c_del_adapter(&bus->adapter); |
// i2c_del_adapter(&bus->adapter); |
} |
} |
/drivers/video/drm/i915/intel_panel.c |
---|
130,9 → 130,8 |
return 0; |
} |
static u32 i915_read_blc_pwm_ctl(struct drm_device *dev) |
static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 val; |
/* Restore the CTL value if it lost, e.g. GPU reset */ |
139,25 → 138,24 |
if (HAS_PCH_SPLIT(dev_priv->dev)) { |
val = I915_READ(BLC_PWM_PCH_CTL2); |
if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) { |
dev_priv->regfile.saveBLC_PWM_CTL2 = val; |
if (dev_priv->saveBLC_PWM_CTL2 == 0) { |
dev_priv->saveBLC_PWM_CTL2 = val; |
} else if (val == 0) { |
val = dev_priv->regfile.saveBLC_PWM_CTL2; |
I915_WRITE(BLC_PWM_PCH_CTL2, val); |
I915_WRITE(BLC_PWM_PCH_CTL2, |
dev_priv->saveBLC_PWM_CTL2); |
val = dev_priv->saveBLC_PWM_CTL2; |
} |
} else { |
val = I915_READ(BLC_PWM_CTL); |
if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { |
dev_priv->regfile.saveBLC_PWM_CTL = val; |
if (INTEL_INFO(dev)->gen >= 4) |
dev_priv->regfile.saveBLC_PWM_CTL2 = |
I915_READ(BLC_PWM_CTL2); |
if (dev_priv->saveBLC_PWM_CTL == 0) { |
dev_priv->saveBLC_PWM_CTL = val; |
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); |
} else if (val == 0) { |
val = dev_priv->regfile.saveBLC_PWM_CTL; |
I915_WRITE(BLC_PWM_CTL, val); |
if (INTEL_INFO(dev)->gen >= 4) |
I915_WRITE(BLC_PWM_CTL, |
dev_priv->saveBLC_PWM_CTL); |
I915_WRITE(BLC_PWM_CTL2, |
dev_priv->regfile.saveBLC_PWM_CTL2); |
dev_priv->saveBLC_PWM_CTL2); |
val = dev_priv->saveBLC_PWM_CTL; |
} |
} |
166,9 → 164,10 |
static u32 _intel_panel_get_max_backlight(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 max; |
max = i915_read_blc_pwm_ctl(dev); |
max = i915_read_blc_pwm_ctl(dev_priv); |
if (HAS_PCH_SPLIT(dev)) { |
max >>= 16; |
375,24 → 374,27 |
enum drm_connector_status |
intel_panel_detect(struct drm_device *dev) |
{ |
#if 0 |
struct drm_i915_private *dev_priv = dev->dev_private; |
#endif |
if (i915_panel_ignore_lid) |
return i915_panel_ignore_lid > 0 ? |
connector_status_connected : |
connector_status_disconnected; |
/* opregion lid state on HP 2540p is wrong at boot up, |
* appears to be either the BIOS or Linux ACPI fault */ |
#if 0 |
/* Assume that the BIOS does not lie through the OpRegion... */ |
if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) { |
if (dev_priv->opregion.lid_state) |
return ioread32(dev_priv->opregion.lid_state) & 0x1 ? |
connector_status_connected : |
connector_status_disconnected; |
} |
#endif |
switch (i915_panel_ignore_lid) { |
case -2: |
return connector_status_connected; |
case -1: |
return connector_status_disconnected; |
default: |
return connector_status_unknown; |
} |
} |
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE |
static int intel_panel_update_status(struct backlight_device *bd) |
414,14 → 416,21 |
.get_brightness = intel_panel_get_brightness, |
}; |
int intel_panel_setup_backlight(struct drm_connector *connector) |
int intel_panel_setup_backlight(struct drm_device *dev) |
{ |
struct drm_device *dev = connector->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct backlight_properties props; |
struct drm_connector *connector; |
intel_panel_init_backlight(dev); |
if (dev_priv->int_lvds_connector) |
connector = dev_priv->int_lvds_connector; |
else if (dev_priv->int_edp_connector) |
connector = dev_priv->int_edp_connector; |
else |
return -ENODEV; |
memset(&props, 0, sizeof(props)); |
props.type = BACKLIGHT_RAW; |
props.max_brightness = _intel_panel_get_max_backlight(dev); |
451,9 → 460,9 |
backlight_device_unregister(dev_priv->backlight); |
} |
#else |
int intel_panel_setup_backlight(struct drm_connector *connector) |
int intel_panel_setup_backlight(struct drm_device *dev) |
{ |
intel_panel_init_backlight(connector->dev); |
intel_panel_init_backlight(dev); |
return 0; |
} |
462,20 → 471,3 |
return; |
} |
#endif |
int intel_panel_init(struct intel_panel *panel, |
struct drm_display_mode *fixed_mode) |
{ |
panel->fixed_mode = fixed_mode; |
return 0; |
} |
void intel_panel_fini(struct intel_panel *panel) |
{ |
struct intel_connector *intel_connector = |
container_of(panel, struct intel_connector, panel); |
if (panel->fixed_mode) |
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); |
} |
/drivers/video/drm/i915/intel_ringbuffer.c |
---|
47,7 → 47,7 |
static inline int ring_space(struct intel_ring_buffer *ring) |
{ |
int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); |
int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); |
if (space < 0) |
space += ring->size; |
return space; |
247,7 → 247,7 |
/* |
* TLB invalidate requires a post-sync write. |
*/ |
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; |
flags |= PIPE_CONTROL_QW_WRITE; |
} |
ret = intel_ring_begin(ring, 4); |
461,7 → 461,7 |
goto err_unref; |
pc->gtt_offset = obj->gtt_offset; |
pc->cpu_page = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096, PG_SW); |
pc->cpu_page = (void*)MapIoMem((addr_t)obj->pages.page[0], 4096, PG_SW); |
if (pc->cpu_page == NULL) |
goto err_unpin; |
502,25 → 502,13 |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret = init_ring_common(ring); |
if (INTEL_INFO(dev)->gen > 3) |
if (INTEL_INFO(dev)->gen > 3) { |
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); |
/* We need to disable the AsyncFlip performance optimisations in order |
* to use MI_WAIT_FOR_EVENT within the CS. It should already be |
* programmed to '1' on all products. |
*/ |
if (INTEL_INFO(dev)->gen >= 6) |
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); |
/* Required for the hardware to program scanline values for waiting */ |
if (INTEL_INFO(dev)->gen == 6) |
I915_WRITE(GFX_MODE, |
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS)); |
if (IS_GEN7(dev)) |
I915_WRITE(GFX_MODE_GEN7, |
_MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); |
} |
if (INTEL_INFO(dev)->gen >= 5) { |
ret = init_pipe_control(ring); |
564,11 → 552,15 |
static void |
update_mboxes(struct intel_ring_buffer *ring, |
u32 seqno, |
u32 mmio_offset) |
{ |
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); |
intel_ring_emit(ring, MI_SEMAPHORE_MBOX | |
MI_SEMAPHORE_GLOBAL_GTT | |
MI_SEMAPHORE_REGISTER | |
MI_SEMAPHORE_UPDATE); |
intel_ring_emit(ring, seqno); |
intel_ring_emit(ring, mmio_offset); |
intel_ring_emit(ring, ring->outstanding_lazy_request); |
} |
/** |
581,7 → 573,8 |
* This acts like a signal in the canonical semaphore. |
*/ |
static int |
gen6_add_request(struct intel_ring_buffer *ring) |
gen6_add_request(struct intel_ring_buffer *ring, |
u32 *seqno) |
{ |
u32 mbox1_reg; |
u32 mbox2_reg; |
594,11 → 587,13 |
mbox1_reg = ring->signal_mbox[0]; |
mbox2_reg = ring->signal_mbox[1]; |
update_mboxes(ring, mbox1_reg); |
update_mboxes(ring, mbox2_reg); |
*seqno = i915_gem_next_request_seqno(ring); |
update_mboxes(ring, *seqno, mbox1_reg); |
update_mboxes(ring, *seqno, mbox2_reg); |
intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
intel_ring_emit(ring, ring->outstanding_lazy_request); |
intel_ring_emit(ring, *seqno); |
intel_ring_emit(ring, MI_USER_INTERRUPT); |
intel_ring_advance(ring); |
655,8 → 650,10 |
} while (0) |
static int |
pc_render_add_request(struct intel_ring_buffer *ring) |
pc_render_add_request(struct intel_ring_buffer *ring, |
u32 *result) |
{ |
u32 seqno = i915_gem_next_request_seqno(ring); |
struct pipe_control *pc = ring->private; |
u32 scratch_addr = pc->gtt_offset + 128; |
int ret; |
677,7 → 674,7 |
PIPE_CONTROL_WRITE_FLUSH | |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); |
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
intel_ring_emit(ring, ring->outstanding_lazy_request); |
intel_ring_emit(ring, seqno); |
intel_ring_emit(ring, 0); |
PIPE_CONTROL_FLUSH(ring, scratch_addr); |
scratch_addr += 128; /* write to separate cachelines */ |
696,10 → 693,11 |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | |
PIPE_CONTROL_NOTIFY); |
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
intel_ring_emit(ring, ring->outstanding_lazy_request); |
intel_ring_emit(ring, seqno); |
intel_ring_emit(ring, 0); |
intel_ring_advance(ring); |
*result = seqno; |
return 0; |
} |
887,8 → 885,10 |
} |
static int |
i9xx_add_request(struct intel_ring_buffer *ring) |
i9xx_add_request(struct intel_ring_buffer *ring, |
u32 *result) |
{ |
u32 seqno; |
int ret; |
ret = intel_ring_begin(ring, 4); |
895,12 → 895,15 |
if (ret) |
return ret; |
seqno = i915_gem_next_request_seqno(ring); |
intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
intel_ring_emit(ring, ring->outstanding_lazy_request); |
intel_ring_emit(ring, seqno); |
intel_ring_emit(ring, MI_USER_INTERRUPT); |
intel_ring_advance(ring); |
*result = seqno; |
return 0; |
} |
958,9 → 961,7 |
} |
static int |
i965_dispatch_execbuffer(struct intel_ring_buffer *ring, |
u32 offset, u32 length, |
unsigned flags) |
i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) |
{ |
int ret; |
971,7 → 972,7 |
intel_ring_emit(ring, |
MI_BATCH_BUFFER_START | |
MI_BATCH_GTT | |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); |
MI_BATCH_NON_SECURE_I965); |
intel_ring_emit(ring, offset); |
intel_ring_advance(ring); |
978,56 → 979,21 |
return 0; |
} |
/* Just userspace ABI convention to limit the wa batch bo to a resonable size */ |
#define I830_BATCH_LIMIT (256*1024) |
static int |
i830_dispatch_execbuffer(struct intel_ring_buffer *ring, |
u32 offset, u32 len, |
unsigned flags) |
u32 offset, u32 len) |
{ |
int ret; |
if (flags & I915_DISPATCH_PINNED) { |
ret = intel_ring_begin(ring, 4); |
if (ret) |
return ret; |
intel_ring_emit(ring, MI_BATCH_BUFFER); |
intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); |
intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
intel_ring_emit(ring, offset + len - 8); |
intel_ring_emit(ring, MI_NOOP); |
intel_ring_advance(ring); |
} else { |
struct drm_i915_gem_object *obj = ring->private; |
u32 cs_offset = obj->gtt_offset; |
if (len > I830_BATCH_LIMIT) |
return -ENOSPC; |
ret = intel_ring_begin(ring, 9+3); |
if (ret) |
return ret; |
/* Blit the batch (which has now all relocs applied) to the stable batch |
* scratch bo area (so that the CS never stumbles over its tlb |
* invalidation bug) ... */ |
intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | |
XY_SRC_COPY_BLT_WRITE_ALPHA | |
XY_SRC_COPY_BLT_WRITE_RGB); |
intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); |
intel_ring_emit(ring, 0); |
intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); |
intel_ring_emit(ring, cs_offset); |
intel_ring_emit(ring, 0); |
intel_ring_emit(ring, 4096); |
intel_ring_emit(ring, offset); |
intel_ring_emit(ring, MI_FLUSH); |
/* ... and execute it. */ |
intel_ring_emit(ring, MI_BATCH_BUFFER); |
intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); |
intel_ring_emit(ring, cs_offset + len - 8); |
intel_ring_advance(ring); |
} |
return 0; |
} |
1034,8 → 1000,7 |
static int |
i915_dispatch_execbuffer(struct intel_ring_buffer *ring, |
u32 offset, u32 len, |
unsigned flags) |
u32 offset, u32 len) |
{ |
int ret; |
1044,7 → 1009,7 |
return ret; |
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); |
intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); |
intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
intel_ring_advance(ring); |
return 0; |
1085,7 → 1050,7 |
} |
ring->status_page.gfx_addr = obj->gtt_offset; |
ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW); |
ring->status_page.page_addr = (void*)MapIoMem(obj->pages.page[0],4096,PG_SW); |
if (ring->status_page.page_addr == NULL) { |
ret = -ENOMEM; |
goto err_unpin; |
1107,29 → 1072,6 |
return ret; |
} |
static int init_phys_hws_pga(struct intel_ring_buffer *ring) |
{ |
struct drm_i915_private *dev_priv = ring->dev->dev_private; |
u32 addr; |
if (!dev_priv->status_page_dmah) { |
dev_priv->status_page_dmah = |
drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); |
if (!dev_priv->status_page_dmah) |
return -ENOMEM; |
} |
addr = dev_priv->status_page_dmah->busaddr; |
if (INTEL_INFO(ring->dev)->gen >= 4) |
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; |
I915_WRITE(HWS_PGA, addr); |
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; |
memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
return 0; |
} |
static int intel_init_ring_buffer(struct drm_device *dev, |
struct intel_ring_buffer *ring) |
{ |
1141,7 → 1083,6 |
INIT_LIST_HEAD(&ring->active_list); |
INIT_LIST_HEAD(&ring->request_list); |
ring->size = 32 * PAGE_SIZE; |
memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno)); |
init_waitqueue_head(&ring->irq_queue); |
1149,11 → 1090,6 |
ret = init_status_page(ring); |
if (ret) |
return ret; |
} else { |
BUG_ON(ring->id != RCS); |
ret = init_phys_hws_pga(ring); |
if (ret) |
return ret; |
} |
obj = i915_gem_alloc_object(dev, ring->size); |
1218,7 → 1154,7 |
/* Disable the ring buffer. The ring must be idle at this point */ |
dev_priv = ring->dev->dev_private; |
ret = intel_ring_idle(ring); |
ret = intel_wait_ring_idle(ring); |
if (ret) |
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", |
ring->name, ret); |
1237,6 → 1173,28 |
// cleanup_status_page(ring); |
} |
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) |
{ |
uint32_t __iomem *virt; |
int rem = ring->size - ring->tail; |
if (ring->space < rem) { |
int ret = intel_wait_ring_buffer(ring, rem); |
if (ret) |
return ret; |
} |
virt = ring->virtual_start + ring->tail; |
rem /= 4; |
while (rem--) |
iowrite32(MI_NOOP, virt++); |
ring->tail = 0; |
ring->space = ring_space(ring); |
return 0; |
} |
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) |
{ |
int ret; |
1270,7 → 1228,7 |
if (request->tail == -1) |
continue; |
space = request->tail - (ring->tail + I915_RING_FREE_SPACE); |
space = request->tail - (ring->tail + 8); |
if (space < 0) |
space += ring->size; |
if (space >= n) { |
1305,7 → 1263,7 |
return 0; |
} |
static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) |
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) |
{ |
struct drm_device *dev = ring->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
1316,7 → 1274,7 |
if (ret != -ENOSPC) |
return ret; |
trace_i915_ring_wait_begin(ring); |
/* With GEM the hangcheck timer should kick us out of the loop, |
* leaving it early runs the risk of corrupting GEM state (due |
* to running on almost untested codepaths). But on resume |
1342,60 → 1300,6 |
return -EBUSY; |
} |
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) |
{ |
uint32_t __iomem *virt; |
int rem = ring->size - ring->tail; |
if (ring->space < rem) { |
int ret = ring_wait_for_space(ring, rem); |
if (ret) |
return ret; |
} |
virt = ring->virtual_start + ring->tail; |
rem /= 4; |
while (rem--) |
iowrite32(MI_NOOP, virt++); |
ring->tail = 0; |
ring->space = ring_space(ring); |
return 0; |
} |
int intel_ring_idle(struct intel_ring_buffer *ring) |
{ |
u32 seqno; |
int ret; |
/* We need to add any requests required to flush the objects and ring */ |
if (ring->outstanding_lazy_request) { |
ret = i915_add_request(ring, NULL, NULL); |
if (ret) |
return ret; |
} |
/* Wait upon the last request to be completed */ |
if (list_empty(&ring->request_list)) |
return 0; |
seqno = list_entry(ring->request_list.prev, |
struct drm_i915_gem_request, |
list)->seqno; |
return i915_wait_seqno(ring, seqno); |
} |
static int |
intel_ring_alloc_seqno(struct intel_ring_buffer *ring) |
{ |
if (ring->outstanding_lazy_request) |
return 0; |
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); |
} |
int intel_ring_begin(struct intel_ring_buffer *ring, |
int num_dwords) |
{ |
1407,11 → 1311,6 |
if (ret) |
return ret; |
/* Preallocate the olr before touching the ring */ |
ret = intel_ring_alloc_seqno(ring); |
if (ret) |
return ret; |
if (unlikely(ring->tail + n > ring->effective_size)) { |
ret = intel_wrap_ring_buffer(ring); |
if (unlikely(ret)) |
1419,7 → 1318,7 |
} |
if (unlikely(ring->space < n)) { |
ret = ring_wait_for_space(ring, n); |
ret = intel_wait_ring_buffer(ring, n); |
if (unlikely(ret)) |
return ret; |
} |
1483,18 → 1382,11 |
return ret; |
cmd = MI_FLUSH_DW; |
/* |
* Bspec vol 1c.5 - video engine command streamer: |
* "If ENABLED, all TLBs will be invalidated once the flush |
* operation is complete. This bit is only valid when the |
* Post-Sync Operation field is a value of 1h or 3h." |
*/ |
if (invalidate & I915_GEM_GPU_DOMAINS) |
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | |
MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; |
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; |
intel_ring_emit(ring, cmd); |
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); |
intel_ring_emit(ring, 0); |
intel_ring_emit(ring, 0); |
intel_ring_emit(ring, MI_NOOP); |
intel_ring_advance(ring); |
return 0; |
1501,30 → 1393,8 |
} |
static int |
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
u32 offset, u32 len, |
unsigned flags) |
{ |
int ret; |
ret = intel_ring_begin(ring, 2); |
if (ret) |
return ret; |
intel_ring_emit(ring, |
MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); |
/* bit0-7 is the length on GEN6+ */ |
intel_ring_emit(ring, offset); |
intel_ring_advance(ring); |
return 0; |
} |
static int |
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
u32 offset, u32 len, |
unsigned flags) |
u32 offset, u32 len) |
{ |
int ret; |
1532,9 → 1402,7 |
if (ret) |
return ret; |
intel_ring_emit(ring, |
MI_BATCH_BUFFER_START | |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); |
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); |
/* bit0-7 is the length on GEN6+ */ |
intel_ring_emit(ring, offset); |
intel_ring_advance(ring); |
1555,18 → 1423,11 |
return ret; |
cmd = MI_FLUSH_DW; |
/* |
* Bspec vol 1c.3 - blitter engine command streamer: |
* "If ENABLED, all TLBs will be invalidated once the flush |
* operation is complete. This bit is only valid when the |
* Post-Sync Operation field is a value of 1h or 3h." |
*/ |
if (invalidate & I915_GEM_DOMAIN_RENDER) |
cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | |
MI_FLUSH_DW_OP_STOREDW; |
cmd |= MI_INVALIDATE_TLB; |
intel_ring_emit(ring, cmd); |
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); |
intel_ring_emit(ring, 0); |
intel_ring_emit(ring, 0); |
intel_ring_emit(ring, MI_NOOP); |
intel_ring_advance(ring); |
return 0; |
1620,9 → 1481,7 |
ring->irq_enable_mask = I915_USER_INTERRUPT; |
} |
ring->write_tail = ring_write_tail; |
if (IS_HASWELL(dev)) |
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; |
else if (INTEL_INFO(dev)->gen >= 6) |
if (INTEL_INFO(dev)->gen >= 6) |
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; |
else if (INTEL_INFO(dev)->gen >= 4) |
ring->dispatch_execbuffer = i965_dispatch_execbuffer; |
1633,99 → 1492,16 |
ring->init = init_render_ring; |
ring->cleanup = render_ring_cleanup; |
/* Workaround batchbuffer to combat CS tlb bug. */ |
if (HAS_BROKEN_CS_TLB(dev)) { |
struct drm_i915_gem_object *obj; |
int ret; |
obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); |
if (obj == NULL) { |
DRM_ERROR("Failed to allocate batch bo\n"); |
return -ENOMEM; |
if (!I915_NEED_GFX_HWS(dev)) { |
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; |
memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
} |
ret = i915_gem_object_pin(obj, 0, true, false); |
if (ret != 0) { |
drm_gem_object_unreference(&obj->base); |
DRM_ERROR("Failed to ping batch bo\n"); |
return ret; |
} |
ring->private = obj; |
} |
return intel_init_ring_buffer(dev, ring); |
} |
#if 0 |
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; |
int ret; |
ring->name = "render ring"; |
ring->id = RCS; |
ring->mmio_base = RENDER_RING_BASE; |
if (INTEL_INFO(dev)->gen >= 6) { |
/* non-kms not supported on gen6+ */ |
return -ENODEV; |
} |
/* Note: gem is not supported on gen5/ilk without kms (the corresponding |
* gem_init ioctl returns with -ENODEV). Hence we do not need to set up |
* the special gen5 functions. */ |
ring->add_request = i9xx_add_request; |
if (INTEL_INFO(dev)->gen < 4) |
ring->flush = gen2_render_ring_flush; |
else |
ring->flush = gen4_render_ring_flush; |
ring->get_seqno = ring_get_seqno; |
if (IS_GEN2(dev)) { |
ring->irq_get = i8xx_ring_get_irq; |
ring->irq_put = i8xx_ring_put_irq; |
} else { |
ring->irq_get = i9xx_ring_get_irq; |
ring->irq_put = i9xx_ring_put_irq; |
} |
ring->irq_enable_mask = I915_USER_INTERRUPT; |
ring->write_tail = ring_write_tail; |
if (INTEL_INFO(dev)->gen >= 4) |
ring->dispatch_execbuffer = i965_dispatch_execbuffer; |
else if (IS_I830(dev) || IS_845G(dev)) |
ring->dispatch_execbuffer = i830_dispatch_execbuffer; |
else |
ring->dispatch_execbuffer = i915_dispatch_execbuffer; |
ring->init = init_render_ring; |
ring->cleanup = render_ring_cleanup; |
ring->dev = dev; |
INIT_LIST_HEAD(&ring->active_list); |
INIT_LIST_HEAD(&ring->request_list); |
ring->size = size; |
ring->effective_size = ring->size; |
if (IS_I830(ring->dev) || IS_845G(ring->dev)) |
ring->effective_size -= 128; |
ring->virtual_start = ioremap_wc(start, size); |
if (ring->virtual_start == NULL) { |
DRM_ERROR("can not ioremap virtual address for" |
" ring buffer\n"); |
return -ENOMEM; |
} |
if (!I915_NEED_GFX_HWS(dev)) { |
ret = init_phys_hws_pga(ring); |
if (ret) |
return ret; |
} |
return 0; |
} |
#endif |
int intel_init_bsd_ring_buffer(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
1771,6 → 1547,7 |
} |
ring->init = init_ring_common; |
return intel_init_ring_buffer(dev, ring); |
} |
/drivers/video/drm/i915/i915_drv.h |
---|
33,7 → 33,6 |
#include "i915_reg.h" |
#include "intel_bios.h" |
#include "intel_ringbuffer.h" |
#include <linux/scatterlist.h> |
//#include <linux/io-mapping.h> |
#include <linux/i2c.h> |
#include <linux/i2c-algo-bit.h> |
41,7 → 40,6 |
//#include <linux/backlight.h> |
#include <linux/spinlock.h> |
#include <linux/err.h> |
/* General customization: |
71,14 → 69,6 |
}; |
#define pipe_name(p) ((p) + 'A') |
enum transcoder { |
TRANSCODER_A = 0, |
TRANSCODER_B, |
TRANSCODER_C, |
TRANSCODER_EDP = 0xF, |
}; |
#define transcoder_name(t) ((t) + 'A') |
enum plane { |
PLANE_A = 0, |
PLANE_B, |
114,12 → 104,6 |
}; |
#define I915_NUM_PLLS 2 |
struct intel_ddi_plls { |
int spll_refcount; |
int wrpll1_refcount; |
int wrpll2_refcount; |
}; |
/* Interface history: |
* |
* 1.1: Original. |
143,8 → 127,14 |
#define I915_GEM_PHYS_OVERLAY_REGS 3 |
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) |
struct mem_block { |
struct mem_block *next; |
struct mem_block *prev; |
int start; |
int size; |
struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ |
}; |
struct opregion_header; |
struct opregion_acpi; |
struct opregion_swsci; |
191,24 → 181,19 |
struct intel_display_error_state; |
struct drm_i915_error_state { |
struct kref ref; |
u32 eir; |
u32 pgtbl_er; |
u32 ier; |
u32 ccid; |
u32 derrmr; |
u32 forcewake; |
bool waiting[I915_NUM_RINGS]; |
u32 pipestat[I915_MAX_PIPES]; |
u32 tail[I915_NUM_RINGS]; |
u32 head[I915_NUM_RINGS]; |
u32 ctl[I915_NUM_RINGS]; |
u32 ipeir[I915_NUM_RINGS]; |
u32 ipehr[I915_NUM_RINGS]; |
u32 instdone[I915_NUM_RINGS]; |
u32 acthd[I915_NUM_RINGS]; |
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; |
u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1]; |
u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ |
/* our own tracking of ring head and tail */ |
u32 cpu_ring_head[I915_NUM_RINGS]; |
269,7 → 254,6 |
uint32_t sprite_width, int pixel_size); |
void (*update_linetime_wm)(struct drm_device *dev, int pipe, |
struct drm_display_mode *mode); |
void (*modeset_global_resources)(struct drm_device *dev); |
int (*crtc_mode_set)(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, |
282,6 → 266,7 |
struct drm_crtc *crtc); |
void (*fdi_link_train)(struct drm_crtc *crtc); |
void (*init_clock_gating)(struct drm_device *dev); |
void (*init_pch_clock_gating)(struct drm_device *dev); |
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
struct drm_i915_gem_object *obj); |
356,9 → 341,8 |
#define I915_PPGTT_PD_ENTRIES 512 |
#define I915_PPGTT_PT_ENTRIES 1024 |
struct i915_hw_ppgtt { |
struct drm_device *dev; |
unsigned num_pd_entries; |
struct page **pt_pages; |
dma_addr_t *pt_pages; |
uint32_t pd_offset; |
dma_addr_t *pt_dma_addr; |
dma_addr_t scratch_page_dma_addr; |
393,11 → 377,6 |
PCH_LPT, /* Lynxpoint PCH */ |
}; |
enum intel_sbi_destination { |
SBI_ICLK, |
SBI_MPHY, |
}; |
#define QUIRK_PIPEA_FORCE (1<<0) |
#define QUIRK_LVDS_SSC_DISABLE (1<<1) |
#define QUIRK_INVERT_BRIGHTNESS (1<<2) |
407,7 → 386,7 |
struct intel_gmbus { |
struct i2c_adapter adapter; |
u32 force_bit; |
bool force_bit; |
u32 reg0; |
u32 gpio_reg; |
struct i2c_algo_bit_data bit_algo; |
414,11 → 393,147 |
struct drm_i915_private *dev_priv; |
}; |
struct i915_suspend_saved_registers { |
typedef struct drm_i915_private { |
struct drm_device *dev; |
const struct intel_device_info *info; |
int relative_constants_mode; |
void __iomem *regs; |
struct drm_i915_gt_funcs gt; |
/** gt_fifo_count and the subsequent register write are synchronized |
* with dev->struct_mutex. */ |
unsigned gt_fifo_count; |
/** forcewake_count is protected by gt_lock */ |
unsigned forcewake_count; |
/** gt_lock is also taken in irq contexts. */ |
spinlock_t gt_lock; |
struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; |
/** gmbus_mutex protects against concurrent usage of the single hw gmbus |
* controller on different i2c buses. */ |
struct mutex gmbus_mutex; |
/** |
* Base address of the gmbus and gpio block. |
*/ |
uint32_t gpio_mmio_base; |
struct pci_dev *bridge_dev; |
struct intel_ring_buffer ring[I915_NUM_RINGS]; |
uint32_t next_seqno; |
drm_dma_handle_t *status_page_dmah; |
uint32_t counter; |
struct drm_i915_gem_object *pwrctx; |
struct drm_i915_gem_object *renderctx; |
// struct resource mch_res; |
atomic_t irq_received; |
/* protects the irq masks */ |
spinlock_t irq_lock; |
/* DPIO indirect register protection */ |
spinlock_t dpio_lock; |
/** Cached value of IMR to avoid reads in updating the bitfield */ |
u32 pipestat[2]; |
u32 irq_mask; |
u32 gt_irq_mask; |
u32 pch_irq_mask; |
u32 hotplug_supported_mask; |
struct work_struct hotplug_work; |
int num_pipe; |
int num_pch_pll; |
/* For hangcheck timer */ |
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
struct timer_list hangcheck_timer; |
int hangcheck_count; |
uint32_t last_acthd[I915_NUM_RINGS]; |
uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; |
unsigned int stop_rings; |
unsigned long cfb_size; |
unsigned int cfb_fb; |
enum plane cfb_plane; |
int cfb_y; |
// struct intel_fbc_work *fbc_work; |
struct intel_opregion opregion; |
/* overlay */ |
// struct intel_overlay *overlay; |
bool sprite_scaling_enabled; |
/* LVDS info */ |
int backlight_level; /* restore backlight to this value */ |
bool backlight_enabled; |
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
/* Feature bits from the VBIOS */ |
unsigned int int_tv_support:1; |
unsigned int lvds_dither:1; |
unsigned int lvds_vbt:1; |
unsigned int int_crt_support:1; |
unsigned int lvds_use_ssc:1; |
unsigned int display_clock_mode:1; |
int lvds_ssc_freq; |
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ |
unsigned int lvds_val; /* used for checking LVDS channel mode */ |
struct { |
int rate; |
int lanes; |
int preemphasis; |
int vswing; |
bool initialized; |
bool support; |
int bpp; |
struct edp_power_seq pps; |
} edp; |
bool no_aux_handshake; |
// struct notifier_block lid_notifier; |
int crt_ddc_pin; |
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ |
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
unsigned int fsb_freq, mem_freq, is_ddr3; |
spinlock_t error_lock; |
/* Protected by dev->error_lock. */ |
struct drm_i915_error_state *first_error; |
struct work_struct error_work; |
struct completion error_completion; |
struct workqueue_struct *wq; |
/* Display functions */ |
struct drm_i915_display_funcs display; |
/* PCH chipset type */ |
enum intel_pch pch_type; |
unsigned long quirks; |
/* Register state */ |
bool modeset_on_lid; |
u8 saveLBB; |
u32 saveDSPACNTR; |
u32 saveDSPBCNTR; |
u32 saveDSPARB; |
u32 saveHWS; |
u32 savePIPEACONF; |
u32 savePIPEBCONF; |
u32 savePIPEASRC; |
564,206 → 679,10 |
u32 savePIPEB_LINK_N1; |
u32 saveMCHBAR_RENDER_STANDBY; |
u32 savePCH_PORT_HOTPLUG; |
}; |
struct intel_gen6_power_mgmt { |
struct work_struct work; |
u32 pm_iir; |
/* lock - irqsave spinlock that protectects the work_struct and |
* pm_iir. */ |
spinlock_t lock; |
/* The below variables an all the rps hw state are protected by |
* dev->struct mutext. */ |
u8 cur_delay; |
u8 min_delay; |
u8 max_delay; |
struct delayed_work delayed_resume_work; |
/* |
* Protects RPS/RC6 register access and PCU communication. |
* Must be taken after struct_mutex if nested. |
*/ |
struct mutex hw_lock; |
}; |
struct intel_ilk_power_mgmt { |
u8 cur_delay; |
u8 min_delay; |
u8 max_delay; |
u8 fmax; |
u8 fstart; |
u64 last_count1; |
unsigned long last_time1; |
unsigned long chipset_power; |
u64 last_count2; |
struct timespec last_time2; |
unsigned long gfx_power; |
u8 corr; |
int c_m; |
int r_t; |
struct drm_i915_gem_object *pwrctx; |
struct drm_i915_gem_object *renderctx; |
}; |
struct i915_dri1_state { |
unsigned allow_batchbuffer : 1; |
u32 __iomem *gfx_hws_cpu_addr; |
unsigned int cpp; |
int back_offset; |
int front_offset; |
int current_page; |
int page_flipping; |
uint32_t counter; |
}; |
struct intel_l3_parity { |
u32 *remap_info; |
struct work_struct error_work; |
}; |
typedef struct drm_i915_private { |
struct drm_device *dev; |
const struct intel_device_info *info; |
int relative_constants_mode; |
void __iomem *regs; |
struct drm_i915_gt_funcs gt; |
/** gt_fifo_count and the subsequent register write are synchronized |
* with dev->struct_mutex. */ |
unsigned gt_fifo_count; |
/** forcewake_count is protected by gt_lock */ |
unsigned forcewake_count; |
/** gt_lock is also taken in irq contexts. */ |
struct spinlock gt_lock; |
struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; |
/** gmbus_mutex protects against concurrent usage of the single hw gmbus |
* controller on different i2c buses. */ |
struct mutex gmbus_mutex; |
/** |
* Base address of the gmbus and gpio block. |
*/ |
uint32_t gpio_mmio_base; |
struct pci_dev *bridge_dev; |
struct intel_ring_buffer ring[I915_NUM_RINGS]; |
uint32_t next_seqno; |
drm_dma_handle_t *status_page_dmah; |
struct resource mch_res; |
atomic_t irq_received; |
/* protects the irq masks */ |
spinlock_t irq_lock; |
/* DPIO indirect register protection */ |
spinlock_t dpio_lock; |
/** Cached value of IMR to avoid reads in updating the bitfield */ |
u32 pipestat[2]; |
u32 irq_mask; |
u32 gt_irq_mask; |
u32 pch_irq_mask; |
u32 hotplug_supported_mask; |
struct work_struct hotplug_work; |
int num_pipe; |
int num_pch_pll; |
/* For hangcheck timer */ |
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) |
struct timer_list hangcheck_timer; |
int hangcheck_count; |
uint32_t last_acthd[I915_NUM_RINGS]; |
uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; |
unsigned int stop_rings; |
unsigned long cfb_size; |
unsigned int cfb_fb; |
enum plane cfb_plane; |
int cfb_y; |
struct intel_fbc_work *fbc_work; |
struct intel_opregion opregion; |
/* overlay */ |
struct intel_overlay *overlay; |
bool sprite_scaling_enabled; |
/* LVDS info */ |
int backlight_level; /* restore backlight to this value */ |
bool backlight_enabled; |
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
/* Feature bits from the VBIOS */ |
unsigned int int_tv_support:1; |
unsigned int lvds_dither:1; |
unsigned int lvds_vbt:1; |
unsigned int int_crt_support:1; |
unsigned int lvds_use_ssc:1; |
unsigned int display_clock_mode:1; |
int lvds_ssc_freq; |
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ |
unsigned int lvds_val; /* used for checking LVDS channel mode */ |
struct { |
int rate; |
int lanes; |
int preemphasis; |
int vswing; |
bool initialized; |
bool support; |
int bpp; |
struct edp_power_seq pps; |
} edp; |
bool no_aux_handshake; |
int crt_ddc_pin; |
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ |
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
unsigned int fsb_freq, mem_freq, is_ddr3; |
spinlock_t error_lock; |
/* Protected by dev->error_lock. */ |
struct drm_i915_error_state *first_error; |
struct work_struct error_work; |
struct completion error_completion; |
struct workqueue_struct *wq; |
/* Display functions */ |
struct drm_i915_display_funcs display; |
/* PCH chipset type */ |
enum intel_pch pch_type; |
unsigned short pch_id; |
unsigned long quirks; |
/* Register state */ |
bool modeset_on_lid; |
struct { |
/** Bridge to intel-gtt-ko */ |
struct intel_gtt *gtt; |
const struct intel_gtt *gtt; |
/** Memory allocator for GTT stolen memory */ |
struct drm_mm stolen; |
/** Memory allocator for GTT */ |
790,8 → 709,9 |
/** PPGTT used for aliasing the PPGTT with the GTT */ |
struct i915_hw_ppgtt *aliasing_ppgtt; |
u32 *l3_remap_info; |
// struct shrinker inactive_shrinker; |
bool shrinker_no_lock_stealing; |
/** |
* List of objects currently involved in rendering. |
868,6 → 788,19 |
u32 object_count; |
} mm; |
/* Old dri1 support infrastructure, beware the dragons ya fools entering |
* here! */ |
struct { |
unsigned allow_batchbuffer : 1; |
u32 __iomem *gfx_hws_cpu_addr; |
unsigned int cpp; |
int back_offset; |
int front_offset; |
int current_page; |
int page_flipping; |
} dri1; |
/* Kernel Modesetting */ |
struct sdvo_device_mapping sdvo_mappings[2]; |
881,7 → 814,6 |
wait_queue_head_t pending_flip_queue; |
struct intel_pch_pll pch_plls[I915_NUM_PLLS]; |
struct intel_ddi_plls ddi_plls; |
/* Reclocking support */ |
bool render_reclock_avail; |
891,18 → 823,47 |
u16 orig_clock; |
int child_dev_num; |
struct child_device_config *child_dev; |
struct drm_connector *int_lvds_connector; |
struct drm_connector *int_edp_connector; |
bool mchbar_need_disable; |
struct intel_l3_parity l3_parity; |
/* gen6+ rps state */ |
struct intel_gen6_power_mgmt rps; |
struct { |
struct work_struct work; |
u32 pm_iir; |
/* lock - irqsave spinlock that protectects the work_struct and |
* pm_iir. */ |
spinlock_t lock; |
/* The below variables an all the rps hw state are protected by |
* dev->struct mutext. */ |
u8 cur_delay; |
u8 min_delay; |
u8 max_delay; |
} rps; |
/* ilk-only ips/rps state. Everything in here is protected by the global |
* mchdev_lock in intel_pm.c */ |
struct intel_ilk_power_mgmt ips; |
struct { |
u8 cur_delay; |
u8 min_delay; |
u8 max_delay; |
u8 fmax; |
u8 fstart; |
u64 last_count1; |
unsigned long last_time1; |
unsigned long chipset_power; |
u64 last_count2; |
struct timespec last_time2; |
unsigned long gfx_power; |
u8 corr; |
int c_m; |
int r_t; |
} ips; |
enum no_fbc_reason no_fbc_reason; |
struct drm_mm_node *compressed_fb; |
913,12 → 874,6 |
/* list of fbdev register on this device */ |
struct intel_fbdev *fbdev; |
/* |
* The console may be contended at resume, but we don't |
* want it to block on it. |
*/ |
struct work_struct console_resume_work; |
// struct backlight_device *backlight; |
struct drm_property *broadcast_rgb_property; |
926,14 → 881,6 |
bool hw_contexts_disabled; |
uint32_t hw_context_size; |
bool fdi_rx_polarity_reversed; |
struct i915_suspend_saved_registers regfile; |
/* Old dri1 support infrastructure, beware the dragons ya fools entering |
* here! */ |
struct i915_dri1_state dri1; |
} drm_i915_private_t; |
/* Iterate over initialised rings */ |
977,7 → 924,7 |
const struct drm_i915_gem_object_ops *ops; |
// void *mapped; |
void *mapped; |
/** Current space allocated to this object in the GTT, if any. */ |
struct drm_mm_node *gtt_space; |
1065,8 → 1012,8 |
unsigned int has_global_gtt_mapping:1; |
unsigned int has_dma_mapping:1; |
// dma_addr_t *allocated_pages; |
struct sg_table *pages; |
dma_addr_t *allocated_pages; |
struct pagelist pages; |
int pages_pin_count; |
/* prime dma-buf support */ |
1115,7 → 1062,6 |
*/ |
atomic_t pending_flip; |
}; |
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) |
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) |
1152,7 → 1098,7 |
struct drm_i915_file_private { |
struct { |
struct spinlock lock; |
spinlock_t lock; |
struct list_head request_list; |
} mm; |
struct idr context_idr; |
1179,17 → 1125,9 |
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) |
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) |
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) |
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ |
(dev)->pci_device == 0x0152 || \ |
(dev)->pci_device == 0x015a) |
#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \ |
(dev)->pci_device == 0x0106 || \ |
(dev)->pci_device == 0x010A) |
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) |
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) |
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
#define IS_ULT(dev) (IS_HASWELL(dev) && \ |
((dev)->pci_device & 0xFF00) == 0x0A00) |
/* |
* The genX designation typically refers to the render engine, so render |
1215,9 → 1153,6 |
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) |
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) |
/* Early gen2 have a totally busted CS tlb and require pinned batches. */ |
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) |
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
* rows, which changed the alignment requirements and fence programming. |
*/ |
1238,13 → 1173,6 |
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) |
#define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 |
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 |
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 |
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) |
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) |
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
1330,7 → 1258,6 |
extern void intel_irq_init(struct drm_device *dev); |
extern void intel_gt_init(struct drm_device *dev); |
extern void intel_gt_reset(struct drm_device *dev); |
void i915_error_state_free(struct kref *error_ref); |
1413,23 → 1340,15 |
void i915_gem_lastclose(struct drm_device *dev); |
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); |
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) |
static inline dma_addr_t i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) |
{ |
struct scatterlist *sg = obj->pages->sgl; |
int nents = obj->pages->nents; |
while (nents > SG_MAX_SINGLE_ALLOC) { |
if (n < SG_MAX_SINGLE_ALLOC - 1) |
break; |
return obj->pages.page[n]; |
}; |
sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1); |
n -= SG_MAX_SINGLE_ALLOC - 1; |
nents -= SG_MAX_SINGLE_ALLOC - 1; |
} |
return sg_page(sg+n); |
} |
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) |
{ |
BUG_ON(obj->pages == NULL); |
BUG_ON(obj->pages.page == NULL); |
obj->pages_pin_count++; |
} |
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) |
1442,7 → 1361,8 |
int i915_gem_object_sync(struct drm_i915_gem_object *obj, |
struct intel_ring_buffer *to); |
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
struct intel_ring_buffer *ring); |
struct intel_ring_buffer *ring, |
u32 seqno); |
int i915_gem_dumb_create(struct drm_file *file_priv, |
struct drm_device *dev, |
1460,7 → 1380,7 |
return (int32_t)(seq1 - seq2) >= 0; |
} |
extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); |
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring); |
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); |
1570,15 → 1490,7 |
unsigned long start, |
unsigned long mappable_end, |
unsigned long end); |
int i915_gem_gtt_init(struct drm_device *dev); |
void i915_gem_gtt_fini(struct drm_device *dev); |
static inline void i915_gem_chipset_flush(struct drm_device *dev) |
{ |
if (INTEL_INFO(dev)->gen < 6) |
intel_gtt_chipset_flush(); |
} |
/* i915_gem_evict.c */ |
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, |
unsigned alignment, |
1674,12 → 1586,11 |
extern void intel_modeset_gem_init(struct drm_device *dev); |
extern void intel_modeset_cleanup(struct drm_device *dev); |
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
extern void intel_modeset_setup_hw_state(struct drm_device *dev, |
bool force_restore); |
extern void intel_modeset_setup_hw_state(struct drm_device *dev); |
extern bool intel_fbc_enabled(struct drm_device *dev); |
extern void intel_disable_fbc(struct drm_device *dev); |
extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
extern void intel_init_pch_refclk(struct drm_device *dev); |
extern void ironlake_init_pch_refclk(struct drm_device *dev); |
extern void gen6_set_rps(struct drm_device *dev, u8 val); |
extern void intel_detect_pch(struct drm_device *dev); |
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); |
1708,9 → 1619,6 |
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); |
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); |
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); |
#define __i915_read(x, y) \ |
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); |
/drivers/video/drm/i915/i915_gem_gtt.c |
---|
22,8 → 22,6 |
* |
*/ |
#define iowrite32(v, addr) writel((v), (addr)) |
#include <drm/drmP.h> |
#include <drm/i915_drm.h> |
#include "i915_drv.h" |
34,67 → 32,19 |
#define AGP_USER_MEMORY (AGP_USER_TYPES) |
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
typedef uint32_t gtt_pte_t; |
/* PPGTT stuff */ |
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) |
#define GEN6_PDE_VALID (1 << 0) |
/* gen6+ has bit 11-4 for physical addr bit 39-32 */ |
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) |
#define GEN6_PTE_VALID (1 << 0) |
#define GEN6_PTE_UNCACHED (1 << 1) |
#define HSW_PTE_UNCACHED (0) |
#define GEN6_PTE_CACHE_LLC (2 << 1) |
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) |
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) |
static inline gtt_pte_t pte_encode(struct drm_device *dev, |
dma_addr_t addr, |
enum i915_cache_level level) |
{ |
gtt_pte_t pte = GEN6_PTE_VALID; |
pte |= GEN6_PTE_ADDR_ENCODE(addr); |
switch (level) { |
case I915_CACHE_LLC_MLC: |
/* Haswell doesn't set L3 this way */ |
if (IS_HASWELL(dev)) |
pte |= GEN6_PTE_CACHE_LLC; |
else |
pte |= GEN6_PTE_CACHE_LLC_MLC; |
break; |
case I915_CACHE_LLC: |
pte |= GEN6_PTE_CACHE_LLC; |
break; |
case I915_CACHE_NONE: |
if (IS_HASWELL(dev)) |
pte |= HSW_PTE_UNCACHED; |
else |
pte |= GEN6_PTE_UNCACHED; |
break; |
default: |
BUG(); |
} |
return pte; |
} |
/* PPGTT support for Sandybdrige/Gen6 and later */ |
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, |
unsigned first_entry, |
unsigned num_entries) |
{ |
gtt_pte_t *pt_vaddr; |
gtt_pte_t scratch_pte; |
uint32_t *pt_vaddr; |
uint32_t scratch_pte; |
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; |
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
unsigned last_pte, i; |
scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr, |
I915_CACHE_LLC); |
scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr); |
scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC; |
pt_vaddr = AllocKernelSpace(4096); |
106,7 → 56,7 |
if (last_pte > I915_PPGTT_PT_ENTRIES) |
last_pte = I915_PPGTT_PT_ENTRIES; |
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pd]), 3); |
MapPage(pt_vaddr,ppgtt->pt_pages[act_pd], 3); |
for (i = first_pte; i < last_pte; i++) |
pt_vaddr[i] = scratch_pte; |
137,13 → 87,13 |
return ret; |
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; |
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, |
ppgtt->pt_pages = kzalloc(sizeof(dma_addr_t)*ppgtt->num_pd_entries, |
GFP_KERNEL); |
if (!ppgtt->pt_pages) |
goto err_ppgtt; |
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); |
ppgtt->pt_pages[i] = AllocPage(); |
if (!ppgtt->pt_pages[i]) |
goto err_pt_alloc; |
} |
178,7 → 128,7 |
i915_ppgtt_clear_range(ppgtt, 0, |
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); |
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); |
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t); |
dev_priv->mm.aliasing_ppgtt = ppgtt; |
194,7 → 144,7 |
// kfree(ppgtt->pt_dma_addr); |
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
if (ppgtt->pt_pages[i]) |
FreePage((addr_t)(ppgtt->pt_pages[i])); |
FreePage(ppgtt->pt_pages[i]); |
} |
kfree(ppgtt->pt_pages); |
err_ppgtt: |
220,57 → 170,43 |
// kfree(ppgtt->pt_dma_addr); |
for (i = 0; i < ppgtt->num_pd_entries; i++) |
FreePage((addr_t)(ppgtt->pt_pages[i])); |
FreePage(ppgtt->pt_pages[i]); |
kfree(ppgtt->pt_pages); |
kfree(ppgtt); |
} |
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, |
const struct sg_table *pages, |
const struct pagelist *pages, |
unsigned first_entry, |
enum i915_cache_level cache_level) |
uint32_t pte_flags) |
{ |
gtt_pte_t *pt_vaddr; |
uint32_t *pt_vaddr, pte; |
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; |
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
unsigned i, j, m, segment_len; |
unsigned i, j; |
dma_addr_t page_addr; |
struct scatterlist *sg; |
/* init sg walking */ |
sg = pages->sgl; |
i = 0; |
segment_len = sg_dma_len(sg) >> PAGE_SHIFT; |
m = 0; |
pt_vaddr = AllocKernelSpace(4096); |
if( pt_vaddr == NULL) |
return; |
while (i < pages->nents) { |
MapPage(pt_vaddr,(addr_t)ppgtt->pt_pages[act_pd], 3); |
if( pt_vaddr != NULL) |
{ |
while (i < pages->nents) |
{ |
MapPage(pt_vaddr, ppgtt->pt_pages[act_pd], 3); |
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { |
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); |
pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr, |
cache_level); |
/* grab the next page */ |
if (++m == segment_len) { |
if (++i == pages->nents) |
break; |
sg = sg_next(sg); |
segment_len = sg_dma_len(sg) >> PAGE_SHIFT; |
m = 0; |
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++, i++) { |
page_addr = pages->page[i]; |
pte = GEN6_PTE_ADDR_ENCODE(page_addr); |
pt_vaddr[j] = pte | pte_flags; |
} |
} |
first_pte = 0; |
act_pd++; |
} |
FreeKernelSpace(pt_vaddr); |
}; |
} |
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, |
277,10 → 213,29 |
struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level) |
{ |
uint32_t pte_flags = GEN6_PTE_VALID; |
switch (cache_level) { |
case I915_CACHE_LLC_MLC: |
pte_flags |= GEN6_PTE_CACHE_LLC_MLC; |
break; |
case I915_CACHE_LLC: |
pte_flags |= GEN6_PTE_CACHE_LLC; |
break; |
case I915_CACHE_NONE: |
if (IS_HASWELL(obj->base.dev)) |
pte_flags |= HSW_PTE_UNCACHED; |
else |
pte_flags |= GEN6_PTE_UNCACHED; |
break; |
default: |
BUG(); |
} |
i915_ppgtt_insert_sg_entries(ppgtt, |
obj->pages, |
&obj->pages, |
obj->gtt_space->start >> PAGE_SHIFT, |
cache_level); |
pte_flags); |
} |
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, |
291,68 → 246,26 |
obj->base.size >> PAGE_SHIFT); |
} |
void i915_gem_init_ppgtt(struct drm_device *dev) |
/* XXX kill agp_type! */ |
static unsigned int cache_level_to_agp_type(struct drm_device *dev, |
enum i915_cache_level cache_level) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
uint32_t pd_offset; |
struct intel_ring_buffer *ring; |
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
uint32_t __iomem *pd_addr; |
uint32_t pd_entry; |
int i; |
if (!dev_priv->mm.aliasing_ppgtt) |
return; |
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); |
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
dma_addr_t pt_addr; |
if (dev_priv->mm.gtt->needs_dmar) |
pt_addr = ppgtt->pt_dma_addr[i]; |
else |
pt_addr = page_to_phys(ppgtt->pt_pages[i]); |
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); |
pd_entry |= GEN6_PDE_VALID; |
writel(pd_entry, pd_addr + i); |
switch (cache_level) { |
case I915_CACHE_LLC_MLC: |
if (INTEL_INFO(dev)->gen >= 6) |
return AGP_USER_CACHED_MEMORY_LLC_MLC; |
/* Older chipsets do not have this extra level of CPU |
* cacheing, so fallthrough and request the PTE simply |
* as cached. |
*/ |
case I915_CACHE_LLC: |
return AGP_USER_CACHED_MEMORY; |
default: |
case I915_CACHE_NONE: |
return AGP_USER_MEMORY; |
} |
readl(pd_addr); |
pd_offset = ppgtt->pd_offset; |
pd_offset /= 64; /* in cachelines, */ |
pd_offset <<= 16; |
if (INTEL_INFO(dev)->gen == 6) { |
uint32_t ecochk, gab_ctl, ecobits; |
ecobits = I915_READ(GAC_ECO_BITS); |
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); |
gab_ctl = I915_READ(GAB_CTL); |
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); |
ecochk = I915_READ(GAM_ECOCHK); |
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | |
ECOCHK_PPGTT_CACHE64B); |
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
} else if (INTEL_INFO(dev)->gen >= 7) { |
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); |
/* GFX_MODE is per-ring on gen7+ */ |
} |
for_each_ring(ring, dev_priv, i) { |
if (INTEL_INFO(dev)->gen >= 7) |
I915_WRITE(RING_MODE_GEN7(ring), |
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); |
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); |
} |
} |
static bool do_idling(struct drm_i915_private *dev_priv) |
{ |
bool ret = dev_priv->mm.interruptible; |
375,34 → 288,6 |
dev_priv->mm.interruptible = interruptible; |
} |
static void i915_ggtt_clear_range(struct drm_device *dev, |
unsigned first_entry, |
unsigned num_entries) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
gtt_pte_t scratch_pte; |
gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry; |
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; |
int i; |
if (INTEL_INFO(dev)->gen < 6) { |
intel_gtt_clear_range(first_entry, num_entries); |
return; |
} |
if (WARN(num_entries > max_entries, |
"First entry = %d; Num entries = %d (max=%d)\n", |
first_entry, num_entries, max_entries)) |
num_entries = max_entries; |
scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC); |
for (i = 0; i < num_entries; i++) |
iowrite32(scratch_pte, >t_base[i]); |
readl(gtt_base); |
} |
#if 0 |
void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
{ |
410,7 → 295,7 |
struct drm_i915_gem_object *obj; |
/* First fill our portion of the GTT with scratch pages */ |
i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE, |
intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, |
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); |
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { |
418,105 → 303,30 |
i915_gem_gtt_bind_object(obj, obj->cache_level); |
} |
i915_gem_chipset_flush(dev); |
intel_gtt_chipset_flush(); |
} |
#endif |
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) |
{ |
struct scatterlist *sg, *s; |
unsigned int nents ; |
int i; |
if (obj->has_dma_mapping) |
return 0; |
sg = obj->pages->sgl; |
nents = obj->pages->nents; |
WARN_ON(nents == 0 || sg[0].length == 0); |
for_each_sg(sg, s, nents, i) { |
BUG_ON(!sg_page(s)); |
s->dma_address = sg_phys(s); |
} |
asm volatile("lock; addl $0,0(%%esp)": : :"memory"); |
return 0; |
} |
/* |
* Binds an object into the global gtt with the specified cache level. The object |
* will be accessible to the GPU via commands whose operands reference offsets |
* within the global GTT as well as accessible by the GPU through the GMADR |
* mapped BAR (dev_priv->mm.gtt->gtt). |
*/ |
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj, |
enum i915_cache_level level) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct sg_table *st = obj->pages; |
struct scatterlist *sg = st->sgl; |
const int first_entry = obj->gtt_space->start >> PAGE_SHIFT; |
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; |
gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry; |
int unused, i = 0; |
unsigned int len, m = 0; |
dma_addr_t addr; |
for_each_sg(st->sgl, sg, st->nents, unused) { |
len = sg_dma_len(sg) >> PAGE_SHIFT; |
for (m = 0; m < len; m++) { |
addr = sg_dma_address(sg) + (m << PAGE_SHIFT); |
iowrite32(pte_encode(dev, addr, level), >t_entries[i]); |
i++; |
} |
} |
BUG_ON(i > max_entries); |
BUG_ON(i != obj->base.size / PAGE_SIZE); |
/* XXX: This serves as a posting read to make sure that the PTE has |
* actually been updated. There is some concern that even though |
* registers and PTEs are within the same BAR that they are potentially |
* of NUMA access patterns. Therefore, even with the way we assume |
* hardware should work, we must keep this posting read for paranoia. |
*/ |
if (i != 0) |
WARN_ON(readl(>t_entries[i-1]) != pte_encode(dev, addr, level)); |
/* This next bit makes the above posting read even more important. We |
* want to flush the TLBs only after we're certain all the PTE updates |
* have finished. |
*/ |
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); |
POSTING_READ(GFX_FLSH_CNTL_GEN6); |
} |
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level) |
{ |
struct drm_device *dev = obj->base.dev; |
if (INTEL_INFO(dev)->gen < 6) { |
unsigned int flags = (cache_level == I915_CACHE_NONE) ? |
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; |
intel_gtt_insert_sg_entries(obj->pages, |
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); |
intel_gtt_insert_sg_entries(&obj->pages, |
obj->gtt_space->start >> PAGE_SHIFT, |
flags); |
} else { |
gen6_ggtt_bind_object(obj, cache_level); |
} |
agp_type); |
obj->has_global_gtt_mapping = 1; |
} |
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
{ |
i915_ggtt_clear_range(obj->base.dev, |
obj->gtt_space->start >> PAGE_SHIFT, |
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, |
obj->base.size >> PAGE_SHIFT); |
obj->has_global_gtt_mapping = 0; |
574,276 → 384,5 |
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; |
/* ... but ensure that we clear the entire range. */ |
i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE); |
intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); |
} |
static int setup_scratch_page(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct page *page; |
dma_addr_t dma_addr; |
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
if (page == NULL) |
return -ENOMEM; |
#ifdef CONFIG_INTEL_IOMMU |
dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE, |
PCI_DMA_BIDIRECTIONAL); |
if (pci_dma_mapping_error(dev->pdev, dma_addr)) |
return -EINVAL; |
#else |
dma_addr = page_to_phys(page); |
#endif |
dev_priv->mm.gtt->scratch_page = page; |
dev_priv->mm.gtt->scratch_page_dma = dma_addr; |
return 0; |
} |
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) |
{ |
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; |
snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; |
return snb_gmch_ctl << 20; |
} |
static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl) |
{ |
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; |
snb_gmch_ctl &= SNB_GMCH_GMS_MASK; |
return snb_gmch_ctl << 25; /* 32 MB units */ |
} |
static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl) |
{ |
static const int stolen_decoder[] = { |
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; |
snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT; |
snb_gmch_ctl &= IVB_GMCH_GMS_MASK; |
return stolen_decoder[snb_gmch_ctl] << 20; |
} |
int i915_gem_gtt_init(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
phys_addr_t gtt_bus_addr; |
u16 snb_gmch_ctl; |
int ret; |
/* On modern platforms we need not worry ourself with the legacy |
* hostbridge query stuff. Skip it entirely |
*/ |
if (INTEL_INFO(dev)->gen < 6) { |
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); |
if (!ret) { |
DRM_ERROR("failed to set up gmch\n"); |
return -EIO; |
} |
dev_priv->mm.gtt = intel_gtt_get(); |
if (!dev_priv->mm.gtt) { |
DRM_ERROR("Failed to initialize GTT\n"); |
return -ENODEV; |
} |
return 0; |
} |
dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL); |
if (!dev_priv->mm.gtt) |
return -ENOMEM; |
#ifdef CONFIG_INTEL_IOMMU |
dev_priv->mm.gtt->needs_dmar = 1; |
#endif |
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */ |
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20); |
dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2); |
/* i9xx_setup */ |
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
dev_priv->mm.gtt->gtt_total_entries = |
gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t); |
if (INTEL_INFO(dev)->gen < 7) |
dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl); |
else |
dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl); |
dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT; |
/* 64/512MB is the current min/max we actually know of, but this is just a |
* coarse sanity check. |
*/ |
if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 || |
dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) { |
DRM_ERROR("Unknown GMADR entries (%d)\n", |
dev_priv->mm.gtt->gtt_mappable_entries); |
ret = -ENXIO; |
goto err_out; |
} |
ret = setup_scratch_page(dev); |
if (ret) { |
DRM_ERROR("Scratch setup failed\n"); |
goto err_out; |
} |
dev_priv->mm.gtt->gtt = ioremap(gtt_bus_addr, |
dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t)); |
if (!dev_priv->mm.gtt->gtt) { |
DRM_ERROR("Failed to map the gtt page table\n"); |
ret = -ENOMEM; |
goto err_out; |
} |
/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */ |
DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8); |
DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8); |
DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20); |
return 0; |
err_out: |
kfree(dev_priv->mm.gtt); |
return ret; |
} |
struct scatterlist *sg_next(struct scatterlist *sg) |
{ |
if (sg_is_last(sg)) |
return NULL; |
sg++; |
if (unlikely(sg_is_chain(sg))) |
sg = sg_chain_ptr(sg); |
return sg; |
} |
void __sg_free_table(struct sg_table *table, unsigned int max_ents, |
sg_free_fn *free_fn) |
{ |
struct scatterlist *sgl, *next; |
if (unlikely(!table->sgl)) |
return; |
sgl = table->sgl; |
while (table->orig_nents) { |
unsigned int alloc_size = table->orig_nents; |
unsigned int sg_size; |
/* |
* If we have more than max_ents segments left, |
* then assign 'next' to the sg table after the current one. |
* sg_size is then one less than alloc size, since the last |
* element is the chain pointer. |
*/ |
if (alloc_size > max_ents) { |
next = sg_chain_ptr(&sgl[max_ents - 1]); |
alloc_size = max_ents; |
sg_size = alloc_size - 1; |
} else { |
sg_size = alloc_size; |
next = NULL; |
} |
table->orig_nents -= sg_size; |
kfree(sgl); |
sgl = next; |
} |
table->sgl = NULL; |
} |
void sg_free_table(struct sg_table *table) |
{ |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL); |
} |
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) |
{ |
struct scatterlist *sg, *prv; |
unsigned int left; |
unsigned int max_ents = SG_MAX_SINGLE_ALLOC; |
#ifndef ARCH_HAS_SG_CHAIN |
BUG_ON(nents > max_ents); |
#endif |
memset(table, 0, sizeof(*table)); |
left = nents; |
prv = NULL; |
do { |
unsigned int sg_size, alloc_size = left; |
if (alloc_size > max_ents) { |
alloc_size = max_ents; |
sg_size = alloc_size - 1; |
} else |
sg_size = alloc_size; |
left -= sg_size; |
sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask); |
if (unlikely(!sg)) { |
/* |
* Adjust entry count to reflect that the last |
* entry of the previous table won't be used for |
* linkage. Without this, sg_kfree() may get |
* confused. |
*/ |
if (prv) |
table->nents = ++table->orig_nents; |
goto err; |
} |
sg_init_table(sg, alloc_size); |
table->nents = table->orig_nents += sg_size; |
/* |
* If this is the first mapping, assign the sg table header. |
* If this is not the first mapping, chain previous part. |
*/ |
if (prv) |
sg_chain(prv, max_ents, sg); |
else |
table->sgl = sg; |
/* |
* If no more entries after this one, mark the end |
*/ |
if (!left) |
sg_mark_end(&sg[sg_size - 1]); |
prv = sg; |
} while (left); |
return 0; |
err: |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL); |
return -ENOMEM; |
} |
void sg_init_table(struct scatterlist *sgl, unsigned int nents) |
{ |
memset(sgl, 0, sizeof(*sgl) * nents); |
#ifdef CONFIG_DEBUG_SG |
{ |
unsigned int i; |
for (i = 0; i < nents; i++) |
sgl[i].sg_magic = SG_MAGIC; |
} |
#endif |
sg_mark_end(&sgl[nents - 1]); |
} |
/drivers/video/drm/i915/i915.map |
---|
0,0 → 1,1284 |
Archive member included because of file (symbol) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) |
main.o (dbg_open) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o) |
pci.o (malloc) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(kref.o) |
i915_gem.o (kref_init) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memset.o) |
intel_bios.o (memset) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcmp.o) |
intel_bios.o (memcmp) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(time.o) |
intel_pm.o (jiffies_to_msecs) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcpy.o) |
intel_dp.o (memcpy) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncpy.o) |
intel_dp.o (strncpy) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (vsnprintf) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(string.o) |
e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o (strlcpy) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(list_sort.o) |
e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o (list_sort) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o) |
e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o (idr_pre_get) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncmp.o) |
e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o (strncmp) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(finfo.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (get_fileinfo) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(create.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (create_file) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ssize.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (set_file_size) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(write.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (write_file) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_memmove.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcpy.o) (_memcpy) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncpy.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncpy.o) (_strncpy) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ctype.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o) (_ctype) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strnlen.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o) (strnlen) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncmp.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncmp.o) (_strncmp) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strnlen.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strnlen.o) (_strnlen) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o) |
main.o (_imp__PciRead32) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o) |
main.o (_imp__GetService) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o) |
main.o (_imp__RegService) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o) |
pci.o (_imp__PciWrite32) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o) |
pci.o (_imp__PciRead8) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o) |
pci.o (_imp__PciRead16) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o) |
pci.o (_imp__Delay) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o) |
pci.o (_imp__PciApi) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o) |
pci.o (_imp__MapIoMem) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o) |
pci.o (_imp__FreeKernelSpace) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o) |
pci.o (_imp__PciWrite16) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o) |
i915_drv.o (_imp__MutexInit) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o) |
i915_gem.o (_imp__AllocPage) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o) |
i915_gem.o (_imp__FreePage) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o) |
i915_gem.o (_imp__MutexLock) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o) |
i915_gem.o (_imp__MutexUnlock) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o) |
i915_gem.o (_imp__GetTimerTicks) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o) |
i915_gem.o (_imp__CreateEvent) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o) |
i915_gem.o (_imp__WaitEvent) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o) |
i915_gem.o (_imp__DestroyEvent) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o) |
i915_gem.o (_imp__AllocKernelSpace) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o) |
i915_gem_gtt.o (_imp__MapPage) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o) |
i915_irq.o (_imp__RaiseEvent) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o) |
i915_irq.o (_imp__AttachIntHandler) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o) |
intel_panel.o (_imp__PciWrite8) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o) |
kms_display.o (_imp__DestroyObject) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o) |
kms_display.o (_imp__TimerHs) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o) |
kms_display.o (_imp__KernelAlloc) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o) |
kms_display.o (_imp__GetPgAddr) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o) |
kms_display.o (_imp__KernelFree) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o) |
kms_display.o (_imp__SetScreen) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o) |
kms_display.o (_imp__GetDisplay) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (_imp__SysMsgBoardStr) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000000.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o) (_head_core_dll) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000044.o) |
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000000.o) (core_dll_iname) |
Allocating common symbols |
Common symbol size file |
cmd_buffer 0x4 kms_display.o |
intel_agp_enabled 0x4 Gtt/intel-agp.o |
x86_clflush_size 0x4 main.o |
i915_lvds_channel_mode |
0x4 i915_drv.o |
main_device 0x4 i915_drv.o |
cmd_offset 0x4 kms_display.o |
Discarded input sections |
.drectve 0x00000000 0x24 main.o |
.drectve 0x00000000 0x44 i915_drv.o |
.drectve 0x00000000 0x38 kms_display.o |
.drectve 0x00000000 0x24 Gtt/intel-agp.o |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o) |
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o) |
Memory Configuration |
Name Origin Length Attributes |
*default* 0x00000000 0xffffffff |
Linker script and memory map |
0x00000000 __image_base__ = 0x0 |
0x00000000 __dll__ = 0x0 |
0x00000000 ___ImageBase = 0x0 |
0x00001000 __section_alignment__ = 0x1000 |
0x00000200 __file_alignment__ = 0x200 |
0x00000004 __major_os_version__ = 0x4 |
0x00000000 __minor_os_version__ = 0x0 |
0x00000001 __major_image_version__ = 0x1 |
0x00000000 __minor_image_version__ = 0x0 |
0x00000004 __major_subsystem_version__ = 0x4 |
0x00000000 __minor_subsystem_version__ = 0x0 |
0x00000003 __subsystem__ = 0x3 |
0x00200000 __size_of_stack_reserve__ = 0x200000 |
0x00001000 __size_of_stack_commit__ = 0x1000 |
0x00100000 __size_of_heap_reserve__ = 0x100000 |
0x00001000 __size_of_heap_commit__ = 0x1000 |
0x00000000 __loader_flags__ = 0x0 |
0x00000000 __dll_characteristics__ = 0x0 |
0x00000268 . = SIZEOF_HEADERS |
0x00001000 . = ALIGN (__section_alignment__) |
.text 0x00001000 0x53600 |
*(.text) |
.text 0x00001000 0x310 main.o |
0x00001000 display_handler@4 |
0x000010a0 pci_scan_filter |
0x00001100 parse_cmdline |
0x00001170 drvEntry |
0x000012f0 cpu_detect |
.text 0x00001310 0x1120 pci.o |
0x000018b0 pci_setup_device |
0x00001b40 pci_scan_slot |
0x00001d40 pci_find_capability |
0x00001e10 enum_pci_devices |
0x00001eb0 find_pci_device |
0x00001f40 pci_get_device |
0x00001fb0 pci_get_bus_and_slot |
0x00002000 pci_get_class |
0x00002050 ioport_map |
0x00002070 pci_iomap |
0x00002130 pci_iounmap |
0x00002150 pci_enable_rom |
0x000021c0 pci_disable_rom |
0x00002210 pci_get_rom_size |
0x000022a0 pci_map_rom |
0x00002360 pci_unmap_rom |
0x000023b0 pci_set_master |
.text 0x00002430 0x5d0 dvo_ch7017.o |
.text 0x00002a00 0x570 dvo_ch7xxx.o |
.text 0x00002f70 0x700 dvo_ivch.o |
.text 0x00003670 0xd80 dvo_ns2501.o |
.text 0x000043f0 0x420 dvo_sil164.o |
.text 0x00004810 0x5e0 dvo_tfp410.o |
.text 0x00004df0 0x9c0 i915_dma.o |
0x00004df0 i915_update_dri1_breadcrumb |
0x00004e50 i915_driver_load |
.text 0x000057b0 0x950 i915_drv.o |
0x000058c0 intel_detect_pch |
0x000059f0 i915_semaphore_is_enabled |
0x00005a20 drm_get_dev |
0x00005b20 i915_init |
0x00005bf0 i915_read8 |
0x00005cd0 i915_read16 |
0x00005db0 i915_read32 |
0x00005e90 i915_read64 |
0x00005f70 i915_write8 |
0x00005fd0 i915_write16 |
0x00006030 i915_write32 |
0x00006090 i915_write64 |
.text 0x00006100 0x2e70 i915_gem.o |
0x000066e0 drm_gem_object_init |
0x00006750 drm_gem_object_release |
0x00006760 i915_gem_get_aperture_ioctl |
0x000067e0 i915_gem_check_wedge |
0x00006820 i915_gem_release_mmap |
0x000068f0 i915_gem_get_unfenced_gtt_alignment |
0x00006930 i915_gem_object_get_pages |
0x000069d0 i915_gem_object_move_to_active |
0x00006b10 i915_gem_next_request_seqno |
0x00006b60 i915_add_request |
0x00006d60 i915_wait_seqno |
0x00007010 i915_gem_reset |
0x000070e0 i915_gem_retire_requests_ring |
0x00007260 i915_gem_retire_requests |
0x000073e0 i915_gem_object_sync |
0x000074c0 i915_gpu_idle |
0x00007570 i915_gem_object_put_fence |
0x00007600 i915_gem_object_get_fence |
0x000077b0 i915_gem_clflush_object |
0x000078a0 i915_gem_object_set_to_gtt_domain |
0x000079c0 i915_gem_object_finish_gpu |
0x000079f0 i915_gem_object_unbind |
0x00007b80 i915_gem_object_set_cache_level |
0x00007d30 i915_gem_object_set_to_cpu_domain |
0x00007ed0 i915_gem_object_pin |
0x00008360 i915_gem_object_pin_to_display_plane |
0x00008400 i915_gem_object_unpin |
0x00008490 i915_gem_object_init |
0x000084f0 i915_gem_alloc_object |
0x000085a0 i915_gem_init_object |
0x000085d0 i915_gem_free_object |
0x00008750 drm_gem_object_free |
0x000087a0 i915_gem_l3_remap |
0x000088a0 i915_gem_init_swizzling |
0x00008980 i915_gem_init_ppgtt |
0x00008b70 i915_gem_init_hw |
0x00008ce0 i915_gem_init |
0x00008dd0 i915_gem_cleanup_ringbuffer |
0x00008e10 i915_gem_load |
.text 0x00008f70 0x30 i915_gem_context.o |
0x00008f70 i915_gem_context_init |
0x00008f90 i915_switch_context |
.text 0x00008fa0 0x650 i915_gem_gtt.o |
0x000090b0 i915_gem_init_aliasing_ppgtt |
0x00009200 i915_gem_cleanup_aliasing_ppgtt |
0x00009270 i915_ppgtt_bind_object |
0x000093c0 i915_ppgtt_unbind_object |
0x000093f0 i915_gem_gtt_prepare_object |
0x00009400 i915_gem_gtt_bind_object |
0x00009460 i915_gem_gtt_unbind_object |
0x00009490 i915_gem_gtt_finish_object |
0x00009530 i915_gem_init_global_gtt |
.text 0x000095f0 0x3e0 i915_gem_stolen.o |
0x00009680 i915_gem_cleanup_stolen |
0x000096d0 i915_gem_init_stolen |
.text 0x000099d0 0x170 i915_gem_tiling.o |
0x000099d0 i915_gem_detect_bit_6_swizzle |
.text 0x00009b40 0x11f0 i915_irq.o |
0x00009bb0 i915_enable_pipestat |
0x00009c20 i915_disable_pipestat |
0x00009c80 i915_handle_error |
0x0000a320 irq_handler_kms |
0x0000a8a0 intel_irq_init |
0x0000a8b0 drm_irq_install |
.text 0x0000ad30 0x10b0 intel_bios.o |
0x0000ae30 intel_parse_bios |
0x0000bd70 intel_setup_bios |
.text 0x0000bde0 0xf70 intel_crt.o |
0x0000cb20 intel_crt_init |
.text 0x0000cd50 0x7e0 intel_ddi.o |
0x0000cd50 intel_prepare_ddi_buffers |
0x0000cde0 intel_prepare_ddi |
0x0000ce40 hsw_fdi_link_train |
0x0000d090 intel_ddi_init |
0x0000d0f0 intel_ddi_mode_set |
0x0000d3b0 intel_ddi_get_hw_state |
0x0000d470 intel_enable_ddi |
0x0000d4d0 intel_disable_ddi |
.text 0x0000d530 0xb7e0 intel_display.o |
0x0000dc20 intel_crtc_load_lut |
0x0000fef0 intel_dpio_read |
0x0000fff0 intel_pipe_has_type |
0x00010040 intel_wait_for_vblank |
0x00012fa0 intel_wait_for_pipe_off |
0x00013180 assert_pipe |
0x00013580 intel_flush_display_plane |
0x00014260 intel_pin_and_fence_fb_obj |
0x00014350 intel_unpin_fb_obj |
0x00014360 intel_cpt_verify_modeset |
0x000153f0 intel_crtc_update_dpms |
0x00015460 intel_modeset_disable |
0x000154b0 intel_encoder_noop |
0x000154c0 intel_encoder_destroy |
0x000154e0 intel_encoder_dpms |
0x00015510 intel_connector_get_hw_state |
0x00015540 ironlake_init_pch_refclk |
0x00015890 intel_write_eld |
0x00015980 intel_crtc_fb_gamma_set |
0x000159e0 intel_crtc_fb_gamma_get |
0x00015a30 intel_get_load_detect_pipe |
0x00015b90 intel_crtc_mode_get |
0x00015f00 intel_mark_busy |
0x00015f20 intel_mark_idle |
0x00015f30 intel_mark_fb_busy |
0x00015f80 intel_mark_fb_idle |
0x000160a0 intel_encoder_check_is_cloned |
0x00016120 intel_modeset_check_state |
0x00016290 intel_connector_dpms |
0x00016310 intel_set_mode |
0x00017380 intel_release_load_detect_pipe |
0x00017490 intel_get_pipe_from_crtc_id |
0x000174f0 intel_framebuffer_init |
0x00017620 intel_modeset_init_hw |
0x00017650 intel_modeset_init |
0x00018610 intel_modeset_setup_hw_state |
0x00018c30 intel_modeset_gem_init |
0x00018c50 intel_modeset_cleanup |
0x00018c60 intel_best_encoder |
0x00018c70 intel_connector_attach_encoder |
0x00018c90 intel_modeset_vga_set_state |
.text 0x00018d10 0x3310 intel_dp.o |
0x0001b650 intel_encoder_is_pch_edp |
0x0001b670 intel_edp_link_config |
0x0001b6b0 intel_edp_target_clock |
0x0001b6d0 intel_dp_set_m_n |
0x0001b890 intel_trans_dp_port_sel |
0x0001b8e0 intel_dpd_is_edp |
0x0001b940 intel_dp_init |
.text 0x0001c020 0x7f0 intel_dvo.o |
0x0001c560 intel_dvo_init |
.text 0x0001c810 0x4c0 intel_fb.o |
0x0001c810 framebuffer_alloc |
0x0001cbf0 intel_fbdev_init |
.text 0x0001ccd0 0x1770 intel_hdmi.o |
0x0001e120 enc_to_intel_hdmi |
0x0001e130 intel_dip_infoframe_csum |
0x0001e160 intel_hdmi_init |
.text 0x0001e440 0xc20 intel_i2c.o |
0x0001ee00 intel_i2c_reset |
0x0001eeb0 intel_setup_gmbus |
0x0001efe0 intel_gmbus_get_adapter |
0x0001f020 intel_gmbus_set_speed |
0x0001f040 intel_gmbus_force_bit |
0x0001f050 intel_teardown_gmbus |
.text 0x0001f060 0x1180 intel_lvds.o |
0x0001fbe0 intel_lvds_init |
.text 0x000201e0 0xc0 intel_modes.o |
0x000201e0 intel_connector_update_modes |
0x00020240 intel_ddc_get_modes |
0x00020280 intel_attach_force_audio_property |
0x00020290 intel_attach_broadcast_rgb_property |
.text 0x000202a0 0x1c0 intel_opregion.o |
0x000202a0 intel_opregion_setup |
.text 0x00020460 0x7d0 intel_panel.o |
0x000204c0 intel_fixed_panel_mode |
0x00020500 intel_pch_panel_fitting |
0x00020620 intel_panel_get_max_backlight |
0x000208e0 intel_panel_set_backlight |
0x00020910 intel_panel_disable_backlight |
0x000209e0 intel_panel_enable_backlight |
0x00020b20 intel_panel_detect |
0x00020b40 intel_panel_setup_backlight |
0x00020c20 intel_panel_destroy_backlight |
.text 0x00020c30 0x5ea0 intel_pm.o |
0x000248f0 intel_fbc_enabled |
0x00024910 intel_enable_fbc |
0x00024920 intel_disable_fbc |
0x00024940 intel_update_fbc |
0x00024c20 intel_update_watermarks |
0x00024c40 intel_update_linetime_watermarks |
0x00024c60 intel_update_sprite_watermarks |
0x00024ca0 ironlake_set_drps |
0x00024d40 gen6_set_rps |
0x00024e80 intel_enable_rc6 |
0x00024f10 ironlake_teardown_rc6 |
0x00024f90 i915_chipset_val |
0x00024fc0 i915_mch_val |
0x00025150 i915_update_gfx_val |
0x00025180 i915_gfx_val |
0x000251b0 i915_read_mch_val |
0x000251f0 i915_gpu_raise |
0x00025220 i915_gpu_lower |
0x00025250 i915_gpu_busy |
0x000252a0 i915_gpu_turbo_disable |
0x000252e0 intel_gpu_ips_init |
0x000252f0 intel_gpu_ips_teardown |
0x00025300 intel_disable_gt_powersave |
0x000255e0 intel_enable_gt_powersave |
0x00025df0 intel_init_clock_gating |
0x00025e40 intel_init_power_wells |
0x00025f70 intel_init_pm |
0x000266b0 gen6_gt_force_wake_get |
0x000266e0 gen6_gt_check_fifodbg |
0x00026710 gen6_gt_force_wake_put |
0x00026940 __gen6_gt_wait_for_fifo |
0x000269e0 intel_gt_init |
.text 0x00026ad0 0x2720 intel_ringbuffer.o |
0x000277c0 intel_ring_get_active_head |
0x00027830 intel_ring_setup_status_page |
0x00027bd0 intel_wait_ring_buffer |
0x00027da0 intel_cleanup_ring_buffer |
0x00027e60 intel_ring_begin |
0x00027fa0 intel_ring_advance |
0x00028c30 intel_init_render_ring_buffer |
0x00028f00 intel_init_bsd_ring_buffer |
0x000290a0 intel_init_blt_ring_buffer |
0x00029170 intel_ring_flush_all_caches |
0x000291b0 intel_ring_invalidate_all_caches |
.text 0x000291f0 0x3f60 intel_sdvo.o |
0x0002c990 hweight16 |
0x0002c9d0 intel_sdvo_init |
.text 0x0002d150 0x1310 intel_sprite.o |
0x0002e1c0 intel_sprite_set_colorkey |
0x0002e250 intel_sprite_get_colorkey |
0x0002e2d0 intel_plane_init |
.text 0x0002e460 0xc90 kms_display.o |
0x0002e460 restore_cursor@8 |
0x0002e470 disable_mouse |
0x0002e480 destroy_cursor |
0x0002e4c0 run_workqueue@4 |
0x0002e510 delayed_work_timer_fn@4 |
0x0002e590 init_cursor |
0x0002e970 set_mode |
0x0002ec00 init_display_kms |
0x0002ee30 get_videomodes |
0x0002eef0 set_user_mode |
0x0002ef70 queue_delayed_work_on |
0x0002efb0 queue_delayed_work |
0x0002f050 alloc_workqueue |
0x0002f080 getrawmonotonic |
0x0002f0b0 set_normalized_timespec |
.text 0x0002f0f0 0xd0 Gtt/intel-agp.o |
0x0002f0f0 agp_alloc_bridge |
0x0002f130 init_agp |
.text 0x0002f1c0 0xda0 Gtt/intel-gtt.o |
0x0002f500 intel_enable_gtt |
0x0002f660 intel_gtt_insert_sg_entries |
0x0002f6c0 intel_gtt_clear_range |
0x0002f710 intel_gmch_probe |
0x0002ff30 intel_gtt_get |
0x0002ff40 intel_gtt_chipset_flush |
.text 0x0002ff60 0x110 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o |
0x0002ff60 i2c_transfer |
0x0002fff0 i2c_new_device |
.text 0x00030070 0xb50 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o |
0x00030b60 i2c_bit_add_bus |
.text 0x00030bc0 0x90 e:/kos/kolibri/drivers/video/drm/i915/../drm_pci.o |
0x00030bc0 drm_pci_alloc |
0x00030c40 drm_pcie_get_speed_cap_mask |
.text 0x00030c50 0xdd0 e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o |
0x00030c90 drm_mode_debug_printmodeline |
0x00030d10 drm_mode_set_name |
0x00030d60 drm_gtf_mode_complex |
0x00030fb0 drm_gtf_mode |
0x00031010 drm_cvt_mode |
0x000313d0 drm_mode_list_concat |
0x00031410 drm_mode_width |
0x00031420 drm_mode_height |
0x00031430 drm_mode_hsync |
0x00031470 drm_mode_vrefresh |
0x000314f0 drm_mode_set_crtcinfo |
0x00031670 drm_mode_copy |
0x000316b0 drm_mode_duplicate |
0x000316f0 drm_mode_equal |
0x000317b0 drm_mode_validate_size |
0x00031810 drm_mode_validate_clocks |
0x00031870 drm_mode_prune_invalid |
0x00031900 drm_mode_sort |
0x00031930 drm_mode_connector_list_update |
.text 0x00031a20 0x80 e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o |
0x00031a20 drm_err |
0x00031a60 drm_ut_debug_printk |
0x00031a70 drm_order |
.text 0x00031aa0 0x2700 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o |
0x00031bb0 drm_get_dpms_name |
0x00031c10 drm_get_dvi_i_select_name |
0x00031c50 drm_get_dvi_i_subconnector_name |
0x00031c90 drm_get_tv_select_name |
0x00031cd0 drm_get_tv_subconnector_name |
0x00031d10 drm_get_dirty_info_name |
0x00031d50 drm_get_encoder_name |
0x00031da0 drm_get_connector_name |
0x00031df0 drm_get_connector_status_name |
0x00031e10 drm_mode_object_find |
0x00031e90 drm_framebuffer_init |
0x00031ef0 drm_framebuffer_cleanup |
0x00031f40 drm_crtc_init |
0x00031fe0 drm_crtc_cleanup |
0x00032050 drm_mode_probed_add |
0x00032070 drm_connector_init |
0x000321e0 drm_connector_unplug_all |
0x000321f0 drm_encoder_init |
0x00032280 drm_encoder_cleanup |
0x000322f0 drm_plane_init |
0x00032430 drm_plane_cleanup |
0x000324b0 drm_mode_create |
0x00032510 drm_mode_destroy |
0x00032540 drm_mode_remove |
0x00032570 drm_connector_cleanup |
0x00032640 drm_mode_group_init |
0x000326c0 drm_mode_group_init_legacy_group |
0x000327b0 drm_mode_legacy_fb_format |
0x00032840 drm_mode_attachmode_crtc |
0x000329c0 drm_mode_detachmode_crtc |
0x00032a70 drm_property_create |
0x00032b90 drm_property_create_range |
0x00032c10 drm_property_add_enum |
0x00032d30 drm_property_destroy |
0x00032dd0 drm_property_create_bitmask |
0x00032e60 drm_property_create_enum |
0x00032ef0 drm_mode_config_init |
0x00033040 drm_mode_create_dirty_info_property |
0x000330a0 drm_mode_create_dithering_property |
0x00033100 drm_mode_create_scaling_mode_property |
0x00033160 drm_mode_create_tv_properties |
0x000334e0 drm_mode_create_dvi_i_properties |
0x00033570 drm_mode_config_cleanup |
0x000336a0 drm_connector_attach_property |
0x000336f0 drm_connector_property_set_value |
0x00033750 drm_connector_property_get_value |
0x000337b0 drm_object_attach_property |
0x00033800 drm_object_property_set_value |
0x00033860 drm_object_property_get_value |
0x000338c0 drm_mode_connector_update_edid_property |
0x00033a70 drm_mode_connector_attach_encoder |
0x00033ad0 drm_mode_connector_detach_encoder |
0x00033b30 drm_mode_crtc_set_gamma_size |
0x00033ba0 drm_mode_config_reset |
0x00033c50 drm_fb_get_bpp_depth |
0x00033ea0 drm_format_num_planes |
0x00033f40 drm_format_plane_cpp |
0x000340b0 drm_format_horz_chroma_subsampling |
0x00034150 drm_format_vert_chroma_subsampling |
.text 0x000341a0 0x1640 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o |
0x00034250 drm_helper_probe_single_connector_modes |
0x00034510 drm_helper_encoder_in_use |
0x00034560 drm_helper_crtc_in_use |
0x000345d0 drm_helper_disable_unused_functions |
0x00034700 drm_crtc_helper_set_mode |
0x00034b50 drm_crtc_helper_set_config |
0x00035540 drm_helper_connector_dpms |
0x00035640 drm_helper_mode_fill_fb_struct |
0x000356a0 drm_helper_resume_force_mode |
.text 0x000357e0 0x2a50 e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o |
0x00036680 drm_edid_header_is_valid |
0x000366b0 drm_edid_block_valid |
0x000367d0 drm_edid_is_valid |
0x00036830 drm_probe_ddc |
0x00036860 drm_get_edid |
0x00036b40 drm_mode_find_dmt |
0x00037240 drm_find_cea_extension |
0x00037290 drm_edid_to_eld |
0x00037710 drm_av_sync_delay |
0x00037790 drm_select_eld |
0x000377e0 drm_detect_hdmi_monitor |
0x000378a0 drm_detect_monitor_audio |
0x000379b0 drm_add_edid_modes |
0x00038170 drm_add_modes_noedid |
.text 0x00038230 0x2b0 e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o |
0x00038230 div64_u64 |
0x000382b0 drm_calc_timestamping_constants |
0x000384c0 drm_vblank_pre_modeset |
0x000384d0 drm_vblank_post_modeset |
.text 0x000384e0 0x280 e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o |
0x000386e0 i2c_dp_aux_add_bus |
.text 0x00038760 0xe00 e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o |
0x00038b20 drm_mm_pre_get |
0x00038b90 drm_mm_get_block_generic |
0x00038be0 drm_mm_get_block_range_generic |
0x00038c30 drm_mm_remove_node |
0x00038d70 drm_mm_put_block |
0x00038dd0 drm_mm_search_free_generic |
0x00038f20 drm_mm_insert_node |
0x00038f80 drm_mm_search_free_in_range_generic |
0x000390e0 drm_mm_insert_node_in_range |
0x00039170 drm_mm_replace_node |
0x000391e0 drm_mm_init_scan |
0x00039220 drm_mm_init_scan_with_range |
0x00039270 drm_mm_scan_add_block |
0x000393b0 drm_mm_scan_remove_block |
0x00039440 drm_mm_clean |
0x00039460 drm_mm_init |
0x000394c0 drm_mm_takedown |
.text 0x00039560 0x16f0 e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o |
0x000398f0 drm_fb_helper_single_add_all_connectors |
0x000399a0 drm_fb_helper_blank |
0x00039a20 drm_fb_helper_init |
0x00039c80 drm_fb_helper_setcmap |
0x00039fe0 drm_fb_helper_check_var |
0x0003a220 drm_fb_helper_set_par |
0x0003a2d0 drm_fb_helper_pan_display |
0x0003a370 drm_fb_helper_single_fb_probe |
0x0003a600 drm_fb_helper_fill_fix |
0x0003a670 drm_fb_helper_fill_var |
0x0003a810 drm_fb_helper_initial_config |
.text 0x0003ac50 0x1d8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) |
0x0003ac50 dbg_open |
0x0003acba printf |
0x0003ad03 dbgprintf |
0x0003ad98 xf86DrvMsg |
.text 0x0003ae28 0x2274 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o) |
0x0003b29e malloc |
0x0003c675 free |
0x0003cf45 memalign |
.text 0x0003d09c 0x3c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(kref.o) |
0x0003d09c kref_set |
0x0003d0a7 kref_init |
0x0003d0b2 kref_get |
0x0003d0b9 kref_put |
*fill* 0x0003d0d8 0x8 00 |
.text 0x0003d0e0 0x50 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memset.o) |
0x0003d0e0 memset |
.text 0x0003d130 0x60 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcmp.o) |
0x0003d130 memcmp |
.text 0x0003d190 0x4c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(time.o) |
0x0003d190 jiffies_to_msecs |
0x0003d196 jiffies_to_usecs |
0x0003d19f msecs_to_jiffies |
0x0003d1b9 usecs_to_jiffies |
*fill* 0x0003d1dc 0x4 00 |
.text 0x0003d1e0 0x20 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcpy.o) |
0x0003d1e0 memcpy |
.text 0x0003d200 0x20 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncpy.o) |
0x0003d200 strncpy |
.text 0x0003d220 0x12dc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o) |
0x0003dda9 simple_strtoull |
0x0003de8a simple_strtoul |
0x0003dead simple_strtol |
0x0003dee9 simple_strtoll |
0x0003df26 strict_strtoul |
0x0003df82 strict_strtol |
0x0003dfca strict_strtoull |
0x0003e030 strict_strtoll |
0x0003e07f vsnprintf |
0x0003e479 vsprintf |
0x0003e4a4 snprintf |
0x0003e4cf sprintf |
.text 0x0003e4fc 0x3c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(string.o) |
0x0003e4fc strlcpy |
.text 0x0003e538 0x130 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(list_sort.o) |
0x0003e538 list_sort |
.text 0x0003e668 0x638 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o) |
0x0003e682 find_first_bit |
0x0003e6c9 find_next_bit |
0x0003e95b idr_pre_get |
0x0003e9a8 idr_get_new_above |
0x0003e9e7 idr_get_new |
0x0003ea12 idr_remove |
0x0003eb27 idr_remove_all |
0x0003ebb5 idr_destroy |
0x0003ebd5 idr_find |
0x0003ec8b idr_init_cache |
0x0003ec8c idr_init |
.text 0x0003eca0 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncmp.o) |
0x0003eca0 strncmp |
.text 0x0003ecb0 0x28 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(finfo.o) |
0x0003ecb0 get_fileinfo |
.text 0x0003ecd8 0x28 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(create.o) |
0x0003ecd8 create_file |
.text 0x0003ed00 0x28 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ssize.o) |
0x0003ed00 set_file_size |
.text 0x0003ed28 0x3c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(write.o) |
0x0003ed28 write_file |
*fill* 0x0003ed64 0xc 00 |
.text 0x0003ed70 0x60 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_memmove.o) |
0x0003ed70 _memmove |
0x0003ed86 _memcpy |
.text 0x0003edd0 0x20 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncpy.o) |
0x0003edd0 _strncpy |
.text 0x0003edf0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ctype.o) |
.text 0x0003edf0 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strnlen.o) |
0x0003edf0 strnlen |
.text 0x0003ee00 0x30 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncmp.o) |
0x0003ee00 _strncmp |
.text 0x0003ee30 0x20 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strnlen.o) |
0x0003ee30 _strnlen |
*(.rdata) |
.rdata 0x0003ee50 0x74 main.o |
.rdata 0x0003eec4 0x16c pci.o |
.rdata 0x0003f030 0x270 dvo_ch7017.o |
.rdata 0x0003f2a0 0x160 dvo_ch7xxx.o |
.rdata 0x0003f400 0x204 dvo_ivch.o |
.rdata 0x0003f604 0x314 dvo_ns2501.o |
.rdata 0x0003f918 0x178 dvo_sil164.o |
.rdata 0x0003fa90 0x270 dvo_tfp410.o |
.rdata 0x0003fd00 0x34c i915_dma.o |
*fill* 0x0004004c 0x14 00 |
.rdata 0x00040060 0xa80 i915_drv.o |
.rdata 0x00040ae0 0x3c0 i915_gem.o |
.rdata 0x00040ea0 0x60 i915_gem_gtt.o |
.rdata 0x00040f00 0xd8 i915_gem_stolen.o |
.rdata 0x00040fd8 0x50 i915_gem_tiling.o |
.rdata 0x00041028 0x408 i915_irq.o |
.rdata 0x00041430 0x55c intel_bios.o |
*fill* 0x0004198c 0x14 00 |
.rdata 0x000419a0 0x360 intel_crt.o |
.rdata 0x00041d00 0x14e0 intel_ddi.o |
.rdata 0x000431e0 0x2040 intel_display.o |
.rdata 0x00045220 0xa60 intel_dp.o |
.rdata 0x00045c80 0x180 intel_dvo.o |
.rdata 0x00045e00 0x80 intel_fb.o |
.rdata 0x00045e80 0x180 intel_hdmi.o |
.rdata 0x00046000 0x160 intel_i2c.o |
.rdata 0x00046160 0x260 intel_lvds.o |
.rdata 0x000463c0 0xec intel_opregion.o |
.rdata 0x000464ac 0x84 intel_panel.o |
*fill* 0x00046530 0x10 00 |
.rdata 0x00046540 0x1680 intel_pm.o |
.rdata 0x00047bc0 0x2c0 intel_ringbuffer.o |
.rdata 0x00047e80 0x2880 intel_sdvo.o |
.rdata 0x0004a700 0x40 intel_sprite.o |
.rdata 0x0004a740 0x148 kms_display.o |
.rdata 0x0004a888 0x1c Gtt/intel-agp.o |
*fill* 0x0004a8a4 0x1c 00 |
.rdata 0x0004a8c0 0x9a0 Gtt/intel-gtt.o |
.rdata 0x0004b260 0x24 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o |
.rdata 0x0004b284 0x1a8 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o |
0x0004b3e4 i2c_bit_algo |
.rdata 0x0004b42c 0x70 e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o |
.rdata 0x0004b49c 0x18 e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o |
*fill* 0x0004b4b4 0xc 00 |
.rdata 0x0004b4c0 0x300 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o |
.rdata 0x0004b7c0 0x3f8 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o |
*fill* 0x0004bbb8 0x8 00 |
.rdata 0x0004bbc0 0x80c0 e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o |
.rdata 0x00053c80 0xe0 e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o |
.rdata 0x00053d60 0x2c e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o |
*fill* 0x00053d8c 0x14 00 |
.rdata 0x00053da0 0x160 e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o |
.rdata 0x00053f00 0x38c e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o |
.rdata 0x0005428c 0x80 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o) |
.rdata 0x0005430c 0x12c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o) |
0x000543f4 hex_asc |
.rdata 0x00054438 0x74 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o) |
.rdata 0x000544ac 0x100 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ctype.o) |
0x000544ac _ctype |
.eh_frame 0x00055000 0xa400 |
.eh_frame 0x00055000 0xdc main.o |
.eh_frame 0x000550dc 0x3d0 pci.o |
.eh_frame 0x000554ac 0x174 dvo_ch7017.o |
.eh_frame 0x00055620 0x188 dvo_ch7xxx.o |
.eh_frame 0x000557a8 0x198 dvo_ivch.o |
.eh_frame 0x00055940 0x24c dvo_ns2501.o |
.eh_frame 0x00055b8c 0x138 dvo_sil164.o |
.eh_frame 0x00055cc4 0x164 dvo_tfp410.o |
.eh_frame 0x00055e28 0x88 i915_dma.o |
.eh_frame 0x00055eb0 0x23c i915_drv.o |
.eh_frame 0x000560ec 0x8dc i915_gem.o |
.eh_frame 0x000569c8 0x40 i915_gem_context.o |
.eh_frame 0x00056a08 0x214 i915_gem_gtt.o |
.eh_frame 0x00056c1c 0xa8 i915_gem_stolen.o |
.eh_frame 0x00056cc4 0x40 i915_gem_tiling.o |
.eh_frame 0x00056d04 0x184 i915_irq.o |
.eh_frame 0x00056e88 0xc8 intel_bios.o |
.eh_frame 0x00056f50 0x2d0 intel_crt.o |
.eh_frame 0x00057220 0x1cc intel_ddi.o |
.eh_frame 0x000573ec 0x1344 intel_display.o |
.eh_frame 0x00058730 0x92c intel_dp.o |
.eh_frame 0x0005905c 0x210 intel_dvo.o |
.eh_frame 0x0005926c 0x9c intel_fb.o |
.eh_frame 0x00059308 0x50c intel_hdmi.o |
.eh_frame 0x00059814 0x290 intel_i2c.o |
.eh_frame 0x00059aa4 0x210 intel_lvds.o |
.eh_frame 0x00059cb4 0x90 intel_modes.o |
.eh_frame 0x00059d44 0x44 intel_opregion.o |
.eh_frame 0x00059d88 0x1ac intel_panel.o |
.eh_frame 0x00059f34 0xfb0 intel_pm.o |
.eh_frame 0x0005aee4 0x6ec intel_ringbuffer.o |
.eh_frame 0x0005b5d0 0x604 intel_sdvo.o |
.eh_frame 0x0005bbd4 0x2f4 intel_sprite.o |
.eh_frame 0x0005bec8 0x320 kms_display.o |
.eh_frame 0x0005c1e8 0x5c Gtt/intel-agp.o |
.eh_frame 0x0005c244 0x250 Gtt/intel-gtt.o |
.eh_frame 0x0005c494 0x7c e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o |
.eh_frame 0x0005c510 0x23c e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o |
.eh_frame 0x0005c74c 0x5c e:/kos/kolibri/drivers/video/drm/i915/../drm_pci.o |
.eh_frame 0x0005c7a8 0x334 e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o |
.eh_frame 0x0005cadc 0x58 e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o |
.eh_frame 0x0005cb34 0x9f4 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o |
.eh_frame 0x0005d528 0x2c8 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o |
.eh_frame 0x0005d7f0 0x6ac e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o |
.eh_frame 0x0005de9c 0x94 e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o |
.eh_frame 0x0005df30 0x8c e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o |
.eh_frame 0x0005dfbc 0x380 e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o |
.eh_frame 0x0005e33c 0x378 e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o |
.eh_frame 0x0005e6b4 0xb8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) |
.eh_frame 0x0005e76c 0x170 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o) |
.eh_frame 0x0005e8dc 0x6c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(kref.o) |
.eh_frame 0x0005e948 0x68 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(time.o) |
.eh_frame 0x0005e9b0 0x530 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o) |
.eh_frame 0x0005eee0 0x44 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(string.o) |
.eh_frame 0x0005ef24 0x58 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(list_sort.o) |
.eh_frame 0x0005ef7c 0x23c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o) |
.eh_frame 0x0005f1b8 0x34 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(finfo.o) |
.eh_frame 0x0005f1ec 0x34 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(create.o) |
.eh_frame 0x0005f220 0x34 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ssize.o) |
.eh_frame 0x0005f254 0x3c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(write.o) |
.data 0x00060000 0xa00 |
*(.data) |
.data 0x00060000 0x4 main.o |
0x00060000 i915_modeset |
.data 0x00060004 0x8 pci.o |
*fill* 0x0006000c 0x14 00 |
.data 0x00060020 0x40 dvo_ch7017.o |
0x00060020 ch7017_ops |
.data 0x00060060 0x40 dvo_ch7xxx.o |
0x00060060 ch7xxx_ops |
.data 0x000600a0 0x40 dvo_ivch.o |
0x000600a0 ivch_ops |
.data 0x000600e0 0x40 dvo_ns2501.o |
0x000600e0 ns2501_ops |
.data 0x00060120 0x40 dvo_sil164.o |
0x00060120 sil164_ops |
.data 0x00060160 0x40 dvo_tfp410.o |
0x00060160 tfp410_ops |
.data 0x000601a0 0x0 i915_dma.o |
.data 0x000601a0 0x10 i915_drv.o |
0x000601a0 i915_preliminary_hw_support |
0x000601a4 i915_vbt_sdvo_panel_type |
0x000601a8 i915_panel_use_ssc |
0x000601ac i915_semaphores |
.data 0x000601b0 0x0 i915_gem.o |
.data 0x000601b0 0x0 i915_gem_context.o |
.data 0x000601b0 0x0 i915_gem_gtt.o |
.data 0x000601b0 0x0 i915_gem_stolen.o |
.data 0x000601b0 0x0 i915_gem_tiling.o |
.data 0x000601b0 0x0 i915_irq.o |
.data 0x000601b0 0x0 intel_bios.o |
.data 0x000601b0 0x0 intel_crt.o |
.data 0x000601b0 0x0 intel_ddi.o |
*fill* 0x000601b0 0x10 00 |
.data 0x000601c0 0xc0 intel_display.o |
.data 0x00060280 0x0 intel_dp.o |
.data 0x00060280 0x0 intel_dvo.o |
.data 0x00060280 0x80 intel_fb.o |
.data 0x00060300 0x0 intel_hdmi.o |
.data 0x00060300 0x0 intel_i2c.o |
.data 0x00060300 0x0 intel_lvds.o |
.data 0x00060300 0x0 intel_modes.o |
.data 0x00060300 0x0 intel_opregion.o |
.data 0x00060300 0x0 intel_panel.o |
.data 0x00060300 0x0 intel_pm.o |
.data 0x00060300 0x0 intel_ringbuffer.o |
.data 0x00060300 0x0 intel_sdvo.o |
.data 0x00060300 0x2c intel_sprite.o |
.data 0x0006032c 0x0 kms_display.o |
*fill* 0x0006032c 0x14 00 |
.data 0x00060340 0x360 Gtt/intel-agp.o |
.data 0x000606a0 0x0 Gtt/intel-gtt.o |
.data 0x000606a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o |
.data 0x000606a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o |
.data 0x000606a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_pci.o |
.data 0x000606a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o |
.data 0x000606a0 0x8 e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o |
0x000606a0 drm_timestamp_precision |
0x000606a4 drm_vblank_offdelay |
*fill* 0x000606a8 0x18 00 |
.data 0x000606c0 0x1e0 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o |
.data 0x000608a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o |
.data 0x000608a0 0x100 e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o |
.data 0x000609a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o |
.data 0x000609a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o |
.data 0x000609a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o |
.data 0x000609a0 0x8 e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o |
.data 0x000609a8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) |
.data 0x000609a8 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o) |
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(kref.o) |
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memset.o) |
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcmp.o) |
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(time.o) |
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcpy.o) |
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncpy.o) |
.data 0x000609b4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o) |
0x000609b4 kptr_restrict |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(string.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(list_sort.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncmp.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(finfo.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(create.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ssize.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(write.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_memmove.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncpy.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ctype.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strnlen.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncmp.o) |
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strnlen.o) |
.bss 0x00061000 0x858 |
*(.bss) |
.bss 0x00061000 0x100 main.o |
.bss 0x00061100 0x0 pci.o |
.bss 0x00061100 0x0 dvo_ch7017.o |
.bss 0x00061100 0x0 dvo_ch7xxx.o |
.bss 0x00061100 0x0 dvo_ivch.o |
.bss 0x00061100 0x0 dvo_ns2501.o |
.bss 0x00061100 0x0 dvo_sil164.o |
.bss 0x00061100 0x0 dvo_tfp410.o |
.bss 0x00061100 0x0 i915_dma.o |
.bss 0x00061100 0x200 i915_drv.o |
0x00061100 i915_enable_ppgtt |
0x00061104 i915_enable_hangcheck |
0x00061108 i915_lvds_downclock |
0x0006110c i915_enable_fbc |
0x00061110 i915_enable_rc6 |
0x00061114 i915_powersave |
0x00061118 i915_panel_ignore_lid |
.bss 0x00061300 0x0 i915_gem.o |
.bss 0x00061300 0x0 i915_gem_context.o |
.bss 0x00061300 0x0 i915_gem_gtt.o |
.bss 0x00061300 0x0 i915_gem_stolen.o |
.bss 0x00061300 0x0 i915_gem_tiling.o |
.bss 0x00061300 0x4 i915_irq.o |
.bss 0x00061304 0x4 intel_bios.o |
.bss 0x00061308 0x0 intel_crt.o |
.bss 0x00061308 0x0 intel_ddi.o |
.bss 0x00061308 0x0 intel_display.o |
.bss 0x00061308 0x0 intel_dp.o |
.bss 0x00061308 0x0 intel_dvo.o |
*fill* 0x00061308 0x18 00 |
.bss 0x00061320 0x40 intel_fb.o |
.bss 0x00061360 0x0 intel_hdmi.o |
.bss 0x00061360 0x0 intel_i2c.o |
.bss 0x00061360 0x0 intel_lvds.o |
.bss 0x00061360 0x0 intel_modes.o |
.bss 0x00061360 0x0 intel_opregion.o |
.bss 0x00061360 0x0 intel_panel.o |
.bss 0x00061360 0x8 intel_pm.o |
0x00061360 mchdev_lock |
.bss 0x00061368 0x0 intel_ringbuffer.o |
.bss 0x00061368 0x0 intel_sdvo.o |
.bss 0x00061368 0x0 intel_sprite.o |
.bss 0x00061368 0x4 kms_display.o |
*fill* 0x0006136c 0x14 00 |
.bss 0x00061380 0x1e0 Gtt/intel-agp.o |
.bss 0x00061560 0x80 Gtt/intel-gtt.o |
.bss 0x000615e0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o |
.bss 0x000615e0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o |
.bss 0x000615e0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_pci.o |
.bss 0x000615e0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o |
.bss 0x000615e0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o |
0x000615e0 drm_debug |
*fill* 0x000615e4 0x1c 00 |
.bss 0x00061600 0x40 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o |
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o |
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o |
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o |
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o |
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o |
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o |
.bss 0x00061640 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) |
.bss 0x00061648 0x1f8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(kref.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memset.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcmp.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(time.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcpy.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncpy.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(string.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(list_sort.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncmp.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(finfo.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(create.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ssize.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(write.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_memmove.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncpy.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ctype.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strnlen.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncmp.o) |
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strnlen.o) |
*(COMMON) |
COMMON 0x00061840 0x4 main.o |
0x00061840 x86_clflush_size |
COMMON 0x00061844 0x8 i915_drv.o |
0x00061844 i915_lvds_channel_mode |
0x00061848 main_device |
COMMON 0x0006184c 0x8 kms_display.o |
0x0006184c cmd_buffer |
0x00061850 cmd_offset |
COMMON 0x00061854 0x4 Gtt/intel-agp.o |
0x00061854 intel_agp_enabled |
/DISCARD/ |
*(.debug$S) |
*(.debug$T) |
*(.debug$F) |
*(.drectve) |
*(.edata) |
.idata 0x00062000 0x400 |
SORT(*)(.idata$2) |
.idata$2 0x00062000 0x14 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000000.o) |
0x00062000 _head_core_dll |
SORT(*)(.idata$3) |
0x00062014 0x4 LONG 0x0 |
0x00062018 0x4 LONG 0x0 |
0x0006201c 0x4 LONG 0x0 |
0x00062020 0x4 LONG 0x0 |
0x00062024 0x4 LONG 0x0 |
SORT(*)(.idata$4) |
.idata$4 0x00062028 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000000.o) |
.idata$4 0x00062028 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o) |
.idata$4 0x0006202c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o) |
.idata$4 0x00062030 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o) |
.idata$4 0x00062034 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o) |
.idata$4 0x00062038 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o) |
.idata$4 0x0006203c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o) |
.idata$4 0x00062040 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o) |
.idata$4 0x00062044 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o) |
.idata$4 0x00062048 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o) |
.idata$4 0x0006204c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o) |
.idata$4 0x00062050 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o) |
.idata$4 0x00062054 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o) |
.idata$4 0x00062058 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o) |
.idata$4 0x0006205c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o) |
.idata$4 0x00062060 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o) |
.idata$4 0x00062064 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o) |
.idata$4 0x00062068 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o) |
.idata$4 0x0006206c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o) |
.idata$4 0x00062070 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o) |
.idata$4 0x00062074 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o) |
.idata$4 0x00062078 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o) |
.idata$4 0x0006207c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o) |
.idata$4 0x00062080 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o) |
.idata$4 0x00062084 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o) |
.idata$4 0x00062088 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o) |
.idata$4 0x0006208c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o) |
.idata$4 0x00062090 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o) |
.idata$4 0x00062094 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o) |
.idata$4 0x00062098 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o) |
.idata$4 0x0006209c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o) |
.idata$4 0x000620a0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o) |
.idata$4 0x000620a4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o) |
.idata$4 0x000620a8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o) |
.idata$4 0x000620ac 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000044.o) |
SORT(*)(.idata$5) |
.idata$5 0x000620b0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000000.o) |
.idata$5 0x000620b0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o) |
0x000620b0 _imp__AllocKernelSpace |
.idata$5 0x000620b4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o) |
0x000620b4 _imp__AllocPage |
.idata$5 0x000620b8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o) |
0x000620b8 _imp__AttachIntHandler |
.idata$5 0x000620bc 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o) |
0x000620bc _imp__CreateEvent |
.idata$5 0x000620c0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o) |
0x000620c0 _imp__Delay |
.idata$5 0x000620c4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o) |
0x000620c4 _imp__DestroyEvent |
.idata$5 0x000620c8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o) |
0x000620c8 _imp__DestroyObject |
.idata$5 0x000620cc 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o) |
0x000620cc _imp__FreeKernelSpace |
.idata$5 0x000620d0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o) |
0x000620d0 _imp__FreePage |
.idata$5 0x000620d4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o) |
0x000620d4 _imp__GetDisplay |
.idata$5 0x000620d8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o) |
0x000620d8 _imp__GetPgAddr |
.idata$5 0x000620dc 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o) |
0x000620dc _imp__GetService |
.idata$5 0x000620e0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o) |
0x000620e0 _imp__GetTimerTicks |
.idata$5 0x000620e4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o) |
0x000620e4 _imp__KernelAlloc |
.idata$5 0x000620e8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o) |
0x000620e8 _imp__KernelFree |
.idata$5 0x000620ec 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o) |
0x000620ec _imp__MapIoMem |
.idata$5 0x000620f0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o) |
0x000620f0 _imp__MapPage |
.idata$5 0x000620f4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o) |
0x000620f4 _imp__MutexInit |
.idata$5 0x000620f8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o) |
0x000620f8 _imp__MutexLock |
.idata$5 0x000620fc 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o) |
0x000620fc _imp__MutexUnlock |
.idata$5 0x00062100 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o) |
0x00062100 _imp__PciApi |
.idata$5 0x00062104 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o) |
0x00062104 _imp__PciRead16 |
.idata$5 0x00062108 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o) |
0x00062108 _imp__PciRead32 |
.idata$5 0x0006210c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o) |
0x0006210c _imp__PciRead8 |
.idata$5 0x00062110 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o) |
0x00062110 _imp__PciWrite16 |
.idata$5 0x00062114 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o) |
0x00062114 _imp__PciWrite32 |
.idata$5 0x00062118 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o) |
0x00062118 _imp__PciWrite8 |
.idata$5 0x0006211c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o) |
0x0006211c _imp__RaiseEvent |
.idata$5 0x00062120 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o) |
0x00062120 _imp__RegService |
.idata$5 0x00062124 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o) |
0x00062124 _imp__SetScreen |
.idata$5 0x00062128 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o) |
0x00062128 _imp__SysMsgBoardStr |
.idata$5 0x0006212c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o) |
0x0006212c _imp__TimerHs |
.idata$5 0x00062130 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o) |
0x00062130 _imp__WaitEvent |
.idata$5 0x00062134 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000044.o) |
SORT(*)(.idata$6) |
.idata$6 0x00062138 0x14 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o) |
.idata$6 0x0006214c 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o) |
.idata$6 0x00062158 0x14 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o) |
.idata$6 0x0006216c 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o) |
.idata$6 0x0006217c 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o) |
.idata$6 0x00062184 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o) |
.idata$6 0x00062194 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o) |
.idata$6 0x000621a4 0x14 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o) |
.idata$6 0x000621b8 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o) |
.idata$6 0x000621c4 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o) |
.idata$6 0x000621d4 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o) |
.idata$6 0x000621e0 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o) |
.idata$6 0x000621f0 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o) |
.idata$6 0x00062200 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o) |
.idata$6 0x00062210 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o) |
.idata$6 0x00062220 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o) |
.idata$6 0x0006222c 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o) |
.idata$6 0x00062238 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o) |
.idata$6 0x00062244 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o) |
.idata$6 0x00062250 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o) |
.idata$6 0x00062260 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o) |
.idata$6 0x0006226c 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o) |
.idata$6 0x00062278 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o) |
.idata$6 0x00062284 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o) |
.idata$6 0x00062290 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o) |
.idata$6 0x000622a0 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o) |
.idata$6 0x000622b0 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o) |
.idata$6 0x000622bc 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o) |
.idata$6 0x000622cc 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o) |
.idata$6 0x000622dc 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o) |
.idata$6 0x000622e8 0x14 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o) |
.idata$6 0x000622fc 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o) |
.idata$6 0x00062308 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o) |
SORT(*)(.idata$7) |
.idata$7 0x00062314 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o) |
.idata$7 0x00062318 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o) |
.idata$7 0x0006231c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o) |
.idata$7 0x00062320 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o) |
.idata$7 0x00062324 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o) |
.idata$7 0x00062328 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o) |
.idata$7 0x0006232c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o) |
.idata$7 0x00062330 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o) |
.idata$7 0x00062334 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o) |
.idata$7 0x00062338 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o) |
.idata$7 0x0006233c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o) |
.idata$7 0x00062340 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o) |
.idata$7 0x00062344 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o) |
.idata$7 0x00062348 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o) |
.idata$7 0x0006234c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o) |
.idata$7 0x00062350 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o) |
.idata$7 0x00062354 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o) |
.idata$7 0x00062358 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o) |
.idata$7 0x0006235c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o) |
.idata$7 0x00062360 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o) |
.idata$7 0x00062364 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o) |
.idata$7 0x00062368 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o) |
.idata$7 0x0006236c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o) |
.idata$7 0x00062370 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o) |
.idata$7 0x00062374 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o) |
.idata$7 0x00062378 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o) |
.idata$7 0x0006237c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o) |
.idata$7 0x00062380 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o) |
.idata$7 0x00062384 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o) |
.idata$7 0x00062388 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o) |
.idata$7 0x0006238c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o) |
.idata$7 0x00062390 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o) |
.idata$7 0x00062394 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o) |
.idata$7 0x00062398 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000044.o) |
0x00062398 core_dll_iname |
.reloc 0x00063000 0x2000 |
*(.reloc) |
.reloc 0x00063000 0x1f4c dll stuff |
LOAD main.o |
LOAD pci.o |
LOAD dvo_ch7017.o |
LOAD dvo_ch7xxx.o |
LOAD dvo_ivch.o |
LOAD dvo_ns2501.o |
LOAD dvo_sil164.o |
LOAD dvo_tfp410.o |
LOAD i915_dma.o |
LOAD i915_drv.o |
LOAD i915_gem.o |
LOAD i915_gem_context.o |
LOAD i915_gem_gtt.o |
LOAD i915_gem_stolen.o |
LOAD i915_gem_tiling.o |
LOAD i915_irq.o |
LOAD intel_bios.o |
LOAD intel_crt.o |
LOAD intel_ddi.o |
LOAD intel_display.o |
LOAD intel_dp.o |
LOAD intel_dvo.o |
LOAD intel_fb.o |
LOAD intel_hdmi.o |
LOAD intel_i2c.o |
LOAD intel_lvds.o |
LOAD intel_modes.o |
LOAD intel_opregion.o |
LOAD intel_panel.o |
LOAD intel_pm.o |
LOAD intel_ringbuffer.o |
LOAD intel_sdvo.o |
LOAD intel_sprite.o |
LOAD kms_display.o |
LOAD Gtt/intel-agp.o |
LOAD Gtt/intel-gtt.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_pci.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a |
LOAD e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libgcc.a |
OUTPUT(i915.dll pei-i386) |
LOAD dll stuff |
/drivers/video/drm/i915/i915_gem_context.c |
---|
148,7 → 148,7 |
struct i915_hw_context *ctx; |
int ret, id; |
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL); |
if (ctx == NULL) |
return ERR_PTR(-ENOMEM); |
420,8 → 420,9 |
* MI_SET_CONTEXT instead of when the next seqno has completed. |
*/ |
if (from_obj != NULL) { |
u32 seqno = i915_gem_next_request_seqno(ring); |
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; |
i915_gem_object_move_to_active(from_obj, ring); |
i915_gem_object_move_to_active(from_obj, ring, seqno); |
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the |
* whole damn pipeline, we don't need to explicitly mark the |
* object dirty. The only exception is that the context must be |
/drivers/video/drm/i915/i915_reg.h |
---|
26,7 → 26,6 |
#define _I915_REG_H_ |
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) |
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) |
#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) |
41,15 → 40,7 |
*/ |
#define INTEL_GMCH_CTRL 0x52 |
#define INTEL_GMCH_VGA_DISABLE (1 << 1) |
#define SNB_GMCH_CTRL 0x50 |
#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ |
#define SNB_GMCH_GGMS_MASK 0x3 |
#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ |
#define SNB_GMCH_GMS_MASK 0x1f |
#define IVB_GMCH_GMS_SHIFT 4 |
#define IVB_GMCH_GMS_MASK 0xf |
/* PCI config space */ |
#define HPLLCC 0xc0 /* 855 only */ |
114,6 → 105,23 |
#define GEN6_GRDOM_MEDIA (1 << 2) |
#define GEN6_GRDOM_BLT (1 << 3) |
/* PPGTT stuff */ |
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) |
#define GEN6_PDE_VALID (1 << 0) |
#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */ |
/* gen6+ has bit 11-4 for physical addr bit 39-32 */ |
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) |
#define GEN6_PTE_VALID (1 << 0) |
#define GEN6_PTE_UNCACHED (1 << 1) |
#define HSW_PTE_UNCACHED (0) |
#define GEN6_PTE_CACHE_LLC (2 << 1) |
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) |
#define GEN6_PTE_CACHE_BITS (3 << 1) |
#define GEN6_PTE_GFDT (1 << 3) |
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) |
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) |
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) |
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) |
233,18 → 241,11 |
*/ |
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) |
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ |
#define MI_FLUSH_DW_STORE_INDEX (1<<21) |
#define MI_INVALIDATE_TLB (1<<18) |
#define MI_FLUSH_DW_OP_STOREDW (1<<14) |
#define MI_INVALIDATE_BSD (1<<7) |
#define MI_FLUSH_DW_USE_GTT (1<<2) |
#define MI_FLUSH_DW_USE_PPGTT (0<<2) |
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) |
#define MI_BATCH_NON_SECURE (1) |
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ |
#define MI_BATCH_NON_SECURE_I965 (1<<8) |
#define MI_BATCH_PPGTT_HSW (1<<8) |
#define MI_BATCH_NON_SECURE_HSW (1<<13) |
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) |
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ |
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ |
368,7 → 369,6 |
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ |
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ |
#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */ |
#define DPIO_PLL_REFCLK_SEL_MASK 3 |
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ |
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ |
#define _DPIO_REFSFR_B 0x8034 |
384,9 → 384,6 |
#define DPIO_FASTCLK_DISABLE 0x8100 |
#define DPIO_DATA_CHANNEL1 0x8220 |
#define DPIO_DATA_CHANNEL2 0x8420 |
/* |
* Fence registers |
*/ |
512,14 → 509,11 |
#define GEN7_ERR_INT 0x44040 |
#define ERR_INT_MMIO_UNCLAIMED (1<<13) |
#define DERRMR 0x44050 |
/* GM45+ chicken bits -- debug workaround bits that may be required |
* for various sorts of correct behavior. The top 16 bits of each are |
* the enables for writing to the corresponding low bit. |
*/ |
#define _3D_CHICKEN 0x02084 |
#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) |
#define _3D_CHICKEN2 0x0208c |
/* Disables pipelining of read flushes past the SF-WIZ interface. |
* Required on all Ironlake steppings according to the B-Spec, but the |
527,17 → 521,14 |
*/ |
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) |
#define _3D_CHICKEN3 0x02090 |
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) |
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) |
#define MI_MODE 0x0209c |
# define VS_TIMER_DISPATCH (1 << 6) |
# define MI_FLUSH_ENABLE (1 << 12) |
# define ASYNC_FLIP_PERF_DISABLE (1 << 14) |
#define GEN6_GT_MODE 0x20d0 |
#define GEN6_GT_MODE_HI (1 << 9) |
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) |
#define GFX_MODE 0x02520 |
#define GFX_MODE_GEN7 0x0229c |
556,8 → 547,6 |
#define IIR 0x020a4 |
#define IMR 0x020a8 |
#define ISR 0x020ac |
#define VLV_GUNIT_CLOCK_GATE 0x182060 |
#define GCFG_DIS (1<<8) |
#define VLV_IIR_RW 0x182084 |
#define VLV_IER 0x1820a0 |
#define VLV_IIR 0x1820a4 |
672,7 → 661,6 |
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ |
#define CACHE_MODE_0 0x02120 /* 915+ only */ |
#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) |
#define CM0_IZ_OPT_DISABLE (1<<6) |
#define CM0_ZR_OPT_DISABLE (1<<5) |
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) |
682,8 → 670,6 |
#define CM0_RC_OP_FLUSH_DISABLE (1<<0) |
#define BB_ADDR 0x02140 /* 8 bytes */ |
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ |
#define GFX_FLSH_CNTL_GEN6 0x101008 |
#define GFX_FLSH_CNTL_EN (1<<0) |
#define ECOSKPD 0x021d0 |
#define ECO_GATING_CX_ONLY (1<<3) |
#define ECO_FLIP_DONE (1<<0) |
1573,14 → 1559,14 |
#define _VSYNCSHIFT_B 0x61028 |
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) |
#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) |
#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) |
#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B) |
#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B) |
#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B) |
#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) |
#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) |
#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B) |
#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B) |
#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) |
#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) |
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) |
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) |
#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B) |
/* VGA port control */ |
#define ADPA 0x61100 |
2655,7 → 2641,6 |
#define PIPECONF_GAMMA (1<<24) |
#define PIPECONF_FORCE_BORDER (1<<25) |
#define PIPECONF_INTERLACE_MASK (7 << 21) |
#define PIPECONF_INTERLACE_MASK_HSW (3 << 21) |
/* Note that pre-gen3 does not support interlaced display directly. Panel |
* fitting must be disabled on pre-ilk for interlaced. */ |
#define PIPECONF_PROGRESSIVE (0 << 21) |
2726,7 → 2711,7 |
#define PIPE_12BPC (3 << 5) |
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) |
#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) |
#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF) |
#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) |
#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) |
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) |
3013,19 → 2998,12 |
#define DISPPLANE_GAMMA_ENABLE (1<<30) |
#define DISPPLANE_GAMMA_DISABLE 0 |
#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) |
#define DISPPLANE_YUV422 (0x0<<26) |
#define DISPPLANE_8BPP (0x2<<26) |
#define DISPPLANE_BGRA555 (0x3<<26) |
#define DISPPLANE_BGRX555 (0x4<<26) |
#define DISPPLANE_BGRX565 (0x5<<26) |
#define DISPPLANE_BGRX888 (0x6<<26) |
#define DISPPLANE_BGRA888 (0x7<<26) |
#define DISPPLANE_RGBX101010 (0x8<<26) |
#define DISPPLANE_RGBA101010 (0x9<<26) |
#define DISPPLANE_BGRX101010 (0xa<<26) |
#define DISPPLANE_RGBX161616 (0xc<<26) |
#define DISPPLANE_RGBX888 (0xe<<26) |
#define DISPPLANE_RGBA888 (0xf<<26) |
#define DISPPLANE_15_16BPP (0x4<<26) |
#define DISPPLANE_16BPP (0x5<<26) |
#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) |
#define DISPPLANE_32BPP (0x7<<26) |
#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) |
#define DISPPLANE_STEREO_ENABLE (1<<25) |
#define DISPPLANE_STEREO_DISABLE 0 |
#define DISPPLANE_SEL_PIPE_SHIFT 24 |
3046,8 → 3024,6 |
#define _DSPASIZE 0x70190 |
#define _DSPASURF 0x7019C /* 965+ only */ |
#define _DSPATILEOFF 0x701A4 /* 965+ only */ |
#define _DSPAOFFSET 0x701A4 /* HSW */ |
#define _DSPASURFLIVE 0x701AC |
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) |
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) |
3057,8 → 3033,6 |
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) |
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) |
#define DSPLINOFF(plane) DSPADDR(plane) |
#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET) |
#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE) |
/* Display/Sprite base address macros */ |
#define DISP_BASEADDR_MASK (0xfffff000) |
3104,8 → 3078,6 |
#define _DSPBSIZE 0x71190 |
#define _DSPBSURF 0x7119C |
#define _DSPBTILEOFF 0x711A4 |
#define _DSPBOFFSET 0x711A4 |
#define _DSPBSURFLIVE 0x711AC |
/* Sprite A control */ |
#define _DVSACNTR 0x72180 |
3171,7 → 3143,6 |
#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) |
#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) |
#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) |
#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) |
#define _SPRA_CTL 0x70280 |
#define SPRITE_ENABLE (1<<31) |
3206,8 → 3177,6 |
#define _SPRA_SURF 0x7029c |
#define _SPRA_KEYMAX 0x702a0 |
#define _SPRA_TILEOFF 0x702a4 |
#define _SPRA_OFFSET 0x702a4 |
#define _SPRA_SURFLIVE 0x702ac |
#define _SPRA_SCALE 0x70304 |
#define SPRITE_SCALE_ENABLE (1<<31) |
#define SPRITE_FILTER_MASK (3<<29) |
3228,8 → 3197,6 |
#define _SPRB_SURF 0x7129c |
#define _SPRB_KEYMAX 0x712a0 |
#define _SPRB_TILEOFF 0x712a4 |
#define _SPRB_OFFSET 0x712a4 |
#define _SPRB_SURFLIVE 0x712ac |
#define _SPRB_SCALE 0x71304 |
#define _SPRB_GAMC 0x71400 |
3243,10 → 3210,8 |
#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) |
#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) |
#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) |
#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) |
#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) |
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) |
#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) |
/* VBIOS regs */ |
#define VGACNTRL 0x71400 |
3281,6 → 3246,12 |
#define DISPLAY_PORT_PLL_BIOS_1 0x46010 |
#define DISPLAY_PORT_PLL_BIOS_2 0x46014 |
#define PCH_DSPCLK_GATE_D 0x42020 |
# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) |
# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) |
# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) |
# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) |
#define PCH_3DCGDIS0 0x46020 |
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) |
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) |
3330,14 → 3301,14 |
#define _PIPEB_LINK_M2 0x61048 |
#define _PIPEB_LINK_N2 0x6104c |
#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) |
#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) |
#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2) |
#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2) |
#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1) |
#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1) |
#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2) |
#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2) |
#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1) |
#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1) |
#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2) |
#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2) |
#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1) |
#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1) |
#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2) |
#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2) |
/* CPU panel fitter */ |
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ |
3344,8 → 3315,6 |
#define _PFA_CTL_1 0x68080 |
#define _PFB_CTL_1 0x68880 |
#define PF_ENABLE (1<<31) |
#define PF_PIPE_SEL_MASK_IVB (3<<29) |
#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29) |
#define PF_FILTER_MASK (3<<23) |
#define PF_FILTER_PROGRAMMED (0<<23) |
#define PF_FILTER_MED_3x3 (1<<23) |
3454,13 → 3423,15 |
#define ILK_HDCP_DISABLE (1<<25) |
#define ILK_eDP_A_DISABLE (1<<24) |
#define ILK_DESKTOP (1<<23) |
#define ILK_DSPCLK_GATE 0x42020 |
#define IVB_VRHUNIT_CLK_GATE (1<<28) |
#define ILK_DPARB_CLK_GATE (1<<5) |
#define ILK_DPFD_CLK_GATE (1<<7) |
#define ILK_DSPCLK_GATE_D 0x42020 |
#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) |
#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) |
#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) |
#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7) |
#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5) |
/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ |
#define ILK_CLK_FBC (1<<7) |
#define ILK_DPFC_DIS1 (1<<8) |
#define ILK_DPFC_DIS2 (1<<9) |
#define IVB_CHICKEN3 0x4200c |
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) |
3476,21 → 3447,14 |
#define GEN7_L3CNTLREG1 0xB01C |
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C |
#define GEN7_L3AGDIS (1<<19) |
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 |
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000 |
#define GEN7_L3SQCREG4 0xb034 |
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) |
/* WaCatErrorRejectionIssue */ |
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 |
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) |
#define HSW_FUSE_STRAP 0x42014 |
#define HSW_CDCLK_LIMIT (1 << 24) |
/* PCH */ |
/* south display engine interrupt: IBX */ |
3722,7 → 3686,7 |
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) |
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) |
#define VLV_VIDEO_DIP_CTL_A 0x60200 |
#define VLV_VIDEO_DIP_CTL_A 0x60220 |
#define VLV_VIDEO_DIP_DATA_A 0x60208 |
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 |
3831,25 → 3795,17 |
#define TRANS_6BPC (2<<5) |
#define TRANS_12BPC (3<<5) |
#define _TRANSA_CHICKEN1 0xf0060 |
#define _TRANSB_CHICKEN1 0xf1060 |
#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) |
#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4) |
#define _TRANSA_CHICKEN2 0xf0064 |
#define _TRANSB_CHICKEN2 0xf1064 |
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) |
#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) |
#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31) |
#define SOUTH_CHICKEN1 0xc2000 |
#define FDIA_PHASE_SYNC_SHIFT_OVR 19 |
#define FDIA_PHASE_SYNC_SHIFT_EN 18 |
#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) |
#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) |
#define FDI_BC_BIFURCATION_SELECT (1 << 12) |
#define SOUTH_CHICKEN2 0xc2004 |
#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13) |
#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12) |
#define DPLS_EDP_PPS_FIX_DIS (1<<0) |
#define _FDI_RXA_CHICKEN 0xc200c |
3860,7 → 3816,6 |
#define SOUTH_DSPCLK_GATE_D 0xc2020 |
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) |
#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) |
/* CPU: FDI_TX */ |
#define _FDI_TXA_CTL 0x60100 |
3922,7 → 3877,6 |
#define FDI_FS_ERRC_ENABLE (1<<27) |
#define FDI_FE_ERRC_ENABLE (1<<26) |
#define FDI_DP_PORT_WIDTH_X8 (7<<19) |
#define FDI_RX_POLARITY_REVERSED_LPT (1<<16) |
#define FDI_8BPC (0<<16) |
#define FDI_10BPC (1<<16) |
#define FDI_6BPC (2<<16) |
3949,19 → 3903,14 |
#define _FDI_RXA_MISC 0xf0010 |
#define _FDI_RXB_MISC 0xf1010 |
#define FDI_RX_PWRDN_LANE1_MASK (3<<26) |
#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26) |
#define FDI_RX_PWRDN_LANE0_MASK (3<<24) |
#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24) |
#define _FDI_RXA_TUSIZE1 0xf0030 |
#define _FDI_RXA_TUSIZE2 0xf0038 |
#define _FDI_RXB_TUSIZE1 0xf1030 |
#define _FDI_RXB_TUSIZE2 0xf1038 |
#define FDI_RX_TP1_TO_TP2_48 (2<<20) |
#define FDI_RX_TP1_TO_TP2_64 (3<<20) |
#define FDI_RX_FDI_DELAY_90 (0x90<<0) |
#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) |
#define _FDI_RXA_TUSIZE1 0xf0030 |
#define _FDI_RXA_TUSIZE2 0xf0038 |
#define _FDI_RXB_TUSIZE1 0xf1030 |
#define _FDI_RXB_TUSIZE2 0xf1038 |
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) |
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) |
4054,11 → 4003,6 |
#define PANEL_LIGHT_ON_DELAY_SHIFT 0 |
#define PCH_PP_OFF_DELAYS 0xc720c |
#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30) |
#define PANEL_POWER_PORT_LVDS (0 << 30) |
#define PANEL_POWER_PORT_DP_A (1 << 30) |
#define PANEL_POWER_PORT_DP_C (2 << 30) |
#define PANEL_POWER_PORT_DP_D (3 << 30) |
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) |
#define PANEL_POWER_DOWN_DELAY_SHIFT 16 |
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) |
4106,7 → 4050,7 |
#define TRANS_DP_CTL_A 0xe0300 |
#define TRANS_DP_CTL_B 0xe1300 |
#define TRANS_DP_CTL_C 0xe2300 |
#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B) |
#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000) |
#define TRANS_DP_OUTPUT_ENABLE (1<<31) |
#define TRANS_DP_PORT_SEL_B (0<<29) |
#define TRANS_DP_PORT_SEL_C (1<<29) |
4164,8 → 4108,6 |
#define FORCEWAKE_ACK_HSW 0x130044 |
#define FORCEWAKE_ACK 0x130090 |
#define FORCEWAKE_MT 0xa188 /* multi-threaded */ |
#define FORCEWAKE_KERNEL 0x1 |
#define FORCEWAKE_USER 0x2 |
#define FORCEWAKE_MT_ACK 0x130040 |
#define ECOBUS 0xa180 |
#define FORCEWAKE_MT_ENABLE (1<<5) |
4278,10 → 4220,6 |
#define GEN6_READ_OC_PARAMS 0xc |
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 |
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 |
#define GEN6_PCODE_WRITE_RC6VIDS 0x4 |
#define GEN6_PCODE_READ_RC6VIDS 0x5 |
#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0 |
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0) |
#define GEN6_PCODE_DATA 0x138128 |
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 |
4313,15 → 4251,6 |
#define GEN7_L3LOG_BASE 0xB070 |
#define GEN7_L3LOG_SIZE 0x80 |
#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ |
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 |
#define GEN7_MAX_PS_THREAD_DEP (8<<12) |
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) |
#define GEN7_ROW_CHICKEN2 0xe4f4 |
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4 |
#define DOP_CLOCK_GATING_DISABLE (1<<0) |
#define G4X_AUD_VID_DID 0x62020 |
#define INTEL_AUDIO_DEVCL 0x808629FB |
#define INTEL_AUDIO_DEVBLC 0x80862801 |
4451,39 → 4380,33 |
#define HSW_PWR_WELL_CTL6 0x45414 |
/* Per-pipe DDI Function Control */ |
#define TRANS_DDI_FUNC_CTL_A 0x60400 |
#define TRANS_DDI_FUNC_CTL_B 0x61400 |
#define TRANS_DDI_FUNC_CTL_C 0x62400 |
#define TRANS_DDI_FUNC_CTL_EDP 0x6F400 |
#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \ |
TRANS_DDI_FUNC_CTL_B) |
#define TRANS_DDI_FUNC_ENABLE (1<<31) |
#define PIPE_DDI_FUNC_CTL_A 0x60400 |
#define PIPE_DDI_FUNC_CTL_B 0x61400 |
#define PIPE_DDI_FUNC_CTL_C 0x62400 |
#define PIPE_DDI_FUNC_CTL_EDP 0x6F400 |
#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \ |
PIPE_DDI_FUNC_CTL_B) |
#define PIPE_DDI_FUNC_ENABLE (1<<31) |
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ |
#define TRANS_DDI_PORT_MASK (7<<28) |
#define TRANS_DDI_SELECT_PORT(x) ((x)<<28) |
#define TRANS_DDI_PORT_NONE (0<<28) |
#define TRANS_DDI_MODE_SELECT_MASK (7<<24) |
#define TRANS_DDI_MODE_SELECT_HDMI (0<<24) |
#define TRANS_DDI_MODE_SELECT_DVI (1<<24) |
#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24) |
#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24) |
#define TRANS_DDI_MODE_SELECT_FDI (4<<24) |
#define TRANS_DDI_BPC_MASK (7<<20) |
#define TRANS_DDI_BPC_8 (0<<20) |
#define TRANS_DDI_BPC_10 (1<<20) |
#define TRANS_DDI_BPC_6 (2<<20) |
#define TRANS_DDI_BPC_12 (3<<20) |
#define TRANS_DDI_PVSYNC (1<<17) |
#define TRANS_DDI_PHSYNC (1<<16) |
#define TRANS_DDI_EDP_INPUT_MASK (7<<12) |
#define TRANS_DDI_EDP_INPUT_A_ON (0<<12) |
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) |
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) |
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) |
#define TRANS_DDI_BFI_ENABLE (1<<4) |
#define TRANS_DDI_PORT_WIDTH_X1 (0<<1) |
#define TRANS_DDI_PORT_WIDTH_X2 (1<<1) |
#define TRANS_DDI_PORT_WIDTH_X4 (3<<1) |
#define PIPE_DDI_PORT_MASK (7<<28) |
#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) |
#define PIPE_DDI_MODE_SELECT_MASK (7<<24) |
#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) |
#define PIPE_DDI_MODE_SELECT_DVI (1<<24) |
#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) |
#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) |
#define PIPE_DDI_MODE_SELECT_FDI (4<<24) |
#define PIPE_DDI_BPC_MASK (7<<20) |
#define PIPE_DDI_BPC_8 (0<<20) |
#define PIPE_DDI_BPC_10 (1<<20) |
#define PIPE_DDI_BPC_6 (2<<20) |
#define PIPE_DDI_BPC_12 (3<<20) |
#define PIPE_DDI_PVSYNC (1<<17) |
#define PIPE_DDI_PHSYNC (1<<16) |
#define PIPE_DDI_BFI_ENABLE (1<<4) |
#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) |
#define PIPE_DDI_PORT_WIDTH_X2 (1<<1) |
#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) |
/* DisplayPort Transport Control */ |
#define DP_TP_CTL_A 0x64040 |
4497,16 → 4420,12 |
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) |
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) |
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) |
#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8) |
#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8) |
#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) |
#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7) |
/* DisplayPort Transport Status */ |
#define DP_TP_STATUS_A 0x64044 |
#define DP_TP_STATUS_B 0x64144 |
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) |
#define DP_TP_STATUS_IDLE_DONE (1<<25) |
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) |
/* DDI Buffer Control */ |
4525,7 → 4444,6 |
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ |
#define DDI_BUF_EMP_MASK (0xf<<24) |
#define DDI_BUF_IS_IDLE (1<<7) |
#define DDI_A_4_LANES (1<<4) |
#define DDI_PORT_WIDTH_X1 (0<<1) |
#define DDI_PORT_WIDTH_X2 (1<<1) |
#define DDI_PORT_WIDTH_X4 (3<<1) |
4542,10 → 4460,6 |
#define SBI_ADDR 0xC6000 |
#define SBI_DATA 0xC6004 |
#define SBI_CTL_STAT 0xC6008 |
#define SBI_CTL_DEST_ICLK (0x0<<16) |
#define SBI_CTL_DEST_MPHY (0x1<<16) |
#define SBI_CTL_OP_IORD (0x2<<8) |
#define SBI_CTL_OP_IOWR (0x3<<8) |
#define SBI_CTL_OP_CRRD (0x6<<8) |
#define SBI_CTL_OP_CRWR (0x7<<8) |
#define SBI_RESPONSE_FAIL (0x1<<1) |
4563,12 → 4477,10 |
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) |
#define SBI_SSCCTL 0x020c |
#define SBI_SSCCTL6 0x060C |
#define SBI_SSCCTL_PATHALT (1<<3) |
#define SBI_SSCCTL_DISABLE (1<<0) |
#define SBI_SSCAUXDIV6 0x0610 |
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) |
#define SBI_DBUFF0 0x2a00 |
#define SBI_DBUFF0_ENABLE (1<<0) |
/* LPT PIXCLK_GATE */ |
#define PIXCLK_GATE 0xC6020 |
4578,8 → 4490,8 |
/* SPLL */ |
#define SPLL_CTL 0x46020 |
#define SPLL_PLL_ENABLE (1<<31) |
#define SPLL_PLL_SSC (1<<28) |
#define SPLL_PLL_NON_SSC (2<<28) |
#define SPLL_PLL_SCC (1<<28) |
#define SPLL_PLL_NON_SCC (2<<28) |
#define SPLL_PLL_FREQ_810MHz (0<<26) |
#define SPLL_PLL_FREQ_1350MHz (1<<26) |
4588,7 → 4500,7 |
#define WRPLL_CTL2 0x46060 |
#define WRPLL_PLL_ENABLE (1<<31) |
#define WRPLL_PLL_SELECT_SSC (0x01<<28) |
#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28) |
#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) |
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) |
/* WRPLL divider programming */ |
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) |
4605,36 → 4517,21 |
#define PORT_CLK_SEL_SPLL (3<<29) |
#define PORT_CLK_SEL_WRPLL1 (4<<29) |
#define PORT_CLK_SEL_WRPLL2 (5<<29) |
#define PORT_CLK_SEL_NONE (7<<29) |
/* Transcoder clock selection */ |
#define TRANS_CLK_SEL_A 0x46140 |
#define TRANS_CLK_SEL_B 0x46144 |
#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) |
/* For each transcoder, we need to select the corresponding port clock */ |
#define TRANS_CLK_SEL_DISABLED (0x0<<29) |
#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) |
/* Pipe clock selection */ |
#define PIPE_CLK_SEL_A 0x46140 |
#define PIPE_CLK_SEL_B 0x46144 |
#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B) |
/* For each pipe, we need to select the corresponding port clock */ |
#define PIPE_CLK_SEL_DISABLED (0x0<<29) |
#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) |
#define _TRANSA_MSA_MISC 0x60410 |
#define _TRANSB_MSA_MISC 0x61410 |
#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \ |
_TRANSB_MSA_MISC) |
#define TRANS_MSA_SYNC_CLK (1<<0) |
#define TRANS_MSA_6_BPC (0<<5) |
#define TRANS_MSA_8_BPC (1<<5) |
#define TRANS_MSA_10_BPC (2<<5) |
#define TRANS_MSA_12_BPC (3<<5) |
#define TRANS_MSA_16_BPC (4<<5) |
/* LCPLL Control */ |
#define LCPLL_CTL 0x130040 |
#define LCPLL_PLL_DISABLE (1<<31) |
#define LCPLL_PLL_LOCK (1<<30) |
#define LCPLL_CLK_FREQ_MASK (3<<26) |
#define LCPLL_CLK_FREQ_450 (0<<26) |
#define LCPLL_CD_CLOCK_DISABLE (1<<25) |
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23) |
#define LCPLL_CD_SOURCE_FCLK (1<<21) |
/* Pipe WM_LINETIME - watermark line time */ |
#define PIPE_WM_LINETIME_A 0x45270 |
/drivers/video/drm/i915/i915_trace.h |
---|
20,7 → 20,5 |
#define trace_i915_gem_request_wait_end(a, b) |
#define trace_i915_gem_request_complete(a, b) |
#define trace_intel_gpu_freq_change(a) |
#define trace_i915_reg_rw(a, b, c, d) |
#define trace_i915_ring_wait_begin(a) |
#endif |
/drivers/video/drm/i915/intel_ddi.c |
---|
58,26 → 58,6 |
0x00FFFFFF, 0x00040006 /* HDMI parameters */ |
}; |
static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) |
{ |
struct drm_encoder *encoder = &intel_encoder->base; |
int type = intel_encoder->type; |
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || |
type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) { |
struct intel_digital_port *intel_dig_port = |
enc_to_dig_port(encoder); |
return intel_dig_port->port; |
} else if (type == INTEL_OUTPUT_ANALOG) { |
return PORT_E; |
} else { |
DRM_ERROR("Invalid DDI encoder type %d\n", type); |
BUG(); |
} |
} |
/* On Haswell, DDI port buffers must be programmed with correct values |
* in advance. The buffer values are different for FDI and DP modes, |
* but the HDMI/DVI fields are shared among those. So we program the DDI |
138,20 → 118,7 |
DDI_BUF_EMP_800MV_3_5DB_HSW |
}; |
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, |
enum port port) |
{ |
uint32_t reg = DDI_BUF_CTL(port); |
int i; |
for (i = 0; i < 8; i++) { |
udelay(1); |
if (I915_READ(reg) & DDI_BUF_IS_IDLE) |
return; |
} |
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port)); |
} |
/* Starting with Haswell, different DDI ports can work in FDI mode for |
* connection to the PCH-located connectors. For this, it is necessary to train |
* both the DDI port and PCH receiver for the desired DDI buffer settings. |
166,36 → 133,25 |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
u32 temp, i, rx_ctl_val; |
int pipe = intel_crtc->pipe; |
u32 reg, temp, i; |
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the |
* mode set "sequence for CRT port" document: |
* - TP1 to TP2 time with the default value |
* - FDI delay to 90h |
*/ |
I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) | |
FDI_RX_PWRDN_LANE0_VAL(2) | |
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); |
/* Configure CPU PLL, wait for warmup */ |
I915_WRITE(SPLL_CTL, |
SPLL_PLL_ENABLE | |
SPLL_PLL_FREQ_1350MHz | |
SPLL_PLL_SCC); |
/* Enable the PCH Receiver FDI PLL */ |
rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE | |
((intel_crtc->fdi_lanes - 1) << 19); |
if (dev_priv->fdi_rx_polarity_reversed) |
rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT; |
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); |
POSTING_READ(_FDI_RXA_CTL); |
udelay(220); |
/* Use SPLL to drive the output when in FDI mode */ |
I915_WRITE(PORT_CLK_SEL(PORT_E), |
PORT_CLK_SEL_SPLL); |
I915_WRITE(PIPE_CLK_SEL(pipe), |
PIPE_CLK_SEL_PORT(PORT_E)); |
/* Switch from Rawclk to PCDclk */ |
rx_ctl_val |= FDI_PCDCLK; |
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); |
udelay(20); |
/* Configure Port Clock Select */ |
I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel); |
/* Start the training iterating through available voltages and emphasis, |
* testing each value twice. */ |
for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) { |
/* Start the training iterating through available voltages and emphasis */ |
for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) { |
/* Configure DP_TP_CTL with auto-training */ |
I915_WRITE(DP_TP_CTL(PORT_E), |
DP_TP_CTL_FDI_AUTOTRAIN | |
204,37 → 160,41 |
DP_TP_CTL_ENABLE); |
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ |
temp = I915_READ(DDI_BUF_CTL(PORT_E)); |
temp = (temp & ~DDI_BUF_EMP_MASK); |
I915_WRITE(DDI_BUF_CTL(PORT_E), |
temp | |
DDI_BUF_CTL_ENABLE | |
((intel_crtc->fdi_lanes - 1) << 1) | |
hsw_ddi_buf_ctl_values[i / 2]); |
POSTING_READ(DDI_BUF_CTL(PORT_E)); |
DDI_PORT_WIDTH_X2 | |
hsw_ddi_buf_ctl_values[i]); |
udelay(600); |
/* Program PCH FDI Receiver TU */ |
I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64)); |
/* We need to program FDI_RX_MISC with the default TP1 to TP2 |
* values before enabling the receiver, and configure the delay |
* for the FDI timing generator to 90h. Luckily, all the other |
* bits are supposed to be zeroed, so we can write those values |
* directly. |
*/ |
I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 | |
FDI_RX_FDI_DELAY_90); |
/* Enable PCH FDI Receiver with auto-training */ |
rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; |
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); |
POSTING_READ(_FDI_RXA_CTL); |
/* Enable CPU FDI Receiver with auto-training */ |
reg = FDI_RX_CTL(pipe); |
I915_WRITE(reg, |
I915_READ(reg) | |
FDI_LINK_TRAIN_AUTO | |
FDI_RX_ENABLE | |
FDI_LINK_TRAIN_PATTERN_1_CPT | |
FDI_RX_ENHANCE_FRAME_ENABLE | |
FDI_PORT_WIDTH_2X_LPT | |
FDI_RX_PLL_ENABLE); |
POSTING_READ(reg); |
udelay(100); |
/* Wait for FDI receiver lane calibration */ |
udelay(30); |
/* Unset FDI_RX_MISC pwrdn lanes */ |
temp = I915_READ(_FDI_RXA_MISC); |
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); |
I915_WRITE(_FDI_RXA_MISC, temp); |
POSTING_READ(_FDI_RXA_MISC); |
/* Wait for FDI auto training time */ |
udelay(5); |
temp = I915_READ(DP_TP_STATUS(PORT_E)); |
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { |
DRM_DEBUG_KMS("FDI link training done on step %d\n", i); |
DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i); |
/* Enable normal pixel sending for FDI */ |
I915_WRITE(DP_TP_CTL(PORT_E), |
243,36 → 203,60 |
DP_TP_CTL_ENHANCED_FRAME_ENABLE | |
DP_TP_CTL_ENABLE); |
return; |
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */ |
temp = I915_READ(DDI_FUNC_CTL(pipe)); |
temp &= ~PIPE_DDI_PORT_MASK; |
temp |= PIPE_DDI_SELECT_PORT(PORT_E) | |
PIPE_DDI_MODE_SELECT_FDI | |
PIPE_DDI_FUNC_ENABLE | |
PIPE_DDI_PORT_WIDTH_X2; |
I915_WRITE(DDI_FUNC_CTL(pipe), |
temp); |
break; |
} else { |
DRM_ERROR("Error training BUF_CTL %d\n", i); |
/* Disable DP_TP_CTL and FDI_RX_CTL) and retry */ |
I915_WRITE(DP_TP_CTL(PORT_E), |
I915_READ(DP_TP_CTL(PORT_E)) & |
~DP_TP_CTL_ENABLE); |
I915_WRITE(FDI_RX_CTL(pipe), |
I915_READ(FDI_RX_CTL(pipe)) & |
~FDI_RX_PLL_ENABLE); |
continue; |
} |
} |
temp = I915_READ(DDI_BUF_CTL(PORT_E)); |
temp &= ~DDI_BUF_CTL_ENABLE; |
I915_WRITE(DDI_BUF_CTL(PORT_E), temp); |
POSTING_READ(DDI_BUF_CTL(PORT_E)); |
DRM_DEBUG_KMS("FDI train done.\n"); |
} |
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */ |
temp = I915_READ(DP_TP_CTL(PORT_E)); |
temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); |
temp |= DP_TP_CTL_LINK_TRAIN_PAT1; |
I915_WRITE(DP_TP_CTL(PORT_E), temp); |
POSTING_READ(DP_TP_CTL(PORT_E)); |
/* For DDI connections, it is possible to support different outputs over the |
* same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by |
* the time the output is detected what exactly is on the other end of it. This |
* function aims at providing support for this detection and proper output |
* configuration. |
*/ |
void intel_ddi_init(struct drm_device *dev, enum port port) |
{ |
/* For now, we don't do any proper output detection and assume that we |
* handle HDMI only */ |
intel_wait_ddi_buf_idle(dev_priv, PORT_E); |
rx_ctl_val &= ~FDI_RX_ENABLE; |
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); |
POSTING_READ(_FDI_RXA_CTL); |
/* Reset FDI_RX_MISC pwrdn lanes */ |
temp = I915_READ(_FDI_RXA_MISC); |
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); |
temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); |
I915_WRITE(_FDI_RXA_MISC, temp); |
POSTING_READ(_FDI_RXA_MISC); |
switch(port){ |
case PORT_A: |
/* We don't handle eDP and DP yet */ |
DRM_DEBUG_DRIVER("Found digital output on DDI port A\n"); |
break; |
/* Assume that the ports B, C and D are working in HDMI mode for now */ |
case PORT_B: |
case PORT_C: |
case PORT_D: |
intel_hdmi_init(dev, DDI_BUF_CTL(port), port); |
break; |
default: |
DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", |
port); |
break; |
} |
DRM_ERROR("FDI link training failed!\n"); |
} |
/* WRPLL clock dividers */ |
661,854 → 645,175 |
{298000, 2, 21, 19}, |
}; |
static void intel_ddi_mode_set(struct drm_encoder *encoder, |
void intel_ddi_mode_set(struct drm_encoder *encoder, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
struct drm_device *dev = encoder->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_crtc *crtc = encoder->crtc; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_encoder *intel_encoder = to_intel_encoder(encoder); |
int port = intel_ddi_get_encoder_port(intel_encoder); |
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
int port = intel_hdmi->ddi_port; |
int pipe = intel_crtc->pipe; |
int type = intel_encoder->type; |
int p, n2, r2; |
u32 temp, i; |
DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", |
port_name(port), pipe_name(pipe)); |
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; |
switch (intel_dp->lane_count) { |
case 1: |
intel_dp->DP |= DDI_PORT_WIDTH_X1; |
break; |
case 2: |
intel_dp->DP |= DDI_PORT_WIDTH_X2; |
break; |
case 4: |
intel_dp->DP |= DDI_PORT_WIDTH_X4; |
break; |
default: |
intel_dp->DP |= DDI_PORT_WIDTH_X4; |
WARN(1, "Unexpected DP lane count %d\n", |
intel_dp->lane_count); |
break; |
} |
if (intel_dp->has_audio) { |
DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n", |
pipe_name(intel_crtc->pipe)); |
/* write eld */ |
DRM_DEBUG_DRIVER("DP audio: write eld information\n"); |
intel_write_eld(encoder, adjusted_mode); |
} |
intel_dp_init_link_config(intel_dp); |
} else if (type == INTEL_OUTPUT_HDMI) { |
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
if (intel_hdmi->has_audio) { |
/* Proper support for digital audio needs a new logic |
* and a new set of registers, so we leave it for future |
* patch bombing. |
/* On Haswell, we need to enable the clocks and prepare DDI function to |
* work in HDMI mode for this pipe. |
*/ |
DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", |
pipe_name(intel_crtc->pipe)); |
DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); |
/* write eld */ |
DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); |
intel_write_eld(encoder, adjusted_mode); |
} |
intel_hdmi->set_infoframes(encoder, adjusted_mode); |
} |
} |
static struct intel_encoder * |
intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_encoder *intel_encoder, *ret = NULL; |
int num_encoders = 0; |
for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
ret = intel_encoder; |
num_encoders++; |
} |
if (num_encoders != 1) |
WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders, |
intel_crtc->pipe); |
BUG_ON(ret == NULL); |
return ret; |
} |
/*
 * Drop this CRTC's reference on its DDI PLL and disable the PLL once the
 * last user is gone.
 *
 * Decrements the shared refcount matching intel_crtc->ddi_pll_sel; when a
 * refcount hits zero the PLL enable bit is cleared and the write flushed
 * with a posting read.  LCPLL-based selections (used by DP) take no
 * refcount, hence have no case here.  Finally the CRTC's selection is reset
 * to PORT_CLK_SEL_NONE.
 */
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t val;

	switch (intel_crtc->ddi_pll_sel) {
	case PORT_CLK_SEL_SPLL:
		plls->spll_refcount--;
		if (plls->spll_refcount == 0) {
			DRM_DEBUG_KMS("Disabling SPLL\n");
			val = I915_READ(SPLL_CTL);
			/* Refcounting says the PLL should still be on here. */
			WARN_ON(!(val & SPLL_PLL_ENABLE));
			I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
			POSTING_READ(SPLL_CTL);
		}
		break;
	case PORT_CLK_SEL_WRPLL1:
		plls->wrpll1_refcount--;
		if (plls->wrpll1_refcount == 0) {
			DRM_DEBUG_KMS("Disabling WRPLL 1\n");
			val = I915_READ(WRPLL_CTL1);
			WARN_ON(!(val & WRPLL_PLL_ENABLE));
			I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
			POSTING_READ(WRPLL_CTL1);
		}
		break;
	case PORT_CLK_SEL_WRPLL2:
		plls->wrpll2_refcount--;
		if (plls->wrpll2_refcount == 0) {
			DRM_DEBUG_KMS("Disabling WRPLL 2\n");
			val = I915_READ(WRPLL_CTL2);
			WARN_ON(!(val & WRPLL_PLL_ENABLE));
			I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
			POSTING_READ(WRPLL_CTL2);
		}
		break;
	}

	/* A negative refcount means a put without a matching get. */
	WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
	WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
	WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");

	/* This CRTC no longer references any PLL. */
	intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
}
static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2) |
{ |
u32 i; |
for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) |
if (clock <= wrpll_tmds_clock_table[i].clock) |
if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock) |
break; |
if (i == ARRAY_SIZE(wrpll_tmds_clock_table)) |
i--; |
*p = wrpll_tmds_clock_table[i].p; |
*n2 = wrpll_tmds_clock_table[i].n2; |
*r2 = wrpll_tmds_clock_table[i].r2; |
p = wrpll_tmds_clock_table[i].p; |
n2 = wrpll_tmds_clock_table[i].n2; |
r2 = wrpll_tmds_clock_table[i].r2; |
if (wrpll_tmds_clock_table[i].clock != clock) |
if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock) |
DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n", |
wrpll_tmds_clock_table[i].clock, clock); |
wrpll_tmds_clock_table[i].clock, crtc->mode.clock); |
DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", |
clock, *p, *n2, *r2); |
} |
crtc->mode.clock, p, n2, r2); |
bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock) |
{ |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); |
struct drm_encoder *encoder = &intel_encoder->base; |
struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
struct intel_ddi_plls *plls = &dev_priv->ddi_plls; |
int type = intel_encoder->type; |
enum pipe pipe = intel_crtc->pipe; |
uint32_t reg, val; |
/* Enable LCPLL if disabled */ |
temp = I915_READ(LCPLL_CTL); |
if (temp & LCPLL_PLL_DISABLE) |
I915_WRITE(LCPLL_CTL, |
temp & ~LCPLL_PLL_DISABLE); |
/* TODO: reuse PLLs when possible (compare values) */ |
/* Configure WR PLL 1, program the correct divider values for |
* the desired frequency and wait for warmup */ |
I915_WRITE(WRPLL_CTL1, |
WRPLL_PLL_ENABLE | |
WRPLL_PLL_SELECT_LCPLL_2700 | |
WRPLL_DIVIDER_REFERENCE(r2) | |
WRPLL_DIVIDER_FEEDBACK(n2) | |
WRPLL_DIVIDER_POST(p)); |
intel_ddi_put_crtc_pll(crtc); |
udelay(20); |
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
/* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use |
* this port for connection. |
*/ |
I915_WRITE(PORT_CLK_SEL(port), |
PORT_CLK_SEL_WRPLL1); |
I915_WRITE(PIPE_CLK_SEL(pipe), |
PIPE_CLK_SEL_PORT(port)); |
switch (intel_dp->link_bw) { |
case DP_LINK_BW_1_62: |
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; |
break; |
case DP_LINK_BW_2_7: |
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350; |
break; |
case DP_LINK_BW_5_4: |
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; |
break; |
default: |
DRM_ERROR("Link bandwidth %d unsupported\n", |
intel_dp->link_bw); |
return false; |
} |
/* We don't need to turn any PLL on because we'll use LCPLL. */ |
return true; |
} else if (type == INTEL_OUTPUT_HDMI) { |
int p, n2, r2; |
if (plls->wrpll1_refcount == 0) { |
DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n", |
pipe_name(pipe)); |
plls->wrpll1_refcount++; |
reg = WRPLL_CTL1; |
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1; |
} else if (plls->wrpll2_refcount == 0) { |
DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n", |
pipe_name(pipe)); |
plls->wrpll2_refcount++; |
reg = WRPLL_CTL2; |
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2; |
} else { |
DRM_ERROR("No WRPLLs available!\n"); |
return false; |
} |
WARN(I915_READ(reg) & WRPLL_PLL_ENABLE, |
"WRPLL already enabled\n"); |
intel_ddi_calculate_wrpll(clock, &p, &n2, &r2); |
val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | |
WRPLL_DIVIDER_POST(p); |
} else if (type == INTEL_OUTPUT_ANALOG) { |
if (plls->spll_refcount == 0) { |
DRM_DEBUG_KMS("Using SPLL on pipe %c\n", |
pipe_name(pipe)); |
plls->spll_refcount++; |
reg = SPLL_CTL; |
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL; |
} |
WARN(I915_READ(reg) & SPLL_PLL_ENABLE, |
"SPLL already enabled\n"); |
val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; |
} else { |
WARN(1, "Invalid DDI encoder type %d\n", type); |
return false; |
} |
I915_WRITE(reg, val); |
udelay(20); |
return true; |
} |
if (intel_hdmi->has_audio) { |
/* Proper support for digital audio needs a new logic and a new set |
* of registers, so we leave it for future patch bombing. |
*/ |
DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", |
pipe_name(intel_crtc->pipe)); |
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) |
{ |
struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
int type = intel_encoder->type; |
uint32_t temp; |
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
temp = TRANS_MSA_SYNC_CLK; |
switch (intel_crtc->bpp) { |
case 18: |
temp |= TRANS_MSA_6_BPC; |
break; |
case 24: |
temp |= TRANS_MSA_8_BPC; |
break; |
case 30: |
temp |= TRANS_MSA_10_BPC; |
break; |
case 36: |
temp |= TRANS_MSA_12_BPC; |
break; |
default: |
temp |= TRANS_MSA_8_BPC; |
WARN(1, "%d bpp unsupported by DDI function\n", |
intel_crtc->bpp); |
/* write eld */ |
DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); |
intel_write_eld(encoder, adjusted_mode); |
} |
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); |
} |
} |
void intel_ddi_enable_pipe_func(struct drm_crtc *crtc) |
{ |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); |
struct drm_encoder *encoder = &intel_encoder->base; |
struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
enum pipe pipe = intel_crtc->pipe; |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
enum port port = intel_ddi_get_encoder_port(intel_encoder); |
int type = intel_encoder->type; |
uint32_t temp; |
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ |
temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port); |
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ |
temp = TRANS_DDI_FUNC_ENABLE; |
temp |= TRANS_DDI_SELECT_PORT(port); |
switch (intel_crtc->bpp) { |
case 18: |
temp |= TRANS_DDI_BPC_6; |
temp |= PIPE_DDI_BPC_6; |
break; |
case 24: |
temp |= TRANS_DDI_BPC_8; |
temp |= PIPE_DDI_BPC_8; |
break; |
case 30: |
temp |= TRANS_DDI_BPC_10; |
temp |= PIPE_DDI_BPC_10; |
break; |
case 36: |
temp |= TRANS_DDI_BPC_12; |
temp |= PIPE_DDI_BPC_12; |
break; |
default: |
WARN(1, "%d bpp unsupported by transcoder DDI function\n", |
WARN(1, "%d bpp unsupported by pipe DDI function\n", |
intel_crtc->bpp); |
} |
if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) |
temp |= TRANS_DDI_PVSYNC; |
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) |
temp |= TRANS_DDI_PHSYNC; |
if (cpu_transcoder == TRANSCODER_EDP) { |
switch (pipe) { |
case PIPE_A: |
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; |
break; |
case PIPE_B: |
temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; |
break; |
case PIPE_C: |
temp |= TRANS_DDI_EDP_INPUT_C_ONOFF; |
break; |
default: |
BUG(); |
break; |
} |
} |
if (type == INTEL_OUTPUT_HDMI) { |
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
if (intel_hdmi->has_hdmi_sink) |
temp |= TRANS_DDI_MODE_SELECT_HDMI; |
temp |= PIPE_DDI_MODE_SELECT_HDMI; |
else |
temp |= TRANS_DDI_MODE_SELECT_DVI; |
temp |= PIPE_DDI_MODE_SELECT_DVI; |
} else if (type == INTEL_OUTPUT_ANALOG) { |
temp |= TRANS_DDI_MODE_SELECT_FDI; |
temp |= (intel_crtc->fdi_lanes - 1) << 1; |
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
temp |= PIPE_DDI_PVSYNC; |
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
temp |= PIPE_DDI_PHSYNC; |
} else if (type == INTEL_OUTPUT_DISPLAYPORT || |
type == INTEL_OUTPUT_EDP) { |
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
I915_WRITE(DDI_FUNC_CTL(pipe), temp); |
temp |= TRANS_DDI_MODE_SELECT_DP_SST; |
switch (intel_dp->lane_count) { |
case 1: |
temp |= TRANS_DDI_PORT_WIDTH_X1; |
break; |
case 2: |
temp |= TRANS_DDI_PORT_WIDTH_X2; |
break; |
case 4: |
temp |= TRANS_DDI_PORT_WIDTH_X4; |
break; |
default: |
temp |= TRANS_DDI_PORT_WIDTH_X4; |
WARN(1, "Unsupported lane count %d\n", |
intel_dp->lane_count); |
intel_hdmi->set_infoframes(encoder, adjusted_mode); |
} |
} else { |
WARN(1, "Invalid encoder type %d for pipe %d\n", |
intel_encoder->type, pipe); |
} |
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); |
} |
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, |
enum transcoder cpu_transcoder) |
{ |
uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); |
uint32_t val = I915_READ(reg); |
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK); |
val |= TRANS_DDI_PORT_NONE; |
I915_WRITE(reg, val); |
} |
/*
 * Report whether this connector is the one currently driven by its encoder.
 *
 * Returns false when the encoder itself is off.  Otherwise reads the DDI
 * function control of the CPU transcoder feeding the pipe (port A always
 * uses the eDP transcoder) and matches its mode-select field against the
 * connector type.
 */
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
	struct drm_device *dev = intel_connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	int type = intel_connector->base.connector_type;
	enum port port = intel_ddi_get_encoder_port(intel_encoder);
	enum pipe pipe = 0;
	enum transcoder cpu_transcoder;
	uint32_t tmp;

	if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
		return false;

	if (port == PORT_A)
		cpu_transcoder = TRANSCODER_EDP;
	else
		cpu_transcoder = pipe;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));

	switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
	case TRANS_DDI_MODE_SELECT_HDMI:
	case TRANS_DDI_MODE_SELECT_DVI:
		return (type == DRM_MODE_CONNECTOR_HDMIA);

	case TRANS_DDI_MODE_SELECT_DP_SST:
		if (type == DRM_MODE_CONNECTOR_eDP)
			return true;
		/* fallthrough: SST mode also matches an external DP connector */
	case TRANS_DDI_MODE_SELECT_DP_MST:
		return (type == DRM_MODE_CONNECTOR_DisplayPort);

	case TRANS_DDI_MODE_SELECT_FDI:
		return (type == DRM_MODE_CONNECTOR_VGA);

	default:
		return false;
	}
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, |
enum pipe *pipe) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
enum port port = intel_ddi_get_encoder_port(encoder); |
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
u32 tmp; |
int i; |
tmp = I915_READ(DDI_BUF_CTL(port)); |
tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port)); |
if (!(tmp & DDI_BUF_CTL_ENABLE)) |
return false; |
if (port == PORT_A) { |
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); |
for_each_pipe(i) { |
tmp = I915_READ(DDI_FUNC_CTL(i)); |
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { |
case TRANS_DDI_EDP_INPUT_A_ON: |
case TRANS_DDI_EDP_INPUT_A_ONOFF: |
*pipe = PIPE_A; |
break; |
case TRANS_DDI_EDP_INPUT_B_ONOFF: |
*pipe = PIPE_B; |
break; |
case TRANS_DDI_EDP_INPUT_C_ONOFF: |
*pipe = PIPE_C; |
break; |
} |
return true; |
} else { |
for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) { |
tmp = I915_READ(TRANS_DDI_FUNC_CTL(i)); |
if ((tmp & TRANS_DDI_PORT_MASK) |
== TRANS_DDI_SELECT_PORT(port)) { |
if ((tmp & PIPE_DDI_PORT_MASK) |
== PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) { |
*pipe = i; |
return true; |
} |
} |
} |
DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port); |
DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port); |
return true; |
} |
static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv, |
enum pipe pipe) |
void intel_enable_ddi(struct intel_encoder *encoder) |
{ |
uint32_t temp, ret; |
enum port port; |
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
pipe); |
int i; |
if (cpu_transcoder == TRANSCODER_EDP) { |
port = PORT_A; |
} else { |
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); |
temp &= TRANS_DDI_PORT_MASK; |
for (i = PORT_B; i <= PORT_E; i++) |
if (temp == TRANS_DDI_SELECT_PORT(i)) |
port = i; |
} |
ret = I915_READ(PORT_CLK_SEL(port)); |
DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n", |
pipe_name(pipe), port_name(port), ret); |
return ret; |
} |
void intel_ddi_setup_hw_pll_state(struct drm_device *dev) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
enum pipe pipe; |
struct intel_crtc *intel_crtc; |
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
int port = intel_hdmi->ddi_port; |
u32 temp; |
for_each_pipe(pipe) { |
intel_crtc = |
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
temp = I915_READ(DDI_BUF_CTL(port)); |
temp |= DDI_BUF_CTL_ENABLE; |
if (!intel_crtc->active) |
continue; |
intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, |
pipe); |
switch (intel_crtc->ddi_pll_sel) { |
case PORT_CLK_SEL_SPLL: |
dev_priv->ddi_plls.spll_refcount++; |
break; |
case PORT_CLK_SEL_WRPLL1: |
dev_priv->ddi_plls.wrpll1_refcount++; |
break; |
case PORT_CLK_SEL_WRPLL2: |
dev_priv->ddi_plls.wrpll2_refcount++; |
break; |
} |
} |
} |
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) |
{ |
struct drm_crtc *crtc = &intel_crtc->base; |
struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); |
enum port port = intel_ddi_get_encoder_port(intel_encoder); |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
if (cpu_transcoder != TRANSCODER_EDP) |
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), |
TRANS_CLK_SEL_PORT(port)); |
} |
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) |
{ |
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
if (cpu_transcoder != TRANSCODER_EDP) |
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), |
TRANS_CLK_SEL_DISABLED); |
} |
/*
 * Encoder pre_enable hook: runs before the pipe/transcoder comes up.
 *
 * For eDP, power the panel up first (VDD is forced on around the panel-on
 * sequence).  Then route the PLL chosen at mode_set time to the port via
 * PORT_CLK_SEL, and for DP/eDP wake the sink and run link training.
 */
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum port port = intel_ddi_get_encoder_port(intel_encoder);
	int type = intel_encoder->type;

	if (type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		ironlake_edp_panel_vdd_on(intel_dp);
		ironlake_edp_panel_on(intel_dp);
		/* NOTE(review): 'true' appears to synchronize the VDD-off
		 * with the panel power sequencer — confirm against
		 * ironlake_edp_panel_vdd_off(). */
		ironlake_edp_panel_vdd_off(intel_dp, true);
	}

	/* mode_set must have selected a PLL for this CRTC already. */
	WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
	I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);

	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}
/*
 * Encoder post_disable hook: runs after the pipe has been shut down.
 *
 * Disables the DDI buffer if it is on, resets DP_TP_CTL to training
 * pattern 1 with the transport disabled, and waits for buffer idle only
 * when the buffer was actually turned off here.  For eDP the panel is then
 * powered off, and finally the port is detached from its clock source.
 */
static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
	enum port port = intel_ddi_get_encoder_port(intel_encoder);
	int type = intel_encoder->type;
	uint32_t val;
	bool wait = false;

	val = I915_READ(DDI_BUF_CTL(port));
	if (val & DDI_BUF_CTL_ENABLE) {
		val &= ~DDI_BUF_CTL_ENABLE;
		I915_WRITE(DDI_BUF_CTL(port), val);
		/* Only wait for idle if we actually disabled the buffer. */
		wait = true;
	}

	val = I915_READ(DP_TP_CTL(port));
	val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
	val |= DP_TP_CTL_LINK_TRAIN_PAT1;
	I915_WRITE(DP_TP_CTL(port), val);

	if (wait)
		intel_wait_ddi_buf_idle(dev_priv, port);

	if (type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		/* VDD is forced on around the panel-off sequence. */
		ironlake_edp_panel_vdd_on(intel_dp);
		ironlake_edp_panel_off(intel_dp);
	}

	I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
static void intel_enable_ddi(struct intel_encoder *intel_encoder) |
{ |
struct drm_encoder *encoder = &intel_encoder->base; |
struct drm_device *dev = encoder->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
enum port port = intel_ddi_get_encoder_port(intel_encoder); |
int type = intel_encoder->type; |
if (type == INTEL_OUTPUT_HDMI) { |
/* In HDMI/DVI mode, the port width, and swing/emphasis values |
* are ignored so nothing special needs to be done besides |
* enabling the port. |
/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width, |
* and swing/emphasis values are ignored so nothing special needs |
* to be done besides enabling the port. |
*/ |
I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE); |
} else if (type == INTEL_OUTPUT_EDP) { |
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
ironlake_edp_backlight_on(intel_dp); |
I915_WRITE(DDI_BUF_CTL(port), temp); |
} |
} |
static void intel_disable_ddi(struct intel_encoder *intel_encoder) |
void intel_disable_ddi(struct intel_encoder *encoder) |
{ |
struct drm_encoder *encoder = &intel_encoder->base; |
int type = intel_encoder->type; |
if (type == INTEL_OUTPUT_EDP) { |
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
ironlake_edp_backlight_off(intel_dp); |
} |
} |
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) |
{ |
if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) |
return 450; |
else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) == |
LCPLL_CLK_FREQ_450) |
return 450; |
else if (IS_ULT(dev_priv->dev)) |
return 338; |
else |
return 540; |
} |
void intel_ddi_pll_init(struct drm_device *dev) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t val = I915_READ(LCPLL_CTL); |
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
int port = intel_hdmi->ddi_port; |
u32 temp; |
/* The LCPLL register should be turned on by the BIOS. For now let's |
* just check its state and print errors in case something is wrong. |
* Don't even try to turn it on. |
*/ |
temp = I915_READ(DDI_BUF_CTL(port)); |
temp &= ~DDI_BUF_CTL_ENABLE; |
DRM_DEBUG_KMS("CDCLK running at %dMHz\n", |
intel_ddi_get_cdclk_freq(dev_priv)); |
if (val & LCPLL_CD_SOURCE_FCLK) |
DRM_ERROR("CDCLK source is not LCPLL\n"); |
if (val & LCPLL_PLL_DISABLE) |
DRM_ERROR("LCPLL is disabled\n"); |
I915_WRITE(DDI_BUF_CTL(port), temp); |
} |
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) |
{ |
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
struct intel_dp *intel_dp = &intel_dig_port->dp; |
struct drm_i915_private *dev_priv = encoder->dev->dev_private; |
enum port port = intel_dig_port->port; |
bool wait; |
uint32_t val; |
if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { |
val = I915_READ(DDI_BUF_CTL(port)); |
if (val & DDI_BUF_CTL_ENABLE) { |
val &= ~DDI_BUF_CTL_ENABLE; |
I915_WRITE(DDI_BUF_CTL(port), val); |
wait = true; |
} |
val = I915_READ(DP_TP_CTL(port)); |
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); |
val |= DP_TP_CTL_LINK_TRAIN_PAT1; |
I915_WRITE(DP_TP_CTL(port), val); |
POSTING_READ(DP_TP_CTL(port)); |
if (wait) |
intel_wait_ddi_buf_idle(dev_priv, port); |
} |
val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | |
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; |
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) |
val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; |
I915_WRITE(DP_TP_CTL(port), val); |
POSTING_READ(DP_TP_CTL(port)); |
intel_dp->DP |= DDI_BUF_CTL_ENABLE; |
I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP); |
POSTING_READ(DDI_BUF_CTL(port)); |
udelay(600); |
} |
/*
 * Tear down the FDI link on the CPU side (only FDI RX A is touched here).
 *
 * Order matters: first run the encoder post_disable to shut the port down,
 * then disable FDI RX, reset the RX lane power-down values, stop PCDCLK,
 * and finally turn off the FDI RX PLL.
 */
void intel_ddi_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
	uint32_t val;

	intel_ddi_post_disable(intel_encoder);

	val = I915_READ(_FDI_RXA_CTL);
	val &= ~FDI_RX_ENABLE;
	I915_WRITE(_FDI_RXA_CTL, val);

	/* Reset both RX lane power-down fields to value 2. */
	val = I915_READ(_FDI_RXA_MISC);
	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
	I915_WRITE(_FDI_RXA_MISC, val);

	val = I915_READ(_FDI_RXA_CTL);
	val &= ~FDI_PCDCLK;
	I915_WRITE(_FDI_RXA_CTL, val);

	val = I915_READ(_FDI_RXA_CTL);
	val &= ~FDI_RX_PLL_ENABLE;
	I915_WRITE(_FDI_RXA_CTL, val);
}
static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder) |
{ |
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
int type = intel_encoder->type; |
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) |
intel_dp_check_link_status(intel_dp); |
} |
/*
 * Encoder ->destroy hook shared by all DDI outputs: HDMI needs no extra
 * teardown, so the DP destroy path (which frees the encoder) covers both.
 */
static void intel_ddi_destroy(struct drm_encoder *encoder)
{
	/* HDMI has nothing special to destroy, so we can go with this. */
	intel_dp_encoder_destroy(encoder);
}
static bool intel_ddi_mode_fixup(struct drm_encoder *encoder, |
const struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
struct intel_encoder *intel_encoder = to_intel_encoder(encoder); |
int type = intel_encoder->type; |
WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n"); |
if (type == INTEL_OUTPUT_HDMI) |
return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode); |
else |
return intel_dp_mode_fixup(encoder, mode, adjusted_mode); |
} |
/* drm_encoder core hooks shared by all DDI outputs. */
static const struct drm_encoder_funcs intel_ddi_funcs = {
	.destroy = intel_ddi_destroy,
};

/* Legacy drm encoder helper hooks; enable/disable themselves are driven
 * through the intel_encoder callbacks set up in intel_ddi_init(). */
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
	.mode_fixup = intel_ddi_mode_fixup,
	.mode_set = intel_ddi_mode_set,
	.disable = intel_encoder_noop,
};
void intel_ddi_init(struct drm_device *dev, enum port port) |
{ |
struct intel_digital_port *intel_dig_port; |
struct intel_encoder *intel_encoder; |
struct drm_encoder *encoder; |
struct intel_connector *hdmi_connector = NULL; |
struct intel_connector *dp_connector = NULL; |
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); |
if (!intel_dig_port) |
return; |
dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
if (!dp_connector) { |
kfree(intel_dig_port); |
return; |
} |
if (port != PORT_A) { |
hdmi_connector = kzalloc(sizeof(struct intel_connector), |
GFP_KERNEL); |
if (!hdmi_connector) { |
kfree(dp_connector); |
kfree(intel_dig_port); |
return; |
} |
} |
intel_encoder = &intel_dig_port->base; |
encoder = &intel_encoder->base; |
drm_encoder_init(dev, encoder, &intel_ddi_funcs, |
DRM_MODE_ENCODER_TMDS); |
drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs); |
intel_encoder->enable = intel_enable_ddi; |
intel_encoder->pre_enable = intel_ddi_pre_enable; |
intel_encoder->disable = intel_disable_ddi; |
intel_encoder->post_disable = intel_ddi_post_disable; |
intel_encoder->get_hw_state = intel_ddi_get_hw_state; |
intel_dig_port->port = port; |
if (hdmi_connector) |
intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port); |
else |
intel_dig_port->hdmi.sdvox_reg = 0; |
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); |
intel_encoder->type = INTEL_OUTPUT_UNKNOWN; |
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
intel_encoder->cloneable = false; |
intel_encoder->hot_plug = intel_ddi_hot_plug; |
if (hdmi_connector) |
intel_hdmi_init_connector(intel_dig_port, hdmi_connector); |
intel_dp_init_connector(intel_dig_port, dp_connector); |
} |
/drivers/video/drm/i915/intel_dp.c |
---|
36,6 → 36,8 |
#include <drm/i915_drm.h> |
#include "i915_drv.h" |
#define DP_RECEIVER_CAP_SIZE 0xf |
#define DP_LINK_STATUS_SIZE 6 |
#define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
/** |
47,9 → 49,7 |
*/ |
static bool is_edp(struct intel_dp *intel_dp) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
return intel_dig_port->base.type == INTEL_OUTPUT_EDP; |
return intel_dp->base.type == INTEL_OUTPUT_EDP; |
} |
/** |
76,16 → 76,15 |
return is_edp(intel_dp) && !is_pch_edp(intel_dp); |
} |
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) |
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
return intel_dig_port->base.base.dev; |
return container_of(encoder, struct intel_dp, base.base); |
} |
static struct intel_dp *intel_attached_dp(struct drm_connector *connector) |
{ |
return enc_to_intel_dp(&intel_attached_encoder(connector)->base); |
return container_of(intel_attached_encoder(connector), |
struct intel_dp, base); |
} |
/** |
107,6 → 106,8 |
return is_pch_edp(intel_dp); |
} |
static void intel_dp_start_link_train(struct intel_dp *intel_dp); |
static void intel_dp_complete_link_train(struct intel_dp *intel_dp); |
static void intel_dp_link_down(struct intel_dp *intel_dp); |
void |
113,10 → 114,13 |
intel_edp_link_config(struct intel_encoder *intel_encoder, |
int *lane_num, int *link_bw) |
{ |
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); |
*lane_num = intel_dp->lane_count; |
*link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); |
if (intel_dp->link_bw == DP_LINK_BW_1_62) |
*link_bw = 162000; |
else if (intel_dp->link_bw == DP_LINK_BW_2_7) |
*link_bw = 270000; |
} |
int |
123,16 → 127,28 |
intel_edp_target_clock(struct intel_encoder *intel_encoder, |
struct drm_display_mode *mode) |
{ |
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
struct intel_connector *intel_connector = intel_dp->attached_connector; |
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); |
if (intel_connector->panel.fixed_mode) |
return intel_connector->panel.fixed_mode->clock; |
if (intel_dp->panel_fixed_mode) |
return intel_dp->panel_fixed_mode->clock; |
else |
return mode->clock; |
} |
static int |
intel_dp_max_lane_count(struct intel_dp *intel_dp) |
{ |
int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; |
switch (max_lane_count) { |
case 1: case 2: case 4: |
break; |
default: |
max_lane_count = 4; |
} |
return max_lane_count; |
} |
static int |
intel_dp_max_link_bw(struct intel_dp *intel_dp) |
{ |
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; |
192,7 → 208,7 |
bool adjust_mode) |
{ |
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); |
int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); |
int max_lanes = intel_dp_max_lane_count(intel_dp); |
int max_rate, mode_rate; |
mode_rate = intel_dp_link_required(mode->clock, 24); |
218,14 → 234,12 |
struct drm_display_mode *mode) |
{ |
struct intel_dp *intel_dp = intel_attached_dp(connector); |
struct intel_connector *intel_connector = to_intel_connector(connector); |
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; |
if (is_edp(intel_dp) && fixed_mode) { |
if (mode->hdisplay > fixed_mode->hdisplay) |
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { |
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) |
return MODE_PANEL; |
if (mode->vdisplay > fixed_mode->vdisplay) |
if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) |
return MODE_PANEL; |
} |
271,10 → 285,6 |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t clkcfg; |
/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ |
if (IS_VALLEYVIEW(dev)) |
return 200; |
clkcfg = I915_READ(CLKCFG); |
switch (clkcfg & CLKCFG_FSB_MASK) { |
case CLKCFG_FSB_400: |
300,7 → 310,7 |
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; |
308,7 → 318,7 |
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; |
317,7 → 327,7 |
static void |
intel_dp_check_edp(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (!is_edp(intel_dp)) |
336,8 → 346,7 |
uint8_t *recv, int recv_size) |
{ |
uint32_t output_reg = intel_dp->output_reg; |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t ch_ctl = output_reg + 0x10; |
uint32_t ch_data = ch_ctl + 4; |
347,29 → 356,6 |
uint32_t aux_clock_divider; |
int try, precharge; |
if (IS_HASWELL(dev)) { |
switch (intel_dig_port->port) { |
case PORT_A: |
ch_ctl = DPA_AUX_CH_CTL; |
ch_data = DPA_AUX_CH_DATA1; |
break; |
case PORT_B: |
ch_ctl = PCH_DPB_AUX_CH_CTL; |
ch_data = PCH_DPB_AUX_CH_DATA1; |
break; |
case PORT_C: |
ch_ctl = PCH_DPC_AUX_CH_CTL; |
ch_data = PCH_DPC_AUX_CH_DATA1; |
break; |
case PORT_D: |
ch_ctl = PCH_DPD_AUX_CH_CTL; |
ch_data = PCH_DPD_AUX_CH_DATA1; |
break; |
default: |
BUG(); |
} |
} |
intel_dp_check_edp(intel_dp); |
/* The clock divider is based off the hrawclk, |
* and would like to run at 2MHz. So, take the |
379,16 → 365,12 |
* clock divider. |
*/ |
if (is_cpu_edp(intel_dp)) { |
if (IS_HASWELL(dev)) |
aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; |
else if (IS_VALLEYVIEW(dev)) |
aux_clock_divider = 100; |
else if (IS_GEN6(dev) || IS_GEN7(dev)) |
if (IS_GEN6(dev) || IS_GEN7(dev)) |
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ |
else |
aux_clock_divider = 225; /* eDP input clock at 450Mhz */ |
} else if (HAS_PCH_SPLIT(dev)) |
aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); |
aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */ |
else |
aux_clock_divider = intel_hrawclk(dev) / 2; |
660,6 → 642,9 |
return -EREMOTEIO; |
} |
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); |
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); |
static int |
intel_dp_i2c_init(struct intel_dp *intel_dp, |
struct intel_connector *intel_connector, const char *name) |
685,7 → 670,7 |
return ret; |
} |
bool |
static bool |
intel_dp_mode_fixup(struct drm_encoder *encoder, |
const struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
692,18 → 677,15 |
{ |
struct drm_device *dev = encoder->dev; |
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
struct intel_connector *intel_connector = intel_dp->attached_connector; |
int lane_count, clock; |
int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); |
int max_lane_count = intel_dp_max_lane_count(intel_dp); |
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; |
int bpp, mode_rate; |
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { |
intel_fixed_panel_mode(intel_connector->panel.fixed_mode, |
adjusted_mode); |
intel_pch_panel_fitting(dev, |
intel_connector->panel.fitting_mode, |
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { |
intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); |
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, |
mode, adjusted_mode); |
} |
780,23 → 762,21 |
struct drm_display_mode *adjusted_mode) |
{ |
struct drm_device *dev = crtc->dev; |
struct intel_encoder *intel_encoder; |
struct intel_dp *intel_dp; |
struct intel_encoder *encoder; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
int lane_count = 4; |
struct intel_dp_m_n m_n; |
int pipe = intel_crtc->pipe; |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
/* |
* Find the lane count in the intel_encoder private |
*/ |
for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
intel_dp = enc_to_intel_dp(&intel_encoder->base); |
for_each_encoder_on_crtc(dev, crtc, encoder) { |
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
intel_encoder->type == INTEL_OUTPUT_EDP) |
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || |
intel_dp->base.type == INTEL_OUTPUT_EDP) |
{ |
lane_count = intel_dp->lane_count; |
break; |
811,25 → 791,17 |
intel_dp_compute_m_n(intel_crtc->bpp, lane_count, |
mode->clock, adjusted_mode->clock, &m_n); |
if (IS_HASWELL(dev)) { |
I915_WRITE(PIPE_DATA_M1(cpu_transcoder), |
TU_SIZE(m_n.tu) | m_n.gmch_m); |
I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); |
I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); |
I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); |
} else if (HAS_PCH_SPLIT(dev)) { |
I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
if (HAS_PCH_SPLIT(dev)) { |
I915_WRITE(TRANSDATA_M1(pipe), |
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | |
m_n.gmch_m); |
I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); |
I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); |
I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); |
} else if (IS_VALLEYVIEW(dev)) { |
I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); |
I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); |
I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); |
} else { |
I915_WRITE(PIPE_GMCH_DATA_M(pipe), |
TU_SIZE(m_n.tu) | m_n.gmch_m); |
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | |
m_n.gmch_m); |
I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); |
I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); |
I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); |
836,21 → 808,6 |
} |
} |
void intel_dp_init_link_config(struct intel_dp *intel_dp) |
{ |
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); |
intel_dp->link_configuration[0] = intel_dp->link_bw; |
intel_dp->link_configuration[1] = intel_dp->lane_count; |
intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; |
/* |
* Check for DPCD version > 1.1 and enhanced framing support |
*/ |
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { |
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
} |
} |
static void |
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
858,7 → 815,7 |
struct drm_device *dev = encoder->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
struct drm_crtc *crtc = encoder->crtc; |
struct drm_crtc *crtc = intel_dp->base.base.crtc; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
/* |
903,12 → 860,21 |
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; |
intel_write_eld(encoder, adjusted_mode); |
} |
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); |
intel_dp->link_configuration[0] = intel_dp->link_bw; |
intel_dp->link_configuration[1] = intel_dp->lane_count; |
intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; |
/* |
* Check for DPCD version > 1.1 and enhanced framing support |
*/ |
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { |
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
} |
intel_dp_init_link_config(intel_dp); |
/* Split out the IBX/CPU vs CPT settings */ |
if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { |
if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { |
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
intel_dp->DP |= DP_SYNC_HS_HIGH; |
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
965,7 → 931,7 |
u32 mask, |
u32 value) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", |
1012,9 → 978,9 |
return control; |
} |
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) |
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 pp; |
1053,7 → 1019,7 |
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 pp; |
1071,18 → 1037,8 |
} |
} |
static void ironlake_panel_vdd_work(struct work_struct *__work) |
{ |
// struct intel_dp *intel_dp = container_of(to_delayed_work(__work), |
// struct intel_dp, panel_vdd_work); |
// struct drm_device *dev = intel_dp_to_dev(intel_dp); |
// |
// mutex_lock(&dev->mode_config.mutex); |
// ironlake_panel_vdd_off_sync(intel_dp); |
// mutex_unlock(&dev->mode_config.mutex); |
} |
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) |
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) |
{ |
if (!is_edp(intel_dp)) |
return; |
1105,9 → 1061,9 |
} |
} |
void ironlake_edp_panel_on(struct intel_dp *intel_dp) |
static void ironlake_edp_panel_on(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 pp; |
1147,9 → 1103,9 |
} |
} |
void ironlake_edp_panel_off(struct intel_dp *intel_dp) |
static void ironlake_edp_panel_off(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 pp; |
1172,12 → 1128,10 |
ironlake_wait_panel_off(intel_dp); |
} |
void ironlake_edp_backlight_on(struct intel_dp *intel_dp) |
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe; |
u32 pp; |
if (!is_edp(intel_dp)) |
1195,13 → 1149,11 |
pp |= EDP_BLC_ENABLE; |
I915_WRITE(PCH_PP_CONTROL, pp); |
POSTING_READ(PCH_PP_CONTROL); |
intel_panel_enable_backlight(dev, pipe); |
} |
void ironlake_edp_backlight_off(struct intel_dp *intel_dp) |
static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 pp; |
1208,8 → 1160,6 |
if (!is_edp(intel_dp)) |
return; |
intel_panel_disable_backlight(dev); |
DRM_DEBUG_KMS("\n"); |
pp = ironlake_get_pp_control(dev_priv); |
pp &= ~EDP_BLC_ENABLE; |
1220,9 → 1170,8 |
static void ironlake_edp_pll_on(struct intel_dp *intel_dp) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
struct drm_device *dev = crtc->dev; |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_crtc *crtc = intel_dp->base.base.crtc; |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 dpa_ctl; |
1246,9 → 1195,8 |
static void ironlake_edp_pll_off(struct intel_dp *intel_dp) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
struct drm_device *dev = crtc->dev; |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_crtc *crtc = intel_dp->base.base.crtc; |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 dpa_ctl; |
1270,7 → 1218,7 |
} |
/* If the sink supports it, try to set the power state appropriately */ |
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) |
static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) |
{ |
int ret, i; |
1340,11 → 1288,10 |
return true; |
} |
} |
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", |
intel_dp->output_reg); |
} |
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg); |
return true; |
} |
1439,6 → 1386,38 |
DP_LINK_STATUS_SIZE); |
} |
static uint8_t |
intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], |
int r) |
{ |
return link_status[r - DP_LANE0_1_STATUS]; |
} |
static uint8_t |
intel_get_adjust_request_voltage(uint8_t adjust_request[2], |
int lane) |
{ |
int s = ((lane & 1) ? |
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : |
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); |
uint8_t l = adjust_request[lane>>1]; |
return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; |
} |
static uint8_t |
intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], |
int lane) |
{ |
int s = ((lane & 1) ? |
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : |
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); |
uint8_t l = adjust_request[lane>>1]; |
return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; |
} |
#if 0 |
static char *voltage_names[] = { |
"0.4V", "0.6V", "0.8V", "1.2V" |
1459,7 → 1438,7 |
static uint8_t |
intel_dp_voltage_max(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) |
return DP_TRAIN_VOLTAGE_SWING_800; |
1472,24 → 1451,12 |
static uint8_t |
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
if (IS_HASWELL(dev)) { |
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { |
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { |
case DP_TRAIN_VOLTAGE_SWING_400: |
return DP_TRAIN_PRE_EMPHASIS_9_5; |
case DP_TRAIN_VOLTAGE_SWING_600: |
return DP_TRAIN_PRE_EMPHASIS_6; |
case DP_TRAIN_VOLTAGE_SWING_800: |
return DP_TRAIN_PRE_EMPHASIS_3_5; |
case DP_TRAIN_VOLTAGE_SWING_1200: |
default: |
return DP_TRAIN_PRE_EMPHASIS_0; |
} |
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { |
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { |
case DP_TRAIN_VOLTAGE_SWING_400: |
return DP_TRAIN_PRE_EMPHASIS_6; |
case DP_TRAIN_VOLTAGE_SWING_600: |
case DP_TRAIN_VOLTAGE_SWING_800: |
return DP_TRAIN_PRE_EMPHASIS_3_5; |
1517,12 → 1484,13 |
uint8_t v = 0; |
uint8_t p = 0; |
int lane; |
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS); |
uint8_t voltage_max; |
uint8_t preemph_max; |
for (lane = 0; lane < intel_dp->lane_count; lane++) { |
uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); |
uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); |
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); |
uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); |
if (this_v > v) |
v = this_v; |
1639,88 → 1607,64 |
} |
} |
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ |
static uint32_t |
intel_dp_signal_levels_hsw(uint8_t train_set) |
static uint8_t |
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], |
int lane) |
{ |
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | |
DP_TRAIN_PRE_EMPHASIS_MASK); |
switch (signal_levels) { |
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: |
return DDI_BUF_EMP_400MV_0DB_HSW; |
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: |
return DDI_BUF_EMP_400MV_3_5DB_HSW; |
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: |
return DDI_BUF_EMP_400MV_6DB_HSW; |
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: |
return DDI_BUF_EMP_400MV_9_5DB_HSW; |
int s = (lane & 1) * 4; |
uint8_t l = link_status[lane>>1]; |
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: |
return DDI_BUF_EMP_600MV_0DB_HSW; |
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: |
return DDI_BUF_EMP_600MV_3_5DB_HSW; |
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: |
return DDI_BUF_EMP_600MV_6DB_HSW; |
return (l >> s) & 0xf; |
} |
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: |
return DDI_BUF_EMP_800MV_0DB_HSW; |
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: |
return DDI_BUF_EMP_800MV_3_5DB_HSW; |
default: |
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" |
"0x%x\n", signal_levels); |
return DDI_BUF_EMP_400MV_0DB_HSW; |
/* Check for clock recovery is done on all channels */ |
static bool |
intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) |
{ |
int lane; |
uint8_t lane_status; |
for (lane = 0; lane < lane_count; lane++) { |
lane_status = intel_get_lane_status(link_status, lane); |
if ((lane_status & DP_LANE_CR_DONE) == 0) |
return false; |
} |
return true; |
} |
/* Check to see if channel eq is done on all channels */ |
#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ |
DP_LANE_CHANNEL_EQ_DONE|\ |
DP_LANE_SYMBOL_LOCKED) |
static bool |
intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) |
{ |
uint8_t lane_align; |
uint8_t lane_status; |
int lane; |
lane_align = intel_dp_link_status(link_status, |
DP_LANE_ALIGN_STATUS_UPDATED); |
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) |
return false; |
for (lane = 0; lane < intel_dp->lane_count; lane++) { |
lane_status = intel_get_lane_status(link_status, lane); |
if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) |
return false; |
} |
return true; |
} |
static bool |
intel_dp_set_link_train(struct intel_dp *intel_dp, |
uint32_t dp_reg_value, |
uint8_t dp_train_pat) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
enum port port = intel_dig_port->port; |
int ret; |
uint32_t temp; |
if (IS_HASWELL(dev)) { |
temp = I915_READ(DP_TP_CTL(port)); |
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) |
temp |= DP_TP_CTL_SCRAMBLE_DISABLE; |
else |
temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; |
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; |
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
case DP_TRAINING_PATTERN_DISABLE: |
temp |= DP_TP_CTL_LINK_TRAIN_IDLE; |
I915_WRITE(DP_TP_CTL(port), temp); |
if (wait_for((I915_READ(DP_TP_STATUS(port)) & |
DP_TP_STATUS_IDLE_DONE), 1)) |
DRM_ERROR("Timed out waiting for DP idle patterns\n"); |
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; |
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; |
break; |
case DP_TRAINING_PATTERN_1: |
temp |= DP_TP_CTL_LINK_TRAIN_PAT1; |
break; |
case DP_TRAINING_PATTERN_2: |
temp |= DP_TP_CTL_LINK_TRAIN_PAT2; |
break; |
case DP_TRAINING_PATTERN_3: |
temp |= DP_TP_CTL_LINK_TRAIN_PAT3; |
break; |
} |
I915_WRITE(DP_TP_CTL(port), temp); |
} else if (HAS_PCH_CPT(dev) && |
(IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { |
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { |
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; |
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
1780,11 → 1724,10 |
} |
/* Enable corresponding port and start training pattern 1 */ |
void |
static void |
intel_dp_start_link_train(struct intel_dp *intel_dp) |
{ |
struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base; |
struct drm_device *dev = encoder->dev; |
struct drm_device *dev = intel_dp->base.base.dev; |
int i; |
uint8_t voltage; |
bool clock_recovery = false; |
1791,9 → 1734,6 |
int voltage_tries, loop_tries; |
uint32_t DP = intel_dp->DP; |
if (IS_HASWELL(dev)) |
intel_ddi_prepare_link_retrain(encoder); |
/* Write the link configuration data */ |
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
intel_dp->link_configuration, |
1811,11 → 1751,8 |
uint8_t link_status[DP_LINK_STATUS_SIZE]; |
uint32_t signal_levels; |
if (IS_HASWELL(dev)) { |
signal_levels = intel_dp_signal_levels_hsw( |
intel_dp->train_set[0]); |
DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; |
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { |
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { |
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); |
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; |
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { |
1823,24 → 1760,23 |
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
} else { |
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); |
DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels); |
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
} |
DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", |
signal_levels); |
/* Set training pattern 1 */ |
if (!intel_dp_set_link_train(intel_dp, DP, |
DP_TRAINING_PATTERN_1 | |
DP_LINK_SCRAMBLING_DISABLE)) |
break; |
/* Set training pattern 1 */ |
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); |
udelay(100); |
if (!intel_dp_get_link_status(intel_dp, link_status)) { |
DRM_ERROR("failed to get link status\n"); |
break; |
} |
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { |
if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { |
DRM_DEBUG_KMS("clock recovery OK\n"); |
clock_recovery = true; |
break; |
1879,10 → 1815,10 |
intel_dp->DP = DP; |
} |
void |
static void |
intel_dp_complete_link_train(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
bool channel_eq = false; |
int tries, cr_tries; |
uint32_t DP = intel_dp->DP; |
1902,10 → 1838,7 |
break; |
} |
if (IS_HASWELL(dev)) { |
signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]); |
DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; |
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { |
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { |
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); |
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; |
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { |
1922,18 → 1855,18 |
DP_LINK_SCRAMBLING_DISABLE)) |
break; |
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); |
udelay(400); |
if (!intel_dp_get_link_status(intel_dp, link_status)) |
break; |
/* Make sure clock is still ok */ |
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { |
if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { |
intel_dp_start_link_train(intel_dp); |
cr_tries++; |
continue; |
} |
if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { |
if (intel_channel_eq_ok(intel_dp, link_status)) { |
channel_eq = true; |
break; |
} |
1952,9 → 1885,6 |
++tries; |
} |
if (channel_eq) |
DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); |
intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); |
} |
1961,29 → 1891,10 |
static void |
intel_dp_link_down(struct intel_dp *intel_dp) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t DP = intel_dp->DP; |
/* |
* DDI code has a strict mode set sequence and we should try to respect |
* it, otherwise we might hang the machine in many different ways. So we |
* really should be disabling the port only on a complete crtc_disable |
* sequence. This function is just called under two conditions on DDI |
* code: |
* - Link train failed while doing crtc_enable, and on this case we |
* really should respect the mode set sequence and wait for a |
* crtc_disable. |
* - Someone turned the monitor off and intel_dp_check_link_status |
* called us. We don't need to disable the whole port on this case, so |
* when someone turns the monitor on again, |
* intel_ddi_prepare_link_retrain will take care of redoing the link |
* train. |
*/ |
if (IS_HASWELL(dev)) |
return; |
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) |
return; |
2002,7 → 1913,7 |
if (HAS_PCH_IBX(dev) && |
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { |
struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
struct drm_crtc *crtc = intel_dp->base.base.crtc; |
/* Hardware workaround: leaving our transcoder select |
* set to transcoder B while it's off will prevent the |
2103,7 → 2014,7 |
intel_dp_handle_test_request(struct intel_dp *intel_dp) |
{ |
/* NAK by default */ |
intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK); |
intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK); |
} |
/* |
2115,17 → 2026,16 |
* 4. Check link status on receipt of hot-plug interrupt |
*/ |
void |
static void |
intel_dp_check_link_status(struct intel_dp *intel_dp) |
{ |
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; |
u8 sink_irq_vector; |
u8 link_status[DP_LINK_STATUS_SIZE]; |
if (!intel_encoder->connectors_active) |
if (!intel_dp->base.connectors_active) |
return; |
if (WARN_ON(!intel_encoder->base.crtc)) |
if (WARN_ON(!intel_dp->base.base.crtc)) |
return; |
/* Try to read receiver status if the link appears to be up */ |
2154,9 → 2064,9 |
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); |
} |
if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { |
if (!intel_channel_eq_ok(intel_dp, link_status)) { |
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", |
drm_get_encoder_name(&intel_encoder->base)); |
drm_get_encoder_name(&intel_dp->base.base)); |
intel_dp_start_link_train(intel_dp); |
intel_dp_complete_link_train(intel_dp); |
} |
2205,12 → 2115,11 |
static enum drm_connector_status |
ironlake_dp_detect(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
enum drm_connector_status status; |
/* Can't disconnect eDP, but you can close the lid... */ |
if (is_edp(intel_dp)) { |
status = intel_panel_detect(dev); |
status = intel_panel_detect(intel_dp->base.base.dev); |
if (status == connector_status_unknown) |
status = connector_status_connected; |
return status; |
2222,7 → 2131,7 |
static enum drm_connector_status |
g4x_dp_detect(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t bit; |
2249,45 → 2158,44 |
static struct edid * |
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) |
{ |
struct intel_connector *intel_connector = to_intel_connector(connector); |
/* use cached edid if we have one */ |
if (intel_connector->edid) { |
struct intel_dp *intel_dp = intel_attached_dp(connector); |
struct edid *edid; |
int size; |
/* invalid edid */ |
if (IS_ERR(intel_connector->edid)) |
if (is_edp(intel_dp)) { |
if (!intel_dp->edid) |
return NULL; |
size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; |
size = (intel_dp->edid->extensions + 1) * EDID_LENGTH; |
edid = kmalloc(size, GFP_KERNEL); |
if (!edid) |
return NULL; |
memcpy(edid, intel_connector->edid, size); |
memcpy(edid, intel_dp->edid, size); |
return edid; |
} |
return drm_get_edid(connector, adapter); |
edid = drm_get_edid(connector, adapter); |
return edid; |
} |
static int |
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) |
{ |
struct intel_connector *intel_connector = to_intel_connector(connector); |
struct intel_dp *intel_dp = intel_attached_dp(connector); |
int ret; |
/* use cached edid if we have one */ |
if (intel_connector->edid) { |
/* invalid edid */ |
if (IS_ERR(intel_connector->edid)) |
return 0; |
return intel_connector_update_modes(connector, |
intel_connector->edid); |
if (is_edp(intel_dp)) { |
drm_mode_connector_update_edid_property(connector, |
intel_dp->edid); |
ret = drm_add_edid_modes(connector, intel_dp->edid); |
drm_edid_to_eld(connector, |
intel_dp->edid); |
return intel_dp->edid_mode_count; |
} |
return intel_ddc_get_modes(connector, adapter); |
ret = intel_ddc_get_modes(connector, adapter); |
return ret; |
} |
2301,12 → 2209,9 |
intel_dp_detect(struct drm_connector *connector, bool force) |
{ |
struct intel_dp *intel_dp = intel_attached_dp(connector); |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct intel_encoder *intel_encoder = &intel_dig_port->base; |
struct drm_device *dev = connector->dev; |
struct drm_device *dev = intel_dp->base.base.dev; |
enum drm_connector_status status; |
struct edid *edid = NULL; |
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; |
intel_dp->has_audio = false; |
2315,9 → 2220,10 |
else |
status = g4x_dp_detect(intel_dp); |
// hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), |
// 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); |
// DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); |
DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n", |
intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], |
intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5], |
intel_dp->dpcd[6], intel_dp->dpcd[7]); |
if (status != connector_status_connected) |
return status; |
2324,8 → 2230,9 |
intel_dp_probe_oui(intel_dp); |
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { |
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); |
/* |
if (intel_dp->force_audio) { |
intel_dp->has_audio = intel_dp->force_audio > 0; |
} else { |
edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
if (edid) { |
2333,9 → 2240,7 |
kfree(edid); |
} |
} |
if (intel_encoder->type != INTEL_OUTPUT_EDP) |
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
*/ |
return connector_status_connected; |
} |
2342,8 → 2247,8 |
static int intel_dp_get_modes(struct drm_connector *connector) |
{ |
struct intel_dp *intel_dp = intel_attached_dp(connector); |
struct intel_connector *intel_connector = to_intel_connector(connector); |
struct drm_device *dev = connector->dev; |
struct drm_device *dev = intel_dp->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
/* We should parse the EDID data and find out if it has an audio sink |
2350,15 → 2255,35 |
*/ |
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); |
if (ret) |
if (ret) { |
if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) { |
struct drm_display_mode *newmode; |
list_for_each_entry(newmode, &connector->probed_modes, |
head) { |
if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) { |
intel_dp->panel_fixed_mode = |
drm_mode_duplicate(dev, newmode); |
break; |
} |
} |
} |
return ret; |
} |
/* if eDP has no EDID, fall back to fixed mode */ |
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { |
/* if eDP has no EDID, try to use fixed panel mode from VBT */ |
if (is_edp(intel_dp)) { |
/* initialize panel mode from VBT if available for eDP */ |
if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) { |
intel_dp->panel_fixed_mode = |
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); |
if (intel_dp->panel_fixed_mode) { |
intel_dp->panel_fixed_mode->type |= |
DRM_MODE_TYPE_PREFERRED; |
} |
} |
if (intel_dp->panel_fixed_mode) { |
struct drm_display_mode *mode; |
mode = drm_mode_duplicate(dev, |
intel_connector->panel.fixed_mode); |
if (mode) { |
mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode); |
drm_mode_probed_add(connector, mode); |
return 1; |
} |
2366,22 → 2291,10 |
return 0; |
} |
static bool |
intel_dp_detect_audio(struct drm_connector *connector) |
{ |
struct intel_dp *intel_dp = intel_attached_dp(connector); |
struct edid *edid; |
bool has_audio = false; |
edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
if (edid) { |
has_audio = drm_detect_monitor_audio(edid); |
kfree(edid); |
} |
return has_audio; |
} |
static int |
intel_dp_set_property(struct drm_connector *connector, |
struct drm_property *property, |
2388,12 → 2301,10 |
uint64_t val) |
{ |
struct drm_i915_private *dev_priv = connector->dev->dev_private; |
struct intel_connector *intel_connector = to_intel_connector(connector); |
struct intel_encoder *intel_encoder = intel_attached_encoder(connector); |
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
struct intel_dp *intel_dp = intel_attached_dp(connector); |
int ret; |
ret = drm_object_property_set_value(&connector->base, property, val); |
ret = drm_connector_property_set_value(connector, property, val); |
if (ret) |
return ret; |
#if 0 |
2427,27 → 2338,11 |
} |
#endif |
if (is_edp(intel_dp) && |
property == connector->dev->mode_config.scaling_mode_property) { |
if (val == DRM_MODE_SCALE_NONE) { |
DRM_DEBUG_KMS("no scaling not supported\n"); |
return -EINVAL; |
} |
if (intel_connector->panel.fitting_mode == val) { |
/* the eDP scaling property is not changed */ |
return 0; |
} |
intel_connector->panel.fitting_mode = val; |
goto done; |
} |
return -EINVAL; |
done: |
if (intel_encoder->base.crtc) { |
struct drm_crtc *crtc = intel_encoder->base.crtc; |
if (intel_dp->base.base.crtc) { |
struct drm_crtc *crtc = intel_dp->base.base.crtc; |
intel_set_mode(crtc, &crtc->mode, |
crtc->x, crtc->y, crtc->fb); |
} |
2460,15 → 2355,9 |
{ |
struct drm_device *dev = connector->dev; |
struct intel_dp *intel_dp = intel_attached_dp(connector); |
struct intel_connector *intel_connector = to_intel_connector(connector); |
if (!IS_ERR_OR_NULL(intel_connector->edid)) |
kfree(intel_connector->edid); |
if (is_edp(intel_dp)) { |
if (is_edp(intel_dp)) |
intel_panel_destroy_backlight(dev); |
intel_panel_fini(&intel_connector->panel); |
} |
drm_sysfs_connector_remove(connector); |
drm_connector_cleanup(connector); |
2475,18 → 2364,17 |
kfree(connector); |
} |
void intel_dp_encoder_destroy(struct drm_encoder *encoder) |
static void intel_dp_encoder_destroy(struct drm_encoder *encoder) |
{ |
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
struct intel_dp *intel_dp = &intel_dig_port->dp; |
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
i2c_del_adapter(&intel_dp->adapter); |
// i2c_del_adapter(&intel_dp->adapter); |
drm_encoder_cleanup(encoder); |
if (is_edp(intel_dp)) { |
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
ironlake_panel_vdd_off_sync(intel_dp); |
} |
kfree(intel_dig_port); |
kfree(intel_dp); |
} |
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { |
2516,7 → 2404,7 |
static void |
intel_dp_hot_plug(struct intel_encoder *intel_encoder) |
{ |
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); |
intel_dp_check_link_status(intel_dp); |
} |
2526,14 → 2414,13 |
intel_trans_dp_port_sel(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
struct intel_encoder *intel_encoder; |
struct intel_dp *intel_dp; |
struct intel_encoder *encoder; |
for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
intel_dp = enc_to_intel_dp(&intel_encoder->base); |
for_each_encoder_on_crtc(dev, crtc, encoder) { |
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
intel_encoder->type == INTEL_OUTPUT_EDP) |
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || |
intel_dp->base.type == INTEL_OUTPUT_EDP) |
return intel_dp->output_reg; |
} |
2563,205 → 2450,79 |
static void |
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) |
{ |
struct intel_connector *intel_connector = to_intel_connector(connector); |
intel_attach_force_audio_property(connector); |
intel_attach_broadcast_rgb_property(connector); |
if (is_edp(intel_dp)) { |
drm_mode_create_scaling_mode_property(connector->dev); |
drm_object_attach_property( |
&connector->base, |
connector->dev->mode_config.scaling_mode_property, |
DRM_MODE_SCALE_ASPECT); |
intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; |
} |
} |
static void |
intel_dp_init_panel_power_sequencer(struct drm_device *dev, |
struct intel_dp *intel_dp, |
struct edp_power_seq *out) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct edp_power_seq cur, vbt, spec, final; |
u32 pp_on, pp_off, pp_div, pp; |
/* Workaround: Need to write PP_CONTROL with the unlock key as |
* the very first thing. */ |
pp = ironlake_get_pp_control(dev_priv); |
I915_WRITE(PCH_PP_CONTROL, pp); |
pp_on = I915_READ(PCH_PP_ON_DELAYS); |
pp_off = I915_READ(PCH_PP_OFF_DELAYS); |
pp_div = I915_READ(PCH_PP_DIVISOR); |
/* Pull timing values out of registers */ |
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> |
PANEL_POWER_UP_DELAY_SHIFT; |
cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> |
PANEL_LIGHT_ON_DELAY_SHIFT; |
cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> |
PANEL_LIGHT_OFF_DELAY_SHIFT; |
cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> |
PANEL_POWER_DOWN_DELAY_SHIFT; |
cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> |
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; |
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", |
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); |
vbt = dev_priv->edp.pps; |
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of |
* our hw here, which are all in 100usec. */ |
spec.t1_t3 = 210 * 10; |
spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ |
spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ |
spec.t10 = 500 * 10; |
/* This one is special and actually in units of 100ms, but zero |
* based in the hw (so we need to add 100 ms). But the sw vbt |
* table multiplies it with 1000 to make it in units of 100usec, |
* too. */ |
spec.t11_t12 = (510 + 100) * 10; |
DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", |
vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); |
/* Use the max of the register settings and vbt. If both are |
* unset, fall back to the spec limits. */ |
#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \ |
spec.field : \ |
max(cur.field, vbt.field)) |
assign_final(t1_t3); |
assign_final(t8); |
assign_final(t9); |
assign_final(t10); |
assign_final(t11_t12); |
#undef assign_final |
#define get_delay(field) (DIV_ROUND_UP(final.field, 10)) |
intel_dp->panel_power_up_delay = get_delay(t1_t3); |
intel_dp->backlight_on_delay = get_delay(t8); |
intel_dp->backlight_off_delay = get_delay(t9); |
intel_dp->panel_power_down_delay = get_delay(t10); |
intel_dp->panel_power_cycle_delay = get_delay(t11_t12); |
#undef get_delay |
DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", |
intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, |
intel_dp->panel_power_cycle_delay); |
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", |
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); |
if (out) |
*out = final; |
} |
static void |
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, |
struct intel_dp *intel_dp, |
struct edp_power_seq *seq) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 pp_on, pp_off, pp_div; |
/* And finally store the new values in the power sequencer. */ |
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | |
(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); |
pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | |
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); |
/* Compute the divisor for the pp clock, simply match the Bspec |
* formula. */ |
pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) |
<< PP_REFERENCE_DIVIDER_SHIFT; |
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) |
<< PANEL_POWER_CYCLE_DELAY_SHIFT); |
/* Haswell doesn't have any port selection bits for the panel |
* power sequencer any more. */ |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { |
if (is_cpu_edp(intel_dp)) |
pp_on |= PANEL_POWER_PORT_DP_A; |
else |
pp_on |= PANEL_POWER_PORT_DP_D; |
} |
I915_WRITE(PCH_PP_ON_DELAYS, pp_on); |
I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); |
I915_WRITE(PCH_PP_DIVISOR, pp_div); |
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", |
I915_READ(PCH_PP_ON_DELAYS), |
I915_READ(PCH_PP_OFF_DELAYS), |
I915_READ(PCH_PP_DIVISOR)); |
} |
void |
intel_dp_init_connector(struct intel_digital_port *intel_dig_port, |
struct intel_connector *intel_connector) |
intel_dp_init(struct drm_device *dev, int output_reg, enum port port) |
{ |
struct drm_connector *connector = &intel_connector->base; |
struct intel_dp *intel_dp = &intel_dig_port->dp; |
struct intel_encoder *intel_encoder = &intel_dig_port->base; |
struct drm_device *dev = intel_encoder->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_display_mode *fixed_mode = NULL; |
struct edp_power_seq power_seq = { 0 }; |
enum port port = intel_dig_port->port; |
struct drm_connector *connector; |
struct intel_dp *intel_dp; |
struct intel_encoder *intel_encoder; |
struct intel_connector *intel_connector; |
const char *name = NULL; |
int type; |
intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL); |
if (!intel_dp) |
return; |
intel_dp->output_reg = output_reg; |
intel_dp->port = port; |
/* Preserve the current hw state. */ |
intel_dp->DP = I915_READ(intel_dp->output_reg); |
intel_dp->attached_connector = intel_connector; |
if (HAS_PCH_SPLIT(dev) && port == PORT_D) |
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
if (!intel_connector) { |
kfree(intel_dp); |
return; |
} |
intel_encoder = &intel_dp->base; |
if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) |
if (intel_dpd_is_edp(dev)) |
intel_dp->is_pch_edp = true; |
/* |
* FIXME : We need to initialize built-in panels before external panels. |
* For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup |
*/ |
if (IS_VALLEYVIEW(dev) && port == PORT_C) { |
if (output_reg == DP_A || is_pch_edp(intel_dp)) { |
type = DRM_MODE_CONNECTOR_eDP; |
intel_encoder->type = INTEL_OUTPUT_EDP; |
} else if (port == PORT_A || is_pch_edp(intel_dp)) { |
type = DRM_MODE_CONNECTOR_eDP; |
intel_encoder->type = INTEL_OUTPUT_EDP; |
} else { |
/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for |
* DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't |
* rewrite it. |
*/ |
type = DRM_MODE_CONNECTOR_DisplayPort; |
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
} |
connector = &intel_connector->base; |
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); |
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
connector->polled = DRM_CONNECTOR_POLL_HPD; |
intel_encoder->cloneable = false; |
// INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, |
// ironlake_panel_vdd_work); |
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
connector->interlace_allowed = true; |
connector->doublescan_allowed = 0; |
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, |
ironlake_panel_vdd_work); |
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, |
DRM_MODE_ENCODER_TMDS); |
drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); |
intel_connector_attach_encoder(intel_connector, intel_encoder); |
drm_sysfs_connector_add(connector); |
if (IS_HASWELL(dev)) |
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; |
else |
intel_encoder->enable = intel_enable_dp; |
intel_encoder->pre_enable = intel_pre_enable_dp; |
intel_encoder->disable = intel_disable_dp; |
intel_encoder->post_disable = intel_post_disable_dp; |
intel_encoder->get_hw_state = intel_dp_get_hw_state; |
intel_connector->get_hw_state = intel_connector_get_hw_state; |
/* Set up the DDC bus. */ |
switch (port) { |
case PORT_A: |
2784,15 → 2545,66 |
break; |
} |
if (is_edp(intel_dp)) |
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); |
/* Cache some DPCD data in the eDP case */ |
if (is_edp(intel_dp)) { |
struct edp_power_seq cur, vbt; |
u32 pp_on, pp_off, pp_div; |
pp_on = I915_READ(PCH_PP_ON_DELAYS); |
pp_off = I915_READ(PCH_PP_OFF_DELAYS); |
pp_div = I915_READ(PCH_PP_DIVISOR); |
if (!pp_on || !pp_off || !pp_div) { |
DRM_INFO("bad panel power sequencing delays, disabling panel\n"); |
intel_dp_encoder_destroy(&intel_dp->base.base); |
intel_dp_destroy(&intel_connector->base); |
return; |
} |
/* Pull timing values out of registers */ |
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> |
PANEL_POWER_UP_DELAY_SHIFT; |
cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> |
PANEL_LIGHT_ON_DELAY_SHIFT; |
cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> |
PANEL_LIGHT_OFF_DELAY_SHIFT; |
cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> |
PANEL_POWER_DOWN_DELAY_SHIFT; |
cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> |
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; |
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", |
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); |
vbt = dev_priv->edp.pps; |
DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", |
vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); |
#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10) |
intel_dp->panel_power_up_delay = get_delay(t1_t3); |
intel_dp->backlight_on_delay = get_delay(t8); |
intel_dp->backlight_off_delay = get_delay(t9); |
intel_dp->panel_power_down_delay = get_delay(t10); |
intel_dp->panel_power_cycle_delay = get_delay(t11_t12); |
DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", |
intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, |
intel_dp->panel_power_cycle_delay); |
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", |
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); |
} |
intel_dp_i2c_init(intel_dp, intel_connector, name); |
/* Cache DPCD and EDID for edp. */ |
if (is_edp(intel_dp)) { |
bool ret; |
struct drm_display_mode *scan; |
struct edid *edid; |
ironlake_edp_panel_vdd_on(intel_dp); |
2807,51 → 2619,29 |
} else { |
/* if this fails, presume the device is a ghost */ |
DRM_INFO("failed to retrieve link info, disabling eDP\n"); |
intel_dp_encoder_destroy(&intel_encoder->base); |
intel_dp_destroy(connector); |
intel_dp_encoder_destroy(&intel_dp->base.base); |
intel_dp_destroy(&intel_connector->base); |
return; |
} |
/* We now know it's not a ghost, init power sequence regs. */ |
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, |
&power_seq); |
ironlake_edp_panel_vdd_on(intel_dp); |
edid = drm_get_edid(connector, &intel_dp->adapter); |
if (edid) { |
if (drm_add_edid_modes(connector, edid)) { |
drm_mode_connector_update_edid_property(connector, edid); |
drm_mode_connector_update_edid_property(connector, |
edid); |
intel_dp->edid_mode_count = |
drm_add_edid_modes(connector, edid); |
drm_edid_to_eld(connector, edid); |
} else { |
kfree(edid); |
edid = ERR_PTR(-EINVAL); |
intel_dp->edid = edid; |
} |
} else { |
edid = ERR_PTR(-ENOENT); |
ironlake_edp_panel_vdd_off(intel_dp, false); |
} |
intel_connector->edid = edid; |
/* prefer fixed mode from EDID if available */ |
list_for_each_entry(scan, &connector->probed_modes, head) { |
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { |
fixed_mode = drm_mode_duplicate(dev, scan); |
break; |
} |
} |
intel_encoder->hot_plug = intel_dp_hot_plug; |
/* fallback to VBT if available for eDP */ |
if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) { |
fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); |
if (fixed_mode) |
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; |
} |
ironlake_edp_panel_vdd_off(intel_dp, false); |
} |
if (is_edp(intel_dp)) { |
intel_panel_init(&intel_connector->panel, fixed_mode); |
intel_panel_setup_backlight(connector); |
dev_priv->int_edp_connector = connector; |
intel_panel_setup_backlight(dev); |
} |
intel_dp_add_properties(intel_dp, connector); |
2865,45 → 2655,3 |
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); |
} |
} |
void |
intel_dp_init(struct drm_device *dev, int output_reg, enum port port) |
{ |
struct intel_digital_port *intel_dig_port; |
struct intel_encoder *intel_encoder; |
struct drm_encoder *encoder; |
struct intel_connector *intel_connector; |
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); |
if (!intel_dig_port) |
return; |
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
if (!intel_connector) { |
kfree(intel_dig_port); |
return; |
} |
intel_encoder = &intel_dig_port->base; |
encoder = &intel_encoder->base; |
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, |
DRM_MODE_ENCODER_TMDS); |
drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); |
intel_encoder->enable = intel_enable_dp; |
intel_encoder->pre_enable = intel_pre_enable_dp; |
intel_encoder->disable = intel_disable_dp; |
intel_encoder->post_disable = intel_post_disable_dp; |
intel_encoder->get_hw_state = intel_dp_get_hw_state; |
intel_dig_port->port = port; |
intel_dig_port->dp.output_reg = output_reg; |
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
intel_encoder->cloneable = false; |
intel_encoder->hot_plug = intel_dp_hot_plug; |
intel_dp_init_connector(intel_dig_port, intel_connector); |
} |
/drivers/video/drm/i915/intel_drv.h |
---|
103,8 → 103,26 |
#define INTEL_OUTPUT_HDMI 6 |
#define INTEL_OUTPUT_DISPLAYPORT 7 |
#define INTEL_OUTPUT_EDP 8 |
#define INTEL_OUTPUT_UNKNOWN 9 |
/* Intel Pipe Clone Bit */ |
#define INTEL_HDMIB_CLONE_BIT 1 |
#define INTEL_HDMIC_CLONE_BIT 2 |
#define INTEL_HDMID_CLONE_BIT 3 |
#define INTEL_HDMIE_CLONE_BIT 4 |
#define INTEL_HDMIF_CLONE_BIT 5 |
#define INTEL_SDVO_NON_TV_CLONE_BIT 6 |
#define INTEL_SDVO_TV_CLONE_BIT 7 |
#define INTEL_SDVO_LVDS_CLONE_BIT 8 |
#define INTEL_ANALOG_CLONE_BIT 9 |
#define INTEL_TV_CLONE_BIT 10 |
#define INTEL_DP_B_CLONE_BIT 11 |
#define INTEL_DP_C_CLONE_BIT 12 |
#define INTEL_DP_D_CLONE_BIT 13 |
#define INTEL_LVDS_CLONE_BIT 14 |
#define INTEL_DVO_TMDS_CLONE_BIT 15 |
#define INTEL_DVO_LVDS_CLONE_BIT 16 |
#define INTEL_EDP_CLONE_BIT 17 |
#define INTEL_DVO_CHIP_NONE 0 |
#define INTEL_DVO_CHIP_LVDS 1 |
#define INTEL_DVO_CHIP_TMDS 2 |
173,11 → 191,6 |
int crtc_mask; |
}; |
struct intel_panel { |
struct drm_display_mode *fixed_mode; |
int fitting_mode; |
}; |
struct intel_connector { |
struct drm_connector base; |
/* |
194,12 → 207,6 |
/* Reads out the current hw, returning true if the connector is enabled |
* and active (i.e. dpms ON state). */ |
bool (*get_hw_state)(struct intel_connector *); |
/* Panel info for eDP and LVDS */ |
struct intel_panel panel; |
/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ |
struct edid *edid; |
}; |
struct intel_crtc { |
206,7 → 213,6 |
struct drm_crtc base; |
enum pipe pipe; |
enum plane plane; |
enum transcoder cpu_transcoder; |
u8 lut_r[256], lut_g[256], lut_b[256]; |
/* |
* Whether the crtc and the connected output pipeline is active. Implies |
220,8 → 226,6 |
struct intel_unpin_work *unpin_work; |
int fdi_lanes; |
atomic_t unpin_work_count; |
/* Display surface base address adjustement for pageflips. Note that on |
* gen4+ this only adjusts up to a tile, offsets within a tile are |
* handled in the hw itself (with the TILEOFF register). */ |
236,7 → 240,6 |
/* We can share PLLs across outputs if the timings match */ |
struct intel_pch_pll *pch_pll; |
uint32_t ddi_pll_sel; |
}; |
struct intel_plane { |
243,7 → 246,6 |
struct drm_plane base; |
enum pipe pipe; |
struct drm_i915_gem_object *obj; |
bool can_scale; |
int max_downscale; |
u32 lut_r[1024], lut_g[1024], lut_b[1024]; |
void (*update_plane)(struct drm_plane *plane, |
343,8 → 345,10 |
} __attribute__((packed)); |
struct intel_hdmi { |
struct intel_encoder base; |
u32 sdvox_reg; |
int ddc_bus; |
int ddi_port; |
uint32_t color_range; |
bool has_hdmi_sink; |
bool has_audio; |
355,15 → 359,18 |
struct drm_display_mode *adjusted_mode); |
}; |
#define DP_RECEIVER_CAP_SIZE 0xf |
#define DP_MAX_DOWNSTREAM_PORTS 0x10 |
#define DP_LINK_CONFIGURATION_SIZE 9 |
struct intel_dp { |
struct intel_encoder base; |
uint32_t output_reg; |
uint32_t DP; |
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; |
bool has_audio; |
enum hdmi_force_audio force_audio; |
enum port port; |
uint32_t color_range; |
uint8_t link_bw; |
uint8_t lane_count; |
378,18 → 385,13 |
int panel_power_cycle_delay; |
int backlight_on_delay; |
int backlight_off_delay; |
struct drm_display_mode *panel_fixed_mode; /* for eDP */ |
struct delayed_work panel_vdd_work; |
bool want_panel_vdd; |
struct intel_connector *attached_connector; |
struct edid *edid; /* cached EDID for eDP */ |
int edid_mode_count; |
}; |
struct intel_digital_port { |
struct intel_encoder base; |
enum port port; |
struct intel_dp dp; |
struct intel_hdmi hdmi; |
}; |
static inline struct drm_crtc * |
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) |
{ |
406,14 → 408,11 |
struct intel_unpin_work { |
struct work_struct work; |
struct drm_crtc *crtc; |
struct drm_device *dev; |
struct drm_i915_gem_object *old_fb_obj; |
struct drm_i915_gem_object *pending_flip_obj; |
struct drm_pending_vblank_event *event; |
atomic_t pending; |
#define INTEL_FLIP_INACTIVE 0 |
#define INTEL_FLIP_PENDING 1 |
#define INTEL_FLIP_COMPLETE 2 |
int pending; |
bool enable_stall_check; |
}; |
424,8 → 423,6 |
int interval; |
}; |
int intel_pch_rawclk(struct drm_device *dev); |
int intel_connector_update_modes(struct drm_connector *connector, |
struct edid *edid); |
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); |
436,12 → 433,7 |
extern void intel_crt_init(struct drm_device *dev); |
extern void intel_hdmi_init(struct drm_device *dev, |
int sdvox_reg, enum port port); |
extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
struct intel_connector *intel_connector); |
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); |
extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, |
const struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode); |
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); |
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, |
bool is_sdvob); |
454,27 → 446,10 |
extern bool intel_lvds_init(struct drm_device *dev); |
extern void intel_dp_init(struct drm_device *dev, int output_reg, |
enum port port); |
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, |
struct intel_connector *intel_connector); |
void |
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode); |
extern void intel_dp_init_link_config(struct intel_dp *intel_dp); |
extern void intel_dp_start_link_train(struct intel_dp *intel_dp); |
extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); |
extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); |
extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); |
extern void intel_dp_check_link_status(struct intel_dp *intel_dp); |
extern bool intel_dp_mode_fixup(struct drm_encoder *encoder, |
const struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode); |
extern bool intel_dpd_is_edp(struct drm_device *dev); |
extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp); |
extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp); |
extern void ironlake_edp_panel_on(struct intel_dp *intel_dp); |
extern void ironlake_edp_panel_off(struct intel_dp *intel_dp); |
extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); |
extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); |
extern void intel_edp_link_config(struct intel_encoder *, int *, int *); |
extern int intel_edp_target_clock(struct intel_encoder *, |
struct drm_display_mode *mode); |
484,10 → 459,6 |
enum plane plane); |
/* intel_panel.c */ |
extern int intel_panel_init(struct intel_panel *panel, |
struct drm_display_mode *fixed_mode); |
extern void intel_panel_fini(struct intel_panel *panel); |
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
struct drm_display_mode *adjusted_mode); |
extern void intel_pch_panel_fitting(struct drm_device *dev, |
496,7 → 467,7 |
struct drm_display_mode *adjusted_mode); |
extern u32 intel_panel_get_max_backlight(struct drm_device *dev); |
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); |
extern int intel_panel_setup_backlight(struct drm_connector *connector); |
extern int intel_panel_setup_backlight(struct drm_device *dev); |
extern void intel_panel_enable_backlight(struct drm_device *dev, |
enum pipe pipe); |
extern void intel_panel_disable_backlight(struct drm_device *dev); |
530,31 → 501,6 |
return to_intel_connector(connector)->encoder; |
} |
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) |
{ |
struct intel_digital_port *intel_dig_port = |
container_of(encoder, struct intel_digital_port, base.base); |
return &intel_dig_port->dp; |
} |
static inline struct intel_digital_port * |
enc_to_dig_port(struct drm_encoder *encoder) |
{ |
return container_of(encoder, struct intel_digital_port, base.base); |
} |
static inline struct intel_digital_port * |
dp_to_dig_port(struct intel_dp *intel_dp) |
{ |
return container_of(intel_dp, struct intel_digital_port, dp); |
} |
static inline struct intel_digital_port * |
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) |
{ |
return container_of(intel_hdmi, struct intel_digital_port, hdmi); |
} |
extern void intel_connector_attach_encoder(struct intel_connector *connector, |
struct intel_encoder *encoder); |
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); |
563,12 → 509,8 |
struct drm_crtc *crtc); |
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern enum transcoder |
intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, |
enum pipe pipe); |
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); |
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); |
extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); |
struct intel_load_detect_pipe { |
struct drm_framebuffer *release_fb; |
636,10 → 578,6 |
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, |
struct drm_display_mode *mode); |
extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, |
unsigned int bpp, |
unsigned int pitch); |
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, |
663,22 → 601,12 |
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); |
extern void ironlake_teardown_rc6(struct drm_device *dev); |
extern void intel_enable_ddi(struct intel_encoder *encoder); |
extern void intel_disable_ddi(struct intel_encoder *encoder); |
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, |
enum pipe *pipe); |
extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv); |
extern void intel_ddi_pll_init(struct drm_device *dev); |
extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc); |
extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, |
enum transcoder cpu_transcoder); |
extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); |
extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); |
extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev); |
extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock); |
extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc); |
extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); |
extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); |
extern bool |
intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); |
extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); |
extern void intel_ddi_mode_set(struct drm_encoder *encoder, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode); |
#endif /* __INTEL_DRV_H__ */ |
/drivers/video/drm/i915/intel_hdmi.c |
---|
36,15 → 36,10 |
#include <drm/i915_drm.h> |
#include "i915_drv.h" |
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi) |
{ |
return hdmi_to_dig_port(intel_hdmi)->base.base.dev; |
} |
static void |
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) |
{ |
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); |
struct drm_device *dev = intel_hdmi->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t enabled_bits; |
56,14 → 51,13 |
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) |
{ |
struct intel_digital_port *intel_dig_port = |
container_of(encoder, struct intel_digital_port, base.base); |
return &intel_dig_port->hdmi; |
return container_of(encoder, struct intel_hdmi, base.base); |
} |
static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) |
{ |
return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); |
return container_of(intel_attached_encoder(connector), |
struct intel_hdmi, base); |
} |
void intel_dip_infoframe_csum(struct dip_infoframe *frame) |
340,8 → 334,6 |
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) |
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; |
avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode); |
intel_set_infoframe(encoder, &avi_if); |
} |
762,7 → 754,7 |
return MODE_OK; |
} |
bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, |
static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, |
const struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
771,7 → 763,7 |
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) |
{ |
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); |
struct drm_device *dev = intel_hdmi->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t bit; |
794,9 → 786,6 |
intel_hdmi_detect(struct drm_connector *connector, bool force) |
{ |
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
struct intel_digital_port *intel_dig_port = |
hdmi_to_dig_port(intel_hdmi); |
struct intel_encoder *intel_encoder = &intel_dig_port->base; |
struct drm_i915_private *dev_priv = connector->dev->dev_private; |
struct edid *edid; |
enum drm_connector_status status = connector_status_disconnected; |
825,7 → 814,6 |
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) |
intel_hdmi->has_audio = |
(intel_hdmi->force_audio == HDMI_AUDIO_ON); |
intel_encoder->type = INTEL_OUTPUT_HDMI; |
} |
return status; |
871,12 → 859,10 |
uint64_t val) |
{ |
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
struct intel_digital_port *intel_dig_port = |
hdmi_to_dig_port(intel_hdmi); |
struct drm_i915_private *dev_priv = connector->dev->dev_private; |
int ret; |
ret = drm_object_property_set_value(&connector->base, property, val); |
ret = drm_connector_property_set_value(connector, property, val); |
if (ret) |
return ret; |
#if 0 |
913,8 → 899,8 |
return -EINVAL; |
done: |
if (intel_dig_port->base.base.crtc) { |
struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
if (intel_hdmi->base.base.crtc) { |
struct drm_crtc *crtc = intel_hdmi->base.base.crtc; |
intel_set_mode(crtc, &crtc->mode, |
crtc->x, crtc->y, crtc->fb); |
} |
929,6 → 915,12 |
kfree(connector); |
} |
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = { |
.mode_fixup = intel_hdmi_mode_fixup, |
.mode_set = intel_ddi_mode_set, |
.disable = intel_encoder_noop, |
}; |
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { |
.mode_fixup = intel_hdmi_mode_fixup, |
.mode_set = intel_hdmi_mode_set, |
960,24 → 952,43 |
intel_attach_broadcast_rgb_property(connector); |
} |
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
struct intel_connector *intel_connector) |
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) |
{ |
struct drm_connector *connector = &intel_connector->base; |
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; |
struct intel_encoder *intel_encoder = &intel_dig_port->base; |
struct drm_device *dev = intel_encoder->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
enum port port = intel_dig_port->port; |
struct drm_connector *connector; |
struct intel_encoder *intel_encoder; |
struct intel_connector *intel_connector; |
struct intel_hdmi *intel_hdmi; |
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); |
if (!intel_hdmi) |
return; |
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
if (!intel_connector) { |
kfree(intel_hdmi); |
return; |
} |
intel_encoder = &intel_hdmi->base; |
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, |
DRM_MODE_ENCODER_TMDS); |
connector = &intel_connector->base; |
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, |
DRM_MODE_CONNECTOR_HDMIA); |
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); |
intel_encoder->type = INTEL_OUTPUT_HDMI; |
connector->polled = DRM_CONNECTOR_POLL_HPD; |
connector->interlace_allowed = 1; |
connector->doublescan_allowed = 0; |
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
intel_encoder->cloneable = false; |
intel_hdmi->ddi_port = port; |
switch (port) { |
case PORT_B: |
intel_hdmi->ddc_bus = GMBUS_PORT_DPB; |
997,6 → 1008,8 |
BUG(); |
} |
intel_hdmi->sdvox_reg = sdvox_reg; |
if (!HAS_PCH_SPLIT(dev)) { |
intel_hdmi->write_infoframe = g4x_write_infoframe; |
intel_hdmi->set_infoframes = g4x_set_infoframes; |
1014,11 → 1027,22 |
intel_hdmi->set_infoframes = cpt_set_infoframes; |
} |
if (IS_HASWELL(dev)) |
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; |
else |
if (IS_HASWELL(dev)) { |
intel_encoder->enable = intel_enable_ddi; |
intel_encoder->disable = intel_disable_ddi; |
intel_encoder->get_hw_state = intel_ddi_get_hw_state; |
drm_encoder_helper_add(&intel_encoder->base, |
&intel_hdmi_helper_funcs_hsw); |
} else { |
intel_encoder->enable = intel_enable_hdmi; |
intel_encoder->disable = intel_disable_hdmi; |
intel_encoder->get_hw_state = intel_hdmi_get_hw_state; |
drm_encoder_helper_add(&intel_encoder->base, |
&intel_hdmi_helper_funcs); |
} |
intel_connector->get_hw_state = intel_connector_get_hw_state; |
intel_hdmi_add_properties(intel_hdmi, connector); |
intel_connector_attach_encoder(intel_connector, intel_encoder); |
1033,42 → 1057,3 |
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); |
} |
} |
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) |
{ |
struct intel_digital_port *intel_dig_port; |
struct intel_encoder *intel_encoder; |
struct drm_encoder *encoder; |
struct intel_connector *intel_connector; |
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); |
if (!intel_dig_port) |
return; |
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
if (!intel_connector) { |
kfree(intel_dig_port); |
return; |
} |
intel_encoder = &intel_dig_port->base; |
encoder = &intel_encoder->base; |
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, |
DRM_MODE_ENCODER_TMDS); |
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); |
intel_encoder->enable = intel_enable_hdmi; |
intel_encoder->disable = intel_disable_hdmi; |
intel_encoder->get_hw_state = intel_hdmi_get_hw_state; |
intel_encoder->type = INTEL_OUTPUT_HDMI; |
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
intel_encoder->cloneable = false; |
intel_dig_port->port = port; |
intel_dig_port->hdmi.sdvox_reg = sdvox_reg; |
intel_dig_port->dp.output_reg = 0; |
intel_hdmi_init_connector(intel_dig_port, intel_connector); |
} |
/drivers/video/drm/i915/intel_lvds.c |
---|
40,30 → 40,28 |
//#include <linux/acpi.h> |
/* Private structure for the integrated LVDS support */ |
struct intel_lvds_connector { |
struct intel_connector base; |
struct intel_lvds { |
struct intel_encoder base; |
// struct notifier_block lid_notifier; |
}; |
struct edid *edid; |
struct intel_lvds_encoder { |
struct intel_encoder base; |
int fitting_mode; |
u32 pfit_control; |
u32 pfit_pgm_ratios; |
bool pfit_dirty; |
struct intel_lvds_connector *attached_connector; |
struct drm_display_mode *fixed_mode; |
}; |
static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) |
static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder) |
{ |
return container_of(encoder, struct intel_lvds_encoder, base.base); |
return container_of(encoder, struct intel_lvds, base.base); |
} |
static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector) |
static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) |
{ |
return container_of(connector, struct intel_lvds_connector, base.base); |
return container_of(intel_attached_encoder(connector), |
struct intel_lvds, base); |
} |
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, |
98,7 → 96,7 |
static void intel_enable_lvds(struct intel_encoder *encoder) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); |
struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); |
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 ctl_reg, lvds_reg, stat_reg; |
115,7 → 113,7 |
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); |
if (lvds_encoder->pfit_dirty) { |
if (intel_lvds->pfit_dirty) { |
/* |
* Enable automatic panel scaling so that non-native modes |
* fill the screen. The panel fitter should only be |
123,12 → 121,12 |
* register description and PRM. |
*/ |
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", |
lvds_encoder->pfit_control, |
lvds_encoder->pfit_pgm_ratios); |
intel_lvds->pfit_control, |
intel_lvds->pfit_pgm_ratios); |
I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios); |
I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control); |
lvds_encoder->pfit_dirty = false; |
I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); |
I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); |
intel_lvds->pfit_dirty = false; |
} |
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); |
142,7 → 140,7 |
static void intel_disable_lvds(struct intel_encoder *encoder) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); |
struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 ctl_reg, lvds_reg, stat_reg; |
162,9 → 160,9 |
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) |
DRM_ERROR("timed out waiting for panel to power off\n"); |
if (lvds_encoder->pfit_control) { |
if (intel_lvds->pfit_control) { |
I915_WRITE(PFIT_CONTROL, 0); |
lvds_encoder->pfit_dirty = true; |
intel_lvds->pfit_dirty = true; |
} |
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); |
174,8 → 172,8 |
static int intel_lvds_mode_valid(struct drm_connector *connector, |
struct drm_display_mode *mode) |
{ |
struct intel_connector *intel_connector = to_intel_connector(connector); |
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; |
struct intel_lvds *intel_lvds = intel_attached_lvds(connector); |
struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode; |
if (mode->hdisplay > fixed_mode->hdisplay) |
return MODE_PANEL; |
251,10 → 249,8 |
{ |
struct drm_device *dev = encoder->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); |
struct intel_connector *intel_connector = |
&lvds_encoder->attached_connector->base; |
struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc; |
struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc; |
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; |
int pipe; |
264,7 → 260,7 |
return false; |
} |
if (intel_encoder_check_is_cloned(&lvds_encoder->base)) |
if (intel_encoder_check_is_cloned(&intel_lvds->base)) |
return false; |
/* |
273,12 → 269,10 |
* with the panel scaling set up to source from the H/VDisplay |
* of the original mode. |
*/ |
intel_fixed_panel_mode(intel_connector->panel.fixed_mode, |
adjusted_mode); |
intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode); |
if (HAS_PCH_SPLIT(dev)) { |
intel_pch_panel_fitting(dev, |
intel_connector->panel.fitting_mode, |
intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, |
mode, adjusted_mode); |
return true; |
} |
304,7 → 298,7 |
drm_mode_set_crtcinfo(adjusted_mode, 0); |
switch (intel_connector->panel.fitting_mode) { |
switch (intel_lvds->fitting_mode) { |
case DRM_MODE_SCALE_CENTER: |
/* |
* For centered modes, we have to calculate border widths & |
402,11 → 396,11 |
if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) |
pfit_control |= PANEL_8TO6_DITHER_ENABLE; |
if (pfit_control != lvds_encoder->pfit_control || |
pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) { |
lvds_encoder->pfit_control = pfit_control; |
lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios; |
lvds_encoder->pfit_dirty = true; |
if (pfit_control != intel_lvds->pfit_control || |
pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { |
intel_lvds->pfit_control = pfit_control; |
intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; |
intel_lvds->pfit_dirty = true; |
} |
dev_priv->lvds_border_bits = border; |
455,15 → 449,14 |
*/ |
static int intel_lvds_get_modes(struct drm_connector *connector) |
{ |
struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); |
struct intel_lvds *intel_lvds = intel_attached_lvds(connector); |
struct drm_device *dev = connector->dev; |
struct drm_display_mode *mode; |
/* use cached edid if we have one */ |
if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) |
return drm_add_edid_modes(connector, lvds_connector->base.edid); |
if (intel_lvds->edid) |
return drm_add_edid_modes(connector, intel_lvds->edid); |
mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode); |
mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); |
if (mode == NULL) |
return 0; |
504,11 → 497,10 |
static int intel_lid_notify(struct notifier_block *nb, unsigned long val, |
void *unused) |
{ |
struct intel_lvds_connector *lvds_connector = |
container_of(nb, struct intel_lvds_connector, lid_notifier); |
struct drm_connector *connector = &lvds_connector->base.base; |
struct drm_device *dev = connector->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_private *dev_priv = |
container_of(nb, struct drm_i915_private, lid_notifier); |
struct drm_device *dev = dev_priv->dev; |
struct drm_connector *connector = dev_priv->int_lvds_connector; |
if (dev->switch_power_state != DRM_SWITCH_POWER_ON) |
return NOTIFY_OK; |
517,7 → 509,9 |
* check and update the status of LVDS connector after receiving |
* the LID nofication event. |
*/ |
connector->status = connector->funcs->detect(connector, false); |
if (connector) |
connector->status = connector->funcs->detect(connector, |
false); |
/* Don't force modeset on machines where it causes a GPU lockup */ |
if (dmi_check_system(intel_no_modeset_on_lid)) |
533,7 → 527,7 |
dev_priv->modeset_on_lid = 0; |
mutex_lock(&dev->mode_config.mutex); |
intel_modeset_setup_hw_state(dev, true); |
intel_modeset_check_state(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return NOTIFY_OK; |
549,16 → 543,13 |
*/ |
static void intel_lvds_destroy(struct drm_connector *connector) |
{ |
struct intel_lvds_connector *lvds_connector = |
to_lvds_connector(connector); |
struct drm_device *dev = connector->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
intel_panel_destroy_backlight(dev); |
if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) |
kfree(lvds_connector->base.edid); |
intel_panel_destroy_backlight(connector->dev); |
intel_panel_fini(&lvds_connector->base.panel); |
// if (dev_priv->lid_notifier.notifier_call) |
// acpi_lid_notifier_unregister(&dev_priv->lid_notifier); |
drm_sysfs_connector_remove(connector); |
drm_connector_cleanup(connector); |
kfree(connector); |
568,11 → 559,11 |
struct drm_property *property, |
uint64_t value) |
{ |
struct intel_connector *intel_connector = to_intel_connector(connector); |
struct intel_lvds *intel_lvds = intel_attached_lvds(connector); |
struct drm_device *dev = connector->dev; |
if (property == dev->mode_config.scaling_mode_property) { |
struct drm_crtc *crtc; |
struct drm_crtc *crtc = intel_lvds->base.base.crtc; |
if (value == DRM_MODE_SCALE_NONE) { |
DRM_DEBUG_KMS("no scaling not supported\n"); |
579,13 → 570,11 |
return -EINVAL; |
} |
if (intel_connector->panel.fitting_mode == value) { |
if (intel_lvds->fitting_mode == value) { |
/* the LVDS scaling property is not changed */ |
return 0; |
} |
intel_connector->panel.fitting_mode = value; |
crtc = intel_attached_encoder(connector)->base.crtc; |
intel_lvds->fitting_mode = value; |
if (crtc && crtc->enabled) { |
/* |
* If the CRTC is enabled, the display will be changed |
776,6 → 765,14 |
}, |
{ |
.callback = intel_no_lvds_dmi_callback, |
.ident = "ZOTAC ZBOXSD-ID12/ID13", |
.matches = { |
DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"), |
DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), |
}, |
}, |
{ |
.callback = intel_no_lvds_dmi_callback, |
.ident = "Gigabyte GA-D525TUD", |
.matches = { |
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), |
917,15 → 914,12 |
bool intel_lvds_init(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_lvds_encoder *lvds_encoder; |
struct intel_lvds *intel_lvds; |
struct intel_encoder *intel_encoder; |
struct intel_lvds_connector *lvds_connector; |
struct intel_connector *intel_connector; |
struct drm_connector *connector; |
struct drm_encoder *encoder; |
struct drm_display_mode *scan; /* *modes, *bios_mode; */ |
struct drm_display_mode *fixed_mode = NULL; |
struct edid *edid; |
struct drm_crtc *crtc; |
u32 lvds; |
int pipe; |
953,25 → 947,23 |
} |
} |
lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); |
if (!lvds_encoder) |
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); |
if (!intel_lvds) { |
return false; |
} |
lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); |
if (!lvds_connector) { |
kfree(lvds_encoder); |
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
if (!intel_connector) { |
kfree(intel_lvds); |
return false; |
} |
lvds_encoder->attached_connector = lvds_connector; |
if (!HAS_PCH_SPLIT(dev)) { |
lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL); |
intel_lvds->pfit_control = I915_READ(PFIT_CONTROL); |
} |
intel_encoder = &lvds_encoder->base; |
intel_encoder = &intel_lvds->base; |
encoder = &intel_encoder->base; |
intel_connector = &lvds_connector->base; |
connector = &intel_connector->base; |
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, |
DRM_MODE_CONNECTOR_LVDS); |
1003,10 → 995,14 |
/* create the scaling mode property */ |
drm_mode_create_scaling_mode_property(dev); |
drm_object_attach_property(&connector->base, |
/* |
* the initial panel fitting mode will be FULL_SCREEN. |
*/ |
drm_connector_attach_property(&intel_connector->base, |
dev->mode_config.scaling_mode_property, |
DRM_MODE_SCALE_ASPECT); |
intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; |
intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT; |
/* |
* LVDS discovery: |
* 1) check for EDID on DDC |
1021,21 → 1017,20 |
* Attempt to get the fixed panel mode from DDC. Assume that the |
* preferred mode is the right one. |
*/ |
edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin)); |
if (edid) { |
if (drm_add_edid_modes(connector, edid)) { |
intel_lvds->edid = drm_get_edid(connector, |
intel_gmbus_get_adapter(dev_priv, |
pin)); |
if (intel_lvds->edid) { |
if (drm_add_edid_modes(connector, |
intel_lvds->edid)) { |
drm_mode_connector_update_edid_property(connector, |
edid); |
intel_lvds->edid); |
} else { |
kfree(edid); |
edid = ERR_PTR(-EINVAL); |
kfree(intel_lvds->edid); |
intel_lvds->edid = NULL; |
} |
} else { |
edid = ERR_PTR(-ENOENT); |
} |
lvds_connector->base.edid = edid; |
if (IS_ERR_OR_NULL(edid)) { |
if (!intel_lvds->edid) { |
/* Didn't get an EDID, so |
* Set wide sync ranges so we get all modes |
* handed to valid_mode for checking |
1048,26 → 1043,22 |
list_for_each_entry(scan, &connector->probed_modes, head) { |
if (scan->type & DRM_MODE_TYPE_PREFERRED) { |
DRM_DEBUG_KMS("using preferred mode from EDID: "); |
drm_mode_debug_printmodeline(scan); |
fixed_mode = drm_mode_duplicate(dev, scan); |
if (fixed_mode) { |
intel_find_lvds_downclock(dev, fixed_mode, |
intel_lvds->fixed_mode = |
drm_mode_duplicate(dev, scan); |
intel_find_lvds_downclock(dev, |
intel_lvds->fixed_mode, |
connector); |
goto out; |
} |
} |
} |
/* Failed to get EDID, what about VBT? */ |
if (dev_priv->lfp_lvds_vbt_mode) { |
DRM_DEBUG_KMS("using mode from VBT: "); |
drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode); |
fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); |
if (fixed_mode) { |
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; |
intel_lvds->fixed_mode = |
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); |
if (intel_lvds->fixed_mode) { |
intel_lvds->fixed_mode->type |= |
DRM_MODE_TYPE_PREFERRED; |
goto out; |
} |
} |
1087,17 → 1078,16 |
crtc = intel_get_crtc_for_pipe(dev, pipe); |
if (crtc && (lvds & LVDS_PORT_EN)) { |
fixed_mode = intel_crtc_mode_get(dev, crtc); |
if (fixed_mode) { |
DRM_DEBUG_KMS("using current (BIOS) mode: "); |
drm_mode_debug_printmodeline(fixed_mode); |
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; |
intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc); |
if (intel_lvds->fixed_mode) { |
intel_lvds->fixed_mode->type |= |
DRM_MODE_TYPE_PREFERRED; |
goto out; |
} |
} |
/* If we still don't have a mode after all that, give up. */ |
if (!fixed_mode) |
if (!intel_lvds->fixed_mode) |
goto failed; |
out: |
1117,10 → 1107,11 |
// DRM_DEBUG_KMS("lid notifier registration failed\n"); |
// dev_priv->lid_notifier.notifier_call = NULL; |
// } |
/* keep the LVDS connector */ |
dev_priv->int_lvds_connector = connector; |
drm_sysfs_connector_add(connector); |
intel_panel_init(&intel_connector->panel, fixed_mode); |
intel_panel_setup_backlight(connector); |
intel_panel_setup_backlight(dev); |
return true; |
1128,9 → 1119,7 |
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); |
drm_connector_cleanup(connector); |
drm_encoder_cleanup(encoder); |
if (fixed_mode) |
drm_mode_destroy(dev, fixed_mode); |
kfree(lvds_encoder); |
kfree(lvds_connector); |
kfree(intel_lvds); |
kfree(intel_connector); |
return false; |
} |
/drivers/video/drm/i915/intel_modes.c |
---|
45,6 → 45,7 |
drm_mode_connector_update_edid_property(connector, edid); |
ret = drm_add_edid_modes(connector, edid); |
drm_edid_to_eld(connector, edid); |
kfree(edid); |
return ret; |
} |
60,16 → 61,12 |
struct i2c_adapter *adapter) |
{ |
struct edid *edid; |
int ret; |
edid = drm_get_edid(connector, adapter); |
if (!edid) |
return 0; |
ret = intel_connector_update_modes(connector, edid); |
kfree(edid); |
return ret; |
return intel_connector_update_modes(connector, edid); |
} |
static const struct drm_prop_enum_list force_audio_names[] = { |
97,7 → 94,7 |
dev_priv->force_audio_property = prop; |
} |
drm_object_attach_property(&connector->base, prop, 0); |
drm_connector_attach_property(connector, prop, 0); |
#endif |
} |
125,6 → 122,6 |
dev_priv->broadcast_rgb_property = prop; |
} |
drm_object_attach_property(&connector->base, prop, 0); |
drm_connector_attach_property(connector, prop, 0); |
#endif |
} |
/drivers/video/drm/i915/intel_ringbuffer.h |
---|
1,17 → 1,6 |
#ifndef _INTEL_RINGBUFFER_H_ |
#define _INTEL_RINGBUFFER_H_ |
/* |
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" |
* Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" |
* Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use" |
* |
* "If the Ring Buffer Head Pointer and the Tail Pointer are on the same |
* cacheline, the Head Pointer must not be greater than the Tail |
* Pointer." |
*/ |
#define I915_RING_FREE_SPACE 64 |
struct intel_hw_status_page { |
u32 *page_addr; |
unsigned int gfx_addr; |
81,7 → 70,8 |
int __must_check (*flush)(struct intel_ring_buffer *ring, |
u32 invalidate_domains, |
u32 flush_domains); |
int (*add_request)(struct intel_ring_buffer *ring); |
int (*add_request)(struct intel_ring_buffer *ring, |
u32 *seqno); |
/* Some chipsets are not quite as coherent as advertised and need |
* an expensive kick to force a true read of the up-to-date seqno. |
* However, the up-to-date seqno is not always required and the last |
91,10 → 81,7 |
u32 (*get_seqno)(struct intel_ring_buffer *ring, |
bool lazy_coherency); |
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, |
u32 offset, u32 length, |
unsigned flags); |
#define I915_DISPATCH_SECURE 0x1 |
#define I915_DISPATCH_PINNED 0x2 |
u32 offset, u32 length); |
void (*cleanup)(struct intel_ring_buffer *ring); |
int (*sync_to)(struct intel_ring_buffer *ring, |
struct intel_ring_buffer *to, |
194,12 → 181,17 |
* The area from dword 0x20 to 0x3ff is available for driver usage. |
*/ |
#define I915_GEM_HWS_INDEX 0x20 |
#define I915_GEM_HWS_SCRATCH_INDEX 0x30 |
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) |
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); |
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); |
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring) |
{ |
return intel_wait_ring_buffer(ring, ring->size - 8); |
} |
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); |
static inline void intel_ring_emit(struct intel_ring_buffer *ring, |
u32 data) |
{ |
206,9 → 198,10 |
iowrite32(data, ring->virtual_start + ring->tail); |
ring->tail += 4; |
} |
void intel_ring_advance(struct intel_ring_buffer *ring); |
int __must_check intel_ring_idle(struct intel_ring_buffer *ring); |
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); |
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); |
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); |
224,12 → 217,6 |
return ring->tail; |
} |
static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring) |
{ |
BUG_ON(ring->outstanding_lazy_request == 0); |
return ring->outstanding_lazy_request; |
} |
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) |
{ |
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) |
/drivers/video/drm/i915/intel_sprite.c |
---|
48,8 → 48,7 |
struct intel_plane *intel_plane = to_intel_plane(plane); |
int pipe = intel_plane->pipe; |
u32 sprctl, sprscale = 0; |
unsigned long sprsurf_offset, linear_offset; |
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); |
int pixel_size; |
sprctl = I915_READ(SPRCTL(pipe)); |
62,24 → 61,33 |
switch (fb->pixel_format) { |
case DRM_FORMAT_XBGR8888: |
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; |
pixel_size = 4; |
break; |
case DRM_FORMAT_XRGB8888: |
sprctl |= SPRITE_FORMAT_RGBX888; |
pixel_size = 4; |
break; |
case DRM_FORMAT_YUYV: |
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; |
pixel_size = 2; |
break; |
case DRM_FORMAT_YVYU: |
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU; |
pixel_size = 2; |
break; |
case DRM_FORMAT_UYVY: |
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY; |
pixel_size = 2; |
break; |
case DRM_FORMAT_VYUY: |
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; |
pixel_size = 2; |
break; |
default: |
BUG(); |
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); |
sprctl |= SPRITE_FORMAT_RGBX888; |
pixel_size = 4; |
break; |
} |
if (obj->tiling_mode != I915_TILING_NONE) |
119,27 → 127,18 |
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); |
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); |
linear_offset = y * fb->pitches[0] + x * pixel_size; |
sprsurf_offset = |
intel_gen4_compute_offset_xtiled(&x, &y, |
pixel_size, fb->pitches[0]); |
linear_offset -= sprsurf_offset; |
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET |
* register */ |
if (IS_HASWELL(dev)) |
I915_WRITE(SPROFFSET(pipe), (y << 16) | x); |
else if (obj->tiling_mode != I915_TILING_NONE) |
if (obj->tiling_mode != I915_TILING_NONE) { |
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); |
else |
I915_WRITE(SPRLINOFF(pipe), linear_offset); |
} else { |
unsigned long offset; |
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); |
I915_WRITE(SPRLINOFF(pipe), offset); |
} |
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); |
if (intel_plane->can_scale) |
I915_WRITE(SPRSCALE(pipe), sprscale); |
I915_WRITE(SPRCTL(pipe), sprctl); |
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); |
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset); |
POSTING_READ(SPRSURF(pipe)); |
} |
153,7 → 152,6 |
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); |
/* Can't leave the scaler enabled... */ |
if (intel_plane->can_scale) |
I915_WRITE(SPRSCALE(pipe), 0); |
/* Activate double buffered register update */ |
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); |
227,10 → 225,8 |
struct drm_device *dev = plane->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_plane *intel_plane = to_intel_plane(plane); |
int pipe = intel_plane->pipe; |
unsigned long dvssurf_offset, linear_offset; |
int pipe = intel_plane->pipe, pixel_size; |
u32 dvscntr, dvsscale; |
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); |
dvscntr = I915_READ(DVSCNTR(pipe)); |
243,24 → 239,33 |
switch (fb->pixel_format) { |
case DRM_FORMAT_XBGR8888: |
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; |
pixel_size = 4; |
break; |
case DRM_FORMAT_XRGB8888: |
dvscntr |= DVS_FORMAT_RGBX888; |
pixel_size = 4; |
break; |
case DRM_FORMAT_YUYV: |
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; |
pixel_size = 2; |
break; |
case DRM_FORMAT_YVYU: |
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU; |
pixel_size = 2; |
break; |
case DRM_FORMAT_UYVY: |
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY; |
pixel_size = 2; |
break; |
case DRM_FORMAT_VYUY: |
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; |
pixel_size = 2; |
break; |
default: |
BUG(); |
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); |
dvscntr |= DVS_FORMAT_RGBX888; |
pixel_size = 4; |
break; |
} |
if (obj->tiling_mode != I915_TILING_NONE) |
284,22 → 289,18 |
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); |
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); |
linear_offset = y * fb->pitches[0] + x * pixel_size; |
dvssurf_offset = |
intel_gen4_compute_offset_xtiled(&x, &y, |
pixel_size, fb->pitches[0]); |
linear_offset -= dvssurf_offset; |
if (obj->tiling_mode != I915_TILING_NONE) |
if (obj->tiling_mode != I915_TILING_NONE) { |
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); |
else |
I915_WRITE(DVSLINOFF(pipe), linear_offset); |
} else { |
unsigned long offset; |
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); |
I915_WRITE(DVSLINOFF(pipe), offset); |
} |
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); |
I915_WRITE(DVSSCALE(pipe), dvsscale); |
I915_WRITE(DVSCNTR(pipe), dvscntr); |
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset); |
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset); |
POSTING_READ(DVSSURF(pipe)); |
} |
421,8 → 422,6 |
struct intel_framebuffer *intel_fb; |
struct drm_i915_gem_object *obj, *old_obj; |
int pipe = intel_plane->pipe; |
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
pipe); |
int ret = 0; |
int x = src_x >> 16, y = src_y >> 16; |
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay; |
437,7 → 436,7 |
src_h = src_h >> 16; |
/* Pipe must be running... */ |
if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) |
if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE)) |
return -EINVAL; |
if (crtc_x >= primary_w || crtc_y >= primary_h) |
447,15 → 446,6 |
if (intel_plane->pipe != intel_crtc->pipe) |
return -EINVAL; |
/* Sprite planes can be linear or x-tiled surfaces */ |
switch (obj->tiling_mode) { |
case I915_TILING_NONE: |
case I915_TILING_X: |
break; |
default: |
return -EINVAL; |
} |
/* |
* Clamp the width & height into the visible area. Note we don't |
* try to scale the source if part of the visible region is offscreen. |
483,12 → 473,6 |
goto out; |
/* |
* We may not have a scaler, eg. HSW does not have it any more |
*/ |
if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h)) |
return -EINVAL; |
/* |
* We can take a larger source and scale it down, but |
* only so much... 16x is the max on SNB. |
*/ |
586,6 → 570,8 |
struct intel_plane *intel_plane; |
int ret = 0; |
// if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
// return -ENODEV; |
/* Make sure we don't try to enable both src & dest simultaneously */ |
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) |
617,6 → 603,8 |
struct intel_plane *intel_plane; |
int ret = 0; |
// if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
// return -ENODEV; |
mutex_lock(&dev->mode_config.mutex); |
677,7 → 665,6 |
switch (INTEL_INFO(dev)->gen) { |
case 5: |
case 6: |
intel_plane->can_scale = true; |
intel_plane->max_downscale = 16; |
intel_plane->update_plane = ilk_update_plane; |
intel_plane->disable_plane = ilk_disable_plane; |
694,10 → 681,6 |
break; |
case 7: |
if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev)) |
intel_plane->can_scale = false; |
else |
intel_plane->can_scale = true; |
intel_plane->max_downscale = 2; |
intel_plane->update_plane = ivb_update_plane; |
intel_plane->disable_plane = ivb_disable_plane; |