Subversion Repositories Kolibri OS

Compare Revisions

Rev 3745 → Rev 3746

/drivers/video/drm/i915/i915_irq.c
26,7 → 26,7
*
*/
 
#define pr_fmt(fmt) ": " fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
#include <linux/slab.h>
#include <drm/drmP.h>
35,7 → 35,61
#include "i915_trace.h"
#include "intel_drv.h"
 
static const u32 hpd_ibx[] = {
[HPD_CRT] = SDE_CRT_HOTPLUG,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};
 
static const u32 hpd_cpt[] = {
[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};
 
static const u32 hpd_mask_i915[] = {
[HPD_CRT] = CRT_HOTPLUG_INT_EN,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
 
static const u32 hpd_status_gen4[] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
 
static const u32 hpd_status_i965[] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
 
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
 
static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
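
The tables above map each HPD pin to the register bit that belongs to it on a given platform; the hotplug setup functions later in this revision walk the encoder list and OR the bits of every still-enabled pin into one hardware mask. A minimal standalone sketch of that lookup pattern, with demo names rather than driver code:

#include <stdint.h>

enum demo_hpd_pin { DEMO_PIN_B, DEMO_PIN_C, DEMO_PIN_D, DEMO_NUM_PINS };

/* per-platform pin -> register-bit table, same shape as hpd_ibx[] above */
static const uint32_t demo_hpd_bits[DEMO_NUM_PINS] = {
	[DEMO_PIN_B] = 1u << 0,
	[DEMO_PIN_C] = 1u << 1,
	[DEMO_PIN_D] = 1u << 2,
};

/* OR together the bits of every pin that is still enabled */
static uint32_t demo_build_hpd_mask(const int *enabled_pins, int n)
{
	uint32_t mask = 0;
	for (int i = 0; i < n; i++)
		mask |= demo_hpd_bits[enabled_pins[i]];
	return mask;
}
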
 
58,7 → 112,7
}
}
 
static inline void
static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask & mask) != mask) {
71,27 → 125,31
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != mask) {
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
dev_priv->pipestat[pipe] |= mask;
if ((pipestat & mask) == mask)
return;
 
/* Enable the interrupt, clear any pending status */
I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
pipestat |= mask | (mask >> 16);
I915_WRITE(reg, pipestat);
POSTING_READ(reg);
}
}
 
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != 0) {
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
dev_priv->pipestat[pipe] &= ~mask;
I915_WRITE(reg, dev_priv->pipestat[pipe]);
if ((pipestat & mask) == 0)
return;
 
pipestat &= ~mask;
I915_WRITE(reg, pipestat);
POSTING_READ(reg);
}
}
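
The rework above drops the software pipestat cache and works on the register directly: PIPESTAT keeps enable bits in its upper half and write-one-to-clear status bits in its lower half, so shifting an enable mask right by 16 selects exactly the status bits to clear. A small self-contained sketch of that bit relationship, with illustrative values rather than driver code:

#include <stdint.h>
#include <stdio.h>

#define DEMO_VBLANK_ENABLE  (1u << 17)   /* enable bit, upper half           */
#define DEMO_VBLANK_STATUS  (1u <<  1)   /* matching status bit, lower half  */

int main(void)
{
	uint32_t mask = DEMO_VBLANK_ENABLE;
	uint32_t pipestat = 0x00020000u & 0x7fff0000u;   /* pretend register read */

	pipestat |= mask | (mask >> 16);   /* enable irq and clear pending status */

	printf("write 0x%08x, status bit 0x%08x\n", pipestat, mask >> 16);
	/* mask >> 16 == DEMO_VBLANK_STATUS, i.e. 0x00000002 */
	return 0;
}
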
 
#if 0
/**
190,9 → 248,109
return I915_READ(reg);
}
 
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int *vpos, int *hpos)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 vbl = 0, position = 0;
int vbl_start, vbl_end, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
 
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
"pipe %c\n", pipe_name(pipe));
return 0;
}
 
/* Get vtotal. */
vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
 
if (INTEL_INFO(dev)->gen >= 4) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
position = I915_READ(PIPEDSL(pipe));
 
/* Decode into vertical scanout position. Don't have
* horizontal scanout position.
*/
*vpos = position & 0x1fff;
*hpos = 0;
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
* scanout position.
*/
position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
*vpos = position / htotal;
*hpos = position - (*vpos * htotal);
}
 
/* Query vblank area. */
vbl = I915_READ(VBLANK(cpu_transcoder));
 
/* Test position against vblank region. */
vbl_start = vbl & 0x1fff;
vbl_end = (vbl >> 16) & 0x1fff;
 
if ((*vpos < vbl_start) || (*vpos > vbl_end))
in_vbl = false;
 
/* Inside "upper part" of vblank area? Apply corrective offset: */
if (in_vbl && (*vpos >= vbl_start))
*vpos = *vpos - vtotal;
 
/* Readouts valid? */
if (vbl > 0)
ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
 
/* In vblank? */
if (in_vbl)
ret |= DRM_SCANOUTPOS_INVBL;
 
return ret;
}
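
On hardware before gen4 the function above only has a frame-relative pixel counter, so it derives the vertical and horizontal scanout position by dividing by the horizontal total. A tiny worked sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
	int htotal   = 2200;      /* assumed pixels per line (HTOTAL + 1)       */
	int position = 123456;    /* assumed pixel count since start of frame   */

	int vpos = position / htotal;           /* current scanline             */
	int hpos = position - vpos * htotal;    /* pixel within that line       */

	printf("line %d, pixel %d\n", vpos, hpos);   /* line 56, pixel 256 */
	return 0;
}
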
 
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
int *max_error,
struct timeval *vblank_time,
unsigned flags)
{
struct drm_crtc *crtc;
 
if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
 
/* Get drm_crtc to timestamp: */
crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc == NULL) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
 
if (!crtc->enabled) {
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
return -EBUSY;
}
 
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
crtc);
}
 
/*
* Handle hotplug events outside the interrupt handler proper.
*/
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
 
static void i915_hotplug_work_func(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
199,7 → 357,11
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
unsigned long irqflags;
bool hpd_disabled = false;
 
/* HPD irq before everything is fully set up. */
if (!dev_priv->enable_hotplug_processing)
208,10 → 370,37
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
intel_encoder = intel_connector->encoder;
if (intel_encoder->hpd_pin > HPD_NONE &&
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
connector->polled == DRM_CONNECTOR_POLL_HPD) {
DRM_INFO("HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
drm_get_connector_name(connector));
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
}
}
/* if there were no outputs to poll, poll was disabled,
* therefore make sure it's enabled when disabling HPD on
* some connectors */
if (hpd_disabled) {
drm_kms_helper_poll_enable(dev);
// mod_timer(&dev_priv->hotplug_reenable_timer,
// jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
}
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (intel_encoder->hot_plug)
intel_encoder->hot_plug(intel_encoder);
 
mutex_unlock(&mode_config->mutex);
 
/* Just fire off a uevent and let userspace tell us what to do */
218,6 → 407,46
drm_helper_hpd_irq_event(dev);
}
 
static void ironlake_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
unsigned long flags;
 
spin_lock_irqsave(&mchdev_lock, flags);
 
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 
new_delay = dev_priv->ips.cur_delay;
 
I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
busy_up = I915_READ(RCPREVBSYTUPAVG);
busy_down = I915_READ(RCPREVBSYTDNAVG);
max_avg = I915_READ(RCBMAXAVG);
min_avg = I915_READ(RCBMINAVG);
 
/* Handle RCS change request from hw */
if (busy_up > max_avg) {
if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
new_delay = dev_priv->ips.cur_delay - 1;
if (new_delay < dev_priv->ips.max_delay)
new_delay = dev_priv->ips.max_delay;
} else if (busy_down < min_avg) {
if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
new_delay = dev_priv->ips.cur_delay + 1;
if (new_delay > dev_priv->ips.min_delay)
new_delay = dev_priv->ips.min_delay;
}
 
if (ironlake_set_drps(dev, new_delay))
dev_priv->ips.cur_delay = new_delay;
 
spin_unlock_irqrestore(&mchdev_lock, flags);
 
return;
}
 
static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
359,7 → 588,6
struct drm_i915_private *dev_priv,
u32 gt_iir)
{
// printf("%s\n", __FUNCTION__);
 
if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
404,6 → 632,45
// queue_work(dev_priv->wq, &dev_priv->rps.work);
}
 
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
 
static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
u32 hotplug_trigger,
const u32 *hpd)
{
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
int i;
bool ret = false;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
for (i = 1; i < HPD_NUM_PINS; i++) {
 
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue;
 
// if (!time_in_range(GetTimerTicks(), dev_priv->hpd_stats[i].hpd_last_jiffies,
// dev_priv->hpd_stats[i].hpd_last_jiffies
// + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
// dev_priv->hpd_stats[i].hpd_last_jiffies = GetTimerTicks;
// dev_priv->hpd_stats[i].hpd_cnt = 0;
// } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
// dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
// DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
// ret = true;
// } else {
dev_priv->hpd_stats[i].hpd_cnt++;
// }
}
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return ret;
}
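
The commented-out block carries the storm-detection rule from the upstream driver: restart the per-pin counter once the detection window has expired, otherwise count, and mark the pin disabled when the count passes the threshold inside one window. A standalone sketch of that rule; now_ms is a hypothetical monotonic-time helper, not a driver or KolibriOS API:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_STORM_PERIOD_MS  1000   /* HPD_STORM_DETECT_PERIOD */
#define DEMO_STORM_THRESHOLD  5      /* HPD_STORM_THRESHOLD     */

struct demo_hpd_stats {
	uint64_t window_start_ms;   /* start of the current detection window */
	int      count;             /* interrupts seen inside that window    */
	bool     disabled;          /* pin handed over to polling            */
};

/* returns true when a storm was detected and the pin was just disabled */
static bool demo_hpd_storm_check(struct demo_hpd_stats *s, uint64_t now_ms)
{
	if (now_ms - s->window_start_ms > DEMO_STORM_PERIOD_MS) {
		s->window_start_ms = now_ms;   /* window expired: start over */
		s->count = 0;
	} else if (s->count > DEMO_STORM_THRESHOLD) {
		s->disabled = true;            /* too many events: fall back to polling */
		return true;
	} else {
		s->count++;
	}
	return false;
}
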
 
static void gmbus_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
474,13 → 741,16
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
i915_hpd_irq_setup(dev);
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
 
}
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
504,10 → 774,13
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
 
if (pch_iir & SDE_HOTPLUG_MASK)
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
ibx_hpd_irq_setup(dev);
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 
}
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
550,10 → 823,13
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
if (pch_iir & SDE_HOTPLUG_MASK_CPT)
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
ibx_hpd_irq_setup(dev);
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 
}
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
582,7 → 858,7
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
int i;
 
597,9 → 873,11
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
if (!HAS_PCH_NOP(dev)) {
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
POSTING_READ(SDEIER);
}
 
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
626,7 → 904,7
}
#endif
/* check event from PCH */
if (de_iir & DE_PCH_EVENT_IVB) {
if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = I915_READ(SDEIIR);
 
cpt_irq_handler(dev, pch_iir);
649,8 → 927,10
 
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
if (!HAS_PCH_NOP(dev)) {
I915_WRITE(SDEIER, sde_ier);
POSTING_READ(SDEIER);
}
 
return ret;
}
792,24 → 1072,23
 
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src)
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src,
const int num_pages)
{
struct drm_i915_error_object *dst;
int i, count;
int i;
u32 reloc_offset;
 
if (src == NULL || src->pages == NULL)
return NULL;
 
count = src->base.size / PAGE_SIZE;
 
dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
 
reloc_offset = src->gtt_offset;
for (i = 0; i < count; i++) {
for (i = 0; i < num_pages; i++) {
unsigned long flags;
void *d;
 
859,7 → 1138,7
 
reloc_offset += PAGE_SIZE;
}
dst->page_count = count;
dst->page_count = num_pages;
dst->gtt_offset = src->gtt_offset;
 
return dst;
870,6 → 1149,9
kfree(dst);
return NULL;
}
#define i915_error_object_create(dev_priv, src) \
i915_error_object_create_sized((dev_priv), (src), \
(src)->base.size>>PAGE_SHIFT)
 
static void
i915_error_object_free(struct drm_i915_error_object *obj)
968,7 → 1250,7
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
1076,6 → 1358,26
error->cpu_ring_tail[ring->id] = ring->tail;
}
 
 
static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
 
/* Currently render ring is the only HW context user */
if (ring->id != RCS || !error->ccid)
return;
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
ering->ctx = i915_error_object_create_sized(dev_priv,
obj, 1);
}
}
}
 
static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
1093,6 → 1395,9
error->ring[i].ringbuffer =
i915_error_object_create(dev_priv, ring->obj);
 
 
i915_gem_record_active_context(ring, error, &error->ring[i]);
 
count = 0;
list_for_each_entry(request, &ring->request_list, list)
count++;
1155,6 → 1460,7
kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
if (HAS_HW_CONTEXTS(dev))
error->ccid = I915_READ(CCID);
 
if (HAS_PCH_SPLIT(dev))
1176,6 → 1482,7
else if (INTEL_INFO(dev)->gen == 6)
error->forcewake = I915_READ(FORCEWAKE);
 
if (!HAS_PCH_SPLIT(dev))
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 
1390,7 → 1697,7
#if 0
 
 
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1605,9 → 1912,18
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
 
if (HAS_PCH_NOP(dev))
return;
 
/* south display irq */
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
/*
* SDEIER is also touched by the interrupt handler to work around missed
* PCH interrupts. Hence we can't update it after the interrupt handler
* is enabled - instead we unconditionally enable all PCH interrupt
* sources here, but then only unmask them as needed with SDEIMR.
*/
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
}
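
The new comment explains the split of duties between the two PCH registers: SDEIER is left with every source enabled because the interrupt handler saves, clears and restores it, and per-source gating happens only through SDEIMR. A simplified model of that gating, purely conceptual and not driver code:

#include <stdint.h>

/* A source only fires when its status bit is set, it is not masked in IMR,
 * and it is enabled in IER.  With IER fully on, IMR alone decides what
 * reaches the CPU. */
static inline uint32_t demo_pch_irqs_firing(uint32_t sde_isr,  /* raw status  */
					    uint32_t sde_imr,  /* 1 = masked  */
					    uint32_t sde_ier)  /* 1 = enabled */
{
	return sde_isr & ~sde_imr & sde_ier;
}
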
 
1643,6 → 1959,28
POSTING_READ(VLV_IER);
}
 
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 mask = ~I915_READ(SDEIMR);
u32 hotplug;
 
if (HAS_PCH_IBX(dev)) {
mask &= ~SDE_HOTPLUG_MASK;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
mask |= hpd_ibx[intel_encoder->hpd_pin];
} else {
mask &= ~SDE_HOTPLUG_MASK_CPT;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
mask |= hpd_cpt[intel_encoder->hpd_pin];
}
 
I915_WRITE(SDEIMR, ~mask);
 
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
1649,12 → 1987,6
*
* This register is the same on all known PCH chips.
*/
 
static void ibx_enable_hotplug(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug;
 
hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1669,20 → 2001,15
u32 mask;
 
if (HAS_PCH_IBX(dev))
mask = SDE_HOTPLUG_MASK |
SDE_GMBUS |
SDE_AUX_MASK;
mask = SDE_GMBUS | SDE_AUX_MASK;
else
mask = SDE_HOTPLUG_MASK_CPT |
SDE_GMBUS_CPT |
SDE_AUX_MASK_CPT;
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
 
if (HAS_PCH_NOP(dev))
return;
 
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, ~mask);
I915_WRITE(SDEIER, mask);
POSTING_READ(SDEIER);
 
ibx_enable_hotplug(dev);
}
 
static int ironlake_irq_postinstall(struct drm_device *dev)
1793,9 → 2120,6
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
 
/* Hack for broken MSIs on VLV */
// pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
// pci_read_config_word(dev->pdev, 0x98, &msid);
1839,30 → 2163,6
return 0;
}
 
static void valleyview_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
 
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
hotplug_en |= PORTB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
hotplug_en |= PORTC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
 
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
 
static void valleyview_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1902,6 → 2202,9
I915_WRITE(GTIER, 0x0);
I915_WRITE(GTIIR, I915_READ(GTIIR));
 
if (HAS_PCH_NOP(dev))
return;
 
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1927,9 → 2230,6
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
 
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
1952,6 → 2252,37
return 0;
}
 
/*
* Returns true when a page flip has completed.
*/
static bool i8xx_handle_vblank(struct drm_device *dev,
int pipe, u16 iir)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
 
// if (!drm_handle_vblank(dev, pipe))
return false;
 
if ((iir & flip_pending) == 0)
return false;
 
// intel_prepare_page_flip(dev, pipe);
 
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
* the flip is completed (no longer pending). Since this doesn't raise
* an interrupt per se, we watch for the change at vblank.
*/
if (I915_READ16(ISR) & flip_pending)
return false;
 
intel_finish_page_flip(dev, pipe);
 
return true;
}
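
The comment inside i8xx_handle_vblank (and its i915 twin further down) describes how FlipDone is inferred: the flip is complete on the vblank where IIR still records the pending-flip event but ISR no longer shows it pending. The same test, isolated as a sketch:

#include <stdbool.h>
#include <stdint.h>

static bool demo_flip_completed(uint32_t iir, uint32_t isr, uint32_t flip_pending)
{
	if (!(iir & flip_pending))
		return false;    /* no flip was queued for this pipe          */
	if (isr & flip_pending)
		return false;    /* still pending: check again on next vblank */
	return true;             /* PendingFlip went 1 -> 0: flip is done     */
}
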
 
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
2007,22 → 2338,12
notify_ring(dev, &dev_priv->ring[RCS]);
 
if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
drm_handle_vblank(dev, 0)) {
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 0);
intel_finish_page_flip(dev, 0);
flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
}
}
i8xx_handle_vblank(dev, 0, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
 
if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
drm_handle_vblank(dev, 1)) {
if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 1);
intel_finish_page_flip(dev, 1);
flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
}
}
i8xx_handle_vblank(dev, 1, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
 
iir = new_iir;
}
2072,9 → 2393,6
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
 
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
 
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
/* Unmask the interrupts that we always want on. */
2112,34 → 2430,36
return 0;
}
 
static void i915_hpd_irq_setup(struct drm_device *dev)
/*
* Returns true when a page flip has completed.
*/
static bool i915_handle_vblank(struct drm_device *dev,
int plane, int pipe, u32 iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
if (I915_HAS_HOTPLUG(dev)) {
hotplug_en = I915_READ(PORT_HOTPLUG_EN);
// if (!drm_handle_vblank(dev, pipe))
return false;
 
if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
hotplug_en |= PORTB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
hotplug_en |= PORTC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
if ((iir & flip_pending) == 0)
return false;
 
/* Ignore TV since it's buggy */
// intel_prepare_page_flip(dev, plane);
 
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
* the flip is completed (no longer pending). Since this doesn't raise
* an interrupt per se, we watch for the change at vblank.
*/
if (I915_READ(ISR) & flip_pending)
return false;
 
intel_finish_page_flip(dev, pipe);
 
return true;
}
}
 
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
2150,10 → 2470,6
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
u32 flip[2] = {
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
};
int pipe, ret = IRQ_NONE;
 
atomic_inc(&dev_priv->irq_received);
2194,13 → 2510,16
if ((I915_HAS_HOTPLUG(dev)) &&
(iir & I915_DISPLAY_PORT_INTERRUPT)) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
i915_hpd_irq_setup(dev);
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
 
}
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
POSTING_READ(PORT_HOTPLUG_STAT);
}
2215,15 → 2534,11
int plane = pipe;
if (IS_MOBILE(dev))
plane = !plane;
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS /* &&
drm_handle_vblank(dev, pipe) */) {
if (iir & flip[plane]) {
// intel_prepare_page_flip(dev, plane);
// intel_finish_page_flip(dev, pipe);
flip_mask &= ~flip[plane];
}
}
 
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
i915_handle_vblank(dev, plane, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
}
2311,13 → 2626,13
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 
enable_mask = ~dev_priv->irq_mask;
enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
enable_mask |= I915_USER_INTERRUPT;
 
if (IS_G4X(dev))
enable_mask |= I915_BSD_USER_INTERRUPT;
 
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
 
/*
2347,33 → 2662,21
return 0;
}
 
static void i965_hpd_irq_setup(struct drm_device *dev)
static void i915_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 hotplug_en;
 
if (I915_HAS_HOTPLUG(dev)) {
hotplug_en = I915_READ(PORT_HOTPLUG_EN);
hotplug_en &= ~HOTPLUG_INT_EN_MASK;
/* Note HDMI and DP share hotplug bits */
hotplug_en = 0;
if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
hotplug_en |= PORTB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
hotplug_en |= PORTC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (IS_G4X(dev)) {
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
} else {
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
}
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
 
/* enable bits are the same for all generations */
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
seconds later. So just do it once.
2380,13 → 2683,13
*/
if (IS_G4X(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
 
/* Ignore TV since it's buggy */
 
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
}
 
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
2397,6 → 2700,9
unsigned long irqflags;
int irq_received;
int ret = IRQ_NONE, pipe;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
atomic_inc(&dev_priv->irq_received);
 
2405,7 → 2711,7
for (;;) {
bool blc_event = false;
 
irq_received = iir != 0;
irq_received = (iir & ~flip_mask) != 0;
 
/* Can't rely on pipestat interrupt bit in iir as it might
* have been cleared after the pipestat interrupt was received.
2441,18 → 2747,24
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
HOTPLUG_INT_STATUS_G4X :
HOTPLUG_INT_STATUS_I965);
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger,
IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
i915_hpd_irq_setup(dev);
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
 
}
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
 
I915_WRITE(IIR, iir);
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
 
if (iir & I915_USER_INTERRUPT)
2460,18 → 2772,10
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
 
// if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
// intel_prepare_page_flip(dev, 0);
 
// if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
// intel_prepare_page_flip(dev, 1);
 
for_each_pipe(pipe) {
// if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
// drm_handle_vblank(dev, pipe)) {
// i915_pageflip_stall_check(dev, pipe);
// intel_finish_page_flip(dev, pipe);
// }
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
i915_handle_vblank(dev, pipe, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
 
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
2544,16 → 2848,18
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ivybridge_irq_postinstall;
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else {
if (INTEL_INFO(dev)->gen == 2) {
} else if (INTEL_INFO(dev)->gen == 3) {
2565,7 → 2871,7
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_handler = i965_irq_handler;
dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
}
}
}
2573,7 → 2879,20
void intel_hpd_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
int i;
 
for (i = 1; i < HPD_NUM_PINS; i++) {
dev_priv->hpd_stats[i].hpd_cnt = 0;
dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
}
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_connector *intel_connector = to_intel_connector(connector);
connector->polled = intel_connector->polled;
if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
}