139,7 → 139,8 |
/* |
* We should clear IMR at preinstall/uninstall, and just check at postinstall. |
*/ |
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg) |
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, |
i915_reg_t reg) |
{ |
u32 val = I915_READ(reg); |
|
147,7 → 148,7 |
return; |
|
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", |
reg, val); |
i915_mmio_reg_offset(reg), val); |
I915_WRITE(reg, 0xffffffff); |
POSTING_READ(reg); |
I915_WRITE(reg, 0xffffffff); |
214,7 → 215,7 |
* @interrupt_mask: mask of interrupt bits to update |
* @enabled_irq_mask: mask of interrupt bits to enable |
*/ |
static void ilk_update_display_irq(struct drm_i915_private *dev_priv, |
void ilk_update_display_irq(struct drm_i915_private *dev_priv, |
uint32_t interrupt_mask, |
uint32_t enabled_irq_mask) |
{ |
238,18 → 239,6 |
} |
} |
|
/* Convenience wrapper: unmask (enable) @mask bits in DEIMR via ilk_update_display_irq(). */ |
void |
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) |
{ |
	ilk_update_display_irq(dev_priv, mask, mask); |
} |
|
/* Convenience wrapper: mask (disable) @mask bits in DEIMR via ilk_update_display_irq(). */ |
void |
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) |
{ |
	ilk_update_display_irq(dev_priv, mask, 0); |
} |
|
/** |
* ilk_update_gt_irq - update GTIMR |
* @dev_priv: driver private |
283,17 → 272,17 |
ilk_update_gt_irq(dev_priv, mask, 0); |
} |
|
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv) |
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) |
{ |
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; |
} |
|
static u32 gen6_pm_imr(struct drm_i915_private *dev_priv) |
static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) |
{ |
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; |
} |
|
static u32 gen6_pm_ier(struct drm_i915_private *dev_priv) |
static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) |
{ |
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER; |
} |
350,7 → 339,7 |
void gen6_reset_rps_interrupts(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t reg = gen6_pm_iir(dev_priv); |
i915_reg_t reg = gen6_pm_iir(dev_priv); |
|
spin_lock_irq(&dev_priv->irq_lock); |
I915_WRITE(reg, dev_priv->pm_rps_events); |
401,7 → 390,6 |
dev_priv->rps.interrupts_enabled = false; |
spin_unlock_irq(&dev_priv->irq_lock); |
|
cancel_work_sync(&dev_priv->rps.work); |
|
spin_lock_irq(&dev_priv->irq_lock); |
|
448,6 → 436,38 |
} |
|
/** |
* bdw_update_pipe_irq - update DE pipe interrupt |
* @dev_priv: driver private |
* @pipe: pipe whose interrupt to update |
* @interrupt_mask: mask of interrupt bits to update |
* @enabled_irq_mask: mask of interrupt bits to enable |
*/ |
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, |
			 enum pipe pipe, |
			 uint32_t interrupt_mask, |
			 uint32_t enabled_irq_mask) |
{ |
	uint32_t new_val; |
 |
	/* Caller must hold irq_lock: de_irq_mask[] and the IMR write must |
	 * stay in sync. */ |
	assert_spin_locked(&dev_priv->irq_lock); |
 |
	/* Enabling a bit that is not being updated makes no sense. */ |
	WARN_ON(enabled_irq_mask & ~interrupt_mask); |
 |
	if (WARN_ON(!intel_irqs_enabled(dev_priv))) |
		return; |
 |
	/* IMR semantics: a set bit masks (disables) the interrupt. Clear |
	 * every bit being updated, then re-set the ones NOT enabled. */ |
	new_val = dev_priv->de_irq_mask[pipe]; |
	new_val &= ~interrupt_mask; |
	new_val |= (~enabled_irq_mask & interrupt_mask); |
 |
	/* Only touch the hardware when the mask actually changes. */ |
	if (new_val != dev_priv->de_irq_mask[pipe]) { |
		dev_priv->de_irq_mask[pipe] = new_val; |
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); |
		/* Read back to flush the IMR write to the hardware. */ |
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); |
	} |
} |
|
/** |
* ibx_display_interrupt_update - update SDEIMR |
* @dev_priv: driver private |
* @interrupt_mask: mask of interrupt bits to update |
476,7 → 496,7 |
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, |
u32 enable_mask, u32 status_mask) |
{ |
u32 reg = PIPESTAT(pipe); |
i915_reg_t reg = PIPESTAT(pipe); |
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; |
|
assert_spin_locked(&dev_priv->irq_lock); |
503,7 → 523,7 |
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, |
u32 enable_mask, u32 status_mask) |
{ |
u32 reg = PIPESTAT(pipe); |
i915_reg_t reg = PIPESTAT(pipe); |
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; |
|
assert_spin_locked(&dev_priv->irq_lock); |
559,7 → 579,7 |
{ |
u32 enable_mask; |
|
if (IS_VALLEYVIEW(dev_priv->dev)) |
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, |
status_mask); |
else |
573,7 → 593,7 |
{ |
u32 enable_mask; |
|
if (IS_VALLEYVIEW(dev_priv->dev)) |
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, |
status_mask); |
else |
664,8 → 684,7 |
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
unsigned long high_frame; |
unsigned long low_frame; |
i915_reg_t high_frame, low_frame; |
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; |
struct intel_crtc *intel_crtc = |
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
716,9 → 735,7 |
return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); |
} |
|
/* raw reads, only for fast reads of display block, no need for forcewake etc. */ |
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) |
|
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */ |
static int __intel_get_crtc_scanline(struct intel_crtc *crtc) |
{ |
struct drm_device *dev = crtc->base.dev; |
732,9 → 749,9 |
vtotal /= 2; |
|
if (IS_GEN2(dev)) |
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; |
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; |
else |
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; |
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; |
|
/* |
* On HSW, the DSL reg (0x70000) appears to return 0 if we |
826,7 → 843,7 |
* We can split this into vertical and horizontal |
* scanout position. |
*/ |
position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; |
position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; |
|
/* convert to pixel counts */ |
vbl_start *= htotal; |
993,18 → 1010,46 |
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); |
} |
|
/* |
 * Return true when the combined render+media C0 residency accumulated |
 * between @old and @now meets or exceeds @threshold percent of the |
 * elapsed CZ-clock time (mul starts at 100, so @threshold is read as a |
 * percentage). Returns false when @old was never initialized |
 * (cz_clock == 0), i.e. there is no valid baseline to compare against. |
 */ |
static bool vlv_c0_above(struct drm_i915_private *dev_priv, |
			 const struct intel_rps_ei *old, |
			 const struct intel_rps_ei *now, |
			 int threshold) |
{ |
	u64 time, c0; |
	unsigned int mul = 100; |
 |
	if (old->cz_clock == 0) |
		return false; |
 |
	/* NOTE(review): when the counters run in the high range, scale the |
	 * comparison by 256 to match — presumably the hardware counts in |
	 * coarser units in that mode; confirm against VLV_COUNTER_CONTROL |
	 * docs. */ |
	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) |
		mul <<= 8; |
 |
	time = now->cz_clock - old->cz_clock; |
	time *= threshold * dev_priv->czclk_freq; |
 |
	/* Workload can be split between render + media, e.g. SwapBuffers |
	 * being blitted in X after being rendered in mesa. To account for |
	 * this we need to combine both engines into our activity counter. |
	 */ |
	c0 = now->render_c0 - old->render_c0; |
	c0 += now->media_c0 - old->media_c0; |
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; |
 |
	return c0 >= time; |
} |
|
/* |
 * Reset the RPS evaluation-interval baselines: drop any stale snapshot |
 * and take a fresh C0 residency reading to serve as the starting point |
 * for both the up and down frequency decisions. |
 */ |
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) |
{ |
	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei)); |
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); |
	/* Both baselines start from the same reading. */ |
	dev_priv->rps.up_ei = dev_priv->rps.down_ei; |
} |
|
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) |
{ |
const struct intel_rps_ei *prev = &dev_priv->rps.ei; |
struct intel_rps_ei now; |
u32 events = 0; |
|
if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) |
if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) |
return 0; |
|
vlv_c0_read(dev_priv, &now); |
1011,33 → 1056,22 |
if (now.cz_clock == 0) |
return 0; |
|
if (prev->cz_clock) { |
u64 time, c0; |
unsigned int mul; |
if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { |
if (!vlv_c0_above(dev_priv, |
&dev_priv->rps.down_ei, &now, |
dev_priv->rps.down_threshold)) |
events |= GEN6_PM_RP_DOWN_THRESHOLD; |
dev_priv->rps.down_ei = now; |
} |
|
mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */ |
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) |
mul <<= 8; |
|
time = now.cz_clock - prev->cz_clock; |
time *= dev_priv->czclk_freq; |
|
/* Workload can be split between render + media, |
* e.g. SwapBuffers being blitted in X after being rendered in |
* mesa. To account for this we need to combine both engines |
* into our activity counter. |
*/ |
c0 = now.render_c0 - prev->render_c0; |
c0 += now.media_c0 - prev->media_c0; |
c0 *= mul; |
|
if (c0 > time * dev_priv->rps.up_threshold) |
events = GEN6_PM_RP_UP_THRESHOLD; |
else if (c0 < time * dev_priv->rps.down_threshold) |
events = GEN6_PM_RP_DOWN_THRESHOLD; |
if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { |
if (vlv_c0_above(dev_priv, |
&dev_priv->rps.up_ei, &now, |
dev_priv->rps.up_threshold)) |
events |= GEN6_PM_RP_UP_THRESHOLD; |
dev_priv->rps.up_ei = now; |
} |
|
dev_priv->rps.ei = now; |
return events; |
} |
|
1067,6 → 1101,14 |
spin_unlock_irq(&dev_priv->irq_lock); |
return; |
} |
|
/* |
* The RPS work is synced during runtime suspend, we don't require a |
* wakeref. TODO: instead of disabling the asserts make sure that we |
* always hold an RPM reference while the work is running. |
*/ |
DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); |
|
pm_iir = dev_priv->rps.pm_iir; |
dev_priv->rps.pm_iir = 0; |
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ |
1079,7 → 1121,7 |
WARN_ON(pm_iir & ~dev_priv->pm_rps_events); |
|
if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) |
return; |
goto out; |
|
mutex_lock(&dev_priv->rps.hw_lock); |
|
1134,6 → 1176,8 |
intel_set_rps(dev_priv->dev, new_delay); |
|
mutex_unlock(&dev_priv->rps.hw_lock); |
out: |
ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); |
} |
|
|
1170,7 → 1214,7 |
POSTING_READ(GEN7_MISCCPCTL); |
|
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { |
u32 reg; |
i915_reg_t reg; |
|
slice--; |
if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) |
1178,7 → 1222,7 |
|
dev_priv->l3_parity.which_slice &= ~(1<<slice); |
|
reg = GEN7_L3CDERRST1 + (slice * 0x200); |
reg = GEN7_L3CDERRST1(slice); |
|
error_status = I915_READ(reg); |
row = GEN7_PARITY_ERROR_ROW(error_status); |
1258,6 → 1302,15 |
ivybridge_parity_error_irq_handler(dev, gt_iir); |
} |
|
/* |
 * Dispatch the per-engine command-streamer interrupts found in a GT IIR |
 * value: a user interrupt wakes request waiters via notify_ring(), and a |
 * context-switch interrupt kicks the execlists handler. @test_shift |
 * selects this engine's bit lane within the shared IIR register. |
 */ |
static __always_inline void |
gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift) |
{ |
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) |
		notify_ring(ring); |
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) |
		intel_lrc_irq_handler(ring); |
} |
|
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, |
u32 master_ctl) |
{ |
1264,64 → 1317,54 |
irqreturn_t ret = IRQ_NONE; |
|
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { |
u32 tmp = I915_READ_FW(GEN8_GT_IIR(0)); |
if (tmp) { |
I915_WRITE_FW(GEN8_GT_IIR(0), tmp); |
u32 iir = I915_READ_FW(GEN8_GT_IIR(0)); |
if (iir) { |
I915_WRITE_FW(GEN8_GT_IIR(0), iir); |
ret = IRQ_HANDLED; |
|
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) |
intel_lrc_irq_handler(&dev_priv->ring[RCS]); |
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) |
notify_ring(&dev_priv->ring[RCS]); |
gen8_cs_irq_handler(&dev_priv->ring[RCS], |
iir, GEN8_RCS_IRQ_SHIFT); |
|
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) |
intel_lrc_irq_handler(&dev_priv->ring[BCS]); |
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) |
notify_ring(&dev_priv->ring[BCS]); |
gen8_cs_irq_handler(&dev_priv->ring[BCS], |
iir, GEN8_BCS_IRQ_SHIFT); |
} else |
DRM_ERROR("The master control interrupt lied (GT0)!\n"); |
} |
|
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { |
u32 tmp = I915_READ_FW(GEN8_GT_IIR(1)); |
if (tmp) { |
I915_WRITE_FW(GEN8_GT_IIR(1), tmp); |
u32 iir = I915_READ_FW(GEN8_GT_IIR(1)); |
if (iir) { |
I915_WRITE_FW(GEN8_GT_IIR(1), iir); |
ret = IRQ_HANDLED; |
|
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) |
intel_lrc_irq_handler(&dev_priv->ring[VCS]); |
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) |
notify_ring(&dev_priv->ring[VCS]); |
gen8_cs_irq_handler(&dev_priv->ring[VCS], |
iir, GEN8_VCS1_IRQ_SHIFT); |
|
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) |
intel_lrc_irq_handler(&dev_priv->ring[VCS2]); |
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) |
notify_ring(&dev_priv->ring[VCS2]); |
gen8_cs_irq_handler(&dev_priv->ring[VCS2], |
iir, GEN8_VCS2_IRQ_SHIFT); |
} else |
DRM_ERROR("The master control interrupt lied (GT1)!\n"); |
} |
|
if (master_ctl & GEN8_GT_VECS_IRQ) { |
u32 tmp = I915_READ_FW(GEN8_GT_IIR(3)); |
if (tmp) { |
I915_WRITE_FW(GEN8_GT_IIR(3), tmp); |
u32 iir = I915_READ_FW(GEN8_GT_IIR(3)); |
if (iir) { |
I915_WRITE_FW(GEN8_GT_IIR(3), iir); |
ret = IRQ_HANDLED; |
|
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) |
intel_lrc_irq_handler(&dev_priv->ring[VECS]); |
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) |
notify_ring(&dev_priv->ring[VECS]); |
gen8_cs_irq_handler(&dev_priv->ring[VECS], |
iir, GEN8_VECS_IRQ_SHIFT); |
} else |
DRM_ERROR("The master control interrupt lied (GT3)!\n"); |
} |
|
if (master_ctl & GEN8_GT_PM_IRQ) { |
u32 tmp = I915_READ_FW(GEN8_GT_IIR(2)); |
if (tmp & dev_priv->pm_rps_events) { |
u32 iir = I915_READ_FW(GEN8_GT_IIR(2)); |
if (iir & dev_priv->pm_rps_events) { |
I915_WRITE_FW(GEN8_GT_IIR(2), |
tmp & dev_priv->pm_rps_events); |
iir & dev_priv->pm_rps_events); |
ret = IRQ_HANDLED; |
gen6_rps_irq_handler(dev_priv, tmp); |
gen6_rps_irq_handler(dev_priv, iir); |
} else |
DRM_ERROR("The master control interrupt lied (PM)!\n"); |
} |
1593,7 → 1636,7 |
|
spin_lock(&dev_priv->irq_lock); |
for_each_pipe(dev_priv, pipe) { |
int reg; |
i915_reg_t reg; |
u32 mask, iir_bit = 0; |
|
/* |
1674,7 → 1717,7 |
*/ |
POSTING_READ(PORT_HOTPLUG_STAT); |
|
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { |
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; |
|
if (hotplug_trigger) { |
1709,6 → 1752,9 |
if (!intel_irqs_enabled(dev_priv)) |
return IRQ_NONE; |
|
/* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
disable_rpm_wakeref_asserts(dev_priv); |
|
while (true) { |
/* Find, clear, then process each source of interrupt */ |
|
1743,6 → 1789,8 |
} |
|
out: |
enable_rpm_wakeref_asserts(dev_priv); |
|
return ret; |
} |
|
1756,6 → 1804,9 |
if (!intel_irqs_enabled(dev_priv)) |
return IRQ_NONE; |
|
/* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
disable_rpm_wakeref_asserts(dev_priv); |
|
for (;;) { |
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; |
iir = I915_READ(VLV_IIR); |
1786,6 → 1837,8 |
POSTING_READ(GEN8_MASTER_IRQ); |
} |
|
enable_rpm_wakeref_asserts(dev_priv); |
|
return ret; |
} |
|
1795,8 → 1848,24 |
struct drm_i915_private *dev_priv = to_i915(dev); |
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; |
|
/* |
* Somehow the PCH doesn't seem to really ack the interrupt to the CPU |
* unless we touch the hotplug register, even if hotplug_trigger is |
* zero. Not acking leads to "The master control interrupt lied (SDE)!" |
* errors. |
*/ |
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); |
if (!hotplug_trigger) { |
u32 mask = PORTA_HOTPLUG_STATUS_MASK | |
PORTD_HOTPLUG_STATUS_MASK | |
PORTC_HOTPLUG_STATUS_MASK | |
PORTB_HOTPLUG_STATUS_MASK; |
dig_hotplug_reg &= ~mask; |
} |
|
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); |
if (!hotplug_trigger) |
return; |
|
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, |
dig_hotplug_reg, hpd, |
1811,7 → 1880,6 |
int pipe; |
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; |
|
if (hotplug_trigger) |
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); |
|
if (pch_iir & SDE_AUDIO_POWER_MASK) { |
1905,7 → 1973,6 |
int pipe; |
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; |
|
if (hotplug_trigger) |
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); |
|
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { |
2102,6 → 2169,9 |
if (!intel_irqs_enabled(dev_priv)) |
return IRQ_NONE; |
|
/* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
disable_rpm_wakeref_asserts(dev_priv); |
|
/* We get interrupts on unclaimed registers, so check for this before we |
* do any I915_{READ,WRITE}. */ |
intel_uncore_check_errors(dev); |
2160,6 → 2230,9 |
POSTING_READ(SDEIER); |
} |
|
/* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
enable_rpm_wakeref_asserts(dev_priv); |
|
return ret; |
} |
|
2192,6 → 2265,9 |
if (!intel_irqs_enabled(dev_priv)) |
return IRQ_NONE; |
|
/* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
disable_rpm_wakeref_asserts(dev_priv); |
|
if (INTEL_INFO(dev_priv)->gen >= 9) |
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | |
GEN9_AUX_CHANNEL_D; |
2199,7 → 2275,7 |
master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); |
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; |
if (!master_ctl) |
return IRQ_NONE; |
goto out; |
|
I915_WRITE_FW(GEN8_MASTER_IRQ, 0); |
|
2334,6 → 2410,9 |
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
POSTING_READ_FW(GEN8_MASTER_IRQ); |
|
out: |
enable_rpm_wakeref_asserts(dev_priv); |
|
return ret; |
} |
|
2393,6 → 2472,13 |
*/ |
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { |
DRM_DEBUG_DRIVER("resetting chip\n"); |
/* |
* In most cases it's guaranteed that we get here with an RPM |
* reference held, for example because there is a pending GPU |
* request that won't finish until the reset is done. This |
* isn't the case at least when we get here by doing a |
* simulated reset via debugs, so get an RPM reference. |
*/ |
intel_runtime_pm_get(dev_priv); |
|
/* |
2600,7 → 2686,7 |
DE_PIPE_VBLANK(pipe); |
|
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
ironlake_enable_display_irq(dev_priv, bit); |
ilk_enable_display_irq(dev_priv, bit); |
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
|
return 0; |
2625,10 → 2711,9 |
unsigned long irqflags; |
|
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; |
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); |
POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); |
bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); |
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
|
return 0; |
} |
|
2655,7 → 2740,7 |
DE_PIPE_VBLANK(pipe); |
|
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
ironlake_disable_display_irq(dev_priv, bit); |
ilk_disable_display_irq(dev_priv, bit); |
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
} |
|
2676,9 → 2761,7 |
unsigned long irqflags; |
|
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; |
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); |
POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); |
bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); |
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
} |
|
2917,6 → 3000,13 |
if (!i915.enable_hangcheck) |
return; |
|
/* |
* The hangcheck work is synced during runtime suspend, we don't |
* require a wakeref. TODO: instead of disabling the asserts make |
* sure that we hold a reference when this work is running. |
*/ |
DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); |
|
for_each_ring(ring, dev_priv, i) { |
u64 acthd; |
u32 seqno; |
3387,7 → 3477,7 |
* setup is guaranteed to run in single-threaded context. But we |
* need it to make the assert_spin_locked happy. */ |
spin_lock_irq(&dev_priv->irq_lock); |
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); |
spin_unlock_irq(&dev_priv->irq_lock); |
} |
|
3787,13 → 3877,18 |
u16 flip_mask = |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
irqreturn_t ret; |
|
if (!intel_irqs_enabled(dev_priv)) |
return IRQ_NONE; |
|
/* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
disable_rpm_wakeref_asserts(dev_priv); |
|
ret = IRQ_NONE; |
iir = I915_READ16(IIR); |
if (iir == 0) |
return IRQ_NONE; |
goto out; |
|
while (iir & ~flip_mask) { |
/* Can't rely on pipestat interrupt bit in iir as it might |
3806,7 → 3901,7 |
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); |
|
for_each_pipe(dev_priv, pipe) { |
int reg = PIPESTAT(pipe); |
i915_reg_t reg = PIPESTAT(pipe); |
pipe_stats[pipe] = I915_READ(reg); |
|
/* |
3842,8 → 3937,12 |
|
iir = new_iir; |
} |
ret = IRQ_HANDLED; |
|
return IRQ_HANDLED; |
out: |
enable_rpm_wakeref_asserts(dev_priv); |
|
return ret; |
} |
|
static void i8xx_irq_uninstall(struct drm_device * dev) |
3974,6 → 4073,9 |
if (!intel_irqs_enabled(dev_priv)) |
return IRQ_NONE; |
|
/* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
disable_rpm_wakeref_asserts(dev_priv); |
|
iir = I915_READ(IIR); |
do { |
bool irq_received = (iir & ~flip_mask) != 0; |
3989,7 → 4091,7 |
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); |
|
for_each_pipe(dev_priv, pipe) { |
int reg = PIPESTAT(pipe); |
i915_reg_t reg = PIPESTAT(pipe); |
pipe_stats[pipe] = I915_READ(reg); |
|
/* Clear the PIPE*STAT regs before the IIR */ |
4056,6 → 4158,8 |
iir = new_iir; |
} while (iir & ~flip_mask); |
|
enable_rpm_wakeref_asserts(dev_priv); |
|
return ret; |
} |
|
4195,6 → 4299,9 |
if (!intel_irqs_enabled(dev_priv)) |
return IRQ_NONE; |
|
/* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
disable_rpm_wakeref_asserts(dev_priv); |
|
iir = I915_READ(IIR); |
|
for (;;) { |
4211,7 → 4318,7 |
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); |
|
for_each_pipe(dev_priv, pipe) { |
int reg = PIPESTAT(pipe); |
i915_reg_t reg = PIPESTAT(pipe); |
pipe_stats[pipe] = I915_READ(reg); |
|
/* |
4280,6 → 4387,8 |
iir = new_iir; |
} |
|
enable_rpm_wakeref_asserts(dev_priv); |
|
return ret; |
} |
|
4323,9 → 4432,9 |
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
|
/* Let's track the enabled rps events */ |
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
if (IS_VALLEYVIEW(dev_priv)) |
/* WaGsvRC0ResidencyMethod:vlv */ |
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; |
dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; |
else |
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; |
|