Subversion Repositories Kolibri OS

Rev 6937

  1. /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
  2.  */
  3. /*
  4.  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  5.  * All Rights Reserved.
  6.  *
  7.  * Permission is hereby granted, free of charge, to any person obtaining a
  8.  * copy of this software and associated documentation files (the
  9.  * "Software"), to deal in the Software without restriction, including
  10.  * without limitation the rights to use, copy, modify, merge, publish,
  11.  * distribute, sub license, and/or sell copies of the Software, and to
  12.  * permit persons to whom the Software is furnished to do so, subject to
  13.  * the following conditions:
  14.  *
  15.  * The above copyright notice and this permission notice (including the
  16.  * next paragraph) shall be included in all copies or substantial portions
  17.  * of the Software.
  18.  *
  19.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20.  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  21.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  22.  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  23.  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  24.  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  25.  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26.  *
  27.  */
  28.  
  29. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  30.  
  31. #include <linux/sysrq.h>
  32. #include <linux/slab.h>
  33. #include <linux/circ_buf.h>
  34. #include <drm/drmP.h>
  35. #include <drm/i915_drm.h>
  36. #include "i915_drv.h"
  37. #include "i915_trace.h"
  38. #include "intel_drv.h"
  39.  
  40. /**
  41.  * DOC: interrupt handling
  42.  *
  43.  * These functions provide the basic support for enabling and disabling the
  44.  * interrupt handling support. There's a lot more functionality in i915_irq.c
  45.  * and related files, but that will be described in separate chapters.
  46.  */
  47.  
  48. static const u32 hpd_ilk[HPD_NUM_PINS] = {
  49.         [HPD_PORT_A] = DE_DP_A_HOTPLUG,
  50. };
  51.  
  52. static const u32 hpd_ivb[HPD_NUM_PINS] = {
  53.         [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
  54. };
  55.  
  56. static const u32 hpd_bdw[HPD_NUM_PINS] = {
  57.         [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
  58. };
  59.  
  60. static const u32 hpd_ibx[HPD_NUM_PINS] = {
  61.         [HPD_CRT] = SDE_CRT_HOTPLUG,
  62.         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
  63.         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
  64.         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
  65.         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
  66. };
  67.  
  68. static const u32 hpd_cpt[HPD_NUM_PINS] = {
  69.         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
  70.         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
  71.         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
  72.         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
  73.         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
  74. };
  75.  
  76. static const u32 hpd_spt[HPD_NUM_PINS] = {
  77.         [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
  78.         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
  79.         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
  80.         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
  81.         [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
  82. };
  83.  
  84. static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
  85.         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
  86.         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
  87.         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
  88.         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
  89.         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
  90.         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
  91. };
  92.  
  93. static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
  94.         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
  95.         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
  96.         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
  97.         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
  98.         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
  99.         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
  100. };
  101.  
  102. static const u32 hpd_status_i915[HPD_NUM_PINS] = {
  103.         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
  104.         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
  105.         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
  106.         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
  107.         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
  108.         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
  109. };
  110.  
  111. /* BXT hpd list */
  112. static const u32 hpd_bxt[HPD_NUM_PINS] = {
  113.         [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
  114.         [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
  115.         [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
  116. };
  117.  
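/*
 * Each hpd_* table above maps an HPD_* pin to that platform's hotplug
 * enable/status bit; the hotplug setup and IRQ code pick whichever
 * table matches the hardware generation and PCH.
 */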
  118. /* IIR can theoretically queue up two events. Be paranoid. */
  119. #define GEN8_IRQ_RESET_NDX(type, which) do { \
  120.         I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
  121.         POSTING_READ(GEN8_##type##_IMR(which)); \
  122.         I915_WRITE(GEN8_##type##_IER(which), 0); \
  123.         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
  124.         POSTING_READ(GEN8_##type##_IIR(which)); \
  125.         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
  126.         POSTING_READ(GEN8_##type##_IIR(which)); \
  127. } while (0)
  128.  
  129. #define GEN5_IRQ_RESET(type) do { \
  130.         I915_WRITE(type##IMR, 0xffffffff); \
  131.         POSTING_READ(type##IMR); \
  132.         I915_WRITE(type##IER, 0); \
  133.         I915_WRITE(type##IIR, 0xffffffff); \
  134.         POSTING_READ(type##IIR); \
  135.         I915_WRITE(type##IIR, 0xffffffff); \
  136.         POSTING_READ(type##IIR); \
  137. } while (0)
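/*
 * Both reset macros follow the same sequence: mask everything in IMR,
 * disable generation in IER, then clear IIR twice since a second event
 * can be latched behind the one just cleared (see the note above). The
 * POSTING_READs flush each posted MMIO write before the next access.
 */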
  138.  
  139. /*
  140.  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
  141.  */
  142. static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
  143.                                     i915_reg_t reg)
  144. {
  145.         u32 val = I915_READ(reg);
  146.  
  147.         if (val == 0)
  148.                 return;
  149.  
  150.         WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
  151.              i915_mmio_reg_offset(reg), val);
  152.         I915_WRITE(reg, 0xffffffff);
  153.         POSTING_READ(reg);
  154.         I915_WRITE(reg, 0xffffffff);
  155.         POSTING_READ(reg);
  156. }
  157.  
  158. #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
  159.         gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
  160.         I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
  161.         I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
  162.         POSTING_READ(GEN8_##type##_IMR(which)); \
  163. } while (0)
  164.  
  165. #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
  166.         gen5_assert_iir_is_zero(dev_priv, type##IIR); \
  167.         I915_WRITE(type##IER, (ier_val)); \
  168.         I915_WRITE(type##IMR, (imr_val)); \
  169.         POSTING_READ(type##IMR); \
  170. } while (0)
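/*
 * The init macros deliberately check rather than clear IIR: the reset
 * macros run at preinstall/uninstall, so a non-zero IIR at this point
 * means an interrupt fired while everything should have been disabled,
 * which gen5_assert_iir_is_zero() warns about and then clears.
 */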
  171.  
  172. static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
  173.  
  174. /* For display hotplug interrupt */
  175. static inline void
  176. i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
  177.                                      uint32_t mask,
  178.                                      uint32_t bits)
  179. {
  180.         uint32_t val;
  181.  
  182.         assert_spin_locked(&dev_priv->irq_lock);
  183.         WARN_ON(bits & ~mask);
  184.  
  185.         val = I915_READ(PORT_HOTPLUG_EN);
  186.         val &= ~mask;
  187.         val |= bits;
  188.         I915_WRITE(PORT_HOTPLUG_EN, val);
  189. }
  190.  
  191. /**
  192.  * i915_hotplug_interrupt_update - update hotplug interrupt enable
  193.  * @dev_priv: driver private
  194.  * @mask: bits to update
  195.  * @bits: bits to enable
  196.  * NOTE: the HPD enable bits are modified both inside and outside
  197.  * of an interrupt context. To avoid read-modify-write cycles
  198.  * interfering, these bits are protected by a spinlock. Since this
  199.  * function is usually not called from a context where the lock is
  200.  * held already, this function acquires the lock itself. A non-locking
  201.  * version is also available.
  202.  */
  203. void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
  204.                                    uint32_t mask,
  205.                                    uint32_t bits)
  206. {
  207.         spin_lock_irq(&dev_priv->irq_lock);
  208.         i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
  209.         spin_unlock_irq(&dev_priv->irq_lock);
  210. }
  211.  
  212. /**
  213.  * ilk_update_display_irq - update DEIMR
  214.  * @dev_priv: driver private
  215.  * @interrupt_mask: mask of interrupt bits to update
  216.  * @enabled_irq_mask: mask of interrupt bits to enable
  217.  */
  218. void ilk_update_display_irq(struct drm_i915_private *dev_priv,
  219.                             uint32_t interrupt_mask,
  220.                             uint32_t enabled_irq_mask)
  221. {
  222.         uint32_t new_val;
  223.  
  224.         assert_spin_locked(&dev_priv->irq_lock);
  225.  
  226.         WARN_ON(enabled_irq_mask & ~interrupt_mask);
  227.  
  228.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  229.                 return;
  230.  
  231.         new_val = dev_priv->irq_mask;
  232.         new_val &= ~interrupt_mask;
  233.         new_val |= (~enabled_irq_mask & interrupt_mask);
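        /*
         * DEIMR uses "1 = masked": for example, interrupt_mask = 0x0c
         * with enabled_irq_mask = 0x04 clears bit 2 (unmasking it) and
         * sets bit 3 (masking it); bits outside interrupt_mask keep
         * their previous value.
         */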
  234.  
  235.         if (new_val != dev_priv->irq_mask) {
  236.                 dev_priv->irq_mask = new_val;
  237.                 I915_WRITE(DEIMR, dev_priv->irq_mask);
  238.                 POSTING_READ(DEIMR);
  239.         }
  240. }
  241.  
  242. /**
  243.  * ilk_update_gt_irq - update GTIMR
  244.  * @dev_priv: driver private
  245.  * @interrupt_mask: mask of interrupt bits to update
  246.  * @enabled_irq_mask: mask of interrupt bits to enable
  247.  */
  248. static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
  249.                               uint32_t interrupt_mask,
  250.                               uint32_t enabled_irq_mask)
  251. {
  252.         assert_spin_locked(&dev_priv->irq_lock);
  253.  
  254.         WARN_ON(enabled_irq_mask & ~interrupt_mask);
  255.  
  256.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  257.                 return;
  258.  
  259.         dev_priv->gt_irq_mask &= ~interrupt_mask;
  260.         dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
  261.         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
  262.         POSTING_READ(GTIMR);
  263. }
  264.  
  265. void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
  266. {
  267.         ilk_update_gt_irq(dev_priv, mask, mask);
  268. }
  269.  
  270. void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
  271. {
  272.         ilk_update_gt_irq(dev_priv, mask, 0);
  273. }
  274.  
  275. static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
  276. {
  277.         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
  278. }
  279.  
  280. static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
  281. {
  282.         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
  283. }
  284.  
  285. static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
  286. {
  287.         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
  288. }
  289.  
  290. /**
  291.  * snb_update_pm_irq - update GEN6_PMIMR
  292.  * @dev_priv: driver private
  293.  * @interrupt_mask: mask of interrupt bits to update
  294.  * @enabled_irq_mask: mask of interrupt bits to enable
  295.  */
  296. static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
  297.                               uint32_t interrupt_mask,
  298.                               uint32_t enabled_irq_mask)
  299. {
  300.         uint32_t new_val;
  301.  
  302.         WARN_ON(enabled_irq_mask & ~interrupt_mask);
  303.  
  304.         assert_spin_locked(&dev_priv->irq_lock);
  305.  
  306.         new_val = dev_priv->pm_irq_mask;
  307.         new_val &= ~interrupt_mask;
  308.         new_val |= (~enabled_irq_mask & interrupt_mask);
  309.  
  310.         if (new_val != dev_priv->pm_irq_mask) {
  311.                 dev_priv->pm_irq_mask = new_val;
  312.                 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
  313.                 POSTING_READ(gen6_pm_imr(dev_priv));
  314.         }
  315. }
  316.  
  317. void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
  318. {
  319.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  320.                 return;
  321.  
  322.         snb_update_pm_irq(dev_priv, mask, mask);
  323. }
  324.  
  325. static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
  326.                                   uint32_t mask)
  327. {
  328.         snb_update_pm_irq(dev_priv, mask, 0);
  329. }
  330.  
  331. void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
  332. {
  333.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  334.                 return;
  335.  
  336.         __gen6_disable_pm_irq(dev_priv, mask);
  337. }
  338.  
  339. void gen6_reset_rps_interrupts(struct drm_device *dev)
  340. {
  341.         struct drm_i915_private *dev_priv = dev->dev_private;
  342.         i915_reg_t reg = gen6_pm_iir(dev_priv);
  343.  
  344.         spin_lock_irq(&dev_priv->irq_lock);
  345.         I915_WRITE(reg, dev_priv->pm_rps_events);
  346.         I915_WRITE(reg, dev_priv->pm_rps_events);
  347.         POSTING_READ(reg);
  348.         dev_priv->rps.pm_iir = 0;
  349.         spin_unlock_irq(&dev_priv->irq_lock);
  350. }
  351.  
  352. void gen6_enable_rps_interrupts(struct drm_device *dev)
  353. {
  354.         struct drm_i915_private *dev_priv = dev->dev_private;
  355.  
  356.         spin_lock_irq(&dev_priv->irq_lock);
  357.  
  358.         WARN_ON(dev_priv->rps.pm_iir);
  359.         WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
  360.         dev_priv->rps.interrupts_enabled = true;
  361.         I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
  362.                                 dev_priv->pm_rps_events);
  363.         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
  364.  
  365.         spin_unlock_irq(&dev_priv->irq_lock);
  366. }
  367.  
  368. u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
  369. {
  370.         /*
  371.          * SNB and IVB can hard hang (and VLV and CHV may) on a looping
  372.          * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
  373.          *
  374.          * TODO: verify if this can be reproduced on VLV,CHV.
  375.          */
  376.         if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
  377.                 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
  378.  
  379.         if (INTEL_INFO(dev_priv)->gen >= 8)
  380.                 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
  381.  
  382.         return mask;
  383. }
  384.  
  385. void gen6_disable_rps_interrupts(struct drm_device *dev)
  386. {
  387.         struct drm_i915_private *dev_priv = dev->dev_private;
  388.  
  389.         spin_lock_irq(&dev_priv->irq_lock);
  390.         dev_priv->rps.interrupts_enabled = false;
  391.         spin_unlock_irq(&dev_priv->irq_lock);
  392.  
  393.  
  394.         spin_lock_irq(&dev_priv->irq_lock);
  395.  
  396.         I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
  397.  
  398.         __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
  399.         I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
  400.                                 ~dev_priv->pm_rps_events);
  401.  
  402.         spin_unlock_irq(&dev_priv->irq_lock);
  403.  
  404.         synchronize_irq(dev->irq);
  405. }
  406.  
  407. /**
  408.  * bdw_update_port_irq - update DE port interrupt
  409.  * @dev_priv: driver private
  410.  * @interrupt_mask: mask of interrupt bits to update
  411.  * @enabled_irq_mask: mask of interrupt bits to enable
  412.  */
  413. static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
  414.                                 uint32_t interrupt_mask,
  415.                                 uint32_t enabled_irq_mask)
  416. {
  417.         uint32_t new_val;
  418.         uint32_t old_val;
  419.  
  420.         assert_spin_locked(&dev_priv->irq_lock);
  421.  
  422.         WARN_ON(enabled_irq_mask & ~interrupt_mask);
  423.  
  424.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  425.                 return;
  426.  
  427.         old_val = I915_READ(GEN8_DE_PORT_IMR);
  428.  
  429.         new_val = old_val;
  430.         new_val &= ~interrupt_mask;
  431.         new_val |= (~enabled_irq_mask & interrupt_mask);
  432.  
  433.         if (new_val != old_val) {
  434.                 I915_WRITE(GEN8_DE_PORT_IMR, new_val);
  435.                 POSTING_READ(GEN8_DE_PORT_IMR);
  436.         }
  437. }
  438.  
  439. /**
  440.  * bdw_update_pipe_irq - update DE pipe interrupt
  441.  * @dev_priv: driver private
  442.  * @pipe: pipe whose interrupt to update
  443.  * @interrupt_mask: mask of interrupt bits to update
  444.  * @enabled_irq_mask: mask of interrupt bits to enable
  445.  */
  446. void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
  447.                          enum pipe pipe,
  448.                          uint32_t interrupt_mask,
  449.                          uint32_t enabled_irq_mask)
  450. {
  451.         uint32_t new_val;
  452.  
  453.         assert_spin_locked(&dev_priv->irq_lock);
  454.  
  455.         WARN_ON(enabled_irq_mask & ~interrupt_mask);
  456.  
  457.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  458.                 return;
  459.  
  460.         new_val = dev_priv->de_irq_mask[pipe];
  461.         new_val &= ~interrupt_mask;
  462.         new_val |= (~enabled_irq_mask & interrupt_mask);
  463.  
  464.         if (new_val != dev_priv->de_irq_mask[pipe]) {
  465.                 dev_priv->de_irq_mask[pipe] = new_val;
  466.                 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
  467.                 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
  468.         }
  469. }
  470.  
  471. /**
  472.  * ibx_display_interrupt_update - update SDEIMR
  473.  * @dev_priv: driver private
  474.  * @interrupt_mask: mask of interrupt bits to update
  475.  * @enabled_irq_mask: mask of interrupt bits to enable
  476.  */
  477. void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
  478.                                   uint32_t interrupt_mask,
  479.                                   uint32_t enabled_irq_mask)
  480. {
  481.         uint32_t sdeimr = I915_READ(SDEIMR);
  482.         sdeimr &= ~interrupt_mask;
  483.         sdeimr |= (~enabled_irq_mask & interrupt_mask);
  484.  
  485.         WARN_ON(enabled_irq_mask & ~interrupt_mask);
  486.  
  487.         assert_spin_locked(&dev_priv->irq_lock);
  488.  
  489.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  490.                 return;
  491.  
  492.         I915_WRITE(SDEIMR, sdeimr);
  493.         POSTING_READ(SDEIMR);
  494. }
  495.  
  496. static void
  497. __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
  498.                        u32 enable_mask, u32 status_mask)
  499. {
  500.         i915_reg_t reg = PIPESTAT(pipe);
  501.         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
  502.  
  503.         assert_spin_locked(&dev_priv->irq_lock);
  504.         WARN_ON(!intel_irqs_enabled(dev_priv));
  505.  
  506.         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
  507.                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
  508.                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
  509.                       pipe_name(pipe), enable_mask, status_mask))
  510.                 return;
  511.  
  512.         if ((pipestat & enable_mask) == enable_mask)
  513.                 return;
  514.  
  515.         dev_priv->pipestat_irq_mask[pipe] |= status_mask;
  516.  
  517.         /* Enable the interrupt, clear any pending status */
  518.         pipestat |= enable_mask | status_mask;
  519.         I915_WRITE(reg, pipestat);
  520.         POSTING_READ(reg);
  521. }
  522.  
  523. static void
  524. __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
  525.                         u32 enable_mask, u32 status_mask)
  526. {
  527.         i915_reg_t reg = PIPESTAT(pipe);
  528.         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
  529.  
  530.         assert_spin_locked(&dev_priv->irq_lock);
  531.         WARN_ON(!intel_irqs_enabled(dev_priv));
  532.  
  533.         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
  534.                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
  535.                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
  536.                       pipe_name(pipe), enable_mask, status_mask))
  537.                 return;
  538.  
  539.         if ((pipestat & enable_mask) == 0)
  540.                 return;
  541.  
  542.         dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
  543.  
  544.         pipestat &= ~enable_mask;
  545.         I915_WRITE(reg, pipestat);
  546.         POSTING_READ(reg);
  547. }
  548.  
  549. static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
  550. {
  551.         u32 enable_mask = status_mask << 16;
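        /*
         * In PIPESTAT the enable bits live in the high 16 bits, directly
         * above their status bits, so the common case is a plain shift;
         * the PSR, FIFO-underrun and sprite flip-done bits handled below
         * are the exceptions.
         */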
  552.  
  553.         /*
  554.          * On pipe A we don't support the PSR interrupt yet,
  555.          * on pipes B and C the same bit MBZ (must be zero).
  556.          */
  557.         if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
  558.                 return 0;
  559.         /*
  560.          * On pipe B and C we don't support the PSR interrupt yet, on pipe
  561.          * A the same bit is for perf counters which we don't use either.
  562.          */
  563.         if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
  564.                 return 0;
  565.  
  566.         enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
  567.                          SPRITE0_FLIP_DONE_INT_EN_VLV |
  568.                          SPRITE1_FLIP_DONE_INT_EN_VLV);
  569.         if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
  570.                 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
  571.         if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
  572.                 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
  573.  
  574.         return enable_mask;
  575. }
  576.  
  577. void
  578. i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
  579.                      u32 status_mask)
  580. {
  581.         u32 enable_mask;
  582.  
  583.         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  584.                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
  585.                                                            status_mask);
  586.         else
  587.                 enable_mask = status_mask << 16;
  588.         __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
  589. }
  590.  
  591. void
  592. i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
  593.                       u32 status_mask)
  594. {
  595.         u32 enable_mask;
  596.  
  597.         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  598.                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
  599.                                                            status_mask);
  600.         else
  601.                 enable_mask = status_mask << 16;
  602.         __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
  603. }
  604.  
  605. /**
  606.  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
  607.  * @dev: drm device
  608.  */
  609. static void i915_enable_asle_pipestat(struct drm_device *dev)
  610. {
  611.         struct drm_i915_private *dev_priv = dev->dev_private;
  612.  
  613.         if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
  614.                 return;
  615.  
  616.         spin_lock_irq(&dev_priv->irq_lock);
  617.  
  618.         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
  619.         if (INTEL_INFO(dev)->gen >= 4)
  620.                 i915_enable_pipestat(dev_priv, PIPE_A,
  621.                                      PIPE_LEGACY_BLC_EVENT_STATUS);
  622.  
  623.         spin_unlock_irq(&dev_priv->irq_lock);
  624. }
  625.  
  626. /*
  627.  * This timing diagram depicts the video signal in and
  628.  * around the vertical blanking period.
  629.  *
  630.  * Assumptions about the fictitious mode used in this example:
  631.  *  vblank_start >= 3
  632.  *  vsync_start = vblank_start + 1
  633.  *  vsync_end = vblank_start + 2
  634.  *  vtotal = vblank_start + 3
  635.  *
  636.  *           start of vblank:
  637.  *           latch double buffered registers
  638.  *           increment frame counter (ctg+)
  639.  *           generate start of vblank interrupt (gen4+)
  640.  *           |
  641.  *           |          frame start:
  642.  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
  643.  *           |          may be shifted forward 1-3 extra lines via PIPECONF
  644.  *           |          |
  645.  *           |          |  start of vsync:
  646.  *           |          |  generate vsync interrupt
  647.  *           |          |  |
  648.  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
  649.  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
  650.  * ----va---> <-----------------vb--------------------> <--------va-------------
  651.  *       |          |       <----vs----->                     |
  652.  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
  653.  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
  654.  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
  655.  *       |          |                                         |
  656.  *       last visible pixel                                   first visible pixel
  657.  *                  |                                         increment frame counter (gen3/4)
  658.  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
  659.  *
  660.  * x  = horizontal active
  661.  * _  = horizontal blanking
  662.  * hs = horizontal sync
  663.  * va = vertical active
  664.  * vb = vertical blanking
  665.  * vs = vertical sync
  666.  * vbs = vblank_start (number)
  667.  *
  668.  * Summary:
  669.  * - most events happen at the start of horizontal sync
  670.  * - frame start happens at the start of horizontal blank, 1-4 lines
  671.  *   (depending on PIPECONF settings) after the start of vblank
  672.  * - gen3/4 pixel and frame counter are synchronized with the start
  673.  *   of horizontal active on the first line of vertical active
  674.  */
  675.  
  676. static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  677. {
  678.         /* Gen2 doesn't have a hardware frame counter */
  679.         return 0;
  680. }
  681.  
  682. /* Called from drm generic code, passed a 'crtc', which
  683.  * we use as a pipe index
  684.  */
  685. static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  686. {
  687.         struct drm_i915_private *dev_priv = dev->dev_private;
  688.         i915_reg_t high_frame, low_frame;
  689.         u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
  690.         struct intel_crtc *intel_crtc =
  691.                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
  692.         const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
  693.  
  694.         htotal = mode->crtc_htotal;
  695.         hsync_start = mode->crtc_hsync_start;
  696.         vbl_start = mode->crtc_vblank_start;
  697.         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
  698.                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
  699.  
  700.         /* Convert to pixel count */
  701.         vbl_start *= htotal;
  702.  
  703.         /* Start of vblank event occurs at start of hsync */
  704.         vbl_start -= htotal - hsync_start;
  705.  
  706.         high_frame = PIPEFRAME(pipe);
  707.         low_frame = PIPEFRAMEPIXEL(pipe);
  708.  
  709.         /*
  710.          * High & low register fields aren't synchronized, so make sure
  711.          * we get a low value that's stable across two reads of the high
  712.          * register.
  713.          */
  714.         do {
  715.                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
  716.                 low   = I915_READ(low_frame);
  717.                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
  718.         } while (high1 != high2);
  719.  
  720.         high1 >>= PIPE_FRAME_HIGH_SHIFT;
  721.         pixel = low & PIPE_PIXEL_MASK;
  722.         low >>= PIPE_FRAME_LOW_SHIFT;
  723.  
  724.         /*
  725.          * The frame counter increments at beginning of active.
  726.          * Cook up a vblank counter by also checking the pixel
  727.          * counter against vblank start.
  728.          */
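        /*
         * high1 holds the upper bits and low the 8-bit low field of the
         * 24-bit hardware frame counter; adding (pixel >= vbl_start)
         * bumps the result by one once the pixel counter has passed
         * vblank start, so the cooked counter changes at the start of
         * vblank instead of at the start of active.
         */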
  729.         return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
  730. }
  731.  
  732. static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  733. {
  734.         struct drm_i915_private *dev_priv = dev->dev_private;
  735.  
  736.         return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
  737. }
  738.  
  739. /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
  740. static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
  741. {
  742.         struct drm_device *dev = crtc->base.dev;
  743.         struct drm_i915_private *dev_priv = dev->dev_private;
  744.         const struct drm_display_mode *mode = &crtc->base.hwmode;
  745.         enum pipe pipe = crtc->pipe;
  746.         int position, vtotal;
  747.  
  748.         vtotal = mode->crtc_vtotal;
  749.         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
  750.                 vtotal /= 2;
  751.  
  752.         if (IS_GEN2(dev))
  753.                 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
  754.         else
  755.                 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
  756.  
  757.         /*
  758.          * On HSW, the DSL reg (0x70000) appears to return 0 if we
  759.          * read it just before the start of vblank.  So try it again
  760.          * so we don't accidentally end up spanning a vblank frame
  761.          * increment, causing the pipe_update_end() code to squawk at us.
  762.          *
  763.          * The nature of this problem means we can't simply check the ISR
  764.          * bit and return the vblank start value; nor can we use the scanline
  765.          * debug register in the transcoder as it appears to have the same
  766.          * problem.  We may need to extend this to include other platforms,
  767.          * but so far testing only shows the problem on HSW.
  768.          */
  769.         if (HAS_DDI(dev) && !position) {
  770.                 int i, temp;
  771.  
  772.                 for (i = 0; i < 100; i++) {
  773.                         udelay(1);
  774.                         temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
  775.                                 DSL_LINEMASK_GEN3;
  776.                         if (temp != position) {
  777.                                 position = temp;
  778.                                 break;
  779.                         }
  780.                 }
  781.         }
  782.  
  783.         /*
  784.          * See update_scanline_offset() for the details on the
  785.          * scanline_offset adjustment.
  786.          */
  787.         return (position + crtc->scanline_offset) % vtotal;
  788. }
  789.  
  790. static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
  791.                                     unsigned int flags, int *vpos, int *hpos,
  792.                                     ktime_t *stime, ktime_t *etime,
  793.                                     const struct drm_display_mode *mode)
  794. {
  795.         struct drm_i915_private *dev_priv = dev->dev_private;
  796.         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  797.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  798.         int position;
  799.         int vbl_start, vbl_end, hsync_start, htotal, vtotal;
  800.         bool in_vbl = true;
  801.         int ret = 0;
  802.         unsigned long irqflags;
  803.  
  804.         if (WARN_ON(!mode->crtc_clock)) {
  805.                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
  806.                                  "pipe %c\n", pipe_name(pipe));
  807.                 return 0;
  808.         }
  809.  
  810.         htotal = mode->crtc_htotal;
  811.         hsync_start = mode->crtc_hsync_start;
  812.         vtotal = mode->crtc_vtotal;
  813.         vbl_start = mode->crtc_vblank_start;
  814.         vbl_end = mode->crtc_vblank_end;
  815.  
  816.         if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
  817.                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
  818.                 vbl_end /= 2;
  819.                 vtotal /= 2;
  820.         }
  821.  
  822.         ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
  823.  
  824.         /*
  825.          * Lock uncore.lock, as we will do multiple timing critical raw
  826.          * register reads, potentially with preemption disabled, so the
  827.          * following code must not block on uncore.lock.
  828.          */
  829.         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  830.  
  831.         /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
  832.  
  833.         /* Get optional system timestamp before query. */
  834.         if (stime)
  835.                 *stime = ktime_get();
  836.  
  837.         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
  838.                 /* No obvious pixelcount register. Only query vertical
  839.                  * scanout position from Display scan line register.
  840.                  */
  841.                 position = __intel_get_crtc_scanline(intel_crtc);
  842.         } else {
  843.                 /* Have access to pixelcount since start of frame.
  844.                  * We can split this into vertical and horizontal
  845.                  * scanout position.
  846.                  */
  847.                 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
  848.  
  849.                 /* convert to pixel counts */
  850.                 vbl_start *= htotal;
  851.                 vbl_end *= htotal;
  852.                 vtotal *= htotal;
  853.  
  854.                 /*
  855.                  * In interlaced modes, the pixel counter counts all pixels,
  856.                  * so one field will have htotal more pixels. In order to avoid
  857.                  * the reported position from jumping backwards when the pixel
  858.                  * counter is beyond the length of the shorter field, just
  859.          * clamp the position to the length of the shorter field. This
  860.                  * matches how the scanline counter based position works since
  861.                  * the scanline counter doesn't count the two half lines.
  862.                  */
  863.                 if (position >= vtotal)
  864.                         position = vtotal - 1;
  865.  
  866.                 /*
  867.                  * Start of vblank interrupt is triggered at start of hsync,
  868.                  * just prior to the first active line of vblank. However we
  869.                  * consider lines to start at the leading edge of horizontal
  870.                  * active. So, should we get here before we've crossed into
  871.                  * the horizontal active of the first line in vblank, we would
  872.                  * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
  873.                  * always add htotal-hsync_start to the current pixel position.
  874.                  */
  875.                 position = (position + htotal - hsync_start) % vtotal;
  876.         }
  877.  
  878.         /* Get optional system timestamp after query. */
  879.         if (etime)
  880.                 *etime = ktime_get();
  881.  
  882.         /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
  883.  
  884.         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  885.  
  886.         in_vbl = position >= vbl_start && position < vbl_end;
  887.  
  888.         /*
  889.          * While in vblank, position will be negative,
  890.          * counting up towards 0 at vbl_end. Outside
  891.          * vblank, position will be positive, counting
  892.          * up from vbl_end.
  893.          */
  894.         if (position >= vbl_start)
  895.                 position -= vbl_end;
  896.         else
  897.                 position += vtotal - vbl_end;
  898.  
  899.         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
  900.                 *vpos = position;
  901.                 *hpos = 0;
  902.         } else {
  903.                 *vpos = position / htotal;
  904.                 *hpos = position - (*vpos * htotal);
  905.         }
  906.  
  907.         /* In vblank? */
  908.         if (in_vbl)
  909.                 ret |= DRM_SCANOUTPOS_IN_VBLANK;
  910.  
  911.         return ret;
  912. }
  913.  
  914. int intel_get_crtc_scanline(struct intel_crtc *crtc)
  915. {
  916.         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  917.         unsigned long irqflags;
  918.         int position;
  919.  
  920.         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  921.         position = __intel_get_crtc_scanline(crtc);
  922.         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  923.  
  924.         return position;
  925. }
  926.  
  927. static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
  928.                               int *max_error,
  929.                               struct timeval *vblank_time,
  930.                               unsigned flags)
  931. {
  932.         struct drm_crtc *crtc;
  933.  
  934.         if (pipe >= INTEL_INFO(dev)->num_pipes) {
  935.                 DRM_ERROR("Invalid crtc %u\n", pipe);
  936.                 return -EINVAL;
  937.         }
  938.  
  939.         /* Get drm_crtc to timestamp: */
  940.         crtc = intel_get_crtc_for_pipe(dev, pipe);
  941.         if (crtc == NULL) {
  942.                 DRM_ERROR("Invalid crtc %u\n", pipe);
  943.                 return -EINVAL;
  944.         }
  945.  
  946.         if (!crtc->hwmode.crtc_clock) {
  947.                 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
  948.                 return -EBUSY;
  949.         }
  950.  
  951.         /* Helper routine in DRM core does all the work: */
  952.         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
  953.                                                      vblank_time, flags,
  954.                                                      &crtc->hwmode);
  955. }
  956.  
  957. static void ironlake_rps_change_irq_handler(struct drm_device *dev)
  958. {
  959.         struct drm_i915_private *dev_priv = dev->dev_private;
  960.         u32 busy_up, busy_down, max_avg, min_avg;
  961.         u8 new_delay;
  962.  
  963.         spin_lock(&mchdev_lock);
  964.  
  965.         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
  966.  
  967.         new_delay = dev_priv->ips.cur_delay;
  968.  
  969.         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
  970.         busy_up = I915_READ(RCPREVBSYTUPAVG);
  971.         busy_down = I915_READ(RCPREVBSYTDNAVG);
  972.         max_avg = I915_READ(RCBMAXAVG);
  973.         min_avg = I915_READ(RCBMINAVG);
  974.  
  975.         /* Handle RCS change request from hw */
  976.         if (busy_up > max_avg) {
  977.                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
  978.                         new_delay = dev_priv->ips.cur_delay - 1;
  979.                 if (new_delay < dev_priv->ips.max_delay)
  980.                         new_delay = dev_priv->ips.max_delay;
  981.         } else if (busy_down < min_avg) {
  982.                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
  983.                         new_delay = dev_priv->ips.cur_delay + 1;
  984.                 if (new_delay > dev_priv->ips.min_delay)
  985.                         new_delay = dev_priv->ips.min_delay;
  986.         }
  987.  
  988.         if (ironlake_set_drps(dev, new_delay))
  989.                 dev_priv->ips.cur_delay = new_delay;
  990.  
  991.         spin_unlock(&mchdev_lock);
  992.  
  993.         return;
  994. }
  995.  
  996. static void notify_ring(struct intel_engine_cs *ring)
  997. {
  998.         if (!intel_ring_initialized(ring))
  999.                 return;
  1000.  
  1001.         trace_i915_gem_request_notify(ring);
  1002.  
  1003.         wake_up_all(&ring->irq_queue);
  1004. }
  1005.  
  1006. static void vlv_c0_read(struct drm_i915_private *dev_priv,
  1007.                         struct intel_rps_ei *ei)
  1008. {
  1009.         ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
  1010.         ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
  1011.         ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
  1012. }
  1013.  
  1014. static bool vlv_c0_above(struct drm_i915_private *dev_priv,
  1015.                          const struct intel_rps_ei *old,
  1016.                          const struct intel_rps_ei *now,
  1017.                          int threshold)
  1018. {
  1019.         u64 time, c0;
  1020.         unsigned int mul = 100;
  1021.  
  1022.         if (old->cz_clock == 0)
  1023.                 return false;
  1024.  
  1025.         if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
  1026.                 mul <<= 8;
  1027.  
  1028.         time = now->cz_clock - old->cz_clock;
  1029.         time *= threshold * dev_priv->czclk_freq;
  1030.  
  1031.         /* Workload can be split between render + media, e.g. SwapBuffers
  1032.          * being blitted in X after being rendered in mesa. To account for
  1033.          * this we need to combine both engines into our activity counter.
  1034.          */
  1035.         c0 = now->render_c0 - old->render_c0;
  1036.         c0 += now->media_c0 - old->media_c0;
  1037.         c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
  1038.  
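        /*
         * The comparison below effectively asks whether the combined
         * render + media C0 residency over the evaluation interval
         * exceeded "threshold" percent of the elapsed time, which is
         * what the up/down decisions in vlv_wa_c0_ei() need.
         */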
  1039.         return c0 >= time;
  1040. }
  1041.  
  1042. void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
  1043. {
  1044.         vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
  1045.         dev_priv->rps.up_ei = dev_priv->rps.down_ei;
  1046. }
  1047.  
  1048. static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
  1049. {
  1050.         struct intel_rps_ei now;
  1051.         u32 events = 0;
  1052.  
  1053.         if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
  1054.                 return 0;
  1055.  
  1056.         vlv_c0_read(dev_priv, &now);
  1057.         if (now.cz_clock == 0)
  1058.                 return 0;
  1059.  
  1060.         if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
  1061.                 if (!vlv_c0_above(dev_priv,
  1062.                                   &dev_priv->rps.down_ei, &now,
  1063.                                   dev_priv->rps.down_threshold))
  1064.                         events |= GEN6_PM_RP_DOWN_THRESHOLD;
  1065.                 dev_priv->rps.down_ei = now;
  1066.         }
  1067.  
  1068.         if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
  1069.                 if (vlv_c0_above(dev_priv,
  1070.                                  &dev_priv->rps.up_ei, &now,
  1071.                                  dev_priv->rps.up_threshold))
  1072.                         events |= GEN6_PM_RP_UP_THRESHOLD;
  1073.                 dev_priv->rps.up_ei = now;
  1074.         }
  1075.  
  1076.         return events;
  1077. }
  1078.  
  1079. static bool any_waiters(struct drm_i915_private *dev_priv)
  1080. {
  1081.         struct intel_engine_cs *ring;
  1082.         int i;
  1083.  
  1084.         for_each_ring(ring, dev_priv, i)
  1085.                 if (ring->irq_refcount)
  1086.                         return true;
  1087.  
  1088.         return false;
  1089. }
  1090.  
  1091. static void gen6_pm_rps_work(struct work_struct *work)
  1092. {
  1093.         struct drm_i915_private *dev_priv =
  1094.                 container_of(work, struct drm_i915_private, rps.work);
  1095.         bool client_boost;
  1096.         int new_delay, adj, min, max;
  1097.         u32 pm_iir;
  1098.  
  1099.         spin_lock_irq(&dev_priv->irq_lock);
  1100.         /* Speed up work cancellation while RPS interrupts are being disabled. */
  1101.         if (!dev_priv->rps.interrupts_enabled) {
  1102.                 spin_unlock_irq(&dev_priv->irq_lock);
  1103.                 return;
  1104.         }
  1105.  
  1106.         /*
  1107.          * The RPS work is synced during runtime suspend, we don't require a
  1108.          * wakeref. TODO: instead of disabling the asserts make sure that we
  1109.          * always hold an RPM reference while the work is running.
  1110.          */
  1111.         DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
  1112.  
  1113.         pm_iir = dev_priv->rps.pm_iir;
  1114.         dev_priv->rps.pm_iir = 0;
  1115.         /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
  1116.         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
  1117.         client_boost = dev_priv->rps.client_boost;
  1118.         dev_priv->rps.client_boost = false;
  1119.         spin_unlock_irq(&dev_priv->irq_lock);
  1120.  
  1121.         /* Make sure we didn't queue anything we're not going to process. */
  1122.         WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
  1123.  
  1124.         if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
  1125.                 goto out;
  1126.  
  1127.         mutex_lock(&dev_priv->rps.hw_lock);
  1128.  
  1129.         pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
  1130.  
  1131.         adj = dev_priv->rps.last_adj;
  1132.         new_delay = dev_priv->rps.cur_freq;
  1133.         min = dev_priv->rps.min_freq_softlimit;
  1134.         max = dev_priv->rps.max_freq_softlimit;
  1135.  
  1136.         if (client_boost) {
  1137.                 new_delay = dev_priv->rps.max_freq_softlimit;
  1138.                 adj = 0;
  1139.         } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
  1140.                 if (adj > 0)
  1141.                         adj *= 2;
  1142.                 else /* CHV needs even encode values */
  1143.                         adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
  1144.                 /*
  1145.                  * For better performance, jump directly
  1146.                  * to RPe if we're below it.
  1147.                  */
  1148.                 if (new_delay < dev_priv->rps.efficient_freq - adj) {
  1149.                         new_delay = dev_priv->rps.efficient_freq;
  1150.                         adj = 0;
  1151.                 }
  1152.         } else if (any_waiters(dev_priv)) {
  1153.                 adj = 0;
  1154.         } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
  1155.                 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
  1156.                         new_delay = dev_priv->rps.efficient_freq;
  1157.                 else
  1158.                         new_delay = dev_priv->rps.min_freq_softlimit;
  1159.                 adj = 0;
  1160.         } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
  1161.                 if (adj < 0)
  1162.                         adj *= 2;
  1163.                 else /* CHV needs even encode values */
  1164.                         adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
  1165.         } else { /* unknown event */
  1166.                 adj = 0;
  1167.         }
  1168.  
  1169.         dev_priv->rps.last_adj = adj;
  1170.  
  1171.         /* sysfs frequency interfaces may have snuck in while servicing the
  1172.          * interrupt
  1173.          */
  1174.         new_delay += adj;
  1175.         new_delay = clamp_t(int, new_delay, min, max);
  1176.  
  1177.         intel_set_rps(dev_priv->dev, new_delay);
  1178.  
  1179.         mutex_unlock(&dev_priv->rps.hw_lock);
  1180. out:
  1181.         ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
  1182. }
  1183.  
  1184.  
  1185. /**
  1186.  * ivybridge_parity_work - Workqueue called when a parity error interrupt
  1187.  * occurred.
  1188.  * @work: workqueue struct
  1189.  *
  1190.  * Doesn't actually do anything except notify userspace. As a consequence of
  1191.  * this event, userspace should try to remap the bad rows since statistically
  1192.  * the same row is more likely to go bad again.
  1193.  */
  1194. static void ivybridge_parity_work(struct work_struct *work)
  1195. {
  1196.         struct drm_i915_private *dev_priv =
  1197.                 container_of(work, struct drm_i915_private, l3_parity.error_work);
  1198.         u32 error_status, row, bank, subbank;
  1199.         char *parity_event[6];
  1200.         uint32_t misccpctl;
  1201.         uint8_t slice = 0;
  1202.  
  1203.         /* We must turn off DOP level clock gating to access the L3 registers.
  1204.          * In order to prevent a get/put style interface, acquire struct mutex
  1205.          * any time we access those registers.
  1206.          */
  1207.         mutex_lock(&dev_priv->dev->struct_mutex);
  1208.  
  1209.         /* If we've screwed up tracking, just let the interrupt fire again */
  1210.         if (WARN_ON(!dev_priv->l3_parity.which_slice))
  1211.                 goto out;
  1212.  
  1213.         misccpctl = I915_READ(GEN7_MISCCPCTL);
  1214.         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
  1215.         POSTING_READ(GEN7_MISCCPCTL);
  1216.  
  1217.         while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
  1218.                 i915_reg_t reg;
  1219.  
  1220.                 slice--;
  1221.                 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
  1222.                         break;
  1223.  
  1224.                 dev_priv->l3_parity.which_slice &= ~(1<<slice);
  1225.  
  1226.                 reg = GEN7_L3CDERRST1(slice);
  1227.  
  1228.                 error_status = I915_READ(reg);
  1229.                 row = GEN7_PARITY_ERROR_ROW(error_status);
  1230.                 bank = GEN7_PARITY_ERROR_BANK(error_status);
  1231.                 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
  1232.  
  1233.                 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
  1234.                 POSTING_READ(reg);
  1235.  
  1236.                 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
  1237.                           slice, row, bank, subbank);
  1238.  
  1239.         }
  1240.  
  1241.         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
  1242.  
  1243. out:
  1244.         WARN_ON(dev_priv->l3_parity.which_slice);
  1245.         spin_lock_irq(&dev_priv->irq_lock);
  1246.         gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
  1247.         spin_unlock_irq(&dev_priv->irq_lock);
  1248.  
  1249.         mutex_unlock(&dev_priv->dev->struct_mutex);
  1250. }
  1251.  
  1252. static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
  1253. {
  1254.         struct drm_i915_private *dev_priv = dev->dev_private;
  1255.  
  1256.         if (!HAS_L3_DPF(dev))
  1257.                 return;
  1258.  
  1259.         spin_lock(&dev_priv->irq_lock);
  1260.         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
  1261.         spin_unlock(&dev_priv->irq_lock);
  1262.  
  1263.         iir &= GT_PARITY_ERROR(dev);
  1264.         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
  1265.                 dev_priv->l3_parity.which_slice |= 1 << 1;
  1266.  
  1267.         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
  1268.                 dev_priv->l3_parity.which_slice |= 1 << 0;
  1269.  
  1270.         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
  1271. }
  1272.  
  1273. static void ilk_gt_irq_handler(struct drm_device *dev,
  1274.                                struct drm_i915_private *dev_priv,
  1275.                                u32 gt_iir)
  1276. {
  1277.         if (gt_iir &
  1278.             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
  1279.                 notify_ring(&dev_priv->ring[RCS]);
  1280.         if (gt_iir & ILK_BSD_USER_INTERRUPT)
  1281.                 notify_ring(&dev_priv->ring[VCS]);
  1282. }
  1283.  
  1284. static void snb_gt_irq_handler(struct drm_device *dev,
  1285.                                struct drm_i915_private *dev_priv,
  1286.                                u32 gt_iir)
  1287. {
  1288.  
  1289.         if (gt_iir &
  1290.             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
  1291.                 notify_ring(&dev_priv->ring[RCS]);
  1292.         if (gt_iir & GT_BSD_USER_INTERRUPT)
  1293.                 notify_ring(&dev_priv->ring[VCS]);
  1294.         if (gt_iir & GT_BLT_USER_INTERRUPT)
  1295.                 notify_ring(&dev_priv->ring[BCS]);
  1296.  
  1297.         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
  1298.                       GT_BSD_CS_ERROR_INTERRUPT |
  1299.                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
  1300.                 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
  1301.  
  1302.         if (gt_iir & GT_PARITY_ERROR(dev))
  1303.                 ivybridge_parity_error_irq_handler(dev, gt_iir);
  1304. }
  1305.  
  1306. static __always_inline void
  1307. gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
  1308. {
  1309.         if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
  1310.                 notify_ring(ring);
  1311.         if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
  1312.                 intel_lrc_irq_handler(ring);
  1313. }
  1314.  
  1315. static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
  1316.                                        u32 master_ctl)
  1317. {
  1318.         irqreturn_t ret = IRQ_NONE;
  1319.  
  1320.         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
  1321.                 u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
  1322.                 if (iir) {
  1323.                         I915_WRITE_FW(GEN8_GT_IIR(0), iir);
  1324.                         ret = IRQ_HANDLED;
  1325.  
  1326.                         gen8_cs_irq_handler(&dev_priv->ring[RCS],
  1327.                                         iir, GEN8_RCS_IRQ_SHIFT);
  1328.  
  1329.                         gen8_cs_irq_handler(&dev_priv->ring[BCS],
  1330.                                         iir, GEN8_BCS_IRQ_SHIFT);
  1331.                 } else
  1332.                         DRM_ERROR("The master control interrupt lied (GT0)!\n");
  1333.         }
  1334.  
  1335.         if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
  1336.                 u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
  1337.                 if (iir) {
  1338.                         I915_WRITE_FW(GEN8_GT_IIR(1), iir);
  1339.                         ret = IRQ_HANDLED;
  1340.  
  1341.                         gen8_cs_irq_handler(&dev_priv->ring[VCS],
  1342.                                         iir, GEN8_VCS1_IRQ_SHIFT);
  1343.  
  1344.                         gen8_cs_irq_handler(&dev_priv->ring[VCS2],
  1345.                                         iir, GEN8_VCS2_IRQ_SHIFT);
  1346.                 } else
  1347.                         DRM_ERROR("The master control interrupt lied (GT1)!\n");
  1348.         }
  1349.  
  1350.         if (master_ctl & GEN8_GT_VECS_IRQ) {
  1351.                 u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
  1352.                 if (iir) {
  1353.                         I915_WRITE_FW(GEN8_GT_IIR(3), iir);
  1354.                         ret = IRQ_HANDLED;
  1355.  
  1356.                         gen8_cs_irq_handler(&dev_priv->ring[VECS],
  1357.                                         iir, GEN8_VECS_IRQ_SHIFT);
  1358.                 } else
  1359.                         DRM_ERROR("The master control interrupt lied (GT3)!\n");
  1360.         }
  1361.  
  1362.         if (master_ctl & GEN8_GT_PM_IRQ) {
  1363.                 u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
  1364.                 if (iir & dev_priv->pm_rps_events) {
  1365.                         I915_WRITE_FW(GEN8_GT_IIR(2),
  1366.                                       iir & dev_priv->pm_rps_events);
  1367.                         ret = IRQ_HANDLED;
  1368.                         gen6_rps_irq_handler(dev_priv, iir);
  1369.                 } else
  1370.                         DRM_ERROR("The master control interrupt lied (PM)!\n");
  1371.         }
  1372.  
  1373.         return ret;
  1374. }
  1375.  
  1376. static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
  1377. {
  1378.         switch (port) {
  1379.         case PORT_A:
  1380.                 return val & PORTA_HOTPLUG_LONG_DETECT;
  1381.         case PORT_B:
  1382.                 return val & PORTB_HOTPLUG_LONG_DETECT;
  1383.         case PORT_C:
  1384.                 return val & PORTC_HOTPLUG_LONG_DETECT;
  1385.         default:
  1386.                 return false;
  1387.         }
  1388. }
  1389.  
  1390. static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
  1391. {
  1392.         switch (port) {
  1393.         case PORT_E:
  1394.                 return val & PORTE_HOTPLUG_LONG_DETECT;
  1395.         default:
  1396.                 return false;
  1397.         }
  1398. }
  1399.  
  1400. static bool spt_port_hotplug_long_detect(enum port port, u32 val)
  1401. {
  1402.         switch (port) {
  1403.         case PORT_A:
  1404.                 return val & PORTA_HOTPLUG_LONG_DETECT;
  1405.         case PORT_B:
  1406.                 return val & PORTB_HOTPLUG_LONG_DETECT;
  1407.         case PORT_C:
  1408.                 return val & PORTC_HOTPLUG_LONG_DETECT;
  1409.         case PORT_D:
  1410.                 return val & PORTD_HOTPLUG_LONG_DETECT;
  1411.         default:
  1412.                 return false;
  1413.         }
  1414. }
  1415.  
  1416. static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
  1417. {
  1418.         switch (port) {
  1419.         case PORT_A:
  1420.                 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
  1421.         default:
  1422.                 return false;
  1423.         }
  1424. }
  1425.  
  1426. static bool pch_port_hotplug_long_detect(enum port port, u32 val)
  1427. {
  1428.         switch (port) {
  1429.         case PORT_B:
  1430.                 return val & PORTB_HOTPLUG_LONG_DETECT;
  1431.         case PORT_C:
  1432.                 return val & PORTC_HOTPLUG_LONG_DETECT;
  1433.         case PORT_D:
  1434.                 return val & PORTD_HOTPLUG_LONG_DETECT;
  1435.         default:
  1436.                 return false;
  1437.         }
  1438. }
  1439.  
  1440. static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
  1441. {
  1442.         switch (port) {
  1443.         case PORT_B:
  1444.                 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
  1445.         case PORT_C:
  1446.                 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
  1447.         case PORT_D:
  1448.                 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
  1449.         default:
  1450.                 return false;
  1451.         }
  1452. }
  1453.  
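        /*
         * The *_port_hotplug_long_detect() helpers above all follow the same
         * pattern: given a port and the raw hotplug register value, report
         * whether the pulse was "long" (typically a connect/disconnect rather
         * than a DP short-pulse IRQ). Ports a platform cannot generate simply
         * fall through to false.
         */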
  1454. /*
  1455.  * Get a bit mask of pins that have triggered, and which ones may be long.
  1456.  * This can be called multiple times with the same masks to accumulate
  1457.  * hotplug detection results from several registers.
  1458.  *
  1459.  * Note that the caller is expected to zero out the masks initially.
  1460.  */
  1461. static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
  1462.                              u32 hotplug_trigger, u32 dig_hotplug_reg,
  1463.                              const u32 hpd[HPD_NUM_PINS],
  1464.                              bool long_pulse_detect(enum port port, u32 val))
  1465. {
  1466.         enum port port;
  1467.         int i;
  1468.  
  1469.         for_each_hpd_pin(i) {
  1470.                 if ((hpd[i] & hotplug_trigger) == 0)
  1471.                         continue;
  1472.  
  1473.                 *pin_mask |= BIT(i);
  1474.  
  1475.                 if (!intel_hpd_pin_to_port(i, &port))
  1476.                         continue;
  1477.  
  1478.                 if (long_pulse_detect(port, dig_hotplug_reg))
  1479.                         *long_mask |= BIT(i);
  1480.         }
  1481.  
  1482.         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
  1483.                          hotplug_trigger, dig_hotplug_reg, *pin_mask);
  1484.  
  1485. }
  1486.  
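        /*
         * A minimal usage sketch, mirroring the PCH handlers further down:
         * the caller zeroes both masks, calls intel_get_hpd_pins() once per
         * trigger/register pair (possibly several times to accumulate pins
         * from multiple registers), and finally hands the result to
         * intel_hpd_irq_handler():
         *
         *      u32 pin_mask = 0, long_mask = 0;
         *
         *      intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
         *                         dig_hotplug_reg, hpd_cpt,
         *                         pch_port_hotplug_long_detect);
         *      intel_hpd_irq_handler(dev, pin_mask, long_mask);
         */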
  1487. static void gmbus_irq_handler(struct drm_device *dev)
  1488. {
  1489.         struct drm_i915_private *dev_priv = dev->dev_private;
  1490.  
  1491.         wake_up_all(&dev_priv->gmbus_wait_queue);
  1492. }
  1493.  
  1494. static void dp_aux_irq_handler(struct drm_device *dev)
  1495. {
  1496.         struct drm_i915_private *dev_priv = dev->dev_private;
  1497.  
  1498.         wake_up_all(&dev_priv->gmbus_wait_queue);
  1499. }
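        /*
         * dp_aux_irq_handler() mirrors gmbus_irq_handler() because DP AUX
         * transfer completions appear to be waited for on the same
         * gmbus_wait_queue, so both handlers wake the identical queue.
         */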
  1500.  
  1501. #if defined(CONFIG_DEBUG_FS)
  1502. static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
  1503.                                          uint32_t crc0, uint32_t crc1,
  1504.                                          uint32_t crc2, uint32_t crc3,
  1505.                                          uint32_t crc4)
  1506. {
  1507.         struct drm_i915_private *dev_priv = dev->dev_private;
  1508.         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
  1509.         struct intel_pipe_crc_entry *entry;
  1510.         int head, tail;
  1511.  
  1512.         spin_lock(&pipe_crc->lock);
  1513.  
  1514.         if (!pipe_crc->entries) {
  1515.                 spin_unlock(&pipe_crc->lock);
  1516.                 DRM_DEBUG_KMS("spurious interrupt\n");
  1517.                 return;
  1518.         }
  1519.  
  1520.         head = pipe_crc->head;
  1521.         tail = pipe_crc->tail;
  1522.  
  1523.         if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
  1524.                 spin_unlock(&pipe_crc->lock);
  1525.                 DRM_ERROR("CRC buffer overflowing\n");
  1526.                 return;
  1527.         }
  1528.  
  1529.         entry = &pipe_crc->entries[head];
  1530.  
  1531.         entry->frame = dev->driver->get_vblank_counter(dev, pipe);
  1532.         entry->crc[0] = crc0;
  1533.         entry->crc[1] = crc1;
  1534.         entry->crc[2] = crc2;
  1535.         entry->crc[3] = crc3;
  1536.         entry->crc[4] = crc4;
  1537.  
  1538.         head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
  1539.         pipe_crc->head = head;
  1540.  
  1541.         spin_unlock(&pipe_crc->lock);
  1542.  
  1543.         wake_up_interruptible(&pipe_crc->wq);
  1544. }
  1545. #else
  1546. static inline void
  1547. display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
  1548.                              uint32_t crc0, uint32_t crc1,
  1549.                              uint32_t crc2, uint32_t crc3,
  1550.                              uint32_t crc4) {}
  1551. #endif
  1552.  
  1553.  
  1554. static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
  1555. {
  1556.         struct drm_i915_private *dev_priv = dev->dev_private;
  1557.  
  1558.         display_pipe_crc_irq_handler(dev, pipe,
  1559.                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
  1560.                                      0, 0, 0, 0);
  1561. }
  1562.  
  1563. static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
  1564. {
  1565.         struct drm_i915_private *dev_priv = dev->dev_private;
  1566.  
  1567.         display_pipe_crc_irq_handler(dev, pipe,
  1568.                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
  1569.                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
  1570.                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
  1571.                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
  1572.                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
  1573. }
  1574.  
  1575. static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
  1576. {
  1577.         struct drm_i915_private *dev_priv = dev->dev_private;
  1578.         uint32_t res1, res2;
  1579.  
  1580.         if (INTEL_INFO(dev)->gen >= 3)
  1581.                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
  1582.         else
  1583.                 res1 = 0;
  1584.  
  1585.         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
  1586.                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
  1587.         else
  1588.                 res2 = 0;
  1589.  
  1590.         display_pipe_crc_irq_handler(dev, pipe,
  1591.                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
  1592.                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
  1593.                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
  1594.                                      res1, res2);
  1595. }
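        /*
         * The three wrappers above differ only in which CRC result registers
         * exist on a given platform: HSW reports a single value, IVB five,
         * and the older i9xx parts report per-channel R/G/B results plus one
         * or two extra residual values depending on generation. All of them
         * feed the CONFIG_DEBUG_FS-only CRC ring buffer handled above.
         */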
  1596.  
  1597. /* The RPS events need forcewake, so we add them to a work queue and mask their
  1598.  * IMR bits until the work is done. Other interrupts can be processed without
  1599.  * the work queue. */
  1600. static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
  1601. {
  1602.         if (pm_iir & dev_priv->pm_rps_events) {
  1603.                 spin_lock(&dev_priv->irq_lock);
  1604.                 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
  1605.                 if (dev_priv->rps.interrupts_enabled) {
  1606.                         dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
  1607.                         queue_work(dev_priv->wq, &dev_priv->rps.work);
  1608.                 }
  1609.                 spin_unlock(&dev_priv->irq_lock);
  1610.         }
  1611.  
  1612.         if (INTEL_INFO(dev_priv)->gen >= 8)
  1613.                 return;
  1614.  
  1615.         if (HAS_VEBOX(dev_priv->dev)) {
  1616.                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
  1617.                         notify_ring(&dev_priv->ring[VECS]);
  1618.  
  1619.                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
  1620.                         DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
  1621.         }
  1622. }
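        /*
         * gen6_rps_irq_handler() masks the triggered RPS bits under irq_lock
         * and defers the real work to dev_priv->rps.work, since that work
         * needs forcewake (see the comment above). On pre-gen8 parts with a
         * VEBOX the same PM register also carries the VEBOX user/error
         * interrupts, which are handled inline here.
         */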
  1623.  
  1624. static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
  1625. {
  1626.         if (!drm_handle_vblank(dev, pipe))
  1627.                 return false;
  1628.  
  1629.         return true;
  1630. }
  1631.  
  1632. static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
  1633. {
  1634.         struct drm_i915_private *dev_priv = dev->dev_private;
  1635.         u32 pipe_stats[I915_MAX_PIPES] = { };
  1636.         int pipe;
  1637.  
  1638.         spin_lock(&dev_priv->irq_lock);
  1639.  
  1640.         if (!dev_priv->display_irqs_enabled) {
  1641.                 spin_unlock(&dev_priv->irq_lock);
  1642.                 return;
  1643.         }
  1644.  
  1645.         for_each_pipe(dev_priv, pipe) {
  1646.                 i915_reg_t reg;
  1647.                 u32 mask, iir_bit = 0;
  1648.  
  1649.                 /*
  1650.                  * PIPESTAT bits get signalled even when the interrupt is
  1651.                  * disabled with the mask bits, and some of the status bits do
  1652.                  * not generate interrupts at all (like the underrun bit). Hence
  1653.                  * we need to be careful that we only handle what we want to
  1654.                  * handle.
  1655.                  */
  1656.  
  1657.                 /* fifo underruns are filtered in the underrun handler. */
  1658.                 mask = PIPE_FIFO_UNDERRUN_STATUS;
  1659.  
  1660.                 switch (pipe) {
  1661.                 case PIPE_A:
  1662.                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
  1663.                         break;
  1664.                 case PIPE_B:
  1665.                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
  1666.                         break;
  1667.                 case PIPE_C:
  1668.                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
  1669.                         break;
  1670.                 }
  1671.                 if (iir & iir_bit)
  1672.                         mask |= dev_priv->pipestat_irq_mask[pipe];
  1673.  
  1674.                 if (!mask)
  1675.                         continue;
  1676.  
  1677.                 reg = PIPESTAT(pipe);
  1678.                 mask |= PIPESTAT_INT_ENABLE_MASK;
  1679.                 pipe_stats[pipe] = I915_READ(reg) & mask;
  1680.  
  1681.                 /*
  1682.                  * Clear the PIPE*STAT regs before the IIR
  1683.                  */
  1684.                 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
  1685.                                         PIPESTAT_INT_STATUS_MASK))
  1686.                         I915_WRITE(reg, pipe_stats[pipe]);
  1687.         }
  1688.         spin_unlock(&dev_priv->irq_lock);
  1689.  
  1690.         for_each_pipe(dev_priv, pipe) {
  1691.                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
  1692.                     intel_pipe_handle_vblank(dev, pipe))
  1693.                         intel_check_page_flip(dev, pipe);
  1694.  
  1695.                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
  1696.                         intel_prepare_page_flip(dev, pipe);
  1697.                         intel_finish_page_flip(dev, pipe);
  1698.                 }
  1699.  
  1700.                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
  1701.                         i9xx_pipe_crc_irq_handler(dev, pipe);
  1702.  
  1703.                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  1704.                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
  1705.         }
  1706.  
  1707.         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
  1708.                 gmbus_irq_handler(dev);
  1709. }
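        /*
         * Note the two-phase structure above: the first pipe loop latches and
         * clears the PIPESTAT bits under irq_lock, while the second loop acts
         * on the latched copies (vblank, page flip, CRC, underrun) without
         * holding the lock.
         */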
  1710.  
  1711. static void i9xx_hpd_irq_handler(struct drm_device *dev)
  1712. {
  1713.         struct drm_i915_private *dev_priv = dev->dev_private;
  1714.         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
  1715.         u32 pin_mask = 0, long_mask = 0;
  1716.  
  1717.         if (!hotplug_status)
  1718.                 return;
  1719.  
  1720.         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
  1721.         /*
  1722.          * Make sure hotplug status is cleared before we clear IIR, or else we
  1723.          * may miss hotplug events.
  1724.          */
  1725.         POSTING_READ(PORT_HOTPLUG_STAT);
  1726.  
  1727.         if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
  1728.                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
  1729.  
  1730.                 if (hotplug_trigger) {
  1731.                         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
  1732.                                            hotplug_trigger, hpd_status_g4x,
  1733.                                            i9xx_port_hotplug_long_detect);
  1734.  
  1735.                         intel_hpd_irq_handler(dev, pin_mask, long_mask);
  1736.                 }
  1737.  
  1738.                 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
  1739.                         dp_aux_irq_handler(dev);
  1740.         } else {
  1741.                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
  1742.  
  1743.                 if (hotplug_trigger) {
  1744.                         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
  1745.                                            hotplug_trigger, hpd_status_i915,
  1746.                                            i9xx_port_hotplug_long_detect);
  1747.                         intel_hpd_irq_handler(dev, pin_mask, long_mask);
  1748.                 }
  1749.         }
  1750. }
  1751.  
  1752. static irqreturn_t valleyview_irq_handler(int irq, void *arg)
  1753. {
  1754.         struct drm_device *dev = arg;
  1755.         struct drm_i915_private *dev_priv = dev->dev_private;
  1756.         u32 iir, gt_iir, pm_iir;
  1757.         irqreturn_t ret = IRQ_NONE;
  1758.  
  1759.         if (!intel_irqs_enabled(dev_priv))
  1760.                 return IRQ_NONE;
  1761.  
  1762.         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
  1763.         disable_rpm_wakeref_asserts(dev_priv);
  1764.  
  1765.         while (true) {
  1766.                 /* Find, clear, then process each source of interrupt */
  1767.  
  1768.                 gt_iir = I915_READ(GTIIR);
  1769.                 if (gt_iir)
  1770.                         I915_WRITE(GTIIR, gt_iir);
  1771.  
  1772.                 pm_iir = I915_READ(GEN6_PMIIR);
  1773.                 if (pm_iir)
  1774.                         I915_WRITE(GEN6_PMIIR, pm_iir);
  1775.  
  1776.                 iir = I915_READ(VLV_IIR);
  1777.                 if (iir) {
  1778.                         /* Consume port before clearing IIR or we'll miss events */
  1779.                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
  1780.                                 i9xx_hpd_irq_handler(dev);
  1781.                         I915_WRITE(VLV_IIR, iir);
  1782.                 }
  1783.  
  1784.                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
  1785.                         goto out;
  1786.  
  1787.                 ret = IRQ_HANDLED;
  1788.  
  1789.                 if (gt_iir)
  1790.                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
  1791.                 if (pm_iir)
  1792.                         gen6_rps_irq_handler(dev_priv, pm_iir);
  1793.                 /* Call regardless, as some status bits might not be
  1794.                  * signalled in iir */
  1795.                 valleyview_pipestat_irq_handler(dev, iir);
  1796.         }
  1797.  
  1798. out:
  1799.         enable_rpm_wakeref_asserts(dev_priv);
  1800.  
  1801.         return ret;
  1802. }
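        /*
         * valleyview_irq_handler() keeps looping until GTIIR, GEN6_PMIIR and
         * VLV_IIR all read back zero, following the "find, clear, then
         * process" pattern noted in the loop, presumably so that interrupts
         * raised while the handler runs are not lost.
         */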
  1803.  
  1804. static irqreturn_t cherryview_irq_handler(int irq, void *arg)
  1805. {
  1806.         struct drm_device *dev = arg;
  1807.         struct drm_i915_private *dev_priv = dev->dev_private;
  1808.         u32 master_ctl, iir;
  1809.         irqreturn_t ret = IRQ_NONE;
  1810.  
  1811.         if (!intel_irqs_enabled(dev_priv))
  1812.                 return IRQ_NONE;
  1813.  
  1814.         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
  1815.         disable_rpm_wakeref_asserts(dev_priv);
  1816.  
  1817.         for (;;) {
  1818.                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
  1819.                 iir = I915_READ(VLV_IIR);
  1820.  
  1821.                 if (master_ctl == 0 && iir == 0)
  1822.                         break;
  1823.  
  1824.                 ret = IRQ_HANDLED;
  1825.  
  1826.                 I915_WRITE(GEN8_MASTER_IRQ, 0);
  1827.  
  1828.                 /* Find, clear, then process each source of interrupt */
  1829.  
  1830.                 if (iir) {
  1831.                         /* Consume port before clearing IIR or we'll miss events */
  1832.                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
  1833.                                 i9xx_hpd_irq_handler(dev);
  1834.                         I915_WRITE(VLV_IIR, iir);
  1835.                 }
  1836.  
  1837.                 gen8_gt_irq_handler(dev_priv, master_ctl);
  1838.  
  1839.                 /* Call regardless, as some status bits might not be
  1840.                  * signalled in iir */
  1841.                 valleyview_pipestat_irq_handler(dev, iir);
  1842.  
  1843.                 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
  1844.                 POSTING_READ(GEN8_MASTER_IRQ);
  1845.         }
  1846.  
  1847.         enable_rpm_wakeref_asserts(dev_priv);
  1848.  
  1849.         return ret;
  1850. }
  1851.  
  1852. static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
  1853.                                 const u32 hpd[HPD_NUM_PINS])
  1854. {
  1855.         struct drm_i915_private *dev_priv = to_i915(dev);
  1856.         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
  1857.  
  1858.         /*
  1859.          * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
  1860.          * unless we touch the hotplug register, even if hotplug_trigger is
  1861.          * zero. Not acking leads to "The master control interrupt lied (SDE)!"
  1862.          * errors.
  1863.          */
  1864.         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
  1865.         if (!hotplug_trigger) {
  1866.                 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
  1867.                         PORTD_HOTPLUG_STATUS_MASK |
  1868.                         PORTC_HOTPLUG_STATUS_MASK |
  1869.                         PORTB_HOTPLUG_STATUS_MASK;
  1870.                 dig_hotplug_reg &= ~mask;
  1871.         }
  1872.  
  1873.         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
  1874.         if (!hotplug_trigger)
  1875.                 return;
  1876.  
  1877.         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
  1878.                            dig_hotplug_reg, hpd,
  1879.                            pch_port_hotplug_long_detect);
  1880.  
  1881.         intel_hpd_irq_handler(dev, pin_mask, long_mask);
  1882. }
  1883.  
  1884. static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
  1885. {
  1886.         struct drm_i915_private *dev_priv = dev->dev_private;
  1887.         int pipe;
  1888.         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
  1889.  
  1890.         ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
  1891.  
  1892.         if (pch_iir & SDE_AUDIO_POWER_MASK) {
  1893.                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
  1894.                                SDE_AUDIO_POWER_SHIFT);
  1895.                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
  1896.                                  port_name(port));
  1897.         }
  1898.  
  1899.         if (pch_iir & SDE_AUX_MASK)
  1900.                 dp_aux_irq_handler(dev);
  1901.  
  1902.         if (pch_iir & SDE_GMBUS)
  1903.                 gmbus_irq_handler(dev);
  1904.  
  1905.         if (pch_iir & SDE_AUDIO_HDCP_MASK)
  1906.                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
  1907.  
  1908.         if (pch_iir & SDE_AUDIO_TRANS_MASK)
  1909.                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
  1910.  
  1911.         if (pch_iir & SDE_POISON)
  1912.                 DRM_ERROR("PCH poison interrupt\n");
  1913.  
  1914.         if (pch_iir & SDE_FDI_MASK)
  1915.                 for_each_pipe(dev_priv, pipe)
  1916.                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
  1917.                                          pipe_name(pipe),
  1918.                                          I915_READ(FDI_RX_IIR(pipe)));
  1919.  
  1920.         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
  1921.                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
  1922.  
  1923.         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
  1924.                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
  1925.  
  1926.         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
  1927.                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
  1928.  
  1929.         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
  1930.                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
  1931. }
  1932.  
  1933. static void ivb_err_int_handler(struct drm_device *dev)
  1934. {
  1935.         struct drm_i915_private *dev_priv = dev->dev_private;
  1936.         u32 err_int = I915_READ(GEN7_ERR_INT);
  1937.         enum pipe pipe;
  1938.  
  1939.         if (err_int & ERR_INT_POISON)
  1940.                 DRM_ERROR("Poison interrupt\n");
  1941.  
  1942.         for_each_pipe(dev_priv, pipe) {
  1943.                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
  1944.                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
  1945.  
  1946.                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
  1947.                         if (IS_IVYBRIDGE(dev))
  1948.                                 ivb_pipe_crc_irq_handler(dev, pipe);
  1949.                         else
  1950.                                 hsw_pipe_crc_irq_handler(dev, pipe);
  1951.                 }
  1952.         }
  1953.  
  1954.         I915_WRITE(GEN7_ERR_INT, err_int);
  1955. }
  1956.  
  1957. static void cpt_serr_int_handler(struct drm_device *dev)
  1958. {
  1959.         struct drm_i915_private *dev_priv = dev->dev_private;
  1960.         u32 serr_int = I915_READ(SERR_INT);
  1961.  
  1962.         if (serr_int & SERR_INT_POISON)
  1963.                 DRM_ERROR("PCH poison interrupt\n");
  1964.  
  1965.         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
  1966.                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
  1967.  
  1968.         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
  1969.                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
  1970.  
  1971.         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
  1972.                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
  1973.  
  1974.         I915_WRITE(SERR_INT, serr_int);
  1975. }
  1976.  
  1977. static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
  1978. {
  1979.         struct drm_i915_private *dev_priv = dev->dev_private;
  1980.         int pipe;
  1981.         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
  1982.  
  1983.         ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
  1984.  
  1985.         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
  1986.                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
  1987.                                SDE_AUDIO_POWER_SHIFT_CPT);
  1988.                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
  1989.                                  port_name(port));
  1990.         }
  1991.  
  1992.         if (pch_iir & SDE_AUX_MASK_CPT)
  1993.                 dp_aux_irq_handler(dev);
  1994.  
  1995.         if (pch_iir & SDE_GMBUS_CPT)
  1996.                 gmbus_irq_handler(dev);
  1997.  
  1998.         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
  1999.                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
  2000.  
  2001.         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
  2002.                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
  2003.  
  2004.         if (pch_iir & SDE_FDI_MASK_CPT)
  2005.                 for_each_pipe(dev_priv, pipe)
  2006.                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
  2007.                                          pipe_name(pipe),
  2008.                                          I915_READ(FDI_RX_IIR(pipe)));
  2009.  
  2010.         if (pch_iir & SDE_ERROR_CPT)
  2011.                 cpt_serr_int_handler(dev);
  2012. }
  2013.  
  2014. static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
  2015. {
  2016.         struct drm_i915_private *dev_priv = dev->dev_private;
  2017.         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
  2018.                 ~SDE_PORTE_HOTPLUG_SPT;
  2019.         u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
  2020.         u32 pin_mask = 0, long_mask = 0;
  2021.  
  2022.         if (hotplug_trigger) {
  2023.                 u32 dig_hotplug_reg;
  2024.  
  2025.                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
  2026.                 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
  2027.  
  2028.                 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
  2029.                                    dig_hotplug_reg, hpd_spt,
  2030.                                    spt_port_hotplug_long_detect);
  2031.         }
  2032.  
  2033.         if (hotplug2_trigger) {
  2034.                 u32 dig_hotplug_reg;
  2035.  
  2036.                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
  2037.                 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
  2038.  
  2039.                 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
  2040.                                    dig_hotplug_reg, hpd_spt,
  2041.                                    spt_port_hotplug2_long_detect);
  2042.         }
  2043.  
  2044.         if (pin_mask)
  2045.                 intel_hpd_irq_handler(dev, pin_mask, long_mask);
  2046.  
  2047.         if (pch_iir & SDE_GMBUS_CPT)
  2048.                 gmbus_irq_handler(dev);
  2049. }
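        /*
         * SunrisePoint routes port E through a second hotplug register
         * (PCH_PORT_HOTPLUG2), which is why spt_irq_handler() splits the
         * trigger in two and uses a separate long-pulse detector for that
         * port.
         */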
  2050.  
  2051. static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
  2052.                                 const u32 hpd[HPD_NUM_PINS])
  2053. {
  2054.         struct drm_i915_private *dev_priv = to_i915(dev);
  2055.         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
  2056.  
  2057.         dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
  2058.         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
  2059.  
  2060.         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
  2061.                            dig_hotplug_reg, hpd,
  2062.                            ilk_port_hotplug_long_detect);
  2063.  
  2064.         intel_hpd_irq_handler(dev, pin_mask, long_mask);
  2065. }
  2066.  
  2067. static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
  2068. {
  2069.         struct drm_i915_private *dev_priv = dev->dev_private;
  2070.         enum pipe pipe;
  2071.         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
  2072.  
  2073.         if (hotplug_trigger)
  2074.                 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
  2075.  
  2076.         if (de_iir & DE_AUX_CHANNEL_A)
  2077.                 dp_aux_irq_handler(dev);
  2078.  
  2079.         if (de_iir & DE_GSE)
  2080.                 intel_opregion_asle_intr(dev);
  2081.  
  2082.         if (de_iir & DE_POISON)
  2083.                 DRM_ERROR("Poison interrupt\n");
  2084.  
  2085.         for_each_pipe(dev_priv, pipe) {
  2086.                 if (de_iir & DE_PIPE_VBLANK(pipe) &&
  2087.                     intel_pipe_handle_vblank(dev, pipe))
  2088.                         intel_check_page_flip(dev, pipe);
  2089.  
  2090.                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
  2091.                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
  2092.  
  2093.                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
  2094.                         i9xx_pipe_crc_irq_handler(dev, pipe);
  2095.  
  2096.                 /* plane/pipes map 1:1 on ilk+ */
  2097.                 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
  2098.                         intel_prepare_page_flip(dev, pipe);
  2099.                         intel_finish_page_flip_plane(dev, pipe);
  2100.                 }
  2101.         }
  2102.  
  2103.         /* check event from PCH */
  2104.         if (de_iir & DE_PCH_EVENT) {
  2105.                 u32 pch_iir = I915_READ(SDEIIR);
  2106.  
  2107.                 if (HAS_PCH_CPT(dev))
  2108.                         cpt_irq_handler(dev, pch_iir);
  2109.                 else
  2110.                         ibx_irq_handler(dev, pch_iir);
  2111.  
  2112.                 /* should clear PCH hotplug event before clearing the CPU irq */
  2113.                 I915_WRITE(SDEIIR, pch_iir);
  2114.         }
  2115.  
  2116.         if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
  2117.                 ironlake_rps_change_irq_handler(dev);
  2118. }
  2119.  
  2120. static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
  2121. {
  2122.         struct drm_i915_private *dev_priv = dev->dev_private;
  2123.         enum pipe pipe;
  2124.         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
  2125.  
  2126.         if (hotplug_trigger)
  2127.                 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
  2128.  
  2129.         if (de_iir & DE_ERR_INT_IVB)
  2130.                 ivb_err_int_handler(dev);
  2131.  
  2132.         if (de_iir & DE_AUX_CHANNEL_A_IVB)
  2133.                 dp_aux_irq_handler(dev);
  2134.  
  2135.         if (de_iir & DE_GSE_IVB)
  2136.                 intel_opregion_asle_intr(dev);
  2137.  
  2138.         for_each_pipe(dev_priv, pipe) {
  2139.                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
  2140.                     intel_pipe_handle_vblank(dev, pipe))
  2141.                         intel_check_page_flip(dev, pipe);
  2142.  
  2143.                 /* plane/pipes map 1:1 on ilk+ */
  2144.                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
  2145.                         intel_prepare_page_flip(dev, pipe);
  2146.                         intel_finish_page_flip_plane(dev, pipe);
  2147.                 }
  2148.         }
  2149.  
  2150.         /* check event from PCH */
  2151.         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
  2152.                 u32 pch_iir = I915_READ(SDEIIR);
  2153.  
  2154.                 cpt_irq_handler(dev, pch_iir);
  2155.  
  2156.                 /* clear PCH hotplug event before clearing the CPU irq */
  2157.                 I915_WRITE(SDEIIR, pch_iir);
  2158.         }
  2159. }
  2160.  
  2161. /*
  2162.  * To handle irqs with the minimum potential races with fresh interrupts, we:
  2163.  * 1 - Disable Master Interrupt Control.
  2164.  * 2 - Find the source(s) of the interrupt.
  2165.  * 3 - Clear the Interrupt Identity bits (IIR).
  2166.  * 4 - Process the interrupt(s) that had bits set in the IIRs.
  2167.  * 5 - Re-enable Master Interrupt Control.
  2168.  */
  2169. static irqreturn_t ironlake_irq_handler(int irq, void *arg)
  2170. {
  2171.         struct drm_device *dev = arg;
  2172.         struct drm_i915_private *dev_priv = dev->dev_private;
  2173.         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
  2174.         irqreturn_t ret = IRQ_NONE;
  2175.  
  2176.         if (!intel_irqs_enabled(dev_priv))
  2177.                 return IRQ_NONE;
  2178.  
  2179.         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
  2180.         disable_rpm_wakeref_asserts(dev_priv);
  2181.  
  2182.         /* disable master interrupt before clearing iir  */
  2183.         de_ier = I915_READ(DEIER);
  2184.         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
  2185.         POSTING_READ(DEIER);
  2186.  
  2187.         /* Disable south interrupts. We'll only write to SDEIIR once, so further
  2188.          * interrupts will be stored on its back queue, and then we'll be
  2189.          * able to process them after we restore SDEIER (as soon as we restore
  2190.          * it, we'll get an interrupt if SDEIIR still has something to process
  2191.          * due to its back queue). */
  2192.         if (!HAS_PCH_NOP(dev)) {
  2193.                 sde_ier = I915_READ(SDEIER);
  2194.                 I915_WRITE(SDEIER, 0);
  2195.                 POSTING_READ(SDEIER);
  2196.         }
  2197.  
  2198.         /* Find, clear, then process each source of interrupt */
  2199.  
  2200.         gt_iir = I915_READ(GTIIR);
  2201.         if (gt_iir) {
  2202.                 I915_WRITE(GTIIR, gt_iir);
  2203.                 ret = IRQ_HANDLED;
  2204.                 if (INTEL_INFO(dev)->gen >= 6)
  2205.                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
  2206.                 else
  2207.                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
  2208.         }
  2209.  
  2210.         de_iir = I915_READ(DEIIR);
  2211.         if (de_iir) {
  2212.                 I915_WRITE(DEIIR, de_iir);
  2213.                 ret = IRQ_HANDLED;
  2214.                 if (INTEL_INFO(dev)->gen >= 7)
  2215.                         ivb_display_irq_handler(dev, de_iir);
  2216.                 else
  2217.                         ilk_display_irq_handler(dev, de_iir);
  2218.         }
  2219.  
  2220.         if (INTEL_INFO(dev)->gen >= 6) {
  2221.                 u32 pm_iir = I915_READ(GEN6_PMIIR);
  2222.                 if (pm_iir) {
  2223.                         I915_WRITE(GEN6_PMIIR, pm_iir);
  2224.                         ret = IRQ_HANDLED;
  2225.                         gen6_rps_irq_handler(dev_priv, pm_iir);
  2226.                 }
  2227.         }
  2228.  
  2229.         I915_WRITE(DEIER, de_ier);
  2230.         POSTING_READ(DEIER);
  2231.         if (!HAS_PCH_NOP(dev)) {
  2232.                 I915_WRITE(SDEIER, sde_ier);
  2233.                 POSTING_READ(SDEIER);
  2234.         }
  2235.  
  2236.         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
  2237.         enable_rpm_wakeref_asserts(dev_priv);
  2238.  
  2239.         return ret;
  2240. }
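        /*
         * ironlake_irq_handler() follows the recipe above: DEIER's master bit
         * and (unless the PCH is a NOP) the whole SDEIER register are turned
         * off first, each IIR (GT, DE, PM) is cleared before its handler
         * runs, and the enable registers are restored only once everything
         * has been processed.
         */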
  2241.  
  2242. static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
  2243.                                 const u32 hpd[HPD_NUM_PINS])
  2244. {
  2245.         struct drm_i915_private *dev_priv = to_i915(dev);
  2246.         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
  2247.  
  2248.         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
  2249.         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
  2250.  
  2251.         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
  2252.                            dig_hotplug_reg, hpd,
  2253.                            bxt_port_hotplug_long_detect);
  2254.  
  2255.         intel_hpd_irq_handler(dev, pin_mask, long_mask);
  2256. }
  2257.  
  2258. static irqreturn_t
  2259. gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
  2260. {
  2261.         struct drm_device *dev = dev_priv->dev;
  2262.         irqreturn_t ret = IRQ_NONE;
  2263.         u32 iir;
  2264.         enum pipe pipe;
  2265.  
  2266.         if (master_ctl & GEN8_DE_MISC_IRQ) {
  2267.                 iir = I915_READ(GEN8_DE_MISC_IIR);
  2268.                 if (iir) {
  2269.                         I915_WRITE(GEN8_DE_MISC_IIR, iir);
  2270.                         ret = IRQ_HANDLED;
  2271.                         if (iir & GEN8_DE_MISC_GSE)
  2272.                                 intel_opregion_asle_intr(dev);
  2273.                         else
  2274.                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
  2275.                 }
  2276.                 } else
  2278.         }
  2279.  
  2280.         if (master_ctl & GEN8_DE_PORT_IRQ) {
  2281.                 iir = I915_READ(GEN8_DE_PORT_IIR);
  2282.                 if (iir) {
  2283.                         u32 tmp_mask;
  2284.                         bool found = false;
  2285.  
  2286.                         I915_WRITE(GEN8_DE_PORT_IIR, iir);
  2287.                         ret = IRQ_HANDLED;
  2288.  
  2289.                         tmp_mask = GEN8_AUX_CHANNEL_A;
  2290.                         if (INTEL_INFO(dev_priv)->gen >= 9)
  2291.                                 tmp_mask |= GEN9_AUX_CHANNEL_B |
  2292.                                             GEN9_AUX_CHANNEL_C |
  2293.                                             GEN9_AUX_CHANNEL_D;
  2294.  
  2295.                         if (iir & tmp_mask) {
  2296.                                 dp_aux_irq_handler(dev);
  2297.                                 found = true;
  2298.                         }
  2299.  
  2300.                         if (IS_BROXTON(dev_priv)) {
  2301.                                 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
  2302.                                 if (tmp_mask) {
  2303.                                         bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
  2304.                                         found = true;
  2305.                                 }
  2306.                         } else if (IS_BROADWELL(dev_priv)) {
  2307.                                 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
  2308.                                 if (tmp_mask) {
  2309.                                         ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
  2310.                                         found = true;
  2311.                                 }
  2312.                         }
  2313.  
  2314.                         if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
  2315.                                 gmbus_irq_handler(dev);
  2316.                                 found = true;
  2317.                         }
  2318.  
  2319.                         if (!found)
  2320.                                 DRM_ERROR("Unexpected DE Port interrupt\n");
  2321.                 }
  2322.                 } else
  2324.         }
  2325.  
  2326.         for_each_pipe(dev_priv, pipe) {
  2327.                 u32 flip_done, fault_errors;
  2328.  
  2329.                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
  2330.                         continue;
  2331.  
  2332.                 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
  2333.                 if (!iir) {
  2334.                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
  2335.                         continue;
  2336.                 }
  2337.  
  2338.                 ret = IRQ_HANDLED;
  2339.                 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
  2340.  
  2341.                 if (iir & GEN8_PIPE_VBLANK &&
  2342.                     intel_pipe_handle_vblank(dev, pipe))
  2343.                         intel_check_page_flip(dev, pipe);
  2344.  
  2345.                 flip_done = iir;
  2346.                 if (INTEL_INFO(dev_priv)->gen >= 9)
  2347.                         flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
  2348.                 else
  2349.                         flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
  2350.  
  2351.                 if (flip_done) {
  2352.                         intel_prepare_page_flip(dev, pipe);
  2353.                         intel_finish_page_flip_plane(dev, pipe);
  2354.                 }
  2355.  
  2356.                 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
  2357.                         hsw_pipe_crc_irq_handler(dev, pipe);
  2358.  
  2359.                 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
  2360.                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
  2361.  
  2362.                 fault_errors = iir;
  2363.                 if (INTEL_INFO(dev_priv)->gen >= 9)
  2364.                         fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
  2365.                 else
  2366.                         fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
  2367.  
  2368.                 if (fault_errors)
  2369.                         DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
  2370.                                   pipe_name(pipe),
  2371.                                   fault_errors);
  2372.         }
  2373.  
  2374.         if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
  2375.             master_ctl & GEN8_DE_PCH_IRQ) {
  2376.                 /*
  2377.                  * FIXME(BDW): Assume for now that the new interrupt handling
  2378.                  * scheme also closed the SDE interrupt handling race we've seen
  2379.                  * on older pch-split platforms. But this needs testing.
  2380.                  */
  2381.                 iir = I915_READ(SDEIIR);
  2382.                 if (iir) {
  2383.                         I915_WRITE(SDEIIR, iir);
  2384.                         ret = IRQ_HANDLED;
  2385.  
  2386.                         if (HAS_PCH_SPT(dev_priv))
  2387.                                 spt_irq_handler(dev, iir);
  2388.                         else
  2389.                                 cpt_irq_handler(dev, iir);
  2390.                 } else {
  2391.                         /*
  2392.                          * Like on previous PCH there seems to be something
  2393.                          * fishy going on with forwarding PCH interrupts.
  2394.                          */
  2395.                         DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
  2396.                 }
  2397.         }
  2398.  
  2399.         return ret;
  2400. }
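        /*
         * gen8_de_irq_handler() walks the display banks in a fixed order
         * (MISC, PORT, one IIR per pipe, then the PCH), acking each IIR
         * before acting on it. A bank that is flagged in master_ctl but whose
         * IIR reads back zero is reported as "the master control interrupt
         * lied".
         */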
  2401.  
  2402. static irqreturn_t gen8_irq_handler(int irq, void *arg)
  2403. {
  2404.         struct drm_device *dev = arg;
  2405.         struct drm_i915_private *dev_priv = dev->dev_private;
  2406.         u32 master_ctl;
  2407.         irqreturn_t ret;
  2408.  
  2409.         if (!intel_irqs_enabled(dev_priv))
  2410.                 return IRQ_NONE;
  2411.  
  2412.         master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
  2413.         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
  2414.         if (!master_ctl)
  2415.                 return IRQ_NONE;
  2416.  
  2417.         I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
  2418.  
  2419.         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
  2420.         disable_rpm_wakeref_asserts(dev_priv);
  2421.  
  2422.         /* Find, clear, then process each source of interrupt */
  2423.         ret = gen8_gt_irq_handler(dev_priv, master_ctl);
  2424.         ret |= gen8_de_irq_handler(dev_priv, master_ctl);
  2425.  
  2426.         I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
  2427.         POSTING_READ_FW(GEN8_MASTER_IRQ);
  2428.  
  2429.         enable_rpm_wakeref_asserts(dev_priv);
  2430.  
  2431.         return ret;
  2432. }
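        /*
         * Top-level gen8 flow: latch master_ctl, write 0 to GEN8_MASTER_IRQ
         * to hold off further interrupts, run the GT and DE sub-handlers,
         * then restore the master enable bit with a posting read. The raw
         * _FW accessors are used here, presumably to avoid forcewake and
         * uncore locking overhead in the hot interrupt path.
         */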
  2433.  
  2434. static void i915_error_wake_up(struct drm_i915_private *dev_priv,
  2435.                                bool reset_completed)
  2436. {
  2437.         struct intel_engine_cs *ring;
  2438.         int i;
  2439.  
  2440.         /*
  2441.          * Notify all waiters for GPU completion events that reset state has
  2442.          * been changed, and that they need to restart their wait after
  2443.          * checking for potential errors (and bail out to drop locks if there is
  2444.          * a gpu reset pending so that i915_error_work_func can acquire them).
  2445.          */
  2446.  
  2447.         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
  2448.         for_each_ring(ring, dev_priv, i)
  2449.                 wake_up_all(&ring->irq_queue);
  2450.  
  2451.         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
  2452.         wake_up_all(&dev_priv->pending_flip_queue);
  2453.  
  2454.         /*
  2455.          * Signal tasks blocked in i915_gem_wait_for_error that the pending
  2456.          * reset state is cleared.
  2457.          */
  2458.         if (reset_completed)
  2459.                 wake_up_all(&dev_priv->gpu_error.reset_queue);
  2460. }
  2461.  
  2462. /**
  2463.  * i915_reset_and_wakeup - do process context error handling work
  2464.  * @dev: drm device
  2465.  *
  2466.  * Fire an error uevent so userspace can see that a hang or error
  2467.  * was detected.
  2468.  */
  2469. static void i915_reset_and_wakeup(struct drm_device *dev)
  2470. {
  2471.         struct drm_i915_private *dev_priv = to_i915(dev);
  2472.         struct i915_gpu_error *error = &dev_priv->gpu_error;
  2473.         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
  2474.         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
  2475.         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
  2476.         int ret;
  2477.  
  2478.         /*
  2479.          * Note that there's only one work item which does gpu resets, so we
  2480.          * need not worry about concurrent gpu resets potentially incrementing
  2481.          * error->reset_counter twice. We only need to take care of another
  2482.          * racing irq/hangcheck declaring the gpu dead for a second time. A
  2483.          * quick check for that is good enough: schedule_work ensures the
  2484.          * correct ordering between hang detection and this work item, and since
  2485.          * the reset in-progress bit is only ever set by code outside of this
  2486.          * work we don't need to worry about any other races.
  2487.          */
  2488.         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
  2489.                 DRM_DEBUG_DRIVER("resetting chip\n");
  2490.                 /*
  2491.                  * In most cases it's guaranteed that we get here with an RPM
  2492.                  * reference held, for example because there is a pending GPU
  2493.                  * request that won't finish until the reset is done. This
  2494.                  * isn't the case at least when we get here by doing a
  2495.                  * simulated reset via debugfs, so get an RPM reference.
  2496.                  */
  2497.                 intel_runtime_pm_get(dev_priv);
  2498.  
  2499.                 intel_prepare_reset(dev);
  2500.  
  2501.                 /*
  2502.                  * All state reset _must_ be completed before we update the
  2503.                  * reset counter, for otherwise waiters might miss the reset
  2504.                  * pending state and not properly drop locks, resulting in
  2505.                  * deadlocks with the reset work.
  2506.                  */
  2507.                 ret = i915_reset(dev);
  2508.  
  2509.                 intel_finish_reset(dev);
  2510.  
  2511.                 intel_runtime_pm_put(dev_priv);
  2512.  
  2513.                 if (ret == 0) {
  2514.                         /*
  2515.                          * After all the gem state is reset, increment the reset
  2516.                          * counter and wake up everyone waiting for the reset to
  2517.                          * complete.
  2518.                          *
  2519.                          * Since unlock operations are a one-sided barrier only,
  2520.                          * we need to insert a barrier here to order any seqno
  2521.                          * updates before
  2522.                          * the counter increment.
  2523.                          */
  2524.                         smp_mb__before_atomic();
  2525.                         atomic_inc(&dev_priv->gpu_error.reset_counter);
  2526.  
  2527.                 } else {
  2528.                         atomic_or(I915_WEDGED, &error->reset_counter);
  2529.                 }
  2530.  
  2531.                 /*
  2532.                  * Note: The wake_up also serves as a memory barrier so that
  2533.                  * waiters see the updated value of the reset counter atomic_t.
  2534.                  */
  2535.                 i915_error_wake_up(dev_priv, true);
  2536.         }
  2537. }
  2538.  
  2539. static void i915_report_and_clear_eir(struct drm_device *dev)
  2540. {
  2541.         struct drm_i915_private *dev_priv = dev->dev_private;
  2542.         uint32_t instdone[I915_NUM_INSTDONE_REG];
  2543.         u32 eir = I915_READ(EIR);
  2544.         int pipe, i;
  2545.  
  2546.         if (!eir)
  2547.                 return;
  2548.  
  2549.         pr_err("render error detected, EIR: 0x%08x\n", eir);
  2550.  
  2551.         i915_get_extra_instdone(dev, instdone);
  2552.  
  2553.         if (IS_G4X(dev)) {
  2554.                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
  2555.                         u32 ipeir = I915_READ(IPEIR_I965);
  2556.  
  2557.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
  2558.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
  2559.                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
  2560.                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
  2561.                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
  2562.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
  2563.                         I915_WRITE(IPEIR_I965, ipeir);
  2564.                         POSTING_READ(IPEIR_I965);
  2565.                 }
  2566.                 if (eir & GM45_ERROR_PAGE_TABLE) {
  2567.                         u32 pgtbl_err = I915_READ(PGTBL_ER);
  2568.                         pr_err("page table error\n");
  2569.                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
  2570.                         I915_WRITE(PGTBL_ER, pgtbl_err);
  2571.                         POSTING_READ(PGTBL_ER);
  2572.                 }
  2573.         }
  2574.  
  2575.         if (!IS_GEN2(dev)) {
  2576.                 if (eir & I915_ERROR_PAGE_TABLE) {
  2577.                         u32 pgtbl_err = I915_READ(PGTBL_ER);
  2578.                         pr_err("page table error\n");
  2579.                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
  2580.                         I915_WRITE(PGTBL_ER, pgtbl_err);
  2581.                         POSTING_READ(PGTBL_ER);
  2582.                 }
  2583.         }
  2584.  
  2585.         if (eir & I915_ERROR_MEMORY_REFRESH) {
  2586.                 pr_err("memory refresh error:\n");
  2587.                 for_each_pipe(dev_priv, pipe)
  2588.                         pr_err("pipe %c stat: 0x%08x\n",
  2589.                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
  2590.                 /* pipestat has already been acked */
  2591.         }
  2592.         if (eir & I915_ERROR_INSTRUCTION) {
  2593.                 pr_err("instruction error\n");
  2594.                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
  2595.                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
  2596.                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
  2597.                 if (INTEL_INFO(dev)->gen < 4) {
  2598.                         u32 ipeir = I915_READ(IPEIR);
  2599.  
  2600.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
  2601.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
  2602.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
  2603.                         I915_WRITE(IPEIR, ipeir);
  2604.                         POSTING_READ(IPEIR);
  2605.                 } else {
  2606.                         u32 ipeir = I915_READ(IPEIR_I965);
  2607.  
  2608.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
  2609.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
  2610.                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
  2611.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
  2612.                         I915_WRITE(IPEIR_I965, ipeir);
  2613.                         POSTING_READ(IPEIR_I965);
  2614.                 }
  2615.         }
  2616.  
  2617.         I915_WRITE(EIR, eir);
  2618.         POSTING_READ(EIR);
  2619.         eir = I915_READ(EIR);
  2620.         if (eir) {
  2621.                 /*
  2622.                  * some errors might have become stuck,
  2623.                  * mask them.
  2624.                  */
  2625.                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
  2626.                 I915_WRITE(EMR, I915_READ(EMR) | eir);
  2627.                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  2628.         }
  2629. }
  2630.  
  2631. /**
  2632.  * i915_handle_error - handle a gpu error
  2633.  * @dev: drm device
         * @wedged: whether the hang is terminal, i.e. a GPU reset should be attempted
         * @fmt: printf-style format string for the human-readable error message
  2634.  *
  2635.  * Do some basic checking of register state at error time and
  2636.  * dump it to the syslog.  Also call i915_capture_error_state() to make
  2637.  * sure we get a record and make it available in debugfs.  Fire a uevent
  2638.  * so userspace knows something bad happened (should trigger collection
  2639.  * of a ring dump etc.).
  2640.  */
  2641. void i915_handle_error(struct drm_device *dev, bool wedged,
  2642.                        const char *fmt, ...)
  2643. {
  2644.         struct drm_i915_private *dev_priv = dev->dev_private;
  2645.         va_list args;
  2646.         char error_msg[80];
  2647.  
  2648.         va_start(args, fmt);
  2649.         vscnprintf(error_msg, sizeof(error_msg), fmt, args);
  2650.         va_end(args);
  2651.  
  2652.         i915_capture_error_state(dev, wedged, error_msg);
  2653.         i915_report_and_clear_eir(dev);
  2654.  
  2655.         if (wedged) {
  2656.                 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
  2657.                                 &dev_priv->gpu_error.reset_counter);
  2658.  
  2659.                 /*
  2660.                  * Wakeup waiting processes so that the reset function
  2661.                  * i915_reset_and_wakeup doesn't deadlock trying to grab
  2662.                  * various locks. By bumping the reset counter first, the woken
  2663.                  * processes will see a reset in progress and back off,
  2664.                  * releasing their locks and then wait for the reset completion.
  2665.                  * We must do this for _all_ gpu waiters that might hold locks
  2666.                  * that the reset work needs to acquire.
  2667.                  *
  2668.                  * Note: The wake_up serves as the required memory barrier to
  2669.                  * ensure that the waiters see the updated value of the reset
  2670.                  * counter atomic_t.
  2671.                  */
  2672.                 i915_error_wake_up(dev_priv, false);
  2673.         }
  2674.  
  2675.         i915_reset_and_wakeup(dev);
  2676. }
  2677.  
  2678. /* Called from drm generic code, passed 'crtc' which
  2679.  * we use as a pipe index
  2680.  */
  2681. static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
  2682. {
  2683.         struct drm_i915_private *dev_priv = dev->dev_private;
  2684.         unsigned long irqflags;
  2685.  
  2686.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2687.         if (INTEL_INFO(dev)->gen >= 4)
  2688.                 i915_enable_pipestat(dev_priv, pipe,
  2689.                                      PIPE_START_VBLANK_INTERRUPT_STATUS);
  2690.         else
  2691.                 i915_enable_pipestat(dev_priv, pipe,
  2692.                                      PIPE_VBLANK_INTERRUPT_STATUS);
  2693.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2694.  
  2695.         return 0;
  2696. }
  2697.  
  2698. static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
  2699. {
  2700.         struct drm_i915_private *dev_priv = dev->dev_private;
  2701.         unsigned long irqflags;
  2702.         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
  2703.                                                      DE_PIPE_VBLANK(pipe);
  2704.  
  2705.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2706.         ilk_enable_display_irq(dev_priv, bit);
  2707.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2708.  
  2709.         return 0;
  2710. }
  2711.  
  2712. static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
  2713. {
  2714.         struct drm_i915_private *dev_priv = dev->dev_private;
  2715.         unsigned long irqflags;
  2716.  
  2717.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2718.         i915_enable_pipestat(dev_priv, pipe,
  2719.                              PIPE_START_VBLANK_INTERRUPT_STATUS);
  2720.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2721.  
  2722.         return 0;
  2723. }
  2724.  
  2725. static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
  2726. {
  2727.         struct drm_i915_private *dev_priv = dev->dev_private;
  2728.         unsigned long irqflags;
  2729.  
  2730.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2731.         bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
  2732.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2733.  
  2734.         return 0;
  2735. }
  2736.  
  2737. /* Called from drm generic code, passed 'crtc' which
  2738.  * we use as a pipe index
  2739.  */
  2740. static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
  2741. {
  2742.         struct drm_i915_private *dev_priv = dev->dev_private;
  2743.         unsigned long irqflags;
  2744.  
  2745.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2746.         i915_disable_pipestat(dev_priv, pipe,
  2747.                               PIPE_VBLANK_INTERRUPT_STATUS |
  2748.                               PIPE_START_VBLANK_INTERRUPT_STATUS);
  2749.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2750. }
  2751.  
  2752. static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
  2753. {
  2754.         struct drm_i915_private *dev_priv = dev->dev_private;
  2755.         unsigned long irqflags;
  2756.         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
  2757.                                                      DE_PIPE_VBLANK(pipe);
  2758.  
  2759.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2760.         ilk_disable_display_irq(dev_priv, bit);
  2761.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2762. }
  2763.  
  2764. static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
  2765. {
  2766.         struct drm_i915_private *dev_priv = dev->dev_private;
  2767.         unsigned long irqflags;
  2768.  
  2769.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2770.         i915_disable_pipestat(dev_priv, pipe,
  2771.                               PIPE_START_VBLANK_INTERRUPT_STATUS);
  2772.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2773. }
  2774.  
  2775. static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
  2776. {
  2777.         struct drm_i915_private *dev_priv = dev->dev_private;
  2778.         unsigned long irqflags;
  2779.  
  2780.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2781.         bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
  2782.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2783. }
  2784.  
  2785. static bool
  2786. ring_idle(struct intel_engine_cs *ring, u32 seqno)
  2787. {
  2788.         return (list_empty(&ring->request_list) ||
  2789.                 i915_seqno_passed(seqno, ring->last_submitted_seqno));
  2790. }
  2791.  
  2792. static bool
  2793. ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
  2794. {
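                /*
                 * Gen8+ checks IPEHR for the MI_SEMAPHORE_WAIT opcode (0x1c in
                 * bits 28:23 of the dword); earlier gens use MI_SEMAPHORE_MBOX
                 * with the COMPARE and REGISTER flags, so the per-ring sync
                 * bits are masked off before the comparison below.
                 */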
  2795.         if (INTEL_INFO(dev)->gen >= 8) {
  2796.                 return (ipehr >> 23) == 0x1c;
  2797.         } else {
  2798.                 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
  2799.                 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
  2800.                                  MI_SEMAPHORE_REGISTER);
  2801.         }
  2802. }
  2803.  
  2804. static struct intel_engine_cs *
  2805. semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
  2806. {
  2807.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  2808.         struct intel_engine_cs *signaller;
  2809.         int i;
  2810.  
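                /*
                 * Gen8+ identifies the signaller by the 64-bit GGTT offset
                 * embedded in the wait command; earlier gens encode the
                 * partner ring in the MBOX sync bits of the IPEHR dword.
                 */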
  2811.         if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
  2812.                 for_each_ring(signaller, dev_priv, i) {
  2813.                         if (ring == signaller)
  2814.                                 continue;
  2815.  
  2816.                         if (offset == signaller->semaphore.signal_ggtt[ring->id])
  2817.                                 return signaller;
  2818.                 }
  2819.         } else {
  2820.                 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
  2821.  
  2822.                 for_each_ring(signaller, dev_priv, i) {
  2823.                         if (ring == signaller)
  2824.                                 continue;
  2825.  
  2826.                         if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
  2827.                                 return signaller;
  2828.                 }
  2829.         }
  2830.  
  2831.         DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
  2832.                   ring->id, ipehr, offset);
  2833.  
  2834.         return NULL;
  2835. }
  2836.  
  2837. static struct intel_engine_cs *
  2838. semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
  2839. {
  2840.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  2841.         u32 cmd, ipehr, head;
  2842.         u64 offset = 0;
  2843.         int i, backwards;
  2844.  
  2845.         /*
  2846.          * This function does not support execlist mode - any attempt to
  2847.          * proceed further into this function will result in a kernel panic
  2848.          * when dereferencing ring->buffer, which is not set up in execlist
  2849.          * mode.
  2850.          *
  2851.          * The correct way of doing it would be to derive the currently
  2852.          * executing ring buffer from the current context, which is derived
  2853.          * from the currently running request. Unfortunately, to get the
  2854.          * current request we would have to grab the struct_mutex before doing
  2855.          * anything else, which would be ill-advised since some other thread
  2856.          * might have grabbed it already and managed to hang itself, causing
  2857.          * the hang checker to deadlock.
  2858.          *
  2859.          * Therefore, this function does not support execlist mode in its
  2860.          * current form. Just return NULL and move on.
  2861.          */
  2862.         if (ring->buffer == NULL)
  2863.                 return NULL;
  2864.  
  2865.         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
  2866.         if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
  2867.                 return NULL;
  2868.  
  2869.         /*
  2870.          * HEAD is likely pointing to the dword after the actual command,
  2871.          * so scan backwards until we find the MBOX. But limit it to just 3
  2872.          * or 4 dwords depending on the semaphore wait command size.
  2873.          * Note that we don't care about ACTHD here since that might
  2874.          * point at a batch, and semaphores are always emitted into the
  2875.          * ringbuffer itself.
  2876.          */
  2877.         head = I915_READ_HEAD(ring) & HEAD_ADDR;
  2878.         backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
  2879.  
  2880.         for (i = backwards; i; --i) {
  2881.                 /*
  2882.                  * Be paranoid and presume the hw has gone off into the wild -
  2883.                  * our ring is smaller than what the hardware (and hence
  2884.                  * HEAD_ADDR) allows. Also handles wrap-around.
  2885.                  */
  2886.                 head &= ring->buffer->size - 1;
  2887.  
  2888.                 /* This here seems to blow up */
  2889.                 cmd = ioread32(ring->buffer->virtual_start + head);
  2890.                 if (cmd == ipehr)
  2891.                         break;
  2892.  
  2893.                 head -= 4;
  2894.         }
  2895.  
  2896.         if (!i)
  2897.                 return NULL;
  2898.  
  2899.         *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
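                /*
                 * On gen8+ the wait command also carries the 64-bit GGTT
                 * address of the signalling semaphore (low dword at +8, high
                 * dword at +12); reassemble it so the signaller ring can be
                 * looked up below.
                 */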
  2900.         if (INTEL_INFO(ring->dev)->gen >= 8) {
  2901.                 offset = ioread32(ring->buffer->virtual_start + head + 12);
  2902.                 offset <<= 32;
  2903.                 offset |= ioread32(ring->buffer->virtual_start + head + 8);
  2904.         }
  2905.         return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
  2906. }
  2907.  
  2908. static int semaphore_passed(struct intel_engine_cs *ring)
  2909. {
  2910.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  2911.         struct intel_engine_cs *signaller;
  2912.         u32 seqno;
  2913.  
  2914.         ring->hangcheck.deadlock++;
  2915.  
  2916.         signaller = semaphore_waits_for(ring, &seqno);
  2917.         if (signaller == NULL)
  2918.                 return -1;
  2919.  
  2920.         /* Prevent pathological recursion due to driver bugs */
  2921.         if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
  2922.                 return -1;
  2923.  
  2924.         if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
  2925.                 return 1;
  2926.  
  2927.         /* cursory check for an unkickable deadlock */
  2928.         if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
  2929.             semaphore_passed(signaller) < 0)
  2930.                 return -1;
  2931.  
  2932.         return 0;
  2933. }
  2934.  
  2935. static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
  2936. {
  2937.         struct intel_engine_cs *ring;
  2938.         int i;
  2939.  
  2940.         for_each_ring(ring, dev_priv, i)
  2941.                 ring->hangcheck.deadlock = 0;
  2942. }
  2943.  
  2944. static bool subunits_stuck(struct intel_engine_cs *ring)
  2945. {
  2946.         u32 instdone[I915_NUM_INSTDONE_REG];
  2947.         bool stuck;
  2948.         int i;
  2949.  
  2950.         if (ring->id != RCS)
  2951.                 return true;
  2952.  
  2953.         i915_get_extra_instdone(ring->dev, instdone);
  2954.  
  2955.         /* There might be unstable subunit states even when
  2956.          * actual head is not moving. Filter out the unstable ones by
  2957.          * accumulating the undone -> done transitions and only
  2958.          * consider those as progress.
  2959.          */
  2960.         stuck = true;
  2961.         for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
  2962.                 const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
  2963.  
  2964.                 if (tmp != ring->hangcheck.instdone[i])
  2965.                         stuck = false;
  2966.  
  2967.                 ring->hangcheck.instdone[i] |= tmp;
  2968.         }
  2969.  
  2970.         return stuck;
  2971. }
  2972.  
  2973. static enum intel_ring_hangcheck_action
  2974. head_stuck(struct intel_engine_cs *ring, u64 acthd)
  2975. {
  2976.         if (acthd != ring->hangcheck.acthd) {
  2977.  
  2978.                 /* Clear subunit states on head movement */
  2979.                 memset(ring->hangcheck.instdone, 0,
  2980.                        sizeof(ring->hangcheck.instdone));
  2981.  
  2982.                 if (acthd > ring->hangcheck.max_acthd) {
  2983.                         ring->hangcheck.max_acthd = acthd;
  2984.                         return HANGCHECK_ACTIVE;
  2985.                 }
  2986.  
  2987.                 return HANGCHECK_ACTIVE_LOOP;
  2988.         }
  2989.  
  2990.         if (!subunits_stuck(ring))
  2991.                 return HANGCHECK_ACTIVE;
  2992.  
  2993.         return HANGCHECK_HUNG;
  2994. }
  2995.  
  2996. static enum intel_ring_hangcheck_action
  2997. ring_stuck(struct intel_engine_cs *ring, u64 acthd)
  2998. {
  2999.         struct drm_device *dev = ring->dev;
  3000.         struct drm_i915_private *dev_priv = dev->dev_private;
  3001.         enum intel_ring_hangcheck_action ha;
  3002.         u32 tmp;
  3003.  
  3004.         ha = head_stuck(ring, acthd);
  3005.         if (ha != HANGCHECK_HUNG)
  3006.                 return ha;
  3007.  
  3008.         if (IS_GEN2(dev))
  3009.                 return HANGCHECK_HUNG;
  3010.  
  3011.         /* Is the chip hanging on a WAIT_FOR_EVENT?
  3012.          * If so we can simply poke the RB_WAIT bit
  3013.          * and break the hang. This should work on
  3014.          * all but the second generation chipsets.
  3015.          */
  3016.         tmp = I915_READ_CTL(ring);
  3017.         if (tmp & RING_WAIT) {
  3018.                 i915_handle_error(dev, false,
  3019.                                   "Kicking stuck wait on %s",
  3020.                                   ring->name);
  3021.                 I915_WRITE_CTL(ring, tmp);
  3022.                 return HANGCHECK_KICK;
  3023.         }
  3024.  
  3025.         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
  3026.                 switch (semaphore_passed(ring)) {
  3027.                 default:
  3028.                         return HANGCHECK_HUNG;
  3029.                 case 1:
  3030.                         i915_handle_error(dev, false,
  3031.                                           "Kicking stuck semaphore on %s",
  3032.                                           ring->name);
  3033.                         I915_WRITE_CTL(ring, tmp);
  3034.                         return HANGCHECK_KICK;
  3035.                 case 0:
  3036.                         return HANGCHECK_WAIT;
  3037.                 }
  3038.         }
  3039.  
  3040.         return HANGCHECK_HUNG;
  3041. }
  3042.  
  3043. /*
  3044.  * This is called when the chip hasn't reported back with completed
  3045.  * batchbuffers in a long time. We keep track of per-ring seqno progress and,
  3046.  * if there is no progress, the hangcheck score for that ring is increased.
  3047.  * Further, acthd is inspected to see if the ring is stuck. If it is, we kick
  3048.  * the ring. If we see no progress on three subsequent calls we assume the
  3049.  * chip is wedged and try to fix it by resetting the chip.
  3050.  */
  3051. static void i915_hangcheck_elapsed(struct work_struct *work)
  3052. {
  3053.         struct drm_i915_private *dev_priv =
  3054.                 container_of(work, typeof(*dev_priv),
  3055.                              gpu_error.hangcheck_work.work);
  3056.         struct drm_device *dev = dev_priv->dev;
  3057.         struct intel_engine_cs *ring;
  3058.         int i;
  3059.         int busy_count = 0, rings_hung = 0;
  3060.         bool stuck[I915_NUM_RINGS] = { 0 };
  3061. #define BUSY 1
  3062. #define KICK 5
  3063. #define HUNG 20
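                /*
                 * Per-pass score increments: BUSY for a ring that is busy but
                 * not advancing, KICK each time we have to kick it out of a
                 * stuck wait, HUNG when it looks genuinely hung. A ring whose
                 * score reaches HANGCHECK_SCORE_RING_HUNG (checked near the
                 * end of this function) is reported as hung and a GPU reset
                 * is requested.
                 */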
  3064.  
  3065.         if (!i915.enable_hangcheck)
  3066.                 return;
  3067.  
  3068.         /*
  3069.          * The hangcheck work is synced during runtime suspend, so we don't
  3070.          * require a wakeref. TODO: instead of disabling the asserts make
  3071.          * sure that we hold a reference when this work is running.
  3072.          */
  3073.         DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
  3074.  
  3075.         /* As enabling the GPU requires fairly extensive mmio access,
  3076.          * periodically arm the mmio checker to see if we are triggering
  3077.          * any invalid access.
  3078.          */
  3079.         intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
  3080.  
  3081.         for_each_ring(ring, dev_priv, i) {
  3082.                 u64 acthd;
  3083.                 u32 seqno;
  3084.                 bool busy = true;
  3085.  
  3086.                 semaphore_clear_deadlocks(dev_priv);
  3087.  
  3088.                 seqno = ring->get_seqno(ring, false);
  3089.                 acthd = intel_ring_get_active_head(ring);
  3090.  
  3091.                 if (ring->hangcheck.seqno == seqno) {
  3092.                         if (ring_idle(ring, seqno)) {
  3093.                                 ring->hangcheck.action = HANGCHECK_IDLE;
  3094.  
  3095.                                 if (waitqueue_active(&ring->irq_queue)) {
  3096.                                         /* Issue a wake-up to catch stuck h/w. */
  3097.                                         if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
  3098.                                                 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
  3099.                                                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
  3100.                                                                   ring->name);
  3101.                                                 else
  3102.                                                         DRM_INFO("Fake missed irq on %s\n",
  3103.                                                                  ring->name);
  3104.                                                 wake_up_all(&ring->irq_queue);
  3105.                                         }
  3106.                                         /* Safeguard against driver failure */
  3107.                                         ring->hangcheck.score += BUSY;
  3108.                                 } else
  3109.                                         busy = false;
  3110.                         } else {
  3111.                                 /* We always increment the hangcheck score
  3112.                                  * if the ring is busy and still processing
  3113.                                  * the same request, so that no single request
  3114.                                  * can run indefinitely (such as a chain of
  3115.                                  * batches). The only time we do not increment
  3116.                                  * the hangcheck score on this ring is when it
  3117.                                  * is in a legitimate wait for another
  3118.                                  * ring. In that case the waiting ring is a
  3119.                                  * victim and we want to be sure we catch the
  3120.                                  * right culprit. Then every time we do kick
  3121.                                  * the ring, add a small increment to the
  3122.                                  * score so that we can catch a batch that is
  3123.                                  * being repeatedly kicked and so responsible
  3124.                                  * for stalling the machine.
  3125.                                  */
  3126.                                 ring->hangcheck.action = ring_stuck(ring,
  3127.                                                                     acthd);
  3128.  
  3129.                                 switch (ring->hangcheck.action) {
  3130.                                 case HANGCHECK_IDLE:
  3131.                                 case HANGCHECK_WAIT:
  3132.                                 case HANGCHECK_ACTIVE:
  3133.                                         break;
  3134.                                 case HANGCHECK_ACTIVE_LOOP:
  3135.                                         ring->hangcheck.score += BUSY;
  3136.                                         break;
  3137.                                 case HANGCHECK_KICK:
  3138.                                         ring->hangcheck.score += KICK;
  3139.                                         break;
  3140.                                 case HANGCHECK_HUNG:
  3141.                                         ring->hangcheck.score += HUNG;
  3142.                                         stuck[i] = true;
  3143.                                         break;
  3144.                                 }
  3145.                         }
  3146.                 } else {
  3147.                         ring->hangcheck.action = HANGCHECK_ACTIVE;
  3148.  
  3149.                         /* Gradually reduce the count so that we catch DoS
  3150.                          * attempts across multiple batches.
  3151.                          */
  3152.                         if (ring->hangcheck.score > 0)
  3153.                                 ring->hangcheck.score--;
  3154.  
  3155.                         /* Clear head and subunit states on seqno movement */
  3156.                         ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
  3157.  
  3158.                         memset(ring->hangcheck.instdone, 0,
  3159.                                sizeof(ring->hangcheck.instdone));
  3160.                 }
  3161.  
  3162.                 ring->hangcheck.seqno = seqno;
  3163.                 ring->hangcheck.acthd = acthd;
  3164.                 busy_count += busy;
  3165.         }
  3166.  
  3167.         for_each_ring(ring, dev_priv, i) {
  3168.                 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
  3169.                         DRM_INFO("%s on %s\n",
  3170.                                  stuck[i] ? "stuck" : "no progress",
  3171.                                  ring->name);
  3172.                         rings_hung++;
  3173.                 }
  3174.         }
  3175.  
  3176.         if (rings_hung) {
  3177.                 i915_handle_error(dev, true, "Ring hung");
  3178.                 goto out;
  3179.         }
  3180.  
  3181.         if (busy_count)
  3182.                 /* Reset the timer in case the chip hangs without another
  3183.                  * request being added */
  3184.                 i915_queue_hangcheck(dev);
  3185.  
  3186. out:
  3187.         ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
  3188. }
  3189.  
  3190. void i915_queue_hangcheck(struct drm_device *dev)
  3191. {
  3192.         struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
  3193.  
  3194.         if (!i915.enable_hangcheck)
  3195.                 return;
  3196.  
  3197.         /* Don't continually defer the hangcheck so that it is always run at
  3198.          * least once after work has been scheduled on any ring. Otherwise,
  3199.          * we will ignore a hung ring if a second ring is kept busy.
  3200.          */
  3201.  
  3202.         queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
  3203.                            round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
  3204. }
  3205.  
  3206. static void ibx_irq_reset(struct drm_device *dev)
  3207. {
  3208.         struct drm_i915_private *dev_priv = dev->dev_private;
  3209.  
  3210.         if (HAS_PCH_NOP(dev))
  3211.                 return;
  3212.  
  3213.         GEN5_IRQ_RESET(SDE);
  3214.  
  3215.         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
  3216.                 I915_WRITE(SERR_INT, 0xffffffff);
  3217. }
  3218.  
  3219. /*
  3220.  * SDEIER is also touched by the interrupt handler to work around missed PCH
  3221.  * interrupts. Hence we can't update it after the interrupt handler is enabled -
  3222.  * instead we unconditionally enable all PCH interrupt sources here, but then
  3223.  * only unmask them as needed with SDEIMR.
  3224.  *
  3225.  * This function needs to be called before interrupts are enabled.
  3226.  */
  3227. static void ibx_irq_pre_postinstall(struct drm_device *dev)
  3228. {
  3229.         struct drm_i915_private *dev_priv = dev->dev_private;
  3230.  
  3231.         if (HAS_PCH_NOP(dev))
  3232.                 return;
  3233.  
  3234.         WARN_ON(I915_READ(SDEIER) != 0);
  3235.         I915_WRITE(SDEIER, 0xffffffff);
  3236.         POSTING_READ(SDEIER);
  3237. }
  3238.  
  3239. static void gen5_gt_irq_reset(struct drm_device *dev)
  3240. {
  3241.         struct drm_i915_private *dev_priv = dev->dev_private;
  3242.  
  3243.         GEN5_IRQ_RESET(GT);
  3244.         if (INTEL_INFO(dev)->gen >= 6)
  3245.                 GEN5_IRQ_RESET(GEN6_PM);
  3246. }
  3247.  
  3248. /* drm_dma.h hooks
  3249.  */
  3250. static void ironlake_irq_reset(struct drm_device *dev)
  3251. {
  3252.         struct drm_i915_private *dev_priv = dev->dev_private;
  3253.  
  3254.         I915_WRITE(HWSTAM, 0xffffffff);
  3255.  
  3256.         GEN5_IRQ_RESET(DE);
  3257.         if (IS_GEN7(dev))
  3258.                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
  3259.  
  3260.         gen5_gt_irq_reset(dev);
  3261.  
  3262.         ibx_irq_reset(dev);
  3263. }
  3264.  
  3265. static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
  3266. {
  3267.         enum pipe pipe;
  3268.  
  3269.         i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
  3270.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  3271.  
  3272.         for_each_pipe(dev_priv, pipe)
  3273.                 I915_WRITE(PIPESTAT(pipe), 0xffff);
  3274.  
  3275.         GEN5_IRQ_RESET(VLV_);
  3276. }
  3277.  
  3278. static void valleyview_irq_preinstall(struct drm_device *dev)
  3279. {
  3280.         struct drm_i915_private *dev_priv = dev->dev_private;
  3281.  
  3282.         /* VLV magic */
  3283.         I915_WRITE(VLV_IMR, 0);
  3284.         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
  3285.         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
  3286.         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
  3287.  
  3288.         gen5_gt_irq_reset(dev);
  3289.  
  3290.         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
  3291.  
  3292.         vlv_display_irq_reset(dev_priv);
  3293. }
  3294.  
  3295. static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
  3296. {
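                /*
                 * The four GT interrupt banks cover, in order: render + blitter,
                 * the two video decode rings, PM/RPS, and the video enhancement
                 * ring; see gen8_gt_irq_postinstall() for the enable side.
                 */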
  3297.         GEN8_IRQ_RESET_NDX(GT, 0);
  3298.         GEN8_IRQ_RESET_NDX(GT, 1);
  3299.         GEN8_IRQ_RESET_NDX(GT, 2);
  3300.         GEN8_IRQ_RESET_NDX(GT, 3);
  3301. }
  3302.  
  3303. static void gen8_irq_reset(struct drm_device *dev)
  3304. {
  3305.         struct drm_i915_private *dev_priv = dev->dev_private;
  3306.         int pipe;
  3307.  
  3308.         I915_WRITE(GEN8_MASTER_IRQ, 0);
  3309.         POSTING_READ(GEN8_MASTER_IRQ);
  3310.  
  3311.         gen8_gt_irq_reset(dev_priv);
  3312.  
  3313.         for_each_pipe(dev_priv, pipe)
  3314.                 if (intel_display_power_is_enabled(dev_priv,
  3315.                                                    POWER_DOMAIN_PIPE(pipe)))
  3316.                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
  3317.  
  3318.         GEN5_IRQ_RESET(GEN8_DE_PORT_);
  3319.         GEN5_IRQ_RESET(GEN8_DE_MISC_);
  3320.         GEN5_IRQ_RESET(GEN8_PCU_);
  3321.  
  3322.         if (HAS_PCH_SPLIT(dev))
  3323.                 ibx_irq_reset(dev);
  3324. }
  3325.  
  3326. void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
  3327.                                      unsigned int pipe_mask)
  3328. {
  3329.         uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
  3330.         enum pipe pipe;
  3331.  
  3332.         spin_lock_irq(&dev_priv->irq_lock);
  3333.         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
  3334.                 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
  3335.                                   dev_priv->de_irq_mask[pipe],
  3336.                                   ~dev_priv->de_irq_mask[pipe] | extra_ier);
  3337.         spin_unlock_irq(&dev_priv->irq_lock);
  3338. }
  3339.  
  3340. void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
  3341.                                      unsigned int pipe_mask)
  3342. {
  3343.         enum pipe pipe;
  3344.  
  3345.         spin_lock_irq(&dev_priv->irq_lock);
  3346.         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
  3347.                 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
  3348.         spin_unlock_irq(&dev_priv->irq_lock);
  3349.  
  3350.         /* make sure we're done processing display irqs */
  3351.         synchronize_irq(dev_priv->dev->irq);
  3352. }
  3353.  
  3354. static void cherryview_irq_preinstall(struct drm_device *dev)
  3355. {
  3356.         struct drm_i915_private *dev_priv = dev->dev_private;
  3357.  
  3358.         I915_WRITE(GEN8_MASTER_IRQ, 0);
  3359.         POSTING_READ(GEN8_MASTER_IRQ);
  3360.  
  3361.         gen8_gt_irq_reset(dev_priv);
  3362.  
  3363.         GEN5_IRQ_RESET(GEN8_PCU_);
  3364.  
  3365.         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
  3366.  
  3367.         vlv_display_irq_reset(dev_priv);
  3368. }
  3369.  
  3370. static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
  3371.                                   const u32 hpd[HPD_NUM_PINS])
  3372. {
  3373.         struct drm_i915_private *dev_priv = to_i915(dev);
  3374.         struct intel_encoder *encoder;
  3375.         u32 enabled_irqs = 0;
  3376.  
  3377.         for_each_intel_encoder(dev, encoder)
  3378.                 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
  3379.                         enabled_irqs |= hpd[encoder->hpd_pin];
  3380.  
  3381.         return enabled_irqs;
  3382. }
  3383.  
  3384. static void ibx_hpd_irq_setup(struct drm_device *dev)
  3385. {
  3386.         struct drm_i915_private *dev_priv = dev->dev_private;
  3387.         u32 hotplug_irqs, hotplug, enabled_irqs;
  3388.  
  3389.         if (HAS_PCH_IBX(dev)) {
  3390.                 hotplug_irqs = SDE_HOTPLUG_MASK;
  3391.                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
  3392.         } else {
  3393.                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
  3394.                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
  3395.         }
  3396.  
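                /*
                 * hotplug_irqs is every HPD bit we manage in SDEIMR;
                 * enabled_irqs is the subset whose encoders currently have
                 * hotplug detection enabled.
                 */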
  3397.         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
  3398.  
  3399.         /*
  3400.          * Enable digital hotplug on the PCH, and configure the DP short pulse
  3401.          * duration to 2ms (which is the minimum in the Display Port spec).
  3402.          * The pulse duration bits are reserved on LPT+.
  3403.          */
  3404.         hotplug = I915_READ(PCH_PORT_HOTPLUG);
  3405.         hotplug &= ~(PORTD_PULSE_DURATION_MASK | PORTC_PULSE_DURATION_MASK | PORTB_PULSE_DURATION_MASK);
  3406.         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
  3407.         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
  3408.         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
  3409.         /*
  3410.          * When CPU and PCH are on the same package, port A
  3411.          * HPD must be enabled in both north and south.
  3412.          */
  3413.         if (HAS_PCH_LPT_LP(dev))
  3414.                 hotplug |= PORTA_HOTPLUG_ENABLE;
  3415.         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  3416. }
  3417.  
  3418. static void spt_hpd_irq_setup(struct drm_device *dev)
  3419. {
  3420.         struct drm_i915_private *dev_priv = dev->dev_private;
  3421.         u32 hotplug_irqs, hotplug, enabled_irqs;
  3422.  
  3423.         hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
  3424.         enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
  3425.  
  3426.         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
  3427.  
  3428.         /* Enable digital hotplug on the PCH */
  3429.         hotplug = I915_READ(PCH_PORT_HOTPLUG);
  3430.         hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
  3431.                 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
  3432.         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  3433.  
  3434.         hotplug = I915_READ(PCH_PORT_HOTPLUG2);
  3435.         hotplug |= PORTE_HOTPLUG_ENABLE;
  3436.         I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
  3437. }
  3438.  
  3439. static void ilk_hpd_irq_setup(struct drm_device *dev)
  3440. {
  3441.         struct drm_i915_private *dev_priv = dev->dev_private;
  3442.         u32 hotplug_irqs, hotplug, enabled_irqs;
  3443.  
  3444.         if (INTEL_INFO(dev)->gen >= 8) {
  3445.                 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
  3446.                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
  3447.  
  3448.                 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
  3449.         } else if (INTEL_INFO(dev)->gen >= 7) {
  3450.                 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
  3451.                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
  3452.  
  3453.                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
  3454.         } else {
  3455.                 hotplug_irqs = DE_DP_A_HOTPLUG;
  3456.                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
  3457.  
  3458.                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
  3459.         }
  3460.  
  3461.         /*
  3462.          * Enable digital hotplug on the CPU, and configure the DP short pulse
  3463.          * duration to 2ms (which is the minimum in the Display Port spec).
  3464.          * The pulse duration bits are reserved on HSW+.
  3465.          */
  3466.         hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
  3467.         hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
  3468.         hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
  3469.         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
  3470.  
  3471.         ibx_hpd_irq_setup(dev);
  3472. }
  3473.  
  3474. static void bxt_hpd_irq_setup(struct drm_device *dev)
  3475. {
  3476.         struct drm_i915_private *dev_priv = dev->dev_private;
  3477.         u32 hotplug_irqs, hotplug, enabled_irqs;
  3478.  
  3479.         enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
  3480.         hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
  3481.  
  3482.         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
  3483.  
  3484.         hotplug = I915_READ(PCH_PORT_HOTPLUG);
  3485.         hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
  3486.                 PORTA_HOTPLUG_ENABLE;
  3487.         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  3488. }
  3489.  
  3490. static void ibx_irq_postinstall(struct drm_device *dev)
  3491. {
  3492.         struct drm_i915_private *dev_priv = dev->dev_private;
  3493.         u32 mask;
  3494.  
  3495.         if (HAS_PCH_NOP(dev))
  3496.                 return;
  3497.  
  3498.         if (HAS_PCH_IBX(dev))
  3499.                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
  3500.         else
  3501.                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
  3502.  
  3503.         gen5_assert_iir_is_zero(dev_priv, SDEIIR);
  3504.         I915_WRITE(SDEIMR, ~mask);
  3505. }
  3506.  
  3507. static void gen5_gt_irq_postinstall(struct drm_device *dev)
  3508. {
  3509.         struct drm_i915_private *dev_priv = dev->dev_private;
  3510.         u32 pm_irqs, gt_irqs;
  3511.  
  3512.         pm_irqs = gt_irqs = 0;
  3513.  
  3514.         dev_priv->gt_irq_mask = ~0;
  3515.         if (HAS_L3_DPF(dev)) {
  3516.                 /* L3 parity interrupt is always unmasked. */
  3517.                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
  3518.                 gt_irqs |= GT_PARITY_ERROR(dev);
  3519.         }
  3520.  
  3521.         gt_irqs |= GT_RENDER_USER_INTERRUPT;
  3522.         if (IS_GEN5(dev)) {
  3523.                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
  3524.                            ILK_BSD_USER_INTERRUPT;
  3525.         } else {
  3526.                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
  3527.         }
  3528.  
  3529.         GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
  3530.  
  3531.         if (INTEL_INFO(dev)->gen >= 6) {
  3532.                 /*
  3533.                  * RPS interrupts will get enabled/disabled on demand when RPS
  3534.                  * itself is enabled/disabled.
  3535.                  */
  3536.                 if (HAS_VEBOX(dev))
  3537.                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
  3538.  
  3539.                 dev_priv->pm_irq_mask = 0xffffffff;
  3540.                 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
  3541.         }
  3542. }
  3543.  
  3544. static int ironlake_irq_postinstall(struct drm_device *dev)
  3545. {
  3546.         struct drm_i915_private *dev_priv = dev->dev_private;
  3547.         u32 display_mask, extra_mask;
  3548.  
  3549.         if (INTEL_INFO(dev)->gen >= 7) {
  3550.                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
  3551.                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
  3552.                                 DE_PLANEB_FLIP_DONE_IVB |
  3553.                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
  3554.                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
  3555.                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
  3556.                               DE_DP_A_HOTPLUG_IVB);
  3557.         } else {
  3558.                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
  3559.                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
  3560.                                 DE_AUX_CHANNEL_A |
  3561.                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
  3562.                                 DE_POISON);
  3563.                 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
  3564.                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
  3565.                               DE_DP_A_HOTPLUG);
  3566.         }
  3567.  
  3568.         dev_priv->irq_mask = ~display_mask;
  3569.  
  3570.         I915_WRITE(HWSTAM, 0xeffe);
  3571.  
  3572.         ibx_irq_pre_postinstall(dev);
  3573.  
  3574.         GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
  3575.  
  3576.         gen5_gt_irq_postinstall(dev);
  3577.  
  3578.         ibx_irq_postinstall(dev);
  3579.  
  3580.         if (IS_IRONLAKE_M(dev)) {
  3581.                 /* Enable PCU event interrupts
  3582.                  *
  3583.                  * spinlocking not required here for correctness since interrupt
  3584.                  * setup is guaranteed to run in single-threaded context. But we
  3585.                  * need it to make the assert_spin_locked happy. */
  3586.                 spin_lock_irq(&dev_priv->irq_lock);
  3587.                 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
  3588.                 spin_unlock_irq(&dev_priv->irq_lock);
  3589.         }
  3590.  
  3591.         return 0;
  3592. }
  3593.  
  3594. static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
  3595. {
  3596.         u32 pipestat_mask;
  3597.         u32 iir_mask;
  3598.         enum pipe pipe;
  3599.  
  3600.         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
  3601.                         PIPE_FIFO_UNDERRUN_STATUS;
  3602.  
  3603.         for_each_pipe(dev_priv, pipe)
  3604.                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
  3605.         POSTING_READ(PIPESTAT(PIPE_A));
  3606.  
  3607.         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
  3608.                         PIPE_CRC_DONE_INTERRUPT_STATUS;
  3609.  
  3610.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
  3611.         for_each_pipe(dev_priv, pipe)
  3612.                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
  3613.  
  3614.         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
  3615.                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3616.                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
  3617.         if (IS_CHERRYVIEW(dev_priv))
  3618.                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
  3619.         dev_priv->irq_mask &= ~iir_mask;
  3620.  
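                /*
                 * Note: VLV_IIR is deliberately written twice here and in the
                 * uninstall path; the assumption is that the second write
                 * clears status bits that latch again while the first clear
                 * is still in flight.
                 */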
  3621.         I915_WRITE(VLV_IIR, iir_mask);
  3622.         I915_WRITE(VLV_IIR, iir_mask);
  3623.         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
  3624.         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  3625.         POSTING_READ(VLV_IMR);
  3626. }
  3627.  
  3628. static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
  3629. {
  3630.         u32 pipestat_mask;
  3631.         u32 iir_mask;
  3632.         enum pipe pipe;
  3633.  
  3634.         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
  3635.                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3636.                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
  3637.         if (IS_CHERRYVIEW(dev_priv))
  3638.                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
  3639.  
  3640.         dev_priv->irq_mask |= iir_mask;
  3641.         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  3642.         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
  3643.         I915_WRITE(VLV_IIR, iir_mask);
  3644.         I915_WRITE(VLV_IIR, iir_mask);
  3645.         POSTING_READ(VLV_IIR);
  3646.  
  3647.         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
  3648.                         PIPE_CRC_DONE_INTERRUPT_STATUS;
  3649.  
  3650.         i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
  3651.         for_each_pipe(dev_priv, pipe)
  3652.                 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
  3653.  
  3654.         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
  3655.                         PIPE_FIFO_UNDERRUN_STATUS;
  3656.  
  3657.         for_each_pipe(dev_priv, pipe)
  3658.                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
  3659.         POSTING_READ(PIPESTAT(PIPE_A));
  3660. }
  3661.  
  3662. void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
  3663. {
  3664.         assert_spin_locked(&dev_priv->irq_lock);
  3665.  
  3666.         if (dev_priv->display_irqs_enabled)
  3667.                 return;
  3668.  
  3669.         dev_priv->display_irqs_enabled = true;
  3670.  
  3671.         if (intel_irqs_enabled(dev_priv))
  3672.                 valleyview_display_irqs_install(dev_priv);
  3673. }
  3674.  
  3675. void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
  3676. {
  3677.         assert_spin_locked(&dev_priv->irq_lock);
  3678.  
  3679.         if (!dev_priv->display_irqs_enabled)
  3680.                 return;
  3681.  
  3682.         dev_priv->display_irqs_enabled = false;
  3683.  
  3684.         if (intel_irqs_enabled(dev_priv))
  3685.                 valleyview_display_irqs_uninstall(dev_priv);
  3686. }
  3687.  
  3688. static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
  3689. {
  3690.         dev_priv->irq_mask = ~0;
  3691.  
  3692.         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
  3693.         POSTING_READ(PORT_HOTPLUG_EN);
  3694.  
  3695.         I915_WRITE(VLV_IIR, 0xffffffff);
  3696.         I915_WRITE(VLV_IIR, 0xffffffff);
  3697.         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
  3698.         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  3699.         POSTING_READ(VLV_IMR);
  3700.  
  3701.         /* Interrupt setup is already guaranteed to be single-threaded; this is
  3702.          * just to make the assert_spin_locked check happy. */
  3703.         spin_lock_irq(&dev_priv->irq_lock);
  3704.         if (dev_priv->display_irqs_enabled)
  3705.                 valleyview_display_irqs_install(dev_priv);
  3706.         spin_unlock_irq(&dev_priv->irq_lock);
  3707. }
  3708.  
  3709. static int valleyview_irq_postinstall(struct drm_device *dev)
  3710. {
  3711.         struct drm_i915_private *dev_priv = dev->dev_private;
  3712.  
  3713.         vlv_display_irq_postinstall(dev_priv);
  3714.  
  3715.         gen5_gt_irq_postinstall(dev);
  3716.  
  3717.         /* ack & enable invalid PTE error interrupts */
  3718. #if 0 /* FIXME: add support to irq handler for checking these bits */
  3719.         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
  3720.         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
  3721. #endif
  3722.  
  3723.         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
  3724.  
  3725.         return 0;
  3726. }
  3727.  
  3728. static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
  3729. {
  3730.         /* These are interrupts we'll toggle with the ring mask register */
  3731.         uint32_t gt_interrupts[] = {
  3732.                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
  3733.                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
  3734.                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
  3735.                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
  3736.                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
  3737.                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
  3738.                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
  3739.                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
  3740.                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
  3741.                 0,
  3742.                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
  3743.                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
  3744.                 };
  3745.  
  3746.         dev_priv->pm_irq_mask = 0xffffffff;
  3747.         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
  3748.         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
  3749.         /*
  3750.          * RPS interrupts will get enabled/disabled on demand when RPS itself
  3751.          * is enabled/disabled.
  3752.          */
  3753.         GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
  3754.         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
  3755. }
  3756.  
  3757. static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
  3758. {
  3759.         uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
  3760.         uint32_t de_pipe_enables;
  3761.         u32 de_port_masked = GEN8_AUX_CHANNEL_A;
  3762.         u32 de_port_enables;
  3763.         enum pipe pipe;
  3764.  
  3765.         if (INTEL_INFO(dev_priv)->gen >= 9) {
  3766.                 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
  3767.                                   GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
  3768.                 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
  3769.                                   GEN9_AUX_CHANNEL_D;
  3770.                 if (IS_BROXTON(dev_priv))
  3771.                         de_port_masked |= BXT_DE_PORT_GMBUS;
  3772.         } else {
  3773.                 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
  3774.                                   GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
  3775.         }
  3776.  
  3777.         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
  3778.                                            GEN8_PIPE_FIFO_UNDERRUN;
  3779.  
  3780.         de_port_enables = de_port_masked;
  3781.         if (IS_BROXTON(dev_priv))
  3782.                 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
  3783.         else if (IS_BROADWELL(dev_priv))
  3784.                 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
  3785.  
  3786.         dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
  3787.         dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
  3788.         dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
  3789.  
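                /*
                 * Only pipes whose power well is currently enabled get their
                 * IRQ registers initialized here; pipes powered up later are
                 * handled via gen8_irq_power_well_post_enable().
                 */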
  3790.         for_each_pipe(dev_priv, pipe)
  3791.                 if (intel_display_power_is_enabled(dev_priv,
  3792.                                 POWER_DOMAIN_PIPE(pipe)))
  3793.                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
  3794.                                           dev_priv->de_irq_mask[pipe],
  3795.                                           de_pipe_enables);
  3796.  
  3797.         GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
  3798. }
  3799.  
  3800. static int gen8_irq_postinstall(struct drm_device *dev)
  3801. {
  3802.         struct drm_i915_private *dev_priv = dev->dev_private;
  3803.  
  3804.         if (HAS_PCH_SPLIT(dev))
  3805.                 ibx_irq_pre_postinstall(dev);
  3806.  
  3807.         gen8_gt_irq_postinstall(dev_priv);
  3808.         gen8_de_irq_postinstall(dev_priv);
  3809.  
  3810.         if (HAS_PCH_SPLIT(dev))
  3811.                 ibx_irq_postinstall(dev);
  3812.  
  3813.         I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
  3814.         POSTING_READ(GEN8_MASTER_IRQ);
  3815.  
  3816.         return 0;
  3817. }
  3818.  
  3819. static int cherryview_irq_postinstall(struct drm_device *dev)
  3820. {
  3821.         struct drm_i915_private *dev_priv = dev->dev_private;
  3822.  
  3823.         vlv_display_irq_postinstall(dev_priv);
  3824.  
  3825.         gen8_gt_irq_postinstall(dev_priv);
  3826.  
  3827.         I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
  3828.         POSTING_READ(GEN8_MASTER_IRQ);
  3829.  
  3830.         return 0;
  3831. }
  3832.  
  3833. static void gen8_irq_uninstall(struct drm_device *dev)
  3834. {
  3835.         struct drm_i915_private *dev_priv = dev->dev_private;
  3836.  
  3837.         if (!dev_priv)
  3838.                 return;
  3839.  
  3840.         gen8_irq_reset(dev);
  3841. }
  3842.  
  3843. static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
  3844. {
  3845.         /* Interrupt setup is already guaranteed to be single-threaded; this is
  3846.          * just to make the assert_spin_locked check happy. */
  3847.         spin_lock_irq(&dev_priv->irq_lock);
  3848.         if (dev_priv->display_irqs_enabled)
  3849.                 valleyview_display_irqs_uninstall(dev_priv);
  3850.         spin_unlock_irq(&dev_priv->irq_lock);
  3851.  
  3852.         vlv_display_irq_reset(dev_priv);
  3853.  
  3854.         dev_priv->irq_mask = ~0;
  3855. }
  3856.  
  3857. static void valleyview_irq_uninstall(struct drm_device *dev)
  3858. {
  3859.         struct drm_i915_private *dev_priv = dev->dev_private;
  3860.  
  3861.         if (!dev_priv)
  3862.                 return;
  3863.  
  3864.         I915_WRITE(VLV_MASTER_IER, 0);
  3865.  
  3866.         gen5_gt_irq_reset(dev);
  3867.  
  3868.         I915_WRITE(HWSTAM, 0xffffffff);
  3869.  
  3870.         vlv_display_irq_uninstall(dev_priv);
  3871. }
  3872.  
  3873. static void cherryview_irq_uninstall(struct drm_device *dev)
  3874. {
  3875.         struct drm_i915_private *dev_priv = dev->dev_private;
  3876.  
  3877.         if (!dev_priv)
  3878.                 return;
  3879.  
  3880.         I915_WRITE(GEN8_MASTER_IRQ, 0);
  3881.         POSTING_READ(GEN8_MASTER_IRQ);
  3882.  
  3883.         gen8_gt_irq_reset(dev_priv);
  3884.  
  3885.         GEN5_IRQ_RESET(GEN8_PCU_);
  3886.  
  3887.         vlv_display_irq_uninstall(dev_priv);
  3888. }
  3889.  
  3890. static void ironlake_irq_uninstall(struct drm_device *dev)
  3891. {
  3892.         struct drm_i915_private *dev_priv = dev->dev_private;
  3893.  
  3894.         if (!dev_priv)
  3895.                 return;
  3896.  
  3897.         ironlake_irq_reset(dev);
  3898. }
  3899.  
  3900. #if 0
  3901. static void i8xx_irq_preinstall(struct drm_device *dev)
  3902. {
  3903.         struct drm_i915_private *dev_priv = dev->dev_private;
  3904.         int pipe;
  3905.  
  3906.         for_each_pipe(dev_priv, pipe)
  3907.                 I915_WRITE(PIPESTAT(pipe), 0);
  3908.         I915_WRITE16(IMR, 0xffff);
  3909.         I915_WRITE16(IER, 0x0);
  3910.         POSTING_READ16(IER);
  3911. }
  3912.  
  3913. static int i8xx_irq_postinstall(struct drm_device *dev)
  3914. {
  3915.         struct drm_i915_private *dev_priv = dev->dev_private;
  3916.  
  3917.         I915_WRITE16(EMR,
  3918.                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  3919.  
  3920.         /* Unmask the interrupts that we always want on. */
  3921.         dev_priv->irq_mask =
  3922.                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3923.                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  3924.                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  3925.                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
  3926.         I915_WRITE16(IMR, dev_priv->irq_mask);
  3927.  
  3928.         I915_WRITE16(IER,
  3929.                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3930.                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  3931.                      I915_USER_INTERRUPT);
  3932.         POSTING_READ16(IER);
  3933.  
  3934.         /* Interrupt setup is already guaranteed to be single-threaded; this is
  3935.          * just to make the assert_spin_locked check happy. */
  3936.         spin_lock_irq(&dev_priv->irq_lock);
  3937.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
  3938.         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
  3939.         spin_unlock_irq(&dev_priv->irq_lock);
  3940.  
  3941.         return 0;
  3942. }
  3943.  
  3944. /*
  3945.  * Returns true when a page flip has completed.
  3946.  */
  3947. static bool i8xx_handle_vblank(struct drm_device *dev,
  3948.                                int plane, int pipe, u32 iir)
  3949. {
  3950.         struct drm_i915_private *dev_priv = dev->dev_private;
  3951.         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
  3952.  
  3953.         if (!intel_pipe_handle_vblank(dev, pipe))
  3954.                 return false;
  3955.  
  3956.         if ((iir & flip_pending) == 0)
  3957.                 goto check_page_flip;
  3958.  
  3959.         /* We detect FlipDone by looking for the change in PendingFlip from '1'
  3960.          * to '0' on the following vblank, i.e. IIR has the PendingFlip
  3961.          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
  3962.          * the flip is completed (no longer pending). Since this doesn't raise
  3963.          * an interrupt per se, we watch for the change at vblank.
  3964.          */
  3965.         if (I915_READ16(ISR) & flip_pending)
  3966.                 goto check_page_flip;
  3967.  
  3968.         intel_prepare_page_flip(dev, plane);
  3969.         intel_finish_page_flip(dev, pipe);
  3970.         return true;
  3971.  
  3972. check_page_flip:
  3973.         intel_check_page_flip(dev, pipe);
  3974.         return false;
  3975. }
  3976.  
  3977. static irqreturn_t i8xx_irq_handler(int irq, void *arg)
  3978. {
  3979.         struct drm_device *dev = arg;
  3980.         struct drm_i915_private *dev_priv = dev->dev_private;
  3981.         u16 iir, new_iir;
  3982.         u32 pipe_stats[2];
  3983.         int pipe;
  3984.         u16 flip_mask =
  3985.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  3986.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  3987.         irqreturn_t ret;
  3988.  
  3989.         if (!intel_irqs_enabled(dev_priv))
  3990.                 return IRQ_NONE;
  3991.  
  3992.         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
  3993.         disable_rpm_wakeref_asserts(dev_priv);
  3994.  
  3995.         ret = IRQ_NONE;
  3996.         iir = I915_READ16(IIR);
  3997.         if (iir == 0)
  3998.                 goto out;
  3999.  
  4000.         while (iir & ~flip_mask) {
  4001.                 /* Can't rely on pipestat interrupt bit in iir as it might
  4002.                  * have been cleared after the pipestat interrupt was received.
  4003.                  * It doesn't set the bit in iir again, but it still produces
  4004.                  * interrupts (for non-MSI).
  4005.                  */
  4006.                 spin_lock(&dev_priv->irq_lock);
  4007.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  4008.                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
  4009.  
  4010.                 for_each_pipe(dev_priv, pipe) {
  4011.                         i915_reg_t reg = PIPESTAT(pipe);
  4012.                         pipe_stats[pipe] = I915_READ(reg);
  4013.  
  4014.                         /*
  4015.                          * Clear the PIPE*STAT regs before the IIR
  4016.                          */
  4017.                         if (pipe_stats[pipe] & 0x8000ffff)
  4018.                                 I915_WRITE(reg, pipe_stats[pipe]);
  4019.                 }
  4020.                 spin_unlock(&dev_priv->irq_lock);
  4021.  
  4022.                 I915_WRITE16(IIR, iir & ~flip_mask);
  4023.                 new_iir = I915_READ16(IIR); /* Flush posted writes */
  4024.  
  4025.                 if (iir & I915_USER_INTERRUPT)
  4026.                         notify_ring(&dev_priv->ring[RCS]);
  4027.  
  4028.                 for_each_pipe(dev_priv, pipe) {
  4029.                         int plane = pipe;
  4030.                         if (HAS_FBC(dev))
  4031.                                 plane = !plane;
  4032.  
  4033.                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
  4034.                             i8xx_handle_vblank(dev, plane, pipe, iir))
  4035.                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
  4036.  
  4037.                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
  4038.                                 i9xx_pipe_crc_irq_handler(dev, pipe);
  4039.  
  4040.                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  4041.                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
  4042.                                                                     pipe);
  4043.                 }
  4044.  
  4045.                 iir = new_iir;
  4046.         }
  4047.         ret = IRQ_HANDLED;
  4048.  
  4049. out:
  4050.         enable_rpm_wakeref_asserts(dev_priv);
  4051.  
  4052.         return ret;
  4053. }
  4054.  
  4055. static void i8xx_irq_uninstall(struct drm_device * dev)
  4056. {
  4057.         struct drm_i915_private *dev_priv = dev->dev_private;
  4058.         int pipe;
  4059.  
  4060.         for_each_pipe(dev_priv, pipe) {
  4061.                 /* Clear enable bits; then clear status bits */
  4062.                 I915_WRITE(PIPESTAT(pipe), 0);
  4063.                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
  4064.         }
  4065.         I915_WRITE16(IMR, 0xffff);
  4066.         I915_WRITE16(IER, 0x0);
  4067.         I915_WRITE16(IIR, I915_READ16(IIR));
  4068. }
  4069.  
  4070. #endif
  4071.  
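        /* gen3 (i915-class) interrupt setup, handler and teardown. */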
  4072. static void i915_irq_preinstall(struct drm_device * dev)
  4073. {
  4074.         struct drm_i915_private *dev_priv = dev->dev_private;
  4075.         int pipe;
  4076.  
  4077.         if (I915_HAS_HOTPLUG(dev)) {
  4078.                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
  4079.                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  4080.         }
  4081.  
  4082.         I915_WRITE16(HWSTAM, 0xeffe);
  4083.         for_each_pipe(dev_priv, pipe)
  4084.                 I915_WRITE(PIPESTAT(pipe), 0);
  4085.         I915_WRITE(IMR, 0xffffffff);
  4086.         I915_WRITE(IER, 0x0);
  4087.         POSTING_READ(IER);
  4088. }
  4089.  
  4090. static int i915_irq_postinstall(struct drm_device *dev)
  4091. {
  4092.         struct drm_i915_private *dev_priv = dev->dev_private;
  4093.         u32 enable_mask;
  4094.  
  4095.         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  4096.  
  4097.         /* Unmask the interrupts that we always want on. */
  4098.         dev_priv->irq_mask =
  4099.                 ~(I915_ASLE_INTERRUPT |
  4100.                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  4101.                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  4102.                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  4103.                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
  4104.  
  4105.         enable_mask =
  4106.                 I915_ASLE_INTERRUPT |
  4107.                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  4108.                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  4109.                 I915_USER_INTERRUPT;
  4110.  
  4111.         if (I915_HAS_HOTPLUG(dev)) {
  4112.                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
  4113.                 POSTING_READ(PORT_HOTPLUG_EN);
  4114.  
  4115.                 /* Enable in IER... */
  4116.                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
  4117.                 /* and unmask in IMR */
  4118.                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
  4119.         }
  4120.  
  4121.         I915_WRITE(IMR, dev_priv->irq_mask);
  4122.         I915_WRITE(IER, enable_mask);
  4123.         POSTING_READ(IER);
  4124.  
  4125.         i915_enable_asle_pipestat(dev);
  4126.  
  4127.         /* Interrupt setup is already guaranteed to be single-threaded; this is
  4128.          * just to make the assert_spin_locked check happy. */
  4129.         spin_lock_irq(&dev_priv->irq_lock);
  4130.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
  4131.         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
  4132.         spin_unlock_irq(&dev_priv->irq_lock);
  4133.  
  4134.         return 0;
  4135. }
  4136.  
  4137. /*
  4138.  * Returns true when a page flip has completed.
  4139.  */
  4140. static bool i915_handle_vblank(struct drm_device *dev,
  4141.                                int plane, int pipe, u32 iir)
  4142. {
  4143.         struct drm_i915_private *dev_priv = dev->dev_private;
  4144.         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
  4145.  
  4146.         if (!intel_pipe_handle_vblank(dev, pipe))
  4147.                 return false;
  4148.  
  4149.         if ((iir & flip_pending) == 0)
  4150.                 goto check_page_flip;
  4151.  
  4152.         /* We detect FlipDone by looking for the change in PendingFlip from '1'
  4153.          * to '0' on the following vblank, i.e. IIR has the PendingFlip
  4154.          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
  4155.          * the flip is completed (no longer pending). Since this doesn't raise
  4156.          * an interrupt per se, we watch for the change at vblank.
  4157.          */
  4158.         if (I915_READ(ISR) & flip_pending)
  4159.                 goto check_page_flip;
  4160.  
  4161.         intel_prepare_page_flip(dev, plane);
  4162.         intel_finish_page_flip(dev, pipe);
  4163.         return true;
  4164.  
  4165. check_page_flip:
  4166.         intel_check_page_flip(dev, pipe);
  4167.         return false;
  4168. }
  4169.  
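        /*
         * gen3 interrupt handler: loop on IIR, clearing PIPESTAT before IIR, then
         * dispatch hotplug, user (render ring) and per-pipe vblank/CRC/underrun events.
         */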
  4170. static irqreturn_t i915_irq_handler(int irq, void *arg)
  4171. {
  4172.         struct drm_device *dev = arg;
  4173.         struct drm_i915_private *dev_priv = dev->dev_private;
  4174.         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
  4175.         u32 flip_mask =
  4176.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  4177.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  4178.         int pipe, ret = IRQ_NONE;
  4179.  
  4180.         if (!intel_irqs_enabled(dev_priv))
  4181.                 return IRQ_NONE;
  4182.  
  4183.         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
  4184.         disable_rpm_wakeref_asserts(dev_priv);
  4185.  
  4186.         iir = I915_READ(IIR);
  4187.         do {
  4188.                 bool irq_received = (iir & ~flip_mask) != 0;
  4189.                 bool blc_event = false;
  4190.  
  4191.                 /* Can't rely on pipestat interrupt bit in iir as it might
  4192.                  * have been cleared after the pipestat interrupt was received.
  4193.                  * It doesn't set the bit in iir again, but it still produces
  4194.                  * interrupts (for non-MSI).
  4195.                  */
  4196.                 spin_lock(&dev_priv->irq_lock);
  4197.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  4198.                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
  4199.  
  4200.                 for_each_pipe(dev_priv, pipe) {
  4201.                         i915_reg_t reg = PIPESTAT(pipe);
  4202.                         pipe_stats[pipe] = I915_READ(reg);
  4203.  
  4204.                         /* Clear the PIPE*STAT regs before the IIR */
  4205.                         if (pipe_stats[pipe] & 0x8000ffff) {
  4206.                                 I915_WRITE(reg, pipe_stats[pipe]);
  4207.                                 irq_received = true;
  4208.                         }
  4209.                 }
  4210.                 spin_unlock(&dev_priv->irq_lock);
  4211.  
  4212.                 if (!irq_received)
  4213.                         break;
  4214.  
  4215.                 /* Consume port.  Then clear IIR or we'll miss events */
  4216.                 if (I915_HAS_HOTPLUG(dev) &&
  4217.                     iir & I915_DISPLAY_PORT_INTERRUPT)
  4218.                         i9xx_hpd_irq_handler(dev);
  4219.  
  4220.                 I915_WRITE(IIR, iir & ~flip_mask);
  4221.                 new_iir = I915_READ(IIR); /* Flush posted writes */
  4222.  
  4223.                 if (iir & I915_USER_INTERRUPT)
  4224.                         notify_ring(&dev_priv->ring[RCS]);
  4225.  
  4226.                 for_each_pipe(dev_priv, pipe) {
  4227.                         int plane = pipe;
  4228.                         if (HAS_FBC(dev))
  4229.                                 plane = !plane;
  4230.  
  4231.                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
  4232.                             i915_handle_vblank(dev, plane, pipe, iir))
  4233.                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
  4234.  
  4235.                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
  4236.                                 blc_event = true;
  4237.  
  4238.                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
  4239.                                 i9xx_pipe_crc_irq_handler(dev, pipe);
  4240.  
  4241.                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  4242.                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
  4243.                                                                     pipe);
  4244.                 }
  4245.  
  4246.                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
  4247.                         intel_opregion_asle_intr(dev);
  4248.  
  4249.                 /* With MSI, interrupts are only generated when iir
  4250.                  * transitions from zero to nonzero.  If another bit got
  4251.                  * set while we were handling the existing iir bits, then
  4252.                  * we would never get another interrupt.
  4253.                  *
  4254.                  * This is fine on non-MSI as well: if we hit this path
  4255.                  * we avoid exiting the interrupt handler only to generate
  4256.                  * another one.
  4257.                  *
  4258.                  * Note that for MSI this could cause a stray interrupt report
  4259.                  * if an interrupt landed in the time between writing IIR and
  4260.                  * the posting read.  This should be rare enough to never
  4261.                  * trigger the 99% of 100,000 interrupts test for disabling
  4262.                  * stray interrupts.
  4263.                  */
  4264.                 ret = IRQ_HANDLED;
  4265.                 iir = new_iir;
  4266.         } while (iir & ~flip_mask);
  4267.  
  4268.         enable_rpm_wakeref_asserts(dev_priv);
  4269.  
  4270.         return ret;
  4271. }
  4272.  
  4273. static void i915_irq_uninstall(struct drm_device * dev)
  4274. {
  4275.         struct drm_i915_private *dev_priv = dev->dev_private;
  4276.         int pipe;
  4277.  
  4278.         if (I915_HAS_HOTPLUG(dev)) {
  4279.                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
  4280.                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  4281.         }
  4282.  
  4283.         I915_WRITE16(HWSTAM, 0xffff);
  4284.         for_each_pipe(dev_priv, pipe) {
  4285.                 /* Clear enable bits; then clear status bits */
  4286.                 I915_WRITE(PIPESTAT(pipe), 0);
  4287.                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
  4288.         }
  4289.         I915_WRITE(IMR, 0xffffffff);
  4290.         I915_WRITE(IER, 0x0);
  4291.  
  4292.         I915_WRITE(IIR, I915_READ(IIR));
  4293. }
  4294.  
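        /* gen4 (i965/G4X-class) interrupt setup, handler and teardown. */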
  4295. static void i965_irq_preinstall(struct drm_device * dev)
  4296. {
  4297.         struct drm_i915_private *dev_priv = dev->dev_private;
  4298.         int pipe;
  4299.  
  4300.         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
  4301.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  4302.  
  4303.         I915_WRITE(HWSTAM, 0xeffe);
  4304.         for_each_pipe(dev_priv, pipe)
  4305.                 I915_WRITE(PIPESTAT(pipe), 0);
  4306.         I915_WRITE(IMR, 0xffffffff);
  4307.         I915_WRITE(IER, 0x0);
  4308.         POSTING_READ(IER);
  4309. }
  4310.  
  4311. static int i965_irq_postinstall(struct drm_device *dev)
  4312. {
  4313.         struct drm_i915_private *dev_priv = dev->dev_private;
  4314.         u32 enable_mask;
  4315.         u32 error_mask;
  4316.  
  4317.         /* Unmask the interrupts that we always want on. */
  4318.         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
  4319.                                I915_DISPLAY_PORT_INTERRUPT |
  4320.                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  4321.                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  4322.                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  4323.                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  4324.                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  4325.  
  4326.         enable_mask = ~dev_priv->irq_mask;
  4327.         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  4328.                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
  4329.         enable_mask |= I915_USER_INTERRUPT;
  4330.  
  4331.         if (IS_G4X(dev))
  4332.                 enable_mask |= I915_BSD_USER_INTERRUPT;
  4333.  
  4334.         /* Interrupt setup is already guaranteed to be single-threaded; this is
  4335.          * just to make the assert_spin_locked check happy. */
  4336.         spin_lock_irq(&dev_priv->irq_lock);
  4337.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
  4338.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
  4339.         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
  4340.         spin_unlock_irq(&dev_priv->irq_lock);
  4341.  
  4342.         /*
  4343.          * Enable some error detection, note the instruction error mask
  4344.          * bit is reserved, so we leave it masked.
  4345.          */
  4346.         if (IS_G4X(dev)) {
  4347.                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
  4348.                                GM45_ERROR_MEM_PRIV |
  4349.                                GM45_ERROR_CP_PRIV |
  4350.                                I915_ERROR_MEMORY_REFRESH);
  4351.         } else {
  4352.                 error_mask = ~(I915_ERROR_PAGE_TABLE |
  4353.                                I915_ERROR_MEMORY_REFRESH);
  4354.         }
  4355.         I915_WRITE(EMR, error_mask);
  4356.  
  4357.         I915_WRITE(IMR, dev_priv->irq_mask);
  4358.         I915_WRITE(IER, enable_mask);
  4359.         POSTING_READ(IER);
  4360.  
  4361.         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
  4362.         POSTING_READ(PORT_HOTPLUG_EN);
  4363.  
  4364.         i915_enable_asle_pipestat(dev);
  4365.  
  4366.         return 0;
  4367. }
  4368.  
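        /*
         * Hotplug detection setup for the legacy (pre-PCH) platforms; must be
         * called with dev_priv->irq_lock held.
         */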
  4369. static void i915_hpd_irq_setup(struct drm_device *dev)
  4370. {
  4371.         struct drm_i915_private *dev_priv = dev->dev_private;
  4372.         u32 hotplug_en;
  4373.  
  4374.         assert_spin_locked(&dev_priv->irq_lock);
  4375.  
  4376.         /* Note HDMI and DP share hotplug bits */
  4377.         /* enable bits are the same for all generations */
  4378.         hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
  4379.         /* Programming the CRT detection parameters tends
  4380.            to generate a spurious hotplug event about three
  4381.            seconds later.  So just do it once.
  4382.         */
  4383.         if (IS_G4X(dev))
  4384.                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
  4385.         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
  4386.  
  4387.         /* Ignore TV since it's buggy */
  4388.         i915_hotplug_interrupt_update_locked(dev_priv,
  4389.                                              HOTPLUG_INT_EN_MASK |
  4390.                                              CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
  4391.                                              CRT_HOTPLUG_ACTIVATION_PERIOD_64,
  4392.                                              hotplug_en);
  4393. }
  4394.  
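        /*
         * gen4 interrupt handler: same IIR loop as gen3, plus BSD ring and
         * GMBUS interrupt handling.
         */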
  4395. static irqreturn_t i965_irq_handler(int irq, void *arg)
  4396. {
  4397.         struct drm_device *dev = arg;
  4398.         struct drm_i915_private *dev_priv = dev->dev_private;
  4399.         u32 iir, new_iir;
  4400.         u32 pipe_stats[I915_MAX_PIPES];
  4401.         int ret = IRQ_NONE, pipe;
  4402.         u32 flip_mask =
  4403.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  4404.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  4405.  
  4406.         if (!intel_irqs_enabled(dev_priv))
  4407.                 return IRQ_NONE;
  4408.  
  4409.         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
  4410.         disable_rpm_wakeref_asserts(dev_priv);
  4411.  
  4412.         iir = I915_READ(IIR);
  4413.  
  4414.         for (;;) {
  4415.                 bool irq_received = (iir & ~flip_mask) != 0;
  4416.                 bool blc_event = false;
  4417.  
  4418.                 /* Can't rely on pipestat interrupt bit in iir as it might
  4419.                  * have been cleared after the pipestat interrupt was received.
  4420.                  * It doesn't set the bit in iir again, but it still produces
  4421.                  * interrupts (for non-MSI).
  4422.                  */
  4423.                 spin_lock(&dev_priv->irq_lock);
  4424.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  4425.                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
  4426.  
  4427.                 for_each_pipe(dev_priv, pipe) {
  4428.                         i915_reg_t reg = PIPESTAT(pipe);
  4429.                         pipe_stats[pipe] = I915_READ(reg);
  4430.  
  4431.                         /*
  4432.                          * Clear the PIPE*STAT regs before the IIR
  4433.                          */
  4434.                         if (pipe_stats[pipe] & 0x8000ffff) {
  4435.                                 I915_WRITE(reg, pipe_stats[pipe]);
  4436.                                 irq_received = true;
  4437.                         }
  4438.                 }
  4439.                 spin_unlock(&dev_priv->irq_lock);
  4440.  
  4441.                 if (!irq_received)
  4442.                         break;
  4443.  
  4444.                 ret = IRQ_HANDLED;
  4445.  
  4446.                 /* Consume port.  Then clear IIR or we'll miss events */
  4447.                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
  4448.                         i9xx_hpd_irq_handler(dev);
  4449.  
  4450.                 I915_WRITE(IIR, iir & ~flip_mask);
  4451.                 new_iir = I915_READ(IIR); /* Flush posted writes */
  4452.  
  4453.                 if (iir & I915_USER_INTERRUPT)
  4454.                         notify_ring(&dev_priv->ring[RCS]);
  4455.                 if (iir & I915_BSD_USER_INTERRUPT)
  4456.                         notify_ring(&dev_priv->ring[VCS]);
  4457.  
  4458.                 for_each_pipe(dev_priv, pipe) {
  4459.                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
  4460.                             i915_handle_vblank(dev, pipe, pipe, iir))
  4461.                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
  4462.  
  4463.                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
  4464.                                 blc_event = true;
  4465.  
  4466.                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
  4467.                                 i9xx_pipe_crc_irq_handler(dev, pipe);
  4468.  
  4469.                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  4470.                                 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
  4471.                 }
  4472.  
  4473.                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
  4474.                         intel_opregion_asle_intr(dev);
  4475.  
  4476.                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
  4477.                         gmbus_irq_handler(dev);
  4478.  
  4479.                 /* With MSI, interrupts are only generated when iir
  4480.                  * transitions from zero to nonzero.  If another bit got
  4481.                  * set while we were handling the existing iir bits, then
  4482.                  * we would never get another interrupt.
  4483.                  *
  4484.                  * This is fine on non-MSI as well: if we hit this path
  4485.                  * we avoid exiting the interrupt handler only to generate
  4486.                  * another one.
  4487.                  *
  4488.                  * Note that for MSI this could cause a stray interrupt report
  4489.                  * if an interrupt landed in the time between writing IIR and
  4490.                  * the posting read.  This should be rare enough to never
  4491.                  * trigger the 99% of 100,000 interrupts test for disabling
  4492.                  * stray interrupts.
  4493.                  */
  4494.                 iir = new_iir;
  4495.         }
  4496.  
  4497.         enable_rpm_wakeref_asserts(dev_priv);
  4498.  
  4499.         return ret;
  4500. }
  4501.  
  4502. static void i965_irq_uninstall(struct drm_device * dev)
  4503. {
  4504.         struct drm_i915_private *dev_priv = dev->dev_private;
  4505.         int pipe;
  4506.  
  4507.         if (!dev_priv)
  4508.                 return;
  4509.  
  4510.         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
  4511.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  4512.  
  4513.         I915_WRITE(HWSTAM, 0xffffffff);
  4514.         for_each_pipe(dev_priv, pipe)
  4515.                 I915_WRITE(PIPESTAT(pipe), 0);
  4516.         I915_WRITE(IMR, 0xffffffff);
  4517.         I915_WRITE(IER, 0x0);
  4518.  
  4519.         for_each_pipe(dev_priv, pipe)
  4520.                 I915_WRITE(PIPESTAT(pipe),
  4521.                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
  4522.         I915_WRITE(IIR, I915_READ(IIR));
  4523. }
  4524.  
  4525. /**
  4526.  * intel_irq_init - initializes irq support
  4527.  * @dev_priv: i915 device instance
  4528.  *
  4529.  * This function initializes all the irq support including work items, timers
  4530.  * and all the vtables. It does not set up the interrupt itself, though.
  4531.  */
  4532. void intel_irq_init(struct drm_i915_private *dev_priv)
  4533. {
  4534.         struct drm_device *dev = dev_priv->dev;
  4535.  
  4536.         intel_hpd_init_work(dev_priv);
  4537.  
  4538.         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
  4539.         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
  4540.  
  4541.         /* Let's track the enabled rps events */
  4542.         if (IS_VALLEYVIEW(dev_priv))
  4543.                 /* WaGsvRC0ResidencyMethod:vlv */
  4544.                 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
  4545.         else
  4546.                 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
  4547.  
  4548.         INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
  4549.                           i915_hangcheck_elapsed);
  4550.  
  4551.  
  4552.         if (IS_GEN2(dev_priv)) {
  4553.                 dev->max_vblank_count = 0;
  4554.                 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
  4555.         } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
  4556.                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
  4557.                 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
  4558.         } else {
  4559.                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
  4560.                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
  4561.         }
  4562.  
  4563.         /*
  4564.          * Opt out of the vblank disable timer on everything except gen2.
  4565.          * Gen2 doesn't have a hardware frame counter and so depends on
  4566.          * vblank interrupts to produce sane vblank sequence numbers.
  4567.          */
  4568.         if (!IS_GEN2(dev_priv))
  4569.                 dev->vblank_disable_immediate = true;
  4570.  
  4571.         dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
  4572.         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
  4573.  
  4574.         if (IS_CHERRYVIEW(dev_priv)) {
  4575.                 dev->driver->irq_handler = cherryview_irq_handler;
  4576.                 dev->driver->irq_preinstall = cherryview_irq_preinstall;
  4577.                 dev->driver->irq_postinstall = cherryview_irq_postinstall;
  4578.                 dev->driver->irq_uninstall = cherryview_irq_uninstall;
  4579.                 dev->driver->enable_vblank = valleyview_enable_vblank;
  4580.                 dev->driver->disable_vblank = valleyview_disable_vblank;
  4581.                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  4582.         } else if (IS_VALLEYVIEW(dev_priv)) {
  4583.                 dev->driver->irq_handler = valleyview_irq_handler;
  4584.                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
  4585.                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
  4586.                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
  4587.                 dev->driver->enable_vblank = valleyview_enable_vblank;
  4588.                 dev->driver->disable_vblank = valleyview_disable_vblank;
  4589.                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  4590.         } else if (INTEL_INFO(dev_priv)->gen >= 8) {
  4591.                 dev->driver->irq_handler = gen8_irq_handler;
  4592.                 dev->driver->irq_preinstall = gen8_irq_reset;
  4593.                 dev->driver->irq_postinstall = gen8_irq_postinstall;
  4594.                 dev->driver->irq_uninstall = gen8_irq_uninstall;
  4595.                 dev->driver->enable_vblank = gen8_enable_vblank;
  4596.                 dev->driver->disable_vblank = gen8_disable_vblank;
  4597.                 if (IS_BROXTON(dev))
  4598.                         dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
  4599.                 else if (HAS_PCH_SPT(dev))
  4600.                         dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
  4601.                 else
  4602.                         dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
  4603.         } else if (HAS_PCH_SPLIT(dev)) {
  4604.                 dev->driver->irq_handler = ironlake_irq_handler;
  4605.                 dev->driver->irq_preinstall = ironlake_irq_reset;
  4606.                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
  4607.                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
  4608.                 dev->driver->enable_vblank = ironlake_enable_vblank;
  4609.                 dev->driver->disable_vblank = ironlake_disable_vblank;
  4610.                 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
  4611.         } else {
  4612.                 if (INTEL_INFO(dev_priv)->gen == 2) {
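                                /* gen2 (i8xx) hooks are compiled out above (#if 0); no handlers are installed. */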
  4613.                 } else if (INTEL_INFO(dev_priv)->gen == 3) {
  4614.                         dev->driver->irq_preinstall = i915_irq_preinstall;
  4615.                         dev->driver->irq_postinstall = i915_irq_postinstall;
  4616.                         dev->driver->irq_uninstall = i915_irq_uninstall;
  4617.                         dev->driver->irq_handler = i915_irq_handler;
  4618.                 } else {
  4619.                         dev->driver->irq_preinstall = i965_irq_preinstall;
  4620.                         dev->driver->irq_postinstall = i965_irq_postinstall;
  4621.                         dev->driver->irq_uninstall = i965_irq_uninstall;
  4622.                         dev->driver->irq_handler = i965_irq_handler;
  4623.                 }
  4624.                 if (I915_HAS_HOTPLUG(dev_priv))
  4625.                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  4626.                 dev->driver->enable_vblank = i915_enable_vblank;
  4627.                 dev->driver->disable_vblank = i915_disable_vblank;
  4628.         }
  4629. }
  4630.  
  4631. /**
  4632.  * intel_irq_install - enables the hardware interrupt
  4633.  * @dev_priv: i915 device instance
  4634.  *
  4635.  * This function enables the hardware interrupt handling, but leaves the hotplug
  4636.  * handling still disabled. It is called after intel_irq_init().
  4637.  *
  4638.  * In the driver load and resume code we need working interrupts in a few places
  4639.  * but don't want to deal with the hassle of concurrent probe and hotplug
  4640.  * workers. Hence the split into this two-stage approach.
  4641.  */
  4642. int intel_irq_install(struct drm_i915_private *dev_priv)
  4643. {
  4644.         /*
  4645.          * We enable some interrupt sources in our postinstall hooks, so mark
  4646.          * interrupts as enabled _before_ actually enabling them to avoid
  4647.          * special cases in our ordering checks.
  4648.          */
  4649.         dev_priv->pm.irqs_enabled = true;
  4650.  
  4651.         return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
  4652. }
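        /*
         * Sketch of the intended bring-up order (based on the comments above; the
         * actual call sites live elsewhere in the driver, not in this file):
         *
         *      intel_irq_init(dev_priv);       // work items, timers, vtables
         *      intel_irq_install(dev_priv);    // request the interrupt line
         *      ...
         *      intel_irq_uninstall(dev_priv);  // on teardown
         */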
  4653.  
  4654. /**
  4655.  * intel_irq_uninstall - finalizes all irq handling
  4656.  * @dev_priv: i915 device instance
  4657.  *
  4658.  * This stops interrupt and hotplug handling and unregisters and frees all
  4659.  * resources acquired in the init functions.
  4660.  */
  4661. void intel_irq_uninstall(struct drm_i915_private *dev_priv)
  4662. {
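        /* drm_irq_uninstall() is left disabled here; only the hotplug work is
         * cancelled and the enabled flag cleared. */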
  4663. //      drm_irq_uninstall(dev_priv->dev);
  4664.         intel_hpd_cancel_work(dev_priv);
  4665.         dev_priv->pm.irqs_enabled = false;
  4666. }
  4667.  
  4668. /**
  4669.  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
  4670.  * @dev_priv: i915 device instance
  4671.  *
  4672.  * This function is used to disable interrupts at runtime, both in the runtime
  4673.  * pm and the system suspend/resume code.
  4674.  */
  4675. void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
  4676. {
  4677.         dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
  4678.         dev_priv->pm.irqs_enabled = false;
  4679.         synchronize_irq(dev_priv->dev->irq);
  4680. }
  4681.  
  4682. /**
  4683.  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
  4684.  * @dev_priv: i915 device instance
  4685.  *
  4686.  * This function is used to enable interrupts at runtime, both in the runtime
  4687.  * pm and the system suspend/resume code.
  4688.  */
  4689. void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
  4690. {
  4691.         dev_priv->pm.irqs_enabled = true;
  4692.         dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
  4693.         dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
  4694. }
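        /*
         * Sketch of the runtime-PM pairing implied by the two helpers above (the
         * actual call sites are in the power management code, not in this file):
         *
         *      intel_runtime_pm_disable_interrupts(dev_priv);  // before powering down
         *      ...
         *      intel_runtime_pm_enable_interrupts(dev_priv);   // after powering back up
         */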
  4695.