/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
        I915_WRITE(type##IMR, 0xffffffff); \
        POSTING_READ(type##IMR); \
        I915_WRITE(type##IER, 0); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
} while (0)
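
/*
 * Illustrative note (not part of the original driver): with token pasting,
 * a call such as GEN5_IRQ_RESET(DE) expands to the sequence
 *
 *         I915_WRITE(DEIMR, 0xffffffff);
 *         POSTING_READ(DEIMR);
 *         I915_WRITE(DEIER, 0);
 *         I915_WRITE(DEIIR, 0xffffffff);
 *         POSTING_READ(DEIIR);
 *         I915_WRITE(DEIIR, 0xffffffff);
 *         POSTING_READ(DEIIR);
 *
 * i.e. mask everything, disable every source, then clear IIR twice to drain
 * both events the hardware may have queued.
 */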

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
        u32 val = I915_READ(reg); \
        if (val) { \
                WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
                     (reg), val); \
                I915_WRITE((reg), 0xffffffff); \
                POSTING_READ(reg); \
                I915_WRITE((reg), 0xffffffff); \
                POSTING_READ(reg); \
        } \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
        I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
        I915_WRITE(type##IER, (ier_val)); \
        I915_WRITE(type##IMR, (imr_val)); \
        POSTING_READ(type##IMR); \
} while (0)
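
/*
 * Illustrative note: the init macros are the mirror image of the reset
 * macros above. For example, GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask,
 * gt_irqs) would first assert that GTIIR is clean, then program GTIER with
 * the sources to enable and GTIMR with the bits to keep masked; a set IMR
 * bit blocks the corresponding interrupt.
 */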

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}
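
/*
 * Worked example (illustrative numbers): IMR is an active-high *mask*
 * register, so enabling an interrupt means clearing its bit. With
 * gt_irq_mask = 0xff, ilk_update_gt_irq(dev_priv, 0x06, 0x02) touches
 * only bits 1-2:
 *
 *         gt_irq_mask &= ~0x06;                ->  0xf9
 *         gt_irq_mask |= (~0x02 & 0x06);       ->  0xf9 | 0x04 = 0xfd
 *
 * Bit 1 ends up cleared (unmasked, i.e. enabled) and bit 2 ends up set
 * (masked).
 */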

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        assert_spin_locked(&dev_priv->irq_lock);

        new_val = dev_priv->pm_irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_irq_mask) {
                dev_priv->pm_irq_mask = new_val;
                I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
                POSTING_READ(gen6_pm_imr(dev_priv));
        }
}
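
/*
 * Illustrative note: unlike the DEIMR/GTIMR helpers above, this helper
 * compares against the cached pm_irq_mask first, so a redundant call costs
 * neither the MMIO write nor the posting read.
 */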

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
                                  uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        __gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t reg = gen6_pm_iir(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        I915_WRITE(reg, dev_priv->pm_rps_events);
        I915_WRITE(reg, dev_priv->pm_rps_events);
        POSTING_READ(reg);
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        spin_lock_irq(&dev_priv->irq_lock);

        WARN_ON(dev_priv->rps.pm_iir);
        WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
        dev_priv->rps.interrupts_enabled = true;
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
                   dev_priv->pm_rps_events);
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.interrupts_enabled = false;
        spin_unlock_irq(&dev_priv->irq_lock);

        cancel_work_sync(&dev_priv->rps.work);

        spin_lock_irq(&dev_priv->irq_lock);

        I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
                   ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);

        __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
                   ~dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);

        dev_priv->rps.pm_iir = 0;

        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                  uint32_t interrupt_mask,
                                  uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                       u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == enable_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;

        /* Enable the interrupt, clear any pending status */
        pipestat |= enable_mask | status_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                        u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

        pipestat &= ~enable_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
        u32 enable_mask = status_mask << 16;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

        return enable_mask;
}
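
/*
 * Illustrative note: in PIPESTAT the enable bits sit 16 bit positions above
 * their status bits, hence the "status_mask << 16" default. For example, a
 * status bit at position 2 (0x00000004) is enabled by bit 18 (0x00040000);
 * only the sprite flip-done and PSR/underrun bits handled above deviate
 * from this pattern.
 */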

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                     u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv->dev))
                enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                      u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv->dev))
                enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Locking is horribly broken here, but whatever. */
                struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

                return intel_crtc->active;
        } else {
                return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
        }
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
        /* Gen2 doesn't have a hardware frame counter */
        return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                struct intel_crtc *intel_crtc =
                        to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                const struct drm_display_mode *mode =
                        &intel_crtc->config.adjusted_mode;

                htotal = mode->crtc_htotal;
                hsync_start = mode->crtc_hsync_start;
                vbl_start = mode->crtc_vblank_start;
                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                        vbl_start = DIV_ROUND_UP(vbl_start, 2);
        } else {
                enum transcoder cpu_transcoder = (enum transcoder) pipe;

                htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
                hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
                vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
                if ((I915_READ(PIPECONF(cpu_transcoder)) &
                     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
                        vbl_start = DIV_ROUND_UP(vbl_start, 2);
        }

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame);
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
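
/*
 * Worked example (illustrative numbers): with htotal = 100,
 * hsync_start = 92 and vblank_start = 480, the code above computes
 * vbl_start = 480 * 100 - (100 - 92) = 47992 pixels. If the hardware frame
 * counter reads N while the pixel counter is at or beyond 47992, the cooked
 * value reports N + 1, emulating the ctg+ behaviour of incrementing at
 * start of vblank rather than at start of active.
 */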

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN2(dev))
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                                    unsigned int flags, int *vpos, int *hpos,
                                    void *stime, void *etime)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        unsigned long irqflags;

        if (!intel_crtc->active) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position to the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        in_vbl = position >= vbl_start && position < vbl_end;

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_IN_VBLANK;

        return ret;
}
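
/*
 * Worked example (illustrative numbers): with vbl_start = 480 and
 * vbl_end = vtotal = 500 in scanline mode, a raw position of 490 lies
 * inside vblank and is reported as 490 - 500 = -10, counting up to 0 at
 * vbl_end; a raw position of 100 lies outside vblank and is reported as
 * 100 + (500 - 500) = 100, counting up from vbl_end.
 */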

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                                     int *max_error,
                                     struct timeval *vblank_time,
                                     unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc,
                                                     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
                                struct drm_connector *connector)
{
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->status;

        connector->status = connector->funcs->detect(connector, false);
        if (old_status == connector->status)
                return false;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                      connector->base.id,
                      connector->name,
                      drm_get_connector_status_name(old_status),
                      drm_get_connector_status_name(connector->status));

        return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irq(&dev_priv->irq_lock);

        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                if (!intel_connector->encoder)
                        continue;
                intel_encoder = intel_connector->encoder;
                if (intel_encoder->hpd_pin > HPD_NONE &&
                    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
                    connector->polled == DRM_CONNECTOR_POLL_HPD) {
                        DRM_INFO("HPD interrupt storm detected on connector %s: "
                                 "switching from hotplug detection to polling\n",
                                 connector->name);
                        dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
                        connector->polled = DRM_CONNECTOR_POLL_CONNECT
                                | DRM_CONNECTOR_POLL_DISCONNECT;
                        hpd_disabled = true;
                }
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      connector->name, intel_encoder->hpd_pin);
                }
        }
        /* if there were no outputs to poll, poll was disabled,
         * therefore make sure it's enabled when disabling HPD on
         * some connectors */

        spin_unlock_irq(&dev_priv->irq_lock);

        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                if (!intel_connector->encoder)
                        continue;
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        if (intel_encoder->hot_plug)
                                intel_encoder->hot_plug(intel_encoder);
                        if (intel_hpd_irq_event(dev, connector))
                                changed = true;
                }
        }
        mutex_unlock(&mode_config->mutex);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);

        return;
}

static void notify_ring(struct drm_device *dev,
                        struct intel_engine_cs *ring)
{
        if (!intel_ring_initialized(ring))
                return;

        trace_i915_gem_request_complete(ring);

        wake_up_all(&ring->irq_queue);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
                            struct intel_rps_ei *rps_ei)
{
        u32 cz_ts, cz_freq_khz;
        u32 render_count, media_count;
        u32 elapsed_render, elapsed_media, elapsed_time;
        u32 residency = 0;

        cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
        cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

        render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
        media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

        if (rps_ei->cz_clock == 0) {
                rps_ei->cz_clock = cz_ts;
                rps_ei->render_c0 = render_count;
                rps_ei->media_c0 = media_count;

                return dev_priv->rps.cur_freq;
        }

        elapsed_time = cz_ts - rps_ei->cz_clock;
        rps_ei->cz_clock = cz_ts;

        elapsed_render = render_count - rps_ei->render_c0;
        rps_ei->render_c0 = render_count;

        elapsed_media = media_count - rps_ei->media_c0;
        rps_ei->media_c0 = media_count;

        /* Convert all the counters into a common unit of milliseconds */
        elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
        elapsed_render /= cz_freq_khz;
        elapsed_media /= cz_freq_khz;

        /*
         * Calculate overall C0 residency percentage
         * only if elapsed time is non zero
         */
        if (elapsed_time) {
                residency =
                        ((max(elapsed_render, elapsed_media) * 100)
                                / elapsed_time);
        }

        return residency;
}
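
/*
 * Worked example (illustrative numbers): if elapsed_time converts to 40 ms
 * and the larger of the two C0 counters converts to 30 ms, residency =
 * (30 * 100) / 40 = 75, i.e. the GPU spent 75% of the evaluation interval
 * in C0.
 */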

/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
        u32 residency_C0_up = 0, residency_C0_down = 0;
        int new_delay, adj;

        dev_priv->rps.ei_interrupt_count++;

        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

        if (dev_priv->rps.up_ei.cz_clock == 0) {
                vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
                vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
                return dev_priv->rps.cur_freq;
        }

        /*
         * To down throttle, C0 residency should be less than down threshold
         * for continuous EI intervals. So calculate down EI counters
         * once in VLV_INT_COUNT_FOR_DOWN_EI
         */
        if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
                dev_priv->rps.ei_interrupt_count = 0;

                residency_C0_down = vlv_c0_residency(dev_priv,
                                                     &dev_priv->rps.down_ei);
        } else {
                residency_C0_up = vlv_c0_residency(dev_priv,
                                                   &dev_priv->rps.up_ei);
        }

        new_delay = dev_priv->rps.cur_freq;

        adj = dev_priv->rps.last_adj;
        /* C0 residency is greater than UP threshold. Increase Frequency */
        if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else
                        adj = 1;

                if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
                        new_delay = dev_priv->rps.cur_freq + adj;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;

        } else if (!dev_priv->rps.ei_interrupt_count &&
                   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
                if (adj < 0)
                        adj *= 2;
                else
                        adj = -1;
                /*
                 * This means C0 residency is less than the down threshold over
                 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
                 */
                if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
                        new_delay = dev_priv->rps.cur_freq + adj;
        }

        return new_delay;
}
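
/*
 * Illustrative note: because last_adj is carried over between events, a run
 * of consecutive up events ramps geometrically: the first raises the
 * frequency by 1 step, the next by 2, then 4, and so on, until the soft
 * limit or the RPe snap above takes over; a run of down events mirrors this
 * with -1, -2, -4 steps.
 */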

static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, rps.work);
        u32 pm_iir;
        int new_delay, adj;

        spin_lock_irq(&dev_priv->irq_lock);
        /* Speed up work cancellation during disabling rps interrupts. */
        if (!dev_priv->rps.interrupts_enabled) {
                spin_unlock_irq(&dev_priv->irq_lock);
                return;
        }
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

        if ((pm_iir & dev_priv->pm_rps_events) == 0)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        adj = dev_priv->rps.last_adj;
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else {
                        /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
                }
                new_delay = dev_priv->rps.cur_freq + adj;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
                else
                        new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
                new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else {
                        /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
                }
                new_delay = dev_priv->rps.cur_freq + adj;
        } else { /* unknown event */
                new_delay = dev_priv->rps.cur_freq;
        }

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay = clamp_t(int, new_delay,
                            dev_priv->rps.min_freq_softlimit,
                            dev_priv->rps.max_freq_softlimit);

        dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

        if (IS_VALLEYVIEW(dev_priv->dev))
                valleyview_set_rps(dev_priv->dev, new_delay);
        else
                gen6_set_rps(dev_priv->dev, new_delay);

        mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
        uint8_t slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                u32 reg;

                slice--;
                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1 + (slice * 0x200);

                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                          slice, row, bank, subbank);
        }

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
        WARN_ON(dev_priv->l3_parity.which_slice);
        spin_lock_irq(&dev_priv->irq_lock);
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
        spin_unlock_irq(&dev_priv->irq_lock);

        mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!HAS_L3_DPF(dev))
                return;

        spin_lock(&dev_priv->irq_lock);
        gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
        spin_unlock(&dev_priv->irq_lock);

        iir &= GT_PARITY_ERROR(dev);
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                dev_priv->l3_parity.which_slice |= 1 << 1;

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                dev_priv->l3_parity.which_slice |= 1 << 0;

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
                DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

        if (gt_iir & GT_PARITY_ERROR(dev))
                ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
                                       struct drm_i915_private *dev_priv,
                                       u32 master_ctl)
{
        struct intel_engine_cs *ring;
        u32 rcs, bcs, vcs;
        uint32_t tmp = 0;
        irqreturn_t ret = IRQ_NONE;

        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
                tmp = I915_READ(GEN8_GT_IIR(0));
                if (tmp) {
                        I915_WRITE(GEN8_GT_IIR(0), tmp);
                        ret = IRQ_HANDLED;

                        rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
                        ring = &dev_priv->ring[RCS];
                        if (rcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, ring);
                        if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
                                intel_execlists_handle_ctx_events(ring);

                        bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
                        ring = &dev_priv->ring[BCS];
                        if (bcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, ring);
                        if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
                                intel_execlists_handle_ctx_events(ring);
                } else
                        DRM_ERROR("The master control interrupt lied (GT0)!\n");
        }

        if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
                tmp = I915_READ(GEN8_GT_IIR(1));
                if (tmp) {
                        I915_WRITE(GEN8_GT_IIR(1), tmp);
                        ret = IRQ_HANDLED;

                        vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
                        ring = &dev_priv->ring[VCS];
                        if (vcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, ring);
                        if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
                                intel_execlists_handle_ctx_events(ring);

                        vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
                        ring = &dev_priv->ring[VCS2];
                        if (vcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, ring);
                        if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
                                intel_execlists_handle_ctx_events(ring);
                } else
                        DRM_ERROR("The master control interrupt lied (GT1)!\n");
        }

        if (master_ctl & GEN8_GT_PM_IRQ) {
                tmp = I915_READ(GEN8_GT_IIR(2));
                if (tmp & dev_priv->pm_rps_events) {
                        I915_WRITE(GEN8_GT_IIR(2),
                                   tmp & dev_priv->pm_rps_events);
                        ret = IRQ_HANDLED;
                        gen6_rps_irq_handler(dev_priv, tmp);
                } else
                        DRM_ERROR("The master control interrupt lied (PM)!\n");
        }

        if (master_ctl & GEN8_GT_VECS_IRQ) {
                tmp = I915_READ(GEN8_GT_IIR(3));
                if (tmp) {
                        I915_WRITE(GEN8_GT_IIR(3), tmp);
                        ret = IRQ_HANDLED;

                        vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
                        ring = &dev_priv->ring[VECS];
                        if (vcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, ring);
                        if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
  1368.                                 intel_execlists_handle_ctx_events(ring);
  1369.                 } else
  1370.                         DRM_ERROR("The master control interrupt lied (GT3)!\n");
  1371.         }
  1372.  
  1373.         return ret;
  1374. }
  1375.  
  1376. #define HPD_STORM_DETECT_PERIOD 1000
  1377. #define HPD_STORM_THRESHOLD 5
  1378.  
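/*
 * Illustrative sketch (not driver code): intel_hpd_irq_handler() below keeps
 * a per-pin counter that restarts whenever more than HPD_STORM_DETECT_PERIOD
 * ms have passed since the previous interrupt; if the counter instead climbs
 * past HPD_STORM_THRESHOLD within one window, the pin is marked
 * HPD_MARK_DISABLED and further HPD processing for it is cut off
 * (last/cnt/mark abbreviate the dev_priv->hpd_stats[i] fields):
 *
 *	if (!time_in_range(jiffies, last,
 *			   last + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
 *		last = jiffies;
 *		cnt = 0;
 *	} else if (cnt > HPD_STORM_THRESHOLD) {
 *		mark = HPD_MARK_DISABLED;
 *	} else {
 *		cnt++;
 *	}
 */
 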
  1379. static int pch_port_to_hotplug_shift(enum port port)
  1380. {
  1381.         switch (port) {
  1382.         case PORT_A:
  1383.         case PORT_E:
  1384.         default:
  1385.                 return -1;
  1386.         case PORT_B:
  1387.                 return 0;
  1388.         case PORT_C:
  1389.                 return 8;
  1390.         case PORT_D:
  1391.                 return 16;
  1392.         }
  1393. }
  1394.  
  1395. static int i915_port_to_hotplug_shift(enum port port)
  1396. {
  1397.         switch (port) {
  1398.         case PORT_A:
  1399.         case PORT_E:
  1400.         default:
  1401.                 return -1;
  1402.         case PORT_B:
  1403.                 return 17;
  1404.         case PORT_C:
  1405.                 return 19;
  1406.         case PORT_D:
  1407.                 return 21;
  1408.         }
  1409. }
  1410.  
  1411. static inline enum port get_port_from_pin(enum hpd_pin pin)
  1412. {
  1413.         switch (pin) {
  1414.         case HPD_PORT_B:
  1415.                 return PORT_B;
  1416.         case HPD_PORT_C:
  1417.                 return PORT_C;
  1418.         case HPD_PORT_D:
  1419.                 return PORT_D;
  1420.         default:
  1421.                 return PORT_A; /* no hpd */
  1422.         }
  1423. }
  1424.  
  1425. static inline void intel_hpd_irq_handler(struct drm_device *dev,
  1426.                                          u32 hotplug_trigger,
  1427.                                          u32 dig_hotplug_reg,
  1428.                                          const u32 *hpd)
  1429. {
  1430.         struct drm_i915_private *dev_priv = dev->dev_private;
  1431.         int i;
  1432.         enum port port;
  1433.         bool storm_detected = false;
  1434.         bool queue_dig = false, queue_hp = false;
  1435.         u32 dig_shift;
  1436.         u32 dig_port_mask = 0;
  1437.  
  1438.         if (!hotplug_trigger)
  1439.                 return;
  1440.  
  1441.         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
  1442.                          hotplug_trigger, dig_hotplug_reg);
  1443.  
  1444.         spin_lock(&dev_priv->irq_lock);
  1445.         for (i = 1; i < HPD_NUM_PINS; i++) {
  1446.                 if (!(hpd[i] & hotplug_trigger))
  1447.                         continue;
  1448.  
  1449.                 port = get_port_from_pin(i);
  1450.                 if (port && dev_priv->hpd_irq_port[port]) {
  1451.                         bool long_hpd;
  1452.  
  1453.                         if (HAS_PCH_SPLIT(dev)) {
  1454.                                 dig_shift = pch_port_to_hotplug_shift(port);
  1455.                                 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
  1456.                         } else {
  1457.                                 dig_shift = i915_port_to_hotplug_shift(port);
  1458.                                 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
  1459.                         }
  1460.  
  1461.                         DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
  1462.                                          port_name(port),
  1463.                                          long_hpd ? "long" : "short");
  1464.                         /* for long HPD pulses we want to have the digital queue happen,
  1465.                            but we still want HPD storm detection to function. */
  1466.                         if (long_hpd) {
  1467.                                 dev_priv->long_hpd_port_mask |= (1 << port);
  1468.                                 dig_port_mask |= hpd[i];
  1469.                         } else {
  1470.                                 /* for short HPD just trigger the digital queue */
  1471.                                 dev_priv->short_hpd_port_mask |= (1 << port);
  1472.                                 hotplug_trigger &= ~hpd[i];
  1473.                         }
  1474.                         queue_dig = true;
  1475.                 }
  1476.         }
  1477.  
  1478.         for (i = 1; i < HPD_NUM_PINS; i++) {
  1479.                 if (hpd[i] & hotplug_trigger &&
  1480.                     dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
  1481.                         /*
  1482.                          * On GMCH platforms the interrupt mask bits only
  1483.                          * prevent irq generation, not the setting of the
  1484.                          * hotplug bits itself. So only WARN about unexpected
  1485.                          * hotplug bits themselves. So only WARN about unexpected
  1486.                          */
  1487.                         WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
  1488.                           "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
  1489.                           hotplug_trigger, i, hpd[i]);
  1490.  
  1491.                         continue;
  1492.                 }
  1493.  
  1494.                 if (!(hpd[i] & hotplug_trigger) ||
  1495.                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
  1496.                         continue;
  1497.  
  1498.                 if (!(dig_port_mask & hpd[i])) {
  1499.                         dev_priv->hpd_event_bits |= (1 << i);
  1500.                         queue_hp = true;
  1501.                 }
  1502.  
  1503.                 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
  1504.                                    dev_priv->hpd_stats[i].hpd_last_jiffies
  1505.                                    + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
  1506.                         dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
  1507.                         dev_priv->hpd_stats[i].hpd_cnt = 0;
  1508.                         DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
  1509.                 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
  1510.                         dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
  1511.                         dev_priv->hpd_event_bits &= ~(1 << i);
  1512.                         DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
  1513.                         storm_detected = true;
  1514.                 } else {
  1515.                         dev_priv->hpd_stats[i].hpd_cnt++;
  1516.                         DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
  1517.                                       dev_priv->hpd_stats[i].hpd_cnt);
  1518.                 }
  1519.         }
  1520.  
  1521.         if (storm_detected)
  1522.                 dev_priv->display.hpd_irq_setup(dev);
  1523.         spin_unlock(&dev_priv->irq_lock);
  1524.  
  1525.         /*
  1526.          * Our hotplug handler can grab modeset locks (by calling down into the
  1527.          * fb helpers). Hence it must not be run on our own dev_priv->wq work
  1528.          * queue for otherwise the flush_work in the pageflip code will
  1529.          * deadlock.
  1530.          */
  1531.         if (queue_hp)
  1532.                 schedule_work(&dev_priv->hotplug_work);
  1533. }
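 
/*
 * Illustrative contrast (sketch, not driver code): work that may take
 * modeset locks goes to the system workqueue via schedule_work(), while
 * GPU-only work stays on the driver's own dev_priv->wq, which the pageflip
 * code is allowed to flush_work() against:
 *
 *	schedule_work(&dev_priv->hotplug_work);
 *	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 */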
  1534.  
  1535. static void gmbus_irq_handler(struct drm_device *dev)
  1536. {
  1537.         struct drm_i915_private *dev_priv = dev->dev_private;
  1538.  
  1539.         wake_up_all(&dev_priv->gmbus_wait_queue);
  1540. }
  1541.  
  1542. static void dp_aux_irq_handler(struct drm_device *dev)
  1543. {
  1544.         struct drm_i915_private *dev_priv = dev->dev_private;
  1545.  
  1546.         wake_up_all(&dev_priv->gmbus_wait_queue);
  1547. }
  1548.  
  1549. #if defined(CONFIG_DEBUG_FS)
  1550. static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
  1551.                                          uint32_t crc0, uint32_t crc1,
  1552.                                          uint32_t crc2, uint32_t crc3,
  1553.                                          uint32_t crc4)
  1554. {
  1555.         struct drm_i915_private *dev_priv = dev->dev_private;
  1556.         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
  1557.         struct intel_pipe_crc_entry *entry;
  1558.         int head, tail;
  1559.  
  1560.         spin_lock(&pipe_crc->lock);
  1561.  
  1562.         if (!pipe_crc->entries) {
  1563.                 spin_unlock(&pipe_crc->lock);
  1564.                 DRM_DEBUG_KMS("spurious interrupt\n");
  1565.                 return;
  1566.         }
  1567.  
  1568.         head = pipe_crc->head;
  1569.         tail = pipe_crc->tail;
  1570.  
  1571.         if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
  1572.                 spin_unlock(&pipe_crc->lock);
  1573.                 DRM_ERROR("CRC buffer overflowing\n");
  1574.                 return;
  1575.         }
  1576.  
  1577.         entry = &pipe_crc->entries[head];
  1578.  
  1579.         entry->frame = dev->driver->get_vblank_counter(dev, pipe);
  1580.         entry->crc[0] = crc0;
  1581.         entry->crc[1] = crc1;
  1582.         entry->crc[2] = crc2;
  1583.         entry->crc[3] = crc3;
  1584.         entry->crc[4] = crc4;
  1585.  
  1586.         head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
  1587.         pipe_crc->head = head;
  1588.  
  1589.         spin_unlock(&pipe_crc->lock);
  1590.  
  1591.         wake_up_interruptible(&pipe_crc->wq);
  1592. }
  1593. #else
  1594. static inline void
  1595. display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
  1596.                              uint32_t crc0, uint32_t crc1,
  1597.                              uint32_t crc2, uint32_t crc3,
  1598.                              uint32_t crc4) {}
  1599. #endif
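 
/*
 * Illustrative reader sketch (assumed debugfs consumer shape, not shown in
 * this file): entries are drained from the tail under the same lock,
 * mirroring the CIRC_SPACE() producer above; CIRC_CNT() comes from
 * <linux/circ_buf.h>:
 *
 *	spin_lock_irq(&pipe_crc->lock);
 *	while (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
 *			INTEL_PIPE_CRC_ENTRIES_NR) >= 1) {
 *		entry = &pipe_crc->entries[pipe_crc->tail];
 *		... copy *entry out ...
 *		pipe_crc->tail = (pipe_crc->tail + 1) &
 *				 (INTEL_PIPE_CRC_ENTRIES_NR - 1);
 *	}
 *	spin_unlock_irq(&pipe_crc->lock);
 */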
  1600.  
  1601.  
  1602. static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
  1603. {
  1604.         struct drm_i915_private *dev_priv = dev->dev_private;
  1605.  
  1606.         display_pipe_crc_irq_handler(dev, pipe,
  1607.                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
  1608.                                      0, 0, 0, 0);
  1609. }
  1610.  
  1611. static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
  1612. {
  1613.         struct drm_i915_private *dev_priv = dev->dev_private;
  1614.  
  1615.         display_pipe_crc_irq_handler(dev, pipe,
  1616.                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
  1617.                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
  1618.                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
  1619.                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
  1620.                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
  1621. }
  1622.  
  1623. static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
  1624. {
  1625.         struct drm_i915_private *dev_priv = dev->dev_private;
  1626.         uint32_t res1, res2;
  1627.  
  1628.         if (INTEL_INFO(dev)->gen >= 3)
  1629.                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
  1630.         else
  1631.                 res1 = 0;
  1632.  
  1633.         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
  1634.                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
  1635.         else
  1636.                 res2 = 0;
  1637.  
  1638.         display_pipe_crc_irq_handler(dev, pipe,
  1639.                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
  1640.                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
  1641.                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
  1642.                                      res1, res2);
  1643. }
  1644.  
  1645. /* The RPS events need forcewake, so we add them to a work queue and mask their
  1646.  * IMR bits until the work is done. Other interrupts can be processed without
  1647.  * the work queue. */
  1648. static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
  1649. {
  1650.         /* TODO: RPS on GEN9+ is not supported yet. */
  1651.         if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
  1652.                       "GEN9+: unexpected RPS IRQ\n"))
  1653.                 return;
  1654.  
  1655.         if (pm_iir & dev_priv->pm_rps_events) {
  1656.                 spin_lock(&dev_priv->irq_lock);
  1657.                 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
  1658.                 if (dev_priv->rps.interrupts_enabled) {
  1659.                         dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
  1660.                         queue_work(dev_priv->wq, &dev_priv->rps.work);
  1661.                 }
  1662.                 spin_unlock(&dev_priv->irq_lock);
  1663.         }
  1664.  
  1665.         if (INTEL_INFO(dev_priv)->gen >= 8)
  1666.                 return;
  1667.  
  1668.         if (HAS_VEBOX(dev_priv->dev)) {
  1669.                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
  1670.                         notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
  1671.  
  1672.                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
  1673.                         DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
  1674.         }
  1675. }
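 
/*
 * Illustrative worker sketch (assumed shape of the queued rps.work item,
 * which lives outside this excerpt): it consumes the bits parked in
 * dev_priv->rps.pm_iir and unmasks the IMR bits once forcewake-protected
 * processing is done, assuming a gen6_enable_pm_irq() counterpart to the
 * gen6_disable_pm_irq() call above:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	pm_iir = dev_priv->rps.pm_iir;
 *	dev_priv->rps.pm_iir = 0;
 *	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 *	... adjust the RPS frequency based on pm_iir, forcewake held ...
 */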
  1676.  
  1677. static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
  1678. {
  1679.         return drm_handle_vblank(dev, pipe);
  1683. }
  1684.  
  1685. static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
  1686. {
  1687.         struct drm_i915_private *dev_priv = dev->dev_private;
  1688.         u32 pipe_stats[I915_MAX_PIPES] = { };
  1689.         int pipe;
  1690.  
  1691.         spin_lock(&dev_priv->irq_lock);
  1692.         for_each_pipe(dev_priv, pipe) {
  1693.                 int reg;
  1694.                 u32 mask, iir_bit = 0;
  1695.  
  1696.                 /*
  1697.                  * PIPESTAT bits get signalled even when the interrupt is
  1698.                  * disabled with the mask bits, and some of the status bits do
  1699.                  * not generate interrupts at all (like the underrun bit). Hence
  1700.                  * we need to be careful that we only handle what we want to
  1701.                  * handle.
  1702.                  */
  1703.  
  1704.                 /* fifo underruns are filtered in the underrun handler. */
  1705.                 mask = PIPE_FIFO_UNDERRUN_STATUS;
  1706.  
  1707.                 switch (pipe) {
  1708.                 case PIPE_A:
  1709.                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
  1710.                         break;
  1711.                 case PIPE_B:
  1712.                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
  1713.                         break;
  1714.                 case PIPE_C:
  1715.                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
  1716.                         break;
  1717.                 }
  1718.                 if (iir & iir_bit)
  1719.                         mask |= dev_priv->pipestat_irq_mask[pipe];
  1720.  
  1721.                 if (!mask)
  1722.                         continue;
  1723.  
  1724.                 reg = PIPESTAT(pipe);
  1725.                 mask |= PIPESTAT_INT_ENABLE_MASK;
  1726.                 pipe_stats[pipe] = I915_READ(reg) & mask;
  1727.  
  1728.                 /*
  1729.                  * Clear the PIPE*STAT regs before the IIR
  1730.                  */
  1731.                 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
  1732.                                         PIPESTAT_INT_STATUS_MASK))
  1733.                         I915_WRITE(reg, pipe_stats[pipe]);
  1734.         }
  1735.         spin_unlock(&dev_priv->irq_lock);
  1736.  
  1737.         for_each_pipe(dev_priv, pipe) {
  1738.  
  1740.                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
  1741.                         i9xx_pipe_crc_irq_handler(dev, pipe);
  1742.  
  1743.                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  1744.                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
  1745.         }
  1746.  
  1747.         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
  1748.                 gmbus_irq_handler(dev);
  1749. }
  1750.  
  1751. static void i9xx_hpd_irq_handler(struct drm_device *dev)
  1752. {
  1753.         struct drm_i915_private *dev_priv = dev->dev_private;
  1754.         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
  1755.  
  1756.         if (hotplug_status) {
  1757.                 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
  1758.                 /*
  1759.                  * Make sure hotplug status is cleared before we clear IIR, or else we
  1760.                  * may miss hotplug events.
  1761.                  */
  1762.                 POSTING_READ(PORT_HOTPLUG_STAT);
  1763.  
  1764.                 if (IS_G4X(dev)) {
  1765.                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
  1766.  
  1767.                         intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
  1768.                 } else {
  1769.                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
  1770.  
  1771.                         intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
  1772.                 }
  1773.  
  1774.                 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
  1775.                     hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
  1776.                         dp_aux_irq_handler(dev);
  1777.         }
  1778. }
  1779.  
  1780. static irqreturn_t valleyview_irq_handler(int irq, void *arg)
  1781. {
  1782.         struct drm_device *dev = arg;
  1783.         struct drm_i915_private *dev_priv = dev->dev_private;
  1784.         u32 iir, gt_iir, pm_iir;
  1785.         irqreturn_t ret = IRQ_NONE;
  1786.  
  1787.         while (true) {
  1788.                 /* Find, clear, then process each source of interrupt */
  1789.  
  1790.                 gt_iir = I915_READ(GTIIR);
  1791.                 if (gt_iir)
  1792.                         I915_WRITE(GTIIR, gt_iir);
  1793.  
  1794.                 pm_iir = I915_READ(GEN6_PMIIR);
  1795.                 if (pm_iir)
  1796.                         I915_WRITE(GEN6_PMIIR, pm_iir);
  1797.  
  1798.                 iir = I915_READ(VLV_IIR);
  1799.                 if (iir) {
  1800.                         /* Consume port before clearing IIR or we'll miss events */
  1801.                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
  1802.                                 i9xx_hpd_irq_handler(dev);
  1803.                         I915_WRITE(VLV_IIR, iir);
  1804.                 }
  1805.  
  1806.                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
  1807.                         goto out;
  1808.  
  1809.                 ret = IRQ_HANDLED;
  1810.  
  1811.                 if (gt_iir)
  1812.                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
  1813.                 if (pm_iir)
  1814.                         gen6_rps_irq_handler(dev_priv, pm_iir);
  1815.                 /* Call regardless, as some status bits might not be
  1816.                  * signalled in iir */
  1817.                 valleyview_pipestat_irq_handler(dev, iir);
  1818.         }
  1819.  
  1820. out:
  1821.         return ret;
  1822. }
  1823.  
  1824. static irqreturn_t cherryview_irq_handler(int irq, void *arg)
  1825. {
  1826.         struct drm_device *dev = arg;
  1827.         struct drm_i915_private *dev_priv = dev->dev_private;
  1828.         u32 master_ctl, iir;
  1829.         irqreturn_t ret = IRQ_NONE;
  1830.  
  1831.         for (;;) {
  1832.                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
  1833.                 iir = I915_READ(VLV_IIR);
  1834.  
  1835.                 if (master_ctl == 0 && iir == 0)
  1836.                         break;
  1837.  
  1838.                 ret = IRQ_HANDLED;
  1839.  
  1840.                 I915_WRITE(GEN8_MASTER_IRQ, 0);
  1841.  
  1842.                 /* Find, clear, then process each source of interrupt */
  1843.  
  1844.                 if (iir) {
  1845.                         /* Consume port before clearing IIR or we'll miss events */
  1846.                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
  1847.                                 i9xx_hpd_irq_handler(dev);
  1848.                         I915_WRITE(VLV_IIR, iir);
  1849.                 }
  1850.  
  1851.                 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
  1852.  
  1853.                 /* Call regardless, as some status bits might not be
  1854.                  * signalled in iir */
  1855.                 valleyview_pipestat_irq_handler(dev, iir);
  1856.  
  1857.                 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
  1858.                 POSTING_READ(GEN8_MASTER_IRQ);
  1859.         }
  1860.  
  1861.         return ret;
  1862. }
  1863.  
  1864. static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
  1865. {
  1866.         struct drm_i915_private *dev_priv = dev->dev_private;
  1867.         int pipe;
  1868.         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
  1869.         u32 dig_hotplug_reg;
  1870.  
  1871.         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
  1872.         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
  1873.  
  1874.         intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
  1875.  
  1876.         if (pch_iir & SDE_AUDIO_POWER_MASK) {
  1877.                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
  1878.                                SDE_AUDIO_POWER_SHIFT);
  1879.                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
  1880.                                  port_name(port));
  1881.         }
  1882.  
  1883.         if (pch_iir & SDE_AUX_MASK)
  1884.                 dp_aux_irq_handler(dev);
  1885.  
  1886.         if (pch_iir & SDE_GMBUS)
  1887.                 gmbus_irq_handler(dev);
  1888.  
  1889.         if (pch_iir & SDE_AUDIO_HDCP_MASK)
  1890.                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
  1891.  
  1892.         if (pch_iir & SDE_AUDIO_TRANS_MASK)
  1893.                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
  1894.  
  1895.         if (pch_iir & SDE_POISON)
  1896.                 DRM_ERROR("PCH poison interrupt\n");
  1897.  
  1898.         if (pch_iir & SDE_FDI_MASK)
  1899.                 for_each_pipe(dev_priv, pipe)
  1900.                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
  1901.                                          pipe_name(pipe),
  1902.                                          I915_READ(FDI_RX_IIR(pipe)));
  1903.  
  1904.         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
  1905.                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
  1906.  
  1907.         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
  1908.                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
  1909.  
  1910.         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
  1911.                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
  1912.  
  1913.         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
  1914.                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
  1915. }
  1916.  
  1917. static void ivb_err_int_handler(struct drm_device *dev)
  1918. {
  1919.         struct drm_i915_private *dev_priv = dev->dev_private;
  1920.         u32 err_int = I915_READ(GEN7_ERR_INT);
  1921.         enum pipe pipe;
  1922.  
  1923.         if (err_int & ERR_INT_POISON)
  1924.                 DRM_ERROR("Poison interrupt\n");
  1925.  
  1926.         for_each_pipe(dev_priv, pipe) {
  1927.                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
  1928.                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
  1929.  
  1930.                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
  1931.                         if (IS_IVYBRIDGE(dev))
  1932.                                 ivb_pipe_crc_irq_handler(dev, pipe);
  1933.                         else
  1934.                                 hsw_pipe_crc_irq_handler(dev, pipe);
  1935.                 }
  1936.         }
  1937.  
  1938.         I915_WRITE(GEN7_ERR_INT, err_int);
  1939. }
  1940.  
  1941. static void cpt_serr_int_handler(struct drm_device *dev)
  1942. {
  1943.         struct drm_i915_private *dev_priv = dev->dev_private;
  1944.         u32 serr_int = I915_READ(SERR_INT);
  1945.  
  1946.         if (serr_int & SERR_INT_POISON)
  1947.                 DRM_ERROR("PCH poison interrupt\n");
  1948.  
  1949.         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
  1950.                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
  1951.  
  1952.         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
  1953.                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
  1954.  
  1955.         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
  1956.                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
  1957.  
  1958.         I915_WRITE(SERR_INT, serr_int);
  1959. }
  1960.  
  1961. static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
  1962. {
  1963.         struct drm_i915_private *dev_priv = dev->dev_private;
  1964.         int pipe;
  1965.         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
  1966.         u32 dig_hotplug_reg;
  1967.  
  1968.         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
  1969.         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
  1970.  
  1971.         intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
  1972.  
  1973.         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
  1974.                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
  1975.                                SDE_AUDIO_POWER_SHIFT_CPT);
  1976.                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
  1977.                                  port_name(port));
  1978.         }
  1979.  
  1980.         if (pch_iir & SDE_AUX_MASK_CPT)
  1981.                 dp_aux_irq_handler(dev);
  1982.  
  1983.         if (pch_iir & SDE_GMBUS_CPT)
  1984.                 gmbus_irq_handler(dev);
  1985.  
  1986.         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
  1987.                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
  1988.  
  1989.         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
  1990.                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
  1991.  
  1992.         if (pch_iir & SDE_FDI_MASK_CPT)
  1993.                 for_each_pipe(dev_priv, pipe)
  1994.                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
  1995.                                          pipe_name(pipe),
  1996.                                          I915_READ(FDI_RX_IIR(pipe)));
  1997.  
  1998.         if (pch_iir & SDE_ERROR_CPT)
  1999.                 cpt_serr_int_handler(dev);
  2000. }
  2001.  
  2002. static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
  2003. {
  2004.         struct drm_i915_private *dev_priv = dev->dev_private;
  2005.         enum pipe pipe;
  2006.  
  2007.         if (de_iir & DE_AUX_CHANNEL_A)
  2008.                 dp_aux_irq_handler(dev);
  2009.  
  2010.         if (de_iir & DE_GSE)
  2011.                 intel_opregion_asle_intr(dev);
  2012.  
  2013.         if (de_iir & DE_POISON)
  2014.                 DRM_ERROR("Poison interrupt\n");
  2015.  
  2016.         for_each_pipe(dev_priv, pipe) {
  2017.  
  2018.                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
  2019.                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
  2020.  
  2021.                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
  2022.                         i9xx_pipe_crc_irq_handler(dev, pipe);
  2023.  
  2024.                 /* plane/pipes map 1:1 on ilk+ */
  2025.                 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
  2026. //                      intel_prepare_page_flip(dev, pipe);
  2027. //                      intel_finish_page_flip_plane(dev, pipe);
  2028.                 }
  2029.         }
  2030.  
  2031.         /* check event from PCH */
  2032.         if (de_iir & DE_PCH_EVENT) {
  2033.                 u32 pch_iir = I915_READ(SDEIIR);
  2034.  
  2035.                 if (HAS_PCH_CPT(dev))
  2036.                         cpt_irq_handler(dev, pch_iir);
  2037.                 else
  2038.                         ibx_irq_handler(dev, pch_iir);
  2039.  
  2040.                 /* should clear PCH hotplug event before clear CPU irq */
  2041.                 I915_WRITE(SDEIIR, pch_iir);
  2042.         }
  2043.  
  2044.         if (IS_GEN5(dev) && (de_iir & DE_PCU_EVENT))
  2045.                 ironlake_rps_change_irq_handler(dev);
  2046. }
  2047.  
  2048. static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
  2049. {
  2050.         struct drm_i915_private *dev_priv = dev->dev_private;
  2051.         enum pipe pipe;
  2052.  
  2053.         if (de_iir & DE_ERR_INT_IVB)
  2054.                 ivb_err_int_handler(dev);
  2055.  
  2056.         if (de_iir & DE_AUX_CHANNEL_A_IVB)
  2057.                 dp_aux_irq_handler(dev);
  2058.  
  2059.         if (de_iir & DE_GSE_IVB)
  2060.                 intel_opregion_asle_intr(dev);
  2061.  
  2062.         for_each_pipe(dev_priv, pipe) {
  2063.  
  2064.                 /* plane/pipes map 1:1 on ilk+ */
  2065.                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
  2066. //                      intel_prepare_page_flip(dev, pipe);
  2067. //                      intel_finish_page_flip_plane(dev, pipe);
  2068.                 }
  2069.         }
  2070.  
  2071.         /* check event from PCH */
  2072.         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
  2073.                 u32 pch_iir = I915_READ(SDEIIR);
  2074.  
  2075.                 cpt_irq_handler(dev, pch_iir);
  2076.  
  2077.                 /* clear PCH hotplug event before clear CPU irq */
  2078.                 I915_WRITE(SDEIIR, pch_iir);
  2079.         }
  2080. }
  2081.  
  2082. /*
  2083.  * To handle irqs with the minimum potential races with fresh interrupts, we:
  2084.  * 1 - Disable Master Interrupt Control.
  2085.  * 2 - Find the source(s) of the interrupt.
  2086.  * 3 - Clear the Interrupt Identity bits (IIR).
  2087.  * 4 - Process the interrupt(s) that had bits set in the IIRs.
  2088.  * 5 - Re-enable Master Interrupt Control.
  2089.  */
  2090. static irqreturn_t ironlake_irq_handler(int irq, void *arg)
  2091. {
  2092.         struct drm_device *dev = arg;
  2093.         struct drm_i915_private *dev_priv = dev->dev_private;
  2094.         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
  2095.         irqreturn_t ret = IRQ_NONE;
  2096.  
  2097.         /* We get interrupts on unclaimed registers, so check for this before we
  2098.          * do any I915_{READ,WRITE}. */
  2099.         intel_uncore_check_errors(dev);
  2100.  
  2101.         /* disable master interrupt before clearing iir  */
  2102.         de_ier = I915_READ(DEIER);
  2103.         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
  2104.         POSTING_READ(DEIER);
  2105.  
  2106.         /* Disable south interrupts. We'll only write to SDEIIR once, so further
  2107.          * interrupts will be stored on its back queue, and then we'll be
  2108.          * able to process them after we restore SDEIER (as soon as we restore
  2109.          * it, we'll get an interrupt if SDEIIR still has something to process
  2110.          * due to its back queue). */
  2111.         if (!HAS_PCH_NOP(dev)) {
  2112.                 sde_ier = I915_READ(SDEIER);
  2113.                 I915_WRITE(SDEIER, 0);
  2114.                 POSTING_READ(SDEIER);
  2115.         }
  2116.  
  2117.         /* Find, clear, then process each source of interrupt */
  2118.  
  2119.         gt_iir = I915_READ(GTIIR);
  2120.         if (gt_iir) {
  2121.                 I915_WRITE(GTIIR, gt_iir);
  2122.                 ret = IRQ_HANDLED;
  2123.                 if (INTEL_INFO(dev)->gen >= 6)
  2124.                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
  2125.                 else
  2126.                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
  2127.         }
  2128.  
  2129.         de_iir = I915_READ(DEIIR);
  2130.         if (de_iir) {
  2131.                 I915_WRITE(DEIIR, de_iir);
  2132.                 ret = IRQ_HANDLED;
  2133.                 if (INTEL_INFO(dev)->gen >= 7)
  2134.                         ivb_display_irq_handler(dev, de_iir);
  2135.                 else
  2136.                         ilk_display_irq_handler(dev, de_iir);
  2137.         }
  2138.  
  2139.         if (INTEL_INFO(dev)->gen >= 6) {
  2140.                 u32 pm_iir = I915_READ(GEN6_PMIIR);
  2141.                 if (pm_iir) {
  2142.                         I915_WRITE(GEN6_PMIIR, pm_iir);
  2143.                         ret = IRQ_HANDLED;
  2144.                         gen6_rps_irq_handler(dev_priv, pm_iir);
  2145.                 }
  2146.         }
  2147.  
  2148.         I915_WRITE(DEIER, de_ier);
  2149.         POSTING_READ(DEIER);
  2150.         if (!HAS_PCH_NOP(dev)) {
  2151.                 I915_WRITE(SDEIER, sde_ier);
  2152.                 POSTING_READ(SDEIER);
  2153.         }
  2154.  
  2155.         return ret;
  2156. }
  2157.  
  2158. static irqreturn_t gen8_irq_handler(int irq, void *arg)
  2159. {
  2160.         struct drm_device *dev = arg;
  2161.         struct drm_i915_private *dev_priv = dev->dev_private;
  2162.         u32 master_ctl;
  2163.         irqreturn_t ret = IRQ_NONE;
  2164.         uint32_t tmp = 0;
  2165.         enum pipe pipe;
  2166.         u32 aux_mask = GEN8_AUX_CHANNEL_A;
  2167.  
  2168.         if (IS_GEN9(dev))
  2169.                 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
  2170.                         GEN9_AUX_CHANNEL_D;
  2171.  
  2172.         master_ctl = I915_READ(GEN8_MASTER_IRQ);
  2173.         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
  2174.         if (!master_ctl)
  2175.                 return IRQ_NONE;
  2176.  
  2177.         I915_WRITE(GEN8_MASTER_IRQ, 0);
  2178.         POSTING_READ(GEN8_MASTER_IRQ);
  2179.  
  2180.         /* Find, clear, then process each source of interrupt */
  2181.  
  2182.         ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
  2183.  
  2184.         if (master_ctl & GEN8_DE_MISC_IRQ) {
  2185.                 tmp = I915_READ(GEN8_DE_MISC_IIR);
  2186.                 if (tmp) {
  2187.                         I915_WRITE(GEN8_DE_MISC_IIR, tmp);
  2188.                         ret = IRQ_HANDLED;
  2189.                         if (tmp & GEN8_DE_MISC_GSE)
  2190.                                 intel_opregion_asle_intr(dev);
  2191.                         else
  2192.                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
  2193.                 } else
  2195.                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
  2196.         }
  2197.  
  2198.         if (master_ctl & GEN8_DE_PORT_IRQ) {
  2199.                 tmp = I915_READ(GEN8_DE_PORT_IIR);
  2200.                 if (tmp) {
  2201.                         I915_WRITE(GEN8_DE_PORT_IIR, tmp);
  2202.                         ret = IRQ_HANDLED;
  2203.  
  2204.                         if (tmp & aux_mask)
  2205.                                 dp_aux_irq_handler(dev);
  2206.                         else
  2207.                                 DRM_ERROR("Unexpected DE Port interrupt\n");
  2208.                 } else
  2210.                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
  2211.         }
  2212.  
  2213.         for_each_pipe(dev_priv, pipe) {
  2214.                 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
  2215.  
  2216.                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
  2217.                         continue;
  2218.  
  2219.                 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
  2220.                 if (pipe_iir) {
  2221.                         ret = IRQ_HANDLED;
  2222.                         I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
  2223.  
  2224.  
  2225.                         if (IS_GEN9(dev))
  2226.                                 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
  2227.                         else
  2228.                                 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
  2229.  
  2230.  
  2231.                         if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
  2232.                                 hsw_pipe_crc_irq_handler(dev, pipe);
  2233.  
  2234.                         if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
  2235.                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
  2236.                                                                     pipe);
  2237.  
  2238.  
  2239.                         if (IS_GEN9(dev))
  2240.                                 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
  2241.                         else
  2242.                                 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
  2243.  
  2244.                         if (fault_errors)
  2245.                                 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
  2246.                                           pipe_name(pipe), fault_errors);
  2248.                 } else
  2249.                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
  2250.         }
  2251.  
  2252.         if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
  2253.                 /*
  2254.                  * FIXME(BDW): Assume for now that the new interrupt handling
  2255.                  * scheme also closed the SDE interrupt handling race we've seen
  2256.                  * on older pch-split platforms. But this needs testing.
  2257.                  */
  2258.                 u32 pch_iir = I915_READ(SDEIIR);
  2259.                 if (pch_iir) {
  2260.                         I915_WRITE(SDEIIR, pch_iir);
  2261.                         ret = IRQ_HANDLED;
  2262.                         cpt_irq_handler(dev, pch_iir);
  2263.                 } else
  2264.                         DRM_ERROR("The master control interrupt lied (SDE)!\n");
  2265.  
  2266.         }
  2267.  
  2268.         I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
  2269.         POSTING_READ(GEN8_MASTER_IRQ);
  2270.  
  2271.         return ret;
  2272. }
  2273.  
  2274. static void i915_error_wake_up(struct drm_i915_private *dev_priv,
  2275.                                bool reset_completed)
  2276. {
  2277.         struct intel_engine_cs *ring;
  2278.         int i;
  2279.  
  2280.         /*
  2281.          * Notify all waiters for GPU completion events that reset state has
  2282.          * been changed, and that they need to restart their wait after
  2283.          * checking for potential errors (and bail out to drop locks if there is
  2284.          * a gpu reset pending so that i915_error_work_func can acquire them).
  2285.          */
  2286.  
  2287.         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
  2288.         for_each_ring(ring, dev_priv, i)
  2289.                 wake_up_all(&ring->irq_queue);
  2290.  
  2291.  
  2292.         /*
  2293.          * Signal tasks blocked in i915_gem_wait_for_error that the pending
  2294.          * reset state is cleared.
  2295.          */
  2296.         if (reset_completed)
  2297.                 wake_up_all(&dev_priv->gpu_error.reset_queue);
  2298. }
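 
/*
 * Illustrative waiter-side sketch (assumed shape): code blocked on
 * ring->irq_queue re-checks the reset state after every wakeup and bails
 * out so the reset worker can acquire the locks the waiter holds
 * (seqno_passed() stands in for the real completion check):
 *
 *	wait_event(ring->irq_queue,
 *		   seqno_passed(ring, seqno) ||
 *		   i915_reset_in_progress(&dev_priv->gpu_error));
 *	if (i915_reset_in_progress(&dev_priv->gpu_error))
 *		return -EAGAIN;	... drop locks and retry later ...
 */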
  2299.  
  2300. /**
  2301.  * i915_error_work_func - do process context error handling work
  2302.  * @work: work struct
  2303.  *
  2304.  * Fire an error uevent so userspace can see that a hang or error
  2305.  * was detected.
  2306.  */
  2307. static void i915_error_work_func(struct work_struct *work)
  2308. {
  2309.         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
  2310.                                                     work);
  2311.         struct drm_i915_private *dev_priv =
  2312.                 container_of(error, struct drm_i915_private, gpu_error);
  2313.         struct drm_device *dev = dev_priv->dev;
  2314.         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
  2315.         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
  2316.         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
  2317.         int ret = 0; /* the i915_reset() call below is stubbed out in this port */
  2318.  
  2319.         /*
  2320.          * Note that there's only one work item which does gpu resets, so we
  2321.          * need not worry about concurrent gpu resets potentially incrementing
  2322.          * error->reset_counter twice. We only need to take care of another
  2323.          * racing irq/hangcheck declaring the gpu dead for a second time. A
  2324.          * quick check for that is good enough: schedule_work ensures the
  2325.          * correct ordering between hang detection and this work item, and since
  2326.          * the reset in-progress bit is only ever set by code outside of this
  2327.          * work we don't need to worry about any other races.
  2328.          */
  2329.         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
  2330.                 DRM_DEBUG_DRIVER("resetting chip\n");
  2331.  
  2332.                 /*
  2333.                  * All state reset _must_ be completed before we update the
  2334.                  * reset counter, for otherwise waiters might miss the reset
  2335.                  * pending state and not properly drop locks, resulting in
  2336.                  * deadlocks with the reset work.
  2337.                  */
  2338. //              ret = i915_reset(dev);
  2339.  
  2340. //       intel_display_handle_reset(dev);
  2341.  
  2342.                 if (ret == 0) {
  2343.                         /*
  2344.                          * After all the gem state is reset, increment the reset
  2345.                          * counter and wake up everyone waiting for the reset to
  2346.                          * complete.
  2347.                          *
  2348.                          * Since unlock operations are a one-sided barrier only,
  2349.                          * we need to insert a barrier here to order any seqno
  2350.                          * updates before
  2351.                          * the counter increment.
  2352.                          */
  2353.                         atomic_inc(&dev_priv->gpu_error.reset_counter);
  2354.  
  2355.                 } else {
  2356.                         atomic_set_mask(I915_WEDGED, &error->reset_counter);
  2357.                 }
  2358.  
  2359.                 /*
  2360.                  * Note: The wake_up also serves as a memory barrier so that
  2361.                  * waiters see the updated value of the reset counter atomic_t.
  2362.                  */
  2363.                 i915_error_wake_up(dev_priv, true);
  2364.         }
  2365. }
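 
/*
 * Note: upstream this function also notifies userspace; the currently unused
 * error_event/reset_event/reset_done_event arrays above are sent around the
 * reset with calls like
 *
 *	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
 *
 * That path is stripped in this port, which is why the arrays are unused here.
 */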
  2366.  
  2367. static void i915_report_and_clear_eir(struct drm_device *dev)
  2368. {
  2369.         struct drm_i915_private *dev_priv = dev->dev_private;
  2370.         uint32_t instdone[I915_NUM_INSTDONE_REG];
  2371.         u32 eir = I915_READ(EIR);
  2372.         int pipe, i;
  2373.  
  2374.         if (!eir)
  2375.                 return;
  2376.  
  2377.         pr_err("render error detected, EIR: 0x%08x\n", eir);
  2378.  
  2379.         i915_get_extra_instdone(dev, instdone);
  2380.  
  2381.         if (IS_G4X(dev)) {
  2382.                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
  2383.                         u32 ipeir = I915_READ(IPEIR_I965);
  2384.  
  2385.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
  2386.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
  2387.                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
  2388.                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
  2389.                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
  2390.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
  2391.                         I915_WRITE(IPEIR_I965, ipeir);
  2392.                         POSTING_READ(IPEIR_I965);
  2393.                 }
  2394.                 if (eir & GM45_ERROR_PAGE_TABLE) {
  2395.                         u32 pgtbl_err = I915_READ(PGTBL_ER);
  2396.                         pr_err("page table error\n");
  2397.                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
  2398.                         I915_WRITE(PGTBL_ER, pgtbl_err);
  2399.                         POSTING_READ(PGTBL_ER);
  2400.                 }
  2401.         }
  2402.  
  2403.         if (!IS_GEN2(dev)) {
  2404.                 if (eir & I915_ERROR_PAGE_TABLE) {
  2405.                         u32 pgtbl_err = I915_READ(PGTBL_ER);
  2406.                         pr_err("page table error\n");
  2407.                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
  2408.                         I915_WRITE(PGTBL_ER, pgtbl_err);
  2409.                         POSTING_READ(PGTBL_ER);
  2410.                 }
  2411.         }
  2412.  
  2413.         if (eir & I915_ERROR_MEMORY_REFRESH) {
  2414.                 pr_err("memory refresh error:\n");
  2415.                 for_each_pipe(dev_priv, pipe)
  2416.                         pr_err("pipe %c stat: 0x%08x\n",
  2417.                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
  2418.                 /* pipestat has already been acked */
  2419.         }
  2420.         if (eir & I915_ERROR_INSTRUCTION) {
  2421.                 pr_err("instruction error\n");
  2422.                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
  2423.                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
  2424.                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
  2425.                 if (INTEL_INFO(dev)->gen < 4) {
  2426.                         u32 ipeir = I915_READ(IPEIR);
  2427.  
  2428.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
  2429.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
  2430.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
  2431.                         I915_WRITE(IPEIR, ipeir);
  2432.                         POSTING_READ(IPEIR);
  2433.                 } else {
  2434.                         u32 ipeir = I915_READ(IPEIR_I965);
  2435.  
  2436.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
  2437.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
  2438.                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
  2439.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
  2440.                         I915_WRITE(IPEIR_I965, ipeir);
  2441.                         POSTING_READ(IPEIR_I965);
  2442.                 }
  2443.         }
  2444.  
  2445.         I915_WRITE(EIR, eir);
  2446.         POSTING_READ(EIR);
  2447.         eir = I915_READ(EIR);
  2448.         if (eir) {
  2449.                 /*
  2450.                  * some errors might have become stuck,
  2451.                  * mask them.
  2452.                  */
  2453.                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
  2454.                 I915_WRITE(EMR, I915_READ(EMR) | eir);
  2455.                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  2456.         }
  2457. }
  2458.  
  2459. /**
  2460.  * i915_handle_error - handle an error interrupt
  2461.  * @dev: drm device
  2462.  *
  2463.  * Do some basic checking of register state at error interrupt time and
  2464.  * dump it to the syslog.  Also call i915_capture_error_state() to make
  2465.  * sure we get a record and make it available in debugfs.  Fire a uevent
  2466.  * so userspace knows something bad happened (should trigger collection
  2467.  * of a ring dump etc.).
  2468.  */
  2469. void i915_handle_error(struct drm_device *dev, bool wedged,
  2470.                        const char *fmt, ...)
  2471. {
  2472.         struct drm_i915_private *dev_priv = dev->dev_private;
  2473.         va_list args;
  2474.         char error_msg[80];
  2475.  
  2476.         va_start(args, fmt);
  2477.         vscnprintf(error_msg, sizeof(error_msg), fmt, args);
  2478.         va_end(args);
  2479.  
  2480. //      i915_capture_error_state(dev);
  2481.         i915_report_and_clear_eir(dev);
  2482.  
  2483.         if (wedged) {
  2484.                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
  2485.                                 &dev_priv->gpu_error.reset_counter);
  2486.  
  2487.                 /*
  2488.                  * Wakeup waiting processes so that the reset work function
  2489.                  * i915_error_work_func doesn't deadlock trying to grab various
  2490.                  * locks. By bumping the reset counter first, the woken
  2491.                  * processes will see a reset in progress and back off,
  2492.                  * releasing their locks and then wait for the reset completion.
  2493.                  * We must do this for _all_ gpu waiters that might hold locks
  2494.                  * that the reset work needs to acquire.
  2495.                  *
  2496.                  * Note: The wake_up serves as the required memory barrier to
  2497.                  * ensure that the waiters see the updated value of the reset
  2498.                  * counter atomic_t.
  2499.                  */
  2500.                 i915_error_wake_up(dev_priv, false);
  2501.         }
  2502.  
  2503.         /*
  2504.          * Our reset work can grab modeset locks (since it needs to reset the
  2505.          * state of outstanding pageflips). Hence it must not be run on our own
  2506.          * dev_priv->wq work queue for otherwise the flush_work in the pageflip
  2507.          * code will deadlock.
  2508.          */
  2509.         schedule_work(&dev_priv->gpu_error.work);
  2510. }
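 
/*
 * Illustrative caller sketch (hypothetical call site): hangcheck-style code
 * reports an error and requests a reset like so:
 *
 *	i915_handle_error(dev, true, "Ring %s stuck", ring->name);
 *
 * Passing wedged=true bumps the reset counter before the error work runs,
 * so lock-holding waiters back off first.
 */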
  2511.  
  2512. /* Called from drm generic code, passed 'crtc' which
  2513.  * we use as a pipe index
  2514.  */
  2515. static int i915_enable_vblank(struct drm_device *dev, int pipe)
  2516. {
  2517.         struct drm_i915_private *dev_priv = dev->dev_private;
  2518.         unsigned long irqflags;
  2519.  
  2520.         if (!i915_pipe_enabled(dev, pipe))
  2521.                 return -EINVAL;
  2522.  
  2523.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2524.         if (INTEL_INFO(dev)->gen >= 4)
  2525.                 i915_enable_pipestat(dev_priv, pipe,
  2526.                                      PIPE_START_VBLANK_INTERRUPT_STATUS);
  2527.         else
  2528.                 i915_enable_pipestat(dev_priv, pipe,
  2529.                                      PIPE_VBLANK_INTERRUPT_STATUS);
  2530.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2531.  
  2532.         return 0;
  2533. }
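 
/*
 * Illustrative sketch: the DRM core drives these enable/disable hooks
 * through its vblank reference counting; consumers never call them directly:
 *
 *	if (drm_vblank_get(dev, pipe) == 0) {	... enables the interrupt ...
 *		... wait for the next drm_handle_vblank() on this pipe ...
 *		drm_vblank_put(dev, pipe);	... disable is deferred ...
 *	}
 */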
  2534.  
  2535. static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
  2536. {
  2537.         struct drm_i915_private *dev_priv = dev->dev_private;
  2538.         unsigned long irqflags;
  2539.         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
  2540.                                                      DE_PIPE_VBLANK(pipe);
  2541.  
  2542.         if (!i915_pipe_enabled(dev, pipe))
  2543.                 return -EINVAL;
  2544.  
  2545.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2546.         ironlake_enable_display_irq(dev_priv, bit);
  2547.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2548.  
  2549.         return 0;
  2550. }
  2551.  
  2552. static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
  2553. {
  2554.         struct drm_i915_private *dev_priv = dev->dev_private;
  2555.         unsigned long irqflags;
  2556.  
  2557.         if (!i915_pipe_enabled(dev, pipe))
  2558.                 return -EINVAL;
  2559.  
  2560.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2561.         i915_enable_pipestat(dev_priv, pipe,
  2562.                              PIPE_START_VBLANK_INTERRUPT_STATUS);
  2563.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2564.  
  2565.         return 0;
  2566. }
  2567.  
  2568. static int gen8_enable_vblank(struct drm_device *dev, int pipe)
  2569. {
  2570.         struct drm_i915_private *dev_priv = dev->dev_private;
  2571.         unsigned long irqflags;
  2572.  
  2573.         if (!i915_pipe_enabled(dev, pipe))
  2574.                 return -EINVAL;
  2575.  
  2576.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2577.         dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
  2578.         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
  2579.         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
  2580.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2581.         return 0;
  2582. }
  2583.  
  2584. /* Called from drm generic code, passed 'crtc' which
  2585.  * we use as a pipe index
  2586.  */
  2587. static void i915_disable_vblank(struct drm_device *dev, int pipe)
  2588. {
  2589.         struct drm_i915_private *dev_priv = dev->dev_private;
  2590.         unsigned long irqflags;
  2591.  
  2592.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2593.         i915_disable_pipestat(dev_priv, pipe,
  2594.                               PIPE_VBLANK_INTERRUPT_STATUS |
  2595.                               PIPE_START_VBLANK_INTERRUPT_STATUS);
  2596.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2597. }
  2598.  
  2599. static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
  2600. {
  2601.         struct drm_i915_private *dev_priv = dev->dev_private;
  2602.         unsigned long irqflags;
  2603.         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
  2604.                                                      DE_PIPE_VBLANK(pipe);
  2605.  
  2606.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2607.         ironlake_disable_display_irq(dev_priv, bit);
  2608.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2609. }
  2610.  
  2611. static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
  2612. {
  2613.         struct drm_i915_private *dev_priv = dev->dev_private;
  2614.         unsigned long irqflags;
  2615.  
  2616.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2617.         i915_disable_pipestat(dev_priv, pipe,
  2618.                               PIPE_START_VBLANK_INTERRUPT_STATUS);
  2619.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2620. }
  2621.  
  2622. static void gen8_disable_vblank(struct drm_device *dev, int pipe)
  2623. {
  2624.         struct drm_i915_private *dev_priv = dev->dev_private;
  2625.         unsigned long irqflags;
  2626.  
  2627.         if (!i915_pipe_enabled(dev, pipe))
  2628.                 return;
  2629.  
  2630.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2631.         dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
  2632.         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
  2633.         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
  2634.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2635. }
  2636.  
  2637. static u32
  2638. ring_last_seqno(struct intel_engine_cs *ring)
  2639. {
  2640.         return list_entry(ring->request_list.prev,
  2641.                           struct drm_i915_gem_request, list)->seqno;
  2642. }
  2643.  
  2644. static bool
  2645. ring_idle(struct intel_engine_cs *ring, u32 seqno)
  2646. {
  2647.         return (list_empty(&ring->request_list) ||
  2648.                 i915_seqno_passed(seqno, ring_last_seqno(ring)));
  2649. }
  2650.  
  2651. static bool
  2652. ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
  2653. {
  2654.         if (INTEL_INFO(dev)->gen >= 8) {
  2655.                 return (ipehr >> 23) == 0x1c;
  2656.         } else {
  2657.                 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
  2658.                 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
  2659.                                  MI_SEMAPHORE_REGISTER);
  2660.         }
  2661. }
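/*
 * Note: on gen8+ the check above compares the MI command opcode field of
 * the header (bits 28:23, with the MI type bits above it zero) against
 * 0x1c, i.e. MI_SEMAPHORE_WAIT. Pre-gen8 hardware encodes semaphore waits
 * as MI_SEMAPHORE_MBOX with the COMPARE and REGISTER flags set, which is
 * what the masked comparison tests.
 */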
  2662.  
  2663. static struct intel_engine_cs *
  2664. semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
  2665. {
  2666.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  2667.         struct intel_engine_cs *signaller;
  2668.         int i;
  2669.  
  2670.         if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
  2671.                 for_each_ring(signaller, dev_priv, i) {
  2672.                         if (ring == signaller)
  2673.                                 continue;
  2674.  
  2675.                         if (offset == signaller->semaphore.signal_ggtt[ring->id])
  2676.                                 return signaller;
  2677.                 }
  2678.         } else {
  2679.                 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
  2680.  
  2681.                 for_each_ring(signaller, dev_priv, i) {
  2682.                         if (ring == signaller)
  2683.                                 continue;
  2684.  
  2685.                         if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
  2686.                                 return signaller;
  2687.                 }
  2688.         }
  2689.  
  2690.         DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
  2691.                   ring->id, ipehr, offset);
  2692.  
  2693.         return NULL;
  2694. }
  2695.  
  2696. static struct intel_engine_cs *
  2697. semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
  2698. {
  2699.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  2700.         u32 cmd, ipehr, head;
  2701.         u64 offset = 0;
  2702.         int i, backwards;
  2703.  
  2704.         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
  2705.         if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
  2706.                 return NULL;
  2707.  
  2708.         /*
  2709.          * HEAD is likely pointing to the dword after the actual command,
  2710.          * so scan backwards until we find the MBOX. But limit it to just 3
  2711.          * or 4 dwords depending on the semaphore wait command size.
  2712.          * Note that we don't care about ACTHD here since that might
  2713.          * point at the batch, and semaphores are always emitted into the
  2714.          * ringbuffer itself.
  2715.          */
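        /*
         * Illustrative sketch (derived from the reads below) of the dwords
         * being decoded, as byte offsets from the matched wait command:
         *
         *      head + 0:  command header (compared against IPEHR)
         *      head + 4:  seqno being waited for, minus one
         *      head + 8:  signaller GGTT offset, low 32 bits  (gen8+)
         *      head + 12: signaller GGTT offset, high 32 bits (gen8+)
         */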
  2716.         head = I915_READ_HEAD(ring) & HEAD_ADDR;
  2717.         backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
  2718.  
  2719.         for (i = backwards; i; --i) {
  2720.                 /*
  2721.                  * Be paranoid and presume the hw has gone off into the wild -
  2722.                  * our ring is smaller than what the hardware (and hence
  2723.                  * HEAD_ADDR) allows. Also handles wrap-around.
  2724.                  */
  2725.                 head &= ring->buffer->size - 1;
  2726.  
  2727.                 /* This here seems to blow up */
  2728.                 cmd = ioread32(ring->buffer->virtual_start + head);
  2729.                 if (cmd == ipehr)
  2730.                         break;
  2731.  
  2732.                 head -= 4;
  2733.         }
  2734.  
  2735.         if (!i)
  2736.                 return NULL;
  2737.  
  2738.         *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
  2739.         if (INTEL_INFO(ring->dev)->gen >= 8) {
  2740.                 offset = ioread32(ring->buffer->virtual_start + head + 12);
  2741.                 offset <<= 32;
  2742.                 offset |= ioread32(ring->buffer->virtual_start + head + 8);
  2743.         }
  2744.         return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
  2745. }
  2746.  
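/*
 * Note on the return convention of semaphore_passed() below:
 *      -1  no signaller found, or a (possibly recursive) deadlock
 *       1  the signaller already passed the awaited seqno; kickable
 *       0  still legitimately waiting
 */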
  2747. static int semaphore_passed(struct intel_engine_cs *ring)
  2748. {
  2749.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  2750.         struct intel_engine_cs *signaller;
  2751.         u32 seqno;
  2752.  
  2753.         ring->hangcheck.deadlock++;
  2754.  
  2755.         signaller = semaphore_waits_for(ring, &seqno);
  2756.         if (signaller == NULL)
  2757.                 return -1;
  2758.  
  2759.         /* Prevent pathological recursion due to driver bugs */
  2760.         if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
  2761.                 return -1;
  2762.  
  2763.         if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
  2764.                 return 1;
  2765.  
  2766.         /* cursory check for an unkickable deadlock */
  2767.         if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
  2768.             semaphore_passed(signaller) < 0)
  2769.                 return -1;
  2770.  
  2771.         return 0;
  2772. }
  2773.  
  2774. static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
  2775. {
  2776.         struct intel_engine_cs *ring;
  2777.         int i;
  2778.  
  2779.         for_each_ring(ring, dev_priv, i)
  2780.                 ring->hangcheck.deadlock = 0;
  2781. }
  2782.  
  2783. static enum intel_ring_hangcheck_action
  2784. ring_stuck(struct intel_engine_cs *ring, u64 acthd)
  2785. {
  2786.         struct drm_device *dev = ring->dev;
  2787.         struct drm_i915_private *dev_priv = dev->dev_private;
  2788.         u32 tmp;
  2789.  
  2790.         if (acthd != ring->hangcheck.acthd) {
  2791.                 if (acthd > ring->hangcheck.max_acthd) {
  2792.                         ring->hangcheck.max_acthd = acthd;
  2793.                         return HANGCHECK_ACTIVE;
  2794.                 }
  2795.  
  2796.                 return HANGCHECK_ACTIVE_LOOP;
  2797.         }
  2798.  
  2799.         if (IS_GEN2(dev))
  2800.                 return HANGCHECK_HUNG;
  2801.  
  2802.         /* Is the chip hanging on a WAIT_FOR_EVENT?
  2803.          * If so we can simply poke the RB_WAIT bit
  2804.          * and break the hang. This should work on
  2805.          * all but the second generation chipsets.
  2806.          */
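        /*
         * Sketch of the kick mechanism (editor's reading of the code):
         * writing back the value just read re-asserts the wait bit, which
         * is cleared by the write, so the I915_WRITE_CTL() below pokes the
         * ring out of its wait.
         */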
  2807.         tmp = I915_READ_CTL(ring);
  2808.         if (tmp & RING_WAIT) {
  2809.                 i915_handle_error(dev, false,
  2810.                                   "Kicking stuck wait on %s",
  2811.                                   ring->name);
  2812.                 I915_WRITE_CTL(ring, tmp);
  2813.                 return HANGCHECK_KICK;
  2814.         }
  2815.  
  2816.         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
  2817.                 switch (semaphore_passed(ring)) {
  2818.                 default:
  2819.                         return HANGCHECK_HUNG;
  2820.                 case 1:
  2821.                         i915_handle_error(dev, false,
  2822.                                           "Kicking stuck semaphore on %s",
  2823.                                           ring->name);
  2824.                         I915_WRITE_CTL(ring, tmp);
  2825.                         return HANGCHECK_KICK;
  2826.                 case 0:
  2827.                         return HANGCHECK_WAIT;
  2828.                 }
  2829.         }
  2830.  
  2831.         return HANGCHECK_HUNG;
  2832. }
  2833.  
  2834. /**
  2835.  * This is called when the chip hasn't reported back with completed
  2836.  * batchbuffers in a long time. We keep track of per-ring seqno progress and
  2837.  * if there is no progress, the hangcheck score for that ring is increased.
  2838.  * Further, acthd is inspected to see if the ring is stuck. In the stuck case
  2839.  * we kick the ring. If we see no progress on three subsequent calls
  2840.  * we assume the chip is wedged and try to fix it by resetting the chip.
  2841.  */
  2842. static void i915_hangcheck_elapsed(unsigned long data)
  2843. {
  2844.         struct drm_device *dev = (struct drm_device *)data;
  2845.         struct drm_i915_private *dev_priv = dev->dev_private;
  2846.         struct intel_engine_cs *ring;
  2847.         int i;
  2848.         int busy_count = 0, rings_hung = 0;
  2849.         bool stuck[I915_NUM_RINGS] = { 0 };
  2850. #define BUSY 1
  2851. #define KICK 5
  2852. #define HUNG 20
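/*
 * Worked example of the scoring (assuming HANGCHECK_SCORE_RING_HUNG is 31,
 * as in the matching i915_drv.h): each sample adds BUSY (1) for an active
 * loop, KICK (5) for a kicked ring or HUNG (20) for a dead one, so a ring
 * is reported after two consecutive HUNG samples (2 * 20 = 40 >= 31) or
 * about seven repeated kicks (7 * 5 = 35 >= 31).
 */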
  2853.  
  2854.         if (!i915.enable_hangcheck)
  2855.                 return;
  2856.  
  2857.         for_each_ring(ring, dev_priv, i) {
  2858.                 u64 acthd;
  2859.                 u32 seqno;
  2860.                 bool busy = true;
  2861.  
  2862.                 semaphore_clear_deadlocks(dev_priv);
  2863.  
  2864.                 seqno = ring->get_seqno(ring, false);
  2865.                 acthd = intel_ring_get_active_head(ring);
  2866.  
  2867.                 if (ring->hangcheck.seqno == seqno) {
  2868.                         if (ring_idle(ring, seqno)) {
  2869.                                 ring->hangcheck.action = HANGCHECK_IDLE;
  2870.  
  2871. //               if (waitqueue_active(&ring->irq_queue)) {
  2872.                                         /* Issue a wake-up to catch stuck h/w. */
  2873. //                   DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
  2874. //                         ring->name);
  2875. //                   wake_up_all(&ring->irq_queue);
  2876. //               } else
  2877.                                         busy = false;
  2878.                         } else {
  2879.                                 /* We always increment the hangcheck score
  2880.                                  * if the ring is busy and still processing
  2881.                                  * the same request, so that no single request
  2882.                                  * can run indefinitely (such as a chain of
  2883.                                  * batches). The only time we do not increment
  2884.                                  * the hangcheck score on this ring is when it
  2885.                                  * is in a legitimate wait for another
  2886.                                  * ring. In that case the waiting ring is a
  2887.                                  * victim and we want to be sure we catch the
  2888.                                  * right culprit. Then every time we do kick
  2889.                                  * the ring, add a small increment to the
  2890.                                  * score so that we can catch a batch that is
  2891.                                  * being repeatedly kicked and so responsible
  2892.                                  * for stalling the machine.
  2893.                                  */
  2894.                                 ring->hangcheck.action = ring_stuck(ring,
  2895.                                                                     acthd);
  2896.  
  2897.                                 switch (ring->hangcheck.action) {
  2898.                                 case HANGCHECK_IDLE:
  2899.                                 case HANGCHECK_WAIT:
  2900.                                 case HANGCHECK_ACTIVE:
  2901.                                         break;
  2902.                                 case HANGCHECK_ACTIVE_LOOP:
  2903.                                         ring->hangcheck.score += BUSY;
  2904.                                         break;
  2905.                                 case HANGCHECK_KICK:
  2906.                                         ring->hangcheck.score += KICK;
  2907.                                         break;
  2908.                                 case HANGCHECK_HUNG:
  2909.                                         ring->hangcheck.score += HUNG;
  2910.                                         stuck[i] = true;
  2911.                                         break;
  2912.                                 }
  2913.                         }
  2914.                 } else {
  2915.                         ring->hangcheck.action = HANGCHECK_ACTIVE;
  2916.  
  2917.                         /* Gradually reduce the count so that we catch DoS
  2918.                          * attempts across multiple batches.
  2919.                          */
  2920.                         if (ring->hangcheck.score > 0)
  2921.                                 ring->hangcheck.score--;
  2922.  
  2923.                         ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
  2924.                 }
  2925.  
  2926.                 ring->hangcheck.seqno = seqno;
  2927.                 ring->hangcheck.acthd = acthd;
  2928.                 busy_count += busy;
  2929.         }
  2930.  
  2931.         for_each_ring(ring, dev_priv, i) {
  2932.                 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
  2933.                         DRM_INFO("%s on %s\n",
  2934.                                   stuck[i] ? "stuck" : "no progress",
  2935.                                   ring->name);
  2936.                         rings_hung++;
  2937.                 }
  2938.         }
  2939.  
  2940. //   if (rings_hung)
  2941. //       return i915_handle_error(dev, true);
  2942.  
  2943. }
  2944. static void ibx_irq_reset(struct drm_device *dev)
  2945. {
  2946.         struct drm_i915_private *dev_priv = dev->dev_private;
  2947.  
  2948.         if (HAS_PCH_NOP(dev))
  2949.                 return;
  2950.  
  2951.         GEN5_IRQ_RESET(SDE);
  2952.  
  2953.         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
  2954.                 I915_WRITE(SERR_INT, 0xffffffff);
  2955. }
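/*
 * Sketch of what GEN5_IRQ_RESET(SDE) above roughly expands to for the SDE
 * register block (see the macro definition for the authoritative version):
 *
 *      I915_WRITE(SDEIMR, 0xffffffff);  // mask all sources
 *      I915_WRITE(SDEIER, 0);           // disable all sources
 *      I915_WRITE(SDEIIR, 0xffffffff);  // clear latched status ...
 *      I915_WRITE(SDEIIR, 0xffffffff);  // ... twice, IIR can queue events
 *
 * with posting reads in between to flush the writes.
 */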
  2956.  
  2957. /*
  2958.  * SDEIER is also touched by the interrupt handler to work around missed PCH
  2959.  * interrupts. Hence we can't update it after the interrupt handler is enabled -
  2960.  * instead we unconditionally enable all PCH interrupt sources here, but then
  2961.  * only unmask them as needed with SDEIMR.
  2962.  *
  2963.  * This function needs to be called before interrupts are enabled.
  2964.  */
  2965. static void ibx_irq_pre_postinstall(struct drm_device *dev)
  2966. {
  2967.         struct drm_i915_private *dev_priv = dev->dev_private;
  2968.  
  2969.         if (HAS_PCH_NOP(dev))
  2970.                 return;
  2971.  
  2972.         WARN_ON(I915_READ(SDEIER) != 0);
  2973.         I915_WRITE(SDEIER, 0xffffffff);
  2974.         POSTING_READ(SDEIER);
  2975. }
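/*
 * Sketch of the handler-side half of this workaround, as done in the
 * upstream ironlake interrupt handler: SDEIER is cleared for the duration
 * of PCH interrupt servicing, so an interrupt arriving meanwhile
 * re-triggers the CPU interrupt once SDEIER is restored:
 *
 *      sde_ier = I915_READ(SDEIER);
 *      I915_WRITE(SDEIER, 0);
 *      POSTING_READ(SDEIER);
 *      ... service the PCH interrupt ...
 *      I915_WRITE(SDEIER, sde_ier);
 *      POSTING_READ(SDEIER);
 */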
  2976.  
  2977. static void gen5_gt_irq_reset(struct drm_device *dev)
  2978. {
  2979.         struct drm_i915_private *dev_priv = dev->dev_private;
  2980.  
  2981.         GEN5_IRQ_RESET(GT);
  2982.         if (INTEL_INFO(dev)->gen >= 6)
  2983.                 GEN5_IRQ_RESET(GEN6_PM);
  2984. }
  2985.  
  2986. /* drm_dma.h hooks */
  2987.
  2988. static void ironlake_irq_reset(struct drm_device *dev)
  2989. {
  2990.         struct drm_i915_private *dev_priv = dev->dev_private;
  2991.  
  2992.         I915_WRITE(HWSTAM, 0xffffffff);
  2993.  
  2994.         GEN5_IRQ_RESET(DE);
  2995.         if (IS_GEN7(dev))
  2996.                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
  2997.  
  2998.         gen5_gt_irq_reset(dev);
  2999.  
  3000.         ibx_irq_reset(dev);
  3001. }
  3002.  
  3003. static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
  3004. {
  3005.         enum pipe pipe;
  3006.  
  3007.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  3008.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  3009.  
  3010.         for_each_pipe(dev_priv, pipe)
  3011.                 I915_WRITE(PIPESTAT(pipe), 0xffff);
  3012.  
  3013.         GEN5_IRQ_RESET(VLV_);
  3014. }
  3015.  
  3016. static void valleyview_irq_preinstall(struct drm_device *dev)
  3017. {
  3018.         struct drm_i915_private *dev_priv = dev->dev_private;
  3019.  
  3020.         /* VLV magic */
  3021.         I915_WRITE(VLV_IMR, 0);
  3022.         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
  3023.         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
  3024.         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
  3025.  
  3026.         gen5_gt_irq_reset(dev);
  3027.  
  3028.         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
  3029.  
  3030.         vlv_display_irq_reset(dev_priv);
  3031. }
  3032.  
  3033. static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
  3034. {
  3035.         GEN8_IRQ_RESET_NDX(GT, 0);
  3036.         GEN8_IRQ_RESET_NDX(GT, 1);
  3037.         GEN8_IRQ_RESET_NDX(GT, 2);
  3038.         GEN8_IRQ_RESET_NDX(GT, 3);
  3039. }
  3040.  
  3041. static void gen8_irq_reset(struct drm_device *dev)
  3042. {
  3043.         struct drm_i915_private *dev_priv = dev->dev_private;
  3044.         int pipe;
  3045.  
  3046.         I915_WRITE(GEN8_MASTER_IRQ, 0);
  3047.         POSTING_READ(GEN8_MASTER_IRQ);
  3048.  
  3049.         gen8_gt_irq_reset(dev_priv);
  3050.  
  3051.         for_each_pipe(dev_priv, pipe)
  3052.                 if (intel_display_power_is_enabled(dev_priv,
  3053.                                                 POWER_DOMAIN_PIPE(pipe)))
  3054.                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
  3055.  
  3056.         GEN5_IRQ_RESET(GEN8_DE_PORT_);
  3057.         GEN5_IRQ_RESET(GEN8_DE_MISC_);
  3058.         GEN5_IRQ_RESET(GEN8_PCU_);
  3059.  
  3060.         ibx_irq_reset(dev);
  3061. }
  3062.  
  3063. void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
  3064. {
  3065.         uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
  3066.  
  3067.         spin_lock_irq(&dev_priv->irq_lock);
  3068.         GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
  3069.                           ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
  3070.         GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
  3071.                           ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
  3072.         spin_unlock_irq(&dev_priv->irq_lock);
  3073. }
  3074.  
  3075. static void cherryview_irq_preinstall(struct drm_device *dev)
  3076. {
  3077.         struct drm_i915_private *dev_priv = dev->dev_private;
  3078.  
  3079.         I915_WRITE(GEN8_MASTER_IRQ, 0);
  3080.         POSTING_READ(GEN8_MASTER_IRQ);
  3081.  
  3082.         gen8_gt_irq_reset(dev_priv);
  3083.  
  3084.         GEN5_IRQ_RESET(GEN8_PCU_);
  3085.  
  3086.         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
  3087.  
  3088.         vlv_display_irq_reset(dev_priv);
  3089. }
  3090.  
  3091. static void ibx_hpd_irq_setup(struct drm_device *dev)
  3092. {
  3093.         struct drm_i915_private *dev_priv = dev->dev_private;
  3094.         struct intel_encoder *intel_encoder;
  3095.         u32 hotplug_irqs, hotplug, enabled_irqs = 0;
  3096.  
  3097.         if (HAS_PCH_IBX(dev)) {
  3098.                 hotplug_irqs = SDE_HOTPLUG_MASK;
  3099.                 for_each_intel_encoder(dev, intel_encoder)
  3100.                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
  3101.                                 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
  3102.         } else {
  3103.                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
  3104.                 for_each_intel_encoder(dev, intel_encoder)
  3105.                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
  3106.                                 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
  3107.         }
  3108.  
  3109.         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
  3110.  
  3111.         /*
  3112.          * Enable digital hotplug on the PCH, and configure the DP short pulse
  3113.          * duration to 2ms (which is the minimum in the Display Port spec)
  3114.          *
  3115.          * This register is the same on all known PCH chips.
  3116.          */
  3117.         hotplug = I915_READ(PCH_PORT_HOTPLUG);
  3118.         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
  3119.         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
  3120.         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
  3121.         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
  3122.         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  3123. }
  3124.  
  3125. static void ibx_irq_postinstall(struct drm_device *dev)
  3126. {
  3127.         struct drm_i915_private *dev_priv = dev->dev_private;
  3128.         u32 mask;
  3129.  
  3130.         if (HAS_PCH_NOP(dev))
  3131.                 return;
  3132.  
  3133.         if (HAS_PCH_IBX(dev))
  3134.                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
  3135.         else
  3136.                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
  3137.  
  3138.         GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
  3139.         I915_WRITE(SDEIMR, ~mask);
  3140. }
  3141.  
  3142. static void gen5_gt_irq_postinstall(struct drm_device *dev)
  3143. {
  3144.         struct drm_i915_private *dev_priv = dev->dev_private;
  3145.         u32 pm_irqs, gt_irqs;
  3146.  
  3147.         pm_irqs = gt_irqs = 0;
  3148.  
  3149.         dev_priv->gt_irq_mask = ~0;
  3150.         if (HAS_L3_DPF(dev)) {
  3151.                 /* L3 parity interrupt is always unmasked. */
  3152.                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
  3153.                 gt_irqs |= GT_PARITY_ERROR(dev);
  3154.         }
  3155.  
  3156.         gt_irqs |= GT_RENDER_USER_INTERRUPT;
  3157.         if (IS_GEN5(dev)) {
  3158.                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
  3159.                            ILK_BSD_USER_INTERRUPT;
  3160.         } else {
  3161.                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
  3162.         }
  3163.  
  3164.         GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
  3165.  
  3166.         if (INTEL_INFO(dev)->gen >= 6) {
  3167.                 /*
  3168.                  * RPS interrupts will get enabled/disabled on demand when RPS
  3169.                  * itself is enabled/disabled.
  3170.                  */
  3171.                 if (HAS_VEBOX(dev))
  3172.                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
  3173.  
  3174.                 dev_priv->pm_irq_mask = 0xffffffff;
  3175.                 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
  3176.         }
  3177. }
  3178.  
  3179. static int ironlake_irq_postinstall(struct drm_device *dev)
  3180. {
  3181.         struct drm_i915_private *dev_priv = dev->dev_private;
  3182.         u32 display_mask, extra_mask;
  3183.  
  3184.         if (INTEL_INFO(dev)->gen >= 7) {
  3185.                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
  3186.                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
  3187.                                 DE_PLANEB_FLIP_DONE_IVB |
  3188.                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
  3189.                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
  3190.                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
  3191.         } else {
  3192.                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
  3193.                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
  3194.                                 DE_AUX_CHANNEL_A |
  3195.                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
  3196.                                 DE_POISON);
  3197.                 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
  3198.                                 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
  3199.         }
  3200.  
  3201.         dev_priv->irq_mask = ~display_mask;
  3202.  
  3203.         I915_WRITE(HWSTAM, 0xeffe);
  3204.  
  3205.         ibx_irq_pre_postinstall(dev);
  3206.  
  3207.         GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
  3208.  
  3209.         gen5_gt_irq_postinstall(dev);
  3210.  
  3211.         ibx_irq_postinstall(dev);
  3212.  
  3213.         if (IS_IRONLAKE_M(dev)) {
  3214.                 /* Enable PCU event interrupts
  3215.                  *
  3216.                  * spinlocking not required here for correctness since interrupt
  3217.                  * setup is guaranteed to run in single-threaded context. But we
  3218.                  * need it to make the assert_spin_locked happy. */
  3219.                 spin_lock_irq(&dev_priv->irq_lock);
  3220.                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
  3221.                 spin_unlock_irq(&dev_priv->irq_lock);
  3222.         }
  3223.  
  3224.         return 0;
  3225. }
  3226.  
  3227. static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
  3228. {
  3229.         u32 pipestat_mask;
  3230.         u32 iir_mask;
  3231.         enum pipe pipe;
  3232.  
  3233.         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
  3234.                         PIPE_FIFO_UNDERRUN_STATUS;
  3235.  
  3236.         for_each_pipe(dev_priv, pipe)
  3237.                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
  3238.         POSTING_READ(PIPESTAT(PIPE_A));
  3239.  
  3240.         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
  3241.                         PIPE_CRC_DONE_INTERRUPT_STATUS;
  3242.  
  3243.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
  3244.         for_each_pipe(dev_priv, pipe)
  3245.                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
  3246.  
  3247.         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
  3248.                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3249.                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
  3250.         if (IS_CHERRYVIEW(dev_priv))
  3251.                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
  3252.         dev_priv->irq_mask &= ~iir_mask;
  3253.  
  3254.         I915_WRITE(VLV_IIR, iir_mask);
  3255.         I915_WRITE(VLV_IIR, iir_mask);
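        /*
         * Note: VLV_IIR is deliberately written twice; the IIR registers
         * can latch a second event while the first write is clearing them,
         * and the second write catches that re-asserted bit.
         */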
  3256.         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
  3257.         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  3258.         POSTING_READ(VLV_IMR);
  3259. }
  3260.  
  3261. static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
  3262. {
  3263.         u32 pipestat_mask;
  3264.         u32 iir_mask;
  3265.         enum pipe pipe;
  3266.  
  3267.         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
  3268.                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3269.                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
  3270.         if (IS_CHERRYVIEW(dev_priv))
  3271.                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
  3272.  
  3273.         dev_priv->irq_mask |= iir_mask;
  3274.         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  3275.         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
  3276.         I915_WRITE(VLV_IIR, iir_mask);
  3277.         I915_WRITE(VLV_IIR, iir_mask);
  3278.         POSTING_READ(VLV_IIR);
  3279.  
  3280.         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
  3281.                         PIPE_CRC_DONE_INTERRUPT_STATUS;
  3282.  
  3283.         i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
  3284.         for_each_pipe(dev_priv, pipe)
  3285.                 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
  3286.  
  3287.         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
  3288.                         PIPE_FIFO_UNDERRUN_STATUS;
  3289.  
  3290.         for_each_pipe(dev_priv, pipe)
  3291.                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
  3292.         POSTING_READ(PIPESTAT(PIPE_A));
  3293. }
  3294.  
  3295. void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
  3296. {
  3297.         assert_spin_locked(&dev_priv->irq_lock);
  3298.  
  3299.         if (dev_priv->display_irqs_enabled)
  3300.                 return;
  3301.  
  3302.         dev_priv->display_irqs_enabled = true;
  3303.  
  3304.         if (intel_irqs_enabled(dev_priv))
  3305.                 valleyview_display_irqs_install(dev_priv);
  3306. }
  3307.  
  3308. void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
  3309. {
  3310.         assert_spin_locked(&dev_priv->irq_lock);
  3311.  
  3312.         if (!dev_priv->display_irqs_enabled)
  3313.                 return;
  3314.  
  3315.         dev_priv->display_irqs_enabled = false;
  3316.  
  3317.         if (intel_irqs_enabled(dev_priv))
  3318.                 valleyview_display_irqs_uninstall(dev_priv);
  3319. }
  3320.  
  3321. static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
  3322. {
  3323.         dev_priv->irq_mask = ~0;
  3324.  
  3325.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  3326.         POSTING_READ(PORT_HOTPLUG_EN);
  3327.  
  3328.         I915_WRITE(VLV_IIR, 0xffffffff);
  3329.         I915_WRITE(VLV_IIR, 0xffffffff);
  3330.         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
  3331.         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  3332.         POSTING_READ(VLV_IMR);
  3333.  
  3334.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  3335.          * just to make the assert_spin_locked check happy. */
  3336.         spin_lock_irq(&dev_priv->irq_lock);
  3337.         if (dev_priv->display_irqs_enabled)
  3338.                 valleyview_display_irqs_install(dev_priv);
  3339.         spin_unlock_irq(&dev_priv->irq_lock);
  3340. }
  3341.  
  3342. static int valleyview_irq_postinstall(struct drm_device *dev)
  3343. {
  3344.         struct drm_i915_private *dev_priv = dev->dev_private;
  3345.  
  3346.         vlv_display_irq_postinstall(dev_priv);
  3347.  
  3348.         gen5_gt_irq_postinstall(dev);
  3349.  
  3350.         /* ack & enable invalid PTE error interrupts */
  3351. #if 0 /* FIXME: add support to irq handler for checking these bits */
  3352.         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
  3353.         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
  3354. #endif
  3355.  
  3356.         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
  3357.  
  3358.         return 0;
  3359. }
  3360.  
  3361. static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
  3362. {
  3363.         /* These are interrupts we'll toggle with the ring mask register */
  3364.         uint32_t gt_interrupts[] = {
  3365.                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
  3366.                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
  3367.                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
  3368.                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
  3369.                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
  3370.                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
  3371.                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
  3372.                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
  3373.                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
  3374.                 0,
  3375.                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
  3376.                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
  3377.                 };
  3378.  
  3379.         dev_priv->pm_irq_mask = 0xffffffff;
  3380.         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
  3381.         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
  3382.         /*
  3383.          * RPS interrupts will get enabled/disabled on demand when RPS itself
  3384.          * is enabled/disabled.
  3385.          */
  3386.         GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
  3387.         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
  3388. }
  3389.  
  3390. static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
  3391. {
  3392.         uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
  3393.         uint32_t de_pipe_enables;
  3394.         int pipe;
  3395.         u32 aux_en = GEN8_AUX_CHANNEL_A;
  3396.  
  3397.         if (IS_GEN9(dev_priv)) {
  3398.                 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
  3399.                                   GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
  3400.                 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
  3401.                         GEN9_AUX_CHANNEL_D;
  3402.         } else
  3403.                 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
  3404.                                   GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
  3405.  
  3406.         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
  3407.                 GEN8_PIPE_FIFO_UNDERRUN;
  3408.  
  3409.         dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
  3410.         dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
  3411.         dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
  3412.  
  3413.         for_each_pipe(dev_priv, pipe)
  3414.                 if (intel_display_power_is_enabled(dev_priv,
  3415.                                 POWER_DOMAIN_PIPE(pipe)))
  3416.                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
  3417.                                           dev_priv->de_irq_mask[pipe],
  3418.                                           de_pipe_enables);
  3419.  
  3420.         GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
  3421. }
  3422.  
  3423. static int gen8_irq_postinstall(struct drm_device *dev)
  3424. {
  3425.         struct drm_i915_private *dev_priv = dev->dev_private;
  3426.  
  3427.         ibx_irq_pre_postinstall(dev);
  3428.  
  3429.         gen8_gt_irq_postinstall(dev_priv);
  3430.         gen8_de_irq_postinstall(dev_priv);
  3431.  
  3432.         ibx_irq_postinstall(dev);
  3433.  
  3434.         I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
  3435.         POSTING_READ(GEN8_MASTER_IRQ);
  3436.  
  3437.         return 0;
  3438. }
  3439.  
  3440. static int cherryview_irq_postinstall(struct drm_device *dev)
  3441. {
  3442.         struct drm_i915_private *dev_priv = dev->dev_private;
  3443.  
  3444.         vlv_display_irq_postinstall(dev_priv);
  3445.  
  3446.         gen8_gt_irq_postinstall(dev_priv);
  3447.  
  3448.         I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
  3449.         POSTING_READ(GEN8_MASTER_IRQ);
  3450.  
  3451.         return 0;
  3452. }
  3453.  
  3454. static void gen8_irq_uninstall(struct drm_device *dev)
  3455. {
  3456.         struct drm_i915_private *dev_priv = dev->dev_private;
  3457.  
  3458.         if (!dev_priv)
  3459.                 return;
  3460.  
  3461.         gen8_irq_reset(dev);
  3462. }
  3463.  
  3464. static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
  3465. {
  3466.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  3467.          * just to make the assert_spin_locked check happy. */
  3468.         spin_lock_irq(&dev_priv->irq_lock);
  3469.         if (dev_priv->display_irqs_enabled)
  3470.                 valleyview_display_irqs_uninstall(dev_priv);
  3471.         spin_unlock_irq(&dev_priv->irq_lock);
  3472.  
  3473.         vlv_display_irq_reset(dev_priv);
  3474.  
  3475.         dev_priv->irq_mask = ~0;
  3476. }
  3477.  
  3478. static void valleyview_irq_uninstall(struct drm_device *dev)
  3479. {
  3480.         struct drm_i915_private *dev_priv = dev->dev_private;
  3481.  
  3482.         if (!dev_priv)
  3483.                 return;
  3484.  
  3485.         I915_WRITE(VLV_MASTER_IER, 0);
  3486.  
  3487.         gen5_gt_irq_reset(dev);
  3488.  
  3489.         I915_WRITE(HWSTAM, 0xffffffff);
  3490.  
  3491.         vlv_display_irq_uninstall(dev_priv);
  3492. }
  3493.  
  3494. static void cherryview_irq_uninstall(struct drm_device *dev)
  3495. {
  3496.         struct drm_i915_private *dev_priv = dev->dev_private;
  3497.  
  3498.         if (!dev_priv)
  3499.                 return;
  3500.  
  3501.         I915_WRITE(GEN8_MASTER_IRQ, 0);
  3502.         POSTING_READ(GEN8_MASTER_IRQ);
  3503.  
  3504.         gen8_gt_irq_reset(dev_priv);
  3505.  
  3506.         GEN5_IRQ_RESET(GEN8_PCU_);
  3507.  
  3508.         vlv_display_irq_uninstall(dev_priv);
  3509. }
  3510.  
  3511. static void ironlake_irq_uninstall(struct drm_device *dev)
  3512. {
  3513.         struct drm_i915_private *dev_priv = dev->dev_private;
  3514.  
  3515.         if (!dev_priv)
  3516.                 return;
  3517.  
  3518.         ironlake_irq_reset(dev);
  3519. }
  3520.  
  3521. #if 0
  3522. static void i8xx_irq_preinstall(struct drm_device * dev)
  3523. {
  3524.         struct drm_i915_private *dev_priv = dev->dev_private;
  3525.         int pipe;
  3526.  
  3527.         for_each_pipe(dev_priv, pipe)
  3528.                 I915_WRITE(PIPESTAT(pipe), 0);
  3529.         I915_WRITE16(IMR, 0xffff);
  3530.         I915_WRITE16(IER, 0x0);
  3531.         POSTING_READ16(IER);
  3532. }
  3533.  
  3534. static int i8xx_irq_postinstall(struct drm_device *dev)
  3535. {
  3536.         struct drm_i915_private *dev_priv = dev->dev_private;
  3537.  
  3538.         I915_WRITE16(EMR,
  3539.                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  3540.  
  3541.         /* Unmask the interrupts that we always want on. */
  3542.         dev_priv->irq_mask =
  3543.                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3544.                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  3545.                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  3546.                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  3547.                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  3548.         I915_WRITE16(IMR, dev_priv->irq_mask);
  3549.  
  3550.         I915_WRITE16(IER,
  3551.                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3552.                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  3553.                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
  3554.                      I915_USER_INTERRUPT);
  3555.         POSTING_READ16(IER);
  3556.  
  3557.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  3558.          * just to make the assert_spin_locked check happy. */
  3559.         spin_lock_irq(&dev_priv->irq_lock);
  3560.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
  3561.         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
  3562.         spin_unlock_irq(&dev_priv->irq_lock);
  3563.  
  3564.         return 0;
  3565. }
  3566.  
  3567. /*
  3568.  * Returns true when a page flip has completed.
  3569.  */
  3570. static bool i8xx_handle_vblank(struct drm_device *dev,
  3571.                                int plane, int pipe, u32 iir)
  3572. {
  3573.         struct drm_i915_private *dev_priv = dev->dev_private;
  3574.         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
  3575.  
  3576. //   if (!drm_handle_vblank(dev, pipe))
  3577.         return false;
  3578.  
  3579.         if ((iir & flip_pending) == 0)
  3580.                 goto check_page_flip;
  3581.  
  3582. //   intel_prepare_page_flip(dev, pipe);
  3583.  
  3584.         /* We detect FlipDone by looking for the change in PendingFlip from '1'
  3585.          * to '0' on the following vblank, i.e. IIR has the Pendingflip
  3586.          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
  3587.          * the flip is completed (no longer pending). Since this doesn't raise
  3588.          * an interrupt per se, we watch for the change at vblank.
  3589.          */
  3590.         if (I915_READ16(ISR) & flip_pending)
  3591.                 goto check_page_flip;
  3592.  
  3593.         intel_finish_page_flip(dev, pipe);
  3594.         return true;
  3595.  
  3596. check_page_flip:
  3597. //      intel_check_page_flip(dev, pipe);
  3598.         return false;
  3599. }
  3600.  
  3601. static irqreturn_t i8xx_irq_handler(int irq, void *arg)
  3602. {
  3603.         struct drm_device *dev = arg;
  3604.         struct drm_i915_private *dev_priv = dev->dev_private;
  3605.         u16 iir, new_iir;
  3606.         u32 pipe_stats[2];
  3607.         int pipe;
  3608.         u16 flip_mask =
  3609.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  3610.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  3611.  
  3612.         iir = I915_READ16(IIR);
  3613.         if (iir == 0)
  3614.                 return IRQ_NONE;
  3615.  
  3616.         while (iir & ~flip_mask) {
  3617.                 /* Can't rely on pipestat interrupt bit in iir as it might
  3618.                  * have been cleared after the pipestat interrupt was received.
  3619.                  * It doesn't set the bit in iir again, but it still produces
  3620.                  * interrupts (for non-MSI).
  3621.                  */
  3622.                 spin_lock(&dev_priv->irq_lock);
  3623.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  3624.                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
  3625.  
  3626.                 for_each_pipe(dev_priv, pipe) {
  3627.                         int reg = PIPESTAT(pipe);
  3628.                         pipe_stats[pipe] = I915_READ(reg);
  3629.  
  3630.                         /*
  3631.                          * Clear the PIPE*STAT regs before the IIR
  3632.                          */
  3633.                         if (pipe_stats[pipe] & 0x8000ffff)
  3634.                                 I915_WRITE(reg, pipe_stats[pipe]);
  3635.                 }
  3636.                 spin_unlock(&dev_priv->irq_lock);
  3637.  
  3638.                 I915_WRITE16(IIR, iir & ~flip_mask);
  3639.                 new_iir = I915_READ16(IIR); /* Flush posted writes */
  3640.  
  3641.                 if (iir & I915_USER_INTERRUPT)
  3642.                         notify_ring(dev, &dev_priv->ring[RCS]);
  3643.  
  3644.                 for_each_pipe(dev_priv, pipe) {
  3645.                         int plane = pipe;
  3646.                         if (HAS_FBC(dev))
  3647.                                 plane = !plane;
  3648.  
  3649.                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
  3650.                             i8xx_handle_vblank(dev, plane, pipe, iir))
  3651.                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
  3652.  
  3653.                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
  3654.                                 i9xx_pipe_crc_irq_handler(dev, pipe);
  3655.  
  3656.                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  3657.                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
  3658.                                                                     pipe);
  3659.                 }
  3660.  
  3661.                 iir = new_iir;
  3662.         }
  3663.  
  3664.         return IRQ_HANDLED;
  3665. }
  3666.  
  3667. static void i8xx_irq_uninstall(struct drm_device * dev)
  3668. {
  3669.         struct drm_i915_private *dev_priv = dev->dev_private;
  3670.         int pipe;
  3671.  
  3672.         for_each_pipe(dev_priv, pipe) {
  3673.                 /* Clear enable bits; then clear status bits */
  3674.                 I915_WRITE(PIPESTAT(pipe), 0);
  3675.                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
  3676.         }
  3677.         I915_WRITE16(IMR, 0xffff);
  3678.         I915_WRITE16(IER, 0x0);
  3679.         I915_WRITE16(IIR, I915_READ16(IIR));
  3680. }
  3681.  
  3682. #endif
  3683.  
  3684. static void i915_irq_preinstall(struct drm_device * dev)
  3685. {
  3686.         struct drm_i915_private *dev_priv = dev->dev_private;
  3687.         int pipe;
  3688.  
  3689.         if (I915_HAS_HOTPLUG(dev)) {
  3690.                 I915_WRITE(PORT_HOTPLUG_EN, 0);
  3691.                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  3692.         }
  3693.  
  3694.         I915_WRITE16(HWSTAM, 0xeffe);
  3695.         for_each_pipe(dev_priv, pipe)
  3696.                 I915_WRITE(PIPESTAT(pipe), 0);
  3697.         I915_WRITE(IMR, 0xffffffff);
  3698.         I915_WRITE(IER, 0x0);
  3699.         POSTING_READ(IER);
  3700. }
  3701.  
  3702. static int i915_irq_postinstall(struct drm_device *dev)
  3703. {
  3704.         struct drm_i915_private *dev_priv = dev->dev_private;
  3705.         u32 enable_mask;
  3706.  
  3707.         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  3708.  
  3709.         /* Unmask the interrupts that we always want on. */
  3710.         dev_priv->irq_mask =
  3711.                 ~(I915_ASLE_INTERRUPT |
  3712.                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3713.                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  3714.                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  3715.                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  3716.                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  3717.  
  3718.         enable_mask =
  3719.                 I915_ASLE_INTERRUPT |
  3720.                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3721.                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  3722.                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
  3723.                 I915_USER_INTERRUPT;
  3724.  
  3725.         if (I915_HAS_HOTPLUG(dev)) {
  3726.                 I915_WRITE(PORT_HOTPLUG_EN, 0);
  3727.                 POSTING_READ(PORT_HOTPLUG_EN);
  3728.  
  3729.                 /* Enable in IER... */
  3730.                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
  3731.                 /* and unmask in IMR */
  3732.                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
  3733.         }
  3734.  
  3735.         I915_WRITE(IMR, dev_priv->irq_mask);
  3736.         I915_WRITE(IER, enable_mask);
  3737.         POSTING_READ(IER);
  3738.  
  3739.         i915_enable_asle_pipestat(dev);
  3740.  
  3741.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  3742.          * just to make the assert_spin_locked check happy. */
  3743.         spin_lock_irq(&dev_priv->irq_lock);
  3744.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
  3745.         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
  3746.         spin_unlock_irq(&dev_priv->irq_lock);
  3747.  
  3748.         return 0;
  3749. }
  3750.  
  3751. /*
  3752.  * Returns true when a page flip has completed.
  3753.  */
  3754. static bool i915_handle_vblank(struct drm_device *dev,
  3755.                                int plane, int pipe, u32 iir)
  3756. {
  3757.         struct drm_i915_private *dev_priv = dev->dev_private;
  3758.         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
  3759.  
  3760. //   if (!drm_handle_vblank(dev, pipe))
  3761.         return false;
  3762.  
  3763.         if ((iir & flip_pending) == 0)
  3764.                 goto check_page_flip;
  3765.  
  3766. //   intel_prepare_page_flip(dev, plane);
  3767.  
  3768.         /* We detect FlipDone by looking for the change in PendingFlip from '1'
  3769.          * to '0' on the following vblank, i.e. IIR has the Pendingflip
  3770.          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
  3771.          * the flip is completed (no longer pending). Since this doesn't raise
  3772.          * an interrupt per se, we watch for the change at vblank.
  3773.          */
  3774.         if (I915_READ(ISR) & flip_pending)
  3775.                 goto check_page_flip;
  3776.  
  3777.         intel_finish_page_flip(dev, pipe);
  3778.         return true;
  3779.  
  3780. check_page_flip:
  3781. //      intel_check_page_flip(dev, pipe);
  3782.         return false;
  3783. }
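/*
 * Summary of the FlipDone detection above, which samples two registers:
 *
 *      IIR pending   ISR pending   meaning
 *           0             -        no flip was queued
 *           1             1        flip still pending in hardware
 *           1             0        flip done -> intel_finish_page_flip()
 *
 * In this port drm_handle_vblank() is stubbed out, so the function
 * currently returns false before reaching that logic.
 */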
  3784.  
  3785. static irqreturn_t i915_irq_handler(int irq, void *arg)
  3786. {
  3787.         struct drm_device *dev = arg;
  3788.         struct drm_i915_private *dev_priv = dev->dev_private;
  3789.         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
  3790.         u32 flip_mask =
  3791.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  3792.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  3793.         int pipe, ret = IRQ_NONE;
  3794.  
  3795.         iir = I915_READ(IIR);
  3796.         do {
  3797.                 bool irq_received = (iir & ~flip_mask) != 0;
  3798.                 bool blc_event = false;
  3799.  
  3800.                 /* Can't rely on pipestat interrupt bit in iir as it might
  3801.                  * have been cleared after the pipestat interrupt was received.
  3802.                  * It doesn't set the bit in iir again, but it still produces
  3803.                  * interrupts (for non-MSI).
  3804.                  */
  3805.                 spin_lock(&dev_priv->irq_lock);
  3806.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  3807.                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
  3808.  
  3809.                 for_each_pipe(dev_priv, pipe) {
  3810.                         int reg = PIPESTAT(pipe);
  3811.                         pipe_stats[pipe] = I915_READ(reg);
  3812.  
  3813.                         /* Clear the PIPE*STAT regs before the IIR */
  3814.                         if (pipe_stats[pipe] & 0x8000ffff) {
  3815.                                 I915_WRITE(reg, pipe_stats[pipe]);
  3816.                                 irq_received = true;
  3817.                         }
  3818.                 }
  3819.                 spin_unlock(&dev_priv->irq_lock);
  3820.  
  3821.                 if (!irq_received)
  3822.                         break;
  3823.  
  3824.                 /* Consume port.  Then clear IIR or we'll miss events */
  3825.                 if (I915_HAS_HOTPLUG(dev) &&
  3826.                     iir & I915_DISPLAY_PORT_INTERRUPT)
  3827.                         i9xx_hpd_irq_handler(dev);
  3828.  
  3829.                 I915_WRITE(IIR, iir & ~flip_mask);
  3830.                 new_iir = I915_READ(IIR); /* Flush posted writes */
  3831.  
  3832.                 if (iir & I915_USER_INTERRUPT)
  3833.                         notify_ring(dev, &dev_priv->ring[RCS]);
  3834.  
  3835.                 for_each_pipe(dev_priv, pipe) {
  3836.                         int plane = pipe;
  3837.                         if (HAS_FBC(dev))
  3838.                                 plane = !plane;
  3839.  
  3840.                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
  3841.                             i915_handle_vblank(dev, plane, pipe, iir))
  3842.                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
  3843.  
  3844.                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
  3845.                                 blc_event = true;
  3846.  
  3847.                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
  3848.                                 i9xx_pipe_crc_irq_handler(dev, pipe);
  3849.  
  3850.                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  3851.                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
  3852.                                                                     pipe);
  3853.                 }
  3854.  
  3855.                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
  3856.                         intel_opregion_asle_intr(dev);
  3857.  
  3858.                 /* With MSI, interrupts are only generated when iir
  3859.                  * transitions from zero to nonzero.  If another bit got
  3860.                  * set while we were handling the existing iir bits, then
  3861.                  * we would never get another interrupt.
  3862.                  *
  3863.                  * This is fine on non-MSI as well, as if we hit this path
  3864.                  * we avoid exiting the interrupt handler only to generate
  3865.                  * another one.
  3866.                  *
  3867.                  * Note that for MSI this could cause a stray interrupt report
  3868.                  * if an interrupt landed in the time between writing IIR and
  3869.                  * the posting read.  This should be rare enough to never
  3870.                  * trigger the 99% of 100,000 interrupts test for disabling
  3871.                  * stray interrupts.
  3872.                  */
  3873.                 ret = IRQ_HANDLED;
  3874.                 iir = new_iir;
  3875.         } while (iir & ~flip_mask);
  3876.  
  3877.         return ret;
  3878. }
  3879.  
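/*
 * The ack/re-read pattern used by the handler above, in miniature (an
 * illustrative sketch only; READ, WRITE and handle stand in for the real
 * register accessors and dispatch code, they are not names from this file):
 *
 *      iir = READ(IIR);
 *      do {
 *              WRITE(IIR, iir);     // ack the bits we are about to handle
 *              new_iir = READ(IIR); // posting read, picks up late arrivals
 *              handle(iir);
 *              iir = new_iir;       // loop until no new bits remain
 *      } while (iir);
 *
 * Writing IIR before handling means a source that fires again while we are
 * still in the handler shows up in new_iir and keeps the loop alive, which
 * preserves the zero-to-nonzero transition that MSI needs.
 */
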
  3880. static void i915_irq_uninstall(struct drm_device * dev)
  3881. {
  3882.         struct drm_i915_private *dev_priv = dev->dev_private;
  3883.         int pipe;
  3884.  
  3885.         if (I915_HAS_HOTPLUG(dev)) {
  3886.                 I915_WRITE(PORT_HOTPLUG_EN, 0);
  3887.                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  3888.         }
  3889.  
  3890.         I915_WRITE16(HWSTAM, 0xffff);
  3891.         for_each_pipe(dev_priv, pipe) {
  3892.                 /* Clear enable bits; then clear status bits */
  3893.                 I915_WRITE(PIPESTAT(pipe), 0);
  3894.                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
  3895.         }
  3896.         I915_WRITE(IMR, 0xffffffff);
  3897.         I915_WRITE(IER, 0x0);
  3898.  
  3899.         I915_WRITE(IIR, I915_READ(IIR));
  3900. }
  3901.  
  3902. static void i965_irq_preinstall(struct drm_device * dev)
  3903. {
  3904.         struct drm_i915_private *dev_priv = dev->dev_private;
  3905.         int pipe;
  3906.  
  3907.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  3908.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  3909.  
  3910.         I915_WRITE(HWSTAM, 0xeffe);
  3911.         for_each_pipe(dev_priv, pipe)
  3912.                 I915_WRITE(PIPESTAT(pipe), 0);
  3913.         I915_WRITE(IMR, 0xffffffff);
  3914.         I915_WRITE(IER, 0x0);
  3915.         POSTING_READ(IER);
  3916. }
  3917.  
  3918. static int i965_irq_postinstall(struct drm_device *dev)
  3919. {
  3920.         struct drm_i915_private *dev_priv = dev->dev_private;
  3921.         u32 enable_mask;
  3922.         u32 error_mask;
  3923.  
  3924.         /* Unmask the interrupts that we always want on. */
  3925.         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
  3926.                                I915_DISPLAY_PORT_INTERRUPT |
  3927.                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3928.                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  3929.                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  3930.                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  3931.                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  3932.  
  3933.         enable_mask = ~dev_priv->irq_mask;
  3934.         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  3935.                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
  3936.         enable_mask |= I915_USER_INTERRUPT;
  3937.  
  3938.         if (IS_G4X(dev))
  3939.                 enable_mask |= I915_BSD_USER_INTERRUPT;
  3940.  
  3941.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  3942.          * just to make the assert_spin_locked check happy. */
  3943.         spin_lock_irq(&dev_priv->irq_lock);
  3944.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
  3945.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
  3946.         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
  3947.         spin_unlock_irq(&dev_priv->irq_lock);
  3948.  
  3949.         /*
  3950.          * Enable some error detection, note the instruction error mask
  3951.          * bit is reserved, so we leave it masked.
  3952.          */
  3953.         if (IS_G4X(dev)) {
  3954.                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
  3955.                                GM45_ERROR_MEM_PRIV |
  3956.                                GM45_ERROR_CP_PRIV |
  3957.                                I915_ERROR_MEMORY_REFRESH);
  3958.         } else {
  3959.                 error_mask = ~(I915_ERROR_PAGE_TABLE |
  3960.                                I915_ERROR_MEMORY_REFRESH);
  3961.         }
  3962.         I915_WRITE(EMR, error_mask);
  3963.  
  3964.         I915_WRITE(IMR, dev_priv->irq_mask);
  3965.         I915_WRITE(IER, enable_mask);
  3966.         POSTING_READ(IER);
  3967.  
  3968.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  3969.         POSTING_READ(PORT_HOTPLUG_EN);
  3970.  
  3971.         i915_enable_asle_pipestat(dev);
  3972.  
  3973.         return 0;
  3974. }
  3975.  
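/*
 * How the masks above combine (a summary of the usual i915 IMR/IER
 * semantics, added here as commentary): a source raises an interrupt only
 * when its bit is clear in IMR and set in IER.  IMR therefore gets ~wanted
 * (dev_priv->irq_mask), while IER gets the wanted set plus the
 * user-interrupt bits.  The flip-pending bits are deliberately kept out of
 * IER because flip completion is detected from ISR on the following vblank
 * (see i915_handle_vblank()) rather than from a dedicated interrupt.
 */
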
  3976. static void i915_hpd_irq_setup(struct drm_device *dev)
  3977. {
  3978.         struct drm_i915_private *dev_priv = dev->dev_private;
  3979.         struct intel_encoder *intel_encoder;
  3980.         u32 hotplug_en;
  3981.  
  3982.         assert_spin_locked(&dev_priv->irq_lock);
  3983.  
  3984.         if (I915_HAS_HOTPLUG(dev)) {
  3985.                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
  3986.                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
  3987.                 /* Note HDMI and DP share hotplug bits */
  3988.                 /* Enable bits are the same for all generations */
  3989.                 for_each_intel_encoder(dev, intel_encoder)
  3990.                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
  3991.                                 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
  3992.                 /* Programming the CRT detection parameters tends to
  3993.                  * generate a spurious hotplug event about three seconds
  3994.                  * later, so just do it once.
  3995.                  */
  3996.                 if (IS_G4X(dev))
  3997.                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
  3998.                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
  3999.                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
  4000.  
  4001.                 /* Ignore TV since it's buggy */
  4002.                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
  4003.         }
  4004. }
  4005.  
  4006. static irqreturn_t i965_irq_handler(int irq, void *arg)
  4007. {
  4008.         struct drm_device *dev = arg;
  4009.         struct drm_i915_private *dev_priv = dev->dev_private;
  4010.         u32 iir, new_iir;
  4011.         u32 pipe_stats[I915_MAX_PIPES];
  4012.         int ret = IRQ_NONE, pipe;
  4013.         u32 flip_mask =
  4014.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  4015.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  4016.  
  4017.         iir = I915_READ(IIR);
  4018.  
  4019.         for (;;) {
  4020.                 bool irq_received = (iir & ~flip_mask) != 0;
  4021.                 bool blc_event = false;
  4022.  
  4023.                 /* Can't rely on pipestat interrupt bit in iir as it might
  4024.                  * have been cleared after the pipestat interrupt was received.
  4025.                  * It doesn't set the bit in iir again, but it still produces
  4026.                  * interrupts (for non-MSI).
  4027.                  */
  4028.                 spin_lock(&dev_priv->irq_lock);
  4029.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  4030.                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
  4031.  
  4032.                 for_each_pipe(dev_priv, pipe) {
  4033.                         int reg = PIPESTAT(pipe);
  4034.                         pipe_stats[pipe] = I915_READ(reg);
  4035.  
  4036.                         /*
  4037.                          * Clear the PIPE*STAT regs before the IIR
  4038.                          */
  4039.                         if (pipe_stats[pipe] & 0x8000ffff) {
  4040.                                 I915_WRITE(reg, pipe_stats[pipe]);
  4041.                                 irq_received = true;
  4042.                         }
  4043.                 }
  4044.                 spin_unlock(&dev_priv->irq_lock);
  4045.  
  4046.                 if (!irq_received)
  4047.                         break;
  4048.  
  4049.                 ret = IRQ_HANDLED;
  4050.  
  4051.                 /* Consume port.  Then clear IIR or we'll miss events */
  4052.                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
  4053.                         i9xx_hpd_irq_handler(dev);
  4054.  
  4055.                 I915_WRITE(IIR, iir & ~flip_mask);
  4056.                 new_iir = I915_READ(IIR); /* Flush posted writes */
  4057.  
  4058.                 if (iir & I915_USER_INTERRUPT)
  4059.                         notify_ring(dev, &dev_priv->ring[RCS]);
  4060.                 if (iir & I915_BSD_USER_INTERRUPT)
  4061.                         notify_ring(dev, &dev_priv->ring[VCS]);
  4062.  
  4063.                 for_each_pipe(dev_priv, pipe) {
  4064.                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
  4065.                             i915_handle_vblank(dev, pipe, pipe, iir))
  4066.                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
  4067.  
  4068.                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
  4069.                                 blc_event = true;
  4070.  
  4071.                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
  4072.                                 i9xx_pipe_crc_irq_handler(dev, pipe);
  4073.  
  4074.                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  4075.                                 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
  4076.                 }
  4077.  
  4078.                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
  4079.                         intel_opregion_asle_intr(dev);
  4080.  
  4081.                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
  4082.                         gmbus_irq_handler(dev);
  4083.  
  4084.                 /* With MSI, interrupts are only generated when iir
  4085.                  * transitions from zero to nonzero.  If another bit got
  4086.                  * set while we were handling the existing iir bits, then
  4087.                  * we would never get another interrupt.
  4088.                  *
  4089.                  * This is fine on non-MSI as well, as if we hit this path
  4090.                  * we avoid exiting the interrupt handler only to generate
  4091.                  * another one.
  4092.                  *
  4093.                  * Note that for MSI this could cause a stray interrupt report
  4094.                  * if an interrupt landed in the time between writing IIR and
  4095.                  * the posting read.  This should be rare enough to never
  4096.                  * trigger the 99% of 100,000 interrupts test for disabling
  4097.                  * stray interrupts.
  4098.                  */
  4099.                 iir = new_iir;
  4100.         }
  4101.  
  4102.         return ret;
  4103. }
  4104.  
  4105. static void i965_irq_uninstall(struct drm_device * dev)
  4106. {
  4107.         struct drm_i915_private *dev_priv = dev->dev_private;
  4108.         int pipe;
  4109.  
  4110.         if (!dev_priv)
  4111.                 return;
  4112.  
  4113.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  4114.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  4115.  
  4116.         I915_WRITE(HWSTAM, 0xffffffff);
  4117.         for_each_pipe(dev_priv, pipe)
  4118.                 I915_WRITE(PIPESTAT(pipe), 0);
  4119.         I915_WRITE(IMR, 0xffffffff);
  4120.         I915_WRITE(IER, 0x0);
  4121.  
  4122.         for_each_pipe(dev_priv, pipe)
  4123.                 I915_WRITE(PIPESTAT(pipe),
  4124.                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
  4125.         I915_WRITE(IIR, I915_READ(IIR));
  4126. }
  4127.  
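/*
 * Re-enable HPD pins that the interrupt-storm detection earlier in this
 * file marked HPD_DISABLED, and restore each affected connector's
 * preferred polling mode.
 */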
  4128. static void intel_hpd_irq_reenable_work(struct work_struct *work)
  4129. {
  4130.         struct drm_i915_private *dev_priv =
  4131.                 container_of(work, typeof(*dev_priv),
  4132.                              hotplug_reenable_work.work);
  4133.         struct drm_device *dev = dev_priv->dev;
  4134.         struct drm_mode_config *mode_config = &dev->mode_config;
  4135.         int i;
  4136.  
  4137.         intel_runtime_pm_get(dev_priv);
  4138.  
  4139.         spin_lock_irq(&dev_priv->irq_lock);
  4140.         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
  4141.                 struct drm_connector *connector;
  4142.  
  4143.                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
  4144.                         continue;
  4145.  
  4146.                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
  4147.  
  4148.                 list_for_each_entry(connector, &mode_config->connector_list, head) {
  4149.                         struct intel_connector *intel_connector = to_intel_connector(connector);
  4150.  
  4151.                         if (intel_connector->encoder->hpd_pin == i) {
  4152.                                 if (connector->polled != intel_connector->polled)
  4153.                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
  4154.                                                          connector->name);
  4155.                                 connector->polled = intel_connector->polled;
  4156.                                 if (!connector->polled)
  4157.                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
  4158.                         }
  4159.                 }
  4160.         }
  4161.         if (dev_priv->display.hpd_irq_setup)
  4162.                 dev_priv->display.hpd_irq_setup(dev);
  4163.         spin_unlock_irq(&dev_priv->irq_lock);
  4164.  
  4165.         intel_runtime_pm_put(dev_priv);
  4166. }
  4167.  
  4168. /**
  4169.  * intel_irq_init - initializes irq support
  4170.  * @dev_priv: i915 device instance
  4171.  *
  4172.  * This function initializes all the irq support including work items, timers
  4173.  * and all the vtables. It does not setup the interrupt itself though.
  4174.  */
  4175. void intel_irq_init(struct drm_i915_private *dev_priv)
  4176. {
  4177.         struct drm_device *dev = dev_priv->dev;
  4178.  
  4179.         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
  4180. //      INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
  4181.         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
  4182.         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
  4183.         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
  4184.  
  4185.         /* Let's track the enabled rps events */
  4186.         if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
  4187.                 /* WaGsvRC0ResidencyMethod:vlv */
  4188.                 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
  4189.         else
  4190.                 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
  4191.  
  4192.         INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
  4193.                           intel_hpd_irq_reenable_work);
  4194.  
  4195.  
  4196.         if (IS_GEN2(dev_priv)) {
  4197.                 dev->max_vblank_count = 0;
  4198.                 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
  4199.         } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
  4200.                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
  4201.                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
  4202.         } else {
  4203.                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
  4204.                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
  4205.         }
  4206.  
  4207.         /*
  4208.          * Opt out of the vblank disable timer on everything except gen2.
  4209.          * Gen2 doesn't have a hardware frame counter and so depends on
  4210.          * vblank interrupts to produce sane vblank sequence numbers.
  4211.          */
  4212.         if (!IS_GEN2(dev_priv))
  4213.                 dev->vblank_disable_immediate = true;
  4214.  
  4215.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  4216.                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
  4217.                 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
  4218.         }
  4219.  
  4220.         if (IS_CHERRYVIEW(dev_priv)) {
  4221.                 dev->driver->irq_handler = cherryview_irq_handler;
  4222.                 dev->driver->irq_preinstall = cherryview_irq_preinstall;
  4223.                 dev->driver->irq_postinstall = cherryview_irq_postinstall;
  4224.                 dev->driver->irq_uninstall = cherryview_irq_uninstall;
  4225.                 dev->driver->enable_vblank = valleyview_enable_vblank;
  4226.                 dev->driver->disable_vblank = valleyview_disable_vblank;
  4227.                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  4228.         } else if (IS_VALLEYVIEW(dev_priv)) {
  4229.                 dev->driver->irq_handler = valleyview_irq_handler;
  4230.                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
  4231.                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
  4232.                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
  4233.                 dev->driver->enable_vblank = valleyview_enable_vblank;
  4234.                 dev->driver->disable_vblank = valleyview_disable_vblank;
  4235.                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  4236.         } else if (INTEL_INFO(dev_priv)->gen >= 8) {
  4237.                 dev->driver->irq_handler = gen8_irq_handler;
  4238.                 dev->driver->irq_preinstall = gen8_irq_reset;
  4239.                 dev->driver->irq_postinstall = gen8_irq_postinstall;
  4240.                 dev->driver->irq_uninstall = gen8_irq_uninstall;
  4241.                 dev->driver->enable_vblank = gen8_enable_vblank;
  4242.                 dev->driver->disable_vblank = gen8_disable_vblank;
  4243.                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
  4244.         } else if (HAS_PCH_SPLIT(dev)) {
  4245.                 dev->driver->irq_handler = ironlake_irq_handler;
  4246.                 dev->driver->irq_preinstall = ironlake_irq_reset;
  4247.                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
  4248.                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
  4249.                 dev->driver->enable_vblank = ironlake_enable_vblank;
  4250.                 dev->driver->disable_vblank = ironlake_disable_vblank;
  4251.                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
  4252.         } else {
  4253.                 if (INTEL_INFO(dev_priv)->gen == 2) {
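                        /* Note: no gen2 (i8xx) IRQ handlers are wired up
                         * here; gen2 only gets the vblank hooks set below. */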
  4254.                 } else if (INTEL_INFO(dev_priv)->gen == 3) {
  4255.                         dev->driver->irq_preinstall = i915_irq_preinstall;
  4256.                         dev->driver->irq_postinstall = i915_irq_postinstall;
  4257.                         dev->driver->irq_uninstall = i915_irq_uninstall;
  4258.                         dev->driver->irq_handler = i915_irq_handler;
  4259.                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  4260.                 } else {
  4261.                         dev->driver->irq_preinstall = i965_irq_preinstall;
  4262.                         dev->driver->irq_postinstall = i965_irq_postinstall;
  4263.                         dev->driver->irq_uninstall = i965_irq_uninstall;
  4264.                         dev->driver->irq_handler = i965_irq_handler;
  4265.                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  4266.                 }
  4267.                 dev->driver->enable_vblank = i915_enable_vblank;
  4268.                 dev->driver->disable_vblank = i915_disable_vblank;
  4269.         }
  4270. }
  4271.  
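/*
 * Typical bring-up order implied by the comments in this file (a sketch of
 * the usual driver-load path, not code taken from this driver; error
 * handling omitted):
 *
 *      intel_irq_init(dev_priv);    // work items, timers, vtables
 *      intel_irq_install(dev_priv); // enable the hardware interrupt
 *      intel_hpd_init(dev_priv);    // only now enable hotplug
 */
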
  4272. /**
  4273.  * intel_hpd_init - initializes and enables hpd support
  4274.  * @dev_priv: i915 device instance
  4275.  *
  4276.  * This function enables the hotplug support. It requires that interrupts have
  4277.  * already been enabled with intel_irq_install(). From this point on hotplug and
  4278.  * poll requests can run concurrently with other code, so locking rules must be
  4279.  * obeyed.
  4280.  *
  4281.  * This is a separate step from interrupt enabling to simplify the locking rules
  4282.  * in the driver load and resume code.
  4283.  */
  4284. void intel_hpd_init(struct drm_i915_private *dev_priv)
  4285. {
  4286.         struct drm_device *dev = dev_priv->dev;
  4287.         struct drm_mode_config *mode_config = &dev->mode_config;
  4288.         struct drm_connector *connector;
  4289.         int i;
  4290.  
  4291.         for (i = 1; i < HPD_NUM_PINS; i++) {
  4292.                 dev_priv->hpd_stats[i].hpd_cnt = 0;
  4293.                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
  4294.         }
  4295.         list_for_each_entry(connector, &mode_config->connector_list, head) {
  4296.                 struct intel_connector *intel_connector = to_intel_connector(connector);
  4297.                 connector->polled = intel_connector->polled;
  4298.                 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
  4299.                         connector->polled = DRM_CONNECTOR_POLL_HPD;
  4300.                 if (intel_connector->mst_port)
  4301.                         connector->polled = DRM_CONNECTOR_POLL_HPD;
  4302.         }
  4303.  
  4304.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  4305.          * just to make the assert_spin_locked checks happy. */
  4306.         spin_lock_irq(&dev_priv->irq_lock);
  4307.         if (dev_priv->display.hpd_irq_setup)
  4308.                 dev_priv->display.hpd_irq_setup(dev);
  4309.         spin_unlock_irq(&dev_priv->irq_lock);
  4310. }
  4311.  
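/*
 * Note on the polled fixup in intel_hpd_init(): connectors start out with
 * whatever poll mode their encoder declared; anything with a real HPD pin
 * (or an MST port) is switched to DRM_CONNECTOR_POLL_HPD so the poll
 * helper defers to the interrupt path for it.
 */
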
  4312. /**
  4313.  * intel_irq_install - enables the hardware interrupt
  4314.  * @dev_priv: i915 device instance
  4315.  *
  4316.  * This function enables the hardware interrupt handling, but leaves the hotplug
  4317.  * handling still disabled. It is called after intel_irq_init().
  4318.  *
  4319.  * In the driver load and resume code we need working interrupts in a few places
  4320.  * but don't want to deal with the hassle of concurrent probe and hotplug
  4321.  * workers. Hence the split into this two-stage approach.
  4322.  */
  4323. int intel_irq_install(struct drm_i915_private *dev_priv)
  4324. {
  4325.         /*
  4326.          * We enable some interrupt sources in our postinstall hooks, so mark
  4327.          * interrupts as enabled _before_ actually enabling them to avoid
  4328.          * special cases in our ordering checks.
  4329.          */
  4330.         dev_priv->pm.irqs_enabled = true;
  4331.  
  4332.         return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
  4333. }
  4334.  
  4335. /**
  4336.  * intel_irq_uninstall - finalizes all irq handling
  4337.  * @dev_priv: i915 device instance
  4338.  *
  4339.  * This stops interrupt and hotplug handling and unregisters and frees all
  4340.  * resources acquired in the init functions.
  4341.  */
  4342. void intel_irq_uninstall(struct drm_i915_private *dev_priv)
  4343. {
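        /* In this port only the bookkeeping below is performed; the DRM
         * irq teardown and hotplug-work cancellation stay stubbed out. */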
  4344. //      drm_irq_uninstall(dev_priv->dev);
  4345. //      intel_hpd_cancel_work(dev_priv);
  4346.         dev_priv->pm.irqs_enabled = false;
  4347. }
  4348.  
  4349. /**
  4350.  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
  4351.  * @dev_priv: i915 device instance
  4352.  *
  4353.  * This function is used to disable interrupts at runtime, both in the runtime
  4354.  * pm and the system suspend/resume code.
  4355.  */
  4356. void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
  4357. {
  4358.         dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
  4359.         dev_priv->pm.irqs_enabled = false;
  4360. }
  4361.  
  4362. /**
  4363.  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
  4364.  * @dev_priv: i915 device instance
  4365.  *
  4366.  * This function is used to enable interrupts at runtime, both in the runtime
  4367.  * pm and the system suspend/resume code.
  4368.  */
  4369. void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
  4370. {
  4371.         dev_priv->pm.irqs_enabled = true;
  4372.         dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
  4373.         dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
  4374. }
  4375.  
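/*
 * The two helpers above are used as a pair around a low-power period (a
 * sketch of the intended usage, not code from this file):
 *
 *      intel_runtime_pm_disable_interrupts(dev_priv);
 *      ... device powered down and back up ...
 *      intel_runtime_pm_enable_interrupts(dev_priv);
 *
 * Re-enabling runs the full preinstall/postinstall sequence since the
 * hardware may have lost its interrupt configuration while powered off.
 */
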
  4376. irqreturn_t intel_irq_handler(struct drm_device *dev)
  4377. {
  4378.         /* KolibriOS-specific entry point: forward to the per-platform
  4379.          * handler installed by intel_irq_init().  The irq argument is
  4380.          * unused by those handlers, hence the 0. */
  4381.  
  4382.         return dev->driver->irq_handler(0, dev);
  4383. }
  4384.  
  4385.