
  1. /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
  2.  */
  3. /*
  4.  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  5.  * All Rights Reserved.
  6.  *
  7.  * Permission is hereby granted, free of charge, to any person obtaining a
  8.  * copy of this software and associated documentation files (the
  9.  * "Software"), to deal in the Software without restriction, including
  10.  * without limitation the rights to use, copy, modify, merge, publish,
  11.  * distribute, sub license, and/or sell copies of the Software, and to
  12.  * permit persons to whom the Software is furnished to do so, subject to
  13.  * the following conditions:
  14.  *
  15.  * The above copyright notice and this permission notice (including the
  16.  * next paragraph) shall be included in all copies or substantial portions
  17.  * of the Software.
  18.  *
  19.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20.  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  21.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  22.  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  23.  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  24.  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  25.  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26.  *
  27.  */
  28.  
  29. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  30.  
  31. #include <linux/slab.h>
  32. #include <drm/drmP.h>
  33. #include <drm/i915_drm.h>
  34. #include "i915_drv.h"
  35. #include "i915_trace.h"
  36. #include "intel_drv.h"
  37.  
  38. #define assert_spin_locked(a)
  39.  
  40. static const u32 hpd_ibx[] = {
  41.         [HPD_CRT] = SDE_CRT_HOTPLUG,
  42.         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
  43.         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
  44.         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
  45.         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
  46. };
  47.  
  48. static const u32 hpd_cpt[] = {
  49.         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
  50.         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
  51.         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
  52.         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
  53.         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
  54. };
  55.  
  56. static const u32 hpd_mask_i915[] = {
  57.         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
  58.         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
  59.         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
  60.         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
  61.         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
  62.         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
  63. };
  64.  
  65. static const u32 hpd_status_g4x[] = {
  66.         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
  67.         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
  68.         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
  69.         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
  70.         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
  71.         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
  72. };
  73.  
  74. static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
  75.         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
  76.         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
  77.         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
  78.         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
  79.         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
  80.         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
  81. };
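/*
 * The five hpd_* tables above are indexed by the HPD_* pin enum and map each
 * hot plug detect pin to the matching per-platform interrupt bit: the IBX and
 * CPT south display (SDE) hotplug bits, the i915/VLV enable bits, and the G4X
 * vs. i915 status bit encodings. They are presumably consulted by the hotplug
 * interrupt setup and handler code elsewhere in this file to build the enable
 * masks and to decode which pin fired.
 */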
  82.  
  83. /* IIR can theoretically queue up two events. Be paranoid. */
  84. #define GEN8_IRQ_RESET_NDX(type, which) do { \
  85.         I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
  86.         POSTING_READ(GEN8_##type##_IMR(which)); \
  87.         I915_WRITE(GEN8_##type##_IER(which), 0); \
  88.         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
  89.         POSTING_READ(GEN8_##type##_IIR(which)); \
  90.         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
  91.         POSTING_READ(GEN8_##type##_IIR(which)); \
  92. } while (0)
  93.  
  94. #define GEN5_IRQ_RESET(type) do { \
  95.         I915_WRITE(type##IMR, 0xffffffff); \
  96.         POSTING_READ(type##IMR); \
  97.         I915_WRITE(type##IER, 0); \
  98.         I915_WRITE(type##IIR, 0xffffffff); \
  99.         POSTING_READ(type##IIR); \
  100.         I915_WRITE(type##IIR, 0xffffffff); \
  101.         POSTING_READ(type##IIR); \
  102. } while (0)
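/*
 * Both reset macros above follow the same sequence: mask every source
 * (IMR = 0xffffffff), disable delivery (IER = 0), then clear the identity
 * register twice with a posting read after each write, since IIR can hold a
 * second queued event. As an illustration of the token pasting, an invocation
 * such as GEN5_IRQ_RESET(GT) would expand to writes to GTIMR, GTIER and GTIIR.
 */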
  103.  
  104. /*
  105.  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
  106.  */
  107. #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
  108.         u32 val = I915_READ(reg); \
  109.         if (val) { \
  110.                 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
  111.                      (reg), val); \
  112.                 I915_WRITE((reg), 0xffffffff); \
  113.                 POSTING_READ(reg); \
  114.                 I915_WRITE((reg), 0xffffffff); \
  115.                 POSTING_READ(reg); \
  116.         } \
  117. } while (0)
  118.  
  119. #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
  120.         GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
  121.         I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
  122.         I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
  123.         POSTING_READ(GEN8_##type##_IER(which)); \
  124. } while (0)
  125.  
  126. #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
  127.         GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
  128.         I915_WRITE(type##IMR, (imr_val)); \
  129.         I915_WRITE(type##IER, (ier_val)); \
  130.         POSTING_READ(type##IER); \
  131. } while (0)
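/*
 * The matching init macros first warn (and force-clear) if IIR is not already
 * zero, then program IMR before IER and finish with a posting read of IER to
 * flush the posted writes to the hardware.
 */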
  132.  
  133. #define pr_err(fmt, ...) \
  134.         printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
  135.  
  136.  
  137. #define DRM_WAKEUP( queue ) wake_up( queue )
  138. #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
  139.  
  140. #define MAX_NOPID ((u32)~0)
  141.  
  142.  
  143.  
  144. /* For display hotplug interrupt */
  145. static void
  146. ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
  147. {
  148.         assert_spin_locked(&dev_priv->irq_lock);
  149.  
  150.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  151.                 return;
  152.  
  153.         if ((dev_priv->irq_mask & mask) != 0) {
  154.                 dev_priv->irq_mask &= ~mask;
  155.                 I915_WRITE(DEIMR, dev_priv->irq_mask);
  156.                 POSTING_READ(DEIMR);
  157.         }
  158. }
  159.  
  160. static void
  161. ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
  162. {
  163.         assert_spin_locked(&dev_priv->irq_lock);
  164.  
  165.         if (!intel_irqs_enabled(dev_priv))
  166.                 return;
  167.  
  168.         if ((dev_priv->irq_mask & mask) != mask) {
  169.                 dev_priv->irq_mask |= mask;
  170.                 I915_WRITE(DEIMR, dev_priv->irq_mask);
  171.                 POSTING_READ(DEIMR);
  172.         }
  173. }
  174.  
  175. /**
  176.  * ilk_update_gt_irq - update GTIMR
  177.  * @dev_priv: driver private
  178.  * @interrupt_mask: mask of interrupt bits to update
  179.  * @enabled_irq_mask: mask of interrupt bits to enable
  180.  */
  181. static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
  182.                               uint32_t interrupt_mask,
  183.                               uint32_t enabled_irq_mask)
  184. {
  185.         assert_spin_locked(&dev_priv->irq_lock);
  186.  
  187.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  188.                 return;
  189.  
  190.         dev_priv->gt_irq_mask &= ~interrupt_mask;
  191.         dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
  192.         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
  193.         POSTING_READ(GTIMR);
  194. }
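/*
 * In GTIMR, as in the other IMR registers written in this file, a set bit
 * masks the interrupt off. ilk_update_gt_irq() therefore clears every bit
 * named in interrupt_mask and re-sets those of them that are not also in
 * enabled_irq_mask. Illustrative example with hypothetical values: for
 * interrupt_mask == 0x3 and enabled_irq_mask == 0x1, bit 0 ends up unmasked
 * (enabled) and bit 1 ends up masked (disabled). The two wrappers below pass
 * (mask, mask) to enable and (mask, 0) to disable.
 */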
  195.  
  196. void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
  197. {
  198.         ilk_update_gt_irq(dev_priv, mask, mask);
  199. }
  200.  
  201. void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
  202. {
  203.         ilk_update_gt_irq(dev_priv, mask, 0);
  204. }
  205.  
  206. /**
  207.  * snb_update_pm_irq - update GEN6_PMIMR
  208.  * @dev_priv: driver private
  209.  * @interrupt_mask: mask of interrupt bits to update
  210.  * @enabled_irq_mask: mask of interrupt bits to enable
  211.  */
  212. static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
  213.                               uint32_t interrupt_mask,
  214.                               uint32_t enabled_irq_mask)
  215. {
  216.         uint32_t new_val;
  217.  
  218.         assert_spin_locked(&dev_priv->irq_lock);
  219.  
  220.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  221.                 return;
  222.  
  223.         new_val = dev_priv->pm_irq_mask;
  224.         new_val &= ~interrupt_mask;
  225.         new_val |= (~enabled_irq_mask & interrupt_mask);
  226.  
  227.         if (new_val != dev_priv->pm_irq_mask) {
  228.                 dev_priv->pm_irq_mask = new_val;
  229.                 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
  230.                 POSTING_READ(GEN6_PMIMR);
  231.         }
  232. }
  233.  
  234. void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
  235. {
  236.         snb_update_pm_irq(dev_priv, mask, mask);
  237. }
  238.  
  239. void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
  240. {
  241.         snb_update_pm_irq(dev_priv, mask, 0);
  242. }
  243.  
  244. static bool ivb_can_enable_err_int(struct drm_device *dev)
  245. {
  246.         struct drm_i915_private *dev_priv = dev->dev_private;
  247.         struct intel_crtc *crtc;
  248.         enum pipe pipe;
  249.  
  250.         assert_spin_locked(&dev_priv->irq_lock);
  251.  
  252.         for_each_pipe(pipe) {
  253.                 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
  254.  
  255.                 if (crtc->cpu_fifo_underrun_disabled)
  256.                         return false;
  257.         }
  258.  
  259.         return true;
  260. }
  261.  
  262. /**
  263.  * bdw_update_pm_irq - update GT interrupt 2
  264.  * @dev_priv: driver private
  265.  * @interrupt_mask: mask of interrupt bits to update
  266.  * @enabled_irq_mask: mask of interrupt bits to enable
  267.  *
  268.  * Copied from the snb function, updated with relevant register offsets
  269.  */
  270. static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
  271.                               uint32_t interrupt_mask,
  272.                               uint32_t enabled_irq_mask)
  273. {
  274.         uint32_t new_val;
  275.  
  276.         assert_spin_locked(&dev_priv->irq_lock);
  277.  
  278.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  279.                 return;
  280.  
  281.         new_val = dev_priv->pm_irq_mask;
  282.         new_val &= ~interrupt_mask;
  283.         new_val |= (~enabled_irq_mask & interrupt_mask);
  284.  
  285.         if (new_val != dev_priv->pm_irq_mask) {
  286.                 dev_priv->pm_irq_mask = new_val;
  287.                 I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
  288.                 POSTING_READ(GEN8_GT_IMR(2));
  289.         }
  290. }
  291.  
  292. void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
  293. {
  294.         bdw_update_pm_irq(dev_priv, mask, mask);
  295. }
  296.  
  297. void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
  298. {
  299.         bdw_update_pm_irq(dev_priv, mask, 0);
  300. }
  301.  
  302. static bool cpt_can_enable_serr_int(struct drm_device *dev)
  303. {
  304.         struct drm_i915_private *dev_priv = dev->dev_private;
  305.         enum pipe pipe;
  306.         struct intel_crtc *crtc;
  307.  
  308.         assert_spin_locked(&dev_priv->irq_lock);
  309.  
  310.         for_each_pipe(pipe) {
  311.                 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
  312.  
  313.                 if (crtc->pch_fifo_underrun_disabled)
  314.                         return false;
  315.         }
  316.  
  317.         return true;
  318. }
  319.  
  320. void i9xx_check_fifo_underruns(struct drm_device *dev)
  321. {
  322.         struct drm_i915_private *dev_priv = dev->dev_private;
  323.         struct intel_crtc *crtc;
  324.         unsigned long flags;
  325.  
  326.         spin_lock_irqsave(&dev_priv->irq_lock, flags);
  327.  
  328.         for_each_intel_crtc(dev, crtc) {
  329.                 u32 reg = PIPESTAT(crtc->pipe);
  330.                 u32 pipestat;
  331.  
  332.                 if (crtc->cpu_fifo_underrun_disabled)
  333.                         continue;
  334.  
  335.                 pipestat = I915_READ(reg) & 0xffff0000;
  336.                 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
  337.                         continue;
  338.  
  339.                 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
  340.                 POSTING_READ(reg);
  341.  
  342.                 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
  343.         }
  344.  
  345.         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  346. }
  347.  
  348. static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
  349.                                              enum pipe pipe,
  350.                                              bool enable, bool old)
  351. {
  352.         struct drm_i915_private *dev_priv = dev->dev_private;
  353.         u32 reg = PIPESTAT(pipe);
  354.         u32 pipestat = I915_READ(reg) & 0xffff0000;
  355.  
  356.         assert_spin_locked(&dev_priv->irq_lock);
  357.  
  358.         if (enable) {
  359.                 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
  360.                 POSTING_READ(reg);
  361.         } else {
  362.                 if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
  363.                         DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
  364.         }
  365. }
  366.  
  367. static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
  368.                                                  enum pipe pipe, bool enable)
  369. {
  370.         struct drm_i915_private *dev_priv = dev->dev_private;
  371.         uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
  372.                                           DE_PIPEB_FIFO_UNDERRUN;
  373.  
  374.         if (enable)
  375.                 ironlake_enable_display_irq(dev_priv, bit);
  376.         else
  377.                 ironlake_disable_display_irq(dev_priv, bit);
  378. }
  379.  
  380. static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
  381.                                                   enum pipe pipe,
  382.                                                   bool enable, bool old)
  383. {
  384.         struct drm_i915_private *dev_priv = dev->dev_private;
  385.         if (enable) {
  386.                 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
  387.  
  388.                 if (!ivb_can_enable_err_int(dev))
  389.                         return;
  390.  
  391.                 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
  392.         } else {
  393.                 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
  394.  
  395.                 if (old &&
  396.                     I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
  397.                         DRM_ERROR("uncleared fifo underrun on pipe %c\n",
  398.                                       pipe_name(pipe));
  399.                 }
  400.         }
  401. }
  402.  
  403. static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
  404.                                                   enum pipe pipe, bool enable)
  405. {
  406.         struct drm_i915_private *dev_priv = dev->dev_private;
  407.  
  408.         assert_spin_locked(&dev_priv->irq_lock);
  409.  
  410.         if (enable)
  411.                 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
  412.         else
  413.                 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
  414.         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
  415.         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
  416. }
  417.  
  418. /**
  419.  * ibx_display_interrupt_update - update SDEIMR
  420.  * @dev_priv: driver private
  421.  * @interrupt_mask: mask of interrupt bits to update
  422.  * @enabled_irq_mask: mask of interrupt bits to enable
  423.  */
  424. static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
  425.                                          uint32_t interrupt_mask,
  426.                                          uint32_t enabled_irq_mask)
  427. {
  428.         uint32_t sdeimr = I915_READ(SDEIMR);
  429.         sdeimr &= ~interrupt_mask;
  430.         sdeimr |= (~enabled_irq_mask & interrupt_mask);
  431.  
  432.         assert_spin_locked(&dev_priv->irq_lock);
  433.  
  434.         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
  435.                 return;
  436.  
  437.         I915_WRITE(SDEIMR, sdeimr);
  438.         POSTING_READ(SDEIMR);
  439. }
  440. #define ibx_enable_display_interrupt(dev_priv, bits) \
  441.         ibx_display_interrupt_update((dev_priv), (bits), (bits))
  442. #define ibx_disable_display_interrupt(dev_priv, bits) \
  443.         ibx_display_interrupt_update((dev_priv), (bits), 0)
  444.  
  445. static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
  446.                                             enum transcoder pch_transcoder,
  447.                                             bool enable)
  448. {
  449.         struct drm_i915_private *dev_priv = dev->dev_private;
  450.         uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
  451.                        SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
  452.  
  453.         if (enable)
  454.                 ibx_enable_display_interrupt(dev_priv, bit);
  455.         else
  456.                 ibx_disable_display_interrupt(dev_priv, bit);
  457. }
  458.  
  459. static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
  460.                                             enum transcoder pch_transcoder,
  461.                                             bool enable, bool old)
  462. {
  463.         struct drm_i915_private *dev_priv = dev->dev_private;
  464.  
  465.         if (enable) {
  466.                 I915_WRITE(SERR_INT,
  467.                            SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
  468.  
  469.                 if (!cpt_can_enable_serr_int(dev))
  470.                         return;
  471.  
  472.                 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
  473.         } else {
  474.                 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
  475.  
  476.                 if (old && I915_READ(SERR_INT) &
  477.                     SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
  478.                         DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
  479.                                       transcoder_name(pch_transcoder));
  480.                 }
  481.         }
  482. }
  483.  
  484. /**
  485.  * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
  486.  * @dev: drm device
  487.  * @pipe: pipe
  488.  * @enable: true if we want to report FIFO underrun errors, false otherwise
  489.  *
  490.  * This function makes us disable or enable CPU fifo underruns for a specific
  491.  * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
  492.  * reporting for one pipe may also disable all the other CPU error interrupts for
  493.  * the other pipes, due to the fact that there's just one interrupt mask/enable
  494.  * bit for all the pipes.
  495.  *
  496.  * Returns the previous state of underrun reporting.
  497.  */
  498. static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
  499.                                            enum pipe pipe, bool enable)
  500. {
  501.         struct drm_i915_private *dev_priv = dev->dev_private;
  502.         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  503.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  504.         bool old;
  505.  
  506.         assert_spin_locked(&dev_priv->irq_lock);
  507.  
  508.         old = !intel_crtc->cpu_fifo_underrun_disabled;
  509.         intel_crtc->cpu_fifo_underrun_disabled = !enable;
  510.  
  511.         if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
  512.                 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
  513.         else if (IS_GEN5(dev) || IS_GEN6(dev))
  514.                 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
  515.         else if (IS_GEN7(dev))
  516.                 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
  517.         else if (IS_GEN8(dev))
  518.                 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
  519.  
  520.         return old;
  521. }
  522.  
  523. bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
  524.                                            enum pipe pipe, bool enable)
  525. {
  526.         struct drm_i915_private *dev_priv = dev->dev_private;
  527.         unsigned long flags;
  528.         bool ret;
  529.  
  530.         spin_lock_irqsave(&dev_priv->irq_lock, flags);
  531.         ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
  532.         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  533.  
  534.         return ret;
  535. }
  536.  
  537. static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
  538.                                                   enum pipe pipe)
  539. {
  540.         struct drm_i915_private *dev_priv = dev->dev_private;
  541.         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  542.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  543.  
  544.         return !intel_crtc->cpu_fifo_underrun_disabled;
  545. }
  546.  
  547. /**
  548.  * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
  549.  * @dev: drm device
  550.  * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
  551.  * @enable: true if we want to report FIFO underrun errors, false otherwise
  552.  *
  553.  * This function makes us disable or enable PCH fifo underruns for a specific
  554.  * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
  555.  * underrun reporting for one transcoder may also disable all the other PCH
  556.  * error interrupts for the other transcoders, due to the fact that there's just
  557.  * one interrupt mask/enable bit for all the transcoders.
  558.  *
  559.  * Returns the previous state of underrun reporting.
  560.  */
  561. bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
  562.                                            enum transcoder pch_transcoder,
  563.                                            bool enable)
  564. {
  565.         struct drm_i915_private *dev_priv = dev->dev_private;
  566.         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
  567.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  568.         unsigned long flags;
  569.         bool old;
  570.  
  571.         /*
  572.          * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
  573.          * has only one pch transcoder A that all pipes can use. To avoid racy
  574.          * pch transcoder -> pipe lookups from interrupt code simply store the
  575.          * underrun statistics in crtc A. Since we never expose this anywhere
  576.          * nor use it outside of the fifo underrun code here using the "wrong"
  577.          * crtc on LPT won't cause issues.
  578.          */
  579.  
  580.         spin_lock_irqsave(&dev_priv->irq_lock, flags);
  581.  
  582.         old = !intel_crtc->pch_fifo_underrun_disabled;
  583.         intel_crtc->pch_fifo_underrun_disabled = !enable;
  584.  
  585.         if (HAS_PCH_IBX(dev))
  586.                 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
  587.         else
  588.                 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
  589.  
  590.         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  591.         return old;
  592. }
  593.  
  594.  
  595. static void
  596. __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
  597.                        u32 enable_mask, u32 status_mask)
  598. {
  599.         u32 reg = PIPESTAT(pipe);
  600.         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
  601.  
  602.         assert_spin_locked(&dev_priv->irq_lock);
  603.  
  604.         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
  605.                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
  606.                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
  607.                       pipe_name(pipe), enable_mask, status_mask))
  608.                 return;
  609.  
  610.         if ((pipestat & enable_mask) == enable_mask)
  611.                 return;
  612.  
  613.         dev_priv->pipestat_irq_mask[pipe] |= status_mask;
  614.  
  615.         /* Enable the interrupt, clear any pending status */
  616.         pipestat |= enable_mask | status_mask;
  617.         I915_WRITE(reg, pipestat);
  618.         POSTING_READ(reg);
  619. }
  620.  
  621. static void
  622. __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
  623.                         u32 enable_mask, u32 status_mask)
  624. {
  625.         u32 reg = PIPESTAT(pipe);
  626.         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
  627.  
  628.         assert_spin_locked(&dev_priv->irq_lock);
  629.  
  630.         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
  631.                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
  632.                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
  633.                       pipe_name(pipe), enable_mask, status_mask))
  634.                 return;
  635.  
  636.         if ((pipestat & enable_mask) == 0)
  637.                 return;
  638.  
  639.         dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
  640.  
  641.         pipestat &= ~enable_mask;
  642.         I915_WRITE(reg, pipestat);
  643.         POSTING_READ(reg);
  644. }
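/*
 * PIPESTAT keeps the interrupt enable bits in the high half of the register
 * and the corresponding status bits in the low half, which is why the callers
 * below normally derive enable_mask as status_mask << 16 and why the helpers
 * above mask the read value with PIPESTAT_INT_ENABLE_MASK. The status bits
 * are write-one-to-clear, so writing back "pipestat | status_mask" clears any
 * event that was already pending while enabling it.
 */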
  645.  
  646. static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
  647. {
  648.         u32 enable_mask = status_mask << 16;
  649.  
  650.         /*
  651.          * On pipe A we don't support the PSR interrupt yet,
  652.          * on pipe B and C the same bit MBZ.
  653.          */
  654.         if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
  655.                 return 0;
  656.         /*
  657.          * On pipe B and C we don't support the PSR interrupt yet, on pipe
  658.          * A the same bit is for perf counters which we don't use either.
  659.          */
  660.         if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
  661.                 return 0;
  662.  
  663.         enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
  664.                          SPRITE0_FLIP_DONE_INT_EN_VLV |
  665.                          SPRITE1_FLIP_DONE_INT_EN_VLV);
  666.         if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
  667.                 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
  668.         if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
  669.                 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
  670.  
  671.         return enable_mask;
  672. }
  673.  
  674. void
  675. i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
  676.                      u32 status_mask)
  677. {
  678.         u32 enable_mask;
  679.  
  680.         if (IS_VALLEYVIEW(dev_priv->dev))
  681.                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
  682.                                                            status_mask);
  683.         else
  684.                 enable_mask = status_mask << 16;
  685.         __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
  686. }
  687.  
  688. void
  689. i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
  690.                       u32 status_mask)
  691. {
  692.         u32 enable_mask;
  693.  
  694.         if (IS_VALLEYVIEW(dev_priv->dev))
  695.                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
  696.                                                            status_mask);
  697.         else
  698.                 enable_mask = status_mask << 16;
  699.         __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
  700. }
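/*
 * i915_enable_pipestat()/i915_disable_pipestat() go through the __ variants
 * above, which expect dev_priv->irq_lock to be held (note the
 * assert_spin_locked(), stubbed out in this port). i915_enable_asle_pipestat()
 * below shows the expected pattern: take irq_lock, call the helper, drop the
 * lock.
 */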
  701.  
  702. /**
  703.  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
  704.  */
  705. static void i915_enable_asle_pipestat(struct drm_device *dev)
  706. {
  707.         struct drm_i915_private *dev_priv = dev->dev_private;
  708.         unsigned long irqflags;
  709.  
  710.         if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
  711.                 return;
  712.  
  713.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  714.  
  715.         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
  716.         if (INTEL_INFO(dev)->gen >= 4)
  717.                 i915_enable_pipestat(dev_priv, PIPE_A,
  718.                                      PIPE_LEGACY_BLC_EVENT_STATUS);
  719.  
  720.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  721. }
  722.  
  723. /**
  724.  * i915_pipe_enabled - check if a pipe is enabled
  725.  * @dev: DRM device
  726.  * @pipe: pipe to check
  727.  *
  728.  * Reading certain registers when the pipe is disabled can hang the chip.
  729.  * Use this routine to make sure the PLL is running and the pipe is active
  730.  * before reading such registers if unsure.
  731.  */
  732. static int
  733. i915_pipe_enabled(struct drm_device *dev, int pipe)
  734. {
  735.         struct drm_i915_private *dev_priv = dev->dev_private;
  736.  
  737.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  738.                 /* Locking is horribly broken here, but whatever. */
  739.                 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  740.                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  741.  
  742.                 return intel_crtc->active;
  743.         } else {
  744.                 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
  745.         }
  746. }
  747.  
  748. /*
  749.  * This timing diagram depicts the video signal in and
  750.  * around the vertical blanking period.
  751.  *
  752.  * Assumptions about the fictitious mode used in this example:
  753.  *  vblank_start >= 3
  754.  *  vsync_start = vblank_start + 1
  755.  *  vsync_end = vblank_start + 2
  756.  *  vtotal = vblank_start + 3
  757.  *
  758.  *           start of vblank:
  759.  *           latch double buffered registers
  760.  *           increment frame counter (ctg+)
  761.  *           generate start of vblank interrupt (gen4+)
  762.  *           |
  763.  *           |          frame start:
  764.  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
  765.  *           |          may be shifted forward 1-3 extra lines via PIPECONF
  766.  *           |          |
  767.  *           |          |  start of vsync:
  768.  *           |          |  generate vsync interrupt
  769.  *           |          |  |
  770.  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
  771.  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
  772.  * ----va---> <-----------------vb--------------------> <--------va-------------
  773.  *       |          |       <----vs----->                     |
  774.  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
  775.  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
  776.  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
  777.  *       |          |                                         |
  778.  *       last visible pixel                                   first visible pixel
  779.  *                  |                                         increment frame counter (gen3/4)
  780.  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
  781.  *
  782.  * x  = horizontal active
  783.  * _  = horizontal blanking
  784.  * hs = horizontal sync
  785.  * va = vertical active
  786.  * vb = vertical blanking
  787.  * vs = vertical sync
  788.  * vbs = vblank_start (number)
  789.  *
  790.  * Summary:
  791.  * - most events happen at the start of horizontal sync
  792.  * - frame start happens at the start of horizontal blank, 1-4 lines
  793.  *   (depending on PIPECONF settings) after the start of vblank
  794.  * - gen3/4 pixel and frame counter are synchronized with the start
  795.  *   of horizontal active on the first line of vertical active
  796.  */
  797.  
  798. static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
  799. {
  800.         /* Gen2 doesn't have a hardware frame counter */
  801.         return 0;
  802. }
  803.  
  804. /* Called from drm generic code, passed a 'crtc', which
  805.  * we use as a pipe index
  806.  */
  807. static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
  808. {
  809.         struct drm_i915_private *dev_priv = dev->dev_private;
  810.         unsigned long high_frame;
  811.         unsigned long low_frame;
  812.         u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
  813.  
  814.         if (!i915_pipe_enabled(dev, pipe)) {
  815.                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
  816.                                 "pipe %c\n", pipe_name(pipe));
  817.                 return 0;
  818.         }
  819.  
  820.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  821.                 struct intel_crtc *intel_crtc =
  822.                         to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
  823.                 const struct drm_display_mode *mode =
  824.                         &intel_crtc->config.adjusted_mode;
  825.  
  826.                 htotal = mode->crtc_htotal;
  827.                 hsync_start = mode->crtc_hsync_start;
  828.                 vbl_start = mode->crtc_vblank_start;
  829.                 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
  830.                         vbl_start = DIV_ROUND_UP(vbl_start, 2);
  831.         } else {
  832.                 enum transcoder cpu_transcoder = (enum transcoder) pipe;
  833.  
  834.                 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
  835.                 hsync_start = (I915_READ(HSYNC(cpu_transcoder))  & 0x1fff) + 1;
  836.                 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
  837.                 if ((I915_READ(PIPECONF(cpu_transcoder)) &
  838.                      PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
  839.                         vbl_start = DIV_ROUND_UP(vbl_start, 2);
  840.         }
  841.  
  842.         /* Convert to pixel count */
  843.         vbl_start *= htotal;
  844.  
  845.         /* Start of vblank event occurs at start of hsync */
  846.         vbl_start -= htotal - hsync_start;
  847.  
  848.         high_frame = PIPEFRAME(pipe);
  849.         low_frame = PIPEFRAMEPIXEL(pipe);
  850.  
  851.         /*
  852.          * High & low register fields aren't synchronized, so make sure
  853.          * we get a low value that's stable across two reads of the high
  854.          * register.
  855.          */
  856.         do {
  857.                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
  858.                 low   = I915_READ(low_frame);
  859.                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
  860.         } while (high1 != high2);
  861.  
  862.         high1 >>= PIPE_FRAME_HIGH_SHIFT;
  863.         pixel = low & PIPE_PIXEL_MASK;
  864.         low >>= PIPE_FRAME_LOW_SHIFT;
  865.  
  866.         /*
  867.          * The frame counter increments at beginning of active.
  868.          * Cook up a vblank counter by also checking the pixel
  869.          * counter against vblank start.
  870.          */
  871.         return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
  872. }
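/*
 * i915_get_vblank_counter() above combines the high frame-count field from
 * PIPEFRAME with the low field from PIPEFRAMEPIXEL into a 24-bit frame number
 * ((high1 << 8) | low), and adds one when the pixel counter shows that scanout
 * has already passed vblank start, because the hardware frame counter only
 * increments at the beginning of active video.
 */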
  873.  
  874. static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
  875. {
  876.         struct drm_i915_private *dev_priv = dev->dev_private;
  877.         int reg = PIPE_FRMCOUNT_GM45(pipe);
  878.  
  879.         if (!i915_pipe_enabled(dev, pipe)) {
  880.                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
  881.                                  "pipe %c\n", pipe_name(pipe));
  882.                 return 0;
  883.         }
  884.  
  885.         return I915_READ(reg);
  886. }
  887.  
  888. /* raw reads, only for fast reads of display block, no need for forcewake etc. */
  889. #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
  890.  
  891. static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
  892. {
  893.         struct drm_device *dev = crtc->base.dev;
  894.         struct drm_i915_private *dev_priv = dev->dev_private;
  895.         const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
  896.         enum pipe pipe = crtc->pipe;
  897.         int position, vtotal;
  898.  
  899.         vtotal = mode->crtc_vtotal;
  900.         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
  901.                 vtotal /= 2;
  902.  
  903.         if (IS_GEN2(dev))
  904.                 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
  905.         else
  906.                 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
  907.  
  908.         /*
  909.          * See update_scanline_offset() for the details on the
  910.          * scanline_offset adjustment.
  911.          */
  912.         return (position + crtc->scanline_offset) % vtotal;
  913. }
  914.  
  915. static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
  916.                                     unsigned int flags, int *vpos, int *hpos,
  917.                                     void *stime, void *etime)
  918. {
  919.         struct drm_i915_private *dev_priv = dev->dev_private;
  920.         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  921.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  922.         const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
  923.         int position;
  924.         int vbl_start, vbl_end, hsync_start, htotal, vtotal;
  925.         bool in_vbl = true;
  926.         int ret = 0;
  927.         unsigned long irqflags;
  928.  
  929.         if (!intel_crtc->active) {
  930.                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
  931.                                  "pipe %c\n", pipe_name(pipe));
  932.                 return 0;
  933.         }
  934.  
  935.         htotal = mode->crtc_htotal;
  936.         hsync_start = mode->crtc_hsync_start;
  937.         vtotal = mode->crtc_vtotal;
  938.         vbl_start = mode->crtc_vblank_start;
  939.         vbl_end = mode->crtc_vblank_end;
  940.  
  941.         if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
  942.                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
  943.                 vbl_end /= 2;
  944.                 vtotal /= 2;
  945.         }
  946.  
  947.         ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
  948.  
  949.         /*
  950.          * Lock uncore.lock, as we will do multiple timing critical raw
  951.          * register reads, potentially with preemption disabled, so the
  952.          * following code must not block on uncore.lock.
  953.          */
  954.         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  955.  
  956.         /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
  957.  
  958.  
  959.         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
  960.                 /* No obvious pixelcount register. Only query vertical
  961.                  * scanout position from Display scan line register.
  962.                  */
  963.                 position = __intel_get_crtc_scanline(intel_crtc);
  964.         } else {
  965.                 /* Have access to pixelcount since start of frame.
  966.                  * We can split this into vertical and horizontal
  967.                  * scanout position.
  968.                  */
  969.                 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
  970.  
  971.                 /* convert to pixel counts */
  972.                 vbl_start *= htotal;
  973.                 vbl_end *= htotal;
  974.                 vtotal *= htotal;
  975.  
  976.                 /*
  977.                  * In interlaced modes, the pixel counter counts all pixels,
  978.                  * so one field will have htotal more pixels. In order to avoid
  979.                  * the reported position from jumping backwards when the pixel
  980.                  * counter is beyond the length of the shorter field, just
  981.                  * clamp the position the length of the shorter field. This
  982.                  * matches how the scanline counter based position works since
  983.                  * the scanline counter doesn't count the two half lines.
  984.                  */
  985.                 if (position >= vtotal)
  986.                         position = vtotal - 1;
  987.  
  988.                 /*
  989.                  * Start of vblank interrupt is triggered at start of hsync,
  990.                  * just prior to the first active line of vblank. However we
  991.                  * consider lines to start at the leading edge of horizontal
  992.                  * active. So, should we get here before we've crossed into
  993.                  * the horizontal active of the first line in vblank, we would
  994.                  * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
  995.                  * always add htotal-hsync_start to the current pixel position.
  996.                  */
  997.                 position = (position + htotal - hsync_start) % vtotal;
  998.         }
  999.  
  1000.  
  1001.         /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
  1002.  
  1003.         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  1004.  
  1005.         in_vbl = position >= vbl_start && position < vbl_end;
  1006.  
  1007.         /*
  1008.          * While in vblank, position will be negative
  1009.          * counting up towards 0 at vbl_end. And outside
  1010.          * vblank, position will be positive counting
  1011.          * up since vbl_end.
  1012.          */
  1013.         if (position >= vbl_start)
  1014.                 position -= vbl_end;
  1015.         else
  1016.                 position += vtotal - vbl_end;
  1017.  
  1018.         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
  1019.                 *vpos = position;
  1020.                 *hpos = 0;
  1021.         } else {
  1022.                 *vpos = position / htotal;
  1023.                 *hpos = position - (*vpos * htotal);
  1024.         }
  1025.  
  1026.         /* In vblank? */
  1027.         if (in_vbl)
  1028.                 ret |= DRM_SCANOUTPOS_INVBL;
  1029.  
  1030.         return ret;
  1031. }
  1032.  
  1033. int intel_get_crtc_scanline(struct intel_crtc *crtc)
  1034. {
  1035.         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  1036.         unsigned long irqflags;
  1037.         int position;
  1038.  
  1039.         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  1040.         position = __intel_get_crtc_scanline(crtc);
  1041.         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  1042.  
  1043.         return position;
  1044. }
  1045.  
  1046. static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
  1047.                               int *max_error,
  1048.                               struct timeval *vblank_time,
  1049.                               unsigned flags)
  1050. {
  1051.         struct drm_crtc *crtc;
  1052.  
  1053.         if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
  1054.                 DRM_ERROR("Invalid crtc %d\n", pipe);
  1055.                 return -EINVAL;
  1056.         }
  1057.  
  1058.         /* Get drm_crtc to timestamp: */
  1059.         crtc = intel_get_crtc_for_pipe(dev, pipe);
  1060.         if (crtc == NULL) {
  1061.                 DRM_ERROR("Invalid crtc %d\n", pipe);
  1062.                 return -EINVAL;
  1063.         }
  1064.  
  1065.         if (!crtc->enabled) {
  1066.                 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
  1067.                 return -EBUSY;
  1068.         }
  1069.  
  1070.         /* Helper routine in DRM core does all the work: */
  1071.         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
  1072.                                                      vblank_time, flags,
  1073.                                                      crtc,
  1074.                                                      &to_intel_crtc(crtc)->config.adjusted_mode);
  1075. }
  1076.  
  1077. static bool intel_hpd_irq_event(struct drm_device *dev,
  1078.                                 struct drm_connector *connector)
  1079. {
  1080.         enum drm_connector_status old_status;
  1081.  
  1082.         WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
  1083.         old_status = connector->status;
  1084.  
  1085.         connector->status = connector->funcs->detect(connector, false);
  1086.         if (old_status == connector->status)
  1087.                 return false;
  1088.  
  1089.         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
  1090.                       connector->base.id,
  1091.                       connector->name,
  1092.                       drm_get_connector_status_name(old_status),
  1093.                       drm_get_connector_status_name(connector->status));
  1094.  
  1095.         return true;
  1096. }
  1097.  
  1098. /*
  1099.  * Handle hotplug events outside the interrupt handler proper.
  1100.  */
  1101. #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
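/* The delay above is presumably in milliseconds: 2*60*1000 ms = two minutes. */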
  1102.  
  1103. static void i915_hotplug_work_func(struct work_struct *work)
  1104. {
  1105.         struct drm_i915_private *dev_priv =
  1106.                 container_of(work, struct drm_i915_private, hotplug_work);
  1107.         struct drm_device *dev = dev_priv->dev;
  1108.         struct drm_mode_config *mode_config = &dev->mode_config;
  1109.         struct intel_connector *intel_connector;
  1110.         struct intel_encoder *intel_encoder;
  1111.         struct drm_connector *connector;
  1112.         unsigned long irqflags;
  1113.         bool hpd_disabled = false;
  1114.         bool changed = false;
  1115.         u32 hpd_event_bits;
  1116.  
  1117.         mutex_lock(&mode_config->mutex);
  1118.         DRM_DEBUG_KMS("running encoder hotplug functions\n");
  1119.  
  1120.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1121.  
  1122.         hpd_event_bits = dev_priv->hpd_event_bits;
  1123.         dev_priv->hpd_event_bits = 0;
  1124.         list_for_each_entry(connector, &mode_config->connector_list, head) {
  1125.                 intel_connector = to_intel_connector(connector);
  1126.                 if (!intel_connector->encoder)
  1127.                         continue;
  1128.                 intel_encoder = intel_connector->encoder;
  1129.                 if (intel_encoder->hpd_pin > HPD_NONE &&
  1130.                     dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
  1131.                     connector->polled == DRM_CONNECTOR_POLL_HPD) {
  1132.                         DRM_INFO("HPD interrupt storm detected on connector %s: "
  1133.                                  "switching from hotplug detection to polling\n",
  1134.                                 connector->name);
  1135.                         dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
  1136.                         connector->polled = DRM_CONNECTOR_POLL_CONNECT
  1137.                                 | DRM_CONNECTOR_POLL_DISCONNECT;
  1138.                         hpd_disabled = true;
  1139.                 }
  1140.                 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
  1141.                         DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
  1142.                                       connector->name, intel_encoder->hpd_pin);
  1143.                 }
  1144.         }
  1145.          /* if there were no outputs to poll, poll was disabled,
  1146.           * therefore make sure it's enabled when disabling HPD on
  1147.           * some connectors */
  1148.  
  1149.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1150.  
  1151.         list_for_each_entry(connector, &mode_config->connector_list, head) {
  1152.                 intel_connector = to_intel_connector(connector);
  1153.                 if (!intel_connector->encoder)
  1154.                         continue;
  1155.                 intel_encoder = intel_connector->encoder;
  1156.                 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
  1157.                         if (intel_encoder->hot_plug)
  1158.                                 intel_encoder->hot_plug(intel_encoder);
  1159.                         if (intel_hpd_irq_event(dev, connector))
  1160.                                 changed = true;
  1161.                 }
  1162.         }
  1163.         mutex_unlock(&mode_config->mutex);
  1164.  
  1165. }
  1166.  
  1167. static void ironlake_rps_change_irq_handler(struct drm_device *dev)
  1168. {
  1169.         struct drm_i915_private *dev_priv = dev->dev_private;
  1170.         u32 busy_up, busy_down, max_avg, min_avg;
  1171.         u8 new_delay;
  1172.  
  1173.         spin_lock(&mchdev_lock);
  1174.  
  1175.         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
  1176.  
  1177.         new_delay = dev_priv->ips.cur_delay;
  1178.  
  1179.         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
  1180.         busy_up = I915_READ(RCPREVBSYTUPAVG);
  1181.         busy_down = I915_READ(RCPREVBSYTDNAVG);
  1182.         max_avg = I915_READ(RCBMAXAVG);
  1183.         min_avg = I915_READ(RCBMINAVG);
  1184.  
  1185.         /* Handle RCS change request from hw */
  1186.         if (busy_up > max_avg) {
  1187.                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
  1188.                         new_delay = dev_priv->ips.cur_delay - 1;
  1189.                 if (new_delay < dev_priv->ips.max_delay)
  1190.                         new_delay = dev_priv->ips.max_delay;
  1191.         } else if (busy_down < min_avg) {
  1192.                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
  1193.                         new_delay = dev_priv->ips.cur_delay + 1;
  1194.                 if (new_delay > dev_priv->ips.min_delay)
  1195.                         new_delay = dev_priv->ips.min_delay;
  1196.         }
  1197.  
  1198.         if (ironlake_set_drps(dev, new_delay))
  1199.                 dev_priv->ips.cur_delay = new_delay;
  1200.  
  1201.         spin_unlock(&mchdev_lock);
  1202.  
  1203.         return;
  1204. }
  1205.  
  1206. static void notify_ring(struct drm_device *dev,
  1207.                         struct intel_engine_cs *ring)
  1208. {
  1209.         if (!intel_ring_initialized(ring))
  1210.                 return;
  1211.  
  1212.         trace_i915_gem_request_complete(ring);
  1213.  
  1214.         wake_up_all(&ring->irq_queue);
  1215. }
  1216.  
  1217. static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
  1218.                             struct intel_rps_ei *rps_ei)
  1219. {
  1220.         u32 cz_ts, cz_freq_khz;
  1221.         u32 render_count, media_count;
  1222.         u32 elapsed_render, elapsed_media, elapsed_time;
  1223.         u32 residency = 0;
  1224.  
  1225.         cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
  1226.         cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
  1227.  
  1228.         render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
  1229.         media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
  1230.  
  1231.         if (rps_ei->cz_clock == 0) {
  1232.                 rps_ei->cz_clock = cz_ts;
  1233.                 rps_ei->render_c0 = render_count;
  1234.                 rps_ei->media_c0 = media_count;
  1235.  
  1236.                 return dev_priv->rps.cur_freq;
  1237.         }
  1238.  
  1239.         elapsed_time = cz_ts - rps_ei->cz_clock;
  1240.         rps_ei->cz_clock = cz_ts;
  1241.  
  1242.         elapsed_render = render_count - rps_ei->render_c0;
  1243.         rps_ei->render_c0 = render_count;
  1244.  
  1245.         elapsed_media = media_count - rps_ei->media_c0;
  1246.         rps_ei->media_c0 = media_count;
  1247.  
  1248.         /* Convert all the counters into a common unit of milliseconds */
  1249.         elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
  1250.         elapsed_render /=  cz_freq_khz;
  1251.         elapsed_media /= cz_freq_khz;
  1252.  
  1253.         /*
  1254.          * Calculate overall C0 residency percentage
  1255.          * only if elapsed time is non zero
  1256.          */
  1257.         if (elapsed_time) {
  1258.                 residency =
  1259.                         ((max(elapsed_render, elapsed_media) * 100)
  1260.                                 / elapsed_time);
  1261.         }
  1262.  
  1263.         return residency;
  1264. }
  1265.  
  1266. /**
  1267.  * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
  1268.  * busy-ness calculated from C0 counters of render & media power wells
  1269.  * @dev_priv: DRM device private
  1270.  *
  1271.  */
  1272. static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
  1273. {
  1274.         u32 residency_C0_up = 0, residency_C0_down = 0;
  1275.         u8 new_delay, adj;
  1276.  
  1277.         dev_priv->rps.ei_interrupt_count++;
  1278.  
  1279.         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
  1280.  
  1281.  
  1282.         if (dev_priv->rps.up_ei.cz_clock == 0) {
  1283.                 vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
  1284.                 vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
  1285.                 return dev_priv->rps.cur_freq;
  1286.         }
  1287.  
  1288.  
  1289.         /*
  1290.          * To down throttle, C0 residency should be less than down threshold
  1291.          * for continuous EI intervals. So calculate down EI counters
  1292.          * once in VLV_INT_COUNT_FOR_DOWN_EI
  1293.          */
  1294.         if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
  1295.  
  1296.                 dev_priv->rps.ei_interrupt_count = 0;
  1297.  
  1298.                 residency_C0_down = vlv_c0_residency(dev_priv,
  1299.                                                      &dev_priv->rps.down_ei);
  1300.         } else {
  1301.                 residency_C0_up = vlv_c0_residency(dev_priv,
  1302.                                                    &dev_priv->rps.up_ei);
  1303.         }
  1304.  
  1305.         new_delay = dev_priv->rps.cur_freq;
  1306.  
  1307.         adj = dev_priv->rps.last_adj;
  1308.         /* C0 residency is greater than UP threshold. Increase Frequency */
  1309.         if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
  1310.                 if (adj > 0)
  1311.                         adj *= 2;
  1312.                 else
  1313.                         adj = 1;
  1314.  
  1315.                 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
  1316.                         new_delay = dev_priv->rps.cur_freq + adj;
  1317.  
  1318.                 /*
  1319.                  * For better performance, jump directly
  1320.                  * to RPe if we're below it.
  1321.                  */
  1322.                 if (new_delay < dev_priv->rps.efficient_freq)
  1323.                         new_delay = dev_priv->rps.efficient_freq;
  1324.  
  1325.         } else if (!dev_priv->rps.ei_interrupt_count &&
  1326.                         (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
  1327.                 if (adj < 0)
  1328.                         adj *= 2;
  1329.                 else
  1330.                         adj = -1;
   1331.                 /*
   1332.                  * C0 residency has stayed below the down threshold for a full
   1333.                  * VLV_INT_COUNT_FOR_DOWN_EI period, so reduce the frequency.
   1334.                  */
  1335.                 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
  1336.                         new_delay = dev_priv->rps.cur_freq + adj;
  1337.         }
  1338.  
  1339.         return new_delay;
  1340. }
  1341.  
  1342. static void gen6_pm_rps_work(struct work_struct *work)
  1343. {
  1344.         struct drm_i915_private *dev_priv =
  1345.                 container_of(work, struct drm_i915_private, rps.work);
  1346.         u32 pm_iir;
  1347.         int new_delay, adj;
  1348.  
  1349.         spin_lock_irq(&dev_priv->irq_lock);
  1350.         pm_iir = dev_priv->rps.pm_iir;
  1351.         dev_priv->rps.pm_iir = 0;
  1352.         if (INTEL_INFO(dev_priv->dev)->gen >= 8)
  1353.                 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
  1354.         else {
  1355.                 /* Make sure not to corrupt PMIMR state used by ringbuffer */
  1356.                 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
  1357.         }
  1358.         spin_unlock_irq(&dev_priv->irq_lock);
  1359.  
  1360.         /* Make sure we didn't queue anything we're not going to process. */
  1361.         WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
  1362.  
  1363.         if ((pm_iir & dev_priv->pm_rps_events) == 0)
  1364.                 return;
  1365.  
  1366.         mutex_lock(&dev_priv->rps.hw_lock);
  1367.  
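         /*
          * Pick the next frequency: consecutive interrupts in the same direction
          * double the previous step (last_adj), a down-timeout snaps back to the
          * efficient/minimum frequency, and the result is clamped to the soft
          * limits further below.
          */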
  1368.         adj = dev_priv->rps.last_adj;
  1369.         if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
  1370.                 if (adj > 0)
  1371.                         adj *= 2;
  1372.                 else {
  1373.                         /* CHV needs even encode values */
  1374.                         adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
  1375.                 }
  1376.                 new_delay = dev_priv->rps.cur_freq + adj;
  1377.  
  1378.                 /*
  1379.                  * For better performance, jump directly
  1380.                  * to RPe if we're below it.
  1381.                  */
  1382.                 if (new_delay < dev_priv->rps.efficient_freq)
  1383.                         new_delay = dev_priv->rps.efficient_freq;
  1384.         } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
  1385.                 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
  1386.                         new_delay = dev_priv->rps.efficient_freq;
  1387.                 else
  1388.                         new_delay = dev_priv->rps.min_freq_softlimit;
  1389.                 adj = 0;
  1390.         } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
  1391.                 new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
  1392.         } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
  1393.                 if (adj < 0)
  1394.                         adj *= 2;
  1395.                 else {
  1396.                         /* CHV needs even encode values */
  1397.                         adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
  1398.                 }
  1399.                 new_delay = dev_priv->rps.cur_freq + adj;
  1400.         } else { /* unknown event */
  1401.                 new_delay = dev_priv->rps.cur_freq;
  1402.         }
  1403.  
  1404.         /* sysfs frequency interfaces may have snuck in while servicing the
  1405.          * interrupt
  1406.          */
  1407.         new_delay = clamp_t(int, new_delay,
  1408.                             dev_priv->rps.min_freq_softlimit,
  1409.                             dev_priv->rps.max_freq_softlimit);
  1410.  
  1411.         dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
  1412.  
   1413.         if (IS_VALLEYVIEW(dev_priv->dev))
   1414.                 valleyview_set_rps(dev_priv->dev, new_delay);
   1415.         else
   1416.                 gen6_set_rps(dev_priv->dev, new_delay);
  1417.  
  1418.         mutex_unlock(&dev_priv->rps.hw_lock);
  1419. }
  1420.  
  1421.  
  1422. /**
  1423.  * ivybridge_parity_work - Workqueue called when a parity error interrupt
  1424.  * occurred.
  1425.  * @work: workqueue struct
  1426.  *
   1427.  * Doesn't actually do anything except notify userspace. As a consequence of
   1428.  * this event, userspace should try to remap the bad rows since, statistically,
   1429.  * the same row is likely to go bad again.
  1430.  */
  1431. static void ivybridge_parity_work(struct work_struct *work)
  1432. {
  1433.         struct drm_i915_private *dev_priv =
  1434.                 container_of(work, struct drm_i915_private, l3_parity.error_work);
  1435.         u32 error_status, row, bank, subbank;
  1436.         char *parity_event[6];
  1437.         uint32_t misccpctl;
  1438.         unsigned long flags;
  1439.         uint8_t slice = 0;
  1440.  
  1441.         /* We must turn off DOP level clock gating to access the L3 registers.
  1442.          * In order to prevent a get/put style interface, acquire struct mutex
  1443.          * any time we access those registers.
  1444.          */
  1445.         mutex_lock(&dev_priv->dev->struct_mutex);
  1446.  
  1447.         /* If we've screwed up tracking, just let the interrupt fire again */
  1448.         if (WARN_ON(!dev_priv->l3_parity.which_slice))
  1449.                 goto out;
  1450.  
  1451.         misccpctl = I915_READ(GEN7_MISCCPCTL);
  1452.         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
  1453.         POSTING_READ(GEN7_MISCCPCTL);
  1454.  
  1455.         while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
  1456.                 u32 reg;
  1457.  
  1458.                 slice--;
  1459.                 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
  1460.                         break;
  1461.  
  1462.                 dev_priv->l3_parity.which_slice &= ~(1<<slice);
  1463.  
  1464.                 reg = GEN7_L3CDERRST1 + (slice * 0x200);
  1465.  
  1466.                 error_status = I915_READ(reg);
   1467.                 row = GEN7_PARITY_ERROR_ROW(error_status);
   1468.                 bank = GEN7_PARITY_ERROR_BANK(error_status);
   1469.                 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
  1470.  
  1471.                 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
  1472.                 POSTING_READ(reg);
  1473.  
  1474.                 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
  1475.                           slice, row, bank, subbank);
  1476.  
  1477.         }
  1478.  
  1479.         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
  1480.  
  1481. out:
  1482.         WARN_ON(dev_priv->l3_parity.which_slice);
  1483.         spin_lock_irqsave(&dev_priv->irq_lock, flags);
  1484.         gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
  1485.         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  1486.  
  1487.         mutex_unlock(&dev_priv->dev->struct_mutex);
  1488. }
  1489.  
  1490. static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
  1491. {
  1492.         struct drm_i915_private *dev_priv = dev->dev_private;
  1493.  
  1494.         if (!HAS_L3_DPF(dev))
  1495.                 return;
  1496.  
  1497.         spin_lock(&dev_priv->irq_lock);
  1498.         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
  1499.         spin_unlock(&dev_priv->irq_lock);
  1500.  
  1501.         iir &= GT_PARITY_ERROR(dev);
  1502.         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
  1503.                 dev_priv->l3_parity.which_slice |= 1 << 1;
  1504.  
  1505.         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
  1506.                 dev_priv->l3_parity.which_slice |= 1 << 0;
  1507.  
  1508.         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
  1509. }
  1510.  
  1511. static void ilk_gt_irq_handler(struct drm_device *dev,
  1512.                                struct drm_i915_private *dev_priv,
  1513.                                u32 gt_iir)
  1514. {
  1515.         if (gt_iir &
  1516.             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
  1517.                 notify_ring(dev, &dev_priv->ring[RCS]);
  1518.         if (gt_iir & ILK_BSD_USER_INTERRUPT)
  1519.                 notify_ring(dev, &dev_priv->ring[VCS]);
  1520. }
  1521.  
  1522. static void snb_gt_irq_handler(struct drm_device *dev,
  1523.                                struct drm_i915_private *dev_priv,
  1524.                                u32 gt_iir)
  1525. {
  1526.  
  1527.         if (gt_iir &
  1528.             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
  1529.                 notify_ring(dev, &dev_priv->ring[RCS]);
  1530.         if (gt_iir & GT_BSD_USER_INTERRUPT)
  1531.                 notify_ring(dev, &dev_priv->ring[VCS]);
  1532.         if (gt_iir & GT_BLT_USER_INTERRUPT)
  1533.                 notify_ring(dev, &dev_priv->ring[BCS]);
  1534.  
  1535.         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
  1536.                       GT_BSD_CS_ERROR_INTERRUPT |
  1537.                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
  1538.                 i915_handle_error(dev, false, "GT error interrupt 0x%08x",
  1539.                                   gt_iir);
  1540.         }
  1541.  
  1542.         if (gt_iir & GT_PARITY_ERROR(dev))
  1543.                 ivybridge_parity_error_irq_handler(dev, gt_iir);
  1544. }
  1545.  
  1546. static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
  1547. {
  1548.         if ((pm_iir & dev_priv->pm_rps_events) == 0)
  1549.                 return;
  1550.  
  1551.         spin_lock(&dev_priv->irq_lock);
  1552.         dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
  1553.         gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
  1554.         spin_unlock(&dev_priv->irq_lock);
  1555.  
  1556.         queue_work(dev_priv->wq, &dev_priv->rps.work);
  1557. }
  1558.  
  1559. static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
  1560.                                        struct drm_i915_private *dev_priv,
  1561.                                        u32 master_ctl)
  1562. {
  1563.         u32 rcs, bcs, vcs;
  1564.         uint32_t tmp = 0;
  1565.         irqreturn_t ret = IRQ_NONE;
  1566.  
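         /*
          * The GT interrupts are spread over four IIR banks: IIR(0) carries
          * render/blitter, IIR(1) the two video (VCS) engines, IIR(2) the PM/RPS
          * events and IIR(3) the video enhancement (VECS) engine. Each bank is
          * read, cleared and then dispatched below.
          */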
  1567.         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
  1568.                 tmp = I915_READ(GEN8_GT_IIR(0));
  1569.                 if (tmp) {
  1570.                         I915_WRITE(GEN8_GT_IIR(0), tmp);
  1571.                         ret = IRQ_HANDLED;
  1572.                         rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
  1573.                         bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
  1574.                         if (rcs & GT_RENDER_USER_INTERRUPT)
  1575.                                 notify_ring(dev, &dev_priv->ring[RCS]);
  1576.                         if (bcs & GT_RENDER_USER_INTERRUPT)
  1577.                                 notify_ring(dev, &dev_priv->ring[BCS]);
  1578.                 } else
  1579.                         DRM_ERROR("The master control interrupt lied (GT0)!\n");
  1580.         }
  1581.  
  1582.         if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
  1583.                 tmp = I915_READ(GEN8_GT_IIR(1));
  1584.                 if (tmp) {
  1585.                         I915_WRITE(GEN8_GT_IIR(1), tmp);
  1586.                         ret = IRQ_HANDLED;
  1587.                         vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
  1588.                         if (vcs & GT_RENDER_USER_INTERRUPT)
  1589.                                 notify_ring(dev, &dev_priv->ring[VCS]);
  1590.                         vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
  1591.                         if (vcs & GT_RENDER_USER_INTERRUPT)
  1592.                                 notify_ring(dev, &dev_priv->ring[VCS2]);
  1593.                 } else
  1594.                         DRM_ERROR("The master control interrupt lied (GT1)!\n");
  1595.         }
  1596.  
  1597.         if (master_ctl & GEN8_GT_PM_IRQ) {
  1598.                 tmp = I915_READ(GEN8_GT_IIR(2));
  1599.                 if (tmp & dev_priv->pm_rps_events) {
  1600.                         I915_WRITE(GEN8_GT_IIR(2),
  1601.                                    tmp & dev_priv->pm_rps_events);
  1602.                         ret = IRQ_HANDLED;
  1603.                         gen8_rps_irq_handler(dev_priv, tmp);
  1604.                 } else
  1605.                         DRM_ERROR("The master control interrupt lied (PM)!\n");
  1606.         }
  1607.  
  1608.         if (master_ctl & GEN8_GT_VECS_IRQ) {
  1609.                 tmp = I915_READ(GEN8_GT_IIR(3));
  1610.                 if (tmp) {
  1611.                         I915_WRITE(GEN8_GT_IIR(3), tmp);
  1612.                         ret = IRQ_HANDLED;
  1613.                         vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
  1614.                         if (vcs & GT_RENDER_USER_INTERRUPT)
  1615.                                 notify_ring(dev, &dev_priv->ring[VECS]);
  1616.                 } else
  1617.                         DRM_ERROR("The master control interrupt lied (GT3)!\n");
  1618.         }
  1619.  
  1620.         return ret;
  1621. }
  1622.  
  1623. #define HPD_STORM_DETECT_PERIOD 1000
  1624. #define HPD_STORM_THRESHOLD 5
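         /*
          * A hotplug pin is treated as "stormy" when more than HPD_STORM_THRESHOLD
          * interrupts arrive within one HPD_STORM_DETECT_PERIOD (ms) window; see
          * the second pin loop in intel_hpd_irq_handler() below.
          */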
  1625.  
  1626. static int ilk_port_to_hotplug_shift(enum port port)
  1627. {
  1628.         switch (port) {
  1629.         case PORT_A:
  1630.         case PORT_E:
  1631.         default:
  1632.                 return -1;
  1633.         case PORT_B:
  1634.                 return 0;
  1635.         case PORT_C:
  1636.                 return 8;
  1637.         case PORT_D:
  1638.                 return 16;
  1639.         }
  1640. }
  1641.  
  1642. static int g4x_port_to_hotplug_shift(enum port port)
  1643. {
  1644.         switch (port) {
  1645.         case PORT_A:
  1646.         case PORT_E:
  1647.         default:
  1648.                 return -1;
  1649.         case PORT_B:
  1650.                 return 17;
  1651.         case PORT_C:
  1652.                 return 19;
  1653.         case PORT_D:
  1654.                 return 21;
  1655.         }
  1656. }
  1657.  
  1658. static inline enum port get_port_from_pin(enum hpd_pin pin)
  1659. {
  1660.         switch (pin) {
  1661.         case HPD_PORT_B:
  1662.                 return PORT_B;
  1663.         case HPD_PORT_C:
  1664.                 return PORT_C;
  1665.         case HPD_PORT_D:
  1666.                 return PORT_D;
  1667.         default:
  1668.                 return PORT_A; /* no hpd */
  1669.         }
  1670. }
  1671.  
  1672. static inline void intel_hpd_irq_handler(struct drm_device *dev,
   1673.                                          u32 hotplug_trigger,
   1674.                                          u32 dig_hotplug_reg,
   1675.                                          const u32 *hpd)
  1676. {
  1677.         struct drm_i915_private *dev_priv = dev->dev_private;
  1678.         int i;
  1679.         enum port port;
  1680.         bool storm_detected = false;
  1681.         bool queue_dig = false, queue_hp = false;
  1682.         u32 dig_shift;
  1683.         u32 dig_port_mask = 0;
  1684.  
  1685.         if (!hotplug_trigger)
  1686.                 return;
  1687.  
  1688.         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
  1689.                          hotplug_trigger, dig_hotplug_reg);
  1690.  
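         /*
          * First pass: classify each triggered pin that maps to a digital port
          * as a long or short HPD pulse. Long pulses are recorded in
          * long_hpd_port_mask/dig_port_mask, short ones in short_hpd_port_mask
          * and masked out of hotplug_trigger so they skip the storm handling.
          */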
  1691.         spin_lock(&dev_priv->irq_lock);
  1692.         for (i = 1; i < HPD_NUM_PINS; i++) {
  1693.                 if (!(hpd[i] & hotplug_trigger))
  1694.                         continue;
  1695.  
  1696.                 port = get_port_from_pin(i);
  1697.                 if (port && dev_priv->hpd_irq_port[port]) {
  1698.                         bool long_hpd;
  1699.  
  1700.                         if (IS_G4X(dev)) {
  1701.                                 dig_shift = g4x_port_to_hotplug_shift(port);
  1702.                                 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
  1703.                         } else {
  1704.                                 dig_shift = ilk_port_to_hotplug_shift(port);
  1705.                                 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
  1706.                         }
  1707.  
  1708.                         DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
  1709.                         /* for long HPD pulses we want to have the digital queue happen,
  1710.                            but we still want HPD storm detection to function. */
  1711.                         if (long_hpd) {
  1712.                                 dev_priv->long_hpd_port_mask |= (1 << port);
  1713.                                 dig_port_mask |= hpd[i];
  1714.                         } else {
  1715.                                 /* for short HPD just trigger the digital queue */
  1716.                                 dev_priv->short_hpd_port_mask |= (1 << port);
  1717.                                 hotplug_trigger &= ~hpd[i];
  1718.                         }
  1719.                         queue_dig = true;
  1720.                 }
  1721.         }
  1722.  
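         /*
          * Second pass: HPD storm detection. Count interrupts per pin within a
          * HPD_STORM_DETECT_PERIOD window and mark the pin HPD_MARK_DISABLED
          * once the count exceeds HPD_STORM_THRESHOLD.
          */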
  1723.         for (i = 1; i < HPD_NUM_PINS; i++) {
  1724.                 if (hpd[i] & hotplug_trigger &&
  1725.                     dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
  1726.                         /*
  1727.                          * On GMCH platforms the interrupt mask bits only
  1728.                          * prevent irq generation, not the setting of the
  1729.                          * hotplug bits itself. So only WARN about unexpected
  1730.                          * interrupts on saner platforms.
  1731.                          */
  1732.                         WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
  1733.                           "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
  1734.                           hotplug_trigger, i, hpd[i]);
  1735.  
  1736.                         continue;
  1737.                 }
  1738.  
  1739.                 if (!(hpd[i] & hotplug_trigger) ||
  1740.                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
  1741.                         continue;
  1742.  
  1743.                 if (!(dig_port_mask & hpd[i])) {
   1744.                         dev_priv->hpd_event_bits |= (1 << i);
  1745.                         queue_hp = true;
  1746.                 }
  1747.  
   1748.                 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
   1749.                                    dev_priv->hpd_stats[i].hpd_last_jiffies
   1750.                                    + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
   1751.                         dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
   1752.                         dev_priv->hpd_stats[i].hpd_cnt = 0;
   1753.                         DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
   1754.                 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
   1755.                         dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
   1756.                         dev_priv->hpd_event_bits &= ~(1 << i);
   1757.                         DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
   1758.                         storm_detected = true;
  1759.                 } else {
  1760.                         dev_priv->hpd_stats[i].hpd_cnt++;
  1761.                         DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
  1762.                                       dev_priv->hpd_stats[i].hpd_cnt);
  1763.                 }
  1764.         }
  1765.  
  1766.         if (storm_detected)
  1767.                 dev_priv->display.hpd_irq_setup(dev);
  1768.         spin_unlock(&dev_priv->irq_lock);
  1769.  
  1770.         /*
  1771.          * Our hotplug handler can grab modeset locks (by calling down into the
   1772.          * fb helpers). Hence it must not be run on our own dev_priv->wq work
  1773.          * queue for otherwise the flush_work in the pageflip code will
  1774.          * deadlock.
  1775.          */
  1776.         if (queue_hp)
  1777.                 schedule_work(&dev_priv->hotplug_work);
  1778. }
  1779.  
  1780. static void gmbus_irq_handler(struct drm_device *dev)
  1781. {
  1782.         struct drm_i915_private *dev_priv = dev->dev_private;
  1783.  
  1784.         wake_up_all(&dev_priv->gmbus_wait_queue);
  1785. }
  1786.  
  1787. static void dp_aux_irq_handler(struct drm_device *dev)
  1788. {
  1789.         struct drm_i915_private *dev_priv = dev->dev_private;
  1790.  
  1791.         wake_up_all(&dev_priv->gmbus_wait_queue);
  1792. }
  1793.  
  1794. #if defined(CONFIG_DEBUG_FS)
  1795. static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
  1796.                                          uint32_t crc0, uint32_t crc1,
  1797.                                          uint32_t crc2, uint32_t crc3,
  1798.                                          uint32_t crc4)
  1799. {
  1800.         struct drm_i915_private *dev_priv = dev->dev_private;
  1801.         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
  1802.         struct intel_pipe_crc_entry *entry;
  1803.         int head, tail;
  1804.  
  1805.         spin_lock(&pipe_crc->lock);
  1806.  
  1807.         if (!pipe_crc->entries) {
  1808.                 spin_unlock(&pipe_crc->lock);
  1809.                 DRM_ERROR("spurious interrupt\n");
  1810.                 return;
  1811.         }
  1812.  
  1813.         head = pipe_crc->head;
  1814.         tail = pipe_crc->tail;
  1815.  
  1816.         if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
  1817.                 spin_unlock(&pipe_crc->lock);
  1818.                 DRM_ERROR("CRC buffer overflowing\n");
  1819.                 return;
  1820.         }
  1821.  
  1822.         entry = &pipe_crc->entries[head];
  1823.  
  1824.         entry->frame = dev->driver->get_vblank_counter(dev, pipe);
  1825.         entry->crc[0] = crc0;
  1826.         entry->crc[1] = crc1;
  1827.         entry->crc[2] = crc2;
  1828.         entry->crc[3] = crc3;
  1829.         entry->crc[4] = crc4;
  1830.  
  1831.         head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
  1832.         pipe_crc->head = head;
  1833.  
  1834.         spin_unlock(&pipe_crc->lock);
  1835.  
  1836.         wake_up_interruptible(&pipe_crc->wq);
  1837. }
  1838. #else
  1839. static inline void
  1840. display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
  1841.                              uint32_t crc0, uint32_t crc1,
  1842.                              uint32_t crc2, uint32_t crc3,
  1843.                              uint32_t crc4) {}
  1844. #endif
  1845.  
  1846.  
  1847. static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
  1848. {
  1849.         struct drm_i915_private *dev_priv = dev->dev_private;
  1850.  
  1851.         display_pipe_crc_irq_handler(dev, pipe,
  1852.                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
  1853.                                      0, 0, 0, 0);
  1854. }
  1855.  
  1856. static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
  1857. {
  1858.         struct drm_i915_private *dev_priv = dev->dev_private;
  1859.  
  1860.         display_pipe_crc_irq_handler(dev, pipe,
  1861.                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
  1862.                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
  1863.                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
  1864.                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
  1865.                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
  1866. }
  1867.  
  1868. static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
  1869. {
  1870.         struct drm_i915_private *dev_priv = dev->dev_private;
  1871.         uint32_t res1, res2;
  1872.  
  1873.         if (INTEL_INFO(dev)->gen >= 3)
  1874.                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
  1875.         else
  1876.                 res1 = 0;
  1877.  
  1878.         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
  1879.                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
  1880.         else
  1881.                 res2 = 0;
  1882.  
  1883.         display_pipe_crc_irq_handler(dev, pipe,
  1884.                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
  1885.                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
  1886.                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
  1887.                                      res1, res2);
  1888. }
  1889.  
  1890. /* The RPS events need forcewake, so we add them to a work queue and mask their
  1891.  * IMR bits until the work is done. Other interrupts can be processed without
  1892.  * the work queue. */
  1893. static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
  1894. {
  1895.         if (pm_iir & dev_priv->pm_rps_events) {
  1896.                 spin_lock(&dev_priv->irq_lock);
  1897.                 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
  1898.                 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
  1899.                 spin_unlock(&dev_priv->irq_lock);
  1900.  
  1901.                 queue_work(dev_priv->wq, &dev_priv->rps.work);
  1902.         }
  1903.  
  1904.         if (HAS_VEBOX(dev_priv->dev)) {
  1905.                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
  1906.                         notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
  1907.  
  1908.                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
  1909.                         i915_handle_error(dev_priv->dev, false,
  1910.                                           "VEBOX CS error interrupt 0x%08x",
  1911.                                           pm_iir);
  1912.                 }
  1913.         }
  1914. }
  1915.  
  1916. static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
  1917. {
  1918.         struct drm_i915_private *dev_priv = dev->dev_private;
  1919.         u32 pipe_stats[I915_MAX_PIPES] = { };
  1920.         int pipe;
  1921.  
  1922.         spin_lock(&dev_priv->irq_lock);
  1923.         for_each_pipe(pipe) {
  1924.                 int reg;
  1925.                 u32 mask, iir_bit = 0;
  1926.  
  1927.                 /*
  1928.                  * PIPESTAT bits get signalled even when the interrupt is
  1929.                  * disabled with the mask bits, and some of the status bits do
  1930.                  * not generate interrupts at all (like the underrun bit). Hence
  1931.                  * we need to be careful that we only handle what we want to
  1932.                  * handle.
  1933.                  */
  1934.                 mask = 0;
  1935.                 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
  1936.                         mask |= PIPE_FIFO_UNDERRUN_STATUS;
  1937.  
  1938.                 switch (pipe) {
  1939.                 case PIPE_A:
  1940.                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
  1941.                         break;
  1942.                 case PIPE_B:
  1943.                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
  1944.                         break;
  1945.                 case PIPE_C:
  1946.                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
  1947.                         break;
  1948.                 }
  1949.                 if (iir & iir_bit)
  1950.                         mask |= dev_priv->pipestat_irq_mask[pipe];
  1951.  
  1952.                 if (!mask)
  1953.                         continue;
  1954.  
  1955.                 reg = PIPESTAT(pipe);
  1956.                 mask |= PIPESTAT_INT_ENABLE_MASK;
  1957.                 pipe_stats[pipe] = I915_READ(reg) & mask;
  1958.  
   1959.                 /*
   1960.                  * Clear the PIPE*STAT regs before the IIR
   1961.                  */
   1962.                 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
   1963.                                         PIPESTAT_INT_STATUS_MASK))
   1964.                         I915_WRITE(reg, pipe_stats[pipe]);
   1965.         }
  1966.         spin_unlock(&dev_priv->irq_lock);
  1967.  
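         /* Second pass, outside the lock: act on the latched PIPESTAT bits. */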
   1968.         for_each_pipe(pipe) {
   1969. //              if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
   1970. //                      drm_handle_vblank(dev, pipe);
   1971.
   1972.                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
   1973. //                      intel_prepare_page_flip(dev, pipe);
   1974. //                      intel_finish_page_flip(dev, pipe);
   1975.                 }
   1976.
   1977.                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
   1978.                         i9xx_pipe_crc_irq_handler(dev, pipe);
   1979.
   1980.                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
   1981.                     intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
   1982.                         DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
   1983.         }
  1984.  
  1985.         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
  1986.                 gmbus_irq_handler(dev);
  1987. }
  1988.  
  1989. static void i9xx_hpd_irq_handler(struct drm_device *dev)
  1990. {
  1991.         struct drm_i915_private *dev_priv = dev->dev_private;
  1992.         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
  1993.  
  1994.         if (hotplug_status) {
  1995.                 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
  1996.                 /*
  1997.                  * Make sure hotplug status is cleared before we clear IIR, or else we
  1998.                  * may miss hotplug events.
  1999.                  */
  2000.                 POSTING_READ(PORT_HOTPLUG_STAT);
  2001.  
   2002.                 if (IS_G4X(dev)) {
   2003.                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
   2004.
   2005.                         intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
   2006.                 } else {
   2007.                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
   2008.
   2009.                         intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
   2010.                 }
   2011.
   2012.                 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
   2013.                     hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
   2014.                         dp_aux_irq_handler(dev);
  2015.         }
  2016. }
  2017.  
  2018. static irqreturn_t valleyview_irq_handler(int irq, void *arg)
  2019. {
  2020.         struct drm_device *dev = arg;
  2021.         struct drm_i915_private *dev_priv = dev->dev_private;
  2022.         u32 iir, gt_iir, pm_iir;
  2023.         irqreturn_t ret = IRQ_NONE;
  2024.  
  2025.         while (true) {
  2026.                 /* Find, clear, then process each source of interrupt */
  2027.  
  2028.                 gt_iir = I915_READ(GTIIR);
  2029.                 if (gt_iir)
  2030.                         I915_WRITE(GTIIR, gt_iir);
  2031.  
  2032.                 pm_iir = I915_READ(GEN6_PMIIR);
  2033.                 if (pm_iir)
  2034.                         I915_WRITE(GEN6_PMIIR, pm_iir);
  2035.  
  2036.                 iir = I915_READ(VLV_IIR);
  2037.                 if (iir) {
  2038.                         /* Consume port before clearing IIR or we'll miss events */
  2039.                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
  2040.                                 i9xx_hpd_irq_handler(dev);
  2041.                         I915_WRITE(VLV_IIR, iir);
  2042.                 }
  2043.  
  2044.                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
  2045.                         goto out;
  2046.  
  2047.                 ret = IRQ_HANDLED;
  2048.  
  2049.                 if (gt_iir)
   2050.                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
  2051.                 if (pm_iir)
  2052.                         gen6_rps_irq_handler(dev_priv, pm_iir);
  2053.                 /* Call regardless, as some status bits might not be
  2054.                  * signalled in iir */
  2055.                 valleyview_pipestat_irq_handler(dev, iir);
  2056.         }
  2057.  
  2058. out:
  2059.         return ret;
  2060. }
  2061.  
  2062. static irqreturn_t cherryview_irq_handler(int irq, void *arg)
  2063. {
  2064.         struct drm_device *dev = arg;
  2065.         struct drm_i915_private *dev_priv = dev->dev_private;
  2066.         u32 master_ctl, iir;
  2067.         irqreturn_t ret = IRQ_NONE;
  2068.  
  2069.         for (;;) {
  2070.                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
  2071.                 iir = I915_READ(VLV_IIR);
  2072.  
  2073.                 if (master_ctl == 0 && iir == 0)
  2074.                         break;
  2075.  
  2076.                 ret = IRQ_HANDLED;
  2077.  
  2078.                 I915_WRITE(GEN8_MASTER_IRQ, 0);
  2079.  
  2080.                 /* Find, clear, then process each source of interrupt */
  2081.  
  2082.                 if (iir) {
  2083.                         /* Consume port before clearing IIR or we'll miss events */
  2084.                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
  2085.                                 i9xx_hpd_irq_handler(dev);
  2086.                         I915_WRITE(VLV_IIR, iir);
  2087.                 }
  2088.  
  2089.                 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
  2090.  
  2091.                 /* Call regardless, as some status bits might not be
  2092.                  * signalled in iir */
  2093.                 valleyview_pipestat_irq_handler(dev, iir);
  2094.  
  2095.                 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
  2096.                 POSTING_READ(GEN8_MASTER_IRQ);
  2097.         }
  2098.  
  2099.         return ret;
  2100. }
  2101.  
  2102. static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
  2103. {
  2104.         struct drm_i915_private *dev_priv = dev->dev_private;
  2105.         int pipe;
  2106.         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
  2107.         u32 dig_hotplug_reg;
  2108.  
  2109.         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
  2110.         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
  2111.  
  2112.         intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
  2113.  
  2114.         if (pch_iir & SDE_AUDIO_POWER_MASK) {
  2115.                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
  2116.                                SDE_AUDIO_POWER_SHIFT);
   2117.                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
  2118.                                  port_name(port));
  2119.         }
  2120.  
  2121.         if (pch_iir & SDE_AUX_MASK)
  2122.                 dp_aux_irq_handler(dev);
  2123.  
  2124.         if (pch_iir & SDE_GMBUS)
  2125.                 gmbus_irq_handler(dev);
  2126.  
  2127.         if (pch_iir & SDE_AUDIO_HDCP_MASK)
  2128.                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
  2129.  
  2130.         if (pch_iir & SDE_AUDIO_TRANS_MASK)
  2131.                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
  2132.  
  2133.         if (pch_iir & SDE_POISON)
  2134.                 DRM_ERROR("PCH poison interrupt\n");
  2135.  
  2136.         if (pch_iir & SDE_FDI_MASK)
  2137.                 for_each_pipe(pipe)
  2138.                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
  2139.                                          pipe_name(pipe),
  2140.                                          I915_READ(FDI_RX_IIR(pipe)));
  2141.  
  2142.         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
  2143.                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
  2144.  
  2145.         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
  2146.                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
  2147.  
  2148.         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
  2149.                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
  2150.                                                           false))
  2151.                         DRM_ERROR("PCH transcoder A FIFO underrun\n");
  2152.  
  2153.         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
  2154.                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
  2155.                                                           false))
  2156.                         DRM_ERROR("PCH transcoder B FIFO underrun\n");
  2157. }
  2158.  
  2159. static void ivb_err_int_handler(struct drm_device *dev)
  2160. {
  2161.         struct drm_i915_private *dev_priv = dev->dev_private;
  2162.         u32 err_int = I915_READ(GEN7_ERR_INT);
  2163.         enum pipe pipe;
  2164.  
  2165.         if (err_int & ERR_INT_POISON)
  2166.                 DRM_ERROR("Poison interrupt\n");
  2167.  
  2168.         for_each_pipe(pipe) {
  2169.                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
  2170.                         if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
  2171.                                                                   false))
  2172.                                 DRM_ERROR("Pipe %c FIFO underrun\n",
  2173.                                                  pipe_name(pipe));
  2174.                 }
  2175.  
  2176.                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
  2177.                         if (IS_IVYBRIDGE(dev))
  2178.                                 ivb_pipe_crc_irq_handler(dev, pipe);
  2179.                         else
  2180.                                 hsw_pipe_crc_irq_handler(dev, pipe);
  2181.                 }
  2182.         }
  2183.  
  2184.         I915_WRITE(GEN7_ERR_INT, err_int);
  2185. }
  2186.  
  2187. static void cpt_serr_int_handler(struct drm_device *dev)
  2188. {
  2189.         struct drm_i915_private *dev_priv = dev->dev_private;
  2190.         u32 serr_int = I915_READ(SERR_INT);
  2191.  
  2192.         if (serr_int & SERR_INT_POISON)
  2193.                 DRM_ERROR("PCH poison interrupt\n");
  2194.  
  2195.         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
  2196.                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
  2197.                                                           false))
  2198.                         DRM_ERROR("PCH transcoder A FIFO underrun\n");
  2199.  
  2200.         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
  2201.                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
  2202.                                                           false))
  2203.                         DRM_ERROR("PCH transcoder B FIFO underrun\n");
  2204.  
  2205.         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
  2206.                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
  2207.                                                           false))
  2208.                         DRM_ERROR("PCH transcoder C FIFO underrun\n");
  2209.  
  2210.         I915_WRITE(SERR_INT, serr_int);
  2211. }
  2212.  
  2213. static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
  2214. {
  2215.         struct drm_i915_private *dev_priv = dev->dev_private;
  2216.         int pipe;
  2217.         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
  2218.         u32 dig_hotplug_reg;
  2219.  
  2220.         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
  2221.         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
  2222.  
  2223.         intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
  2224.  
  2225.         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
  2226.                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
  2227.                                SDE_AUDIO_POWER_SHIFT_CPT);
  2228.                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
  2229.                                  port_name(port));
  2230.         }
  2231.  
  2232.         if (pch_iir & SDE_AUX_MASK_CPT)
  2233.                 dp_aux_irq_handler(dev);
  2234.  
  2235.         if (pch_iir & SDE_GMBUS_CPT)
  2236.                 gmbus_irq_handler(dev);
  2237.  
  2238.         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
  2239.                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
  2240.  
  2241.         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
  2242.                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
  2243.  
  2244.         if (pch_iir & SDE_FDI_MASK_CPT)
  2245.                 for_each_pipe(pipe)
  2246.                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
  2247.                                          pipe_name(pipe),
  2248.                                          I915_READ(FDI_RX_IIR(pipe)));
  2249.  
  2250.         if (pch_iir & SDE_ERROR_CPT)
  2251.                 cpt_serr_int_handler(dev);
  2252. }
  2253.  
  2254. static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
  2255. {
  2256.         struct drm_i915_private *dev_priv = dev->dev_private;
  2257.         enum pipe pipe;
  2258.  
  2259.         if (de_iir & DE_AUX_CHANNEL_A)
  2260.                 dp_aux_irq_handler(dev);
  2261.  
  2262.         if (de_iir & DE_GSE)
  2263.                 intel_opregion_asle_intr(dev);
  2264.  
  2265.         if (de_iir & DE_POISON)
  2266.                 DRM_ERROR("Poison interrupt\n");
  2267.  
  2268.         for_each_pipe(pipe) {
  2269. //              if (de_iir & DE_PIPE_VBLANK(pipe))
  2270. //                      drm_handle_vblank(dev, pipe);
  2271.  
  2272.                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
  2273.                         if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
  2274.                                 DRM_ERROR("Pipe %c FIFO underrun\n",
  2275.                                                  pipe_name(pipe));
  2276.  
  2277.                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
  2278.                         i9xx_pipe_crc_irq_handler(dev, pipe);
  2279.  
  2280.                 /* plane/pipes map 1:1 on ilk+ */
  2281.                 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
  2282. //                      intel_prepare_page_flip(dev, pipe);
  2283. //                      intel_finish_page_flip_plane(dev, pipe);
  2284.                 }
  2285.         }
  2286.  
  2287.         /* check event from PCH */
  2288.         if (de_iir & DE_PCH_EVENT) {
  2289.                 u32 pch_iir = I915_READ(SDEIIR);
  2290.  
  2291.                 if (HAS_PCH_CPT(dev))
  2292.                         cpt_irq_handler(dev, pch_iir);
  2293.                 else
  2294.                         ibx_irq_handler(dev, pch_iir);
  2295.  
  2296.                 /* should clear PCH hotplug event before clear CPU irq */
  2297.                 I915_WRITE(SDEIIR, pch_iir);
  2298.         }
  2299.  
   2300.         if (IS_GEN5(dev) && (de_iir & DE_PCU_EVENT))
  2301.                 ironlake_rps_change_irq_handler(dev);
  2302. }
  2303.  
  2304. static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
  2305. {
  2306.         struct drm_i915_private *dev_priv = dev->dev_private;
  2307.         enum pipe pipe;
  2308.  
  2309.         if (de_iir & DE_ERR_INT_IVB)
  2310.                 ivb_err_int_handler(dev);
  2311.  
  2312.         if (de_iir & DE_AUX_CHANNEL_A_IVB)
  2313.                 dp_aux_irq_handler(dev);
  2314.  
  2315.         if (de_iir & DE_GSE_IVB)
  2316.                 intel_opregion_asle_intr(dev);
  2317.  
  2318.         for_each_pipe(pipe) {
  2319. //              if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
  2320. //                      drm_handle_vblank(dev, pipe);
  2321.  
  2322.                 /* plane/pipes map 1:1 on ilk+ */
  2323.                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
  2324. //                      intel_prepare_page_flip(dev, pipe);
  2325. //                      intel_finish_page_flip_plane(dev, pipe);
  2326.                 }
  2327.         }
  2328.  
  2329.         /* check event from PCH */
  2330.         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
  2331.                 u32 pch_iir = I915_READ(SDEIIR);
  2332.  
  2333.                 cpt_irq_handler(dev, pch_iir);
  2334.  
  2335.                 /* clear PCH hotplug event before clear CPU irq */
  2336.                 I915_WRITE(SDEIIR, pch_iir);
  2337.         }
  2338. }
  2339.  
  2340. /*
  2341.  * To handle irqs with the minimum potential races with fresh interrupts, we:
  2342.  * 1 - Disable Master Interrupt Control.
  2343.  * 2 - Find the source(s) of the interrupt.
  2344.  * 3 - Clear the Interrupt Identity bits (IIR).
  2345.  * 4 - Process the interrupt(s) that had bits set in the IIRs.
  2346.  * 5 - Re-enable Master Interrupt Control.
  2347.  */
  2348. static irqreturn_t ironlake_irq_handler(int irq, void *arg)
  2349. {
  2350.         struct drm_device *dev = arg;
  2351.         struct drm_i915_private *dev_priv = dev->dev_private;
  2352.         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
  2353.         irqreturn_t ret = IRQ_NONE;
  2354.  
  2355.         /* We get interrupts on unclaimed registers, so check for this before we
  2356.          * do any I915_{READ,WRITE}. */
  2357.         intel_uncore_check_errors(dev);
  2358.  
  2359.         /* disable master interrupt before clearing iir  */
  2360.         de_ier = I915_READ(DEIER);
  2361.         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
  2362.         POSTING_READ(DEIER);
  2363.  
  2364.         /* Disable south interrupts. We'll only write to SDEIIR once, so further
   2365.          * interrupts will be stored on its back queue, and then we'll be
  2366.          * able to process them after we restore SDEIER (as soon as we restore
  2367.          * it, we'll get an interrupt if SDEIIR still has something to process
  2368.          * due to its back queue). */
  2369.         if (!HAS_PCH_NOP(dev)) {
  2370.                 sde_ier = I915_READ(SDEIER);
  2371.                 I915_WRITE(SDEIER, 0);
  2372.                 POSTING_READ(SDEIER);
  2373.         }
  2374.  
  2375.         /* Find, clear, then process each source of interrupt */
  2376.  
  2377.         gt_iir = I915_READ(GTIIR);
  2378.         if (gt_iir) {
  2379.                 I915_WRITE(GTIIR, gt_iir);
  2380.                 ret = IRQ_HANDLED;
  2381.                 if (INTEL_INFO(dev)->gen >= 6)
  2382.                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
  2383.                 else
  2384.                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
  2385.         }
  2386.  
  2387.         de_iir = I915_READ(DEIIR);
  2388.         if (de_iir) {
  2389.                 I915_WRITE(DEIIR, de_iir);
  2390.                 ret = IRQ_HANDLED;
  2391.                 if (INTEL_INFO(dev)->gen >= 7)
  2392.                         ivb_display_irq_handler(dev, de_iir);
  2393.                 else
  2394.                         ilk_display_irq_handler(dev, de_iir);
  2395.         }
  2396.  
  2397.         if (INTEL_INFO(dev)->gen >= 6) {
  2398.                 u32 pm_iir = I915_READ(GEN6_PMIIR);
  2399.                 if (pm_iir) {
  2400.                         I915_WRITE(GEN6_PMIIR, pm_iir);
  2401.                         ret = IRQ_HANDLED;
  2402.                         gen6_rps_irq_handler(dev_priv, pm_iir);
  2403.                 }
  2404.         }
  2405.  
  2406.         I915_WRITE(DEIER, de_ier);
  2407.         POSTING_READ(DEIER);
  2408.         if (!HAS_PCH_NOP(dev)) {
  2409.                 I915_WRITE(SDEIER, sde_ier);
  2410.                 POSTING_READ(SDEIER);
  2411.         }
  2412.  
  2413.         return ret;
  2414. }
  2415.  
  2416. static irqreturn_t gen8_irq_handler(int irq, void *arg)
  2417. {
  2418.         struct drm_device *dev = arg;
  2419.         struct drm_i915_private *dev_priv = dev->dev_private;
  2420.         u32 master_ctl;
  2421.         irqreturn_t ret = IRQ_NONE;
  2422.         uint32_t tmp = 0;
  2423.         enum pipe pipe;
  2424.  
  2425.         master_ctl = I915_READ(GEN8_MASTER_IRQ);
  2426.         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
  2427.         if (!master_ctl)
  2428.                 return IRQ_NONE;
  2429.  
  2430.         I915_WRITE(GEN8_MASTER_IRQ, 0);
  2431.         POSTING_READ(GEN8_MASTER_IRQ);
  2432.  
  2433.         /* Find, clear, then process each source of interrupt */
  2434.  
  2435.         ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
  2436.  
  2437.         if (master_ctl & GEN8_DE_MISC_IRQ) {
  2438.                 tmp = I915_READ(GEN8_DE_MISC_IIR);
  2439.                 if (tmp) {
  2440.                         I915_WRITE(GEN8_DE_MISC_IIR, tmp);
  2441.                         ret = IRQ_HANDLED;
   2442.                         if (tmp & GEN8_DE_MISC_GSE)
   2443.                                 intel_opregion_asle_intr(dev);
   2444.                         else
   2445.                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
   2446.                 }
   2447.                 else
   2448.                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
  2449.         }
  2450.  
  2451.         if (master_ctl & GEN8_DE_PORT_IRQ) {
  2452.                 tmp = I915_READ(GEN8_DE_PORT_IIR);
  2453.                 if (tmp) {
  2454.                         I915_WRITE(GEN8_DE_PORT_IIR, tmp);
  2455.                         ret = IRQ_HANDLED;
   2456.                         if (tmp & GEN8_AUX_CHANNEL_A)
   2457.                                 dp_aux_irq_handler(dev);
   2458.                         else
   2459.                                 DRM_ERROR("Unexpected DE Port interrupt\n");
   2460.                 }
   2461.                 else
   2462.                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
  2463.         }
  2464.  
  2465.         for_each_pipe(pipe) {
  2466.                 uint32_t pipe_iir;
  2467.  
  2468.                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
  2469.                         continue;
  2470.  
  2471.                 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
  2472.                 if (pipe_iir) {
  2473.                         ret = IRQ_HANDLED;
  2474.                         I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
   2475. //                      if (pipe_iir & GEN8_PIPE_VBLANK)
   2476. //                              intel_pipe_handle_vblank(dev, pipe);
   2477.
   2478.                         if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
   2479. //                              intel_prepare_page_flip(dev, pipe);
   2480. //                              intel_finish_page_flip_plane(dev, pipe);
   2481.                         }
   2482.
   2483.                         if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
   2484.                                 hsw_pipe_crc_irq_handler(dev, pipe);
   2485.
   2486.                         if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
   2487.                                 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
   2488.                                                                           false))
   2489.                                         DRM_ERROR("Pipe %c FIFO underrun\n",
   2490.                                                   pipe_name(pipe));
   2491.                         }
   2492.
   2493.                         if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
   2494.                                 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
   2495.                                           pipe_name(pipe),
   2496.                                           pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
   2497.                         }
   2498.                 } else
   2499.                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
  2500.         }
  2501.  
  2502.         if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
  2503.                 /*
  2504.                  * FIXME(BDW): Assume for now that the new interrupt handling
  2505.                  * scheme also closed the SDE interrupt handling race we've seen
  2506.                  * on older pch-split platforms. But this needs testing.
  2507.                  */
  2508.                 u32 pch_iir = I915_READ(SDEIIR);
  2509.                 if (pch_iir) {
  2510.                         I915_WRITE(SDEIIR, pch_iir);
  2511.                         ret = IRQ_HANDLED;
  2512.                         cpt_irq_handler(dev, pch_iir);
  2513.                 } else
  2514.                         DRM_ERROR("The master control interrupt lied (SDE)!\n");
  2515.  
  2516.         }
  2517.  
  2518.         I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
  2519.         POSTING_READ(GEN8_MASTER_IRQ);
  2520.  
  2521.         return ret;
  2522. }
  2523.  
  2524. static void i915_error_wake_up(struct drm_i915_private *dev_priv,
  2525.                                bool reset_completed)
  2526. {
  2527.         struct intel_engine_cs *ring;
  2528.         int i;
  2529.  
  2530.         /*
  2531.          * Notify all waiters for GPU completion events that reset state has
  2532.          * been changed, and that they need to restart their wait after
  2533.          * checking for potential errors (and bail out to drop locks if there is
  2534.          * a gpu reset pending so that i915_error_work_func can acquire them).
  2535.          */
  2536.  
  2537.         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
  2538.         for_each_ring(ring, dev_priv, i)
  2539.                 wake_up_all(&ring->irq_queue);
  2540.  
  2541.  
  2542.         /*
  2543.          * Signal tasks blocked in i915_gem_wait_for_error that the pending
  2544.          * reset state is cleared.
  2545.          */
  2546.         if (reset_completed)
  2547.                 wake_up_all(&dev_priv->gpu_error.reset_queue);
  2548. }
  2549.  
  2550. /**
  2551.  * i915_error_work_func - do process context error handling work
  2552.  * @work: work struct
  2553.  *
  2554.  * Fire an error uevent so userspace can see that a hang or error
  2555.  * was detected.
  2556.  */
  2557. static void i915_error_work_func(struct work_struct *work)
  2558. {
  2559.         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
  2560.                                                     work);
  2561.         struct drm_i915_private *dev_priv =
  2562.                 container_of(error, struct drm_i915_private, gpu_error);
  2563.         struct drm_device *dev = dev_priv->dev;
  2564.         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
  2565.         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
  2566.         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
  2567.         int ret;
  2568.  
  2569.         /*
  2570.          * Note that there's only one work item which does gpu resets, so we
  2571.          * need not worry about concurrent gpu resets potentially incrementing
  2572.          * error->reset_counter twice. We only need to take care of another
  2573.          * racing irq/hangcheck declaring the gpu dead for a second time. A
  2574.          * quick check for that is good enough: schedule_work ensures the
  2575.          * correct ordering between hang detection and this work item, and since
  2576.          * the reset in-progress bit is only ever set by code outside of this
  2577.          * work we don't need to worry about any other races.
  2578.          */
  2579.         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
  2580.                 DRM_DEBUG_DRIVER("resetting chip\n");
  2581.  
  2582.                 /*
  2583.                  * All state reset _must_ be completed before we update the
  2584.                  * reset counter, for otherwise waiters might miss the reset
  2585.                  * pending state and not properly drop locks, resulting in
  2586.                  * deadlocks with the reset work.
  2587.                  */
  2588. //              ret = i915_reset(dev);
  2589.  
  2590. //       intel_display_handle_reset(dev);
  2591.  
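                /* NOTE: the i915_reset()/intel_display_handle_reset() calls
                 * above are commented out in this port, so 'ret' is read below
                 * without ever having been assigned. */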
  2592.                 if (ret == 0) {
  2593.                         /*
  2594.                          * After all the gem state is reset, increment the reset
  2595.                          * counter and wake up everyone waiting for the reset to
  2596.                          * complete.
  2597.                          *
  2598.                          * Since unlock operations are a one-sided barrier only,
  2599.                          * we need to insert a barrier here to order any seqno
  2600.                          * updates before
  2601.                          * the counter increment.
  2602.                          */
  2603.                         atomic_inc(&dev_priv->gpu_error.reset_counter);
  2604.  
  2605.                 } else {
  2606.                         atomic_set_mask(I915_WEDGED, &error->reset_counter);
  2607.                 }
  2608.  
  2609.                 /*
  2610.                  * Note: The wake_up also serves as a memory barrier so that
  2611.                  * waiters see the updated value of the reset counter atomic_t.
  2612.                  */
  2613.                 i915_error_wake_up(dev_priv, true);
  2614.         }
  2615. }
  2616.  
  2617. static void i915_report_and_clear_eir(struct drm_device *dev)
  2618. {
  2619.         struct drm_i915_private *dev_priv = dev->dev_private;
  2620.         uint32_t instdone[I915_NUM_INSTDONE_REG];
  2621.         u32 eir = I915_READ(EIR);
  2622.         int pipe, i;
  2623.  
  2624.         if (!eir)
  2625.                 return;
  2626.  
  2627.         pr_err("render error detected, EIR: 0x%08x\n", eir);
  2628.  
  2629.         i915_get_extra_instdone(dev, instdone);
  2630.  
  2631.         if (IS_G4X(dev)) {
  2632.                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
  2633.                         u32 ipeir = I915_READ(IPEIR_I965);
  2634.  
  2635.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
  2636.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
  2637.                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
  2638.                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
  2639.                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
  2640.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
  2641.                         I915_WRITE(IPEIR_I965, ipeir);
  2642.                         POSTING_READ(IPEIR_I965);
  2643.                 }
  2644.                 if (eir & GM45_ERROR_PAGE_TABLE) {
  2645.                         u32 pgtbl_err = I915_READ(PGTBL_ER);
  2646.                         pr_err("page table error\n");
  2647.                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
  2648.                         I915_WRITE(PGTBL_ER, pgtbl_err);
  2649.                         POSTING_READ(PGTBL_ER);
  2650.                 }
  2651.         }
  2652.  
  2653.         if (!IS_GEN2(dev)) {
  2654.                 if (eir & I915_ERROR_PAGE_TABLE) {
  2655.                         u32 pgtbl_err = I915_READ(PGTBL_ER);
  2656.                         pr_err("page table error\n");
  2657.                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
  2658.                         I915_WRITE(PGTBL_ER, pgtbl_err);
  2659.                         POSTING_READ(PGTBL_ER);
  2660.                 }
  2661.         }
  2662.  
  2663.         if (eir & I915_ERROR_MEMORY_REFRESH) {
  2664.                 pr_err("memory refresh error:\n");
  2665.                 for_each_pipe(pipe)
  2666.                         pr_err("pipe %c stat: 0x%08x\n",
  2667.                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
  2668.                 /* pipestat has already been acked */
  2669.         }
  2670.         if (eir & I915_ERROR_INSTRUCTION) {
  2671.                 pr_err("instruction error\n");
  2672.                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
  2673.                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
  2674.                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
  2675.                 if (INTEL_INFO(dev)->gen < 4) {
  2676.                         u32 ipeir = I915_READ(IPEIR);
  2677.  
  2678.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
  2679.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
  2680.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
  2681.                         I915_WRITE(IPEIR, ipeir);
  2682.                         POSTING_READ(IPEIR);
  2683.                 } else {
  2684.                         u32 ipeir = I915_READ(IPEIR_I965);
  2685.  
  2686.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
  2687.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
  2688.                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
  2689.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
  2690.                         I915_WRITE(IPEIR_I965, ipeir);
  2691.                         POSTING_READ(IPEIR_I965);
  2692.                 }
  2693.         }
  2694.  
  2695.         I915_WRITE(EIR, eir);
  2696.         POSTING_READ(EIR);
  2697.         eir = I915_READ(EIR);
  2698.         if (eir) {
  2699.                 /*
  2700.                  * some errors might have become stuck,
  2701.                  * mask them.
  2702.                  */
  2703.                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
  2704.                 I915_WRITE(EMR, I915_READ(EMR) | eir);
  2705.                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  2706.         }
  2707. }
  2708.  
  2709. /**
  2710.  * i915_handle_error - handle an error interrupt
  2711.  * @dev: drm device
  2712.  *
  2713.  * Do some basic checking of register state at error interrupt time and
  2714.  * dump it to the syslog.  Also call i915_capture_error_state() to make
  2715.  * sure we get a record and make it available in debugfs.  Fire a uevent
  2716.  * so userspace knows something bad happened (should trigger collection
  2717.  * of a ring dump etc.).
  2718.  */
  2719. void i915_handle_error(struct drm_device *dev, bool wedged,
  2720.                        const char *fmt, ...)
  2721. {
  2722.         struct drm_i915_private *dev_priv = dev->dev_private;
  2723.         va_list args;
  2724.         char error_msg[80];
  2725.  
  2726.         va_start(args, fmt);
  2727.         vscnprintf(error_msg, sizeof(error_msg), fmt, args);
  2728.         va_end(args);
  2729.  
  2730. //      i915_capture_error_state(dev);
  2731.         i915_report_and_clear_eir(dev);
  2732.  
  2733.         if (wedged) {
  2734.                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
  2735.                                 &dev_priv->gpu_error.reset_counter);
  2736.  
  2737.                 /*
  2738.                  * Wakeup waiting processes so that the reset work function
  2739.                  * i915_error_work_func doesn't deadlock trying to grab various
  2740.                  * locks. By bumping the reset counter first, the woken
  2741.                  * processes will see a reset in progress and back off,
  2742.                  * releasing their locks and then wait for the reset completion.
  2743.                  * We must do this for _all_ gpu waiters that might hold locks
  2744.                  * that the reset work needs to acquire.
  2745.                  *
  2746.                  * Note: The wake_up serves as the required memory barrier to
  2747.                  * ensure that the waiters see the updated value of the reset
  2748.                  * counter atomic_t.
  2749.                  */
  2750.                 i915_error_wake_up(dev_priv, false);
  2751.         }
  2752.  
  2753.         /*
  2754.          * Our reset work can grab modeset locks (since it needs to reset the
  2755.          * state of outstanding pageflips). Hence it must not be run on our own
  2756.          * dev_priv->wq work queue for otherwise the flush_work in the pageflip
  2757.          * code will deadlock.
  2758.          */
  2759.         schedule_work(&dev_priv->gpu_error.work);
  2760. }
  2761.  
  2762. #if 0
  2763. static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
  2764. {
  2765.         struct drm_i915_private *dev_priv = dev->dev_private;
  2766.         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  2767.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2768.         struct drm_i915_gem_object *obj;
  2769.         struct intel_unpin_work *work;
  2770.         unsigned long flags;
  2771.         bool stall_detected;
  2772.  
  2773.         /* Ignore early vblank irqs */
  2774.         if (intel_crtc == NULL)
  2775.                 return;
  2776.  
  2777.         spin_lock_irqsave(&dev->event_lock, flags);
  2778.         work = intel_crtc->unpin_work;
  2779.  
  2780.         if (work == NULL ||
  2781.             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
  2782.             !work->enable_stall_check) {
  2783.                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
  2784.                 spin_unlock_irqrestore(&dev->event_lock, flags);
  2785.                 return;
  2786.         }
  2787.  
  2788.         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
  2789.         obj = work->pending_flip_obj;
  2790.         if (INTEL_INFO(dev)->gen >= 4) {
  2791.                 int dspsurf = DSPSURF(intel_crtc->plane);
  2792.                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
  2793.                                         i915_gem_obj_ggtt_offset(obj);
  2794.         } else {
  2795.                 int dspaddr = DSPADDR(intel_crtc->plane);
  2796.                 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
  2797.                                                         crtc->y * crtc->primary->fb->pitches[0] +
  2798.                                                         crtc->x * crtc->primary->fb->bits_per_pixel/8);
  2799.         }
  2800.  
  2801.         spin_unlock_irqrestore(&dev->event_lock, flags);
  2802.  
  2803.         if (stall_detected) {
  2804.                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
  2805.                 intel_prepare_page_flip(dev, intel_crtc->plane);
  2806.         }
  2807. }
  2808.  
  2809. #endif
  2810.  
  2811. /* Called from drm generic code, passed 'crtc' which
  2812.  * we use as a pipe index
  2813.  */
  2814. static int i915_enable_vblank(struct drm_device *dev, int pipe)
  2815. {
  2816.         struct drm_i915_private *dev_priv = dev->dev_private;
  2817.         unsigned long irqflags;
  2818.  
  2819.         if (!i915_pipe_enabled(dev, pipe))
  2820.                 return -EINVAL;
  2821.  
  2822.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2823.         if (INTEL_INFO(dev)->gen >= 4)
  2824.                 i915_enable_pipestat(dev_priv, pipe,
  2825.                                      PIPE_START_VBLANK_INTERRUPT_STATUS);
  2826.         else
  2827.                 i915_enable_pipestat(dev_priv, pipe,
  2828.                                      PIPE_VBLANK_INTERRUPT_STATUS);
  2829.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2830.  
  2831.         return 0;
  2832. }
  2833.  
  2834. static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
  2835. {
  2836.         struct drm_i915_private *dev_priv = dev->dev_private;
  2837.         unsigned long irqflags;
  2838.         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
  2839.                                                      DE_PIPE_VBLANK(pipe);
  2840.  
  2841.         if (!i915_pipe_enabled(dev, pipe))
  2842.                 return -EINVAL;
  2843.  
  2844.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2845.         ironlake_enable_display_irq(dev_priv, bit);
  2846.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2847.  
  2848.         return 0;
  2849. }
  2850.  
  2851. static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
  2852. {
  2853.         struct drm_i915_private *dev_priv = dev->dev_private;
  2854.         unsigned long irqflags;
  2855.  
  2856.         if (!i915_pipe_enabled(dev, pipe))
  2857.                 return -EINVAL;
  2858.  
  2859.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2860.         i915_enable_pipestat(dev_priv, pipe,
  2861.                              PIPE_START_VBLANK_INTERRUPT_STATUS);
  2862.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2863.  
  2864.         return 0;
  2865. }
  2866.  
  2867. static int gen8_enable_vblank(struct drm_device *dev, int pipe)
  2868. {
  2869.         struct drm_i915_private *dev_priv = dev->dev_private;
  2870.         unsigned long irqflags;
  2871.  
  2872.         if (!i915_pipe_enabled(dev, pipe))
  2873.                 return -EINVAL;
  2874.  
  2875.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2876.         dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
  2877.         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
  2878.         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
  2879.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2880.         return 0;
  2881. }
  2882.  
  2883. /* Called from drm generic code, passed 'crtc' which
  2884.  * we use as a pipe index
  2885.  */
  2886. static void i915_disable_vblank(struct drm_device *dev, int pipe)
  2887. {
  2888.         struct drm_i915_private *dev_priv = dev->dev_private;
  2889.         unsigned long irqflags;
  2890.  
  2891.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2892.         i915_disable_pipestat(dev_priv, pipe,
  2893.                               PIPE_VBLANK_INTERRUPT_STATUS |
  2894.                               PIPE_START_VBLANK_INTERRUPT_STATUS);
  2895.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2896. }
  2897.  
  2898. static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
  2899. {
  2900.         struct drm_i915_private *dev_priv = dev->dev_private;
  2901.         unsigned long irqflags;
  2902.         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
  2903.                                                      DE_PIPE_VBLANK(pipe);
  2904.  
  2905.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2906.         ironlake_disable_display_irq(dev_priv, bit);
  2907.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2908. }
  2909.  
  2910. static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
  2911. {
  2912.         struct drm_i915_private *dev_priv = dev->dev_private;
  2913.         unsigned long irqflags;
  2914.  
  2915.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2916.         i915_disable_pipestat(dev_priv, pipe,
  2917.                               PIPE_START_VBLANK_INTERRUPT_STATUS);
  2918.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2919. }
  2920.  
  2921. static void gen8_disable_vblank(struct drm_device *dev, int pipe)
  2922. {
  2923.         struct drm_i915_private *dev_priv = dev->dev_private;
  2924.         unsigned long irqflags;
  2925.  
  2926.         if (!i915_pipe_enabled(dev, pipe))
  2927.                 return;
  2928.  
  2929.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2930.         dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
  2931.         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
  2932.         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
  2933.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2934. }
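
/*
 * Note on gen8_enable_vblank()/gen8_disable_vblank() above: they only flip
 * GEN8_PIPE_VBLANK in the cached de_irq_mask[] and write the result to the
 * per-pipe IMR under irq_lock; the POSTING_READ flushes the register write
 * before the lock is released.
 */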
  2935.  
  2936. static u32
  2937. ring_last_seqno(struct intel_engine_cs *ring)
  2938. {
  2939.         return list_entry(ring->request_list.prev,
  2940.                           struct drm_i915_gem_request, list)->seqno;
  2941. }
  2942.  
  2943. static bool
  2944. ring_idle(struct intel_engine_cs *ring, u32 seqno)
  2945. {
  2946.         return (list_empty(&ring->request_list) ||
  2947.                 i915_seqno_passed(seqno, ring_last_seqno(ring)));
  2948. }
  2949.  
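/*
 * Decode the instruction header captured in IPEHR: on gen8+ a semaphore wait
 * is identified by its opcode in bits 31:23 (0x1c), while on earlier gens the
 * header is compared against MI_SEMAPHORE_MBOX with the sync-target bits
 * masked off.
 */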
  2950. static bool
  2951. ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
  2952. {
  2953.         if (INTEL_INFO(dev)->gen >= 8) {
  2954.                 return (ipehr >> 23) == 0x1c;
  2955.         } else {
  2956.                 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
  2957.                 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
  2958.                                  MI_SEMAPHORE_REGISTER);
  2959.         }
  2960. }
  2961.  
  2962. static struct intel_engine_cs *
  2963. semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
  2964. {
  2965.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  2966.         struct intel_engine_cs *signaller;
  2967.         int i;
  2968.  
  2969.         if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
  2970.                 for_each_ring(signaller, dev_priv, i) {
  2971.                         if (ring == signaller)
  2972.                                 continue;
  2973.  
  2974.                         if (offset == signaller->semaphore.signal_ggtt[ring->id])
  2975.                                 return signaller;
  2976.                 }
  2977.         } else {
  2978.                 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
  2979.  
  2980.                 for_each_ring(signaller, dev_priv, i) {
  2981.                         if(ring == signaller)
  2982.                                 continue;
  2983.  
  2984.                         if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
  2985.                                 return signaller;
  2986.                 }
  2987.         }
  2988.  
  2989.         DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
  2990.                   ring->id, ipehr, offset);
  2991.  
  2992.         return NULL;
  2993. }
  2994.  
  2995. static struct intel_engine_cs *
  2996. semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
  2997. {
  2998.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  2999.         u32 cmd, ipehr, head;
  3000.         u64 offset = 0;
  3001.         int i, backwards;
  3002.  
  3003.         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
  3004.         if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
  3005.                 return NULL;
  3006.  
  3007.         /*
  3008.          * HEAD is likely pointing to the dword after the actual command,
  3009.          * so scan backwards until we find the MBOX. But limit it to just 3
  3010.          * or 4 dwords depending on the semaphore wait command size.
  3011.          * Note that we don't care about ACTHD here since that might
  3012.          * point at a batch, and semaphores are always emitted into the
  3013.          * ringbuffer itself.
  3014.          */
  3015.         head = I915_READ_HEAD(ring) & HEAD_ADDR;
  3016.         backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
  3017.  
  3018.         for (i = backwards; i; --i) {
  3019.                 /*
  3020.                  * Be paranoid and presume the hw has gone off into the wild -
  3021.                  * our ring is smaller than what the hardware (and hence
  3022.                  * HEAD_ADDR) allows. Also handles wrap-around.
  3023.                  */
  3024.                 head &= ring->buffer->size - 1;
  3025.  
  3026.                 /* This here seems to blow up */
  3027.                 cmd = ioread32(ring->buffer->virtual_start + head);
  3028.                 if (cmd == ipehr)
  3029.                         break;
  3030.  
  3031.                 head -= 4;
  3032.         }
  3033.  
  3034.         if (!i)
  3035.                 return NULL;
  3036.  
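        /* The dword after the semaphore-wait command holds the value being
         * waited on (hence the +1 below to get the signalling seqno); on
         * gen8+ the two following dwords hold the 64-bit offset of the
         * signaller's semaphore (low dword at +8, high dword at +12). */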
  3037.         *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
  3038.         if (INTEL_INFO(ring->dev)->gen >= 8) {
  3039.                 offset = ioread32(ring->buffer->virtual_start + head + 12);
  3040.                 offset <<= 32;
  3041.                 offset |= ioread32(ring->buffer->virtual_start + head + 8);
  3042.         }
  3043.         return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
  3044. }
  3045.  
  3046. static int semaphore_passed(struct intel_engine_cs *ring)
  3047. {
  3048.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  3049.         struct intel_engine_cs *signaller;
  3050.         u32 seqno;
  3051.  
  3052.         ring->hangcheck.deadlock++;
  3053.  
  3054.         signaller = semaphore_waits_for(ring, &seqno);
  3055.         if (signaller == NULL)
  3056.                 return -1;
  3057.  
  3058.         /* Prevent pathological recursion due to driver bugs */
  3059.         if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
  3060.                 return -1;
  3061.  
  3062.         if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
  3063.                 return 1;
  3064.  
  3065.         /* cursory check for an unkickable deadlock */
  3066.         if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
  3067.             semaphore_passed(signaller) < 0)
  3068.                 return -1;
  3069.  
  3070.         return 0;
  3071. }
  3072.  
  3073. static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
  3074. {
  3075.         struct intel_engine_cs *ring;
  3076.         int i;
  3077.  
  3078.         for_each_ring(ring, dev_priv, i)
  3079.                 ring->hangcheck.deadlock = 0;
  3080. }
  3081.  
  3082. static enum intel_ring_hangcheck_action
  3083. ring_stuck(struct intel_engine_cs *ring, u64 acthd)
  3084. {
  3085.         struct drm_device *dev = ring->dev;
  3086.         struct drm_i915_private *dev_priv = dev->dev_private;
  3087.         u32 tmp;
  3088.  
  3089.         if (acthd != ring->hangcheck.acthd) {
  3090.                 if (acthd > ring->hangcheck.max_acthd) {
  3091.                         ring->hangcheck.max_acthd = acthd;
  3092.                         return HANGCHECK_ACTIVE;
  3093.                 }
  3094.  
  3095.                 return HANGCHECK_ACTIVE_LOOP;
  3096.         }
  3097.  
  3098.         if (IS_GEN2(dev))
  3099.                 return HANGCHECK_HUNG;
  3100.  
  3101.         /* Is the chip hanging on a WAIT_FOR_EVENT?
  3102.          * If so we can simply poke the RB_WAIT bit
  3103.          * and break the hang. This should work on
  3104.          * all but the second generation chipsets.
  3105.          */
  3106.         tmp = I915_READ_CTL(ring);
  3107.         if (tmp & RING_WAIT) {
  3108.                 i915_handle_error(dev, false,
  3109.                                   "Kicking stuck wait on %s",
  3110.                                   ring->name);
  3111.                 I915_WRITE_CTL(ring, tmp);
  3112.                 return HANGCHECK_KICK;
  3113.         }
  3114.  
  3115.         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
  3116.                 switch (semaphore_passed(ring)) {
  3117.                 default:
  3118.                         return HANGCHECK_HUNG;
  3119.                 case 1:
  3120.                         i915_handle_error(dev, false,
  3121.                                           "Kicking stuck semaphore on %s",
  3122.                                           ring->name);
  3123.                         I915_WRITE_CTL(ring, tmp);
  3124.                         return HANGCHECK_KICK;
  3125.                 case 0:
  3126.                         return HANGCHECK_WAIT;
  3127.                 }
  3128.         }
  3129.  
  3130.         return HANGCHECK_HUNG;
  3131. }
  3132.  
  3133. /**
  3134.  * This is called when the chip hasn't reported back with completed
  3135.  * batchbuffers in a long time. We keep track of per-ring seqno progress and
  3136.  * if there is no progress, the hangcheck score for that ring is increased.
  3137.  * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
  3138.  * we kick the ring. If we see no progress on three subsequent calls
  3139.  * we assume the chip is wedged and try to fix it by resetting the chip.
  3140.  */
  3141. static void i915_hangcheck_elapsed(unsigned long data)
  3142. {
  3143.         struct drm_device *dev = (struct drm_device *)data;
  3144.         struct drm_i915_private *dev_priv = dev->dev_private;
  3145.         struct intel_engine_cs *ring;
  3146.         int i;
  3147.         int busy_count = 0, rings_hung = 0;
  3148.         bool stuck[I915_NUM_RINGS] = { 0 };
  3149. #define BUSY 1
  3150. #define KICK 5
  3151. #define HUNG 20
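/* These weights are added to ring->hangcheck.score below; the score decays by
 * one on ticks that show forward progress, and a ring is reported hung once
 * its score reaches HANGCHECK_SCORE_RING_HUNG. */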
  3152.  
  3153.         if (!i915.enable_hangcheck)
  3154.                 return;
  3155.  
  3156.         for_each_ring(ring, dev_priv, i) {
  3157.                 u64 acthd;
  3158.                 u32 seqno;
  3159.                 bool busy = true;
  3160.  
  3161.                 semaphore_clear_deadlocks(dev_priv);
  3162.  
  3163.                 seqno = ring->get_seqno(ring, false);
  3164.                 acthd = intel_ring_get_active_head(ring);
  3165.  
  3166.                 if (ring->hangcheck.seqno == seqno) {
  3167.                         if (ring_idle(ring, seqno)) {
  3168.                                 ring->hangcheck.action = HANGCHECK_IDLE;
  3169.  
  3170. //               if (waitqueue_active(&ring->irq_queue)) {
  3171.                                         /* Issue a wake-up to catch stuck h/w. */
  3172. //                   DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
  3173. //                         ring->name);
  3174. //                   wake_up_all(&ring->irq_queue);
  3175. //               } else
  3176.                                         busy = false;
  3177.                         } else {
  3178.                                 /* We always increment the hangcheck score
  3179.                                  * if the ring is busy and still processing
  3180.                                  * the same request, so that no single request
  3181.                                  * can run indefinitely (such as a chain of
  3182.                                  * batches). The only time we do not increment
  3183.                                  * the hangcheck score on this ring, if this
  3184.                                  * ring is in a legitimate wait for another
  3185.                                  * ring. In that case the waiting ring is a
  3186.                                  * victim and we want to be sure we catch the
  3187.                                  * right culprit. Then every time we do kick
  3188.                                  * the ring, add a small increment to the
  3189.                                  * score so that we can catch a batch that is
  3190.                                  * being repeatedly kicked and so responsible
  3191.                                  * for stalling the machine.
  3192.                                  */
  3193.                                 ring->hangcheck.action = ring_stuck(ring,
  3194.                                                                     acthd);
  3195.  
  3196.                                 switch (ring->hangcheck.action) {
  3197.                                 case HANGCHECK_IDLE:
  3198.                                 case HANGCHECK_WAIT:
  3199.                                 case HANGCHECK_ACTIVE:
  3200.                                         break;
  3201.                                 case HANGCHECK_ACTIVE_LOOP:
  3202.                                         ring->hangcheck.score += BUSY;
  3203.                                         break;
  3204.                                 case HANGCHECK_KICK:
  3205.                                         ring->hangcheck.score += KICK;
  3206.                                         break;
  3207.                                 case HANGCHECK_HUNG:
  3208.                                         ring->hangcheck.score += HUNG;
  3209.                                         stuck[i] = true;
  3210.                                         break;
  3211.                                 }
  3212.                         }
  3213.                 } else {
  3214.                         ring->hangcheck.action = HANGCHECK_ACTIVE;
  3215.  
  3216.                         /* Gradually reduce the count so that we catch DoS
  3217.                          * attempts across multiple batches.
  3218.                          */
  3219.                         if (ring->hangcheck.score > 0)
  3220.                                 ring->hangcheck.score--;
  3221.  
  3222.                         ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
  3223.                 }
  3224.  
  3225.                 ring->hangcheck.seqno = seqno;
  3226.                 ring->hangcheck.acthd = acthd;
  3227.                 busy_count += busy;
  3228.         }
  3229.  
  3230.         for_each_ring(ring, dev_priv, i) {
  3231.                 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
  3232.                         DRM_INFO("%s on %s\n",
  3233.                                   stuck[i] ? "stuck" : "no progress",
  3234.                                   ring->name);
  3235.                         rings_hung++;
  3236.                 }
  3237.         }
  3238.  
  3239. //   if (rings_hung)
  3240. //       return i915_handle_error(dev, true);
  3241.  
  3242. }
  3243. static void ibx_irq_reset(struct drm_device *dev)
  3244. {
  3245.         struct drm_i915_private *dev_priv = dev->dev_private;
  3246.  
  3247.         if (HAS_PCH_NOP(dev))
  3248.                 return;
  3249.  
  3250.         GEN5_IRQ_RESET(SDE);
  3251.  
  3252.         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
  3253.                 I915_WRITE(SERR_INT, 0xffffffff);
  3254. }
  3255.  
  3256. /*
  3257.  * SDEIER is also touched by the interrupt handler to work around missed PCH
  3258.  * interrupts. Hence we can't update it after the interrupt handler is enabled -
  3259.  * instead we unconditionally enable all PCH interrupt sources here, but then
  3260.  * only unmask them as needed with SDEIMR.
  3261.  *
  3262.  * This function needs to be called before interrupts are enabled.
  3263.  */
  3264. static void ibx_irq_pre_postinstall(struct drm_device *dev)
  3265. {
  3266.         struct drm_i915_private *dev_priv = dev->dev_private;
  3267.  
  3268.         if (HAS_PCH_NOP(dev))
  3269.                 return;
  3270.  
  3271.         WARN_ON(I915_READ(SDEIER) != 0);
  3272.         I915_WRITE(SDEIER, 0xffffffff);
  3273.         POSTING_READ(SDEIER);
  3274. }
  3275.  
  3276. static void gen5_gt_irq_reset(struct drm_device *dev)
  3277. {
  3278.         struct drm_i915_private *dev_priv = dev->dev_private;
  3279.  
  3280.         GEN5_IRQ_RESET(GT);
  3281.         if (INTEL_INFO(dev)->gen >= 6)
  3282.                 GEN5_IRQ_RESET(GEN6_PM);
  3283. }
  3284.  
  3285. /* drm_dma.h hooks
  3286. */
  3287. static void ironlake_irq_reset(struct drm_device *dev)
  3288. {
  3289.         struct drm_i915_private *dev_priv = dev->dev_private;
  3290.  
  3291.         I915_WRITE(HWSTAM, 0xffffffff);
  3292.  
  3293.         GEN5_IRQ_RESET(DE);
  3294.         if (IS_GEN7(dev))
  3295.                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
  3296.  
  3297.         gen5_gt_irq_reset(dev);
  3298.  
  3299.         ibx_irq_reset(dev);
  3300. }
  3301.  
  3302. static void valleyview_irq_preinstall(struct drm_device *dev)
  3303. {
  3304.         struct drm_i915_private *dev_priv = dev->dev_private;
  3305.         int pipe;
  3306.  
  3307.         /* VLV magic */
  3308.         I915_WRITE(VLV_IMR, 0);
  3309.         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
  3310.         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
  3311.         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
  3312.  
  3313.         /* and GT */
  3314.         I915_WRITE(GTIIR, I915_READ(GTIIR));
  3315.         I915_WRITE(GTIIR, I915_READ(GTIIR));
  3316.  
  3317.         gen5_gt_irq_reset(dev);
  3318.  
  3319.         I915_WRITE(DPINVGTT, 0xff);
  3320.  
  3321.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  3322.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  3323.         for_each_pipe(pipe)
  3324.                 I915_WRITE(PIPESTAT(pipe), 0xffff);
  3325.         I915_WRITE(VLV_IIR, 0xffffffff);
  3326.         I915_WRITE(VLV_IMR, 0xffffffff);
  3327.         I915_WRITE(VLV_IER, 0x0);
  3328.         POSTING_READ(VLV_IER);
  3329. }
  3330.  
  3331. static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
  3332. {
  3333.         GEN8_IRQ_RESET_NDX(GT, 0);
  3334.         GEN8_IRQ_RESET_NDX(GT, 1);
  3335.         GEN8_IRQ_RESET_NDX(GT, 2);
  3336.         GEN8_IRQ_RESET_NDX(GT, 3);
  3337. }
  3338.  
  3339. static void gen8_irq_reset(struct drm_device *dev)
  3340. {
  3341.         struct drm_i915_private *dev_priv = dev->dev_private;
  3342.         int pipe;
  3343.  
  3344.         I915_WRITE(GEN8_MASTER_IRQ, 0);
  3345.         POSTING_READ(GEN8_MASTER_IRQ);
  3346.  
  3347.         gen8_gt_irq_reset(dev_priv);
  3348.  
  3349.         for_each_pipe(pipe)
  3350.                 if (intel_display_power_enabled(dev_priv,
  3351.                                                 POWER_DOMAIN_PIPE(pipe)))
  3352.                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
  3353.  
  3354.         GEN5_IRQ_RESET(GEN8_DE_PORT_);
  3355.         GEN5_IRQ_RESET(GEN8_DE_MISC_);
  3356.         GEN5_IRQ_RESET(GEN8_PCU_);
  3357.  
  3358.         ibx_irq_reset(dev);
  3359. }
  3360.  
  3361. void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
  3362. {
  3363.         unsigned long irqflags;
  3364.  
  3365.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  3366.         GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
  3367.                           ~dev_priv->de_irq_mask[PIPE_B]);
  3368.         GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
  3369.                           ~dev_priv->de_irq_mask[PIPE_C]);
  3370.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  3371. }
  3372.  
  3373. static void cherryview_irq_preinstall(struct drm_device *dev)
  3374. {
  3375.         struct drm_i915_private *dev_priv = dev->dev_private;
  3376.         int pipe;
  3377.  
  3378.         I915_WRITE(GEN8_MASTER_IRQ, 0);
  3379.         POSTING_READ(GEN8_MASTER_IRQ);
  3380.  
  3381.         gen8_gt_irq_reset(dev_priv);
  3382.  
  3383.         GEN5_IRQ_RESET(GEN8_PCU_);
  3384.  
  3385.         POSTING_READ(GEN8_PCU_IIR);
  3386.  
  3387.         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
  3388.  
  3389.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  3390.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  3391.  
  3392.         for_each_pipe(pipe)
  3393.                 I915_WRITE(PIPESTAT(pipe), 0xffff);
  3394.  
  3395.         I915_WRITE(VLV_IMR, 0xffffffff);
  3396.         I915_WRITE(VLV_IER, 0x0);
  3397.         I915_WRITE(VLV_IIR, 0xffffffff);
  3398.         POSTING_READ(VLV_IIR);
  3399. }
  3400.  
  3401. static void ibx_hpd_irq_setup(struct drm_device *dev)
  3402. {
  3403.         struct drm_i915_private *dev_priv = dev->dev_private;
  3404.         struct drm_mode_config *mode_config = &dev->mode_config;
  3405.         struct intel_encoder *intel_encoder;
  3406.         u32 hotplug_irqs, hotplug, enabled_irqs = 0;
  3407.  
  3408.         if (HAS_PCH_IBX(dev)) {
  3409.                 hotplug_irqs = SDE_HOTPLUG_MASK;
  3410.                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
  3411.                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
  3412.                                 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
  3413.         } else {
  3414.                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
  3415.                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
  3416.                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
  3417.                                 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
  3418.         }
  3419.  
  3420.         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
  3421.  
  3422.         /*
  3423.          * Enable digital hotplug on the PCH, and configure the DP short pulse
  3424.          * duration to 2ms (which is the minimum in the Display Port spec)
  3425.          *
  3426.          * This register is the same on all known PCH chips.
  3427.          */
  3428.         hotplug = I915_READ(PCH_PORT_HOTPLUG);
  3429.         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
  3430.         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
  3431.         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
  3432.         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
  3433.         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  3434. }
  3435.  
  3436. static void ibx_irq_postinstall(struct drm_device *dev)
  3437. {
  3438.         struct drm_i915_private *dev_priv = dev->dev_private;
  3439.         u32 mask;
  3440.  
  3441.         if (HAS_PCH_NOP(dev))
  3442.                 return;
  3443.  
  3444.         if (HAS_PCH_IBX(dev))
  3445.                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
  3446.         else
  3447.                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
  3448.  
  3449.         GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
  3450.         I915_WRITE(SDEIMR, ~mask);
  3451. }
  3452.  
  3453. static void gen5_gt_irq_postinstall(struct drm_device *dev)
  3454. {
  3455.         struct drm_i915_private *dev_priv = dev->dev_private;
  3456.         u32 pm_irqs, gt_irqs;
  3457.  
  3458.         pm_irqs = gt_irqs = 0;
  3459.  
  3460.         dev_priv->gt_irq_mask = ~0;
  3461.         if (HAS_L3_DPF(dev)) {
  3462.                 /* L3 parity interrupt is always unmasked. */
  3463.                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
  3464.                 gt_irqs |= GT_PARITY_ERROR(dev);
  3465.         }
  3466.  
  3467.         gt_irqs |= GT_RENDER_USER_INTERRUPT;
  3468.         if (IS_GEN5(dev)) {
  3469.                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
  3470.                            ILK_BSD_USER_INTERRUPT;
  3471.         } else {
  3472.                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
  3473.         }
  3474.  
  3475.         GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
  3476.  
  3477.         if (INTEL_INFO(dev)->gen >= 6) {
  3478.                 pm_irqs |= dev_priv->pm_rps_events;
  3479.  
  3480.                 if (HAS_VEBOX(dev))
  3481.                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
  3482.  
  3483.                 dev_priv->pm_irq_mask = 0xffffffff;
  3484.                 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
  3485.         }
  3486. }
  3487.  
  3488. static int ironlake_irq_postinstall(struct drm_device *dev)
  3489. {
  3490.         unsigned long irqflags;
  3491.         struct drm_i915_private *dev_priv = dev->dev_private;
  3492.         u32 display_mask, extra_mask;
  3493.  
  3494.         if (INTEL_INFO(dev)->gen >= 7) {
  3495.                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
  3496.                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
  3497.                                 DE_PLANEB_FLIP_DONE_IVB |
  3498.                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
  3499.                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
  3500.                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
  3501.         } else {
  3502.                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
  3503.                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
  3504.                                 DE_AUX_CHANNEL_A |
  3505.                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
  3506.                                 DE_POISON);
  3507.                 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
  3508.                                 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
  3509.         }
  3510.  
  3511.         dev_priv->irq_mask = ~display_mask;
  3512.  
  3513.         I915_WRITE(HWSTAM, 0xeffe);
  3514.  
  3515.         ibx_irq_pre_postinstall(dev);
  3516.  
  3517.         GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
  3518.  
  3519.         gen5_gt_irq_postinstall(dev);
  3520.  
  3521.         ibx_irq_postinstall(dev);
  3522.  
  3523.         if (IS_IRONLAKE_M(dev)) {
  3524.                 /* Enable PCU event interrupts
  3525.                  *
  3526.                  * spinlocking not required here for correctness since interrupt
  3527.                  * setup is guaranteed to run in single-threaded context. But we
  3528.                  * need it to make the assert_spin_locked happy. */
  3529.                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  3530.                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
  3531.                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  3532.         }
  3533.  
  3534.         return 0;
  3535. }
  3536.  
  3537. static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
  3538. {
  3539.         u32 pipestat_mask;
  3540.         u32 iir_mask;
  3541.  
  3542.         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
  3543.                         PIPE_FIFO_UNDERRUN_STATUS;
  3544.  
  3545.         I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
  3546.         I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
  3547.         POSTING_READ(PIPESTAT(PIPE_A));
  3548.  
  3549.         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
  3550.                         PIPE_CRC_DONE_INTERRUPT_STATUS;
  3551.  
  3552.         i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
  3553.                                                PIPE_GMBUS_INTERRUPT_STATUS);
  3554.         i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
  3555.  
  3556.         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
  3557.                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3558.                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
  3559.         dev_priv->irq_mask &= ~iir_mask;
  3560.  
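
        /* VLV_IIR is written twice here, presumably for the same reason the
         * GEN8_IRQ_FINI macros later in this file clear IIR twice: a second
         * event may already be latched behind the one being acked. */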
  3561.         I915_WRITE(VLV_IIR, iir_mask);
  3562.         I915_WRITE(VLV_IIR, iir_mask);
  3563.         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  3564.         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
  3565.         POSTING_READ(VLV_IER);
  3566. }
  3567.  
  3568. static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
  3569. {
  3570.         u32 pipestat_mask;
  3571.         u32 iir_mask;
  3572.  
  3573.         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
  3574.                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3575.                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
  3576.  
  3577.         dev_priv->irq_mask |= iir_mask;
  3578.         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
  3579.         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  3580.         I915_WRITE(VLV_IIR, iir_mask);
  3581.         I915_WRITE(VLV_IIR, iir_mask);
  3582.         POSTING_READ(VLV_IIR);
  3583.  
  3584.         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
  3585.                         PIPE_CRC_DONE_INTERRUPT_STATUS;
  3586.  
  3587.         i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
  3588.                                                 PIPE_GMBUS_INTERRUPT_STATUS);
  3589.         i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
  3590.  
  3591.         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
  3592.                         PIPE_FIFO_UNDERRUN_STATUS;
  3593.         I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
  3594.         I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
  3595.         POSTING_READ(PIPESTAT(PIPE_A));
  3596. }
  3597.  
  3598. void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
  3599. {
  3600.         assert_spin_locked(&dev_priv->irq_lock);
  3601.  
  3602.         if (dev_priv->display_irqs_enabled)
  3603.                 return;
  3604.  
  3605.         dev_priv->display_irqs_enabled = true;
  3606.  
  3607.         if (dev_priv->dev->irq_enabled)
  3608.                 valleyview_display_irqs_install(dev_priv);
  3609. }
  3610.  
  3611. void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
  3612. {
  3613.         assert_spin_locked(&dev_priv->irq_lock);
  3614.  
  3615.         if (!dev_priv->display_irqs_enabled)
  3616.                 return;
  3617.  
  3618.         dev_priv->display_irqs_enabled = false;
  3619.  
  3620.         if (dev_priv->dev->irq_enabled)
  3621.                 valleyview_display_irqs_uninstall(dev_priv);
  3622. }
  3623.  
  3624. static int valleyview_irq_postinstall(struct drm_device *dev)
  3625. {
  3626.         struct drm_i915_private *dev_priv = dev->dev_private;
  3627.         unsigned long irqflags;
  3628.  
  3629.         dev_priv->irq_mask = ~0;
  3630.  
  3631.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  3632.         POSTING_READ(PORT_HOTPLUG_EN);
  3633.  
  3634.         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  3635.         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
  3636.         I915_WRITE(VLV_IIR, 0xffffffff);
  3637.         POSTING_READ(VLV_IER);
  3638.  
  3639.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  3640.          * just to make the assert_spin_locked check happy. */
  3641.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  3642.         if (dev_priv->display_irqs_enabled)
  3643.                 valleyview_display_irqs_install(dev_priv);
  3644.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  3645.  
  3646.         I915_WRITE(VLV_IIR, 0xffffffff);
  3647.         I915_WRITE(VLV_IIR, 0xffffffff);
  3648.  
  3649.         gen5_gt_irq_postinstall(dev);
  3650.  
  3651.         /* ack & enable invalid PTE error interrupts */
  3652. #if 0 /* FIXME: add support to irq handler for checking these bits */
  3653.         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
  3654.         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
  3655. #endif
  3656.  
  3657.         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
  3658.  
  3659.         return 0;
  3660. }
  3661.  
  3662. static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
  3663. {
  3664.         int i;
  3665.  
  3666.         /* These are interrupts we'll toggle with the ring mask register */
  3667.         uint32_t gt_interrupts[] = {
  3668.                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
  3669.                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
  3670.                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
  3671.                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
  3672.                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
  3673.                 0,
  3674.                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
  3675.                 };
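        /* The third entry is left at 0: that bank is used for the PM/RPS
         * interrupts, which are kept fully masked here (pm_irq_mask is reset
         * to 0xffffffff below). */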
  3676.  
  3677.         for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
  3678.                 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
  3679.  
  3680.         dev_priv->pm_irq_mask = 0xffffffff;
  3681. }
  3682.  
  3683. static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
  3684. {
  3685.         struct drm_device *dev = dev_priv->dev;
  3686.         uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
  3687.                 GEN8_PIPE_CDCLK_CRC_DONE |
  3688.                 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
  3689.         uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
  3690.                 GEN8_PIPE_FIFO_UNDERRUN;
  3691.         int pipe;
  3692.         dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
  3693.         dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
  3694.         dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
  3695.  
  3696.         for_each_pipe(pipe)
  3697.                 if (intel_display_power_enabled(dev_priv,
  3698.                                 POWER_DOMAIN_PIPE(pipe)))
  3699.                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
  3700.                                           dev_priv->de_irq_mask[pipe],
  3701.                                           de_pipe_enables);
  3702.  
  3703.         GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
  3704. }
  3705.  
  3706. static int gen8_irq_postinstall(struct drm_device *dev)
  3707. {
  3708.         struct drm_i915_private *dev_priv = dev->dev_private;
  3709.  
  3710.         ibx_irq_pre_postinstall(dev);
  3711.  
  3712.         gen8_gt_irq_postinstall(dev_priv);
  3713.         gen8_de_irq_postinstall(dev_priv);
  3714.  
  3715.         ibx_irq_postinstall(dev);
  3716.  
  3717.         I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
  3718.         POSTING_READ(GEN8_MASTER_IRQ);
  3719.  
  3720.         return 0;
  3721. }
  3722.  
  3723. static int cherryview_irq_postinstall(struct drm_device *dev)
  3724. {
  3725.         struct drm_i915_private *dev_priv = dev->dev_private;
  3726.         u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
  3727.                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3728.                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  3729.                 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
  3730.         u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
  3731.                 PIPE_CRC_DONE_INTERRUPT_STATUS;
  3732.         unsigned long irqflags;
  3733.         int pipe;
  3734.  
  3735.         /*
  3736.          * Leave vblank interrupts masked initially.  enable/disable will
  3737.          * toggle them based on usage.
  3738.          */
  3739.         dev_priv->irq_mask = ~enable_mask;
  3740.  
  3741.         for_each_pipe(pipe)
  3742.                 I915_WRITE(PIPESTAT(pipe), 0xffff);
  3743.  
  3744.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  3745.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
  3746.         for_each_pipe(pipe)
  3747.                 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
  3748.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  3749.  
  3750.         I915_WRITE(VLV_IIR, 0xffffffff);
  3751.         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  3752.         I915_WRITE(VLV_IER, enable_mask);
  3753.  
  3754.         gen8_gt_irq_postinstall(dev_priv);
  3755.  
  3756.         I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
  3757.         POSTING_READ(GEN8_MASTER_IRQ);
  3758.  
  3759.         return 0;
  3760. }
  3761.  
  3762. static void gen8_irq_uninstall(struct drm_device *dev)
  3763. {
  3764.         struct drm_i915_private *dev_priv = dev->dev_private;
  3765.  
  3766.         if (!dev_priv)
  3767.                 return;
  3768.  
  3769.         gen8_irq_reset(dev);
  3770. }
  3771.  
  3772. static void valleyview_irq_uninstall(struct drm_device *dev)
  3773. {
  3774.         struct drm_i915_private *dev_priv = dev->dev_private;
  3775.         unsigned long irqflags;
  3776.         int pipe;
  3777.  
  3778.         if (!dev_priv)
  3779.                 return;
  3780.  
  3781.         I915_WRITE(VLV_MASTER_IER, 0);
  3782.  
  3783.         for_each_pipe(pipe)
  3784.                 I915_WRITE(PIPESTAT(pipe), 0xffff);
  3785.  
  3786.         I915_WRITE(HWSTAM, 0xffffffff);
  3787.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  3788.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  3789.  
  3790.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  3791.         if (dev_priv->display_irqs_enabled)
  3792.                 valleyview_display_irqs_uninstall(dev_priv);
  3793.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  3794.  
  3795.         dev_priv->irq_mask = 0;
  3796.  
  3797.         I915_WRITE(VLV_IIR, 0xffffffff);
  3798.         I915_WRITE(VLV_IMR, 0xffffffff);
  3799.         I915_WRITE(VLV_IER, 0x0);
  3800.         POSTING_READ(VLV_IER);
  3801. }
  3802.  
  3803. static void cherryview_irq_uninstall(struct drm_device *dev)
  3804. {
  3805.         struct drm_i915_private *dev_priv = dev->dev_private;
  3806.         int pipe;
  3807.  
  3808.         if (!dev_priv)
  3809.                 return;
  3810.  
  3811.         I915_WRITE(GEN8_MASTER_IRQ, 0);
  3812.         POSTING_READ(GEN8_MASTER_IRQ);
  3813.  
  3814. #define GEN8_IRQ_FINI_NDX(type, which)                          \
  3815. do {                                                            \
  3816.         I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff);       \
  3817.         I915_WRITE(GEN8_##type##_IER(which), 0);                \
  3818.         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);       \
  3819.         POSTING_READ(GEN8_##type##_IIR(which));                 \
  3820.         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);       \
  3821. } while (0)
  3822.  
  3823. #define GEN8_IRQ_FINI(type)                             \
  3824. do {                                                    \
  3825.         I915_WRITE(GEN8_##type##_IMR, 0xffffffff);      \
  3826.         I915_WRITE(GEN8_##type##_IER, 0);               \
  3827.         I915_WRITE(GEN8_##type##_IIR, 0xffffffff);      \
  3828.         POSTING_READ(GEN8_##type##_IIR);                \
  3829.         I915_WRITE(GEN8_##type##_IIR, 0xffffffff);      \
  3830. } while (0)
  3831.  
  3832.         GEN8_IRQ_FINI_NDX(GT, 0);
  3833.         GEN8_IRQ_FINI_NDX(GT, 1);
  3834.         GEN8_IRQ_FINI_NDX(GT, 2);
  3835.         GEN8_IRQ_FINI_NDX(GT, 3);
  3836.  
  3837.         GEN8_IRQ_FINI(PCU);
  3838.  
  3839. #undef GEN8_IRQ_FINI
  3840. #undef GEN8_IRQ_FINI_NDX
  3841.  
  3842.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  3843.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  3844.  
  3845.         for_each_pipe(pipe)
  3846.                 I915_WRITE(PIPESTAT(pipe), 0xffff);
  3847.  
  3848.         I915_WRITE(VLV_IMR, 0xffffffff);
  3849.         I915_WRITE(VLV_IER, 0x0);
  3850.         I915_WRITE(VLV_IIR, 0xffffffff);
  3851.         POSTING_READ(VLV_IIR);
  3852. }
  3853.  
  3854. static void ironlake_irq_uninstall(struct drm_device *dev)
  3855. {
  3856.         struct drm_i915_private *dev_priv = dev->dev_private;
  3857.  
  3858.         if (!dev_priv)
  3859.                 return;
  3860.  
  3861.         ironlake_irq_reset(dev);
  3862. }
  3863.  
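        /*
         * The i8xx (gen2) interrupt code below is compiled out in this port;
         * intel_irq_init() correspondingly installs no IRQ hooks for gen2
         * (see the empty INTEL_INFO(dev)->gen == 2 branch there).
         */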
  3864. #if 0
  3865.  
  3866. static void i8xx_irq_preinstall(struct drm_device * dev)
  3867. {
  3868.         struct drm_i915_private *dev_priv = dev->dev_private;
  3869.         int pipe;
  3870.  
  3871.         for_each_pipe(pipe)
  3872.                 I915_WRITE(PIPESTAT(pipe), 0);
  3873.         I915_WRITE16(IMR, 0xffff);
  3874.         I915_WRITE16(IER, 0x0);
  3875.         POSTING_READ16(IER);
  3876. }
  3877.  
  3878. static int i8xx_irq_postinstall(struct drm_device *dev)
  3879. {
  3880.         struct drm_i915_private *dev_priv = dev->dev_private;
  3881.         unsigned long irqflags;
  3882.  
  3883.         I915_WRITE16(EMR,
  3884.                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  3885.  
  3886.         /* Unmask the interrupts that we always want on. */
  3887.         dev_priv->irq_mask =
  3888.                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3889.                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  3890.                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  3891.                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  3892.                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  3893.         I915_WRITE16(IMR, dev_priv->irq_mask);
  3894.  
  3895.         I915_WRITE16(IER,
  3896.                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  3897.                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  3898.                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
  3899.                      I915_USER_INTERRUPT);
  3900.         POSTING_READ16(IER);
  3901.  
  3902.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  3903.          * just to make the assert_spin_locked check happy. */
  3904.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  3905.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
  3906.         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
  3907.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  3908.  
  3909.         return 0;
  3910. }
  3911.  
  3912. /*
  3913.  * Returns true when a page flip has completed.
  3914.  */
  3915. static bool i8xx_handle_vblank(struct drm_device *dev,
  3916.                                int plane, int pipe, u32 iir)
  3917. {
  3918.         struct drm_i915_private *dev_priv = dev->dev_private;
  3919.         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
  3920.  
  3921. //   if (!drm_handle_vblank(dev, pipe))
  3922.         return false;   /* drm_handle_vblank() is not wired up in this port */
  3923.  
  3924.         if ((iir & flip_pending) == 0)
  3925.                 return false;
  3926.  
  3927. //   intel_prepare_page_flip(dev, pipe);
  3928.  
  3929.         /* We detect FlipDone by looking for the change in PendingFlip from '1'
  3930.          * to '0' on the following vblank, i.e. IIR has the Pendingflip
  3931.          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
  3932.          * the flip is completed (no longer pending). Since this doesn't raise
  3933.          * an interrupt per se, we watch for the change at vblank.
  3934.          */
  3935.         if (I915_READ16(ISR) & flip_pending)
  3936.                 return false;
  3937.  
  3938.         intel_finish_page_flip(dev, pipe);
  3939.  
  3940.         return true;
  3941. }
  3942.  
  3943. static irqreturn_t i8xx_irq_handler(int irq, void *arg)
  3944. {
  3945.         struct drm_device *dev = arg;
  3946.         struct drm_i915_private *dev_priv = dev->dev_private;
  3947.         u16 iir, new_iir;
  3948.         u32 pipe_stats[2];
  3949.         unsigned long irqflags;
  3950.         int pipe;
  3951.         u16 flip_mask =
  3952.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  3953.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  3954.  
  3955.         iir = I915_READ16(IIR);
  3956.         if (iir == 0)
  3957.                 return IRQ_NONE;
  3958.  
  3959.         while (iir & ~flip_mask) {
  3960.                 /* Can't rely on pipestat interrupt bit in iir as it might
  3961.                  * have been cleared after the pipestat interrupt was received.
  3962.                  * It doesn't set the bit in iir again, but it still produces
  3963.                  * interrupts (for non-MSI).
  3964.                  */
  3965.                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  3966.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  3967.                         i915_handle_error(dev, false,
  3968.                                           "Command parser error, iir 0x%08x",
  3969.                                           iir);
  3970.  
  3971.                 for_each_pipe(pipe) {
  3972.                         int reg = PIPESTAT(pipe);
  3973.                         pipe_stats[pipe] = I915_READ(reg);
  3974.  
  3975.                         /*
  3976.                          * Clear the PIPE*STAT regs before the IIR
  3977.                          */
  3978.                         if (pipe_stats[pipe] & 0x8000ffff)
  3979.                                 I915_WRITE(reg, pipe_stats[pipe]);
  3980.                 }
  3981.                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  3982.  
  3983.                 I915_WRITE16(IIR, iir & ~flip_mask);
  3984.                 new_iir = I915_READ16(IIR); /* Flush posted writes */
  3985.  
  3986.                 i915_update_dri1_breadcrumb(dev);
  3987.  
  3988.                 if (iir & I915_USER_INTERRUPT)
  3989.                         notify_ring(dev, &dev_priv->ring[RCS]);
  3990.  
  3991.                 for_each_pipe(pipe) {
  3992.                         int plane = pipe;
  3993.                         if (HAS_FBC(dev))
  3994.                                 plane = !plane;
  3995.  
  3996.                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
  3997.                             i8xx_handle_vblank(dev, plane, pipe, iir))
  3998.                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
  3999.  
  4000.                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
  4001.                                 i9xx_pipe_crc_irq_handler(dev, pipe);
  4002.  
  4003.                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
  4004.                             intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
  4005.                                 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
  4006.                 }
  4007.  
  4008.                 iir = new_iir;
  4009.         }
  4010.  
  4011.         return IRQ_HANDLED;
  4012. }
  4013.  
  4014. static void i8xx_irq_uninstall(struct drm_device * dev)
  4015. {
  4016.         struct drm_i915_private *dev_priv = dev->dev_private;
  4017.         int pipe;
  4018.  
  4019.         for_each_pipe(pipe) {
  4020.                 /* Clear enable bits; then clear status bits */
  4021.                 I915_WRITE(PIPESTAT(pipe), 0);
  4022.                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
  4023.         }
  4024.         I915_WRITE16(IMR, 0xffff);
  4025.         I915_WRITE16(IER, 0x0);
  4026.         I915_WRITE16(IIR, I915_READ16(IIR));
  4027. }
  4028.  
  4029. #endif
  4030.  
  4031. static void i915_irq_preinstall(struct drm_device * dev)
  4032. {
  4033.         struct drm_i915_private *dev_priv = dev->dev_private;
  4034.         int pipe;
  4035.  
  4036.         if (I915_HAS_HOTPLUG(dev)) {
  4037.                 I915_WRITE(PORT_HOTPLUG_EN, 0);
  4038.                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  4039.         }
  4040.  
  4041.         I915_WRITE16(HWSTAM, 0xeffe);
  4042.         for_each_pipe(pipe)
  4043.                 I915_WRITE(PIPESTAT(pipe), 0);
  4044.         I915_WRITE(IMR, 0xffffffff);
  4045.         I915_WRITE(IER, 0x0);
  4046.         POSTING_READ(IER);
  4047. }
  4048.  
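        /*
         * Roughly: IMR controls which events get latched into IIR, IER
         * controls which latched bits actually raise an interrupt, and EMR
         * does the equivalent masking for the hardware error sources.
         */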
  4049. static int i915_irq_postinstall(struct drm_device *dev)
  4050. {
  4051.         struct drm_i915_private *dev_priv = dev->dev_private;
  4052.         u32 enable_mask;
  4053.         unsigned long irqflags;
  4054.  
  4055.         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  4056.  
  4057.         /* Unmask the interrupts that we always want on. */
  4058.         dev_priv->irq_mask =
  4059.                 ~(I915_ASLE_INTERRUPT |
  4060.                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  4061.                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  4062.                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  4063.                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  4064.                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  4065.  
  4066.         enable_mask =
  4067.                 I915_ASLE_INTERRUPT |
  4068.                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  4069.                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  4070.                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
  4071.                 I915_USER_INTERRUPT;
  4072.  
  4073.         if (I915_HAS_HOTPLUG(dev)) {
  4074.                 I915_WRITE(PORT_HOTPLUG_EN, 0);
  4075.                 POSTING_READ(PORT_HOTPLUG_EN);
  4076.  
  4077.                 /* Enable in IER... */
  4078.                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
  4079.                 /* and unmask in IMR */
  4080.                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
  4081.         }
  4082.  
  4083.         I915_WRITE(IMR, dev_priv->irq_mask);
  4084.         I915_WRITE(IER, enable_mask);
  4085.         POSTING_READ(IER);
  4086.  
  4087.         i915_enable_asle_pipestat(dev);
  4088.  
  4089.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  4090.          * just to make the assert_spin_locked check happy. */
  4091.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  4092.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
  4093.         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
  4094.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  4095.  
  4096.         return 0;
  4097. }
  4098.  
  4099. /*
  4100.  * Returns true when a page flip has completed.
  4101.  */
  4102. static bool i915_handle_vblank(struct drm_device *dev,
  4103.                                int plane, int pipe, u32 iir)
  4104. {
  4105.         struct drm_i915_private *dev_priv = dev->dev_private;
  4106.         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
  4107.  
  4108. //   if (!drm_handle_vblank(dev, pipe))
  4109.         return false;   /* drm_handle_vblank() is not wired up in this port */
  4110.  
  4111.         if ((iir & flip_pending) == 0)
  4112.                 return false;
  4113.  
  4114. //   intel_prepare_page_flip(dev, plane);
  4115.  
  4116.         /* We detect FlipDone by looking for the change in PendingFlip from '1'
  4117.          * to '0' on the following vblank, i.e. IIR has the Pendingflip
  4118.          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
  4119.          * the flip is completed (no longer pending). Since this doesn't raise
  4120.          * an interrupt per se, we watch for the change at vblank.
  4121.          */
  4122.         if (I915_READ(ISR) & flip_pending)
  4123.                 return false;
  4124.  
  4125.         intel_finish_page_flip(dev, pipe);
  4126.  
  4127.         return true;
  4128. }
  4129.  
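        /*
         * Same loop shape as the other legacy handlers: snapshot IIR, clear
         * the PIPE*STAT bits under irq_lock before acking IIR, handle the
         * latched events, then keep looping while the re-read IIR still has
         * non-flip bits set.
         */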
  4130. static irqreturn_t i915_irq_handler(int irq, void *arg)
  4131. {
  4132.         struct drm_device *dev = arg;
  4133.         struct drm_i915_private *dev_priv = dev->dev_private;
  4134.         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
  4135.         unsigned long irqflags;
  4136.         u32 flip_mask =
  4137.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  4138.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  4139.         int pipe, ret = IRQ_NONE;
  4140.  
  4141.         iir = I915_READ(IIR);
  4142.         do {
  4143.                 bool irq_received = (iir & ~flip_mask) != 0;
  4144.                 bool blc_event = false;
  4145.  
  4146.                 /* Can't rely on pipestat interrupt bit in iir as it might
  4147.                  * have been cleared after the pipestat interrupt was received.
  4148.                  * It doesn't set the bit in iir again, but it still produces
  4149.                  * interrupts (for non-MSI).
  4150.                  */
  4151.                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  4152.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  4153.                         i915_handle_error(dev, false,
  4154.                                           "Command parser error, iir 0x%08x",
  4155.                                           iir);
  4156.  
  4157.                 for_each_pipe(pipe) {
  4158.                         int reg = PIPESTAT(pipe);
  4159.                         pipe_stats[pipe] = I915_READ(reg);
  4160.  
  4161.                         /* Clear the PIPE*STAT regs before the IIR */
  4162.                         if (pipe_stats[pipe] & 0x8000ffff) {
  4163.                                 I915_WRITE(reg, pipe_stats[pipe]);
  4164.                                 irq_received = true;
  4165.                         }
  4166.                 }
  4167.                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  4168.  
  4169.                 if (!irq_received)
  4170.                         break;
  4171.  
  4172.                 /* Consume port.  Then clear IIR or we'll miss events */
  4173.                 if (I915_HAS_HOTPLUG(dev) &&
  4174.                     iir & I915_DISPLAY_PORT_INTERRUPT)
  4175.                         i9xx_hpd_irq_handler(dev);
  4176.  
  4177.                 I915_WRITE(IIR, iir & ~flip_mask);
  4178.                 new_iir = I915_READ(IIR); /* Flush posted writes */
  4179.  
  4180.                 if (iir & I915_USER_INTERRUPT)
  4181.                         notify_ring(dev, &dev_priv->ring[RCS]);
  4182.  
  4183.                 for_each_pipe(pipe) {
  4184.                         int plane = pipe;
  4185.                         if (HAS_FBC(dev))
  4186.                                 plane = !plane;
  4187.  
  4188.                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
  4189.                             i915_handle_vblank(dev, plane, pipe, iir))
  4190.                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
  4191.  
  4192.                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
  4193.                                 blc_event = true;
  4194.  
  4195.                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
  4196.                                 i9xx_pipe_crc_irq_handler(dev, pipe);
  4197.  
  4198.                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
  4199.                             intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
  4200.                                 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
  4201.                 }
  4202.  
  4203.                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
  4204.                         intel_opregion_asle_intr(dev);
  4205.  
  4206.                 /* With MSI, interrupts are only generated when iir
  4207.                  * transitions from zero to nonzero.  If another bit got
  4208.                  * set while we were handling the existing iir bits, then
  4209.                  * we would never get another interrupt.
  4210.                  *
  4211.                  * This is fine on non-MSI as well, as if we hit this path
  4212.                  * we avoid exiting the interrupt handler only to generate
  4213.                  * another one.
  4214.                  *
  4215.                  * Note that for MSI this could cause a stray interrupt report
  4216.                  * if an interrupt landed in the time between writing IIR and
  4217.                  * the posting read.  This should be rare enough to never
  4218.                  * trigger the 99% of 100,000 interrupts test for disabling
  4219.                  * stray interrupts.
  4220.                  */
  4221.                 ret = IRQ_HANDLED;
  4222.                 iir = new_iir;
  4223.         } while (iir & ~flip_mask);
  4224.  
  4225.         i915_update_dri1_breadcrumb(dev);
  4226.  
  4227.         return ret;
  4228. }
  4229.  
  4230. static void i915_irq_uninstall(struct drm_device * dev)
  4231. {
  4232.         struct drm_i915_private *dev_priv = dev->dev_private;
  4233.         int pipe;
  4234.  
  4235.         if (I915_HAS_HOTPLUG(dev)) {
  4236.                 I915_WRITE(PORT_HOTPLUG_EN, 0);
  4237.                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  4238.         }
  4239.  
  4240.         I915_WRITE16(HWSTAM, 0xffff);
  4241.         for_each_pipe(pipe) {
  4242.                 /* Clear enable bits; then clear status bits */
  4243.                 I915_WRITE(PIPESTAT(pipe), 0);
  4244.                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
  4245.         }
  4246.         I915_WRITE(IMR, 0xffffffff);
  4247.         I915_WRITE(IER, 0x0);
  4248.  
  4249.         I915_WRITE(IIR, I915_READ(IIR));
  4250. }
  4251.  
  4252. static void i965_irq_preinstall(struct drm_device * dev)
  4253. {
  4254.         struct drm_i915_private *dev_priv = dev->dev_private;
  4255.         int pipe;
  4256.  
  4257.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  4258.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  4259.  
  4260.         I915_WRITE(HWSTAM, 0xeffe);
  4261.         for_each_pipe(pipe)
  4262.                 I915_WRITE(PIPESTAT(pipe), 0);
  4263.         I915_WRITE(IMR, 0xffffffff);
  4264.         I915_WRITE(IER, 0x0);
  4265.         POSTING_READ(IER);
  4266. }
  4267.  
  4268. static int i965_irq_postinstall(struct drm_device *dev)
  4269. {
  4270.         struct drm_i915_private *dev_priv = dev->dev_private;
  4271.         u32 enable_mask;
  4272.         u32 error_mask;
  4273.         unsigned long irqflags;
  4274.  
  4275.         /* Unmask the interrupts that we always want on. */
  4276.         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
  4277.                                I915_DISPLAY_PORT_INTERRUPT |
  4278.                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  4279.                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  4280.                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  4281.                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  4282.                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  4283.  
  4284.         enable_mask = ~dev_priv->irq_mask;
  4285.         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  4286.                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
  4287.         enable_mask |= I915_USER_INTERRUPT;
  4288.  
  4289.         if (IS_G4X(dev))
  4290.                 enable_mask |= I915_BSD_USER_INTERRUPT;
  4291.  
  4292.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  4293.          * just to make the assert_spin_locked check happy. */
  4294.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  4295.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
  4296.         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
  4297.         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
  4298.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  4299.  
  4300.         /*
  4301.          * Enable some error detection, note the instruction error mask
  4302.          * bit is reserved, so we leave it masked.
  4303.          */
  4304.         if (IS_G4X(dev)) {
  4305.                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
  4306.                                GM45_ERROR_MEM_PRIV |
  4307.                                GM45_ERROR_CP_PRIV |
  4308.                                I915_ERROR_MEMORY_REFRESH);
  4309.         } else {
  4310.                 error_mask = ~(I915_ERROR_PAGE_TABLE |
  4311.                                I915_ERROR_MEMORY_REFRESH);
  4312.         }
  4313.         I915_WRITE(EMR, error_mask);
  4314.  
  4315.         I915_WRITE(IMR, dev_priv->irq_mask);
  4316.         I915_WRITE(IER, enable_mask);
  4317.         POSTING_READ(IER);
  4318.  
  4319.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  4320.         POSTING_READ(PORT_HOTPLUG_EN);
  4321.  
  4322.         i915_enable_asle_pipestat(dev);
  4323.  
  4324.         return 0;
  4325. }
  4326.  
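        /*
         * Programs PORT_HOTPLUG_EN from the per-pin hpd_stats[] hotplug
         * state; meant to run with dev_priv->irq_lock held, as the assert
         * below documents.
         */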
  4327. static void i915_hpd_irq_setup(struct drm_device *dev)
  4328. {
  4329.         struct drm_i915_private *dev_priv = dev->dev_private;
  4330.         struct drm_mode_config *mode_config = &dev->mode_config;
  4331.         struct intel_encoder *intel_encoder;
  4332.         u32 hotplug_en;
  4333.  
  4334.         assert_spin_locked(&dev_priv->irq_lock);
  4335.  
  4336.         if (I915_HAS_HOTPLUG(dev)) {
  4337.                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
  4338.                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
  4339.                 /* Note HDMI and DP share hotplug bits */
  4340.                 /* enable bits are the same for all generations */
  4341.                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
  4342.                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
  4343.                                 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
  4344.                 /* Programming the CRT detection parameters tends
  4345.                    to generate a spurious hotplug event about three
  4346.                    seconds later.  So just do it once.
  4347.                    */
  4348.                 if (IS_G4X(dev))
  4349.                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
  4350.                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
  4351.                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
  4352.  
  4353.                 /* Ignore TV since it's buggy */
  4354.                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
  4355.         }
  4356. }
  4357.  
  4358. static irqreturn_t i965_irq_handler(int irq, void *arg)
  4359. {
  4360.         struct drm_device *dev = arg;
  4361.         struct drm_i915_private *dev_priv = dev->dev_private;
  4362.         u32 iir, new_iir;
  4363.         u32 pipe_stats[I915_MAX_PIPES];
  4364.         unsigned long irqflags;
  4365.         int ret = IRQ_NONE, pipe;
  4366.         u32 flip_mask =
  4367.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  4368.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  4369.  
  4370.         iir = I915_READ(IIR);
  4371.  
  4372.         for (;;) {
  4373.                 bool irq_received = (iir & ~flip_mask) != 0;
  4374.                 bool blc_event = false;
  4375.  
  4376.                 /* Can't rely on pipestat interrupt bit in iir as it might
  4377.                  * have been cleared after the pipestat interrupt was received.
  4378.                  * It doesn't set the bit in iir again, but it still produces
  4379.                  * interrupts (for non-MSI).
  4380.                  */
  4381.                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  4382.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  4383.                         i915_handle_error(dev, false,
  4384.                                           "Command parser error, iir 0x%08x",
  4385.                                           iir);
  4386.  
  4387.                 for_each_pipe(pipe) {
  4388.                         int reg = PIPESTAT(pipe);
  4389.                         pipe_stats[pipe] = I915_READ(reg);
  4390.  
  4391.                         /*
  4392.                          * Clear the PIPE*STAT regs before the IIR
  4393.                          */
  4394.                         if (pipe_stats[pipe] & 0x8000ffff) {
  4395.                                 I915_WRITE(reg, pipe_stats[pipe]);
  4396.                                 irq_received = true;
  4397.                         }
  4398.                 }
  4399.                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  4400.  
  4401.                 if (!irq_received)
  4402.                         break;
  4403.  
  4404.                 ret = IRQ_HANDLED;
  4405.  
  4406.                 /* Consume port.  Then clear IIR or we'll miss events */
  4407.                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
  4408.                         i9xx_hpd_irq_handler(dev);
  4409.  
  4410.                 I915_WRITE(IIR, iir & ~flip_mask);
  4411.                 new_iir = I915_READ(IIR); /* Flush posted writes */
  4412.  
  4413.                 if (iir & I915_USER_INTERRUPT)
  4414.                         notify_ring(dev, &dev_priv->ring[RCS]);
  4415.                 if (iir & I915_BSD_USER_INTERRUPT)
  4416.                         notify_ring(dev, &dev_priv->ring[VCS]);
  4417.  
  4418.                 for_each_pipe(pipe) {
  4419.                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
  4420.                             i915_handle_vblank(dev, pipe, pipe, iir))
  4421.                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
  4422.  
  4423.                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
  4424.                                 blc_event = true;
  4425.  
  4426.                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
  4427.                                 i9xx_pipe_crc_irq_handler(dev, pipe);
  4428.  
  4429.                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
  4430.                             intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
  4431.                                 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
  4432.                 }
  4433.  
  4434.                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
  4435.                         intel_opregion_asle_intr(dev);
  4436.  
  4437.                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
  4438.                         gmbus_irq_handler(dev);
  4439.  
  4440.                 /* With MSI, interrupts are only generated when iir
  4441.                  * transitions from zero to nonzero.  If another bit got
  4442.                  * set while we were handling the existing iir bits, then
  4443.                  * we would never get another interrupt.
  4444.                  *
  4445.                  * This is fine on non-MSI as well, as if we hit this path
  4446.                  * we avoid exiting the interrupt handler only to generate
  4447.                  * another one.
  4448.                  *
  4449.                  * Note that for MSI this could cause a stray interrupt report
  4450.                  * if an interrupt landed in the time between writing IIR and
  4451.                  * the posting read.  This should be rare enough to never
  4452.                  * trigger the 99% of 100,000 interrupts test for disabling
  4453.                  * stray interrupts.
  4454.                  */
  4455.                 iir = new_iir;
  4456.         }
  4457.  
  4458.         i915_update_dri1_breadcrumb(dev);
  4459.  
  4460.         return ret;
  4461. }
  4462.  
  4463. static void i965_irq_uninstall(struct drm_device * dev)
  4464. {
  4465.         struct drm_i915_private *dev_priv = dev->dev_private;
  4466.         int pipe;
  4467.  
  4468.         if (!dev_priv)
  4469.                 return;
  4470.  
  4471.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  4472.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  4473.  
  4474.         I915_WRITE(HWSTAM, 0xffffffff);
  4475.         for_each_pipe(pipe)
  4476.                 I915_WRITE(PIPESTAT(pipe), 0);
  4477.         I915_WRITE(IMR, 0xffffffff);
  4478.         I915_WRITE(IER, 0x0);
  4479.  
  4480.         for_each_pipe(pipe)
  4481.                 I915_WRITE(PIPESTAT(pipe),
  4482.                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
  4483.         I915_WRITE(IIR, I915_READ(IIR));
  4484. }
  4485.  
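        /*
         * Delayed work that re-arms hotplug pins previously marked
         * HPD_DISABLED (for example by interrupt-storm detection) and
         * switches the affected connectors back from polling to HPD-driven
         * detection.
         */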
  4486. static void intel_hpd_irq_reenable(struct work_struct *work)
  4487. {
  4488.         struct drm_i915_private *dev_priv =
  4489.                 container_of(work, typeof(*dev_priv),
  4490.                              hotplug_reenable_work.work);
  4491.         struct drm_device *dev = dev_priv->dev;
  4492.         struct drm_mode_config *mode_config = &dev->mode_config;
  4493.         unsigned long irqflags;
  4494.         int i;
  4495.  
  4496.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  4497.         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
  4498.                 struct drm_connector *connector;
  4499.  
  4500.                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
  4501.                         continue;
  4502.  
  4503.                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
  4504.  
  4505.                 list_for_each_entry(connector, &mode_config->connector_list, head) {
  4506.                         struct intel_connector *intel_connector = to_intel_connector(connector);
  4507.  
  4508.                         if (intel_connector->encoder->hpd_pin == i) {
  4509.                                 if (connector->polled != intel_connector->polled)
  4510.                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
  4511.                                                          connector->name);
  4512.                                 connector->polled = intel_connector->polled;
  4513.                                 if (!connector->polled)
  4514.                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
  4515.                         }
  4516.                 }
  4517.         }
  4518.         if (dev_priv->display.hpd_irq_setup)
  4519.                 dev_priv->display.hpd_irq_setup(dev);
  4520.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  4521. }
  4522.  
  4523. void intel_irq_init(struct drm_device *dev)
  4524. {
  4525.         struct drm_i915_private *dev_priv = dev->dev_private;
  4526.  
  4527.         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
  4528.         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
  4529.         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
  4530.         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
  4531.  
  4532.         /* Let's track the enabled rps events */
  4533.         if (IS_VALLEYVIEW(dev))
  4534.                 /* WaGsvRC0ResidenncyMethod:VLV */
  4535.                 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
  4536.         else
  4537.                 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
  4538.  
  4539.         /* Haven't installed the IRQ handler yet */
  4540.         dev_priv->pm._irqs_disabled = true;
  4541.  
  4542.         if (IS_GEN2(dev)) {
  4543.                 dev->max_vblank_count = 0;
  4544.                 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
  4545.         } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
  4546.                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
  4547.                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
  4548.         } else {
  4549.                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
  4550.                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
  4551.         }
  4552.  
  4553.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  4554.                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
  4555.                 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
  4556.         }
  4557.  
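                /* Install the per-platform IRQ entry points and the hotplug setup hook. */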
  4558.         if (IS_CHERRYVIEW(dev)) {
  4559.                 dev->driver->irq_handler = cherryview_irq_handler;
  4560.                 dev->driver->irq_preinstall = cherryview_irq_preinstall;
  4561.                 dev->driver->irq_postinstall = cherryview_irq_postinstall;
  4562.                 dev->driver->irq_uninstall = cherryview_irq_uninstall;
  4563.                 dev->driver->enable_vblank = valleyview_enable_vblank;
  4564.                 dev->driver->disable_vblank = valleyview_disable_vblank;
  4565.                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  4566.         } else if (IS_VALLEYVIEW(dev)) {
  4567.                 dev->driver->irq_handler = valleyview_irq_handler;
  4568.                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
  4569.                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
  4570.                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
  4571.                 dev->driver->enable_vblank = valleyview_enable_vblank;
  4572.                 dev->driver->disable_vblank = valleyview_disable_vblank;
  4573.                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  4574.         } else if (IS_GEN8(dev)) {
  4575.                 dev->driver->irq_handler = gen8_irq_handler;
  4576.                 dev->driver->irq_preinstall = gen8_irq_reset;
  4577.                 dev->driver->irq_postinstall = gen8_irq_postinstall;
  4578.                 dev->driver->irq_uninstall = gen8_irq_uninstall;
  4579.                 dev->driver->enable_vblank = gen8_enable_vblank;
  4580.                 dev->driver->disable_vblank = gen8_disable_vblank;
  4581.                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
  4582.         } else if (HAS_PCH_SPLIT(dev)) {
  4583.                 dev->driver->irq_handler = ironlake_irq_handler;
  4584.                 dev->driver->irq_preinstall = ironlake_irq_reset;
  4585.                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
  4586.                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
  4587.                 dev->driver->enable_vblank = ironlake_enable_vblank;
  4588.                 dev->driver->disable_vblank = ironlake_disable_vblank;
  4589.                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
  4590.         } else {
  4591.                 if (INTEL_INFO(dev)->gen == 2) {
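                                /* gen2 (i8xx) IRQ support is compiled out in this port, so no hooks are installed here */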
  4592.                 } else if (INTEL_INFO(dev)->gen == 3) {
  4593.                         dev->driver->irq_preinstall = i915_irq_preinstall;
  4594.                         dev->driver->irq_postinstall = i915_irq_postinstall;
  4595.                         dev->driver->irq_uninstall = i915_irq_uninstall;
  4596.                         dev->driver->irq_handler = i915_irq_handler;
  4597.                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  4598.                 } else {
  4599.                         dev->driver->irq_preinstall = i965_irq_preinstall;
  4600.                         dev->driver->irq_postinstall = i965_irq_postinstall;
  4601.                         dev->driver->irq_uninstall = i965_irq_uninstall;
  4602.                         dev->driver->irq_handler = i965_irq_handler;
  4603.                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  4604.                 }
  4605.                 dev->driver->enable_vblank = i915_enable_vblank;
  4606.                 dev->driver->disable_vblank = i915_disable_vblank;
  4607.         }
  4608. }
  4609.  
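        /*
         * Resets the per-pin hotplug bookkeeping, decides per connector
         * whether to use HPD or polling, then lets the platform
         * hpd_irq_setup hook program the hardware under irq_lock.
         */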
  4610. void intel_hpd_init(struct drm_device *dev)
  4611. {
  4612.         struct drm_i915_private *dev_priv = dev->dev_private;
  4613.         struct drm_mode_config *mode_config = &dev->mode_config;
  4614.         struct drm_connector *connector;
  4615.         unsigned long irqflags;
  4616.         int i;
  4617.  
  4618.         for (i = 1; i < HPD_NUM_PINS; i++) {
  4619.                 dev_priv->hpd_stats[i].hpd_cnt = 0;
  4620.                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
  4621.         }
  4622.         list_for_each_entry(connector, &mode_config->connector_list, head) {
  4623.                 struct intel_connector *intel_connector = to_intel_connector(connector);
  4624.                 connector->polled = intel_connector->polled;
  4625.                 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
  4626.                         connector->polled = DRM_CONNECTOR_POLL_HPD;
  4627.                 if (intel_connector->mst_port)
  4628.                         connector->polled = DRM_CONNECTOR_POLL_HPD;
  4629.         }
  4630.  
  4631.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  4632.          * just to make the assert_spin_locked checks happy. */
  4633.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  4634.         if (dev_priv->display.hpd_irq_setup)
  4635.                 dev_priv->display.hpd_irq_setup(dev);
  4636.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  4637. }
  4638.  
  4639. /* Disable interrupts so we can allow runtime PM. */
  4640. void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
  4641. {
  4642.         struct drm_i915_private *dev_priv = dev->dev_private;
  4643.  
  4644.         dev->driver->irq_uninstall(dev);
  4645.         dev_priv->pm._irqs_disabled = true;
  4646. }
  4647.  
  4648. /* Restore interrupts so we can recover from runtime PM. */
  4649. void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
  4650. {
  4651.         struct drm_i915_private *dev_priv = dev->dev_private;
  4652.  
  4653.         dev_priv->pm._irqs_disabled = false;
  4654.         dev->driver->irq_preinstall(dev);
  4655.         dev->driver->irq_postinstall(dev);
  4656. }
  4657.  
  4658.  
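        /*
         * IRQ entry point for this port: dispatch to whichever platform
         * handler intel_irq_init() installed in dev->driver->irq_handler.
         */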
  4659. irqreturn_t intel_irq_handler(struct drm_device *dev)
  4660. {
  4661.  
  4662. //    printf("i915 irq\n");
  4663.  
  4664. //    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
  4665.  
  4666.         return dev->driver->irq_handler(0, dev);
  4667. }
  4668.  
  4669.