/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

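/* NOTE: the KolibriOS port stubs out assert_spin_locked(), so the
 * lock-held assertions used throughout this file compile to nothing. */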
#define assert_spin_locked(a)

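/*
 * Hotplug pin tables, indexed by enum hpd_pin: each table maps a pin to
 * the interrupt enable or status bit used by that platform's hotplug
 * registers (IBX and CPT south display engine, gen4+, and i915/VLV).
 */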
static const u32 hpd_ibx[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

#define pr_err(fmt, ...) \
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )

#define MAX_NOPID ((u32)~0)

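/*
 * In the hardware interrupt mask registers (DEIMR/GTIMR/GEN6_PMIMR/SDEIMR)
 * a set bit masks the corresponding interrupt, so the "enable" helpers
 * below clear bits and the "disable" helpers set them.
 */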
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.deimr &= ~mask;
                return;
        }

        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.deimr |= mask;
                return;
        }

        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
                dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
                                                interrupt_mask);
                return;
        }

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}
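
/*
 * Hypothetical usage sketch (not from this file): these helpers require
 * dev_priv->irq_lock to be held, per the assert above, e.g.
 *
 *      spin_lock_irq(&dev_priv->irq_lock);
 *      ilk_enable_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT);
 *      spin_unlock_irq(&dev_priv->irq_lock);
 */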

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
                dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
                                                     interrupt_mask);
                return;
        }

        new_val = dev_priv->pm_irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_irq_mask) {
                dev_priv->pm_irq_mask = new_val;
                I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
                POSTING_READ(GEN6_PMIMR);
        }
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

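/*
 * The IVB/HSW CPU error interrupt and the CPT/PPT south error interrupt
 * are shared by all pipes/transcoders, so they may only be enabled when
 * underrun reporting is enabled everywhere; the two helpers below check
 * that precondition.
 */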
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
        enum pipe pipe;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->cpu_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *crtc;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->pch_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
                                          DE_PIPEB_FIFO_UNDERRUN;

        if (enable)
                ironlake_enable_display_irq(dev_priv, bit);
        else
                ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

                if (!ivb_can_enable_err_int(dev))
                        return;

                ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
        } else {
                bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

                /* Change the state _after_ we've read out the current one. */
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

                if (!was_enabled &&
                    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
                        DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
                                      pipe_name(pipe));
                }
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                         uint32_t interrupt_mask,
                                         uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled &&
            (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
                dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
                                                 interrupt_mask);
                return;
        }

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
        ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
        ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
                       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

        if (enable)
                ibx_enable_display_interrupt(dev_priv, bit);
        else
                ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                I915_WRITE(SERR_INT,
                           SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

                if (!cpt_can_enable_serr_int(dev))
                        return;

                ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
        } else {
                uint32_t tmp = I915_READ(SERR_INT);
                bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

                /* Change the state _after_ we've read out the current one. */
                ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

                if (!was_enabled &&
                    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
                        DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
                                      transcoder_name(pch_transcoder));
                }
        }
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                           enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->cpu_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->cpu_fifo_underrun_disabled = !enable;

        if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}
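
/*
 * Hypothetical usage sketch (not from this file): because the previous
 * state is returned, a caller can silence reporting around an operation
 * known to trigger a spurious underrun and then restore it:
 *
 *      bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *      ... underrun-prone modeset work ...
 *      intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */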

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                           enum transcoder pch_transcoder,
                                           bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;

        /*
         * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
         * has only one pch transcoder A that all pipes can use. To avoid racy
         * pch transcoder -> pipe lookups from interrupt code simply store the
         * underrun statistics in crtc A. Since we never expose this anywhere
         * nor use it outside of the fifo underrun code here, using the "wrong"
         * crtc on LPT won't cause issues.
         */

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->pch_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->pch_fifo_underrun_disabled = !enable;

        if (HAS_PCH_IBX(dev))
                ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
        else
                cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}

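/*
 * PIPESTAT keeps the interrupt enable bits in its high 16 bits and the
 * corresponding status bits in its low 16 bits; hence the 0x7fff0000
 * mask below, and the write of mask | (mask >> 16) when enabling, which
 * both enables the interrupt and clears any pending status.
 */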
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;

        assert_spin_locked(&dev_priv->irq_lock);

        if ((pipestat & mask) == mask)
                return;

        /* Enable the interrupt, clear any pending status */
        pipestat |= mask | (mask >> 16);
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;

        assert_spin_locked(&dev_priv->irq_lock);

        if ((pipestat & mask) == 0)
                return;

        pipestat &= ~mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Locking is horribly broken here, but whatever. */
                struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

                return intel_crtc->active;
        } else {
                return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
        }
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        low >>= PIPE_FRAME_LOW_SHIFT;
        return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                                    int *vpos, int *hpos)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 vbl = 0, position = 0;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        /* Get vtotal. */
        vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

        if (INTEL_INFO(dev)->gen >= 4) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = I915_READ(PIPEDSL(pipe));

                /* Decode into vertical scanout position. Don't have
                 * horizontal scanout position.
                 */
                *vpos = position & 0x1fff;
                *hpos = 0;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* Query vblank area. */
        vbl = I915_READ(VBLANK(cpu_transcoder));

        /* Test position against vblank region. */
        vbl_start = vbl & 0x1fff;
        vbl_end = (vbl >> 16) & 0x1fff;

        if ((*vpos < vbl_start) || (*vpos > vbl_end))
                in_vbl = false;

        /* Inside "upper part" of vblank area? Apply corrective offset: */
        if (in_vbl && (*vpos >= vbl_start))
                *vpos = *vpos - vtotal;

        /* Readouts valid? */
        if (vbl > 0)
                ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;

        return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                                     int *max_error,
                                     struct timeval *vblank_time,
                                     unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->status;

        connector->status = connector->funcs->detect(connector, false);
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
                      connector->base.id,
                      drm_get_connector_name(connector),
                      old_status, connector->status);
        return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
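/* 2*60*1000 ms: wait two minutes after an HPD storm before re-enabling
 * the hotplug interrupts that were switched over to polling. */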
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        unsigned long irqflags;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;

        /* HPD irq before everything is fully set up. */
        if (!dev_priv->enable_hotplug_processing)
                return;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (intel_encoder->hpd_pin > HPD_NONE &&
                    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
                    connector->polled == DRM_CONNECTOR_POLL_HPD) {
                        DRM_INFO("HPD interrupt storm detected on connector %s: "
                                 "switching from hotplug detection to polling\n",
                                 drm_get_connector_name(connector));
                        dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
                        connector->polled = DRM_CONNECTOR_POLL_CONNECT
                                | DRM_CONNECTOR_POLL_DISCONNECT;
                        hpd_disabled = true;
                }
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      drm_get_connector_name(connector), intel_encoder->hpd_pin);
                }
        }
        /* if there were no outputs to poll, poll was disabled,
         * therefore make sure it's enabled when disabling HPD on
         * some connectors */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_timer(&dev_priv->hotplug_reenable_timer,
                          GetTimerTicks() + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        if (intel_encoder->hot_plug)
                                intel_encoder->hot_plug(intel_encoder);
                        if (intel_hpd_irq_event(dev, connector))
                                changed = true;
                }
        }
        mutex_unlock(&mode_config->mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);

        return;
}

static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        if (ring->obj == NULL)
                return;

        trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

        wake_up_all(&ring->irq_queue);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps.work);
        u32 pm_iir;
        u8 new_delay;

        spin_lock_irq(&dev_priv->irq_lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        /* Make sure not to corrupt PMIMR state used by ringbuffer code */
        snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

        if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                new_delay = dev_priv->rps.cur_delay + 1;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (IS_VALLEYVIEW(dev_priv->dev) &&
                    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
                        new_delay = dev_priv->rps.rpe_delay;
        } else
                new_delay = dev_priv->rps.cur_delay - 1;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        if (new_delay >= dev_priv->rps.min_delay &&
            new_delay <= dev_priv->rps.max_delay) {
                if (IS_VALLEYVIEW(dev_priv->dev))
                        valleyview_set_rps(dev_priv->dev, new_delay);
                else
                        gen6_set_rps(dev_priv->dev, new_delay);
        }

        if (IS_VALLEYVIEW(dev_priv->dev)) {
                /*
                 * On VLV, when we enter RC6 we may not be at the minimum
                 * voltage level, so arm a timer to check.  It should only
                 * fire when there's activity or once after we've entered
                 * RC6, and then won't be re-armed until the next RPS interrupt.
                 */
//              mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
//                               msecs_to_jiffies(100));
        }

        mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[5];
        uint32_t misccpctl;
        unsigned long flags;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        error_status = I915_READ(GEN7_L3CDERRST1);
        row = GEN7_PARITY_ERROR_ROW(error_status);
        bank = GEN7_PARITY_ERROR_BANK(error_status);
        subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

        I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
                                    GEN7_L3CDERRST1_ENABLE);
        POSTING_READ(GEN7_L3CDERRST1);

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        mutex_unlock(&dev_priv->dev->struct_mutex);

        DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
                  row, bank, subbank);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (!HAS_L3_GPU_CACHE(dev))
                return;

        spin_lock(&dev_priv->irq_lock);
        ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
        spin_unlock(&dev_priv->irq_lock);

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }

        if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                ivybridge_parity_error_irq_handler(dev);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
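
/*
 * A pin that fires more than HPD_STORM_THRESHOLD times within
 * HPD_STORM_DETECT_PERIOD ms is treated as an interrupt storm: the pin is
 * marked HPD_MARK_DISABLED here, and i915_hotplug_work_func above then
 * switches the affected connector from hotplug detection to polling.
 */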

static inline void intel_hpd_irq_handler(struct drm_device *dev,
                                         u32 hotplug_trigger,
                                         const u32 *hpd)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        bool storm_detected = false;

        if (!hotplug_trigger)
                return;

        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {

                WARN(((hpd[i] & hotplug_trigger) &&
                      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
                     "Received HPD interrupt although disabled\n");

                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
                        continue;

                dev_priv->hpd_event_bits |= (1 << i);
                if (!time_in_range(GetTimerTicks(), dev_priv->hpd_stats[i].hpd_last_jiffies,
                                   dev_priv->hpd_stats[i].hpd_last_jiffies
                                   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
                        dev_priv->hpd_stats[i].hpd_last_jiffies = GetTimerTicks();
                        dev_priv->hpd_stats[i].hpd_cnt = 0;
                        DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
                } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
                        dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
                        dev_priv->hpd_event_bits &= ~(1 << i);
                        DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
                        storm_detected = true;
                } else {
                        dev_priv->hpd_stats[i].hpd_cnt++;
                        DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
                                      dev_priv->hpd_stats[i].hpd_cnt);
                }
        }

        if (storm_detected)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock(&dev_priv->irq_lock);

        /*
         * Our hotplug handler can grab modeset locks (by calling down into the
         * fb helpers). Hence it must not be run on our own dev_priv->wq work
         * queue for otherwise the flush_work in the pageflip code will
         * deadlock.
         */
        schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}
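
/* DP AUX completions and GMBUS events share gmbus_wait_queue, which is
 * why both handlers above wake the same set of waiters. */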

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        if (pm_iir & GEN6_PM_RPS_EVENTS) {
                spin_lock(&dev_priv->irq_lock);
                dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
                snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
                spin_unlock(&dev_priv->irq_lock);

                queue_work(dev_priv->wq, &dev_priv->rps.work);
        }

        if (HAS_VEBOX(dev_priv->dev)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                        notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
                        DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
                        i915_handle_error(dev_priv->dev, false);
                }
        }
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
        unsigned long irqflags;
        int pipe;
        u32 pipe_stats[I915_MAX_PIPES];

        atomic_inc(&dev_priv->irq_received);

        while (true) {
                iir = I915_READ(VLV_IIR);
                gt_iir = I915_READ(GTIIR);
                pm_iir = I915_READ(GEN6_PMIIR);

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        goto out;

                ret = IRQ_HANDLED;

                snb_gt_irq_handler(dev, dev_priv, gt_iir);

                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

#if 0
                for_each_pipe(pipe) {
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
                                drm_handle_vblank(dev, pipe);

                        if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip(dev, pipe);
                        }
                }
#endif

                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);

                        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);

                if (pm_iir)
                        gen6_rps_irq_handler(dev_priv, pm_iir);

                I915_WRITE(GTIIR, gt_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                I915_WRITE(VLV_IIR, iir);
        }

out:
        return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

        if (pch_iir & SDE_AUDIO_POWER_MASK) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
                               SDE_AUDIO_POWER_SHIFT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 port_name(port));
        }

        if (pch_iir & SDE_AUX_MASK)
                dp_aux_irq_handler(dev);

        if (pch_iir & SDE_GMBUS)
                gmbus_irq_handler(dev);

        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

        if (pch_iir & SDE_AUDIO_TRANS_MASK)
                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

        if (pch_iir & SDE_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (pch_iir & SDE_FDI_MASK)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 err_int = I915_READ(GEN7_ERR_INT);

        if (err_int & ERR_INT_POISON)
                DRM_ERROR("Poison interrupt\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_A)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
                        DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_B)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
                        DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_C)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
                        DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

        I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 serr_int = I915_READ(SERR_INT);

        if (serr_int & SERR_INT_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

        I915_WRITE(SERR_INT, serr_int);
}
  1261.  
  1262. static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
  1263. {
  1264.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1265.         int pipe;
  1266.         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
  1267.  
  1268.         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
  1269.  
  1270.         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
  1271.                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
  1272.                                SDE_AUDIO_POWER_SHIFT_CPT);
  1273.                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
  1274.                                  port_name(port));
  1275.         }
  1276.  
  1277.         if (pch_iir & SDE_AUX_MASK_CPT)
  1278.                 dp_aux_irq_handler(dev);
  1279.  
  1280.         if (pch_iir & SDE_GMBUS_CPT)
  1281.                 gmbus_irq_handler(dev);
  1282.  
  1283.         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
  1284.                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
  1285.  
  1286.         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
  1287.                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
  1288.  
  1289.         if (pch_iir & SDE_FDI_MASK_CPT)
  1290.                 for_each_pipe(pipe)
  1291.                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
  1292.                                          pipe_name(pipe),
  1293.                                          I915_READ(FDI_RX_IIR(pipe)));
  1294.  
  1295.         if (pch_iir & SDE_ERROR_CPT)
  1296.                 cpt_serr_int_handler(dev);
  1297. }
  1298.  
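/*
 * Handle the Ironlake/Sandy Bridge display-engine interrupt bits: AUX
 * channel A, opregion/ASLE, poison, pipe FIFO underruns, chained PCH
 * events, and (on GEN5) PCU/RPS events.
 */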
  1299. static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
  1300. {
  1301.         struct drm_i915_private *dev_priv = dev->dev_private;
  1302.  
  1303.         if (de_iir & DE_AUX_CHANNEL_A)
  1304.                 dp_aux_irq_handler(dev);
  1305.  
  1306.         if (de_iir & DE_GSE)
  1307.                 intel_opregion_asle_intr(dev);
  1308.  
  1309. #if 0
  1310.         if (de_iir & DE_PIPEA_VBLANK)
  1311.                 drm_handle_vblank(dev, 0);
  1312.  
  1313.         if (de_iir & DE_PIPEB_VBLANK)
  1314.                 drm_handle_vblank(dev, 1);
  1315. #endif
  1316.  
  1317.         if (de_iir & DE_POISON)
  1318.                 DRM_ERROR("Poison interrupt\n");
  1319.  
  1320.         if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
  1321.                 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
  1322.                         DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
  1323.  
  1324.         if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
  1325.                 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
  1326.                         DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
  1327. #if 0
  1328.         if (de_iir & DE_PLANEA_FLIP_DONE) {
  1329.                 intel_prepare_page_flip(dev, 0);
  1330.                 intel_finish_page_flip_plane(dev, 0);
  1331.         }
  1332.  
  1333.         if (de_iir & DE_PLANEB_FLIP_DONE) {
  1334.                 intel_prepare_page_flip(dev, 1);
  1335.                 intel_finish_page_flip_plane(dev, 1);
  1336.         }
  1337. #endif
  1338.  
  1339.         /* check event from PCH */
  1340.         if (de_iir & DE_PCH_EVENT) {
  1341.                 u32 pch_iir = I915_READ(SDEIIR);
  1342.  
  1343.                 if (HAS_PCH_CPT(dev))
  1344.                         cpt_irq_handler(dev, pch_iir);
  1345.                 else
  1346.                         ibx_irq_handler(dev, pch_iir);
  1347.  
  1348.                 /* clear the PCH hotplug event before clearing the CPU irq */
  1349.                 I915_WRITE(SDEIIR, pch_iir);
  1350.         }
  1351.  
  1352.         if (IS_GEN5(dev) && (de_iir & DE_PCU_EVENT))
  1353.                 ironlake_rps_change_irq_handler(dev);
  1354. }
  1355.  
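/*
 * Ivy Bridge / Haswell variant of the display-engine handler; error
 * interrupts are demuxed through ivb_err_int_handler() and all PCH
 * events go through the CPT path.
 */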
  1356. static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
  1357. {
  1358.         struct drm_i915_private *dev_priv = dev->dev_private;
  1359.         int i;
  1360.  
  1361.         if (de_iir & DE_ERR_INT_IVB)
  1362.                 ivb_err_int_handler(dev);
  1363.  
  1364.         if (de_iir & DE_AUX_CHANNEL_A_IVB)
  1365.                 dp_aux_irq_handler(dev);
  1366.  
  1367.         if (de_iir & DE_GSE_IVB)
  1368.                 intel_opregion_asle_intr(dev);
  1369. #if 0
  1370.         for (i = 0; i < 3; i++) {
  1371.                 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
  1372.                         drm_handle_vblank(dev, i);
  1373.                 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
  1374.                         intel_prepare_page_flip(dev, i);
  1375.                         intel_finish_page_flip_plane(dev, i);
  1376.                 }
  1377.         }
  1378. #endif
  1379.  
  1380.         /* check event from PCH */
  1381.         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
  1382.                 u32 pch_iir = I915_READ(SDEIIR);
  1383.  
  1384.                 cpt_irq_handler(dev, pch_iir);
  1385.  
  1386.                 /* clear PCH hotplug event before clearing the CPU irq */
  1387.                 I915_WRITE(SDEIIR, pch_iir);
  1388.         }
  1389. }
  1390.  
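/*
 * Top-level interrupt handler for ILK..HSW. The flow is: mask the DE
 * master control and SDEIER, read and ack GTIIR/DEIIR/GEN6_PMIIR while
 * dispatching to the per-source handlers, then restore DEIER and SDEIER
 * so that any PCH interrupt that arrived meanwhile re-fires.
 */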
  1391. static irqreturn_t ironlake_irq_handler(int irq, void *arg)
  1392. {
  1393.         struct drm_device *dev = (struct drm_device *) arg;
  1394.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1395.         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
  1396.         irqreturn_t ret = IRQ_NONE;
  1397.         bool err_int_reenable = false;
  1398.  
  1399.         atomic_inc(&dev_priv->irq_received);
  1400.  
  1401.         /* We get interrupts on unclaimed registers, so check for this before we
  1402.          * do any I915_{READ,WRITE}. */
  1403.         intel_uncore_check_errors(dev);
  1404.  
  1405.         /* disable master interrupt before clearing iir  */
  1406.         de_ier = I915_READ(DEIER);
  1407.         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
  1408.         POSTING_READ(DEIER);
  1409.  
  1410.         /* Disable south interrupts. We'll only write to SDEIIR once, so further
  1411.          * interrupts will be stored on its back queue, and then we'll be
  1412.          * able to process them after we restore SDEIER (as soon as we restore
  1413.          * it, we'll get an interrupt if SDEIIR still has something to process
  1414.          * due to its back queue). */
  1415.         if (!HAS_PCH_NOP(dev)) {
  1416.                 sde_ier = I915_READ(SDEIER);
  1417.                 I915_WRITE(SDEIER, 0);
  1418.                 POSTING_READ(SDEIER);
  1419.         }
  1420.  
  1421.         /* On Haswell, also mask ERR_INT because we don't want to risk
  1422.          * generating "unclaimed register" interrupts from inside the interrupt
  1423.          * handler. */
  1424.         if (IS_HASWELL(dev)) {
  1425.                 spin_lock(&dev_priv->irq_lock);
  1426.                 err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
  1427.                 if (err_int_reenable)
  1428.                         ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
  1429.                 spin_unlock(&dev_priv->irq_lock);
  1430.         }
  1431.  
  1432.         gt_iir = I915_READ(GTIIR);
  1433.         if (gt_iir) {
  1434.                 if (INTEL_INFO(dev)->gen >= 6)
  1435.                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
  1436.                 else
  1437.                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
  1438.                 I915_WRITE(GTIIR, gt_iir);
  1439.                 ret = IRQ_HANDLED;
  1440.         }
  1441.  
  1442.         de_iir = I915_READ(DEIIR);
  1443.         if (de_iir) {
  1444.                 if (INTEL_INFO(dev)->gen >= 7)
  1445.                         ivb_display_irq_handler(dev, de_iir);
  1446.                 else
  1447.                         ilk_display_irq_handler(dev, de_iir);
  1448.                 I915_WRITE(DEIIR, de_iir);
  1449.                 ret = IRQ_HANDLED;
  1450.         }
  1451.  
  1452.         if (INTEL_INFO(dev)->gen >= 6) {
  1453.                 u32 pm_iir = I915_READ(GEN6_PMIIR);
  1454.                 if (pm_iir) {
  1455.                         gen6_rps_irq_handler(dev_priv, pm_iir);
  1456.                         I915_WRITE(GEN6_PMIIR, pm_iir);
  1457.                         ret = IRQ_HANDLED;
  1458.                 }
  1459.         }
  1460.  
  1461.         if (err_int_reenable) {
  1462.                 spin_lock(&dev_priv->irq_lock);
  1463.                 if (ivb_can_enable_err_int(dev))
  1464.                         ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
  1465.                 spin_unlock(&dev_priv->irq_lock);
  1466.         }
  1467.  
  1468.         I915_WRITE(DEIER, de_ier);
  1469.         POSTING_READ(DEIER);
  1470.         if (!HAS_PCH_NOP(dev)) {
  1471.                 I915_WRITE(SDEIER, sde_ier);
  1472.                 POSTING_READ(SDEIER);
  1473.         }
  1474.  
  1475.         return ret;
  1476. }
  1477.  
  1478. static void i915_error_wake_up(struct drm_i915_private *dev_priv,
  1479.                                bool reset_completed)
  1480. {
  1481.         struct intel_ring_buffer *ring;
  1482.         int i;
  1483.  
  1484.         /*
  1485.          * Notify all waiters for GPU completion events that reset state has
  1486.          * been changed, and that they need to restart their wait after
  1487.          * checking for potential errors (and bail out to drop locks if there is
  1488.          * a gpu reset pending so that i915_error_work_func can acquire them).
  1489.          */
  1490.  
  1491.         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
  1492.         for_each_ring(ring, dev_priv, i)
  1493.                 wake_up_all(&ring->irq_queue);
  1494.  
  1495.  
  1496.         /*
  1497.          * Signal tasks blocked in i915_gem_wait_for_error that the pending
  1498.          * reset state is cleared.
  1499.          */
  1500.         if (reset_completed)
  1501.                 wake_up_all(&dev_priv->gpu_error.reset_queue);
  1502. }
  1503.  
  1504. /**
  1505.  * i915_error_work_func - do process context error handling work
  1506.  * @work: work struct
  1507.  *
  1508.  * Fire an error uevent so userspace can see that a hang or error
  1509.  * was detected.
  1510.  */
  1511. static void i915_error_work_func(struct work_struct *work)
  1512. {
  1513.         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
  1514.                                                     work);
  1515.         drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
  1516.                                                     gpu_error);
  1517.         struct drm_device *dev = dev_priv->dev;
  1518.         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
  1519.         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
  1520.         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
  1521.         int ret = 0; /* i915_reset() is stubbed out below; assume success */
  1522.  
  1523.         /*
  1524.          * Note that there's only one work item which does gpu resets, so we
  1525.          * need not worry about concurrent gpu resets potentially incrementing
  1526.          * error->reset_counter twice. We only need to take care of another
  1527.          * racing irq/hangcheck declaring the gpu dead for a second time. A
  1528.          * quick check for that is good enough: schedule_work ensures the
  1529.          * correct ordering between hang detection and this work item, and since
  1530.          * the reset in-progress bit is only ever set by code outside of this
  1531.          * work we don't need to worry about any other races.
  1532.          */
  1533.         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
  1534.                 DRM_DEBUG_DRIVER("resetting chip\n");
  1535.  
  1536.                 /*
  1537.                  * All state reset _must_ be completed before we update the
  1538.                  * reset counter, for otherwise waiters might miss the reset
  1539.                  * pending state and not properly drop locks, resulting in
  1540.                  * deadlocks with the reset work.
  1541.                  */
  1542. //       ret = i915_reset(dev);
  1543.  
  1544. //       intel_display_handle_reset(dev);
  1545.  
  1546.                 if (ret == 0) {
  1547.                         /*
  1548.                          * After all the gem state is reset, increment the reset
  1549.                          * counter and wake up everyone waiting for the reset to
  1550.                          * complete.
  1551.                          *
  1552.                          * Since unlock operations are a one-sided barrier only,
  1553.                          * we need to insert a barrier here to order any seqno
  1554.                          * updates before
  1555.                          * the counter increment.
  1556.                          */
  1557.                         atomic_inc(&dev_priv->gpu_error.reset_counter);
  1558.  
  1559.                 } else {
  1560.                         atomic_set(&error->reset_counter, I915_WEDGED);
  1561.                 }
  1562.  
  1563.                 /*
  1564.                  * Note: The wake_up also serves as a memory barrier so that
  1565.                  * waiters see the updated value of the reset counter atomic_t.
  1566.                  */
  1567.                 i915_error_wake_up(dev_priv, true);
  1568.         }
  1569. }
  1570.  
  1571. static void i915_report_and_clear_eir(struct drm_device *dev)
  1572. {
  1573.         struct drm_i915_private *dev_priv = dev->dev_private;
  1574.         uint32_t instdone[I915_NUM_INSTDONE_REG];
  1575.         u32 eir = I915_READ(EIR);
  1576.         int pipe, i;
  1577.  
  1578.         if (!eir)
  1579.                 return;
  1580.  
  1581.         pr_err("render error detected, EIR: 0x%08x\n", eir);
  1582.  
  1583.         i915_get_extra_instdone(dev, instdone);
  1584.  
  1585.         if (IS_G4X(dev)) {
  1586.                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
  1587.                         u32 ipeir = I915_READ(IPEIR_I965);
  1588.  
  1589.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
  1590.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
  1591.                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
  1592.                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
  1593.                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
  1594.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
  1595.                         I915_WRITE(IPEIR_I965, ipeir);
  1596.                         POSTING_READ(IPEIR_I965);
  1597.                 }
  1598.                 if (eir & GM45_ERROR_PAGE_TABLE) {
  1599.                         u32 pgtbl_err = I915_READ(PGTBL_ER);
  1600.                         pr_err("page table error\n");
  1601.                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
  1602.                         I915_WRITE(PGTBL_ER, pgtbl_err);
  1603.                         POSTING_READ(PGTBL_ER);
  1604.                 }
  1605.         }
  1606.  
  1607.         if (!IS_GEN2(dev)) {
  1608.                 if (eir & I915_ERROR_PAGE_TABLE) {
  1609.                         u32 pgtbl_err = I915_READ(PGTBL_ER);
  1610.                         pr_err("page table error\n");
  1611.                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
  1612.                         I915_WRITE(PGTBL_ER, pgtbl_err);
  1613.                         POSTING_READ(PGTBL_ER);
  1614.                 }
  1615.         }
  1616.  
  1617.         if (eir & I915_ERROR_MEMORY_REFRESH) {
  1618.                 pr_err("memory refresh error:\n");
  1619.                 for_each_pipe(pipe)
  1620.                         pr_err("pipe %c stat: 0x%08x\n",
  1621.                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
  1622.                 /* pipestat has already been acked */
  1623.         }
  1624.         if (eir & I915_ERROR_INSTRUCTION) {
  1625.                 pr_err("instruction error\n");
  1626.                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
  1627.                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
  1628.                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
  1629.                 if (INTEL_INFO(dev)->gen < 4) {
  1630.                         u32 ipeir = I915_READ(IPEIR);
  1631.  
  1632.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
  1633.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
  1634.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
  1635.                         I915_WRITE(IPEIR, ipeir);
  1636.                         POSTING_READ(IPEIR);
  1637.                 } else {
  1638.                         u32 ipeir = I915_READ(IPEIR_I965);
  1639.  
  1640.                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
  1641.                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
  1642.                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
  1643.                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
  1644.                         I915_WRITE(IPEIR_I965, ipeir);
  1645.                         POSTING_READ(IPEIR_I965);
  1646.                 }
  1647.         }
  1648.  
  1649.         I915_WRITE(EIR, eir);
  1650.         POSTING_READ(EIR);
  1651.         eir = I915_READ(EIR);
  1652.         if (eir) {
  1653.                 /*
  1654.                  * some errors might have become stuck,
  1655.                  * mask them.
  1656.                  */
  1657.                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
  1658.                 I915_WRITE(EMR, I915_READ(EMR) | eir);
  1659.                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  1660.         }
  1661. }
  1662.  
  1663. /**
  1664.  * i915_handle_error - handle an error interrupt
  1665.  * @dev: drm device
  1666.  *
  1667.  * Do some basic checking of register state at error interrupt time and
  1668.  * dump it to the syslog.  Also call i915_capture_error_state() to make
  1669.  * sure we get a record and make it available in debugfs.  Fire a uevent
  1670.  * so userspace knows something bad happened (should trigger collection
  1671.  * of a ring dump etc.).
  1672.  */
  1673. void i915_handle_error(struct drm_device *dev, bool wedged)
  1674. {
  1675.         struct drm_i915_private *dev_priv = dev->dev_private;
  1676.  
  1677. //   i915_capture_error_state(dev);
  1678.         i915_report_and_clear_eir(dev);
  1679.  
  1680.         if (wedged) {
  1681.                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
  1682.                                 &dev_priv->gpu_error.reset_counter);
  1683.  
  1684.                 /*
  1685.                  * Wakeup waiting processes so that the reset work function
  1686.                  * i915_error_work_func doesn't deadlock trying to grab various
  1687.                  * locks. By bumping the reset counter first, the woken
  1688.                  * processes will see a reset in progress and back off,
  1689.                  * releasing their locks and then wait for the reset completion.
  1690.                  * We must do this for _all_ gpu waiters that might hold locks
  1691.                  * that the reset work needs to acquire.
  1692.                  *
  1693.                  * Note: The wake_up serves as the required memory barrier to
  1694.                  * ensure that the waiters see the updated value of the reset
  1695.                  * counter atomic_t.
  1696.                  */
  1697.                 i915_error_wake_up(dev_priv, false);
  1698.         }
  1699.  
  1700.         /*
  1701.          * Our reset work can grab modeset locks (since it needs to reset the
  1702.          * state of outstanding pageflips). Hence it must not be run on our own
  1703.          * dev_priv->wq work queue, for otherwise the flush_work in the pageflip
  1704.          * code will deadlock.
  1705.          */
  1706.         schedule_work(&dev_priv->gpu_error.work);
  1707. }
  1708.  
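/* The pageflip stall check below is compiled out in this port. */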
  1709. #if 0
  1710. static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
  1711. {
  1712.         drm_i915_private_t *dev_priv = dev->dev_private;
  1713.         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  1714.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  1715.         struct drm_i915_gem_object *obj;
  1716.         struct intel_unpin_work *work;
  1717.         unsigned long flags;
  1718.         bool stall_detected;
  1719.  
  1720.         /* Ignore early vblank irqs */
  1721.         if (intel_crtc == NULL)
  1722.                 return;
  1723.  
  1724.         spin_lock_irqsave(&dev->event_lock, flags);
  1725.         work = intel_crtc->unpin_work;
  1726.  
  1727.         if (work == NULL ||
  1728.             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
  1729.             !work->enable_stall_check) {
  1730.                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
  1731.                 spin_unlock_irqrestore(&dev->event_lock, flags);
  1732.                 return;
  1733.         }
  1734.  
  1735.         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
  1736.         obj = work->pending_flip_obj;
  1737.         if (INTEL_INFO(dev)->gen >= 4) {
  1738.                 int dspsurf = DSPSURF(intel_crtc->plane);
  1739.                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
  1740.                                         i915_gem_obj_ggtt_offset(obj);
  1741.         } else {
  1742.                 int dspaddr = DSPADDR(intel_crtc->plane);
  1743.                 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
  1744.                                                         crtc->y * crtc->fb->pitches[0] +
  1745.                                                         crtc->x * crtc->fb->bits_per_pixel/8);
  1746.         }
  1747.  
  1748.         spin_unlock_irqrestore(&dev->event_lock, flags);
  1749.  
  1750.         if (stall_detected) {
  1751.                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
  1752.                 intel_prepare_page_flip(dev, intel_crtc->plane);
  1753.         }
  1754. }
  1755.  
  1756. #endif
  1757.  
  1758. /* Called from drm generic code, passed 'crtc' which
  1759.  * we use as a pipe index
  1760.  */
  1761. static int i915_enable_vblank(struct drm_device *dev, int pipe)
  1762. {
  1763.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1764.         unsigned long irqflags;
  1765.  
  1766.         if (!i915_pipe_enabled(dev, pipe))
  1767.                 return -EINVAL;
  1768.  
  1769.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1770.         if (INTEL_INFO(dev)->gen >= 4)
  1771.                 i915_enable_pipestat(dev_priv, pipe,
  1772.                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
  1773.         else
  1774.                 i915_enable_pipestat(dev_priv, pipe,
  1775.                                      PIPE_VBLANK_INTERRUPT_ENABLE);
  1776.  
  1777.         /* maintain vblank delivery even in deep C-states */
  1778.         if (dev_priv->info->gen == 3)
  1779.                 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
  1780.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1781.  
  1782.         return 0;
  1783. }
  1784.  
  1785. static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
  1786. {
  1787.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1788.         unsigned long irqflags;
  1789.         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
  1790.                                                      DE_PIPE_VBLANK_ILK(pipe);
  1791.  
  1792.         if (!i915_pipe_enabled(dev, pipe))
  1793.                 return -EINVAL;
  1794.  
  1795.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1796.         ironlake_enable_display_irq(dev_priv, bit);
  1797.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1798.  
  1799.         return 0;
  1800. }
  1801.  
  1802. static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
  1803. {
  1804.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1805.         unsigned long irqflags;
  1806.         u32 imr;
  1807.  
  1808.         if (!i915_pipe_enabled(dev, pipe))
  1809.                 return -EINVAL;
  1810.  
  1811.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1812.         imr = I915_READ(VLV_IMR);
  1813.         if (pipe == 0)
  1814.                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
  1815.         else
  1816.                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
  1817.         I915_WRITE(VLV_IMR, imr);
  1818.         i915_enable_pipestat(dev_priv, pipe,
  1819.                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
  1820.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1821.  
  1822.         return 0;
  1823. }
  1824.  
  1825. /* Called from drm generic code, passed 'crtc' which
  1826.  * we use as a pipe index
  1827.  */
  1828. static void i915_disable_vblank(struct drm_device *dev, int pipe)
  1829. {
  1830.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1831.         unsigned long irqflags;
  1832.  
  1833.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1834.         if (dev_priv->info->gen == 3)
  1835.                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
  1836.  
  1837.         i915_disable_pipestat(dev_priv, pipe,
  1838.                               PIPE_VBLANK_INTERRUPT_ENABLE |
  1839.                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
  1840.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1841. }
  1842.  
  1843. static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
  1844. {
  1845.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1846.         unsigned long irqflags;
  1847.         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
  1848.                                                      DE_PIPE_VBLANK_ILK(pipe);
  1849.  
  1850.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1851.         ironlake_disable_display_irq(dev_priv, bit);
  1852.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1853. }
  1854.  
  1855. static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
  1856. {
  1857.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  1858.         unsigned long irqflags;
  1859.         u32 imr;
  1860.  
  1861.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  1862.         i915_disable_pipestat(dev_priv, pipe,
  1863.                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
  1864.         imr = I915_READ(VLV_IMR);
  1865.         if (pipe == 0)
  1866.                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
  1867.         else
  1868.                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
  1869.         I915_WRITE(VLV_IMR, imr);
  1870.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1871. }
  1872.  
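/*
 * Hangcheck helpers: ring_last_seqno() returns the seqno of the most
 * recently submitted request, and ring_idle() reports whether the ring
 * has completed everything submitted to it.
 */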
  1873. static u32
  1874. ring_last_seqno(struct intel_ring_buffer *ring)
  1875. {
  1876.         return list_entry(ring->request_list.prev,
  1877.                           struct drm_i915_gem_request, list)->seqno;
  1878. }
  1879.  
  1880. static bool
  1881. ring_idle(struct intel_ring_buffer *ring, u32 seqno)
  1882. {
  1883.         return (list_empty(&ring->request_list) ||
  1884.                 i915_seqno_passed(seqno, ring_last_seqno(ring)));
  1885. }
  1886.  
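/*
 * If a ring is blocked on MI_SEMAPHORE_MBOX, IPEHR holds the semaphore
 * command. Scan backwards from ACTHD for the matching dword, recover the
 * seqno being waited for, and return the ring expected to signal it.
 */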
  1887. static struct intel_ring_buffer *
  1888. semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
  1889. {
  1890.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  1891.         u32 cmd, ipehr, acthd, acthd_min;
  1892.  
  1893.         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
  1894.         if ((ipehr & ~(0x3 << 16)) !=
  1895.             (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
  1896.                 return NULL;
  1897.  
  1898.         /* ACTHD is likely pointing to the dword after the actual command,
  1899.          * so scan backwards until we find the MBOX.
  1900.          */
  1901.         acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
  1902.         acthd_min = max((int)acthd - 3 * 4, 0);
  1903.         do {
  1904.                 cmd = ioread32(ring->virtual_start + acthd);
  1905.                 if (cmd == ipehr)
  1906.                         break;
  1907.  
  1908.                 acthd -= 4;
  1909.                 if (acthd < acthd_min)
  1910.                         return NULL;
  1911.         } while (1);
  1912.  
  1913.         *seqno = ioread32(ring->virtual_start + acthd + 4) + 1;
  1914.         return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
  1915. }
  1916.  
  1917. static int semaphore_passed(struct intel_ring_buffer *ring)
  1918. {
  1919.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  1920.         struct intel_ring_buffer *signaller;
  1921.         u32 seqno, ctl;
  1922.  
  1923.         ring->hangcheck.deadlock = true;
  1924.  
  1925.         signaller = semaphore_waits_for(ring, &seqno);
  1926.         if (signaller == NULL || signaller->hangcheck.deadlock)
  1927.                 return -1;
  1928.  
  1929.         /* cursory check for an unkickable deadlock */
  1930.         ctl = I915_READ_CTL(signaller);
  1931.         if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
  1932.                 return -1;
  1933.  
  1934.         return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
  1935. }
  1936.  
  1937. static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
  1938. {
  1939.         struct intel_ring_buffer *ring;
  1940.         int i;
  1941.  
  1942.         for_each_ring(ring, dev_priv, i)
  1943.                 ring->hangcheck.deadlock = false;
  1944. }
  1945.  
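/*
 * Classify a ring whose seqno has not advanced: ACTIVE if ACTHD is still
 * moving, KICKable if it is stuck on WAIT_FOR_EVENT or on a semaphore
 * that has already been signalled, WAITing on a legitimate semaphore,
 * and HUNG otherwise.
 */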
  1946. static enum intel_ring_hangcheck_action
  1947. ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
  1948. {
  1949.         struct drm_device *dev = ring->dev;
  1950.         struct drm_i915_private *dev_priv = dev->dev_private;
  1951.         u32 tmp;
  1952.  
  1953.         if (ring->hangcheck.acthd != acthd)
  1954.                 return HANGCHECK_ACTIVE;
  1955.  
  1956.         if (IS_GEN2(dev))
  1957.                 return HANGCHECK_HUNG;
  1958.  
  1959.         /* Is the chip hanging on a WAIT_FOR_EVENT?
  1960.          * If so we can simply poke the RB_WAIT bit
  1961.          * and break the hang. This should work on
  1962.          * all but the second generation chipsets.
  1963.          */
  1964.         tmp = I915_READ_CTL(ring);
  1965.         if (tmp & RING_WAIT) {
  1966.                 DRM_ERROR("Kicking stuck wait on %s\n",
  1967.                           ring->name);
  1968.                 I915_WRITE_CTL(ring, tmp);
  1969.                 return HANGCHECK_KICK;
  1970.         }
  1971.  
  1972.         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
  1973.                 switch (semaphore_passed(ring)) {
  1974.                 default:
  1975.                         return HANGCHECK_HUNG;
  1976.                 case 1:
  1977.                         DRM_ERROR("Kicking stuck semaphore on %s\n",
  1978.                                   ring->name);
  1979.                         I915_WRITE_CTL(ring, tmp);
  1980.                         return HANGCHECK_KICK;
  1981.                 case 0:
  1982.                         return HANGCHECK_WAIT;
  1983.                 }
  1984.         }
  1985.  
  1986.         return HANGCHECK_HUNG;
  1987. }
  1988.  
  1989. /**
  1990.  * This is called when the chip hasn't reported back with completed
  1991.  * batchbuffers in a long time. We keep track of seqno progress per
  1992.  * ring, and if there is no progress the hangcheck score for that ring
  1993.  * is increased. Further, acthd is inspected to see if the ring is
  1994.  * stuck. In the stuck case we kick the ring. If we see no progress on
  1995.  * three subsequent calls we assume the chip is wedged and reset it.
  1996.  */
  1997. static void i915_hangcheck_elapsed(unsigned long data)
  1998. {
  1999.         struct drm_device *dev = (struct drm_device *)data;
  2000.         drm_i915_private_t *dev_priv = dev->dev_private;
  2001.         struct intel_ring_buffer *ring;
  2002.         int i;
  2003.         int busy_count = 0, rings_hung = 0;
  2004.         bool stuck[I915_NUM_RINGS] = { 0 };
  2005. #define BUSY 1
  2006. #define KICK 5
  2007. #define HUNG 20
  2008. #define FIRE 30
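/*
 * Per-tick scoring: a busy-but-progressing ring adds BUSY, a kicked ring
 * adds KICK, a hung ring adds HUNG; once a ring's score exceeds FIRE it
 * is reported as hung in the loop at the end of this function.
 */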
  2009.  
  2010.         if (!i915_enable_hangcheck)
  2011.                 return;
  2012.  
  2013.         for_each_ring(ring, dev_priv, i) {
  2014.                 u32 seqno, acthd;
  2015.                 bool busy = true;
  2016.  
  2017.                 semaphore_clear_deadlocks(dev_priv);
  2018.  
  2019.                 seqno = ring->get_seqno(ring, false);
  2020.                 acthd = intel_ring_get_active_head(ring);
  2021.  
  2022.                 if (ring->hangcheck.seqno == seqno) {
  2023.                         if (ring_idle(ring, seqno)) {
  2024. //               if (waitqueue_active(&ring->irq_queue)) {
  2025.                                         /* Issue a wake-up to catch stuck h/w. */
  2026. //                   DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
  2027. //                         ring->name);
  2028. //                   wake_up_all(&ring->irq_queue);
  2029. //               } else
  2030.                                 busy = false;
  2031.                         } else {
  2032.                                 /* We always increment the hangcheck score
  2033.                                  * if the ring is busy and still processing
  2034.                                  * the same request, so that no single request
  2035.                                  * can run indefinitely (such as a chain of
  2036.                                  * batches). The only time we do not increment
  2037.                                  * the hangcheck score on this ring is if this
  2038.                                  * ring is in a legitimate wait for another
  2039.                                  * ring. In that case the waiting ring is a
  2040.                                  * victim and we want to be sure we catch the
  2041.                                  * right culprit. Then every time we do kick
  2042.                                  * the ring, add a small increment to the
  2043.                                  * score so that we can catch a batch that is
  2044.                                  * being repeatedly kicked and so responsible
  2045.                                  * for stalling the machine.
  2046.                                  */
  2047.                                 ring->hangcheck.action = ring_stuck(ring,
  2048.                                                                     acthd);
  2049.  
  2050.                                 switch (ring->hangcheck.action) {
  2051.                                 case HANGCHECK_WAIT:
  2052.                                         break;
  2053.                                 case HANGCHECK_ACTIVE:
  2054.                                         ring->hangcheck.score += BUSY;
  2055.                                         break;
  2056.                                 case HANGCHECK_KICK:
  2057.                                         ring->hangcheck.score += KICK;
  2058.                                         break;
  2059.                                 case HANGCHECK_HUNG:
  2060.                                         ring->hangcheck.score += HUNG;
  2061.                                         stuck[i] = true;
  2062.                                         break;
  2063.                                 }
  2064.                         }
  2065.                 } else {
  2066.                         /* Gradually reduce the count so that we catch DoS
  2067.                          * attempts across multiple batches.
  2068.                          */
  2069.                         if (ring->hangcheck.score > 0)
  2070.                                 ring->hangcheck.score--;
  2071.                 }
  2072.  
  2073.                 ring->hangcheck.seqno = seqno;
  2074.                 ring->hangcheck.acthd = acthd;
  2075.                 busy_count += busy;
  2076.         }
  2077.  
  2078.         for_each_ring(ring, dev_priv, i) {
  2079.                 if (ring->hangcheck.score > FIRE) {
  2080.                         DRM_INFO("%s on %s\n",
  2081.                                   stuck[i] ? "stuck" : "no progress",
  2082.                                   ring->name);
  2083.                         rings_hung++;
  2084.                 }
  2085.         }
  2086.  
  2087. //   if (rings_hung)
  2088. //       return i915_handle_error(dev, true);
  2089.  
  2090. }
  2091.  
  2092. static void ibx_irq_preinstall(struct drm_device *dev)
  2093. {
  2094.         struct drm_i915_private *dev_priv = dev->dev_private;
  2095.  
  2096.         if (HAS_PCH_NOP(dev))
  2097.                 return;
  2098.  
  2099.         /* south display irq */
  2100.         I915_WRITE(SDEIMR, 0xffffffff);
  2101.         /*
  2102.          * SDEIER is also touched by the interrupt handler to work around missed
  2103.          * PCH interrupts. Hence we can't update it after the interrupt handler
  2104.          * is enabled - instead we unconditionally enable all PCH interrupt
  2105.          * sources here, but then only unmask them as needed with SDEIMR.
  2106.          */
  2107.         I915_WRITE(SDEIER, 0xffffffff);
  2108.         POSTING_READ(SDEIER);
  2109. }
  2110.  
  2111. static void gen5_gt_irq_preinstall(struct drm_device *dev)
  2112. {
  2113.         struct drm_i915_private *dev_priv = dev->dev_private;
  2114.  
  2115.         /* and GT */
  2116.         I915_WRITE(GTIMR, 0xffffffff);
  2117.         I915_WRITE(GTIER, 0x0);
  2118.         POSTING_READ(GTIER);
  2119.  
  2120.         if (INTEL_INFO(dev)->gen >= 6) {
  2121.                 /* and PM */
  2122.                 I915_WRITE(GEN6_PMIMR, 0xffffffff);
  2123.                 I915_WRITE(GEN6_PMIER, 0x0);
  2124.                 POSTING_READ(GEN6_PMIER);
  2125.         }
  2126. }
  2127.  
  2128. /* drm_dma.h hooks
  2129. */
  2130. static void ironlake_irq_preinstall(struct drm_device *dev)
  2131. {
  2132.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2133.  
  2134.         atomic_set(&dev_priv->irq_received, 0);
  2135.  
  2136.         I915_WRITE(HWSTAM, 0xeffe);
  2137.  
  2138.         I915_WRITE(DEIMR, 0xffffffff);
  2139.         I915_WRITE(DEIER, 0x0);
  2140.         POSTING_READ(DEIER);
  2141.  
  2142.         gen5_gt_irq_preinstall(dev);
  2143.  
  2144.         ibx_irq_preinstall(dev);
  2145. }
  2146.  
  2147. static void valleyview_irq_preinstall(struct drm_device *dev)
  2148. {
  2149.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2150.         int pipe;
  2151.  
  2152.         atomic_set(&dev_priv->irq_received, 0);
  2153.  
  2154.         /* VLV magic */
  2155.         I915_WRITE(VLV_IMR, 0);
  2156.         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
  2157.         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
  2158.         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
  2159.  
  2160.         /* and GT */
  2161.         I915_WRITE(GTIIR, I915_READ(GTIIR));
  2162.         I915_WRITE(GTIIR, I915_READ(GTIIR));
  2163.  
  2164.         gen5_gt_irq_preinstall(dev);
  2165.  
  2166.         I915_WRITE(DPINVGTT, 0xff);
  2167.  
  2168.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  2169.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  2170.         for_each_pipe(pipe)
  2171.                 I915_WRITE(PIPESTAT(pipe), 0xffff);
  2172.         I915_WRITE(VLV_IIR, 0xffffffff);
  2173.         I915_WRITE(VLV_IMR, 0xffffffff);
  2174.         I915_WRITE(VLV_IER, 0x0);
  2175.         POSTING_READ(VLV_IER);
  2176. }
  2177.  
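/*
 * Build the PCH hotplug interrupt mask from the encoders whose hpd_pin
 * is currently marked HPD_ENABLED, then program the per-port hotplug
 * enable and pulse-duration fields.
 */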
  2178. static void ibx_hpd_irq_setup(struct drm_device *dev)
  2179. {
  2180.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2181.         struct drm_mode_config *mode_config = &dev->mode_config;
  2182.         struct intel_encoder *intel_encoder;
  2183.         u32 hotplug_irqs, hotplug, enabled_irqs = 0;
  2184.  
  2185.         if (HAS_PCH_IBX(dev)) {
  2186.                 hotplug_irqs = SDE_HOTPLUG_MASK;
  2187.                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
  2188.                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
  2189.                                 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
  2190.         } else {
  2191.                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
  2192.                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
  2193.                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
  2194.                                 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
  2195.         }
  2196.  
  2197.         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
  2198.  
  2199.         /*
  2200.          * Enable digital hotplug on the PCH, and configure the DP short pulse
  2201.          * duration to 2ms (which is the minimum in the Display Port spec).
  2202.          *
  2203.          * This register is the same on all known PCH chips.
  2204.          */
  2205.         hotplug = I915_READ(PCH_PORT_HOTPLUG);
  2206.         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
  2207.         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
  2208.         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
  2209.         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
  2210.         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  2211. }
  2212.  
  2213. static void ibx_irq_postinstall(struct drm_device *dev)
  2214. {
  2215.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2216.         u32 mask;
  2217.  
  2218.         if (HAS_PCH_NOP(dev))
  2219.                 return;
  2220.  
  2221.         if (HAS_PCH_IBX(dev)) {
  2222.                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
  2223.                        SDE_TRANSA_FIFO_UNDER | SDE_POISON;
  2224.         } else {
  2225.                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
  2226.  
  2227.                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
  2228.         }
  2229.  
  2230.         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
  2231.         I915_WRITE(SDEIMR, ~mask);
  2232. }
  2233.  
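/*
 * Program the GT and PM interrupt registers using the usual sequence:
 * ack any stale bits in IIR, set the mask in IMR, then enable the
 * selected sources in IER and post the write.
 */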
  2234. static void gen5_gt_irq_postinstall(struct drm_device *dev)
  2235. {
  2236.         struct drm_i915_private *dev_priv = dev->dev_private;
  2237.         u32 pm_irqs, gt_irqs;
  2238.  
  2239.         pm_irqs = gt_irqs = 0;
  2240.  
  2241.         dev_priv->gt_irq_mask = ~0;
  2242.         if (HAS_L3_GPU_CACHE(dev)) {
  2243.                 /* L3 parity interrupt is always unmasked. */
  2244.                 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
  2245.                 gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
  2246.         }
  2247.  
  2248.         gt_irqs |= GT_RENDER_USER_INTERRUPT;
  2249.         if (IS_GEN5(dev)) {
  2250.                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
  2251.                            ILK_BSD_USER_INTERRUPT;
  2252.         } else {
  2253.                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
  2254.         }
  2255.  
  2256.         I915_WRITE(GTIIR, I915_READ(GTIIR));
  2257.         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
  2258.         I915_WRITE(GTIER, gt_irqs);
  2259.         POSTING_READ(GTIER);
  2260.  
  2261.         if (INTEL_INFO(dev)->gen >= 6) {
  2262.                 pm_irqs |= GEN6_PM_RPS_EVENTS;
  2263.  
  2264.                 if (HAS_VEBOX(dev))
  2265.                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
  2266.  
  2267.                 dev_priv->pm_irq_mask = 0xffffffff;
  2268.                 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
  2269.                 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
  2270.                 I915_WRITE(GEN6_PMIER, pm_irqs);
  2271.                 POSTING_READ(GEN6_PMIER);
  2272.         }
  2273. }
  2274.  
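/*
 * Note that display_mask is both enabled in DEIER and unmasked in DEIMR,
 * while the vblank bits in extra_mask are only enabled in DEIER; they
 * stay masked in DEIMR until ironlake_enable_vblank() unmasks them.
 */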
  2275. static int ironlake_irq_postinstall(struct drm_device *dev)
  2276. {
  2277.         unsigned long irqflags;
  2278.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2279.         u32 display_mask, extra_mask;
  2280.  
  2281.         if (INTEL_INFO(dev)->gen >= 7) {
  2282.                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
  2283.                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
  2284.                                 DE_PLANEB_FLIP_DONE_IVB |
  2285.                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
  2286.                                 DE_ERR_INT_IVB);
  2287.                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
  2288.                               DE_PIPEA_VBLANK_IVB);
  2289.  
  2290.                 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
  2291.         } else {
  2292.                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
  2293.                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
  2294.                                 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
  2295.                                 DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
  2296.                 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
  2297.         }
  2298.  
  2299.         dev_priv->irq_mask = ~display_mask;
  2300.  
  2301.         /* should always be able to generate irqs */
  2302.         I915_WRITE(DEIIR, I915_READ(DEIIR));
  2303.         I915_WRITE(DEIMR, dev_priv->irq_mask);
  2304.         I915_WRITE(DEIER, display_mask | extra_mask);
  2305.         POSTING_READ(DEIER);
  2306.  
  2307.         gen5_gt_irq_postinstall(dev);
  2308.  
  2309.         ibx_irq_postinstall(dev);
  2310.  
  2311.         if (IS_IRONLAKE_M(dev)) {
  2312.                 /* Enable PCU event interrupts
  2313.                  *
  2314.                  * spinlocking not required here for correctness since interrupt
  2315.                  * setup is guaranteed to run in single-threaded context. But we
  2316.                  * need it to make the assert_spin_locked happy. */
  2317.                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2318.                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
  2319.                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2320.         }
  2321.  
  2322.         return 0;
  2323. }
  2324.  
  2325. static int valleyview_irq_postinstall(struct drm_device *dev)
  2326. {
  2327.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2328.         u32 enable_mask;
  2329.         u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
  2330.         unsigned long irqflags;
  2331.  
  2332.         enable_mask = I915_DISPLAY_PORT_INTERRUPT;
  2333.         enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2334.                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
  2335.                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2336.                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
  2337.  
  2338.         /*
  2339.          * Leave vblank interrupts masked initially.  enable/disable will
  2340.          * toggle them based on usage.
  2341.          */
  2342.         dev_priv->irq_mask = (~enable_mask) |
  2343.                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
  2344.                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
  2345.  
  2346.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  2347.         POSTING_READ(PORT_HOTPLUG_EN);
  2348.  
  2349.         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
  2350.         I915_WRITE(VLV_IER, enable_mask);
  2351.         I915_WRITE(VLV_IIR, 0xffffffff);
  2352.         I915_WRITE(PIPESTAT(0), 0xffff);
  2353.         I915_WRITE(PIPESTAT(1), 0xffff);
  2354.         POSTING_READ(VLV_IER);
  2355.  
  2356.         /* Interrupt setup is already guaranteed to be single-threaded; this is
  2357.          * just to make the assert_spin_locked check happy. */
  2358.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2359.         i915_enable_pipestat(dev_priv, 0, pipestat_enable);
  2360.         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
  2361.         i915_enable_pipestat(dev_priv, 1, pipestat_enable);
  2362.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2363.  
  2364.         I915_WRITE(VLV_IIR, 0xffffffff);
  2365.         I915_WRITE(VLV_IIR, 0xffffffff);
  2366.  
  2367.         gen5_gt_irq_postinstall(dev);
  2368.  
  2369.         /* ack & enable invalid PTE error interrupts */
  2370. #if 0 /* FIXME: add support to irq handler for checking these bits */
  2371.         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
  2372.         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
  2373. #endif
  2374.  
  2375.         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
  2376.  
  2377.         return 0;
  2378. }
  2379.  
  2380. static void valleyview_irq_uninstall(struct drm_device *dev)
  2381. {
  2382.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2383.         int pipe;
  2384.  
  2385.         if (!dev_priv)
  2386.                 return;
  2387.  
  2388.         del_timer_sync(&dev_priv->hotplug_reenable_timer);
  2389.  
  2390.         for_each_pipe(pipe)
  2391.                 I915_WRITE(PIPESTAT(pipe), 0xffff);
  2392.  
  2393.         I915_WRITE(HWSTAM, 0xffffffff);
  2394.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  2395.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  2396.         for_each_pipe(pipe)
  2397.                 I915_WRITE(PIPESTAT(pipe), 0xffff);
  2398.         I915_WRITE(VLV_IIR, 0xffffffff);
  2399.         I915_WRITE(VLV_IMR, 0xffffffff);
  2400.         I915_WRITE(VLV_IER, 0x0);
  2401.         POSTING_READ(VLV_IER);
  2402. }
  2403.  
  2404. static void ironlake_irq_uninstall(struct drm_device *dev)
  2405. {
  2406.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2407.  
  2408.         if (!dev_priv)
  2409.                 return;
  2410.  
  2411.         del_timer_sync(&dev_priv->hotplug_reenable_timer);
  2412.  
  2413.         I915_WRITE(HWSTAM, 0xffffffff);
  2414.  
  2415.         I915_WRITE(DEIMR, 0xffffffff);
  2416.         I915_WRITE(DEIER, 0x0);
  2417.         I915_WRITE(DEIIR, I915_READ(DEIIR));
  2418.         if (IS_GEN7(dev))
  2419.                 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
  2420.  
  2421.         I915_WRITE(GTIMR, 0xffffffff);
  2422.         I915_WRITE(GTIER, 0x0);
  2423.         I915_WRITE(GTIIR, I915_READ(GTIIR));
  2424.  
  2425.         if (HAS_PCH_NOP(dev))
  2426.                 return;
  2427.  
  2428.         I915_WRITE(SDEIMR, 0xffffffff);
  2429.         I915_WRITE(SDEIER, 0x0);
  2430.         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
  2431.         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
  2432.                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
  2433. }
  2434.  
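/* The i8xx (gen2) interrupt code below is compiled out in this port. */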
  2435. #if 0
  2436.  
  2437. static void i8xx_irq_preinstall(struct drm_device * dev)
  2438. {
  2439.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2440.         int pipe;
  2441.  
  2442.         atomic_set(&dev_priv->irq_received, 0);
  2443.  
  2444.         for_each_pipe(pipe)
  2445.                 I915_WRITE(PIPESTAT(pipe), 0);
  2446.         I915_WRITE16(IMR, 0xffff);
  2447.         I915_WRITE16(IER, 0x0);
  2448.         POSTING_READ16(IER);
  2449. }
  2450.  
  2451. static int i8xx_irq_postinstall(struct drm_device *dev)
  2452. {
  2453.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2454.  
  2455.         I915_WRITE16(EMR,
  2456.                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  2457.  
  2458.         /* Unmask the interrupts that we always want on. */
  2459.         dev_priv->irq_mask =
  2460.                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2461.                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2462.                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2463.                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  2464.                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  2465.         I915_WRITE16(IMR, dev_priv->irq_mask);
  2466.  
  2467.         I915_WRITE16(IER,
  2468.                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2469.                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2470.                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
  2471.                      I915_USER_INTERRUPT);
  2472.         POSTING_READ16(IER);
  2473.  
  2474.         return 0;
  2475. }
  2476.  
  2477. /*
  2478.  * Returns true when a page flip has completed.
  2479.  */
  2480. static bool i8xx_handle_vblank(struct drm_device *dev,
  2481.                                int pipe, u16 iir)
  2482. {
  2483.         drm_i915_private_t *dev_priv = dev->dev_private;
  2484.         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
  2485.  
  2486. //   if (!drm_handle_vblank(dev, pipe))
  2487.         return false; /* vblank delivery is stubbed out in this port */
  2488.  
  2489.         if ((iir & flip_pending) == 0)
  2490.                 return false;
  2491.  
  2492. //   intel_prepare_page_flip(dev, pipe);
  2493.  
  2494.         /* We detect FlipDone by looking for the change in PendingFlip from '1'
  2495.          * to '0' on the following vblank, i.e. IIR has the Pendingflip
  2496.          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
  2497.          * the flip is completed (no longer pending). Since this doesn't raise
  2498.          * an interrupt per se, we watch for the change at vblank.
  2499.          */
  2500.         if (I915_READ16(ISR) & flip_pending)
  2501.                 return false;
  2502.  
  2503.         intel_finish_page_flip(dev, pipe);
  2504.  
  2505.         return true;
  2506. }
  2507.  
  2508. static irqreturn_t i8xx_irq_handler(int irq, void *arg)
  2509. {
  2510.         struct drm_device *dev = (struct drm_device *) arg;
  2511.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2512.         u16 iir, new_iir;
  2513.         u32 pipe_stats[2];
  2514.         unsigned long irqflags;
  2515.         int pipe;
  2516.         u16 flip_mask =
  2517.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2518.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  2519.  
  2520.         atomic_inc(&dev_priv->irq_received);
  2521.  
  2522.         iir = I915_READ16(IIR);
  2523.         if (iir == 0)
  2524.                 return IRQ_NONE;
  2525.  
  2526.         while (iir & ~flip_mask) {
  2527.                 /* Can't rely on pipestat interrupt bit in iir as it might
  2528.                  * have been cleared after the pipestat interrupt was received.
  2529.                  * It doesn't set the bit in iir again, but it still produces
  2530.                  * interrupts (for non-MSI).
  2531.                  */
  2532.                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2533.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  2534.                         i915_handle_error(dev, false);
  2535.  
  2536.                 for_each_pipe(pipe) {
  2537.                         int reg = PIPESTAT(pipe);
  2538.                         pipe_stats[pipe] = I915_READ(reg);
  2539.  
  2540.                         /*
  2541.                          * Clear the PIPE*STAT regs before the IIR
  2542.                          */
  2543.                         if (pipe_stats[pipe] & 0x8000ffff) {
  2544.                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  2545.                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
  2546.                                                          pipe_name(pipe));
  2547.                                 I915_WRITE(reg, pipe_stats[pipe]);
  2548.                         }
  2549.                 }
  2550.                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2551.  
  2552.                 I915_WRITE16(IIR, iir & ~flip_mask);
  2553.                 new_iir = I915_READ16(IIR); /* Flush posted writes */
  2554.  
  2555.                 i915_update_dri1_breadcrumb(dev);
  2556.  
  2557.                 if (iir & I915_USER_INTERRUPT)
  2558.                         notify_ring(dev, &dev_priv->ring[RCS]);
  2559.  
  2560.                 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
  2561.                     i8xx_handle_vblank(dev, 0, iir))
  2562.                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
  2563.  
  2564.                 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
  2565.                     i8xx_handle_vblank(dev, 1, iir))
  2566.                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
  2567.  
  2568.                 iir = new_iir;
  2569.         }
  2570.  
  2571.         return IRQ_HANDLED;
  2572. }
  2573.  
  2574. static void i8xx_irq_uninstall(struct drm_device * dev)
  2575. {
  2576.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2577.         int pipe;
  2578.  
  2579.         for_each_pipe(pipe) {
  2580.                 /* Clear enable bits; then clear status bits */
  2581.                 I915_WRITE(PIPESTAT(pipe), 0);
  2582.                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
  2583.         }
  2584.         I915_WRITE16(IMR, 0xffff);
  2585.         I915_WRITE16(IER, 0x0);
  2586.         I915_WRITE16(IIR, I915_READ16(IIR));
  2587. }
  2588.  
  2589. #endif
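
/* Editor's note (hedged): PIPESTAT broadly keeps interrupt enable bits in
 * its high half and write-1-to-clear status bits in its low half (the
 * 0x8000ffff mask used by the handlers covers the status bits plus the FIFO
 * underrun status at bit 31). The "clear enables, then ack status" pattern
 * used by the uninstall paths in this file could be factored as this
 * hypothetical helper:
 */
static inline void pipestat_disable_and_ack(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        I915_WRITE(PIPESTAT(pipe), 0);          /* drop the enable bits   */
        I915_WRITE(PIPESTAT(pipe),              /* write 1s back to ack   */
                   I915_READ(PIPESTAT(pipe)));  /* any latched status     */
}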
  2590.  
  2591. static void i915_irq_preinstall(struct drm_device * dev)
  2592. {
  2593.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2594.         int pipe;
  2595.  
  2596.         atomic_set(&dev_priv->irq_received, 0);
  2597.  
  2598.         if (I915_HAS_HOTPLUG(dev)) {
  2599.                 I915_WRITE(PORT_HOTPLUG_EN, 0);
  2600.                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  2601.         }
  2602.  
  2603.         I915_WRITE16(HWSTAM, 0xeffe);
  2604.         for_each_pipe(pipe)
  2605.                 I915_WRITE(PIPESTAT(pipe), 0);
  2606.         I915_WRITE(IMR, 0xffffffff);
  2607.         I915_WRITE(IER, 0x0);
  2608.         POSTING_READ(IER);
  2609. }
  2610.  
  2611. static int i915_irq_postinstall(struct drm_device *dev)
  2612. {
  2613.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2614.         u32 enable_mask;
  2615.  
  2616.         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  2617.  
  2618.         /* Unmask the interrupts that we always want on. */
  2619.         dev_priv->irq_mask =
  2620.                 ~(I915_ASLE_INTERRUPT |
  2621.                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2622.                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2623.                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2624.                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  2625.                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  2626.  
  2627.         enable_mask =
  2628.                 I915_ASLE_INTERRUPT |
  2629.                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2630.                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2631.                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
  2632.                 I915_USER_INTERRUPT;
  2633.  
  2634.         if (I915_HAS_HOTPLUG(dev)) {
  2635.                 I915_WRITE(PORT_HOTPLUG_EN, 0);
  2636.                 POSTING_READ(PORT_HOTPLUG_EN);
  2637.  
  2638.                 /* Enable in IER... */
  2639.                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
  2640.                 /* and unmask in IMR */
  2641.                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
  2642.         }
  2643.  
  2644.         I915_WRITE(IMR, dev_priv->irq_mask);
  2645.         I915_WRITE(IER, enable_mask);
  2646.         POSTING_READ(IER);
  2647.  
  2648.         i915_enable_asle_pipestat(dev);
  2649.  
  2650.         return 0;
  2651. }
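
/* Editor's note on the IMR/IER split above (hedged, an editor's reading
 * rather than bspec): IMR masks which events get reported in IIR, IER
 * selects which reported events actually raise a CPU interrupt, and ISR
 * always shows live status. That is why the flip-pending bits are unmasked
 * in IMR yet left out of IER here: the handler can observe them in IIR/ISR
 * without them generating interrupts of their own. A hypothetical helper
 * making the split explicit:
 */
static inline void unmask_event_sketch(struct drm_device *dev, u32 bit,
                                       bool irq_on)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        dev_priv->irq_mask &= ~bit;             /* report the event in IIR */
        I915_WRITE(IMR, dev_priv->irq_mask);
        if (irq_on)                             /* optionally interrupt too */
                I915_WRITE(IER, I915_READ(IER) | bit);
        POSTING_READ(IER);
}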
  2652.  
  2653. /*
  2654.  * Returns true when a page flip has completed.
  2655.  */
  2656. static bool i915_handle_vblank(struct drm_device *dev,
  2657.                                int plane, int pipe, u32 iir)
  2658. {
  2659.         drm_i915_private_t *dev_priv = dev->dev_private;
  2660.         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
  2661.  
        /* As in i8xx_handle_vblank(): drm_handle_vblank() is commented out in
         * this port, so the early return is unconditional and flip completion
         * is effectively stubbed out. Upstream logic kept for reference. */
        return false;

        if ((iir & flip_pending) == 0)
                return false;

        /* intel_prepare_page_flip(dev, plane); -- not wired up in this port */
  2669.  
  2670.         /* We detect FlipDone by looking for the change in PendingFlip from '1'
         * to '0' on the following vblank, i.e. IIR has the PendingFlip
  2672.          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
  2673.          * the flip is completed (no longer pending). Since this doesn't raise
  2674.          * an interrupt per se, we watch for the change at vblank.
  2675.          */
  2676.         if (I915_READ(ISR) & flip_pending)
  2677.                 return false;
  2678.  
  2679.         intel_finish_page_flip(dev, pipe);
  2680.  
  2681.         return true;
  2682. }
  2683.  
  2684. static irqreturn_t i915_irq_handler(int irq, void *arg)
  2685. {
  2686.         struct drm_device *dev = (struct drm_device *) arg;
  2687.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2688.         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
  2689.         unsigned long irqflags;
  2690.         u32 flip_mask =
  2691.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2692.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  2693.         int pipe, ret = IRQ_NONE;
  2694.  
  2695.         atomic_inc(&dev_priv->irq_received);
  2696.  
  2697.         iir = I915_READ(IIR);
  2698.         do {
  2699.                 bool irq_received = (iir & ~flip_mask) != 0;
  2700.                 bool blc_event = false;
  2701.  
  2702.                 /* Can't rely on pipestat interrupt bit in iir as it might
  2703.                  * have been cleared after the pipestat interrupt was received.
  2704.                  * It doesn't set the bit in iir again, but it still produces
  2705.                  * interrupts (for non-MSI).
  2706.                  */
  2707.                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2708.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  2709.                         i915_handle_error(dev, false);
  2710.  
  2711.                 for_each_pipe(pipe) {
  2712.                         int reg = PIPESTAT(pipe);
  2713.                         pipe_stats[pipe] = I915_READ(reg);
  2714.  
  2715.                         /* Clear the PIPE*STAT regs before the IIR */
  2716.                         if (pipe_stats[pipe] & 0x8000ffff) {
  2717.                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  2718.                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
  2719.                                                          pipe_name(pipe));
  2720.                                 I915_WRITE(reg, pipe_stats[pipe]);
  2721.                                 irq_received = true;
  2722.                         }
  2723.                 }
  2724.                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2725.  
  2726.                 if (!irq_received)
  2727.                         break;
  2728.  
  2729.                 /* Consume port.  Then clear IIR or we'll miss events */
  2730.                 if ((I915_HAS_HOTPLUG(dev)) &&
  2731.                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
  2732.                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
  2733.                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
  2734.  
  2735.                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
  2736.                                   hotplug_status);
  2737.  
  2738.                         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
  2739.  
  2740.                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
  2741.                         POSTING_READ(PORT_HOTPLUG_STAT);
  2742.                 }
  2743.  
  2744.                 I915_WRITE(IIR, iir & ~flip_mask);
  2745.                 new_iir = I915_READ(IIR); /* Flush posted writes */
  2746.  
  2747.                 if (iir & I915_USER_INTERRUPT)
  2748.                         notify_ring(dev, &dev_priv->ring[RCS]);
  2749.  
  2750.                 for_each_pipe(pipe) {
  2751.                         int plane = pipe;
  2752.                         if (IS_MOBILE(dev))
  2753.                                 plane = !plane;
  2754.  
  2755.                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
  2756.                             i915_handle_vblank(dev, plane, pipe, iir))
  2757.                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
  2758.  
  2759.                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
  2760.                                 blc_event = true;
  2761.                 }
  2762.  
  2763.                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
  2764.                         intel_opregion_asle_intr(dev);
  2765.  
  2766.                 /* With MSI, interrupts are only generated when iir
  2767.                  * transitions from zero to nonzero.  If another bit got
  2768.                  * set while we were handling the existing iir bits, then
  2769.                  * we would never get another interrupt.
  2770.                  *
  2771.                  * This is fine on non-MSI as well, as if we hit this path
  2772.                  * we avoid exiting the interrupt handler only to generate
  2773.                  * another one.
  2774.                  *
  2775.                  * Note that for MSI this could cause a stray interrupt report
  2776.                  * if an interrupt landed in the time between writing IIR and
  2777.                  * the posting read.  This should be rare enough to never
  2778.                  * trigger the 99% of 100,000 interrupts test for disabling
  2779.                  * stray interrupts.
  2780.                  */
  2781.                 ret = IRQ_HANDLED;
  2782.                 iir = new_iir;
  2783.         } while (iir & ~flip_mask);
  2784.  
  2785.         i915_update_dri1_breadcrumb(dev);
  2786.  
  2787.         return ret;
  2788. }
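
/* Editor's sketch of the clear-then-reread pattern that the MSI comment
 * above describes (skeleton only, not the full handler): acking IIR and
 * immediately re-reading it picks up events that landed mid-handler, so MSI
 * never depends on a fresh zero -> nonzero transition that could be lost.
 */
static inline irqreturn_t iir_loop_sketch(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        irqreturn_t ret = IRQ_NONE;
        u32 iir = I915_READ(IIR);

        while (iir) {
                I915_WRITE(IIR, iir);           /* ack what we have seen  */
                /* ... handle the events recorded in this snapshot ... */
                ret = IRQ_HANDLED;
                iir = I915_READ(IIR);           /* pick up late arrivals  */
        }
        return ret;
}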
  2789.  
  2790. static void i915_irq_uninstall(struct drm_device * dev)
  2791. {
  2792.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2793.         int pipe;
  2794.  
  2795.         del_timer_sync(&dev_priv->hotplug_reenable_timer);
  2796.  
  2797.         if (I915_HAS_HOTPLUG(dev)) {
  2798.                 I915_WRITE(PORT_HOTPLUG_EN, 0);
  2799.                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  2800.         }
  2801.  
  2802.         I915_WRITE16(HWSTAM, 0xffff);
  2803.         for_each_pipe(pipe) {
  2804.                 /* Clear enable bits; then clear status bits */
  2805.                 I915_WRITE(PIPESTAT(pipe), 0);
  2806.                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
  2807.         }
  2808.         I915_WRITE(IMR, 0xffffffff);
  2809.         I915_WRITE(IER, 0x0);
  2810.  
  2811.         I915_WRITE(IIR, I915_READ(IIR));
  2812. }
  2813.  
  2814. static void i965_irq_preinstall(struct drm_device * dev)
  2815. {
  2816.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2817.         int pipe;
  2818.  
  2819.         atomic_set(&dev_priv->irq_received, 0);
  2820.  
  2821.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  2822.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  2823.  
  2824.         I915_WRITE(HWSTAM, 0xeffe);
  2825.         for_each_pipe(pipe)
  2826.                 I915_WRITE(PIPESTAT(pipe), 0);
  2827.         I915_WRITE(IMR, 0xffffffff);
  2828.         I915_WRITE(IER, 0x0);
  2829.         POSTING_READ(IER);
  2830. }
  2831.  
  2832. static int i965_irq_postinstall(struct drm_device *dev)
  2833. {
  2834.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2835.         u32 enable_mask;
  2836.         u32 error_mask;
  2837.         unsigned long irqflags;
  2838.  
  2839.         /* Unmask the interrupts that we always want on. */
  2840.         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
  2841.                                I915_DISPLAY_PORT_INTERRUPT |
  2842.                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
  2843.                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
  2844.                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2845.                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
  2846.                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
  2847.  
  2848.         enable_mask = ~dev_priv->irq_mask;
  2849.         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2850.                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
  2851.         enable_mask |= I915_USER_INTERRUPT;
  2852.  
  2853.         if (IS_G4X(dev))
  2854.                 enable_mask |= I915_BSD_USER_INTERRUPT;
  2855.  
  2856.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  2857.          * just to make the assert_spin_locked check happy. */
  2858.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2859.         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
  2860.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2861.  
  2862.         /*
  2863.          * Enable some error detection, note the instruction error mask
  2864.          * bit is reserved, so we leave it masked.
  2865.          */
  2866.         if (IS_G4X(dev)) {
  2867.                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
  2868.                                GM45_ERROR_MEM_PRIV |
  2869.                                GM45_ERROR_CP_PRIV |
  2870.                                I915_ERROR_MEMORY_REFRESH);
  2871.         } else {
  2872.                 error_mask = ~(I915_ERROR_PAGE_TABLE |
  2873.                                I915_ERROR_MEMORY_REFRESH);
  2874.         }
  2875.         I915_WRITE(EMR, error_mask);
  2876.  
  2877.         I915_WRITE(IMR, dev_priv->irq_mask);
  2878.         I915_WRITE(IER, enable_mask);
  2879.         POSTING_READ(IER);
  2880.  
  2881.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  2882.         POSTING_READ(PORT_HOTPLUG_EN);
  2883.  
  2884.         i915_enable_asle_pipestat(dev);
  2885.  
  2886.         return 0;
  2887. }
  2888.  
  2889. static void i915_hpd_irq_setup(struct drm_device *dev)
  2890. {
  2891.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2892.         struct drm_mode_config *mode_config = &dev->mode_config;
  2893.         struct intel_encoder *intel_encoder;
  2894.         u32 hotplug_en;
  2895.  
  2896.         assert_spin_locked(&dev_priv->irq_lock);
  2897.  
  2898.         if (I915_HAS_HOTPLUG(dev)) {
  2899.                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
  2900.                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
                /* Note HDMI and DP share hotplug bits */
                /* enable bits are the same for all generations */
  2903.                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
  2904.                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
  2905.                                 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
  2906.                 /* Programming the CRT detection parameters tends
  2907.                    to generate a spurious hotplug event about three
  2908.                    seconds later.  So just do it once.
  2909.                    */
  2910.                 if (IS_G4X(dev))
  2911.                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
  2912.                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
  2913.                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
  2914.  
                /* Ignore TV since it's buggy */
                I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
  2917.         }
  2918. }
  2919.  
  2920. static irqreturn_t i965_irq_handler(int irq, void *arg)
  2921. {
  2922.         struct drm_device *dev = (struct drm_device *) arg;
  2923.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  2924.         u32 iir, new_iir;
  2925.         u32 pipe_stats[I915_MAX_PIPES];
  2926.         unsigned long irqflags;
  2927.         int irq_received;
  2928.         int ret = IRQ_NONE, pipe;
  2929.         u32 flip_mask =
  2930.                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
  2931.                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  2932.  
  2933.         atomic_inc(&dev_priv->irq_received);
  2934.  
  2935.         iir = I915_READ(IIR);
  2936.  
  2937.         for (;;) {
  2938.                 bool blc_event = false;
  2939.  
  2940.                 irq_received = (iir & ~flip_mask) != 0;
  2941.  
  2942.                 /* Can't rely on pipestat interrupt bit in iir as it might
  2943.                  * have been cleared after the pipestat interrupt was received.
  2944.                  * It doesn't set the bit in iir again, but it still produces
  2945.                  * interrupts (for non-MSI).
  2946.                  */
  2947.                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  2948.                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
  2949.                         i915_handle_error(dev, false);
  2950.  
  2951.                 for_each_pipe(pipe) {
  2952.                         int reg = PIPESTAT(pipe);
  2953.                         pipe_stats[pipe] = I915_READ(reg);
  2954.  
  2955.                         /*
  2956.                          * Clear the PIPE*STAT regs before the IIR
  2957.                          */
  2958.                         if (pipe_stats[pipe] & 0x8000ffff) {
  2959.                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
  2960.                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
  2961.                                                          pipe_name(pipe));
  2962.                                 I915_WRITE(reg, pipe_stats[pipe]);
  2963.                                 irq_received = 1;
  2964.                         }
  2965.                 }
  2966.                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  2967.  
  2968.                 if (!irq_received)
  2969.                         break;
  2970.  
  2971.                 ret = IRQ_HANDLED;
  2972.  
  2973.                 /* Consume port.  Then clear IIR or we'll miss events */
  2974.                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
  2975.                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
  2976.                         u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
  2977.                                                                   HOTPLUG_INT_STATUS_G4X :
  2978.                                                                   HOTPLUG_INT_STATUS_I915);
  2979.  
  2980.                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
  2981.                                   hotplug_status);
  2982.  
  2983.                         intel_hpd_irq_handler(dev, hotplug_trigger,
  2984.                                               IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
  2985.  
  2986.                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
  2987.                         I915_READ(PORT_HOTPLUG_STAT);
  2988.                 }
  2989.  
  2990.                 I915_WRITE(IIR, iir & ~flip_mask);
  2991.                 new_iir = I915_READ(IIR); /* Flush posted writes */
  2992.  
  2993.                 if (iir & I915_USER_INTERRUPT)
  2994.                         notify_ring(dev, &dev_priv->ring[RCS]);
  2995.                 if (iir & I915_BSD_USER_INTERRUPT)
  2996.                         notify_ring(dev, &dev_priv->ring[VCS]);
  2997.  
  2998.                 for_each_pipe(pipe) {
  2999.                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
  3000.                             i915_handle_vblank(dev, pipe, pipe, iir))
  3001.                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
  3002.  
  3003.                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
  3004.                                 blc_event = true;
  3005.                 }
  3006.  
  3008.                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
  3009.                         intel_opregion_asle_intr(dev);
  3010.  
  3011.                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
  3012.                         gmbus_irq_handler(dev);
  3013.  
  3014.                 /* With MSI, interrupts are only generated when iir
  3015.                  * transitions from zero to nonzero.  If another bit got
  3016.                  * set while we were handling the existing iir bits, then
  3017.                  * we would never get another interrupt.
  3018.                  *
  3019.                  * This is fine on non-MSI as well, as if we hit this path
  3020.                  * we avoid exiting the interrupt handler only to generate
  3021.                  * another one.
  3022.                  *
  3023.                  * Note that for MSI this could cause a stray interrupt report
  3024.                  * if an interrupt landed in the time between writing IIR and
  3025.                  * the posting read.  This should be rare enough to never
  3026.                  * trigger the 99% of 100,000 interrupts test for disabling
  3027.                  * stray interrupts.
  3028.                  */
  3029.                 iir = new_iir;
  3030.         }
  3031.  
  3032.         i915_update_dri1_breadcrumb(dev);
  3033.  
  3034.         return ret;
  3035. }
  3036.  
  3037. static void i965_irq_uninstall(struct drm_device * dev)
  3038. {
  3039.         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  3040.         int pipe;
  3041.  
  3042.         if (!dev_priv)
  3043.                 return;
  3044.  
  3045.         del_timer_sync(&dev_priv->hotplug_reenable_timer);
  3046.  
  3047.         I915_WRITE(PORT_HOTPLUG_EN, 0);
  3048.         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  3049.  
  3050.         I915_WRITE(HWSTAM, 0xffffffff);
  3051.         for_each_pipe(pipe)
  3052.                 I915_WRITE(PIPESTAT(pipe), 0);
  3053.         I915_WRITE(IMR, 0xffffffff);
  3054.         I915_WRITE(IER, 0x0);
  3055.  
  3056.         for_each_pipe(pipe)
  3057.                 I915_WRITE(PIPESTAT(pipe),
  3058.                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
  3059.         I915_WRITE(IIR, I915_READ(IIR));
  3060. }
  3061.  
  3062. static void i915_reenable_hotplug_timer_func(unsigned long data)
  3063. {
  3064.         drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
  3065.         struct drm_device *dev = dev_priv->dev;
  3066.         struct drm_mode_config *mode_config = &dev->mode_config;
  3067.         unsigned long irqflags;
  3068.         int i;
  3069.  
  3070.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  3071.         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
  3072.                 struct drm_connector *connector;
  3073.  
  3074.                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
  3075.                         continue;
  3076.  
  3077.                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
  3078.  
  3079.                 list_for_each_entry(connector, &mode_config->connector_list, head) {
  3080.                         struct intel_connector *intel_connector = to_intel_connector(connector);
  3081.  
  3082.                         if (intel_connector->encoder->hpd_pin == i) {
  3083.                                 if (connector->polled != intel_connector->polled)
  3084.                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
  3085.                                                          drm_get_connector_name(connector));
  3086.                                 connector->polled = intel_connector->polled;
  3087.                                 if (!connector->polled)
  3088.                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
  3089.                         }
  3090.                 }
  3091.         }
  3092.         if (dev_priv->display.hpd_irq_setup)
  3093.                 dev_priv->display.hpd_irq_setup(dev);
  3094.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  3095. }
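
/* Editor's note (hedged): a pin ends up HPD_DISABLED when the hotplug storm
 * detection in intel_hpd_irq_handler() decides it is too noisy; the timer
 * function above flips such pins back to HPD_ENABLED, restores the
 * connectors' polling mode and reprograms the hardware. The disable side
 * would look roughly like this hypothetical sketch (delay value assumed):
 */
static inline void hpd_storm_disable_sketch(drm_i915_private_t *dev_priv,
                                            enum hpd_pin pin)
{
        dev_priv->hpd_stats[pin].hpd_mark = HPD_DISABLED;
        mod_timer(&dev_priv->hotplug_reenable_timer,
                  jiffies + msecs_to_jiffies(2 * 60 * 1000)); /* ~2 minutes */
}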
  3096.  
  3097. void intel_irq_init(struct drm_device *dev)
  3098. {
  3099.         struct drm_i915_private *dev_priv = dev->dev_private;
  3100.  
  3101.         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
  3102.         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
  3103.         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
  3104.         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
  3105.  
  3106.         setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
  3107.                     (unsigned long) dev_priv);
  3108.  
  3109.         dev->driver->get_vblank_counter = i915_get_vblank_counter;
  3110.         dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
  3111.         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
  3112.                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
  3113.                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
  3114.         }
  3115.  
  3116.         if (drm_core_check_feature(dev, DRIVER_MODESET))
  3117.                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
  3118.         else
  3119.                 dev->driver->get_vblank_timestamp = NULL;
  3120.         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
  3121.  
  3122.         if (IS_VALLEYVIEW(dev)) {
  3123.                 dev->driver->irq_handler = valleyview_irq_handler;
  3124.                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
  3125.                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
  3126.                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
  3127.                 dev->driver->enable_vblank = valleyview_enable_vblank;
  3128.                 dev->driver->disable_vblank = valleyview_disable_vblank;
  3129.                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  3130.         } else if (HAS_PCH_SPLIT(dev)) {
  3131.                 dev->driver->irq_handler = ironlake_irq_handler;
  3132.                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
  3133.                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
  3134.                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
  3135.                 dev->driver->enable_vblank = ironlake_enable_vblank;
  3136.                 dev->driver->disable_vblank = ironlake_disable_vblank;
  3137.                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
  3138.         } else {
                if (INTEL_INFO(dev)->gen == 2) {
                        /* gen2 (i8xx) IRQ handlers are not hooked up in this port */
  3140.                 } else if (INTEL_INFO(dev)->gen == 3) {
  3141.                         dev->driver->irq_preinstall = i915_irq_preinstall;
  3142.                         dev->driver->irq_postinstall = i915_irq_postinstall;
  3143.                         dev->driver->irq_uninstall = i915_irq_uninstall;
  3144.                         dev->driver->irq_handler = i915_irq_handler;
  3145.                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  3146.                 } else {
  3147.                         dev->driver->irq_preinstall = i965_irq_preinstall;
  3148.                         dev->driver->irq_postinstall = i965_irq_postinstall;
  3149.                         dev->driver->irq_uninstall = i965_irq_uninstall;
  3150.                         dev->driver->irq_handler = i965_irq_handler;
  3151.                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
  3152.                 }
  3153.                 dev->driver->enable_vblank = i915_enable_vblank;
  3154.                 dev->driver->disable_vblank = i915_disable_vblank;
  3155.         }
  3156. }
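
/* Quick reference for the hookup above (editor's summary of this function):
 *   Valleyview             -> valleyview_* handlers
 *   PCH split (ILK and up) -> ironlake_* handlers
 *   gen 3                  -> i915_* handlers
 *   gen 4+ without PCH     -> i965_* handlers
 *   gen 2                  -> left without IRQ handlers in this port
 */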
  3157.  
  3158. void intel_hpd_init(struct drm_device *dev)
  3159. {
  3160.         struct drm_i915_private *dev_priv = dev->dev_private;
  3161.         struct drm_mode_config *mode_config = &dev->mode_config;
  3162.         struct drm_connector *connector;
  3163.         unsigned long irqflags;
  3164.         int i;
  3165.  
  3166.         for (i = 1; i < HPD_NUM_PINS; i++) {
  3167.                 dev_priv->hpd_stats[i].hpd_cnt = 0;
  3168.                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
  3169.         }
  3170.         list_for_each_entry(connector, &mode_config->connector_list, head) {
  3171.                 struct intel_connector *intel_connector = to_intel_connector(connector);
  3172.                 connector->polled = intel_connector->polled;
  3173.                 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
  3174.                         connector->polled = DRM_CONNECTOR_POLL_HPD;
  3175.         }
  3176.  
  3177.         /* Interrupt setup is already guaranteed to be single-threaded, this is
  3178.          * just to make the assert_spin_locked checks happy. */
  3179.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  3180.         if (dev_priv->display.hpd_irq_setup)
  3181.                 dev_priv->display.hpd_irq_setup(dev);
  3182.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  3183. }
  3184.  
  3185. /* Disable interrupts so we can allow Package C8+. */
  3186. void hsw_pc8_disable_interrupts(struct drm_device *dev)
  3187. {
  3188.         struct drm_i915_private *dev_priv = dev->dev_private;
  3189.         unsigned long irqflags;
  3190.  
  3191.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  3192.  
  3193.         dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
  3194.         dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
  3195.         dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
  3196.         dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
  3197.         dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
  3198.  
  3199.         ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
  3200.         ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
  3201.         ilk_disable_gt_irq(dev_priv, 0xffffffff);
  3202.         snb_disable_pm_irq(dev_priv, 0xffffffff);
  3203.  
  3204.         dev_priv->pc8.irqs_disabled = true;
  3205.  
  3206.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  3207. }
  3208.  
  3209. /* Restore interrupts so we can recover from Package C8+. */
  3210. void hsw_pc8_restore_interrupts(struct drm_device *dev)
  3211. {
  3212.         struct drm_i915_private *dev_priv = dev->dev_private;
  3213.         unsigned long irqflags;
  3214.         uint32_t val, expected;
  3215.  
  3216.         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  3217.  
  3218.         val = I915_READ(DEIMR);
  3219.         expected = ~DE_PCH_EVENT_IVB;
  3220.         WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
  3221.  
  3222.         val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
  3223.         expected = ~SDE_HOTPLUG_MASK_CPT;
  3224.         WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
  3225.              val, expected);
  3226.  
  3227.         val = I915_READ(GTIMR);
  3228.         expected = 0xffffffff;
  3229.         WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
  3230.  
  3231.         val = I915_READ(GEN6_PMIMR);
  3232.         expected = 0xffffffff;
  3233.         WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
  3234.              expected);
  3235.  
  3236.         dev_priv->pc8.irqs_disabled = false;
  3237.  
  3238.         ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
  3239.         ibx_enable_display_interrupt(dev_priv,
  3240.                                      ~dev_priv->pc8.regsave.sdeimr &
  3241.                                      ~SDE_HOTPLUG_MASK_CPT);
  3242.         ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
  3243.         snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
  3244.         I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
  3245.  
  3246.         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  3247. }
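
/* Editor's note: hsw_pc8_disable_interrupts() and
 * hsw_pc8_restore_interrupts() are meant to bracket a Package C8 stay, e.g.
 * (sketch, hypothetical call sites):
 *
 *   hsw_pc8_disable_interrupts(dev);
 *   ... device resides in PC8+ ...
 *   hsw_pc8_restore_interrupts(dev);
 *
 * The restore side WARNs if DEIMR/SDEIMR/GTIMR/GEN6_PMIMR were touched
 * while interrupts were supposed to stay off. */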
  3248.  
/* KolibriOS entry point: forward the platform interrupt to whichever
 * per-generation handler intel_irq_init() installed above. */
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
    return dev->driver->irq_handler(0, dev);
}
  3259.  
  3260.