/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

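/*
 * Raw MMIO accessors: thin wrappers around readb()/writeb() and friends on
 * the register BAR. They bypass the forcewake bookkeeping and uncore.lock
 * entirely, so callers must already hold forcewake (or know the register
 * does not need it) and must keep the device awake.
 */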
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
        WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
             "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
        u32 gt_thread_status_mask;

        if (IS_HASWELL(dev_priv->dev))
                gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
        else
                gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

        /* w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
                DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE, 0);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

        __raw_i915_write32(dev_priv, FORCEWAKE, 1);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);

        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

        /* WaRsForcewakeWaitTC0:snb */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        u32 forcewake_ack;

        if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
                forcewake_ack = FORCEWAKE_ACK_HSW;
        else
                forcewake_ack = FORCEWAKE_MT_ACK;

        if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);

        if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

        /* WaRsForcewakeWaitTC0:ivb,hsw */
        if (INTEL_INFO(dev_priv->dev)->gen < 8)
                __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
        u32 gtfifodbg;

        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
        if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
                __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        __raw_i915_write32(dev_priv, FORCEWAKE, 0);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);
        gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);

        if (IS_GEN7(dev_priv->dev))
                gen6_gt_check_fifodbg(dev_priv);
}

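/*
 * Gen6/gen7 hardware keeps a small FIFO of pending GT register writes; if
 * it overflows, writes are dropped and GTFIFODBG latches an error. The
 * helper below keeps a running count of free entries and, when it falls
 * below the reserved threshold, polls the hardware until space opens up.
 */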
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        /* On VLV, the FIFO is shared by both SW and HW, so we need to
         * read the free-entry count (FREE_ENTRIES) every time. */
        if (IS_VALLEYVIEW(dev_priv->dev))
                dev_priv->uncore.fifo_count =
                        __raw_i915_read32(dev_priv, GTFIFOCTL) &
                                                GT_FIFO_FREE_ENTRIES_MASK;

        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
                u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
                        fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
                dev_priv->uncore.fifo_count = fifo;
        }
        dev_priv->uncore.fifo_count--;

        return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                           _MASKED_BIT_DISABLE(0xffff));
        __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                           _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_VLV */
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
                                                int fw_engine)
{
        /* Check for Render Engine */
        if (FORCEWAKE_RENDER & fw_engine) {
                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_VLV) &
                                                FORCEWAKE_KERNEL) == 0,
                                    FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

                __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                                   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_VLV) &
                                                FORCEWAKE_KERNEL),
                                    FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: waiting for Render to ack.\n");
        }

        /* Check for Media Engine */
        if (FORCEWAKE_MEDIA & fw_engine) {
                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_MEDIA_VLV) &
                                                FORCEWAKE_KERNEL) == 0,
                                    FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

                __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                                   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_MEDIA_VLV) &
                                                FORCEWAKE_KERNEL),
                                    FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: waiting for media to ack.\n");
        }

        /* WaRsForcewakeWaitTC0:vlv */
        if (!IS_CHERRYVIEW(dev_priv->dev))
                __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
                                        int fw_engine)
{
        /* Check for Render Engine */
        if (FORCEWAKE_RENDER & fw_engine)
                __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                                   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

        /* Check for Media Engine */
        if (FORCEWAKE_MEDIA & fw_engine)
                __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                                   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

        /* something from same cacheline, but !FORCEWAKE_VLV */
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
        if (!IS_CHERRYVIEW(dev_priv->dev))
                gen6_gt_check_fifodbg(dev_priv);
}

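/*
 * Valleyview keeps a separate forcewake reference count per power well
 * (render and media). The wrappers below touch the hardware only on the
 * 0 <-> 1 transition of each count; nested gets and puts are pure
 * bookkeeping under uncore.lock.
 */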
static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        if (fw_engine & FORCEWAKE_RENDER &&
            dev_priv->uncore.fw_rendercount++ != 0)
                fw_engine &= ~FORCEWAKE_RENDER;
        if (fw_engine & FORCEWAKE_MEDIA &&
            dev_priv->uncore.fw_mediacount++ != 0)
                fw_engine &= ~FORCEWAKE_MEDIA;

        if (fw_engine)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        if (fw_engine & FORCEWAKE_RENDER) {
                WARN_ON(!dev_priv->uncore.fw_rendercount);
                if (--dev_priv->uncore.fw_rendercount != 0)
                        fw_engine &= ~FORCEWAKE_RENDER;
        }

        if (fw_engine & FORCEWAKE_MEDIA) {
                WARN_ON(!dev_priv->uncore.fw_mediacount);
                if (--dev_priv->uncore.fw_mediacount != 0)
                        fw_engine &= ~FORCEWAKE_MEDIA;
        }

        if (fw_engine)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

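/*
 * Timer callback that drops the deferred forcewake reference some time
 * after the last put, so bursts of MMIO traffic share one forcewake cycle
 * instead of toggling the GT power well around every access. Note that in
 * this port the re-arming call (mod_timer_pinned) in
 * gen6_gt_force_wake_put() is commented out.
 */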
static void gen6_force_wake_timer(unsigned long arg)
{
        struct drm_i915_private *dev_priv = (void *)arg;
        unsigned long irqflags;

        assert_device_not_suspended(dev_priv);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        WARN_ON(!dev_priv->uncore.forcewake_count);

        if (--dev_priv->uncore.forcewake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        intel_runtime_pm_put(dev_priv);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
                gen6_force_wake_timer((unsigned long)dev_priv);

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        if (IS_VALLEYVIEW(dev))
                vlv_force_wake_reset(dev_priv);
        else if (IS_GEN6(dev) || IS_GEN7(dev))
                __gen6_gt_force_wake_reset(dev_priv);

        if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
                __gen7_gt_force_wake_mt_reset(dev_priv);

        if (restore) { /* If reset with a user forcewake, try to restore */
                unsigned fw = 0;

                if (IS_VALLEYVIEW(dev)) {
                        if (dev_priv->uncore.fw_rendercount)
                                fw |= FORCEWAKE_RENDER;

                        if (dev_priv->uncore.fw_mediacount)
                                fw |= FORCEWAKE_MEDIA;
                } else {
                        if (dev_priv->uncore.forcewake_count)
                                fw = FORCEWAKE_ALL;
                }

                if (fw)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

                if (IS_GEN6(dev) || IS_GEN7(dev))
                        dev_priv->uncore.fifo_count =
                                __raw_i915_read32(dev_priv, GTFIFOCTL) &
                                GT_FIFO_FREE_ENTRIES_MASK;
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev))
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

        if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
            (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
                /* The docs do not explain exactly how the calculation can be
                 * made. It is somewhat guessable, but for now, it's always
                 * 128MB.
                 * NB: We can't write IDICR yet because we do not have gt funcs
                 * set up */
                dev_priv->ellc_size = 128;
                DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
        }

        /* clear out old GT FIFO errors */
        if (IS_GEN6(dev) || IS_GEN7(dev))
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));

        intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        intel_runtime_pm_get(dev_priv);

        /* Redirect to VLV specific routine */
        if (IS_VALLEYVIEW(dev_priv->dev))
                return vlv_force_wake_get(dev_priv, fw_engine);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (dev_priv->uncore.forcewake_count++ == 0)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;
        bool delayed = false;

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        /* Redirect to VLV specific routine */
        if (IS_VALLEYVIEW(dev_priv->dev)) {
                vlv_force_wake_put(dev_priv, fw_engine);
                goto out;
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        WARN_ON(!dev_priv->uncore.forcewake_count);

        if (--dev_priv->uncore.forcewake_count == 0) {
                dev_priv->uncore.forcewake_count++;
                delayed = true;
//       mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
//                GetTimerTicks() + 1);
        }
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
        if (!delayed)
                intel_runtime_pm_put(dev_priv);
}
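
/*
 * Illustrative usage sketch (not part of this file): a caller that must
 * keep the GT awake across several dependent accesses brackets the whole
 * sequence instead of relying on the implicit per-read forcewake:
 *
 *      gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *      ... sequence of I915_READ()/I915_WRITE() accesses ...
 *      gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 */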

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
        ((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

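/*
 * The range tables below map MMIO offsets to the forcewake domain(s) a
 * register belongs to on Valleyview and Cherryview, so accesses wake only
 * the render well, only the media well, or (on CHV) both.
 */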
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x4000) || \
         REG_RANGE((reg), 0x5000, 0x8000) || \
         REG_RANGE((reg), 0xB000, 0x12000) || \
         REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x12000, 0x14000) || \
         REG_RANGE((reg), 0x22000, 0x24000) || \
         REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x4000) || \
         REG_RANGE((reg), 0x5000, 0x8000) || \
         REG_RANGE((reg), 0x8300, 0x8500) || \
         REG_RANGE((reg), 0xB000, 0xC000) || \
         REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x8800, 0x8900) || \
         REG_RANGE((reg), 0xD000, 0xD800) || \
         REG_RANGE((reg), 0x12000, 0x14000) || \
         REG_RANGE((reg), 0x1A000, 0x1C000) || \
         REG_RANGE((reg), 0x1E800, 0x1EA00) || \
         REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x4000, 0x5000) || \
         REG_RANGE((reg), 0x8000, 0x8300) || \
         REG_RANGE((reg), 0x8500, 0x8600) || \
         REG_RANGE((reg), 0x9000, 0xB000) || \
         REG_RANGE((reg), 0xC000, 0xC800) || \
         REG_RANGE((reg), 0xF000, 0x10000) || \
         REG_RANGE((reg), 0x14000, 0x14400) || \
         REG_RANGE((reg), 0x22000, 0x24000))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
         * the chip from rc6 before touching it for real. MI_MODE is masked,
         * hence harmless to write 0 into. */
        __raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
                        bool before)
{
        const char *op = read ? "reading" : "writing to";
        const char *when = before ? "before" : "after";

        if (!i915.mmio_debug)
                return;

        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
                     when, op, reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
        if (i915.mmio_debug)
                return;

        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                DRM_ERROR("Unclaimed register detected. Please use i915.mmio_debug=1 to debug this problem.\n");
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

#define REG_READ_HEADER(x) \
        unsigned long irqflags; \
        u##x val = 0; \
        assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

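/*
 * Every per-generation reader below expands to the same shape: take
 * uncore.lock and assert the device is not suspended (REG_READ_HEADER),
 * apply any generation-specific workaround and forcewake handling, do the
 * raw read, then unlock, trace and return the value (REG_READ_FOOTER).
 */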
#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
        ilk_dummy_write(dev_priv); \
        val = __raw_i915_read##x(dev_priv, reg); \
        REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
        hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
        if (dev_priv->uncore.forcewake_count == 0 && \
            NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                dev_priv->uncore.funcs.force_wake_get(dev_priv, \
                                                      FORCEWAKE_ALL); \
                val = __raw_i915_read##x(dev_priv, reg); \
                dev_priv->uncore.funcs.force_wake_put(dev_priv, \
                                                      FORCEWAKE_ALL); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
        hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
        REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        unsigned fwengine = 0; \
        REG_READ_HEADER(x); \
        if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
                if (dev_priv->uncore.fw_rendercount == 0) \
                        fwengine = FORCEWAKE_RENDER; \
        } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
                if (dev_priv->uncore.fw_mediacount == 0) \
                        fwengine = FORCEWAKE_MEDIA; \
        } \
        if (fwengine) \
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        if (fwengine) \
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
        REG_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        unsigned fwengine = 0; \
        REG_READ_HEADER(x); \
        if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
                if (dev_priv->uncore.fw_rendercount == 0) \
                        fwengine = FORCEWAKE_RENDER; \
        } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
                if (dev_priv->uncore.fw_mediacount == 0) \
                        fwengine = FORCEWAKE_MEDIA; \
        } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
                if (dev_priv->uncore.fw_rendercount == 0) \
                        fwengine |= FORCEWAKE_RENDER; \
                if (dev_priv->uncore.fw_mediacount == 0) \
                        fwengine |= FORCEWAKE_MEDIA; \
        } \
        if (fwengine) \
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        if (fwengine) \
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
        REG_READ_FOOTER; \
}

__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        REG_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        REG_WRITE_HEADER; \
        ilk_dummy_write(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        REG_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        REG_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
        hsw_unclaimed_reg_detect(dev_priv); \
        REG_WRITE_FOOTER; \
}

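/*
 * On gen8 a handful of frequently written registers are "shadowed": the
 * hardware latches writes to them even while the GT is powered down, so
 * the write path can skip forcewake for them entirely.
 */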
static const u32 gen8_shadowed_regs[] = {
        FORCEWAKE_MT,
        GEN6_RPNSWREQ,
        GEN6_RC_VIDEO_FREQ,
        RING_TAIL(RENDER_RING_BASE),
        RING_TAIL(GEN6_BSD_RING_BASE),
        RING_TAIL(VEBOX_RING_BASE),
        RING_TAIL(BLT_RING_BASE),
        /* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
                if (reg == gen8_shadowed_regs[i])
                        return true;

        return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        REG_WRITE_HEADER; \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
                if (dev_priv->uncore.forcewake_count == 0) \
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, \
                                                              FORCEWAKE_ALL); \
                __raw_i915_write##x(dev_priv, reg, val); \
                if (dev_priv->uncore.forcewake_count == 0) \
                        dev_priv->uncore.funcs.force_wake_put(dev_priv, \
                                                              FORCEWAKE_ALL); \
        } else { \
                __raw_i915_write##x(dev_priv, reg, val); \
        } \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
        hsw_unclaimed_reg_detect(dev_priv); \
        REG_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        unsigned fwengine = 0; \
        bool shadowed = is_gen8_shadowed(dev_priv, reg); \
        REG_WRITE_HEADER; \
        if (!shadowed) { \
                if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
                        if (dev_priv->uncore.fw_rendercount == 0) \
                                fwengine = FORCEWAKE_RENDER; \
                } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
                        if (dev_priv->uncore.fw_mediacount == 0) \
                                fwengine = FORCEWAKE_MEDIA; \
                } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
                        if (dev_priv->uncore.fw_rendercount == 0) \
                                fwengine |= FORCEWAKE_RENDER; \
                        if (dev_priv->uncore.fw_mediacount == 0) \
                                fwengine |= FORCEWAKE_MEDIA; \
                } \
        } \
        if (fwengine) \
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (fwengine) \
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
        REG_WRITE_FOOTER; \
}

__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

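/*
 * Choose the forcewake get/put implementations and the per-generation MMIO
 * accessors for this device. intel_uncore_early_sanitize() runs first so
 * the selection happens on hardware in a known state.
 */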
void intel_uncore_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        setup_timer(&dev_priv->uncore.force_wake_timer,
                    gen6_force_wake_timer, (unsigned long)dev_priv);

        intel_uncore_early_sanitize(dev, false);

        if (IS_VALLEYVIEW(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
                dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
        } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
                dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
        } else if (IS_IVYBRIDGE(dev)) {
                u32 ecobus;

                /* IVB configs may use multi-threaded forcewake */

                /* A small trick here - if the bios hasn't configured
                 * MT forcewake, and if the device is in RC6, then
                 * force_wake_mt_get will not wake the device and the
                 * ECOBUS read will return zero. Which will be
                 * (correctly) interpreted by the test below as MT
                 * forcewake being disabled.
                 */
                mutex_lock(&dev->struct_mutex);
                __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);

                if (ecobus & FORCEWAKE_MT_ENABLE) {
                        dev_priv->uncore.funcs.force_wake_get =
                                __gen7_gt_force_wake_mt_get;
                        dev_priv->uncore.funcs.force_wake_put =
                                __gen7_gt_force_wake_mt_put;
                } else {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
                        dev_priv->uncore.funcs.force_wake_get =
                                __gen6_gt_force_wake_get;
                        dev_priv->uncore.funcs.force_wake_put =
                                __gen6_gt_force_wake_put;
                }
        } else if (IS_GEN6(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        __gen6_gt_force_wake_get;
                dev_priv->uncore.funcs.force_wake_put =
                        __gen6_gt_force_wake_put;
        }

        switch (INTEL_INFO(dev)->gen) {
        default:
                if (IS_CHERRYVIEW(dev)) {
                        dev_priv->uncore.funcs.mmio_writeb  = chv_write8;
                        dev_priv->uncore.funcs.mmio_writew  = chv_write16;
                        dev_priv->uncore.funcs.mmio_writel  = chv_write32;
                        dev_priv->uncore.funcs.mmio_writeq  = chv_write64;
                        dev_priv->uncore.funcs.mmio_readb  = chv_read8;
                        dev_priv->uncore.funcs.mmio_readw  = chv_read16;
                        dev_priv->uncore.funcs.mmio_readl  = chv_read32;
                        dev_priv->uncore.funcs.mmio_readq  = chv_read64;
                } else {
                        dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
                        dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
                        dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
                        dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
                        dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
                        dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
                        dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
                        dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
                }
                break;
        case 7:
        case 6:
                if (IS_HASWELL(dev)) {
                        dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
                        dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
                        dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
                        dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
                } else {
                        dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
                        dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
                        dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
                        dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
                }

                if (IS_VALLEYVIEW(dev)) {
                        dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
                        dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
                        dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
                        dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
                } else {
                        dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
                        dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
                        dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
                        dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
                }
                break;
        case 5:
                dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
                dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
                dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
                dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
                dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
                dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
                dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
                dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
                break;
        case 4:
        case 3:
        case 2:
                dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
                dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
                dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
                dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
                dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
                dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
                dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
                dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
                break;
        }
}

void intel_uncore_fini(struct drm_device *dev)
{
        /* Paranoia: make sure we have disabled everything before we exit. */
        intel_uncore_sanitize(dev);
        intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

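/*
 * Registers userspace may read via the i915_reg_read ioctl. Each entry
 * gives the register offset, its size in bytes, and a bitmask of the
 * hardware generations that expose it.
 */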
static const struct register_whitelist {
        uint64_t offset;
        uint32_t size;
        /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
        uint32_t gen_bitmask;
} whitelist[] = {
        { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (entry->offset == reg->offset &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;

        switch (entry->size) {
        case 8:
                reg->val = I915_READ64(reg->offset);
                break;
        case 4:
                reg->val = I915_READ(reg->offset);
                break;
        case 2:
                reg->val = I915_READ16(reg->offset);
                break;
        case 1:
                reg->val = I915_READ8(reg->offset);
                break;
        default:
                WARN_ON(1);
                ret = -EINVAL;
                goto out;
        }

out:
        return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reset_stats *args = data;
        struct i915_ctx_hang_stats *hs;
        struct intel_context *ctx;
        int ret;

        if (args->flags || args->pad)
                return -EINVAL;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(ctx);
        }
        hs = &ctx->hang_stats;

        args->reset_count = i915_reset_count(&dev_priv->gpu_error);

        args->batch_active = hs->batch_active;
        args->batch_pending = hs->batch_pending;

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
        u8 gdrst;

        pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
        int ret;

        /* FIXME: i965g/gm need a display save/restore for gpu reset. */
        return -ENODEV;

        /*
         * Set the domains we want to reset (GRDOM/bits 2 and 3) as
         * well as the reset bit (GR/bit 0).  Setting the GR bit
         * triggers the reset; when done, the hardware will clear it.
         */
        pci_write_config_byte(dev->pdev, I965_GDRST,
                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        pci_write_config_byte(dev->pdev, I965_GDRST,
                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);

        ret = wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        pci_write_config_byte(dev->pdev, I965_GDRST, 0);

        return 0;
}

static int g4x_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        pci_write_config_byte(dev->pdev, I965_GDRST,
                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);

        pci_write_config_byte(dev->pdev, I965_GDRST,
                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);
        ret = wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);

        pci_write_config_byte(dev->pdev, I965_GDRST, 0);

        return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
        ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
        if (ret)
                return ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
        ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
        if (ret)
                return ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

        return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /* Reset the chip */

        /* GEN6_GDRST is not in the gt power well, no need to check
         * for fifo space for the write or forcewake the chip for
         * the read
         */
        __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

        /* Spin waiting for the device to ack the reset request */
        ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

        intel_uncore_forcewake_reset(dev, true);

        return ret;
}

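/*
 * Dispatch a full GPU reset to the generation-specific method: the GDRST
 * MMIO register on gen6+, the MCHBAR-mirrored GDSR register on Ironlake,
 * and the I965_GDRST PCI config byte on g4x (the plain gen4 path currently
 * bails out with -ENODEV, see the FIXME in i965_do_reset()).
 */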
int intel_gpu_reset(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen >= 6)
                return gen6_do_reset(dev);
        else if (IS_GEN5(dev))
                return ironlake_do_reset(dev);
        else if (IS_G4X(dev))
                return g4x_do_reset(dev);
        else if (IS_GEN4(dev))
                return i965_do_reset(dev);
        else
                return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
            (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unclaimed register before interrupt\n");
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}