/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
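
/* A posting read forces any preceding posted MMIO write out to the device
 * and enforces ordering; the value read back is discarded.  The forcewake
 * helpers below post their writes by reading ECOBUS, a register that shares
 * a cacheline with FORCEWAKE without being FORCEWAKE itself (see the "same
 * cacheline" comments that follow).
 */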

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
        u32 gt_thread_status_mask;

        if (IS_HASWELL(dev_priv->dev))
                gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
        else
                gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

        /* w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
                DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE, 0);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

        __raw_i915_write32(dev_priv, FORCEWAKE, 1);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);

        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

        /* WaRsForcewakeWaitTC0:snb */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
        u32 forcewake_ack;

        if (IS_HASWELL(dev_priv->dev))
                forcewake_ack = FORCEWAKE_ACK_HSW;
        else
                forcewake_ack = FORCEWAKE_MT_ACK;

        if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);

        if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

        /* WaRsForcewakeWaitTC0:ivb,hsw */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
        u32 gtfifodbg;

        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
        if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
                 "MMIO read or write has been dropped %x\n", gtfifodbg))
                __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE, 0);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);
        gen6_gt_check_fifodbg(dev_priv);
}

static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);
        gen6_gt_check_fifodbg(dev_priv);
}

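/* A note on the helper below: on gen6-class parts, MMIO writes issued while
 * the GT is power-gated go through a FIFO of limited depth; once that FIFO
 * overflows, writes can be dropped (GTFIFODBG latches the error, which
 * gen6_gt_check_fifodbg() above reports as "MMIO read or write has been
 * dropped").  __gen6_gt_wait_for_fifo() therefore spins until a reserve of
 * free entries is available before the caller issues its write.
 */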
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
                u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
                        fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
                dev_priv->uncore.fifo_count = fifo;
        }
        dev_priv->uncore.fifo_count--;

        return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                           _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_VLV */
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

        __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                           _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
        __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                           _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");

        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
                             FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");

        /* WaRsForcewakeWaitTC0:vlv */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                           _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                           _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* The below doubles as a POSTING_READ */
        gen6_gt_check_fifodbg(dev_priv);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_VALLEYVIEW(dev)) {
                vlv_force_wake_reset(dev_priv);
        } else if (INTEL_INFO(dev)->gen >= 6) {
                __gen6_gt_force_wake_reset(dev_priv);
                if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
                        __gen6_gt_force_wake_mt_reset(dev_priv);
        }
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev))
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}

void intel_uncore_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_VALLEYVIEW(dev)) {
                dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
                dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
        } else if (IS_HASWELL(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
                dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
        } else if (IS_IVYBRIDGE(dev)) {
                u32 ecobus;

                /* IVB configs may use multi-threaded forcewake */

                /* A small trick here - if the bios hasn't configured
                 * MT forcewake, and if the device is in RC6, then
                 * force_wake_mt_get will not wake the device and the
                 * ECOBUS read will return zero. Which will be
                 * (correctly) interpreted by the test below as MT
                 * forcewake being disabled.
                 */
                mutex_lock(&dev->struct_mutex);
                __gen6_gt_force_wake_mt_get(dev_priv);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                __gen6_gt_force_wake_mt_put(dev_priv);
                mutex_unlock(&dev->struct_mutex);

                if (ecobus & FORCEWAKE_MT_ENABLE) {
                        dev_priv->uncore.funcs.force_wake_get =
                                __gen6_gt_force_wake_mt_get;
                        dev_priv->uncore.funcs.force_wake_put =
                                __gen6_gt_force_wake_mt_put;
                } else {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
                        dev_priv->uncore.funcs.force_wake_get =
                                __gen6_gt_force_wake_get;
                        dev_priv->uncore.funcs.force_wake_put =
                                __gen6_gt_force_wake_put;
                }
        } else if (IS_GEN6(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        __gen6_gt_force_wake_get;
                dev_priv->uncore.funcs.force_wake_put =
                        __gen6_gt_force_wake_put;
        }

        intel_uncore_forcewake_reset(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
        intel_uncore_forcewake_reset(dev);

        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (dev_priv->uncore.forcewake_count++ == 0)
                dev_priv->uncore.funcs.force_wake_get(dev_priv);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
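
/* A minimal usage sketch of the get/put pair documented above, assuming a
 * hypothetical caller that must read two GT registers without the GT
 * powering down in between (the register names are illustrative only):
 *
 *      gen6_gt_force_wake_get(dev_priv);
 *      rp_ctl = I915_READ(GEN6_RP_CONTROL);
 *      rp_cap = I915_READ(GEN6_RP_STATE_CAP);
 *      gen6_gt_force_wake_put(dev_priv);
 *
 * The reference count means nested get/put pairs are safe: only the first
 * get wakes the hardware and only the last put releases it.
 */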

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
        ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
         ((reg) < 0x40000) && \
         ((reg) != FORCEWAKE))
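
/* Reading the macro above: only offsets below 0x40000 live in the GT power
 * well on these parts and thus need forcewake, and FORCEWAKE itself is
 * excluded so that waking the GT never goes through the very register used
 * to do the waking.
 */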

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
         * the chip from rc6 before touching it for real. MI_MODE is masked,
         * hence harmless to write 0 into. */
        __raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
        if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
            (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unknown unclaimed register before writing to %x\n",
                          reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
        if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
            (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unclaimed write to %x\n", reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

#define __i915_read(x) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
        unsigned long irqflags; \
        u##x val = 0; \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
        if (dev_priv->info->gen == 5) \
                ilk_dummy_write(dev_priv); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                if (dev_priv->uncore.forcewake_count == 0) \
                        dev_priv->uncore.funcs.force_wake_get(dev_priv); \
                val = __raw_i915_read##x(dev_priv, reg); \
                if (dev_priv->uncore.forcewake_count == 0) \
                        dev_priv->uncore.funcs.force_wake_put(dev_priv); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val; \
}

__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read
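
/* For reference, each expansion above token-pastes the access width into
 * the function name and types, so __i915_read(32) generates:
 *
 *      u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg, bool trace)
 *
 * which takes the uncore spinlock, grabs forcewake if the offset needs it
 * and no one already holds a reference, performs the raw read, and drops
 * the wakeup again.  The same pattern generates the write side below.
 */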

#define __i915_write(x) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
        unsigned long irqflags; \
        u32 __fifo_ret = 0; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        if (dev_priv->info->gen == 5) \
                ilk_dummy_write(dev_priv); \
        hsw_unclaimed_reg_clear(dev_priv, reg); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        hsw_unclaimed_reg_check(dev_priv, reg); \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write

static const struct register_whitelist {
        uint64_t offset;
        uint32_t size;
        uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
        { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
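
/* The gen_bitmask is tested against (1 << gen) in the ioctl below, so the
 * 0xF0 above means the one whitelisted register (the render ring timestamp)
 * is readable on gens 4 through 7: 0x10 | 0x20 | 0x40 | 0x80 = 0xF0.
 */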

int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        int i;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (entry->offset == reg->offset &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;

        switch (entry->size) {
        case 8:
                reg->val = I915_READ64(reg->offset);
                break;
        case 4:
                reg->val = I915_READ(reg->offset);
                break;
        case 2:
                reg->val = I915_READ16(reg->offset);
                break;
        case 1:
                reg->val = I915_READ8(reg->offset);
                break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }

        return 0;
}
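
/* A hedged sketch of how userspace reaches the ioctl above, assuming the
 * standard drm_i915_reg_read uapi struct (offset in, val out), libdrm's
 * drmIoctl(), and the DRM_IOCTL_I915_REG_READ request number; the offset
 * shown is RING_TIMESTAMP(RENDER_RING_BASE) and is illustrative:
 *
 *      struct drm_i915_reg_read rr = { .offset = 0x2358 };
 *      if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
 *              printf("render timestamp: %llu\n", (unsigned long long)rr.val);
 *
 * Any offset not in the whitelist, or not supported on the running gen,
 * fails with -EINVAL per the lookup above.
 */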

static int i8xx_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_I85X(dev))
                return -ENODEV;

        I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
        POSTING_READ(D_STATE);

        if (IS_I830(dev) || IS_845G(dev)) {
                I915_WRITE(DEBUG_RESET_I830,
                           DEBUG_RESET_DISPLAY |
                           DEBUG_RESET_RENDER |
                           DEBUG_RESET_FULL);
                POSTING_READ(DEBUG_RESET_I830);
                msleep(1);

                I915_WRITE(DEBUG_RESET_I830, 0);
                POSTING_READ(DEBUG_RESET_I830);
        }

        msleep(1);

        I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
        POSTING_READ(D_STATE);

        return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
        u8 gdrst;
        pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
        int ret;

        /*
         * Set the domains we want to reset (GRDOM/bits 2 and 3) as
         * well as the reset bit (GR/bit 0).  Setting the GR bit
         * triggers the reset; when done, the hardware will clear it.
         */
        pci_write_config_byte(dev->pdev, I965_GDRST,
                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* We can't reset render&media without also resetting display ... */
        pci_write_config_byte(dev->pdev, I965_GDRST,
                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);

        ret = wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        pci_write_config_byte(dev->pdev, I965_GDRST, 0);

        return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 gdrst;
        int ret;

        gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
        gdrst &= ~GRDOM_MASK;
        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
        if (ret)
                return ret;

        /* We can't reset render&media without also resetting display ... */
        gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
        gdrst &= ~GRDOM_MASK;
        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
        return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
        unsigned long irqflags;

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* Reset the chip */

        /* GEN6_GDRST is not in the gt power well, no need to check
         * for fifo space for the write or forcewake the chip for
         * the read
         */
        __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

        /* Spin waiting for the device to ack the reset request */
        ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

        intel_uncore_forcewake_reset(dev);

        /* If reset with a user forcewake, try to restore, otherwise turn it off */
        if (dev_priv->uncore.forcewake_count)
                dev_priv->uncore.funcs.force_wake_get(dev_priv);
        else
                dev_priv->uncore.funcs.force_wake_put(dev_priv);

        /* Restore fifo count */
        dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
        return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
        switch (INTEL_INFO(dev)->gen) {
        case 7:
        case 6: return gen6_do_reset(dev);
        case 5: return ironlake_do_reset(dev);
        case 4: return i965_do_reset(dev);
        case 2: return i8xx_do_reset(dev);
        default: return -ENODEV;
        }
}

void intel_uncore_clear_errors(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* XXX needs spinlock around caller's grouping */
        if (HAS_FPGA_DBG_UNCLAIMED(dev))
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}

void intel_uncore_check_errors(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
            (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unclaimed register before interrupt\n");
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}