Subversion Repositories Kolibri OS

Rev

Rev 4104 | Rev 4371 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2013 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  */
  23.  
  24. #include "i915_drv.h"
  25. #include "intel_drv.h"
  26.  
/* How long (in ms) to wait for the hardware to ack a forcewake request. */
#define FORCEWAKE_ACK_TIMEOUT_MS 2

/* Raw MMIO accessors: read/write the register at byte offset reg__ inside
 * the device's mapped register BAR (dev_priv->regs), with no forcewake
 * handling, no locking and no tracing. All of the public i915_read*/
/* i915_write* helpers further down are built on top of these. */
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

/* Read-and-discard: forces a previous posted write out to the device. */
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
  42.  
  43.  
  44. static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
  45. {
  46.         u32 gt_thread_status_mask;
  47.  
  48.         if (IS_HASWELL(dev_priv->dev))
  49.                 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
  50.         else
  51.                 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
  52.  
  53.         /* w/a for a sporadic read returning 0 by waiting for the GT
  54.          * thread to wake up.
  55.          */
  56.         if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
  57.                 DRM_ERROR("GT thread status wait timed out\n");
  58. }
  59.  
/* Drop the legacy (single-bit) gen6 forcewake unconditionally, clearing any
 * state the BIOS or a previous driver instance may have left behind. */
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}
  66.  
/* Take the legacy gen6 forcewake: wait for any previous release to be
 * acked, request the wake, wait for the hardware ack, then apply the
 * thread-C0 workaround. Caller holds uncore.lock. */
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	/* A previous put may still be in flight; wait for its ack to clear
	 * before issuing a new request. */
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
  84.  
/* Clear all multi-threaded forcewake request bits (masked-bit register:
 * the high half selects which low bits to update). */
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}
  91.  
  92. static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
  93. {
  94.         u32 forcewake_ack;
  95.  
  96.         if (IS_HASWELL(dev_priv->dev))
  97.                 forcewake_ack = FORCEWAKE_ACK_HSW;
  98.         else
  99.                 forcewake_ack = FORCEWAKE_MT_ACK;
  100.  
  101.         if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
  102.                             FORCEWAKE_ACK_TIMEOUT_MS))
  103.                 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
  104.  
  105.         __raw_i915_write32(dev_priv, FORCEWAKE_MT,
  106.                            _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
  107.         /* something from same cacheline, but !FORCEWAKE_MT */
  108.         __raw_posting_read(dev_priv, ECOBUS);
  109.  
  110.         if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
  111.                             FORCEWAKE_ACK_TIMEOUT_MS))
  112.                 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
  113.  
  114.         /* WaRsForcewakeWaitTC0:ivb,hsw */
  115.         __gen6_gt_wait_for_thread_c0(dev_priv);
  116. }
  117.  
/* Check GTFIFODBG for dropped MMIO accesses; warn and clear the sticky
 * error bits if any are set. Called after releasing forcewake. */
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	/* WARN returns the condition, so the write only happens on error. */
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
	     "MMIO read or write has been dropped %x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}
  127.  
/* Release the legacy gen6 forcewake, then check the FIFO debug register
 * for accesses that may have been dropped while awake. */
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}
  135.  
/* Release the kernel's bit of the multi-threaded forcewake, then check
 * the FIFO debug register for dropped accesses. */
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}
  144.  
  145. static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
  146. {
  147.         int ret = 0;
  148.  
  149.         if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
  150.                 int loop = 500;
  151.                 u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
  152.                 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
  153.                         udelay(10);
  154.                         fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
  155.                 }
  156.                 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
  157.                         ++ret;
  158.                 dev_priv->uncore.fifo_count = fifo;
  159.         }
  160.         dev_priv->uncore.fifo_count--;
  161.  
  162.         return ret;
  163. }
  164.  
/* Clear all Valleyview forcewake request bits (masked-bit register). */
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
  172.  
/* Take forcewake on Valleyview: the GT and media wells are woken
 * separately and each must ack before MMIO to its registers is safe. */
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	/* Wait for any previous release to be acked first. */
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	/* Request both the GT and the media wells. */
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
  196.  
/* Release the kernel's forcewake on both Valleyview power wells. */
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
  206.  
/* Early init: clear any stale "unclaimed register" flag the BIOS may have
 * left in FPGA_DBG on platforms that support unclaimed-access detection. */
void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
  214.  
/* Select the forcewake get/put implementation for this platform and store
 * the function pointers in dev_priv->uncore.funcs. On Ivybridge the choice
 * between multi-threaded and legacy forcewake is probed at runtime. */
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
	} else if (IS_HASWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			/* Fall back to the legacy single-bit interface. */
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}
	/* NOTE(review): gens < 6 (other than VLV) leave funcs unset —
	 * presumably NEEDS_FORCE_WAKE() is never true there; confirm. */
}
  263.  
  264. static void intel_uncore_forcewake_reset(struct drm_device *dev)
  265. {
  266.         struct drm_i915_private *dev_priv = dev->dev_private;
  267.  
  268.         if (IS_VALLEYVIEW(dev)) {
  269.                 vlv_force_wake_reset(dev_priv);
  270.         } else if (INTEL_INFO(dev)->gen >= 6) {
  271.                 __gen6_gt_force_wake_reset(dev_priv);
  272.                 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
  273.                         __gen6_gt_force_wake_mt_reset(dev_priv);
  274.         }
  275. }
  276.  
/* Bring the uncore into a known state for hw init: clear forcewake and
 * turn off any RC6 state the BIOS enabled. */
void intel_uncore_sanitize(struct drm_device *dev)
{
	intel_uncore_forcewake_reset(dev);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}
  284.  
  285. /*
  286.  * Generally this is called implicitly by the register read function. However,
  287.  * if some sequence requires the GT to not power down then this function should
  288.  * be called at the beginning of the sequence followed by a call to
  289.  * gen6_gt_force_wake_put() at the end of the sequence.
  290.  */
  291. void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
  292. {
  293.         unsigned long irqflags;
  294.  
  295.         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  296.         if (dev_priv->uncore.forcewake_count++ == 0)
  297.                 dev_priv->uncore.funcs.force_wake_get(dev_priv);
  298.         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  299. }
  300.  
  301. /*
  302.  * see gen6_gt_force_wake_get()
  303.  */
  304. void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
  305. {
  306.         unsigned long irqflags;
  307.  
  308.         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  309.         if (--dev_priv->uncore.forcewake_count == 0)
  310.                 dev_priv->uncore.funcs.force_wake_put(dev_priv);
  311.         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  312. }
  313.  
/* We give fast paths for the really cool registers */
/* True when an access to reg must be bracketed by forcewake: the platform
 * has forcewake at all, the offset lies in the gt power-well range
 * (< 0x40000), and it is not the FORCEWAKE register itself. */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))
  319.  
/* Ironlake rc6 wakeup workaround; see the comment inside. */
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}
  328.  
  329. static void
  330. hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
  331. {
  332.         if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
  333.             (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
  334.                 DRM_ERROR("Unknown unclaimed register before writing to %x\n",
  335.                           reg);
  336.                 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
  337.         }
  338. }
  339.  
  340. static void
  341. hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
  342. {
  343.         if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
  344.             (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
  345.                 DRM_ERROR("Unclaimed write to %x\n", reg);
  346.                 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
  347.         }
  348. }
  349.  
/* Generate i915_read{8,16,32,64}(): an MMIO read taken under uncore.lock.
 * Gen5 first issues the ilk dummy write to wake the chip from rc6. For
 * registers in the gt power well (NEEDS_FORCE_WAKE) the read is bracketed
 * by a forcewake get/put unless some caller already holds a reference
 * (forcewake_count != 0). Every read is reported to the tracepoint. */
#define __i915_read(x) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
	unsigned long irqflags; \
	u##x val = 0; \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	if (dev_priv->info->gen == 5) \
		ilk_dummy_write(dev_priv); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
    return val; \
}

__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read
  376.  
/* Generate i915_write{8,16,32,64}(): an MMIO write taken under uncore.lock.
 * For gt power-well registers a free GT FIFO slot is reserved first; if
 * that wait timed out, GTFIFODBG is checked after the write. Gen5 gets the
 * rc6 dummy write, and HSW-class parts get unclaimed-register detection
 * around the access. Every write is reported to the tracepoint. */
#define __i915_write(x) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
	unsigned long irqflags; \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
    spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	if (dev_priv->info->gen == 5) \
		ilk_dummy_write(dev_priv); \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write
  401.  
/* Registers userspace is allowed to read via i915_reg_read_ioctl().
 * gen_bitmask has bit (1 << gen) set for each supported generation. */
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
  409.  
  410. int i915_reg_read_ioctl(struct drm_device *dev,
  411.                         void *data, struct drm_file *file)
  412. {
  413.         struct drm_i915_private *dev_priv = dev->dev_private;
  414.         struct drm_i915_reg_read *reg = data;
  415.         struct register_whitelist const *entry = whitelist;
  416.         int i;
  417.  
  418.         for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
  419.                 if (entry->offset == reg->offset &&
  420.                     (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
  421.                         break;
  422.         }
  423.  
  424.         if (i == ARRAY_SIZE(whitelist))
  425.                 return -EINVAL;
  426.  
  427.         switch (entry->size) {
  428.         case 8:
  429.                 reg->val = I915_READ64(reg->offset);
  430.                 break;
  431.         case 4:
  432.                 reg->val = I915_READ(reg->offset);
  433.                 break;
  434.         case 2:
  435.                 reg->val = I915_READ16(reg->offset);
  436.                 break;
  437.         case 1:
  438.                 reg->val = I915_READ8(reg->offset);
  439.                 break;
  440.         default:
  441.                 WARN_ON(1);
  442.                 return -EINVAL;
  443.         }
  444.  
  445.         return 0;
  446. }
  447.  
/* Full GPU reset for gen2 parts via D_STATE (plus DEBUG_RESET on
 * i830/845g). i85x has no supported reset path. */
static int i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	/* Assert the gfx reset bit... */
	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		/* ...these parts additionally need a pulse on DEBUG_RESET. */
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	/* Give the hardware time to settle before deasserting. */
	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
  477.  
  478. static int i965_reset_complete(struct drm_device *dev)
  479. {
  480.         u8 gdrst;
  481.         pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
  482.         return (gdrst & GRDOM_RESET_ENABLE) == 0;
  483. }
  484.  
/* Gen4 GPU reset via the I965_GDRST PCI config register: reset the render
 * domain, then the media domain, waiting for each to complete. */
static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* Leave the register in a clean state afterwards. */
	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}
  512.  
  513. static int ironlake_do_reset(struct drm_device *dev)
  514. {
  515.         struct drm_i915_private *dev_priv = dev->dev_private;
  516.         u32 gdrst;
  517.         int ret;
  518.  
  519.         gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
  520.         gdrst &= ~GRDOM_MASK;
  521.         I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
  522.                    gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
  523.         ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
  524.         if (ret)
  525.                 return ret;
  526.  
  527.         /* We can't reset render&media without also resetting display ... */
  528.         gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
  529.         gdrst &= ~GRDOM_MASK;
  530.         I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
  531.                    gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
  532.         return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
  533. }
  534.  
/* Gen6/7 full GPU reset via GEN6_GDRST, performed entirely under
 * uncore.lock so no other MMIO access can race the forcewake state.
 * Forcewake and the FIFO free-entry cache are restored afterwards. */
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int	ret;
	unsigned long irqflags;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	/* The reset clobbered the forcewake state; knock it down cleanly. */
	intel_uncore_forcewake_reset(dev);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->uncore.forcewake_count)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	else
		dev_priv->uncore.funcs.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	return ret;
}
  571.  
  572. int intel_gpu_reset(struct drm_device *dev)
  573. {
  574.         switch (INTEL_INFO(dev)->gen) {
  575.         case 7:
  576.         case 6: return gen6_do_reset(dev);
  577.         case 5: return ironlake_do_reset(dev);
  578.         case 4: return i965_do_reset(dev);
  579.         case 2: return i8xx_do_reset(dev);
  580.         default: return -ENODEV;
  581.         }
  582. }
  583.  
/* Unconditionally clear the sticky unclaimed-register flag, on platforms
 * that have unclaimed-access detection. */
void intel_uncore_clear_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* XXX needs spinlock around caller's grouping */
	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
  592.  
  593. void intel_uncore_check_errors(struct drm_device *dev)
  594. {
  595.         struct drm_i915_private *dev_priv = dev->dev_private;
  596.  
  597.         if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
  598.             (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
  599.                 DRM_ERROR("Unclaimed register before interrupt\n");
  600.                 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
  601.         }
  602. }
  603.