Subversion Repositories Kolibri OS


  1. /*
  2.  * Copyright © 2013 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  */
  23.  
  24. #include "i915_drv.h"
  25. #include "intel_drv.h"
  26. #include "i915_vgpu.h"
  27.  
  28. #include <linux/pm_runtime.h>
  29.  
  30. #define FORCEWAKE_ACK_TIMEOUT_MS 50
  31.  
  32. #define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
  33. #define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
  34.  
  35. #define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
  36. #define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
  37.  
  38. #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
  39. #define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
  40.  
  41. #define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
  42. #define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
  43.  
  44. #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
  45.  
  46. static const char * const forcewake_domain_names[] = {
  47.         "render",
  48.         "blitter",
  49.         "media",
  50. };
  51.  
  52. const char *
  53. intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
  54. {
  55.         BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
  56.  
  57.         if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
  58.                 return forcewake_domain_names[id];
  59.  
  60.         WARN_ON(id);
  61.  
  62.         return "unknown";
  63. }
  64.  
  65. static void
  66. assert_device_not_suspended(struct drm_i915_private *dev_priv)
  67. {
  68.         WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
  69.                   "Device suspended\n");
  70. }
  71.  
  72. static inline void
  73. fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
  74. {
  75.         WARN_ON(d->reg_set == 0);
  76.         __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
  77. }
  78.  
  79. static inline void
  80. fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
  81. {
  82.         /* The upstream driver arms d->timer here so the domain is
  83.          * released later from a timer callback; that deferred-release
  84.          * path is stubbed out in this port, so this hook is a no-op. */
  85. }
  86.  
  87. static inline void
  88. fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
  89. {
  90.         if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
  91.                              FORCEWAKE_KERNEL) == 0,
  92.                             FORCEWAKE_ACK_TIMEOUT_MS))
  93.                 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
  94.                           intel_uncore_forcewake_domain_to_str(d->id));
  95. }
  96.  
  97. static inline void
  98. fw_domain_get(const struct intel_uncore_forcewake_domain *d)
  99. {
  100.         __raw_i915_write32(d->i915, d->reg_set, d->val_set);
  101. }
  102.  
  103. static inline void
  104. fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
  105. {
  106.         if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
  107.                              FORCEWAKE_KERNEL),
  108.                             FORCEWAKE_ACK_TIMEOUT_MS))
  109.                 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
  110.                           intel_uncore_forcewake_domain_to_str(d->id));
  111. }
  112.  
  113. static inline void
  114. fw_domain_put(const struct intel_uncore_forcewake_domain *d)
  115. {
  116.         __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
  117. }
  118.  
  119. static inline void
  120. fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
  121. {
  122.         /* something from same cacheline, but not from the set register */
  123.         if (d->reg_post)
  124.                 __raw_posting_read(d->i915, d->reg_post);
  125. }
  126.  
  127. static void
  128. fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
  129. {
  130.         struct intel_uncore_forcewake_domain *d;
  131.         enum forcewake_domain_id id;
  132.  
  133.         for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
  134.                 fw_domain_wait_ack_clear(d);
  135.                 fw_domain_get(d);
  136.                 fw_domain_wait_ack(d);
  137.         }
  138. }
  139.  
  140. static void
  141. fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
  142. {
  143.         struct intel_uncore_forcewake_domain *d;
  144.         enum forcewake_domain_id id;
  145.  
  146.         for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
  147.                 fw_domain_put(d);
  148.                 fw_domain_posting_read(d);
  149.         }
  150. }
  151.  
  152. static void
  153. fw_domains_posting_read(struct drm_i915_private *dev_priv)
  154. {
  155.         struct intel_uncore_forcewake_domain *d;
  156.         enum forcewake_domain_id id;
  157.  
  158.         /* No need to do this for all domains; the first one found is enough */
  159.         for_each_fw_domain(d, dev_priv, id) {
  160.                 fw_domain_posting_read(d);
  161.                 break;
  162.         }
  163. }
  164.  
  165. static void
  166. fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
  167. {
  168.         struct intel_uncore_forcewake_domain *d;
  169.         enum forcewake_domain_id id;
  170.  
  171.         if (dev_priv->uncore.fw_domains == 0)
  172.                 return;
  173.  
  174.         for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
  175.                 fw_domain_reset(d);
  176.  
  177.         fw_domains_posting_read(dev_priv);
  178. }
  179.  
  180. static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
  181. {
  182.         /* w/a for a sporadic read returning 0 by waiting for the GT
  183.          * thread to wake up.
  184.          */
  185.         if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
  186.                                 GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
  187.                 DRM_ERROR("GT thread status wait timed out\n");
  188. }
  189.  
  190. static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
  191.                                               enum forcewake_domains fw_domains)
  192. {
  193.         fw_domains_get(dev_priv, fw_domains);
  194.  
  195.         /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
  196.         __gen6_gt_wait_for_thread_c0(dev_priv);
  197. }
  198.  
  199. static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
  200. {
  201.         u32 gtfifodbg;
  202.  
  203.         gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
  204.         if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
  205.                 __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
  206. }
  207.  
  208. static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
  209.                                      enum forcewake_domains fw_domains)
  210. {
  211.         fw_domains_put(dev_priv, fw_domains);
  212.         gen6_gt_check_fifodbg(dev_priv);
  213. }
  214.  
  215. static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
  216. {
  217.         u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
  218.  
  219.         return count & GT_FIFO_FREE_ENTRIES_MASK;
  220. }
  221.  
  222. static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
  223. {
  224.         int ret = 0;
  225.  
  226.         /* On VLV, the FIFO is shared by both SW and HW,
  227.          * so we need to read FREE_ENTRIES every time. */
  228.         if (IS_VALLEYVIEW(dev_priv->dev))
  229.                 dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
  230.  
  231.         if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
  232.                 int loop = 500;
  233.                 u32 fifo = fifo_free_entries(dev_priv);
  234.  
  235.                 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
  236.                         udelay(10);
  237.                         fifo = fifo_free_entries(dev_priv);
  238.                 }
  239.                 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
  240.                         ++ret;
  241.                 dev_priv->uncore.fifo_count = fifo;
  242.         }
  243.         dev_priv->uncore.fifo_count--;
  244.  
  245.         return ret;
  246. }
  247.  
  248. static void intel_uncore_fw_release_timer(unsigned long arg)
  249. {
  250.         struct intel_uncore_forcewake_domain *domain = (void *)arg;
  251.         unsigned long irqflags;
  252.  
  253.         assert_device_not_suspended(domain->i915);
  254.  
  255.         spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
  256.         if (WARN_ON(domain->wake_count == 0))
  257.                 domain->wake_count++;
  258.  
  259.         if (--domain->wake_count == 0)
  260.                 domain->i915->uncore.funcs.force_wake_put(domain->i915,
  261.                                                           1 << domain->id);
  262.  
  263.         spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
  264. }
  265.  
  266. void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
  267. {
  268.         struct drm_i915_private *dev_priv = dev->dev_private;
  269.         unsigned long irqflags;
  270.         struct intel_uncore_forcewake_domain *domain;
  271.         int retry_count = 100;
  272.         enum forcewake_domain_id id;
  273.         enum forcewake_domains fw = 0, active_domains;
  274.  
  275.         /* Hold uncore.lock across reset to prevent any register access
  276.          * with forcewake not set correctly. Wait until all pending
  277.          * timers are run before holding.
  278.          */
  279.         while (1) {
  280.                 active_domains = 0;
  281.  
  282.                 for_each_fw_domain(domain, dev_priv, id) {
  283.                         if (del_timer_sync(&domain->timer) == 0)
  284.                                 continue;
  285.  
  286.                         intel_uncore_fw_release_timer((unsigned long)domain);
  287.                 }
  288.  
  289.                 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  290.  
  291.                 for_each_fw_domain(domain, dev_priv, id) {
  292. //                      if (timer_pending(&domain->timer))
  293. //                              active_domains |= (1 << id);
  294.                 }
  295.  
  296.                 if (active_domains == 0)
  297.                         break;
  298.  
  299.                 if (--retry_count == 0) {
  300.                         DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
  301.                         break;
  302.                 }
  303.  
  304.                 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  305.                 change_task();
  306.         }
  307.  
  308.         WARN_ON(active_domains);
  309.  
  310.         for_each_fw_domain(domain, dev_priv, id)
  311.                 if (domain->wake_count)
  312.                         fw |= 1 << id;
  313.  
  314.         if (fw)
  315.                 dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
  316.  
  317.         fw_domains_reset(dev_priv, FORCEWAKE_ALL);
  318.  
  319.         if (restore) { /* If reset with a user forcewake, try to restore */
  320.                 if (fw)
  321.                         dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
  322.  
  323.                 if (IS_GEN6(dev) || IS_GEN7(dev))
  324.                         dev_priv->uncore.fifo_count =
  325.                                 fifo_free_entries(dev_priv);
  326.         }
  327.  
  328.         if (!restore)
  329.                 assert_forcewakes_inactive(dev_priv);
  330.  
  331.         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  332. }
  333.  
  334. static void intel_uncore_ellc_detect(struct drm_device *dev)
  335. {
  336.         struct drm_i915_private *dev_priv = dev->dev_private;
  337.  
  338.         if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
  339.              INTEL_INFO(dev)->gen >= 9) &&
  340.             (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
  341.                 /* The docs do not explain exactly how the calculation can be
  342.                  * made. It is somewhat guessable, but for now, it's always
  343.                  * 128MB.
  344.                  * NB: We can't write IDICR yet because we do not have gt funcs
  345.                  * set up */
  346.                 dev_priv->ellc_size = 128;
  347.                 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
  348.         }
  349. }
  350.  
  351. static void __intel_uncore_early_sanitize(struct drm_device *dev,
  352.                                           bool restore_forcewake)
  353. {
  354.         struct drm_i915_private *dev_priv = dev->dev_private;
  355.  
  356.         if (HAS_FPGA_DBG_UNCLAIMED(dev))
  357.                 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
  358.  
  359.         /* clear out old GT FIFO errors */
  360.         if (IS_GEN6(dev) || IS_GEN7(dev))
  361.                 __raw_i915_write32(dev_priv, GTFIFODBG,
  362.                                    __raw_i915_read32(dev_priv, GTFIFODBG));
  363.  
  364.         /* WaDisableShadowRegForCpd:chv */
  365.         if (IS_CHERRYVIEW(dev)) {
  366.                 __raw_i915_write32(dev_priv, GTFIFOCTL,
  367.                                    __raw_i915_read32(dev_priv, GTFIFOCTL) |
  368.                                    GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
  369.                                    GT_FIFO_CTL_RC6_POLICY_STALL);
  370.         }
  371.  
  372.         intel_uncore_forcewake_reset(dev, restore_forcewake);
  373. }
  374.  
  375. void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
  376. {
  377.         __intel_uncore_early_sanitize(dev, restore_forcewake);
  378.         i915_check_and_clear_faults(dev);
  379. }
  380.  
  381. void intel_uncore_sanitize(struct drm_device *dev)
  382. {
  383.         /* BIOS often leaves RC6 enabled, but disable it for hw init */
  384.         intel_disable_gt_powersave(dev);
  385. }
  386.  
  387. static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
  388.                                          enum forcewake_domains fw_domains)
  389. {
  390.         struct intel_uncore_forcewake_domain *domain;
  391.         enum forcewake_domain_id id;
  392.  
  393.         if (!dev_priv->uncore.funcs.force_wake_get)
  394.                 return;
  395.  
  396.         fw_domains &= dev_priv->uncore.fw_domains;
  397.  
  398.         for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
  399.                 if (domain->wake_count++)
  400.                         fw_domains &= ~(1 << id);
  401.         }
  402.  
  403.         if (fw_domains)
  404.                 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
  405. }
  406.  
  407. /**
  408.  * intel_uncore_forcewake_get - grab forcewake domain references
  409.  * @dev_priv: i915 device instance
  410.  * @fw_domains: forcewake domains to get reference on
  411.  *
  412.  * This function can be used to get GT's forcewake domain references.
  413.  * Normal register access will handle the forcewake domains automatically.
  414.  * However, if some sequence requires the GT to not power down particular
  415.  * forcewake domains, this function should be called at the beginning of the
  416.  * sequence, and the references should subsequently be dropped by a symmetric
  417.  * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
  418.  * to be kept awake, in which case @fw_domains should be FORCEWAKE_ALL.
  419.  */
  420. void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
  421.                                 enum forcewake_domains fw_domains)
  422. {
  423.         unsigned long irqflags;
  424.  
  425.         if (!dev_priv->uncore.funcs.force_wake_get)
  426.                 return;
  427.  
  428.         WARN_ON(dev_priv->pm.suspended);
  429.  
  430.         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  431.         __intel_uncore_forcewake_get(dev_priv, fw_domains);
  432.         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  433. }
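/*
 * Minimal usage sketch (illustrative only, following the kernel-doc above):
 * a sequence that must keep particular domains powered brackets its register
 * accesses with a symmetric get/put pair, typically on all domains:
 *
 *      intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *      ... register accesses that must not race with GT power-down ...
 *      intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */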
  434.  
  435. /**
  436.  * intel_uncore_forcewake_get__locked - grab forcewake domain references
  437.  * @dev_priv: i915 device instance
  438.  * @fw_domains: forcewake domains to get reference on
  439.  *
  440.  * See intel_uncore_forcewake_get(). This variant places the onus
  441.  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
  442.  */
  443. void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
  444.                                         enum forcewake_domains fw_domains)
  445. {
  446.         assert_spin_locked(&dev_priv->uncore.lock);
  447.  
  448.         if (!dev_priv->uncore.funcs.force_wake_get)
  449.                 return;
  450.  
  451.         __intel_uncore_forcewake_get(dev_priv, fw_domains);
  452. }
  453.  
  454. static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
  455.                                          enum forcewake_domains fw_domains)
  456. {
  457.         struct intel_uncore_forcewake_domain *domain;
  458.         enum forcewake_domain_id id;
  459.  
  460.         if (!dev_priv->uncore.funcs.force_wake_put)
  461.                 return;
  462.  
  463.         fw_domains &= dev_priv->uncore.fw_domains;
  464.  
  465.         for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
  466.                 if (WARN_ON(domain->wake_count == 0))
  467.                         continue;
  468.  
  469.                 if (--domain->wake_count)
  470.                         continue;
  471.  
  472.                 domain->wake_count++;
  473.                 fw_domain_arm_timer(domain);
  474.         }
  475. }
  476.  
  477. /**
  478.  * intel_uncore_forcewake_put - release a forcewake domain reference
  479.  * @dev_priv: i915 device instance
  480.  * @fw_domains: forcewake domains to put references on
  481.  *
  482.  * This function drops the device-level forcewakes for specified
  483.  * domains obtained by intel_uncore_forcewake_get().
  484.  */
  485. void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
  486.                                 enum forcewake_domains fw_domains)
  487. {
  488.         unsigned long irqflags;
  489.  
  490.         if (!dev_priv->uncore.funcs.force_wake_put)
  491.                 return;
  492.  
  493.         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  494.         __intel_uncore_forcewake_put(dev_priv, fw_domains);
  495.         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  496. }
  497.  
  498. /**
  499.  * intel_uncore_forcewake_put__locked - release forcewake domain references
  500.  * @dev_priv: i915 device instance
  501.  * @fw_domains: forcewake domains to put references on
  502.  *
  503.  * See intel_uncore_forcewake_put(). This variant places the onus
  504.  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
  505.  */
  506. void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
  507.                                         enum forcewake_domains fw_domains)
  508. {
  509.         assert_spin_locked(&dev_priv->uncore.lock);
  510.  
  511.         if (!dev_priv->uncore.funcs.force_wake_put)
  512.                 return;
  513.  
  514.         __intel_uncore_forcewake_put(dev_priv, fw_domains);
  515. }
  516.  
  517. void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
  518. {
  519.         struct intel_uncore_forcewake_domain *domain;
  520.         enum forcewake_domain_id id;
  521.  
  522.         if (!dev_priv->uncore.funcs.force_wake_get)
  523.                 return;
  524.  
  525.         for_each_fw_domain(domain, dev_priv, id)
  526.                 WARN_ON(domain->wake_count);
  527. }
  528.  
  529. /* We give fast paths for the really cool registers */
  530. #define NEEDS_FORCE_WAKE(reg) \
  531.          ((reg) < 0x40000 && (reg) != FORCEWAKE)
  532.  
  533. #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
  534.  
  535. #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
  536.         (REG_RANGE((reg), 0x2000, 0x4000) || \
  537.          REG_RANGE((reg), 0x5000, 0x8000) || \
  538.          REG_RANGE((reg), 0xB000, 0x12000) || \
  539.          REG_RANGE((reg), 0x2E000, 0x30000))
  540.  
  541. #define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
  542.         (REG_RANGE((reg), 0x12000, 0x14000) || \
  543.          REG_RANGE((reg), 0x22000, 0x24000) || \
  544.          REG_RANGE((reg), 0x30000, 0x40000))
  545.  
  546. #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
  547.         (REG_RANGE((reg), 0x2000, 0x4000) || \
  548.          REG_RANGE((reg), 0x5200, 0x8000) || \
  549.          REG_RANGE((reg), 0x8300, 0x8500) || \
  550.          REG_RANGE((reg), 0xB000, 0xB480) || \
  551.          REG_RANGE((reg), 0xE000, 0xE800))
  552.  
  553. #define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
  554.         (REG_RANGE((reg), 0x8800, 0x8900) || \
  555.          REG_RANGE((reg), 0xD000, 0xD800) || \
  556.          REG_RANGE((reg), 0x12000, 0x14000) || \
  557.          REG_RANGE((reg), 0x1A000, 0x1C000) || \
  558.          REG_RANGE((reg), 0x1E800, 0x1EA00) || \
  559.          REG_RANGE((reg), 0x30000, 0x38000))
  560.  
  561. #define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
  562.         (REG_RANGE((reg), 0x4000, 0x5000) || \
  563.          REG_RANGE((reg), 0x8000, 0x8300) || \
  564.          REG_RANGE((reg), 0x8500, 0x8600) || \
  565.          REG_RANGE((reg), 0x9000, 0xB000) || \
  566.          REG_RANGE((reg), 0xF000, 0x10000))
  567.  
  568. #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
  569.         REG_RANGE((reg), 0xB00,  0x2000)
  570.  
  571. #define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
  572.         (REG_RANGE((reg), 0x2000, 0x2700) || \
  573.          REG_RANGE((reg), 0x3000, 0x4000) || \
  574.          REG_RANGE((reg), 0x5200, 0x8000) || \
  575.          REG_RANGE((reg), 0x8140, 0x8160) || \
  576.          REG_RANGE((reg), 0x8300, 0x8500) || \
  577.          REG_RANGE((reg), 0x8C00, 0x8D00) || \
  578.          REG_RANGE((reg), 0xB000, 0xB480) || \
  579.          REG_RANGE((reg), 0xE000, 0xE900) || \
  580.          REG_RANGE((reg), 0x24400, 0x24800))
  581.  
  582. #define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
  583.         (REG_RANGE((reg), 0x8130, 0x8140) || \
  584.          REG_RANGE((reg), 0x8800, 0x8A00) || \
  585.          REG_RANGE((reg), 0xD000, 0xD800) || \
  586.          REG_RANGE((reg), 0x12000, 0x14000) || \
  587.          REG_RANGE((reg), 0x1A000, 0x1EA00) || \
  588.          REG_RANGE((reg), 0x30000, 0x40000))
  589.  
  590. #define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
  591.         REG_RANGE((reg), 0x9400, 0x9800)
  592.  
  593. #define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
  594.         ((reg) < 0x40000 &&\
  595.          !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
  596.          !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
  597.          !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
  598.          !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
  599.  
  600. static void
  601. ilk_dummy_write(struct drm_i915_private *dev_priv)
  602. {
  603.         /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
  604.          * the chip from rc6 before touching it for real. MI_MODE is masked,
  605.          * hence harmless to write 0 into. */
  606.         __raw_i915_write32(dev_priv, MI_MODE, 0);
  607. }
  608.  
  609. static void
  610. hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
  611.                         bool before)
  612. {
  613.         const char *op = read ? "reading" : "writing to";
  614.         const char *when = before ? "before" : "after";
  615.  
  616.         if (!i915.mmio_debug)
  617.                 return;
  618.  
  619.         if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
  620.                 WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
  621.                      when, op, reg);
  622.                 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
  623.                 i915.mmio_debug--; /* Only report the first N failures */
  624.         }
  625. }
  626.  
  627. static void
  628. hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
  629. {
  630.         static bool mmio_debug_once = true;
  631.  
  632.         if (i915.mmio_debug || !mmio_debug_once)
  633.                 return;
  634.  
  635.         if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
  636.                 DRM_DEBUG("Unclaimed register detected, "
  637.                           "enabling oneshot unclaimed register reporting. "
  638.                           "Please use i915.mmio_debug=N for more information.\n");
  639.                 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
  640.                 i915.mmio_debug = mmio_debug_once--;
  641.         }
  642. }
  643.  
  644. #define GEN2_READ_HEADER(x) \
  645.         u##x val = 0; \
  646.         assert_device_not_suspended(dev_priv);
  647.  
  648. #define GEN2_READ_FOOTER \
  649.         trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
  650.         return val
  651.  
  652. #define __gen2_read(x) \
  653. static u##x \
  654. gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
  655.         GEN2_READ_HEADER(x); \
  656.         val = __raw_i915_read##x(dev_priv, reg); \
  657.         GEN2_READ_FOOTER; \
  658. }
  659.  
  660. #define __gen5_read(x) \
  661. static u##x \
  662. gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
  663.         GEN2_READ_HEADER(x); \
  664.         ilk_dummy_write(dev_priv); \
  665.         val = __raw_i915_read##x(dev_priv, reg); \
  666.         GEN2_READ_FOOTER; \
  667. }
  668.  
  669. __gen5_read(8)
  670. __gen5_read(16)
  671. __gen5_read(32)
  672. __gen5_read(64)
  673. __gen2_read(8)
  674. __gen2_read(16)
  675. __gen2_read(32)
  676. __gen2_read(64)
  677.  
  678. #undef __gen5_read
  679. #undef __gen2_read
  680.  
  681. #undef GEN2_READ_FOOTER
  682. #undef GEN2_READ_HEADER
  683.  
  684. #define GEN6_READ_HEADER(x) \
  685.         unsigned long irqflags; \
  686.         u##x val = 0; \
  687.         assert_device_not_suspended(dev_priv); \
  688.         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
  689.  
  690. #define GEN6_READ_FOOTER \
  691.         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
  692.         trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
  693.         return val
  694.  
  695. static inline void __force_wake_get(struct drm_i915_private *dev_priv,
  696.                                     enum forcewake_domains fw_domains)
  697. {
  698.         struct intel_uncore_forcewake_domain *domain;
  699.         enum forcewake_domain_id id;
  700.  
  701.         if (WARN_ON(!fw_domains))
  702.                 return;
  703.  
  704.         /* Ideally GCC would constant-fold and eliminate this loop */
  705.         for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
  706.                 if (domain->wake_count) {
  707.                         fw_domains &= ~(1 << id);
  708.                         continue;
  709.                 }
  710.  
  711.                 domain->wake_count++;
  712.                 fw_domain_arm_timer(domain);
  713.         }
  714.  
  715.         if (fw_domains)
  716.                 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
  717. }
  718.  
  719. #define __vgpu_read(x) \
  720. static u##x \
  721. vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
  722.         GEN6_READ_HEADER(x); \
  723.         val = __raw_i915_read##x(dev_priv, reg); \
  724.         GEN6_READ_FOOTER; \
  725. }
  726.  
  727. #define __gen6_read(x) \
  728. static u##x \
  729. gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
  730.         GEN6_READ_HEADER(x); \
  731.         hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
  732.         if (NEEDS_FORCE_WAKE(reg)) \
  733.                 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
  734.         val = __raw_i915_read##x(dev_priv, reg); \
  735.         hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
  736.         GEN6_READ_FOOTER; \
  737. }
  738.  
  739. #define __vlv_read(x) \
  740. static u##x \
  741. vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
  742.         GEN6_READ_HEADER(x); \
  743.         if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
  744.                 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
  745.         else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
  746.                 __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
  747.         val = __raw_i915_read##x(dev_priv, reg); \
  748.         GEN6_READ_FOOTER; \
  749. }
  750.  
  751. #define __chv_read(x) \
  752. static u##x \
  753. chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
  754.         GEN6_READ_HEADER(x); \
  755.         if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
  756.                 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
  757.         else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
  758.                 __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
  759.         else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
  760.                 __force_wake_get(dev_priv, \
  761.                                  FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
  762.         val = __raw_i915_read##x(dev_priv, reg); \
  763.         GEN6_READ_FOOTER; \
  764. }
  765.  
  766. #define SKL_NEEDS_FORCE_WAKE(reg) \
  767.          ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
  768.  
  769. #define __gen9_read(x) \
  770. static u##x \
  771. gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
  772.         enum forcewake_domains fw_engine; \
  773.         GEN6_READ_HEADER(x); \
  774.         hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
  775.         if (!SKL_NEEDS_FORCE_WAKE(reg)) \
  776.                 fw_engine = 0; \
  777.         else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
  778.                 fw_engine = FORCEWAKE_RENDER; \
  779.         else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
  780.                 fw_engine = FORCEWAKE_MEDIA; \
  781.         else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
  782.                 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
  783.         else \
  784.                 fw_engine = FORCEWAKE_BLITTER; \
  785.         if (fw_engine) \
  786.                 __force_wake_get(dev_priv, fw_engine); \
  787.         val = __raw_i915_read##x(dev_priv, reg); \
  788.         hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
  789.         GEN6_READ_FOOTER; \
  790. }
  791.  
  792. __vgpu_read(8)
  793. __vgpu_read(16)
  794. __vgpu_read(32)
  795. __vgpu_read(64)
  796. __gen9_read(8)
  797. __gen9_read(16)
  798. __gen9_read(32)
  799. __gen9_read(64)
  800. __chv_read(8)
  801. __chv_read(16)
  802. __chv_read(32)
  803. __chv_read(64)
  804. __vlv_read(8)
  805. __vlv_read(16)
  806. __vlv_read(32)
  807. __vlv_read(64)
  808. __gen6_read(8)
  809. __gen6_read(16)
  810. __gen6_read(32)
  811. __gen6_read(64)
  812.  
  813. #undef __gen9_read
  814. #undef __chv_read
  815. #undef __vlv_read
  816. #undef __gen6_read
  817. #undef __vgpu_read
  818. #undef GEN6_READ_FOOTER
  819. #undef GEN6_READ_HEADER
  820.  
  821. #define GEN2_WRITE_HEADER \
  822.         trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
  823.         assert_device_not_suspended(dev_priv); \
  824.  
  825. #define GEN2_WRITE_FOOTER
  826.  
  827. #define __gen2_write(x) \
  828. static void \
  829. gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
  830.         GEN2_WRITE_HEADER; \
  831.         __raw_i915_write##x(dev_priv, reg, val); \
  832.         GEN2_WRITE_FOOTER; \
  833. }
  834.  
  835. #define __gen5_write(x) \
  836. static void \
  837. gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
  838.         GEN2_WRITE_HEADER; \
  839.         ilk_dummy_write(dev_priv); \
  840.         __raw_i915_write##x(dev_priv, reg, val); \
  841.         GEN2_WRITE_FOOTER; \
  842. }
  843.  
  844. __gen5_write(8)
  845. __gen5_write(16)
  846. __gen5_write(32)
  847. __gen5_write(64)
  848. __gen2_write(8)
  849. __gen2_write(16)
  850. __gen2_write(32)
  851. __gen2_write(64)
  852.  
  853. #undef __gen5_write
  854. #undef __gen2_write
  855.  
  856. #undef GEN2_WRITE_FOOTER
  857. #undef GEN2_WRITE_HEADER
  858.  
  859. #define GEN6_WRITE_HEADER \
  860.         unsigned long irqflags; \
  861.         trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
  862.         assert_device_not_suspended(dev_priv); \
  863.         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
  864.  
  865. #define GEN6_WRITE_FOOTER \
  866.         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
  867.  
  868. #define __gen6_write(x) \
  869. static void \
  870. gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
  871.         u32 __fifo_ret = 0; \
  872.         GEN6_WRITE_HEADER; \
  873.         if (NEEDS_FORCE_WAKE(reg)) { \
  874.                 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
  875.         } \
  876.         __raw_i915_write##x(dev_priv, reg, val); \
  877.         if (unlikely(__fifo_ret)) { \
  878.                 gen6_gt_check_fifodbg(dev_priv); \
  879.         } \
  880.         GEN6_WRITE_FOOTER; \
  881. }
  882.  
  883. #define __hsw_write(x) \
  884. static void \
  885. hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
  886.         u32 __fifo_ret = 0; \
  887.         GEN6_WRITE_HEADER; \
  888.         if (NEEDS_FORCE_WAKE(reg)) { \
  889.                 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
  890.         } \
  891.         hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
  892.         __raw_i915_write##x(dev_priv, reg, val); \
  893.         if (unlikely(__fifo_ret)) { \
  894.                 gen6_gt_check_fifodbg(dev_priv); \
  895.         } \
  896.         hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
  897.         hsw_unclaimed_reg_detect(dev_priv); \
  898.         GEN6_WRITE_FOOTER; \
  899. }
  900.  
  901. #define __vgpu_write(x) \
  902. static void vgpu_write##x(struct drm_i915_private *dev_priv, \
  903.                           off_t reg, u##x val, bool trace) { \
  904.         GEN6_WRITE_HEADER; \
  905.         __raw_i915_write##x(dev_priv, reg, val); \
  906.         GEN6_WRITE_FOOTER; \
  907. }
  908.  
  909. static const u32 gen8_shadowed_regs[] = {
  910.         FORCEWAKE_MT,
  911.         GEN6_RPNSWREQ,
  912.         GEN6_RC_VIDEO_FREQ,
  913.         RING_TAIL(RENDER_RING_BASE),
  914.         RING_TAIL(GEN6_BSD_RING_BASE),
  915.         RING_TAIL(VEBOX_RING_BASE),
  916.         RING_TAIL(BLT_RING_BASE),
  917.         /* TODO: Other registers are not yet used */
  918. };
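/*
 * Clarifying note: registers in this list are treated as "shadowed", i.e.
 * __gen8_write() below skips the forcewake grab for them (see the
 * !is_gen8_shadowed() check), so only writes to non-shadowed registers
 * force the render domain awake.
 */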
  919.  
  920. static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
  921. {
  922.         int i;
  923.         for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
  924.                 if (reg == gen8_shadowed_regs[i])
  925.                         return true;
  926.  
  927.         return false;
  928. }
  929.  
  930. #define __gen8_write(x) \
  931. static void \
  932. gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
  933.         GEN6_WRITE_HEADER; \
  934.         hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
  935.         if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
  936.                 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
  937.         __raw_i915_write##x(dev_priv, reg, val); \
  938.         hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
  939.         hsw_unclaimed_reg_detect(dev_priv); \
  940.         GEN6_WRITE_FOOTER; \
  941. }
  942.  
  943. #define __chv_write(x) \
  944. static void \
  945. chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
  946.         bool shadowed = is_gen8_shadowed(dev_priv, reg); \
  947.         GEN6_WRITE_HEADER; \
  948.         if (!shadowed) { \
  949.                 if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
  950.                         __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
  951.                 else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
  952.                         __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
  953.                 else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
  954.                         __force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
  955.         } \
  956.         __raw_i915_write##x(dev_priv, reg, val); \
  957.         GEN6_WRITE_FOOTER; \
  958. }
  959.  
  960. static const u32 gen9_shadowed_regs[] = {
  961.         RING_TAIL(RENDER_RING_BASE),
  962.         RING_TAIL(GEN6_BSD_RING_BASE),
  963.         RING_TAIL(VEBOX_RING_BASE),
  964.         RING_TAIL(BLT_RING_BASE),
  965.         FORCEWAKE_BLITTER_GEN9,
  966.         FORCEWAKE_RENDER_GEN9,
  967.         FORCEWAKE_MEDIA_GEN9,
  968.         GEN6_RPNSWREQ,
  969.         GEN6_RC_VIDEO_FREQ,
  970.         /* TODO: Other registers are not yet used */
  971. };
  972.  
  973. static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
  974. {
  975.         int i;
  976.         for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
  977.                 if (reg == gen9_shadowed_regs[i])
  978.                         return true;
  979.  
  980.         return false;
  981. }
  982.  
  983. #define __gen9_write(x) \
  984. static void \
  985. gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
  986.                 bool trace) { \
  987.         enum forcewake_domains fw_engine; \
  988.         GEN6_WRITE_HEADER; \
  989.         hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
  990.         if (!SKL_NEEDS_FORCE_WAKE(reg) || \
  991.             is_gen9_shadowed(dev_priv, reg)) \
  992.                 fw_engine = 0; \
  993.         else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
  994.                 fw_engine = FORCEWAKE_RENDER; \
  995.         else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
  996.                 fw_engine = FORCEWAKE_MEDIA; \
  997.         else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
  998.                 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
  999.         else \
  1000.                 fw_engine = FORCEWAKE_BLITTER; \
  1001.         if (fw_engine) \
  1002.                 __force_wake_get(dev_priv, fw_engine); \
  1003.         __raw_i915_write##x(dev_priv, reg, val); \
  1004.         hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
  1005.         hsw_unclaimed_reg_detect(dev_priv); \
  1006.         GEN6_WRITE_FOOTER; \
  1007. }
  1008.  
  1009. __gen9_write(8)
  1010. __gen9_write(16)
  1011. __gen9_write(32)
  1012. __gen9_write(64)
  1013. __chv_write(8)
  1014. __chv_write(16)
  1015. __chv_write(32)
  1016. __chv_write(64)
  1017. __gen8_write(8)
  1018. __gen8_write(16)
  1019. __gen8_write(32)
  1020. __gen8_write(64)
  1021. __hsw_write(8)
  1022. __hsw_write(16)
  1023. __hsw_write(32)
  1024. __hsw_write(64)
  1025. __gen6_write(8)
  1026. __gen6_write(16)
  1027. __gen6_write(32)
  1028. __gen6_write(64)
  1029. __vgpu_write(8)
  1030. __vgpu_write(16)
  1031. __vgpu_write(32)
  1032. __vgpu_write(64)
  1033.  
  1034. #undef __gen9_write
  1035. #undef __chv_write
  1036. #undef __gen8_write
  1037. #undef __hsw_write
  1038. #undef __gen6_write
  1039. #undef __vgpu_write
  1040. #undef GEN6_WRITE_FOOTER
  1041. #undef GEN6_WRITE_HEADER
  1042.  
  1043. #define ASSIGN_WRITE_MMIO_VFUNCS(x) \
  1044. do { \
  1045.         dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
  1046.         dev_priv->uncore.funcs.mmio_writew = x##_write16; \
  1047.         dev_priv->uncore.funcs.mmio_writel = x##_write32; \
  1048.         dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
  1049. } while (0)
  1050.  
  1051. #define ASSIGN_READ_MMIO_VFUNCS(x) \
  1052. do { \
  1053.         dev_priv->uncore.funcs.mmio_readb = x##_read8; \
  1054.         dev_priv->uncore.funcs.mmio_readw = x##_read16; \
  1055.         dev_priv->uncore.funcs.mmio_readl = x##_read32; \
  1056.         dev_priv->uncore.funcs.mmio_readq = x##_read64; \
  1057. } while (0)
  1058.  
  1059.  
  1060. static void fw_domain_init(struct drm_i915_private *dev_priv,
  1061.                            enum forcewake_domain_id domain_id,
  1062.                            u32 reg_set, u32 reg_ack)
  1063. {
  1064.         struct intel_uncore_forcewake_domain *d;
  1065.  
  1066.         if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
  1067.                 return;
  1068.  
  1069.         d = &dev_priv->uncore.fw_domain[domain_id];
  1070.  
  1071.         WARN_ON(d->wake_count);
  1072.  
  1073.         d->wake_count = 0;
  1074.         d->reg_set = reg_set;
  1075.         d->reg_ack = reg_ack;
  1076.  
  1077.         if (IS_GEN6(dev_priv)) {
  1078.                 d->val_reset = 0;
  1079.                 d->val_set = FORCEWAKE_KERNEL;
  1080.                 d->val_clear = 0;
  1081.         } else {
  1082.                 /* WaRsClearFWBitsAtReset:bdw,skl */
  1083.                 d->val_reset = _MASKED_BIT_DISABLE(0xffff);
  1084.                 d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
  1085.                 d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
  1086.         }
  1087.  
  1088.         if (IS_VALLEYVIEW(dev_priv))
  1089.                 d->reg_post = FORCEWAKE_ACK_VLV;
  1090.         else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
  1091.                 d->reg_post = ECOBUS;
  1092.         else
  1093.                 d->reg_post = 0;
  1094.  
  1095.         d->i915 = dev_priv;
  1096.         d->id = domain_id;
  1097.  
  1098.         setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
  1099.  
  1100.         dev_priv->uncore.fw_domains |= (1 << domain_id);
  1101.  
  1102.         fw_domain_reset(d);
  1103. }
  1104.  
  1105. static void intel_uncore_fw_domains_init(struct drm_device *dev)
  1106. {
  1107.         struct drm_i915_private *dev_priv = dev->dev_private;
  1108.  
  1109.         if (INTEL_INFO(dev_priv->dev)->gen <= 5)
  1110.                 return;
  1111.  
  1112.         if (IS_GEN9(dev)) {
  1113.                 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
  1114.                 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
  1115.                 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1116.                                FORCEWAKE_RENDER_GEN9,
  1117.                                FORCEWAKE_ACK_RENDER_GEN9);
  1118.                 fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
  1119.                                FORCEWAKE_BLITTER_GEN9,
  1120.                                FORCEWAKE_ACK_BLITTER_GEN9);
  1121.                 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
  1122.                                FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
  1123.         } else if (IS_VALLEYVIEW(dev)) {
  1124.                 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
  1125.                 if (!IS_CHERRYVIEW(dev))
  1126.                         dev_priv->uncore.funcs.force_wake_put =
  1127.                                 fw_domains_put_with_fifo;
  1128.                 else
  1129.                         dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
  1130.                 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1131.                                FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
  1132.                 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
  1133.                                FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
  1134.         } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  1135.                 dev_priv->uncore.funcs.force_wake_get =
  1136.                         fw_domains_get_with_thread_status;
  1137.                 if (IS_HASWELL(dev))
  1138.                         dev_priv->uncore.funcs.force_wake_put =
  1139.                                 fw_domains_put_with_fifo;
  1140.                 else
  1141.                         dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
  1142.                 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1143.                                FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
  1144.         } else if (IS_IVYBRIDGE(dev)) {
  1145.                 u32 ecobus;
  1146.  
  1147.                 /* IVB configs may use multi-threaded forcewake */
  1148.  
  1149.                 /* A small trick here - if the bios hasn't configured
  1150.                  * MT forcewake, and if the device is in RC6, then
  1151.                  * force_wake_mt_get will not wake the device and the
  1152.                  * ECOBUS read will return zero. Which will be
  1153.                  * (correctly) interpreted by the test below as MT
  1154.                  * forcewake being disabled.
  1155.                  */
  1156.                 dev_priv->uncore.funcs.force_wake_get =
  1157.                         fw_domains_get_with_thread_status;
  1158.                 dev_priv->uncore.funcs.force_wake_put =
  1159.                         fw_domains_put_with_fifo;
  1160.  
  1161.                 /* We need to init first for ECOBUS access and then
  1162.                  * determine later if we want to reinit, in case MT access
  1163.                  * is not working. At this stage we don't know which flavour
  1164.                  * this ivb is, so it is better to also reset the gen6 fw
  1165.                  * registers before the ecobus check.
  1166.                  */
  1167.  
  1168.                 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
  1169.                 __raw_posting_read(dev_priv, ECOBUS);
  1170.  
  1171.                 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1172.                                FORCEWAKE_MT, FORCEWAKE_MT_ACK);
  1173.  
  1174.                 mutex_lock(&dev->struct_mutex);
  1175.                 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
  1176.                 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
  1177.                 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
  1178.                 mutex_unlock(&dev->struct_mutex);
  1179.  
  1180.                 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
  1181.                         DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
  1182.                         DRM_INFO("when using vblank-synced partial screen updates.\n");
  1183.                         fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1184.                                        FORCEWAKE, FORCEWAKE_ACK);
  1185.                 }
  1186.         } else if (IS_GEN6(dev)) {
  1187.                 dev_priv->uncore.funcs.force_wake_get =
  1188.                         fw_domains_get_with_thread_status;
  1189.                 dev_priv->uncore.funcs.force_wake_put =
  1190.                         fw_domains_put_with_fifo;
  1191.                 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1192.                                FORCEWAKE, FORCEWAKE_ACK);
  1193.         }
  1194.  
  1195.         /* All future platforms are expected to require complex power gating */
  1196.         WARN_ON(dev_priv->uncore.fw_domains == 0);
  1197. }
  1198.  
  1199. void intel_uncore_init(struct drm_device *dev)
  1200. {
  1201.         struct drm_i915_private *dev_priv = dev->dev_private;
  1202.  
  1203.         i915_check_vgpu(dev);
  1204.  
  1205.         intel_uncore_ellc_detect(dev);
  1206.         intel_uncore_fw_domains_init(dev);
  1207.         __intel_uncore_early_sanitize(dev, false);
  1208.  
  1209.         switch (INTEL_INFO(dev)->gen) {
  1210.         default:
  1211.         case 9:
  1212.                 ASSIGN_WRITE_MMIO_VFUNCS(gen9);
  1213.                 ASSIGN_READ_MMIO_VFUNCS(gen9);
  1214.                 break;
  1215.         case 8:
  1216.                 if (IS_CHERRYVIEW(dev)) {
  1217.                         ASSIGN_WRITE_MMIO_VFUNCS(chv);
  1218.                         ASSIGN_READ_MMIO_VFUNCS(chv);
  1219.  
  1220.                 } else {
  1221.                         ASSIGN_WRITE_MMIO_VFUNCS(gen8);
  1222.                         ASSIGN_READ_MMIO_VFUNCS(gen6);
  1223.                 }
  1224.                 break;
  1225.         case 7:
  1226.         case 6:
  1227.                 if (IS_HASWELL(dev)) {
  1228.                         ASSIGN_WRITE_MMIO_VFUNCS(hsw);
  1229.                 } else {
  1230.                         ASSIGN_WRITE_MMIO_VFUNCS(gen6);
  1231.                 }
  1232.  
  1233.                 if (IS_VALLEYVIEW(dev)) {
  1234.                         ASSIGN_READ_MMIO_VFUNCS(vlv);
  1235.                 } else {
  1236.                         ASSIGN_READ_MMIO_VFUNCS(gen6);
  1237.                 }
  1238.                 break;
  1239.         case 5:
  1240.                 ASSIGN_WRITE_MMIO_VFUNCS(gen5);
  1241.                 ASSIGN_READ_MMIO_VFUNCS(gen5);
  1242.                 break;
  1243.         case 4:
  1244.         case 3:
  1245.         case 2:
  1246.                 ASSIGN_WRITE_MMIO_VFUNCS(gen2);
  1247.                 ASSIGN_READ_MMIO_VFUNCS(gen2);
  1248.                 break;
  1249.         }
  1250.  
  1251.         if (intel_vgpu_active(dev)) {
  1252.                 ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
  1253.                 ASSIGN_READ_MMIO_VFUNCS(vgpu);
  1254.         }
  1255.  
  1256.         i915_check_and_clear_faults(dev);
  1257. }
  1258. #undef ASSIGN_WRITE_MMIO_VFUNCS
  1259. #undef ASSIGN_READ_MMIO_VFUNCS
  1260.  
  1261. void intel_uncore_fini(struct drm_device *dev)
  1262. {
  1263.         /* Paranoia: make sure we have disabled everything before we exit. */
  1264.         intel_uncore_sanitize(dev);
  1265.         intel_uncore_forcewake_reset(dev, false);
  1266. }
  1267.  
  1268. #define GEN_RANGE(l, h) GENMASK(h, l)
  1269.  
  1270. static const struct register_whitelist {
  1271.         uint64_t offset;
  1272.         uint32_t size;
  1273.         /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
  1274.         uint32_t gen_bitmask;
  1275. } whitelist[] = {
  1276.         { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
  1277. };
  1278.  
  1279. int i915_reg_read_ioctl(struct drm_device *dev,
  1280.                         void *data, struct drm_file *file)
  1281. {
  1282.         struct drm_i915_private *dev_priv = dev->dev_private;
  1283.         struct drm_i915_reg_read *reg = data;
  1284.         struct register_whitelist const *entry = whitelist;
  1285.         unsigned size;
  1286.         u64 offset;
  1287.         int i, ret = 0;
  1288.  
  1289.         for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
  1290.                 if (entry->offset == (reg->offset & -entry->size) &&
  1291.                     (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
  1292.                         break;
  1293.         }
  1294.  
  1295.         if (i == ARRAY_SIZE(whitelist))
  1296.                 return -EINVAL;
  1297.  
  1298.         /* We use the low bits to encode extra flags as the register should
  1299.          * be naturally aligned (and those that are not so aligned merely
  1300.          * limit the available flags for that register).
  1301.          */
  1302.         offset = entry->offset;
  1303.         size = entry->size;
  1304.         size |= reg->offset ^ offset;
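        /*
         * Worked example (illustrative): the whitelisted RING_TIMESTAMP
         * entry has size 8, so passing its offset unmodified leaves
         * size == 8 and selects the I915_READ64() case below, while passing
         * offset + 1 yields size == (8 | 1) and selects the
         * I915_READ64_2x32() variant.
         */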
  1305.  
  1306.         intel_runtime_pm_get(dev_priv);
  1307.  
  1308.         switch (size) {
  1309.         case 8 | 1:
  1310.                 reg->val = I915_READ64_2x32(offset, offset+4);
  1311.                 break;
  1312.         case 8:
  1313.                 reg->val = I915_READ64(offset);
  1314.                 break;
  1315.         case 4:
  1316.                 reg->val = I915_READ(offset);
  1317.                 break;
  1318.         case 2:
  1319.                 reg->val = I915_READ16(offset);
  1320.                 break;
  1321.         case 1:
  1322.                 reg->val = I915_READ8(offset);
  1323.                 break;
  1324.         default:
  1325.                 ret = -EINVAL;
  1326.                 goto out;
  1327.         }
  1328.  
  1329. out:
  1330.         intel_runtime_pm_put(dev_priv);
  1331.         return ret;
  1332. }
  1333.  
  1334. int i915_get_reset_stats_ioctl(struct drm_device *dev,
  1335.                                void *data, struct drm_file *file)
  1336. {
  1337.         struct drm_i915_private *dev_priv = dev->dev_private;
  1338.         struct drm_i915_reset_stats *args = data;
  1339.         struct i915_ctx_hang_stats *hs;
  1340.         struct intel_context *ctx;
  1341.         int ret;
  1342.  
  1343.         if (args->flags || args->pad)
  1344.                 return -EINVAL;
  1345.  
  1346.         ret = mutex_lock_interruptible(&dev->struct_mutex);
  1347.         if (ret)
  1348.                 return ret;
  1349.  
  1350.         ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
  1351.         if (IS_ERR(ctx)) {
  1352.                 mutex_unlock(&dev->struct_mutex);
  1353.                 return PTR_ERR(ctx);
  1354.         }
  1355.         hs = &ctx->hang_stats;
  1356.  
  1357.         args->reset_count = i915_reset_count(&dev_priv->gpu_error);
  1358.  
  1359.         args->batch_active = hs->batch_active;
  1360.         args->batch_pending = hs->batch_pending;
  1361.  
  1362.         mutex_unlock(&dev->struct_mutex);
  1363.  
  1364.         return 0;
  1365. }
  1366.  
  1367. static int i915_reset_complete(struct drm_device *dev)
  1368. {
  1369.         u8 gdrst;
  1370.         pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
  1371.         return (gdrst & GRDOM_RESET_STATUS) == 0;
  1372. }
  1373.  
  1374. static int i915_do_reset(struct drm_device *dev)
  1375. {
  1376.         /* assert reset for at least 20 usec */
  1377.         pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
  1378.         udelay(20);
  1379.         pci_write_config_byte(dev->pdev, I915_GDRST, 0);
  1380.  
  1381.         return wait_for(i915_reset_complete(dev), 500);
  1382. }
  1383.  
  1384. static int g4x_reset_complete(struct drm_device *dev)
  1385. {
  1386.         u8 gdrst;
  1387.         pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
  1388.         return (gdrst & GRDOM_RESET_ENABLE) == 0;
  1389. }
  1390.  
  1391. static int g33_do_reset(struct drm_device *dev)
  1392. {
  1393.         pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
  1394.         return wait_for(g4x_reset_complete(dev), 500);
  1395. }
  1396.  
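/*
 * G4x resets the render and media domains separately, temporarily disabling
 * VCP unit clock gating around the media reset
 * (WaVcpClkGateDisableForMediaReset).
 */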
  1397. static int g4x_do_reset(struct drm_device *dev)
  1398. {
  1399.         struct drm_i915_private *dev_priv = dev->dev_private;
  1400.         int ret;
  1401.  
  1402.         pci_write_config_byte(dev->pdev, I915_GDRST,
  1403.                               GRDOM_RENDER | GRDOM_RESET_ENABLE);
  1404.         ret = wait_for(g4x_reset_complete(dev), 500);
  1405.         if (ret)
  1406.                 return ret;
  1407.  
  1408.         /* WaVcpClkGateDisableForMediaReset:ctg,elk */
  1409.         I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
  1410.         POSTING_READ(VDECCLK_GATE_D);
  1411.  
  1412.         pci_write_config_byte(dev->pdev, I915_GDRST,
  1413.                               GRDOM_MEDIA | GRDOM_RESET_ENABLE);
  1414.         ret = wait_for(g4x_reset_complete(dev), 500);
  1415.         if (ret)
  1416.                 return ret;
  1417.  
  1418.         /* WaVcpClkGateDisableForMediaReset:ctg,elk */
  1419.         I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
  1420.         POSTING_READ(VDECCLK_GATE_D);
  1421.  
  1422.         pci_write_config_byte(dev->pdev, I915_GDRST, 0);
  1423.  
  1424.         return 0;
  1425. }
  1426.  
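/*
 * Ironlake uses the MMIO ILK_GDSR register rather than PCI config space,
 * resetting the render and media domains in turn.
 */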
  1427. static int ironlake_do_reset(struct drm_device *dev)
  1428. {
  1429.         struct drm_i915_private *dev_priv = dev->dev_private;
  1430.         int ret;
  1431.  
  1432.         I915_WRITE(ILK_GDSR,
  1433.                    ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
  1434.         ret = wait_for((I915_READ(ILK_GDSR) &
  1435.                         ILK_GRDOM_RESET_ENABLE) == 0, 500);
  1436.         if (ret)
  1437.                 return ret;
  1438.  
  1439.         I915_WRITE(ILK_GDSR,
  1440.                    ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
  1441.         ret = wait_for((I915_READ(ILK_GDSR) &
  1442.                         ILK_GRDOM_RESET_ENABLE) == 0, 500);
  1443.         if (ret)
  1444.                 return ret;
  1445.  
  1446.         I915_WRITE(ILK_GDSR, 0);
  1447.  
  1448.         return 0;
  1449. }
  1450.  
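/*
 * Gen6+ performs a full soft reset through the GEN6_GDRST MMIO register; the
 * forcewake machinery is reset afterwards.
 */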
  1451. static int gen6_do_reset(struct drm_device *dev)
  1452. {
  1453.         struct drm_i915_private *dev_priv = dev->dev_private;
  1454.         int ret;
  1455.  
  1456.         /* Reset the chip */
  1457.  
  1458.         /* GEN6_GDRST is not in the gt power well, no need to check
  1459.          * for fifo space for the write or forcewake the chip for
  1460.          * the read
  1461.          */
  1462.         __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
  1463.  
  1464.         /* Spin waiting for the device to ack the reset request */
  1465.         ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
  1466.  
  1467.         intel_uncore_forcewake_reset(dev, true);
  1468.  
  1469.         return ret;
  1470. }
  1471.  
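/*
 * Poll @reg until (value & @mask) == @value, or until @timeout_ms expires.
 * Returns 0 on success or a negative error code from wait_for() on timeout.
 */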
  1472. static int wait_for_register(struct drm_i915_private *dev_priv,
  1473.                              const u32 reg,
  1474.                              const u32 mask,
  1475.                              const u32 value,
  1476.                              const unsigned long timeout_ms)
  1477. {
  1478.         return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
  1479. }
  1480.  
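/*
 * Gen8+ adds a per-engine handshake: request a reset through each engine's
 * RING_RESET_CTL, wait for READY_TO_RESET, and only then issue the full
 * gen6-style soft reset.  If any engine fails to become ready, the requests
 * are withdrawn and the reset is abandoned.
 */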
  1481. static int gen8_do_reset(struct drm_device *dev)
  1482. {
  1483.         struct drm_i915_private *dev_priv = dev->dev_private;
  1484.         struct intel_engine_cs *engine;
  1485.         int i;
  1486.  
  1487.         for_each_ring(engine, dev_priv, i) {
  1488.                 I915_WRITE(RING_RESET_CTL(engine->mmio_base),
  1489.                            _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
  1490.  
  1491.                 if (wait_for_register(dev_priv,
  1492.                                       RING_RESET_CTL(engine->mmio_base),
  1493.                                       RESET_CTL_READY_TO_RESET,
  1494.                                       RESET_CTL_READY_TO_RESET,
  1495.                                       700)) {
  1496.                         DRM_ERROR("%s: reset request timeout\n", engine->name);
  1497.                         goto not_ready;
  1498.                 }
  1499.         }
  1500.  
  1501.         return gen6_do_reset(dev);
  1502.  
  1503. not_ready:
  1504.         for_each_ring(engine, dev_priv, i)
  1505.                 I915_WRITE(RING_RESET_CTL(engine->mmio_base),
  1506.                            _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
  1507.  
  1508.         return -EIO;
  1509. }
  1510.  
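/*
 * Return the reset routine appropriate for this platform, or NULL if GPU
 * reset is unsupported or has been disabled via the i915.reset module
 * parameter.
 */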
  1511. static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
  1512. {
  1513.         if (!i915.reset)
  1514.                 return NULL;
  1515.  
  1516.         if (INTEL_INFO(dev)->gen >= 8)
  1517.                 return gen8_do_reset;
  1518.         else if (INTEL_INFO(dev)->gen >= 6)
  1519.                 return gen6_do_reset;
  1520.         else if (IS_GEN5(dev))
  1521.                 return ironlake_do_reset;
  1522.         else if (IS_G4X(dev))
  1523.                 return g4x_do_reset;
  1524.         else if (IS_G33(dev))
  1525.                 return g33_do_reset;
  1526.         else if (INTEL_INFO(dev)->gen >= 3)
  1527.                 return i915_do_reset;
  1528.         else
  1529.                 return NULL;
  1530. }
  1531.  
  1532. int intel_gpu_reset(struct drm_device *dev)
  1533. {
  1534.         struct drm_i915_private *dev_priv = to_i915(dev);
  1535.         int (*reset)(struct drm_device *);
  1536.         int ret;
  1537.  
  1538.         reset = intel_get_gpu_reset(dev);
  1539.         if (reset == NULL)
  1540.                 return -ENODEV;
  1541.  
  1542.         /* If the power well sleeps during the reset, the reset
  1543.          * request may be dropped and never complete (causing -EIO).
  1544.          */
  1545.         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  1546.         ret = reset(dev);
  1547.         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  1548.  
  1549.         return ret;
  1550. }
  1551.  
  1552. bool intel_has_gpu_reset(struct drm_device *dev)
  1553. {
  1554.         return intel_get_gpu_reset(dev) != NULL;
  1555. }
  1556.  
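/*
 * Check for (and clear) an unclaimed register access flagged by the FPGA
 * debug bridge, on platforms that have one, so the offending access is
 * reported before the interrupt is handled.
 */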
  1557. void intel_uncore_check_errors(struct drm_device *dev)
  1558. {
  1559.         struct drm_i915_private *dev_priv = dev->dev_private;
  1560.  
  1561.         if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
  1562.             (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
  1563.                 DRM_ERROR("Unclaimed register before interrupt\n");
  1564.                 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
  1565.         }
  1566. }
  1567.