/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some context to load as the
 * current one, in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created at the request of a GPU client. These
 * contexts store GPU state, and thus allow GPU clients to avoid re-emitting
 * state (and potentially to query certain state) at any time. The kernel
 * driver makes certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible for a context to be destroyed while it is still active.
 *
 */
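
/*
 * Editor's illustration (not part of the original driver): a minimal sketch of
 * how a userspace client might exercise the context create/destroy ioctls
 * implemented at the bottom of this file. It assumes an already-open DRM
 * render node "fd", libdrm's drmIoctl() helper and the i915 UAPI definitions
 * from <drm/i915_drm.h>; field names follow the ioctl handlers below.
 *
 *	struct drm_i915_gem_context_create create = { .pad = 0 };
 *	struct drm_i915_gem_context_destroy destroy = { .pad = 0 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0) {
 *		// submit execbuffers against create.ctx_id here ...
 *		destroy.ctx_id = create.ctx_id;
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *	}
 */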

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
        if (IS_GEN6(dev))
                return GEN6_CONTEXT_ALIGN;

        return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
        u32 reg;

        switch (INTEL_INFO(dev)->gen) {
        case 6:
                reg = I915_READ(CXT_SIZE);
                ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
                break;
        case 7:
                reg = I915_READ(GEN7_CXT_SIZE);
                if (IS_HASWELL(dev))
                        ret = HSW_CXT_TOTAL_SIZE;
                else
                        ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
                break;
        case 8:
                ret = GEN8_CXT_TOTAL_SIZE;
                break;
        default:
                BUG();
        }

        return ret;
}

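/*
 * Unbind the VMAs left on this context's PPGTT inactive list before the
 * address space is torn down.
 */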
static void i915_gem_context_clean(struct intel_context *ctx)
{
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
        struct i915_vma *vma, *next;

        if (!ppgtt)
                return;

        list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
                                 vm_link) {
                if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
                        break;
        }
}

void i915_gem_context_free(struct kref *ctx_ref)
{
        struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

        trace_i915_context_free(ctx);

        if (i915.enable_execlists)
                intel_lr_context_free(ctx);

        /*
         * This context is going away and we need to remove all VMAs still
         * around. This is to handle imported shared objects for which the
         * destructor did not run when their handles were closed.
         */
        i915_gem_context_clean(ctx);

        i915_ppgtt_put(ctx->ppgtt);

        if (ctx->legacy_hw_ctx.rcs_state)
                drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
        list_del(&ctx->link);
        kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
        struct drm_i915_gem_object *obj;
        int ret;

        obj = i915_gem_alloc_object(dev, size);
        if (obj == NULL)
                return ERR_PTR(-ENOMEM);

        /*
         * Try to make the context utilize L3 as well as LLC.
         *
         * On VLV we don't have L3 controls in the PTEs so we
         * shouldn't touch the cache level, especially as that
         * would make the object snooped which might have a
         * negative performance impact.
         *
         * Snooping is required on non-llc platforms in execlist
         * mode, but since all GGTT accesses use PAT entry 0 we
         * get snooping anyway regardless of cache_level.
         *
         * This is only applicable for Ivy Bridge devices since
         * later platforms don't have L3 control bits in the PTE.
         */
        if (IS_IVYBRIDGE(dev)) {
                ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
                /* Failure shouldn't ever happen this early */
                if (WARN_ON(ret)) {
                        drm_gem_object_unreference(&obj->base);
                        return ERR_PTR(ret);
                }
        }

        return obj;
}

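/*
 * Allocate and initialise the software state for a context: the refcounted
 * intel_context itself, the legacy RCS state object (when hw_context_size is
 * non-zero) and, for client contexts, the per-file IDR handle.
 */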
static struct intel_context *
__create_hw_context(struct drm_device *dev,
                    struct drm_i915_file_private *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_context *ctx;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx == NULL)
                return ERR_PTR(-ENOMEM);

        kref_init(&ctx->ref);
        list_add_tail(&ctx->link, &dev_priv->context_list);
        ctx->i915 = dev_priv;

        if (dev_priv->hw_context_size) {
                struct drm_i915_gem_object *obj =
                                i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
                if (IS_ERR(obj)) {
                        ret = PTR_ERR(obj);
                        goto err_out;
                }
                ctx->legacy_hw_ctx.rcs_state = obj;
        }

        /* Default context will never have a file_priv */
        if (file_priv != NULL) {
                ret = idr_alloc(&file_priv->context_idr, ctx,
                                DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err_out;
        } else
                ret = DEFAULT_CONTEXT_HANDLE;

        ctx->file_priv = file_priv;
        ctx->user_handle = ret;
        /* NB: Mark all slices as needing a remap so that when the context first
         * loads it will restore whatever remap state already exists. If there
         * is no remap info, it will be a NOP. */
        ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

        ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

        return ctx;

err_out:
        i915_gem_context_unreference(ctx);
        return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores
 * the context state of the GPU for applications that don't utilize HW
 * contexts, as well as for the idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
                        struct drm_i915_file_private *file_priv)
{
        const bool is_global_default_ctx = file_priv == NULL;
        struct intel_context *ctx;
        int ret = 0;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));

        ctx = __create_hw_context(dev, file_priv);
        if (IS_ERR(ctx))
                return ctx;

        if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
                /* We may need to do things with the shrinker which
                 * require us to immediately switch back to the default
                 * context. This can cause a problem as pinning the
                 * default context also requires GTT space which may not
                 * be available. To avoid this we always pin the default
                 * context.
                 */
                ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
                                            get_context_alignment(dev), 0);
                if (ret) {
                        DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
                        goto err_destroy;
                }
        }

        if (USES_FULL_PPGTT(dev)) {
                struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

                if (IS_ERR_OR_NULL(ppgtt)) {
                        DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                                         PTR_ERR(ppgtt));
                        ret = PTR_ERR(ppgtt);
                        goto err_unpin;
                }

                ctx->ppgtt = ppgtt;
        }

        trace_i915_context_create(ctx);

        return ctx;

err_unpin:
        if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
                i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
        idr_remove(&file_priv->context_idr, ctx->user_handle);
        i915_gem_context_unreference(ctx);
        return ERR_PTR(ret);
}

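/*
 * Undo the pin/reference a ring holds on its last_context: execlists
 * submission delegates to intel_lr_context_unpin(), while the legacy path
 * drops the GGTT pin on the RCS state object and its context reference.
 */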
static void i915_gem_context_unpin(struct intel_context *ctx,
                                   struct intel_engine_cs *engine)
{
        if (i915.enable_execlists) {
                intel_lr_context_unpin(ctx, engine);
        } else {
                if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
                        i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
                i915_gem_context_unreference(ctx);
        }
}

void i915_gem_context_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        if (i915.enable_execlists) {
                struct intel_context *ctx;

                list_for_each_entry(ctx, &dev_priv->context_list, link)
                        intel_lr_context_reset(dev, ctx);
        }

        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct intel_engine_cs *ring = &dev_priv->ring[i];

                if (ring->last_context) {
                        i915_gem_context_unpin(ring->last_context, ring);
                        ring->last_context = NULL;
                }
        }

        /* Force the GPU state to be reinitialised on enabling */
        dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
}

int i915_gem_context_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_context *ctx;

        /* Init should only be called once per module load. Eventually the
         * restriction on the context_disabled check can be loosened. */
        if (WARN_ON(dev_priv->kernel_context))
                return 0;

        if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
                if (!i915.enable_execlists) {
                        DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
                        return -EINVAL;
                }
        }

        if (i915.enable_execlists) {
                /* NB: intentionally left blank. We will allocate our own
                 * backing objects as we need them, thank you very much */
                dev_priv->hw_context_size = 0;
        } else if (HAS_HW_CONTEXTS(dev)) {
                dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
                if (dev_priv->hw_context_size > (1<<20)) {
                        DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
                                         dev_priv->hw_context_size);
                        dev_priv->hw_context_size = 0;
                }
        }

        ctx = i915_gem_create_context(dev, NULL);
        if (IS_ERR(ctx)) {
                DRM_ERROR("Failed to create default global context (error %ld)\n",
                          PTR_ERR(ctx));
                return PTR_ERR(ctx);
        }

        dev_priv->kernel_context = ctx;

        DRM_DEBUG_DRIVER("%s context support initialized\n",
                        i915.enable_execlists ? "LR" :
                        dev_priv->hw_context_size ? "HW" : "fake");
        return 0;
}

void i915_gem_context_fini(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_context *dctx = dev_priv->kernel_context;
        int i;

        if (dctx->legacy_hw_ctx.rcs_state) {
                /* The only known way to stop the gpu from accessing the hw context is
                 * to reset it. Do this as the very last operation to avoid confusing
                 * other code, leading to spurious errors. */
                intel_gpu_reset(dev);

                /* When default context is created and switched to, base object refcount
                 * will be 2 (+1 from object creation and +1 from do_switch()).
                 * i915_gem_context_fini() will be called after gpu_idle() has switched
                 * to default context. So we need to unreference the base object once
                 * to offset the do_switch part, so that i915_gem_context_unreference()
                 * can then free the base object correctly. */
                WARN_ON(!dev_priv->ring[RCS].last_context);

                i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
        }

        for (i = I915_NUM_RINGS; --i >= 0;) {
                struct intel_engine_cs *ring = &dev_priv->ring[i];

                if (ring->last_context) {
                        i915_gem_context_unpin(ring->last_context, ring);
                        ring->last_context = NULL;
                }
        }

        i915_gem_context_unreference(dctx);
        dev_priv->kernel_context = NULL;
}

int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *ring = req->ring;
        int ret;

        if (i915.enable_execlists) {
                if (ring->init_context == NULL)
                        return 0;

                ret = ring->init_context(req);
        } else
                ret = i915_switch_context(req);

        if (ret) {
                DRM_ERROR("ring init context: %d\n", ret);
                return ret;
        }

        return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
        struct intel_context *ctx = p;

        i915_gem_context_unreference(ctx);
        return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct intel_context *ctx;

        idr_init(&file_priv->context_idr);

        mutex_lock(&dev->struct_mutex);
        ctx = i915_gem_create_context(dev, file_priv);
        mutex_unlock(&dev->struct_mutex);

        if (IS_ERR(ctx)) {
                idr_destroy(&file_priv->context_idr);
                return PTR_ERR(ctx);
        }

        return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
        idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
        struct intel_context *ctx;

        ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
        if (!ctx)
                return ERR_PTR(-ENOENT);

        return ctx;
}

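/*
 * Emit the MI_SET_CONTEXT command that makes the render ring switch to
 * req->ctx, bracketed by the arbitration and PSMI workarounds the various
 * gens require around the context switch itself.
 */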
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
        struct intel_engine_cs *ring = req->ring;
        u32 flags = hw_flags | MI_MM_SPACE_GTT;
        const int num_rings =
                /* Use an extended w/a on ivb+ if signalling from other rings */
                i915_semaphore_is_enabled(ring->dev) ?
                hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
                0;
        int len, i, ret;

        /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
         * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
         * explicitly, so we rely on the value at ring init, stored in
         * itlb_before_ctx_switch.
         */
        if (IS_GEN6(ring->dev)) {
                ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
                if (ret)
                        return ret;
        }

        /* These flags are for resource streamer on HSW+ */
        if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
                flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
        else if (INTEL_INFO(ring->dev)->gen < 8)
                flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

        len = 4;
        if (INTEL_INFO(ring->dev)->gen >= 7)
                len += 2 + (num_rings ? 4*num_rings + 2 : 0);

        ret = intel_ring_begin(req, len);
        if (ret)
                return ret;

        /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
        if (INTEL_INFO(ring->dev)->gen >= 7) {
                intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
                if (num_rings) {
                        struct intel_engine_cs *signaller;

                        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
                        for_each_ring(signaller, to_i915(ring->dev), i) {
                                if (signaller == ring)
                                        continue;

                                intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
                                intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
                        }
                }
        }

        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
        intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
                        flags);
        /*
         * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
         * WaMiSetContext_Hang:snb,ivb,vlv
         */
        intel_ring_emit(ring, MI_NOOP);

        if (INTEL_INFO(ring->dev)->gen >= 7) {
                if (num_rings) {
                        struct intel_engine_cs *signaller;

                        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
                        for_each_ring(signaller, to_i915(ring->dev), i) {
                                if (signaller == ring)
                                        continue;

                                intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
                                intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
                        }
                }
                intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
        }

        intel_ring_advance(ring);

        return ret;
}

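/*
 * A context switch can be skipped only when the incoming context is the one
 * already running on this ring, no L3 slice remap is pending and the ring's
 * page directories are not marked dirty.
 */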
static inline bool should_skip_switch(struct intel_engine_cs *ring,
                                      struct intel_context *from,
                                      struct intel_context *to)
{
        if (to->remap_slice)
                return false;

        if (to->ppgtt && from == to &&
            !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
                return true;

        return false;
}

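/*
 * Pre-gen8 hardware, and rings other than the render ring, need the PPGTT
 * page directories loaded with LRI commands before the context switch is
 * submitted.
 */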
static bool
needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;

        if (!to->ppgtt)
                return false;

        if (INTEL_INFO(ring->dev)->gen < 8)
                return true;

        if (ring != &dev_priv->ring[RCS])
                return true;

        return false;
}

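/*
 * On gen8 the PDPs are normally restored as part of the context image, so an
 * explicit reload after MI_SET_CONTEXT is only needed on the render ring when
 * the context restore was inhibited.
 */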
static bool
needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
                u32 hw_flags)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;

        if (!to->ppgtt)
                return false;

        if (!IS_GEN8(ring->dev))
                return false;

        if (ring != &dev_priv->ring[RCS])
                return false;

        if (hw_flags & MI_RESTORE_INHIBIT)
                return true;

        return false;
}

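/*
 * do_switch() performs the legacy (ringbuffer) context switch for a request:
 * pin the incoming context image, reload page directories where required,
 * emit MI_SET_CONTEXT, apply any pending L3 remaps, and finally retire the
 * previous context image by moving it to the active list.
 */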
static int do_switch(struct drm_i915_gem_request *req)
{
        struct intel_context *to = req->ctx;
        struct intel_engine_cs *ring = req->ring;
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_context *from = ring->last_context;
        u32 hw_flags = 0;
        bool uninitialized = false;
        int ret, i;

        if (from != NULL && ring == &dev_priv->ring[RCS]) {
                BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
                BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
        }

        if (should_skip_switch(ring, from, to))
                return 0;

        /* Trying to pin first makes error handling easier. */
        if (ring == &dev_priv->ring[RCS]) {
                ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
                                            get_context_alignment(ring->dev), 0);
                if (ret)
                        return ret;
        }

        /*
         * Pin can switch back to the default context if we end up calling into
         * evict_everything - as a last ditch gtt defrag effort that also
         * switches to the default context. Hence we need to reload from here.
         */
        from = ring->last_context;

        if (needs_pd_load_pre(ring, to)) {
                /* Older GENs and non render rings still want the load first,
                 * "PP_DCLV followed by PP_DIR_BASE register through Load
                 * Register Immediate commands in Ring Buffer before submitting
                 * a context."*/
                trace_switch_mm(ring, to);
                ret = to->ppgtt->switch_mm(to->ppgtt, req);
                if (ret)
                        goto unpin_out;

                /* Doing a PD load always reloads the page dirs */
                to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
        }

        if (ring != &dev_priv->ring[RCS]) {
                if (from)
                        i915_gem_context_unreference(from);
                goto done;
        }

        /*
         * Clear this page out of any CPU caches for coherent swap-in/out. Note
         * that thanks to write = false in this call and us not setting any gpu
         * write domains when putting a context object onto the active list
         * (when switching away from it), this won't block.
         *
         * XXX: We need a real interface to do this instead of trickery.
         */
        ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
        if (ret)
                goto unpin_out;

        if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
                hw_flags |= MI_RESTORE_INHIBIT;
                /* NB: If we inhibit the restore, the context is not allowed to
                 * die because future work may end up depending on valid address
                 * space. This means we must enforce that a page table load
                 * occurs whenever the restore is inhibited. */
        } else if (to->ppgtt &&
                   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
                hw_flags |= MI_FORCE_RESTORE;
                to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
        }

        /* We should never emit switch_mm more than once */
        WARN_ON(needs_pd_load_pre(ring, to) &&
                needs_pd_load_post(ring, to, hw_flags));

        ret = mi_set_context(req, hw_flags);
        if (ret)
                goto unpin_out;

        /* GEN8 does *not* require an explicit reload if the PDPs have been
         * setup, and we do not wish to move them.
         */
        if (needs_pd_load_post(ring, to, hw_flags)) {
                trace_switch_mm(ring, to);
                ret = to->ppgtt->switch_mm(to->ppgtt, req);
                /* The hardware context switch is emitted, but we haven't
                 * actually changed the state - so it's probably safe to bail
                 * here. Still, let the user know something dangerous has
                 * happened.
                 */
                if (ret) {
                        DRM_ERROR("Failed to change address space on context switch\n");
                        goto unpin_out;
                }
        }

        for (i = 0; i < MAX_L3_SLICES; i++) {
                if (!(to->remap_slice & (1<<i)))
                        continue;

                ret = i915_gem_l3_remap(req, i);
                /* If it failed, try again next round */
                if (ret)
                        DRM_DEBUG_DRIVER("L3 remapping failed\n");
                else
                        to->remap_slice &= ~(1<<i);
        }

        /* The backing object for the context is done after switching to the
         * *next* context. Therefore we cannot retire the previous context until
         * the next context has already started running. In fact, the below code
         * is a bit suboptimal because the retiring can occur simply after the
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from != NULL) {
                from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
                i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
                 * correct in case the object gets swapped out. Ideally we'd be
                 * able to defer doing this until we know the object would be
                 * swapped, but there is no way to do that yet.
                 */
                from->legacy_hw_ctx.rcs_state->dirty = 1;

                /* obj is kept alive until the next request by its active ref */
                i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
                i915_gem_context_unreference(from);
        }

        uninitialized = !to->legacy_hw_ctx.initialized;
        to->legacy_hw_ctx.initialized = true;

done:
        i915_gem_context_reference(to);
        ring->last_context = to;

        if (uninitialized) {
                if (ring->init_context) {
                        ret = ring->init_context(req);
                        if (ret)
                                DRM_ERROR("ring init context: %d\n", ret);
                }
        }

        return 0;

unpin_out:
        if (ring->id == RCS)
                i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
        return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode.  Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *ring = req->ring;
        struct drm_i915_private *dev_priv = ring->dev->dev_private;

        WARN_ON(i915.enable_execlists);
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

        if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
                if (req->ctx != ring->last_context) {
                        i915_gem_context_reference(req->ctx);
                        if (ring->last_context)
                                i915_gem_context_unreference(ring->last_context);
                        ring->last_context = req->ctx;
                }
                return 0;
        }

        return do_switch(req);
}

static bool contexts_enabled(struct drm_device *dev)
{
        return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file)
{
        struct drm_i915_gem_context_create *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct intel_context *ctx;
        int ret;

        if (!contexts_enabled(dev))
                return -ENODEV;

        if (args->pad != 0)
                return -EINVAL;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ctx = i915_gem_create_context(dev, file_priv);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        args->ctx_id = ctx->user_handle;
        DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

        return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file)
{
        struct drm_i915_gem_context_destroy *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct intel_context *ctx;
        int ret;

        if (args->pad != 0)
                return -EINVAL;

        if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
                return -ENOENT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ctx = i915_gem_context_get(file_priv, args->ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(ctx);
        }

        idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
        i915_gem_context_unreference(ctx);
        mutex_unlock(&dev->struct_mutex);

        DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
        return 0;
}

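/*
 * Read back a per-context parameter (ban period, no-zeromap flag or the
 * context's GTT size) into the drm_i915_gem_context_param structure supplied
 * by userspace.
 */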
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
        struct intel_context *ctx;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ctx = i915_gem_context_get(file_priv, args->ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(ctx);
        }

        args->size = 0;
        switch (args->param) {
        case I915_CONTEXT_PARAM_BAN_PERIOD:
                args->value = ctx->hang_stats.ban_period_seconds;
                break;
        case I915_CONTEXT_PARAM_NO_ZEROMAP:
                args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
                break;
        case I915_CONTEXT_PARAM_GTT_SIZE:
                if (ctx->ppgtt)
                        args->value = ctx->ppgtt->base.total;
                else if (to_i915(dev)->mm.aliasing_ppgtt)
                        args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
                else
                        args->value = to_i915(dev)->gtt.base.total;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
        struct intel_context *ctx;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ctx = i915_gem_context_get(file_priv, args->ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(ctx);
        }

        switch (args->param) {
        case I915_CONTEXT_PARAM_BAN_PERIOD:
                if (args->size)
                        ret = -EINVAL;
                else if (args->value < ctx->hang_stats.ban_period_seconds &&
                         !capable(CAP_SYS_ADMIN))
                        ret = -EPERM;
                else
                        ctx->hang_stats.ban_period_seconds = args->value;
                break;
        case I915_CONTEXT_PARAM_NO_ZEROMAP:
                if (args->size) {
                        ret = -EINVAL;
                } else {
                        ctx->flags &= ~CONTEXT_NO_ZEROMAP;
                        ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
                }
                break;
        default:
                ret = -EINVAL;
                break;
        }
        mutex_unlock(&dev->struct_mutex);

        return ret;
}