/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"

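/**
 * struct vmw_user_context - User-space visible context resource
 *
 * @base: TTM base object backing the user-space handle.
 * @res: Embedded resource tracked by the resource manager.
 * @cbs: Per-context binding state.
 * @man: Command buffer managed resource manager for this context.
 * @cotables: DX context object tables, indexed by SVGACOTableType.
 * @cotable_lock: Protects @cotables against concurrent teardown.
 * @dx_query_mob: MOB currently bound to this context for DX queries, if any.
 */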
struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state *cbs;
        struct vmw_cmdbuf_res_manager *man;
        struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
        spinlock_t cotable_lock;
        struct vmw_dma_buffer *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;

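/*
 * Three flavors of context are handled here: legacy contexts that need no
 * backup buffer and are defined/destroyed directly through the FIFO,
 * guest-backed (GB) contexts whose state lives in a MOB, and DX contexts
 * which additionally carry cotables and DX query state. The tables below
 * supply the per-flavor resource callbacks.
 */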
static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
        .res_type = vmw_res_dx_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "dx contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_dx_context_create,
        .destroy = vmw_dx_context_destroy,
        .bind = vmw_dx_context_bind,
        .unbind = vmw_dx_context_unbind
};

/**
 * Context management:
 */

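/**
 * vmw_context_cotables_unref - Drop the context's cotable references
 *
 * @uctx: Pointer to the user context.
 *
 * Clears each cotable pointer under the cotable lock and drops the
 * corresponding resource reference outside of it.
 */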
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
        struct vmw_resource *res;
        int i;

        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[i];
                uctx->cotables[i] = NULL;
                spin_unlock(&uctx->cotable_lock);

                if (res)
                        vmw_resource_unreference(&res);
        }
}

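/**
 * vmw_hw_context_destroy - Destroy the device context backing a resource
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts this kills all bindings, calls the
 * resource's destroy callback and releases the cotables. For legacy
 * contexts a SVGA_3D_CMD_CONTEXT_DESTROY command is sent to the device.
 */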
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;

        if (res->func->destroy == vmw_gb_context_destroy ||
            res->func->destroy == vmw_dx_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_state_kill(uctx->cbs);
                (void) res->func->destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                vmw_context_cotables_unref(uctx);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_dec(dev_priv);
}

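/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource
 *
 * @dev_priv: Pointer to the device private.
 * @dx: Whether to set up the resource as a DX context.
 * @res: Pointer to the already allocated context resource.
 * @res_free: Destructor to call on error, or NULL to kfree() the resource.
 *
 * Sets the backup size, allocates the command buffer resource manager
 * (when MOBs are available), the binding state and, for DX contexts,
 * the cotables.
 */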
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               bool dx,
                               struct vmw_resource *res,
                               void (*res_free)(struct vmw_resource *res))
{
        int ret, i;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
                            SVGA3D_CONTEXT_DATA_SIZE);
        ret = vmw_resource_init(dev_priv, res, true,
                                res_free,
                                dx ? &vmw_dx_context_func :
                                &vmw_gb_context_func);
        if (unlikely(ret != 0))
                goto out_err;

        if (dev_priv->has_mob) {
                uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
                if (IS_ERR(uctx->man)) {
                        ret = PTR_ERR(uctx->man);
                        uctx->man = NULL;
                        goto out_err;
                }
        }

        uctx->cbs = vmw_binding_state_alloc(dev_priv);
        if (IS_ERR(uctx->cbs)) {
                ret = PTR_ERR(uctx->cbs);
                goto out_err;
        }

        spin_lock_init(&uctx->cotable_lock);

        if (dx) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
                                                              &uctx->res, i);
                        if (unlikely(uctx->cotables[i] == NULL)) {
                                ret = -ENOMEM;
                                goto out_cotables;
                        }
                }
        }

        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_cotables:
        vmw_context_cotables_unref(uctx);
out_err:
        if (res_free)
                res_free(res);
        else
                kfree(res);
        return ret;
}

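/**
 * vmw_context_init - Initialize a context resource
 *
 * @dev_priv: Pointer to the device private.
 * @res: Pointer to the already allocated context resource.
 * @res_free: Destructor to call on error, or NULL to kfree() the resource.
 * @dx: Whether a DX context is requested.
 *
 * On devices with MOB support this defers to vmw_gb_context_init();
 * otherwise a legacy context is defined directly through the FIFO.
 */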
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free)(struct vmw_resource *res),
                            bool dx)
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, dx, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}

/*
 * GB context.
 */

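/**
 * vmw_gb_context_create - Create a hardware guest-backed context
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a device context id and issues SVGA_3D_CMD_DEFINE_GB_CONTEXT.
 * Returns 0 immediately if the resource already has an id.
 */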
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

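/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer describing the reserved backup buffer.
 *
 * Issues SVGA_3D_CMD_BIND_GB_CONTEXT pointing the context at the MOB that
 * holds its saved state.
 */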
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }
        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

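/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read the context state back into the MOB first.
 * @val_buf: Validation buffer describing the backup buffer.
 *
 * Scrubs all bindings, optionally issues a readback, binds the context to
 * the invalid MOB id and fences the backup buffer.
 */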
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_binding_state_scrub(uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

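/**
 * vmw_gb_context_destroy - Destroy a hardware guest-backed context
 *
 * @res: Pointer to the context resource.
 *
 * Issues SVGA_3D_CMD_DESTROY_GB_CONTEXT, invalidates the cached query
 * context id if it matches and releases the resource id.
 */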
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * DX context.
 */

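/**
 * vmw_dx_context_create - Create a hardware DX context
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a device context id and issues SVGA_3D_CMD_DX_DEFINE_CONTEXT.
 * Returns 0 immediately if the resource already has an id.
 */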
static int vmw_dx_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

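/**
 * vmw_dx_context_bind - Bind a DX context to its backup MOB
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer describing the reserved backup buffer.
 *
 * Issues SVGA_3D_CMD_DX_BIND_CONTEXT pointing the context at the MOB that
 * holds its saved state.
 */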
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so all bindings are scrubbed first
 * so that this doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                   bool readback)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        int i;

        vmw_binding_state_scrub(uctx->cbs);
        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                struct vmw_resource *res;

                /* Avoid racing with ongoing cotable destruction. */
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[vmw_cotable_scrub_order[i]];
                if (res)
                        res = vmw_resource_reference_unless_doomed(res);
                spin_unlock(&uctx->cotable_lock);
                if (!res)
                        continue;

                WARN_ON(vmw_cotable_scrub(res, readback));
                vmw_resource_unreference(&res);
        }
}

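/**
 * vmw_dx_context_unbind - Unbind a DX context from its backup MOB
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read context and pending query state back first.
 * @val_buf: Validation buffer describing the backup buffer.
 *
 * Scrubs cotables and bindings, optionally reads back pending query state
 * and the context itself, binds the context to the invalid MOB id and
 * fences the backup buffer.
 */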
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_dx_context_scrub_cotables(res, readback);

        if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
            readback) {
                WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
                if (vmw_query_readback_all(uctx->dx_query_mob))
                        DRM_ERROR("Failed to read back query states\n");
        }

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

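/**
 * vmw_dx_context_destroy - Destroy a hardware DX context
 *
 * @res: Pointer to the context resource.
 *
 * Issues SVGA_3D_CMD_DX_DESTROY_CONTEXT, invalidates the cached query
 * context id if it matches and releases the resource id.
 */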
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDestroyContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}

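/**
 * vmw_user_context_free - Resource destructor for user contexts
 *
 * @res: Pointer to the context resource.
 *
 * Frees the binding state, drops any DX query MOB binding, frees the
 * base object and returns the accounted memory to the memory global.
 */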
static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;

        if (ctx->cbs)
                vmw_binding_state_free(ctx->cbs);

        (void) vmw_context_bind_dx_query(res, NULL);

        ttm_base_object_kfree(ctx, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

#if 0
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

static int vmw_context_define(struct drm_device *dev, void *data,
                              struct drm_file *file_priv, bool dx)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (!dev_priv->has_dx && dx) {
                DRM_ERROR("DX contexts not supported by device.\n");
                return -EINVAL;
        }

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of contexts anyway.
         */

        if (unlikely(vmw_user_context_size == 0))
                vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
                  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_context_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for context"
                                  " creation.\n");
                goto out_unlock;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(ctx == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_context_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.hash.key;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}
#endif

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);

        return vmw_binding_state_list(uctx->cbs);
}

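/**
 * vmw_context_res_man - Return the context's command buffer resource manager
 *
 * @ctx: The context resource
 */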
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->man;
}

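/**
 * vmw_context_cotable - Return a counted reference to one of the
 * context's cotables
 *
 * @ctx: The context resource
 * @cotable_type: The cotable type to look up
 *
 * Returns ERR_PTR(-EINVAL) for an out-of-range type. The caller owns the
 * returned reference and drops it with vmw_resource_unreference() when
 * done, along the lines of this hypothetical sketch:
 *
 *	struct vmw_resource *cotable = vmw_context_cotable(ctx, type);
 *
 *	if (IS_ERR(cotable))
 *		return PTR_ERR(cotable);
 *	... use the cotable ...
 *	vmw_resource_unreference(&cotable);
 */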
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                                         SVGACOTableType cotable_type)
{
        if (cotable_type >= SVGA_COTABLE_DX10_MAX)
                return ERR_PTR(-EINVAL);

        return vmw_resource_reference
                (container_of(ctx, struct vmw_user_context, res)->
                 cotables[cotable_type]);
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context.  If @mob is NULL, then this function will
 * remove the association between the MOB and the context.  This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter.  0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
                              struct vmw_dma_buffer *mob)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        if (mob == NULL) {
                if (uctx->dx_query_mob) {
                        uctx->dx_query_mob->dx_query_ctx = NULL;
                        vmw_dmabuf_unreference(&uctx->dx_query_mob);
                        uctx->dx_query_mob = NULL;
                }

                return 0;
        }

        /* Can only have one MOB per context for queries */
        if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
                return -EINVAL;

        mob->dx_query_ctx = ctx_res;

        if (!uctx->dx_query_mob)
                uctx->dx_query_mob = vmw_dmabuf_reference(mob);

        return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        return uctx->dx_query_mob;
}