Subversion Repositories Kolibri OS

/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state cbs;
};



typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};

static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
        [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
        [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
        [vmw_ctx_binding_tex] = vmw_context_scrub_texture };

/**
 * Context management:
 */

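/**
 * vmw_hw_context_destroy - Destroy a hardware context.
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed contexts this takes the command-buffer mutex and calls
 * vmw_gb_context_destroy(). For legacy contexts it releases the pinned
 * query buffer object and emits an SVGA_3D_CMD_CONTEXT_DESTROY command
 * to the device FIFO.
 */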
static void vmw_hw_context_destroy(struct vmw_resource *res)
{

        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;


        if (res->func->destroy == vmw_gb_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                (void) vmw_gb_context_destroy(res);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_3d_resource_dec(dev_priv, false);
}

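/**
 * vmw_gb_context_init - Initialize a guest-backed context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the newly allocated context resource.
 * @res_free: Destructor to call on failure, or NULL to use kfree().
 *
 * Sets up the resource with the guest-backed context function table,
 * records the backup buffer size and initializes the binding tracker
 * before activating the resource.
 */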
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               struct vmw_resource *res,
                               void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        ret = vmw_resource_init(dev_priv, res, true,
                                res_free, &vmw_gb_context_func);
        res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;

        if (unlikely(ret != 0)) {
                if (res_free)
                        res_free(res);
                else
                        kfree(res);
                return ret;
        }

        memset(&uctx->cbs, 0, sizeof(uctx->cbs));
        INIT_LIST_HEAD(&uctx->cbs.list);

        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;
}

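/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the newly allocated context resource.
 * @res_free: Destructor to call on failure, or NULL to use kfree().
 *
 * Dispatches to vmw_gb_context_init() on guest-backed hardware.
 * Otherwise allocates a legacy context id and emits an
 * SVGA_3D_CMD_CONTEXT_DEFINE command to the device FIFO.
 */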
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv, false);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}

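/**
 * vmw_context_alloc - Allocate and initialize a bare context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Allocates a struct vmw_resource and initializes it as a context with
 * no user-space base object. Returns the new resource, or NULL on failure.
 */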
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
        struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
        int ret;

        if (unlikely(res == NULL))
                return NULL;

        ret = vmw_context_init(dev_priv, res, NULL);

        return (ret == 0) ? res : NULL;
}


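/**
 * vmw_gb_context_create - Create a guest-backed context on the device.
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a context id and emits an SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command. Does nothing if the resource already has a valid id.
 */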
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv, false);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

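/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB.
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer describing the backup buffer object.
 *
 * Emits an SVGA_3D_CMD_BIND_GB_CONTEXT command pointing the context at
 * the memory object backing it, and clears the backup-dirty flag.
 */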
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

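/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read the context state back into the backup buffer.
 * @val_buf: Validation buffer describing the backup buffer object.
 *
 * Scrubs all tracked bindings, optionally issues a readback, binds the
 * context to SVGA3D_INVALID_ID and fences the backup buffer object.
 */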
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;


        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_context_binding_state_kill(&uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

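/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device.
 *
 * @res: Pointer to the context resource.
 *
 * Emits an SVGA_3D_CMD_DESTROY_GB_CONTEXT command and releases the
 * context id. All bindings must have been scrubbed beforehand.
 */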
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        BUG_ON(!list_empty(&uctx->cbs.list));

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_3d_resource_dec(dev_priv, false);

        return 0;
}

/**
 * User-space context management:
 */

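/**
 * vmw_user_context_base_to_res - Return the resource embedded in a user
 * context, given its TTM base object.
 *
 * @base: Pointer to the TTM base object.
 */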
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}

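/**
 * vmw_user_context_free - Resource destructor for user contexts.
 *
 * @res: Pointer to the context resource.
 *
 * Returns the memory accounted for the context to the TTM memory global.
 * Note that the ttm_base_object_kfree() call that would free the object
 * itself is commented out in this port.
 */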
static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;

//   ttm_base_object_kfree(ctx, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

#if 0
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;


        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of contexts anyway.
         */

        if (unlikely(vmw_user_context_size == 0))
                vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_context_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for context"
                                  " creation.\n");
                goto out_unlock;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(ctx == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_context_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.hash.key;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;

}
#endif

/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
{
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetShader body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_SET_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = bi->i1.shader_type;
        cmd->body.shid = SVGA3D_INVALID_ID;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
{
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for render target "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = bi->i1.rt_type;
        cmd->body.target.sid = SVGA3D_INVALID_ID;
        cmd->body.target.face = 0;
        cmd->body.target.mipmap = 0;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
{
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                struct {
                        SVGA3dCmdSetTextureState c;
                        SVGA3dTextureState s1;
                } body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for texture "
                          "unbinding.\n");
                return -ENOMEM;
        }


        cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.c.cid = bi->ctx->id;
        cmd->body.s1.stage = bi->i1.texture_stage;
        cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
        cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_context_binding_drop: Stop tracking a context binding
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
        list_del(&cb->ctx_list);
        if (!list_empty(&cb->res_list))
                list_del(&cb->res_list);
        cb->bi.ctx = NULL;
}

/**
 * vmw_context_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
                            const struct vmw_ctx_bindinfo *bi)
{
        struct vmw_ctx_binding *loc;

        switch (bi->bt) {
        case vmw_ctx_binding_rt:
                if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
                        DRM_ERROR("Illegal render target type %u.\n",
                                  (unsigned) bi->i1.rt_type);
                        return -EINVAL;
                }
                loc = &cbs->render_targets[bi->i1.rt_type];
                break;
        case vmw_ctx_binding_tex:
                if (unlikely((unsigned)bi->i1.texture_stage >=
                             SVGA3D_NUM_TEXTURE_UNITS)) {
                        DRM_ERROR("Illegal texture/sampler unit %u.\n",
                                  (unsigned) bi->i1.texture_stage);
                        return -EINVAL;
                }
                loc = &cbs->texture_units[bi->i1.texture_stage];
                break;
        case vmw_ctx_binding_shader:
                if (unlikely((unsigned)bi->i1.shader_type >=
                             SVGA3D_SHADERTYPE_MAX)) {
                        DRM_ERROR("Illegal shader type %u.\n",
                                  (unsigned) bi->i1.shader_type);
                        return -EINVAL;
                }
                loc = &cbs->shaders[bi->i1.shader_type];
                break;
        default:
                BUG();
        }

        if (loc->bi.ctx != NULL)
                vmw_context_binding_drop(loc);

        loc->bi = *bi;
        list_add_tail(&loc->ctx_list, &cbs->list);
        INIT_LIST_HEAD(&loc->res_list);

        return 0;
}

/**
 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Copies the staged binding info into the persistent tracker @cbs,
 * dropping any previous binding at the same slot, and links the entry
 * onto the bound resource's binding list.
 */
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
                                         const struct vmw_ctx_bindinfo *bi)
{
        struct vmw_ctx_binding *loc;

        switch (bi->bt) {
        case vmw_ctx_binding_rt:
                loc = &cbs->render_targets[bi->i1.rt_type];
                break;
        case vmw_ctx_binding_tex:
                loc = &cbs->texture_units[bi->i1.texture_stage];
                break;
        case vmw_ctx_binding_shader:
                loc = &cbs->shaders[bi->i1.shader_type];
                break;
        default:
                BUG();
        }

        if (loc->bi.ctx != NULL)
                vmw_context_binding_drop(loc);

        loc->bi = *bi;
        list_add_tail(&loc->ctx_list, &cbs->list);
        if (bi->res != NULL)
                list_add_tail(&loc->res_list, &bi->res->binding_head);
        else
                INIT_LIST_HEAD(&loc->res_list);
}

/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
        (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
        vmw_context_binding_drop(cb);
}

/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_binding *entry, *next;

        list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
                vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
        struct vmw_ctx_binding *entry, *next;

        list_for_each_entry_safe(entry, next, head, res_list)
                vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
 * @ctx: Pointer to context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once the commands that set
 * up the staged bindings have been submitted to the device.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
                                        struct vmw_ctx_binding_state *from)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        struct vmw_ctx_binding *entry, *next;

        list_for_each_entry_safe(entry, next, &from->list, ctx_list)
                vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}