/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * This file implements the vmwgfx context binding manager.
 * The sole reason for having to use this code is that VMware guest-
 * backed contexts can be swapped out to their backing mobs by the device
 * at any time and swapped in again at any time. At swapin time, the device
 * validates the context bindings to make sure they point to valid resources.
 * It's this outside-of-drawcall validation (which can happen at any time)
 * that makes this code necessary.
 *
 * We therefore need to kill any context bindings pointing to a resource
 * when the resource is swapped out. Furthermore, if the vmwgfx driver has
 * swapped out the context, we can't swap it in again to kill bindings
 * because of backing mob reservation lockdep violations. So, as part of
 * context swapout, we also kill all bindings of the context, so that they
 * are already killed if a resource to which a binding points
 * needs to be swapped out.
 *
 * Note that a resource can be pointed to by bindings from multiple contexts.
 * Therefore we can't easily protect this data with a per-context mutex
 * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
 * to protect all binding manager data.
 *
 * Finally, any association between a context and a global resource
 * (surface, shader or even DX query) is conceptually a context binding that
 * needs to be tracked by this code.
 */
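
/*
 * Editorial sketch: to make the lifecycle above concrete, this is roughly
 * how the entry points below fit together. "res" and "ctx_cbs" are
 * hypothetical variables, not driver code:
 *
 *	// Resource eviction: unbind the resource everywhere before it moves.
 *	vmw_binding_res_list_kill(&res->binding_head);
 *
 *	// Context swapout: scrub every binding the context still holds.
 *	vmw_binding_state_scrub(ctx_cbs);
 *
 *	// Context swapin: re-emit bind commands for scrubbed bindings
 *	// whose resources are resident again.
 *	ret = vmw_binding_rebind_all(ctx_cbs);
 */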

#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_reg.h"

#define VMW_BINDING_RT_BIT     0
#define VMW_BINDING_PS_BIT     1
#define VMW_BINDING_SO_BIT     2
#define VMW_BINDING_VB_BIT     3
#define VMW_BINDING_NUM_BITS   4

#define VMW_BINDING_PS_SR_BIT  0

/**
 * struct vmw_ctx_binding_state - per context binding state
 *
 * @dev_priv: Pointer to device private structure.
 * @list: linked list of individual active bindings.
 * @render_targets: Render target bindings.
 * @texture_units: Texture units bindings.
 * @ds_view: Depth-stencil view binding.
 * @so_targets: StreamOutput target bindings.
 * @vertex_buffers: Vertex buffer bindings.
 * @index_buffer: Index buffer binding.
 * @per_shader: Per shader-type bindings.
 * @dirty: Bitmap tracking per binding-type changes that have not yet
 * been emitted to the device.
 * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
 * have not yet been emitted to the device.
 * @bind_cmd_buffer: Scratch space used to construct binding commands.
 * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer.
 * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
 * device binding slot of the first command data entry in @bind_cmd_buffer.
 *
 * Note that this structure also provides storage space for the individual
 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
 * for individual bindings.
 */
struct vmw_ctx_binding_state {
        struct vmw_private *dev_priv;
        struct list_head list;
        struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
        struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
        struct vmw_ctx_bindinfo_view ds_view;
        struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
        struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
        struct vmw_ctx_bindinfo_ib index_buffer;
        struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];

        unsigned long dirty;
        DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);

        u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
        u32 bind_cmd_count;
        u32 bind_first_slot;
};

static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
                                           bool rebind);
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
                                       bool rebind);
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_binding_build_asserts(void) __attribute__ ((unused));

typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

/**
 * struct vmw_binding_info - Per binding type information for the binding
 * manager
 *
 * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
 * @offsets: array[shader_slot] of offsets to the array[slot]
 * of struct bindings for the binding type.
 * @scrub_func: Pointer to the scrub function for this binding type.
 *
 * Holds static information to help optimize the binding manager and avoid
 * an excessive amount of switch statements.
 */
struct vmw_binding_info {
        size_t size;
        const size_t *offsets;
        vmw_scrub_func scrub_func;
};

/*
 * A number of static variables that help determine the scrub func and the
 * location of the struct vmw_ctx_bindinfo slots for each binding type.
 */
static const size_t vmw_binding_shader_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
};
static const size_t vmw_binding_rt_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, render_targets),
};
static const size_t vmw_binding_tex_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, texture_units),
};
static const size_t vmw_binding_cb_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
};
static const size_t vmw_binding_dx_ds_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, ds_view),
};
static const size_t vmw_binding_sr_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
};
static const size_t vmw_binding_so_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, so_targets),
};
static const size_t vmw_binding_vb_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, vertex_buffers),
};
static const size_t vmw_binding_ib_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, index_buffer),
};

static const struct vmw_binding_info vmw_binding_infos[] = {
        [vmw_ctx_binding_shader] = {
                .size = sizeof(struct vmw_ctx_bindinfo_shader),
                .offsets = vmw_binding_shader_offsets,
                .scrub_func = vmw_binding_scrub_shader},
        [vmw_ctx_binding_rt] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_rt_offsets,
                .scrub_func = vmw_binding_scrub_render_target},
        [vmw_ctx_binding_tex] = {
                .size = sizeof(struct vmw_ctx_bindinfo_tex),
                .offsets = vmw_binding_tex_offsets,
                .scrub_func = vmw_binding_scrub_texture},
        [vmw_ctx_binding_cb] = {
                .size = sizeof(struct vmw_ctx_bindinfo_cb),
                .offsets = vmw_binding_cb_offsets,
                .scrub_func = vmw_binding_scrub_cb},
        [vmw_ctx_binding_dx_shader] = {
                .size = sizeof(struct vmw_ctx_bindinfo_shader),
                .offsets = vmw_binding_shader_offsets,
                .scrub_func = vmw_binding_scrub_dx_shader},
        [vmw_ctx_binding_dx_rt] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_rt_offsets,
                .scrub_func = vmw_binding_scrub_dx_rt},
        [vmw_ctx_binding_sr] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_sr_offsets,
                .scrub_func = vmw_binding_scrub_sr},
        [vmw_ctx_binding_ds] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_dx_ds_offsets,
                .scrub_func = vmw_binding_scrub_dx_rt},
        [vmw_ctx_binding_so] = {
                .size = sizeof(struct vmw_ctx_bindinfo_so),
                .offsets = vmw_binding_so_offsets,
                .scrub_func = vmw_binding_scrub_so},
        [vmw_ctx_binding_vb] = {
                .size = sizeof(struct vmw_ctx_bindinfo_vb),
                .offsets = vmw_binding_vb_offsets,
                .scrub_func = vmw_binding_scrub_vb},
        [vmw_ctx_binding_ib] = {
                .size = sizeof(struct vmw_ctx_bindinfo_ib),
                .offsets = vmw_binding_ib_offsets,
                .scrub_func = vmw_binding_scrub_ib},
};

/**
 * vmw_cbs_context - Return a pointer to the context resource of a
 * context binding state tracker.
 *
 * @cbs: The context binding state tracker.
 *
 * Provided there are any active bindings, this function will return an
 * unreferenced pointer to the context resource that owns the context
 * binding state tracker. If there are no active bindings, this function
 * will return NULL. Note that the caller must somehow ensure that a reference
 * is held on the context resource prior to calling this function.
 */
static const struct vmw_resource *
vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
{
        if (list_empty(&cbs->list))
                return NULL;

        return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
                                ctx_list)->ctx;
}

/**
 * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
 *
 * @cbs: Pointer to a struct vmw_ctx_binding_state which holds the slot.
 * @bt: The binding type.
 * @shader_slot: The shader slot of the binding. If none, then set to 0.
 * @slot: The slot of the binding.
 */
static struct vmw_ctx_bindinfo *
vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
                enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
{
        const struct vmw_binding_info *b = &vmw_binding_infos[bt];
        size_t offset = b->offsets[shader_slot] + b->size*slot;

        return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
}
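
/*
 * Editorial example: an illustration (not driver code) of the address
 * computation above. For bt == vmw_ctx_binding_cb, shader_slot == 1 and
 * slot == 2, vmw_binding_infos[] selects vmw_binding_cb_offsets, so:
 *
 *	offset = offsetof(struct vmw_ctx_binding_state,
 *			  per_shader[1].const_buffers)
 *		 + sizeof(struct vmw_ctx_bindinfo_cb) * 2;
 *
 * i.e. the returned pointer is the third constant-buffer slot of the
 * second shader type, embedded directly in @cbs with no allocation.
 */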

/**
 * vmw_binding_drop: Stop tracking a context binding
 *
 * @bi: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
{
        list_del(&bi->ctx_list);
        if (!list_empty(&bi->res_list))
                list_del(&bi->res_list);
        bi->ctx = NULL;
}

/**
 * vmw_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 * @shader_slot: The shader slot of the binding. If none, then set to 0.
 * @slot: The slot of the binding.
 *
 * Starts tracking the binding in the context binding
 * state structure @cbs.
 */
void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
                    const struct vmw_ctx_bindinfo *bi,
                    u32 shader_slot, u32 slot)
{
        struct vmw_ctx_bindinfo *loc =
                vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
        const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];

        if (loc->ctx != NULL)
                vmw_binding_drop(loc);

        memcpy(loc, bi, b->size);
        loc->scrubbed = false;
        list_add(&loc->ctx_list, &cbs->list);
        INIT_LIST_HEAD(&loc->res_list);
}
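
/*
 * Editorial sketch: how a caller might register a binding. The values are
 * hypothetical; in the driver, bindings are typically staged during command
 * submission and later transferred with vmw_binding_state_commit():
 *
 *	struct vmw_ctx_bindinfo_tex binding;
 *
 *	binding.bi.ctx = ctx_res;	// context resource (assumed)
 *	binding.bi.res = tex_res;	// texture resource (assumed)
 *	binding.bi.bt = vmw_ctx_binding_tex;
 *	binding.texture_stage = stage;
 *	vmw_binding_add(cbs, &binding.bi, 0, stage);
 */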

/**
 * vmw_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @from: Pointer to the staged context binding state tracker in which
 * @bi is stored.
 * @bi: Information about the binding to track.
 */
static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
                                 const struct vmw_ctx_binding_state *from,
                                 const struct vmw_ctx_bindinfo *bi)
{
        size_t offset = (unsigned long)bi - (unsigned long)from;
        struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
                ((unsigned long) cbs + offset);

        if (loc->ctx != NULL) {
                WARN_ON(bi->scrubbed);

                vmw_binding_drop(loc);
        }

        if (bi->res != NULL) {
                memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
                list_add_tail(&loc->ctx_list, &cbs->list);
                list_add_tail(&loc->res_list, &loc->res->binding_head);
        }
}

/**
 * vmw_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding_state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry, *next;

        vmw_binding_state_scrub(cbs);
        list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
                vmw_binding_drop(entry);
}

/**
 * vmw_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding_state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry;

        list_for_each_entry(entry, &cbs->list, ctx_list) {
                if (!entry->scrubbed) {
                        (void) vmw_binding_infos[entry->bt].scrub_func
                                (entry, false);
                        entry->scrubbed = true;
                }
        }

        (void) vmw_binding_emit_dirty(cbs);
}

/**
 * vmw_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_binding_res_list_kill(struct list_head *head)
{
        struct vmw_ctx_bindinfo *entry, *next;

        vmw_binding_res_list_scrub(head);
        list_for_each_entry_safe(entry, next, head, res_list)
                vmw_binding_drop(entry);
}

/**
 * vmw_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrubs all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_binding_res_list_scrub(struct list_head *head)
{
        struct vmw_ctx_bindinfo *entry;

        list_for_each_entry(entry, head, res_list) {
                if (!entry->scrubbed) {
                        (void) vmw_binding_infos[entry->bt].scrub_func
                                (entry, false);
                        entry->scrubbed = true;
                }
        }

        list_for_each_entry(entry, head, res_list) {
                struct vmw_ctx_binding_state *cbs =
                        vmw_context_binding_state(entry->ctx);

                (void) vmw_binding_emit_dirty(cbs);
        }
}


/**
 * vmw_binding_state_commit - Commit staged binding info
 *
 * @to: Pointer to the persistent binding state tracker in the context.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure
 * (typically used by execbuf) to the persistent
 * structure in the context. This can be done once commands have been
 * submitted to hardware.
 */
void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
                              struct vmw_ctx_binding_state *from)
{
        struct vmw_ctx_bindinfo *entry, *next;

        list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
                vmw_binding_transfer(to, from, entry);
                vmw_binding_drop(entry);
        }
}
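
/*
 * Editorial sketch of the staged-binding flow that vmw_binding_transfer()
 * and vmw_binding_state_commit() support ("staged" and "ctx_cbs" are
 * hypothetical): bindings parsed during execbuf go into a temporary tracker
 * and only reach the context's persistent tracker once the command stream
 * has been handed to the device:
 *
 *	// while validating the command stream:
 *	vmw_binding_add(staged, &bi, shader_slot, slot);
 *	// ...
 *	// after the commands have been submitted:
 *	vmw_binding_state_commit(ctx_cbs, staged);
 */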

/**
 * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */
int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry;
        int ret;

        list_for_each_entry(entry, &cbs->list, ctx_list) {
                if (likely(!entry->scrubbed))
                        continue;

                if (entry->res == NULL || entry->res->id ==
                    SVGA3D_INVALID_ID)
                        continue;

                ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
                if (unlikely(ret != 0))
                        return ret;

                entry->scrubbed = false;
        }

        return vmw_binding_emit_dirty(cbs);
}

/**
 * vmw_binding_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_shader *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetShader body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_SET_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
        cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_binding_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
                                           bool rebind)
{
        struct vmw_ctx_bindinfo_view *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for render target "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = binding->slot;
        cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        cmd->body.target.face = 0;
        cmd->body.target.mipmap = 0;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_binding_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
                                     bool rebind)
{
        struct vmw_ctx_bindinfo_tex *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                struct {
                        SVGA3dCmdSetTextureState c;
                        SVGA3dTextureState s1;
                } body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for texture "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.c.cid = bi->ctx->id;
        cmd->body.s1.stage = binding->texture_stage;
        cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
        cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_shader *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetShader body;
        } *cmd;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for DX shader "
                          "unbinding.\n");
                return -ENOMEM;
        }
        cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
        cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_cb *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetSingleConstantBuffer body;
        } *cmd;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for DX constant "
                          "buffer unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.slot = binding->slot;
        cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
        if (rebind) {
                cmd->body.offsetInBytes = binding->offset;
                cmd->body.sizeInBytes = binding->size;
                cmd->body.sid = bi->res->id;
        } else {
                cmd->body.offsetInBytes = 0;
                cmd->body.sizeInBytes = 0;
                cmd->body.sid = SVGA3D_INVALID_ID;
        }
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_collect_view_ids - Build view id data for a view binding command
 * without checking which bindings actually need to be emitted
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @bi: Pointer to where the binding info array is stored in @cbs
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings and builds a buffer of view id data.
 * Stops at the first non-existing binding in the @bi array.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
 * contains the command data.
 */
static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
                                 const struct vmw_ctx_bindinfo *bi,
                                 u32 max_num)
{
        const struct vmw_ctx_bindinfo_view *biv =
                container_of(bi, struct vmw_ctx_bindinfo_view, bi);
        unsigned long i;

        cbs->bind_cmd_count = 0;
        cbs->bind_first_slot = 0;

        for (i = 0; i < max_num; ++i, ++biv) {
                if (!biv->bi.ctx)
                        break;

                cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
                        ((biv->bi.scrubbed) ?
                         SVGA3D_INVALID_ID : biv->bi.res->id);
        }
}
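
/*
 * Editorial example: given three render-target bindings in slots 0..2
 * where slot 1 is scrubbed, the loop above produces (illustrative ids)
 *
 *	bind_cmd_buffer[] = { rt0_id, SVGA3D_INVALID_ID, rt2_id };
 *	bind_cmd_count    = 3;
 *	bind_first_slot   = 0;
 *
 * and stops at the first slot with no binding at all (bi.ctx == NULL).
 */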

/**
 * vmw_collect_dirty_view_ids - Build view id data for a view binding command
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @bi: Pointer to where the binding info array is stored in @cbs
 * @dirty: Bitmap indicating which bindings need to be emitted.
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings that need to be emitted and
 * builds a buffer of view id data.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
 * binding, and @cbs->bind_cmd_buffer contains the command data.
 */
static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
                                       const struct vmw_ctx_bindinfo *bi,
                                       unsigned long *dirty,
                                       u32 max_num)
{
        const struct vmw_ctx_bindinfo_view *biv =
                container_of(bi, struct vmw_ctx_bindinfo_view, bi);
        unsigned long i, next_bit;

        cbs->bind_cmd_count = 0;
        i = find_first_bit(dirty, max_num);
        next_bit = i;
        cbs->bind_first_slot = i;

        biv += i;
        for (; i < max_num; ++i, ++biv) {
                cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
                        ((!biv->bi.ctx || biv->bi.scrubbed) ?
                         SVGA3D_INVALID_ID : biv->bi.res->id);

                if (next_bit == i) {
                        next_bit = find_next_bit(dirty, max_num, i + 1);
                        if (next_bit >= max_num)
                                break;
                }
        }
}
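
/*
 * Editorial example: if @dirty has bits 1 and 3 set, the loop above starts
 * at slot 1 and keeps copying until the last dirty bit has been passed, so
 * the clean slot in between is re-emitted too (illustrative ids):
 *
 *	bind_cmd_buffer[] = { id_slot1, id_slot2, id_slot3 };
 *	bind_cmd_count    = 3;
 *	bind_first_slot   = 1;
 *
 * This trades a few redundant entries for a single contiguous command.
 */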

/**
 * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @shader_slot: The shader slot of the bindings.
 */
static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
                           int shader_slot)
{
        const struct vmw_ctx_bindinfo *loc =
                &cbs->per_shader[shader_slot].shader_res[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetShaderResources body;
        } *cmd;
        size_t cmd_size, view_id_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_dirty_view_ids(cbs, loc,
                                   cbs->per_shader[shader_slot].dirty_sr,
                                   SVGA3D_DX_MAX_SRVIEWS);
        if (cbs->bind_cmd_count == 0)
                return 0;

        view_id_size = cbs->bind_cmd_count*sizeof(uint32);
        cmd_size = sizeof(*cmd) + view_id_size;
        cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for DX shader"
                          " resource binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
        cmd->header.size = sizeof(cmd->body) + view_id_size;
        cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
        cmd->body.startView = cbs->bind_first_slot;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

        vmw_fifo_commit(ctx->dev_priv, cmd_size);
        bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
                     cbs->bind_first_slot, cbs->bind_cmd_count);

        return 0;
}

/**
 * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetRenderTargets body;
        } *cmd;
        size_t cmd_size, view_id_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
        view_id_size = cbs->bind_cmd_count*sizeof(uint32);
        cmd_size = sizeof(*cmd) + view_id_size;
        cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for DX render-target"
                          " binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
        cmd->header.size = sizeof(cmd->body) + view_id_size;

        if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
                cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
        else
                cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

        vmw_fifo_commit(ctx->dev_priv, cmd_size);

        return 0;
}

/**
 * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
 * without checking which bindings actually need to be emitted
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @bi: Pointer to where the binding info array is stored in @cbs
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
 * Stops at the first non-existing binding in the @bi array.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
 * contains the command data.
 */
static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
                                   const struct vmw_ctx_bindinfo *bi,
                                   u32 max_num)
{
        const struct vmw_ctx_bindinfo_so *biso =
                container_of(bi, struct vmw_ctx_bindinfo_so, bi);
        unsigned long i;
        SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;

        cbs->bind_cmd_count = 0;
        cbs->bind_first_slot = 0;

        for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
                    ++cbs->bind_cmd_count) {
                if (!biso->bi.ctx)
                        break;

                if (!biso->bi.scrubbed) {
                        so_buffer->sid = biso->bi.res->id;
                        so_buffer->offset = biso->offset;
                        so_buffer->sizeInBytes = biso->size;
                } else {
                        so_buffer->sid = SVGA3D_INVALID_ID;
                        so_buffer->offset = 0;
                        so_buffer->sizeInBytes = 0;
                }
        }
}

/**
 * vmw_emit_set_so - Issue delayed streamout binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetSOTargets body;
        } *cmd;
        size_t cmd_size, so_target_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
        if (cbs->bind_cmd_count == 0)
                return 0;

        so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
        cmd_size = sizeof(*cmd) + so_target_size;
        cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for DX SO target"
                          " binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
        cmd->header.size = sizeof(cmd->body) + so_target_size;
        memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);

        vmw_fifo_commit(ctx->dev_priv, cmd_size);

        return 0;
}

/**
 * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
        u32 i;
        int ret = 0;

        for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
                if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
                        continue;

                ret = vmw_emit_set_sr(cbs, i);
                if (ret)
                        break;

                __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
        }

        /* Propagate a failed emit instead of silently dropping it. */
        return ret;
}

/**
 * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
 * SVGA3dCmdDXSetVertexBuffers command
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @bi: Pointer to where the binding info array is stored in @cbs
 * @dirty: Bitmap indicating which bindings need to be emitted.
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings that need to be emitted and
 * builds a buffer of SVGA3dVertexBuffer data.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
 * binding, and @cbs->bind_cmd_buffer contains the command data.
 */
static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
                                  const struct vmw_ctx_bindinfo *bi,
                                  unsigned long *dirty,
                                  u32 max_num)
{
        const struct vmw_ctx_bindinfo_vb *biv =
                container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
        unsigned long i, next_bit;
        SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;

        cbs->bind_cmd_count = 0;
        i = find_first_bit(dirty, max_num);
        next_bit = i;
        cbs->bind_first_slot = i;

        biv += i;
        for (; i < max_num; ++i, ++biv, ++vbs) {
                if (!biv->bi.ctx || biv->bi.scrubbed) {
                        vbs->sid = SVGA3D_INVALID_ID;
                        vbs->stride = 0;
                        vbs->offset = 0;
                } else {
                        vbs->sid = biv->bi.res->id;
                        vbs->stride = biv->stride;
                        vbs->offset = biv->offset;
                }
                cbs->bind_cmd_count++;
                if (next_bit == i) {
                        next_bit = find_next_bit(dirty, max_num, i + 1);
                        if (next_bit >= max_num)
                                break;
                }
        }
}

/**
 * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo *loc =
                &cbs->vertex_buffers[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetVertexBuffers body;
        } *cmd;
        size_t cmd_size, set_vb_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
                             SVGA3D_DX_MAX_VERTEXBUFFERS);
        if (cbs->bind_cmd_count == 0)
                return 0;

        set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
        cmd_size = sizeof(*cmd) + set_vb_size;
        cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
                          " binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
        cmd->header.size = sizeof(cmd->body) + set_vb_size;
        cmd->body.startBuffer = cbs->bind_first_slot;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);

        vmw_fifo_commit(ctx->dev_priv, cmd_size);
        bitmap_clear(cbs->dirty_vb,
                     cbs->bind_first_slot, cbs->bind_cmd_count);

        return 0;
}

/**
 * vmw_binding_emit_dirty - Issue delayed binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * This function issues the delayed binding commands that arise from
 * previous scrub / unscrub calls. These binding commands are typically
 * commands that batch a number of bindings and therefore it makes sense
 * to delay them.
 */
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
{
        int ret = 0;
        unsigned long hit = 0;

        while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
              < VMW_BINDING_NUM_BITS) {

                switch (hit) {
                case VMW_BINDING_RT_BIT:
                        ret = vmw_emit_set_rt(cbs);
                        break;
                case VMW_BINDING_PS_BIT:
                        ret = vmw_binding_emit_dirty_ps(cbs);
                        break;
                case VMW_BINDING_SO_BIT:
                        ret = vmw_emit_set_so(cbs);
                        break;
                case VMW_BINDING_VB_BIT:
                        ret = vmw_emit_set_vb(cbs);
                        break;
                default:
                        BUG();
                }
                if (ret)
                        return ret;

                __clear_bit(hit, &cbs->dirty);
                hit++;
        }

        return 0;
}
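
/*
 * Editorial example of the batching above: scrubbing two vertex buffer
 * bindings sets their bits in cbs->dirty_vb plus VMW_BINDING_VB_BIT in
 * cbs->dirty, but emits nothing. The next vmw_binding_emit_dirty() call
 * then folds both changes into one SVGA3dCmdDXSetVertexBuffers command
 * via vmw_emit_set_vb() instead of two single-slot commands.
 */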

/**
 * vmw_binding_scrub_sr - Schedule a dx shader resource binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_view *biv =
                container_of(bi, struct vmw_ctx_bindinfo_view, bi);
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
        __set_bit(VMW_BINDING_PS_SR_BIT,
                  &cbs->per_shader[biv->shader_slot].dirty);
        __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);

        return 0;
}

/**
 * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);

        return 0;
}

/**
 * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);

        return 0;
}

/**
 * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_vb *bivb =
                container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(bivb->slot, cbs->dirty_vb);
        __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);

        return 0;
}

/**
 * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_ib *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetIndexBuffer body;
        } *cmd;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for DX index buffer "
                          "binding.\n");
                return -ENOMEM;
        }
        cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
        cmd->header.size = sizeof(cmd->body);
        if (rebind) {
                cmd->body.sid = bi->res->id;
                cmd->body.format = binding->format;
                cmd->body.offset = binding->offset;
        } else {
                cmd->body.sid = SVGA3D_INVALID_ID;
                cmd->body.format = 0;
                cmd->body.offset = 0;
        }

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
 * memory accounting.
 *
 * @dev_priv: Pointer to a device private structure.
 *
 * Returns a pointer to a newly allocated struct or an error pointer on error.
 */
struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv)
{
        struct vmw_ctx_binding_state *cbs;
        int ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
                                   false, false);
        if (ret)
                return ERR_PTR(ret);

        cbs = vzalloc(sizeof(*cbs));
        if (!cbs) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
                return ERR_PTR(-ENOMEM);
        }

        cbs->dev_priv = dev_priv;
        INIT_LIST_HEAD(&cbs->list);

        return cbs;
}
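
/*
 * Editorial note: vmw_binding_state_alloc() and vmw_binding_state_free()
 * below form a pair, and the allocator returns an ERR_PTR-encoded error
 * rather than NULL. A minimal usage sketch (error handling abbreviated):
 *
 *	struct vmw_ctx_binding_state *cbs;
 *
 *	cbs = vmw_binding_state_alloc(dev_priv);
 *	if (IS_ERR(cbs))
 *		return PTR_ERR(cbs);
 *	// ... vmw_binding_add() / vmw_binding_state_commit() ...
 *	vmw_binding_state_free(cbs);
 */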

/**
 * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
 * memory accounting info.
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
 */
void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_private *dev_priv = cbs->dev_priv;

        vfree(cbs);
        ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
}

/**
 * vmw_binding_state_list - Get the binding list of a
 * struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state
 *
 * Returns the binding list which can be used to traverse through the bindings
 * and access the resource information of all bindings.
 */
struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
{
        return &cbs->list;
}

/**
 * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
 *
 * Drops all bindings registered in @cbs. No device binding actions are
 * performed.
 */
void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry, *next;

        list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
                vmw_binding_drop(entry);
}

/*
 * This function is unused at run-time, and only used to hold various build
 * asserts important for code optimization assumptions.
 */
static void vmw_binding_build_asserts(void)
{
        BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
        BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
        BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));

        /*
         * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
         * view id arrays.
         */
        BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
        BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
        BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);

        /*
         * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
         * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
         */
        BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
                     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
        BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
                     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
}