/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

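/* Order (log2 of the number of buckets) of the resource hash table. */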
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset, in units of 4 bytes, into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        unsigned long new_backup_offset;
        bool first_usage;
        bool no_buffer_needed;
};

/**
 * vmw_resource_list_unreserve - Unreserve resources previously reserved for
 * command submission.
 *
 * @list: List of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
                                        bool backoff)
{
        struct vmw_resource_val_node *val;

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *new_backup =
                        backoff ? NULL : val->new_backup;

                vmw_resource_unreserve(res, new_backup,
                        val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return, points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(node == NULL)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        list_add_tail(&node->head, &sw_context->resource_list);
        node->res = vmw_resource_reference(res);
        node->first_usage = true;

        if (unlikely(p_node != NULL))
                *p_node = node;

        return 0;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(rel == NULL)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        list_for_each_entry(rel, list, head)
                cb[rel->offset] = rel->res->id;
}

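/**
 * vmw_cmd_invalid - Handle a command that user-space may not submit.
 *
 * The original privilege check is commented out in this port, so the
 * command is currently accepted unconditionally.
 */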
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return 0; //capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

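/**
 * vmw_cmd_ok - Accept a command that needs no further checking.
 */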
static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit on the number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct ttm_buffer_object *bo,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) bo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(bo);
                val_buf->reserved = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
        }

        sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at a time will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct ttm_buffer_object *bo = &res->backup->base;

                        ret = vmw_bo_to_validate_list
                                (sw_context, bo, NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }
        return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: If non-NULL, points on successful return to the validation node
 * of the resource.
 */
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             enum vmw_res_type res_type,
                             const struct vmw_user_resource_conv *converter,
                             uint32_t *id,
                             struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id == SVGA3D_INVALID_ID)
                return 0;

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */

        if (likely(rcache->valid && *id == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         id - sw_context->buf_start);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->tfile,
                                              *id,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id);
//       dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id;

        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          id - sw_context->buf_start);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                __le32 cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

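/**
 * vmw_cmd_set_render_target_check - Validate an
 * SVGA_3D_CMD_SETRENDERTARGET command: the context id is checked and the
 * render target surface is put on the validation list.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */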
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, NULL);
        return ret;
}

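/**
 * vmw_cmd_surface_copy_check - Validate an SVGA_3D_CMD_SURFACE_COPY
 * command: both the source and the destination surface are checked.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */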
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

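/**
 * vmw_cmd_stretch_blt_check - Validate an SVGA_3D_CMD_SURFACE_STRETCHBLT
 * command: both the source and the destination surface are checked.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */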
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

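/**
 * vmw_cmd_blt_surf_screen_check - Validate an
 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN command. Only the kernel may submit
 * this command; the source surface is checked.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */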
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (unlikely(!sw_context->kernel)) {
                DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
                return -EPERM;
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

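/**
 * vmw_cmd_present_check - Validate an SVGA_3D_CMD_PRESENT command. Only
 * the kernel may submit this command; the presented surface is checked.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */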
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (unlikely(!sw_context->kernel)) {
                DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
                return -EPERM;
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct ttm_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              NULL);
                if (unlikely(ret != 0))
                        return ret;

        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffer need to be
 * fenced using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin(dev_priv->pinned_bo, false);
                        ttm_bo_unref(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin(sw_context->cur_query_bo, true);

                        /*
                         * We also pin the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        vmw_bo_pin(dev_priv->dummy_query_bo, true);
                        dev_priv->dummy_query_bo_pinned = true;

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                ttm_bo_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_cmd_begin_query - validate an SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
                               struct vmw_sw_context *sw_context,
                               SVGA3dCmdHeader *header)
{
        struct vmw_begin_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_query_cmd,
                           header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}

/**
 * vmw_cmd_end_query - validate an SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_wait_query - validate an SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

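/**
 * vmw_cmd_dma - Validate an SVGA_3D_CMD_SURFACE_DMA command: the guest
 * DMA buffer is looked up and the host surface is checked. Cursor
 * snooping is disabled in this port.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */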
static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_dma_cmd, header);
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->dma.guest.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter, &cmd->dma.host.sid,
                                NULL);
        if (unlikely(ret != 0)) {
                if (unlikely(ret != -ERESTARTSYS))
                        DRM_ERROR("could not find surface for DMA.\n");
                goto out_no_surface;
        }

        srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

//   vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

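/**
 * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command: all
 * vertex array and index array surfaces are checked.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */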
static int vmw_cmd_draw(struct vmw_private *dev_priv,
                        struct vmw_sw_context *sw_context,
                        SVGA3dCmdHeader *header)
{
        struct vmw_draw_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDrawPrimitives body;
        } *cmd;
        SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
                (unsigned long)header + sizeof(*cmd));
        SVGA3dPrimitiveRange *range;
        uint32_t i;
        uint32_t maxnum;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_draw_cmd, header);
        maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

        if (unlikely(cmd->body.numVertexDecls > maxnum)) {
                DRM_ERROR("Illegal number of vertex declarations.\n");
                return -EINVAL;
        }

        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &decl->array.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        maxnum = (header->size - sizeof(cmd->body) -
                  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
        if (unlikely(cmd->body.numRanges > maxnum)) {
                DRM_ERROR("Illegal number of index ranges.\n");
                return -EINVAL;
        }

        range = (SVGA3dPrimitiveRange *) decl;
        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &range->indexArray.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

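/**
 * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command:
 * every surface bound by an SVGA3D_TS_BIND_TEXTURE state is checked.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */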
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_tex_state_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetTextureState state;
        };

        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
          ((unsigned long) header + header->size + sizeof(*header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        for (; cur_state < last_state; ++cur_state) {
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;

                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &cur_state->value, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

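/**
 * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 */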
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      void *buf)
{
        struct vmw_dma_buffer *vmw_bo;
        int ret;

        struct {
                uint32_t header;
                SVGAFifoCmdDefineGMRFB body;
        } *cmd = buf;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->body.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);

        return ret;
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_set_shader_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetShader body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_set_shader_cmd,
                           header);

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        return 0;
}

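/**
 * vmw_cmd_check_not_3d - Validate a command from the non-3D (2D) command
 * range and compute its size.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes left in the command stream. On
 * successful return, the size of the command.
 */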
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                void *buf, uint32_t *size)
{
        uint32_t size_remaining = *size;
        uint32_t cmd_id;

        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
        switch (cmd_id) {
        case SVGA_CMD_UPDATE:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
                break;
        case SVGA_CMD_DEFINE_GMRFB:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
                break;
        case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
                break;
        case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
                break;
        default:
                DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
                return -EINVAL;
        }

        if (*size > size_remaining) {
                DRM_ERROR("Invalid SVGA command (size mismatch):"
                          " %u.\n", cmd_id);
                return -EINVAL;
        }

        if (unlikely(!sw_context->kernel)) {
                DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
                return -EPERM;
        }

        if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
                return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

        return 0;
}

typedef int (*vmw_cmd_func) (struct vmw_private *,
                             struct vmw_sw_context *,
                             SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
        [cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
                    &vmw_cmd_set_render_target_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
                    &vmw_cmd_blt_surf_screen_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
};

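/**
 * vmw_cmd_check - Validate a single command: non-3D commands are handed
 * to vmw_cmd_check_not_3d(), 3D commands to the handler registered in
 * vmw_cmd_funcs, and the command size is computed.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes left in the command stream. On
 * successful return, the size of the command.
 */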
static int vmw_cmd_check(struct vmw_private *dev_priv,
                         struct vmw_sw_context *sw_context,
                         void *buf, uint32_t *size)
{
        uint32_t cmd_id;
        uint32_t size_remaining = *size;
        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
        int ret;

        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
        /* Handle any non-3D commands */
        if (unlikely(cmd_id < SVGA_CMD_MAX))
                return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

        cmd_id = le32_to_cpu(header->id);
        *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

        cmd_id -= SVGA_3D_CMD_BASE;
        if (unlikely(*size > size_remaining))
                goto out_err;

        if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
                goto out_err;

        ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                goto out_err;

        return 0;
out_err:
        DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
                  cmd_id + SVGA_3D_CMD_BASE);
        return -EINVAL;
}

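/**
 * vmw_cmd_check_all - Validate the whole command stream, one command at a
 * time, advancing by the size computed for each command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the start of the command stream.
 * @size: Size of the command stream in bytes.
 */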
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             void *buf,
                             uint32_t size)
{
        int32_t cur_size = size;
        int ret;

        sw_context->buf_start = buf;

        while (cur_size > 0) {
                size = cur_size;
                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
                if (unlikely(ret != 0))
                        return ret;
                buf = (void *)((unsigned long) buf + size);
                cur_size -= size;
        }

        if (unlikely(cur_size != 0)) {
                DRM_ERROR("Command verifier out of sync.\n");
                return -EINVAL;
        }

        return 0;
}

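/**
 * vmw_free_relocations - Drop all relocations by resetting the relocation
 * count. The relocations themselves live in a fixed-size array in
 * @sw_context, so nothing needs to be freed.
 *
 * @sw_context: The software context used for this command submission.
 */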
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
        sw_context->cur_reloc = 0;
}

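/**
 * vmw_apply_relocations - Patch each relocation with the final guest
 * address of its validated buffer: either an offset within the
 * framebuffer GMR or a GMR id. The relocations are freed afterwards.
 *
 * @sw_context: The software context used for this command submission.
 */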
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
        uint32_t i;
        struct vmw_relocation *reloc;
        struct ttm_validate_buffer *validate;
        struct ttm_buffer_object *bo;

        for (i = 0; i < sw_context->cur_reloc; ++i) {
                reloc = &sw_context->relocs[i];
                validate = &sw_context->val_bufs[reloc->index].base;
                bo = validate->bo;
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        reloc->location->offset += bo->offset;
                        reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
                        break;
                case VMW_PL_GMR:
                        reloc->location->gmrId = bo->mem.start;
                        break;
                default:
                        BUG();
                }
        }
        vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
        struct vmw_resource_val_node *val, *val_next;

        /*
         * Drop references to resources held during command submission.
         */

        list_for_each_entry_safe(val, val_next, list, head) {
                list_del_init(&val->head);
                vmw_resource_unreference(&val->res);
                kfree(val);
        }
}

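/**
 * vmw_clear_validations - Release the buffer object references and hash
 * table entries taken while building the validation lists.
 *
 * @sw_context: The software context used for this command submission.
 */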
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
        struct vmw_validate_buffer *entry, *next;
        struct vmw_resource_val_node *val;

        /*
         * Drop references to DMA buffers held during command submission.
         */
        list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
                                 base.head) {
                list_del(&entry->base.head);
                ttm_bo_unref(&entry->base.bo);
                (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
                sw_context->cur_val_buf--;
        }
        BUG_ON(sw_context->cur_val_buf != 0);

        list_for_each_entry(val, &sw_context->resource_list, head)
                (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

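/**
 * vmw_validate_single_buffer - Make sure a single buffer object is placed
 * where the device can access it.
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 */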
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                                      struct ttm_buffer_object *bo)
{
        int ret;

        /*
         * Don't validate pinned buffers.
         */

        if (bo == dev_priv->pinned_bo ||
            (bo == dev_priv->dummy_query_bo &&
             dev_priv->dummy_query_bo_pinned))
                return 0;

        /*
         * Put BO in VRAM if there is space, otherwise as a GMR.
         * If there is no space in VRAM and GMR ids are all used up,
         * start evicting GMRs to make room. If the DMA buffer can't be
         * used as a GMR, this will return -ENOMEM.
         */

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
        if (likely(ret == 0 || ret == -ERESTARTSYS))
                return ret;

        /*
         * If that failed, try VRAM again, this time evicting
         * previous contents.
         */

        DRM_INFO("Falling through to VRAM.\n");
        ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
        return ret;
}

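/**
 * vmw_validate_buffers - Validate all buffer objects on the software
 * context's validation list.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 */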
static int vmw_validate_buffers(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context)
{
        struct vmw_validate_buffer *entry;
        int ret;

        list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
                ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

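/**
 * vmw_resize_cmd_bounce - Grow the command bounce buffer to at least
 * @size bytes. The buffer grows by roughly 50% at a time, page aligned.
 * On allocation failure the size is reset to zero and -ENOMEM is returned.
 *
 * @sw_context: The software context used for this command submission.
 * @size: The required size of the bounce buffer.
 */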
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
                                 uint32_t size)
{
        if (likely(sw_context->cmd_bounce_size >= size))
                return 0;

        if (sw_context->cmd_bounce_size == 0)
                sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

        while (sw_context->cmd_bounce_size < size) {
                sw_context->cmd_bounce_size =
                        PAGE_ALIGN(sw_context->cmd_bounce_size +
                                   (sw_context->cmd_bounce_size >> 1));
        }

        if (sw_context->cmd_bounce != NULL)
                vfree(sw_context->cmd_bounce);

        sw_context->cmd_bounce = KernelAlloc(sw_context->cmd_bounce_size);

        if (sw_context->cmd_bounce == NULL) {
                DRM_ERROR("Failed to allocate command bounce buffer.\n");
                sw_context->cmd_bounce_size = 0;
                return -ENOMEM;
        }

        return 0;
}

  1306. /**
  1307.  * vmw_execbuf_fence_commands - create and submit a command stream fence
  1308.  *
  1309.  * Creates a fence object and submits a command stream marker.
  1310.  * If this fails for some reason, We sync the fifo and return NULL.
  1311.  * It is then safe to fence buffers with a NULL pointer.
  1312.  *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * user-space handle is created; otherwise no handle is created.
  1315.  */
  1316.  
  1317. int vmw_execbuf_fence_commands(struct drm_file *file_priv,
  1318.                                struct vmw_private *dev_priv,
  1319.                                struct vmw_fence_obj **p_fence,
  1320.                                uint32_t *p_handle)
  1321. {
  1322.         uint32_t sequence;
  1323.         int ret;
  1324.         bool synced = false;
  1325.  
  1326.         /* p_handle implies file_priv. */
  1327.         BUG_ON(p_handle != NULL && file_priv == NULL);
  1328.  
  1329.         ret = vmw_fifo_send_fence(dev_priv, &sequence);
  1330.         if (unlikely(ret != 0)) {
  1331.                 DRM_ERROR("Fence submission error. Syncing.\n");
  1332.                 synced = true;
  1333.         }
  1334.  
  1335.         if (p_handle != NULL)
  1336.                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
  1337.                                             sequence,
  1338.                                             DRM_VMW_FENCE_FLAG_EXEC,
  1339.                                             p_fence, p_handle);
  1340.         else
  1341.                 ret = vmw_fence_create(dev_priv->fman, sequence,
  1342.                                        DRM_VMW_FENCE_FLAG_EXEC,
  1343.                                        p_fence);
  1344.  
  1345.         if (unlikely(ret != 0 && !synced)) {
  1346.                 (void) vmw_fallback_wait(dev_priv, false, false,
  1347.                                          sequence, false,
  1348.                                          VMW_FENCE_WAIT_TIMEOUT);
  1349.                 *p_fence = NULL;
  1350.         }
  1351.  
  1352.         return 0;
  1353. }
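
/*
 * Kernel-internal usage sketch (no user-space handle; this mirrors
 * the call made in __vmw_execbuf_release_pinned_bo below):
 *
 *      struct vmw_fence_obj *fence = NULL;
 *
 *      (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *      // ...fence the validated buffers; a NULL fence is safe here...
 *      if (fence != NULL)
 *              vmw_fence_obj_unreference(&fence);
 */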
  1354.  
  1355. /**
  1356.  * vmw_execbuf_copy_fence_user - copy fence object information to
  1357.  * user-space.
  1358.  *
  1359.  * @dev_priv: Pointer to a vmw_private struct.
  1360.  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
  1361.  * @ret: Return value from fence object creation.
  1362.  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
  1363.  * which the information should be copied.
 * @fence: Pointer to the fence object.
  1365.  * @fence_handle: User-space fence handle.
  1366.  *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member should be left
 * untouched, and if user-space has preloaded it with -EFAULT, the error
 * will be detected.
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space reference.
  1374.  */
  1375. void
  1376. vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
  1377.                             struct vmw_fpriv *vmw_fp,
  1378.                             int ret,
  1379.                             struct drm_vmw_fence_rep __user *user_fence_rep,
  1380.                             struct vmw_fence_obj *fence,
  1381.                             uint32_t fence_handle)
  1382. {
  1383.         struct drm_vmw_fence_rep fence_rep;
  1384.  
  1385.         if (user_fence_rep == NULL)
  1386.                 return;
  1387.  
  1388.         memset(&fence_rep, 0, sizeof(fence_rep));
  1389.  
  1390.         fence_rep.error = ret;
  1391.         if (ret == 0) {
  1392.                 BUG_ON(fence == NULL);
  1393.  
  1394.                 fence_rep.handle = fence_handle;
  1395.                 fence_rep.seqno = fence->seqno;
  1396.                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
  1397.                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
  1398.         }
  1399.  
  1400.         /*
  1401.          * copy_to_user errors will be detected by user space not
  1402.          * seeing fence_rep::error filled in. Typically
  1403.          * user-space would have pre-set that member to -EFAULT.
  1404.          */
        /*
         * KolibriOS port: the copy to user-space is disabled here, so
         * ret still holds the fence-creation status from the caller.
         */
//   ret = copy_to_user(user_fence_rep, &fence_rep,
//              sizeof(fence_rep));
  1407.  
  1408.         /*
  1409.          * User-space lost the fence object. We need to sync
  1410.          * and unreference the handle.
  1411.          */
  1412.         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
  1413.                 ttm_ref_object_base_unref(vmw_fp->tfile,
  1414.                                           fence_handle, TTM_REF_USAGE);
  1415.                 DRM_ERROR("Fence copy error. Syncing.\n");
  1416.                 (void) vmw_fence_obj_wait(fence, fence->signal_mask,
  1417.                                           false, false,
  1418.                                           VMW_FENCE_WAIT_TIMEOUT);
  1419.         }
  1420. }
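
/*
 * User-space side of the contract above, a hypothetical sketch (field
 * names follow struct drm_vmw_fence_rep):
 *
 *      struct drm_vmw_fence_rep rep;
 *
 *      rep.error = -EFAULT;    // pre-set so a failed copy is visible
 *      // ...submit the execbuf ioctl with &rep as the fence_rep...
 *      if (rep.error != 0)
 *              ;       // fence creation or the copy back failed
 */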
  1421.  
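/**
 * vmw_execbuf_process - Validate and submit a command stream
 *
 * @file_priv: Pointer to the calling file; may be NULL for
 * kernel-internal submissions without a user-space fence handle.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space address of the command stream, or NULL.
 * @kernel_commands: Kernel address of the command stream, or NULL.
 * In this port the kernel path is always taken.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: If non-zero, throttle the submission to this lag in
 * microseconds.
 * @user_fence_rep: Optional user-space address of a struct
 * drm_vmw_fence_rep to which fence information is copied.
 * @out_fence: If non-NULL, the created fence object is handed out here
 * instead of being unreferenced.
 *
 * Checks, validates and submits the command stream, then fences the
 * submission (see the body below for the exact ordering).
 */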
  1422. int vmw_execbuf_process(struct drm_file *file_priv,
  1423.                         struct vmw_private *dev_priv,
  1424.                         void __user *user_commands,
  1425.                         void *kernel_commands,
  1426.                         uint32_t command_size,
  1427.                         uint64_t throttle_us,
  1428.                         struct drm_vmw_fence_rep __user *user_fence_rep,
  1429.                         struct vmw_fence_obj **out_fence)
  1430. {
  1431.         struct vmw_sw_context *sw_context = &dev_priv->ctx;
  1432.         struct vmw_fence_obj *fence = NULL;
  1433.         struct vmw_resource *error_resource;
  1434.         struct list_head resource_list;
  1435.         struct ww_acquire_ctx ticket;
  1436.         uint32_t handle;
  1437.         void *cmd;
  1438.         int ret;
  1439.  
  1440.         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
  1441.         if (unlikely(ret != 0))
  1442.                 return -ERESTARTSYS;
  1443.  
        /*
         * KolibriOS port: the user-space command path below is disabled;
         * only kernel command buffers are submitted.
         */
/*
  1445.         if (kernel_commands == NULL) {
  1446.                 sw_context->kernel = false;
  1447.  
  1448.                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
  1449.                 if (unlikely(ret != 0))
  1450.                         goto out_unlock;
  1451.  
  1452.  
  1453.                 ret = copy_from_user(sw_context->cmd_bounce,
  1454.                                      user_commands, command_size);
  1455.  
  1456.                 if (unlikely(ret != 0)) {
  1457.                         ret = -EFAULT;
  1458.                         DRM_ERROR("Failed copying commands.\n");
  1459.                         goto out_unlock;
  1460.                 }
  1461.                 kernel_commands = sw_context->cmd_bounce;
  1462.     } else  */
        sw_context->kernel = true;
  1464.  
  1465.         sw_context->tfile = vmw_fpriv(file_priv)->tfile;
  1466.         sw_context->cur_reloc = 0;
  1467.         sw_context->cur_val_buf = 0;
  1468.         sw_context->fence_flags = 0;
  1469.         INIT_LIST_HEAD(&sw_context->resource_list);
  1470.         sw_context->cur_query_bo = dev_priv->pinned_bo;
  1471.         sw_context->last_query_ctx = NULL;
  1472.         sw_context->needs_post_query_barrier = false;
  1473.         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
  1474.         INIT_LIST_HEAD(&sw_context->validate_nodes);
  1475.         INIT_LIST_HEAD(&sw_context->res_relocations);
  1476.         if (!sw_context->res_ht_initialized) {
  1477.                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
  1478.                 if (unlikely(ret != 0))
  1479.                         goto out_unlock;
  1480.                 sw_context->res_ht_initialized = true;
  1481.         }
  1482.  
  1483.         INIT_LIST_HEAD(&resource_list);
  1484.         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
  1485.                                 command_size);
  1486.         if (unlikely(ret != 0))
  1487.                 goto out_err;
  1488.  
  1489.         ret = vmw_resources_reserve(sw_context);
  1490.         if (unlikely(ret != 0))
  1491.                 goto out_err;
  1492.  
  1493.         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
  1494.         if (unlikely(ret != 0))
  1495.                 goto out_err;
  1496.  
  1497.         ret = vmw_validate_buffers(dev_priv, sw_context);
  1498.         if (unlikely(ret != 0))
  1499.                 goto out_err;
  1500.  
  1501.         ret = vmw_resources_validate(sw_context);
  1502.         if (unlikely(ret != 0))
  1503.                 goto out_err;
  1504.  
  1505.         if (throttle_us) {
  1506.                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
  1507.                                    throttle_us);
  1508.  
  1509.                 if (unlikely(ret != 0))
  1510.                         goto out_err;
  1511.         }
  1512.  
  1513.         cmd = vmw_fifo_reserve(dev_priv, command_size);
  1514.         if (unlikely(cmd == NULL)) {
  1515.                 DRM_ERROR("Failed reserving fifo space for commands.\n");
  1516.                 ret = -ENOMEM;
  1517.                 goto out_err;
  1518.         }
  1519.  
  1520.         vmw_apply_relocations(sw_context);
  1521.         memcpy(cmd, kernel_commands, command_size);
  1522.  
  1523.         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
  1524.         vmw_resource_relocations_free(&sw_context->res_relocations);
  1525.  
  1526.         vmw_fifo_commit(dev_priv, command_size);
  1527.  
  1528.         vmw_query_bo_switch_commit(dev_priv, sw_context);
  1529.         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
  1530.                                          &fence,
  1531.                                          (user_fence_rep) ? &handle : NULL);
  1532.         /*
  1533.          * This error is harmless, because if fence submission fails,
  1534.          * vmw_fifo_send_fence will sync. The error will be propagated to
         * user-space in @user_fence_rep.
  1536.          */
  1537.  
  1538.         if (ret != 0)
  1539.                 DRM_ERROR("Fence submission error. Syncing.\n");
  1540.  
  1541.         vmw_resource_list_unreserve(&sw_context->resource_list, false);
  1542.         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
  1543.                                     (void *) fence);
  1544.  
  1545.         if (unlikely(dev_priv->pinned_bo != NULL &&
  1546.                      !dev_priv->query_cid_valid))
  1547.                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
  1548.  
  1549.         vmw_clear_validations(sw_context);
  1550.         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
  1551.                                     user_fence_rep, fence, handle);
  1552.  
  1553.         /* Don't unreference when handing fence out */
  1554.         if (unlikely(out_fence != NULL)) {
  1555.                 *out_fence = fence;
  1556.                 fence = NULL;
  1557.         } else if (likely(fence != NULL)) {
  1558.                 vmw_fence_obj_unreference(&fence);
  1559.         }
  1560.  
  1561.         list_splice_init(&sw_context->resource_list, &resource_list);
  1562.         mutex_unlock(&dev_priv->cmdbuf_mutex);
  1563.  
  1564.         /*
  1565.          * Unreference resources outside of the cmdbuf_mutex to
  1566.          * avoid deadlocks in resource destruction paths.
  1567.          */
  1568.         vmw_resource_list_unreference(&resource_list);
  1569.  
  1570.         return 0;
  1571.  
  1572. out_err:
  1573.         vmw_resource_relocations_free(&sw_context->res_relocations);
  1574.         vmw_free_relocations(sw_context);
  1575.         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
  1576.         vmw_resource_list_unreserve(&sw_context->resource_list, true);
  1577.         vmw_clear_validations(sw_context);
  1578.         if (unlikely(dev_priv->pinned_bo != NULL &&
  1579.                      !dev_priv->query_cid_valid))
  1580.                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
  1581. out_unlock:
  1582.         list_splice_init(&sw_context->resource_list, &resource_list);
  1583.         error_resource = sw_context->error_resource;
  1584.         sw_context->error_resource = NULL;
  1585.         mutex_unlock(&dev_priv->cmdbuf_mutex);
  1586.  
  1587.         /*
  1588.          * Unreference resources outside of the cmdbuf_mutex to
  1589.          * avoid deadlocks in resource destruction paths.
  1590.          */
  1591.         vmw_resource_list_unreference(&resource_list);
  1592.         if (unlikely(error_resource != NULL))
  1593.                 vmw_resource_unreference(&error_resource);
  1594.  
  1595.         return ret;
  1596. }
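
/*
 * Submission pipeline summary (as implemented above): parse and check
 * all commands, reserve resources and buffers, validate both, throttle
 * if requested, reserve fifo space, apply relocations, copy and commit
 * the command stream, then fence the submission and unreserve.
 */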
  1597.  
  1598. /**
  1599.  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
  1600.  *
  1601.  * @dev_priv: The device private structure.
  1602.  *
  1603.  * This function is called to idle the fifo and unpin the query buffer
  1604.  * if the normal way to do this hits an error, which should typically be
  1605.  * extremely rare.
  1606.  */
  1607. static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
  1608. {
  1609.         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
  1610.  
  1611.         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
  1612.         vmw_bo_pin(dev_priv->pinned_bo, false);
  1613.         vmw_bo_pin(dev_priv->dummy_query_bo, false);
  1614.         dev_priv->dummy_query_bo_pinned = false;
  1615. }
  1616.  
  1617.  
  1618. /**
  1619.  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
  1620.  * query bo.
  1621.  *
  1622.  * @dev_priv: The device private structure.
  1623.  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
  1624.  * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo.
  1626.  *
  1627.  * This function should be used to unpin the pinned query bo, or
  1628.  * as a query barrier when we need to make sure that all queries have
  1629.  * finished before the next fifo command. (For example on hardware
  1630.  * context destructions where the hardware may otherwise leak unfinished
  1631.  * queries).
  1632.  *
 * This function does not return any failure codes, but makes attempts
 * at safe unpinning in case of errors.
  1635.  *
  1636.  * The function will synchronize on the previous query barrier, and will
  1637.  * thus not finish until that barrier has executed.
  1638.  *
 * The @dev_priv->cmdbuf_mutex must be held by the current thread
  1640.  * before calling this function.
  1641.  */
  1642. void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
  1643.                                      struct vmw_fence_obj *fence)
  1644. {
  1645.         int ret = 0;
  1646.         struct list_head validate_list;
  1647.         struct ttm_validate_buffer pinned_val, query_val;
  1648.         struct vmw_fence_obj *lfence = NULL;
  1649.         struct ww_acquire_ctx ticket;
  1650.  
  1651.         if (dev_priv->pinned_bo == NULL)
  1652.                 goto out_unlock;
  1653.  
  1654.         INIT_LIST_HEAD(&validate_list);
  1655.  
  1656.         pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
  1657.         list_add_tail(&pinned_val.head, &validate_list);
  1658.  
  1659.         query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
  1660.         list_add_tail(&query_val.head, &validate_list);
  1661.  
  1662.         do {
  1663.                 ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
  1664.         } while (ret == -ERESTARTSYS);
  1665.  
  1666.         if (unlikely(ret != 0)) {
  1667.                 vmw_execbuf_unpin_panic(dev_priv);
  1668.                 goto out_no_reserve;
  1669.         }
  1670.  
  1671.         if (dev_priv->query_cid_valid) {
  1672.                 BUG_ON(fence != NULL);
  1673.                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
  1674.                 if (unlikely(ret != 0)) {
  1675.                         vmw_execbuf_unpin_panic(dev_priv);
  1676.                         goto out_no_emit;
  1677.                 }
  1678.                 dev_priv->query_cid_valid = false;
  1679.         }
  1680.  
  1681.         vmw_bo_pin(dev_priv->pinned_bo, false);
  1682.         vmw_bo_pin(dev_priv->dummy_query_bo, false);
  1683.         dev_priv->dummy_query_bo_pinned = false;
  1684.  
  1685.         if (fence == NULL) {
  1686.                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
  1687.                                                   NULL);
  1688.                 fence = lfence;
  1689.         }
  1690.         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
  1691.         if (lfence != NULL)
  1692.                 vmw_fence_obj_unreference(&lfence);
  1693.  
  1694.         ttm_bo_unref(&query_val.bo);
  1695.         ttm_bo_unref(&pinned_val.bo);
  1696.         ttm_bo_unref(&dev_priv->pinned_bo);
  1697.  
  1698. out_unlock:
  1699.         return;
  1700.  
  1701. out_no_emit:
  1702.         ttm_eu_backoff_reservation(&ticket, &validate_list);
  1703. out_no_reserve:
  1704.         ttm_bo_unref(&query_val.bo);
  1705.         ttm_bo_unref(&pinned_val.bo);
  1706.         ttm_bo_unref(&dev_priv->pinned_bo);
  1707. }
  1708.  
  1709. /**
  1710.  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
  1711.  * query bo.
  1712.  *
  1713.  * @dev_priv: The device private structure.
  1714.  *
  1715.  * This function should be used to unpin the pinned query bo, or
  1716.  * as a query barrier when we need to make sure that all queries have
  1717.  * finished before the next fifo command. (For example on hardware
  1718.  * context destructions where the hardware may otherwise leak unfinished
  1719.  * queries).
  1720.  *
 * This function does not return any failure codes, but makes attempts
 * at safe unpinning in case of errors.
  1723.  *
  1724.  * The function will synchronize on the previous query barrier, and will
  1725.  * thus not finish until that barrier has executed.
  1726.  */
  1727. void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
  1728. {
  1729.         mutex_lock(&dev_priv->cmdbuf_mutex);
  1730.         if (dev_priv->query_cid_valid)
  1731.                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
  1732.         mutex_unlock(&dev_priv->cmdbuf_mutex);
  1733. }
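
/*
 * Hypothetical call-site sketch (per the description above): flush
 * queries before tearing down a hardware context:
 *
 *      // ...hardware context is about to be destroyed...
 *      vmw_execbuf_release_pinned_bo(dev_priv);
 *      // ...unfinished queries are now flushed...
 */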
  1734.  
  1735.  
  1736. int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
  1737.                       struct drm_file *file_priv)
  1738. {
  1739.         struct vmw_private *dev_priv = vmw_priv(dev);
  1740.         struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
  1741. //   struct vmw_master *vmaster = vmw_master(file_priv->master);
  1742.         int ret;
  1743.  
  1744.         /*
  1745.          * This will allow us to extend the ioctl argument while
  1746.          * maintaining backwards compatibility:
  1747.          * We take different code paths depending on the value of
  1748.          * arg->version.
  1749.          */
  1750.  
  1751.         if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
  1752.                 DRM_ERROR("Incorrect execbuf version.\n");
  1753.                 DRM_ERROR("You're running outdated experimental "
                          "vmwgfx user-space drivers.\n");
  1755.                 return -EINVAL;
  1756.         }
  1757.  
        /*
         * KolibriOS port: ttm_read_lock is stubbed out, so don't test
         * the otherwise-uninitialized ret here.
         */
//   ret = ttm_read_lock(&vmaster->lock, true);
//      if (unlikely(ret != 0))
//              return ret;
  1761.  
  1762.         ret = vmw_execbuf_process(file_priv, dev_priv,
  1763.                                   (void __user *)(unsigned long)arg->commands,
  1764.                                   NULL, arg->command_size, arg->throttle_us,
  1765.                                   (void __user *)(unsigned long)arg->fence_rep,
  1766.                                   NULL);
  1767.  
  1768.         if (unlikely(ret != 0))
  1769.                 goto out_unlock;
  1770.  
  1771. //   vmw_kms_cursor_post_execbuf(dev_priv);
  1772.  
  1773. out_unlock:
  1774. //   ttm_read_unlock(&vmaster->lock);
  1775.         return ret;
  1776. }
  1777.