/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"

#define VMW_COMPAT_SHADER_HT_ORDER 12

#if 0
struct vmw_shader {
        struct vmw_resource res;
        SVGA3dShaderType type;
        uint32_t size;
        uint8_t num_input_sig;
        uint8_t num_output_sig;
};

struct vmw_user_shader {
        struct ttm_base_object base;
        struct vmw_shader shader;
};

struct vmw_dx_shader {
        struct vmw_resource res;
        struct vmw_resource *ctx;
        struct vmw_resource *cotable;
        u32 id;
        bool committed;
        struct list_head cotable_head;
};

static uint64_t vmw_user_shader_size;
static uint64_t vmw_shader_size;
static size_t vmw_shader_dx_size;

static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

static int vmw_dx_shader_create(struct vmw_resource *res);
static int vmw_dx_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf);
static int vmw_dx_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf);
static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
                                        enum vmw_cmdbuf_res_state state);
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
static const struct vmw_user_resource_conv user_shader_conv = {
        .object_type = VMW_RES_SHADER,
        .base_obj_to_res = vmw_user_shader_base_to_res,
        .res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
        &user_shader_conv;


static const struct vmw_res_func vmw_gb_shader_func = {
        .res_type = vmw_res_shader,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed shaders",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_shader_create,
        .destroy = vmw_gb_shader_destroy,
        .bind = vmw_gb_shader_bind,
        .unbind = vmw_gb_shader_unbind
};

static const struct vmw_res_func vmw_dx_shader_func = {
        .res_type = vmw_res_shader,
        .needs_backup = true,
        .may_evict = false,
        .type_name = "dx shaders",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_dx_shader_create,
        /*
         * The destroy callback is only called with a committed resource on
         * context destroy, in which case we destroy the cotable anyway,
         * so there's no need to destroy DX shaders separately.
         */
        .destroy = NULL,
        .bind = vmw_dx_shader_bind,
        .unbind = vmw_dx_shader_unbind,
        .commit_notify = vmw_dx_shader_commit_notify,
};

/**
 * Shader management:
 */

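/**
 * vmw_res_to_shader - typecast a struct vmw_resource to a
 * struct vmw_shader
 *
 * @res: Pointer to the struct vmw_resource.
 */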
static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
        return container_of(res, struct vmw_shader, res);
}

/**
 * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
 * struct vmw_dx_shader
 *
 * @res: Pointer to the struct vmw_resource.
 */
static inline struct vmw_dx_shader *
vmw_res_to_dx_shader(struct vmw_resource *res)
{
        return container_of(res, struct vmw_dx_shader, res);
}

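/**
 * vmw_hw_shader_destroy - Destroy a hardware shader resource.
 *
 * @res: Pointer to the shader resource.
 *
 * Calls the hardware destroy callback of the resource type if one is
 * provided; otherwise simply marks the resource id as unused.
 */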
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
        if (likely(res->func->destroy))
                (void) res->func->destroy(res);
        else
                res->id = -1;
}


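/**
 * vmw_gb_shader_init - Initialize a guest-backed shader resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the already allocated shader resource.
 * @size: Size of the shader bytecode in bytes.
 * @offset: Offset of the bytecode within @byte_code.
 * @type: SVGA3D shader type.
 * @num_input_sig: Number of input signature entries.
 * @num_output_sig: Number of output signature entries.
 * @byte_code: Optional buffer object holding the shader bytecode.
 * @res_free: Destructor for the resource, or NULL to use kfree().
 *
 * On failure the resource is freed through @res_free (or kfree()) and the
 * error code is returned.
 */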
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
                              struct vmw_resource *res,
                              uint32_t size,
                              uint64_t offset,
                              SVGA3dShaderType type,
                              uint8_t num_input_sig,
                              uint8_t num_output_sig,
                              struct vmw_dma_buffer *byte_code,
                              void (*res_free) (struct vmw_resource *res))
{
        struct vmw_shader *shader = vmw_res_to_shader(res);
        int ret;

        ret = vmw_resource_init(dev_priv, res, true, res_free,
                                &vmw_gb_shader_func);

        if (unlikely(ret != 0)) {
                if (res_free)
                        res_free(res);
                else
                        kfree(res);
                return ret;
        }

        res->backup_size = size;
        if (byte_code) {
                res->backup = vmw_dmabuf_reference(byte_code);
                res->backup_offset = offset;
        }
        shader->size = size;
        shader->type = type;
        shader->num_input_sig = num_input_sig;
        shader->num_output_sig = num_output_sig;

        vmw_resource_activate(res, vmw_hw_shader_destroy);
        return 0;
}

/*
 * GB shader code:
 */

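/**
 * vmw_gb_shader_create - Issue a DEFINE_GB_SHADER command for the resource.
 *
 * @res: Pointer to the shader resource.
 *
 * Allocates a device id for the shader and reserves FIFO space for the
 * define command. Returns 0 immediately if the shader already has an id.
 */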
static int vmw_gb_shader_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_shader *shader = vmw_res_to_shader(res);
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBShader body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a shader id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.type = shader->type;
        cmd->body.sizeInBytes = shader->size;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

static int vmw_gb_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBShader body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.offsetInBytes = res->backup_offset;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBShader body;
        } *cmd;
        struct vmw_fence_obj *fence;

        BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.mobid = SVGA3D_INVALID_ID;
        cmd->body.offsetInBytes = 0;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBShader body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        mutex_lock(&dev_priv->binding_mutex);
        vmw_binding_res_list_scrub(&res->binding_head);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "destruction.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        mutex_unlock(&dev_priv->binding_mutex);
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/**
 * User-space shader management:
 */

static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_shader, base)->
                 shader.res);
}

static void vmw_user_shader_free(struct vmw_resource *res)
{
        struct vmw_user_shader *ushader =
                container_of(res, struct vmw_user_shader, shader.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(ushader, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_shader_size);
}

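/*
 * vmw_shader_free - Res-free callback used by vmw_shader_alloc() below.
 *
 * @res: Pointer to the shader resource.
 *
 * A minimal counterpart to vmw_user_shader_free() above, sketched to match
 * the callback passed in vmw_shader_alloc(): it frees a bare struct
 * vmw_shader and returns its accounted size to the memory global.
 */
static void vmw_shader_free(struct vmw_resource *res)
{
        struct vmw_shader *shader = vmw_res_to_shader(res);
        struct vmw_private *dev_priv = res->dev_priv;

        kfree(shader);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_shader_size);
}
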
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_resource *res = vmw_user_shader_base_to_res(base);

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->handle,
                                         TTM_REF_USAGE);
}

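/**
 * vmw_user_shader_alloc - Allocate a user-space visible guest-backed shader.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Optional buffer object holding the shader bytecode.
 * @shader_size: Size of the shader bytecode in bytes.
 * @offset: Offset of the bytecode within @buffer.
 * @shader_type: SVGA3D shader type.
 * @num_input_sig: Number of input signature entries.
 * @num_output_sig: Number of output signature entries.
 * @tfile: Pointer to the ttm object file the base object is registered with.
 * @handle: If non-NULL, used to return the user-space handle of the shader.
 */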
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
                                 struct vmw_dma_buffer *buffer,
                                 size_t shader_size,
                                 size_t offset,
                                 SVGA3dShaderType shader_type,
                                 uint8_t num_input_sig,
                                 uint8_t num_output_sig,
                                 struct ttm_object_file *tfile,
                                 u32 *handle)
{
        struct vmw_user_shader *ushader;
        struct vmw_resource *res, *tmp;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of shaders anyway.
         */
        if (unlikely(vmw_user_shader_size == 0))
                vmw_user_shader_size =
                        ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_shader_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for shader "
                                  "creation.\n");
                goto out;
        }

        ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
        if (unlikely(ushader == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_shader_size);
                ret = -ENOMEM;
                goto out;
        }

        res = &ushader->shader.res;
        ushader->base.shareable = false;
        ushader->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_gb_shader_init(dev_priv, res, shader_size,
                                 offset, shader_type, num_input_sig,
                                 num_output_sig, buffer,
                                 vmw_user_shader_free);
        if (unlikely(ret != 0))
                goto out;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &ushader->base, false,
                                   VMW_RES_SHADER,
                                   &vmw_user_shader_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        if (handle)
                *handle = ushader->base.hash.key;
out_err:
        vmw_resource_unreference(&res);
out:
        return ret;
}


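/**
 * vmw_shader_alloc - Allocate a kernel-internal guest-backed shader.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Buffer object holding the shader bytecode.
 * @shader_size: Size of the shader bytecode in bytes.
 * @offset: Offset of the bytecode within @buffer.
 * @shader_type: SVGA3D shader type.
 *
 * Returns a pointer to the shader resource on success, an error pointer
 * on failure.
 */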
static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
                                             struct vmw_dma_buffer *buffer,
                                             size_t shader_size,
                                             size_t offset,
                                             SVGA3dShaderType shader_type)
{
        struct vmw_shader *shader;
        struct vmw_resource *res;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of shaders anyway.
         */
        if (unlikely(vmw_shader_size == 0))
                vmw_shader_size =
                        ttm_round_pot(sizeof(struct vmw_shader)) + 128;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_shader_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for shader "
                                  "creation.\n");
                goto out_err;
        }

        shader = kzalloc(sizeof(*shader), GFP_KERNEL);
        if (unlikely(shader == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_shader_size);
                ret = -ENOMEM;
                goto out_err;
        }

        res = &shader->res;

        /*
         * From here on, the destructor takes over resource freeing.
         */
        ret = vmw_gb_shader_init(dev_priv, res, shader_size,
                                 offset, shader_type, 0, 0, buffer,
                                 vmw_shader_free);

out_err:
        return ret ? ERR_PTR(ret) : res;
}


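/**
 * vmw_shader_define - Create a user-space shader from a DMA buffer.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Pointer to the calling file private.
 * @shader_type_drm: Shader type, using the drm_vmw_shader_type enumeration.
 * @buffer_handle: Handle of the buffer holding the bytecode, or
 * SVGA3D_INVALID_ID for none.
 * @size: Size of the shader bytecode in bytes.
 * @offset: Offset of the bytecode within the buffer.
 * @num_input_sig: Number of input signature entries.
 * @num_output_sig: Number of output signature entries.
 * @shader_handle: Used to return the user-space handle of the new shader.
 */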
static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
                             enum drm_vmw_shader_type shader_type_drm,
                             u32 buffer_handle, size_t size, size_t offset,
                             uint8_t num_input_sig, uint8_t num_output_sig,
                             uint32_t *shader_handle)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *buffer = NULL;
        SVGA3dShaderType shader_type;
        int ret;

        if (buffer_handle != SVGA3D_INVALID_ID) {
                ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
                                             &buffer, NULL);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not find buffer for shader "
                                  "creation.\n");
                        return ret;
                }

                if ((u64)buffer->base.num_pages * PAGE_SIZE <
                    (u64)size + (u64)offset) {
                        DRM_ERROR("Illegal buffer- or shader size.\n");
                        ret = -EINVAL;
                        goto out_bad_arg;
                }
        }

        switch (shader_type_drm) {
        case drm_vmw_shader_type_vs:
                shader_type = SVGA3D_SHADERTYPE_VS;
                break;
        case drm_vmw_shader_type_ps:
                shader_type = SVGA3D_SHADERTYPE_PS;
                break;
        default:
                DRM_ERROR("Illegal shader type.\n");
                ret = -EINVAL;
                goto out_bad_arg;
        }

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                goto out_bad_arg;

        ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
                                    shader_type, num_input_sig,
                                    num_output_sig, tfile, shader_handle);

        ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
        vmw_dmabuf_unreference(&buffer);
        return ret;
}

/**
 * vmw_shader_id_ok - Check whether a compat shader user key and
 * shader type are within valid bounds.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns true if valid, false if not.
 */
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
        return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}

/**
 * vmw_shader_key - Compute a hash key suitable for a compat shader.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns a hash key suitable for a command buffer managed resource
 * manager hash table.
 */
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
        return user_key | (shader_type << 20);
}

/**
 * vmw_shader_remove - Stage a compat shader for removal.
 *
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @shader_type: Shader type.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
                      u32 user_key, SVGA3dShaderType shader_type,
                      struct list_head *list)
{
        struct vmw_resource *dummy;

        if (!vmw_shader_id_ok(user_key, shader_type))
                return -EINVAL;

        return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
                                     vmw_shader_key(user_key, shader_type),
                                     list, &dummy);
}

/**
 * vmw_compat_shader_add - Create a compat shader and stage it for addition
 * as a command buffer managed resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @bytecode: Pointer to the bytecode of the shader.
 * @shader_type: Shader type.
 * @size: Size of the shader bytecode in bytes.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_compat_shader_add(struct vmw_private *dev_priv,
                          struct vmw_cmdbuf_res_manager *man,
                          u32 user_key, const void *bytecode,
                          SVGA3dShaderType shader_type,
                          size_t size,
                          struct list_head *list)
{
        struct vmw_dma_buffer *buf;
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        int ret;
        struct vmw_resource *res;

        if (!vmw_shader_id_ok(user_key, shader_type))
                return -EINVAL;

        /* Allocate and pin a DMA buffer */
        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (unlikely(buf == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
                              true, vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out;

        ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
        if (unlikely(ret != 0))
                goto no_reserve;

        /* Map and copy shader bytecode. */
        ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
                          &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(&buf->base);
                goto no_reserve;
        }

        memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
        WARN_ON(is_iomem);

        ttm_bo_kunmap(&map);
        ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
        WARN_ON(ret != 0);
        ttm_bo_unreserve(&buf->base);

        res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
        /* vmw_shader_alloc() returns an ERR_PTR on failure. */
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto no_reserve;
        }

        ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
                                 vmw_shader_key(user_key, shader_type),
                                 res, list);
        vmw_resource_unreference(&res);
no_reserve:
        vmw_dmabuf_unreference(&buf);
out:
        return ret;
}

/**
 * vmw_shader_lookup - Look up a compat shader
 *
 * @man: Pointer to the command buffer managed resource manager identifying
 * the shader namespace.
 * @user_key: The user space id of the shader.
 * @shader_type: The shader type.
 *
 * Returns a refcounted pointer to a struct vmw_resource if the shader was
 * found. An error pointer otherwise.
 */
struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
                  u32 user_key,
                  SVGA3dShaderType shader_type)
{
        if (!vmw_shader_id_ok(user_key, shader_type))
                return ERR_PTR(-EINVAL);

        return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
                                     vmw_shader_key(user_key, shader_type));
}

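/**
 * vmw_shader_define_ioctl - Ioctl entry point for creating a shader.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to the ioctl argument, a struct drm_vmw_shader_create_arg.
 * @file_priv: Pointer to the calling file private.
 */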
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_shader_create_arg *arg =
                (struct drm_vmw_shader_create_arg *)data;

        return vmw_shader_define(dev, file_priv, arg->shader_type,
                                 arg->buffer_handle,
                                 arg->size, arg->offset,
                                 0, 0,
                                 &arg->shader_handle);
}
#endif