/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

#define VMW_COMPAT_SHADER_HT_ORDER 12
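/*
 * VMW_COMPAT_SHADER_HT_ORDER is the log2 of the number of hash buckets
 * (2^12 = 4096), presumably passed to drm_ht_create() for the manager's
 * shaders table; the table setup itself is not part of this file, so that
 * use is an assumption.
 */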

#if 0
struct vmw_shader {
        struct vmw_resource res;
        SVGA3dShaderType type;
        uint32_t size;
};

struct vmw_user_shader {
        struct ttm_base_object base;
        struct vmw_shader shader;
};

/**
 * enum vmw_compat_shader_state - Staging state for compat shaders
 */
enum vmw_compat_shader_state {
        VMW_COMPAT_COMMITED,
        VMW_COMPAT_ADD,
        VMW_COMPAT_DEL
};
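/*
 * A rough sketch of the intended staging flow, inferred from the state
 * names and the add/remove helpers below (the commit path itself is not
 * in this file): a compat shader staged as VMW_COMPAT_ADD or
 * VMW_COMPAT_DEL lives on the caller's staging list until the associated
 * command buffer is submitted, after which it counts as
 * VMW_COMPAT_COMMITED; if submission fails, the staged action is undone.
 */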

/**
 * struct vmw_compat_shader - Metadata for compat shaders.
 *
 * @handle: The TTM handle of the guest backed shader.
 * @tfile: The struct ttm_object_file the guest backed shader is registered
 * with.
 * @hash: Hash item for lookup.
 * @head: List head for staging lists or the compat shader manager list.
 * @state: Staging state.
 *
 * The structure is protected by the cmdbuf lock.
 */
struct vmw_compat_shader {
        u32 handle;
        struct ttm_object_file *tfile;
        struct drm_hash_item hash;
        struct list_head head;
        enum vmw_compat_shader_state state;
};

/**
 * struct vmw_compat_shader_manager - Compat shader manager.
 *
 * @shaders: Hash table containing staged and committed compat shaders
 * @list: List of committed shaders.
 * @dev_priv: Pointer to a device private structure.
 *
 * @shaders and @list are protected by the cmdbuf mutex for now.
 */
struct vmw_compat_shader_manager {
        struct drm_open_hash shaders;
        struct list_head list;
        struct vmw_private *dev_priv;
};

static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_shader_conv = {
        .object_type = VMW_RES_SHADER,
        .base_obj_to_res = vmw_user_shader_base_to_res,
        .res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
        &user_shader_conv;


static const struct vmw_res_func vmw_gb_shader_func = {
        .res_type = vmw_res_shader,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed shaders",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_shader_create,
        .destroy = vmw_gb_shader_destroy,
        .bind = vmw_gb_shader_bind,
        .unbind = vmw_gb_shader_unbind
};
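/*
 * The res_func table above wires guest-backed shaders into the generic
 * vmwgfx resource framework: .create issues SVGA_3D_CMD_DEFINE_GB_SHADER,
 * .bind and .unbind issue SVGA_3D_CMD_BIND_GB_SHADER (binding to a MOB or
 * to SVGA3D_INVALID_ID respectively), and .destroy issues
 * SVGA_3D_CMD_DESTROY_GB_SHADER, as implemented below.
 */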

/**
 * Shader management:
 */

static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
        return container_of(res, struct vmw_shader, res);
}

static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
        (void) vmw_gb_shader_destroy(res);
}

static int vmw_gb_shader_init(struct vmw_private *dev_priv,
                              struct vmw_resource *res,
                              uint32_t size,
                              uint64_t offset,
                              SVGA3dShaderType type,
                              struct vmw_dma_buffer *byte_code,
                              void (*res_free) (struct vmw_resource *res))
{
        struct vmw_shader *shader = vmw_res_to_shader(res);
        int ret;

        ret = vmw_resource_init(dev_priv, res, true,
                                res_free, &vmw_gb_shader_func);

        if (unlikely(ret != 0)) {
                if (res_free)
                        res_free(res);
                else
                        kfree(res);
                return ret;
        }

        res->backup_size = size;
        if (byte_code) {
                res->backup = vmw_dmabuf_reference(byte_code);
                res->backup_offset = offset;
        }
        shader->size = size;
        shader->type = type;

        vmw_resource_activate(res, vmw_hw_shader_destroy);
        return 0;
}
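/*
 * Note that vmw_gb_shader_init() consumes @res on failure: the resource is
 * handed to @res_free (or kfree()d when no destructor is supplied), so a
 * caller such as vmw_user_shader_alloc() below must not touch it again
 * once the call has returned an error.
 */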

static int vmw_gb_shader_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_shader *shader = vmw_res_to_shader(res);
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBShader body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a shader id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.type = shader->type;
        cmd->body.sizeInBytes = shader->size;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv, false);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

static int vmw_gb_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBShader body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.offsetInBytes = 0;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}
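/*
 * Using bo->mem.start as cmd->body.mobid above relies on the MOB placement
 * manager storing the MOB id in the memory node's start field for buffers
 * placed in VMW_PL_MOB; that manager is not part of this file, so treat
 * this as an assumption documenting the intent rather than a guarantee.
 */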

static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBShader body;
        } *cmd;
        struct vmw_fence_obj *fence;

        BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.mobid = SVGA3D_INVALID_ID;
        cmd->body.offsetInBytes = 0;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBShader body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        mutex_lock(&dev_priv->binding_mutex);
        vmw_context_binding_res_list_scrub(&res->binding_head);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "destruction.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        mutex_unlock(&dev_priv->binding_mutex);
        vmw_resource_release_id(res);
        vmw_3d_resource_dec(dev_priv, false);

        return 0;
}

/**
 * User-space shader management:
 */

static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_shader, base)->
                 shader.res);
}

static void vmw_user_shader_free(struct vmw_resource *res)
{
        struct vmw_user_shader *ushader =
                container_of(res, struct vmw_user_shader, shader.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(ushader, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_shader_size);
}

/**
 * vmw_user_shader_base_release - TTM base-object release callback.
 *
 * This function is called when user space has no more references on the
 * base object. It releases the base object's reference on the resource
 * object.
 */

static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_resource *res = vmw_user_shader_base_to_res(base);

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->handle,
                                         TTM_REF_USAGE);
}

static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
                                 struct vmw_dma_buffer *buffer,
                                 size_t shader_size,
                                 size_t offset,
                                 SVGA3dShaderType shader_type,
                                 struct ttm_object_file *tfile,
                                 u32 *handle)
{
        struct vmw_user_shader *ushader;
        struct vmw_resource *res, *tmp;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by maximum number of shaders anyway.
         */
        if (unlikely(vmw_user_shader_size == 0))
                vmw_user_shader_size =
                        ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_shader_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for shader "
                                  "creation.\n");
                goto out;
        }

        ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
        if (unlikely(ushader == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_shader_size);
                ret = -ENOMEM;
                goto out;
        }

        res = &ushader->shader.res;
        ushader->base.shareable = false;
        ushader->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_gb_shader_init(dev_priv, res, shader_size,
                                 offset, shader_type, buffer,
                                 vmw_user_shader_free);
        if (unlikely(ret != 0))
                goto out;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &ushader->base, false,
                                   VMW_RES_SHADER,
                                   &vmw_user_shader_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        if (handle)
                *handle = ushader->base.hash.key;
out_err:
        vmw_resource_unreference(&res);
out:
        return ret;
}
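/*
 * Reference flow in vmw_user_shader_alloc(): the resource starts out with
 * a single reference owned by this function. @tmp takes an extra reference
 * that is handed to the TTM base object and is later dropped through
 * vmw_user_shader_base_release() when the last user-space reference goes
 * away, while the local reference is always dropped at out_err.
 */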

int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_shader_create_arg *arg =
                (struct drm_vmw_shader_create_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *buffer = NULL;
        SVGA3dShaderType shader_type;
        int ret;

        if (arg->buffer_handle != SVGA3D_INVALID_ID) {
                ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
                                             &buffer);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not find buffer for shader "
                                  "creation.\n");
                        return ret;
                }

                if ((u64)buffer->base.num_pages * PAGE_SIZE <
                    (u64)arg->size + (u64)arg->offset) {
                        DRM_ERROR("Illegal buffer- or shader size.\n");
                        ret = -EINVAL;
                        goto out_bad_arg;
                }
        }

        switch (arg->shader_type) {
        case drm_vmw_shader_type_vs:
                shader_type = SVGA3D_SHADERTYPE_VS;
                break;
        case drm_vmw_shader_type_ps:
                shader_type = SVGA3D_SHADERTYPE_PS;
                break;
        case drm_vmw_shader_type_gs:
                shader_type = SVGA3D_SHADERTYPE_GS;
                break;
        default:
                DRM_ERROR("Illegal shader type.\n");
                ret = -EINVAL;
                goto out_bad_arg;
        }

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                goto out_bad_arg;

        ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
                                    shader_type, tfile, &arg->shader_handle);

        ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
        vmw_dmabuf_unreference(&buffer);
        return ret;
}
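/*
 * A rough user-space sketch for the define ioctl above, where
 * dmabuf_handle names an existing DMA buffer that already holds the
 * shader bytecode. The argument fields match struct
 * drm_vmw_shader_create_arg as used in this file; the libdrm helper
 * drmCommandWriteRead() and the DRM_VMW_CREATE_SHADER command index are
 * assumptions, not defined in this file:
 *
 *      struct drm_vmw_shader_create_arg arg = {
 *              .shader_type   = drm_vmw_shader_type_vs,
 *              .size          = bytecode_size,
 *              .offset        = 0,
 *              .buffer_handle = dmabuf_handle,
 *      };
 *      if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER,
 *                              &arg, sizeof(arg)) == 0)
 *              shader_handle = arg.shader_handle;
 */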

/**
 * vmw_compat_shader_id_ok - Check whether a compat shader user key and
 * shader type are within valid bounds.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns true if valid, false if not.
 */
static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
        return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}

/**
 * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns a hash key suitable for a command buffer managed resource
 * manager hash table.
 */
static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
        return user_key | (shader_type << 20);
}
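/*
 * Key layout: vmw_compat_shader_id_ok() above guarantees that the user key
 * fits in bits 0..19 and the shader type in the next four bits, so the key
 * user_key | (shader_type << 20) is unique per (user_key, shader_type)
 * pair; e.g. the same user key 3 used with two different shader types
 * produces two distinct hash keys.
 */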

/**
 * vmw_compat_shader_remove - Stage a compat shader for removal.
 *
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @shader_type: Shader type.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
                             u32 user_key, SVGA3dShaderType shader_type,
                             struct list_head *list)
{
        if (!vmw_compat_shader_id_ok(user_key, shader_type))
                return -EINVAL;

        return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
                                     vmw_compat_shader_key(user_key,
                                                           shader_type),
                                     list);
}

/**
 * vmw_compat_shader_add - Create a compat shader and stage it for addition
 * as a command buffer managed resource.
 *
 * @dev_priv: Pointer to a device private structure.
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @bytecode: Pointer to the bytecode of the shader.
 * @shader_type: Shader type.
 * @size: Size of the shader bytecode in bytes.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_compat_shader_add(struct vmw_private *dev_priv,
                          struct vmw_cmdbuf_res_manager *man,
                          u32 user_key, const void *bytecode,
                          SVGA3dShaderType shader_type,
                          size_t size,
                          struct list_head *list)
{
        struct vmw_dma_buffer *buf;
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        int ret;
        struct vmw_resource *res;

        if (!vmw_compat_shader_id_ok(user_key, shader_type))
                return -EINVAL;

        /* Allocate and pin a DMA buffer */
        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (unlikely(buf == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
                              true, vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out;

        ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
        if (unlikely(ret != 0))
                goto no_reserve;

        /* Map and copy shader bytecode. */
        ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
                          &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(&buf->base);
                goto no_reserve;
        }

        memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
        WARN_ON(is_iomem);

        ttm_bo_kunmap(&map);
        ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
        WARN_ON(ret != 0);
        ttm_bo_unreserve(&buf->base);

        /* vmw_shader_alloc() returns an ERR_PTR() on failure. */
        res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
        if (unlikely(IS_ERR(res))) {
                ret = PTR_ERR(res);
                goto no_reserve;
        }

        ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
                                 vmw_compat_shader_key(user_key, shader_type),
                                 res, list);
        vmw_resource_unreference(&res);
no_reserve:
        vmw_dmabuf_unreference(&buf);
out:
        return ret;
}
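/*
 * The shader staged here only becomes visible once the caller's staging
 * list is committed by the command buffer submission path, and is dropped
 * again if that submission is reverted. This sketch of the surrounding
 * flow assumes the vmw_cmdbuf_res_commit()/vmw_cmdbuf_res_revert() helpers
 * that pair with the vmw_cmdbuf_res_add()/vmw_cmdbuf_res_remove() calls
 * used above; those helpers are not part of this file.
 */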

/**
 * vmw_compat_shader_lookup - Look up a compat shader
 *
 * @man: Pointer to the command buffer managed resource manager identifying
 * the shader namespace.
 * @user_key: The user space id of the shader.
 * @shader_type: The shader type.
 *
 * Returns a refcounted pointer to a struct vmw_resource if the shader was
 * found. An error pointer otherwise.
 */
struct vmw_resource *
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
                         u32 user_key,
                         SVGA3dShaderType shader_type)
{
        if (!vmw_compat_shader_id_ok(user_key, shader_type))
                return ERR_PTR(-EINVAL);

        return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
                                     vmw_compat_shader_key(user_key,
                                                           shader_type));
}
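/*
 * Since vmw_compat_shader_lookup() returns a refcounted pointer, callers
 * are presumably expected to drop that reference with
 * vmw_resource_unreference() when done, using IS_ERR() to distinguish the
 * error-pointer case.
 */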
#endif