Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /**************************************************************************
  2.  *
  3.  * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
  4.  * All Rights Reserved.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the
  8.  * "Software"), to deal in the Software without restriction, including
  9.  * without limitation the rights to use, copy, modify, merge, publish,
  10.  * distribute, sub license, and/or sell copies of the Software, and to
  11.  * permit persons to whom the Software is furnished to do so, subject to
  12.  * the following conditions:
  13.  *
  14.  * The above copyright notice and this permission notice (including the
  15.  * next paragraph) shall be included in all copies or substantial portions
  16.  * of the Software.
  17.  *
  18.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20.  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21.  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22.  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23.  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24.  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25.  *
  26.  **************************************************************************/
  27.  
  28. #include "vmwgfx_drv.h"
  29. #include "vmwgfx_resource_priv.h"
  30. #include "ttm/ttm_placement.h"
  31.  
/**
 * struct vmw_shader - Core guest-backed shader resource.
 *
 * @res:  Embedded vmw_resource; the shader is managed through the
 *        generic resource framework (see vmw_gb_shader_func).
 * @type: SVGA3D shader type (vertex/pixel/etc.) sent to the device
 *        in the DEFINE_GB_SHADER command.
 * @size: Shader byte-code size in bytes; also used as the backup
 *        buffer size.
 */
struct vmw_shader {
        struct vmw_resource res;
        SVGA3dShaderType type;
        uint32_t size;
};
  37.  
/**
 * struct vmw_user_shader - User-space visible shader wrapper.
 *
 * @base:   TTM base object providing the user-space handle and
 *          reference counting.
 * @shader: The wrapped kernel shader resource.
 */
struct vmw_user_shader {
        struct ttm_base_object base;
        struct vmw_shader shader;
};
  42.  
/* Forward declarations for the user-space conversion table below. */
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

/* Forward declarations for the guest-backed resource function table. */
static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

/*
 * Accounting size of one user shader, computed lazily on first use
 * (see vmw_shader_define_ioctl). Zero until then.
 */
static uint64_t vmw_user_shader_size;
  56.  
/*
 * Conversion table used by the generic resource code to go from a
 * user-space TTM base object handle to the underlying shader resource.
 */
static const struct vmw_user_resource_conv user_shader_conv = {
        .object_type = VMW_RES_SHADER,
        .base_obj_to_res = vmw_user_shader_base_to_res,
        .res_free = vmw_user_shader_free
};

/* Exported pointer to the converter, referenced from the lookup code. */
const struct vmw_user_resource_conv *user_shader_converter =
        &user_shader_conv;
  65.  
  66.  
/*
 * Resource function table for guest-backed shaders. Shaders always need
 * a MOB backup buffer holding the byte-code, and may be evicted.
 */
static const struct vmw_res_func vmw_gb_shader_func = {
        .res_type = vmw_res_shader,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed shaders",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_shader_create,
        .destroy = vmw_gb_shader_destroy,
        .bind = vmw_gb_shader_bind,
        .unbind = vmw_gb_shader_unbind
};
  78.  
  79. /**
  80.  * Shader management:
  81.  */
  82.  
/**
 * vmw_res_to_shader - Convert an embedded resource pointer back to the
 * containing vmw_shader.
 *
 * @res: Pointer to the @res member of a struct vmw_shader.
 */
static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
        return container_of(res, struct vmw_shader, res);
}
  88.  
/**
 * vmw_hw_shader_destroy - hw_destroy callback installed by
 * vmw_gb_shader_init via vmw_resource_activate.
 *
 * @res: The shader resource to destroy on the device.
 *
 * The destroy return value is intentionally ignored; there is nothing
 * useful a destructor can do with it.
 */
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
        (void) vmw_gb_shader_destroy(res);
}
  93.  
/**
 * vmw_gb_shader_init - Initialize a guest-backed shader resource.
 *
 * @dev_priv:  Device private.
 * @res:       Pre-allocated (but uninitialized) resource embedded in a
 *             struct vmw_shader. Ownership passes to this function: on
 *             failure the resource is freed via @res_free (or kfree when
 *             @res_free is NULL).
 * @size:      Size in bytes of the shader byte-code; becomes the backup
 *             buffer size.
 * @offset:    Byte offset of the byte-code within @byte_code.
 * @type:      SVGA3D shader type.
 * @byte_code: Optional dma buffer holding the byte-code; a reference is
 *             taken when non-NULL.
 * @res_free:  Destructor for @res, forwarded to vmw_resource_init.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
                              struct vmw_resource *res,
                              uint32_t size,
                              uint64_t offset,
                              SVGA3dShaderType type,
                              struct vmw_dma_buffer *byte_code,
                              void (*res_free) (struct vmw_resource *res))
{
        struct vmw_shader *shader = vmw_res_to_shader(res);
        int ret;

        ret = vmw_resource_init(dev_priv, res, true,
                                res_free, &vmw_gb_shader_func);


        if (unlikely(ret != 0)) {
                /* Init failed: dispose of the resource ourselves. */
                if (res_free)
                        res_free(res);
                else
                        kfree(res);
                return ret;
        }

        res->backup_size = size;
        if (byte_code) {
                /* Hold a reference on the byte-code buffer as backup. */
                res->backup = vmw_dmabuf_reference(byte_code);
                res->backup_offset = offset;
        }
        shader->size = size;
        shader->type = type;

        /* Activate with a hw destructor; device teardown path. */
        vmw_resource_activate(res, vmw_hw_shader_destroy);
        return 0;
}
  128.  
/**
 * vmw_gb_shader_create - Create callback: define the shader on the device.
 *
 * @res: The shader resource.
 *
 * Allocates a device id and emits a DEFINE_GB_SHADER command through the
 * FIFO. Idempotent: returns 0 immediately if the resource already has an
 * id. Returns 0 on success, negative error code otherwise.
 */
static int vmw_gb_shader_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_shader *shader = vmw_res_to_shader(res);
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBShader body;
        } *cmd;

        /* Already created on the device. */
        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a shader id.\n");
                goto out_no_id;
        }

        /* Device cannot address ids beyond VMWGFX_NUM_GB_SHADER. */
        if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.type = shader->type;
        cmd->body.sizeInBytes = shader->size;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        /* Bump the 3D-resource count; balanced in vmw_gb_shader_destroy. */
        (void) vmw_3d_resource_inc(dev_priv, false);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}
  176.  
/**
 * vmw_gb_shader_bind - Bind callback: attach the backup MOB to the shader.
 *
 * @res:     The shader resource.
 * @val_buf: Validation buffer whose bo holds the shader byte-code; must
 *           already be placed in MOB memory (enforced by the BUG_ON).
 *
 * Emits a BIND_GB_SHADER command pointing the device at the MOB, and
 * clears the backup-dirty flag. Returns 0 on success, -ENOMEM if FIFO
 * space could not be reserved.
 */
static int vmw_gb_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBShader body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        /* For a MOB placement, mem.start is the MOB id. */
        cmd->body.mobid = bo->mem.start;
        cmd->body.offsetInBytes = 0;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}
  206.  
/**
 * vmw_gb_shader_unbind - Unbind callback: detach the backup MOB.
 *
 * @res:      The shader resource.
 * @readback: Unused here; shaders have nothing to read back.
 * @val_buf:  Validation buffer for the backup bo, which is fenced after
 *            the unbind command is committed.
 *
 * Emits a BIND_GB_SHADER command with SVGA3D_INVALID_ID as the mob id,
 * then fences the backup buffer so it is not reused before the device
 * has finished with it. Returns 0 on success, -ENOMEM on FIFO failure.
 */
static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBShader body;
        } *cmd;
        struct vmw_fence_obj *fence;

        BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        /* Invalid mob id == detach the backup buffer. */
        cmd->body.mobid = SVGA3D_INVALID_ID;
        cmd->body.offsetInBytes = 0;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        /* Handles fence == NULL (fence creation failure) internally. */
        vmw_fence_single_bo(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}
  248.  
/**
 * vmw_gb_shader_destroy - Destroy callback: undefine the shader on the
 * device.
 *
 * @res: The shader resource.
 *
 * Kills any context bindings referencing the shader (under the binding
 * mutex), emits a DESTROY_GB_SHADER command, releases the device id and
 * drops the 3D-resource count taken in vmw_gb_shader_create. Idempotent:
 * returns 0 immediately when the resource has no device id. Returns 0 on
 * success, -ENOMEM on FIFO reservation failure.
 */
static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBShader body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        mutex_lock(&dev_priv->binding_mutex);
        vmw_context_binding_res_list_kill(&res->binding_head);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "destruction.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        mutex_unlock(&dev_priv->binding_mutex);
        vmw_resource_release_id(res);
        vmw_3d_resource_dec(dev_priv, false);

        return 0;
}
  281.  
  282. /**
  283.  * User-space shader management:
  284.  */
  285.  
/**
 * vmw_user_shader_base_to_res - Convert a TTM base object to the shader
 * resource it wraps.
 *
 * @base: The @base member of a struct vmw_user_shader.
 */
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_shader, base)->
                 shader.res);
}
  292.  
/**
 * vmw_user_shader_free - res_free destructor for user shaders.
 *
 * @res: The shader resource embedded in a struct vmw_user_shader.
 *
 * NOTE(review): the actual freeing and memory-accounting release are
 * commented out in this (KolibriOS) port, so the ushader allocation and
 * its ttm_mem_global accounting are currently leaked here, and both
 * locals are unused — presumably intentional while porting; confirm
 * against the upstream Linux implementation before re-enabling.
 */
static void vmw_user_shader_free(struct vmw_resource *res)
{
        struct vmw_user_shader *ushader =
                container_of(res, struct vmw_user_shader, shader.res);
        struct vmw_private *dev_priv = res->dev_priv;

//   ttm_base_object_kfree(ushader, base);
//   ttm_mem_global_free(vmw_mem_glob(dev_priv),
//               vmw_user_shader_size);
}
  303.  
  304. /**
  305.  * This function is called when user space has no more references on the
  306.  * base object. It releases the base-object's reference on the resource object.
  307.  */
  308.  
  309. static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
  310. {
  311.         struct ttm_base_object *base = *p_base;
  312.         struct vmw_resource *res = vmw_user_shader_base_to_res(base);
  313.  
  314.         *p_base = NULL;
  315.         vmw_resource_unreference(&res);
  316. }
  317.  
  318. int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
  319.                               struct drm_file *file_priv)
  320. {
  321.         struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
  322.         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  323.  
  324.         return ttm_ref_object_base_unref(tfile, arg->handle,
  325.                                          TTM_REF_USAGE);
  326. }
  327.  
  328. #if 0
  329. int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
  330.                              struct drm_file *file_priv)
  331. {
  332.         struct vmw_private *dev_priv = vmw_priv(dev);
  333.         struct vmw_user_shader *ushader;
  334.         struct vmw_resource *res;
  335.         struct vmw_resource *tmp;
  336.         struct drm_vmw_shader_create_arg *arg =
  337.                 (struct drm_vmw_shader_create_arg *)data;
  338.         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  339.         struct vmw_master *vmaster = vmw_master(file_priv->master);
  340.         struct vmw_dma_buffer *buffer = NULL;
  341.         SVGA3dShaderType shader_type;
  342.         int ret;
  343.  
  344.         if (arg->buffer_handle != SVGA3D_INVALID_ID) {
  345.                 ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
  346.                                              &buffer);
  347.                 if (unlikely(ret != 0)) {
  348.                         DRM_ERROR("Could not find buffer for shader "
  349.                                   "creation.\n");
  350.                         return ret;
  351.                 }
  352.  
  353.                 if ((u64)buffer->base.num_pages * PAGE_SIZE <
  354.                     (u64)arg->size + (u64)arg->offset) {
  355.                         DRM_ERROR("Illegal buffer- or shader size.\n");
  356.                         ret = -EINVAL;
  357.                         goto out_bad_arg;
  358.                 }
  359.         }
  360.  
  361.         switch (arg->shader_type) {
  362.         case drm_vmw_shader_type_vs:
  363.                 shader_type = SVGA3D_SHADERTYPE_VS;
  364.                 break;
  365.         case drm_vmw_shader_type_ps:
  366.                 shader_type = SVGA3D_SHADERTYPE_PS;
  367.                 break;
  368.         case drm_vmw_shader_type_gs:
  369.                 shader_type = SVGA3D_SHADERTYPE_GS;
  370.                 break;
  371.         default:
  372.                 DRM_ERROR("Illegal shader type.\n");
  373.                 ret = -EINVAL;
  374.                 goto out_bad_arg;
  375.         }
  376.  
  377.         /*
  378.          * Approximate idr memory usage with 128 bytes. It will be limited
  379.          * by maximum number_of shaders anyway.
  380.          */
  381.  
  382.         if (unlikely(vmw_user_shader_size == 0))
  383.                 vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
  384.                         + 128;
  385.  
  386.         ret = ttm_read_lock(&vmaster->lock, true);
  387.         if (unlikely(ret != 0))
  388.                 return ret;
  389.  
  390.         ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
  391.                                    vmw_user_shader_size,
  392.                                    false, true);
  393.         if (unlikely(ret != 0)) {
  394.                 if (ret != -ERESTARTSYS)
  395.                         DRM_ERROR("Out of graphics memory for shader"
  396.                                   " creation.\n");
  397.                 goto out_unlock;
  398.         }
  399.  
  400.         ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
  401.         if (unlikely(ushader == NULL)) {
  402.                 ttm_mem_global_free(vmw_mem_glob(dev_priv),
  403.                                     vmw_user_shader_size);
  404.                 ret = -ENOMEM;
  405.                 goto out_unlock;
  406.         }
  407.  
  408.         res = &ushader->shader.res;
  409.         ushader->base.shareable = false;
  410.         ushader->base.tfile = NULL;
  411.  
  412.         /*
  413.          * From here on, the destructor takes over resource freeing.
  414.          */
  415.  
  416.         ret = vmw_gb_shader_init(dev_priv, res, arg->size,
  417.                                  arg->offset, shader_type, buffer,
  418.                                  vmw_user_shader_free);
  419.         if (unlikely(ret != 0))
  420.                 goto out_unlock;
  421.  
  422.         tmp = vmw_resource_reference(res);
  423.         ret = ttm_base_object_init(tfile, &ushader->base, false,
  424.                                    VMW_RES_SHADER,
  425.                                    &vmw_user_shader_base_release, NULL);
  426.  
  427.         if (unlikely(ret != 0)) {
  428.                 vmw_resource_unreference(&tmp);
  429.                 goto out_err;
  430.         }
  431.  
  432.         arg->shader_handle = ushader->base.hash.key;
  433. out_err:
  434.         vmw_resource_unreference(&res);
  435. out_unlock:
  436.         ttm_read_unlock(&vmaster->lock);
  437. out_bad_arg:
  438.         vmw_dmabuf_unreference(&buffer);
  439.  
  440.         return ret;
  441.  
  442. }
  443. #endif
  444.