Subversion Repositories Kolibri OS

Rev

Rev 4075 | Rev 5078 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /**************************************************************************
  2.  *
  3.  * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
  4.  * All Rights Reserved.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the
  8.  * "Software"), to deal in the Software without restriction, including
  9.  * without limitation the rights to use, copy, modify, merge, publish,
  10.  * distribute, sub license, and/or sell copies of the Software, and to
  11.  * permit persons to whom the Software is furnished to do so, subject to
  12.  * the following conditions:
  13.  *
  14.  * The above copyright notice and this permission notice (including the
  15.  * next paragraph) shall be included in all copies or substantial portions
  16.  * of the Software.
  17.  *
  18.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20.  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21.  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22.  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23.  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24.  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25.  *
  26.  **************************************************************************/
  27.  
  28. #include "vmwgfx_drv.h"
  29. #include "vmwgfx_resource_priv.h"
  30. #include <ttm/ttm_placement.h>
  31. #include "svga3d_surfacedefs.h"
  32.  
/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object handling user-space visibility
 *                  and inter-process sharing.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
};
  45.  
/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 *                  (Added to the guest pointer offset when encoding DMA
 *                  commands, so presumably a byte offset — see
 *                  vmw_surface_dma_encode().)
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};
  59.  
/* Forward declarations of the legacy and guest-backed surface res_func
 * callbacks defined below. */
static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);


/* Conversion table mapping user-visible TTM base objects of type
 * VMW_RES_SURFACE to their embedded struct vmw_resource. */
static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


/* Cached TTM accounting size of a user surface object; computed lazily
 * on first surface creation (see the define ioctl below). */
static uint64_t vmw_user_surface_size;
  90.  
/* Resource callbacks for legacy (non-guest-backed) surfaces: the device
 * owns the surface storage, so no backup buffer is required up front. */
static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

/* Resource callbacks for guest-backed surfaces: storage lives in a MOB,
 * so a backup buffer is mandatory. */
static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind
};
  114.  
/**
 * struct vmw_surface_dma - SVGA3D DMA command
 *
 * Wire-format layout of a complete SURFACE_DMA fifo command:
 * header, body, one copy box and the DMA suffix, in that order.
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 *
 * Header plus body; the variable-length SVGA3dSize array follows the
 * body directly in the fifo (see vmw_surface_define_encode()).
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};
  140.  
  141.  
  142. /**
  143.  * vmw_surface_dma_size - Compute fifo size for a dma command.
  144.  *
  145.  * @srf: Pointer to a struct vmw_surface
  146.  *
  147.  * Computes the required size for a surface dma command for backup or
  148.  * restoration of the surface represented by @srf.
  149.  */
  150. static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
  151. {
  152.         return srf->num_sizes * sizeof(struct vmw_surface_dma);
  153. }
  154.  
  155.  
  156. /**
  157.  * vmw_surface_define_size - Compute fifo size for a surface define command.
  158.  *
  159.  * @srf: Pointer to a struct vmw_surface
  160.  *
  161.  * Computes the required size for a surface define command for the definition
  162.  * of the surface represented by @srf.
  163.  */
  164. static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
  165. {
  166.         return sizeof(struct vmw_surface_define) + srf->num_sizes *
  167.                 sizeof(SVGA3dSize);
  168. }
  169.  
  170.  
  171. /**
  172.  * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
  173.  *
  174.  * Computes the required size for a surface destroy command for the destruction
  175.  * of a hw surface.
  176.  */
  177. static inline uint32_t vmw_surface_destroy_size(void)
  178. {
  179.         return sizeof(struct vmw_surface_destroy);
  180. }
  181.  
  182. /**
  183.  * vmw_surface_destroy_encode - Encode a surface_destroy command.
  184.  *
  185.  * @id: The surface id
  186.  * @cmd_space: Pointer to memory area in which the commands should be encoded.
  187.  */
  188. static void vmw_surface_destroy_encode(uint32_t id,
  189.                                        void *cmd_space)
  190. {
  191.         struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
  192.                 cmd_space;
  193.  
  194.         cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
  195.         cmd->header.size = sizeof(cmd->body);
  196.         cmd->body.sid = id;
  197. }
  198.  
  199. /**
  200.  * vmw_surface_define_encode - Encode a surface_define command.
  201.  *
  202.  * @srf: Pointer to a struct vmw_surface object.
  203.  * @cmd_space: Pointer to memory area in which the commands should be encoded.
  204.  */
  205. static void vmw_surface_define_encode(const struct vmw_surface *srf,
  206.                                       void *cmd_space)
  207. {
  208.         struct vmw_surface_define *cmd = (struct vmw_surface_define *)
  209.                 cmd_space;
  210.         struct drm_vmw_size *src_size;
  211.         SVGA3dSize *cmd_size;
  212.         uint32_t cmd_len;
  213.         int i;
  214.  
  215.         cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
  216.  
  217.         cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
  218.         cmd->header.size = cmd_len;
  219.         cmd->body.sid = srf->res.id;
  220.         cmd->body.surfaceFlags = srf->flags;
  221.         cmd->body.format = cpu_to_le32(srf->format);
  222.         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
  223.                 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
  224.  
  225.         cmd += 1;
  226.         cmd_size = (SVGA3dSize *) cmd;
  227.         src_size = srf->sizes;
  228.  
  229.         for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
  230.                 cmd_size->width = src_size->width;
  231.                 cmd_size->height = src_size->height;
  232.                 cmd_size->depth = src_size->depth;
  233.         }
  234. }
  235.  
  236. /**
  237.  * vmw_surface_dma_encode - Encode a surface_dma command.
  238.  *
  239.  * @srf: Pointer to a struct vmw_surface object.
  240.  * @cmd_space: Pointer to memory area in which the commands should be encoded.
  241.  * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
  242.  * should be placed or read from.
  243.  * @to_surface: Boolean whether to DMA to the surface or from the surface.
  244.  */
  245. static void vmw_surface_dma_encode(struct vmw_surface *srf,
  246.                                    void *cmd_space,
  247.                                    const SVGAGuestPtr *ptr,
  248.                                    bool to_surface)
  249. {
  250.         uint32_t i;
  251.         struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
  252.         const struct svga3d_surface_desc *desc =
  253.                 svga3dsurface_get_desc(srf->format);
  254.  
  255.         for (i = 0; i < srf->num_sizes; ++i) {
  256.                 SVGA3dCmdHeader *header = &cmd->header;
  257.                 SVGA3dCmdSurfaceDMA *body = &cmd->body;
  258.                 SVGA3dCopyBox *cb = &cmd->cb;
  259.                 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
  260.                 const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
  261.                 const struct drm_vmw_size *cur_size = &srf->sizes[i];
  262.  
  263.                 header->id = SVGA_3D_CMD_SURFACE_DMA;
  264.                 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
  265.  
  266.                 body->guest.ptr = *ptr;
  267.                 body->guest.ptr.offset += cur_offset->bo_offset;
  268.                 body->guest.pitch = svga3dsurface_calculate_pitch(desc,
  269.                                                                   cur_size);
  270.                 body->host.sid = srf->res.id;
  271.                 body->host.face = cur_offset->face;
  272.                 body->host.mipmap = cur_offset->mip;
  273.                 body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
  274.                                   SVGA3D_READ_HOST_VRAM);
  275.                 cb->x = 0;
  276.                 cb->y = 0;
  277.                 cb->z = 0;
  278.                 cb->srcx = 0;
  279.                 cb->srcy = 0;
  280.                 cb->srcz = 0;
  281.                 cb->w = cur_size->width;
  282.                 cb->h = cur_size->height;
  283.                 cb->d = cur_size->depth;
  284.  
  285.                 suffix->suffixSize = sizeof(*suffix);
  286.                 suffix->maximumOffset =
  287.                         svga3dsurface_get_image_buffer_size(desc, cur_size,
  288.                                                             body->guest.pitch);
  289.                 suffix->flags.discard = 0;
  290.                 suffix->flags.unsynchronized = 0;
  291.                 suffix->flags.reserved = 0;
  292.                 ++cmd;
  293.         }
  294. };
  295.  
  296.  
  297. /**
  298.  * vmw_hw_surface_destroy - destroy a Device surface
  299.  *
  300.  * @res:        Pointer to a struct vmw_resource embedded in a struct
  301.  *              vmw_surface.
  302.  *
  303.  * Destroys a the device surface associated with a struct vmw_surface if
  304.  * any, and adjusts accounting and resource count accordingly.
  305.  */
  306. static void vmw_hw_surface_destroy(struct vmw_resource *res)
  307. {
  308.  
  309.         struct vmw_private *dev_priv = res->dev_priv;
  310.         struct vmw_surface *srf;
  311.         void *cmd;
  312.  
  313.         if (res->func->destroy == vmw_gb_surface_destroy) {
  314.                 (void) vmw_gb_surface_destroy(res);
  315.                 return;
  316.         }
  317.  
  318.         if (res->id != -1) {
  319.  
  320.                 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
  321.                 if (unlikely(cmd == NULL)) {
  322.                         DRM_ERROR("Failed reserving FIFO space for surface "
  323.                                   "destruction.\n");
  324.                         return;
  325.                 }
  326.  
  327.                 vmw_surface_destroy_encode(res->id, cmd);
  328.                 vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
  329.  
  330.                 /*
  331.                  * used_memory_size_atomic, or separate lock
  332.                  * to avoid taking dev_priv::cmdbuf_mutex in
  333.                  * the destroy path.
  334.                  */
  335.  
  336.                 mutex_lock(&dev_priv->cmdbuf_mutex);
  337.                 srf = vmw_res_to_srf(res);
  338.                 dev_priv->used_memory_size -= res->backup_size;
  339.                 mutex_unlock(&dev_priv->cmdbuf_mutex);
  340.         }
  341.         vmw_3d_resource_dec(dev_priv, false);
  342. }
  343.  
  344. /**
  345.  * vmw_legacy_srf_create - Create a device surface as part of the
  346.  * resource validation process.
  347.  *
  348.  * @res: Pointer to a struct vmw_surface.
  349.  *
  350.  * If the surface doesn't have a hw id.
  351.  *
  352.  * Returns -EBUSY if there wasn't sufficient device resources to
  353.  * complete the validation. Retry after freeing up resources.
  354.  *
  355.  * May return other errors if the kernel is out of guest resources.
  356.  */
  357. static int vmw_legacy_srf_create(struct vmw_resource *res)
  358. {
  359.         struct vmw_private *dev_priv = res->dev_priv;
  360.         struct vmw_surface *srf;
  361.         uint32_t submit_size;
  362.         uint8_t *cmd;
  363.         int ret;
  364.  
  365.         if (likely(res->id != -1))
  366.                 return 0;
  367.  
  368.         srf = vmw_res_to_srf(res);
  369.         if (unlikely(dev_priv->used_memory_size + res->backup_size >=
  370.                      dev_priv->memory_size))
  371.                 return -EBUSY;
  372.  
  373.         /*
  374.          * Alloc id for the resource.
  375.          */
  376.  
  377.         ret = vmw_resource_alloc_id(res);
  378.         if (unlikely(ret != 0)) {
  379.                 DRM_ERROR("Failed to allocate a surface id.\n");
  380.                 goto out_no_id;
  381.         }
  382.  
  383.         if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
  384.                 ret = -EBUSY;
  385.                 goto out_no_fifo;
  386.         }
  387.  
  388.         /*
  389.          * Encode surface define- commands.
  390.          */
  391.  
  392.         submit_size = vmw_surface_define_size(srf);
  393.         cmd = vmw_fifo_reserve(dev_priv, submit_size);
  394.         if (unlikely(cmd == NULL)) {
  395.                 DRM_ERROR("Failed reserving FIFO space for surface "
  396.                           "creation.\n");
  397.                 ret = -ENOMEM;
  398.                 goto out_no_fifo;
  399.         }
  400.  
  401.         vmw_surface_define_encode(srf, cmd);
  402.         vmw_fifo_commit(dev_priv, submit_size);
  403.         /*
  404.          * Surface memory usage accounting.
  405.          */
  406.  
  407.         dev_priv->used_memory_size += res->backup_size;
  408.         return 0;
  409.  
  410. out_no_fifo:
  411.         vmw_resource_release_id(res);
  412. out_no_id:
  413.         return ret;
  414. }
  415.  
  416. /**
  417.  * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
  418.  *
  419.  * @res:            Pointer to a struct vmw_res embedded in a struct
  420.  *                  vmw_surface.
  421.  * @val_buf:        Pointer to a struct ttm_validate_buffer containing
  422.  *                  information about the backup buffer.
  423.  * @bind:           Boolean wether to DMA to the surface.
  424.  *
  425.  * Transfer backup data to or from a legacy surface as part of the
  426.  * validation process.
  427.  * May return other errors if the kernel is out of guest resources.
  428.  * The backup buffer will be fenced or idle upon successful completion,
  429.  * and if the surface needs persistent backup storage, the backup buffer
  430.  * will also be returned reserved iff @bind is true.
  431.  */
  432. static int vmw_legacy_srf_dma(struct vmw_resource *res,
  433.                               struct ttm_validate_buffer *val_buf,
  434.                               bool bind)
  435. {
  436.         SVGAGuestPtr ptr;
  437.         struct vmw_fence_obj *fence;
  438.         uint32_t submit_size;
  439.         struct vmw_surface *srf = vmw_res_to_srf(res);
  440.         uint8_t *cmd;
  441.         struct vmw_private *dev_priv = res->dev_priv;
  442.  
  443.         BUG_ON(val_buf->bo == NULL);
  444.  
  445.         submit_size = vmw_surface_dma_size(srf);
  446.         cmd = vmw_fifo_reserve(dev_priv, submit_size);
  447.         if (unlikely(cmd == NULL)) {
  448.                 DRM_ERROR("Failed reserving FIFO space for surface "
  449.                           "DMA.\n");
  450.                 return -ENOMEM;
  451.         }
  452.         vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
  453.         vmw_surface_dma_encode(srf, cmd, &ptr, bind);
  454.  
  455.         vmw_fifo_commit(dev_priv, submit_size);
  456.  
  457.         /*
  458.          * Create a fence object and fence the backup buffer.
  459.          */
  460.  
  461.         (void) vmw_execbuf_fence_commands(NULL, dev_priv,
  462.                                           &fence, NULL);
  463.  
  464.         vmw_fence_single_bo(val_buf->bo, fence);
  465.  
  466.         if (likely(fence != NULL))
  467.                 vmw_fence_obj_unreference(&fence);
  468.  
  469.         return 0;
  470. }
  471.  
  472. /**
  473.  * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
  474.  *                       surface validation process.
  475.  *
  476.  * @res:            Pointer to a struct vmw_res embedded in a struct
  477.  *                  vmw_surface.
  478.  * @val_buf:        Pointer to a struct ttm_validate_buffer containing
  479.  *                  information about the backup buffer.
  480.  *
  481.  * This function will copy backup data to the surface if the
  482.  * backup buffer is dirty.
  483.  */
  484. static int vmw_legacy_srf_bind(struct vmw_resource *res,
  485.                                struct ttm_validate_buffer *val_buf)
  486. {
  487.         if (!res->backup_dirty)
  488.                 return 0;
  489.  
  490.         return vmw_legacy_srf_dma(res, val_buf, true);
  491. }
  492.  
  493.  
  494. /**
  495.  * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
  496.  *                         surface eviction process.
  497.  *
  498.  * @res:            Pointer to a struct vmw_res embedded in a struct
  499.  *                  vmw_surface.
  500.  * @val_buf:        Pointer to a struct ttm_validate_buffer containing
  501.  *                  information about the backup buffer.
  502.  *
  503.  * This function will copy backup data from the surface.
  504.  */
  505. static int vmw_legacy_srf_unbind(struct vmw_resource *res,
  506.                                  bool readback,
  507.                                  struct ttm_validate_buffer *val_buf)
  508. {
  509.         if (unlikely(readback))
  510.                 return vmw_legacy_srf_dma(res, val_buf, false);
  511.         return 0;
  512. }
  513.  
  514. /**
  515.  * vmw_legacy_srf_destroy - Destroy a device surface as part of a
  516.  *                          resource eviction process.
  517.  *
  518.  * @res:            Pointer to a struct vmw_res embedded in a struct
  519.  *                  vmw_surface.
  520.  */
  521. static int vmw_legacy_srf_destroy(struct vmw_resource *res)
  522. {
  523.         struct vmw_private *dev_priv = res->dev_priv;
  524.         uint32_t submit_size;
  525.         uint8_t *cmd;
  526.  
  527.         BUG_ON(res->id == -1);
  528.  
  529.         /*
  530.          * Encode the dma- and surface destroy commands.
  531.          */
  532.  
  533.         submit_size = vmw_surface_destroy_size();
  534.         cmd = vmw_fifo_reserve(dev_priv, submit_size);
  535.         if (unlikely(cmd == NULL)) {
  536.                 DRM_ERROR("Failed reserving FIFO space for surface "
  537.                           "eviction.\n");
  538.                 return -ENOMEM;
  539.         }
  540.  
  541.         vmw_surface_destroy_encode(res->id, cmd);
  542.         vmw_fifo_commit(dev_priv, submit_size);
  543.  
  544.         /*
  545.          * Surface memory usage accounting.
  546.          */
  547.  
  548.         dev_priv->used_memory_size -= res->backup_size;
  549.  
  550.         /*
  551.          * Release the surface ID.
  552.          */
  553.  
  554.         vmw_resource_release_id(res);
  555.  
  556.         return 0;
  557. }
  558.  
  559.  
  560. /**
  561.  * vmw_surface_init - initialize a struct vmw_surface
  562.  *
  563.  * @dev_priv:       Pointer to a device private struct.
  564.  * @srf:            Pointer to the struct vmw_surface to initialize.
  565.  * @res_free:       Pointer to a resource destructor used to free
  566.  *                  the object.
  567.  */
  568. static int vmw_surface_init(struct vmw_private *dev_priv,
  569.                             struct vmw_surface *srf,
  570.                             void (*res_free) (struct vmw_resource *res))
  571. {
  572.         int ret;
  573.         struct vmw_resource *res = &srf->res;
  574.  
  575.         BUG_ON(res_free == NULL);
  576.         if (!dev_priv->has_mob)
  577.         (void) vmw_3d_resource_inc(dev_priv, false);
  578.         ret = vmw_resource_init(dev_priv, res, true, res_free,
  579.                                 (dev_priv->has_mob) ? &vmw_gb_surface_func :
  580.                                 &vmw_legacy_surface_func);
  581.  
  582.         if (unlikely(ret != 0)) {
  583.                 if (!dev_priv->has_mob)
  584.                 vmw_3d_resource_dec(dev_priv, false);
  585.                 res_free(res);
  586.                 return ret;
  587.         }
  588.  
  589.         /*
  590.          * The surface won't be visible to hardware until a
  591.          * surface validate.
  592.          */
  593.  
  594.         vmw_resource_activate(res, vmw_hw_surface_destroy);
  595.         return ret;
  596. }
  597.  
  598. /**
  599.  * vmw_user_surface_base_to_res - TTM base object to resource converter for
  600.  *                                user visible surfaces
  601.  *
  602.  * @base:           Pointer to a TTM base object
  603.  *
  604.  * Returns the struct vmw_resource embedded in a struct vmw_surface
  605.  * for the user-visible object identified by the TTM base object @base.
  606.  */
  607. static struct vmw_resource *
  608. vmw_user_surface_base_to_res(struct ttm_base_object *base)
  609. {
  610.         return &(container_of(base, struct vmw_user_surface,
  611.                               prime.base)->srf.res);
  612. }
  613.  
/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 *
 * Frees the surface's size/offset arrays, cursor snooper image, and
 * returns the TTM accounting memory.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	/* NOTE(review): the base-object kfree is disabled in this port, so
	 * user_srf itself appears to be leaked here — confirm who (if
	 * anyone) frees it in the KolibriOS port. */
//   ttm_base_object_kfree(user_srf, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
  633.  
/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 *                                 destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
  653.  
  654. #if 0
  655. /**
  656.  * vmw_user_surface_define_ioctl - Ioctl function implementing
  657.  *                                  the user surface define functionality.
  658.  *
  659.  * @dev:            Pointer to a struct drm_device.
  660.  * @data:           Pointer to data copied from / to user-space.
  661.  * @file_priv:      Pointer to a drm file private structure.
  662.  */
  663. int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
  664.                              struct drm_file *file_priv)
  665. {
  666.         struct vmw_private *dev_priv = vmw_priv(dev);
  667.         struct vmw_user_surface *user_srf;
  668.         struct vmw_surface *srf;
  669.         struct vmw_resource *res;
  670.         struct vmw_resource *tmp;
  671.         union drm_vmw_surface_create_arg *arg =
  672.             (union drm_vmw_surface_create_arg *)data;
  673.         struct drm_vmw_surface_create_req *req = &arg->req;
  674.         struct drm_vmw_surface_arg *rep = &arg->rep;
  675.         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  676.         struct drm_vmw_size __user *user_sizes;
  677.         int ret;
  678.         int i, j;
  679.         uint32_t cur_bo_offset;
  680.         struct drm_vmw_size *cur_size;
  681.         struct vmw_surface_offset *cur_offset;
  682.         uint32_t num_sizes;
  683.         uint32_t size;
  684.         struct vmw_master *vmaster = vmw_master(file_priv->master);
  685.         const struct svga3d_surface_desc *desc;
  686.  
  687.         if (unlikely(vmw_user_surface_size == 0))
  688.                 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
  689.                         128;
  690.  
  691.         num_sizes = 0;
  692.         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
  693.                 num_sizes += req->mip_levels[i];
  694.  
  695.         if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
  696.             DRM_VMW_MAX_MIP_LEVELS)
  697.                 return -EINVAL;
  698.  
  699.         size = vmw_user_surface_size + 128 +
  700.                 ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
  701.                 ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
  702.  
  703.  
  704.         desc = svga3dsurface_get_desc(req->format);
  705.         if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
  706.                 DRM_ERROR("Invalid surface format for surface creation.\n");
  707.                 return -EINVAL;
  708.         }
  709.  
  710.         ret = ttm_read_lock(&vmaster->lock, true);
  711.         if (unlikely(ret != 0))
  712.                 return ret;
  713.  
  714.         ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
  715.                                    size, false, true);
  716.         if (unlikely(ret != 0)) {
  717.                 if (ret != -ERESTARTSYS)
  718.                         DRM_ERROR("Out of graphics memory for surface"
  719.                                   " creation.\n");
  720.                 goto out_unlock;
  721.         }
  722.  
  723.         user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
  724.         if (unlikely(user_srf == NULL)) {
  725.                 ret = -ENOMEM;
  726.                 goto out_no_user_srf;
  727.         }
  728.  
  729.         srf = &user_srf->srf;
  730.         res = &srf->res;
  731.  
  732.         srf->flags = req->flags;
  733.         srf->format = req->format;
  734.         srf->scanout = req->scanout;
  735.  
  736.         memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
  737.         srf->num_sizes = num_sizes;
  738.         user_srf->size = size;
  739.  
  740.         srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
  741.         if (unlikely(srf->sizes == NULL)) {
  742.                 ret = -ENOMEM;
  743.                 goto out_no_sizes;
  744.         }
  745.         srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
  746.                                GFP_KERNEL);
  747.         if (unlikely(srf->sizes == NULL)) {
  748.                 ret = -ENOMEM;
  749.                 goto out_no_offsets;
  750.         }
  751.  
  752.         user_sizes = (struct drm_vmw_size __user *)(unsigned long)
  753.             req->size_addr;
  754.  
  755.         ret = copy_from_user(srf->sizes, user_sizes,
  756.                              srf->num_sizes * sizeof(*srf->sizes));
  757.         if (unlikely(ret != 0)) {
  758.                 ret = -EFAULT;
  759.                 goto out_no_copy;
  760.         }
  761.  
  762.         srf->base_size = *srf->sizes;
  763.         srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
  764.         srf->multisample_count = 0;
  765.  
  766.         cur_bo_offset = 0;
  767.         cur_offset = srf->offsets;
  768.         cur_size = srf->sizes;
  769.  
  770.         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
  771.                 for (j = 0; j < srf->mip_levels[i]; ++j) {
  772.                         uint32_t stride = svga3dsurface_calculate_pitch
  773.                                 (desc, cur_size);
  774.  
  775.                         cur_offset->face = i;
  776.                         cur_offset->mip = j;
  777.                         cur_offset->bo_offset = cur_bo_offset;
  778.                         cur_bo_offset += svga3dsurface_get_image_buffer_size
  779.                                 (desc, cur_size, stride);
  780.                         ++cur_offset;
  781.                         ++cur_size;
  782.                 }
  783.         }
  784.         res->backup_size = cur_bo_offset;
  785.         if (srf->scanout &&
  786.             srf->num_sizes == 1 &&
  787.             srf->sizes[0].width == 64 &&
  788.             srf->sizes[0].height == 64 &&
  789.             srf->format == SVGA3D_A8R8G8B8) {
  790.  
  791.                 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
  792.                 /* clear the image */
  793.                 if (srf->snooper.image) {
  794.                         memset(srf->snooper.image, 0x00, 64 * 64 * 4);
  795.                 } else {
  796.                         DRM_ERROR("Failed to allocate cursor_image\n");
  797.                         ret = -ENOMEM;
  798.                         goto out_no_copy;
  799.                 }
  800.         } else {
  801.                 srf->snooper.image = NULL;
  802.         }
  803.         srf->snooper.crtc = NULL;
  804.  
  805.         user_srf->prime.base.shareable = false;
  806.         user_srf->prime.base.tfile = NULL;
  807.  
  808.         /**
  809.          * From this point, the generic resource management functions
  810.          * destroy the object on failure.
  811.          */
  812.  
  813.         ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
  814.         if (unlikely(ret != 0))
  815.                 goto out_unlock;
  816.  
  817.         tmp = vmw_resource_reference(&srf->res);
  818.         ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
  819.                                    req->shareable, VMW_RES_SURFACE,
  820.                                    &vmw_user_surface_base_release, NULL);
  821.  
  822.         if (unlikely(ret != 0)) {
  823.                 vmw_resource_unreference(&tmp);
  824.                 vmw_resource_unreference(&res);
  825.                 goto out_unlock;
  826.         }
  827.  
  828.         rep->sid = user_srf->prime.base.hash.key;
  829.         vmw_resource_unreference(&res);
  830.  
  831.         ttm_read_unlock(&vmaster->lock);
  832.         return 0;
  833. out_no_copy:
  834.         kfree(srf->offsets);
  835. out_no_offsets:
  836.         kfree(srf->sizes);
  837. out_no_sizes:
  838.         ttm_prime_object_kfree(user_srf, prime);
  839. out_no_user_srf:
  840.         ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
  841. out_unlock:
  842.         ttm_read_unlock(&vmaster->lock);
  843.         return ret;
  844. }
  845.  
  846. /**
  847.  * vmw_surface_reference_ioctl - Ioctl function implementing
  848.  *                               the user surface reference functionality.
  849.  *
  850.  * @dev:            Pointer to a struct drm_device.
  851.  * @data:           Pointer to data copied from / to user-space.
  852.  * @file_priv:      Pointer to a drm file private structure.
  853.  */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	/* The reply reuses the create_req layout so the caller gets back
	 * the parameters the surface was originally defined with. */
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	/* Look up the base object by the user-supplied surface id; this
	 * takes a reference that is dropped on every exit path below. */
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	/* Reject handles that are not surfaces (ret is still -EINVAL). */
	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	/* Register a usage reference on the surface for the calling
	 * client's file, so it can use and later release the handle. */
	ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
				 TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	/* Copy the surface parameters back to user-space. */
	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	/* The per-mip size array is only copied out if the caller
	 * supplied a destination buffer; ret is 0 here on success. */
	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		/* copy_to_user returns the number of bytes not copied. */
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	/* Drop the lookup reference taken above. */
	ttm_base_object_unref(&base);

	return ret;
}
  908.  
  909. #endif
  910.