  1. /**************************************************************************
  2.  *
  3.  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
  4.  * All Rights Reserved.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the
  8.  * "Software"), to deal in the Software without restriction, including
  9.  * without limitation the rights to use, copy, modify, merge, publish,
  10.  * distribute, sub license, and/or sell copies of the Software, and to
  11.  * permit persons to whom the Software is furnished to do so, subject to
  12.  * the following conditions:
  13.  *
  14.  * The above copyright notice and this permission notice (including the
  15.  * next paragraph) shall be included in all copies or substantial portions
  16.  * of the Software.
  17.  *
  18.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20.  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21.  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22.  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23.  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24.  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25.  *
  26.  **************************************************************************/
  27.  
  28. #include "vmwgfx_drv.h"
  29. #include <drm/vmwgfx_drm.h>
  30. #include <drm/ttm/ttm_object.h>
  31. #include <drm/ttm/ttm_placement.h>
  32. #include <drm/drmP.h>
  33. #include "vmwgfx_resource_priv.h"
  34.  
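/* Maximum number of consecutive eviction failures tolerated before
 * vmw_resource_validate() and vmw_resource_evict_type() give up. */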
  35. #define VMW_RES_EVICT_ERR_COUNT 10
  36.  
  37. struct vmw_user_dma_buffer {
  38.         struct ttm_prime_object prime;
  39.         struct vmw_dma_buffer dma;
  40. };
  41.  
  42. struct vmw_bo_user_rep {
  43.         uint32_t handle;
  44.         uint64_t map_handle;
  45. };
  46.  
  47. struct vmw_stream {
  48.         struct vmw_resource res;
  49.         uint32_t stream_id;
  50. };
  51.  
  52. struct vmw_user_stream {
  53.         struct ttm_base_object base;
  54.         struct vmw_stream stream;
  55. };
  56.  
  57.  
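/* Lazily computed TTM memory-accounting size of a user stream object;
 * see vmw_stream_claim_ioctl() and vmw_user_stream_free(). */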
  58. static uint64_t vmw_user_stream_size;
  59.  
  60. static const struct vmw_res_func vmw_stream_func = {
  61.         .res_type = vmw_res_stream,
  62.         .needs_backup = false,
  63.         .may_evict = false,
  64.         .type_name = "video streams",
  65.         .backup_placement = NULL,
  66.         .create = NULL,
  67.         .destroy = NULL,
  68.         .bind = NULL,
  69.         .unbind = NULL
  70. };
  71.  
  72. static inline struct vmw_dma_buffer *
  73. vmw_dma_buffer(struct ttm_buffer_object *bo)
  74. {
  75.         return container_of(bo, struct vmw_dma_buffer, base);
  76. }
  77.  
  78. static inline struct vmw_user_dma_buffer *
  79. vmw_user_dma_buffer(struct ttm_buffer_object *bo)
  80. {
  81.         struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
  82.         return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
  83. }
  84.  
  85. struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  86. {
  87.         kref_get(&res->kref);
  88.         return res;
  89. }
  90.  
  91.  
  92. /**
  93.  * vmw_resource_release_id - release a resource id to the id manager.
  94.  *
  95.  * @res: Pointer to the resource.
  96.  *
  97.  * Release the resource id to the resource id manager and set it to -1.
  98.  */
  99. void vmw_resource_release_id(struct vmw_resource *res)
  100. {
  101.         struct vmw_private *dev_priv = res->dev_priv;
  102.         struct idr *idr = &dev_priv->res_idr[res->func->res_type];
  103.  
  104.         write_lock(&dev_priv->resource_lock);
  105.         if (res->id != -1)
  106.                 idr_remove(idr, res->id);
  107.         res->id = -1;
  108.         write_unlock(&dev_priv->resource_lock);
  109. }
  110.  
  111. static void vmw_resource_release(struct kref *kref)
  112. {
  113.         struct vmw_resource *res =
  114.             container_of(kref, struct vmw_resource, kref);
  115.         struct vmw_private *dev_priv = res->dev_priv;
  116.         int id;
  117.         struct idr *idr = &dev_priv->res_idr[res->func->res_type];
  118.  
  119.         res->avail = false;
  120.         list_del_init(&res->lru_head);
  121.         write_unlock(&dev_priv->resource_lock);
  122.         if (res->backup) {
  123.                 struct ttm_buffer_object *bo = &res->backup->base;
  124.  
  125.                 ttm_bo_reserve(bo, false, false, false, 0);
  126.                 if (!list_empty(&res->mob_head) &&
  127.                     res->func->unbind != NULL) {
  128.                         struct ttm_validate_buffer val_buf;
  129.  
  130.                         val_buf.bo = bo;
  131.                         res->func->unbind(res, false, &val_buf);
  132.                 }
  133.                 res->backup_dirty = false;
  134.                 list_del_init(&res->mob_head);
  135.                 ttm_bo_unreserve(bo);
  136.                 vmw_dmabuf_unreference(&res->backup);
  137.         }
  138.  
  139.         if (likely(res->hw_destroy != NULL))
  140.                 res->hw_destroy(res);
  141.  
  142.         id = res->id;
  143.         if (res->res_free != NULL)
  144.                 res->res_free(res);
  145.         else
  146.                 kfree(res);
  147.  
  148.         write_lock(&dev_priv->resource_lock);
  149.  
  150.         if (id != -1)
  151.                 idr_remove(idr, id);
  152. }
  153.  
  154. void vmw_resource_unreference(struct vmw_resource **p_res)
  155. {
  156.         struct vmw_resource *res = *p_res;
  157.         struct vmw_private *dev_priv = res->dev_priv;
  158.  
  159.         *p_res = NULL;
  160.         write_lock(&dev_priv->resource_lock);
  161.         kref_put(&res->kref, vmw_resource_release);
  162.         write_unlock(&dev_priv->resource_lock);
  163. }
  164.  
  165.  
  166. /**
  167.  * vmw_resource_alloc_id - allocate a resource id from the id manager.
  168.  *
  169.  * @res: Pointer to the resource.
  170.  *
  171.  * Allocate the lowest free resource id from the id manager, and set
  172.  * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
  173.  */
  174. int vmw_resource_alloc_id(struct vmw_resource *res)
  175. {
  176.         struct vmw_private *dev_priv = res->dev_priv;
  177.         int ret;
  178.         struct idr *idr = &dev_priv->res_idr[res->func->res_type];
  179.  
  180.         BUG_ON(res->id != -1);
  181.  
  182.         idr_preload(GFP_KERNEL);
  183.         write_lock(&dev_priv->resource_lock);
  184.  
  185.         ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
  186.         if (ret >= 0)
  187.                 res->id = ret;
  188.  
  189.         write_unlock(&dev_priv->resource_lock);
  190.         idr_preload_end();
  191.         return ret < 0 ? ret : 0;
  192. }
  193.  
  194. /**
  195.  * vmw_resource_init - initialize a struct vmw_resource
  196.  *
  197.  * @dev_priv:       Pointer to a device private struct.
  198.  * @res:            The struct vmw_resource to initialize.
  200.  * @delay_id:       Boolean whether to defer device id allocation until
  201.  *                  the first validation.
  202.  * @res_free:       Resource destructor.
  203.  * @func:           Resource function table.
  204.  */
  205. int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
  206.                       bool delay_id,
  207.                       void (*res_free) (struct vmw_resource *res),
  208.                       const struct vmw_res_func *func)
  209. {
  210.         kref_init(&res->kref);
  211.         res->hw_destroy = NULL;
  212.         res->res_free = res_free;
  213.         res->avail = false;
  214.         res->dev_priv = dev_priv;
  215.         res->func = func;
  216.         INIT_LIST_HEAD(&res->lru_head);
  217.         INIT_LIST_HEAD(&res->mob_head);
  218.         INIT_LIST_HEAD(&res->binding_head);
  219.         res->id = -1;
  220.         res->backup = NULL;
  221.         res->backup_offset = 0;
  222.         res->backup_dirty = false;
  223.         res->res_dirty = false;
  224.         if (delay_id)
  225.                 return 0;
  226.         else
  227.                 return vmw_resource_alloc_id(res);
  228. }
  229.  
  230. /**
  231.  * vmw_resource_activate
  232.  *
  233.  * @res:        Pointer to the newly created resource
  234.  * @hw_destroy: Destroy function. NULL if none.
  235.  *
  236.  * Activate a resource after the hardware has been made aware of it.
  237.  * Set the destroy function to @hw_destroy. Typically this frees the
  238.  * resource and destroys the hardware resources associated with it.
  239.  * Activate basically means that the function vmw_resource_lookup will
  240.  * find it.
  241.  */
  242. void vmw_resource_activate(struct vmw_resource *res,
  243.                            void (*hw_destroy) (struct vmw_resource *))
  244. {
  245.         struct vmw_private *dev_priv = res->dev_priv;
  246.  
  247.         write_lock(&dev_priv->resource_lock);
  248.         res->avail = true;
  249.         res->hw_destroy = hw_destroy;
  250.         write_unlock(&dev_priv->resource_lock);
  251. }
  252.  
  253. struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
  254.                                          struct idr *idr, int id)
  255. {
  256.         struct vmw_resource *res;
  257.  
  258.         read_lock(&dev_priv->resource_lock);
  259.         res = idr_find(idr, id);
  260.         if (res && res->avail)
  261.                 kref_get(&res->kref);
  262.         else
  263.                 res = NULL;
  264.         read_unlock(&dev_priv->resource_lock);
  265.  
  266.         if (unlikely(res == NULL))
  267.                 return NULL;
  268.  
  269.         return res;
  270. }
  271.  
  272. /**
  273.  * vmw_user_resource_lookup_handle - lookup a struct resource from a
  274.  * TTM user-space handle and perform basic type checks
  275.  *
  276.  * @dev_priv:     Pointer to a device private struct
  277.  * @tfile:        Pointer to a struct ttm_object_file identifying the caller
  278.  * @handle:       The TTM user-space handle
  279.  * @converter:    Pointer to an object describing the resource type
  280.  * @p_res:        On successful return the location pointed to will contain
  281.  *                a pointer to a refcounted struct vmw_resource.
  282.  *
  283.  * If the handle can't be found or is associated with an incorrect resource
  284.  * type, -EINVAL will be returned.
  285.  */
  286. int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
  287.                                     struct ttm_object_file *tfile,
  288.                                     uint32_t handle,
  289.                                     const struct vmw_user_resource_conv
  290.                                     *converter,
  291.                                     struct vmw_resource **p_res)
  292. {
  293.         struct ttm_base_object *base;
  294.         struct vmw_resource *res;
  295.         int ret = -EINVAL;
  296.  
  297.         base = ttm_base_object_lookup(tfile, handle);
  298.         if (unlikely(base == NULL))
  299.                 return -EINVAL;
  300.  
  301.         if (unlikely(ttm_base_object_type(base) != converter->object_type))
  302.                 goto out_bad_resource;
  303.  
  304.         res = converter->base_obj_to_res(base);
  305.  
  306.         read_lock(&dev_priv->resource_lock);
  307.         if (!res->avail || res->res_free != converter->res_free) {
  308.                 read_unlock(&dev_priv->resource_lock);
  309.                 goto out_bad_resource;
  310.         }
  311.  
  312.         kref_get(&res->kref);
  313.         read_unlock(&dev_priv->resource_lock);
  314.  
  315.         *p_res = res;
  316.         ret = 0;
  317.  
  318. out_bad_resource:
  319.         ttm_base_object_unref(&base);
  320.  
  321.         return ret;
  322. }
  323.  
  324. /**
  325.  * Helper function that looks up either a surface or a dmabuf.
  326.  *
  327.  * The pointers pointed to by @out_surf and @out_buf need to be NULL.
  328.  */
  329. int vmw_user_lookup_handle(struct vmw_private *dev_priv,
  330.                            struct ttm_object_file *tfile,
  331.                            uint32_t handle,
  332.                            struct vmw_surface **out_surf,
  333.                            struct vmw_dma_buffer **out_buf)
  334. {
  335.         struct vmw_resource *res;
  336.         int ret;
  337.  
  338.         BUG_ON(*out_surf || *out_buf);
  339.  
  340.         ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
  341.                                               user_surface_converter,
  342.                                               &res);
  343.         if (!ret) {
  344.                 *out_surf = vmw_res_to_srf(res);
  345.                 return 0;
  346.         }
  347.  
  348.         *out_surf = NULL;
  349.         ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
  350.         return ret;
  351. }
  352.  
  353. /**
  354.  * Buffer management.
  355.  */
  356.  
  357. /**
  358.  * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
  359.  *
  360.  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  361.  * @size: The requested buffer size.
  362.  * @user: Whether this is an ordinary dma buffer or a user dma buffer.
  363.  */
  364. static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
  365.                                   bool user)
  366. {
  367.         static size_t struct_size, user_struct_size;
  368.         size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
  369.         size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
  370.  
  371.         if (unlikely(struct_size == 0)) {
  372.                 size_t backend_size = ttm_round_pot(vmw_tt_size);
  373.  
  374.                 struct_size = backend_size +
  375.                         ttm_round_pot(sizeof(struct vmw_dma_buffer));
  376.                 user_struct_size = backend_size +
  377.                         ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
  378.         }
  379.  
  380.         if (dev_priv->map_mode == vmw_dma_alloc_coherent)
  381.                 page_array_size +=
  382.                         ttm_round_pot(num_pages * sizeof(dma_addr_t));
  383.  
  384.         return ((user) ? user_struct_size : struct_size) +
  385.                 page_array_size;
  386. }
  387.  
  388. void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
  389. {
  390.         struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
  391.  
  392.         kfree(vmw_bo);
  393. }
  394.  
  395. static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
  396. {
  397.         struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
  398.  
  399. //   ttm_prime_object_kfree(vmw_user_bo, prime);
  400. }
  401.  
  402. int vmw_dmabuf_init(struct vmw_private *dev_priv,
  403.                     struct vmw_dma_buffer *vmw_bo,
  404.                     size_t size, struct ttm_placement *placement,
  405.                     bool interruptible,
  406.                     void (*bo_free) (struct ttm_buffer_object *bo))
  407. {
  408.         struct ttm_bo_device *bdev = &dev_priv->bdev;
  409.         size_t acc_size;
  410.         int ret;
  411.         bool user = (bo_free == &vmw_user_dmabuf_destroy);
  412.  
  413.         BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
  414.  
  415.         acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
  416.         memset(vmw_bo, 0, sizeof(*vmw_bo));
  417.  
  418.         INIT_LIST_HEAD(&vmw_bo->res_list);
  419.  
  420.         ret = ttm_bo_init(bdev, &vmw_bo->base, size,
  421.                           (user) ? ttm_bo_type_device :
  422.                           ttm_bo_type_kernel, placement,
  423.                           0, interruptible,
  424.                           NULL, acc_size, NULL, bo_free);
  425.         return ret;
  426. }
  427.  
  428. static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
  429. {
  430.         struct vmw_user_dma_buffer *vmw_user_bo;
  431.         struct ttm_base_object *base = *p_base;
  432.         struct ttm_buffer_object *bo;
  433.  
  434.         *p_base = NULL;
  435.  
  436.         if (unlikely(base == NULL))
  437.                 return;
  438.  
  439.         vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
  440.                                    prime.base);
  441.         bo = &vmw_user_bo->dma.base;
  442.         ttm_bo_unref(&bo);
  443. }
  444.  
  445. static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
  446.                                             enum ttm_ref_type ref_type)
  447. {
  448.         struct vmw_user_dma_buffer *user_bo;
  449.         user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
  450.  
  451.         switch (ref_type) {
  452.         case TTM_REF_SYNCCPU_WRITE:
  453.                 ttm_bo_synccpu_write_release(&user_bo->dma.base);
  454.                 break;
  455.         default:
  456.                 BUG();
  457.         }
  458. }
  459.  
  460. /**
  461.  * vmw_user_dmabuf_alloc - Allocate a user dma buffer
  462.  *
  463.  * @dev_priv: Pointer to a struct device private.
  464.  * @tfile: Pointer to a struct ttm_object_file on which to register the user
  465.  * object.
  466.  * @size: Size of the dma buffer.
  467.  * @shareable: Boolean whether the buffer is shareable with other open files.
  468.  * @handle: Pointer to where the handle value should be assigned.
  469.  * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
  470.  * should be assigned.
  471.  */
  472. int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
  473.                           struct ttm_object_file *tfile,
  474.                           uint32_t size,
  475.                           bool shareable,
  476.                           uint32_t *handle,
  477.                           struct vmw_dma_buffer **p_dma_buf)
  478. {
  479.         struct vmw_user_dma_buffer *user_bo;
  480.         struct ttm_buffer_object *tmp;
  481.         int ret;
  482.  
  483.         user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
  484.         if (unlikely(user_bo == NULL)) {
  485.                 DRM_ERROR("Failed to allocate a buffer.\n");
  486.                 return -ENOMEM;
  487.         }
  488.  
  489.         ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
  490.                               (dev_priv->has_mob) ?
  491.                               &vmw_sys_placement :
  492.                               &vmw_vram_sys_placement, true,
  493.                               &vmw_user_dmabuf_destroy);
  494.         if (unlikely(ret != 0))
  495.                 return ret;
  496.  
  497.         tmp = ttm_bo_reference(&user_bo->dma.base);
  498. /*
  499.     ret = ttm_prime_object_init(tfile,
  500.                                     size,
  501.                                     &user_bo->prime,
  502.                                    shareable,
  503.                                    ttm_buffer_type,
  504.                                     &vmw_user_dmabuf_release,
  505.                                     &vmw_user_dmabuf_ref_obj_release);
  506.         if (unlikely(ret != 0)) {
  507.                 ttm_bo_unref(&tmp);
  508.                 goto out_no_base_object;
  509.         }
  510. */
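        /* ttm_prime_object_init() above is commented out in this port, so the
         * buffer is never registered as a base object; the handle below is read
         * from the kzalloc()-zeroed prime.base.hash.key. */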
  511.  
  512.         *p_dma_buf = &user_bo->dma;
  513.         *handle = user_bo->prime.base.hash.key;
  514.  
  515. out_no_base_object:
  516.         return ret;
  517. }
  518.  
  519. /**
  520.  * vmw_user_dmabuf_verify_access - verify access permissions on this
  521.  * buffer object.
  522.  *
  523.  * @bo: Pointer to the buffer object being accessed
  524.  * @tfile: Identifying the caller.
  525.  */
  526. int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
  527.                                   struct ttm_object_file *tfile)
  528. {
  529.         struct vmw_user_dma_buffer *vmw_user_bo;
  530.  
  531.         if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
  532.                 return -EPERM;
  533.  
  534.         vmw_user_bo = vmw_user_dma_buffer(bo);
  535.         return (vmw_user_bo->prime.base.tfile == tfile ||
  536.                 vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
  537. }
  538.  
  539. /**
  540.  * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
  541.  * access, idling previous GPU operations on the buffer and optionally
  542.  * blocking it for further command submissions.
  543.  *
  544.  * @user_bo: Pointer to the buffer object being grabbed for CPU access
  545.  * @tfile: Identifying the caller.
  546.  * @flags: Flags indicating how the grab should be performed.
  547.  *
  548.  * A blocking grab will be automatically released when @tfile is closed.
  549.  */
  550. static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
  551.                                         struct ttm_object_file *tfile,
  552.                                         uint32_t flags)
  553. {
  554.         struct ttm_buffer_object *bo = &user_bo->dma.base;
  555.         bool existed;
  556.         int ret = 0;
  557.  
  558.         if (flags & drm_vmw_synccpu_allow_cs) {
  559.                 struct ttm_bo_device *bdev = bo->bdev;
  560.  
  561. //       spin_lock(&bdev->fence_lock);
  562. //       ret = ttm_bo_wait(bo, false, true,
  563. //                 !!(flags & drm_vmw_synccpu_dontblock));
  564. //       spin_unlock(&bdev->fence_lock);
  565.                 return ret;
  566.         }
  567.  
  568. //   ret = ttm_bo_synccpu_write_grab
  569. //       (bo, !!(flags & drm_vmw_synccpu_dontblock));
  570. //   if (unlikely(ret != 0))
  571. //       return ret;
  572.  
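        /* The ttm_bo_wait()/ttm_bo_synccpu_write_grab() calls above are disabled
         * in this port; only the TTM reference bookkeeping below is performed. */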
  573.         ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
  574.                                  TTM_REF_SYNCCPU_WRITE, &existed);
  575. //   if (ret != 0 || existed)
  576. //       ttm_bo_synccpu_write_release(&user_bo->dma.base);
  577.  
  578.         return ret;
  579. }
  580.  
  581. /**
  582.  * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
  583.  * and unblock command submission on the buffer if blocked.
  584.  *
  585.  * @handle: Handle identifying the buffer object.
  586.  * @tfile: Identifying the caller.
  587.  * @flags: Flags indicating the type of release.
  588.  */
  589. static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
  590.                                            struct ttm_object_file *tfile,
  591.                                            uint32_t flags)
  592. {
  593.         if (!(flags & drm_vmw_synccpu_allow_cs))
  594.                 return ttm_ref_object_base_unref(tfile, handle,
  595.                                                  TTM_REF_SYNCCPU_WRITE);
  596.  
  597.         return 0;
  598. }
  599.  
  600. /**
  601.  * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
  602.  * functionality.
  603.  *
  604.  * @dev: Identifies the drm device.
  605.  * @data: Pointer to the ioctl argument.
  606.  * @file_priv: Identifies the caller.
  607.  *
  608.  * This function checks the ioctl arguments for validity and calls the
  609.  * relevant synccpu functions.
  610.  */
  611. int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
  612.                                   struct drm_file *file_priv)
  613. {
  614.         struct drm_vmw_synccpu_arg *arg =
  615.                 (struct drm_vmw_synccpu_arg *) data;
  616.         struct vmw_dma_buffer *dma_buf;
  617.         struct vmw_user_dma_buffer *user_bo;
  618.         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  619.         int ret;
  620.  
  621.         if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
  622.             || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
  623.                                drm_vmw_synccpu_dontblock |
  624.                                drm_vmw_synccpu_allow_cs)) != 0) {
  625.                 DRM_ERROR("Illegal synccpu flags.\n");
  626.                 return -EINVAL;
  627.         }
  628.  
  629.         switch (arg->op) {
  630.         case drm_vmw_synccpu_grab:
  631.                 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
  632.                 if (unlikely(ret != 0))
  633.                         return ret;
  634.  
  635.                 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
  636.                                        dma);
  637.                 ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
  638.                 vmw_dmabuf_unreference(&dma_buf);
  639.                 if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
  640.                              ret != -EBUSY)) {
  641.                         DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
  642.                                   (unsigned int) arg->handle);
  643.                         return ret;
  644.                 }
  645.                 break;
  646.         case drm_vmw_synccpu_release:
  647.                 ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
  648.                                                       arg->flags);
  649.                 if (unlikely(ret != 0)) {
  650.                         DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
  651.                                   (unsigned int) arg->handle);
  652.                         return ret;
  653.                 }
  654.                 break;
  655.         default:
  656.                 DRM_ERROR("Invalid synccpu operation.\n");
  657.                 return -EINVAL;
  658.         }
  659.  
  660.         return 0;
  661. }
  662.  
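/* The dmabuf alloc/unref ioctls below are compiled out (#if 0) in this port. */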
  663. #if 0
  664. int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
  665.                            struct drm_file *file_priv)
  666. {
  667.         struct vmw_private *dev_priv = vmw_priv(dev);
  668.         union drm_vmw_alloc_dmabuf_arg *arg =
  669.             (union drm_vmw_alloc_dmabuf_arg *)data;
  670.         struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
  671.         struct drm_vmw_dmabuf_rep *rep = &arg->rep;
  672.         struct vmw_dma_buffer *dma_buf;
  673.         uint32_t handle;
  674.         struct vmw_master *vmaster = vmw_master(file_priv->master);
  675.         int ret;
  676.  
  677.         ret = ttm_read_lock(&vmaster->lock, true);
  678.         if (unlikely(ret != 0))
  679.                 return ret;
  680.  
  681.         ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
  682.                                     req->size, false, &handle, &dma_buf);
  683.         if (unlikely(ret != 0))
  684.                 goto out_no_dmabuf;
  685.  
  686.         rep->handle = handle;
  687.         rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
  688.         rep->cur_gmr_id = handle;
  689.         rep->cur_gmr_offset = 0;
  690.  
  691.         vmw_dmabuf_unreference(&dma_buf);
  692.  
  693. out_no_dmabuf:
  694.         ttm_read_unlock(&vmaster->lock);
  695.  
  696.         return ret;
  697. }
  698.  
  699. int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
  700.                            struct drm_file *file_priv)
  701. {
  702.         struct drm_vmw_unref_dmabuf_arg *arg =
  703.             (struct drm_vmw_unref_dmabuf_arg *)data;
  704.  
  705.         return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
  706.                                          arg->handle,
  707.                                          TTM_REF_USAGE);
  708. }
  709. #endif
  710.  
  711. int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
  712.                            uint32_t handle, struct vmw_dma_buffer **out)
  713. {
  714.         struct vmw_user_dma_buffer *vmw_user_bo;
  715.         struct ttm_base_object *base;
  716.  
  717.         base = ttm_base_object_lookup(tfile, handle);
  718.         if (unlikely(base == NULL)) {
  719.                 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
  720.                        (unsigned long)handle);
  721.                 return -ESRCH;
  722.         }
  723.  
  724.         if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
  725.                 ttm_base_object_unref(&base);
  726.                 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
  727.                        (unsigned long)handle);
  728.                 return -EINVAL;
  729.         }
  730.  
  731.         vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
  732.                                    prime.base);
  733.         (void)ttm_bo_reference(&vmw_user_bo->dma.base);
  734.         ttm_base_object_unref(&base);
  735.         *out = &vmw_user_bo->dma;
  736.  
  737.         return 0;
  738. }
  739.  
  740. int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
  741.                               struct vmw_dma_buffer *dma_buf,
  742.                               uint32_t *handle)
  743. {
  744.         struct vmw_user_dma_buffer *user_bo;
  745.  
  746.         if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
  747.                 return -EINVAL;
  748.  
  749.         user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
  750.  
  751.         *handle = user_bo->prime.base.hash.key;
  752.         return ttm_ref_object_add(tfile, &user_bo->prime.base,
  753.                                   TTM_REF_USAGE, NULL);
  754. }
  755.  
  756. /*
  757.  * Stream management
  758.  */
  759.  
  760. static void vmw_stream_destroy(struct vmw_resource *res)
  761. {
  762.         struct vmw_private *dev_priv = res->dev_priv;
  763.         struct vmw_stream *stream;
  764.         int ret;
  765.  
  766.         DRM_INFO("%s: unref\n", __func__);
  767.         stream = container_of(res, struct vmw_stream, res);
  768.  
  769.         ret = vmw_overlay_unref(dev_priv, stream->stream_id);
  770.         WARN_ON(ret != 0);
  771. }
  772.  
  773. static int vmw_stream_init(struct vmw_private *dev_priv,
  774.                            struct vmw_stream *stream,
  775.                            void (*res_free) (struct vmw_resource *res))
  776. {
  777.         struct vmw_resource *res = &stream->res;
  778.         int ret;
  779.  
  780.         ret = vmw_resource_init(dev_priv, res, false, res_free,
  781.                                 &vmw_stream_func);
  782.  
  783.         if (unlikely(ret != 0)) {
  784.                 if (res_free == NULL)
  785.                         kfree(stream);
  786.                 else
  787.                         res_free(&stream->res);
  788.                 return ret;
  789.         }
  790.  
  791.         ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
  792.         if (ret) {
  793.                 vmw_resource_unreference(&res);
  794.                 return ret;
  795.         }
  796.  
  797.         DRM_INFO("%s: claimed\n", __func__);
  798.  
  799.         vmw_resource_activate(&stream->res, vmw_stream_destroy);
  800.         return 0;
  801. }
  802.  
  803. static void vmw_user_stream_free(struct vmw_resource *res)
  804. {
  805.         struct vmw_user_stream *stream =
  806.             container_of(res, struct vmw_user_stream, stream.res);
  807.         struct vmw_private *dev_priv = res->dev_priv;
  808.  
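        /* Freeing of the embedded base object is stubbed out in this port; only
         * the accounted graphics memory is released below. */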
  809. //   ttm_base_object_kfree(stream, base);
  810.         ttm_mem_global_free(vmw_mem_glob(dev_priv),
  811.                             vmw_user_stream_size);
  812. }
  813.  
  814. /**
  815.  * This function is called when user space has no more references on the
  816.  * base object. It releases the base-object's reference on the resource object.
  817.  */
  818.  
  819. static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
  820. {
  821.         struct ttm_base_object *base = *p_base;
  822.         struct vmw_user_stream *stream =
  823.             container_of(base, struct vmw_user_stream, base);
  824.         struct vmw_resource *res = &stream->stream.res;
  825.  
  826.         *p_base = NULL;
  827.         vmw_resource_unreference(&res);
  828. }
  829.  
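/* The stream unref/claim ioctls below are compiled out (#if 0) in this port. */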
  830. #if 0
  831. int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
  832.                            struct drm_file *file_priv)
  833. {
  834.         struct vmw_private *dev_priv = vmw_priv(dev);
  835.         struct vmw_resource *res;
  836.         struct vmw_user_stream *stream;
  837.         struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
  838.         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  839.         struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
  840.         int ret = 0;
  841.  
  842.  
  843.         res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
  844.         if (unlikely(res == NULL))
  845.                 return -EINVAL;
  846.  
  847.         if (res->res_free != &vmw_user_stream_free) {
  848.                 ret = -EINVAL;
  849.                 goto out;
  850.         }
  851.  
  852.         stream = container_of(res, struct vmw_user_stream, stream.res);
  853.         if (stream->base.tfile != tfile) {
  854.                 ret = -EINVAL;
  855.                 goto out;
  856.         }
  857.  
  858.         ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
  859. out:
  860.         vmw_resource_unreference(&res);
  861.         return ret;
  862. }
  863.  
  864. int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
  865.                            struct drm_file *file_priv)
  866. {
  867.         struct vmw_private *dev_priv = vmw_priv(dev);
  868.         struct vmw_user_stream *stream;
  869.         struct vmw_resource *res;
  870.         struct vmw_resource *tmp;
  871.         struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
  872.         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  873.         struct vmw_master *vmaster = vmw_master(file_priv->master);
  874.         int ret;
  875.  
  876.         /*
  877.          * Approximate idr memory usage with 128 bytes. It will be limited
  878.          * by the maximum number of streams anyway.
  879.          */
  880.  
  881.         if (unlikely(vmw_user_stream_size == 0))
  882.                 vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
  883.  
  884.         ret = ttm_read_lock(&vmaster->lock, true);
  885.         if (unlikely(ret != 0))
  886.                 return ret;
  887.  
  888.         ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
  889.                                    vmw_user_stream_size,
  890.                                    false, true);
  891.         if (unlikely(ret != 0)) {
  892.                 if (ret != -ERESTARTSYS)
  893.                         DRM_ERROR("Out of graphics memory for stream"
  894.                                   " creation.\n");
  895.                 goto out_unlock;
  896.         }
  897.  
  898.  
  899.         stream = kmalloc(sizeof(*stream), GFP_KERNEL);
  900.         if (unlikely(stream == NULL)) {
  901.                 ttm_mem_global_free(vmw_mem_glob(dev_priv),
  902.                                     vmw_user_stream_size);
  903.                 ret = -ENOMEM;
  904.                 goto out_unlock;
  905.         }
  906.  
  907.         res = &stream->stream.res;
  908.         stream->base.shareable = false;
  909.         stream->base.tfile = NULL;
  910.  
  911.         /*
  912.          * From here on, the destructor takes over resource freeing.
  913.          */
  914.  
  915.         ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
  916.         if (unlikely(ret != 0))
  917.                 goto out_unlock;
  918.  
  919.         tmp = vmw_resource_reference(res);
  920.         ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
  921.                                    &vmw_user_stream_base_release, NULL);
  922.  
  923.         if (unlikely(ret != 0)) {
  924.                 vmw_resource_unreference(&tmp);
  925.                 goto out_err;
  926.         }
  927.  
  928.         arg->stream_id = res->id;
  929. out_err:
  930.         vmw_resource_unreference(&res);
  931. out_unlock:
  932.         ttm_read_unlock(&vmaster->lock);
  933.         return ret;
  934. }
  935. #endif
  936.  
  937. int vmw_user_stream_lookup(struct vmw_private *dev_priv,
  938.                            struct ttm_object_file *tfile,
  939.                            uint32_t *inout_id, struct vmw_resource **out)
  940. {
  941.         struct vmw_user_stream *stream;
  942.         struct vmw_resource *res;
  943.         int ret;
  944.  
  945.         res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
  946.                                   *inout_id);
  947.         if (unlikely(res == NULL))
  948.                 return -EINVAL;
  949.  
  950.         if (res->res_free != &vmw_user_stream_free) {
  951.                 ret = -EINVAL;
  952.                 goto err_ref;
  953.         }
  954.  
  955.         stream = container_of(res, struct vmw_user_stream, stream.res);
  956.         if (stream->base.tfile != tfile) {
  957.                 ret = -EPERM;
  958.                 goto err_ref;
  959.         }
  960.  
  961.         *inout_id = stream->stream.stream_id;
  962.         *out = res;
  963.         return 0;
  964. err_ref:
  965.         vmw_resource_unreference(&res);
  966.         return ret;
  967. }
  968.  
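/* The dumb-buffer create callback below is compiled out (#if 0) in this port. */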
  969. #if 0
  970. int vmw_dumb_create(struct drm_file *file_priv,
  971.                     struct drm_device *dev,
  972.                     struct drm_mode_create_dumb *args)
  973. {
  974.         struct vmw_private *dev_priv = vmw_priv(dev);
  975.         struct vmw_master *vmaster = vmw_master(file_priv->master);
  976.         struct vmw_dma_buffer *dma_buf;
  977.         int ret;
  978.  
  979.         args->pitch = args->width * ((args->bpp + 7) / 8);
  980.         args->size = args->pitch * args->height;
  981.  
  982.         ret = ttm_read_lock(&vmaster->lock, true);
  983.         if (unlikely(ret != 0))
  984.                 return ret;
  985.  
  986.         ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
  987.                                     args->size, false, &args->handle,
  988.                                     &dma_buf);
  989.         if (unlikely(ret != 0))
  990.                 goto out_no_dmabuf;
  991.  
  992.         vmw_dmabuf_unreference(&dma_buf);
  993. out_no_dmabuf:
  994.         ttm_read_unlock(&vmaster->lock);
  995.         return ret;
  996. }
  997. #endif
  998.  
  999. /**
  1000.  * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
  1001.  *
  1002.  * @file_priv: Pointer to a struct drm_file identifying the caller.
  1003.  * @dev: Pointer to the drm device.
  1004.  * @handle: Handle identifying the dumb buffer.
  1005.  * @offset: The address space offset returned.
  1006.  *
  1007.  * This is a driver callback for the core drm dumb_map_offset functionality.
  1008.  */
  1009. int vmw_dumb_map_offset(struct drm_file *file_priv,
  1010.                         struct drm_device *dev, uint32_t handle,
  1011.                         uint64_t *offset)
  1012. {
  1013.         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  1014.         struct vmw_dma_buffer *out_buf;
  1015.         int ret;
  1016.  
  1017.         ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
  1018.         if (ret != 0)
  1019.                 return -EINVAL;
  1020.  
  1021.         *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
  1022.         vmw_dmabuf_unreference(&out_buf);
  1023.         return 0;
  1024. }
  1025.  
  1026. /**
  1027.  * vmw_dumb_destroy - Destroy a dumb buffer
  1028.  *
  1029.  * @file_priv: Pointer to a struct drm_file identifying the caller.
  1030.  * @dev: Pointer to the drm device.
  1031.  * @handle: Handle identifying the dumb buffer.
  1032.  *
  1033.  * This is a driver callback for the core drm dumb_destroy functionality.
  1034.  */
  1035. int vmw_dumb_destroy(struct drm_file *file_priv,
  1036.                      struct drm_device *dev,
  1037.                      uint32_t handle)
  1038. {
  1039.         return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
  1040.                                          handle, TTM_REF_USAGE);
  1041. }
  1042.  
  1043. /**
  1044.  * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
  1045.  *
  1046.  * @res:            The resource for which to allocate a backup buffer.
  1047.  * @interruptible:  Whether any sleeps during allocation should be
  1048.  *                  performed while interruptible.
  1049.  */
  1050. static int vmw_resource_buf_alloc(struct vmw_resource *res,
  1051.                                   bool interruptible)
  1052. {
  1053.         unsigned long size =
  1054.                 (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
  1055.         struct vmw_dma_buffer *backup;
  1056.         int ret;
  1057.  
  1058.         if (likely(res->backup)) {
  1059.                 BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
  1060.                 return 0;
  1061.         }
  1062.  
  1063.         backup = kzalloc(sizeof(*backup), GFP_KERNEL);
  1064.         if (unlikely(backup == NULL))
  1065.                 return -ENOMEM;
  1066.  
  1067.         ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
  1068.                               res->func->backup_placement,
  1069.                               interruptible,
  1070.                               &vmw_dmabuf_bo_free);
  1071.         if (unlikely(ret != 0))
  1072.                 goto out_no_dmabuf;
  1073.  
  1074.         res->backup = backup;
  1075.  
  1076. out_no_dmabuf:
  1077.         return ret;
  1078. }
  1079.  
  1080. /**
  1081.  * vmw_resource_do_validate - Make a resource up-to-date and visible
  1082.  *                            to the device.
  1083.  *
  1084.  * @res:            The resource to make visible to the device.
  1085.  * @val_buf:        Information about a buffer possibly
  1086.  *                  containing backup data if a bind operation is needed.
  1087.  *
  1088.  * On hardware resource shortage, this function returns -EBUSY and
  1089.  * should be retried once resources have been freed up.
  1090.  */
  1091. static int vmw_resource_do_validate(struct vmw_resource *res,
  1092.                                     struct ttm_validate_buffer *val_buf)
  1093. {
  1094.         int ret = 0;
  1095.         const struct vmw_res_func *func = res->func;
  1096.  
  1097.         if (unlikely(res->id == -1)) {
  1098.                 ret = func->create(res);
  1099.                 if (unlikely(ret != 0))
  1100.                         return ret;
  1101.         }
  1102.  
  1103.         if (func->bind &&
  1104.             ((func->needs_backup && list_empty(&res->mob_head) &&
  1105.               val_buf->bo != NULL) ||
  1106.              (!func->needs_backup && val_buf->bo != NULL))) {
  1107.                 ret = func->bind(res, val_buf);
  1108.                 if (unlikely(ret != 0))
  1109.                         goto out_bind_failed;
  1110.                 if (func->needs_backup)
  1111.                         list_add_tail(&res->mob_head, &res->backup->res_list);
  1112.         }
  1113.  
  1114.         /*
  1115.          * Only do this on write operations, and move to
  1116.          * vmw_resource_unreserve if it can be called after
  1117.          * backup buffers have been unreserved. Otherwise
  1118.          * sort out locking.
  1119.          */
  1120.         res->res_dirty = true;
  1121.  
  1122.         return 0;
  1123.  
  1124. out_bind_failed:
  1125.         func->destroy(res);
  1126.  
  1127.         return ret;
  1128. }
  1129.  
  1130. /**
  1131.  * vmw_resource_unreserve - Unreserve a resource previously reserved for
  1132.  * command submission.
  1133.  *
  1134.  * @res:               Pointer to the struct vmw_resource to unreserve.
  1135.  * @new_backup:        Pointer to new backup buffer if command submission
  1136.  *                     switched.
  1137.  * @new_backup_offset: New backup offset if @new_backup is !NULL.
  1138.  *
  1139.  * Currently unreserving a resource means putting it back on the device's
  1140.  * resource lru list, so that it can be evicted if necessary.
  1141.  */
  1142. void vmw_resource_unreserve(struct vmw_resource *res,
  1143.                             struct vmw_dma_buffer *new_backup,
  1144.                             unsigned long new_backup_offset)
  1145. {
  1146.         struct vmw_private *dev_priv = res->dev_priv;
  1147.  
  1148.         if (!list_empty(&res->lru_head))
  1149.                 return;
  1150.  
  1151.         if (new_backup && new_backup != res->backup) {
  1152.  
  1153.                 if (res->backup) {
  1154.                         lockdep_assert_held(&res->backup->base.resv->lock.base);
  1155.                         list_del_init(&res->mob_head);
  1156.                         vmw_dmabuf_unreference(&res->backup);
  1157.                 }
  1158.  
  1159.                 res->backup = vmw_dmabuf_reference(new_backup);
  1160.                 lockdep_assert_held(&new_backup->base.resv->lock.base);
  1161.                 list_add_tail(&res->mob_head, &new_backup->res_list);
  1162.         }
  1163.         if (new_backup)
  1164.                 res->backup_offset = new_backup_offset;
  1165.  
  1166.         if (!res->func->may_evict || res->id == -1)
  1167.                 return;
  1168.  
  1169.         write_lock(&dev_priv->resource_lock);
  1170.         list_add_tail(&res->lru_head,
  1171.                       &res->dev_priv->res_lru[res->func->res_type]);
  1172.         write_unlock(&dev_priv->resource_lock);
  1173. }
  1174.  
  1175. /**
  1176.  * vmw_resource_check_buffer - Check whether a backup buffer is needed
  1177.  *                             for a resource and in that case, allocate
  1178.  *                             one, reserve and validate it.
  1179.  *
  1180.  * @res:            The resource for which to allocate a backup buffer.
  1181.  * @interruptible:  Whether any sleeps during allocation should be
  1182.  *                  performed while interruptible.
  1183.  * @val_buf:        On successful return contains data about the
  1184.  *                  reserved and validated backup buffer.
  1185.  */
  1186. static int
  1187. vmw_resource_check_buffer(struct vmw_resource *res,
  1188.                           bool interruptible,
  1189.                           struct ttm_validate_buffer *val_buf)
  1190. {
  1191.         struct list_head val_list;
  1192.         bool backup_dirty = false;
  1193.         int ret;
  1194.  
  1195.         if (unlikely(res->backup == NULL)) {
  1196.                 ret = vmw_resource_buf_alloc(res, interruptible);
  1197.                 if (unlikely(ret != 0))
  1198.                         return ret;
  1199.         }
  1200.  
  1201.         INIT_LIST_HEAD(&val_list);
  1202.         val_buf->bo = ttm_bo_reference(&res->backup->base);
  1203.         list_add_tail(&val_buf->head, &val_list);
  1204.         ret = ttm_eu_reserve_buffers(NULL, &val_list);
  1205.         if (unlikely(ret != 0))
  1206.                 goto out_no_reserve;
  1207.  
  1208.         if (res->func->needs_backup && list_empty(&res->mob_head))
  1209.                 return 0;
  1210.  
  1211.         backup_dirty = res->backup_dirty;
  1212.         ret = ttm_bo_validate(&res->backup->base,
  1213.                               res->func->backup_placement,
  1214.                               true, false);
  1215.  
  1216.         if (unlikely(ret != 0))
  1217.                 goto out_no_validate;
  1218.  
  1219.         return 0;
  1220.  
  1221. out_no_validate:
  1222.         ttm_eu_backoff_reservation(NULL, &val_list);
  1223. out_no_reserve:
  1224.         ttm_bo_unref(&val_buf->bo);
  1225.         if (backup_dirty)
  1226.                 vmw_dmabuf_unreference(&res->backup);
  1227.  
  1228.         return ret;
  1229. }
  1230.  
  1231. /**
  1232.  * vmw_resource_reserve - Reserve a resource for command submission
  1233.  *
  1234.  * @res:            The resource to reserve.
  1235.  *
  1236.  * This function takes the resource off the LRU list and makes sure
  1237.  * a backup buffer is present for guest-backed resources. However,
  1238.  * the buffer may not be bound to the resource at this point.
  1239.  *
  1240.  */
  1241. int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
  1242. {
  1243.         struct vmw_private *dev_priv = res->dev_priv;
  1244.         int ret;
  1245.  
  1246.         write_lock(&dev_priv->resource_lock);
  1247.         list_del_init(&res->lru_head);
  1248.         write_unlock(&dev_priv->resource_lock);
  1249.  
  1250.         if (res->func->needs_backup && res->backup == NULL &&
  1251.             !no_backup) {
  1252.                 ret = vmw_resource_buf_alloc(res, true);
  1253.                 if (unlikely(ret != 0))
  1254.                         return ret;
  1255.         }
  1256.  
  1257.         return 0;
  1258. }
  1259.  
  1260. /**
  1261.  * vmw_resource_backoff_reservation - Unreserve and unreference a
  1262.  *                                    backup buffer
  1263.  *
  1264.  * @val_buf:        Backup buffer information.
  1265.  */
  1266. static void
  1267. vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
  1268. {
  1269.         struct list_head val_list;
  1270.  
  1271.         if (likely(val_buf->bo == NULL))
  1272.                 return;
  1273.  
  1274.         INIT_LIST_HEAD(&val_list);
  1275.         list_add_tail(&val_buf->head, &val_list);
  1276.         ttm_eu_backoff_reservation(NULL, &val_list);
  1277.         ttm_bo_unref(&val_buf->bo);
  1278. }
  1279.  
  1280. /**
  1281.  * vmw_resource_do_evict - Evict a resource, and transfer its data
  1282.  *                         to a backup buffer.
  1283.  *
  1284.  * @res:            The resource to evict.
  1285.  * @interruptible:  Whether to wait interruptible.
  1286.  */
  1287. int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
  1288. {
  1289.         struct ttm_validate_buffer val_buf;
  1290.         const struct vmw_res_func *func = res->func;
  1291.         int ret;
  1292.  
  1293.         BUG_ON(!func->may_evict);
  1294.  
  1295.         val_buf.bo = NULL;
  1296.         ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
  1297.         if (unlikely(ret != 0))
  1298.                 return ret;
  1299.  
  1300.         if (unlikely(func->unbind != NULL &&
  1301.                      (!func->needs_backup || !list_empty(&res->mob_head)))) {
  1302.                 ret = func->unbind(res, res->res_dirty, &val_buf);
  1303.                 if (unlikely(ret != 0))
  1304.                         goto out_no_unbind;
  1305.                 list_del_init(&res->mob_head);
  1306.         }
  1307.         ret = func->destroy(res);
  1308.         res->backup_dirty = true;
  1309.         res->res_dirty = false;
  1310. out_no_unbind:
  1311.         vmw_resource_backoff_reservation(&val_buf);
  1312.  
  1313.         return ret;
  1314. }
  1315.  
  1316.  
  1317. /**
  1318.  * vmw_resource_validate - Make a resource up-to-date and visible
  1319.  *                         to the device.
  1320.  *
  1321.  * @res:            The resource to make visible to the device.
  1322.  *
  1323.  * On successful return, any backup DMA buffer pointed to by @res->backup will
  1324.  * be reserved and validated.
  1325.  * On hardware resource shortage, this function will repeatedly evict
  1326.  * resources of the same type until the validation succeeds.
  1327.  */
  1328. int vmw_resource_validate(struct vmw_resource *res)
  1329. {
  1330.         int ret;
  1331.         struct vmw_resource *evict_res;
  1332.         struct vmw_private *dev_priv = res->dev_priv;
  1333.         struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
  1334.         struct ttm_validate_buffer val_buf;
  1335.         unsigned err_count = 0;
  1336.  
  1337.         if (likely(!res->func->may_evict))
  1338.                 return 0;
  1339.  
  1340.         val_buf.bo = NULL;
  1341.         if (res->backup)
  1342.                 val_buf.bo = &res->backup->base;
  1343.         do {
  1344.                 ret = vmw_resource_do_validate(res, &val_buf);
  1345.                 if (likely(ret != -EBUSY))
  1346.                         break;
  1347.  
  1348.                 write_lock(&dev_priv->resource_lock);
  1349.                 if (list_empty(lru_list) || !res->func->may_evict) {
  1350.                         DRM_ERROR("Out of device resources "
  1351.                                   "for %s.\n", res->func->type_name);
  1352.                         ret = -EBUSY;
  1353.                         write_unlock(&dev_priv->resource_lock);
  1354.                         break;
  1355.                 }
  1356.  
  1357.                 evict_res = vmw_resource_reference
  1358.                         (list_first_entry(lru_list, struct vmw_resource,
  1359.                                           lru_head));
  1360.                 list_del_init(&evict_res->lru_head);
  1361.  
  1362.                 write_unlock(&dev_priv->resource_lock);
  1363.  
  1364.                 ret = vmw_resource_do_evict(evict_res, true);
  1365.                 if (unlikely(ret != 0)) {
  1366.                         write_lock(&dev_priv->resource_lock);
  1367.                         list_add_tail(&evict_res->lru_head, lru_list);
  1368.                         write_unlock(&dev_priv->resource_lock);
  1369.                         if (ret == -ERESTARTSYS ||
  1370.                             ++err_count > VMW_RES_EVICT_ERR_COUNT) {
  1371.                                 vmw_resource_unreference(&evict_res);
  1372.                                 goto out_no_validate;
  1373.                         }
  1374.                 }
  1375.  
  1376.                 vmw_resource_unreference(&evict_res);
  1377.         } while (1);
  1378.  
  1379.         if (unlikely(ret != 0))
  1380.                 goto out_no_validate;
  1381.         else if (!res->func->needs_backup && res->backup) {
  1382.                 list_del_init(&res->mob_head);
  1383.                 vmw_dmabuf_unreference(&res->backup);
  1384.         }
  1385.  
  1386.         return 0;
  1387.  
  1388. out_no_validate:
  1389.         return ret;
  1390. }
  1391.  
  1392. /**
  1393.  * vmw_fence_single_bo - Utility function to fence a single TTM buffer
  1394.  *                       object without unreserving it.
  1395.  *
  1396.  * @bo:             Pointer to the struct ttm_buffer_object to fence.
  1397.  * @fence:          Pointer to the fence. If NULL, this function will
  1398.  *                  insert a fence into the command stream.
  1399.  *
  1400.  * Contrary to the ttm_eu version of this function, it takes only
  1401.  * a single buffer object instead of a list, and it also doesn't
  1402.  * unreserve the buffer object, which needs to be done separately.
  1403.  */
  1404. void vmw_fence_single_bo(struct ttm_buffer_object *bo,
  1405.                          struct vmw_fence_obj *fence)
  1406. {
  1407.         struct ttm_bo_device *bdev = bo->bdev;
  1408.         struct ttm_bo_driver *driver = bdev->driver;
  1409.         struct vmw_fence_obj *old_fence_obj;
  1410.         struct vmw_private *dev_priv =
  1411.                 container_of(bdev, struct vmw_private, bdev);
  1412.  
  1413.         if (fence == NULL)
  1414.                 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
  1415.         else
  1416.                 driver->sync_obj_ref(fence);
  1417.  
  1418.         spin_lock(&bdev->fence_lock);
  1419.  
  1420.         old_fence_obj = bo->sync_obj;
  1421.         bo->sync_obj = fence;
  1422.  
  1423.         spin_unlock(&bdev->fence_lock);
  1424.  
  1425.         if (old_fence_obj)
  1426.                 vmw_fence_obj_unreference(&old_fence_obj);
  1427. }
  1428.  
  1429. /**
  1430.  * vmw_resource_move_notify - TTM move_notify_callback
  1431.  *
  1432.  * @bo:             The TTM buffer object about to move.
  1433.  * @mem:            The struct ttm_mem_reg indicating to what memory
  1434.  *                  region the move is taking place.
  1435.  *
  1436.  * Evicts the Guest Backed hardware resource if the backup
  1437.  * buffer is being moved out of MOB memory.
  1438.  * Note that this function should not race with the resource
  1439.  * validation code as long as it accesses only members of struct
  1440.  * resource that remain static while bo::res is !NULL and
  1441.  * while we have @bo reserved. struct resource::backup is *not* a
  1442.  * static member. The resource validation code will take care
  1443.  * to set @bo::res to NULL, while having @bo reserved when the
  1444.  * buffer is no longer bound to the resource, so @bo::res can be
  1445.  * used to determine whether there is a need to unbind and whether
  1446.  * it is safe to unbind.
  1447.  */
  1448. void vmw_resource_move_notify(struct ttm_buffer_object *bo,
  1449.                               struct ttm_mem_reg *mem)
  1450. {
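        /* Left empty in this port: the eviction of guest-backed resources on a
         * move out of MOB memory, described above, is not performed here. */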
  1451. }
  1452.  
  1453. /**
  1454.  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
  1455.  *
  1456.  * @res:            The resource being queried.
  1457.  */
  1458. bool vmw_resource_needs_backup(const struct vmw_resource *res)
  1459. {
  1460.         return res->func->needs_backup;
  1461. }
  1462.  
  1463. /**
  1464.  * vmw_resource_evict_type - Evict all resources of a specific type
  1465.  *
  1466.  * @dev_priv:       Pointer to a device private struct
  1467.  * @type:           The resource type to evict
  1468.  *
  1469.  * To avoid thrashing starvation or as part of the hibernation sequence,
  1470.  * try to evict all evictable resources of a specific type.
  1471.  */
  1472. static void vmw_resource_evict_type(struct vmw_private *dev_priv,
  1473.                                     enum vmw_res_type type)
  1474. {
  1475.         struct list_head *lru_list = &dev_priv->res_lru[type];
  1476.         struct vmw_resource *evict_res;
  1477.         unsigned err_count = 0;
  1478.         int ret;
  1479.  
  1480.         do {
  1481.                 write_lock(&dev_priv->resource_lock);
  1482.  
  1483.                 if (list_empty(lru_list))
  1484.                         goto out_unlock;
  1485.  
  1486.                 evict_res = vmw_resource_reference(
  1487.                         list_first_entry(lru_list, struct vmw_resource,
  1488.                                          lru_head));
  1489.                 list_del_init(&evict_res->lru_head);
  1490.                 write_unlock(&dev_priv->resource_lock);
  1491.  
  1492.                 ret = vmw_resource_do_evict(evict_res, false);
  1493.                 if (unlikely(ret != 0)) {
  1494.                         write_lock(&dev_priv->resource_lock);
  1495.                         list_add_tail(&evict_res->lru_head, lru_list);
  1496.                         write_unlock(&dev_priv->resource_lock);
  1497.                         if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
  1498.                                 vmw_resource_unreference(&evict_res);
  1499.                                 return;
  1500.                         }
  1501.                 }
  1502.  
  1503.                 vmw_resource_unreference(&evict_res);
  1504.         } while (1);
  1505.  
  1506. out_unlock:
  1507.         write_unlock(&dev_priv->resource_lock);
  1508. }
  1509.  
  1510. /**
  1511.  * vmw_resource_evict_all - Evict all evictable resources
  1512.  *
  1513.  * @dev_priv:       Pointer to a device private struct
  1514.  *
  1515.  * To avoid thrashing starvation or as part of the hibernation sequence,
  1516.  * evict all evictable resources. In particular this means that all
  1517.  * guest-backed resources that are registered with the device are
  1518.  * evicted and the OTable becomes clean.
  1519.  */
  1520. void vmw_resource_evict_all(struct vmw_private *dev_priv)
  1521. {
  1522.         enum vmw_res_type type;
  1523.  
  1524.         mutex_lock(&dev_priv->cmdbuf_mutex);
  1525.  
  1526.         for (type = 0; type < vmw_res_max; ++type)
  1527.                 vmw_resource_evict_type(dev_priv, type);
  1528.  
  1529.         mutex_unlock(&dev_priv->cmdbuf_mutex);
  1530. }
  1531.