/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"


/**
 * vmw_dmabuf_to_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to validate to.
 * @interruptible:  Use interruptible wait.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * Returns
 *  -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
                            struct vmw_dma_buffer *buf,
                            struct ttm_placement *placement,
                            bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

//   ret = ttm_write_lock(&vmaster->lock, interruptible);
//   if (unlikely(ret != 0))
//       return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        ret = ttm_bo_validate(bo, placement, interruptible, false);

        ttm_bo_unreserve(bo);

err:
//   ttm_write_unlock(&vmaster->lock);
        return ret;
}

/**
 * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @pin:  Pin buffer if true.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
                              struct vmw_dma_buffer *buf,
                              bool pin, bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement *placement;
        int ret;

//   ret = ttm_write_lock(&vmaster->lock, interruptible);
//   if (unlikely(ret != 0))
//       return ret;

        if (pin)
                vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        /**
         * Put BO in VRAM if there is space, otherwise as a GMR.
         * If there is no space in VRAM and GMR ids are all used up,
         * start evicting GMRs to make room. If the DMA buffer can't be
         * used as a GMR, this will return -ENOMEM.
         */

        if (pin)
                placement = &vmw_vram_gmr_ne_placement;
        else
                placement = &vmw_vram_gmr_placement;

        ret = ttm_bo_validate(bo, placement, interruptible, false);
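        /* Success, or interruption by a signal: skip the VRAM-only fallback. */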
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto err_unreserve;


        /**
         * If that failed, try VRAM again, this time evicting
         * previous contents.
         */

        if (pin)
                placement = &vmw_vram_ne_placement;
        else
                placement = &vmw_vram_placement;

        ret = ttm_bo_validate(bo, placement, interruptible, false);

err_unreserve:
        ttm_bo_unreserve(bo);
err:
//   ttm_write_unlock(&vmaster->lock);
        return ret;
}

/**
 * vmw_dmabuf_to_vram - Move a buffer to vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @pin:  Pin buffer in vram if true.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
                       struct vmw_dma_buffer *buf,
                       bool pin, bool interruptible)
{
        struct ttm_placement *placement;

        if (pin)
                placement = &vmw_vram_ne_placement;
        else
                placement = &vmw_vram_placement;

        return vmw_dmabuf_to_placement(dev_priv, buf,
                                       placement,
                                       interruptible);
}

/**
 * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @pin:  Pin buffer in vram if true.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
                                struct vmw_dma_buffer *buf,
                                bool pin, bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement placement;
        int ret = 0;

        if (pin)
                placement = vmw_vram_ne_placement;
        else
                placement = vmw_vram_placement;
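        /*
         * Limit the placement to the buffer's own size in pages, so it
         * can only be validated at the very start of VRAM.
         */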
        placement.lpfn = bo->num_pages;

//   ret = ttm_write_lock(&vmaster->lock, interruptible);
//   if (unlikely(ret != 0))
//       return ret;

        if (pin)
                vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
        if (unlikely(ret != 0))
                goto err_unlock;

        /* Is this buffer already in vram but not at the start of it? */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0)
                (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);

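        /* Validate into the size-restricted placement computed above. */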
        ret = ttm_bo_validate(bo, &placement, interruptible, false);

        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);

        ttm_bo_unreserve(bo);
err_unlock:
//   ttm_write_unlock(&vmaster->lock);

        return ret;
}


/**
 * vmw_dmabuf_unpin - Unpin the given buffer; does not move the buffer.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
                     struct vmw_dma_buffer *buf,
                     bool interruptible)
{
        /*
         * We could in theory early out if the buffer is
         * unpinned but we need to lock and reserve the buffer
         * anyways so we don't gain much by that.
         */
        return vmw_dmabuf_to_placement(dev_priv, buf,
                                       &vmw_evictable_placement,
                                       interruptible);
}


/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
                          SVGAGuestPtr *ptr)
{
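        /*
         * A buffer resident in VRAM is addressed through the special
         * framebuffer GMR plus a byte offset; otherwise the buffer's own
         * GMR id is used with a zero offset.
         */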
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
                ptr->offset = bo->offset;
        } else {
                ptr->gmrId = bo->mem.start;
                ptr->offset = 0;
        }
}


/**
 * vmw_bo_pin - Pin or unpin a buffer object without moving it.
 *
 * @bo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
{
        uint32_t pl_flags;
        struct ttm_placement placement;
        uint32_t old_mem_type = bo->mem.mem_type;
        int ret;

        lockdep_assert_held(&bo->resv->lock.base);

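        /*
         * Accept every memory type the buffer may currently reside in, so
         * ttm_bo_validate() only toggles the NO_EVICT (pin) flag and never
         * needs to move the buffer.
         */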
        pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
                | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
        if (pin)
                pl_flags |= TTM_PL_FLAG_NO_EVICT;

        memset(&placement, 0, sizeof(placement));
        placement.num_placement = 1;
        placement.placement = &pl_flags;

        ret = ttm_bo_validate(bo, &placement, false, true);

        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
  316.