/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

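/*
 * Placement flag combinations used by the ttm_placement lists below.
 * Each list pairs a preferred placement with a "busy" fallback that is
 * used when the preferred placements cannot be satisfied.
 */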
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED |
        TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
        TTM_PL_FLAG_CACHED;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED;

static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED |
        TTM_PL_FLAG_NO_EVICT;

struct ttm_placement vmw_vram_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static uint32_t gmr_vram_placement_flags[] = {
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

static uint32_t vram_gmr_ne_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 2,
        .placement = vram_gmr_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

static uint32_t evictable_placement_flags[] = {
        TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_evictable_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 3,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

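/*
 * Per-buffer TTM backend state: the device private pointer and the id of
 * the GMR the buffer is currently bound to.
 */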
struct vmw_ttm_tt {
        struct ttm_tt ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
};

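/*
 * Bind the TTM page array to a GMR. The GMR id is taken from the start
 * of the memory region handed out by the GMR id manager.
 */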
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);

        vmw_be->gmr_id = bo_mem->start;

        return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
                            ttm->num_pages, vmw_be->gmr_id);
}

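/* Unbind the pages from the GMR they were bound to in vmw_ttm_bind(). */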
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);

        vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
        return 0;
}

static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);

        ttm_tt_fini(ttm);
        kfree(vmw_be);
}

static struct ttm_backend_func vmw_ttm_func = {
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
};

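/* Allocate and initialize a vmw_ttm_tt backend for a buffer object. */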
struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
                                 unsigned long size, uint32_t page_flags,
                                 struct page *dummy_read_page)
{
        struct vmw_ttm_tt *vmw_be;

        vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

        if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
                kfree(vmw_be);
                return NULL;
        }

        return &vmw_be->ttm;
}

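/* No cache invalidation is needed; always report success. */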
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

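/*
 * Describe the memory domains TTM may place buffers in: system pages,
 * "on-card" VRAM and GMR apertures.
 */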
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */

                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case VMW_PL_GMR:
                /*
                 * "Guest Memory Regions" is an aperture-like feature with
                 * one slot per bo. There is an upper limit on the number
                 * of slots as well as on the bo size.
                 */
                man->func = &vmw_gmrid_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

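/* When a buffer is evicted, it is always moved to system memory. */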
void vmw_evict_flags(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        /*
         * Access verification is stubbed out in this port. The upstream
         * driver looks up the caller's ttm_object_file via
         * vmw_fpriv((struct drm_file *)filp->private_data)->tfile and
         * returns vmw_user_dmabuf_verify_access(bo, tfile).
         */
        return 0;
}

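/*
 * Fill in the bus placement used for CPU mapping. Only VRAM is an I/O
 * memory region; system and GMR placements need no bus setup.
 */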
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        mem->bus.addr = NULL;
        mem->bus.is_iomem = false;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

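/* sync_obj hooks: thin wrappers around the vmw_fence_obj API. */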
static void *vmw_sync_obj_ref(void *sync_obj)
{
        return (void *)
                vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

static void vmw_sync_obj_unref(void **sync_obj)
{
        vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

static int vmw_sync_obj_flush(void *sync_obj)
{
        vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
        return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj)
{
        return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
                                      DRM_VMW_FENCE_FLAG_EXEC);
}

static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
        return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
                                  DRM_VMW_FENCE_FLAG_EXEC,
                                  lazy, interruptible,
                                  VMW_FENCE_WAIT_TIMEOUT);
}

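/* TTM buffer object driver callbacks for vmwgfx. */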
struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &ttm_pool_populate,
        .ttm_tt_unpopulate = &ttm_pool_unpopulate,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
        .verify_access = vmw_verify_access,
        .sync_obj_signaled = vmw_sync_obj_signaled,
        .sync_obj_wait = vmw_sync_obj_wait,
        .sync_obj_flush = vmw_sync_obj_flush,
        .sync_obj_unref = vmw_sync_obj_unref,
        .sync_obj_ref = vmw_sync_obj_ref,
        .move_notify = NULL,
        .swap_notify = NULL,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
};