Subversion Repositories Kolibri OS

Rev

Rev 2997 | Rev 5078 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright 2009 Jerome Glisse.
  3.  * All Rights Reserved.
  4.  *
  5.  * Permission is hereby granted, free of charge, to any person obtaining a
  6.  * copy of this software and associated documentation files (the
  7.  * "Software"), to deal in the Software without restriction, including
  8.  * without limitation the rights to use, copy, modify, merge, publish,
  9.  * distribute, sub license, and/or sell copies of the Software, and to
  10.  * permit persons to whom the Software is furnished to do so, subject to
  11.  * the following conditions:
  12.  *
  13.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15.  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16.  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17.  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18.  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19.  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20.  *
  21.  * The above copyright notice and this permission notice (including the
  22.  * next paragraph) shall be included in all copies or substantial portions
  23.  * of the Software.
  24.  *
  25.  */
  26. /*
  27.  * Authors:
  28.  *    Jerome Glisse <glisse@freedesktop.org>
  29.  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  30.  *    Dave Airlie
  31.  */
  32. #include <ttm/ttm_bo_api.h>
  33. #include <ttm/ttm_bo_driver.h>
  34. #include <ttm/ttm_placement.h>
  35. #include <ttm/ttm_module.h>
  36. #include <ttm/ttm_page_alloc.h>
  37. #include <drm/drmP.h>
  38. #include <drm/radeon_drm.h>
  39. #include <linux/seq_file.h>
  40. #include <linux/slab.h>
  41. #include "radeon_reg.h"
  42. #include "radeon.h"
  43.  
/* Page offset at which DRM/TTM mmap space starts: 4GB expressed in pages
 * (presumably chosen so BO offsets never collide with low file offsets —
 * TODO confirm against the mmap fault path). */
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

/* Defined later in this file (or elsewhere in the port). */
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
  47.  
  48. static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
  49. {
  50.         struct radeon_mman *mman;
  51.         struct radeon_device *rdev;
  52.  
  53.         mman = container_of(bdev, struct radeon_mman, bdev);
  54.         rdev = container_of(mman, struct radeon_device, mman);
  55.         return rdev;
  56. }
  57.  
  58.  
  59. /*
  60.  * Global memory.
  61.  */
  62. static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
  63. {
  64.         return ttm_mem_global_init(ref->object);
  65. }
  66.  
  67. static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
  68. {
  69.         ttm_mem_global_release(ref->object);
  70. }
  71.  
/*
 * Take process-wide references on the two TTM global singletons:
 * first the memory-accounting object, then the buffer-object bookkeeping
 * object (which is fed the accounting object it depends on).
 *
 * Returns 0 on success or the drm_global_item_ref() error.  If the second
 * reference fails, the first one is dropped before returning so no
 * reference leaks.
 */
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
        struct drm_global_reference *global_ref;
        int r;

    ENTER();

        rdev->mman.mem_global_referenced = false;
        /* 1) TTM memory accounting singleton */
        global_ref = &rdev->mman.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &radeon_ttm_mem_global_init;
        global_ref->release = &radeon_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                /* NOTE(review): returns without LEAVE(); confirm the trace
                 * macros tolerate an unbalanced ENTER/LEAVE pair. */
                DRM_ERROR("Failed setting up TTM memory accounting "
                          "subsystem.\n");
                return r;
        }

        /* 2) TTM BO singleton; must see the accounting object from step 1 */
        rdev->mman.bo_global_ref.mem_glob =
                rdev->mman.mem_global_ref.object;
        global_ref = &rdev->mman.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                /* undo step 1 so we fail cleanly */
                drm_global_item_unref(&rdev->mman.mem_global_ref);
                return r;
        }

        rdev->mman.mem_global_referenced = true;

    LEAVE();

        return 0;
}
  112.  
  113.  
  114.  
  115. static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
  116. {
  117.         return 0;
  118. }
  119.  
/*
 * Describe one TTM memory pool to the core: manager callbacks, GPU base
 * offset, allowed/default caching modes and mappability flags.
 * Handles TTM_PL_SYSTEM, TTM_PL_TT (with AGP override when built in) and
 * TTM_PL_VRAM; any other type is rejected with -EINVAL.
 */
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                struct ttm_mem_type_manager *man)
{
        struct radeon_device *rdev;

    ENTER();

        rdev = radeon_get_rdev(bdev);

        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
                /* GPU-translated system pages (GART) */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.gtt_start;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
                if (rdev->flags & RADEON_IS_AGP) {
                        if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
                                /* NOTE(review): early return skips LEAVE();
                                 * confirm trace macros tolerate this. */
                                DRM_ERROR("AGP is not enabled for memory type %u\n",
                                          (unsigned)type);
                                return -EINVAL;
                        }
                        /* drop the CMA flag when the aperture is usable */
                        if (!rdev->ddev->agp->cant_use_aperture)
                                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        /* AGP must not be cached by the CPU */
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                }
#endif
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }

    LEAVE();

        return 0;
}
  175.  
/*
 * Pick the placement a buffer should be evicted to.  Foreign (non-radeon)
 * BOs get a generic cached-system placement; radeon BOs in VRAM move to
 * GTT when the GFX ring can blit, otherwise to CPU memory; everything
 * else falls back to the CPU domain.
 */
static void radeon_evict_flags(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        struct radeon_bo *rbo;
        /* shared fallback: system domain, any caching (static: one copy
         * serves every caller since it is never modified) */
        static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

        if (!radeon_ttm_bo_is_radeon_bo(bo)) {
                placement->fpfn = 0;
                placement->lpfn = 0;
                placement->placement = &placements;
                placement->busy_placement = &placements;
                placement->num_placement = 1;
                placement->num_busy_placement = 1;
                return;
        }
        rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                /* no ready GFX ring means no blit path for the eviction copy */
                if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
                else
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                break;
        case TTM_PL_TT:
        default:
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
        }
        /* hand the computed per-BO placement back to TTM */
        *placement = rbo->placement;
}
  205.  
  206. static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
  207. {
  208.         return 0;
  209. }
  210.  
  211. static void radeon_move_null(struct ttm_buffer_object *bo,
  212.                              struct ttm_mem_reg *new_mem)
  213. {
  214.         struct ttm_mem_reg *old_mem = &bo->mem;
  215.  
  216.         BUG_ON(old_mem->mm_node != NULL);
  217.         *old_mem = *new_mem;
  218.         new_mem->mm_node = NULL;
  219. }
  220.  
/* io_mem_free hook: no per-mapping resources to release in this port. */
static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
  224.  
  225. static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
  226. {
  227.         return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
  228. }
  229.  
  230. static int radeon_sync_obj_flush(void *sync_obj)
  231. {
  232.         return 0;
  233. }
  234.  
  235. static void radeon_sync_obj_unref(void **sync_obj)
  236. {
  237.         radeon_fence_unref((struct radeon_fence **)sync_obj);
  238. }
  239.  
  240. static void *radeon_sync_obj_ref(void *sync_obj)
  241. {
  242.         return radeon_fence_ref((struct radeon_fence *)sync_obj);
  243. }
  244.  
  245. static bool radeon_sync_obj_signaled(void *sync_obj)
  246. {
  247.         return radeon_fence_signaled((struct radeon_fence *)sync_obj);
  248. }
  249.  
/*
 * TTM backend functions.
 */
/* Per-TT state for the non-AGP GART backend. */
struct radeon_ttm_tt {
        struct ttm_dma_tt               ttm;    /* embedded DMA TTM; code casts ttm_tt* back to this struct */
        struct radeon_device            *rdev;  /* device whose GART the pages bind into */
        u64                             offset; /* GART byte offset of the binding (set in backend_bind) */
};
  258.  
  259. static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
  260.                                    struct ttm_mem_reg *bo_mem)
  261. {
  262.         struct radeon_ttm_tt *gtt = (void*)ttm;
  263.         int r;
  264.  
  265.         gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
  266.         if (!ttm->num_pages) {
  267.                 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
  268.                      ttm->num_pages, bo_mem, ttm);
  269.         }
  270.         r = radeon_gart_bind(gtt->rdev, gtt->offset,
  271.                              ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
  272.         if (r) {
  273.                 DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
  274.                           ttm->num_pages, (unsigned)gtt->offset);
  275.                 return r;
  276.         }
  277.         return 0;
  278. }
  279.  
  280. static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
  281. {
  282.         struct radeon_ttm_tt *gtt = (void *)ttm;
  283.  
  284.         radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
  285.         return 0;
  286. }
  287.  
  288. static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
  289. {
  290.         struct radeon_ttm_tt *gtt = (void *)ttm;
  291.  
  292.         ttm_dma_tt_fini(&gtt->ttm);
  293.         kfree(gtt);
  294. }
  295.  
/* Backend vtable wiring the non-AGP GART bind/unbind/destroy hooks above. */
static struct ttm_backend_func radeon_backend_func = {
        .bind = &radeon_ttm_backend_bind,
        .unbind = &radeon_ttm_backend_unbind,
        .destroy = &radeon_ttm_backend_destroy,
};
  301.  
  302. static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
  303.                                     unsigned long size, uint32_t page_flags,
  304.                                     struct page *dummy_read_page)
  305. {
  306.         struct radeon_device *rdev;
  307.         struct radeon_ttm_tt *gtt;
  308.  
  309.         rdev = radeon_get_rdev(bdev);
  310. #if __OS_HAS_AGP
  311.         if (rdev->flags & RADEON_IS_AGP) {
  312.                 return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
  313.                                          size, page_flags, dummy_read_page);
  314.         }
  315. #endif
  316.  
  317.         gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
  318.         if (gtt == NULL) {
  319.                 return NULL;
  320.         }
  321.         gtt->ttm.ttm.func = &radeon_backend_func;
  322.         gtt->rdev = rdev;
  323.         if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
  324.                 kfree(gtt);
  325.                 return NULL;
  326.         }
  327.         return &gtt->ttm.ttm;
  328. }
  329.  
/* TTM driver vtable for this port.  The commented-out entries are hooks
 * from the upstream Linux driver that have not been ported yet. */
static struct ttm_bo_driver radeon_bo_driver = {
        .ttm_tt_create = &radeon_ttm_tt_create,
//      .ttm_tt_populate = &radeon_ttm_tt_populate,
//      .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
//      .invalidate_caches = &radeon_invalidate_caches,
        .init_mem_type = &radeon_init_mem_type,
//      .evict_flags = &radeon_evict_flags,
//      .move = &radeon_bo_move,
//      .verify_access = &radeon_verify_access,
//      .sync_obj_signaled = &radeon_sync_obj_signaled,
//      .sync_obj_wait = &radeon_sync_obj_wait,
//      .sync_obj_flush = &radeon_sync_obj_flush,
//      .sync_obj_unref = &radeon_sync_obj_unref,
//      .sync_obj_ref = &radeon_sync_obj_ref,
//      .move_notify = &radeon_bo_move_notify,
//      .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
//      .io_mem_reserve = &radeon_ttm_io_mem_reserve,
//      .io_mem_free = &radeon_ttm_io_mem_free,
};
  349.  
  350. int radeon_ttm_init(struct radeon_device *rdev)
  351. {
  352.         int r;
  353.  
  354.     ENTER();
  355.  
  356.         r = radeon_ttm_global_init(rdev);
  357.         if (r) {
  358.                 return r;
  359.         }
  360.         /* No others user of address space so set it to 0 */
  361.         r = ttm_bo_device_init(&rdev->mman.bdev,
  362.                                rdev->mman.bo_global_ref.ref.object,
  363.                                &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
  364.                                rdev->need_dma32);
  365.         if (r) {
  366.                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
  367.                 return r;
  368.         }
  369.         rdev->mman.initialized = true;
  370.         r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
  371.                                 rdev->mc.real_vram_size >> PAGE_SHIFT);
  372.         if (r) {
  373.                 DRM_ERROR("Failed initializing VRAM heap.\n");
  374.                 return r;
  375.         }
  376.  
  377. //   r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
  378. //               RADEON_GEM_DOMAIN_VRAM,
  379. //                NULL, &rdev->stollen_vga_memory);
  380. //   if (r) {
  381. //       return r;
  382. //   }
  383. //   r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
  384. //   if (r)
  385. //       return r;
  386. //   r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
  387. //   radeon_bo_unreserve(rdev->stollen_vga_memory);
  388. //   if (r) {
  389. //       radeon_bo_unref(&rdev->stollen_vga_memory);
  390. //       return r;
  391. //   }
  392.  
  393.         DRM_INFO("radeon: %uM of VRAM memory ready\n",
  394.                  (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
  395.         r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
  396.                                 rdev->mc.gtt_size >> PAGE_SHIFT);
  397.         if (r) {
  398.                 DRM_ERROR("Failed initializing GTT heap.\n");
  399.                 return r;
  400.         }
  401.         DRM_INFO("radeon: %uM of GTT memory ready.\n",
  402.                  (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
  403.                 rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
  404.  
  405.     LEAVE();
  406.  
  407.     return 0;
  408. }
  409.  
  410.  
  411. /* this should only be called at bootup or when userspace
  412.  * isn't running */
  413. void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
  414. {
  415.         struct ttm_mem_type_manager *man;
  416.  
  417.         if (!rdev->mman.initialized)
  418.                 return;
  419.  
  420.         man = &rdev->mman.bdev.man[TTM_PL_VRAM];
  421.         /* this just adjusts TTM size idea, which sets lpfn to the correct value */
  422.         man->size = size >> PAGE_SHIFT;
  423. }
  424.  
/* mmap fault-path hooks carried over from the Linux driver; not
 * referenced anywhere in the visible portion of this file — presumably
 * used by an unported mmap path (TODO confirm). */
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;
  427.  
  428. #if 0
  429.  
  430. radeon_bo_init
  431. {
  432.     <6>[drm] Detected VRAM RAM=1024M, BAR=256M
  433.     <6>[drm] RAM width 128bits DDR
  434.  
  435.     radeon_ttm_init
  436.     {
  437.         radeon_ttm_global_init
  438.         {
  439.             radeon_ttm_mem_global_init
  440.  
  441.             ttm_bo_global_init
  442.         }
  443.  
  444.         ttm_bo_device_init
  445.         {
  446.             ttm_bo_init_mm
  447.             {
  448.                 radeon_init_mem_type
  449.             };
  450.         }
  451.  
  452.         ttm_bo_init_mm
  453.         {
  454.             radeon_init_mem_type
  455.  
  456.             ttm_bo_man_init
  457.         }
  458.  
  459.         <6>[drm] radeon: 1024M of VRAM memory ready
  460.  
  461.         ttm_bo_init_mm
  462.         {
  463.             radeon_init_mem_type
  464.  
  465.             ttm_bo_man_init
  466.         }
  467.  
  468.         <6>[drm] radeon: 512M of GTT memory ready.
  469.     }
  470. };
  471.  
  472. #endif
  473.  
  474.  
  475.  
  476.  
  477.