Subversion Repositories Kolibri OS


#include <linux/list.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"

/* Single global range allocators for the GTT and VRAM apertures. */
static struct drm_mm   mm_gtt;
static struct drm_mm   mm_vram;

/* Carve num_pages out of the given allocator. drm_mm_get_block_atomic()
 * can still fail after a successful search if the pre-allocated node
 * cache is empty, so loop back and refill it via drm_mm_pre_get(). */
int drm_mm_alloc(struct drm_mm *mm, size_t num_pages,
                 struct drm_mm_node **node)
{
    struct drm_mm_node *vm_node;
    int    r;

retry_pre_get:

    r = drm_mm_pre_get(mm);
    if (unlikely(r != 0))
        return r;

    vm_node = drm_mm_search_free(mm, num_pages, 0, 0);
    if (unlikely(vm_node == NULL))
        return -ENOMEM;

    *node = drm_mm_get_block_atomic(vm_node, num_pages, 0);
    if (unlikely(*node == NULL))
        goto retry_pre_get;

    return 0;
}
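
/*
 * Usage sketch (illustrative only, not part of the original file): taking
 * sixteen pages from the GTT allocator and handing them back. Nothing here
 * is wired into the driver; the error-handling style mirrors the callers
 * below.
 */
#if 0
static int example_gtt_alloc(void)
{
    struct drm_mm_node *node;
    int r;

    r = drm_mm_alloc(&mm_gtt, 16, &node);  /* 16 pages = 64 KiB */
    if (r != 0)
        return r;                          /* -ENOMEM when the heap is full */

    /* ... use node->start (page offset) and node->size ... */

    drm_mm_put_block(node);                /* release the range */
    return 0;
}
#endif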

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
    u32 c = 0;

    rbo->placement.fpfn = 0;
    rbo->placement.lpfn = 0;
    rbo->placement.placement = rbo->placements;
    rbo->placement.busy_placement = rbo->placements;
    if (domain & RADEON_GEM_DOMAIN_VRAM)
        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                    TTM_PL_FLAG_VRAM;
    if (domain & RADEON_GEM_DOMAIN_GTT)
        rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
    if (domain & RADEON_GEM_DOMAIN_CPU)
        rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
    if (!c)
        rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
    rbo->placement.num_placement = c;
    rbo->placement.num_busy_placement = c;
}
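
/*
 * Worked example (derived from the function above): requesting
 * RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT fills two entries,
 * tried in order of preference:
 *
 *   placements[0] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
 *   placements[1] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 *
 * and sets num_placement = num_busy_placement = 2. A request with no
 * recognized domain bit falls back to a single cached system placement.
 */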

int radeon_bo_init(struct radeon_device *rdev)
{
    int r;

    DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
        (unsigned long long)rdev->mc.mc_vram_size >> 20,
        (unsigned long long)rdev->mc.aper_size >> 20);
    DRM_INFO("RAM width %dbits %cDR\n",
            rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');

    /* Keep the first 0xC00000 bytes (12 MiB) of VRAM out of the allocator;
     * radeon_fb_bo_create() below hands exactly that range to the
     * framebuffer object. */
    r = drm_mm_init(&mm_vram, 0xC00000 >> PAGE_SHIFT,
               ((rdev->mc.real_vram_size - 0xC00000) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    }

    r = drm_mm_init(&mm_gtt, 0, rdev->mc.gtt_size >> PAGE_SHIFT);
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }

    return 0;
}

/* Stub reservation: no locking is performed in this port; the counter is
 * simply reset and no_wait is ignored. */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
    bo->tbo.reserved.counter = 1;

    return 0;
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
    bo->reserved.counter = 1;
}

int radeon_bo_create(struct radeon_device *rdev,
                unsigned long size, int byte_align,
                bool kernel, u32 domain,
                struct radeon_bo **bo_ptr)
{
    enum ttm_bo_type type;

    struct radeon_bo   *bo;
    size_t num_pages;
    struct drm_mm      *mman;
    u32                 bo_domain;
    int r;

    num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

    if (num_pages == 0) {
        dbgprintf("Illegal buffer object size.\n");
        return -EINVAL;
    }

    if (domain & RADEON_GEM_DOMAIN_VRAM) {
        mman = &mm_vram;
        bo_domain = RADEON_GEM_DOMAIN_VRAM;
    } else if (domain & RADEON_GEM_DOMAIN_GTT) {
        mman = &mm_gtt;
        bo_domain = RADEON_GEM_DOMAIN_GTT;
    } else {
        return -EINVAL;
    }

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *bo_ptr = NULL;
    bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
    if (bo == NULL)
        return -ENOMEM;

    bo->rdev = rdev;
    bo->surface_reg = -1;
    bo->tbo.num_pages = num_pages;
    bo->domain = domain;

    INIT_LIST_HEAD(&bo->list);

//    radeon_ttm_placement_from_domain(bo, domain);
    /* Kernel allocations are uninterruptible */

    r = drm_mm_alloc(mman, num_pages, &bo->tbo.vm_node);
    if (unlikely(r != 0)) {
        kfree(bo);              /* don't leak the BO on allocation failure */
        return r;
    }

    *bo_ptr = bo;

    return 0;
}

#define page_tabs  0xFDC00000      /* just another hack */

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
    int r = 0;

    if (bo->pin_count) {
        bo->pin_count++;
        if (gpu_addr)
            *gpu_addr = radeon_bo_gpu_offset(bo);
        return 0;
    }

    bo->tbo.offset = bo->tbo.vm_node->start << PAGE_SHIFT;

    if (bo->domain & RADEON_GEM_DOMAIN_VRAM) {
        bo->tbo.offset += (u64)bo->rdev->mc.vram_start;
    } else if (bo->domain & RADEON_GEM_DOMAIN_GTT) {
        u32_t *pagelist;
        bo->kptr = KernelAlloc( bo->tbo.num_pages << PAGE_SHIFT );
        dbgprintf("kernel alloc %x\n", bo->kptr );

        /* Look the backing pages up in the fixed page-table window and
         * bind them into the GART at the buffer's GTT offset. */
        pagelist = &((u32_t*)page_tabs)[(u32_t)bo->kptr >> 12];
        dbgprintf("pagelist %x\n", pagelist);
        radeon_gart_bind(bo->rdev, bo->tbo.offset,
                         bo->tbo.vm_node->size, pagelist);
        bo->tbo.offset += (u64)bo->rdev->mc.gtt_start;
    } else {
        DRM_ERROR("Unknown placement %x\n", bo->domain);
        bo->tbo.offset = -1;
        r = -1;
    }

    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
        dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        return r;
    }

    bo->pin_count = 1;
    if (gpu_addr != NULL)
        *gpu_addr = radeon_bo_gpu_offset(bo);

    return r;
}
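
/*
 * Usage sketch (assumed call sequence, not taken from the original file):
 * create a small VRAM buffer, pin it to obtain its GPU address, then drop
 * the pin. Error paths are abbreviated.
 */
#if 0
static int example_create_and_pin(struct radeon_device *rdev)
{
    struct radeon_bo *bo;
    u64 gpu_addr;
    int r;

    r = radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
                         RADEON_GEM_DOMAIN_VRAM, &bo);
    if (r != 0)
        return r;

    r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    if (r != 0)
        return r;

    /* ... hand gpu_addr to the hardware ... */

    radeon_bo_unpin(bo);                   /* also frees the VRAM range */
    return 0;
}
#endif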

int radeon_bo_unpin(struct radeon_bo *bo)
{
    int r = 0;

    if (!bo->pin_count) {
        dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
        return 0;
    }
    bo->pin_count--;
    if (bo->pin_count)
        return 0;

    if (bo->tbo.vm_node) {
        drm_mm_put_block(bo->tbo.vm_node);
        bo->tbo.vm_node = NULL;
    }

    return r;
}

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
    if (bo->kptr) {
        if (ptr)
            *ptr = bo->kptr;
        return 0;
    }

    /* Only VRAM objects are mapped here; GTT objects receive their kernel
     * pointer from KernelAlloc() in radeon_bo_pin(). */
    if (bo->domain & RADEON_GEM_DOMAIN_VRAM) {
        bo->cpu_addr = bo->rdev->mc.aper_base +
                       (bo->tbo.vm_node->start << PAGE_SHIFT);
        bo->kptr = (void*)MapIoMem(bo->cpu_addr,
                        bo->tbo.vm_node->size << 12, PG_SW);
    } else {
        return -1;
    }

    if (ptr)
        *ptr = bo->kptr;

    return 0;
}
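
/*
 * Usage sketch (illustrative): CPU access to a VRAM object through the
 * mapping helpers above. memset() is assumed to be available in this
 * kernel environment.
 */
#if 0
static int example_clear_bo(struct radeon_bo *bo)
{
    void *ptr;
    int r;

    r = radeon_bo_kmap(bo, &ptr);
    if (r != 0)
        return r;

    memset(ptr, 0, bo->tbo.num_pages << PAGE_SHIFT);
    radeon_bo_kunmap(bo);
    return 0;
}
#endif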

void radeon_bo_kunmap(struct radeon_bo *bo)
{
    if (bo->kptr == NULL)
        return;

    if (bo->domain & RADEON_GEM_DOMAIN_VRAM)
        FreeKernelSpace(bo->kptr);

    bo->kptr = NULL;
}

/* Stub: the backing pages and mm node are not released here yet;
 * the reference is simply dropped. */
void radeon_bo_unref(struct radeon_bo **bo)
{
    if ((*bo) == NULL)
        return;

    *bo = NULL;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                uint32_t *tiling_flags,
                uint32_t *pitch)
{
//    BUG_ON(!atomic_read(&bo->tbo.reserved));
    if (tiling_flags)
        *tiling_flags = bo->tiling_flags;
    if (pitch)
        *pitch = bo->pitch;
}

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
    struct drm_gem_object *obj;

    BUG_ON((size & (PAGE_SIZE - 1)) != 0);

    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (obj == NULL)
        return NULL;

    obj->dev = dev;
    obj->size = size;
    return obj;
}

int radeon_fb_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
            unsigned long size, bool kernel, u32 domain,
            struct radeon_bo **bo_ptr)
{
    enum ttm_bo_type    type;

    struct radeon_bo    *bo;
    struct drm_mm_node  *vm_node;

    size_t  num_pages;

    num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

    if (num_pages == 0) {
        dbgprintf("Illegal buffer object size.\n");
        return -EINVAL;
    }

    if ((domain & RADEON_GEM_DOMAIN_VRAM) != RADEON_GEM_DOMAIN_VRAM)
        return -EINVAL;

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *bo_ptr = NULL;
    bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
    if (bo == NULL)
        return -ENOMEM;

    bo->rdev = rdev;
//    bo->gobj = gobj;
    bo->surface_reg = -1;
    bo->tbo.num_pages = num_pages;
    bo->domain = domain;

    INIT_LIST_HEAD(&bo->list);

//    radeon_ttm_placement_from_domain(bo, domain);
    /* Kernel allocations are uninterruptible */

    /* Fabricate an mm node covering the 12 MiB of VRAM that
     * radeon_bo_init() kept out of mm_vram. */
    vm_node = kzalloc(sizeof(*vm_node), GFP_KERNEL);
    if (vm_node == NULL) {
        kfree(bo);
        return -ENOMEM;
    }

    vm_node->size = 0xC00000 >> 12;
    vm_node->start = 0;
    vm_node->mm = NULL;

    bo->tbo.vm_node = vm_node;
    bo->tbo.offset  = bo->tbo.vm_node->start << PAGE_SHIFT;
    bo->tbo.offset += (u64)bo->rdev->mc.vram_start;
    bo->kptr        = (void*)0xFE000000;
    bo->pin_count   = 1;

    *bo_ptr = bo;

    return 0;
}
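
/*
 * Usage sketch (assumption: called once from mode-set code, which supplies
 * gobj): the framebuffer BO covers the first 0xC00000 bytes of VRAM, the
 * range radeon_bo_init() excluded from mm_vram, and arrives pre-pinned
 * with its CPU mapping at the fixed window 0xFE000000.
 */
#if 0
    struct radeon_bo *fb_bo;
    int r = radeon_fb_bo_create(rdev, gobj, 0xC00000, true,
                                RADEON_GEM_DOMAIN_VRAM, &fb_bo);
#endif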