Subversion Repositories Kolibri OS

Rev

Rev 1986 | Rev 5078 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright 2008 Advanced Micro Devices, Inc.
  3.  * Copyright 2008 Red Hat Inc.
  4.  * Copyright 2009 Jerome Glisse.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the "Software"),
  8.  * to deal in the Software without restriction, including without limitation
  9.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10.  * and/or sell copies of the Software, and to permit persons to whom the
  11.  * Software is furnished to do so, subject to the following conditions:
  12.  *
  13.  * The above copyright notice and this permission notice shall be included in
  14.  * all copies or substantial portions of the Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19.  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20.  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22.  * OTHER DEALINGS IN THE SOFTWARE.
  23.  *
  24.  * Authors: Dave Airlie
  25.  *          Alex Deucher
  26.  *          Jerome Glisse
  27.  */
  28. #include <drm/drmP.h>
  29. #include <drm/radeon_drm.h>
  30. #include "radeon.h"
  31.  
/* GEM object-init hook.  Allocation in this driver goes through
 * radeon_gem_object_create() instead, so reaching this path is a driver
 * bug: BUG() halts immediately and the return statement is unreachable. */
int radeon_gem_object_init(struct drm_gem_object *obj)
{
        BUG();

        return 0;
}
  38.  
  39. void radeon_gem_object_free(struct drm_gem_object *gobj)
  40. {
  41.         struct radeon_bo *robj = gem_to_radeon_bo(gobj);
  42.  
  43.         if (robj) {
  44.                 radeon_bo_unref(&robj);
  45.         }
  46. }
  47.  
  48. int radeon_gem_object_create(struct radeon_device *rdev, int size,
  49.                 int alignment, int initial_domain,
  50.                 bool discardable, bool kernel,
  51.                 struct drm_gem_object **obj)
  52. {
  53.     struct radeon_bo *robj;
  54.         unsigned long max_size;
  55.         int r;
  56.  
  57.         *obj = NULL;
  58.         /* At least align on page size */
  59.         if (alignment < PAGE_SIZE) {
  60.                 alignment = PAGE_SIZE;
  61.         }
  62.  
  63.         /* maximun bo size is the minimun btw visible vram and gtt size */
  64.         max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
  65.         if (size > max_size) {
  66.                 printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
  67.                        __func__, __LINE__, size >> 20, max_size >> 20);
  68.                 return -ENOMEM;
  69.         }
  70.  
  71. retry:
  72.         r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
  73.         if (r) {
  74.                 if (r != -ERESTARTSYS) {
  75.                         if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
  76.                                 initial_domain |= RADEON_GEM_DOMAIN_GTT;
  77.                                 goto retry;
  78.                         }
  79.                         DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
  80.                                   size, initial_domain, alignment, r);
  81.                 }
  82.         return r;
  83.         }
  84.         *obj = &robj->gem_base;
  85.  
  86.         mutex_lock(&rdev->gem.mutex);
  87.         list_add_tail(&robj->list, &rdev->gem.objects);
  88.         mutex_unlock(&rdev->gem.mutex);
  89.  
  90.         return 0;
  91. }
  92.  
  93. int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
  94.                           uint64_t *gpu_addr)
  95. {
  96.         struct radeon_bo *robj = gem_to_radeon_bo(obj);
  97.         int r;
  98.  
  99.         r = radeon_bo_reserve(robj, false);
  100.         if (unlikely(r != 0))
  101.                 return r;
  102.         r = radeon_bo_pin(robj, pin_domain, gpu_addr);
  103.         radeon_bo_unreserve(robj);
  104.         return r;
  105. }
  106.  
  107. void radeon_gem_object_unpin(struct drm_gem_object *obj)
  108. {
  109.         struct radeon_bo *robj = gem_to_radeon_bo(obj);
  110.         int r;
  111.  
  112.         r = radeon_bo_reserve(robj, false);
  113.         if (likely(r == 0)) {
  114.                 radeon_bo_unpin(robj);
  115.                 radeon_bo_unreserve(robj);
  116.         }
  117. }
  118.  
  119. int radeon_gem_set_domain(struct drm_gem_object *gobj,
  120.                           uint32_t rdomain, uint32_t wdomain)
  121. {
  122.         struct radeon_bo *robj;
  123.         uint32_t domain;
  124.         int r;
  125.  
  126.         /* FIXME: reeimplement */
  127.         robj = gem_to_radeon_bo(gobj);
  128.         /* work out where to validate the buffer to */
  129.         domain = wdomain;
  130.         if (!domain) {
  131.                 domain = rdomain;
  132.         }
  133.         if (!domain) {
  134.                 /* Do nothings */
  135.                 printk(KERN_WARNING "Set domain without domain !\n");
  136.                 return 0;
  137.         }
  138.         if (domain == RADEON_GEM_DOMAIN_CPU) {
  139.                 /* Asking for cpu access wait for object idle */
  140. //              r = radeon_bo_wait(robj, NULL, false);
  141. //              if (r) {
  142. //                      printk(KERN_ERR "Failed to wait for object !\n");
  143. //                      return r;
  144. //              }
  145.         }
  146.         return 0;
  147. }
  148.  
/* One-time GEM state setup: initialise the per-device list that tracks
 * all GEM-backed BOs (populated by radeon_gem_object_create()). */
int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}
  154.  
/* GEM teardown hook; forced object deletion is disabled in this port. */
void radeon_gem_fini(struct radeon_device *rdev)
{
 //  radeon_object_force_delete(rdev);
}
  159.  
  160. #if 0
  161. /*
  162.  * GEM ioctls.
  163.  */
/* DRM_IOCTL_RADEON_GEM_INFO: report VRAM/GTT sizes to userspace, with
 * already-committed allocations subtracted from the usable figures. */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;
        unsigned i;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];

        args->vram_size = rdev->mc.real_vram_size;
        /* Visible VRAM as managed by TTM (man->size is in pages). */
        args->vram_visible = (u64)man->size << PAGE_SHIFT;
        /* Subtract memory already spoken for: stolen VGA memory and the
         * fbdev framebuffer. */
        if (rdev->stollen_vga_memory)
                args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
        args->vram_visible -= radeon_fbdev_total_size(rdev);
        /* GTT minus one page, the IB pool (64KiB per IB), and each ring. */
        args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
        for(i = 0; i < RADEON_NUM_RINGS; ++i)
                args->gart_size -= rdev->ring[i].ring_size;
        return 0;
}
  184.  
/* DRM_IOCTL_RADEON_GEM_PREAD: not implemented by this driver. */
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}
  192.  
/* DRM_IOCTL_RADEON_GEM_PWRITE: not implemented by this driver. */
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}
  200.  
/*
 * DRM_IOCTL_RADEON_GEM_CREATE: allocate a GEM-backed BO for userspace
 * and return its handle in args->handle.
 */
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        /* NOTE(review): exclusive_lock is presumably the GPU-reset lock,
         * taken read-side for the whole allocation — confirm. */
        down_read(&rdev->exclusive_lock);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, false,
                                        false, &gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;
}
  233.  
  234. int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
  235.                                 struct drm_file *filp)
  236. {
  237.         /* transition the BO to a domain -
  238.          * just validate the BO into a certain domain */
  239.         struct radeon_device *rdev = dev->dev_private;
  240.         struct drm_radeon_gem_set_domain *args = data;
  241.         struct drm_gem_object *gobj;
  242.         struct radeon_bo *robj;
  243.         int r;
  244.  
  245.         /* for now if someone requests domain CPU -
  246.          * just make sure the buffer is finished with */
  247.         down_read(&rdev->exclusive_lock);
  248.  
  249.         /* just do a BO wait for now */
  250.         gobj = drm_gem_object_lookup(dev, filp, args->handle);
  251.         if (gobj == NULL) {
  252.                 up_read(&rdev->exclusive_lock);
  253.                 return -ENOENT;
  254.         }
  255.         robj = gem_to_radeon_bo(gobj);
  256.  
  257.         r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
  258.  
  259.         drm_gem_object_unreference_unlocked(gobj);
  260.         up_read(&rdev->exclusive_lock);
  261.         r = radeon_gem_handle_lockup(robj->rdev, r);
  262.         return r;
  263. }
  264.  
  265. int radeon_mode_dumb_mmap(struct drm_file *filp,
  266.                           struct drm_device *dev,
  267.                           uint32_t handle, uint64_t *offset_p)
  268. {
  269.         struct drm_gem_object *gobj;
  270.         struct radeon_bo *robj;
  271.  
  272.         gobj = drm_gem_object_lookup(dev, filp, handle);
  273.         if (gobj == NULL) {
  274.                 return -ENOENT;
  275.         }
  276.         robj = gem_to_radeon_bo(gobj);
  277.         *offset_p = radeon_bo_mmap_offset(robj);
  278.         drm_gem_object_unreference_unlocked(gobj);
  279.         return 0;
  280. }
  281.  
/* DRM_IOCTL_RADEON_GEM_MMAP: thin wrapper around radeon_mode_dumb_mmap()
 * that reports the mmap offset through args->addr_ptr. */
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
  289.  
/*
 * DRM_IOCTL_RADEON_GEM_BUSY: non-blocking busy check on a BO, also
 * reporting its current placement domain in args->domain.  Returns the
 * result of the no-wait radeon_bo_wait() (filtered through the lockup
 * handler).
 */
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        /* no_wait = true: just sample busy state and current placement */
        r = radeon_bo_wait(robj, &cur_placement, true);
        /* Translate the TTM placement into the GEM domain userspace sees. */
        switch (cur_placement) {
        case TTM_PL_VRAM:
                args->domain = RADEON_GEM_DOMAIN_VRAM;
                break;
        case TTM_PL_TT:
                args->domain = RADEON_GEM_DOMAIN_GTT;
                break;
        case TTM_PL_SYSTEM:
                args->domain = RADEON_GEM_DOMAIN_CPU;
                /* fall through */
        default:
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}
  322.  
/*
 * DRM_IOCTL_RADEON_GEM_WAIT_IDLE: block until the BO behind
 * args->handle is idle, then run the optional ASIC-specific
 * post-wait hook.
 */
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* callback hw specific functions if any */
        /* NOTE(review): the guard reads rdev->asic but the call goes through
         * robj->rdev->asic — presumably the same device; confirm and unify. */
        if (rdev->asic->ioctl_wait_idle)
                robj->rdev->asic->ioctl_wait_idle(rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}
  345.  
  346. int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
  347.                                 struct drm_file *filp)
  348. {
  349.         struct drm_radeon_gem_set_tiling *args = data;
  350.         struct drm_gem_object *gobj;
  351.         struct radeon_bo *robj;
  352.         int r = 0;
  353.  
  354.         DRM_DEBUG("%d \n", args->handle);
  355.         gobj = drm_gem_object_lookup(dev, filp, args->handle);
  356.         if (gobj == NULL)
  357.                 return -ENOENT;
  358.         robj = gem_to_radeon_bo(gobj);
  359.         r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
  360.         drm_gem_object_unreference_unlocked(gobj);
  361.         return r;
  362. }
  363.  
  364. #endif
  365.