Subversion Repositories Kolibri OS

Rev

Rev 5271 | Rev 6104 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright 2008 Advanced Micro Devices, Inc.
  3.  * Copyright 2008 Red Hat Inc.
  4.  * Copyright 2009 Jerome Glisse.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the "Software"),
  8.  * to deal in the Software without restriction, including without limitation
  9.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10.  * and/or sell copies of the Software, and to permit persons to whom the
  11.  * Software is furnished to do so, subject to the following conditions:
  12.  *
  13.  * The above copyright notice and this permission notice shall be included in
  14.  * all copies or substantial portions of the Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19.  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20.  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22.  * OTHER DEALINGS IN THE SOFTWARE.
  23.  *
  24.  * Authors: Dave Airlie
  25.  *          Alex Deucher
  26.  *          Jerome Glisse
  27.  */
  28. #include <drm/drmP.h>
  29. #include <drm/radeon_drm.h>
  30. #include "radeon.h"
  31.  
  32. void radeon_gem_object_free(struct drm_gem_object *gobj)
  33. {
  34.         struct radeon_bo *robj = gem_to_radeon_bo(gobj);
  35.  
  36.         if (robj) {
  37.                 radeon_bo_unref(&robj);
  38.         }
  39. }
  40.  
  41. int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
  42.                 int alignment, int initial_domain,
  43.                                 u32 flags, bool kernel,
  44.                 struct drm_gem_object **obj)
  45. {
  46.     struct radeon_bo *robj;
  47.         unsigned long max_size;
  48.         int r;
  49.  
  50.         *obj = NULL;
  51.         /* At least align on page size */
  52.         if (alignment < PAGE_SIZE) {
  53.                 alignment = PAGE_SIZE;
  54.         }
  55.  
  56.         /* Maximum bo size is the unpinned gtt size since we use the gtt to
  57.          * handle vram to system pool migrations.
  58.          */
  59.         max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
  60.         if (size > max_size) {
  61.                 DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
  62.                           size >> 20, max_size >> 20);
  63.                 return -ENOMEM;
  64.         }
  65.  
  66. retry:
  67.         r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
  68.                              flags, NULL, NULL, &robj);
  69.         if (r) {
  70.                 if (r != -ERESTARTSYS) {
  71.                         if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
  72.                                 initial_domain |= RADEON_GEM_DOMAIN_GTT;
  73.                                 goto retry;
  74.                         }
  75.                         DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
  76.                                   size, initial_domain, alignment, r);
  77.                 }
  78.         return r;
  79.         }
  80.         *obj = &robj->gem_base;
  81.  
  82.         mutex_lock(&rdev->gem.mutex);
  83.         list_add_tail(&robj->list, &rdev->gem.objects);
  84.         mutex_unlock(&rdev->gem.mutex);
  85.  
  86.         return 0;
  87. }
  88.  
  89. static int radeon_gem_set_domain(struct drm_gem_object *gobj,
  90.                           uint32_t rdomain, uint32_t wdomain)
  91. {
  92.         struct radeon_bo *robj;
  93.         uint32_t domain;
  94.         long r;
  95.  
  96.         /* FIXME: reeimplement */
  97.         robj = gem_to_radeon_bo(gobj);
  98.         /* work out where to validate the buffer to */
  99.         domain = wdomain;
  100.         if (!domain) {
  101.                 domain = rdomain;
  102.         }
  103.         if (!domain) {
  104.                 /* Do nothings */
  105.                 printk(KERN_WARNING "Set domain without domain !\n");
  106.                 return 0;
  107.         }
  108.         if (domain == RADEON_GEM_DOMAIN_CPU) {
  109.                 /* Asking for cpu access wait for object idle */
  110. //              r = radeon_bo_wait(robj, NULL, false);
  111. //              if (r) {
  112. //                      printk(KERN_ERR "Failed to wait for object !\n");
  113. //                      return r;
  114. //              }
  115.         }
  116.         return 0;
  117. }
  118.  
/* One-time GEM state initialisation: start with an empty list of tracked
 * buffer objects.  Always succeeds. */
int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}
  124.  
/* GEM teardown.  Forced deletion of leftover objects is not implemented in
 * this port, so this is currently a no-op (see disabled call below). */
void radeon_gem_fini(struct radeon_device *rdev)
{
 //  radeon_object_force_delete(rdev);
}
  129.  
  130. #if 0
  131. /*
  132.  * GEM ioctls.
  133.  */
  134. int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
  135.                           struct drm_file *filp)
  136. {
  137.         struct radeon_device *rdev = dev->dev_private;
  138.         struct drm_radeon_gem_info *args = data;
  139.         struct ttm_mem_type_manager *man;
  140.  
  141.         man = &rdev->mman.bdev.man[TTM_PL_VRAM];
  142.  
  143.         args->vram_size = rdev->mc.real_vram_size;
  144.         args->vram_visible = (u64)man->size << PAGE_SHIFT;
  145.         args->vram_visible -= rdev->vram_pin_size;
  146.         args->gart_size = rdev->mc.gtt_size;
  147.         args->gart_size -= rdev->gart_pin_size;
  148.  
  149.         return 0;
  150. }
  151.  
  152. int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
  153.                            struct drm_file *filp)
  154. {
  155.         /* TODO: implement */
  156.         DRM_ERROR("unimplemented %s\n", __func__);
  157.         return -ENOSYS;
  158. }
  159.  
  160. int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
  161.                             struct drm_file *filp)
  162. {
  163.         /* TODO: implement */
  164.         DRM_ERROR("unimplemented %s\n", __func__);
  165.         return -ENOSYS;
  166. }
  167.  
  168. int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
  169.                             struct drm_file *filp)
  170. {
  171.         struct radeon_device *rdev = dev->dev_private;
  172.         struct drm_radeon_gem_create *args = data;
  173.         struct drm_gem_object *gobj;
  174.         uint32_t handle;
  175.         int r;
  176.  
  177.         down_read(&rdev->exclusive_lock);
  178.         /* create a gem object to contain this object in */
  179.         args->size = roundup(args->size, PAGE_SIZE);
  180.         r = radeon_gem_object_create(rdev, args->size, args->alignment,
  181.                                      args->initial_domain, args->flags,
  182.                                         false, &gobj);
  183.         if (r) {
  184.                 up_read(&rdev->exclusive_lock);
  185.                 r = radeon_gem_handle_lockup(rdev, r);
  186.                 return r;
  187.         }
  188.         r = drm_gem_handle_create(filp, gobj, &handle);
  189.         /* drop reference from allocate - handle holds it now */
  190.         drm_gem_object_unreference_unlocked(gobj);
  191.         if (r) {
  192.                 up_read(&rdev->exclusive_lock);
  193.                 r = radeon_gem_handle_lockup(rdev, r);
  194.                 return r;
  195.         }
  196.         args->handle = handle;
  197.         up_read(&rdev->exclusive_lock);
  198.         return 0;
  199. }
  200.  
  201. int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
  202.                                 struct drm_file *filp)
  203. {
  204.         /* transition the BO to a domain -
  205.          * just validate the BO into a certain domain */
  206.         struct radeon_device *rdev = dev->dev_private;
  207.         struct drm_radeon_gem_set_domain *args = data;
  208.         struct drm_gem_object *gobj;
  209.         struct radeon_bo *robj;
  210.         int r;
  211.  
  212.         /* for now if someone requests domain CPU -
  213.          * just make sure the buffer is finished with */
  214.         down_read(&rdev->exclusive_lock);
  215.  
  216.         /* just do a BO wait for now */
  217.         gobj = drm_gem_object_lookup(dev, filp, args->handle);
  218.         if (gobj == NULL) {
  219.                 up_read(&rdev->exclusive_lock);
  220.                 return -ENOENT;
  221.         }
  222.         robj = gem_to_radeon_bo(gobj);
  223.  
  224.         r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
  225.  
  226.         drm_gem_object_unreference_unlocked(gobj);
  227.         up_read(&rdev->exclusive_lock);
  228.         r = radeon_gem_handle_lockup(robj->rdev, r);
  229.         return r;
  230. }
  231.  
  232. int radeon_mode_dumb_mmap(struct drm_file *filp,
  233.                           struct drm_device *dev,
  234.                           uint32_t handle, uint64_t *offset_p)
  235. {
  236.         struct drm_gem_object *gobj;
  237.         struct radeon_bo *robj;
  238.  
  239.         gobj = drm_gem_object_lookup(dev, filp, handle);
  240.         if (gobj == NULL) {
  241.                 return -ENOENT;
  242.         }
  243.         robj = gem_to_radeon_bo(gobj);
  244.         *offset_p = radeon_bo_mmap_offset(robj);
  245.         drm_gem_object_unreference_unlocked(gobj);
  246.         return 0;
  247. }
  248.  
/* GEM_MMAP ioctl: thin wrapper around radeon_mode_dumb_mmap(); writes the
 * mmap offset for args->handle into args->addr_ptr. */
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
  256.  
  257. int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
  258.                           struct drm_file *filp)
  259. {
  260.         struct radeon_device *rdev = dev->dev_private;
  261.         struct drm_radeon_gem_busy *args = data;
  262.         struct drm_gem_object *gobj;
  263.         struct radeon_bo *robj;
  264.         int r;
  265.         uint32_t cur_placement = 0;
  266.  
  267.         gobj = drm_gem_object_lookup(dev, filp, args->handle);
  268.         if (gobj == NULL) {
  269.                 return -ENOENT;
  270.         }
  271.         robj = gem_to_radeon_bo(gobj);
  272.         r = radeon_bo_wait(robj, &cur_placement, true);
  273.         args->domain = radeon_mem_type_to_domain(cur_placement);
  274.         drm_gem_object_unreference_unlocked(gobj);
  275.         r = radeon_gem_handle_lockup(rdev, r);
  276.         return r;
  277. }
  278.  
  279. int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
  280.                               struct drm_file *filp)
  281. {
  282.         struct radeon_device *rdev = dev->dev_private;
  283.         struct drm_radeon_gem_wait_idle *args = data;
  284.         struct drm_gem_object *gobj;
  285.         struct radeon_bo *robj;
  286.         int r = 0;
  287.         uint32_t cur_placement = 0;
  288.         long ret;
  289.  
  290.         gobj = drm_gem_object_lookup(dev, filp, args->handle);
  291.         if (gobj == NULL) {
  292.                 return -ENOENT;
  293.         }
  294.         robj = gem_to_radeon_bo(gobj);
  295.         r = radeon_bo_wait(robj, &cur_placement, false);
  296.         /* Flush HDP cache via MMIO if necessary */
  297.         if (rdev->asic->mmio_hdp_flush &&
  298.             radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
  299.                 robj->rdev->asic->mmio_hdp_flush(rdev);
  300.         drm_gem_object_unreference_unlocked(gobj);
  301.         r = radeon_gem_handle_lockup(rdev, r);
  302.         return r;
  303. }
  304.  
  305. int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
  306.                                 struct drm_file *filp)
  307. {
  308.         struct drm_radeon_gem_set_tiling *args = data;
  309.         struct drm_gem_object *gobj;
  310.         struct radeon_bo *robj;
  311.         int r = 0;
  312.  
  313.         DRM_DEBUG("%d \n", args->handle);
  314.         gobj = drm_gem_object_lookup(dev, filp, args->handle);
  315.         if (gobj == NULL)
  316.                 return -ENOENT;
  317.         robj = gem_to_radeon_bo(gobj);
  318.         r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
  319.         drm_gem_object_unreference_unlocked(gobj);
  320.         return r;
  321. }
  322.  
  323. #endif
  324.