Subversion Repositories Kolibri OS

Rev

Rev 5078 | Rev 5346 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright 2008 Advanced Micro Devices, Inc.
  3.  * Copyright 2008 Red Hat Inc.
  4.  * Copyright 2009 Jerome Glisse.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the "Software"),
  8.  * to deal in the Software without restriction, including without limitation
  9.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10.  * and/or sell copies of the Software, and to permit persons to whom the
  11.  * Software is furnished to do so, subject to the following conditions:
  12.  *
  13.  * The above copyright notice and this permission notice shall be included in
  14.  * all copies or substantial portions of the Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19.  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20.  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22.  * OTHER DEALINGS IN THE SOFTWARE.
  23.  *
  24.  * Authors: Dave Airlie
  25.  *          Alex Deucher
  26.  *          Jerome Glisse
  27.  */
  28. #include <drm/drmP.h>
  29. #include <drm/radeon_drm.h>
  30. #include "radeon.h"
  31.  
  32. void radeon_gem_object_free(struct drm_gem_object *gobj)
  33. {
  34.         struct radeon_bo *robj = gem_to_radeon_bo(gobj);
  35.  
  36.         if (robj) {
  37.                 radeon_bo_unref(&robj);
  38.         }
  39. }
  40.  
  41. int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
  42.                 int alignment, int initial_domain,
  43.                                 u32 flags, bool kernel,
  44.                 struct drm_gem_object **obj)
  45. {
  46.     struct radeon_bo *robj;
  47.         unsigned long max_size;
  48.         int r;
  49.  
  50.         *obj = NULL;
  51.         /* At least align on page size */
  52.         if (alignment < PAGE_SIZE) {
  53.                 alignment = PAGE_SIZE;
  54.         }
  55.  
  56.         /* Maximum bo size is the unpinned gtt size since we use the gtt to
  57.          * handle vram to system pool migrations.
  58.          */
  59.         max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
  60.         if (size > max_size) {
  61.                 DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
  62.                           size >> 20, max_size >> 20);
  63.                 return -ENOMEM;
  64.         }
  65.  
  66. retry:
  67.         r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
  68.                              flags, NULL, NULL, &robj);
  69.         if (r) {
  70.                 if (r != -ERESTARTSYS) {
  71.                         if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
  72.                                 initial_domain |= RADEON_GEM_DOMAIN_GTT;
  73.                                 goto retry;
  74.                         }
  75.                         DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
  76.                                   size, initial_domain, alignment, r);
  77.                 }
  78.         return r;
  79.         }
  80.         *obj = &robj->gem_base;
  81.  
  82.         mutex_lock(&rdev->gem.mutex);
  83.         list_add_tail(&robj->list, &rdev->gem.objects);
  84.         mutex_unlock(&rdev->gem.mutex);
  85.  
  86.         return 0;
  87. }
  88.  
  89. static int radeon_gem_set_domain(struct drm_gem_object *gobj,
  90.                           uint32_t rdomain, uint32_t wdomain)
  91. {
  92.         struct radeon_bo *robj;
  93.         uint32_t domain;
  94.         long r;
  95.  
  96.         /* FIXME: reeimplement */
  97.         robj = gem_to_radeon_bo(gobj);
  98.         /* work out where to validate the buffer to */
  99.         domain = wdomain;
  100.         if (!domain) {
  101.                 domain = rdomain;
  102.         }
  103.         if (!domain) {
  104.                 /* Do nothings */
  105.                 printk(KERN_WARNING "Set domain without domain !\n");
  106.                 return 0;
  107.         }
  108.         if (domain == RADEON_GEM_DOMAIN_CPU) {
  109.                 /* Asking for cpu access wait for object idle */
  110. //              r = radeon_bo_wait(robj, NULL, false);
  111. //              if (r) {
  112. //                      printk(KERN_ERR "Failed to wait for object !\n");
  113. //                      return r;
  114. //              }
  115.         }
  116.         return 0;
  117. }
  118.  
/* Initialize per-device GEM state: the list of tracked objects. Returns 0. */
int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}
  124.  
/*
 * Tear down per-device GEM state. Forced object deletion is not wired up
 * in this port (see commented call below), so this is currently a no-op.
 */
void radeon_gem_fini(struct radeon_device *rdev)
{
 //  radeon_object_force_delete(rdev);
}
  129.  
  130. #if 0
  131. /*
  132.  * GEM ioctls.
  133.  */
  134. int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
  135.                           struct drm_file *filp)
  136. {
  137.         struct radeon_device *rdev = dev->dev_private;
  138.         struct drm_radeon_gem_info *args = data;
  139.         struct ttm_mem_type_manager *man;
  140.  
  141.         man = &rdev->mman.bdev.man[TTM_PL_VRAM];
  142.  
  143.         args->vram_size = rdev->mc.real_vram_size;
  144.         args->vram_visible = (u64)man->size << PAGE_SHIFT;
  145.         args->vram_visible -= rdev->vram_pin_size;
  146.         args->gart_size = rdev->mc.gtt_size;
  147.         args->gart_size -= rdev->gart_pin_size;
  148.  
  149.         return 0;
  150. }
  151.  
  152. int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
  153.                            struct drm_file *filp)
  154. {
  155.         /* TODO: implement */
  156.         DRM_ERROR("unimplemented %s\n", __func__);
  157.         return -ENOSYS;
  158. }
  159.  
  160. int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
  161.                             struct drm_file *filp)
  162. {
  163.         /* TODO: implement */
  164.         DRM_ERROR("unimplemented %s\n", __func__);
  165.         return -ENOSYS;
  166. }
  167.  
  168. int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
  169.                             struct drm_file *filp)
  170. {
  171.         struct radeon_device *rdev = dev->dev_private;
  172.         struct drm_radeon_gem_create *args = data;
  173.         struct drm_gem_object *gobj;
  174.         uint32_t handle;
  175.         int r;
  176.  
  177.         down_read(&rdev->exclusive_lock);
  178.         /* create a gem object to contain this object in */
  179.         args->size = roundup(args->size, PAGE_SIZE);
  180.         r = radeon_gem_object_create(rdev, args->size, args->alignment,
  181.                                      args->initial_domain, args->flags,
  182.                                         false, &gobj);
  183.         if (r) {
  184.                 up_read(&rdev->exclusive_lock);
  185.                 r = radeon_gem_handle_lockup(rdev, r);
  186.                 return r;
  187.         }
  188.         r = drm_gem_handle_create(filp, gobj, &handle);
  189.         /* drop reference from allocate - handle holds it now */
  190.         drm_gem_object_unreference_unlocked(gobj);
  191.         if (r) {
  192.                 up_read(&rdev->exclusive_lock);
  193.                 r = radeon_gem_handle_lockup(rdev, r);
  194.                 return r;
  195.         }
  196.         args->handle = handle;
  197.         up_read(&rdev->exclusive_lock);
  198.         return 0;
  199. }
  200.  
  201. int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
  202.                                 struct drm_file *filp)
  203. {
  204.         /* transition the BO to a domain -
  205.          * just validate the BO into a certain domain */
  206.         struct radeon_device *rdev = dev->dev_private;
  207.         struct drm_radeon_gem_set_domain *args = data;
  208.         struct drm_gem_object *gobj;
  209.         struct radeon_bo *robj;
  210.         int r;
  211.  
  212.         /* for now if someone requests domain CPU -
  213.          * just make sure the buffer is finished with */
  214.         down_read(&rdev->exclusive_lock);
  215.  
  216.         /* just do a BO wait for now */
  217.         gobj = drm_gem_object_lookup(dev, filp, args->handle);
  218.         if (gobj == NULL) {
  219.                 up_read(&rdev->exclusive_lock);
  220.                 return -ENOENT;
  221.         }
  222.         robj = gem_to_radeon_bo(gobj);
  223.  
  224.         r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
  225.  
  226.         drm_gem_object_unreference_unlocked(gobj);
  227.         up_read(&rdev->exclusive_lock);
  228.         r = radeon_gem_handle_lockup(robj->rdev, r);
  229.         return r;
  230. }
  231.  
  232. static int radeon_mode_mmap(struct drm_file *filp,
  233.                           struct drm_device *dev,
  234.                             uint32_t handle, bool dumb,
  235.                             uint64_t *offset_p)
  236. {
  237.         struct drm_gem_object *gobj;
  238.         struct radeon_bo *robj;
  239.  
  240.         gobj = drm_gem_object_lookup(dev, filp, handle);
  241.         if (gobj == NULL) {
  242.                 return -ENOENT;
  243.         }
  244.  
  245.         /*
  246.          * We don't allow dumb mmaps on objects created using another
  247.          * interface.
  248.          */
  249.         WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
  250.                 "Illegal dumb map of GPU buffer.\n");
  251.  
  252.         robj = gem_to_radeon_bo(gobj);
  253.         *offset_p = radeon_bo_mmap_offset(robj);
  254.         drm_gem_object_unreference_unlocked(gobj);
  255.         return 0;
  256. }
  257.  
  258. int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
  259.                           struct drm_file *filp)
  260. {
  261.         struct drm_radeon_gem_mmap *args = data;
  262.  
  263.         return radeon_mode_mmap(filp, dev, args->handle, false,
  264.                                 &args->addr_ptr);
  265. }
  266.  
  267. int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
  268.                           struct drm_file *filp)
  269. {
  270.         struct radeon_device *rdev = dev->dev_private;
  271.         struct drm_radeon_gem_busy *args = data;
  272.         struct drm_gem_object *gobj;
  273.         struct radeon_bo *robj;
  274.         int r;
  275.         uint32_t cur_placement = 0;
  276.  
  277.         gobj = drm_gem_object_lookup(dev, filp, args->handle);
  278.         if (gobj == NULL) {
  279.                 return -ENOENT;
  280.         }
  281.         robj = gem_to_radeon_bo(gobj);
  282.         r = radeon_bo_wait(robj, &cur_placement, true);
  283.         args->domain = radeon_mem_type_to_domain(cur_placement);
  284.         drm_gem_object_unreference_unlocked(gobj);
  285.         r = radeon_gem_handle_lockup(rdev, r);
  286.         return r;
  287. }
  288.  
  289. int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
  290.                               struct drm_file *filp)
  291. {
  292.         struct radeon_device *rdev = dev->dev_private;
  293.         struct drm_radeon_gem_wait_idle *args = data;
  294.         struct drm_gem_object *gobj;
  295.         struct radeon_bo *robj;
  296.         int r = 0;
  297.         uint32_t cur_placement = 0;
  298.         long ret;
  299.  
  300.         gobj = drm_gem_object_lookup(dev, filp, args->handle);
  301.         if (gobj == NULL) {
  302.                 return -ENOENT;
  303.         }
  304.         robj = gem_to_radeon_bo(gobj);
  305.         r = radeon_bo_wait(robj, &cur_placement, false);
  306.         /* Flush HDP cache via MMIO if necessary */
  307.         if (rdev->asic->mmio_hdp_flush &&
  308.             radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
  309.                 robj->rdev->asic->mmio_hdp_flush(rdev);
  310.         drm_gem_object_unreference_unlocked(gobj);
  311.         r = radeon_gem_handle_lockup(rdev, r);
  312.         return r;
  313. }
  314.  
  315. int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
  316.                                 struct drm_file *filp)
  317. {
  318.         struct drm_radeon_gem_set_tiling *args = data;
  319.         struct drm_gem_object *gobj;
  320.         struct radeon_bo *robj;
  321.         int r = 0;
  322.  
  323.         DRM_DEBUG("%d \n", args->handle);
  324.         gobj = drm_gem_object_lookup(dev, filp, args->handle);
  325.         if (gobj == NULL)
  326.                 return -ENOENT;
  327.         robj = gem_to_radeon_bo(gobj);
  328.         r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
  329.         drm_gem_object_unreference_unlocked(gobj);
  330.         return r;
  331. }
  332.  
  333. #endif
  334.