/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

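/*
 * Local copies of the TTM placement indices and flag bits.  In the Linux
 * kernel these come from ttm/ttm_placement.h; they appear to be duplicated
 * here because this port does not pull in the TTM headers.
 */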
#define TTM_PL_SYSTEM           0
#define TTM_PL_TT               1
#define TTM_PL_VRAM             2
#define TTM_PL_PRIV0            3
#define TTM_PL_PRIV1            4
#define TTM_PL_PRIV2            5
#define TTM_PL_PRIV3            6
#define TTM_PL_PRIV4            7
#define TTM_PL_PRIV5            8
#define TTM_PL_SWAPPED          15

#define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
#define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
#define TTM_PL_MASK_MEM         0x0000FFFF

int radeon_gem_object_init(struct drm_gem_object *obj)
{
        /* we do nothing here */
        return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_object *robj = gobj->driver_private;

        gobj->driver_private = NULL;
        if (robj) {
//       radeon_object_unref(&robj);
        }
}

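/*
 * Allocate a GEM object of @size bytes backed by a radeon_object placed in
 * @initial_domain.  The alignment is clamped to at least PAGE_SIZE; note
 * that in this revision it is not forwarded to radeon_object_create().
 * On success the radeon_object is stashed in gobj->driver_private and the
 * GEM object is returned through @obj.
 */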
int radeon_gem_object_create(struct radeon_device *rdev, int size,
                             int alignment, int initial_domain,
                             bool discardable, bool kernel,
                             bool interruptible,
                             struct drm_gem_object **obj)
{
        struct drm_gem_object *gobj;
        struct radeon_object *robj;
        int r;

        *obj = NULL;
        gobj = drm_gem_object_alloc(rdev->ddev, size);
        if (!gobj) {
                return -ENOMEM;
        }
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }
        r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
                                 interruptible, &robj);
        if (r) {
                DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
                          size, initial_domain, alignment);
//       mutex_lock(&rdev->ddev->struct_mutex);
//       drm_gem_object_unreference(gobj);
//       mutex_unlock(&rdev->ddev->struct_mutex);
                return r;
        }
        gobj->driver_private = robj;
        *obj = gobj;
        return 0;
}

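/*
 * Translate the requested GEM pin domain into the matching TTM placement
 * flag (VRAM, GTT, or system as a fallback) and pin the backing
 * radeon_object there, returning its GPU address through @gpu_addr.
 */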
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
                          uint64_t *gpu_addr)
{
        struct radeon_object *robj = obj->driver_private;
        uint32_t flags;

        switch (pin_domain) {
        case RADEON_GEM_DOMAIN_VRAM:
                flags = TTM_PL_FLAG_VRAM;
                break;
        case RADEON_GEM_DOMAIN_GTT:
                flags = TTM_PL_FLAG_TT;
                break;
        default:
                flags = TTM_PL_FLAG_SYSTEM;
                break;
        }
        return radeon_object_pin(robj, flags, gpu_addr);
}

void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
        struct radeon_object *robj = obj->driver_private;
//   radeon_object_unpin(robj);
}

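/*
 * Validate the object into the requested domain: the write domain wins if
 * both are given.  For the CPU domain the object would normally be waited
 * on until idle, but the wait call is still stubbed out in this port.
 */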
int radeon_gem_set_domain(struct drm_gem_object *gobj,
                          uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_object *robj;
        uint32_t domain;
        int r = 0;      /* wait below is stubbed out, so default to success */

        /* FIXME: reimplement */
        robj = gobj->driver_private;
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                printk(KERN_WARNING "Set domain without domain!\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access, wait for the object to be idle */
//       r = radeon_object_wait(robj);
                if (r) {
                        printk(KERN_ERR "Failed to wait for object!\n");
                        return r;
                }
        }
        return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
 //  radeon_object_force_delete(rdev);
}

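/*
 * The GEM ioctls below are carried over from the Linux radeon driver but
 * compiled out here, presumably because this port has no DRM ioctl path
 * yet.  They are kept for reference.
 */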
#if 0
/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;

        args->vram_size = rdev->mc.real_vram_size;
        /* FIXME: report something that makes sense */
        args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
        args->gart_size = rdev->mc.gtt_size;
        return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

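/*
 * Create a GEM object for userspace: round the requested size up to a page
 * multiple, allocate the object, and return a handle.  Once the handle has
 * been created the local reference is dropped so the handle owns the object.
 */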
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, false,
                                     false, true, &gobj);
        if (r) {
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        if (r) {
                mutex_lock(&dev->struct_mutex);
                drm_gem_object_unreference(gobj);
                mutex_unlock(&dev->struct_mutex);
                return r;
        }
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_handle_unreference(gobj);
        mutex_unlock(&dev->struct_mutex);
        args->handle = handle;
        return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        struct radeon_object *robj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -EINVAL;
        }
        robj = gobj->driver_private;

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(gobj);
        mutex_unlock(&dev->struct_mutex);
        return r;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;
        struct drm_gem_object *gobj;
        struct radeon_object *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -EINVAL;
        }
        robj = gobj->driver_private;
        r = radeon_object_mmap(robj, &args->addr_ptr);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(gobj);
        mutex_unlock(&dev->struct_mutex);
        return r;
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        /* FIXME: implement */
        return 0;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_object *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -EINVAL;
        }
        robj = gobj->driver_private;
        r = radeon_object_wait(robj);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(gobj);
        mutex_unlock(&dev->struct_mutex);
        return r;
}

#endif