/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
//#include <linux/list.h>
//#include <drm/drmP.h>

#include "radeon_drm.h"
#include "radeon.h"
#include <drm_mm.h>

int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
             int pages, u32_t *pagelist);


#define TTM_PL_SYSTEM           0
#define TTM_PL_TT               1
#define TTM_PL_VRAM             2
#define TTM_PL_PRIV0            3
#define TTM_PL_PRIV1            4
#define TTM_PL_PRIV2            5
#define TTM_PL_PRIV3            6
#define TTM_PL_PRIV4            7
#define TTM_PL_PRIV5            8
#define TTM_PL_SWAPPED          15

#define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
#define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
#define TTM_PL_MASK_MEM         0x0000FFFF


struct ttm_mem_type_manager {

    /*
     * No protection. Constant from start.
     */

    bool            has_type;
    bool            use_type;
    uint32_t        flags;
    unsigned long   gpu_offset;
    unsigned long   io_offset;
    unsigned long   io_size;
    void            *io_addr;
    uint64_t        size;
    uint32_t        available_caching;
    uint32_t        default_caching;

    /*
     * Protected by the bdev->lru_lock.
     * TODO: Consider one lru_lock per ttm_mem_type_manager.
     * Plays ill with list removal, though.
     */

    struct drm_mm manager;
    struct list_head lru;
};

struct ttm_bo_driver {
    const uint32_t      *mem_type_prio;
    const uint32_t      *mem_busy_prio;
    uint32_t             num_mem_type_prio;
    uint32_t             num_mem_busy_prio;

    /**
     * struct ttm_bo_driver member create_ttm_backend_entry
     *
     * @bdev: The buffer object device.
     *
     * Create a driver specific struct ttm_backend.
     */

//    struct ttm_backend *(*create_ttm_backend_entry)(struct ttm_bo_device *bdev);

    /**
     * struct ttm_bo_driver member invalidate_caches
     *
     * @bdev: the buffer object device.
     * @flags: new placement of the rebound buffer object.
     *
     * A previously evicted buffer has been rebound in a
     * potentially new location. Tell the driver that it might
     * consider invalidating read (texture) caches on the next command
     * submission as a consequence.
     */

//    int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
//    int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
//                  struct ttm_mem_type_manager *man);
    /**
     * struct ttm_bo_driver member evict_flags:
     *
     * @bo: the buffer object to be evicted
     *
     * Return the bo flags for a buffer which is not mapped to the hardware.
     * These will be placed in proposed_flags so that when the move is
     * finished, they'll end up in bo->mem.flags
     */

//     uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
    /**
     * struct ttm_bo_driver member move:
     *
     * @bo: the buffer to move
     * @evict: whether this motion is evicting the buffer from
     * the graphics address space
     * @interruptible: Use interruptible sleeps if possible when sleeping.
     * @no_wait: whether this should give up and return -EBUSY
     * if this move would require sleeping
     * @new_mem: the new memory region receiving the buffer
     *
     * Move a buffer between two memory regions.
     */
//    int (*move) (struct ttm_buffer_object *bo,
//             bool evict, bool interruptible,
//             bool no_wait, struct ttm_mem_reg *new_mem);

    /**
     * struct ttm_bo_driver member verify_access
     *
     * @bo: Pointer to a buffer object.
     * @filp: Pointer to a struct file trying to access the object.
     *
     * Called from the map / write / read methods to verify that the
     * caller is permitted to access the buffer object.
     * This member may be set to NULL, which will refuse this kind of
     * access for all buffer objects.
     * This function should return 0 if access is granted, -EPERM otherwise.
     */
//    int (*verify_access) (struct ttm_buffer_object *bo,
//                  struct file *filp);

    /**
     * In case a driver writer dislikes the TTM fence objects,
     * the driver writer can replace those with sync objects of
     * his / her own. If it turns out that no driver writer is
     * using these, I suggest we remove these hooks and plug in
     * fences directly. The bo driver needs the following functionality:
     * See the corresponding functions in the fence object API
     * documentation.
     */

//    bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
//    int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
//                  bool lazy, bool interruptible);
//    int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
//    void (*sync_obj_unref) (void **sync_obj);
//    void *(*sync_obj_ref) (void *sync_obj);
};

#define TTM_NUM_MEM_TYPES 8


struct ttm_bo_device {

    /*
     * Constant after bo device init / atomic.
     */

//    struct ttm_mem_global *mem_glob;
    struct ttm_bo_driver *driver;
//    struct page *dummy_read_page;
//    struct ttm_mem_shrink shrink;

    size_t      ttm_bo_extra_size;
    size_t      ttm_bo_size;

//   rwlock_t vm_lock;
    /*
     * Protected by the vm lock.
     */
    struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
//   struct rb_root addr_space_rb;
    struct drm_mm       addr_space_mm;

    /*
     * Might want to change this to one lock per manager.
     */
//   spinlock_t lru_lock;
    /*
     * Protected by the lru lock.
     */
    struct list_head ddestroy;
    struct list_head swap_lru;

    /*
     * Protected by load / firstopen / lastclose / unload sync.
     */

    bool nice_mode;
//   struct address_space *dev_mapping;

    /*
     * Internal protection.
     */

//   struct delayed_work wq;
};

struct ttm_mem_reg {
    struct drm_mm_node *mm_node;
    unsigned long       size;
    unsigned long       num_pages;
    uint32_t            page_alignment;
    uint32_t            mem_type;
    uint32_t            placement;
};

enum ttm_bo_type {
    ttm_bo_type_device,
    ttm_bo_type_user,
    ttm_bo_type_kernel
};

struct ttm_buffer_object {
    /**
     * Members constant at init.
     */

    struct ttm_bo_device   *bdev;
    unsigned long           buffer_start;
    enum ttm_bo_type        type;
    void (*destroy) (struct ttm_buffer_object *);
    unsigned long           num_pages;
    uint64_t                addr_space_offset;
    size_t                  acc_size;

    /**
     * Members not needing protection.
     */

//    struct kref kref;
//    struct kref list_kref;
//    wait_queue_head_t event_queue;
//    spinlock_t lock;

    /**
     * Members protected by the bo::reserved lock.
     */

    uint32_t                proposed_placement;
    struct ttm_mem_reg      mem;
//    struct file *persistant_swap_storage;
//    struct ttm_tt *ttm;
    bool evicted;

    /**
     * Members protected by the bo::reserved lock only when written to.
     */

//    atomic_t cpu_writers;

    /**
     * Members protected by the bdev::lru_lock.
     */

    struct list_head lru;
    struct list_head ddestroy;
    struct list_head swap;
    uint32_t val_seq;
    bool seq_valid;

    /**
     * Members protected by the bdev::lru_lock
     * only when written to.
     */

//    atomic_t reserved;


    /**
     * Members protected by the bo::lock
     */

    void *sync_obj_arg;
    void *sync_obj;
    unsigned long priv_flags;

    /**
     * Members protected by the bdev::vm_lock
     */

//    struct rb_node vm_rb;
    struct drm_mm_node *vm_node;


    /**
     * Special members that are protected by the reserve lock
     * and the bo::lock when written to. Can be read with
     * either of these locks held.
     */

    unsigned long offset;
    uint32_t cur_placement;
};

struct radeon_object
{
    struct ttm_buffer_object    tobj;
    struct list_head            list;
    struct radeon_device        *rdev;
//   struct drm_gem_object       *gobj;
//   struct ttm_bo_kmap_obj      kmap;

    unsigned                    pin_count;
    uint64_t                    gpu_addr;
    void                        *kptr;
    bool                        is_iomem;

    struct drm_mm_node  *mm_node;
    u32_t                vm_addr;
    u32_t                cpu_addr;
    u32_t                flags;
};
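
/*
 * Porting note (descriptive, not authoritative): unlike the upstream Linux
 * radeon_object, this KolibriOS version tracks placement directly in the
 * object. mm_node points into the static mm_vram/mm_gtt allocators defined
 * below, vm_addr is the page offset of the allocated block inside that heap,
 * and kptr/cpu_addr hold the CPU mapping created by radeon_object_pin()
 * (GTT objects) or radeon_object_kmap() (VRAM objects).
 */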




static struct drm_mm   mm_gtt;
static struct drm_mm   mm_vram;


int radeon_object_init(struct radeon_device *rdev)
{
    int r = 0;

    r = drm_mm_init(&mm_vram, 0x800000 >> PAGE_SHIFT,
               ((rdev->mc.aper_size - 0x800000) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    }

    r = drm_mm_init(&mm_gtt, 0, ((rdev->mc.gtt_size) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }

    return r;
 //   return radeon_ttm_init(rdev);
}
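
/*
 * Rough sketch of the heap layout set up above (assuming PAGE_SHIFT == 12):
 * the VRAM allocator starts at page 0x800000 >> 12 = 0x800, i.e. the first
 * 8 MiB of the aperture are kept out of the allocator (presumably reserved
 * for the scanout/framebuffer), and it spans (aper_size - 8 MiB) worth of
 * pages; the GTT allocator covers the whole gtt_size range from page 0.
 */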

static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
    uint32_t flags = 0;
    if (domain & RADEON_GEM_DOMAIN_VRAM) {
        flags |= TTM_PL_FLAG_VRAM;
    }
    if (domain & RADEON_GEM_DOMAIN_GTT) {
        flags |= TTM_PL_FLAG_TT;
    }
    if (domain & RADEON_GEM_DOMAIN_CPU) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    if (!flags) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    return flags;
}
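
/*
 * For example (values follow from the TTM_PL_FLAG_* defines above):
 *   radeon_object_flags_from_domain(RADEON_GEM_DOMAIN_VRAM)
 *       -> TTM_PL_FLAG_VRAM                     (0x4)
 *   radeon_object_flags_from_domain(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_CPU)
 *       -> TTM_PL_FLAG_TT | TTM_PL_FLAG_SYSTEM  (0x3)
 *   radeon_object_flags_from_domain(0)
 *       -> TTM_PL_FLAG_SYSTEM                   (0x1)
 */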


int radeon_object_create(struct radeon_device *rdev,
             struct drm_gem_object *gobj,
             unsigned long size,
             bool kernel,
             uint32_t domain,
             bool interruptible,
             struct radeon_object **robj_ptr)
{
    struct radeon_object *robj;
    enum ttm_bo_type type;
    uint32_t flags;
    int r = 0;

    dbgprintf("%s\n",__FUNCTION__);

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *robj_ptr = NULL;
    robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (robj == NULL) {
        return -ENOMEM;
    }
    robj->rdev = rdev;
//    robj->gobj = gobj;
    INIT_LIST_HEAD(&robj->list);

    flags = radeon_object_flags_from_domain(domain);

    robj->flags = flags;

    dbgprintf("robj flags %x\n", robj->flags);

    if( flags & TTM_PL_FLAG_VRAM)
    {
        size_t num_pages;

        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            printk("Illegal buffer object size.\n");
            return -EINVAL;
        }
retry_pre_get:
        r = drm_mm_pre_get(&mm_vram);

        if (unlikely(r != 0))
            return r;

        vm_node = drm_mm_search_free(&mm_vram, num_pages, 0, 0);

        if (unlikely(vm_node == NULL)) {
            r = -ENOMEM;
            return r;
        }

        robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);

        if (unlikely(robj->mm_node == NULL)) {
            goto retry_pre_get;
        }

        robj->vm_addr = ((uint32_t)robj->mm_node->start);

        dbgprintf("alloc vram: base %x size %x\n",
                   robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
    }

    if( flags & TTM_PL_FLAG_TT)
    {
        size_t num_pages;

        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            printk("Illegal buffer object size.\n");
            return -EINVAL;
        }
retry_pre_get1:
        r = drm_mm_pre_get(&mm_gtt);

        if (unlikely(r != 0))
            return r;

        vm_node = drm_mm_search_free(&mm_gtt, num_pages, 0, 0);

        if (unlikely(vm_node == NULL)) {
            r = -ENOMEM;
            return r;
        }

        robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);

        if (unlikely(robj->mm_node == NULL)) {
            goto retry_pre_get1;
        }

        robj->vm_addr = ((uint32_t)robj->mm_node->start);

        dbgprintf("alloc gtt: base %x size %x\n",
                   robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
    }

//   r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                  0, 0, false, NULL, size,
//                  &radeon_ttm_object_object_destroy);
    if (unlikely(r != 0)) {
        /* ttm calls radeon_ttm_object_object_destroy if an error happens */
        DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
              size, flags, 0);
        return r;
    }
    *robj_ptr = robj;
//   if (gobj) {
//       list_add_tail(&robj->list, &rdev->gem.objects);
//   }
    return 0;
}
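
/*
 * Minimal usage sketch (hypothetical caller; rdev is assumed to be an already
 * initialised struct radeon_device and radeon_object_init() to have run):
 *
 *     struct radeon_object *robj;
 *     int r;
 *
 *     r = radeon_object_create(rdev, NULL, 64*1024, true,
 *                              RADEON_GEM_DOMAIN_VRAM, false, &robj);
 *     if (r)
 *         return r;   // *robj_ptr stays NULL on failure
 *
 * Note that gobj and interruptible are currently unused in this port.
 */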

#define page_tabs  0xFDC00000

int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
              uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    int r = 0;

    dbgprintf("%s\n",__FUNCTION__);

//    flags = radeon_object_flags_from_domain(domain);
//   spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
//       spin_unlock(&robj->tobj.lock);
        return 0;
    }
//   spin_unlock(&robj->tobj.lock);
//    r = radeon_object_reserve(robj, false);
//    if (unlikely(r != 0)) {
//        DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
//        return r;
//    }
//    tmp = robj->tobj.mem.placement;
//    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
//    robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
//    r = ttm_buffer_object_validate(&robj->tobj,
//                       robj->tobj.proposed_placement,
//                       false, false);

    robj->gpu_addr = ((u64)robj->vm_addr) << PAGE_SHIFT;

    if(robj->flags & TTM_PL_FLAG_VRAM)
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
    else if (robj->flags & TTM_PL_FLAG_TT)
    {
        u32_t *pagelist;
        robj->kptr  = KernelAlloc( robj->mm_node->size << PAGE_SHIFT );
        dbgprintf("kernel alloc %x\n", robj->kptr );

        pagelist =  &((u32_t*)page_tabs)[(u32_t)robj->kptr >> 12];
        dbgprintf("pagelist %x\n", pagelist);
        radeon_gart_bind(robj->rdev, robj->gpu_addr,
                         robj->mm_node->size,  pagelist);
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
    }
    else
    {
        DRM_ERROR("Unknown placement %d\n", robj->flags);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        r = -1;
    }

//    flags & TTM_PL_FLAG_VRAM
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }

    dbgprintf("done %s\n",__FUNCTION__);

    return r;
}
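
/*
 * Hypothetical usage sketch: pinning resolves the object's GPU address
 * (heap offset plus mc.vram_location or mc.gtt_location) and, for GTT
 * objects, binds its pages through radeon_gart_bind():
 *
 *     uint64_t gpu_addr;
 *
 *     if (radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr) == 0)
 *         dbgprintf("object lives at GPU address %x\n", (u32_t)gpu_addr);
 *
 * The domain argument is currently ignored here; placement was fixed at
 * creation time via robj->flags.
 */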

int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
    int r = 0;

    dbgprintf("%s\n",__FUNCTION__);

//   spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        if (ptr) {
            *ptr = robj->kptr;
        }
//       spin_unlock(&robj->tobj.lock);
        return 0;
    }
//   spin_unlock(&robj->tobj.lock);

    if(robj->flags & TTM_PL_FLAG_VRAM)
    {
        robj->cpu_addr = robj->rdev->mc.aper_base +
                         (robj->vm_addr << PAGE_SHIFT);
        robj->kptr = (void*)MapIoMem(robj->cpu_addr,
                           robj->mm_node->size << 12, PG_SW);
        dbgprintf("map io mem %x at %x\n", robj->cpu_addr, robj->kptr);

    }
    else
    {
        return -1;
    }

    if (ptr) {
        *ptr = robj->kptr;
    }

    dbgprintf("done %s\n",__FUNCTION__);

    return 0;
}
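
/*
 * Non-authoritative note on CPU mappings in this port: VRAM objects are
 * mapped on demand here through the PCI aperture (MapIoMem on
 * mc.aper_base + (vm_addr << PAGE_SHIFT)), while GTT objects already got a
 * kernel pointer from KernelAlloc() inside radeon_object_pin(). A typical
 * sequence (size and rdev assumed to come from the caller) would be:
 *
 *     struct radeon_object *robj;
 *     uint64_t gpu_addr;
 *     void *cpu_ptr;
 *
 *     radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
 *                          false, &robj);
 *     radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *     radeon_object_kmap(robj, &cpu_ptr);
 */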


#if 0

void radeon_object_unpin(struct radeon_object *robj)
{
    uint32_t flags;
    int r;

//   spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
        return;
    }
//   spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
        return;
    }
    flags = robj->tobj.mem.placement;
    robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
    r = ttm_buffer_object_validate(&robj->tobj,
                       robj->tobj.proposed_placement,
                       false, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to unpin buffer.\n");
    }
    radeon_object_unreserve(robj);
}





/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
        return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
        ttm_bo_unreserve(&robj->tobj);
}

static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
        struct radeon_object *robj;

        robj = container_of(tobj, struct radeon_object, tobj);
//   list_del_init(&robj->list);
        kfree(robj);
}

static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
        /* Default gpu address */
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        if (robj->tobj.mem.mm_node == NULL) {
                return;
        }
        robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
        switch (robj->tobj.mem.mem_type) {
        case TTM_PL_VRAM:
                robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
                break;
        case TTM_PL_TT:
                robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
                robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
                return;
        }
}


int radeon_object_create(struct radeon_device *rdev,
                         struct drm_gem_object *gobj,
                         unsigned long size,
                         bool kernel,
                         uint32_t domain,
                         bool interruptible,
                         struct radeon_object **robj_ptr)
{
        struct radeon_object *robj;
        enum ttm_bo_type type;
        uint32_t flags;
        int r;

//   if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
//       rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
//   }
        if (kernel) {
                type = ttm_bo_type_kernel;
        } else {
                type = ttm_bo_type_device;
        }
        *robj_ptr = NULL;
        robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
        if (robj == NULL) {
                return -ENOMEM;
        }
        robj->rdev = rdev;
        robj->gobj = gobj;
//   INIT_LIST_HEAD(&robj->list);

        flags = radeon_object_flags_from_domain(domain);
//   r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                  0, 0, false, NULL, size,
//                  &radeon_ttm_object_object_destroy);
        if (unlikely(r != 0)) {
                /* ttm calls radeon_ttm_object_object_destroy if an error happens */
                DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
                          size, flags, 0);
                return r;
        }
        *robj_ptr = robj;
//   if (gobj) {
//       list_add_tail(&robj->list, &rdev->gem.objects);
//   }
        return 0;
}


int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
        int r;

//   spin_lock(&robj->tobj.lock);
        if (robj->kptr) {
                if (ptr) {
                        *ptr = robj->kptr;
                }
//       spin_unlock(&robj->tobj.lock);
                return 0;
        }
//   spin_unlock(&robj->tobj.lock);
        r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
        if (r) {
                return r;
        }
//   spin_lock(&robj->tobj.lock);
        robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
//   spin_unlock(&robj->tobj.lock);
        if (ptr) {
                *ptr = robj->kptr;
        }
        return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
//   spin_lock(&robj->tobj.lock);
        if (robj->kptr == NULL) {
//       spin_unlock(&robj->tobj.lock);
                return;
        }
        robj->kptr = NULL;
//   spin_unlock(&robj->tobj.lock);
        ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
        struct ttm_buffer_object *tobj;

        if ((*robj) == NULL) {
                return;
        }
        tobj = &((*robj)->tobj);
        ttm_bo_unref(&tobj);
        if (tobj == NULL) {
                *robj = NULL;
        }
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
        *offset = robj->tobj.addr_space_offset;
        return 0;
}


int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
                      uint64_t *gpu_addr)
{
        uint32_t flags;
        uint32_t tmp;
        int r;

        flags = radeon_object_flags_from_domain(domain);
//   spin_lock(&robj->tobj.lock);
        if (robj->pin_count) {
                robj->pin_count++;
                if (gpu_addr != NULL) {
                        *gpu_addr = robj->gpu_addr;
                }
//       spin_unlock(&robj->tobj.lock);
                return 0;
        }
//   spin_unlock(&robj->tobj.lock);
        r = radeon_object_reserve(robj, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
                return r;
        }
        tmp = robj->tobj.mem.placement;
        ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
        robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
        r = ttm_buffer_object_validate(&robj->tobj,
                                       robj->tobj.proposed_placement,
                                       false, false);
        radeon_object_gpu_addr(robj);
        if (gpu_addr != NULL) {
                *gpu_addr = robj->gpu_addr;
        }
        robj->pin_count = 1;
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to pin object.\n");
        }
        radeon_object_unreserve(robj);
        return r;
}

void radeon_object_unpin(struct radeon_object *robj)
{
        uint32_t flags;
        int r;

//   spin_lock(&robj->tobj.lock);
        if (!robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
                printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
                return;
        }
        robj->pin_count--;
        if (robj->pin_count) {
//       spin_unlock(&robj->tobj.lock);
                return;
        }
//   spin_unlock(&robj->tobj.lock);
        r = radeon_object_reserve(robj, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
                return;
        }
        flags = robj->tobj.mem.placement;
        robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_buffer_object_validate(&robj->tobj,
                                       robj->tobj.proposed_placement,
                                       false, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to unpin buffer.\n");
        }
        radeon_object_unreserve(robj);
}

int radeon_object_wait(struct radeon_object *robj)
{
        int r = 0;

        /* FIXME: should use block reservation instead */
        r = radeon_object_reserve(robj, true);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object for waiting.\n");
                return r;
        }
//   spin_lock(&robj->tobj.lock);
        if (robj->tobj.sync_obj) {
                r = ttm_bo_wait(&robj->tobj, true, false, false);
        }
//   spin_unlock(&robj->tobj.lock);
        radeon_object_unreserve(robj);
        return r;
}


int radeon_object_evict_vram(struct radeon_device *rdev)
{
        if (rdev->flags & RADEON_IS_IGP) {
                /* Useless to evict on IGP chips */
                return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_object_force_delete(struct radeon_device *rdev)
{
        struct radeon_object *robj, *n;
        struct drm_gem_object *gobj;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        DRM_ERROR("Userspace still has active objects !\n");
        list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
                mutex_lock(&rdev->ddev->struct_mutex);
                gobj = robj->gobj;
                DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
                          gobj, robj, (unsigned long)gobj->size,
                          *((unsigned long *)&gobj->refcount));
                list_del_init(&robj->list);
                radeon_object_unref(&robj);
                gobj->driver_private = NULL;
                drm_gem_object_unreference(gobj);
                mutex_unlock(&rdev->ddev->struct_mutex);
        }
}

void radeon_object_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
}

void radeon_object_list_add_object(struct radeon_object_list *lobj,
                                   struct list_head *head)
{
        if (lobj->wdomain) {
                list_add(&lobj->list, head);
        } else {
                list_add_tail(&lobj->list, head);
        }
}

int radeon_object_list_reserve(struct list_head *head)
{
        struct radeon_object_list *lobj;
        struct list_head *i;
        int r;

        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                if (!lobj->robj->pin_count) {
                        r = radeon_object_reserve(lobj->robj, true);
                        if (unlikely(r != 0)) {
                                DRM_ERROR("radeon: failed to reserve object.\n");
                                return r;
                        }
                } else {
                }
        }
        return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
        struct radeon_object_list *lobj;
        struct list_head *i;

        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                if (!lobj->robj->pin_count) {
                        radeon_object_unreserve(lobj->robj);
                } else {
                }
        }
}

int radeon_object_list_validate(struct list_head *head, void *fence)
{
        struct radeon_object_list *lobj;
        struct radeon_object *robj;
        struct radeon_fence *old_fence = NULL;
        struct list_head *i;
        uint32_t flags;
        int r;

        r = radeon_object_list_reserve(head);
        if (unlikely(r != 0)) {
                radeon_object_list_unreserve(head);
                return r;
        }
        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                robj = lobj->robj;
                if (lobj->wdomain) {
                        flags = radeon_object_flags_from_domain(lobj->wdomain);
                        flags |= TTM_PL_FLAG_TT;
                } else {
                        flags = radeon_object_flags_from_domain(lobj->rdomain);
                        flags |= TTM_PL_FLAG_TT;
                        flags |= TTM_PL_FLAG_VRAM;
                }
                if (!robj->pin_count) {
                        robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
                        r = ttm_buffer_object_validate(&robj->tobj,
                                                       robj->tobj.proposed_placement,
                                                       true, false);
                        if (unlikely(r)) {
                                radeon_object_list_unreserve(head);
                                DRM_ERROR("radeon: failed to validate.\n");
                                return r;
                        }
                        radeon_object_gpu_addr(robj);
                }
                lobj->gpu_offset = robj->gpu_addr;
                if (fence) {
                        old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
                        robj->tobj.sync_obj = radeon_fence_ref(fence);
                        robj->tobj.sync_obj_arg = NULL;
                }
                if (old_fence) {
                        radeon_fence_unref(&old_fence);
                }
        }
        return 0;
}

void radeon_object_list_unvalidate(struct list_head *head)
{
        struct radeon_object_list *lobj;
        struct radeon_fence *old_fence = NULL;
        struct list_head *i;

        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
                lobj->robj->tobj.sync_obj = NULL;
                if (old_fence) {
                        radeon_fence_unref(&old_fence);
                }
        }
        radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
        radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
                             struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &robj->tobj);
}

unsigned long radeon_object_size(struct radeon_object *robj)
{
        return robj->tobj.num_pages << PAGE_SHIFT;
}


#endif