
  1. /*
  2.  * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
  3.  * All Rights Reserved.
  4.  *
  5.  * Permission is hereby granted, free of charge, to any person obtaining
  6.  * a copy of this software and associated documentation files (the
  7.  * "Software"), to deal in the Software without restriction, including
  8.  * without limitation the rights to use, copy, modify, merge, publish,
  9.  * distribute, sub license, and/or sell copies of the Software, and to
  10.  * permit persons to whom the Software is furnished to do so, subject to
  11.  * the following conditions:
  12.  *
  13.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  14.  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
  15.  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  16.  * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
  17.  * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  18.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  20.  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  21.  *
  22.  * The above copyright notice and this permission notice (including the
  23.  * next paragraph) shall be included in all copies or substantial portions
  24.  * of the Software.
  25.  */
  26.  
  27. #include "radeon_drm_cs.h"
  28.  
  29. #include "util/u_hash_table.h"
  30. #include "util/u_memory.h"
  31. #include "util/simple_list.h"
  32. #include "util/list.h"
  33. #include "os/os_thread.h"
  34. #include "os/os_mman.h"
  35. #include "os/os_time.h"
  36.  
  37. #include "state_tracker/drm_driver.h"
  38.  
  39. #include <sys/ioctl.h>
  40. #include <xf86drm.h>
  41. #include <errno.h>
  42. #include <fcntl.h>
  43. #include <stdio.h>
  44.  
  45. static const struct pb_vtbl radeon_bo_vtbl;
  46.  
  47. static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
  48. {
  49.     assert(bo->vtbl == &radeon_bo_vtbl);
  50.     return (struct radeon_bo *)bo;
  51. }
  52.  
  53. struct radeon_bo_va_hole {
  54.     struct list_head list;
  55.     uint64_t         offset;
  56.     uint64_t         size;
  57. };
  58.  
  59. struct radeon_bomgr {
  60.     /* Base class. */
  61.     struct pb_manager base;
  62.  
  63.     /* Winsys. */
  64.     struct radeon_drm_winsys *rws;
  65.  
  66.     /* List of buffer GEM names. Protected by bo_handles_mutex. */
  67.     struct util_hash_table *bo_names;
  68.     /* List of buffer handles. Protected by bo_handles_mutex. */
  69.     struct util_hash_table *bo_handles;
  70.     /* List of buffer virtual memory ranges. Protected by bo_handles_mutex. */
  71.     struct util_hash_table *bo_vas;
  72.     pipe_mutex bo_handles_mutex;
  73.     pipe_mutex bo_va_mutex;
  74.  
  75.     /* Is virtual addressing (VM) supported? */
  76.     bool va;
  77.     uint64_t va_offset;
  78.     struct list_head va_holes;
  79. };
  80.  
  81. static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
  82. {
  83.     return (struct radeon_bomgr *)mgr;
  84. }
  85.  
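/* Return the radeon_bo backing a pb_buffer, unwrapping one level of
 * sub-allocation when the buffer does not use radeon_bo_vtbl directly. */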
  86. static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
  87. {
  88.     struct radeon_bo *bo = NULL;
  89.  
  90.     if (_buf->vtbl == &radeon_bo_vtbl) {
  91.         bo = radeon_bo(_buf);
  92.     } else {
  93.         struct pb_buffer *base_buf;
  94.         pb_size offset;
  95.         pb_get_base_buffer(_buf, &base_buf, &offset);
  96.  
  97.         if (base_buf->vtbl == &radeon_bo_vtbl)
  98.             bo = radeon_bo(base_buf);
  99.     }
  100.  
  101.     return bo;
  102. }
  103.  
  104. static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
  105. {
  106.     struct radeon_bo *bo = get_radeon_bo(_buf);
  107.     struct drm_radeon_gem_wait_idle args = {0};
  108.  
  109.     while (p_atomic_read(&bo->num_active_ioctls)) {
  110.         sched_yield();
  111.     }
  112.  
  113.     args.handle = bo->handle;
  114.     while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
  115.                            &args, sizeof(args)) == -EBUSY);
  116. }
  117.  
  118. static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
  119.                                  enum radeon_bo_usage usage)
  120. {
  121.     struct radeon_bo *bo = get_radeon_bo(_buf);
  122.     struct drm_radeon_gem_busy args = {0};
  123.  
  124.     if (p_atomic_read(&bo->num_active_ioctls)) {
  125.         return TRUE;
  126.     }
  127.  
  128.     args.handle = bo->handle;
  129.     return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
  130.                                &args, sizeof(args)) != 0;
  131. }
  132.  
  133. static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
  134. {
  135.     /* Mask out any domains the driver doesn't understand. */
  136.     domain &= RADEON_DOMAIN_VRAM_GTT;
  137.  
  138.     /* If no domain is set, we must set something... */
  139.     if (!domain)
  140.         domain = RADEON_DOMAIN_VRAM_GTT;
  141.  
  142.     return domain;
  143. }
  144.  
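/* Query the domain the kernel chose for the buffer at creation time.
 * DRM_RADEON_GEM_OP / GET_INITIAL_DOMAIN needs DRM minor >= 38; on older
 * kernels just report VRAM|GTT. */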
  145. static enum radeon_bo_domain radeon_bo_get_initial_domain(
  146.                 struct radeon_winsys_cs_handle *buf)
  147. {
  148.     struct radeon_bo *bo = (struct radeon_bo*)buf;
  149.     struct drm_radeon_gem_op args;
  150.  
  151.     if (bo->rws->info.drm_minor < 38)
  152.         return RADEON_DOMAIN_VRAM_GTT;
  153.  
  154.     memset(&args, 0, sizeof(args));
  155.     args.handle = bo->handle;
  156.     args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;
  157.  
  158.     drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
  159.                         &args, sizeof(args));
  160.  
  161.     /* GEM domains and winsys domains are defined the same. */
  162.     return get_valid_domain(args.value);
  163. }
  164.  
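/* Allocate a virtual address range of "size" bytes (4 KiB granularity,
 * at least 4 KiB aligned). Do a first-fit search of the hole list; if no
 * hole is large enough, carve the range out of the unused space above
 * mgr->va_offset. */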
  165. static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
  166. {
  167.     struct radeon_bo_va_hole *hole, *n;
  168.     uint64_t offset = 0, waste = 0;
  169.  
  170.     alignment = MAX2(alignment, 4096);
  171.     size = align(size, 4096);
  172.  
  173.     pipe_mutex_lock(mgr->bo_va_mutex);
  174.     /* first look for a hole */
  175.     LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
  176.         offset = hole->offset;
  177.         waste = offset % alignment;
  178.         waste = waste ? alignment - waste : 0;
  179.         offset += waste;
  180.         if (offset >= (hole->offset + hole->size)) {
  181.             continue;
  182.         }
  183.         if (!waste && hole->size == size) {
  184.             offset = hole->offset;
  185.             list_del(&hole->list);
  186.             FREE(hole);
  187.             pipe_mutex_unlock(mgr->bo_va_mutex);
  188.             return offset;
  189.         }
  190.         if ((hole->size - waste) > size) {
  191.             if (waste) {
  192.                 n = CALLOC_STRUCT(radeon_bo_va_hole);
  193.                 n->size = waste;
  194.                 n->offset = hole->offset;
  195.                 list_add(&n->list, &hole->list);
  196.             }
  197.             hole->size -= (size + waste);
  198.             hole->offset += size + waste;
  199.             pipe_mutex_unlock(mgr->bo_va_mutex);
  200.             return offset;
  201.         }
  202.         if ((hole->size - waste) == size) {
  203.             hole->size = waste;
  204.             pipe_mutex_unlock(mgr->bo_va_mutex);
  205.             return offset;
  206.         }
  207.     }
  208.  
  209.     offset = mgr->va_offset;
  210.     waste = offset % alignment;
  211.     waste = waste ? alignment - waste : 0;
  212.     if (waste) {
  213.         n = CALLOC_STRUCT(radeon_bo_va_hole);
  214.         n->size = waste;
  215.         n->offset = offset;
  216.         list_add(&n->list, &mgr->va_holes);
  217.     }
  218.     offset += waste;
  219.     mgr->va_offset += size + waste;
  220.     pipe_mutex_unlock(mgr->bo_va_mutex);
  221.     return offset;
  222. }
  223.  
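/* Return the range [va, va + size) to the allocator: either lower
 * mgr->va_offset if the range sits at the top of the address space, or
 * insert it into the hole list, merging with adjacent holes where possible. */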
  224. static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
  225. {
  226.     struct radeon_bo_va_hole *hole;
  227.  
  228.     size = align(size, 4096);
  229.  
  230.     pipe_mutex_lock(mgr->bo_va_mutex);
  231.     if ((va + size) == mgr->va_offset) {
  232.         mgr->va_offset = va;
  233.         /* Delete uppermost hole if it reaches the new top */
  234.         if (!LIST_IS_EMPTY(&mgr->va_holes)) {
  235.             hole = container_of(mgr->va_holes.next, hole, list);
  236.             if ((hole->offset + hole->size) == va) {
  237.                 mgr->va_offset = hole->offset;
  238.                 list_del(&hole->list);
  239.                 FREE(hole);
  240.             }
  241.         }
  242.     } else {
  243.         struct radeon_bo_va_hole *next;
  244.  
  245.         hole = container_of(&mgr->va_holes, hole, list);
  246.         LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
  247.             if (next->offset < va)
  248.                 break;
  249.             hole = next;
  250.         }
  251.  
  252.         if (&hole->list != &mgr->va_holes) {
  253.             /* Grow upper hole if it's adjacent */
  254.             if (hole->offset == (va + size)) {
  255.                 hole->offset = va;
  256.                 hole->size += size;
  257.                 /* Merge lower hole if it's adjacent */
  258.                 if (next != hole && &next->list != &mgr->va_holes &&
  259.                     (next->offset + next->size) == va) {
  260.                     next->size += hole->size;
  261.                     list_del(&hole->list);
  262.                     FREE(hole);
  263.                 }
  264.                 goto out;
  265.             }
  266.         }
  267.  
  268.         /* Grow lower hole if it's adjacent */
  269.         if (next != hole && &next->list != &mgr->va_holes &&
  270.             (next->offset + next->size) == va) {
  271.             next->size += size;
  272.             goto out;
  273.         }
  274.  
  275.         /* FIXME: on allocation failure we silently lose this virtual
  276.          * address range; consider printing a warning.
  277.          */
  278.         next = CALLOC_STRUCT(radeon_bo_va_hole);
  279.         if (next) {
  280.             next->size = size;
  281.             next->offset = va;
  282.             list_add(&next->list, &hole->list);
  283.         }
  284.     }
  285. out:
  286.     pipe_mutex_unlock(mgr->bo_va_mutex);
  287. }
  288.  
  289. static void radeon_bo_destroy(struct pb_buffer *_buf)
  290. {
  291.     struct radeon_bo *bo = radeon_bo(_buf);
  292.     struct radeon_bomgr *mgr = bo->mgr;
  293.     struct drm_gem_close args;
  294.  
  295.     memset(&args, 0, sizeof(args));
  296.  
  297.     pipe_mutex_lock(bo->mgr->bo_handles_mutex);
  298.     util_hash_table_remove(bo->mgr->bo_handles, (void*)(uintptr_t)bo->handle);
  299.     if (bo->flink_name) {
  300.         util_hash_table_remove(bo->mgr->bo_names,
  301.                                (void*)(uintptr_t)bo->flink_name);
  302.     }
  303.     pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
  304.  
  305.     if (bo->ptr)
  306.         os_munmap(bo->ptr, bo->base.size);
  307.  
  308.     /* Close object. */
  309.     args.handle = bo->handle;
  310.     drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
  311.  
  312.     if (mgr->va) {
  313.         radeon_bomgr_free_va(mgr, bo->va, bo->base.size);
  314.     }
  315.  
  316.     pipe_mutex_destroy(bo->map_mutex);
  317.  
  318.     if (bo->initial_domain & RADEON_DOMAIN_VRAM)
  319.         bo->rws->allocated_vram -= align(bo->base.size, 4096);
  320.     else if (bo->initial_domain & RADEON_DOMAIN_GTT)
  321.         bo->rws->allocated_gtt -= align(bo->base.size, 4096);
  322.     FREE(bo);
  323. }
  324.  
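/* CPU-map the buffer and cache the pointer in bo->ptr. For userptr BOs the
 * original user pointer is returned directly; otherwise DRM_RADEON_GEM_MMAP
 * provides the fake offset which is then os_mmap()ed. bo->map_mutex guards
 * against two threads mapping the buffer at the same time. */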
  325. void *radeon_bo_do_map(struct radeon_bo *bo)
  326. {
  327.     struct drm_radeon_gem_mmap args = {0};
  328.     void *ptr;
  329.  
  330.     /* If the buffer is created from user memory, return the user pointer. */
  331.     if (bo->user_ptr)
  332.         return bo->user_ptr;
  333.  
  334.     /* Return the pointer if it's already mapped. */
  335.     if (bo->ptr)
  336.         return bo->ptr;
  337.  
  338.     /* Map the buffer. */
  339.     pipe_mutex_lock(bo->map_mutex);
  340.     /* Return the pointer if it's already mapped (in case of a race). */
  341.     if (bo->ptr) {
  342.         pipe_mutex_unlock(bo->map_mutex);
  343.         return bo->ptr;
  344.     }
  345.     args.handle = bo->handle;
  346.     args.offset = 0;
  347.     args.size = (uint64_t)bo->base.size;
  348.     if (drmCommandWriteRead(bo->rws->fd,
  349.                             DRM_RADEON_GEM_MMAP,
  350.                             &args,
  351.                             sizeof(args))) {
  352.         pipe_mutex_unlock(bo->map_mutex);
  353.         fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
  354.                 bo, bo->handle);
  355.         return NULL;
  356.     }
  357.  
  358.     ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
  359.                bo->rws->fd, args.addr_ptr);
  360.     if (ptr == MAP_FAILED) {
  361.         pipe_mutex_unlock(bo->map_mutex);
  362.         fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
  363.         return NULL;
  364.     }
  365.     bo->ptr = ptr;
  366.     pipe_mutex_unlock(bo->map_mutex);
  367.  
  368.     return bo->ptr;
  369. }
  370.  
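/* Map the buffer for CPU access, honouring the PIPE_TRANSFER_* flags:
 * unless UNSYNCHRONIZED is set, flush or sync the command stream that
 * references the buffer and wait for the GPU, or return NULL immediately
 * when DONTBLOCK is set and the buffer is busy. */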
  371. static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
  372.                            struct radeon_winsys_cs *rcs,
  373.                            enum pipe_transfer_usage usage)
  374. {
  375.     struct radeon_bo *bo = (struct radeon_bo*)buf;
  376.     struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;
  377.  
  378.     /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
  379.     if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
  380.         /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
  381.         if (usage & PIPE_TRANSFER_DONTBLOCK) {
  382.             if (!(usage & PIPE_TRANSFER_WRITE)) {
  383.                 /* Mapping for read.
  384.                  *
  385.                  * Since we are mapping for read, we don't need to wait
  386.                  * if the GPU is using the buffer for read too
  387.                  * (neither one is changing it).
  388.                  *
  389.                  * Only check whether the buffer is being used for write. */
  390.                 if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
  391.                     cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
  392.                     return NULL;
  393.                 }
  394.  
  395.                 if (radeon_bo_is_busy((struct pb_buffer*)bo,
  396.                                       RADEON_USAGE_WRITE)) {
  397.                     return NULL;
  398.                 }
  399.             } else {
  400.                 if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
  401.                     cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
  402.                     return NULL;
  403.                 }
  404.  
  405.                 if (radeon_bo_is_busy((struct pb_buffer*)bo,
  406.                                       RADEON_USAGE_READWRITE)) {
  407.                     return NULL;
  408.                 }
  409.             }
  410.         } else {
  411.             uint64_t time = os_time_get_nano();
  412.  
  413.             if (!(usage & PIPE_TRANSFER_WRITE)) {
  414.                 /* Mapping for read.
  415.                  *
  416.                  * Since we are mapping for read, we don't need to wait
  417.                  * if the GPU is using the buffer for read too
  418.                  * (neither one is changing it).
  419.                  *
  420.                  * Only check whether the buffer is being used for write. */
  421.                 if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
  422.                     cs->flush_cs(cs->flush_data, 0, NULL);
  423.                 }
  424.                 radeon_bo_wait((struct pb_buffer*)bo,
  425.                                RADEON_USAGE_WRITE);
  426.             } else {
  427.                 /* Mapping for write. */
  428.                 if (cs) {
  429.                     if (radeon_bo_is_referenced_by_cs(cs, bo)) {
  430.                         cs->flush_cs(cs->flush_data, 0, NULL);
  431.                     } else {
  432.                         /* Try to avoid busy-waiting in radeon_bo_wait. */
  433.                         if (p_atomic_read(&bo->num_active_ioctls))
  434.                             radeon_drm_cs_sync_flush(rcs);
  435.                     }
  436.                 }
  437.  
  438.                 radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
  439.             }
  440.  
  441.             bo->mgr->rws->buffer_wait_time += os_time_get_nano() - time;
  442.         }
  443.     }
  444.  
  445.     return radeon_bo_do_map(bo);
  446. }
  447.  
  448. static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
  449. {
  450.     /* NOP */
  451. }
  452.  
  453. static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
  454.                                       struct pb_buffer **base_buf,
  455.                                       unsigned *offset)
  456. {
  457.     *base_buf = buf;
  458.     *offset = 0;
  459. }
  460.  
  461. static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
  462.                                           struct pb_validate *vl,
  463.                                           unsigned flags)
  464. {
  465.     /* Always pinned */
  466.     return PIPE_OK;
  467. }
  468.  
  469. static void radeon_bo_fence(struct pb_buffer *buf,
  470.                             struct pipe_fence_handle *fence)
  471. {
  472. }
  473.  
  474. static const struct pb_vtbl radeon_bo_vtbl = {
  475.     radeon_bo_destroy,
  476.     NULL, /* never called */
  477.     NULL, /* never called */
  478.     radeon_bo_validate,
  479.     radeon_bo_fence,
  480.     radeon_bo_get_base_buffer,
  481. };
  482.  
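/* Fallback definitions of GEM creation flags that are missing from
 * older radeon_drm.h headers. */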
  483. #ifndef RADEON_GEM_GTT_WC
  484. #define RADEON_GEM_GTT_WC               (1 << 2)
  485. #endif
  486. #ifndef RADEON_GEM_CPU_ACCESS
  487. /* BO is expected to be accessed by the CPU */
  488. #define RADEON_GEM_CPU_ACCESS           (1 << 3)
  489. #endif
  490. #ifndef RADEON_GEM_NO_CPU_ACCESS
  491. /* CPU access is not expected to work for this BO */
  492. #define RADEON_GEM_NO_CPU_ACCESS        (1 << 4)
  493. #endif
  494.  
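/* Allocate a new GEM buffer with DRM_RADEON_GEM_CREATE and, when virtual
 * addressing is enabled, assign it a GPU virtual address via
 * DRM_RADEON_GEM_VA. */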
  495. static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
  496.                                                 pb_size size,
  497.                                                 const struct pb_desc *desc)
  498. {
  499.     struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
  500.     struct radeon_drm_winsys *rws = mgr->rws;
  501.     struct radeon_bo *bo;
  502.     struct drm_radeon_gem_create args;
  503.     struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
  504.     int r;
  505.  
  506.     memset(&args, 0, sizeof(args));
  507.  
  508.     assert(rdesc->initial_domains);
  509.     assert((rdesc->initial_domains &
  510.             ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);
  511.  
  512.     args.size = size;
  513.     args.alignment = desc->alignment;
  514.     args.initial_domain = rdesc->initial_domains;
  515.     args.flags = 0;
  516.  
  517.     if (rdesc->flags & RADEON_FLAG_GTT_WC)
  518.         args.flags |= RADEON_GEM_GTT_WC;
  519.     if (rdesc->flags & RADEON_FLAG_CPU_ACCESS)
  520.         args.flags |= RADEON_GEM_CPU_ACCESS;
  521.     if (rdesc->flags & RADEON_FLAG_NO_CPU_ACCESS)
  522.         args.flags |= RADEON_GEM_NO_CPU_ACCESS;
  523.  
  524.     if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
  525.                             &args, sizeof(args))) {
  526.         fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
  527.         fprintf(stderr, "radeon:    size      : %d bytes\n", size);
  528.         fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
  529.         fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
  530.         fprintf(stderr, "radeon:    flags     : %d\n", args.flags);
  531.         return NULL;
  532.     }
  533.  
  534.     bo = CALLOC_STRUCT(radeon_bo);
  535.     if (!bo)
  536.         return NULL;
  537.  
  538.     pipe_reference_init(&bo->base.reference, 1);
  539.     bo->base.alignment = desc->alignment;
  540.     bo->base.usage = desc->usage;
  541.     bo->base.size = size;
  542.     bo->base.vtbl = &radeon_bo_vtbl;
  543.     bo->mgr = mgr;
  544.     bo->rws = mgr->rws;
  545.     bo->handle = args.handle;
  546.     bo->va = 0;
  547.     bo->initial_domain = rdesc->initial_domains;
  548.     pipe_mutex_init(bo->map_mutex);
  549.  
  550.     if (mgr->va) {
  551.         struct drm_radeon_gem_va va;
  552.  
  553.         bo->va = radeon_bomgr_find_va(mgr, size, desc->alignment);
  554.  
  555.         va.handle = bo->handle;
  556.         va.vm_id = 0;
  557.         va.operation = RADEON_VA_MAP;
  558.         va.flags = RADEON_VM_PAGE_READABLE |
  559.                    RADEON_VM_PAGE_WRITEABLE |
  560.                    RADEON_VM_PAGE_SNOOPED;
  561.         va.offset = bo->va;
  562.         r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
  563.         if (r && va.operation == RADEON_VA_RESULT_ERROR) {
  564.             fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
  565.             fprintf(stderr, "radeon:    size      : %d bytes\n", size);
  566.             fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
  567.             fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
  568.             fprintf(stderr, "radeon:    va        : 0x%016llx\n", (unsigned long long)bo->va);
  569.             radeon_bo_destroy(&bo->base);
  570.             return NULL;
  571.         }
  572.         pipe_mutex_lock(mgr->bo_handles_mutex);
  573.         if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
  574.             struct pb_buffer *b = &bo->base;
  575.             struct radeon_bo *old_bo =
  576.                 util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);
  577.  
  578.             pipe_mutex_unlock(mgr->bo_handles_mutex);
  579.             pb_reference(&b, &old_bo->base);
  580.             return b;
  581.         }
  582.  
  583.         util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
  584.         pipe_mutex_unlock(mgr->bo_handles_mutex);
  585.     }
  586.  
  587.     if (rdesc->initial_domains & RADEON_DOMAIN_VRAM)
  588.         rws->allocated_vram += align(size, 4096);
  589.     else if (rdesc->initial_domains & RADEON_DOMAIN_GTT)
  590.         rws->allocated_gtt += align(size, 4096);
  591.  
  592.     return &bo->base;
  593. }
  594.  
  595. static void radeon_bomgr_flush(struct pb_manager *mgr)
  596. {
  597.     /* NOP */
  598. }
  599.  
  600. /* This is for the cache bufmgr. */
  601. static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
  602.                                            struct pb_buffer *_buf)
  603. {
  604.    struct radeon_bo *bo = radeon_bo(_buf);
  605.  
  606.    if (radeon_bo_is_referenced_by_any_cs(bo)) {
  607.        return TRUE;
  608.    }
  609.  
  610.    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
  611.        return TRUE;
  612.    }
  613.  
  614.    return FALSE;
  615. }
  616.  
  617. static void radeon_bomgr_destroy(struct pb_manager *_mgr)
  618. {
  619.     struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
  620.     util_hash_table_destroy(mgr->bo_names);
  621.     util_hash_table_destroy(mgr->bo_handles);
  622.     util_hash_table_destroy(mgr->bo_vas);
  623.     pipe_mutex_destroy(mgr->bo_handles_mutex);
  624.     pipe_mutex_destroy(mgr->bo_va_mutex);
  625.     FREE(mgr);
  626. }
  627.  
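/* Hash-table callbacks: GEM handles, flink names and virtual addresses are
 * used directly as keys, so an identity hash is sufficient. */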
  628. #define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
  629.  
  630. static unsigned handle_hash(void *key)
  631. {
  632.     return PTR_TO_UINT(key);
  633. }
  634.  
  635. static int handle_compare(void *key1, void *key2)
  636. {
  637.     return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
  638. }
  639.  
  640. struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
  641. {
  642.     struct radeon_bomgr *mgr;
  643.  
  644.     mgr = CALLOC_STRUCT(radeon_bomgr);
  645.     if (!mgr)
  646.         return NULL;
  647.  
  648.     mgr->base.destroy = radeon_bomgr_destroy;
  649.     mgr->base.create_buffer = radeon_bomgr_create_bo;
  650.     mgr->base.flush = radeon_bomgr_flush;
  651.     mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;
  652.  
  653.     mgr->rws = rws;
  654.     mgr->bo_names = util_hash_table_create(handle_hash, handle_compare);
  655.     mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
  656.     mgr->bo_vas = util_hash_table_create(handle_hash, handle_compare);
  657.     pipe_mutex_init(mgr->bo_handles_mutex);
  658.     pipe_mutex_init(mgr->bo_va_mutex);
  659.  
  660.     mgr->va = rws->info.r600_virtual_address;
  661.     mgr->va_offset = rws->va_start;
  662.     list_inithead(&mgr->va_holes);
  663.  
  664.     return &mgr->base;
  665. }
  666.  
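/* Convert the encoded EG_TILE_SPLIT field (0..6) to a tile split in bytes
 * (64..4096); eg_tile_split_rev performs the inverse mapping. */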
  667. static unsigned eg_tile_split(unsigned tile_split)
  668. {
  669.     switch (tile_split) {
  670.     case 0:     tile_split = 64;    break;
  671.     case 1:     tile_split = 128;   break;
  672.     case 2:     tile_split = 256;   break;
  673.     case 3:     tile_split = 512;   break;
  674.     default:
  675.     case 4:     tile_split = 1024;  break;
  676.     case 5:     tile_split = 2048;  break;
  677.     case 6:     tile_split = 4096;  break;
  678.     }
  679.     return tile_split;
  680. }
  681.  
  682. static unsigned eg_tile_split_rev(unsigned eg_tile_split)
  683. {
  684.     switch (eg_tile_split) {
  685.     case 64:    return 0;
  686.     case 128:   return 1;
  687.     case 256:   return 2;
  688.     case 512:   return 3;
  689.     default:
  690.     case 1024:  return 4;
  691.     case 2048:  return 5;
  692.     case 4096:  return 6;
  693.     }
  694. }
  695.  
  696. static void radeon_bo_get_tiling(struct pb_buffer *_buf,
  697.                                  enum radeon_bo_layout *microtiled,
  698.                                  enum radeon_bo_layout *macrotiled,
  699.                                  unsigned *bankw, unsigned *bankh,
  700.                                  unsigned *tile_split,
  701.                                  unsigned *stencil_tile_split,
  702.                                  unsigned *mtilea,
  703.                                  bool *scanout)
  704. {
  705.     struct radeon_bo *bo = get_radeon_bo(_buf);
  706.     struct drm_radeon_gem_set_tiling args;
  707.  
  708.     memset(&args, 0, sizeof(args));
  709.  
  710.     args.handle = bo->handle;
  711.  
  712.     drmCommandWriteRead(bo->rws->fd,
  713.                         DRM_RADEON_GEM_GET_TILING,
  714.                         &args,
  715.                         sizeof(args));
  716.  
  717.     *microtiled = RADEON_LAYOUT_LINEAR;
  718.     *macrotiled = RADEON_LAYOUT_LINEAR;
  719.     if (args.tiling_flags & RADEON_TILING_MICRO)
  720.         *microtiled = RADEON_LAYOUT_TILED;
  721.     else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
  722.         *microtiled = RADEON_LAYOUT_SQUARETILED;
  723.  
  724.     if (args.tiling_flags & RADEON_TILING_MACRO)
  725.         *macrotiled = RADEON_LAYOUT_TILED;
  726.     if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
  727.         *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
  728.         *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
  729.         *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
  730.         *stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
  731.         *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
  732.         *tile_split = eg_tile_split(*tile_split);
  733.     }
  734.     if (scanout)
  735.         *scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
  736. }
  737.  
  738. static void radeon_bo_set_tiling(struct pb_buffer *_buf,
  739.                                  struct radeon_winsys_cs *rcs,
  740.                                  enum radeon_bo_layout microtiled,
  741.                                  enum radeon_bo_layout macrotiled,
  742.                                  unsigned bankw, unsigned bankh,
  743.                                  unsigned tile_split,
  744.                                  unsigned stencil_tile_split,
  745.                                  unsigned mtilea,
  746.                                  uint32_t pitch,
  747.                                  bool scanout)
  748. {
  749.     struct radeon_bo *bo = get_radeon_bo(_buf);
  750.     struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
  751.     struct drm_radeon_gem_set_tiling args;
  752.  
  753.     memset(&args, 0, sizeof(args));
  754.  
  755.     /* Tiling determines how DRM treats the buffer data.
  756.      * We must flush CS when changing it if the buffer is referenced. */
  757.     if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
  758.         cs->flush_cs(cs->flush_data, 0, NULL);
  759.     }
  760.  
  761.     while (p_atomic_read(&bo->num_active_ioctls)) {
  762.         sched_yield();
  763.     }
  764.  
  765.     if (microtiled == RADEON_LAYOUT_TILED)
  766.         args.tiling_flags |= RADEON_TILING_MICRO;
  767.     else if (microtiled == RADEON_LAYOUT_SQUARETILED)
  768.         args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;
  769.  
  770.     if (macrotiled == RADEON_LAYOUT_TILED)
  771.         args.tiling_flags |= RADEON_TILING_MACRO;
  772.  
  773.     args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
  774.         RADEON_TILING_EG_BANKW_SHIFT;
  775.     args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
  776.         RADEON_TILING_EG_BANKH_SHIFT;
  777.     if (tile_split) {
  778.         args.tiling_flags |= (eg_tile_split_rev(tile_split) &
  779.                               RADEON_TILING_EG_TILE_SPLIT_MASK) <<
  780.             RADEON_TILING_EG_TILE_SPLIT_SHIFT;
  781.     }
  782.     args.tiling_flags |= (stencil_tile_split &
  783.                           RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
  784.         RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
  785.     args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
  786.         RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;
  787.  
  788.     if (bo->rws->gen >= DRV_SI && !scanout)
  789.         args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;
  790.  
  791.     args.handle = bo->handle;
  792.     args.pitch = pitch;
  793.  
  794.     drmCommandWriteRead(bo->rws->fd,
  795.                         DRM_RADEON_GEM_SET_TILING,
  796.                         &args,
  797.                         sizeof(args));
  798. }
  799.  
  800. static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(struct pb_buffer *_buf)
  801. {
  802.     /* return radeon_bo. */
  803.     return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
  804. }
  805.  
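/* Public buffer_create entry point: encode the domain and flags into the
 * pb_desc usage bits so the cache manager only reuses compatible buffers,
 * then allocate through either the caching manager (cman) or the kernel
 * manager (kman). */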
  806. static struct pb_buffer *
  807. radeon_winsys_bo_create(struct radeon_winsys *rws,
  808.                         unsigned size,
  809.                         unsigned alignment,
  810.                         boolean use_reusable_pool,
  811.                         enum radeon_bo_domain domain,
  812.                         enum radeon_bo_flag flags)
  813. {
  814.     struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
  815.     struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
  816.     struct radeon_bo_desc desc;
  817.     struct pb_manager *provider;
  818.     struct pb_buffer *buffer;
  819.  
  820.     memset(&desc, 0, sizeof(desc));
  821.     desc.base.alignment = alignment;
  822.  
  823.     /* Only set one usage bit each for domains and flags, or the cache manager
  824.      * might consider different sets of domains / flags compatible
  825.      */
  826.     if (domain == RADEON_DOMAIN_VRAM_GTT)
  827.         desc.base.usage = 1 << 2;
  828.     else
  829.         desc.base.usage = domain >> 1;
  830.     assert(flags < sizeof(desc.base.usage) * 8 - 3);
  831.     desc.base.usage |= 1 << (flags + 3);
  832.  
  833.     desc.initial_domains = domain;
  834.     desc.flags = flags;
  835.  
  836.     /* Assign a buffer manager. */
  837.     if (use_reusable_pool)
  838.         provider = ws->cman;
  839.     else
  840.         provider = ws->kman;
  841.  
  842.     buffer = provider->create_buffer(provider, size, &desc.base);
  843.     if (!buffer)
  844.         return NULL;
  845.  
  846.     pipe_mutex_lock(mgr->bo_handles_mutex);
  847.     util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)get_radeon_bo(buffer)->handle, buffer);
  848.     pipe_mutex_unlock(mgr->bo_handles_mutex);
  849.  
  850.     return (struct pb_buffer*)buffer;
  851. }
  852.  
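/* Wrap anonymous user memory in a GEM buffer via DRM_RADEON_GEM_USERPTR.
 * The resulting BO lives in GTT and radeon_bo_do_map returns the original
 * user pointer. */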
  853. static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
  854.                                                    void *pointer, unsigned size)
  855. {
  856.     struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
  857.     struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
  858.     struct drm_radeon_gem_userptr args;
  859.     struct radeon_bo *bo;
  860.     int r;
  861.  
  862.     bo = CALLOC_STRUCT(radeon_bo);
  863.     if (!bo)
  864.         return NULL;
  865.  
  866.     memset(&args, 0, sizeof(args));
  867.     args.addr = (uintptr_t)pointer;
  868.     args.size = align(size, sysconf(_SC_PAGE_SIZE));
  869.     args.flags = RADEON_GEM_USERPTR_ANONONLY |
  870.         RADEON_GEM_USERPTR_VALIDATE |
  871.         RADEON_GEM_USERPTR_REGISTER;
  872.     if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
  873.                             &args, sizeof(args))) {
  874.         FREE(bo);
  875.         return NULL;
  876.     }
  877.  
  878.     pipe_mutex_lock(mgr->bo_handles_mutex);
  879.  
  880.     /* Initialize it. */
  881.     pipe_reference_init(&bo->base.reference, 1);
  882.     bo->handle = args.handle;
  883.     bo->base.alignment = 0;
  884.     bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
  885.     bo->base.size = size;
  886.     bo->base.vtbl = &radeon_bo_vtbl;
  887.     bo->mgr = mgr;
  888.     bo->rws = mgr->rws;
  889.     bo->user_ptr = pointer;
  890.     bo->va = 0;
  891.     bo->initial_domain = RADEON_DOMAIN_GTT;
  892.     pipe_mutex_init(bo->map_mutex);
  893.  
  894.     util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)bo->handle, bo);
  895.  
  896.     pipe_mutex_unlock(mgr->bo_handles_mutex);
  897.  
  898.     if (mgr->va) {
  899.         struct drm_radeon_gem_va va;
  900.  
  901.         bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);
  902.  
  903.         va.handle = bo->handle;
  904.         va.operation = RADEON_VA_MAP;
  905.         va.vm_id = 0;
  906.         va.offset = bo->va;
  907.         va.flags = RADEON_VM_PAGE_READABLE |
  908.                    RADEON_VM_PAGE_WRITEABLE |
  909.                    RADEON_VM_PAGE_SNOOPED;
  910.         va.offset = bo->va;
  911.         r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
  912.         if (r && va.operation == RADEON_VA_RESULT_ERROR) {
  913.             fprintf(stderr, "radeon: Failed to assign virtual address space\n");
  914.             radeon_bo_destroy(&bo->base);
  915.             return NULL;
  916.         }
  917.         pipe_mutex_lock(mgr->bo_handles_mutex);
  918.         if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
  919.             struct pb_buffer *b = &bo->base;
  920.             struct radeon_bo *old_bo =
  921.                 util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);
  922.  
  923.             pipe_mutex_unlock(mgr->bo_handles_mutex);
  924.             pb_reference(&b, &old_bo->base);
  925.             return b;
  926.         }
  927.  
  928.         util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
  929.         pipe_mutex_unlock(mgr->bo_handles_mutex);
  930.     }
  931.  
  932.     ws->allocated_gtt += align(bo->base.size, 4096);
  933.  
  934.     return (struct pb_buffer*)bo;
  935. }
  936.  
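/* Import a buffer shared via a flink name or a dma-buf fd, reusing an
 * already-known radeon_bo from the hash tables when possible. */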
  937. static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
  938.                                                       struct winsys_handle *whandle,
  939.                                                       unsigned *stride)
  940. {
  941.     struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
  942.     struct radeon_bo *bo;
  943.     struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
  944.     int r;
  945.     unsigned handle;
  946.     uint64_t size = 0;
  947.  
  948.     /* We must maintain a list of pairs <handle, bo>, so that we always return
  949.      * the same BO for one particular handle. If we didn't do that and created
  950.      * more than one BO for the same handle and then relocated them in a CS,
  951.      * we would hit a deadlock in the kernel.
  952.      *
  953.      * The list of pairs is guarded by a mutex, of course. */
  954.     pipe_mutex_lock(mgr->bo_handles_mutex);
  955.  
  956.     if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
  957.         /* First check if there already is an existing bo for the handle. */
  958.         bo = util_hash_table_get(mgr->bo_names, (void*)(uintptr_t)whandle->handle);
  959.     } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
  960.         /* We must first get the GEM handle, as fds are unreliable keys */
  961.         r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
  962.         if (r)
  963.             goto fail;
  964.         bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)handle);
  965.     } else {
  966.         /* Unknown handle type */
  967.         goto fail;
  968.     }
  969.  
  970.     if (bo) {
  971.         /* Increase the refcount. */
  972.         struct pb_buffer *b = NULL;
  973.         pb_reference(&b, &bo->base);
  974.         goto done;
  975.     }
  976.  
  977.     /* There isn't, create a new one. */
  978.     bo = CALLOC_STRUCT(radeon_bo);
  979.     if (!bo) {
  980.         goto fail;
  981.     }
  982.  
  983.     if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
  984.         struct drm_gem_open open_arg;
  985.         memset(&open_arg, 0, sizeof(open_arg));
  986.         /* Open the BO. */
  987.         open_arg.name = whandle->handle;
  988.         if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
  989.             FREE(bo);
  990.             goto fail;
  991.         }
  992.         handle = open_arg.handle;
  993.         size = open_arg.size;
  994.         bo->flink_name = whandle->handle;
  995.     } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
  996.         size = lseek(whandle->handle, 0, SEEK_END);
  997.         /*
  998.          * Could check errno to determine whether the kernel is new enough, but
  999.          * it doesn't really matter why this failed, just that it failed.
  1000.          */
  1001.         if (size == (off_t)-1) {
  1002.             FREE(bo);
  1003.             goto fail;
  1004.         }
  1005.         lseek(whandle->handle, 0, SEEK_SET);
  1006.     }
  1007.  
  1008.     bo->handle = handle;
  1009.  
  1010.     /* Initialize it. */
  1011.     pipe_reference_init(&bo->base.reference, 1);
  1012.     bo->base.alignment = 0;
  1013.     bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
  1014.     bo->base.size = (unsigned) size;
  1015.     bo->base.vtbl = &radeon_bo_vtbl;
  1016.     bo->mgr = mgr;
  1017.     bo->rws = mgr->rws;
  1018.     bo->va = 0;
  1019.     pipe_mutex_init(bo->map_mutex);
  1020.  
  1021.     if (bo->flink_name)
  1022.         util_hash_table_set(mgr->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
  1023.  
  1024.     util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)bo->handle, bo);
  1025.  
  1026. done:
  1027.     pipe_mutex_unlock(mgr->bo_handles_mutex);
  1028.  
  1029.     if (stride)
  1030.         *stride = whandle->stride;
  1031.  
  1032.     if (mgr->va && !bo->va) {
  1033.         struct drm_radeon_gem_va va;
  1034.  
  1035.         bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);
  1036.  
  1037.         va.handle = bo->handle;
  1038.         va.operation = RADEON_VA_MAP;
  1039.         va.vm_id = 0;
  1040.         va.offset = bo->va;
  1041.         va.flags = RADEON_VM_PAGE_READABLE |
  1042.                    RADEON_VM_PAGE_WRITEABLE |
  1043.                    RADEON_VM_PAGE_SNOOPED;
  1044.         va.offset = bo->va;
  1045.         r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
  1046.         if (r && va.operation == RADEON_VA_RESULT_ERROR) {
  1047.             fprintf(stderr, "radeon: Failed to assign virtual address space\n");
  1048.             radeon_bo_destroy(&bo->base);
  1049.             return NULL;
  1050.         }
  1051.         pipe_mutex_lock(mgr->bo_handles_mutex);
  1052.         if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
  1053.             struct pb_buffer *b = &bo->base;
  1054.             struct radeon_bo *old_bo =
  1055.                 util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);
  1056.  
  1057.             pipe_mutex_unlock(mgr->bo_handles_mutex);
  1058.             pb_reference(&b, &old_bo->base);
  1059.             return b;
  1060.         }
  1061.  
  1062.         util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
  1063.         pipe_mutex_unlock(mgr->bo_handles_mutex);
  1064.     }
  1065.  
  1066.     bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);
  1067.  
  1068.     if (bo->initial_domain & RADEON_DOMAIN_VRAM)
  1069.         ws->allocated_vram += align(bo->base.size, 4096);
  1070.     else if (bo->initial_domain & RADEON_DOMAIN_GTT)
  1071.         ws->allocated_gtt += align(bo->base.size, 4096);
  1072.  
  1073.     return (struct pb_buffer*)bo;
  1074.  
  1075. fail:
  1076.     pipe_mutex_unlock(mgr->bo_handles_mutex);
  1077.     return NULL;
  1078. }
  1079.  
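/* Export the buffer as a flink name, a KMS handle or a dma-buf fd,
 * depending on the requested handle type. */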
  1080. static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
  1081.                                            unsigned stride,
  1082.                                            struct winsys_handle *whandle)
  1083. {
  1084.     struct drm_gem_flink flink;
  1085.     struct radeon_bo *bo = get_radeon_bo(buffer);
  1086.  
  1087.     memset(&flink, 0, sizeof(flink));
  1088.  
  1089.     if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
  1090.         if (!bo->flink_name) {
  1091.             flink.handle = bo->handle;
  1092.  
  1093.             if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
  1094.                 return FALSE;
  1095.             }
  1096.  
  1097.             bo->flink_name = flink.name;
  1098.  
  1099.             pipe_mutex_lock(bo->mgr->bo_handles_mutex);
  1100.             util_hash_table_set(bo->mgr->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
  1101.             pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
  1102.         }
  1103.         whandle->handle = bo->flink_name;
  1104.     } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
  1105.         whandle->handle = bo->handle;
  1106.     } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
  1107.         if (drmPrimeHandleToFD(bo->rws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
  1108.             return FALSE;
  1109.     }
  1110.  
  1111.     whandle->stride = stride;
  1112.     return TRUE;
  1113. }
  1114.  
  1115. static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
  1116. {
  1117.     return ((struct radeon_bo*)buf)->va;
  1118. }
  1119.  
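/* Hook the buffer-management entry points into the winsys vtable. */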
  1120. void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
  1121. {
  1122.     ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
  1123.     ws->base.buffer_set_tiling = radeon_bo_set_tiling;
  1124.     ws->base.buffer_get_tiling = radeon_bo_get_tiling;
  1125.     ws->base.buffer_map = radeon_bo_map;
  1126.     ws->base.buffer_unmap = radeon_bo_unmap;
  1127.     ws->base.buffer_wait = radeon_bo_wait;
  1128.     ws->base.buffer_is_busy = radeon_bo_is_busy;
  1129.     ws->base.buffer_create = radeon_winsys_bo_create;
  1130.     ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
  1131.     ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
  1132.     ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
  1133.     ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
  1134.     ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
  1135. }
  1136.