/*
 * Copyright © 2009 Corbin Simpson
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Corbin Simpson <MostAwesomeDude@gmail.com>
 *      Joakim Sindholt <opensource@zhasha.com>
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "radeon_drm_bo.h"
#include "radeon_drm_cs.h"
#include "radeon_drm_public.h"

#include "pipebuffer/pb_bufmgr.h"
#include "util/u_memory.h"
#include "util/u_hash_table.h"

#include <xf86drm.h>
#include <stdio.h>

/*
 * These are copied from radeon_drm.h; once an updated libdrm is released,
 * we should bump the configure.ac requirement for it and remove the
 * following defines.
 */
#ifndef RADEON_INFO_TILING_CONFIG
#define RADEON_INFO_TILING_CONFIG 6
#endif

#ifndef RADEON_INFO_WANT_HYPERZ
#define RADEON_INFO_WANT_HYPERZ 7
#endif

#ifndef RADEON_INFO_WANT_CMASK
#define RADEON_INFO_WANT_CMASK 8
#endif

#ifndef RADEON_INFO_CLOCK_CRYSTAL_FREQ
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 9
#endif

#ifndef RADEON_INFO_NUM_BACKENDS
#define RADEON_INFO_NUM_BACKENDS 0xa
#endif

#ifndef RADEON_INFO_NUM_TILE_PIPES
#define RADEON_INFO_NUM_TILE_PIPES 0xb
#endif

#ifndef RADEON_INFO_BACKEND_MAP
#define RADEON_INFO_BACKEND_MAP 0xd
#endif

#ifndef RADEON_INFO_VA_START
/* virtual address start, va < start are reserved by the kernel */
#define RADEON_INFO_VA_START        0x0e
/* maximum size of ib using the virtual memory cs */
#define RADEON_INFO_IB_VM_MAX_SIZE  0x0f
#endif

#ifndef RADEON_INFO_MAX_PIPES
#define RADEON_INFO_MAX_PIPES 0x10
#endif

#ifndef RADEON_INFO_TIMESTAMP
#define RADEON_INFO_TIMESTAMP 0x11
#endif

#ifndef RADEON_INFO_RING_WORKING
#define RADEON_INFO_RING_WORKING 0x15
#endif

#ifndef RADEON_CS_RING_UVD
#define RADEON_CS_RING_UVD      3
#endif

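/* Maps a DRM file descriptor to its winsys so that repeated
 * radeon_drm_winsys_create() calls on the same fd return one shared,
 * reference-counted winsys (see radeon_drm_winsys_create below). */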
static struct util_hash_table *fd_tab = NULL;

/* Enable/disable feature access for one command stream.
 * If enable == TRUE, return TRUE on success.
 * Otherwise, return FALSE.
 *
 * We basically do the same thing the kernel does, because we have to deal
 * with multiple contexts (here command streams) backed by one winsys. */
static boolean radeon_set_fd_access(struct radeon_drm_cs *applier,
                                    struct radeon_drm_cs **owner,
                                    pipe_mutex *mutex,
                                    unsigned request, const char *request_name,
                                    boolean enable)
{
    struct drm_radeon_info info;
    unsigned value = enable ? 1 : 0;

    memset(&info, 0, sizeof(info));

    pipe_mutex_lock(*mutex);

    /* Early exit if we are sure the request will fail. */
    if (enable) {
        if (*owner) {
            pipe_mutex_unlock(*mutex);
            return FALSE;
        }
    } else {
        if (*owner != applier) {
            pipe_mutex_unlock(*mutex);
            return FALSE;
        }
    }

    /* Pass through the request to the kernel. */
    info.value = (unsigned long)&value;
    info.request = request;
    if (drmCommandWriteRead(applier->ws->fd, DRM_RADEON_INFO,
                            &info, sizeof(info)) != 0) {
        pipe_mutex_unlock(*mutex);
        return FALSE;
    }

    /* Update the rights in the winsys. */
    if (enable) {
        if (value) {
            *owner = applier;
            printf("radeon: Acquired access to %s.\n", request_name);
            pipe_mutex_unlock(*mutex);
            return TRUE;
        }
    } else {
        *owner = NULL;
        printf("radeon: Released access to %s.\n", request_name);
    }

    pipe_mutex_unlock(*mutex);
    return FALSE;
}
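
/* Usage sketch (mirrors the real caller, radeon_cs_request_feature, further
 * down in this file):
 *
 *     // Try to acquire exclusive Hyper-Z access for this command stream.
 *     if (radeon_set_fd_access(cs, &cs->ws->hyperz_owner,
 *                              &cs->ws->hyperz_owner_mutex,
 *                              RADEON_INFO_WANT_HYPERZ, "Hyper-Z", TRUE)) {
 *         // Hyper-Z may be used; release it later with enable == FALSE.
 *     }
 */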

static boolean radeon_get_drm_value(int fd, unsigned request,
                                    const char *errname, uint32_t *out)
{
    struct drm_radeon_info info;
    int retval;

    memset(&info, 0, sizeof(info));

    info.value = (unsigned long)out;
    info.request = request;

    retval = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
    if (retval) {
        if (errname) {
            fprintf(stderr, "radeon: Failed to get %s, error number %d\n",
                    errname, retval);
        }
        return FALSE;
    }
    return TRUE;
}
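
/* Example query (a sketch of the pattern used throughout do_winsys_init
 * below):
 *
 *     uint32_t pci_id;
 *     if (!radeon_get_drm_value(ws->fd, RADEON_INFO_DEVICE_ID, "PCI ID",
 *                               &pci_id))
 *         return FALSE;  // errname was non-NULL, so the failure was logged
 */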

/* Helper function to do the ioctls needed for setup and init. */
static boolean do_winsys_init(struct radeon_drm_winsys *ws)
{
    struct drm_radeon_gem_info gem_info;
    int retval;
    drmVersionPtr version;

    memset(&gem_info, 0, sizeof(gem_info));

    /* We do things in a specific order here.
     *
     * DRM version first. We need to be sure we're running on a KMS chipset.
     * This is also for some features.
     *
     * Then, the PCI ID. This is essential and should return usable numbers
     * for all Radeons. If this fails, we probably got handed an FD for some
     * non-Radeon card.
     *
     * The GEM info is actually bogus on the kernel side, as well as our side
     * (see radeon_gem_info_ioctl in radeon_gem.c) but that's alright because
     * we don't actually use the info for anything yet.
     *
     * The GB and Z pipe requests should always succeed, but they may not
     * return sensible values for all chipsets; that's alright because
     * the pipe drivers already know that.
     */

    /* Get DRM version. */
    version = drmGetVersion(ws->fd);
    if (version->version_major != 2 ||
        version->version_minor < 3) {
        fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
                "only compatible with 2.3.x (kernel 2.6.34) or later.\n",
                __FUNCTION__,
                version->version_major,
                version->version_minor,
                version->version_patchlevel);
        drmFreeVersion(version);
        return FALSE;
    }

    ws->info.drm_major = version->version_major;
    ws->info.drm_minor = version->version_minor;
    ws->info.drm_patchlevel = version->version_patchlevel;
    drmFreeVersion(version);

    /* Get PCI ID. */
    if (!radeon_get_drm_value(ws->fd, RADEON_INFO_DEVICE_ID, "PCI ID",
                              &ws->info.pci_id))
        return FALSE;

    /* Check PCI ID. */
    switch (ws->info.pci_id) {
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R300; break;
#include "pci_ids/r300_pci_ids.h"
#undef CHIPSET

#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R600; break;
#include "pci_ids/r600_pci_ids.h"
#undef CHIPSET

#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_SI; break;
#include "pci_ids/radeonsi_pci_ids.h"
#undef CHIPSET

    default:
        fprintf(stderr, "radeon: Invalid PCI ID.\n");
        return FALSE;
    }

    switch (ws->info.family) {
    default:
    case CHIP_UNKNOWN:
        fprintf(stderr, "radeon: Unknown family.\n");
        return FALSE;
    case CHIP_R300:
    case CHIP_R350:
    case CHIP_RV350:
    case CHIP_RV370:
    case CHIP_RV380:
    case CHIP_RS400:
    case CHIP_RC410:
    case CHIP_RS480:
        ws->info.chip_class = R300;
        break;
    case CHIP_R420:     /* R4xx-based cores. */
    case CHIP_R423:
    case CHIP_R430:
    case CHIP_R480:
    case CHIP_R481:
    case CHIP_RV410:
    case CHIP_RS600:
    case CHIP_RS690:
    case CHIP_RS740:
        ws->info.chip_class = R400;
        break;
    case CHIP_RV515:    /* R5xx-based cores. */
    case CHIP_R520:
    case CHIP_RV530:
    case CHIP_R580:
    case CHIP_RV560:
    case CHIP_RV570:
        ws->info.chip_class = R500;
        break;
    case CHIP_R600:
    case CHIP_RV610:
    case CHIP_RV630:
    case CHIP_RV670:
    case CHIP_RV620:
    case CHIP_RV635:
    case CHIP_RS780:
    case CHIP_RS880:
        ws->info.chip_class = R600;
        break;
    case CHIP_RV770:
    case CHIP_RV730:
    case CHIP_RV710:
    case CHIP_RV740:
        ws->info.chip_class = R700;
        break;
    case CHIP_CEDAR:
    case CHIP_REDWOOD:
    case CHIP_JUNIPER:
    case CHIP_CYPRESS:
    case CHIP_HEMLOCK:
    case CHIP_PALM:
    case CHIP_SUMO:
    case CHIP_SUMO2:
    case CHIP_BARTS:
    case CHIP_TURKS:
    case CHIP_CAICOS:
        ws->info.chip_class = EVERGREEN;
        break;
    case CHIP_CAYMAN:
    case CHIP_ARUBA:
        ws->info.chip_class = CAYMAN;
        break;
    case CHIP_TAHITI:
    case CHIP_PITCAIRN:
    case CHIP_VERDE:
    case CHIP_OLAND:
    case CHIP_HAINAN:
        ws->info.chip_class = SI;
        break;
    case CHIP_BONAIRE:
    case CHIP_KAVERI:
    case CHIP_KABINI:
        ws->info.chip_class = CIK;
        break;
    }

    /* Check for dma */
    ws->info.r600_has_dma = FALSE;
    if (ws->info.chip_class >= R700 && ws->info.drm_minor >= 27) {
        ws->info.r600_has_dma = TRUE;
    }

    /* Check for UVD */
    ws->info.has_uvd = FALSE;
    if (ws->info.drm_minor >= 32) {
        uint32_t value = RADEON_CS_RING_UVD;
        if (radeon_get_drm_value(ws->fd, RADEON_INFO_RING_WORKING,
                                 "UVD Ring working", &value))
            ws->info.has_uvd = value;
    }

    /* Get GEM info. */
    retval = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_INFO,
            &gem_info, sizeof(gem_info));
    if (retval) {
        fprintf(stderr, "radeon: Failed to get MM info, error number %d\n",
                retval);
        return FALSE;
    }
    ws->info.gart_size = gem_info.gart_size;
    ws->info.vram_size = gem_info.vram_size;

    ws->num_cpus = sysconf(_SC_NPROCESSORS_ONLN);

    /* Generation-specific queries. */
    if (ws->gen == DRV_R300) {
        if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_GB_PIPES,
                                  "GB pipe count",
                                  &ws->info.r300_num_gb_pipes))
            return FALSE;

        if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_Z_PIPES,
                                  "Z pipe count",
                                  &ws->info.r300_num_z_pipes))
            return FALSE;
    }
    else if (ws->gen >= DRV_R600) {
        if (ws->info.drm_minor >= 9 &&
            !radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_BACKENDS,
                                  "num backends",
                                  &ws->info.r600_num_backends))
            return FALSE;

        /* get the GPU counter frequency, failure is not fatal */
        radeon_get_drm_value(ws->fd, RADEON_INFO_CLOCK_CRYSTAL_FREQ, NULL,
                             &ws->info.r600_clock_crystal_freq);

        radeon_get_drm_value(ws->fd, RADEON_INFO_TILING_CONFIG, NULL,
                             &ws->info.r600_tiling_config);

        if (ws->info.drm_minor >= 11) {
            radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_TILE_PIPES, NULL,
                                 &ws->info.r600_num_tile_pipes);

            if (radeon_get_drm_value(ws->fd, RADEON_INFO_BACKEND_MAP, NULL,
                                      &ws->info.r600_backend_map))
                ws->info.r600_backend_map_valid = TRUE;
        }

        ws->info.r600_virtual_address = FALSE;
        if (ws->info.drm_minor >= 13) {
            ws->info.r600_virtual_address = TRUE;
            if (!radeon_get_drm_value(ws->fd, RADEON_INFO_VA_START, NULL,
                                      &ws->info.r600_va_start))
                ws->info.r600_virtual_address = FALSE;
            if (!radeon_get_drm_value(ws->fd, RADEON_INFO_IB_VM_MAX_SIZE, NULL,
                                      &ws->info.r600_ib_vm_max_size))
                ws->info.r600_virtual_address = FALSE;
        }
        if (ws->gen == DRV_R600 && !debug_get_bool_option("RADEON_VA", FALSE))
                ws->info.r600_virtual_address = FALSE;
    }

    /* Get max pipes, this is only needed for compute shaders.  All evergreen+
     * chips have at least 2 pipes, so we use 2 as a default. */
    ws->info.r600_max_pipes = 2;
    radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_PIPES, NULL,
                         &ws->info.r600_max_pipes);

    return TRUE;
}

static void radeon_winsys_destroy(struct radeon_winsys *rws)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    if (ws->thread) {
        ws->kill_thread = 1;
        pipe_semaphore_signal(&ws->cs_queued);
        pipe_thread_wait(ws->thread);
    }
    pipe_semaphore_destroy(&ws->cs_queued);
    pipe_condvar_destroy(ws->cs_queue_empty);

    if (!pipe_reference(&ws->base.reference, NULL)) {
        return;
    }

    pipe_mutex_destroy(ws->hyperz_owner_mutex);
    pipe_mutex_destroy(ws->cmask_owner_mutex);
    pipe_mutex_destroy(ws->cs_stack_lock);

    ws->cman->destroy(ws->cman);
    ws->kman->destroy(ws->kman);
    if (ws->gen >= DRV_R600) {
        radeon_surface_manager_free(ws->surf_man);
    }
    if (fd_tab) {
        util_hash_table_remove(fd_tab, intptr_to_pointer(ws->fd));
    }
    FREE(rws);
}

static void radeon_query_info(struct radeon_winsys *rws,
                              struct radeon_info *info)
{
    *info = ((struct radeon_drm_winsys *)rws)->info;
}

static boolean radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
                                         enum radeon_feature_id fid,
                                         boolean enable)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    switch (fid) {
    case RADEON_FID_R300_HYPERZ_ACCESS:
        return radeon_set_fd_access(cs, &cs->ws->hyperz_owner,
                                    &cs->ws->hyperz_owner_mutex,
                                    RADEON_INFO_WANT_HYPERZ, "Hyper-Z",
                                    enable);

    case RADEON_FID_R300_CMASK_ACCESS:
        return radeon_set_fd_access(cs, &cs->ws->cmask_owner,
                                    &cs->ws->cmask_owner_mutex,
                                    RADEON_INFO_WANT_CMASK, "AA optimizations",
                                    enable);
    }
    return FALSE;
}

static int radeon_drm_winsys_surface_init(struct radeon_winsys *rws,
                                          struct radeon_surface *surf)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    return radeon_surface_init(ws->surf_man, surf);
}

static int radeon_drm_winsys_surface_best(struct radeon_winsys *rws,
                                          struct radeon_surface *surf)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    return radeon_surface_best(ws->surf_man, surf);
}

static uint64_t radeon_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
    uint64_t ts = 0;

    switch (value) {
    case RADEON_REQUESTED_VRAM_MEMORY:
        return ws->allocated_vram;
    case RADEON_REQUESTED_GTT_MEMORY:
        return ws->allocated_gtt;
    case RADEON_BUFFER_WAIT_TIME_NS:
        return ws->buffer_wait_time;
    case RADEON_TIMESTAMP:
        if (ws->info.drm_minor < 20 || ws->gen < DRV_R600) {
            assert(0);
            return 0;
        }

        radeon_get_drm_value(ws->fd, RADEON_INFO_TIMESTAMP, "timestamp",
                             (uint32_t*)&ts);
        return ts;
    }
    return 0;
}

static unsigned hash_fd(void *key)
{
    return pointer_to_intptr(key);
}

static int compare_fd(void *key1, void *key2)
{
    return pointer_to_intptr(key1) != pointer_to_intptr(key2);
}

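/* Producer side of the CS submission queue: push a command stream onto
 * cs_stack and wake the submission thread. If the stack is full
 * (ncs == RING_LAST), drop the lock and retry until the thread has drained
 * a slot. */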
void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *cs)
{
retry:
    pipe_mutex_lock(ws->cs_stack_lock);
    if (p_atomic_read(&ws->ncs) >= RING_LAST) {
        /* no room left for a flush */
        pipe_mutex_unlock(ws->cs_stack_lock);
        goto retry;
    }
    ws->cs_stack[p_atomic_read(&ws->ncs)] = cs;
    p_atomic_inc(&ws->ncs);
    pipe_mutex_unlock(ws->cs_stack_lock);
    pipe_semaphore_signal(&ws->cs_queued);
}

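/* Consumer side: the submission thread sleeps on cs_queued, submits the CS
 * at the bottom of the stack with radeon_drm_cs_emit_ioctl_oneshot(), shifts
 * the remaining entries down, signals flush_completed for the submitted CS,
 * and signals cs_queue_empty once the stack is drained. On kill_thread it
 * wakes any remaining waiters and exits. */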
static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys *)param;
    struct radeon_drm_cs *cs;
    unsigned i, empty_stack;

    while (1) {
        pipe_semaphore_wait(&ws->cs_queued);
        if (ws->kill_thread)
            break;
next:
        pipe_mutex_lock(ws->cs_stack_lock);
        cs = ws->cs_stack[0];
        pipe_mutex_unlock(ws->cs_stack_lock);

        if (cs) {
            radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);

            pipe_mutex_lock(ws->cs_stack_lock);
            for (i = 1; i < p_atomic_read(&ws->ncs); i++) {
                ws->cs_stack[i - 1] = ws->cs_stack[i];
            }
            ws->cs_stack[p_atomic_read(&ws->ncs) - 1] = NULL;
            empty_stack = p_atomic_dec_zero(&ws->ncs);
            if (empty_stack) {
                pipe_condvar_signal(ws->cs_queue_empty);
            }
            pipe_mutex_unlock(ws->cs_stack_lock);

            pipe_semaphore_signal(&cs->flush_completed);

            if (!empty_stack) {
                goto next;
            }
        }
    }
    pipe_mutex_lock(ws->cs_stack_lock);
    for (i = 0; i < p_atomic_read(&ws->ncs); i++) {
        pipe_semaphore_signal(&ws->cs_stack[i]->flush_completed);
        ws->cs_stack[i] = NULL;
    }
    p_atomic_set(&ws->ncs, 0);
    pipe_condvar_signal(ws->cs_queue_empty);
    pipe_mutex_unlock(ws->cs_stack_lock);
    return NULL;
}

DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param);

struct radeon_winsys *radeon_drm_winsys_create(int fd)
{
    struct radeon_drm_winsys *ws;

    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
    }

    ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (ws) {
        pipe_reference(NULL, &ws->base.reference);
        return &ws->base;
    }

    ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        return NULL;
    }
    ws->fd = fd;
    util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;
    ws->cman = pb_cache_manager_create(ws->kman, 1000000);
    if (!ws->cman)
        goto fail;

    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* init reference */
    pipe_reference_init(&ws->base.reference, 1);

    /* Set functions. */
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.surface_init = radeon_drm_winsys_surface_init;
    ws->base.surface_best = radeon_drm_winsys_surface_best;
    ws->base.query_value = radeon_query_value;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);
    pipe_mutex_init(ws->cs_stack_lock);

    p_atomic_set(&ws->ncs, 0);
    pipe_semaphore_init(&ws->cs_queued, 0);
    pipe_condvar_init(ws->cs_queue_empty);
    if (ws->num_cpus > 1 && debug_get_option_thread())
        ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

    return &ws->base;

fail:
    /* Undo the early fd_tab registration so a later create() on this fd
     * doesn't return a pointer to freed memory. */
    if (fd_tab) {
        util_hash_table_remove(fd_tab, intptr_to_pointer(ws->fd));
    }
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    FREE(ws);
    return NULL;
}
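
/* Typical use (a sketch, not part of this file): the state tracker opens a
 * DRM device node and hands the fd to radeon_drm_winsys_create(), then builds
 * a pipe_screen on top of the returned winsys. The device path and screen
 * constructor below are illustrative and depend on the GPU generation:
 *
 *     int fd = open("/dev/dri/card0", O_RDWR);
 *     struct radeon_winsys *ws = radeon_drm_winsys_create(fd);
 *     if (ws)
 *         screen = r600_screen_create(ws);   // or r300_/radeonsi_screen_create
 */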