Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright 2008 Advanced Micro Devices, Inc.
  3.  * Copyright 2008 Red Hat Inc.
  4.  * Copyright 2009 Jerome Glisse.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the "Software"),
  8.  * to deal in the Software without restriction, including without limitation
  9.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10.  * and/or sell copies of the Software, and to permit persons to whom the
  11.  * Software is furnished to do so, subject to the following conditions:
  12.  *
  13.  * The above copyright notice and this permission notice shall be included in
  14.  * all copies or substantial portions of the Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19.  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20.  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22.  * OTHER DEALINGS IN THE SOFTWARE.
  23.  *
  24.  * Authors: Dave Airlie
  25.  *          Alex Deucher
  26.  *          Jerome Glisse
  27.  *          Christian König
  28.  */
  29. #include <drm/drmP.h>
  30. #include "radeon.h"
  31.  
  32. /*
  33.  * IB
  34.  * IBs (Indirect Buffers) and areas of GPU accessible memory where
  35.  * commands are stored.  You can put a pointer to the IB in the
  36.  * command ring and the hw will fetch the commands from the IB
  37.  * and execute them.  Generally userspace acceleration drivers
  38.  * produce command buffers which are send to the kernel and
  39.  * put in IBs for execution by the requested ring.
  40.  */
  41. static int radeon_debugfs_sa_init(struct radeon_device *rdev);
  42.  
  43. /**
  44.  * radeon_ib_get - request an IB (Indirect Buffer)
  45.  *
  46.  * @rdev: radeon_device pointer
  47.  * @ring: ring index the IB is associated with
  48.  * @ib: IB object returned
  49.  * @size: requested IB size
  50.  *
  51.  * Request an IB (all asics).  IBs are allocated using the
  52.  * suballocator.
  53.  * Returns 0 on success, error on failure.
  54.  */
  55. int radeon_ib_get(struct radeon_device *rdev, int ring,
  56.                   struct radeon_ib *ib, struct radeon_vm *vm,
  57.                   unsigned size)
  58. {
  59.         int r;
  60.  
  61.         r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
  62.         if (r) {
  63.                 dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
  64.                 return r;
  65.         }
  66.  
  67.         r = radeon_semaphore_create(rdev, &ib->semaphore);
  68.         if (r) {
  69.                 return r;
  70.         }
  71.  
  72.         ib->ring = ring;
  73.         ib->fence = NULL;
  74.         ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
  75.         ib->vm = vm;
  76.         if (vm) {
  77.                 /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
  78.                  * space and soffset is the offset inside the pool bo
  79.                  */
  80.                 ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
  81.         } else {
  82.                 ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
  83.         }
  84.         ib->is_const_ib = false;
  85.  
  86.         return 0;
  87. }
  88.  
  89. /**
  90.  * radeon_ib_free - free an IB (Indirect Buffer)
  91.  *
  92.  * @rdev: radeon_device pointer
  93.  * @ib: IB object to free
  94.  *
  95.  * Free an IB (all asics).
  96.  */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
        /* release in order: semaphore first, then the suballocation;
         * passing ib->fence presumably defers reuse of the memory until
         * the GPU is done with the IB — semantics live in radeon_sa.c,
         * confirm there.  Finally drop our own reference on the fence.
         */
        radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
        radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
        radeon_fence_unref(&ib->fence);
}
  103.  
  104. /**
  105.  * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
  106.  *
  107.  * @rdev: radeon_device pointer
  108.  * @ib: IB object to schedule
  109.  * @const_ib: Const IB to schedule (SI only)
  110.  * @hdp_flush: Whether or not to perform an HDP cache flush
  111.  *
  112.  * Schedule an IB on the associated ring (all asics).
  113.  * Returns 0 on success, error on failure.
  114.  *
  115.  * On SI, there are two parallel engines fed from the primary ring,
  116.  * the CE (Constant Engine) and the DE (Drawing Engine).  Since
  117.  * resource descriptors have moved to memory, the CE allows you to
  118.  * prime the caches while the DE is updating register state so that
  119.  * the resource descriptors will be already in cache when the draw is
  120.  * processed.  To accomplish this, the userspace driver submits two
  121.  * IBs, one for the CE and one for the DE.  If there is a CE IB (called
  122.  * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
  123.  * to SI there was just a DE IB.
  124.  */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
                       struct radeon_ib *const_ib, bool hdp_flush)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];
        int r = 0;

        /* an empty IB or a dead ring cannot be scheduled */
        if (!ib->length_dw || !ring->ready) {
                /* TODO: Nothings in the ib we should report. */
                dev_err(rdev->dev, "couldn't schedule ib\n");
                return -EINVAL;
        }

        /* 64 dwords should be enough for fence too */
        r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
        if (r) {
                dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }

        /* grab a vm id if necessary */
        if (ib->vm) {
                struct radeon_fence *vm_id_fence;
                vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
                radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
        }

        /* sync with other rings */
        r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
        if (r) {
                dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
                /* roll back what we already wrote and release the ring */
                radeon_ring_unlock_undo(rdev, ring);
                return r;
        }

        /* flush must happen after the id was grabbed and synced to */
        if (ib->vm)
                radeon_vm_flush(rdev, ib->vm, ib->ring);

        /* on SI the CONST_IB (CE) must go on the ring before the DE IB */
        if (const_ib) {
                radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
                radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
        }
        radeon_ring_ib_execute(rdev, ib->ring, ib);
        r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
        if (r) {
                dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
                radeon_ring_unlock_undo(rdev, ring);
                return r;
        }
        /* the const IB shares the fence of the main IB */
        if (const_ib) {
                const_ib->fence = radeon_fence_ref(ib->fence);
        }

        if (ib->vm)
                radeon_vm_fence(rdev, ib->vm, ib->fence);

        /* commit the ring contents; hdp_flush per the caller's request */
        radeon_ring_unlock_commit(rdev, ring, hdp_flush);
        return 0;
}
  183.  
  184. /**
  185.  * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
  186.  *
  187.  * @rdev: radeon_device pointer
  188.  *
  189.  * Initialize the suballocator to manage a pool of memory
  190.  * for use as IBs (all asics).
  191.  * Returns 0 on success, error on failure.
  192.  */
  193. int radeon_ib_pool_init(struct radeon_device *rdev)
  194. {
  195.         int r;
  196.  
  197.         if (rdev->ib_pool_ready) {
  198.                 return 0;
  199.         }
  200.  
  201.         if (rdev->family >= CHIP_BONAIRE) {
  202.                 r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
  203.                                               RADEON_IB_POOL_SIZE*64*1024,
  204.                                               RADEON_GPU_PAGE_SIZE,
  205.                                               RADEON_GEM_DOMAIN_GTT,
  206.                                               RADEON_GEM_GTT_WC);
  207.         } else {
  208.                 /* Before CIK, it's better to stick to cacheable GTT due
  209.                  * to the command stream checking
  210.                  */
  211.                 r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
  212.                                               RADEON_IB_POOL_SIZE*64*1024,
  213.                                               RADEON_GPU_PAGE_SIZE,
  214.                                               RADEON_GEM_DOMAIN_GTT, 0);
  215.         }
  216.         if (r) {
  217.                 return r;
  218.         }
  219.  
  220.         r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
  221.         if (r) {
  222.                 return r;
  223.         }
  224.  
  225.         rdev->ib_pool_ready = true;
  226.         if (radeon_debugfs_sa_init(rdev)) {
  227.                 dev_err(rdev->dev, "failed to register debugfs file for SA\n");
  228.         }
  229.         return 0;
  230. }
  231.  
  232. /**
  233.  * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
  234.  *
  235.  * @rdev: radeon_device pointer
  236.  *
  237.  * Tear down the suballocator managing the pool of memory
  238.  * for use as IBs (all asics).
  239.  */
  240. void radeon_ib_pool_fini(struct radeon_device *rdev)
  241. {
  242.         if (rdev->ib_pool_ready) {
  243.                 radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
  244.                 radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
  245.                 rdev->ib_pool_ready = false;
  246.         }
  247. }
  248.  
  249. /**
  250.  * radeon_ib_ring_tests - test IBs on the rings
  251.  *
  252.  * @rdev: radeon_device pointer
  253.  *
  254.  * Test an IB (Indirect Buffer) on each ring.
  255.  * If the test fails, disable the ring.
  256.  * Returns 0 on success, error if the primary GFX ring
  257.  * IB test fails.
  258.  */
  259. int radeon_ib_ring_tests(struct radeon_device *rdev)
  260. {
  261.         unsigned i;
  262.         int r;
  263.  
  264.         for (i = 0; i < RADEON_NUM_RINGS; ++i) {
  265.                 struct radeon_ring *ring = &rdev->ring[i];
  266.  
  267.                 if (!ring->ready)
  268.                         continue;
  269.  
  270.                 r = radeon_ib_test(rdev, i, ring);
  271.                 if (r) {
  272.                         ring->ready = false;
  273.                         rdev->needs_reset = false;
  274.  
  275.                         if (i == RADEON_RING_TYPE_GFX_INDEX) {
  276.                                 /* oh, oh, that's really bad */
  277.                                 DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
  278.                                 rdev->accel_working = false;
  279.                                 return r;
  280.  
  281.                         } else {
  282.                                 /* still not good, but we can live with it */
  283.                                 DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
  284.                         }
  285.                 }
  286.         }
  287.         return 0;
  288. }
  289.  
  290. /*
  291.  * Debugfs info
  292.  */
  293. #if defined(CONFIG_DEBUG_FS)
  294.  
  295. static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
  296. {
  297.         struct drm_info_node *node = (struct drm_info_node *) m->private;
  298.         struct drm_device *dev = node->minor->dev;
  299.         struct radeon_device *rdev = dev->dev_private;
  300.  
  301.         radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
  302.  
  303.         return 0;
  304.  
  305. }
  306.  
/* single debugfs entry exposing the SA (suballocator) manager state */
static struct drm_info_list radeon_debugfs_sa_list[] = {
        {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};
  310.  
  311. #endif
  312.  
/* register the SA debugfs file; a no-op (success) without CONFIG_DEBUG_FS */
static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
        return 0;
#endif
}
  321.