/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or a memory location depends on the asic
 * and whether writeback is enabled.
 */

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
                *drv->cpu_addr = cpu_to_le32(seq);
        } else {
                WREG32(drv->scratch_reg, seq);
        }
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
        u32 seq = 0;

        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
                seq = le32_to_cpu(*drv->cpu_addr);
        } else {
                seq = RREG32(drv->scratch_reg);
        }
        return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
                      struct radeon_fence **fence,
                      int ring)
{
        /* we are protected by the ring emission mutex */
        *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
        if ((*fence) == NULL) {
                return -ENOMEM;
        }
        kref_init(&((*fence)->kref));
        (*fence)->rdev = rdev;
        (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
        (*fence)->ring = ring;
        radeon_fence_ring_emit(rdev, ring, *fence);
//      trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
        return 0;
}

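/*
 * Illustrative sketch, not part of the driver: the typical fence life
 * cycle as seen by callers of the functions in this file.  It assumes
 * the caller holds the ring emission mutex when emitting, as noted in
 * radeon_fence_emit(); the actual work submission step is elided.
 *
 *      struct radeon_fence *fence = NULL;
 *      int r;
 *
 *      r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *      if (r)
 *              return r;                  // -ENOMEM, no fence allocated
 *      ...                                // commit the ring, do other work
 *      r = radeon_fence_wait(fence, false);
 *                                         // 0 once signaled, -EDEADLK on lockup
 *      radeon_fence_unref(&fence);        // drop our reference
 */
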
/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
        uint64_t seq, last_seq, last_emitted;
        unsigned count_loop = 0;
        bool wake = false;

        /* Note there is a scenario here for an infinite loop but it's
         * very unlikely to happen. For it to happen, the current polling
         * process needs to be interrupted by another process, and that
         * other process needs to update last_seq between the atomic read
         * and the xchg of the current process.
         *
         * Moreover, for this to become an infinite loop there must be a
         * continuous stream of newly signaled fences, i.e. radeon_fence_read
         * needs to return a different value each time for both the currently
         * polling process and the other process that updates last_seq
         * between the atomic read and xchg of the current process. And the
         * value the other process sets as last_seq must be higher than
         * the seq value we just read, which means the current process
         * must be interrupted after radeon_fence_read and before the
         * atomic xchg.
         *
         * To be even safer we count the number of times we loop and
         * bail after 10 iterations, accepting the fact that we might
         * have temporarily set last_seq not to the true last signaled
         * seq but to an older one.
         */
        last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
        do {
                last_emitted = rdev->fence_drv[ring].sync_seq[ring];
                seq = radeon_fence_read(rdev, ring);
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq &= 0xffffffff;
                        seq |= last_emitted & 0xffffffff00000000LL;
                }

                if (seq <= last_seq || seq > last_emitted) {
                        break;
                }
                /* If we loop over we don't want to return without
                 * checking if a fence is signaled, as it means that the
                 * seq we just read is different from the previous one.
                 */
                wake = true;
                last_seq = seq;
                if ((count_loop++) > 10) {
                        /* We looped over too many times; leave, accepting
                         * that we might have reported an older fence seq
                         * as signaled than the real last seq signaled
                         * by the hw.
                         */
                        break;
                }
        } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

        if (wake) {
                rdev->fence_drv[ring].last_activity = GetTimerTicks();
                wake_up_all(&rdev->fence_queue);
        }
}

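/*
 * Worked example for the wrap handling above (illustrative values only):
 * the hardware reports just the low 32 bits, so radeon_fence_process()
 * splices them onto the upper half of the last known 64-bit value.
 *
 *      last_seq   = 0x00000001fffffff0  (64-bit software counter)
 *      fence read =         0x00000005  (32-bit hardware value)
 *
 *      seq = 0x00000005 | (last_seq & 0xffffffff00000000)
 *          = 0x0000000100000005
 *
 * seq < last_seq, so the 32-bit counter must have wrapped; the upper
 * bits are taken from last_emitted instead.  With, say, last_emitted =
 * 0x0000000200000010 the corrected result is 0x0000000200000005.
 */
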
/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
        struct radeon_fence *fence;

        fence = container_of(kref, struct radeon_fence, kref);
        kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
                                      u64 seq, unsigned ring)
{
        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
                return true;
        }
        /* poll new last sequence at least once */
        radeon_fence_process(rdev, ring);
        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
                return true;
        }
        return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
        if (!fence) {
                return true;
        }
        if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
                return true;
        }
        if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
                fence->seq = RADEON_FENCE_SIGNALED_SEQ;
                return true;
        }
        return false;
}

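/*
 * Illustrative sketch, not part of the driver: because
 * radeon_fence_signaled() never blocks (it only polls via
 * radeon_fence_process()), it suits busy-poll patterns where sleeping
 * in radeon_fence_wait() is undesirable.  do_other_work() is a
 * hypothetical stand-in.
 *
 *      while (!radeon_fence_signaled(fence)) {
 *              do_other_work();
 *      }
 */
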
/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
                                 unsigned ring, bool intr, bool lock_ring)
{
        unsigned long timeout, last_activity;
        uint64_t seq;
        unsigned i;
        bool signaled;
        int r;

        while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
                if (!rdev->ring[ring].ready) {
                        return -EBUSY;
                }

                timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
                if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
                        /* the normal case, timeout is somewhere before last_activity */
                        timeout = rdev->fence_drv[ring].last_activity - timeout;
                } else {
                        /* either jiffies wrapped around, or no fence was signaled in the last 500ms
                         * anyway we will just wait for the minimum amount and then check for a lockup
                         */
                        timeout = 1;
                }
                seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
                /* Save current last activity value, used to check for GPU lockups */
                last_activity = rdev->fence_drv[ring].last_activity;

//              trace_radeon_fence_wait_begin(rdev->ddev, seq);
                radeon_irq_kms_sw_irq_get(rdev, ring);
                if (intr) {
                        r = wait_event_interruptible_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
                                timeout);
                } else {
                        r = wait_event_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
                                timeout);
                }
                radeon_irq_kms_sw_irq_put(rdev, ring);
                if (unlikely(r < 0)) {
                        return r;
                }
//              trace_radeon_fence_wait_end(rdev->ddev, seq);

                if (unlikely(!signaled)) {
                        /* we were interrupted for some reason and fence
                         * isn't signaled yet, resume waiting */
                        if (r) {
                                continue;
                        }

                        /* check if sequence value has changed since last_activity */
                        if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
                                continue;
                        }

                        if (lock_ring) {
                                mutex_lock(&rdev->ring_lock);
                        }

                        /* test if somebody else has already decided that this is a lockup */
                        if (last_activity != rdev->fence_drv[ring].last_activity) {
                                if (lock_ring) {
                                        mutex_unlock(&rdev->ring_lock);
                                }
                                continue;
                        }

                        if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                                /* good news we believe it's a lockup */
                                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
                                         target_seq, seq);

                                /* change last activity so nobody else thinks there is a lockup */
                                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                                        rdev->fence_drv[i].last_activity = GetTimerTicks();
                                }

                                /* mark the ring as not ready any more */
                                rdev->ring[ring].ready = false;
                                if (lock_ring) {
                                        mutex_unlock(&rdev->ring_lock);
                                }
                                return -EDEADLK;
                        }

                        if (lock_ring) {
                                mutex_unlock(&rdev->ring_lock);
                        }
                }
        }
        return 0;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
        int r;

        if (fence == NULL) {
                WARN(1, "Querying an invalid fence: %p!\n", fence);
                return -EINVAL;
        }

        r = radeon_fence_wait_seq(fence->rdev, fence->seq,
                                  fence->ring, intr, true);
        if (r) {
                return r;
        }
        fence->seq = RADEON_FENCE_SIGNALED_SEQ;
        return 0;
}

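/*
 * Illustrative sketch, not part of the driver: how a caller might
 * dispatch on the error codes documented above.  Whether a reset is
 * attempted here is caller policy; radeon_gpu_reset() is the entry
 * point the driver uses elsewhere for recovery.
 *
 *      r = radeon_fence_wait(fence, true);
 *      if (r == -ERESTARTSYS)
 *              ...                         // interrupted by a signal, retry
 *      else if (r == -EDEADLK)
 *              r = radeon_gpu_reset(rdev); // ring was marked not ready
 */
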
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
        unsigned i;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
                        return true;
                }
        }
        return false;
}

/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
                                     u64 *target_seq, bool intr)
{
        unsigned long timeout, last_activity, tmp;
        unsigned i, ring = RADEON_NUM_RINGS;
        bool signaled;
        int r;

        for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
                if (!target_seq[i]) {
                        continue;
                }

                /* use the most recent one as indicator */
                if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
                        last_activity = rdev->fence_drv[i].last_activity;
                }

                /* For lockup detection just pick the lowest ring we are
                 * actively waiting for
                 */
                if (i < ring) {
                        ring = i;
                }
        }

        /* nothing to wait for ? */
        if (ring == RADEON_NUM_RINGS) {
                return -ENOENT;
        }

        while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
                timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
                if (time_after(last_activity, timeout)) {
                        /* the normal case, timeout is somewhere before last_activity */
                        timeout = last_activity - timeout;
                } else {
                        /* either jiffies wrapped around, or no fence was signaled in the last 500ms
                         * anyway we will just wait for the minimum amount and then check for a lockup
                         */
                        timeout = 1;
                }

//              trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                        if (target_seq[i]) {
                                radeon_irq_kms_sw_irq_get(rdev, i);
                        }
                }
                if (intr) {
                        r = wait_event_interruptible_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
                                timeout);
                } else {
                        r = wait_event_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
                                timeout);
                }
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                        if (target_seq[i]) {
                                radeon_irq_kms_sw_irq_put(rdev, i);
                        }
                }
                if (unlikely(r < 0)) {
                        return r;
                }
//              trace_radeon_fence_wait_end(rdev->ddev, seq);

                if (unlikely(!signaled)) {
                        /* we were interrupted for some reason and fence
                         * isn't signaled yet, resume waiting */
                        if (r) {
                                continue;
                        }

                        mutex_lock(&rdev->ring_lock);
                        for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
                                if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
                                        tmp = rdev->fence_drv[i].last_activity;
                                }
                        }
                        /* test if somebody else has already decided that this is a lockup */
                        if (last_activity != tmp) {
                                last_activity = tmp;
                                mutex_unlock(&rdev->ring_lock);
                                continue;
                        }

                        if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                                /* good news we believe it's a lockup */
                                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
                                         target_seq[ring]);

                                /* change last activity so nobody else thinks there is a lockup */
                                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                                        rdev->fence_drv[i].last_activity = GetTimerTicks();
                                }

                                /* mark the ring as not ready any more */
                                rdev->ring[ring].ready = false;
                                mutex_unlock(&rdev->ring_lock);
                                return -EDEADLK;
                        }
                        mutex_unlock(&rdev->ring_lock);
                }
        }
        return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  Fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
                          struct radeon_fence **fences,
                          bool intr)
{
        uint64_t seq[RADEON_NUM_RINGS];
        unsigned i;
        int r;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                seq[i] = 0;

                if (!fences[i]) {
                        continue;
                }

                if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
                        /* something was already signaled */
                        return 0;
                }

                seq[i] = fences[i]->seq;
        }

        r = radeon_fence_wait_any_seq(rdev, seq, intr);
        if (r) {
                return r;
        }
        return 0;
}

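/*
 * Illustrative sketch, not part of the driver: suballocator-style use
 * of radeon_fence_wait_any(), with one pending fence per ring of
 * interest.  The fence variables are hypothetical; unused slots must
 * stay NULL so they are skipped.
 *
 *      struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL };
 *
 *      fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *      fences[CAYMAN_RING_TYPE_CP1_INDEX] = cp1_fence;
 *      r = radeon_fence_wait_any(rdev, fences, false);
 *      // returns 0 as soon as either fence signals
 */
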
/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
        uint64_t seq;

        seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
        if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
                /* nothing to wait for, last_seq is
                   already the last emitted fence */
                return -ENOENT;
        }
        return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
        uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
        int r;

        r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
        if (r) {
                if (r == -EDEADLK) {
                        return -EDEADLK;
                }
                dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
                        ring, r);
        }
        return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
        kref_get(&fence->kref);
        return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
        struct radeon_fence *tmp = *fence;

        *fence = NULL;
        if (tmp) {
                kref_put(&tmp->kref, radeon_fence_destroy);
        }
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        radeon_fence_process(rdev, ring);
        emitted = rdev->fence_drv[ring].sync_seq[ring]
                - atomic64_read(&rdev->fence_drv[ring].last_seq);
        /* to avoid 32-bit wrap-around */
        if (emitted > 0x10000000) {
                emitted = 0x10000000;
        }
        return (unsigned)emitted;
}

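/*
 * Illustrative sketch, not part of the driver: power management code
 * can use radeon_fence_count_emitted() as a rough busyness metric.
 * The threshold below is made up for illustration.
 *
 *      unsigned pending = 0;
 *      int i;
 *
 *      for (i = 0; i < RADEON_NUM_RINGS; ++i)
 *              pending += radeon_fence_count_emitted(rdev, i);
 *      if (pending < 2)
 *              ...     // GPU nearly idle, consider lowering clocks
 */
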
/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
        struct radeon_fence_driver *fdrv;

        if (!fence) {
                return false;
        }

        if (fence->ring == dst_ring) {
                return false;
        }

        /* we are protected by the ring mutex */
        fdrv = &fence->rdev->fence_drv[dst_ring];
        if (fence->seq <= fdrv->sync_seq[fence->ring]) {
                return false;
        }

        return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
        struct radeon_fence_driver *dst, *src;
        unsigned i;

        if (!fence) {
                return;
        }

        if (fence->ring == dst_ring) {
                return;
        }

        /* we are protected by the ring mutex */
        src = &fence->rdev->fence_drv[fence->ring];
        dst = &fence->rdev->fence_drv[dst_ring];
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (i == dst_ring) {
                        continue;
                }
                dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
        }
}

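/*
 * Illustrative sketch, not part of the driver: the pattern for making
 * work on dst_ring depend on a fence from another ring.  The semaphore
 * signal/wait emission itself lives in radeon_semaphore.c and is only
 * summarized as a comment here.
 *
 *      if (radeon_fence_need_sync(fence, dst_ring)) {
 *              // emit a semaphore signal on fence->ring and a
 *              // semaphore wait on dst_ring, then record it:
 *              radeon_fence_note_sync(fence, dst_ring);
 *      }
 */
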
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
        uint64_t index;
        int r;

        radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
        if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
                rdev->fence_drv[ring].scratch_reg = 0;
                index = R600_WB_EVENT_OFFSET + ring * 4;
        } else {
                r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
                if (r) {
                        dev_err(rdev->dev, "fence failed to get scratch register\n");
                        return r;
                }
                index = RADEON_WB_SCRATCH_OFFSET +
                        rdev->fence_drv[ring].scratch_reg -
                        rdev->scratch.reg_base;
        }
        rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
        rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
        radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
        rdev->fence_drv[ring].initialized = true;
        dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
                 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
        return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
        int i;

        rdev->fence_drv[ring].scratch_reg = -1;
        rdev->fence_drv[ring].cpu_addr = NULL;
        rdev->fence_drv[ring].gpu_addr = 0;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                rdev->fence_drv[ring].sync_seq[i] = 0;
        atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
        rdev->fence_drv[ring].last_activity = GetTimerTicks();
        rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
        int ring;

        init_waitqueue_head(&rdev->fence_queue);
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                radeon_fence_driver_init_ring(rdev, ring);
        }
        if (radeon_debugfs_fence_init(rdev)) {
                dev_err(rdev->dev, "fence debugfs file creation failed\n");
        }
        return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
        int ring, r;

        mutex_lock(&rdev->ring_lock);
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                if (!rdev->fence_drv[ring].initialized)
                        continue;
                r = radeon_fence_wait_empty_locked(rdev, ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        radeon_fence_driver_force_completion(rdev);
                }
                wake_up_all(&rdev->fence_queue);
                radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
                rdev->fence_drv[ring].initialized = false;
        }
        mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
        int ring;

        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                if (!rdev->fence_drv[ring].initialized)
                        continue;
                radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
        }
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int i, j;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (!rdev->fence_drv[i].initialized)
                        continue;

                seq_printf(m, "--- ring %d ---\n", i);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
                seq_printf(m, "Last emitted        0x%016llx\n",
                           rdev->fence_drv[i].sync_seq[i]);

                for (j = 0; j < RADEON_NUM_RINGS; ++j) {
                        if (i != j && rdev->fence_drv[j].initialized)
                                seq_printf(m, "Last sync to ring %d 0x%016llx\n",
                                           j, rdev->fence_drv[i].sync_seq[j]);
                }
        }
        return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
        {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
        return 0;
#endif
}