Subversion Repositories Kolibri OS


/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/fence.h>

/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic_t fence_context_counter = ATOMIC_INIT(0);

/**
 * fence_context_alloc - allocate an array of fence contexts
 * @num:        [in]    number of contexts to allocate
 *
 * This function will return the first index of the number of fence contexts
 * allocated. The fence context is used for setting fence->context to a
 * unique number.
 */
unsigned fence_context_alloc(unsigned num)
{
        BUG_ON(!num);
        return atomic_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
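/*
 * Illustrative sketch (not part of this file): a driver with several
 * independent engines typically reserves one fence context per engine once
 * at init time, then numbers fences per context. my_dev and NUM_ENGINES
 * are hypothetical names.
 */
#if 0
static void my_dev_init_fencing(struct my_dev *dev)
{
        /* first of a block of NUM_ENGINES consecutive context numbers */
        dev->fence_context = fence_context_alloc(NUM_ENGINES);
}
#endif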

/**
 * fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 *
 * Unlike fence_signal, this function must be called with fence->lock held.
 */
int fence_signal_locked(struct fence *fence)
{
        struct fence_cb *cur, *tmp;
        int ret = 0;

        if (WARN_ON(!fence))
                return -EINVAL;

        if (!ktime_to_ns(fence->timestamp)) {
                fence->timestamp = ktime_get();
                smp_mb__before_atomic();
        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                ret = -EINVAL;

                /*
                 * we might have raced with the unlocked fence_signal,
                 * still run through all callbacks
                 */
        }

        list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                list_del_init(&cur->node);
                cur->func(fence, cur);
        }
        return ret;
}
EXPORT_SYMBOL(fence_signal_locked);

/**
 * fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 */
int fence_signal(struct fence *fence)
{
        unsigned long flags;

        if (!fence)
                return -EINVAL;

        if (!ktime_to_ns(fence->timestamp)) {
                fence->timestamp = ktime_get();
                smp_mb__before_atomic();
        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return -EINVAL;

        if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
                struct fence_cb *cur, *tmp;

                spin_lock_irqsave(fence->lock, flags);
                list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                        list_del_init(&cur->node);
                        cur->func(fence, cur);
                }
                spin_unlock_irqrestore(fence->lock, flags);
        }
        return 0;
}
EXPORT_SYMBOL(fence_signal);
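/*
 * Illustrative sketch (not part of this file): fence_signal() is typically
 * called from the driver's completion path, e.g. an interrupt handler, once
 * the hardware work behind the fence has finished. my_irq_handler, my_dev
 * and active_fence are hypothetical names.
 */
#if 0
static irqreturn_t my_irq_handler(int irq, void *data)
{
        struct my_dev *dev = data;

        /* unblocks waiters and runs the registered fence callbacks */
        fence_signal(dev->active_fence);
        return IRQ_HANDLED;
}
#endif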

/**
 * fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence:      [in]    the fence to wait on
 * @intr:       [in]    if true, do an interruptible wait
 * @timeout:    [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 */
signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
        signed long ret;

        if (WARN_ON(timeout < 0))
                return -EINVAL;

        if (timeout == 0)
                return fence_is_signaled(fence);

        ret = fence->ops->wait(fence, intr, timeout);
        return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);
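/*
 * Illustrative sketch (not part of this file): a caller that already holds
 * a reference and is willing to block for up to ~100 ms. The helper name
 * and the 100 ms budget are made up for the example.
 */
#if 0
static int my_wait_for_fence(struct fence *fence)
{
        signed long ret;

        ret = fence_wait_timeout(fence, true, msecs_to_jiffies(100));
        if (ret == 0)
                return -ETIMEDOUT;      /* wait timed out */
        if (ret < 0)
                return ret;             /* -ERESTARTSYS or driver error */
        return 0;                       /* signaled, ret jiffies remained */
}
#endif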

void fence_release(struct kref *kref)
{
        struct fence *fence =
                        container_of(kref, struct fence, refcount);

        BUG_ON(!list_empty(&fence->cb_list));

        if (fence->ops->release)
                fence->ops->release(fence);
        else
                fence_free(fence);
}
EXPORT_SYMBOL(fence_release);

void fence_free(struct fence *fence)
{
        kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);

/**
 * fence_enable_sw_signaling - enable signaling on fence
 * @fence:      [in]    the fence to enable
 *
 * This will request sw signaling to be enabled, to make the fence
 * complete as soon as possible.
 */
void fence_enable_sw_signaling(struct fence *fence)
{
        unsigned long flags;

        if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
            !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {

                spin_lock_irqsave(fence->lock, flags);

                if (!fence->ops->enable_signaling(fence))
                        fence_signal_locked(fence);

                spin_unlock_irqrestore(fence->lock, flags);
        }
}
EXPORT_SYMBOL(fence_enable_sw_signaling);

/**
 * fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence:      [in]    the fence to wait on
 * @cb:         [in]    the callback to register
 * @func:       [in]    the function to call
 *
 * cb will be initialized by fence_add_callback, no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context.  If
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback)
 *
 * Add a software callback to the fence. Same restrictions apply to
 * refcount as it does to fence_wait, however the caller doesn't need to
 * keep a refcount to fence afterwards: when software access is enabled,
 * the creator of the fence is required to keep the fence alive until
 * after it signals with fence_signal. The callback itself can be called
 * from irq context.
 *
 */
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
                       fence_func_t func)
{
        unsigned long flags;
        int ret = 0;
        bool was_set;

        if (WARN_ON(!fence || !func))
                return -EINVAL;

        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                INIT_LIST_HEAD(&cb->node);
                return -ENOENT;
        }

        spin_lock_irqsave(fence->lock, flags);

        was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                ret = -ENOENT;
        else if (!was_set) {
                if (!fence->ops->enable_signaling(fence)) {
                        fence_signal_locked(fence);
                        ret = -ENOENT;
                }
        }

        if (!ret) {
                cb->func = func;
                list_add_tail(&cb->node, &fence->cb_list);
        } else
                INIT_LIST_HEAD(&cb->node);
        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(fence_add_callback);
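/*
 * Illustrative sketch (not part of this file): a typical user embeds the
 * fence_cb in its own state and recovers it with container_of() inside the
 * callback. my_waiter, my_fence_cb and my_wait_async are hypothetical
 * names. Since the callback may run in irq context, it only schedules
 * deferred work here.
 */
#if 0
struct my_waiter {
        struct fence_cb cb;
        struct work_struct work;
};

static void my_fence_cb(struct fence *fence, struct fence_cb *cb)
{
        struct my_waiter *w = container_of(cb, struct my_waiter, cb);

        schedule_work(&w->work);
}

static int my_wait_async(struct fence *fence, struct my_waiter *w)
{
        int ret = fence_add_callback(fence, &w->cb, my_fence_cb);

        /* -ENOENT means the fence already signaled; treat that as done */
        return ret == -ENOENT ? 0 : ret;
}
#endif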

/**
 * fence_remove_callback - remove a callback from the signaling list
 * @fence:      [in]    the fence to wait on
 * @cb:         [in]    the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 */
bool
fence_remove_callback(struct fence *fence, struct fence_cb *cb)
{
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(fence->lock, flags);

        ret = !list_empty(&cb->node);
        if (ret)
                list_del_init(&cb->node);

        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(fence_remove_callback);

struct default_wait_cb {
        struct fence_cb base;
        struct task_struct *task;
};

static bool
fence_test_signaled_any(struct fence **fences, uint32_t count)
{
        int i;

        for (i = 0; i < count; ++i) {
                struct fence *fence = fences[i];
                if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                        return true;
        }
        return false;
}

/**
 * fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences:     [in]    array of fences to wait on
 * @count:      [in]    number of fences to wait on
 * @intr:       [in]    if true, do an interruptible wait
 * @timeout:    [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 */

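/*
 * The body of fence_wait_any_timeout() is not present in this port. What
 * follows is a minimal sketch of how the documented behaviour could be
 * built on fence_add_callback()/fence_remove_callback() and the
 * default_wait_cb helper above, assuming the usual scheduler primitives
 * (set_current_state(), schedule_timeout(), wake_up_state()) are available.
 * The upstream version additionally returns -EINVAL for fences whose
 * ops->wait is not the default implementation.
 */
#if 0
static void fence_any_wait_cb(struct fence *fence, struct fence_cb *cb)
{
        struct default_wait_cb *wait =
                container_of(cb, struct default_wait_cb, base);

        wake_up_state(wait->task, TASK_NORMAL);
}

signed long
fence_wait_any_timeout(struct fence **fences, uint32_t count,
                       bool intr, signed long timeout)
{
        struct default_wait_cb *cb;
        signed long ret = timeout;
        unsigned i;

        if (WARN_ON(!fences || !count || timeout < 0))
                return -EINVAL;

        if (timeout == 0) {
                for (i = 0; i < count; ++i)
                        if (fence_is_signaled(fences[i]))
                                return 1;
                return 0;
        }

        cb = kcalloc(count, sizeof(*cb), GFP_KERNEL);
        if (!cb)
                return -ENOMEM;

        /* register a wake-up callback on every fence */
        for (i = 0; i < count; ++i) {
                cb[i].task = current;
                if (fence_add_callback(fences[i], &cb[i].base,
                                       fence_any_wait_cb))
                        goto fence_rm_cb;       /* already signaled */
        }

        /* sleep until any fence signals, the timeout expires or a signal arrives */
        while (ret > 0) {
                set_current_state(intr ? TASK_INTERRUPTIBLE
                                       : TASK_UNINTERRUPTIBLE);

                if (fence_test_signaled_any(fences, count))
                        break;

                ret = schedule_timeout(ret);

                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }

        __set_current_state(TASK_RUNNING);

fence_rm_cb:
        while (i-- > 0)
                fence_remove_callback(fences[i], &cb[i].base);

        kfree(cb);
        return ret;
}
#endif
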
/**
 * fence_init - Initialize a custom fence.
 * @fence:      [in]    the fence to initialize
 * @ops:        [in]    the fence_ops for operations on this fence
 * @lock:       [in]    the irqsafe spinlock to use for locking this fence
 * @context:    [in]    the execution context this fence is run on
 * @seqno:      [in]    a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence, the caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if fence_ops.enable_signaling gets called. This can
 * be used for implementing other types of fence.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using fence_later.
 */
void
fence_init(struct fence *fence, const struct fence_ops *ops,
             spinlock_t *lock, unsigned context, unsigned seqno)
{
        BUG_ON(!lock);
        BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
               !ops->get_driver_name || !ops->get_timeline_name);

        kref_init(&fence->refcount);
        fence->ops = ops;
        INIT_LIST_HEAD(&fence->cb_list);
        fence->lock = lock;
        fence->context = context;
        fence->seqno = seqno;
        fence->flags = 0UL;
}
EXPORT_SYMBOL(fence_init);
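
/*
 * Illustrative sketch (not part of this file): the minimum a driver has to
 * provide before calling fence_init(). All names here (my_fence, my_dev,
 * my_ops, ...) are hypothetical; ->wait points at fence_default_wait, the
 * upstream core helper, which is not part of this trimmed port, so a real
 * driver here would supply its own wait implementation instead.
 */
#if 0
struct my_fence {
        struct fence base;
};

static const char *my_get_driver_name(struct fence *f)
{
        return "my_driver";
}

static const char *my_get_timeline_name(struct fence *f)
{
        return "my_timeline";
}

static bool my_enable_signaling(struct fence *f)
{
        /* arm the interrupt/worker that will eventually call fence_signal();
         * returning false means the fence already completed */
        return true;
}

static const struct fence_ops my_ops = {
        .get_driver_name   = my_get_driver_name,
        .get_timeline_name = my_get_timeline_name,
        .enable_signaling  = my_enable_signaling,
        .wait              = fence_default_wait,
};

static struct fence *my_fence_create(struct my_dev *dev)
{
        struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return NULL;

        /* one context per engine, seqnos strictly increasing per context */
        fence_init(&f->base, &my_ops, &dev->fence_lock,
                   dev->fence_context, ++dev->fence_seqno);
        return &f->base;
}
#endif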