  1. /**************************************************************************
  2.  *
  3.  * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
  4.  * All Rights Reserved.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the
  8.  * "Software"), to deal in the Software without restriction, including
  9.  * without limitation the rights to use, copy, modify, merge, publish,
  10.  * distribute, sub license, and/or sell copies of the Software, and to
  11.  * permit persons to whom the Software is furnished to do so, subject to
  12.  * the following conditions:
  13.  *
  14.  * The above copyright notice and this permission notice (including the
  15.  * next paragraph) shall be included in all copies or substantial portions
  16.  * of the Software.
  17.  *
  18.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20.  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21.  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22.  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23.  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24.  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25.  *
  26.  **************************************************************************/
  27.  
  28. #include <drm/drmP.h>
  29. #include "vmwgfx_drv.h"
  30.  
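/*
 * Fence seqnos are 32-bit values that wrap around.  A seqno is considered
 * to have passed when the unsigned difference (device_seqno - fence_seqno)
 * is less than VMW_FENCE_WRAP, i.e. half the seqno space, which keeps the
 * comparison correct across wrap-around.
 */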
  31. #define VMW_FENCE_WRAP (1 << 31)
  32.  
  33. struct vmw_fence_manager {
  34.         int num_fence_objects;
  35.         struct vmw_private *dev_priv;
  36.         spinlock_t lock;
  37.         struct list_head fence_list;
  38.         struct work_struct work;
  39.         u32 user_fence_size;
  40.         u32 fence_size;
  41.         u32 event_fence_action_size;
  42.         bool fifo_down;
  43.         struct list_head cleanup_list;
  44.         uint32_t pending_actions[VMW_ACTION_MAX];
  45.         struct mutex goal_irq_mutex;
  46.         bool goal_irq_on; /* Protected by @goal_irq_mutex */
  47.         bool seqno_valid; /* Protected by @lock, and may not be set to true
  48.                              without the @goal_irq_mutex held. */
  49.         unsigned ctx;
  50. };
  51.  
  52. struct vmw_user_fence {
  53.         struct ttm_base_object base;
  54.         struct vmw_fence_obj fence;
  55. };
  56.  
  57. /**
  58.  * struct vmw_event_fence_action - fence action that delivers a drm event.
  59.  *
  60.  * @action: A struct vmw_fence_action to hook up to a fence.
  61.  * @fpriv_head: List head used to link this action into the file private's
  62.  * list of pending fence events.
  63.  * @event: A struct drm_pending_event that controls the event delivery.
  64.  * @fence: A referenced pointer to the fence to keep it alive while @action
  65.  * hangs on it.
  66.  * @dev: Pointer to a struct drm_device so we can access the event stuff.
  67.  * @tv_sec: If non-NULL, the variable pointed to will be assigned the
  68.  * current time tv_sec value when the fence signals.
  69.  * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
  70.  * be assigned the current time tv_usec value when the fence signals.
  71.  */
  72. struct vmw_event_fence_action {
  73.         struct vmw_fence_action action;
  74.         struct list_head fpriv_head;
  75.  
  76.         struct drm_pending_event *event;
  77.         struct vmw_fence_obj *fence;
  78.         struct drm_device *dev;
  79.  
  80.         uint32_t *tv_sec;
  81.         uint32_t *tv_usec;
  82. };
  83.  
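/*
 * The base fence is initialized with a pointer to the manager's spinlock
 * (see vmw_fence_obj_init()), so the owning manager can be recovered from
 * any fence by applying container_of() to that lock pointer.
 */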
  84. static struct vmw_fence_manager *
  85. fman_from_fence(struct vmw_fence_obj *fence)
  86. {
  87.         return container_of(fence->base.lock, struct vmw_fence_manager, lock);
  88. }
  89.  
  90. /**
  91.  * Note on fencing subsystem usage of irqs:
  92.  * Typically the vmw_fences_update function is called
  93.  *
  94.  * a) When a new fence seqno has been submitted by the fifo code.
  95.  * b) On-demand when we have waiters. Sleeping waiters will switch on the
  96.  * ANY_FENCE irq and call the vmw_fences_update function each time an
  97.  * ANY_FENCE irq is received. When the last fence waiter is gone, that IRQ
  98.  * is masked away.
  99.  *
  100.  * In situations where there are no waiters and we don't submit any new fences,
  101.  * fence objects may not be signaled. This is perfectly OK, since there are
  102.  * no consumers of the signaled data, but that is NOT ok when there are fence
  103.  * actions attached to a fence. The fencing subsystem then makes use of the
  104.  * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
  105.  * which has an action attached, and each time vmw_fences_update is called,
  106.  * the subsystem makes sure the fence goal seqno is updated.
  107.  *
  108.  * The fence goal seqno irq is on as long as there are unsignaled fence
  109.  * objects with actions attached to them.
  110.  */
  111.  
  112. static void vmw_fence_obj_destroy(struct fence *f)
  113. {
  114.         struct vmw_fence_obj *fence =
  115.                 container_of(f, struct vmw_fence_obj, base);
  116.  
  117.         struct vmw_fence_manager *fman = fman_from_fence(fence);
  118.         unsigned long irq_flags;
  119.  
  120.         spin_lock_irqsave(&fman->lock, irq_flags);
  121.         list_del_init(&fence->head);
  122.         --fman->num_fence_objects;
  123.         spin_unlock_irqrestore(&fman->lock, irq_flags);
  124.         fence->destroy(fence);
  125. }
  126.  
  127. static const char *vmw_fence_get_driver_name(struct fence *f)
  128. {
  129.         return "vmwgfx";
  130. }
  131.  
  132. static const char *vmw_fence_get_timeline_name(struct fence *f)
  133. {
  134.         return "svga";
  135. }
  136.  
  137. static bool vmw_fence_enable_signaling(struct fence *f)
  138. {
  139.         struct vmw_fence_obj *fence =
  140.                 container_of(f, struct vmw_fence_obj, base);
  141.  
  142.         struct vmw_fence_manager *fman = fman_from_fence(fence);
  143.         struct vmw_private *dev_priv = fman->dev_priv;
  144.  
  145.         u32 *fifo_mem = dev_priv->mmio_virt;
  146.         u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
  147.         if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
  148.                 return false;
  149.  
  150.         vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
  151.  
  152.         return true;
  153. }
  154.  
  155. struct vmwgfx_wait_cb {
  156.         struct fence_cb base;
  157.         struct task_struct *task;
  158. };
  159.  
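/*
 * Fence wait callback.  The process wakeup is stubbed out in this
 * KolibriOS port; vmw_fence_wait() below polls the fence with a short
 * delay instead of sleeping until this callback fires.
 */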
  160. static void
  161. vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
  162. {
  163.         struct vmwgfx_wait_cb *wait =
  164.                 container_of(cb, struct vmwgfx_wait_cb, base);
  165.  
  166. //   wake_up_process(wait->task);
  167. }
  168.  
  169. static void __vmw_fences_update(struct vmw_fence_manager *fman);
  170.  
  171. static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
  172. {
  173.         struct vmw_fence_obj *fence =
  174.                 container_of(f, struct vmw_fence_obj, base);
  175.  
  176.         struct vmw_fence_manager *fman = fman_from_fence(fence);
  177.         struct vmw_private *dev_priv = fman->dev_priv;
  178.         struct vmwgfx_wait_cb cb;
  179.         long ret = timeout;
  180.         unsigned long irq_flags;
  181.  
  182.         if (likely(vmw_fence_obj_signaled(fence)))
  183.                 return timeout;
  184.  
  185.         vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
  186.         vmw_seqno_waiter_add(dev_priv);
  187.  
  188.         spin_lock_irqsave(f->lock, irq_flags);
  189.  
  190. //   if (intr && signal_pending(current)) {
  191. //       ret = -ERESTARTSYS;
  192. //       goto out;
  193. //   }
  194.  
  195.         cb.base.func = vmwgfx_wait_cb;
  196.         cb.task = current;
  197.         list_add(&cb.base.node, &f->cb_list);
  198.  
  199.         while (ret > 0) {
  200.                 __vmw_fences_update(fman);
  201.                 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
  202.                         break;
  203.  
  204.                 spin_unlock_irqrestore(f->lock, irq_flags);
  205.  
  206. //              ret = schedule_timeout(ret);
  207.                 delay(1);
  208.                 ret--; /* no schedule_timeout() in this port; poll one tick per loop */
  209.                 spin_lock_irqsave(f->lock, irq_flags);
  210. //       if (ret > 0 && intr && signal_pending(current))
  211. //           ret = -ERESTARTSYS;
  212.         }
  213.  
  214.         if (!list_empty(&cb.base.node))
  215.                 list_del(&cb.base.node);
  216.  
  217. out:
  218.         spin_unlock_irqrestore(f->lock, irq_flags);
  219.  
  220.         vmw_seqno_waiter_remove(dev_priv);
  221.  
  222.         return ret;
  223. }
  224.  
  225. static struct fence_ops vmw_fence_ops = {
  226.         .get_driver_name = vmw_fence_get_driver_name,
  227.         .get_timeline_name = vmw_fence_get_timeline_name,
  228.         .enable_signaling = vmw_fence_enable_signaling,
  229.         .wait = vmw_fence_wait,
  230.         .release = vmw_fence_obj_destroy,
  231. };
  232.  
  233.  
  234. /**
  235.  * Execute signal actions on fences recently signaled.
  236.  * This is done from a workqueue so we don't have to execute
  237.  * signal actions from atomic context.
  238.  */
  239.  
  240. static void vmw_fence_work_func(struct work_struct *work)
  241. {
  242.         struct vmw_fence_manager *fman =
  243.                 container_of(work, struct vmw_fence_manager, work);
  244.         struct list_head list;
  245.         struct vmw_fence_action *action, *next_action;
  246.         bool seqno_valid;
  247.  
  248.         do {
  249.                 INIT_LIST_HEAD(&list);
  250.                 mutex_lock(&fman->goal_irq_mutex);
  251.  
  252.                 spin_lock_irq(&fman->lock);
  253.                 list_splice_init(&fman->cleanup_list, &list);
  254.                 seqno_valid = fman->seqno_valid;
  255.                 spin_unlock_irq(&fman->lock);
  256.  
  257.                 if (!seqno_valid && fman->goal_irq_on) {
  258.                         fman->goal_irq_on = false;
  259.                         vmw_goal_waiter_remove(fman->dev_priv);
  260.                 }
  261.                 mutex_unlock(&fman->goal_irq_mutex);
  262.  
  263.                 if (list_empty(&list))
  264.                         return;
  265.  
  266.                 /*
  267.                  * At this point, only we should be able to manipulate the
  268.                  * list heads of the actions we have on the private list,
  269.                  * hence fman::lock need not be held.
  270.                  */
  271.  
  272.                 list_for_each_entry_safe(action, next_action, &list, head) {
  273.                         list_del_init(&action->head);
  274.                         if (action->cleanup)
  275.                                 action->cleanup(action);
  276.                 }
  277.         } while (1);
  278. }
  279.  
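/*
 * vmw_fence_manager_init - Allocate and initialize a fence manager for
 * @dev_priv.  Returns NULL on allocation failure.
 */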
  280. struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
  281. {
  282.         struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
  283.  
  284.         if (unlikely(fman == NULL))
  285.                 return NULL;
  286.  
  287.         fman->dev_priv = dev_priv;
  288.         spin_lock_init(&fman->lock);
  289.         INIT_LIST_HEAD(&fman->fence_list);
  290.         INIT_LIST_HEAD(&fman->cleanup_list);
  291.         INIT_WORK(&fman->work, &vmw_fence_work_func);
  292.         fman->fifo_down = true;
  293.         fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
  294.         fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
  295.         fman->event_fence_action_size =
  296.                 ttm_round_pot(sizeof(struct vmw_event_fence_action));
  297.         mutex_init(&fman->goal_irq_mutex);
  298.         fman->ctx = fence_context_alloc(1);
  299.  
  300.         return fman;
  301. }
  302.  
  303. void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
  304. {
  305.         unsigned long irq_flags;
  306.         bool lists_empty;
  307.  
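        /*
         * cancel_work_sync() is stubbed out in this port; we assume any
         * pending cleanup work has already run by the time of takedown.
         */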
  308.  //  (void) cancel_work_sync(&fman->work);
  309.  
  310.         spin_lock_irqsave(&fman->lock, irq_flags);
  311.         lists_empty = list_empty(&fman->fence_list) &&
  312.                 list_empty(&fman->cleanup_list);
  313.         spin_unlock_irqrestore(&fman->lock, irq_flags);
  314.  
  315.         BUG_ON(!lists_empty);
  316.         kfree(fman);
  317. }
  318.  
  319. static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
  320.                               struct vmw_fence_obj *fence, u32 seqno,
  321.                               void (*destroy) (struct vmw_fence_obj *fence))
  322. {
  323.         unsigned long irq_flags;
  324.         int ret = 0;
  325.  
  326.         fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
  327.                    fman->ctx, seqno);
  328.         INIT_LIST_HEAD(&fence->seq_passed_actions);
  329.         fence->destroy = destroy;
  330.  
  331.         spin_lock_irqsave(&fman->lock, irq_flags);
  332.         if (unlikely(fman->fifo_down)) {
  333.                 ret = -EBUSY;
  334.                 goto out_unlock;
  335.         }
  336.         list_add_tail(&fence->head, &fman->fence_list);
  337.         ++fman->num_fence_objects;
  338.  
  339. out_unlock:
  340.         spin_unlock_irqrestore(&fman->lock, irq_flags);
  341.         return ret;
  342.  
  343. }
  344.  
  345. static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
  346.                                 struct list_head *list)
  347. {
  348.         struct vmw_fence_action *action, *next_action;
  349.  
  350.         list_for_each_entry_safe(action, next_action, list, head) {
  351.                 list_del_init(&action->head);
  352.                 fman->pending_actions[action->type]--;
  353.                 if (action->seq_passed != NULL)
  354.                         action->seq_passed(action);
  355.  
  356.                 /*
  357.                  * Add the cleanup action to the cleanup list so that
  358.                  * it will be performed by a worker task.
  359.                  */
  360.  
  361.                 list_add_tail(&action->head, &fman->cleanup_list);
  362.         }
  363. }
  364.  
  365. /**
  366.  * vmw_fence_goal_new_locked - Figure out a new device fence goal
  367.  * seqno if needed.
  368.  *
  369.  * @fman: Pointer to a fence manager.
  370.  * @passed_seqno: The seqno the device currently signals as passed.
  371.  *
  372.  * This function should be called with the fence manager lock held.
  373.  * It is typically called when we have a new passed_seqno, and
  374.  * we might need to update the fence goal. It checks to see whether
  375.  * the current fence goal has already passed, and, in that case,
  376.  * scans through all unsignaled fences to get the next fence object with an
  377.  * action attached, and sets the seqno of that fence as a new fence goal.
  378.  *
  379.  * Returns true if the device goal seqno was updated, false otherwise.
  380.  */
  381. static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
  382.                                       u32 passed_seqno)
  383. {
  384.         u32 goal_seqno;
  385.         u32 *fifo_mem;
  386.         struct vmw_fence_obj *fence;
  387.  
  388.         if (likely(!fman->seqno_valid))
  389.                 return false;
  390.  
  391.         fifo_mem = fman->dev_priv->mmio_virt;
  392.         goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
  393.         if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
  394.                 return false;
  395.  
  396.         fman->seqno_valid = false;
  397.         list_for_each_entry(fence, &fman->fence_list, head) {
  398.                 if (!list_empty(&fence->seq_passed_actions)) {
  399.                         fman->seqno_valid = true;
  400.                         vmw_mmio_write(fence->base.seqno,
  401.                                        fifo_mem + SVGA_FIFO_FENCE_GOAL);
  402.                         break;
  403.                 }
  404.         }
  405.  
  406.         return true;
  407. }
  408.  
  409.  
  410. /**
  411.  * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
  412.  * needed.
  413.  *
  414.  * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
  415.  * considered as a device fence goal.
  416.  *
  417.  * This function should be called with the fence manager lock held.
  418.  * It is typically called when an action has been attached to a fence to
  419.  * check whether the seqno of that fence should be used for a fence
  420.  * goal interrupt. This is typically needed if the current fence goal is
  421.  * invalid, or has a higher seqno than that of the current fence object.
  422.  *
  423.  * Returns true if the device goal seqno was updated, false otherwise.
  424.  */
  425. static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
  426. {
  427.         struct vmw_fence_manager *fman = fman_from_fence(fence);
  428.         u32 goal_seqno;
  429.         u32 *fifo_mem;
  430.  
  431.         if (fence_is_signaled_locked(&fence->base))
  432.                 return false;
  433.  
  434.         fifo_mem = fman->dev_priv->mmio_virt;
  435.         goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
  436.         if (likely(fman->seqno_valid &&
  437.                    goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
  438.                 return false;
  439.  
  440.         vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
  441.         fman->seqno_valid = true;
  442.  
  443.         return true;
  444. }
  445.  
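/*
 * __vmw_fences_update - Signal all fences whose seqno the device has
 * passed and run their attached actions.  The caller must hold
 * fman->lock; vmw_fences_update() below is the locked wrapper.
 */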
  446. static void __vmw_fences_update(struct vmw_fence_manager *fman)
  447. {
  448.         struct vmw_fence_obj *fence, *next_fence;
  449.         struct list_head action_list;
  450.         bool needs_rerun;
  451.         uint32_t seqno, new_seqno;
  452.         u32 *fifo_mem = fman->dev_priv->mmio_virt;
  453.  
  454.         seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
  455. rerun:
  456.         list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
  457.                 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
  458.                         list_del_init(&fence->head);
  459.                         fence_signal_locked(&fence->base);
  460.                         INIT_LIST_HEAD(&action_list);
  461.                         list_splice_init(&fence->seq_passed_actions,
  462.                                          &action_list);
  463.                         vmw_fences_perform_actions(fman, &action_list);
  464.                 } else
  465.                         break;
  466.         }
  467.  
  468.         /*
  469.          * Rerun if the fence goal seqno was updated, and the
  470.          * hardware might have raced with that update, so that
  471.          * we missed a fence_goal irq.
  472.          */
  473.  
  474.         needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
  475.         if (unlikely(needs_rerun)) {
  476.                 new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
  477.                 if (new_seqno != seqno) {
  478.                         seqno = new_seqno;
  479.                         goto rerun;
  480.                 }
  481.         }
  482.  
  483. }
  484.  
  485. void vmw_fences_update(struct vmw_fence_manager *fman)
  486. {
  487.         unsigned long irq_flags;
  488.  
  489.         spin_lock_irqsave(&fman->lock, irq_flags);
  490.         __vmw_fences_update(fman);
  491.         spin_unlock_irqrestore(&fman->lock, irq_flags);
  492. }
  493.  
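/*
 * vmw_fence_obj_signaled - Non-blocking signaled check.  Re-reads the
 * device seqno via vmw_fences_update() if the fence has not yet been
 * flagged as signaled.
 */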
  494. bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
  495. {
  496.         struct vmw_fence_manager *fman = fman_from_fence(fence);
  497.  
  498.         if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
  499.                 return true;
  500.  
  501.         vmw_fences_update(fman);
  502.  
  503.         return fence_is_signaled(&fence->base);
  504. }
  505.  
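/*
 * vmw_fence_obj_wait - Wait for @fence to signal.  Returns 0 on success,
 * -EBUSY if the wait timed out, or a negative error code otherwise.
 */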
  506. int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
  507.                        bool interruptible, unsigned long timeout)
  508. {
  509.         long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
  510.  
  511.         if (likely(ret > 0))
  512.                 return 0;
  513.         else if (ret == 0)
  514.                 return -EBUSY;
  515.         else
  516.                 return ret;
  517. }
  518.  
  519. void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
  520. {
  521.         struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
  522.  
  523.         vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
  524. }
  525.  
  526. static void vmw_fence_destroy(struct vmw_fence_obj *fence)
  527. {
  528.         fence_free(&fence->base);
  529. }
  530.  
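/*
 * vmw_fence_create - Allocate and initialize a kernel-only fence object,
 * i.e. one without a TTM base object or user-space handle.
 */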
  531. int vmw_fence_create(struct vmw_fence_manager *fman,
  532.                      uint32_t seqno,
  533.                      struct vmw_fence_obj **p_fence)
  534. {
  535.         struct vmw_fence_obj *fence;
  536.         int ret;
  537.  
  538.         fence = kzalloc(sizeof(*fence), GFP_KERNEL);
  539.         if (unlikely(fence == NULL))
  540.                 return -ENOMEM;
  541.  
  542.         ret = vmw_fence_obj_init(fman, fence, seqno,
  543.                                  vmw_fence_destroy);
  544.         if (unlikely(ret != 0))
  545.                 goto out_err_init;
  546.  
  547.         *p_fence = fence;
  548.         return 0;
  549.  
  550. out_err_init:
  551.         kfree(fence);
  552.         return ret;
  553. }
  554.  
  555.  
  556. static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
  557. {
  558.         struct vmw_user_fence *ufence =
  559.                 container_of(fence, struct vmw_user_fence, fence);
  560.         struct vmw_fence_manager *fman = fman_from_fence(fence);
  561.  
  562.         ttm_base_object_kfree(ufence, base);
  563.         /*
  564.          * Free kernel space accounting.
  565.          */
  566.         ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
  567.                             fman->user_fence_size);
  568. }
  569.  
  570. static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
  571. {
  572.         struct ttm_base_object *base = *p_base;
  573.         struct vmw_user_fence *ufence =
  574.                 container_of(base, struct vmw_user_fence, base);
  575.         struct vmw_fence_obj *fence = &ufence->fence;
  576.  
  577.         *p_base = NULL;
  578.         vmw_fence_obj_unreference(&fence);
  579. }
  580.  
  581. int vmw_user_fence_create(struct drm_file *file_priv,
  582.                           struct vmw_fence_manager *fman,
  583.                           uint32_t seqno,
  584.                           struct vmw_fence_obj **p_fence,
  585.                           uint32_t *p_handle)
  586. {
  587.         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  588.         struct vmw_user_fence *ufence;
  589.         struct vmw_fence_obj *tmp;
  590.         struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
  591.         int ret;
  592.  
  593.         /*
  594.          * Kernel memory space accounting, since this object may
  595.          * be created by a user-space request.
  596.          */
  597.  
  598.         ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
  599.                                    false, false);
  600.         if (unlikely(ret != 0))
  601.                 return ret;
  602.  
  603.         ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
  604.         if (unlikely(ufence == NULL)) {
  605.                 ret = -ENOMEM;
  606.                 goto out_no_object;
  607.         }
  608.  
  609.         ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
  610.                                  vmw_user_fence_destroy);
  611.         if (unlikely(ret != 0)) {
  612.                 kfree(ufence);
  613.                 goto out_no_object;
  614.         }
  615.  
  616.         /*
  617.          * The base object holds a reference which is freed in
  618.          * vmw_user_fence_base_release.
  619.          */
  620.         tmp = vmw_fence_obj_reference(&ufence->fence);
  621.         ret = ttm_base_object_init(tfile, &ufence->base, false,
  622.                                    VMW_RES_FENCE,
  623.                                    &vmw_user_fence_base_release, NULL);
  624.  
  625.  
  626.         if (unlikely(ret != 0)) {
  627.                 /*
  628.                  * Free the base object's reference
  629.                  */
  630.                 vmw_fence_obj_unreference(&tmp);
  631.                 goto out_err;
  632.         }
  633.  
  634.         *p_fence = &ufence->fence;
  635.         *p_handle = ufence->base.hash.key;
  636.  
  637.         return 0;
  638. out_err:
  639.         tmp = &ufence->fence;
  640.         vmw_fence_obj_unreference(&tmp);
  641. out_no_object:
  642.         ttm_mem_global_free(mem_glob, fman->user_fence_size);
  643.         return ret;
  644. }
  645.  
  646.  
  647. /**
  648.  * vmw_fence_fifo_down - signal all unsignaled fence objects.
  649.  */
  650.  
  651. void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
  652. {
  653.         struct list_head action_list;
  654.         int ret;
  655.  
  656.         /*
  657.          * The list may be altered while we traverse it, so always
  658.          * restart when we've released the fman->lock.
  659.          */
  660.  
  661.         spin_lock_irq(&fman->lock);
  662.         fman->fifo_down = true;
  663.         while (!list_empty(&fman->fence_list)) {
  664.                 struct vmw_fence_obj *fence =
  665.                         list_entry(fman->fence_list.prev, struct vmw_fence_obj,
  666.                                    head);
  667.                 fence_get(&fence->base);
  668.                 spin_unlock_irq(&fman->lock);
  669.  
  670.                 ret = vmw_fence_obj_wait(fence, false, false,
  671.                                          VMW_FENCE_WAIT_TIMEOUT);
  672.  
  673.                 if (unlikely(ret != 0)) {
  674.                         list_del_init(&fence->head);
  675.                         fence_signal(&fence->base);
  676.                         INIT_LIST_HEAD(&action_list);
  677.                         list_splice_init(&fence->seq_passed_actions,
  678.                                          &action_list);
  679.                         vmw_fences_perform_actions(fman, &action_list);
  680.                 }
  681.  
  682.                 BUG_ON(!list_empty(&fence->head));
  683.                 fence_put(&fence->base);
  684.                 spin_lock_irq(&fman->lock);
  685.         }
  686.         spin_unlock_irq(&fman->lock);
  687. }
  688.  
  689. void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
  690. {
  691.         unsigned long irq_flags;
  692.  
  693.         spin_lock_irqsave(&fman->lock, irq_flags);
  694.         fman->fifo_down = false;
  695.         spin_unlock_irqrestore(&fman->lock, irq_flags);
  696. }
  697.  
  698.  
  699. int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
  700.                              struct drm_file *file_priv)
  701. {
  702.         struct drm_vmw_fence_wait_arg *arg =
  703.             (struct drm_vmw_fence_wait_arg *)data;
  704.         unsigned long timeout;
  705.         struct ttm_base_object *base;
  706.         struct vmw_fence_obj *fence;
  707.         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  708.         int ret;
  709.         uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
  710.  
  711.         /*
  712.          * 64-bit division not present on 32-bit systems, so do an
  713.          * approximation. (Divide by 1000000).
  714.          */
  715.  
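        /*
         * 1/2^20 + 1/2^24 - 1/2^26 ~= 0.99838e-6, so the shift sequence
         * below stays within about 0.2% of an exact divide by 1000000.
         */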
  716.         wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
  717.           (wait_timeout >> 26);
  718.  
  719.         if (!arg->cookie_valid) {
  720.                 arg->cookie_valid = 1;
  721.                 arg->kernel_cookie = jiffies + wait_timeout;
  722.         }
  723.  
  724.         base = ttm_base_object_lookup(tfile, arg->handle);
  725.         if (unlikely(base == NULL)) {
  726.                 printk(KERN_ERR "Wait invalid fence object handle "
  727.                        "0x%08lx.\n",
  728.                        (unsigned long)arg->handle);
  729.                 return -EINVAL;
  730.         }
  731.  
  732.         fence = &(container_of(base, struct vmw_user_fence, base)->fence);
  733.  
  734.         timeout = jiffies;
  735.         if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
  736.                 ret = ((vmw_fence_obj_signaled(fence)) ?
  737.                        0 : -EBUSY);
  738.                 goto out;
  739.         }
  740.  
  741.         timeout = (unsigned long)arg->kernel_cookie - timeout;
  742.  
  743.         ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);
  744.  
  745. out:
  746.         ttm_base_object_unref(&base);
  747.  
  748.         /*
  749.          * Optionally unref the fence object.
  750.          */
  751.  
  752.         if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
  753.                 return ttm_ref_object_base_unref(tfile, arg->handle,
  754.                                                  TTM_REF_USAGE);
  755.         return ret;
  756. }
  757.  
  758. int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
  759.                                  struct drm_file *file_priv)
  760. {
  761.         struct drm_vmw_fence_signaled_arg *arg =
  762.                 (struct drm_vmw_fence_signaled_arg *) data;
  763.         struct ttm_base_object *base;
  764.         struct vmw_fence_obj *fence;
  765.         struct vmw_fence_manager *fman;
  766.         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  767.         struct vmw_private *dev_priv = vmw_priv(dev);
  768.  
  769.         base = ttm_base_object_lookup(tfile, arg->handle);
  770.         if (unlikely(base == NULL)) {
  771.                 printk(KERN_ERR "Fence signaled invalid fence object handle "
  772.                        "0x%08lx.\n",
  773.                        (unsigned long)arg->handle);
  774.                 return -EINVAL;
  775.         }
  776.  
  777.         fence = &(container_of(base, struct vmw_user_fence, base)->fence);
  778.         fman = fman_from_fence(fence);
  779.  
  780.         arg->signaled = vmw_fence_obj_signaled(fence);
  781.  
  782.         arg->signaled_flags = arg->flags;
  783.         spin_lock_irq(&fman->lock);
  784.         arg->passed_seqno = dev_priv->last_read_seqno;
  785.         spin_unlock_irq(&fman->lock);
  786.  
  787.         ttm_base_object_unref(&base);
  788.  
  789.         return 0;
  790. }
  791.  
  792.  
  793. int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
  794.                               struct drm_file *file_priv)
  795. {
  796.         struct drm_vmw_fence_arg *arg =
  797.                 (struct drm_vmw_fence_arg *) data;
  798.  
  799.         return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
  800.                                          arg->handle,
  801.                                          TTM_REF_USAGE);
  802. }
  803.  
  804. /**
  805.  * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
  806.  *
  807.  * @fman: Pointer to a struct vmw_fence_manager
  808.  * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
  809.  * with pointers to a struct drm_file object about to be closed.
  810.  *
  811.  * This function removes all pending fence events with references to a
  812.  * specific struct drm_file object about to be closed. The caller is required
  813.  * to pass a list of all struct vmw_event_fence_action objects with such
  814.  * events attached. This function is typically called before the
  815.  * struct drm_file object's event management is taken down.
  816.  */
  817. void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
  818.                                 struct list_head *event_list)
  819. {
  820.         struct vmw_event_fence_action *eaction;
  821.         struct drm_pending_event *event;
  822.         unsigned long irq_flags;
  823.  
  824.         while (1) {
  825.                 spin_lock_irqsave(&fman->lock, irq_flags);
  826.                 if (list_empty(event_list))
  827.                         goto out_unlock;
  828.                 eaction = list_first_entry(event_list,
  829.                                            struct vmw_event_fence_action,
  830.                                            fpriv_head);
  831.                 list_del_init(&eaction->fpriv_head);
  832.                 event = eaction->event;
  833.                 eaction->event = NULL;
  834.                 spin_unlock_irqrestore(&fman->lock, irq_flags);
  835.                 event->destroy(event);
  836.         }
  837. out_unlock:
  838.         spin_unlock_irqrestore(&fman->lock, irq_flags);
  839. }
  840.  
  841.  
  842. /**
  843.  * vmw_event_fence_action_seq_passed
  844.  *
  845.  * @action: The struct vmw_fence_action embedded in a struct
  846.  * vmw_event_fence_action.
  847.  *
  848.  * This function is called when the seqno of the fence where @action is
  849.  * attached has passed. It queues the event on the submitter's event list.
  850.  * This function is always called from atomic context, and may be called
  851.  * from irq context.
  852.  */
  853. static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
  854. {
  855.         struct vmw_event_fence_action *eaction =
  856.                 container_of(action, struct vmw_event_fence_action, action);
  857.         struct drm_device *dev = eaction->dev;
  858.         struct drm_pending_event *event = eaction->event;
  859.         struct drm_file *file_priv;
  860.         unsigned long irq_flags;
  861.  
  862.         if (unlikely(event == NULL))
  863.                 return;
  864.  
  865.         file_priv = event->file_priv;
  866.         spin_lock_irqsave(&dev->event_lock, irq_flags);
  867.  
  868.         if (likely(eaction->tv_sec != NULL)) {
  869.                 struct timeval tv = { 0, 0 }; /* no do_gettimeofday() in this port */
  870.  
  871. //       do_gettimeofday(&tv);
  872.                 *eaction->tv_sec = tv.tv_sec;
  873.                 *eaction->tv_usec = tv.tv_usec;
  874.         }
  875.  
  876.         list_del_init(&eaction->fpriv_head);
  877.         list_add_tail(&eaction->event->link, &file_priv->event_list);
  878.         eaction->event = NULL;
  879.         wake_up_all(&file_priv->event_wait);
  880.         spin_unlock_irqrestore(&dev->event_lock, irq_flags);
  881. }
  882.  
  883. /**
  884.  * vmw_event_fence_action_cleanup
  885.  *
  886.  * @action: The struct vmw_fence_action embedded in a struct
  887.  * vmw_event_fence_action.
  888.  *
  889.  * This function is the struct vmw_fence_action destructor. It's typically
  890.  * called from a workqueue.
  891.  */
  892. static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
  893. {
  894.         struct vmw_event_fence_action *eaction =
  895.                 container_of(action, struct vmw_event_fence_action, action);
  896.         struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
  897.         unsigned long irq_flags;
  898.  
  899.         spin_lock_irqsave(&fman->lock, irq_flags);
  900.         list_del(&eaction->fpriv_head);
  901.         spin_unlock_irqrestore(&fman->lock, irq_flags);
  902.  
  903.         vmw_fence_obj_unreference(&eaction->fence);
  904.         kfree(eaction);
  905. }
  906.  
  907.  
  908. /**
  909.  * vmw_fence_obj_add_action - Add an action to a fence object.
  910.  *
  911.  * @fence: The fence object.
  912.  * @action: The action to add.
  913.  *
  914.  * Note that the action callbacks may be executed before this function
  915.  * returns.
  916.  */
  917. static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
  918.                               struct vmw_fence_action *action)
  919. {
  920.         struct vmw_fence_manager *fman = fman_from_fence(fence);
  921.         unsigned long irq_flags;
  922.         bool run_update = false;
  923.  
  924.         mutex_lock(&fman->goal_irq_mutex);
  925.         spin_lock_irqsave(&fman->lock, irq_flags);
  926.  
  927.         fman->pending_actions[action->type]++;
  928.         if (fence_is_signaled_locked(&fence->base)) {
  929.                 struct list_head action_list;
  930.  
  931.                 INIT_LIST_HEAD(&action_list);
  932.                 list_add_tail(&action->head, &action_list);
  933.                 vmw_fences_perform_actions(fman, &action_list);
  934.         } else {
  935.                 list_add_tail(&action->head, &fence->seq_passed_actions);
  936.  
  937.                 /*
  938.                  * This function may set fman::seqno_valid, so it must
  939.                  * be run with the goal_irq_mutex held.
  940.                  */
  941.                 run_update = vmw_fence_goal_check_locked(fence);
  942.         }
  943.  
  944.         spin_unlock_irqrestore(&fman->lock, irq_flags);
  945.  
  946.         if (run_update) {
  947.                 if (!fman->goal_irq_on) {
  948.                         fman->goal_irq_on = true;
  949.                         vmw_goal_waiter_add(fman->dev_priv);
  950.                 }
  951.                 vmw_fences_update(fman);
  952.         }
  953.         mutex_unlock(&fman->goal_irq_mutex);
  954.  
  955. }
  956.  
  957. /**
  958.  * vmw_event_fence_action_queue - Queue an event for sending when a fence
  959.  * object seqno has passed.
  960.  *
  961.  * @file_priv: The file connection on which the event should be posted.
  962.  * @fence: The fence object on which to post the event.
  963.  * @event: Event to be posted; must be k[mz]alloc'ed and fully initialized.
  964.  * @tv_sec, @tv_usec: If non-NULL, assigned the time the fence signaled.
  965.  * @interruptible: Interruptible waits if possible.
  966.  *
  967.  * As a side effect, the object pointed to by @event may have been
  968.  * freed when this function returns. If this function returns with
  969.  * an error code, the caller needs to free that object.
  970.  */
  971.  
  972. int vmw_event_fence_action_queue(struct drm_file *file_priv,
  973.                                  struct vmw_fence_obj *fence,
  974.                                  struct drm_pending_event *event,
  975.                                  uint32_t *tv_sec,
  976.                                  uint32_t *tv_usec,
  977.                                  bool interruptible)
  978. {
  979.         struct vmw_event_fence_action *eaction;
  980.         struct vmw_fence_manager *fman = fman_from_fence(fence);
  981.         struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
  982.         unsigned long irq_flags;
  983.  
  984.         eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
  985.         if (unlikely(eaction == NULL))
  986.                 return -ENOMEM;
  987.  
  988.         eaction->event = event;
  989.  
  990.         eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
  991.         eaction->action.cleanup = vmw_event_fence_action_cleanup;
  992.         eaction->action.type = VMW_ACTION_EVENT;
  993.  
  994.         eaction->fence = vmw_fence_obj_reference(fence);
  995.         eaction->dev = fman->dev_priv->dev;
  996.         eaction->tv_sec = tv_sec;
  997.         eaction->tv_usec = tv_usec;
  998.  
  999.         spin_lock_irqsave(&fman->lock, irq_flags);
  1000.         list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
  1001.         spin_unlock_irqrestore(&fman->lock, irq_flags);
  1002.  
  1003.         vmw_fence_obj_add_action(fence, &eaction->action);
  1004.  
  1005.         return 0;
  1006. }
  1007.  
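/*
 * A pending DRM event: the generic drm_pending_event bookkeeping followed
 * by the vmwgfx-specific fence event payload delivered to user-space.
 */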
  1008. struct vmw_event_fence_pending {
  1009.         struct drm_pending_event base;
  1010.         struct drm_vmw_event_fence event;
  1011. };
  1012.  
  1013. static int vmw_event_fence_action_create(struct drm_file *file_priv,
  1014.                                   struct vmw_fence_obj *fence,
  1015.                                   uint32_t flags,
  1016.                                   uint64_t user_data,
  1017.                                   bool interruptible)
  1018. {
  1019.         struct vmw_event_fence_pending *event;
  1020.         struct vmw_fence_manager *fman = fman_from_fence(fence);
  1021.         struct drm_device *dev = fman->dev_priv->dev;
  1022.         unsigned long irq_flags;
  1023.         int ret;
  1024.  
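        /*
         * Reserve room for the event in the file's per-client event space
         * before allocating it; the reservation is refunded on failure.
         */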
  1025.         spin_lock_irqsave(&dev->event_lock, irq_flags);
  1026.  
  1027.         ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
  1028.         if (likely(ret == 0))
  1029.                 file_priv->event_space -= sizeof(event->event);
  1030.  
  1031.         spin_unlock_irqrestore(&dev->event_lock, irq_flags);
  1032.  
  1033.         if (unlikely(ret != 0)) {
  1034.                 DRM_ERROR("Failed to allocate event space for this file.\n");
  1035.                 goto out_no_space;
  1036.         }
  1037.  
  1038.  
  1039.         event = kzalloc(sizeof(*event), GFP_KERNEL);
  1040.         if (unlikely(event == NULL)) {
  1041.                 DRM_ERROR("Failed to allocate an event.\n");
  1042.                 ret = -ENOMEM;
  1043.                 goto out_no_event;
  1044.         }
  1045.  
  1046.         event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
  1047.         event->event.base.length = sizeof(*event);
  1048.         event->event.user_data = user_data;
  1049.  
  1050.         event->base.event = &event->event.base;
  1051.         event->base.file_priv = file_priv;
  1052.         event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
  1053.  
  1054.  
  1055.         if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
  1056.                 ret = vmw_event_fence_action_queue(file_priv, fence,
  1057.                                                    &event->base,
  1058.                                                    &event->event.tv_sec,
  1059.                                                    &event->event.tv_usec,
  1060.                                                    interruptible);
  1061.         else
  1062.                 ret = vmw_event_fence_action_queue(file_priv, fence,
  1063.                                                    &event->base,
  1064.                                                    NULL,
  1065.                                                    NULL,
  1066.                                                    interruptible);
  1067.         if (ret != 0)
  1068.                 goto out_no_queue;
  1069.  
  1070.         return 0;
  1071.  
  1072. out_no_queue:
  1073.         event->base.destroy(&event->base);
  1074. out_no_event:
  1075.         spin_lock_irqsave(&dev->event_lock, irq_flags);
  1076.         file_priv->event_space += sizeof(event->event); /* refund exactly what was reserved */
  1077.         spin_unlock_irqrestore(&dev->event_lock, irq_flags);
  1078. out_no_space:
  1079.         return ret;
  1080. }
  1081.  
  1082. #if 0
  1083. int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
  1084.                           struct drm_file *file_priv)
  1085. {
  1086.         struct vmw_private *dev_priv = vmw_priv(dev);
  1087.         struct drm_vmw_fence_event_arg *arg =
  1088.                 (struct drm_vmw_fence_event_arg *) data;
  1089.         struct vmw_fence_obj *fence = NULL;
  1090.         struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
  1091.         struct drm_vmw_fence_rep __user *user_fence_rep =
  1092.                 (struct drm_vmw_fence_rep __user *)(unsigned long)
  1093.                 arg->fence_rep;
  1094.         uint32_t handle;
  1095.         int ret;
  1096.  
  1097.         /*
  1098.          * Look up an existing fence object,
  1099.          * and if user-space wants a new reference,
  1100.          * add one.
  1101.          */
  1102.         if (arg->handle) {
  1103.                 struct ttm_base_object *base =
  1104.                         ttm_base_object_lookup_for_ref(dev_priv->tdev,
  1105.                                                        arg->handle);
  1106.  
  1107.                 if (unlikely(base == NULL)) {
  1108.                         DRM_ERROR("Fence event invalid fence object handle "
  1109.                                   "0x%08lx.\n",
  1110.                                   (unsigned long)arg->handle);
  1111.                         return -EINVAL;
  1112.                 }
  1113.                 fence = &(container_of(base, struct vmw_user_fence,
  1114.                                        base)->fence);
  1115.                 (void) vmw_fence_obj_reference(fence);
  1116.  
  1117.                 if (user_fence_rep != NULL) {
  1118.                         bool existed;
  1119.  
  1120.                         ret = ttm_ref_object_add(vmw_fp->tfile, base,
  1121.                                                  TTM_REF_USAGE, &existed);
  1122.                         if (unlikely(ret != 0)) {
  1123.                                 DRM_ERROR("Failed to reference a fence "
  1124.                                           "object.\n");
  1125.                                 goto out_no_ref_obj;
  1126.                         }
  1127.                         handle = base->hash.key;
  1128.                 }
  1129.                 ttm_base_object_unref(&base);
  1130.         }
  1131.  
  1132.         /*
  1133.          * Create a new fence object.
  1134.          */
  1135.         if (!fence) {
  1136.                 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
  1137.                                                  &fence,
  1138.                                                  (user_fence_rep) ?
  1139.                                                  &handle : NULL);
  1140.                 if (unlikely(ret != 0)) {
  1141.                         DRM_ERROR("Fence event failed to create fence.\n");
  1142.                         return ret;
  1143.                 }
  1144.         }
  1145.  
  1146.         BUG_ON(fence == NULL);
  1147.  
  1148.         ret = vmw_event_fence_action_create(file_priv, fence,
  1149.                                             arg->flags,
  1150.                                             arg->user_data,
  1151.                                             true);
  1152.         if (unlikely(ret != 0)) {
  1153.                 if (ret != -ERESTARTSYS)
  1154.                         DRM_ERROR("Failed to attach event to fence.\n");
  1155.                 goto out_no_create;
  1156.         }
  1157.  
  1158.         vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
  1159.                                     handle);
  1160.         vmw_fence_obj_unreference(&fence);
  1161.         return 0;
  1162. out_no_create:
  1163.         if (user_fence_rep != NULL)
  1164.                 ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
  1165.                                           handle, TTM_REF_USAGE);
  1166. out_no_ref_obj:
  1167.         vmw_fence_obj_unreference(&fence);
  1168.         return ret;
  1169. }
  1170. #endif
  1171.