
/**************************************************************************
 *
 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "vmwgfx_drv.h"
#include <linux/time.h>

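/*
 * A marker ties a FIFO command-stream sequence number (seqno) to the
 * time the commands were submitted, so that command-stream lag can be
 * tracked per queue.
 */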
struct vmw_marker {
        struct list_head head;
        uint32_t seqno;
        struct timespec submitted;
};

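/*
 * vmw_marker_queue_init - Initialize an empty marker queue with zero
 * accumulated lag.
 */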
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
        INIT_LIST_HEAD(&queue->head);
        queue->lag = ns_to_timespec(0);
        /* getrawmonotonic() is unavailable in this port; the lag timestamp
         * is zero-initialised instead of taking a raw clock sample. */
        queue->lag_time = ns_to_timespec(0);
//   getrawmonotonic(&queue->lag_time);
        spin_lock_init(&queue->lock);
}

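/*
 * vmw_marker_queue_takedown - Free any markers still left on the queue.
 */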
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
        struct vmw_marker *marker, *next;

        spin_lock(&queue->lock);
        list_for_each_entry_safe(marker, next, &queue->head, head) {
                kfree(marker);
        }
        spin_unlock(&queue->lock);
}

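/*
 * vmw_marker_push - Allocate a marker for @seqno and append it to the
 * queue.  Returns -ENOMEM if the allocation fails.
 */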
int vmw_marker_push(struct vmw_marker_queue *queue,
                   uint32_t seqno)
{
        struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);

        if (unlikely(!marker))
                return -ENOMEM;

        marker->seqno = seqno;
        /* The submission time is not recorded in this port: getrawmonotonic()
         * is stubbed out, so marker->submitted is effectively unused. */
//   getrawmonotonic(&marker->submitted);
        spin_lock(&queue->lock);
        list_add_tail(&marker->head, &queue->head);
        spin_unlock(&queue->lock);

        return 0;
}

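/*
 * vmw_marker_pull - Retire all markers whose seqno has been reached by
 * @signaled_seqno (allowing for 32-bit wrap) and update the lag timestamp.
 * Returns -EBUSY if the queue was non-empty and no marker was retired.
 */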
int vmw_marker_pull(struct vmw_marker_queue *queue,
                   uint32_t signaled_seqno)
{
        struct vmw_marker *marker, *next;
        struct timespec now = { 0, 0 };
        bool updated = false;

        spin_lock(&queue->lock);
        /* getrawmonotonic(&now) is unavailable in this port; @now stays
         * zero, so only the marker bookkeeping below is meaningful. */
//   getrawmonotonic(&now);

        if (list_empty(&queue->head)) {
//       queue->lag = ns_to_timespec(0);
                queue->lag_time = now;
                updated = true;
                goto out_unlock;
        }

        list_for_each_entry_safe(marker, next, &queue->head, head) {
                /* Marker not yet signaled (accounting for seqno wrap)? Skip it. */
                if (signaled_seqno - marker->seqno > (1 << 30))
                        continue;

//       queue->lag = timespec_sub(now, marker->submitted);
                queue->lag_time = now;
                updated = true;
                list_del(&marker->head);
                kfree(marker);
        }

out_unlock:
        spin_unlock(&queue->lock);

        return (updated) ? 0 : -EBUSY;
}

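/*
 * vmw_timespec_add - Add two timespecs, normalizing the nanosecond field.
 */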
static struct timespec vmw_timespec_add(struct timespec t1,
                                        struct timespec t2)
{
        t1.tv_sec += t2.tv_sec;
        t1.tv_nsec += t2.tv_nsec;
        if (t1.tv_nsec >= 1000000000L) {
                t1.tv_sec += 1;
                t1.tv_nsec -= 1000000000L;
        }

        return t1;
}

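/*
 * vmw_fifo_lag - Return the accumulated command-stream lag for the queue.
 */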
static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
{
        struct timespec now = { 0, 0 };

        spin_lock(&queue->lock);
        /* The raw clock read and lag accumulation are stubbed out in this
         * port; queue->lag is returned unchanged. */
//   getrawmonotonic(&now);
//   queue->lag = vmw_timespec_add(queue->lag,
//                     timespec_sub(now, queue->lag_time));
        queue->lag_time = now;
        spin_unlock(&queue->lock);
        return queue->lag;
}


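/*
 * vmw_lag_lt - Return true if the current lag does not exceed @us
 * microseconds.
 */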
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
                       uint32_t us)
{
        struct timespec lag, cond;

        cond = ns_to_timespec((s64) us * 1000);
        lag = vmw_fifo_lag(queue);
        return (timespec_compare(&lag, &cond) < 1);
}

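/*
 * vmw_wait_lag - Block until the command-stream lag drops below @us
 * microseconds, waiting on and retiring queued markers as needed.
 */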
int vmw_wait_lag(struct vmw_private *dev_priv,
                 struct vmw_marker_queue *queue, uint32_t us)
{
        struct vmw_marker *marker;
        uint32_t seqno;
        int ret;

        while (!vmw_lag_lt(queue, us)) {
                spin_lock(&queue->lock);
                if (list_empty(&queue->head))
                        seqno = atomic_read(&dev_priv->marker_seq);
                else {
                        marker = list_first_entry(&queue->head,
                                                 struct vmw_marker, head);
                        seqno = marker->seqno;
                }
                spin_unlock(&queue->lock);

                ret = vmw_wait_seqno(dev_priv, false, seqno, true,
                                        3*HZ);

                if (unlikely(ret != 0))
                        return ret;

                (void) vmw_marker_pull(queue, seqno);
        }
        return 0;
}

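/*
 * div_s64_rem - Signed 64-bit divide with 32-bit divisor and remainder.
 * Provided locally, presumably because the host environment lacks the
 * kernel's math64 helper.
 */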
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
        u64 quotient;

        if (dividend < 0) {
                quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
                *remainder = -*remainder;
                if (divisor > 0)
                        quotient = -quotient;
        } else {
                quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
                if (divisor < 0)
                        quotient = -quotient;
        }
        return quotient;
}

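/*
 * ns_to_timespec - Convert @nsec nanoseconds into a struct timespec.
 * Local copy of the kernel helper, presumably provided because the host
 * environment lacks it.
 */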
struct timespec ns_to_timespec(const s64 nsec)
{
        struct timespec ts;
        s32 rem;

        if (!nsec)
                return (struct timespec) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}