Rev 4075 | Go to most recent revision | Show entire file | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 4075 | Rev 5078 | ||
---|---|---|---|
Line 25... | Line 25... | ||
25 | * |
25 | * |
26 | **************************************************************************/ |
26 | **************************************************************************/ |
Line 27... | Line 27... | ||
27 | 27 | ||
28 | - | ||
Line 29... | Line 28... | ||
29 | #include "vmwgfx_drv.h" |
28 | |
30 | #include |
29 | #include "vmwgfx_drv.h" |
31 | 30 | ||
/*
 * struct vmw_marker - one submitted-command marker used to track FIFO lag.
 *
 * @head:      linkage into the owning vmw_marker_queue's list
 *             (appended with list_add_tail() on push).
 * @seqno:     sequence number the marker was pushed with; compared
 *             against signaled seqnos when the queue is pulled.
 * @submitted: submission timestamp in nanoseconds, taken from
 *             ktime_get_raw_ns() at push time.
 */
struct vmw_marker {
	struct list_head head;
	uint32_t seqno;
	u64 submitted;
};
37 | 36 | ||
38 | void vmw_marker_queue_init(struct vmw_marker_queue *queue) |
37 | void vmw_marker_queue_init(struct vmw_marker_queue *queue) |
39 | { |
38 | { |
40 | INIT_LIST_HEAD(&queue->head); |
39 | INIT_LIST_HEAD(&queue->head); |
Line 41... | Line 40... | ||
41 | queue->lag = ns_to_timespec(0); |
40 | queue->lag = 0; |
42 | // getrawmonotonic(&queue->lag_time); |
41 | queue->lag_time = ktime_get_raw_ns(); |
Line 61... | Line 60... | ||
61 | 60 | ||
62 | if (unlikely(!marker)) |
61 | if (unlikely(!marker)) |
Line 63... | Line 62... | ||
63 | return -ENOMEM; |
62 | return -ENOMEM; |
64 | 63 | ||
65 | marker->seqno = seqno; |
64 | marker->seqno = seqno; |
66 | // getrawmonotonic(&marker->submitted); |
65 | marker->submitted = ktime_get_raw_ns(); |
67 | spin_lock(&queue->lock); |
66 | spin_lock(&queue->lock); |
Line 68... | Line 67... | ||
68 | list_add_tail(&marker->head, &queue->head); |
67 | list_add_tail(&marker->head, &queue->head); |
Line 73... | Line 72... | ||
73 | 72 | ||
74 | int vmw_marker_pull(struct vmw_marker_queue *queue, |
73 | int vmw_marker_pull(struct vmw_marker_queue *queue, |
75 | uint32_t signaled_seqno) |
74 | uint32_t signaled_seqno) |
76 | { |
75 | { |
77 | struct vmw_marker *marker, *next; |
- | |
78 | struct timespec now; |
76 | struct vmw_marker *marker, *next; |
- | 77 | bool updated = false; |
|
Line 79... | Line 78... | ||
79 | bool updated = false; |
78 | u64 now; |
80 | 79 | ||
Line 81... | Line 80... | ||
81 | spin_lock(&queue->lock); |
80 | spin_lock(&queue->lock); |
82 | // getrawmonotonic(&now); |
81 | now = ktime_get_raw_ns(); |
83 | 82 | ||
84 | if (list_empty(&queue->head)) { |
83 | if (list_empty(&queue->head)) { |
85 | // queue->lag = ns_to_timespec(0); |
84 | queue->lag = 0; |
86 | queue->lag_time = now; |
85 | queue->lag_time = now; |
Line 87... | Line 86... | ||
87 | updated = true; |
86 | updated = true; |
88 | goto out_unlock; |
87 | goto out_unlock; |
89 | } |
88 | } |
Line 90... | Line 89... | ||
90 | 89 | ||
91 | list_for_each_entry_safe(marker, next, &queue->head, head) { |
90 | list_for_each_entry_safe(marker, next, &queue->head, head) { |
92 | if (signaled_seqno - marker->seqno > (1 << 30)) |
91 | if (signaled_seqno - marker->seqno > (1 << 30)) |
93 | continue; |
92 | continue; |
94 | 93 | ||
95 | // queue->lag = timespec_sub(now, marker->submitted); |
94 | queue->lag = now - marker->submitted; |
Line 103... | Line 102... | ||
103 | spin_unlock(&queue->lock); |
102 | spin_unlock(&queue->lock); |
Line 104... | Line 103... | ||
104 | 103 | ||
105 | return (updated) ? 0 : -EBUSY; |
104 | return (updated) ? 0 : -EBUSY; |
Line 106... | Line 105... | ||
106 | } |
105 | } |
107 | - | ||
108 | static struct timespec vmw_timespec_add(struct timespec t1, |
106 | |
109 | struct timespec t2) |
- | |
110 | { |
- | |
111 | t1.tv_sec += t2.tv_sec; |
- | |
112 | t1.tv_nsec += t2.tv_nsec; |
- | |
113 | if (t1.tv_nsec >= 1000000000L) { |
- | |
114 | t1.tv_sec += 1; |
- | |
115 | t1.tv_nsec -= 1000000000L; |
- | |
116 | } |
107 | static u64 vmw_fifo_lag(struct vmw_marker_queue *queue) |
117 | - | ||
118 | return t1; |
- | |
119 | } |
- | |
120 | - | ||
121 | static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue) |
- | |
Line 122... | Line 108... | ||
122 | { |
108 | { |
123 | struct timespec now; |
109 | u64 now; |
124 | 110 | ||
125 | spin_lock(&queue->lock); |
- | |
126 | // getrawmonotonic(&now); |
111 | spin_lock(&queue->lock); |
127 | // queue->lag = vmw_timespec_add(queue->lag, |
112 | now = ktime_get_raw_ns(); |
128 | // timespec_sub(now, queue->lag_time)); |
113 | queue->lag += now - queue->lag_time; |
129 | queue->lag_time = now; |
114 | queue->lag_time = now; |
Line 130... | Line 115... | ||
130 | spin_unlock(&queue->lock); |
115 | spin_unlock(&queue->lock); |
131 | return queue->lag; |
116 | return queue->lag; |
132 | } |
117 | } |
133 | 118 | ||
Line 134... | Line -... | ||
134 | - | ||
135 | static bool vmw_lag_lt(struct vmw_marker_queue *queue, |
119 | |
136 | uint32_t us) |
- | |
137 | { |
120 | static bool vmw_lag_lt(struct vmw_marker_queue *queue, |
Line 138... | Line 121... | ||
138 | struct timespec lag, cond; |
121 | uint32_t us) |
139 | 122 | { |
|
140 | cond = ns_to_timespec((s64) us * 1000); |
123 | u64 cond = (u64) us * NSEC_PER_USEC; |