/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
//#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
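
/*
 * Minimal usage sketch (not code from this file; the ring index is an
 * example and error handling plus ring locking are omitted):
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (!r)
 *		r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */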

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		*drv->cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		seq = le32_to_cpu(*drv->cpu_addr);
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
//	trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
	return 0;
}
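
/* Note: the 64-bit sync_seq[] counter bumped in radeon_fence_emit() never
 * wraps in practice; only its low 32 bits reach the writeback slot or
 * scratch register, and radeon_fence_process() widens them back out. */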

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process and the other
	 * process needs to update the last_seq between the atomic read and
	 * xchg of the current process.
	 *
	 * Moreover, for this to go into an infinite loop there need to be
	 * continuously new fences signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchg the last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process set as last seq must be higher than
	 * the seq value we just read. Which means that the current process
	 * needs to be interrupted after radeon_fence_read and before
	 * the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * we bail after 10 loops, just accepting the fact that we might
	 * have temporarily set the last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
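	/* radeon_fence_read() returns only the low 32 bits of the sequence
	 * number; the loop below widens each read to 64 bits with the upper
	 * half of the last known value, bumping the upper half when the
	 * hardware value has wrapped around. */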
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = GetTimerTicks();
		wake_up_all(&rdev->fence_queue);
	}
}

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

//		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
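		/* the blocking wait_event_*timeout() calls are commented out
		 * in this port; the loop instead sleeps one timer tick per
		 * iteration and polls the fence state by hand */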
//		if (intr) {
//			r = wait_event_interruptible_timeout(rdev->fence_queue,
//				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
//				timeout);
//		} else {
//			r = wait_event_timeout(rdev->fence_queue,
//				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
//				timeout);
//		}
		delay(1);
		/* polling stand-in for the commented-out waits: refresh the
		 * fence state and treat the elapsed tick as a timed-out wait
		 * (assumed fix; 'signaled' and 'r' were left unset here) */
		signaled = radeon_fence_seq_signaled(rdev, target_seq, ring);
		r = 0;

		radeon_irq_kms_sw_irq_put(rdev, ring);
//		if (unlikely(r < 0)) {
//			return r;
//		}
//		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else think there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for ? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

//		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}

//		WaitEvent(fence->evnt);

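		/* mirror radeon_fence_wait_seq() above: there is no event to
		 * block on in this port, so pace the loop with a one-tick
		 * sleep and refresh the signaled state by hand (these two
		 * lines are an assumed fix; the original loop left
		 * 'signaled' uninitialized) */
		delay(1);
		signaled = radeon_fence_any_seq_signaled(rdev, target_seq);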
		r = 1;

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
//		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else think there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = GetTimerTicks();
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  Fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		seq[i] = fences[i]->seq;
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}

/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Caller must hold ring lock.
 */
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];

	while (1) {
		int r;
		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
		if (r == -EDEADLK) {
			mutex_unlock(&rdev->ring_lock);
			r = radeon_gpu_reset(rdev);
			mutex_lock(&rdev->ring_lock);
			if (!r)
				continue;
		}
		if (r) {
			dev_err(rdev->dev, "error waiting for ring to become"
				" idle (%d)\n", r);
		}
		return;
	}
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32-bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}
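
/* Pairing sketch (the real call site is assumed to live in the semaphore
 * code): when a fence from another ring still guards a buffer, callers
 * emit a semaphore wait and then record the new sync point:
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		// emit semaphore from fence->ring to dst_ring
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */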

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
|
- | 755 | * radeon_fence_driver_start_ring - make the fence driver |
|
- | 756 | * ready for use on the requested ring. |
|
- | 757 | * |
|
- | 758 | * @rdev: radeon device pointer |
|
- | 759 | * @ring: ring index to start the fence driver on |
|
- | 760 | * |
|
- | 761 | * Make the fence driver ready for processing (all asics). |
|
366 | wake = radeon_fence_poll_locked(rdev); |
762 | * Not all asics have all rings, so each asic will only |
367 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
763 | * start the fence driver on the rings it has. |
368 | } |
764 | * Returns 0 for success, errors for failure. |
369 | 765 | */ |
|
370 | int radeon_fence_driver_init(struct radeon_device *rdev) |
766 | int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) |
- | 767 | { |
|
- | 768 | uint64_t index; |
|
- | 769 | int r; |
|
- | 770 | ||
371 | { |
771 | radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); |
372 | unsigned long irq_flags; |
772 | if (rdev->wb.use_event) { |
373 | int r; |
773 | rdev->fence_drv[ring].scratch_reg = 0; |
374 | - | ||
375 | write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); |
774 | index = R600_WB_EVENT_OFFSET + ring * 4; |
376 | r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg); |
775 | } else { |
377 | if (r) { |
776 | r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg); |
378 | dev_err(rdev->dev, "fence failed to get scratch register\n"); |
777 | if (r) { |
379 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
778 | dev_err(rdev->dev, "fence failed to get scratch register\n"); |
- | 779 | return r; |
|
380 | return r; |
780 | } |
381 | } |
781 | index = RADEON_WB_SCRATCH_OFFSET + |
382 | radeon_fence_write(rdev, 0); |
782 | rdev->fence_drv[ring].scratch_reg - |
383 | atomic_set(&rdev->fence_drv.seq, 0); |
783 | rdev->scratch.reg_base; |
- | 784 | } |
|
384 | INIT_LIST_HEAD(&rdev->fence_drv.created); |
785 | rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; |
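	/* in both branches cpu_addr points into the writeback page; when
	 * writeback is disabled radeon_fence_write()/read() fall back to
	 * hitting the scratch register directly */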
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty_locked(rdev, ring);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}