/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
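
/* Note: the hardware only ever writes a 32-bit sequence value, while the
 * driver tracks fences with 64-bit numbers (sync_seq[] and last_seq) so
 * that wrap-arounds of the 32-bit value can be handled in software; see
 * radeon_fence_process() for how the upper 32 bits are reconstructed.
 */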

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}
73 | |||
2997 | Serge | 74 | /** |
75 | * radeon_fence_read - read a fence value |
||
76 | * |
||
77 | * @rdev: radeon_device pointer |
||
78 | * @ring: ring index the fence is associated with |
||
79 | * |
||
80 | * Reads a fence value from memory or a scratch register (all asics). |
||
81 | * Returns the value of the fence read from memory or register. |
||
82 | */ |
||
83 | static u32 radeon_fence_read(struct radeon_device *rdev, int ring) |
||
2004 | serge | 84 | { |
2997 | Serge | 85 | struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; |
86 | u32 seq = 0; |
||
2004 | serge | 87 | |
2997 | Serge | 88 | if (likely(rdev->wb.enabled || !drv->scratch_reg)) { |
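		/* cpu_addr can be NULL here (e.g. for the UVD ring while its
		 * fence memory is not mapped); fall back to the last sequence
		 * value the driver has already seen.
		 */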
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	return 0;
}
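
/* Typical use (illustrative sketch, not part of this file): a caller emits a
 * fence right after its ring commands and later waits for it, e.g.
 *
 *	struct radeon_fence *fence;
 *	int r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (!r) {
 *		r = radeon_fence_wait(fence, false);
 *		radeon_fence_unref(&fence);
 *	}
 */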

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop, new fences
	 * need to be signaled continuously, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that exchanges last_seq
	 * between the atomic read and xchg of the current process. And
	 * the value the other process sets as last_seq must be higher
	 * than the seq value we just read, which means the current
	 * process needs to be interrupted after radeon_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and bail
	 * out after 10 iterations, accepting the fact that we might have
	 * temporarily set last_seq not to the true last signaled seq but
	 * to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
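		/* The hardware only provides the lower 32 bits of the
		 * sequence number; extend it to 64 bits using the upper
		 * bits of the last seen value. If the result moved
		 * backwards, the 32-bit counter wrapped, so take the
		 * upper bits from the last emitted fence instead.
		 */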
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence seq
			 * than the current real last seq as signaled by
			 * the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake)
		wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}
239 | |||
2997 | Serge | 240 | /** |
241 | * radeon_fence_signaled - check if a fence has signaled |
||
242 | * |
||
243 | * @fence: radeon fence object |
||
244 | * |
||
245 | * Check if the requested fence has signaled (all asics). |
||
246 | * Returns true if the fence has signaled or false if it has not. |
||
247 | */ |
||
248 | bool radeon_fence_signaled(struct radeon_fence *fence) |
||
249 | { |
||
250 | if (!fence) { |
||
251 | return true; |
||
252 | } |
||
253 | if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) { |
||
254 | return true; |
||
255 | } |
||
256 | if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { |
||
257 | fence->seq = RADEON_FENCE_SIGNALED_SEQ; |
||
258 | return true; |
||
259 | } |
||
260 | return false; |
||
261 | } |

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_*().
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
				 bool intr)
{
	uint64_t last_seq[RADEON_NUM_RINGS];
	bool signaled;
	int i, r;

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {

		/* Save current sequence values, used to check for GPU lockups */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
			trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
			radeon_irq_kms_sw_irq_get(rdev, i);
		}

		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		} else {
			r = wait_event_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		}

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			radeon_irq_kms_sw_irq_put(rdev, i);
			trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
		}

		if (unlikely(r < 0))
			return r;

		if (unlikely(!signaled)) {
			if (rdev->needs_reset)
				return -EDEADLK;

			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r)
				continue;
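
			/* check whether any of the rings we are waiting on
			 * made progress while we slept; if the last seen
			 * sequence number changed, the GPU is still alive,
			 * so keep waiting instead of checking for a lockup.
			 */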
			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
					break;
			}

			if (i != RADEON_NUM_RINGS)
				continue;
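
			/* no ring made progress; check whether one of the
			 * rings we are waiting on is actually locked up.
			 */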
			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
					break;
			}

			if (i < RADEON_NUM_RINGS) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for "
					 "0x%016llx last fence id 0x%016llx on"
					 " ring %d)\n",
					 target_seq[i], last_seq[i], i);

				/* remember that we need a reset */
				rdev->needs_reset = true;
				wake_up_all(&rdev->fence_queue);
				return -EDEADLK;
			}
		}
	}
	return 0;
}
384 | |||
2997 | Serge | 385 | /** |
386 | * radeon_fence_wait - wait for a fence to signal |
||
387 | * |
||
388 | * @fence: radeon fence object |
||
389 | * @intr: use interruptable sleep |
||
390 | * |
||
391 | * Wait for the requested fence to signal (all asics). |
||
392 | * @intr selects whether to use interruptable (true) or non-interruptable |
||
393 | * (false) sleep when waiting for the fence. |
||
394 | * Returns 0 if the fence has passed, error for all other cases. |
||
395 | */ |
||
1179 | serge | 396 | int radeon_fence_wait(struct radeon_fence *fence, bool intr) |
397 | { |
||
5078 | serge | 398 | uint64_t seq[RADEON_NUM_RINGS] = {}; |
1125 | serge | 399 | int r; |
400 | |||
401 | if (fence == NULL) { |
||
402 | WARN(1, "Querying an invalid fence : %p !\n", fence); |
||
2997 | Serge | 403 | return -EINVAL; |
1125 | serge | 404 | } |
2997 | Serge | 405 | |
5078 | serge | 406 | seq[fence->ring] = fence->seq; |
407 | if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) |
||
1125 | serge | 408 | return 0; |
2997 | Serge | 409 | |
5078 | serge | 410 | r = radeon_fence_wait_seq(fence->rdev, seq, intr); |
411 | if (r) |
||
2997 | Serge | 412 | return r; |
413 | |||
5078 | serge | 414 | fence->seq = RADEON_FENCE_SIGNALED_SEQ; |
2997 | Serge | 415 | return 0; |
1125 | serge | 416 | } |
417 | |||
2997 | Serge | 418 | /** |
419 | * radeon_fence_wait_any - wait for a fence to signal on any ring |
||
420 | * |
||
421 | * @rdev: radeon device pointer |
||
422 | * @fences: radeon fence object(s) |
||
423 | * @intr: use interruptable sleep |
||
424 | * |
||
425 | * Wait for any requested fence to signal (all asics). Fence |
||
426 | * array is indexed by ring id. @intr selects whether to use |
||
427 | * interruptable (true) or non-interruptable (false) sleep when |
||
428 | * waiting for the fences. Used by the suballocator. |
||
429 | * Returns 0 if any fence has passed, error for all other cases. |
||
430 | */ |
||
431 | int radeon_fence_wait_any(struct radeon_device *rdev, |
||
432 | struct radeon_fence **fences, |
||
433 | bool intr) |
||
1125 | serge | 434 | { |
2997 | Serge | 435 | uint64_t seq[RADEON_NUM_RINGS]; |
5078 | serge | 436 | unsigned i, num_rings = 0; |
1125 | serge | 437 | int r; |
438 | |||
2997 | Serge | 439 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
440 | seq[i] = 0; |
||
441 | |||
442 | if (!fences[i]) { |
||
443 | continue; |
||
444 | } |
||
445 | |||
5078 | serge | 446 | seq[i] = fences[i]->seq; |
447 | ++num_rings; |
||
448 | |||
449 | /* test if something was allready signaled */ |
||
450 | if (seq[i] == RADEON_FENCE_SIGNALED_SEQ) |
||
1125 | serge | 451 | return 0; |
452 | } |
||
2997 | Serge | 453 | |
5078 | serge | 454 | /* nothing to wait for ? */ |
455 | if (num_rings == 0) |
||
456 | return -ENOENT; |
||
2997 | Serge | 457 | |
5078 | serge | 458 | r = radeon_fence_wait_seq(rdev, seq, intr); |
2997 | Serge | 459 | if (r) { |
460 | return r; |
||
461 | } |
||
462 | return 0; |
||
1125 | serge | 463 | } |
464 | |||
2997 | Serge | 465 | /** |
5078 | serge | 466 | * radeon_fence_wait_next - wait for the next fence to signal |
2997 | Serge | 467 | * |
468 | * @rdev: radeon device pointer |
||
469 | * @ring: ring index the fence is associated with |
||
470 | * |
||
471 | * Wait for the next fence on the requested ring to signal (all asics). |
||
472 | * Returns 0 if the next fence has passed, error for all other cases. |
||
473 | * Caller must hold ring lock. |
||
474 | */ |
||
5078 | serge | 475 | int radeon_fence_wait_next(struct radeon_device *rdev, int ring) |
1125 | serge | 476 | { |
5078 | serge | 477 | uint64_t seq[RADEON_NUM_RINGS] = {}; |
1125 | serge | 478 | |
5078 | serge | 479 | seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; |
480 | if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) { |
||
2997 | Serge | 481 | /* nothing to wait for, last_seq is |
482 | already the last emited fence */ |
||
483 | return -ENOENT; |
||
1125 | serge | 484 | } |
5078 | serge | 485 | return radeon_fence_wait_seq(rdev, seq, false); |
2997 | Serge | 486 | } |
487 | |||
488 | /** |
||
5078 | serge | 489 | * radeon_fence_wait_empty - wait for all fences to signal |
2997 | Serge | 490 | * |
491 | * @rdev: radeon device pointer |
||
492 | * @ring: ring index the fence is associated with |
||
493 | * |
||
494 | * Wait for all fences on the requested ring to signal (all asics). |
||
495 | * Returns 0 if the fences have passed, error for all other cases. |
||
496 | * Caller must hold ring lock. |
||
497 | */ |
||
5078 | serge | 498 | int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) |
2997 | Serge | 499 | { |
5078 | serge | 500 | uint64_t seq[RADEON_NUM_RINGS] = {}; |
3192 | Serge | 501 | int r; |
2997 | Serge | 502 | |
5078 | serge | 503 | seq[ring] = rdev->fence_drv[ring].sync_seq[ring]; |
504 | if (!seq[ring]) |
||
505 | return 0; |
||
506 | |||
507 | r = radeon_fence_wait_seq(rdev, seq, false); |
||
3192 | Serge | 508 | if (r) { |
5078 | serge | 509 | if (r == -EDEADLK) |
3192 | Serge | 510 | return -EDEADLK; |
5078 | serge | 511 | |
3192 | Serge | 512 | dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n", |
513 | ring, r); |
||
2997 | Serge | 514 | } |
3192 | Serge | 515 | return 0; |
1125 | serge | 516 | } |
517 | |||
2997 | Serge | 518 | /** |
519 | * radeon_fence_ref - take a ref on a fence |
||
520 | * |
||
521 | * @fence: radeon fence object |
||
522 | * |
||
523 | * Take a reference on a fence (all asics). |
||
524 | * Returns the fence. |
||
525 | */ |
||
1125 | serge | 526 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) |
527 | { |
||
528 | kref_get(&fence->kref); |
||
529 | return fence; |
||
530 | } |
||
531 | |||
2997 | Serge | 532 | /** |
533 | * radeon_fence_unref - remove a ref on a fence |
||
534 | * |
||
535 | * @fence: radeon fence object |
||
536 | * |
||
537 | * Remove a reference on a fence (all asics). |
||
538 | */ |
||
1125 | serge | 539 | void radeon_fence_unref(struct radeon_fence **fence) |
540 | { |
||
2997 | Serge | 541 | struct radeon_fence *tmp = *fence; |
1125 | serge | 542 | |
2997 | Serge | 543 | *fence = NULL; |
544 | if (tmp) { |
||
545 | kref_put(&tmp->kref, radeon_fence_destroy); |
||
546 | } |
||
547 | } |
||
2005 | serge | 548 | |
2997 | Serge | 549 | /** |
550 | * radeon_fence_count_emitted - get the count of emitted fences |
||
551 | * |
||
552 | * @rdev: radeon device pointer |
||
553 | * @ring: ring index the fence is associated with |
||
554 | * |
||
555 | * Get the number of fences emitted on the requested ring (all asics). |
||
556 | * Returns the number of emitted fences on the ring. Used by the |
||
557 | * dynpm code to ring track activity. |
||
558 | */ |
||
559 | unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring) |
||
560 | { |
||
561 | uint64_t emitted; |
||
562 | |||
563 | /* We are not protected by ring lock when reading the last sequence |
||
564 | * but it's ok to report slightly wrong fence count here. |
||
565 | */ |
||
566 | radeon_fence_process(rdev, ring); |
||
567 | emitted = rdev->fence_drv[ring].sync_seq[ring] |
||
568 | - atomic64_read(&rdev->fence_drv[ring].last_seq); |
||
569 | /* to avoid 32bits warp around */ |
||
570 | if (emitted > 0x10000000) { |
||
571 | emitted = 0x10000000; |
||
572 | } |
||
573 | return (unsigned)emitted; |
||
1125 | serge | 574 | } |
575 | |||
2997 | Serge | 576 | /** |
577 | * radeon_fence_need_sync - do we need a semaphore |
||
578 | * |
||
579 | * @fence: radeon fence object |
||
580 | * @dst_ring: which ring to check against |
||
581 | * |
||
582 | * Check if the fence needs to be synced against another ring |
||
583 | * (all asics). If so, we need to emit a semaphore. |
||
584 | * Returns true if we need to sync with another ring, false if |
||
585 | * not. |
||
586 | */ |
||
587 | bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring) |
||
1125 | serge | 588 | { |
2997 | Serge | 589 | struct radeon_fence_driver *fdrv; |
1125 | serge | 590 | |
2997 | Serge | 591 | if (!fence) { |
592 | return false; |
||
593 | } |
||
594 | |||
595 | if (fence->ring == dst_ring) { |
||
596 | return false; |
||
597 | } |
||
598 | |||
599 | /* we are protected by the ring mutex */ |
||
600 | fdrv = &fence->rdev->fence_drv[dst_ring]; |
||
601 | if (fence->seq <= fdrv->sync_seq[fence->ring]) { |
||
602 | return false; |
||
603 | } |
||
604 | |||
605 | return true; |
||
1125 | serge | 606 | } |
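
/* Illustrative sketch (not part of this file): callers typically pair this
 * check with emitting a semaphore wait on the destination ring and then
 * recording it via radeon_fence_note_sync(), roughly:
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		... emit a semaphore wait on dst_ring ...
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */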
607 | |||
2997 | Serge | 608 | /** |
609 | * radeon_fence_note_sync - record the sync point |
||
610 | * |
||
611 | * @fence: radeon fence object |
||
612 | * @dst_ring: which ring to check against |
||
613 | * |
||
614 | * Note the sequence number at which point the fence will |
||
615 | * be synced with the requested ring (all asics). |
||
616 | */ |
||
617 | void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring) |
||
1125 | serge | 618 | { |
2997 | Serge | 619 | struct radeon_fence_driver *dst, *src; |
620 | unsigned i; |
||
621 | |||
622 | if (!fence) { |
||
623 | return; |
||
624 | } |
||
625 | |||
626 | if (fence->ring == dst_ring) { |
||
627 | return; |
||
628 | } |
||
629 | |||
630 | /* we are protected by the ring mutex */ |
||
631 | src = &fence->rdev->fence_drv[fence->ring]; |
||
632 | dst = &fence->rdev->fence_drv[dst_ring]; |
||
633 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
||
634 | if (i == dst_ring) { |
||
635 | continue; |
||
636 | } |
||
637 | dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]); |
||
638 | } |
||
639 | } |

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
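			/* each ring gets its own 32-bit slot in the writeback
			 * page, starting at R600_WB_EVENT_OFFSET
			 */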
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;

		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->size, 8);
			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}

	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}
692 | |||
2997 | Serge | 693 | /** |
694 | * radeon_fence_driver_init_ring - init the fence driver |
||
695 | * for the requested ring. |
||
696 | * |
||
697 | * @rdev: radeon device pointer |
||
698 | * @ring: ring index to start the fence driver on |
||
699 | * |
||
700 | * Init the fence driver for the requested ring (all asics). |
||
701 | * Helper function for radeon_fence_driver_init(). |
||
702 | */ |
||
703 | static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring) |
||
704 | { |
||
705 | int i; |
||
1125 | serge | 706 | |
2997 | Serge | 707 | rdev->fence_drv[ring].scratch_reg = -1; |
708 | rdev->fence_drv[ring].cpu_addr = NULL; |
||
709 | rdev->fence_drv[ring].gpu_addr = 0; |
||
710 | for (i = 0; i < RADEON_NUM_RINGS; ++i) |
||
711 | rdev->fence_drv[ring].sync_seq[i] = 0; |
||
712 | atomic64_set(&rdev->fence_drv[ring].last_seq, 0); |
||
713 | rdev->fence_drv[ring].initialized = false; |
||
714 | } |
||
715 | |||
716 | /** |
||
717 | * radeon_fence_driver_init - init the fence driver |
||
718 | * for all possible rings. |
||
719 | * |
||
720 | * @rdev: radeon device pointer |
||
721 | * |
||
722 | * Init the fence driver for all possible rings (all asics). |
||
723 | * Not all asics have all rings, so each asic will only |
||
724 | * start the fence driver on the rings it has using |
||
725 | * radeon_fence_driver_start_ring(). |
||
726 | * Returns 0 for success. |
||
727 | */ |
||
728 | int radeon_fence_driver_init(struct radeon_device *rdev) |
||
729 | { |
||
730 | int ring; |
||
731 | |||
732 | init_waitqueue_head(&rdev->fence_queue); |
||
733 | for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { |
||
734 | radeon_fence_driver_init_ring(rdev, ring); |
||
735 | } |
||
736 | if (radeon_debugfs_fence_init(rdev)) { |
||
737 | dev_err(rdev->dev, "fence debugfs file creation failed\n"); |
||
738 | } |
||
739 | return 0; |
||
740 | } |

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

/**
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	down_read(&rdev->exclusive_lock);
	seq_printf(m, "%d\n", rdev->needs_reset);
	rdev->needs_reset = true;
	up_read(&rdev->exclusive_lock);

	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
#else
	return 0;
#endif
}