/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <drm/drmP.h>
#include "radeon.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
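/*
 * In terms of the pointers above: with ptr_mask == (ring_size / 4) - 1,
 * the space still available to the host is
 *     free_dw = (rptr + ring_size / 4 - wptr) & ptr_mask
 * which is what radeon_ring_free_size() below computes; a result of zero
 * is treated as an empty ring rather than a full one.
 */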
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);

/**
 * radeon_ring_write - write a value to the ring
 *
 * @ring: radeon_ring structure holding ring information
 * @v: dword (dw) value to write
 *
 * Write a value to the requested ring buffer (all asics).
 */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
        if (ring->count_dw <= 0) {
                DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
        }
#endif
        ring->ring[ring->wptr++] = v;
        ring->wptr &= ring->ptr_mask;
        ring->count_dw--;
        ring->ring_free_dw--;
}

/**
 * radeon_ring_supports_scratch_reg - check if the ring supports
 * writing to scratch registers
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if a specific ring supports writing to scratch registers (all asics).
 * Returns true if the ring supports writing to scratch regs, false if not.
 */
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
                                      struct radeon_ring *ring)
{
        switch (ring->idx) {
        case RADEON_RING_TYPE_GFX_INDEX:
        case CAYMAN_RING_TYPE_CP1_INDEX:
        case CAYMAN_RING_TYPE_CP2_INDEX:
                return true;
        default:
                return false;
        }
}

/**
 * radeon_ring_free_size - update the free size
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the free dw slots in the ring buffer (all asics).
 */
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
        uint32_t rptr = radeon_ring_get_rptr(rdev, ring);

        /* This works because ring_size is a power of 2 */
        ring->ring_free_dw = rptr + (ring->ring_size / 4);
        ring->ring_free_dw -= ring->wptr;
        ring->ring_free_dw &= ring->ptr_mask;
        if (!ring->ring_free_dw) {
                /* this is an empty ring */
                ring->ring_free_dw = ring->ring_size / 4;
                /* update lockup info to avoid false positive */
                radeon_ring_lockup_update(rdev, ring);
        }
}

/**
 * radeon_ring_alloc - allocate space on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        /* make sure we aren't trying to allocate more space than there is on the ring */
        if (ndw > (ring->ring_size / 4))
                return -ENOMEM;
        /* Align requested size with padding so unlock_commit can
         * pad safely */
        radeon_ring_free_size(rdev, ring);
        ndw = (ndw + ring->align_mask) & ~ring->align_mask;
        while (ndw > (ring->ring_free_dw - 1)) {
                radeon_ring_free_size(rdev, ring);
                if (ndw < ring->ring_free_dw) {
                        break;
                }
                r = radeon_fence_wait_next(rdev, ring->idx);
                if (r)
                        return r;
        }
        ring->count_dw = ndw;
        ring->wptr_old = ring->wptr;
        return 0;
}

/**
 * radeon_ring_lock - lock the ring and allocate space on it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Lock the ring and allocate @ndw dwords in the ring buffer
 * (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        mutex_lock(&rdev->ring_lock);
        r = radeon_ring_alloc(rdev, ring, ndw);
        if (r) {
                mutex_unlock(&rdev->ring_lock);
                return r;
        }
        return 0;
}

/**
 * radeon_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @hdp_flush: Whether or not to perform an HDP cache flush
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
                        bool hdp_flush)
{
        /* If we are emitting the HDP flush via the ring buffer, we need to
         * do it before padding.
         */
        if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
                rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
        /* We pad to match fetch size */
        while (ring->wptr & ring->align_mask) {
                radeon_ring_write(ring, ring->nop);
        }
        mb();
        /* If we are emitting the HDP flush via MMIO, we need to do it after
         * all CPU writes to VRAM finished.
         */
        if (hdp_flush && rdev->asic->mmio_hdp_flush)
                rdev->asic->mmio_hdp_flush(rdev);
        radeon_ring_set_wptr(rdev, ring);
}

/**
 * radeon_ring_unlock_commit - tell the GPU to execute the new
 * commands on the ring buffer and unlock it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @hdp_flush: Whether or not to perform an HDP cache flush
 *
 * Call radeon_ring_commit() then unlock the ring (all asics).
 */
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
                               bool hdp_flush)
{
        radeon_ring_commit(rdev, ring, hdp_flush);
        mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_ring_undo - reset the wptr
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void radeon_ring_undo(struct radeon_ring *ring)
{
        ring->wptr = ring->wptr_old;
}

/**
 * radeon_ring_unlock_undo - reset the wptr and unlock the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_undo() then unlock the ring (all asics).
 */
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
        radeon_ring_undo(ring);
        mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_ring_lockup_update - update lockup variables
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the last rptr value and timestamp (all asics).
 */
void radeon_ring_lockup_update(struct radeon_device *rdev,
                               struct radeon_ring *ring)
{
        atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
        atomic64_set(&ring->last_activity, jiffies_64);
}

/**
 * radeon_ring_test_lockup() - check if ring is locked up by recording information
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * Returns true if the ring seems to be stuck, false if it is still working.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
        uint64_t last = atomic64_read(&ring->last_activity);
        uint64_t elapsed;

        if (rptr != atomic_read(&ring->last_rptr)) {
                /* ring is still working, no lockup */
                radeon_ring_lockup_update(rdev, ring);
                return false;
        }

        elapsed = jiffies_to_msecs(jiffies_64 - last);
        if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
                dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n",
                        ring->idx, elapsed);
                return true;
        }
        /* give a chance to the GPU ... */
        return false;
}

/**
 * radeon_ring_backup - Back up the content of a ring
 *
 * @rdev: radeon_device pointer
 * @ring: the ring we want to back up
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
                            uint32_t **data)
{
        unsigned size, ptr, i;

        /* just in case lock the ring */
        mutex_lock(&rdev->ring_lock);
        *data = NULL;

        if (ring->ring_obj == NULL) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }

        /* it doesn't make sense to save anything if all fences are signaled */
        if (!radeon_fence_count_emitted(rdev, ring->idx)) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }

        /* calculate the number of dw on the ring */
        if (ring->rptr_save_reg)
                ptr = RREG32(ring->rptr_save_reg);
        else if (rdev->wb.enabled)
                ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
        else {
                /* no way to read back the next rptr */
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }

        size = ring->wptr + (ring->ring_size / 4);
        size -= ptr;
        size &= ring->ptr_mask;
        if (size == 0) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }

        /* and then save the content of the ring */
        *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
        if (!*data) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }
        for (i = 0; i < size; ++i) {
                (*data)[i] = ring->ring[ptr++];
                ptr &= ring->ptr_mask;
        }

        mutex_unlock(&rdev->ring_lock);
        return size;
}

/**
 * radeon_ring_restore - append saved commands to the ring again
 *
 * @rdev: radeon_device pointer
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restore the previously saved commands.
 */
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
                        unsigned size, uint32_t *data)
{
        int i, r;

        if (!size || !data)
                return 0;

        /* restore the saved ring content */
        r = radeon_ring_lock(rdev, ring, size);
        if (r)
                return r;

        for (i = 0; i < size; ++i) {
                radeon_ring_write(ring, data[i]);
        }

        radeon_ring_unlock_commit(rdev, ring, false);
        kfree(data);
        return 0;
}

/**
 * radeon_ring_init - init driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: size of the ring
 * @rptr_offs: offset of the rptr writeback location in the WB buffer
 * @nop: nop packet for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
                     unsigned rptr_offs, u32 nop)
{
        int r;

        ring->ring_size = ring_size;
        ring->rptr_offs = rptr_offs;
        ring->nop = nop;
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT, 0,
                                     NULL, &ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                r = radeon_bo_reserve(ring->ring_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
                                  &ring->gpu_addr);
                if (r) {
                        radeon_bo_unreserve(ring->ring_obj);
                        dev_err(rdev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
                r = radeon_bo_kmap(ring->ring_obj,
                                   (void **)&ring->ring);
                radeon_bo_unreserve(ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
        ring->ptr_mask = (ring->ring_size / 4) - 1;
        ring->ring_free_dw = ring->ring_size / 4;
        if (rdev->wb.enabled) {
                u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
                ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
                ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
        }
        if (radeon_debugfs_ring_init(rdev, ring)) {
                DRM_ERROR("Failed to register debugfs file for rings !\n");
        }
        radeon_ring_lockup_update(rdev, ring);
        return 0;
}

/**
 * radeon_ring_fini - tear down the driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
        int r;
        struct radeon_bo *ring_obj;

        mutex_lock(&rdev->ring_lock);
        ring_obj = ring->ring_obj;
        ring->ready = false;
        ring->ring = NULL;
        ring->ring_obj = NULL;
        mutex_unlock(&rdev->ring_lock);

        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(ring_obj);
                        radeon_bo_unpin(ring_obj);
                        radeon_bo_unreserve(ring_obj);
                }
                radeon_bo_unref(&ring_obj);
        }
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

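/**
 * radeon_debugfs_ring_info - dump ring state to a debugfs file
 *
 * @m: seq_file to print into
 * @data: unused, the ring index comes from the drm_info_node
 *
 * Print the hardware and driver copies of the ring pointers, the last
 * semaphore addresses and the ring contents around the current rptr.
 */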
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int ridx = *(int*)node->info_ent->data;
        struct radeon_ring *ring = &rdev->ring[ridx];

        uint32_t rptr, wptr, rptr_next;
        unsigned count, i, j;

        radeon_ring_free_size(rdev, ring);
        count = (ring->ring_size / 4) - ring->ring_free_dw;

        wptr = radeon_ring_get_wptr(rdev, ring);
        seq_printf(m, "wptr: 0x%08x [%5d]\n",
                   wptr, wptr);

        rptr = radeon_ring_get_rptr(rdev, ring);
        seq_printf(m, "rptr: 0x%08x [%5d]\n",
                   rptr, rptr);

        if (ring->rptr_save_reg) {
                rptr_next = RREG32(ring->rptr_save_reg);
                seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n",
                           ring->rptr_save_reg, rptr_next, rptr_next);
        } else
                rptr_next = ~0;

        seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
                   ring->wptr, ring->wptr);
        seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
                   ring->last_semaphore_signal_addr);
        seq_printf(m, "last semaphore wait addr   : 0x%016llx\n",
                   ring->last_semaphore_wait_addr);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);

        if (!ring->ready)
                return 0;

        /* print 32 dw before current rptr as often it's the last executed
         * packet that is the root issue
         */
        i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
        for (j = 0; j <= (count + 32); j++) {
                seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
                if (rptr == i)
                        seq_puts(m, " *");
                if (rptr_next == i)
                        seq_puts(m, " #");
                seq_puts(m, "\n");
                i = (i + 1) & ring->ptr_mask;
        }
        return 0;
}

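/* ring indices handed to radeon_debugfs_ring_info() through drm_info_list.data */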
static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
static int si_vce1_index = TN_RING_TYPE_VCE1_INDEX;
static int si_vce2_index = TN_RING_TYPE_VCE2_INDEX;

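/* one debugfs entry per ring the hardware may expose */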
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
        {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
        {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
        {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
        {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
        {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
        {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
        {"radeon_ring_vce1", radeon_debugfs_ring_info, 0, &si_vce1_index},
        {"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
};

#endif

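/**
 * radeon_debugfs_ring_init - register the debugfs file for a ring
 *
 * @rdev: radeon_device pointer
 * @ring: ring to register a debugfs file for
 *
 * Register the debugfs entry that matches @ring with the drm debugfs
 * layer.  Returns 0 on success (or when debugfs is disabled), error on
 * failure.
 */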
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;
        for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
                struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
                int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
                unsigned r;

                if (&rdev->ring[ridx] != ring)
                        continue;

                r = radeon_debugfs_add_files(rdev, info, 1);
                if (r)
                        return r;
        }
#endif
        return 0;
}