/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
35 | |||
36 | int radeon_debugfs_ib_init(struct radeon_device *rdev); |
||
37 | |||
38 | /* |
||
39 | * IB. |
||
40 | */ |
||
1120 | serge | 41 | |
42 | #if 0 |
||
43 | |||
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	int r = 0, i, c;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= (RADEON_IB_POOL_SIZE - 1);
		if (rdev->ib_pool.ibs[i].free) {
			nib = &rdev->ib_pool.ibs[i];
			break;
		}
	}
	if (nib == NULL) {
		/* This should never happen: it means we have allocated all
		 * IBs and haven't scheduled one yet. Return EBUSY to
		 * userspace, hoping for better luck when the ioctl is
		 * retried.
		 */
		dev_err(rdev->dev, "no free indirect buffer!\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -EBUSY;
	}
	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	nib->free = false;
	if (nib->fence) {
		mutex_unlock(&rdev->ib_pool.mutex);
		r = radeon_fence_wait(nib->fence, false);
		if (r) {
			dev_err(rdev->dev, "error waiting for fence of IB(%u:0x%016lX:%u)\n",
				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
			mutex_lock(&rdev->ib_pool.mutex);
			nib->free = true;
			mutex_unlock(&rdev->ib_pool.mutex);
			radeon_fence_unref(&fence);
			return r;
		}
		mutex_lock(&rdev->ib_pool.mutex);
	}
	radeon_fence_unref(&nib->fence);
	nib->fence = fence;
	nib->length_dw = 0;
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
	return 0;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	if (!tmp->fence->emited)
		radeon_fence_unref(&tmp->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	tmp->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int r = 0;

	if (!ib->length_dw || !rdev->cp.ready) {
		/* TODO: Nothing in the IB; we should report this. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	/* once scheduled, an IB is considered free and protected by the fence */
	ib->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev);
	return 0;
}
#endif

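/* Create the IB pool: one pinned 1MB GTT buffer object, CPU-mapped and
 * carved into RADEON_IB_POOL_SIZE slots of 64KB each. Also registers the
 * debugfs files used to inspect IBs. */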
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
	/* Allocate 1M object buffer */
	r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
			     PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
			     &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].free = true;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	return r;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *robj;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
//	radeon_ib_bogus_cleanup(rdev);
	robj = rdev->ib_pool.robj;
	rdev->ib_pool.robj = NULL;
	mutex_unlock(&rdev->ib_pool.mutex);

	if (robj) {
		r = radeon_bo_reserve(robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(robj);
			radeon_bo_unpin(robj);
			radeon_bo_unreserve(robj);
		}
		radeon_bo_unref(&robj);
	}
}

/*
 * Ring.
 */
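/* Recompute how many dwords are free in the ring, reading the current
 * read pointer from the writeback page when it is enabled, or from the
 * CP register otherwise. */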
void radeon_ring_free_size(struct radeon_device *rdev)
{
	if (rdev->wb.enabled)
		rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
	else {
		if (rdev->family >= CHIP_R600)
			rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
		else
			rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	}
	/* This works because ring_size is a power of 2 */
	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
	rdev->cp.ring_free_dw -= rdev->cp.wptr;
	rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
	if (!rdev->cp.ring_free_dw) {
		rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	}
}

int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
	while (ndw > (rdev->cp.ring_free_dw - 1)) {
		radeon_ring_free_size(rdev);
		if (ndw < rdev->cp.ring_free_dw) {
			break;
		}
//		r = radeon_fence_wait_next(rdev);
//		if (r) {
//			mutex_unlock(&rdev->cp.mutex);
//			return r;
//		}
	}
	rdev->cp.count_dw = ndw;
	rdev->cp.wptr_old = rdev->cp.wptr;
	return 0;
}

int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->cp.mutex);
	r = radeon_ring_alloc(rdev, ndw);
	if (r) {
		mutex_unlock(&rdev->cp.mutex);
		return r;
	}
	return 0;
}

void radeon_ring_commit(struct radeon_device *rdev)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (rdev->cp.align_mask + 1) -
		       (rdev->cp.wptr & rdev->cp.align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(rdev, 2 << 30);
	}
	DRM_MEMORYBARRIER();
	radeon_cp_commit(rdev);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
	radeon_ring_commit(rdev);
	mutex_unlock(&rdev->cp.mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
	rdev->cp.wptr = rdev->cp.wptr_old;
	mutex_unlock(&rdev->cp.mutex);
}

int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	int r;

	rdev->cp.ring_size = ring_size;
	/* Allocate ring buffer */
	if (rdev->cp.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->cp.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->cp.ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->cp.ring_obj,
				   (void **)&rdev->cp.ring);
		radeon_bo_unreserve(rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->cp.mutex);
	ring_obj = rdev->cp.ring_obj;
	rdev->cp.ring = NULL;
	rdev->cp.ring_obj = NULL;
	mutex_unlock(&rdev->cp.mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
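/* Dump one IB pool slot: index, fence pointer, size, and every dword of
 * its contents. */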
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_device *rdev = node->info_ent->data;
	struct radeon_ib *ib;
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (list_empty(&rdev->ib_pool.bogus_ib)) {
		mutex_unlock(&rdev->ib_pool.mutex);
		seq_printf(m, "no bogus IB recorded\n");
		return 0;
	}
	ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
	list_del_init(&ib->list);
	mutex_unlock(&rdev->ib_pool.mutex);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	vfree(ib->ptr);
	kfree(ib);
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
	{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	int r;

	radeon_debugfs_ib_bogus_info_list[0].data = rdev;
	r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
	if (r)
		return r;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}