/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>     /* seq_printf() used by the debugfs code below */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

/*
 * IB.
 */
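/* The driver keeps a small, fixed pool of indirect buffers (IBs): one
 * RADEON_IB_POOL_SIZE-entry array backed by a single GTT object carved into
 * 64 KiB slices (see radeon_ib_pool_init() below).  A bitmap tracks which
 * slots are in use and scheduled_ibs lists IBs already handed to the ring.
 * The allocation/scheduling helpers are compiled out with "#if 0" in this
 * port; only the pool init/fini paths are built. */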

#if 0

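/* Allocate an IB from the pool: create a fence for it, then either take a
 * free slot from the bitmap or, if every slot is in use, wait on the fence
 * of the oldest scheduled IB and recycle that one. */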
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
        struct radeon_fence *fence;
        struct radeon_ib *nib;
        unsigned long i;
        int r = 0;

        *ib = NULL;
        r = radeon_fence_create(rdev, &fence);
        if (r) {
                DRM_ERROR("failed to create fence for new IB\n");
                return r;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
        if (i < RADEON_IB_POOL_SIZE) {
                set_bit(i, rdev->ib_pool.alloc_bm);
                rdev->ib_pool.ibs[i].length_dw = 0;
                *ib = &rdev->ib_pool.ibs[i];
                mutex_unlock(&rdev->ib_pool.mutex);
                goto out;
        }
        if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
                /* nothing to do here */
                mutex_unlock(&rdev->ib_pool.mutex);
                DRM_ERROR("all IBs allocated, none scheduled.\n");
                r = -EINVAL;
                goto out;
        }
        /* get the first ib on the scheduled list */
        nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
                         struct radeon_ib, list);
        if (nib->fence == NULL) {
                /* nothing to do here */
                mutex_unlock(&rdev->ib_pool.mutex);
                DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
                r = -EINVAL;
                goto out;
        }
        mutex_unlock(&rdev->ib_pool.mutex);

        r = radeon_fence_wait(nib->fence, false);
        if (r) {
                DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
                          (unsigned long)nib->gpu_addr, nib->length_dw);
                DRM_ERROR("radeon: GPU lockup detected, failed to get an IB\n");
                goto out;
        }
        radeon_fence_unref(&nib->fence);

        nib->length_dw = 0;

        /* scheduled list is accessed here */
        mutex_lock(&rdev->ib_pool.mutex);
        list_del(&nib->list);
        INIT_LIST_HEAD(&nib->list);
        mutex_unlock(&rdev->ib_pool.mutex);

        *ib = nib;
out:
        if (r) {
                radeon_fence_unref(&fence);
        } else {
                (*ib)->fence = fence;
        }
        return r;
}

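/* Release an IB back to the pool.  A scheduled IB whose fence has not
 * signaled yet is left alone; otherwise its bitmap slot is cleared and
 * its fence reference dropped. */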
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
        struct radeon_ib *tmp = *ib;

        *ib = NULL;
        if (tmp == NULL) {
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
                /* IB is scheduled & not signaled, don't do anything */
                mutex_unlock(&rdev->ib_pool.mutex);
                return;
        }
        list_del(&tmp->list);
        INIT_LIST_HEAD(&tmp->list);
        if (tmp->fence)
                radeon_fence_unref(&tmp->fence);

        tmp->length_dw = 0;
        clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
        mutex_unlock(&rdev->ib_pool.mutex);
}

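/* Submit an IB to the CP ring: emit the indirect-buffer call and a fence,
 * then move the IB onto the scheduled list.  64 ring dwords are reserved,
 * which covers the IB packet plus the fence emit. */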
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
        int r = 0;

        if (!ib->length_dw || !rdev->cp.ready) {
                /* TODO: Nothing in the ib to report. */
                DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
                return -EINVAL;
        }

        /* 64 dwords should be enough for fence too */
        r = radeon_ring_lock(rdev, 64);
        if (r) {
                DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
                return r;
        }
        radeon_ring_ib_execute(rdev, ib);
        radeon_fence_emit(rdev, ib->fence);
        mutex_lock(&rdev->ib_pool.mutex);
        list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
        mutex_unlock(&rdev->ib_pool.mutex);
        radeon_ring_unlock_commit(rdev);
        return 0;
}
#endif

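/* Create the IB pool: one GTT buffer of RADEON_IB_POOL_SIZE * 64 KiB,
 * pinned and kernel-mapped, with each slice's GPU address and CPU pointer
 * recorded in the per-IB descriptors. */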
int radeon_ib_pool_init(struct radeon_device *rdev)
{
        void *ptr;
        uint64_t gpu_addr;
        int i;
        int r = 0;

        if (rdev->ib_pool.robj)
                return 0;
        /* Allocate 1M object buffer */
        INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
        r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
                                 true, RADEON_GEM_DOMAIN_GTT,
                                 false, &rdev->ib_pool.robj);
        if (r) {
                DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
                return r;
        }
        r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
        if (r) {
                DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
                return r;
        }
        r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
        if (r) {
                DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
                return r;
        }
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                unsigned offset;

                offset = i * 64 * 1024;
                rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
                rdev->ib_pool.ibs[i].ptr = ptr + offset;
                rdev->ib_pool.ibs[i].idx = i;
                rdev->ib_pool.ibs[i].length_dw = 0;
                INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
        }
        bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
        rdev->ib_pool.ready = true;
        DRM_INFO("radeon: ib pool ready.\n");
        if (radeon_debugfs_ib_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for IB !\n");
        }
        return r;
}

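/* Tear down the IB pool.  In this port the unmap/unref calls are commented
 * out, so only the bookkeeping (bitmap and object pointer) is reset. */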
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
        if (!rdev->ib_pool.ready) {
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
        if (rdev->ib_pool.robj) {
//              radeon_object_kunmap(rdev->ib_pool.robj);
//              radeon_object_unref(&rdev->ib_pool.robj);
                rdev->ib_pool.robj = NULL;
        }
        mutex_unlock(&rdev->ib_pool.mutex);
}

/*
 * Ring.
 */
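/* Re-read the CP read pointer from hardware and recompute how many dwords
 * are free between the write pointer and the read pointer.  Because the
 * ring size (in dwords) is a power of two, the distance reduces to a mask:
 * e.g. with a 1024-dword ring, rptr=100 and wptr=900,
 * (100 + 1024 - 900) & 1023 = 224 free dwords. */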
void radeon_ring_free_size(struct radeon_device *rdev)
{
        if (rdev->family >= CHIP_R600)
                rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
        else
                rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
        /* This works because ring_size is a power of 2 */
        rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
        rdev->cp.ring_free_dw -= rdev->cp.wptr;
        rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
        if (!rdev->cp.ring_free_dw) {
                rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
        }
}

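/* Reserve ndw dwords on the CP ring and take the ring mutex.  The request is
 * rounded up to the fetch alignment so radeon_ring_unlock_commit() can pad
 * safely; when there is not enough room the loop re-reads the free size
 * (the fence wait used upstream is commented out in this port). */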
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
        int r;

        /* Align requested size with padding so unlock_commit can
         * pad safely */
        ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
        mutex_lock(&rdev->cp.mutex);
        while (ndw > (rdev->cp.ring_free_dw - 1)) {
                radeon_ring_free_size(rdev);
                if (ndw < rdev->cp.ring_free_dw) {
                        break;
                }
//              r = radeon_fence_wait_next(rdev);
//              if (r) {
//                      mutex_unlock(&rdev->cp.mutex);
//                      return r;
//              }
        }
        rdev->cp.count_dw = ndw;
        rdev->cp.wptr_old = rdev->cp.wptr;
        return 0;
}

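/* Pad the ring up to the fetch alignment with type-2 (filler) packets
 * (2 << 30), issue a memory barrier, tell the CP about the new write
 * pointer and drop the ring mutex. */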
void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
        unsigned count_dw_pad;
        unsigned i;

        /* We pad to match fetch size */
        count_dw_pad = (rdev->cp.align_mask + 1) -
                       (rdev->cp.wptr & rdev->cp.align_mask);
        for (i = 0; i < count_dw_pad; i++) {
                radeon_ring_write(rdev, 2 << 30);
        }
        DRM_MEMORYBARRIER();
        radeon_cp_commit(rdev);
        mutex_unlock(&rdev->cp.mutex);
}

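/* Abandon whatever was written since radeon_ring_lock(): restore the saved
 * write pointer and release the ring mutex without committing. */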
void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
        rdev->cp.wptr = rdev->cp.wptr_old;
        mutex_unlock(&rdev->cp.mutex);
}

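/* Create, pin and kernel-map the CP ring buffer object in GTT, then derive
 * the pointer mask and initial free-dword count from the ring size.  The
 * commented-out lines appear to be remnants of an earlier manual
 * allocation/GART-binding path in this port. */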
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
        int r;

        ENTER();

        rdev->cp.ring_size = ring_size;
        /* Allocate ring buffer */
        if (rdev->cp.ring_obj == NULL) {
                r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
                                         true,
                                         RADEON_GEM_DOMAIN_GTT,
                                         false,
                                         &rdev->cp.ring_obj);
                if (r) {
                        DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
                        mutex_unlock(&rdev->cp.mutex);
                        return r;
                }
                r = radeon_object_pin(rdev->cp.ring_obj,
                                      RADEON_GEM_DOMAIN_GTT,
                                      &rdev->cp.gpu_addr);
                if (r) {
                        DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
                        mutex_unlock(&rdev->cp.mutex);
                        return r;
                }
                r = radeon_object_kmap(rdev->cp.ring_obj,
                                       (void **)&rdev->cp.ring);
                if (r) {
                        DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
                        mutex_unlock(&rdev->cp.mutex);
                        return r;
                }
        }

//      rdev->cp.ring = CreateRingBuffer( ring_size, PG_SW );

        dbgprintf("ring buffer %x\n", rdev->cp.ring );

//      rdev->cp.gpu_addr = rdev->mc.gtt_location;

//      u32_t *pagelist = &((u32_t*)page_tabs)[(u32_t)rdev->cp.ring >> 12];

//      dbgprintf("pagelist %x\n", pagelist);

//      radeon_gart_bind(rdev, 0, ring_size / 4096, pagelist);

        rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
        rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;

        LEAVE();

        return 0;
}

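/* Tear down the ring buffer.  As with the IB pool, the unmap/unpin/unref
 * calls are commented out in this port; only the pointers are cleared. */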
void radeon_ring_fini(struct radeon_device *rdev)
{
        mutex_lock(&rdev->cp.mutex);
        if (rdev->cp.ring_obj) {
//              radeon_object_kunmap(rdev->cp.ring_obj);
//              radeon_object_unpin(rdev->cp.ring_obj);
//              radeon_object_unref(&rdev->cp.ring_obj);
                rdev->cp.ring = NULL;
                rdev->cp.ring_obj = NULL;
        }
        mutex_unlock(&rdev->cp.mutex);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
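/* Dump one IB through debugfs: its pool index, fence pointer, size in
 * dwords and every dword of its contents. */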
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct radeon_ib *ib = node->info_ent->data;
        unsigned i;

        if (ib == NULL) {
                return 0;
        }
        seq_printf(m, "IB %04lu\n", ib->idx);
        seq_printf(m, "IB fence %p\n", ib->fence);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
                seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
        }
        return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

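/* Register one debugfs entry per pool slot (radeon_ib_0000, radeon_ib_0001,
 * ...), each backed by radeon_debugfs_ib_info() with the matching IB as its
 * data pointer.  Compiles to a stub returning 0 when CONFIG_DEBUG_FS is
 * disabled. */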
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;

        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
                radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
                radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
                radeon_debugfs_ib_list[i].driver_features = 0;
                radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
        }
        return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
                                        RADEON_IB_POOL_SIZE);
#else
        return 0;
#endif
}