Diff: Rev 1129 → Rev 1179
Line 23 | Line 23
  *
  * Authors: Dave Airlie
  *          Alex Deucher
  *          Jerome Glisse
  */
-//#include
+#include
 #include "drmP.h"
 #include "radeon_drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "atom.h"

Line 34 | Line -
-

Line 36 | Line 35
 int radeon_debugfs_ib_init(struct radeon_device *rdev);

 /*
Line 58 | Line 57
 	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (i < RADEON_IB_POOL_SIZE) {
 		set_bit(i, rdev->ib_pool.alloc_bm);
 		rdev->ib_pool.ibs[i].length_dw = 0;
 		*ib = &rdev->ib_pool.ibs[i];
+		mutex_unlock(&rdev->ib_pool.mutex);
 		goto out;
 	}
 	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
 		/* we go do nothings here */
+		mutex_unlock(&rdev->ib_pool.mutex);
 		DRM_ERROR("all IB allocated none scheduled.\n");
 		r = -EINVAL;
 		goto out;
 	}
 	/* get the first ib on the scheduled list */
 	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
 			 struct radeon_ib, list);
 	if (nib->fence == NULL) {
 		/* we go do nothings here */
+		mutex_unlock(&rdev->ib_pool.mutex);
 		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
 		r = -EINVAL;
 		goto out;
 	}
+	mutex_unlock(&rdev->ib_pool.mutex);
+
 	r = radeon_fence_wait(nib->fence, false);
 	if (r) {
 		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
 			  (unsigned long)nib->gpu_addr, nib->length_dw);
 		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
 		goto out;
 	}
 	radeon_fence_unref(&nib->fence);
+
 	nib->length_dw = 0;
+
+	/* scheduled list is accessed here */
+	mutex_lock(&rdev->ib_pool.mutex);
 	list_del(&nib->list);
 	INIT_LIST_HEAD(&nib->list);
+	mutex_unlock(&rdev->ib_pool.mutex);
+
 	*ib = nib;
 out:
-	mutex_unlock(&rdev->ib_pool.mutex);
 	if (r) {
 		radeon_fence_unref(&fence);
 	} else {
 		(*ib)->fence = fence;
 	}
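Note: the point of this hunk is that rev 1179 no longer holds ib_pool.mutex across
radeon_fence_wait(), which can sleep for a long time (up to a GPU-lockup timeout).
Every exit path now drops the lock before the wait, and the lock is re-taken only
around the scheduled-list surgery. A minimal sketch of the pattern, with
hypothetical names (pool, pick_oldest_scheduled are illustrative, not the driver's):

	mutex_lock(&pool->mutex);
	nib = pick_oldest_scheduled(pool);        /* hypothetical helper */
	mutex_unlock(&pool->mutex);               /* drop before blocking */

	r = radeon_fence_wait(nib->fence, false); /* may sleep a long time */
	if (r == 0) {
		mutex_lock(&pool->mutex);         /* re-take for list surgery only */
		list_del(&nib->list);
		INIT_LIST_HEAD(&nib->list);
		mutex_unlock(&pool->mutex);
	}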
Line 113 | Line 122
 		mutex_unlock(&rdev->ib_pool.mutex);
 		return;
 	}
 	list_del(&tmp->list);
 	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence) {
+	if (tmp->fence)
 		radeon_fence_unref(&tmp->fence);
-	}
+
 	tmp->length_dw = 0;
 	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
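Note: IB slot bookkeeping is a plain bitmap allocator: find_first_zero_bit() and
set_bit() claim a slot in radeon_ib_get(), clear_bit() releases it here, and
bitmap_zero() in radeon_ib_pool_fini() wipes the lot. A standalone sketch of the
same idiom, assuming the kernel bitmap helpers and a caller that holds the pool
mutex (function names are illustrative):

	static unsigned long alloc_bm[BITS_TO_LONGS(RADEON_IB_POOL_SIZE)];

	static int ib_idx_alloc(void)
	{
		unsigned i = find_first_zero_bit(alloc_bm, RADEON_IB_POOL_SIZE);
		if (i >= RADEON_IB_POOL_SIZE)
			return -ENOMEM;           /* pool exhausted */
		set_bit(i, alloc_bm);
		return i;
	}

	static void ib_idx_free(unsigned i)
	{
		clear_bit(i, alloc_bm);           /* the bit is the only state */
	}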
Line 125 | Line -
-
-static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
-{
-	while ((ib->length_dw & rdev->cp.align_mask)) {
-		ib->ptr[ib->length_dw++] = PACKET2(0);
-	}
-}
-
-static void radeon_ib_cpu_flush(struct radeon_device *rdev,
-				struct radeon_ib *ib)
-{
-	unsigned long tmp;
-	unsigned i;
-
-	/* To force CPU cache flush ugly but seems reliable */
-	for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) {
-		tmp = readl(&ib->ptr[i]);
-	}
-}

 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
 	int r = 0;

-	mutex_lock(&rdev->ib_pool.mutex);
-	radeon_ib_align(rdev, ib);
-	radeon_ib_cpu_flush(rdev, ib);
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		mutex_unlock(&rdev->ib_pool.mutex);
 		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
 		return -EINVAL;
 	}
+
-	/* 64 dwords should be enought for fence too */
+	/* 64 dwords should be enough for fence too */
 	r = radeon_ring_lock(rdev, 64);
 	if (r) {
 		DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
-		mutex_unlock(&rdev->ib_pool.mutex);
 		return r;
 	}
-	radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
-	radeon_ring_write(rdev, ib->gpu_addr);
-	radeon_ring_write(rdev, ib->length_dw);
+	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
-	radeon_ring_unlock_commit(rdev);
+	mutex_lock(&rdev->ib_pool.mutex);
 	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
 	mutex_unlock(&rdev->ib_pool.mutex);
+	radeon_ring_unlock_commit(rdev);
 	return 0;
 }
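Note: the three inline ring writes are replaced by radeon_ring_ib_execute(),
which dispatches through the per-ASIC function table (R600-class parts need a
different IB dispatch than r1xx-r4xx). For pre-R600 chips the hook amounts to
exactly the removed lines; a sketch, with a hypothetical function name:

	static void r1xx_ring_ib_execute(struct radeon_device *rdev,
					 struct radeon_ib *ib)
	{
		/* point the CP at the indirect buffer, then give its size */
		radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
		radeon_ring_write(rdev, ib->gpu_addr);
		radeon_ring_write(rdev, ib->length_dw);
	}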
Line 178 | Line 163
 	void *ptr;
 	uint64_t gpu_addr;
 	int i;
 	int r = 0;

+	if (rdev->ib_pool.robj)
+		return 0;
 	/* Allocate 1M object buffer */
 	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
 	r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
 				 true, RADEON_GEM_DOMAIN_GTT,
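Note: the new early return makes pool init idempotent — a non-NULL robj means
the pool already exists, so a repeated init call becomes a no-op. On sizing,
the "1M object buffer" comment is just the product of the two factors; assuming
RADEON_IB_POOL_SIZE is 16 (its value in radeon.h of this era), with 64 KiB per
IB slot:

	/* spelled out; macro name hypothetical */
	#define IB_SLOT_BYTES	(64 * 1024)   /* 64 KiB per IB slot */
	/* 16 slots * 64 KiB = 1 MiB, matching the comment above;
	 * slot i sits at byte offset i * IB_SLOT_BYTES inside the object */
	size_t pool_bytes = RADEON_IB_POOL_SIZE * IB_SLOT_BYTES;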
Line 221 | Line 208
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
 	if (!rdev->ib_pool.ready) {
 		return;
 	}
-//	mutex_lock(&rdev->ib_pool.mutex);
+	mutex_lock(&rdev->ib_pool.mutex);
 	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
 //		radeon_object_kunmap(rdev->ib_pool.robj);
 //		radeon_object_unref(&rdev->ib_pool.robj);
 		rdev->ib_pool.robj = NULL;
 	}
-//	mutex_unlock(&rdev->ib_pool.mutex);
+	mutex_unlock(&rdev->ib_pool.mutex);
 }
-
-#if 0
-
-int radeon_ib_test(struct radeon_device *rdev)
-{
-	struct radeon_ib *ib;
-	uint32_t scratch;
-	uint32_t tmp = 0;
-	unsigned i;
-	int r;
-
-	r = radeon_scratch_get(rdev, &scratch);
-	if (r) {
-		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
-		return r;
-	}
-	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, &ib);
-	if (r) {
-		return r;
-	}
-	ib->ptr[0] = PACKET0(scratch, 0);
-	ib->ptr[1] = 0xDEADBEEF;
-	ib->ptr[2] = PACKET2(0);
-	ib->ptr[3] = PACKET2(0);
-	ib->ptr[4] = PACKET2(0);
-	ib->ptr[5] = PACKET2(0);
-	ib->ptr[6] = PACKET2(0);
-	ib->ptr[7] = PACKET2(0);
-	ib->length_dw = 8;
-	r = radeon_ib_schedule(rdev, ib);
-	if (r) {
-		radeon_scratch_free(rdev, scratch);
-		radeon_ib_free(rdev, &ib);
-		return r;
-	}
-	r = radeon_fence_wait(ib->fence, false);
-	if (r) {
-		return r;
-	}
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(scratch);
-		if (tmp == 0xDEADBEEF) {
-			break;
-		}
-		DRM_UDELAY(1);
-	}
-	if (i < rdev->usec_timeout) {
-		DRM_INFO("ib test succeeded in %u usecs\n", i);
-	} else {
-		DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
-		r = -EINVAL;
-	}
-	radeon_scratch_free(rdev, scratch);
-	radeon_ib_free(rdev, &ib);
-	return r;
-}
-
-#endif

Line 295 | Line 223

 /*
  * Ring.
  */
 void radeon_ring_free_size(struct radeon_device *rdev)
 {
+	if (rdev->family >= CHIP_R600)
+		rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+	else
 	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
 	/* This works because ring_size is a power of 2 */
 	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
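Note: only the first line of the free-space computation is visible in this hunk;
the full expression in the file goes on to subtract wptr and mask by ptr_mask.
Because the ring size in dwords (ring_size / 4) is a power of two, the
wrap-around modulo reduces to an AND. A self-contained sketch of that arithmetic,
assuming the masking lines that follow the visible context:

	/* free dwords in a power-of-two ring; ring_dw = ring_size / 4 */
	static unsigned ring_free_dw(unsigned rptr, unsigned wptr, unsigned ring_dw)
	{
		unsigned free = (rptr + ring_dw - wptr) & (ring_dw - 1);
		/* an empty ring would compute 0: report it as the full size */
		return free ? free : ring_dw;
	}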
Line 313 | Line 244
 	int r;

 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
 	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
-//	mutex_lock(&rdev->cp.mutex);
+	mutex_lock(&rdev->cp.mutex);
 	while (ndw > (rdev->cp.ring_free_dw - 1)) {
 		radeon_ring_free_size(rdev);
 		if (ndw < rdev->cp.ring_free_dw) {
 			break;
 		}
-		delay(1);
 	}
 //	r = radeon_fence_wait_next(rdev);
 //	if (r) {
 //		mutex_unlock(&rdev->cp.mutex);
 //		return r;
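Note: the first statement is the standard round-up-to-alignment idiom:
(ndw + mask) & ~mask rounds ndw up to the next multiple of (mask + 1) when the
alignment is a power of two. A worked example, assuming a 16-dword fetch
granularity (align_mask = 15; the real mask is per-ASIC):

	unsigned align_mask = 15;               /* alignment 16, a power of two */
	unsigned ndw = 70;
	ndw = (ndw + align_mask) & ~align_mask; /* 70 -> 80, next multiple of 16 */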
Line 341 | Line 270

 	/* We pad to match fetch size */
 	count_dw_pad = (rdev->cp.align_mask + 1) -
 		       (rdev->cp.wptr & rdev->cp.align_mask);
 	for (i = 0; i < count_dw_pad; i++) {
-		radeon_ring_write(rdev, PACKET2(0));
+		radeon_ring_write(rdev, 2 << 30);
 	}
 	DRM_MEMORYBARRIER();
-	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
-	(void)RREG32(RADEON_CP_RB_WPTR);
-//	mutex_unlock(&rdev->cp.mutex);
+	radeon_cp_commit(rdev);
+	mutex_unlock(&rdev->cp.mutex);
 }
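Note: 2 << 30 (0x80000000) is the CP type-2 filler packet header — what the
pre-R600 PACKET2(0) macro expands to — so open-coding it keeps this shared path
free of an r1xx-specific macro. Likewise the raw write-pointer update moves
behind radeon_cp_commit(), which dispatches through the per-ASIC table. For
pre-R600 parts the hook amounts to the two removed lines; a sketch, with a
hypothetical function name:

	static void r1xx_cp_commit(struct radeon_device *rdev)
	{
		WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
		/* read back so the write is posted before the CP fetches */
		(void)RREG32(RADEON_CP_RB_WPTR);
	}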
Line 353 | Line 281

 void radeon_ring_unlock_undo(struct radeon_device *rdev)
 {
 	rdev->cp.wptr = rdev->cp.wptr_old;
-//	mutex_unlock(&rdev->cp.mutex);
+	mutex_unlock(&rdev->cp.mutex);
 }
-
-
-int radeon_ring_test(struct radeon_device *rdev)
-{
-	uint32_t scratch;
-	uint32_t tmp = 0;
-	unsigned i;
-	int r;
-
-	dbgprintf("%s\n",__FUNCTION__);
-
-	r = radeon_scratch_get(rdev, &scratch);
-	if (r) {
-		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
-		return r;
-	}
-	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ring_lock(rdev, 2);
-	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
-		radeon_scratch_free(rdev, scratch);
-		return r;
-	}
-	radeon_ring_write(rdev, PACKET0(scratch, 0));
-	radeon_ring_write(rdev, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev);
-	for (i = 0; i < 100000; i++) {
-		tmp = RREG32(scratch);
-		if (tmp == 0xDEADBEEF) {
-			break;
-		}
-		DRM_UDELAY(1);
-	}
-	if (i < 100000) {
-		DRM_INFO("ring test succeeded in %d usecs\n", i);
-	} else {
-		DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
-		r = -EINVAL;
-	}
-	radeon_scratch_free(rdev, scratch);
-
-	dbgprintf("done %s\n",__FUNCTION__);
-	return r;
-}
-
-
-int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, u32_t *pagelist);
-
Line 411 | Line 288
 int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 {
 	int r;

-	dbgprintf("%s\n",__FUNCTION__);
+	ENTER();

 	rdev->cp.ring_size = ring_size;

 	/* Allocate ring buffer */
 	if (rdev->cp.ring_obj == NULL) {
 		r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
 					 true,
 					 RADEON_GEM_DOMAIN_GTT,
 					 false,
 					 &rdev->cp.ring_obj);
 		if (r) {
 			DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
-//			mutex_unlock(&rdev->cp.mutex);
+			mutex_unlock(&rdev->cp.mutex);
 			return r;
 		}
 		r = radeon_object_pin(rdev->cp.ring_obj,
 				      RADEON_GEM_DOMAIN_GTT,
 				      &rdev->cp.gpu_addr);
 		if (r) {
 			DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
-//			mutex_unlock(&rdev->cp.mutex);
+			mutex_unlock(&rdev->cp.mutex);
 			return r;
 		}
 		r = radeon_object_kmap(rdev->cp.ring_obj,
 				       (void **)&rdev->cp.ring);
 		if (r) {
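Note: the dbgprintf("%s\n",__FUNCTION__) / dbgprintf("done %s\n",__FUNCTION__)
pairs become ENTER()/LEAVE(), which read as the port's function-trace macros
wrapping the same idea. The ring buffer itself follows the usual three-step BO
bring-up — create, pin (to fix a GPU address), kmap (for CPU access) — with the
inverse teardown left commented out in radeon_ring_fini below. A condensed
sketch of the sequence (error checks as in the code above, omitted here):

	r = radeon_object_create(rdev, NULL, size, true,
				 RADEON_GEM_DOMAIN_GTT, false, &obj);
	r = radeon_object_pin(obj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	r = radeon_object_kmap(obj, (void **)&cpu_ptr);
	/* ... use the buffer; unwind in reverse: kunmap, unpin, unref */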
Line 459 | Line 335
 //	radeon_gart_bind(rdev, 0, ring_size / 4096, pagelist);

 	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
 	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;

-	dbgprintf("done %s\n",__FUNCTION__);
+	LEAVE();

 	return 0;
 }

 void radeon_ring_fini(struct radeon_device *rdev)
 {
-//	mutex_lock(&rdev->cp.mutex);
+	mutex_lock(&rdev->cp.mutex);
 	if (rdev->cp.ring_obj) {
 //		radeon_object_kunmap(rdev->cp.ring_obj);
 //		radeon_object_unpin(rdev->cp.ring_obj);
 //		radeon_object_unref(&rdev->cp.ring_obj);
 		rdev->cp.ring = NULL;
 		rdev->cp.ring_obj = NULL;
 	}
Line 522 | Line 398
 			       RADEON_IB_POOL_SIZE);
 #else
 	return 0;
 #endif
 }
-
-
-int drm_order(unsigned long size)
-{
-	int order;
-	unsigned long tmp;
-
-	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
-
-	if (size & (size - 1))
-		++order;
-
-	return order;
-}
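Note: the deleted drm_order() computed the binary order of a size — the smallest
order with (1UL << order) >= size, i.e. ceil(log2(size)). Worked values for
reference:

	drm_order(1);    /* == 0:  1 << 0 == 1            */
	drm_order(4096); /* == 12: exact power of two     */
	drm_order(4097); /* == 13: not a power of two, rounds up */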