/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 *    Thomas Hellstrom
 *    Dave Airlie
 */

#include
#include

#include "radeon_drm.h"
#include "radeon.h"

#include

#include "radeon_object.h"

int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                     int pages, u32_t *pagelist);

static struct drm_mm mm_gtt;
static struct drm_mm mm_vram;
int radeon_object_init(struct radeon_device *rdev)
{
    int r = 0;

    ENTER();

    r = drm_mm_init(&mm_vram, 0xC00000 >> PAGE_SHIFT,
                    ((rdev->mc.real_vram_size - 0xC00000) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    }

    r = drm_mm_init(&mm_gtt, 0, ((rdev->mc.gtt_size) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }

    return r;
//    return radeon_ttm_init(rdev);
}
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
    uint32_t flags = 0;
    if (domain & RADEON_GEM_DOMAIN_VRAM) {
        flags |= TTM_PL_FLAG_VRAM;
    }
    if (domain & RADEON_GEM_DOMAIN_GTT) {
        flags |= TTM_PL_FLAG_TT;
    }
    if (domain & RADEON_GEM_DOMAIN_CPU) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    if (!flags) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    return flags;
}
int radeon_object_create(struct radeon_device *rdev,
                         struct drm_gem_object *gobj,
                         unsigned long size,
                         bool kernel,
                         uint32_t domain,
                         bool interruptible,
                         struct radeon_object **robj_ptr)
{
    struct radeon_object *robj;
    enum ttm_bo_type type;
    uint32_t flags;
    int r = 0;   /* must start at 0: the TTM init call below is disabled */

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *robj_ptr = NULL;
    robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (robj == NULL) {
        return -ENOMEM;
    }
    robj->rdev = rdev;
//    robj->gobj = gobj;
    INIT_LIST_HEAD(&robj->list);

    flags = radeon_object_flags_from_domain(domain);

    robj->flags = flags;

    if (flags & TTM_PL_FLAG_VRAM) {
        size_t num_pages;
        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            dbgprintf("Illegal buffer object size.\n");
            kfree(robj);    /* don't leak the object on error */
            return -EINVAL;
        }
retry_pre_get:
        r = drm_mm_pre_get(&mm_vram);
        if (unlikely(r != 0)) {
            kfree(robj);
            return r;
        }

        vm_node = drm_mm_search_free(&mm_vram, num_pages, 0, 0);
        if (unlikely(vm_node == NULL)) {
            kfree(robj);
            return -ENOMEM;
        }

        robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);
        if (unlikely(robj->mm_node == NULL)) {
            goto retry_pre_get;
        }

        robj->vm_addr = ((uint32_t)robj->mm_node->start);

//        dbgprintf("alloc vram: base %x size %x\n",
//                  robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
    }

    if (flags & TTM_PL_FLAG_TT) {
        size_t num_pages;
        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            dbgprintf("Illegal buffer object size.\n");
            kfree(robj);
            return -EINVAL;
        }
retry_pre_get1:
        r = drm_mm_pre_get(&mm_gtt);
        if (unlikely(r != 0)) {
            kfree(robj);
            return r;
        }

        vm_node = drm_mm_search_free(&mm_gtt, num_pages, 0, 0);
        if (unlikely(vm_node == NULL)) {
            kfree(robj);
            return -ENOMEM;
        }

        robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);
        if (unlikely(robj->mm_node == NULL)) {
            goto retry_pre_get1;
        }

        robj->vm_addr = ((uint32_t)robj->mm_node->start);

//        dbgprintf("alloc gtt: base %x size %x\n",
//                  robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
    }

//    r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                               0, 0, false, NULL, size,
//                               &radeon_ttm_object_object_destroy);
    if (unlikely(r != 0)) {
        /* TTM calls radeon_ttm_object_object_destroy if an error happens */
        DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
                  size, flags, 0);
        return r;
    }
    *robj_ptr = robj;
//    if (gobj) {
//        list_add_tail(&robj->list, &rdev->gem.objects);
//    }
    return 0;
}
#define page_tabs 0xFDC00000
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
                      uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    int r = 0;

//    flags = radeon_object_flags_from_domain(domain);
//    spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
//        spin_unlock(&robj->tobj.lock);
        return 0;
    }
//    spin_unlock(&robj->tobj.lock);
//    r = radeon_object_reserve(robj, false);
//    if (unlikely(r != 0)) {
//        DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
//        return r;
//    }
//    tmp = robj->tobj.mem.placement;
//    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
//    robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
//    r = ttm_buffer_object_validate(&robj->tobj,
//                                   robj->tobj.proposed_placement,
//                                   false, false);

    robj->gpu_addr = ((u64)robj->vm_addr) << PAGE_SHIFT;

    if (robj->flags & TTM_PL_FLAG_VRAM)
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
    else if (robj->flags & TTM_PL_FLAG_TT) {
        u32_t *pagelist;
        robj->kptr = KernelAlloc(robj->mm_node->size << PAGE_SHIFT);
        dbgprintf("kernel alloc %x\n", robj->kptr);

        pagelist = &((u32_t*)page_tabs)[(u32_t)robj->kptr >> 12];
        dbgprintf("pagelist %x\n", pagelist);
        radeon_gart_bind(robj->rdev, robj->gpu_addr,
                         robj->mm_node->size, pagelist);
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
    } else {
        DRM_ERROR("Unknown placement %d\n", robj->flags);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        r = -1;
    }

//    flags & TTM_PL_FLAG_VRAM
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }

    return r;
}
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
    int r = 0;

//    spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        if (ptr) {
            *ptr = robj->kptr;
        }
//        spin_unlock(&robj->tobj.lock);
        return 0;
    }
//    spin_unlock(&robj->tobj.lock);

    if (robj->flags & TTM_PL_FLAG_VRAM) {
        robj->cpu_addr = robj->rdev->mc.aper_base +
                         (robj->vm_addr << PAGE_SHIFT);
        robj->kptr = (void*)MapIoMem(robj->cpu_addr,
                                     robj->mm_node->size << 12, PG_SW);
    } else {
        return -1;
    }

    if (ptr) {
        *ptr = robj->kptr;
    }

    return 0;
}
void radeon_object_kunmap(struct radeon_object *robj)
{
//    spin_lock(&robj->tobj.lock);
    if (robj->kptr == NULL) {
//        spin_unlock(&robj->tobj.lock);
        return;
    }

    if (robj->flags & TTM_PL_FLAG_VRAM) {
        FreeKernelSpace(robj->kptr);
        robj->kptr = NULL;
    }
//    spin_unlock(&robj->tobj.lock);
}
void radeon_object_unpin(struct radeon_object *robj)
{
    uint32_t flags;
    int r;

//    spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
//        spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
//        spin_unlock(&robj->tobj.lock);
        return;
    }
//    spin_unlock(&robj->tobj.lock);

    drm_mm_put_block(robj->mm_node);

    kfree(robj);
}
#if 0

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
    return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
    ttm_bo_unreserve(&robj->tobj);
}

static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
    struct radeon_object *robj;

    robj = container_of(tobj, struct radeon_object, tobj);
//    list_del_init(&robj->list);
    kfree(robj);
}

static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
    /* Default gpu address */
    robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
    if (robj->tobj.mem.mm_node == NULL) {
        return;
    }
    robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
    switch (robj->tobj.mem.mem_type) {
    case TTM_PL_VRAM:
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
        break;
    case TTM_PL_TT:
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
        break;
    default:
        DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        return;
    }
}

int radeon_object_create(struct radeon_device *rdev,
                         struct drm_gem_object *gobj,
                         unsigned long size,
                         bool kernel,
                         uint32_t domain,
                         bool interruptible,
                         struct radeon_object **robj_ptr)
{
    struct radeon_object *robj;
    enum ttm_bo_type type;
    uint32_t flags;
    int r;

//    if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
//        rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
//    }
    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *robj_ptr = NULL;
    robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (robj == NULL) {
        return -ENOMEM;
    }
    robj->rdev = rdev;
    robj->gobj = gobj;
//    INIT_LIST_HEAD(&robj->list);

    flags = radeon_object_flags_from_domain(domain);
//    r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                               0, 0, false, NULL, size,
//                               &radeon_ttm_object_object_destroy);
    if (unlikely(r != 0)) {
        /* TTM calls radeon_ttm_object_object_destroy if an error happens */
        DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
                  size, flags, 0);
        return r;
    }
    *robj_ptr = robj;
//    if (gobj) {
//        list_add_tail(&robj->list, &rdev->gem.objects);
//    }
    return 0;
}

int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
    int r;

//    spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        if (ptr) {
            *ptr = robj->kptr;
        }
//        spin_unlock(&robj->tobj.lock);
        return 0;
    }
//    spin_unlock(&robj->tobj.lock);
    r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
    if (r) {
        return r;
    }
//    spin_lock(&robj->tobj.lock);
    robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
//    spin_unlock(&robj->tobj.lock);
    if (ptr) {
        *ptr = robj->kptr;
    }
    return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
//    spin_lock(&robj->tobj.lock);
    if (robj->kptr == NULL) {
//        spin_unlock(&robj->tobj.lock);
        return;
    }
    robj->kptr = NULL;
//    spin_unlock(&robj->tobj.lock);
    ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
    struct ttm_buffer_object *tobj;

    if ((*robj) == NULL) {
        return;
    }
    tobj = &((*robj)->tobj);
    ttm_bo_unref(&tobj);
    if (tobj == NULL) {
        *robj = NULL;
    }
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
    *offset = robj->tobj.addr_space_offset;
    return 0;
}

int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
                      uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    int r;

    flags = radeon_object_flags_from_domain(domain);
//    spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
//        spin_unlock(&robj->tobj.lock);
        return 0;
    }
//    spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
        return r;
    }
    tmp = robj->tobj.mem.placement;
    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
    robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
    r = ttm_buffer_object_validate(&robj->tobj,
                                   robj->tobj.proposed_placement,
                                   false, false);
    radeon_object_gpu_addr(robj);
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }
    radeon_object_unreserve(robj);
    return r;
}

void radeon_object_unpin(struct radeon_object *robj)
{
    uint32_t flags;
    int r;

//    spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
//        spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
//        spin_unlock(&robj->tobj.lock);
        return;
    }
//    spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
        return;
    }
    flags = robj->tobj.mem.placement;
    robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
    r = ttm_buffer_object_validate(&robj->tobj,
                                   robj->tobj.proposed_placement,
                                   false, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to unpin buffer.\n");
    }
    radeon_object_unreserve(robj);
}

int radeon_object_wait(struct radeon_object *robj)
{
    int r = 0;

    /* FIXME: should use block reservation instead */
    r = radeon_object_reserve(robj, true);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for waiting.\n");
        return r;
    }
//    spin_lock(&robj->tobj.lock);
    if (robj->tobj.sync_obj) {
        r = ttm_bo_wait(&robj->tobj, true, false, false);
    }
//    spin_unlock(&robj->tobj.lock);
    radeon_object_unreserve(robj);
    return r;
}

int radeon_object_evict_vram(struct radeon_device *rdev)
{
    if (rdev->flags & RADEON_IS_IGP) {
        /* Useless to evict on IGP chips */
        return 0;
    }
    return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_object_force_delete(struct radeon_device *rdev)
{
    struct radeon_object *robj, *n;
    struct drm_gem_object *gobj;

    if (list_empty(&rdev->gem.objects)) {
        return;
    }
    DRM_ERROR("Userspace still has active objects !\n");
    list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
        mutex_lock(&rdev->ddev->struct_mutex);
        gobj = robj->gobj;
        DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
                  gobj, robj, (unsigned long)gobj->size,
                  *((unsigned long *)&gobj->refcount));
        list_del_init(&robj->list);
        radeon_object_unref(&robj);
        gobj->driver_private = NULL;
        drm_gem_object_unreference(gobj);
        mutex_unlock(&rdev->ddev->struct_mutex);
    }
}

void radeon_object_fini(struct radeon_device *rdev)
{
    radeon_ttm_fini(rdev);
}

void radeon_object_list_add_object(struct radeon_object_list *lobj,
                                   struct list_head *head)
{
    if (lobj->wdomain) {
        list_add(&lobj->list, head);
    } else {
        list_add_tail(&lobj->list, head);
    }
}

int radeon_object_list_reserve(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct list_head *i;
    int r;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        if (!lobj->robj->pin_count) {
            r = radeon_object_reserve(lobj->robj, true);
            if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object.\n");
                return r;
            }
        } else {
        }
    }
    return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct list_head *i;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        if (!lobj->robj->pin_count) {
            radeon_object_unreserve(lobj->robj);
        } else {
        }
    }
}

int radeon_object_list_validate(struct list_head *head, void *fence)
{
    struct radeon_object_list *lobj;
    struct radeon_object *robj;
    struct radeon_fence *old_fence = NULL;
    struct list_head *i;
    uint32_t flags;
    int r;

    r = radeon_object_list_reserve(head);
    if (unlikely(r != 0)) {
        radeon_object_list_unreserve(head);
        return r;
    }
    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        robj = lobj->robj;
        if (lobj->wdomain) {
            flags = radeon_object_flags_from_domain(lobj->wdomain);
            flags |= TTM_PL_FLAG_TT;
        } else {
            flags = radeon_object_flags_from_domain(lobj->rdomain);
            flags |= TTM_PL_FLAG_TT;
            flags |= TTM_PL_FLAG_VRAM;
        }
        if (!robj->pin_count) {
            robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
            r = ttm_buffer_object_validate(&robj->tobj,
                                           robj->tobj.proposed_placement,
                                           true, false);
            if (unlikely(r)) {
                radeon_object_list_unreserve(head);
                DRM_ERROR("radeon: failed to validate.\n");
                return r;
            }
            radeon_object_gpu_addr(robj);
        }
        lobj->gpu_offset = robj->gpu_addr;
        if (fence) {
            old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
            robj->tobj.sync_obj = radeon_fence_ref(fence);
            robj->tobj.sync_obj_arg = NULL;
        }
        if (old_fence) {
            radeon_fence_unref(&old_fence);
        }
    }
    return 0;
}

void radeon_object_list_unvalidate(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct radeon_fence *old_fence = NULL;
    struct list_head *i;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
        lobj->robj->tobj.sync_obj = NULL;
        if (old_fence) {
            radeon_fence_unref(&old_fence);
        }
    }
    radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
    radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
                             struct vm_area_struct *vma)
{
    return ttm_fbdev_mmap(vma, &robj->tobj);
}

#endif
762 | |||
unsigned long radeon_object_size(struct radeon_object *robj)
{
    return robj->tobj.num_pages << PAGE_SHIFT;
}