/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 *    Thomas Hellstrom
 *    Dave Airlie
 */
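
/*
 * Buffer-object management for this Radeon port.  Instead of the TTM-backed
 * placement used upstream, VRAM and GTT space are handed out by two static
 * drm_mm range managers and buffers are mapped by hand; the original
 * TTM-based implementation is kept under "#if 0" at the end of the file.
 */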
#include <linux/list.h>
#include <drm/drmP.h>

#include "radeon_drm.h"
#include "radeon.h"
#include <drm_mm.h>            /* exact header name uncertain: the angle-bracket
                                  includes were lost, reconstructed from the
                                  APIs used below */
#include "radeon_object.h"
1120 | serge | 39 | |
40 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, |
||
41 | int pages, u32_t *pagelist); |
||
42 | |||
43 | |||
44 | |||
45 | |||
static struct drm_mm mm_gtt;
static struct drm_mm mm_vram;

int radeon_object_init(struct radeon_device *rdev)
{
    int r = 0;

    dbgprintf("%s\n",__FUNCTION__);

    r = drm_mm_init(&mm_vram, 0x800000 >> PAGE_SHIFT,
                    ((rdev->mc.aper_size - 0x800000) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    }

    r = drm_mm_init(&mm_gtt, 0, ((rdev->mc.gtt_size) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }

    return r;
//  return radeon_ttm_init(rdev);
}
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
    uint32_t flags = 0;
    if (domain & RADEON_GEM_DOMAIN_VRAM) {
        flags |= TTM_PL_FLAG_VRAM;
    }
    if (domain & RADEON_GEM_DOMAIN_GTT) {
        flags |= TTM_PL_FLAG_TT;
    }
    if (domain & RADEON_GEM_DOMAIN_CPU) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    if (!flags) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    return flags;
}
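
/*
 * Allocate a buffer object and reserve address space for it in the VRAM or
 * GTT heap.  The caller is expected to request a single placement: if both
 * VRAM and GTT are requested, the GTT block silently replaces the VRAM one
 * in mm_node.  The drm_mm pre-get/search/get-atomic dance below is retried
 * when another thread races us for the free block.
 */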
90 | |||
91 | |||
92 | int radeon_object_create(struct radeon_device *rdev, |
||
93 | struct drm_gem_object *gobj, |
||
94 | unsigned long size, |
||
95 | bool kernel, |
||
96 | uint32_t domain, |
||
97 | bool interruptible, |
||
98 | struct radeon_object **robj_ptr) |
||
99 | { |
||
100 | struct radeon_object *robj; |
||
101 | enum ttm_bo_type type; |
||
102 | uint32_t flags; |
||
103 | int r; |
||
104 | |||
105 | dbgprintf("%s\n",__FUNCTION__); |
||
106 | |||
107 | if (kernel) { |
||
108 | type = ttm_bo_type_kernel; |
||
109 | } else { |
||
110 | type = ttm_bo_type_device; |
||
111 | } |
||
112 | *robj_ptr = NULL; |
||
113 | robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); |
||
114 | if (robj == NULL) { |
||
115 | return -ENOMEM; |
||
116 | } |
||
117 | robj->rdev = rdev; |
||
118 | // robj->gobj = gobj; |
||
119 | INIT_LIST_HEAD(&robj->list); |
||
120 | |||
121 | flags = radeon_object_flags_from_domain(domain); |
||
122 | |||
123 | robj->flags = flags; |
||
124 | |||
125 | dbgprintf("robj flags %x\n", robj->flags); |
||
126 | |||
127 | if( flags & TTM_PL_FLAG_VRAM) |
||
128 | { |
||
129 | size_t num_pages; |
||
130 | |||
131 | struct drm_mm_node *vm_node; |
||
132 | |||
133 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
||
134 | |||
135 | if (num_pages == 0) { |
||
136 | printk("Illegal buffer object size.\n"); |
||
137 | return -EINVAL; |
||
138 | } |
||
139 | retry_pre_get: |
||
140 | r = drm_mm_pre_get(&mm_vram); |
||
141 | |||
142 | if (unlikely(r != 0)) |
||
143 | return r; |
||
144 | |||
145 | vm_node = drm_mm_search_free(&mm_vram, num_pages, 0, 0); |
||
146 | |||
147 | if (unlikely(vm_node == NULL)) { |
||
148 | r = -ENOMEM; |
||
149 | return r; |
||
150 | } |
||
151 | |||
152 | robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0); |
||
153 | |||
154 | if (unlikely(robj->mm_node == NULL)) { |
||
155 | goto retry_pre_get; |
||
156 | } |
||
157 | |||
158 | robj->vm_addr = ((uint32_t)robj->mm_node->start); |
||
159 | |||
160 | dbgprintf("alloc vram: base %x size %x\n", |
||
161 | robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT); |
||
162 | |||
163 | }; |
||
164 | |||
165 | if( flags & TTM_PL_FLAG_TT) |
||
166 | { |
||
167 | size_t num_pages; |
||
168 | |||
169 | struct drm_mm_node *vm_node; |
||
170 | |||
171 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
||
172 | |||
173 | if (num_pages == 0) { |
||
174 | printk("Illegal buffer object size.\n"); |
||
175 | return -EINVAL; |
||
176 | } |
||
177 | retry_pre_get1: |
||
178 | r = drm_mm_pre_get(&mm_gtt); |
||
179 | |||
180 | if (unlikely(r != 0)) |
||
181 | return r; |
||
182 | |||
183 | vm_node = drm_mm_search_free(&mm_gtt, num_pages, 0, 0); |
||
184 | |||
185 | if (unlikely(vm_node == NULL)) { |
||
186 | r = -ENOMEM; |
||
187 | return r; |
||
188 | } |
||
189 | |||
190 | robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0); |
||
191 | |||
192 | if (unlikely(robj->mm_node == NULL)) { |
||
193 | goto retry_pre_get1; |
||
194 | } |
||
195 | |||
196 | robj->vm_addr = ((uint32_t)robj->mm_node->start) ; |
||
197 | |||
198 | dbgprintf("alloc gtt: base %x size %x\n", |
||
199 | robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT); |
||
200 | }; |
||
201 | |||
202 | // r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags, |
||
203 | // 0, 0, false, NULL, size, |
||
204 | // &radeon_ttm_object_object_destroy); |
||
205 | if (unlikely(r != 0)) { |
||
206 | /* ttm call radeon_ttm_object_object_destroy if error happen */ |
||
207 | DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n", |
||
208 | size, flags, 0); |
||
209 | return r; |
||
210 | } |
||
211 | *robj_ptr = robj; |
||
212 | // if (gobj) { |
||
213 | // list_add_tail(&robj->list, &rdev->gem.objects); |
||
214 | // } |
||
215 | return 0; |
||
216 | } |
||
217 | |||
#define page_tabs 0xFDC00000

int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
                      uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    int r = 0;

    dbgprintf("%s\n",__FUNCTION__);

//  flags = radeon_object_flags_from_domain(domain);
//  spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
//      spin_unlock(&robj->tobj.lock);
        return 0;
    }
//  spin_unlock(&robj->tobj.lock);
//  r = radeon_object_reserve(robj, false);
//  if (unlikely(r != 0)) {
//      DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
//      return r;
//  }
//  tmp = robj->tobj.mem.placement;
//  ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
//  robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
//  r = ttm_buffer_object_validate(&robj->tobj,
//                                 robj->tobj.proposed_placement,
//                                 false, false);

    robj->gpu_addr = ((u64)robj->vm_addr) << PAGE_SHIFT;

    if (robj->flags & TTM_PL_FLAG_VRAM)
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
    else if (robj->flags & TTM_PL_FLAG_TT)
    {
        u32_t *pagelist;

        /* allocate backing pages for the object */
        robj->kptr = KernelAlloc(robj->mm_node->size << PAGE_SHIFT);
        dbgprintf("kernel alloc %x\n", robj->kptr);

        /* page-table entries for the backing pages, then map them
         * into the GART at the object's GTT offset */
        pagelist = &((u32_t*)page_tabs)[(u32_t)robj->kptr >> 12];
        dbgprintf("pagelist %x\n", pagelist);
        radeon_gart_bind(robj->rdev, robj->gpu_addr,
                         robj->mm_node->size, pagelist);
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
    }
    else
    {
        DRM_ERROR("Unknown placement %d\n", robj->flags);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        r = -1;
    }

//  flags & TTM_PL_FLAG_VRAM
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }

    dbgprintf("done %s\n",__FUNCTION__);

    return r;
}

int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
    int r = 0;

    dbgprintf("%s\n",__FUNCTION__);

//  spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        /* already mapped (GTT objects get kptr at pin time) */
        if (ptr) {
            *ptr = robj->kptr;
        }
//      spin_unlock(&robj->tobj.lock);
        return 0;
    }
//  spin_unlock(&robj->tobj.lock);

    if (robj->flags & TTM_PL_FLAG_VRAM)
    {
        /* map the object's VRAM through the PCI aperture */
        robj->cpu_addr = robj->rdev->mc.aper_base +
                         (robj->vm_addr << PAGE_SHIFT);
        robj->kptr = (void*)MapIoMem(robj->cpu_addr,
                                     robj->mm_node->size << 12, PG_SW);
        dbgprintf("map io mem %x at %x\n", robj->cpu_addr, robj->kptr);
    }
    else
    {
        return -1;
    }

    if (ptr) {
        *ptr = robj->kptr;
    }

    dbgprintf("done %s\n",__FUNCTION__);

    return 0;
}
#if 0

void radeon_object_unpin(struct radeon_object *robj)
{
    uint32_t flags;
    int r;

//  spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
//      spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
//      spin_unlock(&robj->tobj.lock);
        return;
    }
//  spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
        return;
    }
    flags = robj->tobj.mem.placement;
    robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
    r = ttm_buffer_object_validate(&robj->tobj,
                                   robj->tobj.proposed_placement,
                                   false, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to unpin buffer.\n");
    }
    radeon_object_unreserve(robj);
}

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
    return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
    ttm_bo_unreserve(&robj->tobj);
}

static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
    struct radeon_object *robj;

    robj = container_of(tobj, struct radeon_object, tobj);
//  list_del_init(&robj->list);
    kfree(robj);
}

static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
    /* Default gpu address */
    robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
    if (robj->tobj.mem.mm_node == NULL) {
        return;
    }
    robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
    switch (robj->tobj.mem.mem_type) {
    case TTM_PL_VRAM:
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
        break;
    case TTM_PL_TT:
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
        break;
    default:
        DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        return;
    }
}

int radeon_object_create(struct radeon_device *rdev,
                         struct drm_gem_object *gobj,
                         unsigned long size,
                         bool kernel,
                         uint32_t domain,
                         bool interruptible,
                         struct radeon_object **robj_ptr)
{
    struct radeon_object *robj;
    enum ttm_bo_type type;
    uint32_t flags;
    int r;

//  if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
//      rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
//  }
    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *robj_ptr = NULL;
    robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (robj == NULL) {
        return -ENOMEM;
    }
    robj->rdev = rdev;
    robj->gobj = gobj;
//  INIT_LIST_HEAD(&robj->list);

    flags = radeon_object_flags_from_domain(domain);
//  r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                             0, 0, false, NULL, size,
//                             &radeon_ttm_object_object_destroy);
    if (unlikely(r != 0)) {
        /* ttm calls radeon_ttm_object_object_destroy if an error happens */
        DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
                  size, flags, 0);
        return r;
    }
    *robj_ptr = robj;
//  if (gobj) {
//      list_add_tail(&robj->list, &rdev->gem.objects);
//  }
    return 0;
}

int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
    int r;

//  spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        if (ptr) {
            *ptr = robj->kptr;
        }
//      spin_unlock(&robj->tobj.lock);
        return 0;
    }
//  spin_unlock(&robj->tobj.lock);
    r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
    if (r) {
        return r;
    }
//  spin_lock(&robj->tobj.lock);
    robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
//  spin_unlock(&robj->tobj.lock);
    if (ptr) {
        *ptr = robj->kptr;
    }
    return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
//  spin_lock(&robj->tobj.lock);
    if (robj->kptr == NULL) {
//      spin_unlock(&robj->tobj.lock);
        return;
    }
    robj->kptr = NULL;
//  spin_unlock(&robj->tobj.lock);
    ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
    struct ttm_buffer_object *tobj;

    if ((*robj) == NULL) {
        return;
    }
    tobj = &((*robj)->tobj);
    ttm_bo_unref(&tobj);
    if (tobj == NULL) {
        *robj = NULL;
    }
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
    *offset = robj->tobj.addr_space_offset;
    return 0;
}

int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
                      uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    int r;

    flags = radeon_object_flags_from_domain(domain);
//  spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
//      spin_unlock(&robj->tobj.lock);
        return 0;
    }
//  spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
        return r;
    }
    tmp = robj->tobj.mem.placement;
    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
    robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
    r = ttm_buffer_object_validate(&robj->tobj,
                                   robj->tobj.proposed_placement,
                                   false, false);
    radeon_object_gpu_addr(robj);
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }
    radeon_object_unreserve(robj);
    return r;
}

void radeon_object_unpin(struct radeon_object *robj)
{
    uint32_t flags;
    int r;

//  spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
//      spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
//      spin_unlock(&robj->tobj.lock);
        return;
    }
//  spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
        return;
    }
    flags = robj->tobj.mem.placement;
    robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
    r = ttm_buffer_object_validate(&robj->tobj,
                                   robj->tobj.proposed_placement,
                                   false, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to unpin buffer.\n");
    }
    radeon_object_unreserve(robj);
}

int radeon_object_wait(struct radeon_object *robj)
{
    int r = 0;

    /* FIXME: should use block reservation instead */
    r = radeon_object_reserve(robj, true);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for waiting.\n");
        return r;
    }
//  spin_lock(&robj->tobj.lock);
    if (robj->tobj.sync_obj) {
        r = ttm_bo_wait(&robj->tobj, true, false, false);
    }
//  spin_unlock(&robj->tobj.lock);
    radeon_object_unreserve(robj);
    return r;
}

int radeon_object_evict_vram(struct radeon_device *rdev)
{
    if (rdev->flags & RADEON_IS_IGP) {
        /* Useless to evict on IGP chips */
        return 0;
    }
    return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_object_force_delete(struct radeon_device *rdev)
{
    struct radeon_object *robj, *n;
    struct drm_gem_object *gobj;

    if (list_empty(&rdev->gem.objects)) {
        return;
    }
    DRM_ERROR("Userspace still has active objects !\n");
    list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
        mutex_lock(&rdev->ddev->struct_mutex);
        gobj = robj->gobj;
        DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
                  gobj, robj, (unsigned long)gobj->size,
                  *((unsigned long *)&gobj->refcount));
        list_del_init(&robj->list);
        radeon_object_unref(&robj);
        gobj->driver_private = NULL;
        drm_gem_object_unreference(gobj);
        mutex_unlock(&rdev->ddev->struct_mutex);
    }
}

void radeon_object_fini(struct radeon_device *rdev)
{
    radeon_ttm_fini(rdev);
}

void radeon_object_list_add_object(struct radeon_object_list *lobj,
                                   struct list_head *head)
{
    if (lobj->wdomain) {
        list_add(&lobj->list, head);
    } else {
        list_add_tail(&lobj->list, head);
    }
}

int radeon_object_list_reserve(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct list_head *i;
    int r;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        if (!lobj->robj->pin_count) {
            r = radeon_object_reserve(lobj->robj, true);
            if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object.\n");
                return r;
            }
        } else {
        }
    }
    return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct list_head *i;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        if (!lobj->robj->pin_count) {
            radeon_object_unreserve(lobj->robj);
        } else {
        }
    }
}

int radeon_object_list_validate(struct list_head *head, void *fence)
{
    struct radeon_object_list *lobj;
    struct radeon_object *robj;
    struct radeon_fence *old_fence = NULL;
    struct list_head *i;
    uint32_t flags;
    int r;

    r = radeon_object_list_reserve(head);
    if (unlikely(r != 0)) {
        radeon_object_list_unreserve(head);
        return r;
    }
    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        robj = lobj->robj;
        if (lobj->wdomain) {
            flags = radeon_object_flags_from_domain(lobj->wdomain);
            flags |= TTM_PL_FLAG_TT;
        } else {
            flags = radeon_object_flags_from_domain(lobj->rdomain);
            flags |= TTM_PL_FLAG_TT;
            flags |= TTM_PL_FLAG_VRAM;
        }
        if (!robj->pin_count) {
            robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
            r = ttm_buffer_object_validate(&robj->tobj,
                                           robj->tobj.proposed_placement,
                                           true, false);
            if (unlikely(r)) {
                radeon_object_list_unreserve(head);
                DRM_ERROR("radeon: failed to validate.\n");
                return r;
            }
            radeon_object_gpu_addr(robj);
        }
        lobj->gpu_offset = robj->gpu_addr;
        if (fence) {
            old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
            robj->tobj.sync_obj = radeon_fence_ref(fence);
            robj->tobj.sync_obj_arg = NULL;
        }
        if (old_fence) {
            radeon_fence_unref(&old_fence);
        }
    }
    return 0;
}

void radeon_object_list_unvalidate(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct radeon_fence *old_fence = NULL;
    struct list_head *i;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
        lobj->robj->tobj.sync_obj = NULL;
        if (old_fence) {
            radeon_fence_unref(&old_fence);
        }
    }
    radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
    radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
                             struct vm_area_struct *vma)
{
    return ttm_fbdev_mmap(vma, &robj->tobj);
}

#endif
773 | |||
1120 | serge | 774 | unsigned long radeon_object_size(struct radeon_object *robj) |
775 | { |
||
776 | return robj->tobj.num_pages << PAGE_SHIFT; |
||
777 | }><>><>><>><>><>><>><>><>><>><> |
||
778 |
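
/*
 * Usage sketch (not part of the driver): the typical lifecycle of a buffer
 * in this port, assuming rdev has already passed radeon_object_init().
 * The function name and the buffer size here are illustrative only.
 */
#if 0
static int example_scratch_buffer(struct radeon_device *rdev)
{
    struct radeon_object *robj;
    uint64_t gpu_addr;
    void *cpu_ptr;
    int r;

    /* 64 KiB kernel-owned buffer placed in VRAM */
    r = radeon_object_create(rdev, NULL, 64 * 1024, true,
                             RADEON_GEM_DOMAIN_VRAM, false, &robj);
    if (r != 0)
        return r;

    /* fix it in place and learn the GPU address to use in commands */
    r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    if (r != 0)
        return r;

    /* get a CPU view of the buffer through the PCI aperture */
    return radeon_object_kmap(robj, &cpu_ptr);
}
#endif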