/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

/* Include targets were lost in extraction; assumed set, matching mainline
 * ttm_bo.c of this kernel generation. */
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>

#define pr_err(fmt, ...) \
	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

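/*
 * Illustrative expansion of the two macros above: a call such as
 *
 *	pr_err("Out of kernel memory\n");
 *
 * becomes printk(KERN_ERR "[TTM] " "Out of kernel memory\n"), so every
 * error this file prints carries the "[TTM] " prefix.
 */
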
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	/* Mirror ttm_mem_io_unlock(): on the fastpath no mutex is taken,
	 * so none must be released. Without this check the mutex acquired
	 * here would never be dropped by the unlock path below. */
	if (likely(man->io_reserve_fastpath))
		return 0;

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
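
#if 0
/* Usage sketch only (mirrors how ttm_bo_release() below brackets its
 * io_mem accesses); "man" is any initialized memory-type manager and
 * example_io_reserve_section is a hypothetical name. */
static void example_io_reserve_section(struct ttm_mem_type_manager *man)
{
	if (ttm_mem_io_lock(man, false) == 0) {
		/* ... walk or modify man->io_reserve_lru here ... */
		ttm_mem_io_unlock(man);
	}
}
#endif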

#if 0
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};
#endif


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
		container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

//	BUG_ON(!ttm_bo_is_reserved(bo));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
//	kref_sub(&bo->list_kref, count,
//		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	spin_lock(&bo->glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
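
/*
 * Illustrative note on the list_kref accounting above: every list a bo is
 * linked on (the per-manager lru and the global swap_lru) pins one
 * list_kref reference, so ttm_bo_del_from_lru() returns how many
 * references still have to be dropped. In the mainline kernel
 * ttm_bo_list_ref_sub() does that via
 *
 *	kref_sub(&bo->list_kref, count,
 *		 never_free ? ttm_bo_ref_bug : ttm_bo_release_list);
 *
 * here the call is stubbed out while the port's kref machinery is
 * incomplete, so those references are not yet released.
 */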

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

//	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through: device bos also get a ttm_tt */
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

#if 0
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			pr_err("Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		spin_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		spin_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	}
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	spin_unlock(&bdev->fence_lock);

	if (!ret)
		ww_mutex_unlock(&bo->resv->lock);

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		void *sync_obj;

		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);

		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		if (ret)
			return ret;

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
		spin_unlock(&bdev->fence_lock);
		if (ret)
			return ret;

		spin_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}
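
/*
 * Illustrative caller pattern for ttm_bo_cleanup_refs_and_unlock(): both
 * locks named in the comment above are taken by the caller and are gone
 * when the function returns, e.g.
 *
 *	spin_lock(&glob->lru_lock);
 *	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 *	if (!ret)
 *		ret = ttm_bo_cleanup_refs_and_unlock(bo, intr, no_wait);
 *	else
 *		spin_unlock(&glob->lru_lock);
 *
 * ttm_bo_delayed_delete() and ttm_mem_evict_first() below follow exactly
 * this shape.
 */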

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_bo_reserve_nolru(entry, false, false,
						   false, 0);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		/* retry in ~10 ms, but never less than one jiffy */
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}
#endif

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	write_lock(&bdev->vm_lock);
	if (likely(bo->vm_node != NULL)) {
//		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_mem_io_lock(man, false);
//	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
//	ttm_bo_cleanup_refs_or_queue(bo);
//	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

#if 0
int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

//	BUG_ON(!ttm_bo_is_reserved(bo));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  struct ttm_placement *placement,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}
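
/*
 * Worked example (illustrative): if the bo currently sits in
 * write-combined memory (cur_placement has TTM_PL_FLAG_WC) and the
 * proposed placement allows TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC, the
 * first branch keeps WC and avoids a costly caching transition. Only
 * when the current mode is not allowed does the choice fall back to the
 * manager default and then to the fixed CACHED > WC > UNCACHED order.
 */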

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
					      &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
					       mem_type,
					       placement->placement[i],
					       &cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
					      &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
					  mem_type,
					  placement->busy_placement[i],
					  &cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);


		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
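
#if 0
/* Illustrative only: the shape of a ttm_placement a driver might hand to
 * ttm_bo_mem_space()/ttm_bo_validate(). The flag combinations are the
 * standard TTM placement flags of this kernel generation; the example_*
 * names are hypothetical. */
static uint32_t example_placements[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,	/* preferred: write-combined VRAM */
	TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,	/* fallback: cached GART */
};

static void example_fill_placement(struct ttm_placement *placement)
{
	placement->fpfn = 0;			/* no page-range restriction */
	placement->lpfn = 0;
	placement->placement = example_placements;
	placement->num_placement = 2;
	placement->busy_placement = example_placements;
	placement->num_busy_placement = 2;
}
#endif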

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       struct ttm_placement *placement,
		       bool interruptible,
		       bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

//	BUG_ON(!ttm_bo_is_reserved(bo));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}
#endif

static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
		     TTM_PL_MASK_CACHING) &&
		    (placement->placement[i] & mem->placement &
		     TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

//	BUG_ON(!ttm_bo_is_reserved(bo));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
		    (placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
//		ret = ttm_bo_move_buffer(bo, placement, interruptible,
//					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement)
{
	BUG_ON((placement->fpfn || placement->lpfn) &&
	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

	return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

//	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
//		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	bo->resv = &bo->ttm_resv;
//	reservation_object_init(bo->resv);
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
//	if (likely(!ret) &&
//	    (bo->type == ttm_bo_type_device ||
//	     bo->type == ttm_bo_type_sg))
//		ret = ttm_bo_setup_vm(bo);

//	if (likely(!ret))
//		ret = ttm_bo_validate(bo, placement, interruptible, false);

//	ttm_bo_unreserve(bo);

//	if (unlikely(ret))
//		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);
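
/*
 * Worked example (illustrative, 64-bit build with 4 KiB pages): for a
 * 1 MiB bo, npages = 256, so the accounted size is
 *
 *	ttm_round_pot(struct_size)	 driver bo struct, power-of-two rounded
 *	+ PAGE_ALIGN(256 * 8) = 4096	 the per-page pointer array
 *	+ ttm_round_pot(sizeof(struct ttm_tt))
 *
 * ttm_bo_dma_acc_size() below adds one more page-aligned array for the
 * per-page dma_addr_t entries.
 */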

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct file *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
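
#if 0
/* Usage sketch only: allocating and releasing a 64 KiB kernel bo with the
 * hypothetical placement helper sketched after ttm_bo_mem_space() above. */
static int example_create_bo(struct ttm_bo_device *bdev)
{
	struct ttm_placement placement;
	struct ttm_buffer_object *bo;
	int ret;

	example_fill_placement(&placement);
	ret = ttm_bo_create(bdev, 64 * 1024, ttm_bo_type_kernel,
			    &placement, 0, false, NULL, &bo);
	if (ret == 0)
		ttm_bo_unref(&bo);	/* drop the initial reference */
	return ret;
}
#endif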


int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	ENTER();

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	LEAVE();

	return 0;
}
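
/*
 * Illustrative driver-side usage: after ttm_bo_device_init() below has set
 * up TTM_PL_SYSTEM internally (with p_size 0), a driver registers its
 * managed domains, e.g.
 *
 *	ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 *	ttm_bo_init_mm(bdev, TTM_PL_TT,   gtt_size  >> PAGE_SHIFT);
 *
 * where p_size is the managed size in pages.
 */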

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	ENTER();

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = AllocPage();

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	atomic_set(&glob->bo_count, 0);

	LEAVE();

	return 0;

out_no_drp:
	kfree(glob);
	return ret;
}


int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	ENTER();

//	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);

//	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	spin_lock_init(&bdev->fence_lock);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	LEAVE();

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
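
#if 0
/* Illustrative only: the drm_global boilerplate a driver typically uses to
 * obtain the ttm_bo_global object that ttm_bo_device_init() needs. Field
 * names follow the drm_global/ttm_bo_global_ref API of this era;
 * example_global_init is a hypothetical name. */
static int example_global_init(struct ttm_bo_global_ref *bo_ref,
			       struct ttm_mem_global *mem_glob)
{
	struct drm_global_reference *global_ref = &bo_ref->ref;

	bo_ref->mem_glob = mem_glob;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	return drm_global_item_ref(global_ref);
}
#endif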

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}