/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom
 */

#define pr_fmt(fmt) "[TTM] " fmt

/* Include targets assumed from upstream ttm_bo.c, which this file tracks. */
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

#define pr_err(fmt, ...) \
    printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
    /* Pair with ttm_mem_io_unlock(): skip the mutex on the fastpath,
     * otherwise the unlock below would leave it held. */
    if (likely(man->io_reserve_fastpath))
        return 0;

    mutex_lock(&man->io_reserve_mutex);
    return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
    if (likely(man->io_reserve_fastpath))
        return;

    mutex_unlock(&man->io_reserve_mutex);
}

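/*
 * Minimal usage sketch for the io_reserve lock pair above, mirroring the
 * caller in ttm_bo_handle_move_mem() below (error handling elided):
 *
 *    ret = ttm_mem_io_lock(old_man, true);
 *    if (unlikely(ret != 0))
 *        goto out_err;
 *    ttm_bo_unmap_virtual_locked(bo);
 *    ttm_mem_io_unlock(old_man);
 */
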
#if 0
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem_type];

    pr_err(" has_type: %d\n", man->has_type);
    pr_err(" use_type: %d\n", man->use_type);
    pr_err(" flags: 0x%08X\n", man->flags);
    pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
    pr_err(" size: %llu\n", man->size);
    pr_err(" available_caching: 0x%08X\n", man->available_caching);
    pr_err(" default_caching: 0x%08X\n", man->default_caching);
    if (mem_type != TTM_PL_SYSTEM)
        (*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                   struct ttm_placement *placement)
{
    int i, ret, mem_type;

    pr_err("No space for %p (%lu pages, %luK, %luM)\n",
           bo, bo->mem.num_pages, bo->mem.size >> 10,
           bo->mem.size >> 20);
    for (i = 0; i < placement->num_placement; i++) {
        ret = ttm_mem_type_from_flags(placement->placement[i],
                                      &mem_type);
        if (ret)
            return;
        pr_err(" placement[%d]=0x%08X (%d)\n",
               i, placement->placement[i], mem_type);
        ttm_mem_type_debug(bo->bdev, mem_type);
    }
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
    struct ttm_bo_global *glob =
        container_of(kobj, struct ttm_bo_global, kobj);

    return snprintf(buffer, PAGE_SIZE, "%lu\n",
                    (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
    &ttm_bo_count,
    NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
    .show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
    .release = &ttm_bo_global_kobj_release,
    .sysfs_ops = &ttm_bo_global_ops,
    .default_attrs = ttm_bo_global_attrs
};
#endif

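/*
 * Memory-type index to placement-flag bit. The TTM_PL_FLAG_* values in
 * ttm_placement.h are defined as one bit per type, so for example
 * ttm_bo_type_flags(TTM_PL_TT) yields TTM_PL_FLAG_TT.
 */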
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
    return 1 << (type);
}

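/*
 * Final release of the last list reference: by this point the bo must be
 * off every list and idle, so tear down the ttm, drop the global bo count
 * and give the accounted size back to the memory glob.
 */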
static void ttm_bo_release_list(struct kref *list_kref)
{
    struct ttm_buffer_object *bo =
        container_of(list_kref, struct ttm_buffer_object, list_kref);
    struct ttm_bo_device *bdev = bo->bdev;
    size_t acc_size = bo->acc_size;

    BUG_ON(atomic_read(&bo->list_kref.refcount));
    BUG_ON(atomic_read(&bo->kref.refcount));
    BUG_ON(atomic_read(&bo->cpu_writers));
    BUG_ON(bo->sync_obj != NULL);
    BUG_ON(bo->mem.mm_node != NULL);
    BUG_ON(!list_empty(&bo->lru));
    BUG_ON(!list_empty(&bo->ddestroy));

    if (bo->ttm)
        ttm_tt_destroy(bo->ttm);
    atomic_dec(&bo->glob->bo_count);
    if (bo->destroy)
        bo->destroy(bo);
    else {
        kfree(bo);
    }
    ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

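/*
 * Puts the bo back on its memory-type LRU and, if it has backing pages,
 * on the global swap LRU; each list insertion takes a list_kref. Pinned
 * (NO_EVICT) buffers are deliberately kept off the lists. The caller is
 * expected to hold the reservation (see the disabled BUG_ON below).
 */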
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_mem_type_manager *man;

//  BUG_ON(!ttm_bo_is_reserved(bo));

    if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

        BUG_ON(!list_empty(&bo->lru));

        man = &bdev->man[bo->mem.mem_type];
        list_add_tail(&bo->lru, &man->lru);
        kref_get(&bo->list_kref);

        if (bo->ttm != NULL) {
            list_add_tail(&bo->swap, &bo->glob->swap_lru);
            kref_get(&bo->list_kref);
        }
    }
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

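/*
 * Removes the bo from the LRU and swap lists and returns the number of
 * list references the caller must drop, e.g. via ttm_bo_list_ref_sub()
 * as ttm_bo_del_sub_from_lru() does below.
 */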
int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
    int put_count = 0;

    if (!list_empty(&bo->swap)) {
        list_del_init(&bo->swap);
        ++put_count;
    }
    if (!list_empty(&bo->lru)) {
        list_del_init(&bo->lru);
        ++put_count;
    }

    /*
     * TODO: Add a driver hook to delete from
     * driver-specific LRU's here.
     */

    return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
    BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
                         bool never_free)
{
//  kref_sub(&bo->list_kref, count,
//           (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
    int put_count;

    spin_lock(&bo->glob->lru_lock);
    put_count = ttm_bo_del_from_lru(bo);
    spin_unlock(&bo->glob->lru_lock);
    ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_bo_global *glob = bo->glob;
    int ret = 0;
    uint32_t page_flags = 0;

    TTM_ASSERT_LOCKED(&bo->mutex);
    bo->ttm = NULL;

    if (bdev->need_dma32)
        page_flags |= TTM_PAGE_FLAG_DMA32;

    switch (bo->type) {
    case ttm_bo_type_device:
        if (zero_alloc)
            page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
        /* fall through: device buffers are created like kernel ones */
    case ttm_bo_type_kernel:
        bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                              page_flags, glob->dummy_read_page);
        if (unlikely(bo->ttm == NULL))
            ret = -ENOMEM;
        break;
    case ttm_bo_type_sg:
        bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                              page_flags | TTM_PAGE_FLAG_SG,
                                              glob->dummy_read_page);
        if (unlikely(bo->ttm == NULL)) {
            ret = -ENOMEM;
            break;
        }
        bo->ttm->sg = bo->sg;
        break;
    default:
        pr_err("Illegal buffer object type\n");
        ret = -EINVAL;
        break;
    }

    return ret;
}

#if 0
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible,
                                  bool no_wait_gpu)
{
    struct ttm_bo_device *bdev = bo->bdev;
    bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
    bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
    struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
    struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
    int ret = 0;

    if (old_is_pci || new_is_pci ||
        ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
        ret = ttm_mem_io_lock(old_man, true);
        if (unlikely(ret != 0))
            goto out_err;
        ttm_bo_unmap_virtual_locked(bo);
        ttm_mem_io_unlock(old_man);
    }

    /*
     * Create and bind a ttm if required.
     */

    if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
        if (bo->ttm == NULL) {
            bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
            ret = ttm_bo_add_ttm(bo, zero);
            if (ret)
                goto out_err;
        }

        ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
        if (ret)
            goto out_err;

        if (mem->mem_type != TTM_PL_SYSTEM) {
            ret = ttm_tt_bind(bo->ttm, mem);
            if (ret)
                goto out_err;
        }

        if (bo->mem.mem_type == TTM_PL_SYSTEM) {
            if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);
            bo->mem = *mem;
            mem->mm_node = NULL;
            goto moved;
        }
    }

    if (bdev->driver->move_notify)
        bdev->driver->move_notify(bo, mem);

    if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
        !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
        ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
    else if (bdev->driver->move)
        ret = bdev->driver->move(bo, evict, interruptible,
                                 no_wait_gpu, mem);
    else
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

    if (ret) {
        if (bdev->driver->move_notify) {
            struct ttm_mem_reg tmp_mem = *mem;
            *mem = bo->mem;
            bo->mem = tmp_mem;
            bdev->driver->move_notify(bo, mem);
            bo->mem = *mem;
            *mem = tmp_mem;
        }

        goto out_err;
    }

moved:
    if (bo->evicted) {
        ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
        if (ret)
            pr_err("Can not flush read caches\n");
        bo->evicted = false;
    }

    if (bo->mem.mm_node) {
        bo->offset = (bo->mem.start << PAGE_SHIFT) +
            bdev->man[bo->mem.mem_type].gpu_offset;
        bo->cur_placement = bo->mem.placement;
    } else
        bo->offset = 0;

    return 0;

out_err:
    new_man = &bdev->man[bo->mem.mem_type];
    if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
        ttm_tt_unbind(bo->ttm);
        ttm_tt_destroy(bo->ttm);
        bo->ttm = NULL;
    }

    return ret;
}

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
    if (bo->bdev->driver->move_notify)
        bo->bdev->driver->move_notify(bo, NULL);

    if (bo->ttm) {
        ttm_tt_unbind(bo->ttm);
        ttm_tt_destroy(bo->ttm);
        bo->ttm = NULL;
    }
    ttm_bo_mem_put(bo, &bo->mem);

    ww_mutex_unlock(&bo->resv->lock);
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_bo_global *glob = bo->glob;
    struct ttm_bo_driver *driver = bdev->driver;
    void *sync_obj = NULL;
    int put_count;
    int ret;

    spin_lock(&glob->lru_lock);
    ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

    spin_lock(&bdev->fence_lock);
    (void) ttm_bo_wait(bo, false, false, true);
    if (!ret && !bo->sync_obj) {
        spin_unlock(&bdev->fence_lock);
        put_count = ttm_bo_del_from_lru(bo);

        spin_unlock(&glob->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        ttm_bo_list_ref_sub(bo, put_count, true);

        return;
    }
    if (bo->sync_obj)
        sync_obj = driver->sync_obj_ref(bo->sync_obj);
    spin_unlock(&bdev->fence_lock);

    if (!ret) {

        /*
         * Make NO_EVICT bos immediately available to
         * shrinkers, now that they are queued for
         * destruction.
         */
        if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
            bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
            ttm_bo_add_to_lru(bo);
        }

        ww_mutex_unlock(&bo->resv->lock);
    }

    kref_get(&bo->list_kref);
    list_add_tail(&bo->ddestroy, &bdev->ddestroy);
    spin_unlock(&glob->lru_lock);

    if (sync_obj) {
        driver->sync_obj_flush(sync_obj);
        driver->sync_obj_unref(&sync_obj);
    }
    schedule_delayed_work(&bdev->wq,
                          ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                                          bool interruptible,
                                          bool no_wait_gpu)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_bo_driver *driver = bdev->driver;
    struct ttm_bo_global *glob = bo->glob;
    int put_count;
    int ret;

    spin_lock(&bdev->fence_lock);
    ret = ttm_bo_wait(bo, false, false, true);

    if (ret && !no_wait_gpu) {
        void *sync_obj;

        /*
         * Take a reference to the fence and unreserve,
         * at this point the buffer should be dead, so
         * no new sync objects can be attached.
         */
        sync_obj = driver->sync_obj_ref(bo->sync_obj);
        spin_unlock(&bdev->fence_lock);

        ww_mutex_unlock(&bo->resv->lock);
        spin_unlock(&glob->lru_lock);

        ret = driver->sync_obj_wait(sync_obj, false, interruptible);
        driver->sync_obj_unref(&sync_obj);
        if (ret)
            return ret;

        /*
         * remove sync_obj with ttm_bo_wait, the wait should be
         * finished, and no new wait object should have been added.
         */
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, false, true);
        WARN_ON(ret);
        spin_unlock(&bdev->fence_lock);
        if (ret)
            return ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

        /*
         * We raced, and lost, someone else holds the reservation now,
         * and is probably busy in ttm_bo_cleanup_memtype_use.
         *
         * Even if it's not the case, because we finished waiting any
         * delayed destruction would succeed, so just return success
         * here.
         */
        if (ret) {
            spin_unlock(&glob->lru_lock);
            return 0;
        }
    } else
        spin_unlock(&bdev->fence_lock);

    if (ret || unlikely(list_empty(&bo->ddestroy))) {
        ww_mutex_unlock(&bo->resv->lock);
        spin_unlock(&glob->lru_lock);
        return ret;
    }

    put_count = ttm_bo_del_from_lru(bo);
    list_del_init(&bo->ddestroy);
    ++put_count;

    spin_unlock(&glob->lru_lock);
    ttm_bo_cleanup_memtype_use(bo);

    ttm_bo_list_ref_sub(bo, put_count, true);

    return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
    struct ttm_bo_global *glob = bdev->glob;
    struct ttm_buffer_object *entry = NULL;
    int ret = 0;

    spin_lock(&glob->lru_lock);
    if (list_empty(&bdev->ddestroy))
        goto out_unlock;

    entry = list_first_entry(&bdev->ddestroy,
        struct ttm_buffer_object, ddestroy);
    kref_get(&entry->list_kref);

    for (;;) {
        struct ttm_buffer_object *nentry = NULL;

        if (entry->ddestroy.next != &bdev->ddestroy) {
            nentry = list_first_entry(&entry->ddestroy,
                struct ttm_buffer_object, ddestroy);
            kref_get(&nentry->list_kref);
        }

        ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
        if (remove_all && ret) {
            spin_unlock(&glob->lru_lock);
            ret = ttm_bo_reserve_nolru(entry, false, false,
                                       false, 0);
            spin_lock(&glob->lru_lock);
        }

        if (!ret)
            ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
                                                 !remove_all);
        else
            spin_unlock(&glob->lru_lock);

        kref_put(&entry->list_kref, ttm_bo_release_list);
        entry = nentry;

        if (ret || !entry)
            goto out;

        spin_lock(&glob->lru_lock);
        if (list_empty(&entry->ddestroy))
            break;
    }

out_unlock:
    spin_unlock(&glob->lru_lock);
out:
    if (entry)
        kref_put(&entry->list_kref, ttm_bo_release_list);
    return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
    struct ttm_bo_device *bdev =
        container_of(work, struct ttm_bo_device, wq.work);

    if (ttm_bo_delayed_delete(bdev, false)) {
        schedule_delayed_work(&bdev->wq,
                              ((HZ / 100) < 1) ? 1 : HZ / 100);
    }
}
#endif

static void ttm_bo_release(struct kref *kref)
{
    struct ttm_buffer_object *bo =
        container_of(kref, struct ttm_buffer_object, kref);
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

    drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
    ttm_mem_io_lock(man, false);
//  ttm_mem_io_free_vm(bo);
    ttm_mem_io_unlock(man);
//  ttm_bo_cleanup_refs_or_queue(bo);
//  kref_put(&bo->list_kref, ttm_bo_release_list);
}

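/*
 * Drop the caller's reference and clear the caller's pointer. A minimal
 * usage sketch (the bo pointer is illustrative):
 *
 *    struct ttm_buffer_object *bo = ...;
 *    ttm_bo_unref(&bo);    // bo is NULL afterwards
 */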
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
    struct ttm_buffer_object *bo = *p_bo;

    *p_bo = NULL;
    kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

#if 0
int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
    return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
    if (resched)
        schedule_delayed_work(&bdev->wq,
                              ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                        bool no_wait_gpu)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_mem_reg evict_mem;
    struct ttm_placement placement;
    int ret = 0;

    spin_lock(&bdev->fence_lock);
    ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
    spin_unlock(&bdev->fence_lock);

    if (unlikely(ret != 0)) {
        if (ret != -ERESTARTSYS) {
            pr_err("Failed to expire sync object before buffer eviction\n");
        }
        goto out;
    }

//  BUG_ON(!ttm_bo_is_reserved(bo));

    evict_mem = bo->mem;
    evict_mem.mm_node = NULL;
    evict_mem.bus.io_reserved_vm = false;
    evict_mem.bus.io_reserved_count = 0;

    placement.fpfn = 0;
    placement.lpfn = 0;
    placement.num_placement = 0;
    placement.num_busy_placement = 0;
    bdev->driver->evict_flags(bo, &placement);
    ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
                           no_wait_gpu);
    if (ret) {
        if (ret != -ERESTARTSYS) {
            pr_err("Failed to find memory space for buffer 0x%p eviction\n",
                   bo);
            ttm_bo_mem_space_debug(bo, &placement);
        }
        goto out;
    }

    ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                 no_wait_gpu);
    if (ret) {
        if (ret != -ERESTARTSYS)
            pr_err("Buffer eviction failed\n");
        ttm_bo_mem_put(bo, &evict_mem);
        goto out;
    }
    bo->evicted = true;
out:
    return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                               uint32_t mem_type,
                               bool interruptible,
                               bool no_wait_gpu)
{
    struct ttm_bo_global *glob = bdev->glob;
    struct ttm_mem_type_manager *man = &bdev->man[mem_type];
    struct ttm_buffer_object *bo;
    int ret = -EBUSY, put_count;

    spin_lock(&glob->lru_lock);
    list_for_each_entry(bo, &man->lru, lru) {
        ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
        if (!ret)
            break;
    }

    if (ret) {
        spin_unlock(&glob->lru_lock);
        return ret;
    }

    kref_get(&bo->list_kref);

    if (!list_empty(&bo->ddestroy)) {
        ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
                                             no_wait_gpu);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
    }

    put_count = ttm_bo_del_from_lru(bo);
    spin_unlock(&glob->lru_lock);

    BUG_ON(ret != 0);

    ttm_bo_list_ref_sub(bo, put_count, true);

    ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
    ttm_bo_unreserve(bo);

    kref_put(&bo->list_kref, ttm_bo_release_list);
    return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
    struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

    if (mem->mm_node)
        (*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                  uint32_t mem_type,
                                  struct ttm_placement *placement,
                                  struct ttm_mem_reg *mem,
                                  bool interruptible,
                                  bool no_wait_gpu)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_mem_type_manager *man = &bdev->man[mem_type];
    int ret;

    do {
        ret = (*man->func->get_node)(man, bo, placement, mem);
        if (unlikely(ret != 0))
            return ret;
        if (mem->mm_node)
            break;
        ret = ttm_mem_evict_first(bdev, mem_type,
                                  interruptible, no_wait_gpu);
        if (unlikely(ret != 0))
            return ret;
    } while (1);
    if (mem->mm_node == NULL)
        return -ENOMEM;
    mem->mem_type = mem_type;
    return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
    uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
    uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

    /**
     * Keep current caching if possible.
     */

    if ((cur_placement & caching) != 0)
        result |= (cur_placement & caching);
    else if ((man->default_caching & caching) != 0)
        result |= man->default_caching;
    else if ((TTM_PL_FLAG_CACHED & caching) != 0)
        result |= TTM_PL_FLAG_CACHED;
    else if ((TTM_PL_FLAG_WC & caching) != 0)
        result |= TTM_PL_FLAG_WC;
    else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
        result |= TTM_PL_FLAG_UNCACHED;

    return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
{
    uint32_t cur_flags = ttm_bo_type_flags(mem_type);

    if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
        return false;

    if ((proposed_placement & man->available_caching) == 0)
        return false;

    cur_flags |= (proposed_placement & man->available_caching);

    *masked_placement = cur_flags;
    return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement,
                     struct ttm_mem_reg *mem,
                     bool interruptible,
                     bool no_wait_gpu)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_mem_type_manager *man;
    uint32_t mem_type = TTM_PL_SYSTEM;
    uint32_t cur_flags = 0;
    bool type_found = false;
    bool type_ok = false;
    bool has_erestartsys = false;
    int i, ret;

    mem->mm_node = NULL;
    for (i = 0; i < placement->num_placement; ++i) {
        ret = ttm_mem_type_from_flags(placement->placement[i],
                                      &mem_type);
        if (ret)
            return ret;
        man = &bdev->man[mem_type];

        type_ok = ttm_bo_mt_compatible(man,
                                       mem_type,
                                       placement->placement[i],
                                       &cur_flags);

        if (!type_ok)
            continue;

        cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                          cur_flags);
        /*
         * Use the access and other non-mapping-related flag bits from
         * the memory placement flags to the current flags
         */
        ttm_flag_masked(&cur_flags, placement->placement[i],
                        ~TTM_PL_MASK_MEMTYPE);

        if (mem_type == TTM_PL_SYSTEM)
            break;

        if (man->has_type && man->use_type) {
            type_found = true;
            ret = (*man->func->get_node)(man, bo, placement, mem);
            if (unlikely(ret))
                return ret;
        }
        if (mem->mm_node)
            break;
    }

    if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
        mem->mem_type = mem_type;
        mem->placement = cur_flags;
        return 0;
    }

    if (!type_found)
        return -EINVAL;

    for (i = 0; i < placement->num_busy_placement; ++i) {
        ret = ttm_mem_type_from_flags(placement->busy_placement[i],
                                      &mem_type);
        if (ret)
            return ret;
        man = &bdev->man[mem_type];
        if (!man->has_type)
            continue;
        if (!ttm_bo_mt_compatible(man,
                                  mem_type,
                                  placement->busy_placement[i],
                                  &cur_flags))
            continue;

        cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                          cur_flags);
        /*
         * Use the access and other non-mapping-related flag bits from
         * the memory placement flags to the current flags
         */
        ttm_flag_masked(&cur_flags, placement->busy_placement[i],
                        ~TTM_PL_MASK_MEMTYPE);

        if (mem_type == TTM_PL_SYSTEM) {
            mem->mem_type = mem_type;
            mem->placement = cur_flags;
            mem->mm_node = NULL;
            return 0;
        }

        ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
                                     interruptible, no_wait_gpu);
        if (ret == 0 && mem->mm_node) {
            mem->placement = cur_flags;
            return 0;
        }
        if (ret == -ERESTARTSYS)
            has_erestartsys = true;
    }
    ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
    return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement,
                              bool interruptible,
                              bool no_wait_gpu)
{
    int ret = 0;
    struct ttm_mem_reg mem;
    struct ttm_bo_device *bdev = bo->bdev;

//  BUG_ON(!ttm_bo_is_reserved(bo));

    /*
     * FIXME: It's possible to pipeline buffer moves.
     * Have the driver move function wait for idle when necessary,
     * instead of doing it here.
     */
    spin_lock(&bdev->fence_lock);
    ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
    spin_unlock(&bdev->fence_lock);
    if (ret)
        return ret;
    mem.num_pages = bo->num_pages;
    mem.size = mem.num_pages << PAGE_SHIFT;
    mem.page_alignment = bo->mem.page_alignment;
    mem.bus.io_reserved_vm = false;
    mem.bus.io_reserved_count = 0;
    /*
     * Determine where to move the buffer.
     */
    ret = ttm_bo_mem_space(bo, placement, &mem,
                           interruptible, no_wait_gpu);
    if (ret)
        goto out_unlock;
    ret = ttm_bo_handle_move_mem(bo, &mem, false,
                                 interruptible, no_wait_gpu);
out_unlock:
    if (ret && mem.mm_node)
        ttm_bo_mem_put(bo, &mem);
    return ret;
}
#endif

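/*
 * Check whether the buffer's current memory placement already satisfies
 * @placement; on success the matching placement flags are returned in
 * @new_flags so the caller can merge the non-memtype bits.
 */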
static bool ttm_bo_mem_compat(struct ttm_placement *placement,
                              struct ttm_mem_reg *mem,
                              uint32_t *new_flags)
{
    int i;

    if (mem->mm_node && placement->lpfn != 0 &&
        (mem->start < placement->fpfn ||
         mem->start + mem->num_pages > placement->lpfn))
        return false;

    for (i = 0; i < placement->num_placement; i++) {
        *new_flags = placement->placement[i];
        if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
            (*new_flags & mem->placement & TTM_PL_MASK_MEM))
            return true;
    }

    for (i = 0; i < placement->num_busy_placement; i++) {
        *new_flags = placement->busy_placement[i];
        if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
            (*new_flags & mem->placement & TTM_PL_MASK_MEM))
            return true;
    }

    return false;
}

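/*
 * Validate the buffer against @placement. In this port buffer moves are
 * disabled (see the commented-out ttm_bo_move_buffer() call below), so an
 * incompatible placement is effectively left in place; a compatible one
 * only has its access flags refreshed, and a system-memory bo gets a ttm
 * allocated on demand.
 */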
int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    bool interruptible,
                    bool no_wait_gpu)
{
    int ret = 0;    /* initialized: the move below is disabled in this port */
    uint32_t new_flags;

//  BUG_ON(!ttm_bo_is_reserved(bo));
    /* Check that range is valid */
    if (placement->lpfn || placement->fpfn)
        if (placement->fpfn > placement->lpfn ||
            (placement->lpfn - placement->fpfn) < bo->num_pages)
            return -EINVAL;
    /*
     * Check whether we need to move buffer.
     */
    if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
//      ret = ttm_bo_move_buffer(bo, placement, interruptible,
//                               no_wait_gpu);
        if (ret)
            return ret;
    } else {
        /*
         * Use the access and other non-mapping-related flag bits from
         * the compatible memory placement flags to the active flags
         */
        ttm_flag_masked(&bo->mem.placement, new_flags,
                        ~TTM_PL_MASK_MEMTYPE);
    }
    /*
     * We might need to add a TTM.
     */
    if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
        ret = ttm_bo_add_ttm(bo, true);
        if (ret)
            return ret;
    }
    return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
                           struct ttm_placement *placement)
{
    BUG_ON((placement->fpfn || placement->lpfn) &&
           (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

    return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                bool interruptible,
                struct file *persistent_swap_storage,
                size_t acc_size,
                struct sg_table *sg,
                void (*destroy) (struct ttm_buffer_object *))
{
    int ret = 0;
    unsigned long num_pages;
    struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
    bool locked;

//  ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
    if (ret) {
        pr_err("Out of kernel memory\n");
        if (destroy)
            (*destroy)(bo);
        else
            kfree(bo);
        return -ENOMEM;
    }

    num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    if (num_pages == 0) {
        pr_err("Illegal buffer object size\n");
        if (destroy)
            (*destroy)(bo);
        else
            kfree(bo);
//      ttm_mem_global_free(mem_glob, acc_size);
        return -EINVAL;
    }
    bo->destroy = destroy;

    kref_init(&bo->kref);
    kref_init(&bo->list_kref);
    atomic_set(&bo->cpu_writers, 0);
    INIT_LIST_HEAD(&bo->lru);
    INIT_LIST_HEAD(&bo->ddestroy);
    INIT_LIST_HEAD(&bo->swap);
    INIT_LIST_HEAD(&bo->io_reserve_lru);
    mutex_init(&bo->wu_mutex);
    bo->bdev = bdev;
    bo->glob = bdev->glob;
    bo->type = type;
    bo->num_pages = num_pages;
    bo->mem.size = num_pages << PAGE_SHIFT;
    bo->mem.mem_type = TTM_PL_SYSTEM;
    bo->mem.num_pages = bo->num_pages;
    bo->mem.mm_node = NULL;
    bo->mem.page_alignment = page_alignment;
    bo->mem.bus.io_reserved_vm = false;
    bo->mem.bus.io_reserved_count = 0;
    bo->priv_flags = 0;
    bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
    bo->persistent_swap_storage = persistent_swap_storage;
    bo->acc_size = acc_size;
    bo->sg = sg;
    bo->resv = &bo->ttm_resv;
//  reservation_object_init(bo->resv);
    atomic_inc(&bo->glob->bo_count);
    drm_vma_node_reset(&bo->vma_node);

    ret = ttm_bo_check_placement(bo, placement);

    /*
     * For ttm_bo_type_device buffers, allocate
     * address space from the device.
     */
//  if (likely(!ret) &&
//      (bo->type == ttm_bo_type_device ||
//       bo->type == ttm_bo_type_sg))
//      ret = ttm_bo_setup_vm(bo);

//  if (likely(!ret))
//      ret = ttm_bo_validate(bo, placement, interruptible, false);

//  ttm_bo_unreserve(bo);

//  if (unlikely(ret))
//      ttm_bo_unref(&bo);

    return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

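/*
 * Worked example of the accounting below, assuming 4 KiB pages and 8-byte
 * pointers: for a 1 MiB bo, npages = 256, so the estimate is
 * ttm_round_pot(struct_size), plus PAGE_ALIGN(256 * 8) = 4096 bytes for
 * the page array, plus ttm_round_pot(sizeof(struct ttm_tt)).
 */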
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
                       unsigned long bo_size,
                       unsigned struct_size)
{
    unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
    size_t size = 0;

    size += ttm_round_pot(struct_size);
    size += PAGE_ALIGN(npages * sizeof(void *));
    size += ttm_round_pot(sizeof(struct ttm_tt));
    return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
                           unsigned long bo_size,
                           unsigned struct_size)
{
    unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
    size_t size = 0;

    size += ttm_round_pot(struct_size);
    size += PAGE_ALIGN(npages * sizeof(void *));
    size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
    size += ttm_round_pot(sizeof(struct ttm_dma_tt));
    return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

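/*
 * Minimal call sketch (bdev and placement come from the driver; the names
 * are illustrative):
 *
 *    struct ttm_buffer_object *bo;
 *    int ret;
 *
 *    ret = ttm_bo_create(bdev, size, ttm_bo_type_kernel, &placement,
 *                        0, false, NULL, &bo);
 *    if (ret == 0)
 *        ... use bo, then drop it with ttm_bo_unref(&bo);
 */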
int ttm_bo_create(struct ttm_bo_device *bdev,
                  unsigned long size,
                  enum ttm_bo_type type,
                  struct ttm_placement *placement,
                  uint32_t page_alignment,
                  bool interruptible,
                  struct file *persistent_swap_storage,
                  struct ttm_buffer_object **p_bo)
{
    struct ttm_buffer_object *bo;
    size_t acc_size;
    int ret;

    bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    if (unlikely(bo == NULL))
        return -ENOMEM;

    acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
    ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                      interruptible, persistent_swap_storage, acc_size,
                      NULL, NULL);
    if (likely(ret == 0))
        *p_bo = bo;

    return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

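/*
 * Initialize the memory manager for memory type @type; @p_size is the
 * managed size in pages. TTM_PL_SYSTEM is set up with p_size 0 by
 * ttm_bo_device_init() below; other types are initialized by the driver.
 */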
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                   unsigned long p_size)
{
    int ret = -EINVAL;
    struct ttm_mem_type_manager *man;

    ENTER();

    BUG_ON(type >= TTM_NUM_MEM_TYPES);
    man = &bdev->man[type];
    BUG_ON(man->has_type);
    man->io_reserve_fastpath = true;
    man->use_io_reserve_lru = false;
    mutex_init(&man->io_reserve_mutex);
    INIT_LIST_HEAD(&man->io_reserve_lru);

    ret = bdev->driver->init_mem_type(bdev, type, man);
    if (ret)
        return ret;
    man->bdev = bdev;

    ret = 0;
    if (type != TTM_PL_SYSTEM) {
        ret = (*man->func->init)(man, p_size);
        if (ret)
            return ret;
    }
    man->has_type = true;
    man->use_type = true;
    man->size = p_size;

    INIT_LIST_HEAD(&man->lru);

    LEAVE();

    return 0;
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
    struct ttm_bo_global *glob = ref->object;

}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
    struct ttm_bo_global_ref *bo_ref =
        container_of(ref, struct ttm_bo_global_ref, ref);
    struct ttm_bo_global *glob = ref->object;
    int ret;

    ENTER();

    mutex_init(&glob->device_list_mutex);
    spin_lock_init(&glob->lru_lock);
    glob->mem_glob = bo_ref->mem_glob;
    glob->dummy_read_page = AllocPage();

    if (unlikely(glob->dummy_read_page == NULL)) {
        ret = -ENOMEM;
        goto out_no_drp;
    }

    INIT_LIST_HEAD(&glob->swap_lru);
    INIT_LIST_HEAD(&glob->device_list);

    atomic_set(&glob->bo_count, 0);

    LEAVE();

    return 0;

out_no_drp:
    kfree(glob);
    return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

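/*
 * Bring-up order sketch for a driver (names illustrative): take the global
 * references so ttm_bo_global_init() runs, then call ttm_bo_device_init(),
 * then ttm_bo_init_mm() once per GPU memory type:
 *
 *    ret = ttm_bo_device_init(&dev_priv->bdev, glob, &driver_funcs,
 *                             DRM_FILE_PAGE_OFFSET, need_dma32);
 *    if (ret == 0)
 *        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
 *                             vram_size >> PAGE_SHIFT);
 */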
int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_global *glob,
                       struct ttm_bo_driver *driver,
                       uint64_t file_page_offset,
                       bool need_dma32)
{
    int ret = -EINVAL;

    ENTER();

    bdev->driver = driver;

    memset(bdev->man, 0, sizeof(bdev->man));

    /*
     * Initialize the system memory buffer type.
     * Other types need to be driver / IOCTL initialized.
     */
    ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
    if (unlikely(ret != 0))
        goto out_no_sys;

    drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
                                0x10000000);
//  INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
    INIT_LIST_HEAD(&bdev->ddestroy);
    bdev->dev_mapping = NULL;
    bdev->glob = glob;
    bdev->need_dma32 = need_dma32;
    bdev->val_seq = 0;
    spin_lock_init(&bdev->fence_lock);
    mutex_lock(&glob->device_list_mutex);
    list_add_tail(&bdev->device_list, &glob->device_list);
    mutex_unlock(&glob->device_list_mutex);

    LEAVE();

    return 0;
out_no_sys:
    return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

    if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
        if (mem->mem_type == TTM_PL_SYSTEM)
            return false;

        if (man->flags & TTM_MEMTYPE_FLAG_CMA)
            return false;

        if (mem->placement & TTM_PL_FLAG_CACHED)
            return false;
    }
    return true;
}

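/*
 * In this port ttm_bo_wait() is a stub: fences are not used, so a bo with
 * no sync_obj is trivially idle and the function always returns 0.
 */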
int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool lazy, bool interruptible, bool no_wait)
{
    struct ttm_bo_driver *driver = bo->bdev->driver;
    struct ttm_bo_device *bdev = bo->bdev;
    void *sync_obj;
    int ret = 0;

    if (likely(bo->sync_obj == NULL))
        return 0;

    return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

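/*
 * CPU write grab/release pair. The grab side is stubbed in this port
 * (upstream it waits for idle and increments cpu_writers); the release
 * side still performs the matching atomic_dec().
 */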
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
    struct ttm_bo_device *bdev = bo->bdev;
    int ret = 0;

    /*
     * Using ttm_bo_reserve makes sure the lru lists are updated.
     */

    return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
    atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);