/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (place->flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}
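
/*
 * Example (illustrative sketch, not part of the original file): because
 * TTM_PL_FLAG_VRAM is defined as (1 << TTM_PL_VRAM), a placement entry
 * carrying that flag maps straight back to its memory type index:
 *
 *	uint32_t mem_type;
 *	struct ttm_place place = { .flags = TTM_PL_FLAG_VRAM };
 *
 *	if (!ttm_mem_type_from_place(&place, &mem_type))
 *		;	// mem_type == TTM_PL_VRAM
 */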

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	spin_lock(&bo->glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i;

	fobj = reservation_object_get_list(bo->resv);
	fence = reservation_object_get_excl(bo->resv);
	if (fence && !fence->ops->signaled)
		fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			fence_enable_sw_signaling(fence);
	}
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, false, NULL);

	if (!ret) {
		if (!ttm_bo_wait(bo, false, false, true)) {
			put_count = ttm_bo_del_from_lru(bo);

			spin_unlock(&glob->lru_lock);
			ttm_bo_cleanup_memtype_use(bo);

			ttm_bo_list_ref_sub(bo, put_count, true);

			return;
		} else
			ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

//	schedule_delayed_work(&bdev->wq,
//			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(bo->resv,
							   true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		ret = __ttm_bo_reserve(bo, false, true, false, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, false, NULL);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = __ttm_bo_reserve(entry, false, false,
					       false, NULL);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
//		ret = ttm_mem_evict_first(bdev, mem_type,
//					  interruptible, no_wait_gpu);
//		if (unlikely(ret != 0))
//			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}
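
/*
 * Worked example (illustrative, not part of the original file): if the
 * buffer is currently write-combined (cur_placement contains
 * TTM_PL_FLAG_WC) and the proposed placement allows
 * TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC, then caching == CACHED | WC and
 * the first branch keeps TTM_PL_FLAG_WC, so no cache transition is needed.
 */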

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
					       &cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_mem_reg *mem,
		       uint32_t *new_flags)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *heap = &placement->placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < placement->num_busy_placement; i++) {
		const struct ttm_place *heap = &placement->busy_placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	bool locked;

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
		reservation_object_init(&bo->ttm_resv);
	}
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (!resv)
		ttm_bo_unreserve(bo);

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);
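
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * typically sizes the accounting for its wrapper object before calling
 * ttm_bo_init(); "struct my_bo", "mybo" and "my_bo_destroy" are
 * hypothetical driver names used only for illustration:
 *
 *	size_t acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct my_bo));
 *
 *	ret = ttm_bo_init(bdev, &mybo->tbo, size, ttm_bo_type_device,
 *			  placement, 0, false, NULL, acc_size,
 *			  NULL, NULL, my_bo_destroy);
 */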

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct file *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
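
/*
 * Usage sketch (illustrative, not part of the original file): allocate a
 * page-sized kernel-internal BO and drop the reference again; "my_bdev"
 * and "my_placement" are assumed to exist in the caller:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret = ttm_bo_create(my_bdev, PAGE_SIZE, ttm_bo_type_kernel,
 *				&my_placement, 0, false, NULL, &bo);
 *	if (ret == 0)
 *		ttm_bo_unref(&bo);
 */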

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
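
/*
 * Example (illustrative, not part of the original file): after
 * ttm_bo_device_init() has set up TTM_PL_SYSTEM, a driver registers its
 * other memory types itself; the size is given in pages, and "vram_size"
 * is a hypothetical driver value in bytes:
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 */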

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	atomic_set(&glob->bo_count, 0);

	return 0;

out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}

EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct reservation_object_list *fobj;
	struct reservation_object *resv;
	struct fence *excl;
	long timeout = 15 * HZ;
	int i;

	resv = bo->resv;
	fobj = reservation_object_get_list(resv);
	excl = reservation_object_get_excl(resv);
	if (excl) {
		if (!fence_is_signaled(excl)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(excl,
						     interruptible, timeout);
		}
	}

	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
		struct fence *fence;
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(resv));

		if (!fence_is_signaled(fence)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(fence,
						     interruptible, timeout);
		}
	}

	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(resv, NULL);
	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, false, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
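
/*
 * Usage sketch (illustrative, not part of the original file): CPU writes
 * are bracketed by a grab/release pair so that concurrent eviction sees a
 * consistent cpu_writers count while the CPU touches the buffer:
 *
 *	if (ttm_bo_synccpu_write_grab(bo, false) == 0) {
 *		// ... CPU writes to the buffer ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */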

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}
||
1319 | }>>><>>>>>><>>>>>>>><>><>><>><>><>=> |