/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom
 */

#define pr_fmt(fmt) "[TTM] " fmt

/* NOTE: the bracketed header names were lost in extraction; this list is an
 * assumption restored from upstream ttm_bo.c and may differ in this port. */
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (place->flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}
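
/*
 * Illustrative example (not from the original source): for a place with
 * flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC, the loop above scans only
 * the low memory-type bits (0..TTM_PL_PRIV5), so the first set type bit
 * wins and *mem_type becomes TTM_PL_VRAM; the caching bits live higher
 * up and are handled separately by ttm_bo_select_caching() below.
 */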

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);
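
/*
 * Note on the refcounting above: a bo on the per-memory-type LRU holds
 * one list_kref, and a bo with backing pages (bo->ttm != NULL) holds a
 * second one for the global swap LRU. ttm_bo_del_from_lru() below can
 * therefore return a put_count of 0, 1 or 2 depending on which lists
 * the bo was actually on.
 */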

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
//	kref_sub(&bo->list_kref, count,
//		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}
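
/*
 * In this port the actual reference drop is stubbed out (see the
 * commented-out kref_sub() above). Upstream drops @count references in
 * one go; with @never_free true, reaching zero is considered a bug and
 * funnels into ttm_bo_ref_bug() instead of ttm_bo_release_list().
 */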

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	spin_lock(&bo->glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
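
/*
 * Note the deliberate fallthrough in the switch above:
 * ttm_bo_type_device buffers take the ttm_bo_type_kernel path after
 * optionally requesting zeroed pages via TTM_PAGE_FLAG_ZERO_ALLOC.
 */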

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
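
/*
 * Move-path selection in ttm_bo_handle_move_mem(), in order:
 *   1. both old and new memory types are non-FIXED (system/TT):
 *      ttm_bo_move_ttm() just rebinds the pages;
 *   2. the driver supplies a hardware copy hook: bdev->driver->move()
 *      (typically a GPU blit);
 *   3. otherwise fall back to ttm_bo_move_memcpy() over the CPU.
 */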

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock (&bo->resv->lock);
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i;

	fobj = reservation_object_get_list(bo->resv);
	fence = reservation_object_get_excl(bo->resv);
	if (fence && !fence->ops->signaled)
		fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			fence_enable_sw_signaling(fence);
	}
}
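
/*
 * Enabling software signaling on the exclusive and all shared fences
 * here lets them signal without anyone explicitly waiting on them, so
 * a bo queued on the ddestroy list can become idle, and be reaped by
 * ttm_bo_delayed_delete(), on its own.
 */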
|

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, false, NULL);

	if (!ret) {
		if (!ttm_bo_wait(bo, false, false, true)) {
			put_count = ttm_bo_del_from_lru(bo);

			spin_unlock(&glob->lru_lock);
			ttm_bo_cleanup_memtype_use(bo);

			ttm_bo_list_ref_sub(bo, put_count, true);

			return;
		} else
			ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

//	schedule_delayed_work(&bdev->wq,
//			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(bo->resv,
							   true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		ret = __ttm_bo_reserve(bo, false, true, false, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}
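
/*
 * The unreserved wait above uses a hard 30 second timeout:
 * reservation_object_wait_timeout_rcu() returns a negative value on
 * error, 0 if the fences never signaled (mapped to -EBUSY), and a
 * positive value once idle. Failing to re-reserve afterwards is
 * treated as success, since whoever beat us to the reservation is
 * already performing the destruction.
 */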

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, false, NULL);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = __ttm_bo_reserve(entry, false, false,
					       false, NULL);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
//		ret = ttm_mem_evict_first(bdev, mem_type,
//					  interruptible, no_wait_gpu);
//		if (unlikely(ret != 0))
//			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}
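
/*
 * The eviction step of this loop (ttm_mem_evict_first()) is disabled
 * in this port, so ttm_bo_mem_force_space() only succeeds if the
 * manager hands out a node on the first try; upstream keeps evicting
 * LRU buffers from @mem_type until the allocation fits. Caution: if
 * get_node() returns 0 without a node, the loop as written would spin.
 */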

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}
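
/*
 * Worked example (illustrative only): if the bo is currently
 * write-combined (cur_placement contains TTM_PL_FLAG_WC) and the
 * proposed placement allows TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC, the
 * first branch keeps TTM_PL_FLAG_WC and avoids a caching transition;
 * only if the current mode is not allowed do we fall back to the
 * manager's default, then CACHED, WC and UNCACHED in that order.
 */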

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, place, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
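
/*
 * ttm_bo_mem_space() is a two-pass search: the first loop only takes
 * free space from the preferred placement->placement array, the second
 * retries placement->busy_placement and may call
 * ttm_bo_mem_force_space() to evict. TTM_PL_SYSTEM is special in both
 * passes because system memory needs no manager node (mm_node stays
 * NULL).
 */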

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *heap = &placement->placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < placement->num_busy_placement; i++) {
		const struct ttm_place *heap = &placement->busy_placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}
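
/*
 * The range test above treats heap->lpfn == 0 as "no upper bound".
 * Example (illustrative): with fpfn = 0 and lpfn = 256, a node at
 * mem->start = 240 with num_pages = 32 is rejected (240 + 32 > 256)
 * and the next placement entry is tried.
 */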

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
908 | 890 | ||

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	bool locked;

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
		reservation_object_init(&bo->ttm_resv);
	}
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/*
	 * Passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (!resv)
		ttm_bo_unreserve(bo);

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
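
/*
 * Note on the resv parameter: when a non-NULL reservation object is
 * passed in, the caller must already hold its ww_mutex (enforced by the
 * lockdep assert above) and the BO is left reserved on return.  On
 * failure ttm_bo_init() drops the initial reference itself, so the
 * destroy callback (or kfree) has already run and the caller must not
 * touch the object again.  A minimal call sketch, with hypothetical
 * mydrv names:
 *
 *	ret = ttm_bo_init(bdev, &mbo->tbo, size, ttm_bo_type_kernel,
 *			  &placement, 0, false, NULL, acc_size,
 *			  NULL, NULL, mydrv_bo_destroy);
 *	if (ret)
 *		return ret;	(mbo was already freed via mydrv_bo_destroy)
 */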

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);
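
/*
 * Usage sketch: drivers compute acc_size up front so TTM can account the
 * kernel-side metadata (wrapper struct, page array and ttm_tt) against
 * the global memory limit; the _dma_ variant is for backends built on
 * struct ttm_dma_tt.  struct mydrv_bo is hypothetical:
 *
 *	size_t acc_size = ttm_bo_dma_acc_size(bdev, size,
 *					      sizeof(struct mydrv_bo));
 */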

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
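
/*
 * Usage sketch: once ttm_bo_device_init() has set up the mandatory
 * TTM_PL_SYSTEM manager, a driver registers its own memory types; note
 * that p_size is in pages.  vram_size is a hypothetical driver value:
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 */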

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	/* Global teardown is currently a no-op. */
	(void)glob;
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	atomic_set(&glob->bo_count, 0);

	return 0;

out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
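
/*
 * Usage sketch, with hypothetical mydrv names: mapping is the device's
 * struct address_space and the file page offset is a per-driver constant
 * (in-tree drivers call it DRM_FILE_PAGE_OFFSET):
 *
 *	ret = ttm_bo_device_init(&mydrv->bdev, glob, &mydrv_bo_driver,
 *				 mapping, DRM_FILE_PAGE_OFFSET, true);
 */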

/*
 * Buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
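
/*
 * Drivers call ttm_bo_unmap_virtual() when a BO's backing storage moves
 * (typically from their move or invalidate paths) so that stale CPU
 * page-table entries are shot down and the next user-space access
 * re-faults through the TTM fault handler against the new location.
 */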

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct reservation_object_list *fobj;
	struct reservation_object *resv;
	struct fence *excl;
	long timeout = 15 * HZ;
	int i;

	resv = bo->resv;
	fobj = reservation_object_get_list(resv);
	excl = reservation_object_get_excl(resv);
	if (excl) {
		if (!fence_is_signaled(excl)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(excl,
						     interruptible, timeout);
		}
	}

	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
		struct fence *fence;
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(resv));

		if (!fence_is_signaled(fence)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(fence,
						     interruptible, timeout);
		}
	}

	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(resv, NULL);
	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
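
/*
 * Usage sketch: waiting for a buffer to go idle before CPU access.  The
 * reservation lock must be held, since the shared-fence list above is
 * dereferenced under reservation_object_held(); note the fixed 15 * HZ
 * timeout, and that -EBUSY covers both no_wait and an expired wait:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, NULL);
 *	if (ret == 0) {
 *		ret = ttm_bo_wait(bo, false, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 */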