Rev 6938 → Rev 7146
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
//#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

#define __pgprot(x) ((pgprot_t) { (x) } )

void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot);
void vunmap(const void *addr);

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
    ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
    struct ttm_tt *ttm = bo->ttm;
    struct ttm_mem_reg *old_mem = &bo->mem;
    int ret;

    if (old_mem->mem_type != TTM_PL_SYSTEM) {
        ttm_tt_unbind(ttm);
        ttm_bo_free_old_node(bo);
        ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                        TTM_PL_MASK_MEM);
        old_mem->mem_type = TTM_PL_SYSTEM;
    }

    ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
    if (unlikely(ret != 0))
        return ret;

    if (new_mem->mem_type != TTM_PL_SYSTEM) {
        ret = ttm_tt_bind(ttm, new_mem);
        if (unlikely(ret != 0))
            return ret;
    }

    *old_mem = *new_mem;
    new_mem->mm_node = NULL;

    return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
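
/*
 * Usage sketch (illustrative, not from this file): a driver's move
 * callback typically falls back to this helper for TT <-> system moves:
 *
 *	if (old_mem->mem_type == TTM_PL_TT &&
 *	    new_mem->mem_type == TTM_PL_SYSTEM)
 *		return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);
 */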

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
    if (likely(man->io_reserve_fastpath))
        return 0;

    if (interruptible)
        return mutex_lock_interruptible(&man->io_reserve_mutex);

    mutex_lock(&man->io_reserve_mutex);
    return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
    if (likely(man->io_reserve_fastpath))
        return;

    mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
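
/*
 * Callers bracket reserve/free with this lock pair, exactly as done
 * further down in this file:
 *
 *	(void) ttm_mem_io_lock(man, false);
 *	ret = ttm_mem_io_reserve(bdev, mem);
 *	ttm_mem_io_unlock(man);
 */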

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
    struct ttm_buffer_object *bo;

    if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
        return -EAGAIN;

    bo = list_first_entry(&man->io_reserve_lru,
                          struct ttm_buffer_object,
                          io_reserve_lru);
    list_del_init(&bo->io_reserve_lru);
    ttm_bo_unmap_virtual_locked(bo);

    return 0;
}


int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
    int ret = 0;

    if (!bdev->driver->io_mem_reserve)
        return 0;
    if (likely(man->io_reserve_fastpath))
        return bdev->driver->io_mem_reserve(bdev, mem);

    if (bdev->driver->io_mem_reserve &&
        mem->bus.io_reserved_count++ == 0) {
retry:
        ret = bdev->driver->io_mem_reserve(bdev, mem);
        if (ret == -EAGAIN) {
            ret = ttm_mem_io_evict(man);
            if (ret == 0)
                goto retry;
        }
    }
    return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

    if (likely(man->io_reserve_fastpath))
        return;

    if (bdev->driver->io_mem_reserve &&
        --mem->bus.io_reserved_count == 0 &&
        bdev->driver->io_mem_free)
        bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
    struct ttm_mem_reg *mem = &bo->mem;
    int ret;

    if (!mem->bus.io_reserved_vm) {
        struct ttm_mem_type_manager *man =
            &bo->bdev->man[mem->mem_type];

        ret = ttm_mem_io_reserve(bo->bdev, mem);
        if (unlikely(ret != 0))
            return ret;
        mem->bus.io_reserved_vm = true;
        if (man->use_io_reserve_lru)
            list_add_tail(&bo->io_reserve_lru,
                          &man->io_reserve_lru);
    }
    return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
    struct ttm_mem_reg *mem = &bo->mem;

    if (mem->bus.io_reserved_vm) {
        mem->bus.io_reserved_vm = false;
        list_del_init(&bo->io_reserve_lru);
        ttm_mem_io_free(bo->bdev, mem);
    }
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                               void **virtual)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
    int ret;
    void *addr;

    *virtual = NULL;
    (void) ttm_mem_io_lock(man, false);
    ret = ttm_mem_io_reserve(bdev, mem);
    ttm_mem_io_unlock(man);
    if (ret || !mem->bus.is_iomem)
        return ret;

    if (mem->bus.addr) {
        addr = mem->bus.addr;
    } else {
        if (mem->placement & TTM_PL_FLAG_WC)
            addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
        else
            addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
        if (!addr) {
            (void) ttm_mem_io_lock(man, false);
            ttm_mem_io_free(bdev, mem);
            ttm_mem_io_unlock(man);
            return -ENOMEM;
        }
    }
    *virtual = addr;
    return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                                void *virtual)
{
    struct ttm_mem_type_manager *man;

    man = &bdev->man[mem->mem_type];

    if (virtual && mem->bus.addr == NULL)
        iounmap(virtual);
    (void) ttm_mem_io_lock(man, false);
    ttm_mem_io_free(bdev, mem);
    ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
    uint32_t *dstP =
        (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
    uint32_t *srcP =
        (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

    int i;
    for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
        iowrite32(ioread32(srcP++), dstP++);
    return 0;
}
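
/*
 * Note: the copy above goes through ioread32()/iowrite32() rather than
 * memcpy() because both ends may be I/O memory; one page is moved as
 * PAGE_SIZE / sizeof(uint32_t) 32-bit transfers.
 */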

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
    struct page *d = ttm->pages[page];
    void *dst;

    if (!d)
        return -ENOMEM;

    src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

    dst = (void *)MapIoMem((addr_t)d, 4096, PG_SW);

    if (!dst)
        return -ENOMEM;

    memcpy(dst, src, PAGE_SIZE);

    FreeKernelSpace(dst);

    return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
    struct page *s = ttm->pages[page];
    void *src;

    if (!s)
        return -ENOMEM;

    dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

    src = (void *)MapIoMem((addr_t)s, 4096, PG_SW);

    if (!src)
        return -ENOMEM;

    memcpy(dst, src, PAGE_SIZE);

    FreeKernelSpace(src);

    return 0;
}
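
/*
 * The two helpers above are this port's stand-ins for the upstream
 * kmap_atomic_prot()/vmap() copy paths: MapIoMem()/FreeKernelSpace()
 * create and drop a temporary kernel mapping of the TTM page. The prot
 * argument is kept for API compatibility but is not applied here.
 */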

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
    struct ttm_tt *ttm = bo->ttm;
    struct ttm_mem_reg *old_mem = &bo->mem;
    struct ttm_mem_reg old_copy = *old_mem;
    void *old_iomap;
    void *new_iomap;
    int ret;
    unsigned long i;
    unsigned long page;
    unsigned long add = 0;
    int dir;

    ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
    if (ret)
        return ret;
    ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
    if (ret)
        goto out;

    /*
     * Single TTM move. NOP.
     */
    if (old_iomap == NULL && new_iomap == NULL)
        goto out2;

    /*
     * Don't move nonexistent data. Clear destination instead.
     */
    if (old_iomap == NULL &&
        (ttm == NULL || (ttm->state == tt_unpopulated &&
                         !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
        memset(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
        goto out2;
    }

    /*
     * TTM might be null for moves within the same region.
     */
    if (ttm && ttm->state == tt_unpopulated) {
        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
            goto out1;
    }

    add = 0;
    dir = 1;

    if ((old_mem->mem_type == new_mem->mem_type) &&
        (new_mem->start < old_mem->start + old_mem->size)) {
        dir = -1;
        add = new_mem->num_pages - 1;
    }
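
    /*
     * Worked example: moving pages [4..7] down to [2..5] within the same
     * aperture overlaps, so a forward copy would clobber source pages 4
     * and 5 before reading them; dir = -1 and add = num_pages - 1 make
     * the loop below walk the range backwards instead.
     */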

    for (i = 0; i < new_mem->num_pages; ++i) {
        page = i * dir + add;
        if (old_iomap == NULL) {
            pgprot_t prot = ttm_io_prot(old_mem->placement,
                                        PAGE_KERNEL);
            ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                       prot);
        } else if (new_iomap == NULL) {
            pgprot_t prot = ttm_io_prot(new_mem->placement,
                                        PAGE_KERNEL);
            ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                       prot);
        } else
            ret = ttm_copy_io_page(new_iomap, old_iomap, page);
        if (ret)
            goto out1;
    }
    mb();
out2:
    old_copy = *old_mem;
    *old_mem = *new_mem;
    new_mem->mm_node = NULL;

    if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
        ttm_tt_unbind(ttm);
        ttm_tt_destroy(ttm);
        bo->ttm = NULL;
    }

out1:
    ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
    ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

    /*
     * On error, keep the mm node!
     */
    if (!ret)
        ttm_bo_mem_put(bo, &old_copy);
    return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
    kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
    struct ttm_buffer_object *fbo;
    int ret;

    fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
    if (!fbo)
        return -ENOMEM;

    *fbo = *bo;

    /**
     * Fix up members that we shouldn't copy directly:
     * TODO: Explicit member copy would probably be better here.
     */

    INIT_LIST_HEAD(&fbo->ddestroy);
    INIT_LIST_HEAD(&fbo->lru);
    INIT_LIST_HEAD(&fbo->swap);
    INIT_LIST_HEAD(&fbo->io_reserve_lru);
    drm_vma_node_reset(&fbo->vma_node);
    atomic_set(&fbo->cpu_writers, 0);

    kref_init(&fbo->list_kref);
    kref_init(&fbo->kref);
    fbo->destroy = &ttm_transfered_destroy;
    fbo->acc_size = 0;
    fbo->resv = &fbo->ttm_resv;
    reservation_object_init(fbo->resv);
    ret = ww_mutex_trylock(&fbo->resv->lock);
    WARN_ON(!ret);
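    /*
     * The trylock targets a reservation object that was initialized just
     * above, so it should always succeed; the WARN_ON flags any violation
     * of that invariant.
     */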

    *new_obj = fbo;
    return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
    /* Cached mappings need no adjustment */
    if (caching_flags & TTM_PL_FLAG_CACHED)
        return tmp;
    return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
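
/*
 * In this port ttm_io_prot() is effectively a stub: both branches return
 * tmp unchanged, whereas the upstream version adjusts the pgprot for
 * write-combined or uncached placements. The cached-placement check is
 * kept so the control flow mirrors upstream.
 */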

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
    struct ttm_mem_reg *mem = &bo->mem;

    if (bo->mem.bus.addr) {
        map->bo_kmap_type = ttm_bo_map_premapped;
        map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
    } else {
        map->bo_kmap_type = ttm_bo_map_iomap;
        if (mem->placement & TTM_PL_FLAG_WC)
            map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                      size);
        else
            map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                           size);
    }
    return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
    struct ttm_mem_reg *mem = &bo->mem;
    pgprot_t prot;
    struct ttm_tt *ttm = bo->ttm;
    int ret;

    BUG_ON(!ttm);

    if (ttm->state == tt_unpopulated) {
        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
            return ret;
    }

    if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
        /*
         * We're mapping a single page, and the desired
         * page protection is consistent with the bo.
         */

        map->bo_kmap_type = ttm_bo_map_kmap;
        map->page = ttm->pages[start_page];
        map->virtual = kmap(map->page);
    } else {
        /*
         * We need to use vmap to get the desired page protection
         * or to make the buffer object look contiguous.
         */
        prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
        map->bo_kmap_type = ttm_bo_map_vmap;
        map->virtual = vmap(ttm->pages + start_page, num_pages,
                            0, prot);
    }
    return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
    struct ttm_mem_type_manager *man =
        &bo->bdev->man[bo->mem.mem_type];
    unsigned long offset, size;
    int ret;

    BUG_ON(!list_empty(&bo->swap));
    map->virtual = NULL;
    map->bo = bo;
    if (num_pages > bo->num_pages)
        return -EINVAL;
    if (start_page > bo->num_pages)
        return -EINVAL;
#if 0
    if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
        return -EPERM;
#endif
    (void) ttm_mem_io_lock(man, false);
    ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
    ttm_mem_io_unlock(man);
    if (ret)
        return ret;
    if (!bo->mem.bus.is_iomem) {
        return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
    } else {
        offset = start_page << PAGE_SHIFT;
        size = num_pages << PAGE_SHIFT;
        return ttm_bo_ioremap(bo, offset, size, map);
    }
}
EXPORT_SYMBOL(ttm_bo_kmap);
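
/*
 * Usage sketch (assumes the bo is already reserved by the caller):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access the buffer through ptr ...
 *		ttm_bo_kunmap(&map);
 *	}
 */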

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
    struct ttm_buffer_object *bo = map->bo;
    struct ttm_mem_type_manager *man =
        &bo->bdev->man[bo->mem.mem_type];

    if (!map->virtual)
        return;
    switch (map->bo_kmap_type) {
    case ttm_bo_map_iomap:
        iounmap(map->virtual);
        break;
    case ttm_bo_map_vmap:
        vunmap(map->virtual);
        break;
    case ttm_bo_map_kmap:
        kunmap(map->page);
        break;
    case ttm_bo_map_premapped:
        break;
    default:
        BUG();
    }
    (void) ttm_mem_io_lock(man, false);
    ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
    ttm_mem_io_unlock(man);
    map->virtual = NULL;
    map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct fence *fence,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
    struct ttm_mem_reg *old_mem = &bo->mem;
    int ret;
    struct ttm_buffer_object *ghost_obj;

    reservation_object_add_excl_fence(bo->resv, fence);
    if (evict) {
        ret = ttm_bo_wait(bo, false, false, false);
        if (ret)
            return ret;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            (bo->ttm != NULL)) {
            ttm_tt_unbind(bo->ttm);
            ttm_tt_destroy(bo->ttm);
            bo->ttm = NULL;
        }
        ttm_bo_free_old_node(bo);
    } else {
        /**
         * This should help pipeline ordinary buffer moves.
         *
         * Hang old buffer memory on a new buffer object,
         * and leave it to be released when the GPU
         * operation has completed.
         */

        set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
        if (ret)
            return ret;

        reservation_object_add_excl_fence(ghost_obj->resv, fence);

        /**
         * If we're not moving to fixed memory, the TTM object
         * needs to stay alive. Otherwise hang it on the ghost
         * bo to be unbound and destroyed.
         */

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
            ghost_obj->ttm = NULL;
        else
            bo->ttm = NULL;

        ttm_bo_unreserve(ghost_obj);
        ttm_bo_unref(&ghost_obj);
    }

    *old_mem = *new_mem;
    new_mem->mm_node = NULL;

    return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
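
/*
 * Minimal KolibriOS replacements for the kernel's vmap()/vunmap():
 * a contiguous kernel virtual range is taken from AllocKernelSpace()
 * and each TTM page is mapped into it with MapPage(). The flags and
 * prot arguments are accepted for API compatibility but ignored, and
 * count << 12 is simply count * PAGE_SIZE for 4 KiB pages.
 */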
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
    void *vaddr;
    char *tmp;
    int i;

    vaddr = AllocKernelSpace(count << 12);
    if (vaddr == NULL)
        return NULL;

    for (i = 0, tmp = vaddr; i < count; i++) {
        MapPage(tmp, page_to_phys(pages[i]), PG_SW);
        tmp += 4096;
    }

    return vaddr;
}

void vunmap(const void *addr)
{
    FreeKernelSpace((void *)addr);
}