/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#define iowrite32(v, addr)  writel((v), (addr))
#define ioread32(addr)      readl(addr)

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
//#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

#define __pgprot(x) ((pgprot_t) { (x) })

void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot);

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
    ttm_bo_mem_put(bo, &bo->mem);
}

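/*
 * ttm_bo_move_ttm - move a buffer object by rebinding its TTM.
 *
 * Moves data between the system domain and a GPU-bindable domain without
 * copying: any old non-system placement is unbound and its node freed,
 * caching is switched to match the new placement, and for a non-system
 * destination the TTM is bound to the new region.
 */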
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
    struct ttm_tt *ttm = bo->ttm;
    struct ttm_mem_reg *old_mem = &bo->mem;
    int ret;

    if (old_mem->mem_type != TTM_PL_SYSTEM) {
        ttm_tt_unbind(ttm);
        ttm_bo_free_old_node(bo);
        ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                        TTM_PL_MASK_MEM);
        old_mem->mem_type = TTM_PL_SYSTEM;
    }

    ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
    if (unlikely(ret != 0))
        return ret;

    if (new_mem->mem_type != TTM_PL_SYSTEM) {
        ret = ttm_tt_bind(ttm, new_mem);
        if (unlikely(ret != 0))
            return ret;
    }

    *old_mem = *new_mem;
    new_mem->mm_node = NULL;

    return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
    if (likely(man->io_reserve_fastpath))
        return 0;

    if (interruptible)
        return mutex_lock_interruptible(&man->io_reserve_mutex);

    mutex_lock(&man->io_reserve_mutex);
    return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
    if (likely(man->io_reserve_fastpath))
        return;

    mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

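/*
 * Drop the io reservation of the least recently used buffer object on the
 * manager's io_reserve LRU, unmapping its CPU mappings first. Returns
 * -EAGAIN when there is nothing left to evict.
 */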
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
    struct ttm_buffer_object *bo;

    if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
        return -EAGAIN;

    bo = list_first_entry(&man->io_reserve_lru,
                          struct ttm_buffer_object,
                          io_reserve_lru);
    list_del_init(&bo->io_reserve_lru);
    ttm_bo_unmap_virtual_locked(bo);

    return 0;
}

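/*
 * Refcounted reservation of the driver's io space for @mem. If the driver
 * returns -EAGAIN, one LRU reservation is evicted and the call retried.
 * Callers in this file take the manager's io lock around it:
 *
 *    (void) ttm_mem_io_lock(man, false);
 *    ret = ttm_mem_io_reserve(bdev, mem);
 *    ttm_mem_io_unlock(man);
 */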
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
    int ret = 0;

    if (!bdev->driver->io_mem_reserve)
        return 0;
    if (likely(man->io_reserve_fastpath))
        return bdev->driver->io_mem_reserve(bdev, mem);

    if (bdev->driver->io_mem_reserve &&
        mem->bus.io_reserved_count++ == 0) {
retry:
        ret = bdev->driver->io_mem_reserve(bdev, mem);
        if (ret == -EAGAIN) {
            ret = ttm_mem_io_evict(man);
            if (ret == 0)
                goto retry;
        }
    }
    return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

    if (likely(man->io_reserve_fastpath))
        return;

    if (bdev->driver->io_mem_reserve &&
        --mem->bus.io_reserved_count == 0 &&
        bdev->driver->io_mem_free)
        bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

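/*
 * Per-BO variants meant for the CPU-mapping path: io_reserved_vm marks
 * that the buffer holds a reservation on behalf of its mappings, and the
 * buffer goes onto the manager's LRU so the reservation can be evicted
 * again by ttm_mem_io_evict().
 */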
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
    struct ttm_mem_reg *mem = &bo->mem;
    int ret;

    if (!mem->bus.io_reserved_vm) {
        struct ttm_mem_type_manager *man =
            &bo->bdev->man[mem->mem_type];

        ret = ttm_mem_io_reserve(bo->bdev, mem);
        if (unlikely(ret != 0))
            return ret;
        mem->bus.io_reserved_vm = true;
        if (man->use_io_reserve_lru)
            list_add_tail(&bo->io_reserve_lru,
                          &man->io_reserve_lru);
    }
    return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
    struct ttm_mem_reg *mem = &bo->mem;

    if (mem->bus.io_reserved_vm) {
        mem->bus.io_reserved_vm = false;
        list_del_init(&bo->io_reserve_lru);
        ttm_mem_io_free(bo->bdev, mem);
    }
}

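/*
 * Map a memory region for CPU access during a memcpy move. For iomem
 * regions without a pre-set kernel virtual address the bus window is
 * ioremapped, write-combined when the placement asks for it and uncached
 * otherwise; *virtual stays NULL for regions that are not iomem.
 */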
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                               void **virtual)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
    int ret;
    void *addr;

    *virtual = NULL;
    (void) ttm_mem_io_lock(man, false);
    ret = ttm_mem_io_reserve(bdev, mem);
    ttm_mem_io_unlock(man);
    if (ret || !mem->bus.is_iomem)
        return ret;

    if (mem->bus.addr) {
        addr = mem->bus.addr;
    } else {
        if (mem->placement & TTM_PL_FLAG_WC)
            addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
        else
            addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
        if (!addr) {
            (void) ttm_mem_io_lock(man, false);
            ttm_mem_io_free(bdev, mem);
            ttm_mem_io_unlock(man);
            return -ENOMEM;
        }
    }
    *virtual = addr;
    return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                                void *virtual)
{
    struct ttm_mem_type_manager *man;

    man = &bdev->man[mem->mem_type];

    if (virtual && mem->bus.addr == NULL)
        iounmap(virtual);
    (void) ttm_mem_io_lock(man, false);
    ttm_mem_io_free(bdev, mem);
    ttm_mem_io_unlock(man);
}

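/*
 * Copy one page between two iomapped regions, 32 bits at a time through
 * the ioread32/iowrite32 accessors defined at the top of this file.
 */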
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
    uint32_t *dstP =
        (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
    uint32_t *srcP =
        (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
    int i;

    for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
        iowrite32(ioread32(srcP++), dstP++);
    return 0;
}

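/*
 * The two helpers below copy a page between an iomapped region and a TTM
 * page. In this port the TTM page is made CPU-visible through the native
 * MapIoMem()/FreeKernelSpace() calls rather than the kmap()-based paths
 * of the upstream kernel code, so the pgprot_t argument is accepted but
 * not applied.
 */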
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
    struct page *d = ttm->pages[page];
    void *dst;

    if (!d)
        return -ENOMEM;

    src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

    dst = (void *)MapIoMem((addr_t)d, 4096, PG_SW);
    if (!dst)
        return -ENOMEM;

    memcpy(dst, src, PAGE_SIZE);

    FreeKernelSpace(dst);

    return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
    struct page *s = ttm->pages[page];
    void *src;

    if (!s)
        return -ENOMEM;

    dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

    src = (void *)MapIoMem((addr_t)s, 4096, PG_SW);
    if (!src)
        return -ENOMEM;

    memcpy(dst, src, PAGE_SIZE);

    FreeKernelSpace(src);

    return 0;
}

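/*
 * Fallback move that copies the buffer page by page through the CPU.
 * Both regions are ioremapped as needed; a move with neither side iomem
 * is a no-op, and a move from unpopulated, unswapped TTM pages just
 * clears the destination. When source and destination overlap within the
 * same memory type, the copy runs backwards (dir = -1) so pages are not
 * overwritten before they have been read.
 */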
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
    struct ttm_tt *ttm = bo->ttm;
    struct ttm_mem_reg *old_mem = &bo->mem;
    struct ttm_mem_reg old_copy = *old_mem;
    void *old_iomap;
    void *new_iomap;
    int ret;
    unsigned long i;
    unsigned long page;
    unsigned long add = 0;
    int dir;

    ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
    if (ret)
        return ret;
    ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
    if (ret)
        goto out;

    /*
     * Single TTM move. NOP.
     */
    if (old_iomap == NULL && new_iomap == NULL)
        goto out2;

    /*
     * Don't move nonexistent data. Clear destination instead.
     */
    if (old_iomap == NULL &&
        (ttm == NULL || (ttm->state == tt_unpopulated &&
                         !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
        memset(new_iomap, 0, new_mem->num_pages * PAGE_SIZE);
        goto out2;
    }

    /*
     * TTM might be null for moves within the same region.
     */
    if (ttm && ttm->state == tt_unpopulated) {
        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
            goto out1;
    }

    add = 0;
    dir = 1;

    if ((old_mem->mem_type == new_mem->mem_type) &&
        (new_mem->start < old_mem->start + old_mem->size)) {
        dir = -1;
        add = new_mem->num_pages - 1;
    }

    for (i = 0; i < new_mem->num_pages; ++i) {
        page = i * dir + add;
        if (old_iomap == NULL) {
            pgprot_t prot = ttm_io_prot(old_mem->placement,
                                        PAGE_KERNEL);
            ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                       prot);
        } else if (new_iomap == NULL) {
            pgprot_t prot = ttm_io_prot(new_mem->placement,
                                        PAGE_KERNEL);
            ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                       prot);
        } else
            ret = ttm_copy_io_page(new_iomap, old_iomap, page);
        if (ret)
            goto out1;
    }
    mb();
out2:
    old_copy = *old_mem;
    *old_mem = *new_mem;
    new_mem->mm_node = NULL;

    if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
        ttm_tt_unbind(ttm);
        ttm_tt_destroy(ttm);
        bo->ttm = NULL;
    }

out1:
    ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
    ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

    /*
     * On error, keep the mm node!
     */
    if (!ret)
        ttm_bo_mem_put(bo, &old_copy);
    return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
    kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
    struct ttm_buffer_object *fbo;
    int ret;

    fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
    if (!fbo)
        return -ENOMEM;

    *fbo = *bo;

    /**
     * Fix up members that we shouldn't copy directly:
     * TODO: Explicit member copy would probably be better here.
     */

    INIT_LIST_HEAD(&fbo->ddestroy);
    INIT_LIST_HEAD(&fbo->lru);
    INIT_LIST_HEAD(&fbo->swap);
    INIT_LIST_HEAD(&fbo->io_reserve_lru);
    drm_vma_node_reset(&fbo->vma_node);
    atomic_set(&fbo->cpu_writers, 0);

    kref_init(&fbo->list_kref);
    kref_init(&fbo->kref);
    fbo->destroy = &ttm_transfered_destroy;
    fbo->acc_size = 0;
    fbo->resv = &fbo->ttm_resv;
    reservation_object_init(fbo->resv);
    ret = ww_mutex_trylock(&fbo->resv->lock);
    WARN_ON(!ret);

    *new_obj = fbo;
    return 0;
}

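/*
 * In this port page protection bits are not adjusted for write-combined
 * or uncached placements, so the caller's pgprot is returned unchanged
 * on every path.
 */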
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
    /* Cached mappings need no adjustment */
    if (caching_flags & TTM_PL_FLAG_CACHED)
        return tmp;

    return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
    struct ttm_mem_reg *mem = &bo->mem;

    if (bo->mem.bus.addr) {
        map->bo_kmap_type = ttm_bo_map_premapped;
        map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
    } else {
        map->bo_kmap_type = ttm_bo_map_iomap;
        if (mem->placement & TTM_PL_FLAG_WC)
            map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                      size);
        else
            map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                           size);
    }
    return (!map->virtual) ? -ENOMEM : 0;
}

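/*
 * Map TTM pages into kernel space: a single cached page goes through
 * kmap(), anything else is vmap()ed so that the range looks contiguous
 * and carries the protection derived from the placement.
 */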
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
    struct ttm_mem_reg *mem = &bo->mem;
    pgprot_t prot;
    struct ttm_tt *ttm = bo->ttm;
    int ret;

    BUG_ON(!ttm);

    if (ttm->state == tt_unpopulated) {
        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
            return ret;
    }

    if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
        /*
         * We're mapping a single page, and the desired
         * page protection is consistent with the bo.
         */
        map->bo_kmap_type = ttm_bo_map_kmap;
        map->page = ttm->pages[start_page];
        map->virtual = kmap(map->page);
    } else {
        /*
         * We need to use vmap to get the desired page protection
         * or to make the buffer object look contiguous.
         */
        prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
        map->bo_kmap_type = ttm_bo_map_vmap;
        map->virtual = vmap(ttm->pages + start_page, num_pages,
                            0, prot);
    }
    return (!map->virtual) ? -ENOMEM : 0;
}

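/*
 * Map part of a buffer object for CPU access, dispatching to
 * ttm_bo_ioremap() for iomem placements and ttm_bo_kmap_ttm() otherwise.
 * A typical round trip, assuming 'bo' is a reserved buffer object and
 * with error handling elided:
 *
 *    struct ttm_bo_kmap_obj map;
 *    bool is_iomem;
 *
 *    if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *        void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *        // ... read or write through ptr ...
 *        ttm_bo_kunmap(&map);
 *    }
 */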
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
    struct ttm_mem_type_manager *man =
        &bo->bdev->man[bo->mem.mem_type];
    unsigned long offset, size;
    int ret;

    BUG_ON(!list_empty(&bo->swap));
    map->virtual = NULL;
    map->bo = bo;
    if (num_pages > bo->num_pages)
        return -EINVAL;
    if (start_page > bo->num_pages)
        return -EINVAL;
#if 0
    if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
        return -EPERM;
#endif
    (void) ttm_mem_io_lock(man, false);
    ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
    ttm_mem_io_unlock(man);
    if (ret)
        return ret;
    if (!bo->mem.bus.is_iomem) {
        return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
    } else {
        offset = start_page << PAGE_SHIFT;
        size = num_pages << PAGE_SHIFT;
        return ttm_bo_ioremap(bo, offset, size, map);
    }
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
    struct ttm_buffer_object *bo = map->bo;
    struct ttm_mem_type_manager *man =
        &bo->bdev->man[bo->mem.mem_type];

    if (!map->virtual)
        return;
    switch (map->bo_kmap_type) {
    case ttm_bo_map_iomap:
        iounmap(map->virtual);
        break;
    case ttm_bo_map_vmap:
        break;
    case ttm_bo_map_kmap:
        kunmap(map->page);
        break;
    case ttm_bo_map_premapped:
        break;
    default:
        BUG();
    }
    (void) ttm_mem_io_lock(man, false);
    ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
    ttm_mem_io_unlock(man);
    map->virtual = NULL;
    map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

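/*
 * Cleanup after an accelerated (GPU) move has been scheduled with @fence.
 * On eviction the move is waited on synchronously; otherwise the old
 * placement is handed to a "ghost" buffer object that carries the fence
 * and frees the space once the GPU copy has completed.
 */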
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct fence *fence,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
    struct ttm_mem_reg *old_mem = &bo->mem;
    int ret;
    struct ttm_buffer_object *ghost_obj;

    reservation_object_add_excl_fence(bo->resv, fence);
    if (evict) {
        ret = ttm_bo_wait(bo, false, false, false);
        if (ret)
            return ret;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            (bo->ttm != NULL)) {
            ttm_tt_unbind(bo->ttm);
            ttm_tt_destroy(bo->ttm);
            bo->ttm = NULL;
        }
        ttm_bo_free_old_node(bo);
    } else {
        /**
         * This should help pipeline ordinary buffer moves.
         *
         * Hang old buffer memory on a new buffer object,
         * and leave it to be released when the GPU
         * operation has completed.
         */

        set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
        if (ret)
            return ret;

        reservation_object_add_excl_fence(ghost_obj->resv, fence);

        /**
         * If we're not moving to fixed memory, the TTM object
         * needs to stay alive. Otherwise hang it on the ghost
         * bo to be unbound and destroyed.
         */

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
            ghost_obj->ttm = NULL;
        else
            bo->ttm = NULL;

        ttm_bo_unreserve(ghost_obj);
        ttm_bo_unref(&ghost_obj);
    }

    *old_mem = *new_mem;
    new_mem->mm_node = NULL;

    return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

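/*
 * Local replacement for the kernel's vmap(): maps @count pages as a
 * contiguous kernel-space range through the native AllocKernelSpace()/
 * MapPage() calls. The flags and prot arguments are accepted for
 * interface compatibility but not used.
 */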
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
    void *vaddr;
    char *tmp;
    int i;

    vaddr = AllocKernelSpace(count << 12);
    if (vaddr == NULL)
        return NULL;

    for (i = 0, tmp = vaddr; i < count; i++) {
        MapPage(tmp, page_to_phys(pages[i]), PG_SW);
        tmp += 4096;
    }

    return vaddr;
}
678 | };>><>><>><>>>><>><>>><>><> |