/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
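/*
 * KolibriOS port: route the Linux MMIO accessors used by the copy helpers
 * below to the writel()/readl() primitives provided by the port layer.
 */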
#define iowrite32(v, addr)	writel((v), (addr))
#define ioread32(addr)		readl(addr)

/* NOTE: the bracketed include targets were lost in the listing; they are
 * restored here following the mainline ttm_bo_util.c, and the port's
 * exact header set may differ. */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
//#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/reservation.h>
#include <linux/module.h>

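/*
 * KolibriOS port: the port supplies its own __pgprot() and a minimal
 * vmap() (defined at the end of this file) in place of the Linux
 * originals.
 */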
#define __pgprot(x)	((pgprot_t) { (x) })

void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot);

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

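/*
 * Move a buffer between the system domain and a TT (GART-bound) domain:
 * unbind and drop the old node if needed, fix up caching, then bind the
 * TTM pages to the new placement. No page copy is required.
 */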
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

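/*
 * ttm_mem_io_lock()/ttm_mem_io_unlock() serialize io_mem_reserve and
 * io_mem_free against eviction of I/O mappings; managers that never need
 * eviction set io_reserve_fastpath and skip the mutex entirely.
 */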
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

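/*
 * Drop the I/O reservation of the least recently used buffer on the
 * manager's io_reserve LRU, making room for a new reservation.
 */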
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

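/*
 * Map a memory region into kernel address space for a CPU copy.  On
 * success *virtual is either the driver-provided bus address or a fresh
 * ioremap of the region; it stays NULL for regions that are not iomem.
 */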
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

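/*
 * Page-at-a-time copy helpers for ttm_bo_move_memcpy().  The first copies
 * iomem to iomem through 32-bit MMIO accesses; the next two copy between
 * iomem and TTM pages.
 */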
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

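/*
 * KolibriOS port: the kmap_atomic_prot()/vmap() mappings of the mainline
 * helpers are replaced with MapIoMem()/FreeKernelSpace(); the requested
 * page protection is ignored and the copy is a plain memcpy().
 */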
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	dst = (void *)MapIoMem((addr_t)d, 4096, PG_SW);
	if (!dst)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	FreeKernelSpace(dst);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

	src = (void *)MapIoMem((addr_t)s, 4096, PG_SW);
	if (!src)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	FreeKernelSpace(src);

	return 0;
}

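/*
 * Fallback move path: map both placements into the kernel and copy page
 * by page with the CPU.  The copy runs backwards when source and
 * destination overlap within the same memory type, and is skipped
 * entirely when there is no data to preserve.
 */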
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

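/*
 * KolibriOS port: page protection is not differentiated per caching mode,
 * so ttm_io_prot() returns the supplied protection unchanged.
 */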
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

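/*
 * Map a range of TTM pages: a single cached page goes through kmap(),
 * anything else is made virtually contiguous with vmap().
 */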
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

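/*
 * ttm_bo_kmap() validates the requested range, reserves the I/O region,
 * then dispatches to ttm_bo_ioremap() for iomem placements or
 * ttm_bo_kmap_ttm() for system pages.
 */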
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		/* KolibriOS port: the vmap()ed range is not released here. */
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

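/*
 * Finish an accelerated move: attach the move fence to the buffer's
 * reservation object, then either wait and tear down the old placement
 * (eviction) or hand the old memory to a ghost object that is freed once
 * the fence signals, so that ordinary moves can be pipelined.
 */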
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

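/*
 * Minimal vmap() for the KolibriOS port: reserve a kernel address range
 * with AllocKernelSpace() and map each page in with MapPage().  The
 * flags/prot arguments of the Linux original are accepted but ignored.
 */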
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
{
	void *vaddr;
	char *tmp;
	int i;

	vaddr = AllocKernelSpace(count << 12);
	if (vaddr == NULL)
		return NULL;

	for (i = 0, tmp = vaddr; i < count; i++) {
		MapPage(tmp, page_to_phys(pages[i]), PG_SW);
		tmp += 4096;
	}

	return vaddr;
}