/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}


int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret) {
                        /* if we fail here don't nuke the mm node
                         * as the bo still owns it */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret) {
                        /* failing here, means keep old copy as-is */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
        ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
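
/*
 * Illustrative sketch, not part of the original file: a driver's
 * ttm_bo_driver::move() callback commonly tries an accelerated (GPU blit)
 * path first and falls back to ttm_bo_move_memcpy() when no copy engine is
 * available. mydrv_bo_move() and mydrv_move_blit() are hypothetical driver
 * helpers.
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 bool interruptible, bool no_wait_gpu,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		int ret;
 *
 *		ret = mydrv_move_blit(bo, evict, no_wait_gpu, new_mem);
 *		if (ret)
 *			ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu,
 *						 new_mem);
 *		return ret;
 *	}
 */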

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);

        spin_lock(&bdev->fence_lock);
        if (bo->sync_obj)
                fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        else
                fbo->sync_obj = NULL;
        spin_unlock(&bdev->fence_lock);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;
        fbo->resv = &fbo->ttm_resv;
        reservation_object_init(fbo->resv);
        ret = ww_mutex_trylock(&fbo->resv->lock);
        WARN_ON(!ret);

        *new_obj = fbo;
        return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
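
/*
 * Illustrative sketch, not part of the original file: mapping a buffer
 * object for CPU access with ttm_bo_kmap(). ttm_kmap_obj_virtual() tells
 * the caller whether the returned pointer is iomem, so it can choose
 * between memcpy_toio() and plain memcpy(). The bo is assumed to be
 * reserved by the caller; "data" and "len" are hypothetical.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		memcpy_toio((void __iomem *)virtual, data, len);
 *	else
 *		memcpy(virtual, data, len);
 *	ttm_bo_kunmap(&map);
 */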

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        spin_lock(&bdev->fence_lock);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
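
/*
 * Illustrative sketch, not part of the original file: after queueing a GPU
 * copy to the new placement, a driver hands the resulting sync object
 * (fence) to ttm_bo_move_accel_cleanup(), which waits on eviction or hangs
 * the old memory on a ghost object so the move can be pipelined.
 * mydrv_copy() and mydrv_fence_unref() are hypothetical driver helpers.
 *
 *	ret = mydrv_copy(bo, old_mem, new_mem, &fence);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu,
 *					new_mem);
 *	mydrv_fence_unref(&fence);
 *	return ret;
 */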