/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

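/*
 * Move helper for transitions between system memory and a TT (GTT-like)
 * domain. The bo is first unbound back to TTM_PL_SYSTEM if needed, the
 * page caching attributes are switched to match the new placement, and
 * the TTM is bound to the new region. On success the bo's mem descriptor
 * takes over *new_mem, and new_mem->mm_node is cleared so the caller
 * won't free the node.
 */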
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

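/*
 * The io_reserve mutex serializes io_mem_reserve()/io_mem_free() against
 * eviction of io reservations. Memory types whose drivers never return
 * -EAGAIN from io_mem_reserve() set io_reserve_fastpath and skip the
 * mutex entirely.
 */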
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

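/*
 * Make room for a new io reservation by unmapping the least recently
 * used bo on the manager's io_reserve_lru list. Returns -EAGAIN when
 * there is nothing left to evict. Callers are expected to hold the
 * io_reserve mutex (see ttm_mem_io_lock()).
 */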
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

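/*
 * Reserve the driver-side io space backing a memory region, and release
 * it again on the last free. A per-region reservation count ensures that
 * only the first reserve and the last free reach the driver; -EAGAIN
 * from the driver triggers eviction of another reservation and a retry.
 */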
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

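/*
 * Per-bo variants used by the CPU mapping code: the bus space is
 * reserved once per buffer object, and when the memory type uses an
 * io_reserve_lru the bo is linked there so its reservation can be
 * evicted later.
 */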
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

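/*
 * Map or unmap a whole memory region in kernel address space for a
 * CPU (memcpy) move. For regions that are not iomem, *virtual is left
 * NULL and the pages are reached through the TTM instead.
 */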
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

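/*
 * Single-page copy helpers for the memcpy move path: io->io, io->TTM
 * page and TTM page->io. The TTM variants map the page with the
 * protection the placement asks for, using kmap_atomic_prot() on x86
 * and vmap()/kmap() elsewhere.
 */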
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

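/*
 * Fallback bo move done with the CPU, for placements that have no
 * accelerated copy. Both regions are mapped, pages are copied one at a
 * time, and the old node is released on success. When source and
 * destination overlap within the same memory type, the copy runs
 * backwards (dir == -1), memmove()-style, so no page is overwritten
 * before it has been read.
 */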
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	spin_unlock(&bdev->fence_lock);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);