/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom
 */

#include 
#include 
#include 
#include 
//#include 
#include 
#include 
#include 
#include 

#define __pgprot(x)     ((pgprot_t) { (x) } )

void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot);
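
/*
 * Note (added for clarity, not part of the original source): __pgprot() and
 * the vmap() prototype above are local compatibility shims for this port;
 * the vmap() implementation at the bottom of this file is built on the
 * KolibriOS AllocKernelSpace()/MapPage() primitives.
 */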

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
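
/*
 * Usage note (added for illustration, not part of the original source):
 * when the fast path is disabled, ttm_mem_io_reserve() and ttm_mem_io_free()
 * are called with the manager's io_reserve lock held.  Callers in this file,
 * such as ttm_mem_reg_ioremap() and ttm_bo_kmap(), follow the pattern:
 *
 *	(void) ttm_mem_io_lock(man, false);
 *	ret = ttm_mem_io_reserve(bdev, mem);
 *	ttm_mem_io_unlock(man);
 */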

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	dst = (void*)MapIoMem((addr_t)d, 4096, PG_SW);

	if (!dst)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	FreeKernelSpace(dst);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

	src = (void*)MapIoMem((addr_t)s, 4096, PG_SW);

	if (!src)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	FreeKernelSpace(src);

	return 0;
}
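
/*
 * Note (added for clarity, not part of the original source): where the
 * upstream driver maps the TTM page with the kernel's kmap()-style helpers,
 * the two copy helpers above obtain a temporary kernel mapping through the
 * KolibriOS MapIoMem() call and drop it again with FreeKernelSpace().  The
 * pgprot_t argument is kept for API compatibility but is not applied to the
 * temporary mapping.
 */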

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
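
/*
 * Note (added for clarity, not part of the original source): the copy loop
 * above walks the pages forward by default (dir = 1, add = 0).  When the
 * move stays within one memory type and the new range starts below the end
 * of the old one, it walks backwards instead (dir = -1, add = num_pages - 1),
 * the usual memmove-style precaution for possibly overlapping ranges.  For a
 * four-page move in that case the pages are visited as 3, 2, 1, 0.
 */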

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
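
/*
 * Note (added for clarity, not part of the original source): in this port
 * ttm_io_prot() returns the supplied protection unchanged for every caching
 * flag; the TTM_PL_FLAG_CACHED branch and the fall-through both return tmp.
 * The upstream helper additionally switches to write-combined or uncached
 * protection for the corresponding placements.
 */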

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
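
/*
 * Usage sketch (added for illustration, not part of the original source):
 * a driver typically maps a reserved buffer object, accesses it through the
 * returned virtual address and unmaps it again.  ttm_kmap_obj_virtual() is
 * the accessor provided by the ttm_bo_api.h header.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		memset(ptr, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */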

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);


void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
    void *vaddr;
    char *tmp;
    int i;

    vaddr = AllocKernelSpace(count << 12);
    if(vaddr == NULL)
        return NULL;

    for(i = 0, tmp = vaddr; i < count; i++)
    {
        MapPage(tmp, page_to_phys(pages[i]), PG_SW);
        tmp+= 4096;
    };

    return vaddr;
};
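
/*
 * Note (added for clarity, not part of the original source): this vmap() is
 * the port's stand-in for the Linux kernel helper used by ttm_bo_kmap_ttm()
 * above.  It reserves a contiguous kernel virtual range with
 * AllocKernelSpace() and maps each page into it with MapPage(); the flags
 * and prot arguments are accepted only for API compatibility and are
 * ignored.  No matching vunmap() exists here, which is why the
 * ttm_bo_map_vmap case in ttm_bo_kunmap() is left empty.
 */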