/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom
 */
/* KolibriOS shims: route the Linux MMIO accessors to writel()/readl(). */
#define iowrite32(v, addr)      writel((v), (addr))
#define ioread32(addr)          readl(addr)

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
//#include <linux/io.h>
//#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
//#include <linux/vmalloc.h>
#include <linux/module.h>

#define __pgprot(x)     ((pgprot_t) { (x) } )

/* Local replacement for the kernel's vmap(); implemented at the end of this file. */
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot);

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

#if 0
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}
#endif

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	dst = (void *)MapIoMem((addr_t)d, 4096, PG_SW);

	if (!dst)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	FreeKernelSpace(dst);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

	src = (void *)MapIoMem((addr_t)s, 4096, PG_SW);

	if (!src)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	FreeKernelSpace(src);

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* In this port the caching flags are ignored and the page
	 * protection is returned unchanged. */
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
					       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = (void *)MapIoMem(page_to_phys(map->page), 4096, PG_SW);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
	case ttm_bo_map_kmap:
		FreeKernelSpace(map->virtual);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
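
/*
 * Illustrative sketch of how a caller might use ttm_bo_kmap()/ttm_bo_kunmap()
 * above to get temporary CPU access to a buffer object. The helper and the
 * buffer object "bo" are hypothetical and exist only for the example;
 * ttm_kmap_obj_virtual() is the accessor declared in ttm_bo_api.h.
 */
#if 0
static int example_fill_bo(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *ptr;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;

	/* Returns the CPU address and reports whether it is I/O memory
	 * or ordinary system pages. */
	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
	memset(ptr, 0, bo->num_pages << PAGE_SHIFT);

	ttm_bo_kunmap(&map);
	return 0;
}
#endif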

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
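
/*
 * Illustrative sketch of the intended calling pattern for
 * ttm_bo_move_accel_cleanup(): a driver queues a GPU copy, obtains the fence
 * that signals its completion, and passes both to the cleanup helper so the
 * old placement is released (via the ghost object above) only once the copy
 * has finished. my_driver_copy() and example_move_blit() are hypothetical
 * names used for the example only.
 */
#if 0
static int example_move_blit(struct ttm_buffer_object *bo,
			     bool evict, bool no_wait_gpu,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct fence *fence;
	int ret;

	/* Queue the actual blit on the GPU; returns a fence for it. */
	ret = my_driver_copy(bo->bdev, old_mem, new_mem, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu, new_mem);
	fence_put(fence);
	return ret;
}
#endif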

/*
 * Minimal vmap() replacement for KolibriOS: map "count" pages into a freshly
 * allocated, contiguous kernel virtual range. The "flags" and "prot"
 * arguments are accepted for API compatibility but ignored; every page is
 * mapped 4 KiB at a time with PG_SW permissions.
 */
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
	void *vaddr;
	char *tmp;
	int i;

	vaddr = AllocKernelSpace(count << 12);
	if (vaddr == NULL)
		return NULL;

	for (i = 0, tmp = vaddr; i < count; i++) {
		MapPage(tmp, page_to_phys(pages[i]), PG_SW);
		tmp += 4096;
	}

	return vaddr;
}