/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom
 */

#include 
#include 
#include 
#include 
//#include 
#include 
#include 
#include 
#include 

#define __pgprot(x)     ((pgprot_t) { (x) } )

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);
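
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver-side io_mem_reserve() callback showing the contract that
 * ttm_mem_io_reserve() above drives. Returning -EAGAIN asks TTM to evict
 * one buffer from the io_reserve_lru and retry. The example_* helpers are
 * made up for illustration.
 */
#if 0
static int example_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory is not behind an I/O aperture. */
		return 0;
	case TTM_PL_VRAM:
		if (!example_aperture_fits(bdev, mem))
			return -EAGAIN;	/* let TTM evict a mapping and retry */
		mem->bus.is_iomem = true;
		mem->bus.base = example_aperture_base(bdev);
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.size = mem->num_pages << PAGE_SHIFT;
		mem->bus.addr = NULL;
		return 0;
	default:
		return -EINVAL;
	}
}
#endif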

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	dst = (void *)MapIoMem((addr_t)d, 4096, PG_SW);
	if (!dst)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	FreeKernelSpace(dst);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

	src = (void *)MapIoMem((addr_t)s, 4096, PG_SW);
	if (!src)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	FreeKernelSpace(src);

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset(new_iomap, 0, new_mem->num_pages * PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
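
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * driver move() callback might combine the helpers above, using
 * ttm_bo_move_ttm() when only the TTM binding changes (system <-> TT) and
 * falling back to ttm_bo_move_memcpy() when a hardware blit is not
 * available. example_copy_with_gpu() is made up for illustration.
 */
#if 0
static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
			   bool interruptible, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	/* System <-> TT transitions only (un)bind the TTM; no data copy. */
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT))
		return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);

	/* Prefer a hardware blit; fall back to the CPU copy path. */
	ret = example_copy_with_gpu(bo, evict, no_wait_gpu, new_mem);
	if (ret)
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
	return ret;
}
#endif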

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
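
/*
 * Illustrative sketch, not part of the original file: typical CPU access to
 * a reserved buffer object through ttm_bo_kmap()/ttm_bo_kunmap().
 * ttm_kmap_obj_virtual() is the accessor declared in ttm_bo_api.h; the
 * sketch ignores the is_iomem distinction for brevity.
 */
#if 0
static int example_cpu_fill(struct ttm_buffer_object *bo, u32 value)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	u32 *ptr;
	unsigned long i;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;

	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
	for (i = 0; i < (bo->num_pages << PAGE_SHIFT) / sizeof(*ptr); i++)
		ptr[i] = value;

	ttm_bo_kunmap(&map);
	return 0;
}
#endif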

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
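
/*
 * Illustrative sketch, not part of the original file: the intended calling
 * pattern for ttm_bo_move_accel_cleanup(). A hypothetical driver schedules
 * a GPU blit for the move, obtains a fence for it and passes that fence
 * here, so the old backing store (possibly hung on a ghost object created
 * by ttm_buffer_object_transfer()) is released only once the copy has
 * completed. example_schedule_gpu_copy() is made up for illustration.
 */
#if 0
static int example_move_with_blit(struct ttm_buffer_object *bo, bool evict,
				  bool no_wait_gpu,
				  struct ttm_mem_reg *new_mem)
{
	struct fence *fence;
	int ret;

	ret = example_schedule_gpu_copy(bo, &bo->mem, new_mem, &fence);
	if (ret)
		return ret;

	/* The old placement is freed when @fence signals. */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu, new_mem);
	fence_put(fence);
	return ret;
}
#endif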

void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
{
	void *vaddr;
	char *tmp;
	int i;

	vaddr = AllocKernelSpace(count << 12);
	if (vaddr == NULL)
		return NULL;

	for (i = 0, tmp = vaddr; i < count; i++) {
		MapPage(tmp, page_to_phys(pages[i]), PG_SW);
		tmp += 4096;
	}

	return vaddr;
}

void vunmap(const void *addr)
{
	FreeKernelSpace((void *)addr);
}
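
/*
 * Illustrative sketch, not part of the original file: minimal use of the
 * KolibriOS vmap()/vunmap() replacements above, mirroring what
 * ttm_bo_kmap_ttm() does for multi-page mappings. Note that this port
 * ignores the flags and prot arguments and always maps with PG_SW.
 */
#if 0
static void *example_map_ttm_pages(struct ttm_tt *ttm)
{
	/* Map every page of the TTM contiguously into kernel space. */
	return vmap(ttm->pages, ttm->num_pages, 0, PAGE_KERNEL);
}

static void example_unmap_ttm_pages(void *vaddr)
{
	/* Release the kernel-space window created by vmap(). */
	vunmap(vaddr);
}
#endif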