Subversion Repositories Kolibri OS

/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom
 */
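
/*
 * KolibriOS build shims: iowrite32()/ioread32() are mapped onto the
 * writel()/readl() helpers, PAGE_KERNEL is a fixed page-protection value,
 * and vmap() is a local replacement for the Linux vmalloc interface,
 * implemented at the end of this file.
 */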
#define iowrite32(v, addr)      writel((v), (addr))
#define ioread32(addr)          readl(addr)

/*
 * Header names as in the upstream Linux ttm_bo_util.c; the Linux-only
 * headers that this port does not provide are commented out.
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
//#include <linux/io.h>
//#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
//#include <linux/vmalloc.h>
#include <linux/module.h>

#define __pgprot(x)     ((pgprot_t) { (x) } )
#define PAGE_KERNEL                     __pgprot(3)

void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot);

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}
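
/*
 * Move a buffer object by (re)binding its ttm_tt: unbind from a non-system
 * old placement, switch the caching attributes to match the new placement,
 * and bind to the new placement unless it is TTM_PL_SYSTEM.
 */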
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
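
/*
 * Serialize access to a memory type's io_mem_reserve()/io_mem_free() state.
 * When the driver flags the manager as io_reserve_fastpath, no locking or
 * reservation accounting is done and these helpers are effectively no-ops.
 */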
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}
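
/*
 * Per-region I/O space reservation.  ttm_mem_io_reserve() calls the driver's
 * io_mem_reserve() hook the first time a ttm_mem_reg needs its bus address,
 * retrying after evicting another region from the io_reserve_lru on -EAGAIN.
 * ttm_mem_io_free() drops the reference and calls io_mem_free() on the last put.
 */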
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

#if 0
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}
#endif

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}
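
/*
 * Map/unmap a whole ttm_mem_reg for CPU access during a memcpy move.  I/O
 * memory is ioremapped (write-combined when TTM_PL_FLAG_WC is set); for
 * system memory *virtual is left NULL and the caller falls back to the
 * per-page copy helpers below.
 */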
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}
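
/*
 * Per-page copy helpers for ttm_bo_move_memcpy().  On this port the ttm
 * pages are mapped with MapIoMem() rather than kmap()/vmap(), so the pgprot
 * argument is accepted for interface compatibility but not used.
 */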
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	dst = (void *)MapIoMem((addr_t)d, 4096, PG_SW);
	if (!dst)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	FreeKernelSpace(dst);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

	src = (void *)MapIoMem((addr_t)s, 4096, PG_SW);
	if (!src)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	FreeKernelSpace(src);

	return 0;
}
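
/*
 * Fallback bounce move: map both placements and copy page by page.  The copy
 * runs backwards when source and destination overlap within the same memory
 * type.  On success the old node is released; on error it is kept so the
 * buffer still has valid backing.
 */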
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	spin_unlock(&bdev->fence_lock);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}
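
/*
 * On this port ttm_io_prot() is a stub: caching attributes are not applied
 * per placement, the protection passed in is returned unchanged.
 */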
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
					       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = (void *)MapIoMem(page_to_phys(map->page), 4096, PG_SW);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
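
/*
 * Map (part of) a buffer object into kernel address space.  I/O placements
 * go through ttm_bo_ioremap(); system placements are mapped either as a
 * single page with MapIoMem() or through the local vmap() replacement.
 * A typical (hypothetical) caller pairs this with ttm_bo_kunmap():
 *
 *	struct ttm_bo_kmap_obj map;
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		bool is_iomem;
 *		void *ptr = ttm_bo_kmap_obj_virtual(&map, &is_iomem);
 *		// ... access ptr ...
 *		ttm_bo_kunmap(&map);
 *	}
 */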
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
	case ttm_bo_map_kmap:
		FreeKernelSpace(map->virtual);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
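
/*
 * Finish an accelerated (GPU) move.  On eviction the old placement is waited
 * on and released immediately; otherwise the old memory is handed to a ghost
 * buffer object that is released once the GPU copy's sync object signals.
 */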
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
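
/*
 * Minimal KolibriOS replacement for the Linux vmap(): allocate a kernel
 * address range and map each page's physical address into it.  The flags
 * and prot arguments are accepted for interface compatibility but ignored.
 */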
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
    void *vaddr;
    char *tmp;
    int i;

    vaddr = AllocKernelSpace(count << 12);
    if (vaddr == NULL)
        return NULL;

    for (i = 0, tmp = vaddr; i < count; i++) {
        MapPage(tmp, page_to_phys(pages[i]), PG_SW);
        tmp += 4096;
    }

    return vaddr;
}
701