Subversion Repositories Kolibri OS


Diff: Rev 4569 → Rev 5078
@@ -25 +25 @@
  *
  **************************************************************************/
 /*
  * Authors: Thomas Hellstrom 
  */
+#define iowrite32(v, addr)      writel((v), (addr))
+#define ioread32(addr)          readl(addr)
 
 #include 
 #include 
 #include 
-#include 
-#include 
+//#include 
+//#include 
 #include 
 #include 
-#include 
-#include 
+//#include 
+#include 
+
+#define __pgprot(x)     ((pgprot_t) { (x) } )
+#define PAGE_KERNEL                     __pgprot(3)
+
+void *vmap(struct page **pages, unsigned int count,
+           unsigned long flags, pgprot_t prot);
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
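Rev 5078 opens by shimming the Linux MMIO accessors onto the port's writel()/readl(), supplying minimal __pgprot()/PAGE_KERNEL definitions, and forward-declaring the vmap() that is implemented at the end of this file (last hunk). A minimal sketch of what the two accessor shims give a caller; the mmio pointer and REG_STATUS offset are hypothetical, purely for illustration:

    /* Sketch only: exercises the iowrite32/ioread32 shims above.
     * `mmio` and REG_STATUS are hypothetical names; the macros simply
     * expand to readl()/writel(). */
    #define REG_STATUS 0x10

    static uint32_t ack_status(void *mmio)
    {
        uint32_t v = ioread32((char *)mmio + REG_STATUS);  /* readl() */
        iowrite32(v, (char *)mmio + REG_STATUS);           /* writel() */
        return v;
    }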
@@ -154 +162 @@
 		bdev->driver->io_mem_free(bdev, mem);
 
 }
 EXPORT_SYMBOL(ttm_mem_io_free);
 
+#if 0
 int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
 {
@@ -173 +182 @@
 			list_add_tail(&bo->io_reserve_lru,
 				      &man->io_reserve_lru);
 	}
 	return 0;
 }
+#endif
 
 void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 {
@@ -205 +215 @@
 		addr = mem->bus.addr;
 	} else {
 		if (mem->placement & TTM_PL_FLAG_WC)
 			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
 		else
-			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+            addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
 		if (!addr) {
 			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
 			ttm_mem_io_unlock(man);
 			return -ENOMEM;
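Here, and again in ttm_bo_ioremap() further down, ioremap_nocache() becomes plain ioremap(): the port exposes a single uncached-mapping entry point. A hedged sketch of how such a shim could sit on the MapIoMem() primitive used throughout this diff; PG_NOCACHE is a hypothetical flag name, not confirmed by this file:

    /* Hypothetical shim, assuming MapIoMem() takes a no-cache flag
     * (PG_NOCACHE is an assumed name). Maps `size` bytes of physical
     * MMIO space starting at `phys`, uncached. */
    static inline void *ioremap_sketch(addr_t phys, size_t size)
    {
        return (void *)MapIoMem(phys, size, PG_SW | PG_NOCACHE);
    }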
@@ -256 +266 @@
 	if (!d)
 		return -ENOMEM;
 
 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
 
-#ifdef CONFIG_X86
-	dst = kmap_atomic_prot(d, prot);
-#else
-	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
-		dst = vmap(&d, 1, 0, prot);
-	else
-		dst = kmap(d);
-#endif
+    dst = (void*)MapIoMem((addr_t)d, 4096, PG_SW);
 
 	if (!dst)
 		return -ENOMEM;
 
-	memcpy_fromio(dst, src, PAGE_SIZE);
-
-#ifdef CONFIG_X86
-	kunmap_atomic(dst);
-#else
-	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
-		vunmap(dst);
-	else
-		kunmap(d);
+    memcpy(dst, src, PAGE_SIZE);
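The ported ttm_copy_io_ttm_page() above (and its mirror ttm_copy_ttm_io_page() in the next hunk) collapses the kmap_atomic_prot/vmap/kmap selection into a single MapIoMem() of the target page followed by a plain memcpy(). A condensed per-page sketch of that pattern; the trailing FreeKernelSpace() is an assumption about where the window is released, since the unmap site falls outside the hunks shown:

    /* Per-page copy pattern of the port, condensed. `d` stands in for
     * the destination page, as in the original helper; releasing the
     * window with FreeKernelSpace() is assumed, not shown in the hunk. */
    static int copy_io_page_sketch(void *src_base, struct page *d, unsigned long page)
    {
        void *src = (void *)((unsigned long)src_base + (page << PAGE_SHIFT));
        void *dst = (void *)MapIoMem((addr_t)d, 4096, PG_SW);

        if (!dst)
            return -ENOMEM;

        memcpy(dst, src, PAGE_SIZE);
        FreeKernelSpace(dst);
        return 0;
    }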
@@ -292 +289 @@
 
 	if (!s)
 		return -ENOMEM;
 
 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
 
-#ifdef CONFIG_X86
-	src = kmap_atomic_prot(s, prot);
-#else
-	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
-		src = vmap(&s, 1, 0, prot);
-	else
-		src = kmap(s);
-#endif
+    src = (void*)MapIoMem((addr_t)s, 4096, PG_SW);
 
 	if (!src)
 		return -ENOMEM;
 
-	memcpy_toio(dst, src, PAGE_SIZE);
-
-#ifdef CONFIG_X86
-	kunmap_atomic(src);
-#else
-	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
-		vunmap(src);
-	else
-		kunmap(s);
+    memcpy(dst, src, PAGE_SIZE);
@@ -350 +335 @@
 		goto out2;
 
 	/*
 	 * Don't move nonexistent data. Clear destination instead.
 	 */
-	if (old_iomap == NULL && ttm == NULL)
+	if (old_iomap == NULL &&
+	    (ttm == NULL || (ttm->state == tt_unpopulated &&
+			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
+        memset(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
+	}
 
 	/*
 	 * TTM might be null for moves within the same region.
@@ -481 +470 @@
 	return 0;
 }
 
 pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 {
-#if defined(__i386__) || defined(__x86_64__)
-	if (caching_flags & TTM_PL_FLAG_WC)
-		tmp = pgprot_writecombine(tmp);
-	else if (boot_cpu_data.x86 > 3)
-		tmp = pgprot_noncached(tmp);
-
-#elif defined(__powerpc__)
-	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
-		pgprot_val(tmp) |= _PAGE_NO_CACHE;
-		if (caching_flags & TTM_PL_FLAG_UNCACHED)
-			pgprot_val(tmp) |= _PAGE_GUARDED;
-	}
-#endif
-#if defined(__ia64__)
-	if (caching_flags & TTM_PL_FLAG_WC)
-		tmp = pgprot_writecombine(tmp);
-	else
-		tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__sparc__) || defined(__mips__)
-	if (!(caching_flags & TTM_PL_FLAG_CACHED))
-		tmp = pgprot_noncached(tmp);
-#endif
 	return tmp;
 }
@@ -511 +477 @@
 EXPORT_SYMBOL(ttm_io_prot);
@@ -524 +490 @@
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->placement & TTM_PL_FLAG_WC)
 			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
 						  size);
 		else
-			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+			map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
 						       size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -555 +521 @@
 		 * page protection is consistent with the bo.
 		 */
 
 		map->bo_kmap_type = ttm_bo_map_kmap;
 		map->page = ttm->pages[start_page];
-		map->virtual = kmap(map->page);
+		map->virtual = (void*)MapIoMem(page_to_phys(map->page), 4096, PG_SW);
 	} else {
 		/*
 		 * We need to use vmap to get the desired page protection
 		 * or to make the buffer object look contiguous.
@@ -619 +585 @@
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
 		break;
 	case ttm_bo_map_vmap:
-		vunmap(map->virtual);
-		break;
 	case ttm_bo_map_kmap:
-		kunmap(map->page);
+        FreeKernelSpace(map->virtual);
 		break;
 	case ttm_bo_map_premapped:
 		break;
 	default:
 		BUG();
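Because ttm_bo_kmap() now maps with MapIoMem(), ttm_bo_kunmap() releases with FreeKernelSpace(); the vmap case deliberately falls through to the same cleanup, since the port's vmap() (next hunk) also hands out AllocKernelSpace ranges. A small sketch of the resulting map/unmap pairing, assuming the single 4 KiB window used above:

    /* Map one page as ttm_bo_kmap's ttm_bo_map_kmap path now does,
     * and release it as ttm_bo_kunmap does. Sketch only. */
    static int touch_page_sketch(struct page *page)
    {
        void *va = (void *)MapIoMem(page_to_phys(page), 4096, PG_SW);

        if (!va)
            return -ENOMEM;

        /* ... read or modify the page through va ... */

        FreeKernelSpace(va);
        return 0;
    }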
@@ -711 +675 @@
 	new_mem->mm_node = NULL;
 
 	return 0;
 }
+EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+
+
+void *vmap(struct page **pages, unsigned int count,
+           unsigned long flags, pgprot_t prot)
+{
+    void *vaddr;
+    char *tmp;
+    int i;
+
+    vaddr = AllocKernelSpace(count << 12);
+    if(vaddr == NULL)
+        return NULL;
+
+    for(i = 0, tmp = vaddr; i < count; i++)
+    {
+        MapPage(tmp, page_to_phys(pages[i]), PG_SW);
+        tmp+= 4096;
+    };
+
+    return vaddr;
+};
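The replacement vmap() reserves a linear window with AllocKernelSpace() and maps each page into it with MapPage(); the flags and prot arguments are accepted for source compatibility but ignored. A usage sketch, under the assumption that FreeKernelSpace() both unmaps and returns the window, which is what the ttm_bo_map_vmap fall-through in ttm_bo_kunmap() implies:

    /* Usage sketch for the vmap() above: make `n` discontiguous pages
     * linearly addressable, clear them, then release the window. */
    static int vmap_demo(struct page **pages, unsigned int n)
    {
        void *va = vmap(pages, n, 0, PAGE_KERNEL);

        if (va == NULL)
            return -ENOMEM;

        memset(va, 0, (size_t)n << 12);   /* n pages of 4096 bytes each */

        FreeKernelSpace(va);              /* assumed release path; see ttm_bo_kunmap() */
        return 0;
    }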