Subversion Repositories Kolibri OS

drm_cache.c: changes between Rev 6084 and Rev 6131

--- drm_cache.c	(Rev 6084)
+++ drm_cache.c	(Rev 6131)
@@ -1,145 +1,144 @@
 /**************************************************************************
  *
  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
  * without limitation the rights to use, copy, modify, merge, publish,
  * distribute, sub license, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
  *
  * The above copyright notice and this permission notice (including the
  * next paragraph) shall be included in all copies or substantial portions
  * of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  **************************************************************************/
 /*
  * Authors: Thomas Hellström 
  */
 
 #include 
 #include 
 
 extern int x86_clflush_size;
 
-
-#if 0
+#if defined(CONFIG_X86)
+
+/*
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues.  For drm_clflush_page this fencing happens
+ * in the caller.
+ */
 static void
 drm_clflush_page(struct page *page)
 {
 	uint8_t *page_virtual;
 	unsigned int i;
 	const int size = boot_cpu_data.x86_clflush_size;
 
 	if (unlikely(page == NULL))
 		return;
 
 	page_virtual = kmap_atomic(page);
 	for (i = 0; i < PAGE_SIZE; i += size)
 		clflush(page_virtual + i);
 	kunmap_atomic(page_virtual);
 }
 
 static void drm_cache_flush_clflush(struct page *pages[],
 				    unsigned long num_pages)
 {
 	unsigned long i;
 
 	mb();
 	for (i = 0; i < num_pages; i++)
 		drm_clflush_page(*pages++);
 	mb();
 }
 #endif
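
An aside, not part of either revision: the comment added in Rev 6131 notes that the fencing for drm_clflush_page() happens in the caller, and drm_cache_flush_clflush() above is that caller. A minimal sketch of the same contract for a single page, with the ordering spelled out (the helper name is invented for illustration):

/* Illustration only: clflush/clflushopt are not ordered against ordinary
 * stores, so the caller brackets the unfenced flushes with memory barriers. */
static void flush_one_page_example(struct page *page)
{
	mb();                   /* order preceding CPU writes before the flush */
	drm_clflush_page(page); /* unfenced cache-line flushes for this page   */
	mb();                   /* ensure the flushes complete before GPU use  */
}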
 
 void
 drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 {
-    uint8_t *pva;
-    unsigned int i, j;
-
-    pva = AllocKernelSpace(4096);
-
-    if(pva != NULL)
-    {
-        dma_addr_t *src, *dst;
-        u32 count;
-
-        for (i = 0; i < num_pages; i++)
-        {
-            mb();
-            MapPage(pva, page_to_phys(pages[i]), 0x001);
-            for (j = 0; j < PAGE_SIZE; j += x86_clflush_size)
-                clflush(pva + j);
-        }
-        FreeKernelSpace(pva);
-    }
-    mb();
+
+#if defined(CONFIG_X86)
+	drm_cache_flush_clflush(pages, num_pages);
+	return;
+
+#elif defined(__powerpc__)
+	unsigned long i;
+	for (i = 0; i < num_pages; i++) {
+		struct page *page = pages[i];
+		void *page_virtual;
+
+		if (unlikely(page == NULL))
+			continue;
+
+		page_virtual = kmap_atomic(page);
+		flush_dcache_range((unsigned long)page_virtual,
+				   (unsigned long)page_virtual + PAGE_SIZE);
+		kunmap_atomic(page_virtual);
+	}
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
 }
 EXPORT_SYMBOL(drm_clflush_pages);
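
Another aside, not part of the diff: a hypothetical caller of drm_clflush_pages(). In both revisions the function is meant to be called after the CPU has written to non-coherent pages and before the GPU reads them; the structure and function names below are invented for illustration.

/* Illustration only: flush CPU writes out of the cache before pointing the
 * GPU at the backing pages.  The memory barriers are issued inside
 * drm_clflush_pages() itself. */
struct my_obj {
	struct page **pages;
	unsigned long num_pages;
};

static void my_obj_finish_cpu_write(struct my_obj *obj)
{
	/* ... CPU writes into obj->pages through a kernel mapping ... */
	drm_clflush_pages(obj->pages, obj->num_pages);
	/* ... now safe to hand the pages to the GPU ... */
}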
 
 void
 drm_clflush_sg(struct sg_table *st)
 {
-    struct sg_page_iter sg_iter;
-    struct page *page;
-
-    uint8_t *pva;
-    unsigned int i;
-
-    pva = AllocKernelSpace(4096);
-    if( pva != NULL)
-    {
-        mb();
-        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-        {
-            page = sg_page_iter_page(&sg_iter);
-
-            MapPage(pva,page_to_phys(page), 0x001);
-
-            for (i = 0; i < PAGE_SIZE; i += x86_clflush_size)
-                clflush(pva + i);
-        };
-        FreeKernelSpace(pva);
-    };
-    mb();
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		struct sg_page_iter sg_iter;
+
+		mb();
+		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+			drm_clflush_page(sg_page_iter_page(&sg_iter));
+		mb();
+
+		return;
+	}
+
+	if (wbinvd_on_all_cpus())
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
 }
 EXPORT_SYMBOL(drm_clflush_sg);
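
Also not part of the diff: a hypothetical caller of drm_clflush_sg() for an object whose backing pages are described by a scatter/gather table. In the Rev 6131 code this walks every page with clflush when the CPU supports it and otherwise falls back to wbinvd_on_all_cpus(); the function name below is invented for illustration.

/* Illustration only: flush every page referenced by the sg_table after the
 * CPU has dirtied the object's backing store. */
static void my_obj_flush_sg(struct sg_table *st)
{
	/* ... CPU writes to the pages listed in st ... */
	drm_clflush_sg(st);
}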
 
-#if 0
 void
 drm_clflush_virt_range(void *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
-	if (cpu_has_clflush) {
-		const int size = boot_cpu_data.x86_clflush_size;
+	if (1) {
+		const int size = x86_clflush_size;
 		void *end = addr + length;
 		addr = (void *)(((unsigned long)addr) & -size);
 		mb();
 		for (; addr < end; addr += size)
-			clflushopt(addr);
+			clflush(addr);
 		mb();
 		return;
 	}
 
-	if (wbinvd_on_all_cpus())
-		printk(KERN_ERR "Timed out waiting for cache flush.\n");
 #else
 	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
 	WARN_ON_ONCE(1);
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_virt_range);
-
-#endif
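
A final aside, not part of the diff: drm_clflush_virt_range() is compiled out (#if 0) in Rev 6084 and enabled in Rev 6131. Below is a hypothetical caller that flushes only the bytes the CPU actually touched in a kernel mapping of an object; the function name is invented for illustration.

/* Illustration only: flush a sub-range instead of whole pages.  The start
 * address is rounded down to a cache-line boundary inside
 * drm_clflush_virt_range(), so an unaligned pointer is fine. */
static void my_obj_flush_range(void *vaddr, unsigned long offset, unsigned long len)
{
	drm_clflush_virt_range((char *)vaddr + offset, len);
}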