drm_cache.c: changes from Rev 6084 to Rev 6131
@@ -31,9 +31,14 @@
 #include
 #include
 
 extern int x86_clflush_size;
 
-
-#if 0
+#if defined(CONFIG_X86)
+
+/*
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
+ * in the caller.
+ */
 static void
 drm_clflush_page(struct page *page)
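The hunks skip the body of drm_clflush_page (old lines 40-63 / new lines
45-68). For orientation, a sketch following the upstream Linux shape that
the new comment describes; the port's actual body may still use its own
mapping helpers, so treat the names and layout here as assumptions:

    /* Sketch, upstream shape: flush one page, one cache line at a time,
     * with the unordered clflushopt; fencing is left to the caller. */
    static void
    drm_clflush_page(struct page *page)
    {
        uint8_t *page_virtual;
        unsigned int i;
        const int size = x86_clflush_size;  /* bytes per cache line */

        if (unlikely(page == NULL))
            return;

        page_virtual = kmap_atomic(page);   /* temporary kernel mapping */
        for (i = 0; i < PAGE_SIZE; i += size)
            clflushopt(page_virtual + i);   /* unordered per-line flush */
        kunmap_atomic(page_virtual);
    }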
@@ -64,27 +69,30 @@
 #endif
 
 void
 drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 {
-    uint8_t *pva;
-    unsigned int i, j;
-
-    pva = AllocKernelSpace(4096);
-
-    if(pva != NULL)
-    {
-        dma_addr_t *src, *dst;
-        u32 count;
-
-        for (i = 0; i < num_pages; i++)
-        {
-            mb();
-            MapPage(pva, page_to_phys(pages[i]), 0x001);
-            for (j = 0; j < PAGE_SIZE; j += x86_clflush_size)
-                clflush(pva + j);
-        }
-        FreeKernelSpace(pva);
-    }
-    mb();
+
+#if defined(CONFIG_X86)
+    drm_cache_flush_clflush(pages, num_pages);
+    return;
+
+#elif defined(__powerpc__)
+    unsigned long i;
+    for (i = 0; i < num_pages; i++) {
+        struct page *page = pages[i];
+        void *page_virtual;
+
+        if (unlikely(page == NULL))
+            continue;
+
+        page_virtual = kmap_atomic(page);
+        flush_dcache_range((unsigned long)page_virtual,
+                           (unsigned long)page_virtual + PAGE_SIZE);
+        kunmap_atomic(page_virtual);
+    }
+#else
+    printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+    WARN_ON_ONCE(1);
+#endif
 }
 EXPORT_SYMBOL(drm_clflush_pages);
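The x86 path now delegates to drm_cache_flush_clflush(), defined earlier in
the file outside the visible hunks. In upstream drm_cache.c it is a fenced
loop over drm_clflush_page(); a sketch, assuming the port keeps that shape:

    /* Sketch, upstream shape: bracket the unordered per-page flushes
     * with memory barriers, as the comment at the top requires. */
    static void
    drm_cache_flush_clflush(struct page *pages[], unsigned long num_pages)
    {
        unsigned long i;

        mb();                          /* order against prior stores */
        for (i = 0; i < num_pages; i++)
            drm_clflush_page(pages[i]);
        mb();                          /* fence the unordered flushes */
    }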
@@ -91,28 +99,24 @@
 
 void
 drm_clflush_sg(struct sg_table *st)
 {
-    struct sg_page_iter sg_iter;
-    struct page *page;
-
-    uint8_t *pva;
-    unsigned int i;
-
-    pva = AllocKernelSpace(4096);
-    if( pva != NULL)
-    {
-        mb();
-        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-        {
-            page = sg_page_iter_page(&sg_iter);
-
-            MapPage(pva,page_to_phys(page), 0x001);
-
-            for (i = 0; i < PAGE_SIZE; i += x86_clflush_size)
-                clflush(pva + i);
-        };
-        FreeKernelSpace(pva);
-    };
-    mb();
+#if defined(CONFIG_X86)
+    if (cpu_has_clflush) {
+        struct sg_page_iter sg_iter;
+
+        mb();
+        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+            drm_clflush_page(sg_page_iter_page(&sg_iter));
+        mb();
+
+        return;
+    }
+
+    if (wbinvd_on_all_cpus())
+        printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+    printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+    WARN_ON_ONCE(1);
+#endif
 }
 EXPORT_SYMBOL(drm_clflush_sg);
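On x86 parts without clflush, the new drm_clflush_sg falls back to
wbinvd_on_all_cpus(), which writes back and invalidates every CPU's entire
cache. That helper is not in this file; in upstream Linux it lives in
arch/x86/lib/cache-smp.c and looks roughly like the sketch below (background
only, not this port's code):

    /* Background sketch of the upstream helper; the port may wire this
     * differently. */
    static void __wbinvd(void *dummy)
    {
        wbinvd();   /* write back and invalidate the whole cache */
    }

    int wbinvd_on_all_cpus(void)
    {
        /* Run __wbinvd on every online CPU and wait for completion;
         * a nonzero return is what triggers the "Timed out" printk. */
        return on_each_cpu(__wbinvd, NULL, 1);
    }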
@@ -119,16 +123,12 @@
 
-#if 0
 void
 drm_clflush_virt_range(void *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
-    if (cpu_has_clflush) {
-        const int size = boot_cpu_data.x86_clflush_size;
+    if (1) {
+        const int size = x86_clflush_size;
         void *end = addr + length;
         addr = (void *)(((unsigned long)addr) & -size);
         mb();
         for (; addr < end; addr += size)
-            clflushopt(addr);
-        mb();
-        return;
-    }
+            clflush(addr);
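A note on the masking step kept by both revisions: addr & -size rounds the
start address down to a cache-line boundary, so a range that begins mid-line
still has its first line flushed. A worked example with illustrative values
(64-byte lines, a made-up address):

    /* Illustrative only: size and addr are made-up example values. */
    const int size = 64;           /* x86_clflush_size on most CPUs  */
    void *addr = (void *)0x1234;   /* range starts mid cache line    */
    addr = (void *)(((unsigned long)addr) & -size);
    /* -size is ...ffffffc0, so addr becomes 0x1200; the loop then
     * flushes lines 0x1200, 0x1240, ... until addr reaches end. */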