Rev 4104 | Rev 4539 | Go to most recent revision | Show entire file | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 4104 | Rev 4293 | ||
---|---|---|---|
Line 27... | Line 27... | ||
27 | /* |
27 | /* |
 * Authors: Thomas Hellström
29 | */ |
29 | */ |
Line 30... | Line 30... | ||
30 | 30 | ||
- | 31 | #include |
|
31 | #include |
32 | #include |
Line 32... | Line 33... | ||
32 | #include |
33 | #include |
Line 33... | Line 34... | ||
33 | 34 | ||
34 | extern int x86_clflush_size; |
35 | extern int x86_clflush_size; |
35 | 36 | ||
/*
 * Flush the cache line containing __p from every level of the CPU
 * cache hierarchy using the x86 CLFLUSH instruction.
 *
 * The "+m" memory operand makes the affected byte both an input and an
 * output, so the compiler neither reorders the flush past accesses to
 * *__p nor discards it as dead code.  Callers are responsible for the
 * surrounding mb() fencing (CLFLUSH is only ordered by MFENCE).
 */
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
}
41 | |
40 | #if 0 |
42 | #if 0 |
41 | static void |
43 | static void |
Line 73... | Line 75... | ||
73 | #endif |
75 | #endif |
Line 74... | Line 76... | ||
74 | 76 | ||
75 | void |
77 | void |
76 | drm_clflush_pages(struct page *pages[], unsigned long num_pages) |
78 | drm_clflush_pages(struct page *pages[], unsigned long num_pages) |
77 | { |
79 | { |
78 | uint8_t *page_virtual; |
80 | uint8_t *pva; |
Line 79... | Line 81... | ||
79 | unsigned int i, j; |
81 | unsigned int i, j; |
Line 80... | Line 82... | ||
80 | 82 | ||
81 | page_virtual = AllocKernelSpace(4096); |
83 | pva = AllocKernelSpace(4096); |
82 | 84 | ||
83 | if(page_virtual != NULL) |
85 | if(pva != NULL) |
Line 84... | Line 86... | ||
84 | { |
86 | { |
85 | dma_addr_t *src, *dst; |
87 | dma_addr_t *src, *dst; |
86 | u32 count; |
88 | u32 count; |
87 | - | ||
88 | for (i = 0; i < num_pages; i++) |
- | |
89 | { |
89 | |
90 | mb(); |
90 | for (i = 0; i < num_pages; i++) |
91 | // asm volatile("mfence"); |
91 | { |
92 | - | ||
93 | MapPage(page_virtual,*pages++, 0x001); |
92 | mb(); |
94 | for (j = 0; j < PAGE_SIZE; j += x86_clflush_size) |
93 | MapPage(pva, page_to_phys(pages[i]), 0x001); |
95 | clflush(page_virtual + j); |
94 | for (j = 0; j < PAGE_SIZE; j += x86_clflush_size) |
96 | mb(); |
95 | clflush(pva + j); |
97 | } |
96 | } |
98 | FreeKernelSpace(page_virtual); |
97 | FreeKernelSpace(pva); |
Line 99... | Line -... | ||
99 | } |
- | |
100 | 98 | } |
|
101 | } |
99 | mb(); |
102 | EXPORT_SYMBOL(drm_clflush_pages); |
100 | } |
103 | - | ||
104 | #if 0 |
- | |
105 | void |
101 | EXPORT_SYMBOL(drm_clflush_pages); |
- | 102 | ||
Line -... | Line 103... | ||
- | 103 | void |
|
- | 104 | drm_clflush_sg(struct sg_table *st) |
|
- | 105 | { |
|
- | 106 | struct sg_page_iter sg_iter; |
|
- | 107 | struct page *page; |
|
- | 108 | ||
106 | drm_clflush_sg(struct sg_table *st) |
109 | uint8_t *pva; |
107 | { |
110 | unsigned int i; |
- | 111 | ||
108 | #if defined(CONFIG_X86) |
112 | pva = AllocKernelSpace(4096); |
109 | if (cpu_has_clflush) { |
- | |
Line 110... | Line 113... | ||
110 | struct sg_page_iter sg_iter; |
113 | if( pva != NULL) |
111 | - | ||
Line 112... | Line 114... | ||
112 | mb(); |
114 | { |
113 | for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) |
115 | mb(); |
114 | drm_clflush_page(sg_page_iter_page(&sg_iter)); |
116 | for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) |
115 | mb(); |
117 | { |
116 | 118 | page = sg_page_iter_page(&sg_iter); |
|
117 | return; |
119 | |
118 | } |
120 | MapPage(pva,page_to_phys(page), 0x001); |
119 | 121 | ||
Line -... | Line 122... | ||
- | 122 | for (i = 0; i < PAGE_SIZE; i += x86_clflush_size) |
|
120 | if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0) |
123 | clflush(pva + i); |
121 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); |
124 | }; |
122 | #else |
125 | FreeKernelSpace(pva); |
123 | printk(KERN_ERR "Architecture has no drm_cache.c support\n"); |
126 | }; |
124 | WARN_ON_ONCE(1); |
127 | mb(); |