Rev 1891 | Go to most recent revision | Show entire file | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1891 | Rev 3931 | ||
---|---|---|---|
Line 1... | Line -... | ||
1 | /* WARNING: This file is generated by combine.pl from combine.inc. |
- | |
2 | Please edit one of those files rather than this one. */ |
- | |
3 | - | ||
4 | #line 1 "pixman-combine.c.template" |
- | |
5 | - | ||
6 | #define COMPONENT_SIZE 8 |
1 | #define COMPONENT_SIZE 8 |
7 | #define MASK 0xff |
2 | #define MASK 0xff |
8 | #define ONE_HALF 0x80 |
3 | #define ONE_HALF 0x80 |
Line 9... | Line 4... | ||
9 | 4 | ||
Line 23... | Line 18... | ||
23 | #define RED_8(x) (((x) >> R_SHIFT) & MASK) |
18 | #define RED_8(x) (((x) >> R_SHIFT) & MASK) |
24 | #define GREEN_8(x) (((x) >> G_SHIFT) & MASK) |
19 | #define GREEN_8(x) (((x) >> G_SHIFT) & MASK) |
25 | #define BLUE_8(x) ((x) & MASK) |
20 | #define BLUE_8(x) ((x) & MASK) |
Line 26... | Line 21... | ||
26 | 21 | ||
- | 22 | /* |
|
- | 23 | * ARMv6 has UQADD8 instruction, which implements unsigned saturated |
|
- | 24 | * addition for 8-bit values packed in 32-bit registers. It is very useful |
|
- | 25 | * for UN8x4_ADD_UN8x4, UN8_rb_ADD_UN8_rb and ADD_UN8 macros (which would |
|
- | 26 | * otherwise need a lot of arithmetic operations to simulate this operation). |
|
- | 27 | * Since most of the major ARM linux distros are built for ARMv7, we are |
|
- | 28 | * much less dependent on runtime CPU detection and can get practical |
|
- | 29 | * benefits from conditional compilation here for a lot of users. |
|
- | 30 | */ |
|
- | 31 | ||
- | 32 | #if defined(USE_GCC_INLINE_ASM) && defined(__arm__) && \ |
|
- | 33 | !defined(__aarch64__) && (!defined(__thumb__) || defined(__thumb2__)) |
|
- | 34 | #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \ |
|
- | 35 | defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \ |
|
- | 36 | defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) || \ |
|
- | 37 | defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7__) || \ |
|
- | 38 | defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || \ |
|
- | 39 | defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__) |
|
- | 40 | ||
- | 41 | static force_inline uint32_t |
|
- | 42 | un8x4_add_un8x4 (uint32_t x, uint32_t y) |
|
- | 43 | { |
|
- | 44 | uint32_t t; |
|
- | 45 | asm ("uqadd8 %0, %1, %2" : "=r" (t) : "%r" (x), "r" (y)); |
|
- | 46 | return t; |
|
- | 47 | } |
|
- | 48 | ||
- | 49 | #define UN8x4_ADD_UN8x4(x, y) \ |
|
- | 50 | ((x) = un8x4_add_un8x4 ((x), (y))) |
|
- | 51 | ||
- | 52 | #define UN8_rb_ADD_UN8_rb(x, y, t) \ |
|
- | 53 | ((t) = un8x4_add_un8x4 ((x), (y)), (x) = (t)) |
|
- | 54 | ||
- | 55 | #define ADD_UN8(x, y, t) \ |
|
- | 56 | ((t) = (x), un8x4_add_un8x4 ((t), (y))) |
|
- | 57 | ||
- | 58 | #endif |
|
- | 59 | #endif |
|
- | 60 | ||
- | 61 | /*****************************************************************************/ |
|
- | 62 | ||
27 | /* |
63 | /* |
28 | * Helper macros. |
64 | * Helper macros. |
Line 29... | Line 65... | ||
29 | */ |
65 | */ |
30 | 66 | ||
Line 31... | Line 67... | ||
31 | #define MUL_UN8(a, b, t) \ |
67 | #define MUL_UN8(a, b, t) \ |
32 | ((t) = (a) * (b) + ONE_HALF, ((((t) >> G_SHIFT ) + (t) ) >> G_SHIFT )) |
68 | ((t) = (a) * (uint16_t)(b) + ONE_HALF, ((((t) >> G_SHIFT ) + (t) ) >> G_SHIFT )) |
Line -... | Line 69... | ||
- | 69 | ||
33 | 70 | #define DIV_UN8(a, b) \ |
|
34 | #define DIV_UN8(a, b) \ |
71 | (((uint16_t) (a) * MASK + ((b) / 2)) / (b)) |
35 | (((uint16_t) (a) * MASK) / (b)) |
72 | |
- | 73 | #ifndef ADD_UN8 |
|
Line 36... | Line 74... | ||
36 | 74 | #define ADD_UN8(x, y, t) \ |
|
37 | #define ADD_UN8(x, y, t) \ |
75 | ((t) = (x) + (y), \ |
Line 38... | Line 76... | ||
38 | ((t) = (x) + (y), \ |
76 | (uint32_t) (uint8_t) ((t) | (0 - ((t) >> G_SHIFT)))) |
Line 59... | Line 97... | ||
59 | } while (0) |
97 | } while (0) |
Line 60... | Line 98... | ||
60 | 98 | ||
61 | /* |
99 | /* |
62 | * x_rb = min (x_rb + y_rb, 255) |
100 | * x_rb = min (x_rb + y_rb, 255) |
- | 101 | */ |
|
63 | */ |
102 | #ifndef UN8_rb_ADD_UN8_rb |
64 | #define UN8_rb_ADD_UN8_rb(x, y, t) \ |
103 | #define UN8_rb_ADD_UN8_rb(x, y, t) \ |
65 | do \ |
104 | do \ |
66 | { \ |
105 | { \ |
67 | t = ((x) + (y)); \ |
106 | t = ((x) + (y)); \ |
68 | t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \ |
107 | t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \ |
69 | x = (t & RB_MASK); \ |
108 | x = (t & RB_MASK); \ |
- | 109 | } while (0) |
|
Line 70... | Line 110... | ||
70 | } while (0) |
110 | #endif |
71 | 111 | ||
72 | /* |
112 | /* |
73 | * x_rb = (x_rb * a_rb) / 255 |
113 | * x_rb = (x_rb * a_rb) / 255 |
Line 211... | Line 251... | ||
211 | } while (0) |
251 | } while (0) |
Line 212... | Line 252... | ||
212 | 252 | ||
213 | /* |
253 | /* |
214 | x_c = min(x_c + y_c, 255) |
254 | x_c = min(x_c + y_c, 255) |
- | 255 | */ |
|
215 | */ |
256 | #ifndef UN8x4_ADD_UN8x4 |
216 | #define UN8x4_ADD_UN8x4(x, y) \ |
257 | #define UN8x4_ADD_UN8x4(x, y) \ |
217 | do \ |
258 | do \ |
218 | { \ |
259 | { \ |
219 | uint32_t r1__, r2__, r3__, t__; \ |
260 | uint32_t r1__, r2__, r3__, t__; \ |
Line 226... | Line 267... | ||
226 | r3__ = ((y) >> G_SHIFT) & RB_MASK; \ |
267 | r3__ = ((y) >> G_SHIFT) & RB_MASK; \ |
227 | UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \ |
268 | UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \ |
228 | \ |
269 | \ |
229 | x = r1__ | (r2__ << G_SHIFT); \ |
270 | x = r1__ | (r2__ << G_SHIFT); \ |
230 | } while (0) |
271 | } while (0) |
- | 272 | #endif