/*
 * VP8 compatible video decoder
 *
 * Copyright (C) 2010 David Conrad
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/vp8dsp.h"
#include "dsputil_altivec.h"

#if HAVE_ALTIVEC
#define REPT4(...) { __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__ }
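// REPT4 repeats a 4-byte filter pattern across all 16 bytes of a vector,
// so one vec_msum applies the same four taps to four output pixels at once.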
 
// h subpel filter uses msum to multiply+add 4 pixel taps at once
static const vec_s8 h_subpel_filters_inner[7] =
{
    REPT4( -6, 123,  12,  -1),
    REPT4(-11, 108,  36,  -8),
    REPT4( -9,  93,  50,  -6),
    REPT4(-16,  77,  77, -16),
    REPT4( -6,  50,  93,  -9),
    REPT4( -8,  36, 108, -11),
    REPT4( -1,  12, 123,  -6),
};

// for 6tap filters, these are the outer two taps
// The zeros mask off pixels 4-7 when filtering 0-3
// and vice-versa
static const vec_s8 h_subpel_filters_outer[3] =
{
    REPT4(0, 0, 2, 1),
    REPT4(0, 0, 3, 3),
    REPT4(0, 0, 1, 2),
};

#define LOAD_H_SUBPEL_FILTER(i) \
    vec_s8 filter_inner  = h_subpel_filters_inner[i]; \
    vec_s8 filter_outerh = h_subpel_filters_outer[(i)>>1]; \
    vec_s8 filter_outerl = vec_sld(filter_outerh, filter_outerh, 2)
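// Only the odd-indexed filters (1, 3, 5, i.e. mx == 2, 4, 6) have nonzero
// outer taps, so (i)>>1 maps them onto the three entries of
// h_subpel_filters_outer.  vec_sld rotates the pattern by two bytes so the
// zero taps mask the opposite half of the pixels, as noted above.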
 
#define FILTER_H(dstv, off) \
    a = vec_ld((off)-is6tap-1,    src); \
    b = vec_ld((off)-is6tap-1+15, src); \
\
    pixh  = vec_perm(a, b, permh##off); \
    pixl  = vec_perm(a, b, perml##off); \
    filth = vec_msum(filter_inner, pixh, c64); \
    filtl = vec_msum(filter_inner, pixl, c64); \
\
    if (is6tap) { \
        outer = vec_perm(a, b, perm_6tap##off); \
        filth = vec_msum(filter_outerh, outer, filth); \
        filtl = vec_msum(filter_outerl, outer, filtl); \
    } \
    if (w == 4) \
        filtl = filth; /* discard pixels 4-7 */ \
    dstv = vec_packs(filth, filtl); \
    dstv = vec_sra(dstv, c7)
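// FILTER_H loads the two aligned vectors that span the unaligned source
// window, gathers each output pixel's taps with vec_perm, and accumulates
// them with vec_msum into 32-bit sums seeded with the rounding constant
// c64.  The taps of each filter sum to 128, so adding 64 and shifting
// right by 7 (c7) yields the rounded result.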
 
static av_always_inline
void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                 uint8_t *src, ptrdiff_t src_stride,
                                 int h, int mx, int w, int is6tap)
{
    LOAD_H_SUBPEL_FILTER(mx-1);
    vec_u8 align_vec0, align_vec8, permh0, permh8, filt;
    vec_u8 perm_6tap0, perm_6tap8, perml0, perml8;
    vec_u8 a, b, pixh, pixl, outer;
    vec_s16 f16h, f16l;
    vec_s32 filth, filtl;

    vec_u8 perm_inner6 = { 1,2,3,4, 2,3,4,5, 3,4,5,6, 4,5,6,7 };
    vec_u8 perm_inner4 = { 0,1,2,3, 1,2,3,4, 2,3,4,5, 3,4,5,6 };
    vec_u8 perm_inner  = is6tap ? perm_inner6 : perm_inner4;
    vec_u8 perm_outer = { 4,9, 0,5, 5,10, 1,6, 6,11, 2,7, 7,12, 3,8 };
    vec_s32 c64 = vec_sl(vec_splat_s32(1), vec_splat_u32(6));
    vec_u16 c7  = vec_splat_u16(7);

    align_vec0 = vec_lvsl( -is6tap-1, src);
    align_vec8 = vec_lvsl(8-is6tap-1, src);

    permh0     = vec_perm(align_vec0, align_vec0, perm_inner);
    permh8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_inner = vec_add(perm_inner, vec_splat_u8(4));
    perml0     = vec_perm(align_vec0, align_vec0, perm_inner);
    perml8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_6tap0 = vec_perm(align_vec0, align_vec0, perm_outer);
    perm_6tap8 = vec_perm(align_vec8, align_vec8, perm_outer);

    while (h-- > 0) {
        FILTER_H(f16h, 0);

        if (w == 16) {
            FILTER_H(f16l, 8);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
            if (w == 8)
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
        }
        src += src_stride;
        dst += dst_stride;
    }
}
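// In the horizontal core above, the vec_lvsl alignment permutations are
// folded into the tap-gathering permutations once, outside the loop, so
// each FILTER_H costs just two aligned loads plus permutes.  For w == 4
// and w == 8 the result is written with vec_ste as 32-bit elements, which
// relies on dst being at least 4-byte aligned.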
 
// v subpel filter does a simple vertical multiply + add
static const vec_u8 v_subpel_filters[7] =
{
    { 0,   6, 123,  12,   1,   0 },
    { 2,  11, 108,  36,   8,   1 },
    { 0,   9,  93,  50,   6,   0 },
    { 3,  16,  77,  77,  16,   3 },
    { 0,   6,  50,  93,   9,   0 },
    { 1,   8,  36, 108,  11,   2 },
    { 0,   1,  12, 123,   6,   0 },
};
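// The taps are stored as unsigned magnitudes; the signs of the VP8 filter
// (+, -, +, +, -, +) are applied in FILTER_V below, which subtracts the
// s1 and s4 products and adds the rest.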
 
#define LOAD_V_SUBPEL_FILTER(i) \
    vec_u8 subpel_filter = v_subpel_filters[i]; \
    vec_u8 f0 = vec_splat(subpel_filter, 0); \
    vec_u8 f1 = vec_splat(subpel_filter, 1); \
    vec_u8 f2 = vec_splat(subpel_filter, 2); \
    vec_u8 f3 = vec_splat(subpel_filter, 3); \
    vec_u8 f4 = vec_splat(subpel_filter, 4); \
    vec_u8 f5 = vec_splat(subpel_filter, 5)
 
#define FILTER_V(dstv, vec_mul) \
    s1f = (vec_s16)vec_mul(s1, f1); \
    s2f = (vec_s16)vec_mul(s2, f2); \
    s3f = (vec_s16)vec_mul(s3, f3); \
    s4f = (vec_s16)vec_mul(s4, f4); \
    s2f = vec_subs(s2f, s1f); \
    s3f = vec_subs(s3f, s4f); \
    if (is6tap) { \
        s0f = (vec_s16)vec_mul(s0, f0); \
        s5f = (vec_s16)vec_mul(s5, f5); \
        s2f = vec_adds(s2f, s0f); \
        s3f = vec_adds(s3f, s5f); \
    } \
    dstv = vec_adds(s2f, s3f); \
    dstv = vec_adds(dstv, c64); \
    dstv = vec_sra(dstv, c7)
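// The load permutation places pixels 0-7 in the even byte lanes and 8-15
// in the odd ones (see the comment in the vertical core below), so
// FILTER_V is expanded once with vec_mule for the low half and once with
// vec_mulo for the high half.  The saturating subtract/add ordering
// applies the tap signs while keeping the 16-bit intermediates in range.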
 
static av_always_inline
void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                 uint8_t *src, ptrdiff_t src_stride,
                                 int h, int my, int w, int is6tap)
{
    LOAD_V_SUBPEL_FILTER(my-1);
    vec_u8 s0, s1, s2, s3, s4, s5, filt, align_vech, perm_vec, align_vecl;
    vec_s16 s0f, s1f, s2f, s3f, s4f, s5f, f16h, f16l;
    vec_s16 c64 = vec_sl(vec_splat_s16(1), vec_splat_u16(6));
    vec_u16 c7  = vec_splat_u16(7);

    // we want pixels 0-7 to be in the even positions and 8-15 in the odd,
    // so combine this permute with the alignment permute vector
    align_vech = vec_lvsl(0, src);
    align_vecl = vec_sld(align_vech, align_vech, 8);
    if (w == 16)
        perm_vec = vec_mergeh(align_vech, align_vecl);
    else
        perm_vec = vec_mergeh(align_vech, align_vech);

    if (is6tap)
        s0 = load_with_perm_vec(-2*src_stride, src, perm_vec);
    s1 = load_with_perm_vec(-1*src_stride, src, perm_vec);
    s2 = load_with_perm_vec( 0*src_stride, src, perm_vec);
    s3 = load_with_perm_vec( 1*src_stride, src, perm_vec);
    if (is6tap)
        s4 = load_with_perm_vec( 2*src_stride, src, perm_vec);

    src += (2+is6tap)*src_stride;

    while (h-- > 0) {
        if (is6tap)
            s5 = load_with_perm_vec(0, src, perm_vec);
        else
            s4 = load_with_perm_vec(0, src, perm_vec);

        FILTER_V(f16h, vec_mule);

        if (w == 16) {
            FILTER_V(f16l, vec_mulo);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            if (w == 4)
                filt = (vec_u8)vec_splat((vec_u32)filt, 0);
            else
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
        }

        if (is6tap)
            s0 = s1;
        s1 = s2;
        s2 = s3;
        s3 = s4;
        if (is6tap)
            s4 = s5;

        dst += dst_stride;
        src += src_stride;
    }
}
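// The vertical core above keeps its source rows in registers as a sliding
// window: each iteration loads only the one new bottom row and shifts
// s0..s5 (s1..s4 in the 4-tap case) down by one, so each output row costs
// a single load.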
 
#define EPEL_FUNCS(WIDTH, TAPS) \
static av_noinline \
void put_vp8_epel ## WIDTH ## _h ## TAPS ## _altivec(uint8_t *dst, ptrdiff_t dst_stride, uint8_t *src, ptrdiff_t src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_h_altivec_core(dst, dst_stride, src, src_stride, h, mx, WIDTH, TAPS == 6); \
} \
\
static av_noinline \
void put_vp8_epel ## WIDTH ## _v ## TAPS ## _altivec(uint8_t *dst, ptrdiff_t dst_stride, uint8_t *src, ptrdiff_t src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_v_altivec_core(dst, dst_stride, src, src_stride, h, my, WIDTH, TAPS == 6); \
}

#define EPEL_HV(WIDTH, HTAPS, VTAPS) \
static void put_vp8_epel ## WIDTH ## _h ## HTAPS ## v ## VTAPS ## _altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
{ \
    DECLARE_ALIGNED(16, uint8_t, tmp)[(2*WIDTH+5)*16]; \
    if (VTAPS == 6) { \
        put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16,      src-2*sstride, sstride, h+5, mx, my); \
        put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, dstride, tmp+2*16,      16,      h,   mx, my); \
    } else { \
        put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16,      src-sstride, sstride, h+4, mx, my); \
        put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, dstride, tmp+16,      16,      h,   mx, my); \
    } \
}
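// EPEL_HV runs the 2-D filter in two passes through an aligned temporary
// buffer with a fixed 16-byte stride: the horizontal pass also covers the
// extra rows the vertical filter reaches above and below the block
// (starting 2 rows up and filtering h+5 rows for a 6-tap vertical filter,
// 1 row up and h+4 rows for 4-tap), and the vertical pass then reads the
// temporary starting at the row matching the block's top edge.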
 
EPEL_FUNCS(16,6)
EPEL_FUNCS(8, 6)
EPEL_FUNCS(8, 4)
EPEL_FUNCS(4, 6)
EPEL_FUNCS(4, 4)

EPEL_HV(16, 6,6)
EPEL_HV(8,  6,6)
EPEL_HV(8,  4,6)
EPEL_HV(8,  6,4)
EPEL_HV(8,  4,4)
EPEL_HV(4,  6,6)
EPEL_HV(4,  4,6)
EPEL_HV(4,  6,4)
EPEL_HV(4,  4,4)
 
static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my)
{
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, src);
    int i;
    register ptrdiff_t dstride2 = dstride << 1, sstride2 = sstride << 1;
    register ptrdiff_t dstride3 = dstride2 + dstride, sstride3 = sstride + sstride2;
    register ptrdiff_t dstride4 = dstride << 2, sstride4 = sstride << 2;

// hand-unrolling the loop by 4 gains about 15%
// minimum execution time goes from 74 to 60 cycles
// it's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, src);
        pixelsv2  = vec_ld(15, src);
        pixelsv1B = vec_ld(sstride, src);
        pixelsv2B = vec_ld(15 + sstride, src);
        pixelsv1C = vec_ld(sstride2, src);
        pixelsv2C = vec_ld(15 + sstride2, src);
        pixelsv1D = vec_ld(sstride3, src);
        pixelsv2D = vec_ld(15 + sstride3, src);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)dst);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               dstride, (unsigned char*)dst);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               dstride2, (unsigned char*)dst);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               dstride3, (unsigned char*)dst);
        src += sstride4;
        dst += dstride4;
    }
}
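// put_vp8_pixels16_altivec above is an unaligned 16-byte copy: vec_ld
// ignores the low four address bits, so loads at offsets 0 and 15 fetch
// the two aligned vectors spanning the source row, and vec_perm with the
// vec_lvsl permutation extracts the 16 unaligned bytes.  The vec_st
// stores rely on dst being 16-byte aligned.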
 
#endif /* HAVE_ALTIVEC */
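// put_vp8_epel_pixels_tab is indexed [size][vtaps][htaps], as the
// assignments below show: the first index selects the block width
// (0 = 16, 1 = 8, 2 = 4) and the other two select the filter in each
// direction (0 = none, 1 = 4-tap, 2 = 6-tap).  Entries not set here
// presumably keep the C implementations installed by the generic init.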
 
av_cold void ff_vp8dsp_init_ppc(VP8DSPContext *c)
{
#if HAVE_ALTIVEC
    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return;

    c->put_vp8_epel_pixels_tab[0][0][0] = put_vp8_pixels16_altivec;
    c->put_vp8_epel_pixels_tab[0][0][2] = put_vp8_epel16_h6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][0] = put_vp8_epel16_v6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][2] = put_vp8_epel16_h6v6_altivec;

    c->put_vp8_epel_pixels_tab[1][0][2] = put_vp8_epel8_h6_altivec;
    c->put_vp8_epel_pixels_tab[1][2][0] = put_vp8_epel8_v6_altivec;
    c->put_vp8_epel_pixels_tab[1][0][1] = put_vp8_epel8_h4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][0] = put_vp8_epel8_v4_altivec;

    c->put_vp8_epel_pixels_tab[1][2][2] = put_vp8_epel8_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[1][1][1] = put_vp8_epel8_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][2] = put_vp8_epel8_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[1][2][1] = put_vp8_epel8_h4v6_altivec;

    c->put_vp8_epel_pixels_tab[2][0][2] = put_vp8_epel4_h6_altivec;
    c->put_vp8_epel_pixels_tab[2][2][0] = put_vp8_epel4_v6_altivec;
    c->put_vp8_epel_pixels_tab[2][0][1] = put_vp8_epel4_h4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][0] = put_vp8_epel4_v4_altivec;

    c->put_vp8_epel_pixels_tab[2][2][2] = put_vp8_epel4_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[2][1][1] = put_vp8_epel4_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][2] = put_vp8_epel4_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[2][2][1] = put_vp8_epel4_h4v6_altivec;
#endif /* HAVE_ALTIVEC */
}