/*
 * Copyright (C) 2009 David Conrad
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/vp3dsp.h"

#if HAVE_ALTIVEC

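/* Entry k of this table is cos(k * M_PI / 16) scaled by 2^16 and rounded
 * (entry 0 is unused padding; only C1..C7 are splatted below). These are
 * the fixed-point cosine terms of the VP3/Theora 8-point IDCT. */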
static const vec_s16 constants =
    {0, 64277, 60547, 54491, 46341, 36410, 25080, 12785};
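
/* Permute mask that gathers the high 16 bits of each 32-bit product from
 * the vec_mule/vec_mulo pair below, restoring the original element order;
 * the byte indices of the high halfword differ between the big- and
 * little-endian layouts of a 32-bit word. */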
#if HAVE_BIGENDIAN
static const vec_u8 interleave_high =
    {0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29};
#else
static const vec_u8 interleave_high =
    {2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31};
#endif

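/* IDCT_START declares the working vectors, splats the rounding bias (8)
 * and shift amount (4), splats the seven cosine constants, and loads the
 * eight 8-coefficient rows of the 16-byte-aligned block. */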
#define IDCT_START \
    vec_s16 A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;\
    vec_s16 Ed, Gd, Add, Bdd, Fd, Hd;\
    vec_s16 eight = vec_splat_s16(8);\
    vec_u16 four = vec_splat_u16(4);\
\
    vec_s16 C1 = vec_splat(constants, 1);\
    vec_s16 C2 = vec_splat(constants, 2);\
    vec_s16 C3 = vec_splat(constants, 3);\
    vec_s16 C4 = vec_splat(constants, 4);\
    vec_s16 C5 = vec_splat(constants, 5);\
    vec_s16 C6 = vec_splat(constants, 6);\
    vec_s16 C7 = vec_splat(constants, 7);\
\
    vec_s16 b0 = vec_ld(0x00, block);\
    vec_s16 b1 = vec_ld(0x10, block);\
    vec_s16 b2 = vec_ld(0x20, block);\
    vec_s16 b3 = vec_ld(0x30, block);\
    vec_s16 b4 = vec_ld(0x40, block);\
    vec_s16 b5 = vec_ld(0x50, block);\
    vec_s16 b6 = vec_ld(0x60, block);\
    vec_s16 b7 = vec_ld(0x70, block);

// These helpers compute (a*C)>>16. The subtlety: a is signed, but the
// cosine constants are unsigned 16-bit values stored in a signed vector,
// so vec_mule/vec_mulo treat any C >= 0x8000 as C - 0x10000. Because
// a*C == a*(C - 0x10000) + (a << 16), the true (a*C)>>16 is recovered by
// adding a back in afterwards.
// M15 suffices when C fits in 15 unsigned bits (C6, C7);
// M16 adds the correction for constants needing all 16 bits (C1..C5).
static inline vec_s16 M15(vec_s16 a, vec_s16 C)
{
    return (vec_s16)vec_perm(vec_mule(a, C), vec_mulo(a, C), interleave_high);
}
static inline vec_s16 M16(vec_s16 a, vec_s16 C)
{
    return vec_add(a, M15(a, C));
}

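/* One 1-D pass of the 8-point VP3 IDCT butterfly, applied to all eight
 * vectors at once; it is run twice, with TRANSPOSE8 in between, to form
 * the full 2-D transform. ADD injects the rounding bias into the DC path
 * (E and F), from which it propagates to every output term, and SHIFT
 * performs the final arithmetic >>4; both are no-ops on the first pass. */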
#define IDCT_1D(ADD, SHIFT)\
    A = vec_add(M16(b1, C1), M15(b7, C7));\
    B = vec_sub(M15(b1, C7), M16(b7, C1));\
    C = vec_add(M16(b3, C3), M16(b5, C5));\
    D = vec_sub(M16(b5, C3), M16(b3, C5));\
\
    Ad = M16(vec_sub(A, C), C4);\
    Bd = M16(vec_sub(B, D), C4);\
\
    Cd = vec_add(A, C);\
    Dd = vec_add(B, D);\
\
    E = ADD(M16(vec_add(b0, b4), C4));\
    F = ADD(M16(vec_sub(b0, b4), C4));\
\
    G = vec_add(M16(b2, C2), M15(b6, C6));\
    H = vec_sub(M15(b2, C6), M16(b6, C2));\
\
    Ed = vec_sub(E, G);\
    Gd = vec_add(E, G);\
\
    Add = vec_add(F, Ad);\
    Bdd = vec_sub(Bd, H);\
\
    Fd = vec_sub(F, Ad);\
    Hd = vec_add(Bd, H);\
\
    b0 = SHIFT(vec_add(Gd, Cd));\
    b7 = SHIFT(vec_sub(Gd, Cd));\
\
    b1 = SHIFT(vec_add(Add, Hd));\
    b2 = SHIFT(vec_sub(Add, Hd));\
\
    b3 = SHIFT(vec_add(Ed, Dd));\
    b4 = SHIFT(vec_sub(Ed, Dd));\
\
    b5 = SHIFT(vec_add(Fd, Bdd));\
    b6 = SHIFT(vec_sub(Fd, Bdd));

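/* Pass parameters for IDCT_1D: the first pass keeps full precision (NOP),
 * the second adds the bias of 8 and shifts right by 4 for rounding. */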
#define NOP(a) a
#define ADD8(a) vec_add(a, eight)
#define SHIFT4(a) vec_sra(a, four)

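/* Full 2-D IDCT whose result overwrites the destination: the signed
 * output is biased up by 128 so it lands in the unsigned pixel range
 * before being packed with saturation, and the coefficient block is
 * zeroed afterwards. */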
static void vp3_idct_put_altivec(uint8_t *dst, int stride, int16_t block[64])
{
    vec_u8 t;
    IDCT_START

    // the IDCT output is signed but the destination pixels are unsigned,
    // so add 128*16: after the final >>4 this becomes the +128 bias that
    // recenters the result, on top of the normal rounding 8
    vec_s16 v2048 = vec_sl(vec_splat_s16(1), vec_splat_u16(11));
    eight = vec_add(eight, v2048);

    IDCT_1D(NOP, NOP)
    TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7);
    IDCT_1D(ADD8, SHIFT4)

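/* Pack one row of results to unsigned bytes with saturation and store
 * the 8 pixels as two 4-byte elements. */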
#define PUT(a)\
    t = vec_packsu(a, a);\
    vec_ste((vec_u32)t, 0, (unsigned int *)dst);\
    vec_ste((vec_u32)t, 4, (unsigned int *)dst);

    PUT(b0)     dst += stride;
    PUT(b1)     dst += stride;
    PUT(b2)     dst += stride;
    PUT(b3)     dst += stride;
    PUT(b4)     dst += stride;
    PUT(b5)     dst += stride;
    PUT(b6)     dst += stride;
    PUT(b7)
    memset(block, 0, sizeof(*block) * 64);
}

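/* Same 2-D IDCT, but the result is added to the existing pixels (the
 * residual path), using saturating 16-bit adds before the pack; the
 * coefficient block is again zeroed afterwards. */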
static void vp3_idct_add_altivec(uint8_t *dst, int stride, int16_t block[64])
{
    LOAD_ZERO;
    vec_u8 t, vdst;
    vec_s16 vdst_16;
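    /* Big-endian path: merging 0xFF bytes with the lvsl realignment
     * indices yields a permute mask that, applied against the zero
     * vector, both realigns the loaded dst bytes and zero-extends them
     * to 16 bits in a single vec_perm. */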
    vec_u8 vdst_mask = vec_mergeh(vec_splat_u8(-1), vec_lvsl(0, dst));

    IDCT_START

    IDCT_1D(NOP, NOP)
    TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7);
    IDCT_1D(ADD8, SHIFT4)

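/* GET_VDST16 zero-extends the next 8 destination pixels to 16-bit signed
 * values: via the permute mask above on big-endian, via an unaligned VSX
 * load plus a merge with the zero vector on little-endian. */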
#if HAVE_BIGENDIAN
#define GET_VDST16\
    vdst = vec_ld(0, dst);\
    vdst_16 = (vec_s16)vec_perm(vdst, zero_u8v, vdst_mask);
#else
#define GET_VDST16\
    vdst = vec_vsx_ld(0, dst);\
    vdst_16 = (vec_s16)vec_mergeh(vdst, zero_u8v);
#endif

#define ADD(a)\
    GET_VDST16;\
    vdst_16 = vec_adds(a, vdst_16);\
    t = vec_packsu(vdst_16, vdst_16);\
    vec_ste((vec_u32)t, 0, (unsigned int *)dst);\
    vec_ste((vec_u32)t, 4, (unsigned int *)dst);

    ADD(b0)     dst += stride;
    ADD(b1)     dst += stride;
    ADD(b2)     dst += stride;
    ADD(b3)     dst += stride;
    ADD(b4)     dst += stride;
    ADD(b5)     dst += stride;
    ADD(b6)     dst += stride;
    ADD(b7)
    memset(block, 0, sizeof(*block) * 64);
}

#endif /* HAVE_ALTIVEC */

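/* Install the AltiVec IDCT routines only when the CPU reports AltiVec
 * support at runtime. */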
av_cold void ff_vp3dsp_init_ppc(VP3DSPContext *c, int flags)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    c->idct_put = vp3_idct_put_altivec;
    c->idct_add = vp3_idct_add_altivec;
#endif
}