;******************************************************************************
;* VP9 SIMD optimizations
;*
;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

; FIXME share with vp8dsp.asm
pw_256:   times 8 dw 256

%macro F8_TAPS 8
times 8 db %1, %2
times 8 db %3, %4
times 8 db %5, %6
times 8 db %7, %8
%endmacro
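; F8_TAPS stores one 8-tap filter phase as four 16-byte rows, each repeating
; one adjacent tap pair eight times. pmaddubsw multiplies a register of
; interleaved pixel pairs by one such row and sums each pair horizontally,
; so two taps are applied per instruction. The eight taps of every phase
; sum to 128, i.e. unit gain after the rounded 7-bit shift in the loops
; below.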
; int8_t ff_filters_ssse3[3][15][4][16]
const filters_ssse3 ; smooth
                    F8_TAPS -3, -1,  32,  64,  38,   1, -3,  0
                    F8_TAPS -2, -2,  29,  63,  41,   2, -3,  0
                    F8_TAPS -2, -2,  26,  63,  43,   4, -4,  0
                    F8_TAPS -2, -3,  24,  62,  46,   5, -4,  0
                    F8_TAPS -2, -3,  21,  60,  49,   7, -4,  0
                    F8_TAPS -1, -4,  18,  59,  51,   9, -4,  0
                    F8_TAPS -1, -4,  16,  57,  53,  12, -4, -1
                    F8_TAPS -1, -4,  14,  55,  55,  14, -4, -1
                    F8_TAPS -1, -4,  12,  53,  57,  16, -4, -1
                    F8_TAPS  0, -4,   9,  51,  59,  18, -4, -1
                    F8_TAPS  0, -4,   7,  49,  60,  21, -3, -2
                    F8_TAPS  0, -4,   5,  46,  62,  24, -3, -2
                    F8_TAPS  0, -4,   4,  43,  63,  26, -2, -2
                    F8_TAPS  0, -3,   2,  41,  63,  29, -2, -2
                    F8_TAPS  0, -3,   1,  38,  64,  32, -1, -3
                    ; regular
                    F8_TAPS  0,  1,  -5, 126,   8,  -3,  1,  0
                    F8_TAPS -1,  3, -10, 122,  18,  -6,  2,  0
                    F8_TAPS -1,  4, -13, 118,  27,  -9,  3, -1
                    F8_TAPS -1,  4, -16, 112,  37, -11,  4, -1
                    F8_TAPS -1,  5, -18, 105,  48, -14,  4, -1
                    F8_TAPS -1,  5, -19,  97,  58, -16,  5, -1
                    F8_TAPS -1,  6, -19,  88,  68, -18,  5, -1
                    F8_TAPS -1,  6, -19,  78,  78, -19,  6, -1
                    F8_TAPS -1,  5, -18,  68,  88, -19,  6, -1
                    F8_TAPS -1,  5, -16,  58,  97, -19,  5, -1
                    F8_TAPS -1,  4, -14,  48, 105, -18,  5, -1
                    F8_TAPS -1,  4, -11,  37, 112, -16,  4, -1
                    F8_TAPS -1,  3,  -9,  27, 118, -13,  4, -1
                    F8_TAPS  0,  2,  -6,  18, 122, -10,  3, -1
                    F8_TAPS  0,  1,  -3,   8, 126,  -5,  1,  0
                    ; sharp
                    F8_TAPS -1,  3,  -7, 127,   8,  -3,  1,  0
                    F8_TAPS -2,  5, -13, 125,  17,  -6,  3, -1
                    F8_TAPS -3,  7, -17, 121,  27, -10,  5, -2
                    F8_TAPS -4,  9, -20, 115,  37, -13,  6, -2
                    F8_TAPS -4, 10, -23, 108,  48, -16,  8, -3
                    F8_TAPS -4, 10, -24, 100,  59, -19,  9, -3
                    F8_TAPS -4, 11, -24,  90,  70, -21, 10, -4
                    F8_TAPS -4, 11, -23,  80,  80, -23, 11, -4
                    F8_TAPS -4, 10, -21,  70,  90, -24, 11, -4
                    F8_TAPS -3,  9, -19,  59, 100, -24, 10, -4
                    F8_TAPS -3,  8, -16,  48, 108, -23, 10, -4
                    F8_TAPS -2,  6, -13,  37, 115, -20,  9, -4
                    F8_TAPS -2,  5, -10,  27, 121, -17,  7, -3
                    F8_TAPS -1,  3,  -6,  17, 125, -13,  5, -2
                    F8_TAPS  0,  1,  -3,   8, 127,  -7,  3, -1

SECTION .text

%macro filter_h_fn 1
%assign %%px mmsize/2
cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
    mova        m6, [pw_256]
    mova        m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
    mova        m8, [filteryq+16]
    mova        m9, [filteryq+32]
    mova       m10, [filteryq+48]
%endif
.loop:
    movh        m0, [srcq-3]
    movh        m1, [srcq-2]
    movh        m2, [srcq-1]
    movh        m3, [srcq+0]
    movh        m4, [srcq+1]
    movh        m5, [srcq+2]
    punpcklbw   m0, m1
    punpcklbw   m2, m3
    movh        m1, [srcq+3]
    movh        m3, [srcq+4]
    add       srcq, sstrideq
    punpcklbw   m4, m5
    punpcklbw   m1, m3
    pmaddubsw   m0, m7
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw   m2, m8
    pmaddubsw   m4, m9
    pmaddubsw   m1, m10
%else
    pmaddubsw   m2, [filteryq+16]
    pmaddubsw   m4, [filteryq+32]
    pmaddubsw   m1, [filteryq+48]
%endif
    paddw       m0, m2
    paddw       m4, m1
    paddsw      m0, m4
    pmulhrsw    m0, m6
%ifidn %1, avg
    movh        m1, [dstq]
%endif
    packuswb    m0, m0
%ifidn %1, avg
    pavgb       m0, m1
%endif
    movh    [dstq], m0
    add       dstq, dstrideq
    dec         hd
    jg .loop
    RET
%endmacro
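; A rough scalar equivalent of one output pixel in the loop above (a sketch
; for orientation only, not FFmpeg's actual C reference; "filter" stands for
; the 8 signed taps). pmulhrsw with 256 computes (sum*256 + 0x4000) >> 15,
; i.e. the rounded 7-bit shift (sum + 64) >> 7, and packuswb provides the
; clip to 0..255:
;
;     int sum = 0;
;     for (int k = 0; k < 8; k++)
;         sum += filter[k] * src[x + k - 3];
;     dst[x] = av_clip_uint8((sum + 64) >> 7);
;
; The two pmaddubsw pair-sums are combined with a saturating paddsw so that
; intermediate overflow clamps instead of wrapping.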

INIT_MMX ssse3
filter_h_fn put
filter_h_fn avg

INIT_XMM ssse3
filter_h_fn put
filter_h_fn avg
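; With INIT_MMX, mmsize is 8 and %%px expands to 4; with INIT_XMM, mmsize is
; 16 and %%px expands to 8, so the lines above emit the 4- and 8-pixel-wide
; put/avg variants (e.g. ff_put_8tap_1d_h_4_ssse3, assuming the usual ff_
; prefix and cpuflags suffix added by cglobal) for the C-side init code.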

%macro filter_v_fn 1
%assign %%px mmsize/2
%if ARCH_X86_64
cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
%else
cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
    mov   filteryq, r5mp
%define hd r4mp
%endif
    sub       srcq, sstrideq
    lea  sstride3q, [sstrideq*3]
    sub       srcq, sstrideq
    mova        m6, [pw_256]
    sub       srcq, sstrideq
    mova        m7, [filteryq+ 0]
    lea      src4q, [srcq+sstrideq*4]
%if ARCH_X86_64 && mmsize > 8
    mova        m8, [filteryq+16]
    mova        m9, [filteryq+32]
    mova       m10, [filteryq+48]
%endif
.loop:
    ; FIXME maybe reuse loads from previous rows, or just
    ; more generally unroll this to prevent multiple loads of
    ; the same data?
    movh        m0, [srcq]
    movh        m1, [srcq+sstrideq]
    movh        m2, [srcq+sstrideq*2]
    movh        m3, [srcq+sstride3q]
    movh        m4, [src4q]
    movh        m5, [src4q+sstrideq]
    punpcklbw   m0, m1
    punpcklbw   m2, m3
    movh        m1, [src4q+sstrideq*2]
    movh        m3, [src4q+sstride3q]
    add       srcq, sstrideq
    add      src4q, sstrideq
    punpcklbw   m4, m5
    punpcklbw   m1, m3
    pmaddubsw   m0, m7
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw   m2, m8
    pmaddubsw   m4, m9
    pmaddubsw   m1, m10
%else
    pmaddubsw   m2, [filteryq+16]
    pmaddubsw   m4, [filteryq+32]
    pmaddubsw   m1, [filteryq+48]
%endif
    paddw       m0, m2
    paddw       m4, m1
    paddsw      m0, m4
    pmulhrsw    m0, m6
%ifidn %1, avg
    movh        m1, [dstq]
%endif
    packuswb    m0, m0
%ifidn %1, avg
    pavgb       m0, m1
%endif
    movh    [dstq], m0
    add       dstq, dstrideq
    dec         hd
    jg .loop
    RET
%endmacro
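; Same arithmetic as the horizontal filter, but the eight taps span rows
; srcq-3*stride .. srcq+4*stride: srcq is rewound by three rows up front and
; src4q tracks srcq+4*stride, so all eight rows are addressable with the
; stride, a 2*stride scale and the precomputed 3*stride. On x86-32 there are
; too few general-purpose registers to also keep h in one, hence the hd
; redefinition to the r4mp stack slot.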

INIT_MMX ssse3
filter_v_fn put
filter_v_fn avg

INIT_XMM ssse3
filter_v_fn put
filter_v_fn avg

%macro fpel_fn 6
%if %2 == 4
%define %%srcfn movh
%define %%dstfn movh
%else
%define %%srcfn movu
%define %%dstfn mova
%endif

%if %2 <= 16
cglobal %1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
    lea  sstride3q, [sstrideq*3]
    lea  dstride3q, [dstrideq*3]
%else
cglobal %1%2, 5, 5, 4, dst, dstride, src, sstride, h
%endif
.loop:
    %%srcfn     m0, [srcq]
    %%srcfn     m1, [srcq+s%3]
    %%srcfn     m2, [srcq+s%4]
    %%srcfn     m3, [srcq+s%5]
    lea       srcq, [srcq+sstrideq*%6]
%ifidn %1, avg
    pavgb       m0, [dstq]
    pavgb       m1, [dstq+d%3]
    pavgb       m2, [dstq+d%4]
    pavgb       m3, [dstq+d%5]
%endif
    %%dstfn [dstq], m0
    %%dstfn [dstq+d%3], m1
    %%dstfn [dstq+d%4], m2
    %%dstfn [dstq+d%5], m3
    lea       dstq, [dstq+dstrideq*%6]
    sub         hd, %6
    jnz .loop
    RET
%endmacro
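; fpel_fn emits the whole-pixel copy/average functions: %1 = put/avg,
; %2 = block width, %3-%5 = offsets of the 2nd-4th load/store per iteration,
; %6 = rows stepped per iteration. Each offset argument is pasted behind an
; s (source) or d (dest) prefix, so strideq becomes sstrideq/dstrideq, while
; the s16/d16 defines below map a literal mmsize offset (expanded to 16)
; back to plain 16. For example, "fpel_fn put, 32, mmsize, strideq,
; strideq+mmsize, 2" copies two rows of 32 pixels per iteration as two
; 16-byte halves each.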

%define d16 16
%define s16 16
INIT_MMX mmx
fpel_fn put, 4,  strideq, strideq*2, stride3q, 4
fpel_fn put, 8,  strideq, strideq*2, stride3q, 4
INIT_MMX sse
fpel_fn avg, 4,  strideq, strideq*2, stride3q, 4
fpel_fn avg, 8,  strideq, strideq*2, stride3q, 4
INIT_XMM sse
fpel_fn put, 16, strideq, strideq*2, stride3q, 4
fpel_fn put, 32, mmsize,  strideq,   strideq+mmsize, 2
fpel_fn put, 64, mmsize,  mmsize*2,  mmsize*3, 1
INIT_XMM sse2
fpel_fn avg, 16, strideq, strideq*2, stride3q, 4
fpel_fn avg, 32, mmsize,  strideq,   strideq+mmsize, 2
fpel_fn avg, 64, mmsize,  mmsize*2,  mmsize*3, 1
%undef s16
%undef d16