;******************************************************************************
;* MMX optimized DSP utils
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2003-2013 Michael Niedermayer
;* Copyright (c) 2013 Daniel Kang
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

INIT_MMX mmxext
; void pixels(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro PIXELS48 2
; OP selects the move width: movh (4 bytes) for the pixels4 variants,
; mova (8 bytes, i.e. movq under INIT_MMX) for the pixels8 variants.
%if %2 == 4
%define OP movh
%else
%define OP mova
%endif
cglobal %1_pixels%2, 4,5
    movsxdifnidn r2, r2d
    lea          r4, [r2*3]       ; r4 = 3*line_size
; four rows are processed per iteration, so h is expected to be a multiple of 4
.loop:
    OP           m0, [r1]
    OP           m1, [r1+r2]
    OP           m2, [r1+r2*2]
    OP           m3, [r1+r4]
    lea          r1, [r1+r2*4]
%ifidn %1, avg
    ; avg variant: average the source rows with what is already in block
    pavgb        m0, [r0]
    pavgb        m1, [r0+r2]
    pavgb        m2, [r0+r2*2]
    pavgb        m3, [r0+r4]
%endif
    OP         [r0], m0
    OP      [r0+r2], m1
    OP    [r0+r2*2], m2
    OP      [r0+r4], m3
    sub         r3d, 4
    lea          r0, [r0+r2*4]
    jne       .loop
    RET
%endmacro

; instantiate the mmxext put/avg variants for 4- and 8-pixel-wide blocks
PIXELS48 put, 4
PIXELS48 avg, 4
PIXELS48 put, 8
PIXELS48 avg, 8


INIT_XMM sse2
; void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
cglobal put_pixels16, 4,5,4
    lea          r4, [r2*3]       ; r4 = 3*line_size
.loop:
    ; the source may be unaligned (movu); block is assumed 16-byte aligned (mova)
    movu         m0, [r1]
    movu         m1, [r1+r2]
    movu         m2, [r1+r2*2]
    movu         m3, [r1+r4]
    lea          r1, [r1+r2*4]
    mova       [r0], m0
    mova    [r0+r2], m1
    mova  [r0+r2*2], m2
    mova    [r0+r4], m3
    sub         r3d, 4
    lea          r0, [r0+r2*4]
    jnz       .loop
    REP_RET

; void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
cglobal avg_pixels16, 4,5,4
    lea          r4, [r2*3]       ; r4 = 3*line_size
.loop:
    movu         m0, [r1]
    movu         m1, [r1+r2]
    movu         m2, [r1+r2*2]
    movu         m3, [r1+r4]
    lea          r1, [r1+r2*4]
    ; pavgb with a memory operand likewise requires the 16-byte-aligned block
    pavgb        m0, [r0]
    pavgb        m1, [r0+r2]
    pavgb        m2, [r0+r2*2]
    pavgb        m3, [r0+r4]
    mova       [r0], m0
    mova    [r0+r2], m1
    mova  [r0+r2*2], m2
    mova    [r0+r4], m3
    sub         r3d, 4
    lea          r0, [r0+r2*4]
    jnz       .loop
    REP_RET
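
For reference, the operation these kernels implement can be sketched in scalar C. The names put_pixels_c and avg_pixels_c and the extra width parameter below are illustrative only, not FFmpeg's exported API; in the assembly the block width is baked into each instantiation, four rows are consumed per loop iteration (so h should be a multiple of 4), and the avg variants use pavgb, which computes the rounded byte average (a + b + 1) >> 1.

#include <stddef.h>
#include <stdint.h>

/* Copy h rows of `width` pixels from pixels to block (put variants). */
void put_pixels_c(uint8_t *block, const uint8_t *pixels,
                  ptrdiff_t line_size, int h, int width)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < width; x++)
            block[x] = pixels[x];
        block  += line_size;
        pixels += line_size;
    }
}

/* Average the source into the destination, rounding up like pavgb (avg variants). */
void avg_pixels_c(uint8_t *block, const uint8_t *pixels,
                  ptrdiff_t line_size, int h, int width)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < width; x++)
            block[x] = (uint8_t)((block[x] + pixels[x] + 1) >> 1);
        block  += line_size;
        pixels += line_size;
    }
}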