Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /*
  2.  * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
  3.  *
  4.  * This file is part of FFmpeg.
  5.  *
  6.  * FFmpeg is free software; you can redistribute it and/or
  7.  * modify it under the terms of the GNU Lesser General Public
  8.  * License as published by the Free Software Foundation; either
  9.  * version 2.1 of the License, or (at your option) any later version.
  10.  *
  11.  * FFmpeg is distributed in the hope that it will be useful,
  12.  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13.  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14.  * Lesser General Public License for more details.
  15.  *
  16.  * You should have received a copy of the GNU Lesser General Public
  17.  * License along with FFmpeg; if not, write to the Free Software
  18.  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19.  */
  20.  
  21. #include "libavutil/x86/asm.h"
  22. #include "libavutil/cpu.h"
  23. #include "libswresample/swresample_internal.h"
  24.  
/*
 * x86-optimized int16 resampling cores (bodies built from the COMMON_CORE_*
 * macros below).
 * NOTE(review): parameter contract inferred from names — presumably dst
 * receives up to dst_size filtered samples from src, *consumed gets the
 * number of source samples used, and update_ctx nonzero updates c's
 * position state; confirm against libswresample/resample.c.
 */
int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_int16_ssse3(struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
  27.  
  28. DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2]    = { 0x0000000000004000ULL, 0x0000000000000000ULL};
  29.  
  30. #define COMMON_CORE_INT16_MMX2 \
  31.     x86_reg len= -2*c->filter_length;\
  32. __asm__ volatile(\
  33.     "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
  34.     "1:                         \n\t"\
  35.     "movq    (%1, %0), %%mm1    \n\t"\
  36.     "pmaddwd (%2, %0), %%mm1    \n\t"\
  37.     "paddd  %%mm1, %%mm0        \n\t"\
  38.     "add       $8, %0           \n\t"\
  39.     " js 1b                     \n\t"\
  40.     "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
  41.     "paddd %%mm1, %%mm0         \n\t"\
  42.     "psrad    $15, %%mm0        \n\t"\
  43.     "packssdw %%mm0, %%mm0      \n\t"\
  44.     "movd %%mm0, (%3)           \n\t"\
  45.     : "+r" (len)\
  46.     : "r" (((uint8_t*)(src+sample_index))-len),\
  47.       "r" (((uint8_t*)filter)-len),\
  48.       "r" (dst+dst_index)\
  49. );
  50.  
  51. #define COMMON_CORE_INT16_SSSE3 \
  52.     x86_reg len= -2*c->filter_length;\
  53. __asm__ volatile(\
  54.     "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
  55.     "1:                           \n\t"\
  56.     "movdqu  (%1, %0), %%xmm1     \n\t"\
  57.     "pmaddwd (%2, %0), %%xmm1     \n\t"\
  58.     "paddd  %%xmm1, %%xmm0        \n\t"\
  59.     "add       $16, %0            \n\t"\
  60.     " js 1b                       \n\t"\
  61.     "phaddd %%xmm0, %%xmm0        \n\t"\
  62.     "phaddd %%xmm0, %%xmm0        \n\t"\
  63.     "psrad    $15, %%xmm0         \n\t"\
  64.     "packssdw %%xmm0, %%xmm0      \n\t"\
  65.     "movd %%xmm0, (%3)            \n\t"\
  66.     : "+r" (len)\
  67.     : "r" (((uint8_t*)(src+sample_index))-len),\
  68.       "r" (((uint8_t*)filter)-len),\
  69.       "r" (dst+dst_index)\
  70. );
  71.