/*
 * SIMD-optimized HuffYUV encoding functions
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/huffyuvencdsp.h"
#include "libavcodec/mathops.h"

#if HAVE_INLINE_ASM

/* dst[i] = src1[i] - src2[i] for i in [0, w): the MMX loop subtracts
 * 16 bytes per iteration; the C loop below handles the remaining tail
 * (and the whole buffer when w < 16). */
static void diff_bytes_mmx(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w)
{
    x86_reg i = 0;

    if (w >= 16)
        __asm__ volatile (
            "1:                             \n\t"
            "movq  (%2, %0), %%mm0          \n\t" // src2
            "movq  (%1, %0), %%mm1          \n\t" // src1
            "psubb %%mm0, %%mm1             \n\t" // src1 - src2
            "movq %%mm1, (%3, %0)           \n\t"
            "movq 8(%2, %0), %%mm0          \n\t"
            "movq 8(%1, %0), %%mm1          \n\t"
            "psubb %%mm0, %%mm1             \n\t"
            "movq %%mm1, 8(%3, %0)          \n\t"
            "add $16, %0                    \n\t"
            "cmp %4, %0                     \n\t" // loop while i < w - 15
            " jb 1b                         \n\t"
            : "+r" (i)
            : "r" (src1), "r" (src2), "r" (dst), "r" ((x86_reg) w - 15));

    for (; i < w; i++)
        dst[i + 0] = src1[i + 0] - src2[i + 0];
}

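/* Illustrative scalar equivalent (hypothetical name, not part of FFmpeg's
 * build): the bytewise subtraction the MMX loop above vectorizes. Kept
 * under #if 0 so it stays out of the compiled object. */
#if 0
static void diff_bytes_ref(uint8_t *dst, const uint8_t *src1,
                           const uint8_t *src2, int w)
{
    int i;
    for (i = 0; i < w; i++)
        dst[i] = src1[i] - src2[i]; /* wraps modulo 256, like psubb */
}
#endif
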
/* HuffYUV median prediction: for each pixel X the predictor is the median
 * of L (left neighbour), T (top neighbour) and L + T - LT (gradient), and
 * the residual X - pred is written to dst. src1 is the previous row, src2
 * the current row. */
static void sub_hfyu_median_pred_mmxext(uint8_t *dst, const uint8_t *src1,
                                        const uint8_t *src2, int w,
                                        int *left, int *left_top)
{
    x86_reg i = 0;
    uint8_t l, lt;

    __asm__ volatile (
        "movq  (%1, %0), %%mm0          \n\t" // LT
        "psllq $8, %%mm0                \n\t"
        "1:                             \n\t"
        "movq  (%1, %0), %%mm1          \n\t" // T
        "movq  -1(%2, %0), %%mm2        \n\t" // L
        "movq  (%2, %0), %%mm3          \n\t" // X
        "movq %%mm2, %%mm4              \n\t" // L
        "psubb %%mm0, %%mm2             \n\t"
        "paddb %%mm1, %%mm2             \n\t" // L + T - LT
        "movq %%mm4, %%mm5              \n\t" // L
        "pmaxub %%mm1, %%mm4            \n\t" // max(T, L)
        "pminub %%mm5, %%mm1            \n\t" // min(T, L)
        "pminub %%mm2, %%mm4            \n\t" // min(max(T, L), L + T - LT)
        "pmaxub %%mm1, %%mm4            \n\t" // median(T, L, L + T - LT)
        "psubb %%mm4, %%mm3             \n\t" // dst - pred
        "movq %%mm3, (%3, %0)           \n\t"
        "add $8, %0                     \n\t"
        "movq -1(%1, %0), %%mm0         \n\t" // LT
        "cmp %4, %0                     \n\t"
        " jb 1b                         \n\t"
        : "+r" (i)
        : "r" (src1), "r" (src2), "r" (dst), "r" ((x86_reg) w));

    l  = *left;
    lt = *left_top;

    /* The first pixel has no left neighbour in this row, so redo it in C
     * with the L and LT values carried over from the previous row. */
    dst[0] = src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt) & 0xFF);

    *left_top = src1[w - 1];
    *left     = src2[w - 1];
}

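/* Illustrative scalar equivalent (hypothetical name, not part of FFmpeg's
 * build): what the MMXEXT loop above computes eight pixels at a time.
 * mid_pred() comes from libavcodec/mathops.h, already included above. */
#if 0
static void sub_hfyu_median_pred_ref(uint8_t *dst, const uint8_t *src1,
                                     const uint8_t *src2, int w,
                                     int *left, int *left_top)
{
    int i;
    uint8_t l  = *left;     /* L: value left of the current pixel       */
    uint8_t lt = *left_top; /* LT: value above-left of the current pixel */

    for (i = 0; i < w; i++) {
        const int pred = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF);
        dst[i] = src2[i] - pred; /* store the prediction residual */
        lt = src1[i];
        l  = src2[i];
    }

    *left_top = lt; /* == src1[w - 1] */
    *left     = l;  /* == src2[w - 1] */
}
#endif
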
#endif /* HAVE_INLINE_ASM */

av_cold void ff_huffyuvencdsp_init_x86(HuffYUVEncDSPContext *c)
{
#if HAVE_INLINE_ASM
    int cpu_flags = av_get_cpu_flags();

    if (INLINE_MMX(cpu_flags)) {
        c->diff_bytes = diff_bytes_mmx;
    }

    if (INLINE_MMXEXT(cpu_flags)) {
        c->sub_hfyu_median_pred = sub_hfyu_median_pred_mmxext;
    }
#endif /* HAVE_INLINE_ASM */
}

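/* Usage sketch (hypothetical caller, not from FFmpeg): in the library the
 * generic ff_huffyuvencdsp_init() installs the C fallbacks first and then
 * invokes this x86 initializer, so every pointer ends up valid; calling
 * only the x86 initializer, as below, leaves a pointer NULL when the CPU
 * lacks the matching extension. */
#if 0
static void example(void)
{
    HuffYUVEncDSPContext c = { 0 };
    uint8_t cur[256] = { 0 }, prev[256] = { 0 }, residual[256];

    ff_huffyuvencdsp_init_x86(&c);  /* installs MMX/MMXEXT versions */
    if (c.diff_bytes)               /* NULL here if the CPU lacks MMX */
        c.diff_bytes(residual, cur, prev, 256); /* residual = cur - prev */
}
#endif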