/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vc1dsp.h"
#include "dsputil_x86.h"
#include "vc1dsp.h"
#include "config.h"

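/* Prototypes for the external (assembly) 4- and 8-pixel loop filters of a
 * given instruction-set extension, plus static C wrappers that build the
 * 16-pixel filters by running the 8-pixel version on each half of the edge. */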
#define LOOP_FILTER(EXT) \
void ff_vc1_v_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_v_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
\
static void vc1_v_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_v_loop_filter8_ ## EXT(src,   stride, pq); \
    ff_vc1_v_loop_filter8_ ## EXT(src+8, stride, pq); \
} \
\
static void vc1_h_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_h_loop_filter8_ ## EXT(src,          stride, pq); \
    ff_vc1_h_loop_filter8_ ## EXT(src+8*stride, stride, pq); \
}

#if HAVE_YASM
LOOP_FILTER(mmxext)
LOOP_FILTER(sse2)
LOOP_FILTER(ssse3)

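/* SSE4 provides only a horizontal 8-pixel filter, so it gets a dedicated
 * prototype and 16-pixel wrapper instead of the LOOP_FILTER() macro. */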
void ff_vc1_h_loop_filter8_sse4(uint8_t *src, int stride, int pq);

static void vc1_h_loop_filter16_sse4(uint8_t *src, int stride, int pq)
{
    ff_vc1_h_loop_filter8_sse4(src,          stride, pq);
    ff_vc1_h_loop_filter8_sse4(src+8*stride, stride, pq);
}

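/* 8x8 averaging copy for the full-pel (0,0) subpel case, forwarded to the
 * generic MMXEXT pixel-averaging routine. */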
static void avg_vc1_mspel_mc00_mmxext(uint8_t *dst, const uint8_t *src,
                                      ptrdiff_t stride, int rnd)
{
    ff_avg_pixels8_mmxext(dst, src, stride, 8);
}
#endif /* HAVE_YASM */

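/* No-rounding 8x8 chroma motion compensation, implemented in external
 * assembly; x and y select the subpel interpolation filter. */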
void ff_put_vc1_chroma_mc8_nornd_mmx  (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_mmxext(uint8_t *dst, uint8_t *src,
                                        int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_3dnow(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_vc1_chroma_mc8_nornd_ssse3(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_ssse3(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);

av_cold void ff_vc1dsp_init_x86(VC1DSPContext *dsp)
{
    int cpu_flags = av_get_cpu_flags();

    if (INLINE_MMX(cpu_flags))
        ff_vc1dsp_init_mmx(dsp);

    if (INLINE_MMXEXT(cpu_flags))
        ff_vc1dsp_init_mmxext(dsp);

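/* Helper that wires all six loop-filter pointers to the variants of one
 * instruction-set extension in a single statement. */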
#define ASSIGN_LF(EXT) \
        dsp->vc1_v_loop_filter4  = ff_vc1_v_loop_filter4_ ## EXT; \
        dsp->vc1_h_loop_filter4  = ff_vc1_h_loop_filter4_ ## EXT; \
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_ ## EXT; \
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_ ## EXT; \
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_ ## EXT; \
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT

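/* The blocks below are ordered from least to most capable extension, so a
 * later block overwrites pointers set by an earlier one and the fastest
 * implementation supported by the CPU wins. */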
#if HAVE_YASM
    if (EXTERNAL_MMX(cpu_flags)) {
        dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_mmx;
    }
    if (EXTERNAL_AMD3DNOW(cpu_flags)) {
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_3dnow;
    }
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        ASSIGN_LF(mmxext);
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_mmxext;

        dsp->avg_vc1_mspel_pixels_tab[0]         = avg_vc1_mspel_mc00_mmxext;
    }
    if (EXTERNAL_SSE2(cpu_flags)) {
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_sse2;
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse2;
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_sse2;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse2;
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        ASSIGN_LF(ssse3);
        dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_ssse3;
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_ssse3;
    }
    if (EXTERNAL_SSE4(cpu_flags)) {
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse4;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4;
    }
#endif /* HAVE_YASM */
}
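
/* Typical use, as a sketch mirroring how libavcodec/vc1dsp.c drives this
 * initializer:
 *
 *     VC1DSPContext dsp;
 *     ff_vc1dsp_init(&dsp);         // fill in the C defaults
 *     if (ARCH_X86)
 *         ff_vc1dsp_init_x86(&dsp); // override with x86 versions if supported
 */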