/*
 * XVID MPEG-4 VIDEO CODEC
 * - SSE2 inverse discrete cosine transform -
 *
 * Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
 *
 * Conversion to gcc syntax with modifications
 * by Alexander Strange <astrange@ithinksw.com>
 *
 * Originally from dct/x86_asm/fdct_sse2_skal.asm in Xvid.
 *
 * This file is part of FFmpeg.
 *
 * Vertical pass is an implementation of the scheme:
 *  Loeffler C., Ligtenberg A., and Moschytz C.S.:
 *  Practical Fast 1D DCT Algorithm with Eleven Multiplications,
 *  Proc. ICASSP 1989, 988-991.
 *
 * Horizontal pass is a double 4x4 vector/matrix multiplication
 * (see also Intel's Application Note 922:
 *  http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
 *  Copyright (C) 1999 Intel Corporation)
 *
 * More details at http://skal.planet-d.net/coding/dct.html
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with FFmpeg; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
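
/*
 * For orientation: the code below computes the standard separable 8x8
 * inverse DCT as a row pass followed by a column pass.  The following
 * floating-point reference model is illustrative only (the assembly
 * uses fixed-point tables and its own operand ordering, not these
 * literal loops) and is kept out of the build:
 */
#if 0
#include <math.h>

static void ref_idct8x8(short block[64])
{
    double tmp[64];
    int x, y, u;

    /* row pass: an 8-point 1D IDCT on each row */
    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++) {
            double s = 0.0;
            for (u = 0; u < 8; u++) {
                double cu = u ? 0.5 : 0.5 / sqrt(2.0); /* C(u)/2 */
                s += cu * block[8 * y + u] *
                     cos((2 * x + 1) * u * M_PI / 16.0);
            }
            tmp[8 * y + x] = s;
        }

    /* column pass, then round back to integers */
    for (x = 0; x < 8; x++)
        for (y = 0; y < 8; y++) {
            double s = 0.0;
            for (u = 0; u < 8; u++) {
                double cu = u ? 0.5 : 0.5 / sqrt(2.0);
                s += cu * tmp[8 * u + x] *
                     cos((2 * y + 1) * u * M_PI / 16.0);
            }
            block[8 * y + x] = (short)lrint(s);
        }
}
#endif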

#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "idct_xvid.h"
#include "dsputil_x86.h"

#if HAVE_SSE2_INLINE

/**
 * @file
 * @brief SSE2 IDCT compatible with the Xvid MMX IDCT (xvidmmx)
 */

#define X8(x)     x,x,x,x,x,x,x,x

#define ROW_SHIFT 11
#define COL_SHIFT 6

DECLARE_ASM_CONST(16, int16_t, tan1)[] = {X8(13036)}; // tan( pi/16)
DECLARE_ASM_CONST(16, int16_t, tan2)[] = {X8(27146)}; // tan(2pi/16) = sqrt(2)-1
DECLARE_ASM_CONST(16, int16_t, tan3)[] = {X8(43790)}; // tan(3pi/16)-1 (43790 wraps to -21746 as int16_t)
DECLARE_ASM_CONST(16, int16_t, sqrt2)[]= {X8(23170)}; // 0.5/sqrt(2)
DECLARE_ASM_CONST(8,  uint8_t, m127)[] = {X8(127)};
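
/*
 * The tan/sqrt constants above are fixed-point values scaled by 2^16
 * and consumed with pmulhw, which keeps the high 16 bits of the signed
 * 32-bit product, i.e. computes (a*b) >> 16.  For example,
 * round(tan(pi/16) * 65536) = 13036.  43790 does not fit in int16_t
 * and wraps to -21746 = round((tan(3pi/16) - 1) * 65536), which is why
 * the column pass multiplies by tan3 and then adds the operand back in
 * with paddsw.  A scalar model of the primitive (not built):
 */
#if 0
#include <stdint.h>

static int16_t pmulhw_scalar(int16_t a, int16_t b)
{
    /* high 16 bits of the signed product, as pmulhw keeps them */
    return (int16_t)(((int32_t)a * b) >> 16);
}
/* pmulhw_scalar(x, 13036) approximates x * tan(pi/16). */
#endif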

DECLARE_ASM_CONST(16, int16_t, iTab1)[] = {
 0x4000, 0x539f, 0xc000, 0xac61, 0x4000, 0xdd5d, 0x4000, 0xdd5d,
 0x4000, 0x22a3, 0x4000, 0x22a3, 0xc000, 0x539f, 0x4000, 0xac61,
 0x3249, 0x11a8, 0x4b42, 0xee58, 0x11a8, 0x4b42, 0x11a8, 0xcdb7,
 0x58c5, 0x4b42, 0xa73b, 0xcdb7, 0x3249, 0xa73b, 0x4b42, 0xa73b
};

DECLARE_ASM_CONST(16, int16_t, iTab2)[] = {
 0x58c5, 0x73fc, 0xa73b, 0x8c04, 0x58c5, 0xcff5, 0x58c5, 0xcff5,
 0x58c5, 0x300b, 0x58c5, 0x300b, 0xa73b, 0x73fc, 0x58c5, 0x8c04,
 0x45bf, 0x187e, 0x6862, 0xe782, 0x187e, 0x6862, 0x187e, 0xba41,
 0x7b21, 0x6862, 0x84df, 0xba41, 0x45bf, 0x84df, 0x6862, 0x84df
};

DECLARE_ASM_CONST(16, int16_t, iTab3)[] = {
 0x539f, 0x6d41, 0xac61, 0x92bf, 0x539f, 0xd2bf, 0x539f, 0xd2bf,
 0x539f, 0x2d41, 0x539f, 0x2d41, 0xac61, 0x6d41, 0x539f, 0x92bf,
 0x41b3, 0x1712, 0x6254, 0xe8ee, 0x1712, 0x6254, 0x1712, 0xbe4d,
 0x73fc, 0x6254, 0x8c04, 0xbe4d, 0x41b3, 0x8c04, 0x6254, 0x8c04
};

DECLARE_ASM_CONST(16, int16_t, iTab4)[] = {
 0x4b42, 0x6254, 0xb4be, 0x9dac, 0x4b42, 0xd746, 0x4b42, 0xd746,
 0x4b42, 0x28ba, 0x4b42, 0x28ba, 0xb4be, 0x6254, 0x4b42, 0x9dac,
 0x3b21, 0x14c3, 0x587e, 0xeb3d, 0x14c3, 0x587e, 0x14c3, 0xc4df,
 0x6862, 0x587e, 0x979e, 0xc4df, 0x3b21, 0x979e, 0x587e, 0x979e
};

DECLARE_ASM_CONST(16, int32_t, walkenIdctRounders)[] = {
 65536, 65536, 65536, 65536,
  3597,  3597,  3597,  3597,
  2260,  2260,  2260,  2260,
  1203,  1203,  1203,  1203,
   120,   120,   120,   120,
   512,   512,   512,   512
};
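
/*
 * Each row result is biased with one of these rounders before the
 * arithmetic shift by ROW_SHIFT in iMTX_MULT ("paddd ; psrad $11"),
 * so truncation rounds rather than floors.  The column pass shifts by
 * COL_SHIFT without a rounder of its own, so its rounding bias appears
 * to be folded into these constants as well.  Scalar model (not built):
 */
#if 0
#include <stdint.h>

static int16_t row_round_shift(int32_t acc, int32_t rounder)
{
    /* matches "paddd rounder ; psrad $11"; the following packssdw
     * additionally saturates each result to the int16_t range */
    return (int16_t)((acc + rounder) >> 11);
}
#endif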

// Temporary storage before the column pass
#define ROW1 "%%xmm6"
#define ROW3 "%%xmm4"
#define ROW5 "%%xmm5"
#define ROW7 "%%xmm7"

#define CLEAR_ODD(r) "pxor  "r","r" \n\t"
#define PUT_ODD(dst) "pshufhw  $0x1B, %%xmm2, "dst"   \n\t"

#if ARCH_X86_64

# define ROW0 "%%xmm8"
# define REG0 ROW0
# define ROW2 "%%xmm9"
# define REG2 ROW2
# define ROW4 "%%xmm10"
# define REG4 ROW4
# define ROW6 "%%xmm11"
# define REG6 ROW6
# define CLEAR_EVEN(r) CLEAR_ODD(r)
# define PUT_EVEN(dst) PUT_ODD(dst)
# define XMMS "%%xmm12"
# define MOV_32_ONLY "#"
# define SREG2 REG2
# define TAN3 "%%xmm13"
# define TAN1 "%%xmm14"

#else

# define ROW0 "(%0)"
# define REG0 "%%xmm4"
# define ROW2 "2*16(%0)"
# define REG2 "%%xmm4"
# define ROW4 "4*16(%0)"
# define REG4 "%%xmm6"
# define ROW6 "6*16(%0)"
# define REG6 "%%xmm6"
# define CLEAR_EVEN(r)
# define PUT_EVEN(dst) \
    "pshufhw  $0x1B, %%xmm2, %%xmm2   \n\t" \
    "movdqa          %%xmm2, "dst"    \n\t"
# define XMMS "%%xmm2"
# define MOV_32_ONLY "movdqa "
# define SREG2 "%%xmm7"
# define TAN3 "%%xmm0"
# define TAN1 "%%xmm2"

#endif

#define ROUND(x) "paddd   "MANGLE(x)

#define JZ(reg, to)                         \
    "testl     "reg","reg"            \n\t" \
    "jz        "to"                   \n\t"

#define JNZ(reg, to)                        \
    "testl     "reg","reg"            \n\t" \
    "jnz       "to"                   \n\t"

#define TEST_ONE_ROW(src, reg, clear)       \
    clear                                   \
    "movq     "src", %%mm1            \n\t" \
    "por    8+"src", %%mm1            \n\t" \
    "paddusb  %%mm0, %%mm1            \n\t" \
    "pmovmskb %%mm1, "reg"            \n\t"

#define TEST_TWO_ROWS(row1, row2, reg1, reg2, clear1, clear2) \
    clear1                                  \
    clear2                                  \
    "movq     "row1", %%mm1           \n\t" \
    "por    8+"row1", %%mm1           \n\t" \
    "movq     "row2", %%mm2           \n\t" \
    "por    8+"row2", %%mm2           \n\t" \
    "paddusb   %%mm0, %%mm1           \n\t" \
    "paddusb   %%mm0, %%mm2           \n\t" \
    "pmovmskb  %%mm1, "reg1"          \n\t" \
    "pmovmskb  %%mm2, "reg2"          \n\t"

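/*
 * The TEST_* macros detect all-zero rows without inspecting individual
 * coefficients: the two 8-byte halves of a 16-byte row are OR'd
 * together, m127 (127 in every byte, preloaded into %mm0) is added
 * with unsigned saturation so that any nonzero byte ends up with its
 * high bit set, and pmovmskb gathers those high bits into a
 * general-purpose register, which is zero iff the row is zero.
 * Scalar model (not built):
 */
#if 0
#include <stdint.h>

static int row_nonzero_mask(const uint8_t row[16])
{
    int mask = 0, i;
    for (i = 0; i < 8; i++) {
        unsigned b = (unsigned)(row[i] | row[8 + i]) + 127; /* por ; paddusb */
        if (b > 255)
            b = 255;                     /* unsigned byte saturation */
        mask |= ((b >> 7) & 1) << i;     /* pmovmskb */
    }
    return mask;                         /* 0 iff all 16 bytes are 0 */
}
#endif
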
/// IDCT pass on rows.
#define iMTX_MULT(src, table, rounder, put) \
    "movdqa        "src", %%xmm3      \n\t" \
    "movdqa       %%xmm3, %%xmm0      \n\t" \
    "pshufd   $0x11, %%xmm3, %%xmm1   \n\t" /* 4602 */ \
    "punpcklqdq   %%xmm0, %%xmm0      \n\t" /* 0246 */ \
    "pmaddwd     "table", %%xmm0      \n\t" \
    "pmaddwd  16+"table", %%xmm1      \n\t" \
    "pshufd   $0xBB, %%xmm3, %%xmm2   \n\t" /* 5713 */ \
    "punpckhqdq   %%xmm3, %%xmm3      \n\t" /* 1357 */ \
    "pmaddwd  32+"table", %%xmm2      \n\t" \
    "pmaddwd  48+"table", %%xmm3      \n\t" \
    "paddd        %%xmm1, %%xmm0      \n\t" \
    "paddd        %%xmm3, %%xmm2      \n\t" \
    rounder",     %%xmm0              \n\t" \
    "movdqa       %%xmm2, %%xmm3      \n\t" \
    "paddd        %%xmm0, %%xmm2      \n\t" \
    "psubd        %%xmm3, %%xmm0      \n\t" \
    "psrad           $11, %%xmm2      \n\t" \
    "psrad           $11, %%xmm0      \n\t" \
    "packssdw     %%xmm0, %%xmm2      \n\t" \
    put                                     \
    "1:                               \n\t"

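/*
 * The core of iMTX_MULT is four pmaddwd instructions against
 * consecutive 16-byte slices of the table: pmaddwd multiplies eight
 * int16 pairs and adds adjacent 32-bit products, so each instruction
 * produces four 2-term dot products, and pairs of them combine into
 * the double 4x4 vector/matrix multiplication named in the header.
 * Scalar model of the primitive (not built):
 */
#if 0
#include <stdint.h>

static void pmaddwd_scalar(const int16_t a[8], const int16_t b[8],
                           int32_t out[4])
{
    int i;
    for (i = 0; i < 4; i++)
        out[i] = (int32_t)a[2 * i]     * b[2 * i] +
                 (int32_t)a[2 * i + 1] * b[2 * i + 1];
}
#endif
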
#define iLLM_HEAD                           \
    "movdqa   "MANGLE(tan3)", "TAN3"  \n\t" \
    "movdqa   "MANGLE(tan1)", "TAN1"  \n\t" \

/// IDCT pass on columns.
#define iLLM_PASS(dct)                      \
    "movdqa   "TAN3", %%xmm1          \n\t" \
    "movdqa   "TAN1", %%xmm3          \n\t" \
    "pmulhw   %%xmm4, "TAN3"          \n\t" \
    "pmulhw   %%xmm5, %%xmm1          \n\t" \
    "paddsw   %%xmm4, "TAN3"          \n\t" \
    "paddsw   %%xmm5, %%xmm1          \n\t" \
    "psubsw   %%xmm5, "TAN3"          \n\t" \
    "paddsw   %%xmm4, %%xmm1          \n\t" \
    "pmulhw   %%xmm7, %%xmm3          \n\t" \
    "pmulhw   %%xmm6, "TAN1"          \n\t" \
    "paddsw   %%xmm6, %%xmm3          \n\t" \
    "psubsw   %%xmm7, "TAN1"          \n\t" \
    "movdqa   %%xmm3, %%xmm7          \n\t" \
    "movdqa   "TAN1", %%xmm6          \n\t" \
    "psubsw   %%xmm1, %%xmm3          \n\t" \
    "psubsw   "TAN3", "TAN1"          \n\t" \
    "paddsw   %%xmm7, %%xmm1          \n\t" \
    "paddsw   %%xmm6, "TAN3"          \n\t" \
    "movdqa   %%xmm3, %%xmm6          \n\t" \
    "psubsw   "TAN3", %%xmm3          \n\t" \
    "paddsw   %%xmm6, "TAN3"          \n\t" \
    "movdqa   "MANGLE(sqrt2)", %%xmm4 \n\t" \
    "pmulhw   %%xmm4, %%xmm3          \n\t" \
    "pmulhw   %%xmm4, "TAN3"          \n\t" \
    "paddsw   "TAN3", "TAN3"          \n\t" \
    "paddsw   %%xmm3, %%xmm3          \n\t" \
    "movdqa   "MANGLE(tan2)", %%xmm7  \n\t" \
    MOV_32_ONLY ROW2", "REG2"         \n\t" \
    MOV_32_ONLY ROW6", "REG6"         \n\t" \
    "movdqa   %%xmm7, %%xmm5          \n\t" \
    "pmulhw   "REG6", %%xmm7          \n\t" \
    "pmulhw   "REG2", %%xmm5          \n\t" \
    "paddsw   "REG2", %%xmm7          \n\t" \
    "psubsw   "REG6", %%xmm5          \n\t" \
    MOV_32_ONLY ROW0", "REG0"         \n\t" \
    MOV_32_ONLY ROW4", "REG4"         \n\t" \
    MOV_32_ONLY"  "TAN1", (%0)        \n\t" \
    "movdqa   "REG0", "XMMS"          \n\t" \
    "psubsw   "REG4", "REG0"          \n\t" \
    "paddsw   "XMMS", "REG4"          \n\t" \
    "movdqa   "REG4", "XMMS"          \n\t" \
    "psubsw   %%xmm7, "REG4"          \n\t" \
    "paddsw   "XMMS", %%xmm7          \n\t" \
    "movdqa   "REG0", "XMMS"          \n\t" \
    "psubsw   %%xmm5, "REG0"          \n\t" \
    "paddsw   "XMMS", %%xmm5          \n\t" \
    "movdqa   %%xmm5, "XMMS"          \n\t" \
    "psubsw   "TAN3", %%xmm5          \n\t" \
    "paddsw   "XMMS", "TAN3"          \n\t" \
    "movdqa   "REG0", "XMMS"          \n\t" \
    "psubsw   %%xmm3, "REG0"          \n\t" \
    "paddsw   "XMMS", %%xmm3          \n\t" \
    MOV_32_ONLY"  (%0), "TAN1"        \n\t" \
    "psraw        $6, %%xmm5          \n\t" \
    "psraw        $6, "REG0"          \n\t" \
    "psraw        $6, "TAN3"          \n\t" \
    "psraw        $6, %%xmm3          \n\t" \
    "movdqa   "TAN3", 1*16("dct")     \n\t" \
    "movdqa   %%xmm3, 2*16("dct")     \n\t" \
    "movdqa   "REG0", 5*16("dct")     \n\t" \
    "movdqa   %%xmm5, 6*16("dct")     \n\t" \
    "movdqa   %%xmm7, %%xmm0          \n\t" \
    "movdqa   "REG4", %%xmm4          \n\t" \
    "psubsw   %%xmm1, %%xmm7          \n\t" \
    "psubsw   "TAN1", "REG4"          \n\t" \
    "paddsw   %%xmm0, %%xmm1          \n\t" \
    "paddsw   %%xmm4, "TAN1"          \n\t" \
    "psraw        $6, %%xmm1          \n\t" \
    "psraw        $6, %%xmm7          \n\t" \
    "psraw        $6, "TAN1"          \n\t" \
    "psraw        $6, "REG4"          \n\t" \
    "movdqa   %%xmm1, ("dct")         \n\t" \
    "movdqa   "TAN1", 3*16("dct")     \n\t" \
    "movdqa   "REG4", 4*16("dct")     \n\t" \
    "movdqa   %%xmm7, 7*16("dct")     \n\t"

/// IDCT pass on columns, assuming rows 4-7 are zero.
#define iLLM_PASS_SPARSE(dct)               \
    "pmulhw   %%xmm4, "TAN3"          \n\t" \
    "paddsw   %%xmm4, "TAN3"          \n\t" \
    "movdqa   %%xmm6, %%xmm3          \n\t" \
    "pmulhw   %%xmm6, "TAN1"          \n\t" \
    "movdqa   %%xmm4, %%xmm1          \n\t" \
    "psubsw   %%xmm1, %%xmm3          \n\t" \
    "paddsw   %%xmm6, %%xmm1          \n\t" \
    "movdqa   "TAN1", %%xmm6          \n\t" \
    "psubsw   "TAN3", "TAN1"          \n\t" \
    "paddsw   %%xmm6, "TAN3"          \n\t" \
    "movdqa   %%xmm3, %%xmm6          \n\t" \
    "psubsw   "TAN3", %%xmm3          \n\t" \
    "paddsw   %%xmm6, "TAN3"          \n\t" \
    "movdqa   "MANGLE(sqrt2)", %%xmm4 \n\t" \
    "pmulhw   %%xmm4, %%xmm3          \n\t" \
    "pmulhw   %%xmm4, "TAN3"          \n\t" \
    "paddsw   "TAN3", "TAN3"          \n\t" \
    "paddsw   %%xmm3, %%xmm3          \n\t" \
    "movdqa   "MANGLE(tan2)", %%xmm5  \n\t" \
    MOV_32_ONLY ROW2", "SREG2"        \n\t" \
    "pmulhw   "SREG2", %%xmm5         \n\t" \
    MOV_32_ONLY ROW0", "REG0"         \n\t" \
    "movdqa   "REG0", %%xmm6          \n\t" \
    "psubsw   "SREG2", %%xmm6         \n\t" \
    "paddsw   "REG0", "SREG2"         \n\t" \
    MOV_32_ONLY"  "TAN1", (%0)        \n\t" \
    "movdqa   "REG0", "XMMS"          \n\t" \
    "psubsw   %%xmm5, "REG0"          \n\t" \
    "paddsw   "XMMS", %%xmm5          \n\t" \
    "movdqa   %%xmm5, "XMMS"          \n\t" \
    "psubsw   "TAN3", %%xmm5          \n\t" \
    "paddsw   "XMMS", "TAN3"          \n\t" \
    "movdqa   "REG0", "XMMS"          \n\t" \
    "psubsw   %%xmm3, "REG0"          \n\t" \
    "paddsw   "XMMS", %%xmm3          \n\t" \
    MOV_32_ONLY"  (%0), "TAN1"        \n\t" \
    "psraw        $6, %%xmm5          \n\t" \
    "psraw        $6, "REG0"          \n\t" \
    "psraw        $6, "TAN3"          \n\t" \
    "psraw        $6, %%xmm3          \n\t" \
    "movdqa   "TAN3", 1*16("dct")     \n\t" \
    "movdqa   %%xmm3, 2*16("dct")     \n\t" \
    "movdqa   "REG0", 5*16("dct")     \n\t" \
    "movdqa   %%xmm5, 6*16("dct")     \n\t" \
    "movdqa   "SREG2", %%xmm0         \n\t" \
    "movdqa   %%xmm6, %%xmm4          \n\t" \
    "psubsw   %%xmm1, "SREG2"         \n\t" \
    "psubsw   "TAN1", %%xmm6          \n\t" \
    "paddsw   %%xmm0, %%xmm1          \n\t" \
    "paddsw   %%xmm4, "TAN1"          \n\t" \
    "psraw        $6, %%xmm1          \n\t" \
    "psraw        $6, "SREG2"         \n\t" \
    "psraw        $6, "TAN1"          \n\t" \
    "psraw        $6, %%xmm6          \n\t" \
    "movdqa   %%xmm1, ("dct")         \n\t" \
    "movdqa   "TAN1", 3*16("dct")     \n\t" \
    "movdqa   %%xmm6, 4*16("dct")     \n\t" \
    "movdqa   "SREG2", 7*16("dct")    \n\t"

inline void ff_idct_xvid_sse2(short *block)
{
    __asm__ volatile(
    "movq     "MANGLE(m127)", %%mm0                              \n\t"
    iMTX_MULT("(%0)",     MANGLE(iTab1), ROUND(walkenIdctRounders),      PUT_EVEN(ROW0))
    iMTX_MULT("1*16(%0)", MANGLE(iTab2), ROUND(walkenIdctRounders+1*16), PUT_ODD(ROW1))
    iMTX_MULT("2*16(%0)", MANGLE(iTab3), ROUND(walkenIdctRounders+2*16), PUT_EVEN(ROW2))

    TEST_TWO_ROWS("3*16(%0)", "4*16(%0)", "%%eax", "%%ecx", CLEAR_ODD(ROW3), CLEAR_EVEN(ROW4))
    JZ("%%eax", "1f")
    iMTX_MULT("3*16(%0)", MANGLE(iTab4), ROUND(walkenIdctRounders+3*16), PUT_ODD(ROW3))

    TEST_TWO_ROWS("5*16(%0)", "6*16(%0)", "%%eax", "%%edx", CLEAR_ODD(ROW5), CLEAR_EVEN(ROW6))
    TEST_ONE_ROW("7*16(%0)", "%%esi", CLEAR_ODD(ROW7))
    iLLM_HEAD
    ".p2align 4 \n\t"
    JNZ("%%ecx", "2f")
    JNZ("%%eax", "3f")
    JNZ("%%edx", "4f")
    JNZ("%%esi", "5f")
    iLLM_PASS_SPARSE("%0")
    "jmp 6f                                                      \n\t"
    "2:                                                          \n\t"
    iMTX_MULT("4*16(%0)", MANGLE(iTab1), "#", PUT_EVEN(ROW4))
    "3:                                                          \n\t"
    iMTX_MULT("5*16(%0)", MANGLE(iTab4), ROUND(walkenIdctRounders+4*16), PUT_ODD(ROW5))
    JZ("%%edx", "1f")
    "4:                                                          \n\t"
    iMTX_MULT("6*16(%0)", MANGLE(iTab3), ROUND(walkenIdctRounders+5*16), PUT_EVEN(ROW6))
    JZ("%%esi", "1f")
    "5:                                                          \n\t"
    iMTX_MULT("7*16(%0)", MANGLE(iTab2), ROUND(walkenIdctRounders+5*16), PUT_ODD(ROW7))
#if ARCH_X86_32
    iLLM_HEAD
#endif
    iLLM_PASS("%0")
    "6:                                                          \n\t"
    : "+r"(block)
    :
    : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" ,
                   "%xmm4" , "%xmm5" , "%xmm6" , "%xmm7" ,)
#if ARCH_X86_64
      XMM_CLOBBERS("%xmm8" , "%xmm9" , "%xmm10", "%xmm11",
                   "%xmm12", "%xmm13", "%xmm14",)
#endif
      "%eax", "%ecx", "%edx", "%esi", "memory"
    );
}

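/*
 * Hypothetical C outline of the sparseness dispatch above (asm labels
 * "2:".."6:"): rows 0-2 are always transformed, row 3 only when its
 * zero test fails, and if rows 4-7 all test as zero the cheaper
 * iLLM_PASS_SPARSE column pass is taken.  row_pass() stands in for
 * iMTX_MULT, column_pass_*() for the two iLLM_PASS variants, and the
 * *_nz flags for the pmovmskb results.  Not built:
 */
#if 0
static void row_pass(short *block, int row);
static void column_pass_sparse(short *block);
static void column_pass_full(short *block);

static void dispatch_outline(short *block,
                             int r4_nz, int r5_nz, int r6_nz, int r7_nz)
{
    if (r4_nz) goto from_row4;
    if (r5_nz) goto from_row5;
    if (r6_nz) goto from_row6;
    if (r7_nz) goto from_row7;
    column_pass_sparse(block);       /* rows 4-7 known zero */
    return;

from_row4:
    row_pass(block, 4);              /* asm label "2:" */
from_row5:
    row_pass(block, 5);              /* asm label "3:" */
    if (!r6_nz) goto skip_row6;      /* JZ("%%edx", "1f") */
from_row6:
    row_pass(block, 6);              /* asm label "4:" */
skip_row6:
    if (!r7_nz) goto skip_row7;      /* JZ("%%esi", "1f") */
from_row7:
    row_pass(block, 7);              /* asm label "5:" */
skip_row7:
    column_pass_full(block);         /* falls through to "6:" */
}
#endif
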
void ff_idct_xvid_sse2_put(uint8_t *dest, int line_size, short *block)
{
    ff_idct_xvid_sse2(block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}

void ff_idct_xvid_sse2_add(uint8_t *dest, int line_size, short *block)
{
    ff_idct_xvid_sse2(block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}

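/*
 * Scalar model of the clamped store that ff_put_pixels_clamped_mmx
 * performs for the _put wrapper above: each 16-bit result is clipped
 * to the 0..255 pixel range and written as one byte per pixel (the
 * _add variant adds to the destination before clipping).  Not built:
 */
#if 0
#include <stdint.h>

static void put_pixels_clamped_ref(const short *block, uint8_t *dest,
                                   int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            dest[j] = v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
        }
        dest += line_size;
    }
}
#endif
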
#endif /* HAVE_SSE2_INLINE */