Subversion Repositories Kolibri OS

Compare Revisions

Rev 4348 → Rev 4349

/contrib/sdk/sources/ffmpeg/libpostproc/Makefile
0,0 → 1,9
include $(SUBDIR)../config.mak
 
NAME = postproc
FFLIBS = avutil
 
HEADERS = postprocess.h \
version.h \
 
OBJS = postprocess.o
/contrib/sdk/sources/ffmpeg/libpostproc/libpostproc.v
0,0 → 1,4
LIBPOSTPROC_$MAJOR {
global: postproc_*; pp_*;
local: *;
};
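(The libpostproc.v file above is a linker version script: only the postproc_* and pp_* symbols are exported from the shared library and everything else stays local; the $MAJOR placeholder is expanded to the library's major version when the build system generates the final script.)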
/contrib/sdk/sources/ffmpeg/libpostproc/postprocess.c
0,0 → 1,1045
/*
* Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
*
* AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* postprocessing.
*/
 
/*
C MMX MMX2 3DNow AltiVec
isVertDC Ec Ec Ec
isVertMinMaxOk Ec Ec Ec
doVertLowPass E e e Ec
doVertDefFilter Ec Ec e e Ec
isHorizDC Ec Ec Ec
isHorizMinMaxOk a E Ec
doHorizLowPass E e e Ec
doHorizDefFilter Ec Ec e e Ec
do_a_deblock Ec E Ec E
deRing E e e* Ecp
Vertical RKAlgo1 E a a
Horizontal RKAlgo1 a a
Vertical X1# a E E
Horizontal X1# a E E
LinIpolDeinterlace e E E*
CubicIpolDeinterlace a e e*
LinBlendDeinterlace e E E*
MedianDeinterlace# E Ec Ec
TempDeNoiser# E e e Ec
 
* I do not have a 3DNow! CPU -> it is untested, but no one has said it does not work, so it seems to work
# more or less self-invented filters, so the exactness is not too meaningful
E = Exact implementation
e = almost exact implementation (slightly different rounding,...)
a = alternative / approximate impl
c = checked against the other implementations (-vo md5)
p = partially optimized, still some work to do
*/
 
/*
TODO:
reduce the time wasted on the mem transfer
unroll stuff if instructions depend too much on the prior one
move YScale thing to the end instead of fixing QP
write a faster and higher quality deblocking filter :)
make the mainloop more flexible (variable number of blocks at once;
the if/else stuff per block is slowing things down)
compare the quality & speed of all filters
split this huge file
optimize C versions
try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
...
*/
 
//Changelog: use git log
 
#include "config.h"
#include "libavutil/avutil.h"
#include "libavutil/avassert.h"
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//#undef HAVE_MMXEXT_INLINE
//#define HAVE_AMD3DNOW_INLINE
//#undef HAVE_MMX_INLINE
//#undef ARCH_X86
//#define DEBUG_BRIGHTNESS
#include "postprocess.h"
#include "postprocess_internal.h"
#include "libavutil/avstring.h"
 
unsigned postproc_version(void)
{
av_assert0(LIBPOSTPROC_VERSION_MICRO >= 100);
return LIBPOSTPROC_VERSION_INT;
}
 
const char *postproc_configuration(void)
{
return FFMPEG_CONFIGURATION;
}
 
const char *postproc_license(void)
{
#define LICENSE_PREFIX "libpostproc license: "
return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}
 
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
 
#define GET_MODE_BUFFER_SIZE 500
#define OPTIONS_ARRAY_SIZE 10
#define BLOCK_SIZE 8
#define TEMP_STRIDE 8
//#define NUM_BLOCKS_AT_ONCE 16 //not used yet
 
#if ARCH_X86 && HAVE_INLINE_ASM
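/* Packed constants for the x86 inline-asm paths: each wXX value repeats a 16-bit
word four times and each bXX value repeats a byte eight times within 64 bits. */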
DECLARE_ASM_CONST(8, uint64_t, w05)= 0x0005000500050005LL;
DECLARE_ASM_CONST(8, uint64_t, w04)= 0x0004000400040004LL;
DECLARE_ASM_CONST(8, uint64_t, w20)= 0x0020002000200020LL;
DECLARE_ASM_CONST(8, uint64_t, b00)= 0x0000000000000000LL;
DECLARE_ASM_CONST(8, uint64_t, b01)= 0x0101010101010101LL;
DECLARE_ASM_CONST(8, uint64_t, b02)= 0x0202020202020202LL;
DECLARE_ASM_CONST(8, uint64_t, b08)= 0x0808080808080808LL;
DECLARE_ASM_CONST(8, uint64_t, b80)= 0x8080808080808080LL;
#endif
 
DECLARE_ASM_CONST(8, int, deringThreshold)= 20;
 
 
static const struct PPFilter filters[]=
{
{"hb", "hdeblock", 1, 1, 3, H_DEBLOCK},
{"vb", "vdeblock", 1, 2, 4, V_DEBLOCK},
/* {"hr", "rkhdeblock", 1, 1, 3, H_RK1_FILTER},
{"vr", "rkvdeblock", 1, 2, 4, V_RK1_FILTER},*/
{"h1", "x1hdeblock", 1, 1, 3, H_X1_FILTER},
{"v1", "x1vdeblock", 1, 2, 4, V_X1_FILTER},
{"ha", "ahdeblock", 1, 1, 3, H_A_DEBLOCK},
{"va", "avdeblock", 1, 2, 4, V_A_DEBLOCK},
{"dr", "dering", 1, 5, 6, DERING},
{"al", "autolevels", 0, 1, 2, LEVEL_FIX},
{"lb", "linblenddeint", 1, 1, 4, LINEAR_BLEND_DEINT_FILTER},
{"li", "linipoldeint", 1, 1, 4, LINEAR_IPOL_DEINT_FILTER},
{"ci", "cubicipoldeint", 1, 1, 4, CUBIC_IPOL_DEINT_FILTER},
{"md", "mediandeint", 1, 1, 4, MEDIAN_DEINT_FILTER},
{"fd", "ffmpegdeint", 1, 1, 4, FFMPEG_DEINT_FILTER},
{"l5", "lowpass5", 1, 1, 4, LOWPASS5_DEINT_FILTER},
{"tn", "tmpnoise", 1, 7, 8, TEMP_NOISE_FILTER},
{"fq", "forcequant", 1, 0, 0, FORCE_QUANT},
{"be", "bitexact", 1, 0, 0, BITEXACT},
{NULL, NULL,0,0,0,0} //End Marker
};
 
static const char *replaceTable[]=
{
"default", "hb:a,vb:a,dr:a",
"de", "hb:a,vb:a,dr:a",
"fast", "h1:a,v1:a,dr:a",
"fa", "h1:a,v1:a,dr:a",
"ac", "ha:a:128:7,va:a,dr:a",
NULL //End Marker
};
 
 
#if ARCH_X86 && HAVE_INLINE_ASM
static inline void prefetchnta(void *p)
{
__asm__ volatile( "prefetchnta (%0)\n\t"
: : "r" (p)
);
}
 
static inline void prefetcht0(void *p)
{
__asm__ volatile( "prefetcht0 (%0)\n\t"
: : "r" (p)
);
}
 
static inline void prefetcht1(void *p)
{
__asm__ volatile( "prefetcht1 (%0)\n\t"
: : "r" (p)
);
}
 
static inline void prefetcht2(void *p)
{
__asm__ volatile( "prefetcht2 (%0)\n\t"
: : "r" (p)
);
}
#endif
 
/* The horizontal functions exist only in C because the MMX
* code is faster with vertical filters and transposing. */
 
/**
* Check if the given 8x8 Block is mostly "flat"
*/
static inline int isHorizDC_C(const uint8_t src[], int stride, const PPContext *c)
{
int numEq= 0;
int y;
const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
const int dcThreshold= dcOffset*2 + 1;
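/* (unsigned)(a - b + dcOffset) < dcThreshold is a branch-free test for
|a - b| <= dcOffset: dcThreshold is 2*dcOffset + 1, so the unsigned compare
rejects differences above dcOffset as well as wrapped-around negative values
below -dcOffset. */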
 
for(y=0; y<BLOCK_SIZE; y++){
if(((unsigned)(src[0] - src[1] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[1] - src[2] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[2] - src[3] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[3] - src[4] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[4] - src[5] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[5] - src[6] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[6] - src[7] + dcOffset)) < dcThreshold) numEq++;
src+= stride;
}
return numEq > c->ppMode.flatnessThreshold;
}
 
/**
* Check if the middle 8x8 Block in the given 8x16 block is flat
*/
static inline int isVertDC_C(const uint8_t src[], int stride, const PPContext *c)
{
int numEq= 0;
int y;
const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
const int dcThreshold= dcOffset*2 + 1;
 
src+= stride*4; // src points to the beginning of the 8x8 block
for(y=0; y<BLOCK_SIZE-1; y++){
if(((unsigned)(src[0] - src[0+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[1] - src[1+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[2] - src[2+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[3] - src[3+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[4] - src[4+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[5] - src[5+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[6] - src[6+stride] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[7] - src[7+stride] + dcOffset)) < dcThreshold) numEq++;
src+= stride;
}
return numEq > c->ppMode.flatnessThreshold;
}
 
static inline int isHorizMinMaxOk_C(const uint8_t src[], int stride, int QP)
{
int i;
for(i=0; i<2; i++){
if((unsigned)(src[0] - src[5] + 2*QP) > 4*QP) return 0;
src += stride;
if((unsigned)(src[2] - src[7] + 2*QP) > 4*QP) return 0;
src += stride;
if((unsigned)(src[4] - src[1] + 2*QP) > 4*QP) return 0;
src += stride;
if((unsigned)(src[6] - src[3] + 2*QP) > 4*QP) return 0;
src += stride;
}
return 1;
}
 
static inline int isVertMinMaxOk_C(const uint8_t src[], int stride, int QP)
{
int x;
src+= stride*4;
for(x=0; x<BLOCK_SIZE; x+=4){
if((unsigned)(src[ x + 0*stride] - src[ x + 5*stride] + 2*QP) > 4*QP) return 0;
if((unsigned)(src[1+x + 2*stride] - src[1+x + 7*stride] + 2*QP) > 4*QP) return 0;
if((unsigned)(src[2+x + 4*stride] - src[2+x + 1*stride] + 2*QP) > 4*QP) return 0;
if((unsigned)(src[3+x + 6*stride] - src[3+x + 3*stride] + 2*QP) > 4*QP) return 0;
}
return 1;
}
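 
/* The *Classify() helpers below return 2 when the block is not flat (the caller
then applies the default deblocking filter), 1 when it is flat and passes the
min/max test (the caller applies the low-pass filter), and 0 when the block
should be left untouched; this is how the C path in postprocess_template.c
uses them. */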
 
static inline int horizClassify_C(const uint8_t src[], int stride, const PPContext *c)
{
if( isHorizDC_C(src, stride, c) ){
if( isHorizMinMaxOk_C(src, stride, c->QP) )
return 1;
else
return 0;
}else{
return 2;
}
}
 
static inline int vertClassify_C(const uint8_t src[], int stride, const PPContext *c)
{
if( isVertDC_C(src, stride, c) ){
if( isVertMinMaxOk_C(src, stride, c->QP) )
return 1;
else
return 0;
}else{
return 2;
}
}
 
static inline void doHorizDefFilter_C(uint8_t dst[], int stride, const PPContext *c)
{
int y;
for(y=0; y<BLOCK_SIZE; y++){
const int middleEnergy= 5*(dst[4] - dst[3]) + 2*(dst[2] - dst[5]);
 
if(FFABS(middleEnergy) < 8*c->QP){
const int q=(dst[3] - dst[4])/2;
const int leftEnergy= 5*(dst[2] - dst[1]) + 2*(dst[0] - dst[3]);
const int rightEnergy= 5*(dst[6] - dst[5]) + 2*(dst[4] - dst[7]);
 
int d= FFABS(middleEnergy) - FFMIN( FFABS(leftEnergy), FFABS(rightEnergy) );
d= FFMAX(d, 0);
 
d= (5*d + 32) >> 6;
d*= FFSIGN(-middleEnergy);
 
if(q>0)
{
d= d<0 ? 0 : d;
d= d>q ? q : d;
}
else
{
d= d>0 ? 0 : d;
d= d<q ? q : d;
}
 
dst[3]-= d;
dst[4]+= d;
}
dst+= stride;
}
}
 
/**
* Do a horizontal low pass filter on the 10x8 block (dst points to middle 8x8 Block)
* using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
*/
static inline void doHorizLowPass_C(uint8_t dst[], int stride, const PPContext *c)
{
int y;
for(y=0; y<BLOCK_SIZE; y++){
const int first= FFABS(dst[-1] - dst[0]) < c->QP ? dst[-1] : dst[0];
const int last= FFABS(dst[8] - dst[7]) < c->QP ? dst[8] : dst[7];
 
int sums[10];
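/* sums[k] is the sum of the 7 pixels dst[k-4]..dst[k+2] (pixels outside the
block replaced by 'first'/'last') plus a rounding bias of 4, so the stores
below, (sums[i] + sums[i+2] + 2*dst[i]) >> 4, realize the documented
(1,1,2,2,4,2,2,1,1)/16 tap weights centered on dst[i]. */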
sums[0] = 4*first + dst[0] + dst[1] + dst[2] + 4;
sums[1] = sums[0] - first + dst[3];
sums[2] = sums[1] - first + dst[4];
sums[3] = sums[2] - first + dst[5];
sums[4] = sums[3] - first + dst[6];
sums[5] = sums[4] - dst[0] + dst[7];
sums[6] = sums[5] - dst[1] + last;
sums[7] = sums[6] - dst[2] + last;
sums[8] = sums[7] - dst[3] + last;
sums[9] = sums[8] - dst[4] + last;
 
dst[0]= (sums[0] + sums[2] + 2*dst[0])>>4;
dst[1]= (sums[1] + sums[3] + 2*dst[1])>>4;
dst[2]= (sums[2] + sums[4] + 2*dst[2])>>4;
dst[3]= (sums[3] + sums[5] + 2*dst[3])>>4;
dst[4]= (sums[4] + sums[6] + 2*dst[4])>>4;
dst[5]= (sums[5] + sums[7] + 2*dst[5])>>4;
dst[6]= (sums[6] + sums[8] + 2*dst[6])>>4;
dst[7]= (sums[7] + sums[9] + 2*dst[7])>>4;
 
dst+= stride;
}
}
 
/**
* Experimental Filter 1 (Horizontal)
* will not damage linear gradients
* Flat blocks should look like they were passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
* can only smooth blocks at the expected locations (it cannot smooth them if they have moved)
* the MMX2 version does correct clipping, the C version does not
* not identical to the vertical one
*/
static inline void horizX1Filter(uint8_t *src, int stride, int QP)
{
int y;
static uint64_t lut[256];
if(!lut[255])
{
int i;
for(i=0; i<256; i++)
{
int v= i < 128 ? 2*i : 2*(i-256);
/*
//Simulate 112242211 9-Tap filter
uint64_t a= (v/16) & 0xFF;
uint64_t b= (v/8) & 0xFF;
uint64_t c= (v/4) & 0xFF;
uint64_t d= (3*v/8) & 0xFF;
*/
//Simulate piecewise linear interpolation
uint64_t a= (v/16) & 0xFF;
uint64_t b= (v*3/16) & 0xFF;
uint64_t c= (v*5/16) & 0xFF;
uint64_t d= (7*v/16) & 0xFF;
uint64_t A= (0x100 - a)&0xFF;
uint64_t B= (0x100 - b)&0xFF;
uint64_t C= (0x100 - c)&0xFF;
uint64_t D= (0x100 - c)&0xFF;
 
lut[i] = (a<<56) | (b<<48) | (c<<40) | (d<<32) |
(D<<24) | (C<<16) | (B<<8) | (A);
//lut[i] = (v<<32) | (v<<24);
}
}
 
for(y=0; y<BLOCK_SIZE; y++){
int a= src[1] - src[2];
int b= src[3] - src[4];
int c= src[5] - src[6];
 
int d= FFMAX(FFABS(b) - (FFABS(a) + FFABS(c))/2, 0);
 
if(d < QP){
int v = d * FFSIGN(-b);
 
src[1] +=v/8;
src[2] +=v/4;
src[3] +=3*v/8;
src[4] -=3*v/8;
src[5] -=v/4;
src[6] -=v/8;
}
src+=stride;
}
}
 
/**
* accurate deblock filter
*/
static av_always_inline void do_a_deblock_C(uint8_t *src, int step,
int stride, const PPContext *c)
{
int y;
const int QP= c->QP;
const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
const int dcThreshold= dcOffset*2 + 1;
//START_TIMER
src+= step*4; // src points to the beginning of the 8x8 block
for(y=0; y<8; y++){
int numEq= 0;
 
if(((unsigned)(src[-1*step] - src[0*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 0*step] - src[1*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 1*step] - src[2*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 2*step] - src[3*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 3*step] - src[4*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 4*step] - src[5*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 5*step] - src[6*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 6*step] - src[7*step] + dcOffset)) < dcThreshold) numEq++;
if(((unsigned)(src[ 7*step] - src[8*step] + dcOffset)) < dcThreshold) numEq++;
if(numEq > c->ppMode.flatnessThreshold){
int min, max, x;
 
if(src[0] > src[step]){
max= src[0];
min= src[step];
}else{
max= src[step];
min= src[0];
}
for(x=2; x<8; x+=2){
if(src[x*step] > src[(x+1)*step]){
if(src[x *step] > max) max= src[ x *step];
if(src[(x+1)*step] < min) min= src[(x+1)*step];
}else{
if(src[(x+1)*step] > max) max= src[(x+1)*step];
if(src[ x *step] < min) min= src[ x *step];
}
}
if(max-min < 2*QP){
const int first= FFABS(src[-1*step] - src[0]) < QP ? src[-1*step] : src[0];
const int last= FFABS(src[8*step] - src[7*step]) < QP ? src[8*step] : src[7*step];
 
int sums[10];
sums[0] = 4*first + src[0*step] + src[1*step] + src[2*step] + 4;
sums[1] = sums[0] - first + src[3*step];
sums[2] = sums[1] - first + src[4*step];
sums[3] = sums[2] - first + src[5*step];
sums[4] = sums[3] - first + src[6*step];
sums[5] = sums[4] - src[0*step] + src[7*step];
sums[6] = sums[5] - src[1*step] + last;
sums[7] = sums[6] - src[2*step] + last;
sums[8] = sums[7] - src[3*step] + last;
sums[9] = sums[8] - src[4*step] + last;
 
src[0*step]= (sums[0] + sums[2] + 2*src[0*step])>>4;
src[1*step]= (sums[1] + sums[3] + 2*src[1*step])>>4;
src[2*step]= (sums[2] + sums[4] + 2*src[2*step])>>4;
src[3*step]= (sums[3] + sums[5] + 2*src[3*step])>>4;
src[4*step]= (sums[4] + sums[6] + 2*src[4*step])>>4;
src[5*step]= (sums[5] + sums[7] + 2*src[5*step])>>4;
src[6*step]= (sums[6] + sums[8] + 2*src[6*step])>>4;
src[7*step]= (sums[7] + sums[9] + 2*src[7*step])>>4;
}
}else{
const int middleEnergy= 5*(src[4*step] - src[3*step]) + 2*(src[2*step] - src[5*step]);
 
if(FFABS(middleEnergy) < 8*QP){
const int q=(src[3*step] - src[4*step])/2;
const int leftEnergy= 5*(src[2*step] - src[1*step]) + 2*(src[0*step] - src[3*step]);
const int rightEnergy= 5*(src[6*step] - src[5*step]) + 2*(src[4*step] - src[7*step]);
 
int d= FFABS(middleEnergy) - FFMIN( FFABS(leftEnergy), FFABS(rightEnergy) );
d= FFMAX(d, 0);
 
d= (5*d + 32) >> 6;
d*= FFSIGN(-middleEnergy);
 
if(q>0){
d= d<0 ? 0 : d;
d= d>q ? q : d;
}else{
d= d>0 ? 0 : d;
d= d<q ? q : d;
}
 
src[3*step]-= d;
src[4*step]+= d;
}
}
 
src += stride;
}
/*if(step==16){
STOP_TIMER("step16")
}else{
STOP_TIMER("stepX")
}*/
}
 
//Note: we have C, MMX, MMX2 and 3DNow! versions; there is no combined 3DNow!+MMX2 one.
//Plain C versions
//We always compile the C version, since testing needs bit-exact results.
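//Each include of postprocess_template.c below instantiates the full filter set for one
//instruction-set flavour: the TEMPLATE_PP_* macro selects the code paths and gives the
//entry points their suffix (postProcess_C, postProcess_MMX, postProcess_MMX2, ...).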
#define TEMPLATE_PP_C 1
#include "postprocess_template.c"
 
#if HAVE_ALTIVEC
# define TEMPLATE_PP_ALTIVEC 1
# include "postprocess_altivec_template.c"
# include "postprocess_template.c"
#endif
 
#if ARCH_X86 && HAVE_INLINE_ASM
# if CONFIG_RUNTIME_CPUDETECT
# define TEMPLATE_PP_MMX 1
# include "postprocess_template.c"
# define TEMPLATE_PP_MMXEXT 1
# include "postprocess_template.c"
# define TEMPLATE_PP_3DNOW 1
# include "postprocess_template.c"
# define TEMPLATE_PP_SSE2 1
# include "postprocess_template.c"
# else
# if HAVE_SSE2_INLINE
# define TEMPLATE_PP_SSE2 1
# include "postprocess_template.c"
# elif HAVE_MMXEXT_INLINE
# define TEMPLATE_PP_MMXEXT 1
# include "postprocess_template.c"
# elif HAVE_AMD3DNOW_INLINE
# define TEMPLATE_PP_3DNOW 1
# include "postprocess_template.c"
# elif HAVE_MMX_INLINE
# define TEMPLATE_PP_MMX 1
# include "postprocess_template.c"
# endif
# endif
#endif
 
typedef void (*pp_fn)(const uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
const QP_STORE_T QPs[], int QPStride, int isColor, PPContext *c2);
 
static inline void postProcess(const uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
const QP_STORE_T QPs[], int QPStride, int isColor, pp_mode *vm, pp_context *vc)
{
pp_fn pp = postProcess_C;
PPContext *c= (PPContext *)vc;
PPMode *ppMode= (PPMode *)vm;
c->ppMode= *ppMode; //FIXME
 
if (!(ppMode->lumMode & BITEXACT)) {
#if CONFIG_RUNTIME_CPUDETECT
#if ARCH_X86 && HAVE_INLINE_ASM
// ordered by speed, fastest first
if (c->cpuCaps & AV_CPU_FLAG_SSE2) pp = postProcess_SSE2;
else if (c->cpuCaps & AV_CPU_FLAG_MMXEXT) pp = postProcess_MMX2;
else if (c->cpuCaps & AV_CPU_FLAG_3DNOW) pp = postProcess_3DNow;
else if (c->cpuCaps & AV_CPU_FLAG_MMX) pp = postProcess_MMX;
#elif HAVE_ALTIVEC
if (c->cpuCaps & AV_CPU_FLAG_ALTIVEC) pp = postProcess_altivec;
#endif
#else /* CONFIG_RUNTIME_CPUDETECT */
#if HAVE_SSE2_INLINE
pp = postProcess_SSE2;
#elif HAVE_MMXEXT_INLINE
pp = postProcess_MMX2;
#elif HAVE_AMD3DNOW_INLINE
pp = postProcess_3DNow;
#elif HAVE_MMX_INLINE
pp = postProcess_MMX;
#elif HAVE_ALTIVEC
pp = postProcess_altivec;
#endif
#endif /* !CONFIG_RUNTIME_CPUDETECT */
}
 
pp(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
}
 
/* -pp Command line Help
*/
const char pp_help[] =
"Available postprocessing filters:\n"
"Filters Options\n"
"short long name short long option Description\n"
"* * a autoq CPU power dependent enabler\n"
" c chrom chrominance filtering enabled\n"
" y nochrom chrominance filtering disabled\n"
" n noluma luma filtering disabled\n"
"hb hdeblock (2 threshold) horizontal deblocking filter\n"
" 1. difference factor: default=32, higher -> more deblocking\n"
" 2. flatness threshold: default=39, lower -> more deblocking\n"
" the h & v deblocking filters share these\n"
" so you can't set different thresholds for h / v\n"
"vb vdeblock (2 threshold) vertical deblocking filter\n"
"ha hadeblock (2 threshold) horizontal deblocking filter\n"
"va vadeblock (2 threshold) vertical deblocking filter\n"
"h1 x1hdeblock experimental h deblock filter 1\n"
"v1 x1vdeblock experimental v deblock filter 1\n"
"dr dering deringing filter\n"
"al autolevels automatic brightness / contrast\n"
" f fullyrange stretch luminance to (0..255)\n"
"lb linblenddeint linear blend deinterlacer\n"
"li linipoldeint linear interpolating deinterlace\n"
"ci cubicipoldeint cubic interpolating deinterlacer\n"
"md mediandeint median deinterlacer\n"
"fd ffmpegdeint ffmpeg deinterlacer\n"
"l5 lowpass5 FIR lowpass deinterlacer\n"
"de default hb:a,vb:a,dr:a\n"
"fa fast h1:a,v1:a,dr:a\n"
"ac ha:a:128:7,va:a,dr:a\n"
"tn tmpnoise (3 threshold) temporal noise reducer\n"
" 1. <= 2. <= 3. larger -> stronger filtering\n"
"fq forceQuant <quantizer> force quantizer\n"
"Usage:\n"
"<filterName>[:<option>[:<option>...]][[,|/][-]<filterName>[:<option>...]]...\n"
"long form example:\n"
"vdeblock:autoq/hdeblock:autoq/linblenddeint default,-vdeblock\n"
"short form example:\n"
"vb:a/hb:a/lb de,-vb\n"
"more examples:\n"
"tn:64:128:256\n"
"\n"
;
 
pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality)
{
char temp[GET_MODE_BUFFER_SIZE];
char *p= temp;
static const char filterDelimiters[] = ",/";
static const char optionDelimiters[] = ":|";
struct PPMode *ppMode;
char *filterToken;
 
if (!name) {
av_log(NULL, AV_LOG_ERROR, "pp: Missing argument\n");
return NULL;
}
 
if (!strcmp(name, "help")) {
const char *p;
for (p = pp_help; strchr(p, '\n'); p = strchr(p, '\n') + 1) {
av_strlcpy(temp, p, FFMIN(sizeof(temp), strchr(p, '\n') - p + 2));
av_log(NULL, AV_LOG_INFO, "%s", temp);
}
return NULL;
}
 
ppMode= av_malloc(sizeof(PPMode));
 
ppMode->lumMode= 0;
ppMode->chromMode= 0;
ppMode->maxTmpNoise[0]= 700;
ppMode->maxTmpNoise[1]= 1500;
ppMode->maxTmpNoise[2]= 3000;
ppMode->maxAllowedY= 234;
ppMode->minAllowedY= 16;
ppMode->baseDcDiff= 256/8;
ppMode->flatnessThreshold= 56-16-1;
ppMode->maxClippedThreshold= 0.01;
ppMode->error=0;
 
memset(temp, 0, GET_MODE_BUFFER_SIZE);
av_strlcpy(temp, name, GET_MODE_BUFFER_SIZE - 1);
 
av_log(NULL, AV_LOG_DEBUG, "pp: %s\n", name);
 
for(;;){
char *filterName;
int q= 1000000; //PP_QUALITY_MAX;
int chrom=-1;
int luma=-1;
char *option;
char *options[OPTIONS_ARRAY_SIZE];
int i;
int filterNameOk=0;
int numOfUnknownOptions=0;
int enable=1; //does the user want us to enable or disable the filter
 
filterToken= strtok(p, filterDelimiters);
if(filterToken == NULL) break;
p+= strlen(filterToken) + 1; // p points to next filterToken
filterName= strtok(filterToken, optionDelimiters);
av_log(NULL, AV_LOG_DEBUG, "pp: %s::%s\n", filterToken, filterName);
 
if(*filterName == '-'){
enable=0;
filterName++;
}
 
for(;;){ //for all options
option= strtok(NULL, optionDelimiters);
if(option == NULL) break;
 
av_log(NULL, AV_LOG_DEBUG, "pp: option: %s\n", option);
if(!strcmp("autoq", option) || !strcmp("a", option)) q= quality;
else if(!strcmp("nochrom", option) || !strcmp("y", option)) chrom=0;
else if(!strcmp("chrom", option) || !strcmp("c", option)) chrom=1;
else if(!strcmp("noluma", option) || !strcmp("n", option)) luma=0;
else{
options[numOfUnknownOptions] = option;
numOfUnknownOptions++;
}
if(numOfUnknownOptions >= OPTIONS_ARRAY_SIZE-1) break;
}
options[numOfUnknownOptions] = NULL;
 
/* replace stuff from the replace Table */
for(i=0; replaceTable[2*i]!=NULL; i++){
if(!strcmp(replaceTable[2*i], filterName)){
int newlen= strlen(replaceTable[2*i + 1]);
int plen;
int spaceLeft;
 
p--, *p=',';
 
plen= strlen(p);
spaceLeft= p - temp + plen;
if(spaceLeft + newlen >= GET_MODE_BUFFER_SIZE - 1){
ppMode->error++;
break;
}
memmove(p + newlen, p, plen+1);
memcpy(p, replaceTable[2*i + 1], newlen);
filterNameOk=1;
}
}
 
for(i=0; filters[i].shortName!=NULL; i++){
if( !strcmp(filters[i].longName, filterName)
|| !strcmp(filters[i].shortName, filterName)){
ppMode->lumMode &= ~filters[i].mask;
ppMode->chromMode &= ~filters[i].mask;
 
filterNameOk=1;
if(!enable) break; // user wants to disable it
 
if(q >= filters[i].minLumQuality && luma)
ppMode->lumMode|= filters[i].mask;
if(chrom==1 || (chrom==-1 && filters[i].chromDefault))
if(q >= filters[i].minChromQuality)
ppMode->chromMode|= filters[i].mask;
 
if(filters[i].mask == LEVEL_FIX){
int o;
ppMode->minAllowedY= 16;
ppMode->maxAllowedY= 234;
for(o=0; options[o]!=NULL; o++){
if( !strcmp(options[o],"fullyrange")
||!strcmp(options[o],"f")){
ppMode->minAllowedY= 0;
ppMode->maxAllowedY= 255;
numOfUnknownOptions--;
}
}
}
else if(filters[i].mask == TEMP_NOISE_FILTER)
{
int o;
int numOfNoises=0;
 
for(o=0; options[o]!=NULL; o++){
char *tail;
ppMode->maxTmpNoise[numOfNoises]=
strtol(options[o], &tail, 0);
if(tail!=options[o]){
numOfNoises++;
numOfUnknownOptions--;
if(numOfNoises >= 3) break;
}
}
}
else if(filters[i].mask == V_DEBLOCK || filters[i].mask == H_DEBLOCK
|| filters[i].mask == V_A_DEBLOCK || filters[i].mask == H_A_DEBLOCK){
int o;
 
for(o=0; options[o]!=NULL && o<2; o++){
char *tail;
int val= strtol(options[o], &tail, 0);
if(tail==options[o]) break;
 
numOfUnknownOptions--;
if(o==0) ppMode->baseDcDiff= val;
else ppMode->flatnessThreshold= val;
}
}
else if(filters[i].mask == FORCE_QUANT){
int o;
ppMode->forcedQuant= 15;
 
for(o=0; options[o]!=NULL && o<1; o++){
char *tail;
int val= strtol(options[o], &tail, 0);
if(tail==options[o]) break;
 
numOfUnknownOptions--;
ppMode->forcedQuant= val;
}
}
}
}
if(!filterNameOk) ppMode->error++;
ppMode->error += numOfUnknownOptions;
}
 
av_log(NULL, AV_LOG_DEBUG, "pp: lumMode=%X, chromMode=%X\n", ppMode->lumMode, ppMode->chromMode);
if(ppMode->error){
av_log(NULL, AV_LOG_ERROR, "%d errors in postprocess string \"%s\"\n", ppMode->error, name);
av_free(ppMode);
return NULL;
}
return ppMode;
}
 
void pp_free_mode(pp_mode *mode){
av_free(mode);
}
 
static void reallocAlign(void **p, int alignment, int size){
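/* NOTE: the alignment argument is currently ignored; av_mallocz() already returns
suitably aligned memory, and the buffer is zero-filled on every (re)allocation. */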
av_free(*p);
*p= av_mallocz(size);
}
 
static void reallocBuffers(PPContext *c, int width, int height, int stride, int qpStride){
int mbWidth = (width+15)>>4;
int mbHeight= (height+15)>>4;
int i;
 
c->stride= stride;
c->qpStride= qpStride;
 
reallocAlign((void **)&c->tempDst, 8, stride*24+32);
reallocAlign((void **)&c->tempSrc, 8, stride*24);
reallocAlign((void **)&c->tempBlocks, 8, 2*16*8);
reallocAlign((void **)&c->yHistogram, 8, 256*sizeof(uint64_t));
for(i=0; i<256; i++)
c->yHistogram[i]= width*height/64*15/256;
 
for(i=0; i<3; i++){
//Note: The +17*1024 is just there so I do not have to worry about r/w over the end.
reallocAlign((void **)&c->tempBlurred[i], 8, stride*mbHeight*16 + 17*1024);
reallocAlign((void **)&c->tempBlurredPast[i], 8, 256*((height+7)&(~7))/2 + 17*1024);//FIXME size
}
 
reallocAlign((void **)&c->deintTemp, 8, 2*width+32);
reallocAlign((void **)&c->nonBQPTable, 8, qpStride*mbHeight*sizeof(QP_STORE_T));
reallocAlign((void **)&c->stdQPTable, 8, qpStride*mbHeight*sizeof(QP_STORE_T));
reallocAlign((void **)&c->forcedQPTable, 8, mbWidth*sizeof(QP_STORE_T));
}
 
static const char * context_to_name(void * ptr) {
return "postproc";
}
 
static const AVClass av_codec_context_class = { "Postproc", context_to_name, NULL };
 
pp_context *pp_get_context(int width, int height, int cpuCaps){
PPContext *c= av_malloc(sizeof(PPContext));
int stride= FFALIGN(width, 16); //assumed / will realloc if needed
int qpStride= (width+15)/16 + 2; //assumed / will realloc if needed
 
memset(c, 0, sizeof(PPContext));
c->av_class = &av_codec_context_class;
if(cpuCaps&PP_FORMAT){
c->hChromaSubSample= cpuCaps&0x3;
c->vChromaSubSample= (cpuCaps>>4)&0x3;
}else{
c->hChromaSubSample= 1;
c->vChromaSubSample= 1;
}
if (cpuCaps & PP_CPU_CAPS_AUTO) {
c->cpuCaps = av_get_cpu_flags();
} else {
c->cpuCaps = 0;
if (cpuCaps & PP_CPU_CAPS_MMX) c->cpuCaps |= AV_CPU_FLAG_MMX;
if (cpuCaps & PP_CPU_CAPS_MMX2) c->cpuCaps |= AV_CPU_FLAG_MMXEXT;
if (cpuCaps & PP_CPU_CAPS_3DNOW) c->cpuCaps |= AV_CPU_FLAG_3DNOW;
if (cpuCaps & PP_CPU_CAPS_ALTIVEC) c->cpuCaps |= AV_CPU_FLAG_ALTIVEC;
}
 
reallocBuffers(c, width, height, stride, qpStride);
 
c->frameNum=-1;
 
return c;
}
 
void pp_free_context(void *vc){
PPContext *c = (PPContext*)vc;
int i;
 
for(i=0; i<3; i++) av_free(c->tempBlurred[i]);
for(i=0; i<3; i++) av_free(c->tempBlurredPast[i]);
 
av_free(c->tempBlocks);
av_free(c->yHistogram);
av_free(c->tempDst);
av_free(c->tempSrc);
av_free(c->deintTemp);
av_free(c->stdQPTable);
av_free(c->nonBQPTable);
av_free(c->forcedQPTable);
 
memset(c, 0, sizeof(PPContext));
 
av_free(c);
}
 
void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
uint8_t * dst[3], const int dstStride[3],
int width, int height,
const QP_STORE_T *QP_store, int QPStride,
pp_mode *vm, void *vc, int pict_type)
{
int mbWidth = (width+15)>>4;
int mbHeight= (height+15)>>4;
PPMode *mode = (PPMode*)vm;
PPContext *c = (PPContext*)vc;
int minStride= FFMAX(FFABS(srcStride[0]), FFABS(dstStride[0]));
int absQPStride = FFABS(QPStride);
 
// c->stride and c->qpStride are always positive
if(c->stride < minStride || c->qpStride < absQPStride)
reallocBuffers(c, width, height,
FFMAX(minStride, c->stride),
FFMAX(c->qpStride, absQPStride));
 
if(QP_store==NULL || (mode->lumMode & FORCE_QUANT)){
int i;
QP_store= c->forcedQPTable;
absQPStride = QPStride = 0;
if(mode->lumMode & FORCE_QUANT)
for(i=0; i<mbWidth; i++) c->forcedQPTable[i]= mode->forcedQuant;
else
for(i=0; i<mbWidth; i++) c->forcedQPTable[i]= 1;
}
 
if(pict_type & PP_PICT_TYPE_QP2){
int i;
const int count= mbHeight * absQPStride;
for(i=0; i<(count>>2); i++){
((uint32_t*)c->stdQPTable)[i] = (((const uint32_t*)QP_store)[i]>>1) & 0x7F7F7F7F;
}
for(i<<=2; i<count; i++){
c->stdQPTable[i] = QP_store[i]>>1;
}
QP_store= c->stdQPTable;
QPStride= absQPStride;
}
 
if(0){
int x,y;
for(y=0; y<mbHeight; y++){
for(x=0; x<mbWidth; x++){
av_log(c, AV_LOG_INFO, "%2d ", QP_store[x + y*QPStride]);
}
av_log(c, AV_LOG_INFO, "\n");
}
av_log(c, AV_LOG_INFO, "\n");
}
 
if((pict_type&7)!=3){
if (QPStride >= 0){
int i;
const int count= mbHeight * QPStride;
for(i=0; i<(count>>2); i++){
((uint32_t*)c->nonBQPTable)[i] = ((const uint32_t*)QP_store)[i] & 0x3F3F3F3F;
}
for(i<<=2; i<count; i++){
c->nonBQPTable[i] = QP_store[i] & 0x3F;
}
} else {
int i,j;
for(i=0; i<mbHeight; i++) {
for(j=0; j<absQPStride; j++) {
c->nonBQPTable[i*absQPStride+j] = QP_store[i*QPStride+j] & 0x3F;
}
}
}
}
 
av_log(c, AV_LOG_DEBUG, "using npp filters 0x%X/0x%X\n",
mode->lumMode, mode->chromMode);
 
postProcess(src[0], srcStride[0], dst[0], dstStride[0],
width, height, QP_store, QPStride, 0, mode, c);
 
width = (width )>>c->hChromaSubSample;
height = (height)>>c->vChromaSubSample;
 
if(mode->chromMode){
postProcess(src[1], srcStride[1], dst[1], dstStride[1],
width, height, QP_store, QPStride, 1, mode, c);
postProcess(src[2], srcStride[2], dst[2], dstStride[2],
width, height, QP_store, QPStride, 2, mode, c);
}
else if(srcStride[1] == dstStride[1] && srcStride[2] == dstStride[2]){
linecpy(dst[1], src[1], height, srcStride[1]);
linecpy(dst[2], src[2], height, srcStride[2]);
}else{
int y;
for(y=0; y<height; y++){
memcpy(&(dst[1][y*dstStride[1]]), &(src[1][y*srcStride[1]]), width);
memcpy(&(dst[2][y*dstStride[2]]), &(src[2][y*srcStride[2]]), width);
}
}
}
/contrib/sdk/sources/ffmpeg/libpostproc/postprocess.h
0,0 → 1,106
/*
* Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef POSTPROC_POSTPROCESS_H
#define POSTPROC_POSTPROCESS_H
 
/**
* @file
* @ingroup lpp
* external API header
*/
 
/**
* @defgroup lpp Libpostproc
* @{
*/
 
#include "libpostproc/version.h"
 
/**
* Return the LIBPOSTPROC_VERSION_INT constant.
*/
unsigned postproc_version(void);
 
/**
* Return the libpostproc build-time configuration.
*/
const char *postproc_configuration(void);
 
/**
* Return the libpostproc license.
*/
const char *postproc_license(void);
 
#define PP_QUALITY_MAX 6
 
#define QP_STORE_T int8_t
 
#include <inttypes.h>
 
typedef void pp_context;
typedef void pp_mode;
 
#if LIBPOSTPROC_VERSION_INT < (52<<16)
typedef pp_context pp_context_t;
typedef pp_mode pp_mode_t;
extern const char *const pp_help; ///< a simple help text
#else
extern const char pp_help[]; ///< a simple help text
#endif
 
void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
uint8_t * dst[3], const int dstStride[3],
int horizontalSize, int verticalSize,
const QP_STORE_T *QP_store, int QP_stride,
pp_mode *mode, pp_context *ppContext, int pict_type);
 
 
/**
* Return a pp_mode or NULL if an error occurred.
*
* @param name the string after "-pp" on the command line
* @param quality a number from 0 to PP_QUALITY_MAX
*/
pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality);
void pp_free_mode(pp_mode *mode);
 
pp_context *pp_get_context(int width, int height, int flags);
void pp_free_context(pp_context *ppContext);
 
#define PP_CPU_CAPS_MMX 0x80000000
#define PP_CPU_CAPS_MMX2 0x20000000
#define PP_CPU_CAPS_3DNOW 0x40000000
#define PP_CPU_CAPS_ALTIVEC 0x10000000
#define PP_CPU_CAPS_AUTO 0x00080000
 
#define PP_FORMAT 0x00000008
#define PP_FORMAT_420 (0x00000011|PP_FORMAT)
#define PP_FORMAT_422 (0x00000001|PP_FORMAT)
#define PP_FORMAT_411 (0x00000002|PP_FORMAT)
#define PP_FORMAT_444 (0x00000000|PP_FORMAT)
 
#define PP_PICT_TYPE_QP2 0x00000010 ///< MPEG2 style QScale
 
/**
* @}
*/
 
#endif /* POSTPROC_POSTPROCESS_H */
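 
A minimal usage sketch of the public API declared above (not part of the upstream file): the frame buffers, strides and the helper name are placeholders, the preset string "hb:a,vb:a,dr:a" is the "default" combination from the replace table in postprocess.c, and passing a NULL QP table makes the library fall back to a constant quantizer.
 
#include <stdint.h>
#include "postprocess.h"
 
static void deblock_yuv420_frame(uint8_t *src[3], int srcStride[3],
                                 uint8_t *dst[3], int dstStride[3],
                                 int width, int height)
{
    pp_context *ctx  = pp_get_context(width, height,
                                      PP_CPU_CAPS_AUTO | PP_FORMAT_420);
    pp_mode    *mode = pp_get_mode_by_name_and_quality("hb:a,vb:a,dr:a",
                                                       PP_QUALITY_MAX);
 
    if (ctx && mode)
        pp_postprocess((const uint8_t **)src, srcStride, dst, dstStride,
                       width, height,
                       NULL, 0,              /* no per-macroblock QP table */
                       mode, ctx, 0);
 
    if (mode) pp_free_mode(mode);
    if (ctx)  pp_free_context(ctx);
}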
/contrib/sdk/sources/ffmpeg/libpostproc/postprocess_altivec_template.c
0,0 → 1,1210
/*
* AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
*
* based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avutil.h"
 
#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
do { \
__typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
__typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
__typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
__typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
tempA1 = vec_mergeh (src_a, src_e); \
tempB1 = vec_mergel (src_a, src_e); \
tempC1 = vec_mergeh (src_b, src_f); \
tempD1 = vec_mergel (src_b, src_f); \
tempE1 = vec_mergeh (src_c, src_g); \
tempF1 = vec_mergel (src_c, src_g); \
tempG1 = vec_mergeh (src_d, src_h); \
tempH1 = vec_mergel (src_d, src_h); \
tempA2 = vec_mergeh (tempA1, tempE1); \
tempB2 = vec_mergel (tempA1, tempE1); \
tempC2 = vec_mergeh (tempB1, tempF1); \
tempD2 = vec_mergel (tempB1, tempF1); \
tempE2 = vec_mergeh (tempC1, tempG1); \
tempF2 = vec_mergel (tempC1, tempG1); \
tempG2 = vec_mergeh (tempD1, tempH1); \
tempH2 = vec_mergel (tempD1, tempH1); \
src_a = vec_mergeh (tempA2, tempE2); \
src_b = vec_mergel (tempA2, tempE2); \
src_c = vec_mergeh (tempB2, tempF2); \
src_d = vec_mergel (tempB2, tempF2); \
src_e = vec_mergeh (tempC2, tempG2); \
src_f = vec_mergel (tempC2, tempG2); \
src_g = vec_mergeh (tempD2, tempH2); \
src_h = vec_mergel (tempD2, tempH2); \
} while (0)
 
 
static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
/*
this code makes no assumption on src or stride.
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true.
*/
short data_0 = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
DECLARE_ALIGNED(16, short, data)[8] =
{
data_0,
data_0 * 2 + 1,
c->QP * 2,
c->QP * 4
};
int numEq;
uint8_t *src2 = src;
vector signed short v_dcOffset;
vector signed short v2QP;
vector unsigned short v4QP;
vector unsigned short v_dcThreshold;
const int properStride = (stride % 16);
const int srcAlign = ((unsigned long)src2 % 16);
const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
const vector signed int zero = vec_splat_s32(0);
const vector signed short mask = vec_splat_s16(1);
vector signed int v_numEq = vec_splat_s32(0);
vector signed short v_data = vec_ld(0, data);
vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3,
v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
//FIXME avoid this mess if possible
register int j0 = 0,
j1 = stride,
j2 = 2 * stride,
j3 = 3 * stride,
j4 = 4 * stride,
j5 = 5 * stride,
j6 = 6 * stride,
j7 = 7 * stride;
vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3,
v_srcA4, v_srcA5, v_srcA6, v_srcA7;
 
v_dcOffset = vec_splat(v_data, 0);
v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
v2QP = vec_splat(v_data, 2);
v4QP = (vector unsigned short)vec_splat(v_data, 3);
 
src2 += stride * 4;
 
#define LOAD_LINE(i) \
{ \
vector unsigned char perm##i = vec_lvsl(j##i, src2); \
vector unsigned char v_srcA2##i; \
vector unsigned char v_srcA1##i = vec_ld(j##i, src2); \
if (two_vectors) \
v_srcA2##i = vec_ld(j##i + 16, src2); \
v_srcA##i = \
vec_perm(v_srcA1##i, v_srcA2##i, perm##i); \
v_srcAss##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_srcA##i); }
 
#define LOAD_LINE_ALIGNED(i) \
v_srcA##i = vec_ld(j##i, src2); \
v_srcAss##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_srcA##i)
 
/* Special-casing the aligned case is worthwhile, as all calls from
* the (transposed) horizontal deblocks will be aligned, in addition
* to the naturally aligned vertical deblocks. */
if (properStride && srcAlign) {
LOAD_LINE_ALIGNED(0);
LOAD_LINE_ALIGNED(1);
LOAD_LINE_ALIGNED(2);
LOAD_LINE_ALIGNED(3);
LOAD_LINE_ALIGNED(4);
LOAD_LINE_ALIGNED(5);
LOAD_LINE_ALIGNED(6);
LOAD_LINE_ALIGNED(7);
} else {
LOAD_LINE(0);
LOAD_LINE(1);
LOAD_LINE(2);
LOAD_LINE(3);
LOAD_LINE(4);
LOAD_LINE(5);
LOAD_LINE(6);
LOAD_LINE(7);
}
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
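 
/* ITER(i, j) computes, lane by lane, whether |line_i - line_j| is within the DC
threshold (the same unsigned-compare trick as in the C code) and accumulates the
resulting 0/1 flags into v_numEq. */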
 
#define ITER(i, j) \
const vector signed short v_diff##i = \
vec_sub(v_srcAss##i, v_srcAss##j); \
const vector signed short v_sum##i = \
vec_add(v_diff##i, v_dcOffset); \
const vector signed short v_comp##i = \
(vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
v_dcThreshold); \
const vector signed short v_part##i = vec_and(mask, v_comp##i);
 
{
ITER(0, 1)
ITER(1, 2)
ITER(2, 3)
ITER(3, 4)
ITER(4, 5)
ITER(5, 6)
ITER(6, 7)
 
v_numEq = vec_sum4s(v_part0, v_numEq);
v_numEq = vec_sum4s(v_part1, v_numEq);
v_numEq = vec_sum4s(v_part2, v_numEq);
v_numEq = vec_sum4s(v_part3, v_numEq);
v_numEq = vec_sum4s(v_part4, v_numEq);
v_numEq = vec_sum4s(v_part5, v_numEq);
v_numEq = vec_sum4s(v_part6, v_numEq);
}
 
#undef ITER
 
v_numEq = vec_sums(v_numEq, zero);
 
v_numEq = vec_splat(v_numEq, 3);
vec_ste(v_numEq, 0, &numEq);
 
if (numEq > c->ppMode.flatnessThreshold){
const vector unsigned char mmoP1 = (const vector unsigned char)
{0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B};
const vector unsigned char mmoP2 = (const vector unsigned char)
{0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f};
const vector unsigned char mmoP = (const vector unsigned char)
vec_lvsl(8, (unsigned char*)0);
 
vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
vector signed short mmoDiff = vec_sub(mmoL, mmoR);
vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);
 
if (vec_any_gt(mmoSum, v4QP))
return 0;
else
return 1;
}
else return 2;
}
 
static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
/*
this code makes no assumption on src or stride.
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true. Quite a lot of load/stores
can be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *src2 = src;
const vector signed int zero = vec_splat_s32(0);
const int properStride = (stride % 16);
const int srcAlign = ((unsigned long)src2 % 16);
DECLARE_ALIGNED(16, short, qp)[8] = {c->QP};
vector signed short vqp = vec_ld(0, qp);
vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
vector unsigned char vbA0, av_uninit(vbA1), av_uninit(vbA2), av_uninit(vbA3), av_uninit(vbA4), av_uninit(vbA5), av_uninit(vbA6), av_uninit(vbA7), av_uninit(vbA8), vbA9;
vector unsigned char vbB0, av_uninit(vbB1), av_uninit(vbB2), av_uninit(vbB3), av_uninit(vbB4), av_uninit(vbB5), av_uninit(vbB6), av_uninit(vbB7), av_uninit(vbB8), vbB9;
vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
vector unsigned char perml0, perml1, perml2, perml3, perml4,
perml5, perml6, perml7, perml8, perml9;
register int j0 = 0,
j1 = stride,
j2 = 2 * stride,
j3 = 3 * stride,
j4 = 4 * stride,
j5 = 5 * stride,
j6 = 6 * stride,
j7 = 7 * stride,
j8 = 8 * stride,
j9 = 9 * stride;
 
vqp = vec_splat(vqp, 0);
 
src2 += stride*3;
 
#define LOAD_LINE(i) \
perml##i = vec_lvsl(i * stride, src2); \
vbA##i = vec_ld(i * stride, src2); \
vbB##i = vec_ld(i * stride + 16, src2); \
vbT##i = vec_perm(vbA##i, vbB##i, perml##i); \
vb##i = \
(vector signed short)vec_mergeh((vector unsigned char)zero, \
(vector unsigned char)vbT##i)
 
#define LOAD_LINE_ALIGNED(i) \
vbT##i = vec_ld(j##i, src2); \
vb##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)vbT##i)
 
/* Special-casing the aligned case is worthwhile, as all calls from
* the (transposed) horizontal deblocks will be aligned, in addition
* to the naturally aligned vertical deblocks. */
if (properStride && srcAlign) {
LOAD_LINE_ALIGNED(0);
LOAD_LINE_ALIGNED(1);
LOAD_LINE_ALIGNED(2);
LOAD_LINE_ALIGNED(3);
LOAD_LINE_ALIGNED(4);
LOAD_LINE_ALIGNED(5);
LOAD_LINE_ALIGNED(6);
LOAD_LINE_ALIGNED(7);
LOAD_LINE_ALIGNED(8);
LOAD_LINE_ALIGNED(9);
} else {
LOAD_LINE(0);
LOAD_LINE(1);
LOAD_LINE(2);
LOAD_LINE(3);
LOAD_LINE(4);
LOAD_LINE(5);
LOAD_LINE(6);
LOAD_LINE(7);
LOAD_LINE(8);
LOAD_LINE(9);
}
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
{
const vector unsigned short v_2 = vec_splat_u16(2);
const vector unsigned short v_4 = vec_splat_u16(4);
 
const vector signed short v_diff01 = vec_sub(vb0, vb1);
const vector unsigned short v_cmp01 =
(const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
const vector signed short v_diff89 = vec_sub(vb8, vb9);
const vector unsigned short v_cmp89 =
(const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);
 
const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
const vector signed short temp02 = vec_add(vb2, vb3);
const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
const vector signed short v_sumsB0 = vec_add(temp02, temp03);
 
const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
const vector signed short v_sumsB1 = vec_add(temp11, vb4);
 
const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
const vector signed short v_sumsB2 = vec_add(temp21, vb5);
 
const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
const vector signed short v_sumsB3 = vec_add(temp31, vb6);
 
const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
const vector signed short v_sumsB4 = vec_add(temp41, vb7);
 
const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
const vector signed short v_sumsB5 = vec_add(temp51, vb8);
 
const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
const vector signed short v_sumsB6 = vec_add(temp61, v_last);
 
const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
const vector signed short v_sumsB7 = vec_add(temp71, v_last);
 
const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
const vector signed short v_sumsB8 = vec_add(temp81, v_last);
 
const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
const vector signed short v_sumsB9 = vec_add(temp91, v_last);
 
#define COMPUTE_VR(i, j, k) \
const vector signed short temps1##i = \
vec_add(v_sumsB##i, v_sumsB##k); \
const vector signed short temps2##i = \
vec_mladd(vb##j, (vector signed short)v_2, temps1##i); \
const vector signed short vr##j = vec_sra(temps2##i, v_4)
 
COMPUTE_VR(0, 1, 2);
COMPUTE_VR(1, 2, 3);
COMPUTE_VR(2, 3, 4);
COMPUTE_VR(3, 4, 5);
COMPUTE_VR(4, 5, 6);
COMPUTE_VR(5, 6, 7);
COMPUTE_VR(6, 7, 8);
COMPUTE_VR(7, 8, 9);
 
const vector signed char neg1 = vec_splat_s8(-1);
const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
 
#define PACK_AND_STORE(i) \
{ const vector unsigned char perms##i = \
vec_lvsr(i * stride, src2); \
const vector unsigned char vf##i = \
vec_packsu(vr##i, (vector signed short)zero); \
const vector unsigned char vg##i = \
vec_perm(vf##i, vbT##i, permHH); \
const vector unsigned char mask##i = \
vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
const vector unsigned char vg2##i = \
vec_perm(vg##i, vg##i, perms##i); \
const vector unsigned char svA##i = \
vec_sel(vbA##i, vg2##i, mask##i); \
const vector unsigned char svB##i = \
vec_sel(vg2##i, vbB##i, mask##i); \
vec_st(svA##i, i * stride, src2); \
vec_st(svB##i, i * stride + 16, src2);}
 
#define PACK_AND_STORE_ALIGNED(i) \
{ const vector unsigned char vf##i = \
vec_packsu(vr##i, (vector signed short)zero); \
const vector unsigned char vg##i = \
vec_perm(vf##i, vbT##i, permHH); \
vec_st(vg##i, i * stride, src2);}
 
/* Special-casing the aligned case is worthwhile, as all calls from
* the (transposed) horizontal deblocks will be aligned, in addition
* to the naturally aligned vertical deblocks. */
if (properStride && srcAlign) {
PACK_AND_STORE_ALIGNED(1)
PACK_AND_STORE_ALIGNED(2)
PACK_AND_STORE_ALIGNED(3)
PACK_AND_STORE_ALIGNED(4)
PACK_AND_STORE_ALIGNED(5)
PACK_AND_STORE_ALIGNED(6)
PACK_AND_STORE_ALIGNED(7)
PACK_AND_STORE_ALIGNED(8)
} else {
PACK_AND_STORE(1)
PACK_AND_STORE(2)
PACK_AND_STORE(3)
PACK_AND_STORE(4)
PACK_AND_STORE(5)
PACK_AND_STORE(6)
PACK_AND_STORE(7)
PACK_AND_STORE(8)
}
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
}
}
 
 
 
static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
/*
this code makes no assumption on src or stride.
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true. Quite a lot of load/stores
can be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *src2 = src + stride*3;
const vector signed int zero = vec_splat_s32(0);
DECLARE_ALIGNED(16, short, qp)[8] = {8*c->QP};
vector signed short vqp = vec_splat(
(vector signed short)vec_ld(0, qp), 0);
 
#define LOAD_LINE(i) \
const vector unsigned char perm##i = \
vec_lvsl(i * stride, src2); \
const vector unsigned char vbA##i = \
vec_ld(i * stride, src2); \
const vector unsigned char vbB##i = \
vec_ld(i * stride + 16, src2); \
const vector unsigned char vbT##i = \
vec_perm(vbA##i, vbB##i, perm##i); \
const vector signed short vb##i = \
(vector signed short)vec_mergeh((vector unsigned char)zero, \
(vector unsigned char)vbT##i)
 
LOAD_LINE(1);
LOAD_LINE(2);
LOAD_LINE(3);
LOAD_LINE(4);
LOAD_LINE(5);
LOAD_LINE(6);
LOAD_LINE(7);
LOAD_LINE(8);
#undef LOAD_LINE
 
const vector signed short v_1 = vec_splat_s16(1);
const vector signed short v_2 = vec_splat_s16(2);
const vector signed short v_5 = vec_splat_s16(5);
const vector signed short v_32 = vec_sl(v_1,
(vector unsigned short)v_5);
/* middle energy */
const vector signed short l3minusl6 = vec_sub(vb3, vb6);
const vector signed short l5minusl4 = vec_sub(vb5, vb4);
const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
const vector signed short absmE = vec_abs(mE);
/* left & right energy */
const vector signed short l1minusl4 = vec_sub(vb1, vb4);
const vector signed short l3minusl2 = vec_sub(vb3, vb2);
const vector signed short l5minusl8 = vec_sub(vb5, vb8);
const vector signed short l7minusl6 = vec_sub(vb7, vb6);
const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
/* d */
const vector signed short ddiff = vec_sub(absmE,
vec_min(vec_abs(lE),
vec_abs(rE)));
const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
const vector signed short minusd = vec_sub((vector signed short)zero, d);
const vector signed short finald = vec_sel(minusd,
d,
vec_cmpgt(vec_sub((vector signed short)zero, mE),
(vector signed short)zero));
/* q */
const vector signed short qtimes2 = vec_sub(vb4, vb5);
/* for a shift right to behave like /2, we need to add one
to all negative integers */
const vector signed short rounddown = vec_sel((vector signed short)zero,
v_1,
vec_cmplt(qtimes2, (vector signed short)zero));
const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
/* clamp */
const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
const vector signed short dclamp_P = vec_min(dclamp_P1, q);
const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
const vector signed short dclamp_N = vec_max(dclamp_N1, q);
 
const vector signed short dclampedfinal = vec_sel(dclamp_N,
dclamp_P,
vec_cmpgt(q, (vector signed short)zero));
const vector signed short dornotd = vec_sel((vector signed short)zero,
dclampedfinal,
vec_cmplt(absmE, vqp));
/* add/subtract to l4 and l5 */
const vector signed short vb4minusd = vec_sub(vb4, dornotd);
const vector signed short vb5plusd = vec_add(vb5, dornotd);
/* finally, stores */
const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero);
 
const vector signed char neg1 = vec_splat_s8(-1);
const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
 
#define STORE(i) \
{ const vector unsigned char perms##i = \
vec_lvsr(i * stride, src2); \
const vector unsigned char vg##i = \
vec_perm(st##i, vbT##i, permHH); \
const vector unsigned char mask##i = \
vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
const vector unsigned char vg2##i = \
vec_perm(vg##i, vg##i, perms##i); \
const vector unsigned char svA##i = \
vec_sel(vbA##i, vg2##i, mask##i); \
const vector unsigned char svB##i = \
vec_sel(vg2##i, vbB##i, mask##i); \
vec_st(svA##i, i * stride, src2); \
vec_st(svB##i, i * stride + 16, src2);}
 
STORE(4)
STORE(5)
}
 
static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
const vector signed int vsint32_8 = vec_splat_s32(8);
const vector unsigned int vuint32_4 = vec_splat_u32(4);
const vector signed char neg1 = vec_splat_s8(-1);
 
const vector unsigned char permA1 = (vector unsigned char)
{0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
const vector unsigned char permA2 = (vector unsigned char)
{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
const vector unsigned char permA1inc = (vector unsigned char)
{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
const vector unsigned char permA2inc = (vector unsigned char)
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
const vector unsigned char magic = (vector unsigned char)
{0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
const vector unsigned char extractPerm = (vector unsigned char)
{0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01};
const vector unsigned char extractPermInc = (vector unsigned char)
{0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01};
const vector unsigned char identity = vec_lvsl(0,(unsigned char *)0);
const vector unsigned char tenRight = (vector unsigned char)
{0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
const vector unsigned char eightLeft = (vector unsigned char)
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08};
 
/*
this code makes no assumption on src or stride.
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true. Quite a lot of load/stores
can be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *srcCopy = src;
DECLARE_ALIGNED(16, uint8_t, dt)[16] = { deringThreshold };
const vector signed int zero = vec_splat_s32(0);
vector unsigned char v_dt = vec_splat(vec_ld(0, dt), 0);
 
#define LOAD_LINE(i) \
const vector unsigned char perm##i = \
vec_lvsl(i * stride, srcCopy); \
vector unsigned char sA##i = vec_ld(i * stride, srcCopy); \
vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy); \
vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)
 
LOAD_LINE(0);
LOAD_LINE(1);
LOAD_LINE(2);
LOAD_LINE(3);
LOAD_LINE(4);
LOAD_LINE(5);
LOAD_LINE(6);
LOAD_LINE(7);
LOAD_LINE(8);
LOAD_LINE(9);
#undef LOAD_LINE
 
vector unsigned char v_avg;
DECLARE_ALIGNED(16, signed int, S)[8];
DECLARE_ALIGNED(16, int, tQP2)[4] = { c->QP/2 + 1 };
vector signed int vQP2 = vec_ld(0, tQP2);
vQP2 = vec_splat(vQP2, 0);
 
{
const vector unsigned char trunc_perm = (vector unsigned char)
{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18};
const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);
 
#define EXTRACT(op) do { \
const vector unsigned char s_1 = vec_##op(trunc_src12, trunc_src34); \
const vector unsigned char s_2 = vec_##op(trunc_src56, trunc_src78); \
const vector unsigned char s_6 = vec_##op(s_1, s_2); \
const vector unsigned char s_8h = vec_mergeh(s_6, s_6); \
const vector unsigned char s_8l = vec_mergel(s_6, s_6); \
const vector unsigned char s_9 = vec_##op(s_8h, s_8l); \
const vector unsigned char s_9h = vec_mergeh(s_9, s_9); \
const vector unsigned char s_9l = vec_mergel(s_9, s_9); \
const vector unsigned char s_10 = vec_##op(s_9h, s_9l); \
const vector unsigned char s_10h = vec_mergeh(s_10, s_10); \
const vector unsigned char s_10l = vec_mergel(s_10, s_10); \
const vector unsigned char s_11 = vec_##op(s_10h, s_10l); \
const vector unsigned char s_11h = vec_mergeh(s_11, s_11); \
const vector unsigned char s_11l = vec_mergel(s_11, s_11); \
v_##op = vec_##op(s_11h, s_11l); \
} while (0)
 
vector unsigned char v_min;
vector unsigned char v_max;
EXTRACT(min);
EXTRACT(max);
#undef EXTRACT
 
if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
return;
 
v_avg = vec_avg(v_min, v_max);
}
 
{
const vector unsigned short mask1 = (vector unsigned short)
{0x0001, 0x0002, 0x0004, 0x0008,
0x0010, 0x0020, 0x0040, 0x0080};
const vector unsigned short mask2 = (vector unsigned short)
{0x0100, 0x0200, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000};
 
const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
const vector unsigned int vuint32_1 = vec_splat_u32(1);
 
vector signed int sumA2;
vector signed int sumB2;
vector signed int sum0, sum1, sum2, sum3, sum4;
vector signed int sum5, sum6, sum7, sum8, sum9;
 
#define COMPARE(i) \
do { \
const vector unsigned char cmp = \
(vector unsigned char)vec_cmpgt(src##i, v_avg); \
const vector unsigned short cmpHi = \
(vector unsigned short)vec_mergeh(cmp, cmp); \
const vector unsigned short cmpLi = \
(vector unsigned short)vec_mergel(cmp, cmp); \
const vector signed short cmpHf = \
(vector signed short)vec_and(cmpHi, mask1); \
const vector signed short cmpLf = \
(vector signed short)vec_and(cmpLi, mask2); \
const vector signed int sump = vec_sum4s(cmpHf, zero); \
const vector signed int sumq = vec_sum4s(cmpLf, sump); \
sum##i = vec_sums(sumq, zero); \
} while (0)
 
COMPARE(0);
COMPARE(1);
COMPARE(2);
COMPARE(3);
COMPARE(4);
COMPARE(5);
COMPARE(6);
COMPARE(7);
COMPARE(8);
COMPARE(9);
#undef COMPARE
 
{
const vector signed int sump02 = vec_mergel(sum0, sum2);
const vector signed int sump13 = vec_mergel(sum1, sum3);
const vector signed int sumA = vec_mergel(sump02, sump13);
 
const vector signed int sump46 = vec_mergel(sum4, sum6);
const vector signed int sump57 = vec_mergel(sum5, sum7);
const vector signed int sumB = vec_mergel(sump46, sump57);
 
const vector signed int sump8A = vec_mergel(sum8, zero);
const vector signed int sump9B = vec_mergel(sum9, zero);
const vector signed int sumC = vec_mergel(sump8A, sump9B);
 
const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
const vector signed int t2A = vec_or(sumA, tA);
const vector signed int t2B = vec_or(sumB, tB);
const vector signed int t2C = vec_or(sumC, tC);
const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
vec_sl(t2A, vuint32_1));
const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
vec_sl(t2B, vuint32_1));
const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
vec_sl(t2C, vuint32_1));
const vector signed int yA = vec_and(t2A, t3A);
const vector signed int yB = vec_and(t2B, t3B);
const vector signed int yC = vec_and(t2C, t3C);
 
const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
const vector signed int sumAp = vec_and(yA,
vec_and(sumAd4,sumAd8));
const vector signed int sumBp = vec_and(yB,
vec_and(sumBd4,sumBd8));
sumA2 = vec_or(sumAp,
vec_sra(sumAp,
vuint32_16));
sumB2 = vec_or(sumBp,
vec_sra(sumBp,
vuint32_16));
}
vec_st(sumA2, 0, S);
vec_st(sumB2, 16, S);
}
 
/* I'm not sure the following is actually faster
than straight, unvectorized C code :-( */
 
#define F_INIT() \
vector unsigned char tenRightM = tenRight; \
vector unsigned char permA1M = permA1; \
vector unsigned char permA2M = permA2; \
vector unsigned char extractPermM = extractPerm
 
#define F2(i, j, k, l) \
if (S[i] & (1 << (l+1))) { \
const vector unsigned char a_A = vec_perm(src##i, src##j, permA1M); \
const vector unsigned char a_B = vec_perm(a_A, src##k, permA2M); \
const vector signed int a_sump = \
(vector signed int)vec_msum(a_B, magic, (vector unsigned int)zero);\
vector signed int F = vec_sr(vec_sums(a_sump, vsint32_8), vuint32_4); \
const vector signed int p = \
(vector signed int)vec_perm(src##j, (vector unsigned char)zero, \
extractPermM); \
const vector signed int sum = vec_add(p, vQP2); \
const vector signed int diff = vec_sub(p, vQP2); \
vector signed int newpm; \
vector unsigned char newpm2, mask; \
F = vec_splat(F, 3); \
if (vec_all_lt(sum, F)) \
newpm = sum; \
else if (vec_all_gt(diff, F)) \
newpm = diff; \
else newpm = F; \
newpm2 = vec_splat((vector unsigned char)newpm, 15); \
mask = vec_add(identity, tenRightM); \
src##j = vec_perm(src##j, newpm2, mask); \
} \
permA1M = vec_add(permA1M, permA1inc); \
permA2M = vec_add(permA2M, permA2inc); \
tenRightM = vec_sro(tenRightM, eightLeft); \
extractPermM = vec_add(extractPermM, extractPermInc)
 
#define ITER(i, j, k) do { \
F_INIT(); \
F2(i, j, k, 0); \
F2(i, j, k, 1); \
F2(i, j, k, 2); \
F2(i, j, k, 3); \
F2(i, j, k, 4); \
F2(i, j, k, 5); \
F2(i, j, k, 6); \
F2(i, j, k, 7); \
} while (0)
 
ITER(0, 1, 2);
ITER(1, 2, 3);
ITER(2, 3, 4);
ITER(3, 4, 5);
ITER(4, 5, 6);
ITER(5, 6, 7);
ITER(6, 7, 8);
ITER(7, 8, 9);
 
#define STORE_LINE(i) do { \
const vector unsigned char permST = \
vec_lvsr(i * stride, srcCopy); \
const vector unsigned char maskST = \
vec_perm((vector unsigned char)zero, \
(vector unsigned char)neg1, permST); \
    src##i = vec_perm(src##i, src##i, permST); \
    sA##i = vec_sel(sA##i, src##i, maskST); \
    sB##i = vec_sel(src##i, sB##i, maskST); \
vec_st(sA##i, i * stride, srcCopy); \
vec_st(sB##i, i * stride + 16, srcCopy); \
} while (0)
 
STORE_LINE(1);
STORE_LINE(2);
STORE_LINE(3);
STORE_LINE(4);
STORE_LINE(5);
STORE_LINE(6);
STORE_LINE(7);
STORE_LINE(8);
 
#undef STORE_LINE
#undef ITER
#undef F2
}
 
#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)
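
/* Note: doHorizLowPass, doHorizDefFilter and do_a_deblock have no dedicated
 * AltiVec implementation in this template; the three macros above simply
 * route them to the generic C versions. */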
 
static inline void tempNoiseReducer_altivec(uint8_t *src, int stride,
uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
{
const vector signed char neg1 = vec_splat_s8(-1);
const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
 
const vector signed int zero = vec_splat_s32(0);
const vector signed short vsint16_1 = vec_splat_s16(1);
vector signed int v_dp = zero;
vector signed int v_sysdp = zero;
int d, sysd, i;
 
#define LOAD_LINE(src, i) \
register int j##src##i = i * stride; \
vector unsigned char perm##src##i = vec_lvsl(j##src##i, src); \
const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
const vector unsigned char v_##src##A##i = \
vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i); \
vector signed short v_##src##Ass##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_##src##A##i)
 
LOAD_LINE(src, 0);
LOAD_LINE(src, 1);
LOAD_LINE(src, 2);
LOAD_LINE(src, 3);
LOAD_LINE(src, 4);
LOAD_LINE(src, 5);
LOAD_LINE(src, 6);
LOAD_LINE(src, 7);
 
LOAD_LINE(tempBlurred, 0);
LOAD_LINE(tempBlurred, 1);
LOAD_LINE(tempBlurred, 2);
LOAD_LINE(tempBlurred, 3);
LOAD_LINE(tempBlurred, 4);
LOAD_LINE(tempBlurred, 5);
LOAD_LINE(tempBlurred, 6);
LOAD_LINE(tempBlurred, 7);
#undef LOAD_LINE
 
#define ACCUMULATE_DIFFS(i) do { \
vector signed short v_d = vec_sub(v_tempBlurredAss##i, \
v_srcAss##i); \
v_dp = vec_msums(v_d, v_d, v_dp); \
v_sysdp = vec_msums(v_d, vsint16_1, v_sysdp); \
} while (0)
 
ACCUMULATE_DIFFS(0);
ACCUMULATE_DIFFS(1);
ACCUMULATE_DIFFS(2);
ACCUMULATE_DIFFS(3);
ACCUMULATE_DIFFS(4);
ACCUMULATE_DIFFS(5);
ACCUMULATE_DIFFS(6);
ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS
 
tempBlurredPast[127]= maxNoise[0];
tempBlurredPast[128]= maxNoise[1];
tempBlurredPast[129]= maxNoise[2];
 
v_dp = vec_sums(v_dp, zero);
v_sysdp = vec_sums(v_sysdp, zero);
 
v_dp = vec_splat(v_dp, 3);
v_sysdp = vec_splat(v_sysdp, 3);
 
vec_ste(v_dp, 0, &d);
vec_ste(v_sysdp, 0, &sysd);
 
i = d;
d = (4*d
+(*(tempBlurredPast-256))
+(*(tempBlurredPast-1))+ (*(tempBlurredPast+1))
+(*(tempBlurredPast+256))
+4)>>3;
 
*tempBlurredPast=i;
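
    /* Rough map of the branches below: d is the block's sum of squared
       differences against its blurred history, smoothed with the neighbouring
       blocks' past values, and maxNoise[] holds the three thresholds from the
       PPMode:
           d <  maxNoise[0]                -> strong blend (7*blurred + src + 4) >> 3
           maxNoise[0] <= d <= maxNoise[1] -> weak blend   (3*blurred + src + 2) >> 2
           maxNoise[1] <  d <  maxNoise[2] -> plain average of blurred and src
           maxNoise[2] <= d                -> too different, keep src unchanged */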
 
if (d > maxNoise[1]) {
if (d < maxNoise[2]) {
#define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);
 
OP(0);
OP(1);
OP(2);
OP(3);
OP(4);
OP(5);
OP(6);
OP(7);
#undef OP
} else {
#define OP(i) v_tempBlurredAss##i = v_srcAss##i;
 
OP(0);
OP(1);
OP(2);
OP(3);
OP(4);
OP(5);
OP(6);
OP(7);
#undef OP
}
} else {
if (d < maxNoise[0]) {
const vector signed short vsint16_7 = vec_splat_s16(7);
const vector signed short vsint16_4 = vec_splat_s16(4);
const vector unsigned short vuint16_3 = vec_splat_u16(3);
 
#define OP(i) do { \
const vector signed short v_temp = \
vec_mladd(v_tempBlurredAss##i, vsint16_7, v_srcAss##i); \
const vector signed short v_temp2 = vec_add(v_temp, vsint16_4); \
v_tempBlurredAss##i = vec_sr(v_temp2, vuint16_3); \
} while (0)
 
OP(0);
OP(1);
OP(2);
OP(3);
OP(4);
OP(5);
OP(6);
OP(7);
#undef OP
} else {
const vector signed short vsint16_3 = vec_splat_s16(3);
const vector signed short vsint16_2 = vec_splat_s16(2);
 
#define OP(i) do { \
const vector signed short v_temp = \
vec_mladd(v_tempBlurredAss##i, vsint16_3, v_srcAss##i); \
const vector signed short v_temp2 = vec_add(v_temp, vsint16_2); \
v_tempBlurredAss##i = \
vec_sr(v_temp2, (vector unsigned short)vsint16_2); \
} while (0)
 
OP(0);
OP(1);
OP(2);
OP(3);
OP(4);
OP(5);
OP(6);
OP(7);
#undef OP
}
}
 
#define PACK_AND_STORE(src, i) do { \
const vector unsigned char perms = vec_lvsr(i * stride, src); \
const vector unsigned char vf = \
        vec_packsu(v_tempBlurredAss##i, (vector signed short)zero); \
const vector unsigned char vg = vec_perm(vf, v_##src##A##i, permHH); \
const vector unsigned char mask = \
vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms); \
const vector unsigned char vg2 = vec_perm(vg, vg, perms); \
const vector unsigned char svA = vec_sel(v_##src##A1##i, vg2, mask); \
const vector unsigned char svB = vec_sel(vg2, v_##src##A2##i, mask); \
vec_st(svA, i * stride, src); \
vec_st(svB, i * stride + 16, src); \
} while (0)
 
PACK_AND_STORE(src, 0);
PACK_AND_STORE(src, 1);
PACK_AND_STORE(src, 2);
PACK_AND_STORE(src, 3);
PACK_AND_STORE(src, 4);
PACK_AND_STORE(src, 5);
PACK_AND_STORE(src, 6);
PACK_AND_STORE(src, 7);
PACK_AND_STORE(tempBlurred, 0);
PACK_AND_STORE(tempBlurred, 1);
PACK_AND_STORE(tempBlurred, 2);
PACK_AND_STORE(tempBlurred, 3);
PACK_AND_STORE(tempBlurred, 4);
PACK_AND_STORE(tempBlurred, 5);
PACK_AND_STORE(tempBlurred, 6);
PACK_AND_STORE(tempBlurred, 7);
#undef PACK_AND_STORE
}
 
static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
const vector unsigned char zero = vec_splat_u8(0);
 
#define LOAD_DOUBLE_LINE(i, j) \
vector unsigned char perm1##i = vec_lvsl(i * stride, src); \
vector unsigned char perm2##i = vec_lvsl(j * stride, src); \
vector unsigned char srcA##i = vec_ld(i * stride, src); \
vector unsigned char srcB##i = vec_ld(i * stride + 16, src); \
vector unsigned char srcC##i = vec_ld(j * stride, src); \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src); \
vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)
 
LOAD_DOUBLE_LINE(0, 1);
LOAD_DOUBLE_LINE(2, 3);
LOAD_DOUBLE_LINE(4, 5);
LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE
 
vector unsigned char tempA = vec_mergeh(src0, zero);
vector unsigned char tempB = vec_mergel(src0, zero);
vector unsigned char tempC = vec_mergeh(src1, zero);
vector unsigned char tempD = vec_mergel(src1, zero);
vector unsigned char tempE = vec_mergeh(src2, zero);
vector unsigned char tempF = vec_mergel(src2, zero);
vector unsigned char tempG = vec_mergeh(src3, zero);
vector unsigned char tempH = vec_mergel(src3, zero);
vector unsigned char tempI = vec_mergeh(src4, zero);
vector unsigned char tempJ = vec_mergel(src4, zero);
vector unsigned char tempK = vec_mergeh(src5, zero);
vector unsigned char tempL = vec_mergel(src5, zero);
vector unsigned char tempM = vec_mergeh(src6, zero);
vector unsigned char tempN = vec_mergel(src6, zero);
vector unsigned char tempO = vec_mergeh(src7, zero);
vector unsigned char tempP = vec_mergel(src7, zero);
 
vector unsigned char temp0 = vec_mergeh(tempA, tempI);
vector unsigned char temp1 = vec_mergel(tempA, tempI);
vector unsigned char temp2 = vec_mergeh(tempB, tempJ);
vector unsigned char temp3 = vec_mergel(tempB, tempJ);
vector unsigned char temp4 = vec_mergeh(tempC, tempK);
vector unsigned char temp5 = vec_mergel(tempC, tempK);
vector unsigned char temp6 = vec_mergeh(tempD, tempL);
vector unsigned char temp7 = vec_mergel(tempD, tempL);
vector unsigned char temp8 = vec_mergeh(tempE, tempM);
vector unsigned char temp9 = vec_mergel(tempE, tempM);
vector unsigned char temp10 = vec_mergeh(tempF, tempN);
vector unsigned char temp11 = vec_mergel(tempF, tempN);
vector unsigned char temp12 = vec_mergeh(tempG, tempO);
vector unsigned char temp13 = vec_mergel(tempG, tempO);
vector unsigned char temp14 = vec_mergeh(tempH, tempP);
vector unsigned char temp15 = vec_mergel(tempH, tempP);
 
tempA = vec_mergeh(temp0, temp8);
tempB = vec_mergel(temp0, temp8);
tempC = vec_mergeh(temp1, temp9);
tempD = vec_mergel(temp1, temp9);
tempE = vec_mergeh(temp2, temp10);
tempF = vec_mergel(temp2, temp10);
tempG = vec_mergeh(temp3, temp11);
tempH = vec_mergel(temp3, temp11);
tempI = vec_mergeh(temp4, temp12);
tempJ = vec_mergel(temp4, temp12);
tempK = vec_mergeh(temp5, temp13);
tempL = vec_mergel(temp5, temp13);
tempM = vec_mergeh(temp6, temp14);
tempN = vec_mergel(temp6, temp14);
tempO = vec_mergeh(temp7, temp15);
tempP = vec_mergel(temp7, temp15);
 
temp0 = vec_mergeh(tempA, tempI);
temp1 = vec_mergel(tempA, tempI);
temp2 = vec_mergeh(tempB, tempJ);
temp3 = vec_mergel(tempB, tempJ);
temp4 = vec_mergeh(tempC, tempK);
temp5 = vec_mergel(tempC, tempK);
temp6 = vec_mergeh(tempD, tempL);
temp7 = vec_mergel(tempD, tempL);
temp8 = vec_mergeh(tempE, tempM);
temp9 = vec_mergel(tempE, tempM);
temp10 = vec_mergeh(tempF, tempN);
temp11 = vec_mergel(tempF, tempN);
temp12 = vec_mergeh(tempG, tempO);
temp13 = vec_mergel(tempG, tempO);
temp14 = vec_mergeh(tempH, tempP);
temp15 = vec_mergel(tempH, tempP);
 
vec_st(temp0, 0, dst);
vec_st(temp1, 16, dst);
vec_st(temp2, 32, dst);
vec_st(temp3, 48, dst);
vec_st(temp4, 64, dst);
vec_st(temp5, 80, dst);
vec_st(temp6, 96, dst);
vec_st(temp7, 112, dst);
vec_st(temp8, 128, dst);
vec_st(temp9, 144, dst);
vec_st(temp10, 160, dst);
vec_st(temp11, 176, dst);
vec_st(temp12, 192, dst);
vec_st(temp13, 208, dst);
vec_st(temp14, 224, dst);
vec_st(temp15, 240, dst);
}
 
static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
const vector unsigned char zero = vec_splat_u8(0);
const vector signed char neg1 = vec_splat_s8(-1);
 
#define LOAD_DOUBLE_LINE(i, j) \
vector unsigned char src##i = vec_ld(i * 16, src); \
vector unsigned char src##j = vec_ld(j * 16, src)
 
LOAD_DOUBLE_LINE(0, 1);
LOAD_DOUBLE_LINE(2, 3);
LOAD_DOUBLE_LINE(4, 5);
LOAD_DOUBLE_LINE(6, 7);
LOAD_DOUBLE_LINE(8, 9);
LOAD_DOUBLE_LINE(10, 11);
LOAD_DOUBLE_LINE(12, 13);
LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE
 
vector unsigned char tempA = vec_mergeh(src0, src8);
vector unsigned char tempB;
vector unsigned char tempC = vec_mergeh(src1, src9);
vector unsigned char tempD;
vector unsigned char tempE = vec_mergeh(src2, src10);
vector unsigned char tempG = vec_mergeh(src3, src11);
vector unsigned char tempI = vec_mergeh(src4, src12);
vector unsigned char tempJ;
vector unsigned char tempK = vec_mergeh(src5, src13);
vector unsigned char tempL;
vector unsigned char tempM = vec_mergeh(src6, src14);
vector unsigned char tempO = vec_mergeh(src7, src15);
 
vector unsigned char temp0 = vec_mergeh(tempA, tempI);
vector unsigned char temp1 = vec_mergel(tempA, tempI);
vector unsigned char temp2;
vector unsigned char temp3;
vector unsigned char temp4 = vec_mergeh(tempC, tempK);
vector unsigned char temp5 = vec_mergel(tempC, tempK);
vector unsigned char temp6;
vector unsigned char temp7;
vector unsigned char temp8 = vec_mergeh(tempE, tempM);
vector unsigned char temp9 = vec_mergel(tempE, tempM);
vector unsigned char temp12 = vec_mergeh(tempG, tempO);
vector unsigned char temp13 = vec_mergel(tempG, tempO);
 
tempA = vec_mergeh(temp0, temp8);
tempB = vec_mergel(temp0, temp8);
tempC = vec_mergeh(temp1, temp9);
tempD = vec_mergel(temp1, temp9);
tempI = vec_mergeh(temp4, temp12);
tempJ = vec_mergel(temp4, temp12);
tempK = vec_mergeh(temp5, temp13);
tempL = vec_mergel(temp5, temp13);
 
temp0 = vec_mergeh(tempA, tempI);
temp1 = vec_mergel(tempA, tempI);
temp2 = vec_mergeh(tempB, tempJ);
temp3 = vec_mergel(tempB, tempJ);
temp4 = vec_mergeh(tempC, tempK);
temp5 = vec_mergel(tempC, tempK);
temp6 = vec_mergeh(tempD, tempL);
temp7 = vec_mergel(tempD, tempL);
 
 
#define STORE_DOUBLE_LINE(i, j) do { \
vector unsigned char dstAi = vec_ld(i * stride, dst); \
vector unsigned char dstBi = vec_ld(i * stride + 16, dst); \
vector unsigned char dstAj = vec_ld(j * stride, dst); \
    vector unsigned char dstBj = vec_ld(j * stride + 16, dst); \
vector unsigned char aligni = vec_lvsr(i * stride, dst); \
vector unsigned char alignj = vec_lvsr(j * stride, dst); \
vector unsigned char maski = \
vec_perm(zero, (vector unsigned char)neg1, aligni); \
vector unsigned char maskj = \
vec_perm(zero, (vector unsigned char)neg1, alignj); \
vector unsigned char dstRi = vec_perm(temp##i, temp##i, aligni); \
vector unsigned char dstRj = vec_perm(temp##j, temp##j, alignj); \
vector unsigned char dstAFi = vec_sel(dstAi, dstRi, maski); \
vector unsigned char dstBFi = vec_sel(dstRi, dstBi, maski); \
vector unsigned char dstAFj = vec_sel(dstAj, dstRj, maskj); \
vector unsigned char dstBFj = vec_sel(dstRj, dstBj, maskj); \
vec_st(dstAFi, i * stride, dst); \
vec_st(dstBFi, i * stride + 16, dst); \
vec_st(dstAFj, j * stride, dst); \
vec_st(dstBFj, j * stride + 16, dst); \
} while (0)
 
STORE_DOUBLE_LINE(0,1);
STORE_DOUBLE_LINE(2,3);
STORE_DOUBLE_LINE(4,5);
STORE_DOUBLE_LINE(6,7);
}
/contrib/sdk/sources/ffmpeg/libpostproc/postprocess_internal.h
0,0 → 1,179
/*
* Copyright (C) 2001-2002 Michael Niedermayer (michaelni@gmx.at)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* internal API header.
*/
 
#ifndef POSTPROC_POSTPROCESS_INTERNAL_H
#define POSTPROC_POSTPROCESS_INTERNAL_H
 
#include <string.h>
#include "libavutil/avutil.h"
#include "libavutil/intmath.h"
#include "libavutil/log.h"
#include "postprocess.h"
 
#define V_DEBLOCK 0x01
#define H_DEBLOCK 0x02
#define DERING 0x04
#define LEVEL_FIX 0x08 ///< Brightness & Contrast
 
#define LUM_V_DEBLOCK V_DEBLOCK // 1
#define LUM_H_DEBLOCK H_DEBLOCK // 2
#define CHROM_V_DEBLOCK (V_DEBLOCK<<4) // 16
#define CHROM_H_DEBLOCK (H_DEBLOCK<<4) // 32
#define LUM_DERING DERING // 4
#define CHROM_DERING (DERING<<4) // 64
#define LUM_LEVEL_FIX LEVEL_FIX // 8
#define CHROM_LEVEL_FIX (LEVEL_FIX<<4) // 128 (not implemented yet)
 
// Experimental vertical filters
#define V_X1_FILTER 0x0200 // 512
#define V_A_DEBLOCK 0x0400
 
// Experimental horizontal filters
#define H_X1_FILTER 0x2000 // 8192
#define H_A_DEBLOCK 0x4000
 
/// select between full y range (255-0) or standard one (234-16)
#define FULL_Y_RANGE 0x8000 // 32768
 
//Deinterlacing Filters
#define LINEAR_IPOL_DEINT_FILTER 0x10000 // 65536
#define LINEAR_BLEND_DEINT_FILTER 0x20000 // 131072
#define CUBIC_BLEND_DEINT_FILTER 0x8000 // (not implemented yet)
#define CUBIC_IPOL_DEINT_FILTER 0x40000 // 262144
#define MEDIAN_DEINT_FILTER 0x80000 // 524288
#define FFMPEG_DEINT_FILTER 0x400000
#define LOWPASS5_DEINT_FILTER 0x800000
 
#define TEMP_NOISE_FILTER 0x100000
#define FORCE_QUANT 0x200000
#define BITEXACT 0x1000000
 
//use this if you want faster postprocessing code
//it cannot differentiate between chroma & luma filters (both on or both off)
//obviously the -pp option on the command line then has no effect except turning
//the filters selected here on
//#define COMPILE_TIME_MODE 0x77
 
#define CLIP av_clip_uint8
 
/**
* Postprocessing filter.
*/
struct PPFilter{
const char *shortName;
const char *longName;
int chromDefault; ///< is chrominance filtering on by default if this filter is manually activated
int minLumQuality; ///< minimum quality to turn luminance filtering on
int minChromQuality; ///< minimum quality to turn chrominance filtering on
int mask; ///< Bitmask to turn this filter on
};
 
/**
* Postprocessing mode.
*/
typedef struct PPMode{
int lumMode; ///< activates filters for luminance
int chromMode; ///< activates filters for chrominance
int error; ///< non zero on error
 
int minAllowedY; ///< for brightness correction
int maxAllowedY; ///< for brightness correction
float maxClippedThreshold; ///< amount of "black" you are willing to lose to get a brightness-corrected picture
 
int maxTmpNoise[3]; ///< for Temporal Noise Reducing filter (Maximal sum of abs differences)
 
int baseDcDiff;
int flatnessThreshold;
 
int forcedQuant; ///< quantizer if FORCE_QUANT is used
} PPMode;
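
/* Illustrative only: lumMode and chromMode are plain bitmasks built from the
 * filter flags defined above, so a mode that deblocks and derings luma but
 * only deblocks chroma would look roughly like
 *
 *     PPMode m = { 0 };
 *     m.lumMode   = V_DEBLOCK | H_DEBLOCK | DERING;
 *     m.chromMode = V_DEBLOCK | H_DEBLOCK;
 *
 * In the public API, pp_get_mode_by_name_and_quality() fills these fields in
 * from the textual filter string. */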
 
/**
* postprocess context.
*/
typedef struct PPContext{
/**
* info on struct for av_log
*/
const AVClass *av_class;
 
uint8_t *tempBlocks; ///<used for the horizontal code
 
/**
* luma histogram.
* we need 64 bits here, otherwise we are going to have a problem
* after watching a black picture for 5 hours
*/
uint64_t *yHistogram;
 
DECLARE_ALIGNED(8, uint64_t, packedYOffset);
DECLARE_ALIGNED(8, uint64_t, packedYScale);
 
/** Temporal noise reducing buffers */
uint8_t *tempBlurred[3];
int32_t *tempBlurredPast[3];
 
/** Temporary buffers for handling the last row(s) */
uint8_t *tempDst;
uint8_t *tempSrc;
 
uint8_t *deintTemp;
 
DECLARE_ALIGNED(8, uint64_t, pQPb);
DECLARE_ALIGNED(8, uint64_t, pQPb2);
 
DECLARE_ALIGNED(8, uint64_t, mmxDcOffset)[64];
DECLARE_ALIGNED(8, uint64_t, mmxDcThreshold)[64];
 
QP_STORE_T *stdQPTable; ///< used to fix MPEG2 style qscale
QP_STORE_T *nonBQPTable;
QP_STORE_T *forcedQPTable;
 
int QP;
int nonBQP;
 
int frameNum;
 
int cpuCaps;
 
    int qpStride; ///< size of the QP buffers (needed to realloc them if necessary)
    int stride; ///< size of some buffers (needed to realloc them if necessary)
 
int hChromaSubSample;
int vChromaSubSample;
 
PPMode ppMode;
} PPContext;
 
 
static inline void linecpy(void *dest, const void *src, int lines, int stride) {
if (stride > 0) {
memcpy(dest, src, lines*stride);
} else {
memcpy((uint8_t*)dest+(lines-1)*stride, (const uint8_t*)src+(lines-1)*stride, -lines*stride);
}
}
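
/* Usage sketch (illustrative): linecpy also handles a bottom-up layout where
 * stride is negative, e.g.
 *
 *     linecpy(dst, src, 8, -width);
 *
 * copies 8 rows of width bytes; the memcpy then starts at the lowest address,
 * src + 7*stride, and spans -8*stride == 8*width bytes. */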
 
#endif /* POSTPROC_POSTPROCESS_INTERNAL_H */
/contrib/sdk/sources/ffmpeg/libpostproc/postprocess_template.c
0,0 → 1,3714
/*
* Copyright (C) 2001-2002 Michael Niedermayer (michaelni@gmx.at)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* mmx/mmx2/3dnow postprocess code.
*/
 
#include "libavutil/x86/asm.h"
 
/* A single TEMPLATE_PP_* should be defined (to 1) when this template is
 * included. The following macros define its dependencies to 1 as well
 * (e.g. MMX2 depends on MMX) and define all of the others to 0. Every
 * TEMPLATE_PP_* needs to be undefined again at the end. */
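
/* Illustrative sketch of how the template is instantiated (the actual
 * includes live in postprocess.c):
 *
 *     #define TEMPLATE_PP_MMXEXT 1
 *     #include "postprocess_template.c"   // emits the *_MMX2 flavour
 *
 * The RENAME() macro selected below appends the matching suffix to every
 * function defined in this file. */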
 
#ifdef TEMPLATE_PP_C
# define RENAME(a) a ## _C
#else
# define TEMPLATE_PP_C 0
#endif
 
#ifdef TEMPLATE_PP_ALTIVEC
# define RENAME(a) a ## _altivec
#else
# define TEMPLATE_PP_ALTIVEC 0
#endif
 
#ifdef TEMPLATE_PP_MMX
# define RENAME(a) a ## _MMX
#else
# define TEMPLATE_PP_MMX 0
#endif
 
#ifdef TEMPLATE_PP_MMXEXT
# undef TEMPLATE_PP_MMX
# define TEMPLATE_PP_MMX 1
# define RENAME(a) a ## _MMX2
#else
# define TEMPLATE_PP_MMXEXT 0
#endif
 
#ifdef TEMPLATE_PP_3DNOW
# undef TEMPLATE_PP_MMX
# define TEMPLATE_PP_MMX 1
# define RENAME(a) a ## _3DNow
#else
# define TEMPLATE_PP_3DNOW 0
#endif
 
#ifdef TEMPLATE_PP_SSE2
# undef TEMPLATE_PP_MMX
# define TEMPLATE_PP_MMX 1
# undef TEMPLATE_PP_MMXEXT
# define TEMPLATE_PP_MMXEXT 1
# define RENAME(a) a ## _SSE2
#else
# define TEMPLATE_PP_SSE2 0
#endif
 
#undef REAL_PAVGB
#undef PAVGB
#undef PMINUB
#undef PMAXUB
 
#if TEMPLATE_PP_MMXEXT
#define REAL_PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif TEMPLATE_PP_3DNOW
#define REAL_PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif
#define PAVGB(a,b) REAL_PAVGB(a,b)
 
#if TEMPLATE_PP_MMXEXT
#define PMINUB(a,b,t) "pminub " #a ", " #b " \n\t"
#elif TEMPLATE_PP_MMX
#define PMINUB(b,a,t) \
"movq " #a ", " #t " \n\t"\
"psubusb " #b ", " #t " \n\t"\
"psubb " #t ", " #a " \n\t"
#endif
 
#if TEMPLATE_PP_MMXEXT
#define PMAXUB(a,b) "pmaxub " #a ", " #b " \n\t"
#elif TEMPLATE_PP_MMX
#define PMAXUB(a,b) \
"psubusb " #a ", " #b " \n\t"\
"paddb " #a ", " #b " \n\t"
#endif
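
/* Note on the plain-MMX fallbacks above: unsigned saturating subtraction
 * cannot go below zero, so
 *     PMINUB leaves a - max(a - b, 0) = min(a, b) in a,
 *     PMAXUB leaves a + max(b - a, 0) = max(a, b) in b,
 * matching what pminub/pmaxub compute in one instruction on MMX2. */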
 
//FIXME? |255-0| = 1 (should not be a problem ...)
#if TEMPLATE_PP_MMX
/**
* Check if the middle 8x8 Block in the given 8x16 block is flat
*/
static inline int RENAME(vertClassify)(uint8_t src[], int stride, PPContext *c){
int numEq= 0, dcOk;
src+= stride*4; // src points to begin of the 8x8 Block
__asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
: : "m" (c->mmxDcOffset[c->nonBQP]), "m" (c->mmxDcThreshold[c->nonBQP])
);
 
__asm__ volatile(
"lea (%2, %3), %%"REG_a" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %1 eax eax+%2 eax+2%2 %1+4%2 ecx ecx+%2 ecx+2%2 %1+8%2 ecx+4%2
 
"movq (%2), %%mm0 \n\t"
"movq (%%"REG_a"), %%mm1 \n\t"
"movq %%mm0, %%mm3 \n\t"
"movq %%mm0, %%mm4 \n\t"
PMAXUB(%%mm1, %%mm4)
PMINUB(%%mm1, %%mm3, %%mm5)
"psubb %%mm1, %%mm0 \n\t" // mm0 = difference
"paddb %%mm7, %%mm0 \n\t"
"pcmpgtb %%mm6, %%mm0 \n\t"
 
"movq (%%"REG_a",%3), %%mm2 \n\t"
PMAXUB(%%mm2, %%mm4)
PMINUB(%%mm2, %%mm3, %%mm5)
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
 
"movq (%%"REG_a", %3, 2), %%mm1 \n\t"
PMAXUB(%%mm1, %%mm4)
PMINUB(%%mm1, %%mm3, %%mm5)
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
 
"lea (%%"REG_a", %3, 4), %%"REG_a" \n\t"
 
"movq (%2, %3, 4), %%mm2 \n\t"
PMAXUB(%%mm2, %%mm4)
PMINUB(%%mm2, %%mm3, %%mm5)
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
 
"movq (%%"REG_a"), %%mm1 \n\t"
PMAXUB(%%mm1, %%mm4)
PMINUB(%%mm1, %%mm3, %%mm5)
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
 
"movq (%%"REG_a", %3), %%mm2 \n\t"
PMAXUB(%%mm2, %%mm4)
PMINUB(%%mm2, %%mm3, %%mm5)
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
 
"movq (%%"REG_a", %3, 2), %%mm1 \n\t"
PMAXUB(%%mm1, %%mm4)
PMINUB(%%mm1, %%mm3, %%mm5)
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
"psubusb %%mm3, %%mm4 \n\t"
 
" \n\t"
#if TEMPLATE_PP_MMXEXT
"pxor %%mm7, %%mm7 \n\t"
"psadbw %%mm7, %%mm0 \n\t"
#else
"movq %%mm0, %%mm1 \n\t"
"psrlw $8, %%mm0 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"psrlq $16, %%mm0 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"psrlq $32, %%mm0 \n\t"
"paddb %%mm1, %%mm0 \n\t"
#endif
"movq %4, %%mm7 \n\t" // QP,..., QP
"paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
"psubusb %%mm7, %%mm4 \n\t" // Diff <= 2QP -> 0
"packssdw %%mm4, %%mm4 \n\t"
"movd %%mm0, %0 \n\t"
"movd %%mm4, %1 \n\t"
 
: "=r" (numEq), "=r" (dcOk)
: "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb)
: "%"REG_a
);
 
numEq= (-numEq) &0xFF;
if(numEq > c->ppMode.flatnessThreshold){
if(dcOk) return 0;
else return 1;
}else{
return 2;
}
}
#endif //TEMPLATE_PP_MMX
 
/**
* Do a vertical low pass filter on the 8x16 block (only write to the 8x8 block in the middle)
* using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16
*/
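
/* An informal note on the arithmetic: the C fallback below keeps staggered
 * running sums per column, with pixels outside the block replaced by the
 * clamped edge values `first`/`last`, and writes
 *     src[l_k] = (sums[k-1] + sums[k+1] + 2*src[l_k]) >> 4
 * which works out to the same (1,1,2,2,4,2,2,1,1)/16 kernel that the
 * MMX2/3DNow! path builds from chains of PAVGB averages. */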
#if !TEMPLATE_PP_ALTIVEC
static inline void RENAME(doVertLowPass)(uint8_t *src, int stride, PPContext *c)
{
#if TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
src+= stride*3;
__asm__ volatile( //"movv %0 %1 %2\n\t"
"movq %2, %%mm0 \n\t" // QP,..., QP
"pxor %%mm4, %%mm4 \n\t"
 
"movq (%0), %%mm6 \n\t"
"movq (%0, %1), %%mm5 \n\t"
"movq %%mm5, %%mm1 \n\t"
"movq %%mm6, %%mm2 \n\t"
"psubusb %%mm6, %%mm5 \n\t"
"psubusb %%mm1, %%mm2 \n\t"
"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
"pcmpeqb %%mm4, %%mm2 \n\t" // diff <= QP -> FF
 
"pand %%mm2, %%mm6 \n\t"
"pandn %%mm1, %%mm2 \n\t"
"por %%mm2, %%mm6 \n\t"// First Line to Filter
 
"movq (%0, %1, 8), %%mm5 \n\t"
"lea (%0, %1, 4), %%"REG_a" \n\t"
"lea (%0, %1, 8), %%"REG_c" \n\t"
"sub %1, %%"REG_c" \n\t"
"add %1, %0 \n\t" // %0 points to line 1 not 0
"movq (%0, %1, 8), %%mm7 \n\t"
"movq %%mm5, %%mm1 \n\t"
"movq %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm5 \n\t"
"psubusb %%mm1, %%mm2 \n\t"
"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
"pcmpeqb %%mm4, %%mm2 \n\t" // diff <= QP -> FF
 
"pand %%mm2, %%mm7 \n\t"
"pandn %%mm1, %%mm2 \n\t"
"por %%mm2, %%mm7 \n\t" // First Line to Filter
 
 
// 1 2 3 4 5 6 7 8
// %0 %0+%1 %0+2%1 eax %0+4%1 eax+2%1 ecx eax+4%1
// 6 4 2 2 1 1
// 6 4 4 2
// 6 8 2
 
"movq (%0, %1), %%mm0 \n\t" // 1
"movq %%mm0, %%mm1 \n\t" // 1
PAVGB(%%mm6, %%mm0) //1 1 /2
PAVGB(%%mm6, %%mm0) //3 1 /4
 
"movq (%0, %1, 4), %%mm2 \n\t" // 1
"movq %%mm2, %%mm5 \n\t" // 1
PAVGB((%%REGa), %%mm2) // 11 /2
PAVGB((%0, %1, 2), %%mm2) // 211 /4
"movq %%mm2, %%mm3 \n\t" // 211 /4
"movq (%0), %%mm4 \n\t" // 1
PAVGB(%%mm4, %%mm3) // 4 211 /8
PAVGB(%%mm0, %%mm3) //642211 /16
"movq %%mm3, (%0) \n\t" // X
// mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
"movq %%mm1, %%mm0 \n\t" // 1
PAVGB(%%mm6, %%mm0) //1 1 /2
"movq %%mm4, %%mm3 \n\t" // 1
PAVGB((%0,%1,2), %%mm3) // 1 1 /2
PAVGB((%%REGa,%1,2), %%mm5) // 11 /2
PAVGB((%%REGa), %%mm5) // 211 /4
PAVGB(%%mm5, %%mm3) // 2 2211 /8
PAVGB(%%mm0, %%mm3) //4242211 /16
"movq %%mm3, (%0,%1) \n\t" // X
// mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
PAVGB(%%mm4, %%mm6) //11 /2
"movq (%%"REG_c"), %%mm0 \n\t" // 1
PAVGB((%%REGa, %1, 2), %%mm0) // 11/2
"movq %%mm0, %%mm3 \n\t" // 11/2
PAVGB(%%mm1, %%mm0) // 2 11/4
PAVGB(%%mm6, %%mm0) //222 11/8
PAVGB(%%mm2, %%mm0) //22242211/16
"movq (%0, %1, 2), %%mm2 \n\t" // 1
"movq %%mm0, (%0, %1, 2) \n\t" // X
// mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
"movq (%%"REG_a", %1, 4), %%mm0 \n\t" // 1
PAVGB((%%REGc), %%mm0) // 11 /2
PAVGB(%%mm0, %%mm6) //11 11 /4
PAVGB(%%mm1, %%mm4) // 11 /2
PAVGB(%%mm2, %%mm1) // 11 /2
PAVGB(%%mm1, %%mm6) //1122 11 /8
PAVGB(%%mm5, %%mm6) //112242211 /16
"movq (%%"REG_a"), %%mm5 \n\t" // 1
"movq %%mm6, (%%"REG_a") \n\t" // X
// mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
"movq (%%"REG_a", %1, 4), %%mm6 \n\t" // 1
PAVGB(%%mm7, %%mm6) // 11 /2
PAVGB(%%mm4, %%mm6) // 11 11 /4
PAVGB(%%mm3, %%mm6) // 11 2211 /8
PAVGB(%%mm5, %%mm2) // 11 /2
"movq (%0, %1, 4), %%mm4 \n\t" // 1
PAVGB(%%mm4, %%mm2) // 112 /4
PAVGB(%%mm2, %%mm6) // 112242211 /16
"movq %%mm6, (%0, %1, 4) \n\t" // X
// mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
PAVGB(%%mm7, %%mm1) // 11 2 /4
PAVGB(%%mm4, %%mm5) // 11 /2
PAVGB(%%mm5, %%mm0) // 11 11 /4
"movq (%%"REG_a", %1, 2), %%mm6 \n\t" // 1
PAVGB(%%mm6, %%mm1) // 11 4 2 /8
PAVGB(%%mm0, %%mm1) // 11224222 /16
"movq %%mm1, (%%"REG_a", %1, 2) \n\t" // X
// mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
PAVGB((%%REGc), %%mm2) // 112 4 /8
"movq (%%"REG_a", %1, 4), %%mm0 \n\t" // 1
PAVGB(%%mm0, %%mm6) // 1 1 /2
PAVGB(%%mm7, %%mm6) // 1 12 /4
PAVGB(%%mm2, %%mm6) // 1122424 /4
"movq %%mm6, (%%"REG_c") \n\t" // X
// mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
PAVGB(%%mm7, %%mm5) // 11 2 /4
PAVGB(%%mm7, %%mm5) // 11 6 /8
 
PAVGB(%%mm3, %%mm0) // 112 /4
PAVGB(%%mm0, %%mm5) // 112246 /16
"movq %%mm5, (%%"REG_a", %1, 4) \n\t" // X
"sub %1, %0 \n\t"
 
:
: "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb)
: "%"REG_a, "%"REG_c
);
#else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
const int l7= stride + l6;
const int l8= stride + l7;
const int l9= stride + l8;
int x;
src+= stride*3;
for(x=0; x<BLOCK_SIZE; x++){
const int first= FFABS(src[0] - src[l1]) < c->QP ? src[0] : src[l1];
const int last= FFABS(src[l8] - src[l9]) < c->QP ? src[l9] : src[l8];
 
int sums[10];
sums[0] = 4*first + src[l1] + src[l2] + src[l3] + 4;
sums[1] = sums[0] - first + src[l4];
sums[2] = sums[1] - first + src[l5];
sums[3] = sums[2] - first + src[l6];
sums[4] = sums[3] - first + src[l7];
sums[5] = sums[4] - src[l1] + src[l8];
sums[6] = sums[5] - src[l2] + last;
sums[7] = sums[6] - src[l3] + last;
sums[8] = sums[7] - src[l4] + last;
sums[9] = sums[8] - src[l5] + last;
 
src[l1]= (sums[0] + sums[2] + 2*src[l1])>>4;
src[l2]= (sums[1] + sums[3] + 2*src[l2])>>4;
src[l3]= (sums[2] + sums[4] + 2*src[l3])>>4;
src[l4]= (sums[3] + sums[5] + 2*src[l4])>>4;
src[l5]= (sums[4] + sums[6] + 2*src[l5])>>4;
src[l6]= (sums[5] + sums[7] + 2*src[l6])>>4;
src[l7]= (sums[6] + sums[8] + 2*src[l7])>>4;
src[l8]= (sums[7] + sums[9] + 2*src[l8])>>4;
 
src++;
}
#endif //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
}
#endif //TEMPLATE_PP_ALTIVEC
 
/**
* Experimental Filter 1
* will not damage linear gradients
* Flat blocks should look like they were passed through the (1,1,2,2,4,2,2,1,1) 9-tap filter
* can only smooth blocks at the expected locations (it cannot smooth them if they have moved)
* the MMX2 version does correct clipping, the C version does not
*/
static inline void RENAME(vertX1Filter)(uint8_t *src, int stride, PPContext *co)
{
#if TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
src+= stride*3;
 
__asm__ volatile(
"pxor %%mm7, %%mm7 \n\t" // 0
"lea (%0, %1), %%"REG_a" \n\t"
"lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %0+8%1 ecx+4%1
"movq (%%"REG_a", %1, 2), %%mm0 \n\t" // line 3
"movq (%0, %1, 4), %%mm1 \n\t" // line 4
"movq %%mm1, %%mm2 \n\t" // line 4
"psubusb %%mm0, %%mm1 \n\t"
"psubusb %%mm2, %%mm0 \n\t"
"por %%mm1, %%mm0 \n\t" // |l2 - l3|
"movq (%%"REG_c"), %%mm3 \n\t" // line 5
"movq (%%"REG_c", %1), %%mm4 \n\t" // line 6
"movq %%mm3, %%mm5 \n\t" // line 5
"psubusb %%mm4, %%mm3 \n\t"
"psubusb %%mm5, %%mm4 \n\t"
"por %%mm4, %%mm3 \n\t" // |l5 - l6|
PAVGB(%%mm3, %%mm0) // (|l2 - l3| + |l5 - l6|)/2
"movq %%mm2, %%mm1 \n\t" // line 4
"psubusb %%mm5, %%mm2 \n\t"
"movq %%mm2, %%mm4 \n\t"
"pcmpeqb %%mm7, %%mm2 \n\t" // (l4 - l5) <= 0 ? -1 : 0
"psubusb %%mm1, %%mm5 \n\t"
"por %%mm5, %%mm4 \n\t" // |l4 - l5|
"psubusb %%mm0, %%mm4 \n\t" //d = MAX(0, |l4-l5| - (|l2-l3| + |l5-l6|)/2)
"movq %%mm4, %%mm3 \n\t" // d
"movq %2, %%mm0 \n\t"
"paddusb %%mm0, %%mm0 \n\t"
"psubusb %%mm0, %%mm4 \n\t"
"pcmpeqb %%mm7, %%mm4 \n\t" // d <= QP ? -1 : 0
"psubusb "MANGLE(b01)", %%mm3 \n\t"
"pand %%mm4, %%mm3 \n\t" // d <= QP ? d : 0
 
PAVGB(%%mm7, %%mm3) // d/2
"movq %%mm3, %%mm1 \n\t" // d/2
PAVGB(%%mm7, %%mm3) // d/4
PAVGB(%%mm1, %%mm3) // 3*d/8
 
"movq (%0, %1, 4), %%mm0 \n\t" // line 4
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
"psubusb %%mm3, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%0, %1, 4) \n\t" // line 4
 
"movq (%%"REG_c"), %%mm0 \n\t" // line 5
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
"paddusb %%mm3, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%"REG_c") \n\t" // line 5
 
PAVGB(%%mm7, %%mm1) // d/4
 
"movq (%%"REG_a", %1, 2), %%mm0 \n\t" // line 3
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
"psubusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%"REG_a", %1, 2) \n\t" // line 3
 
"movq (%%"REG_c", %1), %%mm0 \n\t" // line 6
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
"paddusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%"REG_c", %1) \n\t" // line 6
 
PAVGB(%%mm7, %%mm1) // d/8
 
"movq (%%"REG_a", %1), %%mm0 \n\t" // line 2
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l2-1 : l2
"psubusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%"REG_a", %1) \n\t" // line 2
 
"movq (%%"REG_c", %1, 2), %%mm0 \n\t" // line 7
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l7-1 : l7
"paddusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%"REG_c", %1, 2) \n\t" // line 7
 
:
: "r" (src), "r" ((x86_reg)stride), "m" (co->pQPb)
: "%"REG_a, "%"REG_c
);
#else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
 
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
const int l7= stride + l6;
// const int l8= stride + l7;
// const int l9= stride + l8;
int x;
 
src+= stride*3;
for(x=0; x<BLOCK_SIZE; x++){
int a= src[l3] - src[l4];
int b= src[l4] - src[l5];
int c= src[l5] - src[l6];
 
int d= FFABS(b) - ((FFABS(a) + FFABS(c))>>1);
d= FFMAX(d, 0);
 
if(d < co->QP*2){
int v = d * FFSIGN(-b);
 
src[l2] +=v>>3;
src[l3] +=v>>2;
src[l4] +=(3*v)>>3;
src[l5] -=(3*v)>>3;
src[l6] -=v>>2;
src[l7] -=v>>3;
}
src++;
}
#endif //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
}
 
#if !TEMPLATE_PP_ALTIVEC
static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext *c)
{
#if TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
/*
uint8_t tmp[16];
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= (int)tmp - (int)src - stride*3;
const int l5= (int)tmp - (int)src - stride*3 + 8;
const int l6= stride*3 + l3;
const int l7= stride + l6;
const int l8= stride + l7;
 
memcpy(tmp, src+stride*7, 8);
memcpy(tmp+8, src+stride*8, 8);
*/
src+= stride*4;
__asm__ volatile(
 
#if 0 //slightly more accurate and slightly slower
"pxor %%mm7, %%mm7 \n\t" // 0
"lea (%0, %1), %%"REG_a" \n\t"
"lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
// 0 1 2 3 4 5 6 7
// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ecx+%1 ecx+2%1
// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1
 
 
"movq (%0, %1, 2), %%mm0 \n\t" // l2
"movq (%0), %%mm1 \n\t" // l0
"movq %%mm0, %%mm2 \n\t" // l2
PAVGB(%%mm7, %%mm0) // ~l2/2
PAVGB(%%mm1, %%mm0) // ~(l2 + 2l0)/4
PAVGB(%%mm2, %%mm0) // ~(5l2 + 2l0)/8
 
"movq (%%"REG_a"), %%mm1 \n\t" // l1
"movq (%%"REG_a", %1, 2), %%mm3 \n\t" // l3
"movq %%mm1, %%mm4 \n\t" // l1
PAVGB(%%mm7, %%mm1) // ~l1/2
PAVGB(%%mm3, %%mm1) // ~(l1 + 2l3)/4
PAVGB(%%mm4, %%mm1) // ~(5l1 + 2l3)/8
 
"movq %%mm0, %%mm4 \n\t" // ~(5l2 + 2l0)/8
"psubusb %%mm1, %%mm0 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"por %%mm0, %%mm1 \n\t" // ~|2l0 - 5l1 + 5l2 - 2l3|/8
// mm1= |lenergy|, mm2= l2, mm3= l3, mm7=0
 
"movq (%0, %1, 4), %%mm0 \n\t" // l4
"movq %%mm0, %%mm4 \n\t" // l4
PAVGB(%%mm7, %%mm0) // ~l4/2
PAVGB(%%mm2, %%mm0) // ~(l4 + 2l2)/4
PAVGB(%%mm4, %%mm0) // ~(5l4 + 2l2)/8
 
"movq (%%"REG_c"), %%mm2 \n\t" // l5
"movq %%mm3, %%mm5 \n\t" // l3
PAVGB(%%mm7, %%mm3) // ~l3/2
PAVGB(%%mm2, %%mm3) // ~(l3 + 2l5)/4
PAVGB(%%mm5, %%mm3) // ~(5l3 + 2l5)/8
 
"movq %%mm0, %%mm6 \n\t" // ~(5l4 + 2l2)/8
"psubusb %%mm3, %%mm0 \n\t"
"psubusb %%mm6, %%mm3 \n\t"
"por %%mm0, %%mm3 \n\t" // ~|2l2 - 5l3 + 5l4 - 2l5|/8
"pcmpeqb %%mm7, %%mm0 \n\t" // SIGN(2l2 - 5l3 + 5l4 - 2l5)
// mm0= SIGN(menergy), mm1= |lenergy|, mm2= l5, mm3= |menergy|, mm4=l4, mm5= l3, mm7=0
 
"movq (%%"REG_c", %1), %%mm6 \n\t" // l6
"movq %%mm6, %%mm5 \n\t" // l6
PAVGB(%%mm7, %%mm6) // ~l6/2
PAVGB(%%mm4, %%mm6) // ~(l6 + 2l4)/4
PAVGB(%%mm5, %%mm6) // ~(5l6 + 2l4)/8
 
"movq (%%"REG_c", %1, 2), %%mm5 \n\t" // l7
"movq %%mm2, %%mm4 \n\t" // l5
PAVGB(%%mm7, %%mm2) // ~l5/2
PAVGB(%%mm5, %%mm2) // ~(l5 + 2l7)/4
PAVGB(%%mm4, %%mm2) // ~(5l5 + 2l7)/8
 
"movq %%mm6, %%mm4 \n\t" // ~(5l6 + 2l4)/8
"psubusb %%mm2, %%mm6 \n\t"
"psubusb %%mm4, %%mm2 \n\t"
"por %%mm6, %%mm2 \n\t" // ~|2l4 - 5l5 + 5l6 - 2l7|/8
// mm0= SIGN(menergy), mm1= |lenergy|/8, mm2= |renergy|/8, mm3= |menergy|/8, mm7=0
 
 
PMINUB(%%mm2, %%mm1, %%mm4) // MIN(|lenergy|,|renergy|)/8
"movq %2, %%mm4 \n\t" // QP //FIXME QP+1 ?
"paddusb "MANGLE(b01)", %%mm4 \n\t"
"pcmpgtb %%mm3, %%mm4 \n\t" // |menergy|/8 < QP
"psubusb %%mm1, %%mm3 \n\t" // d=|menergy|/8-MIN(|lenergy|,|renergy|)/8
"pand %%mm4, %%mm3 \n\t"
 
"movq %%mm3, %%mm1 \n\t"
// "psubusb "MANGLE(b01)", %%mm3 \n\t"
PAVGB(%%mm7, %%mm3)
PAVGB(%%mm7, %%mm3)
"paddusb %%mm1, %%mm3 \n\t"
// "paddusb "MANGLE(b01)", %%mm3 \n\t"
 
"movq (%%"REG_a", %1, 2), %%mm6 \n\t" //l3
"movq (%0, %1, 4), %%mm5 \n\t" //l4
"movq (%0, %1, 4), %%mm4 \n\t" //l4
"psubusb %%mm6, %%mm5 \n\t"
"psubusb %%mm4, %%mm6 \n\t"
"por %%mm6, %%mm5 \n\t" // |l3-l4|
"pcmpeqb %%mm7, %%mm6 \n\t" // SIGN(l3-l4)
"pxor %%mm6, %%mm0 \n\t"
"pand %%mm0, %%mm3 \n\t"
PMINUB(%%mm5, %%mm3, %%mm0)
 
"psubusb "MANGLE(b01)", %%mm3 \n\t"
PAVGB(%%mm7, %%mm3)
 
"movq (%%"REG_a", %1, 2), %%mm0 \n\t"
"movq (%0, %1, 4), %%mm2 \n\t"
"pxor %%mm6, %%mm0 \n\t"
"pxor %%mm6, %%mm2 \n\t"
"psubb %%mm3, %%mm0 \n\t"
"paddb %%mm3, %%mm2 \n\t"
"pxor %%mm6, %%mm0 \n\t"
"pxor %%mm6, %%mm2 \n\t"
"movq %%mm0, (%%"REG_a", %1, 2) \n\t"
"movq %%mm2, (%0, %1, 4) \n\t"
#endif //0
 
"lea (%0, %1), %%"REG_a" \n\t"
"pcmpeqb %%mm6, %%mm6 \n\t" // -1
// 0 1 2 3 4 5 6 7
// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ecx+%1 ecx+2%1
// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1
 
 
"movq (%%"REG_a", %1, 2), %%mm1 \n\t" // l3
"movq (%0, %1, 4), %%mm0 \n\t" // l4
"pxor %%mm6, %%mm1 \n\t" // -l3-1
PAVGB(%%mm1, %%mm0) // -q+128 = (l4-l3+256)/2
// mm1=-l3-1, mm0=128-q
 
"movq (%%"REG_a", %1, 4), %%mm2 \n\t" // l5
"movq (%%"REG_a", %1), %%mm3 \n\t" // l2
"pxor %%mm6, %%mm2 \n\t" // -l5-1
"movq %%mm2, %%mm5 \n\t" // -l5-1
"movq "MANGLE(b80)", %%mm4 \n\t" // 128
"lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
PAVGB(%%mm3, %%mm2) // (l2-l5+256)/2
PAVGB(%%mm0, %%mm4) // ~(l4-l3)/4 + 128
PAVGB(%%mm2, %%mm4) // ~(l2-l5)/4 +(l4-l3)/8 + 128
PAVGB(%%mm0, %%mm4) // ~(l2-l5)/8 +5(l4-l3)/16 + 128
// mm1=-l3-1, mm0=128-q, mm3=l2, mm4=menergy/16 + 128, mm5= -l5-1
 
"movq (%%"REG_a"), %%mm2 \n\t" // l1
"pxor %%mm6, %%mm2 \n\t" // -l1-1
PAVGB(%%mm3, %%mm2) // (l2-l1+256)/2
PAVGB((%0), %%mm1) // (l0-l3+256)/2
"movq "MANGLE(b80)", %%mm3 \n\t" // 128
PAVGB(%%mm2, %%mm3) // ~(l2-l1)/4 + 128
PAVGB(%%mm1, %%mm3) // ~(l0-l3)/4 +(l2-l1)/8 + 128
PAVGB(%%mm2, %%mm3) // ~(l0-l3)/8 +5(l2-l1)/16 + 128
// mm0=128-q, mm3=lenergy/16 + 128, mm4= menergy/16 + 128, mm5= -l5-1
 
PAVGB((%%REGc, %1), %%mm5) // (l6-l5+256)/2
"movq (%%"REG_c", %1, 2), %%mm1 \n\t" // l7
"pxor %%mm6, %%mm1 \n\t" // -l7-1
PAVGB((%0, %1, 4), %%mm1) // (l4-l7+256)/2
"movq "MANGLE(b80)", %%mm2 \n\t" // 128
PAVGB(%%mm5, %%mm2) // ~(l6-l5)/4 + 128
PAVGB(%%mm1, %%mm2) // ~(l4-l7)/4 +(l6-l5)/8 + 128
PAVGB(%%mm5, %%mm2) // ~(l4-l7)/8 +5(l6-l5)/16 + 128
// mm0=128-q, mm2=renergy/16 + 128, mm3=lenergy/16 + 128, mm4= menergy/16 + 128
 
"movq "MANGLE(b00)", %%mm1 \n\t" // 0
"movq "MANGLE(b00)", %%mm5 \n\t" // 0
"psubb %%mm2, %%mm1 \n\t" // 128 - renergy/16
"psubb %%mm3, %%mm5 \n\t" // 128 - lenergy/16
PMAXUB(%%mm1, %%mm2) // 128 + |renergy/16|
PMAXUB(%%mm5, %%mm3) // 128 + |lenergy/16|
PMINUB(%%mm2, %%mm3, %%mm1) // 128 + MIN(|lenergy|,|renergy|)/16
 
// mm0=128-q, mm3=128 + MIN(|lenergy|,|renergy|)/16, mm4= menergy/16 + 128
 
"movq "MANGLE(b00)", %%mm7 \n\t" // 0
"movq %2, %%mm2 \n\t" // QP
PAVGB(%%mm6, %%mm2) // 128 + QP/2
"psubb %%mm6, %%mm2 \n\t"
 
"movq %%mm4, %%mm1 \n\t"
"pcmpgtb %%mm7, %%mm1 \n\t" // SIGN(menergy)
"pxor %%mm1, %%mm4 \n\t"
"psubb %%mm1, %%mm4 \n\t" // 128 + |menergy|/16
"pcmpgtb %%mm4, %%mm2 \n\t" // |menergy|/16 < QP/2
"psubusb %%mm3, %%mm4 \n\t" //d=|menergy|/16 - MIN(|lenergy|,|renergy|)/16
// mm0=128-q, mm1= SIGN(menergy), mm2= |menergy|/16 < QP/2, mm4= d/16
 
"movq %%mm4, %%mm3 \n\t" // d
"psubusb "MANGLE(b01)", %%mm4 \n\t"
PAVGB(%%mm7, %%mm4) // d/32
PAVGB(%%mm7, %%mm4) // (d + 32)/64
"paddb %%mm3, %%mm4 \n\t" // 5d/64
"pand %%mm2, %%mm4 \n\t"
 
"movq "MANGLE(b80)", %%mm5 \n\t" // 128
"psubb %%mm0, %%mm5 \n\t" // q
"paddsb %%mm6, %%mm5 \n\t" // fix bad rounding
"pcmpgtb %%mm5, %%mm7 \n\t" // SIGN(q)
"pxor %%mm7, %%mm5 \n\t"
 
PMINUB(%%mm5, %%mm4, %%mm3) // MIN(|q|, 5d/64)
"pxor %%mm1, %%mm7 \n\t" // SIGN(d*q)
 
"pand %%mm7, %%mm4 \n\t"
"movq (%%"REG_a", %1, 2), %%mm0 \n\t"
"movq (%0, %1, 4), %%mm2 \n\t"
"pxor %%mm1, %%mm0 \n\t"
"pxor %%mm1, %%mm2 \n\t"
"paddb %%mm4, %%mm0 \n\t"
"psubb %%mm4, %%mm2 \n\t"
"pxor %%mm1, %%mm0 \n\t"
"pxor %%mm1, %%mm2 \n\t"
"movq %%mm0, (%%"REG_a", %1, 2) \n\t"
"movq %%mm2, (%0, %1, 4) \n\t"
 
:
: "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb)
: "%"REG_a, "%"REG_c
);
 
/*
{
int x;
src-= stride;
for(x=0; x<BLOCK_SIZE; x++){
const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
if(FFABS(middleEnergy)< 8*QP){
const int q=(src[l4] - src[l5])/2;
const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
 
int d= FFABS(middleEnergy) - FFMIN( FFABS(leftEnergy), FFABS(rightEnergy) );
d= FFMAX(d, 0);
 
d= (5*d + 32) >> 6;
d*= FFSIGN(-middleEnergy);
 
if(q>0){
d= d<0 ? 0 : d;
d= d>q ? q : d;
}else{
d= d>0 ? 0 : d;
d= d<q ? q : d;
}
 
src[l4]-= d;
src[l5]+= d;
}
src++;
}
src-=8;
for(x=0; x<8; x++){
int y;
for(y=4; y<6; y++){
int d= src[x+y*stride] - tmp[x+(y-4)*8];
int ad= FFABS(d);
static int max=0;
static int sum=0;
static int num=0;
static int bias=0;
 
if(max<ad) max=ad;
sum+= ad>3 ? 1 : 0;
if(ad>3){
src[0] = src[7] = src[stride*7] = src[(stride+1)*7]=255;
}
if(y==4) bias+=d;
num++;
if(num%1000000 == 0){
av_log(c, AV_LOG_INFO, " %d %d %d %d\n", num, sum, max, bias);
}
}
}
}
*/
#elif TEMPLATE_PP_MMX
DECLARE_ALIGNED(8, uint64_t, tmp)[4]; // make space for 4 8-byte vars
src+= stride*4;
__asm__ volatile(
"pxor %%mm7, %%mm7 \n\t"
// 0 1 2 3 4 5 6 7
// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 edx+%1 edx+2%1
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1
 
"movq (%0), %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t" // low part of line 0
"punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0
 
"movq (%0, %1), %%mm2 \n\t"
"lea (%0, %1, 2), %%"REG_a" \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
"punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1
 
"movq (%%"REG_a"), %%mm4 \n\t"
"movq %%mm4, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
"punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2
 
"paddw %%mm0, %%mm0 \n\t" // 2L0
"paddw %%mm1, %%mm1 \n\t" // 2H0
"psubw %%mm4, %%mm2 \n\t" // L1 - L2
"psubw %%mm5, %%mm3 \n\t" // H1 - H2
"psubw %%mm2, %%mm0 \n\t" // 2L0 - L1 + L2
"psubw %%mm3, %%mm1 \n\t" // 2H0 - H1 + H2
 
"psllw $2, %%mm2 \n\t" // 4L1 - 4L2
"psllw $2, %%mm3 \n\t" // 4H1 - 4H2
"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2
 
"movq (%%"REG_a", %1), %%mm2 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t" // L3
"punpckhbw %%mm7, %%mm3 \n\t" // H3
 
"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - L3
"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
"movq %%mm0, (%3) \n\t" // 2L0 - 5L1 + 5L2 - 2L3
"movq %%mm1, 8(%3) \n\t" // 2H0 - 5H1 + 5H2 - 2H3
 
"movq (%%"REG_a", %1, 2), %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t" // L4
"punpckhbw %%mm7, %%mm1 \n\t" // H4
 
"psubw %%mm0, %%mm2 \n\t" // L3 - L4
"psubw %%mm1, %%mm3 \n\t" // H3 - H4
"movq %%mm2, 16(%3) \n\t" // L3 - L4
"movq %%mm3, 24(%3) \n\t" // H3 - H4
"paddw %%mm4, %%mm4 \n\t" // 2L2
"paddw %%mm5, %%mm5 \n\t" // 2H2
"psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
"psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4
 
"lea (%%"REG_a", %1), %0 \n\t"
"psllw $2, %%mm2 \n\t" // 4L3 - 4L4
"psllw $2, %%mm3 \n\t" // 4H3 - 4H4
"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4
//50 opcodes so far
"movq (%0, %1, 2), %%mm2 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t" // L5
"punpckhbw %%mm7, %%mm3 \n\t" // H5
"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - L5
"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - H5
"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5
 
"movq (%%"REG_a", %1, 4), %%mm6 \n\t"
"punpcklbw %%mm7, %%mm6 \n\t" // L6
"psubw %%mm6, %%mm2 \n\t" // L5 - L6
"movq (%%"REG_a", %1, 4), %%mm6 \n\t"
"punpckhbw %%mm7, %%mm6 \n\t" // H6
"psubw %%mm6, %%mm3 \n\t" // H5 - H6
 
"paddw %%mm0, %%mm0 \n\t" // 2L4
"paddw %%mm1, %%mm1 \n\t" // 2H4
"psubw %%mm2, %%mm0 \n\t" // 2L4 - L5 + L6
"psubw %%mm3, %%mm1 \n\t" // 2H4 - H5 + H6
 
"psllw $2, %%mm2 \n\t" // 4L5 - 4L6
"psllw $2, %%mm3 \n\t" // 4H5 - 4H6
"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6
"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6
 
"movq (%0, %1, 4), %%mm2 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t" // L7
"punpckhbw %%mm7, %%mm3 \n\t" // H7
 
"paddw %%mm2, %%mm2 \n\t" // 2L7
"paddw %%mm3, %%mm3 \n\t" // 2H7
"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7
 
"movq (%3), %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
"movq 8(%3), %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
 
#if TEMPLATE_PP_MMXEXT
"movq %%mm7, %%mm6 \n\t" // 0
"psubw %%mm0, %%mm6 \n\t"
"pmaxsw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
"movq %%mm7, %%mm6 \n\t" // 0
"psubw %%mm1, %%mm6 \n\t"
"pmaxsw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
"movq %%mm7, %%mm6 \n\t" // 0
"psubw %%mm2, %%mm6 \n\t"
"pmaxsw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
"movq %%mm7, %%mm6 \n\t" // 0
"psubw %%mm3, %%mm6 \n\t"
"pmaxsw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#else
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm0, %%mm6 \n\t"
"pxor %%mm6, %%mm0 \n\t"
"psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm1, %%mm6 \n\t"
"pxor %%mm6, %%mm1 \n\t"
"psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm2, %%mm6 \n\t"
"pxor %%mm6, %%mm2 \n\t"
"psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm3, %%mm6 \n\t"
"pxor %%mm6, %%mm3 \n\t"
"psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#endif
 
#if TEMPLATE_PP_MMXEXT
"pminsw %%mm2, %%mm0 \n\t"
"pminsw %%mm3, %%mm1 \n\t"
#else
"movq %%mm0, %%mm6 \n\t"
"psubusw %%mm2, %%mm6 \n\t"
"psubw %%mm6, %%mm0 \n\t"
"movq %%mm1, %%mm6 \n\t"
"psubusw %%mm3, %%mm6 \n\t"
"psubw %%mm6, %%mm1 \n\t"
#endif
 
"movd %2, %%mm2 \n\t" // QP
"punpcklbw %%mm7, %%mm2 \n\t"
 
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
"pxor %%mm6, %%mm4 \n\t"
"psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
"pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
"pxor %%mm7, %%mm5 \n\t"
"psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
// 100 opcodes
"psllw $3, %%mm2 \n\t" // 8QP
"movq %%mm2, %%mm3 \n\t" // 8QP
"pcmpgtw %%mm4, %%mm2 \n\t"
"pcmpgtw %%mm5, %%mm3 \n\t"
"pand %%mm2, %%mm4 \n\t"
"pand %%mm3, %%mm5 \n\t"
 
 
"psubusw %%mm0, %%mm4 \n\t" // hd
"psubusw %%mm1, %%mm5 \n\t" // ld
 
 
"movq "MANGLE(w05)", %%mm2 \n\t" // 5
"pmullw %%mm2, %%mm4 \n\t"
"pmullw %%mm2, %%mm5 \n\t"
"movq "MANGLE(w20)", %%mm2 \n\t" // 32
"paddw %%mm2, %%mm4 \n\t"
"paddw %%mm2, %%mm5 \n\t"
"psrlw $6, %%mm4 \n\t"
"psrlw $6, %%mm5 \n\t"
 
"movq 16(%3), %%mm0 \n\t" // L3 - L4
"movq 24(%3), %%mm1 \n\t" // H3 - H4
 
"pxor %%mm2, %%mm2 \n\t"
"pxor %%mm3, %%mm3 \n\t"
 
"pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
"pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"psubw %%mm2, %%mm0 \n\t" // |L3-L4|
"psubw %%mm3, %%mm1 \n\t" // |H3-H4|
"psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
"psrlw $1, %%mm1 \n\t" // |H3 - H4|/2
 
"pxor %%mm6, %%mm2 \n\t"
"pxor %%mm7, %%mm3 \n\t"
"pand %%mm2, %%mm4 \n\t"
"pand %%mm3, %%mm5 \n\t"
 
#if TEMPLATE_PP_MMXEXT
"pminsw %%mm0, %%mm4 \n\t"
"pminsw %%mm1, %%mm5 \n\t"
#else
"movq %%mm4, %%mm2 \n\t"
"psubusw %%mm0, %%mm2 \n\t"
"psubw %%mm2, %%mm4 \n\t"
"movq %%mm5, %%mm2 \n\t"
"psubusw %%mm1, %%mm2 \n\t"
"psubw %%mm2, %%mm5 \n\t"
#endif
"pxor %%mm6, %%mm4 \n\t"
"pxor %%mm7, %%mm5 \n\t"
"psubw %%mm6, %%mm4 \n\t"
"psubw %%mm7, %%mm5 \n\t"
"packsswb %%mm5, %%mm4 \n\t"
"movq (%0), %%mm0 \n\t"
"paddb %%mm4, %%mm0 \n\t"
"movq %%mm0, (%0) \n\t"
"movq (%0, %1), %%mm0 \n\t"
"psubb %%mm4, %%mm0 \n\t"
"movq %%mm0, (%0, %1) \n\t"
 
: "+r" (src)
: "r" ((x86_reg)stride), "m" (c->pQPb), "r"(tmp)
: "%"REG_a
);
#else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
const int l7= stride + l6;
const int l8= stride + l7;
// const int l9= stride + l8;
int x;
src+= stride*3;
for(x=0; x<BLOCK_SIZE; x++){
const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
if(FFABS(middleEnergy) < 8*c->QP){
const int q=(src[l4] - src[l5])/2;
const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
 
int d= FFABS(middleEnergy) - FFMIN( FFABS(leftEnergy), FFABS(rightEnergy) );
d= FFMAX(d, 0);
 
d= (5*d + 32) >> 6;
d*= FFSIGN(-middleEnergy);
 
if(q>0){
d= d<0 ? 0 : d;
d= d>q ? q : d;
}else{
d= d>0 ? 0 : d;
d= d<q ? q : d;
}
 
src[l4]-= d;
src[l5]+= d;
}
src++;
}
#endif //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
}
#endif //TEMPLATE_PP_ALTIVEC
 
#if !TEMPLATE_PP_ALTIVEC
static inline void RENAME(dering)(uint8_t src[], int stride, PPContext *c)
{
#if HAVE_7REGS && (TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW)
DECLARE_ALIGNED(8, uint64_t, tmp)[3];
__asm__ volatile(
"pxor %%mm6, %%mm6 \n\t"
"pcmpeqb %%mm7, %%mm7 \n\t"
"movq %2, %%mm0 \n\t"
"punpcklbw %%mm6, %%mm0 \n\t"
"psrlw $1, %%mm0 \n\t"
"psubw %%mm7, %%mm0 \n\t"
"packuswb %%mm0, %%mm0 \n\t"
"movq %%mm0, %3 \n\t"
 
"lea (%0, %1), %%"REG_a" \n\t"
"lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
 
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
 
#undef REAL_FIND_MIN_MAX
#undef FIND_MIN_MAX
#if TEMPLATE_PP_MMXEXT
#define REAL_FIND_MIN_MAX(addr)\
"movq " #addr ", %%mm0 \n\t"\
"pminub %%mm0, %%mm7 \n\t"\
"pmaxub %%mm0, %%mm6 \n\t"
#else
#define REAL_FIND_MIN_MAX(addr)\
"movq " #addr ", %%mm0 \n\t"\
"movq %%mm7, %%mm1 \n\t"\
"psubusb %%mm0, %%mm6 \n\t"\
"paddb %%mm0, %%mm6 \n\t"\
"psubusb %%mm0, %%mm1 \n\t"\
"psubb %%mm1, %%mm7 \n\t"
#endif
#define FIND_MIN_MAX(addr) REAL_FIND_MIN_MAX(addr)
 
FIND_MIN_MAX((%%REGa))
FIND_MIN_MAX((%%REGa, %1))
FIND_MIN_MAX((%%REGa, %1, 2))
FIND_MIN_MAX((%0, %1, 4))
FIND_MIN_MAX((%%REGd))
FIND_MIN_MAX((%%REGd, %1))
FIND_MIN_MAX((%%REGd, %1, 2))
FIND_MIN_MAX((%0, %1, 8))
 
"movq %%mm7, %%mm4 \n\t"
"psrlq $8, %%mm7 \n\t"
#if TEMPLATE_PP_MMXEXT
"pminub %%mm4, %%mm7 \n\t" // min of pixels
"pshufw $0xF9, %%mm7, %%mm4 \n\t"
"pminub %%mm4, %%mm7 \n\t" // min of pixels
"pshufw $0xFE, %%mm7, %%mm4 \n\t"
"pminub %%mm4, %%mm7 \n\t"
#else
"movq %%mm7, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm7 \n\t"
"movq %%mm7, %%mm4 \n\t"
"psrlq $16, %%mm7 \n\t"
"movq %%mm7, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm7 \n\t"
"movq %%mm7, %%mm4 \n\t"
"psrlq $32, %%mm7 \n\t"
"movq %%mm7, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm7 \n\t"
#endif
 
 
"movq %%mm6, %%mm4 \n\t"
"psrlq $8, %%mm6 \n\t"
#if TEMPLATE_PP_MMXEXT
"pmaxub %%mm4, %%mm6 \n\t" // max of pixels
"pshufw $0xF9, %%mm6, %%mm4 \n\t"
"pmaxub %%mm4, %%mm6 \n\t"
"pshufw $0xFE, %%mm6, %%mm4 \n\t"
"pmaxub %%mm4, %%mm6 \n\t"
#else
"psubusb %%mm4, %%mm6 \n\t"
"paddb %%mm4, %%mm6 \n\t"
"movq %%mm6, %%mm4 \n\t"
"psrlq $16, %%mm6 \n\t"
"psubusb %%mm4, %%mm6 \n\t"
"paddb %%mm4, %%mm6 \n\t"
"movq %%mm6, %%mm4 \n\t"
"psrlq $32, %%mm6 \n\t"
"psubusb %%mm4, %%mm6 \n\t"
"paddb %%mm4, %%mm6 \n\t"
#endif
"movq %%mm6, %%mm0 \n\t" // max
"psubb %%mm7, %%mm6 \n\t" // max - min
"push %4 \n\t"
"movd %%mm6, %k4 \n\t"
"cmpb "MANGLE(deringThreshold)", %b4 \n\t"
"pop %4 \n\t"
" jb 1f \n\t"
PAVGB(%%mm0, %%mm7) // a=(max + min)/2
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"movq %%mm7, (%4) \n\t"
 
"movq (%0), %%mm0 \n\t" // L10
"movq %%mm0, %%mm1 \n\t" // L10
"movq %%mm0, %%mm2 \n\t" // L10
"psllq $8, %%mm1 \n\t"
"psrlq $8, %%mm2 \n\t"
"movd -4(%0), %%mm3 \n\t"
"movd 8(%0), %%mm4 \n\t"
"psrlq $24, %%mm3 \n\t"
"psllq $56, %%mm4 \n\t"
"por %%mm3, %%mm1 \n\t" // L00
"por %%mm4, %%mm2 \n\t" // L20
"movq %%mm1, %%mm3 \n\t" // L00
PAVGB(%%mm2, %%mm1) // (L20 + L00)/2
PAVGB(%%mm0, %%mm1) // (L20 + L00 + 2L10)/4
"psubusb %%mm7, %%mm0 \n\t"
"psubusb %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm3 \n\t"
"pcmpeqb "MANGLE(b00)", %%mm0 \n\t" // L10 > a ? 0 : -1
"pcmpeqb "MANGLE(b00)", %%mm2 \n\t" // L20 > a ? 0 : -1
"pcmpeqb "MANGLE(b00)", %%mm3 \n\t" // L00 > a ? 0 : -1
"paddb %%mm2, %%mm0 \n\t"
"paddb %%mm3, %%mm0 \n\t"
 
"movq (%%"REG_a"), %%mm2 \n\t" // L11
"movq %%mm2, %%mm3 \n\t" // L11
"movq %%mm2, %%mm4 \n\t" // L11
"psllq $8, %%mm3 \n\t"
"psrlq $8, %%mm4 \n\t"
"movd -4(%%"REG_a"), %%mm5 \n\t"
"movd 8(%%"REG_a"), %%mm6 \n\t"
"psrlq $24, %%mm5 \n\t"
"psllq $56, %%mm6 \n\t"
"por %%mm5, %%mm3 \n\t" // L01
"por %%mm6, %%mm4 \n\t" // L21
"movq %%mm3, %%mm5 \n\t" // L01
PAVGB(%%mm4, %%mm3) // (L21 + L01)/2
PAVGB(%%mm2, %%mm3) // (L21 + L01 + 2L11)/4
"psubusb %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm4 \n\t"
"psubusb %%mm7, %%mm5 \n\t"
"pcmpeqb "MANGLE(b00)", %%mm2 \n\t" // L11 > a ? 0 : -1
"pcmpeqb "MANGLE(b00)", %%mm4 \n\t" // L21 > a ? 0 : -1
"pcmpeqb "MANGLE(b00)", %%mm5 \n\t" // L01 > a ? 0 : -1
"paddb %%mm4, %%mm2 \n\t"
"paddb %%mm5, %%mm2 \n\t"
// 0, 2, 3, 1
#define REAL_DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
"movq " #src ", " #sx " \n\t" /* src[0] */\
"movq " #sx ", " #lx " \n\t" /* src[0] */\
"movq " #sx ", " #t0 " \n\t" /* src[0] */\
"psllq $8, " #lx " \n\t"\
"psrlq $8, " #t0 " \n\t"\
"movd -4" #src ", " #t1 " \n\t"\
"psrlq $24, " #t1 " \n\t"\
"por " #t1 ", " #lx " \n\t" /* src[-1] */\
"movd 8" #src ", " #t1 " \n\t"\
"psllq $56, " #t1 " \n\t"\
"por " #t1 ", " #t0 " \n\t" /* src[+1] */\
"movq " #lx ", " #t1 " \n\t" /* src[-1] */\
PAVGB(t0, lx) /* (src[-1] + src[+1])/2 */\
PAVGB(sx, lx) /* (src[-1] + 2src[0] + src[+1])/4 */\
PAVGB(lx, pplx) \
"movq " #lx ", 8(%4) \n\t"\
"movq (%4), " #lx " \n\t"\
"psubusb " #lx ", " #t1 " \n\t"\
"psubusb " #lx ", " #t0 " \n\t"\
"psubusb " #lx ", " #sx " \n\t"\
"movq "MANGLE(b00)", " #lx " \n\t"\
"pcmpeqb " #lx ", " #t1 " \n\t" /* src[-1] > a ? 0 : -1*/\
"pcmpeqb " #lx ", " #t0 " \n\t" /* src[+1] > a ? 0 : -1*/\
"pcmpeqb " #lx ", " #sx " \n\t" /* src[0] > a ? 0 : -1*/\
"paddb " #t1 ", " #t0 " \n\t"\
"paddb " #t0 ", " #sx " \n\t"\
\
PAVGB(plx, pplx) /* filtered */\
"movq " #dst ", " #t0 " \n\t" /* dst */\
"movq " #t0 ", " #t1 " \n\t" /* dst */\
"psubusb %3, " #t0 " \n\t"\
"paddusb %3, " #t1 " \n\t"\
PMAXUB(t0, pplx)\
PMINUB(t1, pplx, t0)\
"paddb " #sx ", " #ppsx " \n\t"\
"paddb " #psx ", " #ppsx " \n\t"\
"#paddb "MANGLE(b02)", " #ppsx " \n\t"\
"pand "MANGLE(b08)", " #ppsx " \n\t"\
"pcmpeqb " #lx ", " #ppsx " \n\t"\
"pand " #ppsx ", " #pplx " \n\t"\
"pandn " #dst ", " #ppsx " \n\t"\
"por " #pplx ", " #ppsx " \n\t"\
"movq " #ppsx ", " #dst " \n\t"\
"movq 8(%4), " #lx " \n\t"
 
#define DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
REAL_DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1)
/*
0000000
1111111
 
1111110
1111101
1111100
1111011
1111010
1111001
 
1111000
1110111
 
*/
//DERING_CORE(dst ,src ,ppsx ,psx ,sx ,pplx ,plx ,lx ,t0 ,t1)
DERING_CORE((%%REGa) ,(%%REGa, %1) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%%REGa, %1) ,(%%REGa, %1, 2),%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
DERING_CORE((%%REGa, %1, 2),(%0, %1, 4) ,%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
DERING_CORE((%0, %1, 4) ,(%%REGd) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%%REGd) ,(%%REGd, %1) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
DERING_CORE((%%REGd, %1) ,(%%REGd, %1, 2),%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
DERING_CORE((%%REGd, %1, 2),(%0, %1, 8) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%0, %1, 8) ,(%%REGd, %1, 4),%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
 
"1: \n\t"
: : "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb), "m"(c->pQPb2), "q"(tmp)
: "%"REG_a, "%"REG_d, "%"REG_SP
);
#else // HAVE_7REGS && (TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW)
int y;
int min=255;
int max=0;
int avg;
uint8_t *p;
int s[10];
const int QP2= c->QP/2 + 1;
 
src --;
for(y=1; y<9; y++){
int x;
p= src + stride*y;
for(x=1; x<9; x++){
p++;
if(*p > max) max= *p;
if(*p < min) min= *p;
}
}
avg= (min + max + 1)>>1;
 
if(max - min < deringThreshold) return;
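/* Scalar fallback: s[y] gets one bit per pixel that lies above avg (and, in
   its upper 16 bits, the complementary "not above avg" pattern). The
   (t<<1) & (t>>1) step keeps only pixels whose horizontal neighbours agree
   with them; the second loop ANDs three consecutive rows, so a bit survives
   only if the whole 3x3 neighbourhood is on the same side of avg. Those
   pixels are then smoothed with a (1 2 1) x (1 2 1) / 16 kernel, clamped to
   within QP2 of the original value. */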
 
for(y=0; y<10; y++){
int t = 0;
 
if(src[stride*y + 0] > avg) t+= 1;
if(src[stride*y + 1] > avg) t+= 2;
if(src[stride*y + 2] > avg) t+= 4;
if(src[stride*y + 3] > avg) t+= 8;
if(src[stride*y + 4] > avg) t+= 16;
if(src[stride*y + 5] > avg) t+= 32;
if(src[stride*y + 6] > avg) t+= 64;
if(src[stride*y + 7] > avg) t+= 128;
if(src[stride*y + 8] > avg) t+= 256;
if(src[stride*y + 9] > avg) t+= 512;
 
t |= (~t)<<16;
t &= (t<<1) & (t>>1);
s[y] = t;
}
 
for(y=1; y<9; y++){
int t = s[y-1] & s[y] & s[y+1];
t|= t>>16;
s[y-1]= t;
}
 
for(y=1; y<9; y++){
int x;
int t = s[y-1];
 
p= src + stride*y;
for(x=1; x<9; x++){
p++;
if(t & (1<<x)){
int f= (*(p-stride-1)) + 2*(*(p-stride)) + (*(p-stride+1))
+2*(*(p -1)) + 4*(*p ) + 2*(*(p +1))
+(*(p+stride-1)) + 2*(*(p+stride)) + (*(p+stride+1));
f= (f + 8)>>4;
 
#ifdef DEBUG_DERING_THRESHOLD
__asm__ volatile("emms\n\t":);
{
static long long numPixels=0;
if(x!=1 && x!=8 && y!=1 && y!=8) numPixels++;
// if((max-min)<20 || (max-min)*QP<200)
// if((max-min)*QP < 500)
// if(max-min<QP/2)
if(max-min < 20){
static int numSkipped=0;
static int errorSum=0;
static int worstQP=0;
static int worstRange=0;
static int worstDiff=0;
int diff= (f - *p);
int absDiff= FFABS(diff);
int error= diff*diff;
 
if(x==1 || x==8 || y==1 || y==8) continue;
 
numSkipped++;
if(absDiff > worstDiff){
worstDiff= absDiff;
worstQP= QP;
worstRange= max-min;
}
errorSum+= error;
 
if(1024LL*1024LL*1024LL % numSkipped == 0){
av_log(c, AV_LOG_INFO, "sum:%1.3f, skip:%d, wQP:%d, "
"wRange:%d, wDiff:%d, relSkip:%1.3f\n",
(float)errorSum/numSkipped, numSkipped, worstQP, worstRange,
worstDiff, (float)numSkipped/numPixels);
}
}
}
#endif
if (*p + QP2 < f) *p= *p + QP2;
else if(*p - QP2 > f) *p= *p - QP2;
else *p=f;
}
}
}
#ifdef DEBUG_DERING_THRESHOLD
if(max-min < 20){
for(y=1; y<9; y++){
int x;
int t = 0;
p= src + stride*y;
for(x=1; x<9; x++){
p++;
*p = FFMIN(*p + 20, 255);
}
}
// src[0] = src[7]=src[stride*7]=src[stride*7 + 7]=255;
}
#endif
#endif //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
}
#endif //TEMPLATE_PP_ALTIVEC
 
/**
* Deinterlace the given block by linearly interpolating every second line.
* It will be called for every 8x8 block and can read & write from lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can still be read.
* Lines 4-12 will be read by the deblocking filter and should be deinterlaced.
*/
static inline void RENAME(deInterlaceInterpolateLinear)(uint8_t src[], int stride)
{
#if TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
src+= 4*stride;
__asm__ volatile(
"lea (%0, %1), %%"REG_a" \n\t"
"lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %0+8%1 ecx+4%1
 
"movq (%0), %%mm0 \n\t"
"movq (%%"REG_a", %1), %%mm1 \n\t"
PAVGB(%%mm1, %%mm0)
"movq %%mm0, (%%"REG_a") \n\t"
"movq (%0, %1, 4), %%mm0 \n\t"
PAVGB(%%mm0, %%mm1)
"movq %%mm1, (%%"REG_a", %1, 2) \n\t"
"movq (%%"REG_c", %1), %%mm1 \n\t"
PAVGB(%%mm1, %%mm0)
"movq %%mm0, (%%"REG_c") \n\t"
"movq (%0, %1, 8), %%mm0 \n\t"
PAVGB(%%mm0, %%mm1)
"movq %%mm1, (%%"REG_c", %1, 2) \n\t"
 
: : "r" (src), "r" ((x86_reg)stride)
: "%"REG_a, "%"REG_c
);
#else
int a, b, x;
src+= 4*stride;
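/* (a|b) - (((a^b) & 0xFEFEFEFE) >> 1) is a byte-wise average of the packed
   pixels a and b that rounds up; masking with 0xFEFEFEFE keeps the shifted
   bits from leaking into the neighbouring byte lanes. Each missing line is
   thus the average of the lines above and below it, 4 pixels at a time. */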
 
for(x=0; x<2; x++){
a= *(uint32_t*)&src[stride*0];
b= *(uint32_t*)&src[stride*2];
*(uint32_t*)&src[stride*1]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
a= *(uint32_t*)&src[stride*4];
*(uint32_t*)&src[stride*3]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
b= *(uint32_t*)&src[stride*6];
*(uint32_t*)&src[stride*5]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
a= *(uint32_t*)&src[stride*8];
*(uint32_t*)&src[stride*7]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
src += 4;
}
#endif
}
 
/**
* Deinterlace the given block by cubically interpolating every second line.
* It will be called for every 8x8 block and can read & write from lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can still be read.
* Lines 4-12 will be read by the deblocking filter and should be deinterlaced.
* This filter will read lines 3-15 and write lines 7-13.
*/
static inline void RENAME(deInterlaceInterpolateCubic)(uint8_t src[], int stride)
{
#if TEMPLATE_PP_SSE2 || TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
src+= stride*3;
__asm__ volatile(
"lea (%0, %1), %%"REG_a" \n\t"
"lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
"lea (%%"REG_d", %1, 4), %%"REG_c" \n\t"
"add %1, %%"REG_c" \n\t"
#if TEMPLATE_PP_SSE2
"pxor %%xmm7, %%xmm7 \n\t"
#define REAL_DEINT_CUBIC(a,b,c,d,e)\
"movq " #a ", %%xmm0 \n\t"\
"movq " #b ", %%xmm1 \n\t"\
"movq " #d ", %%xmm2 \n\t"\
"movq " #e ", %%xmm3 \n\t"\
"pavgb %%xmm2, %%xmm1 \n\t"\
"pavgb %%xmm3, %%xmm0 \n\t"\
"punpcklbw %%xmm7, %%xmm0 \n\t"\
"punpcklbw %%xmm7, %%xmm1 \n\t"\
"psubw %%xmm1, %%xmm0 \n\t"\
"psraw $3, %%xmm0 \n\t"\
"psubw %%xmm0, %%xmm1 \n\t"\
"packuswb %%xmm1, %%xmm1 \n\t"\
"movlps %%xmm1, " #c " \n\t"
#else //TEMPLATE_PP_SSE2
"pxor %%mm7, %%mm7 \n\t"
// 0 1 2 3 4 5 6 7 8 9 10
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
 
#define REAL_DEINT_CUBIC(a,b,c,d,e)\
"movq " #a ", %%mm0 \n\t"\
"movq " #b ", %%mm1 \n\t"\
"movq " #d ", %%mm2 \n\t"\
"movq " #e ", %%mm3 \n\t"\
PAVGB(%%mm2, %%mm1) /* (b+d) /2 */\
PAVGB(%%mm3, %%mm0) /* (a+e) /2 */\
"movq %%mm0, %%mm2 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpckhbw %%mm7, %%mm2 \n\t"\
"movq %%mm1, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm1 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"psubw %%mm1, %%mm0 \n\t" /* L(a+e - (b+d))/2 */\
"psubw %%mm3, %%mm2 \n\t" /* H(a+e - (b+d))/2 */\
"psraw $3, %%mm0 \n\t" /* L(a+e - (b+d))/16 */\
"psraw $3, %%mm2 \n\t" /* H(a+e - (b+d))/16 */\
"psubw %%mm0, %%mm1 \n\t" /* L(9b + 9d - a - e)/16 */\
"psubw %%mm2, %%mm3 \n\t" /* H(9b + 9d - a - e)/16 */\
"packuswb %%mm3, %%mm1 \n\t"\
"movq %%mm1, " #c " \n\t"
#endif //TEMPLATE_PP_SSE2
#define DEINT_CUBIC(a,b,c,d,e) REAL_DEINT_CUBIC(a,b,c,d,e)
 
DEINT_CUBIC((%0) , (%%REGa, %1), (%%REGa, %1, 2), (%0, %1, 4) , (%%REGd, %1))
DEINT_CUBIC((%%REGa, %1), (%0, %1, 4) , (%%REGd) , (%%REGd, %1), (%0, %1, 8))
DEINT_CUBIC((%0, %1, 4) , (%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8) , (%%REGc))
DEINT_CUBIC((%%REGd, %1), (%0, %1, 8) , (%%REGd, %1, 4), (%%REGc) , (%%REGc, %1, 2))
 
: : "r" (src), "r" ((x86_reg)stride)
:
#if TEMPLATE_PP_SSE2
XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm7",)
#endif
"%"REG_a, "%"REG_d, "%"REG_c
);
#undef REAL_DEINT_CUBIC
#else //TEMPLATE_PP_SSE2 || TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
int x;
src+= stride*3;
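/* Each interpolated line is a 4-tap vertical filter (-1 9 9 -1)/16 over the
   two original lines above and the two below it, clipped to 0..255. */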
for(x=0; x<8; x++){
src[stride*3] = CLIP((-src[0] + 9*src[stride*2] + 9*src[stride*4] - src[stride*6])>>4);
src[stride*5] = CLIP((-src[stride*2] + 9*src[stride*4] + 9*src[stride*6] - src[stride*8])>>4);
src[stride*7] = CLIP((-src[stride*4] + 9*src[stride*6] + 9*src[stride*8] - src[stride*10])>>4);
src[stride*9] = CLIP((-src[stride*6] + 9*src[stride*8] + 9*src[stride*10] - src[stride*12])>>4);
src++;
}
#endif //TEMPLATE_PP_SSE2 || TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
}
 
/**
* Deinterlace the given block by filtering every second line with a (-1 4 2 4 -1) filter.
* It will be called for every 8x8 block and can read & write from lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can still be read.
* Lines 4-12 will be read by the deblocking filter and should be deinterlaced.
* This filter will read lines 4-13 and write lines 5-11.
*/
static inline void RENAME(deInterlaceFF)(uint8_t src[], int stride, uint8_t *tmp)
{
#if TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
src+= stride*4;
__asm__ volatile(
"lea (%0, %1), %%"REG_a" \n\t"
"lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
"pxor %%mm7, %%mm7 \n\t"
"movq (%2), %%mm0 \n\t"
// 0 1 2 3 4 5 6 7 8 9 10
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
 
#define REAL_DEINT_FF(a,b,c,d)\
"movq " #a ", %%mm1 \n\t"\
"movq " #b ", %%mm2 \n\t"\
"movq " #c ", %%mm3 \n\t"\
"movq " #d ", %%mm4 \n\t"\
PAVGB(%%mm3, %%mm1) \
PAVGB(%%mm4, %%mm0) \
"movq %%mm0, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"movq %%mm1, %%mm4 \n\t"\
"punpcklbw %%mm7, %%mm1 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t"\
"psllw $2, %%mm1 \n\t"\
"psllw $2, %%mm4 \n\t"\
"psubw %%mm0, %%mm1 \n\t"\
"psubw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm5 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm5 \n\t"\
"paddw %%mm2, %%mm1 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"psraw $2, %%mm1 \n\t"\
"psraw $2, %%mm4 \n\t"\
"packuswb %%mm4, %%mm1 \n\t"\
"movq %%mm1, " #b " \n\t"\
 
#define DEINT_FF(a,b,c,d) REAL_DEINT_FF(a,b,c,d)
 
DEINT_FF((%0) , (%%REGa) , (%%REGa, %1), (%%REGa, %1, 2))
DEINT_FF((%%REGa, %1), (%%REGa, %1, 2), (%0, %1, 4) , (%%REGd) )
DEINT_FF((%0, %1, 4) , (%%REGd) , (%%REGd, %1), (%%REGd, %1, 2))
DEINT_FF((%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8) , (%%REGd, %1, 4))
 
"movq %%mm0, (%2) \n\t"
: : "r" (src), "r" ((x86_reg)stride), "r"(tmp)
: "%"REG_a, "%"REG_d
);
#else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
int x;
src+= stride*4;
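/* tmp[] carries one line of vertical context between successive calls, so the
   5-tap (-1 4 2 4 -1)/8 filter has a sample from outside the current 8-line
   window; the bottom context line is written back to tmp[] at the end. */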
for(x=0; x<8; x++){
int t1= tmp[x];
int t2= src[stride*1];
 
src[stride*1]= CLIP((-t1 + 4*src[stride*0] + 2*t2 + 4*src[stride*2] - src[stride*3] + 4)>>3);
t1= src[stride*4];
src[stride*3]= CLIP((-t2 + 4*src[stride*2] + 2*t1 + 4*src[stride*4] - src[stride*5] + 4)>>3);
t2= src[stride*6];
src[stride*5]= CLIP((-t1 + 4*src[stride*4] + 2*t2 + 4*src[stride*6] - src[stride*7] + 4)>>3);
t1= src[stride*8];
src[stride*7]= CLIP((-t2 + 4*src[stride*6] + 2*t1 + 4*src[stride*8] - src[stride*9] + 4)>>3);
tmp[x]= t1;
 
src++;
}
#endif //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
}
 
/**
* Deinterlace the given block by filtering every line with a (-1 2 6 2 -1) filter.
* It will be called for every 8x8 block and can read & write from lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can still be read.
* Lines 4-12 will be read by the deblocking filter and should be deinterlaced.
* This filter will read lines 4-13 and write lines 4-11.
*/
static inline void RENAME(deInterlaceL5)(uint8_t src[], int stride, uint8_t *tmp, uint8_t *tmp2)
{
#if TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
src+= stride*4;
__asm__ volatile(
"lea (%0, %1), %%"REG_a" \n\t"
"lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
"pxor %%mm7, %%mm7 \n\t"
"movq (%2), %%mm0 \n\t"
"movq (%3), %%mm1 \n\t"
// 0 1 2 3 4 5 6 7 8 9 10
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
 
#define REAL_DEINT_L5(t1,t2,a,b,c)\
"movq " #a ", %%mm2 \n\t"\
"movq " #b ", %%mm3 \n\t"\
"movq " #c ", %%mm4 \n\t"\
PAVGB(t2, %%mm3) \
PAVGB(t1, %%mm4) \
"movq %%mm2, %%mm5 \n\t"\
"movq %%mm2, " #t1 " \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm5 \n\t"\
"movq %%mm2, %%mm6 \n\t"\
"paddw %%mm2, %%mm2 \n\t"\
"paddw %%mm6, %%mm2 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"paddw %%mm5, %%mm5 \n\t"\
"paddw %%mm6, %%mm5 \n\t"\
"movq %%mm3, %%mm6 \n\t"\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpckhbw %%mm7, %%mm6 \n\t"\
"paddw %%mm3, %%mm3 \n\t"\
"paddw %%mm6, %%mm6 \n\t"\
"paddw %%mm3, %%mm2 \n\t"\
"paddw %%mm6, %%mm5 \n\t"\
"movq %%mm4, %%mm6 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm7, %%mm6 \n\t"\
"psubw %%mm4, %%mm2 \n\t"\
"psubw %%mm6, %%mm5 \n\t"\
"psraw $2, %%mm2 \n\t"\
"psraw $2, %%mm5 \n\t"\
"packuswb %%mm5, %%mm2 \n\t"\
"movq %%mm2, " #a " \n\t"\
 
#define DEINT_L5(t1,t2,a,b,c) REAL_DEINT_L5(t1,t2,a,b,c)
 
DEINT_L5(%%mm0, %%mm1, (%0) , (%%REGa) , (%%REGa, %1) )
DEINT_L5(%%mm1, %%mm0, (%%REGa) , (%%REGa, %1) , (%%REGa, %1, 2))
DEINT_L5(%%mm0, %%mm1, (%%REGa, %1) , (%%REGa, %1, 2), (%0, %1, 4) )
DEINT_L5(%%mm1, %%mm0, (%%REGa, %1, 2), (%0, %1, 4) , (%%REGd) )
DEINT_L5(%%mm0, %%mm1, (%0, %1, 4) , (%%REGd) , (%%REGd, %1) )
DEINT_L5(%%mm1, %%mm0, (%%REGd) , (%%REGd, %1) , (%%REGd, %1, 2))
DEINT_L5(%%mm0, %%mm1, (%%REGd, %1) , (%%REGd, %1, 2), (%0, %1, 8) )
DEINT_L5(%%mm1, %%mm0, (%%REGd, %1, 2), (%0, %1, 8) , (%%REGd, %1, 4))
 
"movq %%mm0, (%2) \n\t"
"movq %%mm1, (%3) \n\t"
: : "r" (src), "r" ((x86_reg)stride), "r"(tmp), "r"(tmp2)
: "%"REG_a, "%"REG_d
);
#else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
int x;
src+= stride*4;
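/* tmp[] and tmp2[] carry two lines of vertical context between successive
   calls, so the 5-tap (-1 2 6 2 -1)/8 low-pass has samples from outside the
   current 8-line window; the last two lines are saved back at the end. */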
for(x=0; x<8; x++){
int t1= tmp[x];
int t2= tmp2[x];
int t3= src[0];
 
src[stride*0]= CLIP((-(t1 + src[stride*2]) + 2*(t2 + src[stride*1]) + 6*t3 + 4)>>3);
t1= src[stride*1];
src[stride*1]= CLIP((-(t2 + src[stride*3]) + 2*(t3 + src[stride*2]) + 6*t1 + 4)>>3);
t2= src[stride*2];
src[stride*2]= CLIP((-(t3 + src[stride*4]) + 2*(t1 + src[stride*3]) + 6*t2 + 4)>>3);
t3= src[stride*3];
src[stride*3]= CLIP((-(t1 + src[stride*5]) + 2*(t2 + src[stride*4]) + 6*t3 + 4)>>3);
t1= src[stride*4];
src[stride*4]= CLIP((-(t2 + src[stride*6]) + 2*(t3 + src[stride*5]) + 6*t1 + 4)>>3);
t2= src[stride*5];
src[stride*5]= CLIP((-(t3 + src[stride*7]) + 2*(t1 + src[stride*6]) + 6*t2 + 4)>>3);
t3= src[stride*6];
src[stride*6]= CLIP((-(t1 + src[stride*8]) + 2*(t2 + src[stride*7]) + 6*t3 + 4)>>3);
t1= src[stride*7];
src[stride*7]= CLIP((-(t2 + src[stride*9]) + 2*(t3 + src[stride*8]) + 6*t1 + 4)>>3);
 
tmp[x]= t3;
tmp2[x]= t1;
 
src++;
}
#endif //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
}
 
/**
* Deinterlace the given block by filtering all lines with a (1 2 1) filter.
* It will be called for every 8x8 block and can read & write from lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can still be read.
* Lines 4-12 will be read by the deblocking filter and should be deinterlaced.
* This filter will read lines 4-13 and write lines 4-11.
*/
static inline void RENAME(deInterlaceBlendLinear)(uint8_t src[], int stride, uint8_t *tmp)
{
#if TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
src+= 4*stride;
__asm__ volatile(
"lea (%0, %1), %%"REG_a" \n\t"
"lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
 
"movq (%2), %%mm0 \n\t" // L0
"movq (%%"REG_a"), %%mm1 \n\t" // L2
PAVGB(%%mm1, %%mm0) // L0+L2
"movq (%0), %%mm2 \n\t" // L1
PAVGB(%%mm2, %%mm0)
"movq %%mm0, (%0) \n\t"
"movq (%%"REG_a", %1), %%mm0 \n\t" // L3
PAVGB(%%mm0, %%mm2) // L1+L3
PAVGB(%%mm1, %%mm2) // 2L2 + L1 + L3
"movq %%mm2, (%%"REG_a") \n\t"
"movq (%%"REG_a", %1, 2), %%mm2 \n\t" // L4
PAVGB(%%mm2, %%mm1) // L2+L4
PAVGB(%%mm0, %%mm1) // 2L3 + L2 + L4
"movq %%mm1, (%%"REG_a", %1) \n\t"
"movq (%0, %1, 4), %%mm1 \n\t" // L5
PAVGB(%%mm1, %%mm0) // L3+L5
PAVGB(%%mm2, %%mm0) // 2L4 + L3 + L5
"movq %%mm0, (%%"REG_a", %1, 2) \n\t"
"movq (%%"REG_d"), %%mm0 \n\t" // L6
PAVGB(%%mm0, %%mm2) // L4+L6
PAVGB(%%mm1, %%mm2) // 2L5 + L4 + L6
"movq %%mm2, (%0, %1, 4) \n\t"
"movq (%%"REG_d", %1), %%mm2 \n\t" // L7
PAVGB(%%mm2, %%mm1) // L5+L7
PAVGB(%%mm0, %%mm1) // 2L6 + L5 + L7
"movq %%mm1, (%%"REG_d") \n\t"
"movq (%%"REG_d", %1, 2), %%mm1 \n\t" // L8
PAVGB(%%mm1, %%mm0) // L6+L8
PAVGB(%%mm2, %%mm0) // 2L7 + L6 + L8
"movq %%mm0, (%%"REG_d", %1) \n\t"
"movq (%0, %1, 8), %%mm0 \n\t" // L9
PAVGB(%%mm0, %%mm2) // L7+L9
PAVGB(%%mm1, %%mm2) // 2L8 + L7 + L9
"movq %%mm2, (%%"REG_d", %1, 2) \n\t"
"movq %%mm1, (%2) \n\t"
 
: : "r" (src), "r" ((x86_reg)stride), "r" (tmp)
: "%"REG_a, "%"REG_d
);
#else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
int a, b, c, x;
src+= 4*stride;
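/* Packed-byte blend: (x&y) + (((x^y)&0xFEFEFEFE)>>1) is a byte-wise average
   rounding down, (x|y) - (((x^y)&0xFEFEFEFE)>>1) rounds up. Each output line
   becomes roughly (previous + 2*current + next + 2)/4, processed 4 pixels at
   a time, with tmp[] carrying one line of vertical context between calls. */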
 
for(x=0; x<2; x++){
a= *(uint32_t*)&tmp[stride*0];
b= *(uint32_t*)&src[stride*0];
c= *(uint32_t*)&src[stride*1];
a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*0]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
 
a= *(uint32_t*)&src[stride*2];
b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*1]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1);
 
b= *(uint32_t*)&src[stride*3];
c= (b&c) + (((b^c)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*2]= (c|a) - (((c^a)&0xFEFEFEFEUL)>>1);
 
c= *(uint32_t*)&src[stride*4];
a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*3]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
 
a= *(uint32_t*)&src[stride*5];
b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*4]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1);
 
b= *(uint32_t*)&src[stride*6];
c= (b&c) + (((b^c)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*5]= (c|a) - (((c^a)&0xFEFEFEFEUL)>>1);
 
c= *(uint32_t*)&src[stride*7];
a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*6]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1);
 
a= *(uint32_t*)&src[stride*8];
b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1);
*(uint32_t*)&src[stride*7]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1);
 
*(uint32_t*)&tmp[stride*0]= c;
src += 4;
tmp += 4;
}
#endif //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
}
 
/**
* Deinterlace the given block by applying a median filter to every second line.
* It will be called for every 8x8 block and can read & write from lines 4-15.
* Lines 0-3 have already been passed through the deblock / dering filters, but can still be read.
* Lines 4-12 will be read by the deblocking filter and should be deinterlaced.
*/
static inline void RENAME(deInterlaceMedian)(uint8_t src[], int stride)
{
#if TEMPLATE_PP_MMX
src+= 4*stride;
#if TEMPLATE_PP_MMXEXT
__asm__ volatile(
"lea (%0, %1), %%"REG_a" \n\t"
"lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
 
"movq (%0), %%mm0 \n\t"
"movq (%%"REG_a", %1), %%mm2 \n\t"
"movq (%%"REG_a"), %%mm1 \n\t"
"movq %%mm0, %%mm3 \n\t"
"pmaxub %%mm1, %%mm0 \n\t"
"pminub %%mm3, %%mm1 \n\t"
"pmaxub %%mm2, %%mm1 \n\t"
"pminub %%mm1, %%mm0 \n\t"
"movq %%mm0, (%%"REG_a") \n\t"
 
"movq (%0, %1, 4), %%mm0 \n\t"
"movq (%%"REG_a", %1, 2), %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"pmaxub %%mm1, %%mm2 \n\t"
"pminub %%mm3, %%mm1 \n\t"
"pmaxub %%mm0, %%mm1 \n\t"
"pminub %%mm1, %%mm2 \n\t"
"movq %%mm2, (%%"REG_a", %1, 2) \n\t"
 
"movq (%%"REG_d"), %%mm2 \n\t"
"movq (%%"REG_d", %1), %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"pmaxub %%mm0, %%mm2 \n\t"
"pminub %%mm3, %%mm0 \n\t"
"pmaxub %%mm1, %%mm0 \n\t"
"pminub %%mm0, %%mm2 \n\t"
"movq %%mm2, (%%"REG_d") \n\t"
 
"movq (%%"REG_d", %1, 2), %%mm2 \n\t"
"movq (%0, %1, 8), %%mm0 \n\t"
"movq %%mm2, %%mm3 \n\t"
"pmaxub %%mm0, %%mm2 \n\t"
"pminub %%mm3, %%mm0 \n\t"
"pmaxub %%mm1, %%mm0 \n\t"
"pminub %%mm0, %%mm2 \n\t"
"movq %%mm2, (%%"REG_d", %1, 2) \n\t"
 
 
: : "r" (src), "r" ((x86_reg)stride)
: "%"REG_a, "%"REG_d
);
 
#else // MMX without MMX2
__asm__ volatile(
"lea (%0, %1), %%"REG_a" \n\t"
"lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
"pxor %%mm7, %%mm7 \n\t"
 
#define REAL_MEDIAN(a,b,c)\
"movq " #a ", %%mm0 \n\t"\
"movq " #b ", %%mm2 \n\t"\
"movq " #c ", %%mm1 \n\t"\
"movq %%mm0, %%mm3 \n\t"\
"movq %%mm1, %%mm4 \n\t"\
"movq %%mm2, %%mm5 \n\t"\
"psubusb %%mm1, %%mm3 \n\t"\
"psubusb %%mm2, %%mm4 \n\t"\
"psubusb %%mm0, %%mm5 \n\t"\
"pcmpeqb %%mm7, %%mm3 \n\t"\
"pcmpeqb %%mm7, %%mm4 \n\t"\
"pcmpeqb %%mm7, %%mm5 \n\t"\
"movq %%mm3, %%mm6 \n\t"\
"pxor %%mm4, %%mm3 \n\t"\
"pxor %%mm5, %%mm4 \n\t"\
"pxor %%mm6, %%mm5 \n\t"\
"por %%mm3, %%mm1 \n\t"\
"por %%mm4, %%mm2 \n\t"\
"por %%mm5, %%mm0 \n\t"\
"pand %%mm2, %%mm0 \n\t"\
"pand %%mm1, %%mm0 \n\t"\
"movq %%mm0, " #b " \n\t"
#define MEDIAN(a,b,c) REAL_MEDIAN(a,b,c)
 
MEDIAN((%0) , (%%REGa) , (%%REGa, %1))
MEDIAN((%%REGa, %1), (%%REGa, %1, 2), (%0, %1, 4))
MEDIAN((%0, %1, 4) , (%%REGd) , (%%REGd, %1))
MEDIAN((%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8))
 
: : "r" (src), "r" ((x86_reg)stride)
: "%"REG_a, "%"REG_d
);
#endif //TEMPLATE_PP_MMXEXT
#else //TEMPLATE_PP_MMX
int x, y;
src+= 4*stride;
// FIXME - there should be a way to do a few columns in parallel like with MMX
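/* The expression below is a branchless median of (a, b, c): d, e and f are
   all-ones masks for a<b, b<c and c<a, and the three OR/AND terms leave
   exactly the middle value. Every second line is replaced by the median of
   itself and the lines directly above and below it. */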
for(x=0; x<8; x++){
uint8_t *colsrc = src;
for (y=0; y<4; y++){
int a, b, c, d, e, f;
a = colsrc[0 ];
b = colsrc[stride ];
c = colsrc[stride*2];
d = (a-b)>>31;
e = (b-c)>>31;
f = (c-a)>>31;
colsrc[stride ] = (a|(d^f)) & (b|(d^e)) & (c|(e^f));
colsrc += stride*2;
}
src++;
}
#endif //TEMPLATE_PP_MMX
}
 
#if TEMPLATE_PP_MMX
/**
* Transpose and shift the given 8x8 block into dst1 and dst2.
*/
static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int srcStride)
{
__asm__(
"lea (%0, %1), %%"REG_a" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
"movq (%0), %%mm0 \n\t" // 12345678
"movq (%%"REG_a"), %%mm1 \n\t" // abcdefgh
"movq %%mm0, %%mm2 \n\t" // 12345678
"punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
"punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
 
"movq (%%"REG_a", %1), %%mm1 \n\t"
"movq (%%"REG_a", %1, 2), %%mm3 \n\t"
"movq %%mm1, %%mm4 \n\t"
"punpcklbw %%mm3, %%mm1 \n\t"
"punpckhbw %%mm3, %%mm4 \n\t"
 
"movq %%mm0, %%mm3 \n\t"
"punpcklwd %%mm1, %%mm0 \n\t"
"punpckhwd %%mm1, %%mm3 \n\t"
"movq %%mm2, %%mm1 \n\t"
"punpcklwd %%mm4, %%mm2 \n\t"
"punpckhwd %%mm4, %%mm1 \n\t"
 
"movd %%mm0, 128(%2) \n\t"
"psrlq $32, %%mm0 \n\t"
"movd %%mm0, 144(%2) \n\t"
"movd %%mm3, 160(%2) \n\t"
"psrlq $32, %%mm3 \n\t"
"movd %%mm3, 176(%2) \n\t"
"movd %%mm3, 48(%3) \n\t"
"movd %%mm2, 192(%2) \n\t"
"movd %%mm2, 64(%3) \n\t"
"psrlq $32, %%mm2 \n\t"
"movd %%mm2, 80(%3) \n\t"
"movd %%mm1, 96(%3) \n\t"
"psrlq $32, %%mm1 \n\t"
"movd %%mm1, 112(%3) \n\t"
 
"lea (%%"REG_a", %1, 4), %%"REG_a" \n\t"
 
"movq (%0, %1, 4), %%mm0 \n\t" // 12345678
"movq (%%"REG_a"), %%mm1 \n\t" // abcdefgh
"movq %%mm0, %%mm2 \n\t" // 12345678
"punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
"punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
 
"movq (%%"REG_a", %1), %%mm1 \n\t"
"movq (%%"REG_a", %1, 2), %%mm3 \n\t"
"movq %%mm1, %%mm4 \n\t"
"punpcklbw %%mm3, %%mm1 \n\t"
"punpckhbw %%mm3, %%mm4 \n\t"
 
"movq %%mm0, %%mm3 \n\t"
"punpcklwd %%mm1, %%mm0 \n\t"
"punpckhwd %%mm1, %%mm3 \n\t"
"movq %%mm2, %%mm1 \n\t"
"punpcklwd %%mm4, %%mm2 \n\t"
"punpckhwd %%mm4, %%mm1 \n\t"
 
"movd %%mm0, 132(%2) \n\t"
"psrlq $32, %%mm0 \n\t"
"movd %%mm0, 148(%2) \n\t"
"movd %%mm3, 164(%2) \n\t"
"psrlq $32, %%mm3 \n\t"
"movd %%mm3, 180(%2) \n\t"
"movd %%mm3, 52(%3) \n\t"
"movd %%mm2, 196(%2) \n\t"
"movd %%mm2, 68(%3) \n\t"
"psrlq $32, %%mm2 \n\t"
"movd %%mm2, 84(%3) \n\t"
"movd %%mm1, 100(%3) \n\t"
"psrlq $32, %%mm1 \n\t"
"movd %%mm1, 116(%3) \n\t"
 
 
:: "r" (src), "r" ((x86_reg)srcStride), "r" (dst1), "r" (dst2)
: "%"REG_a
);
}
 
/**
* Transpose the given 8x8 block.
*/
static inline void RENAME(transpose2)(uint8_t *dst, int dstStride, uint8_t *src)
{
__asm__(
"lea (%0, %1), %%"REG_a" \n\t"
"lea (%%"REG_a",%1,4), %%"REG_d" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
"movq (%2), %%mm0 \n\t" // 12345678
"movq 16(%2), %%mm1 \n\t" // abcdefgh
"movq %%mm0, %%mm2 \n\t" // 12345678
"punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
"punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
 
"movq 32(%2), %%mm1 \n\t"
"movq 48(%2), %%mm3 \n\t"
"movq %%mm1, %%mm4 \n\t"
"punpcklbw %%mm3, %%mm1 \n\t"
"punpckhbw %%mm3, %%mm4 \n\t"
 
"movq %%mm0, %%mm3 \n\t"
"punpcklwd %%mm1, %%mm0 \n\t"
"punpckhwd %%mm1, %%mm3 \n\t"
"movq %%mm2, %%mm1 \n\t"
"punpcklwd %%mm4, %%mm2 \n\t"
"punpckhwd %%mm4, %%mm1 \n\t"
 
"movd %%mm0, (%0) \n\t"
"psrlq $32, %%mm0 \n\t"
"movd %%mm0, (%%"REG_a") \n\t"
"movd %%mm3, (%%"REG_a", %1) \n\t"
"psrlq $32, %%mm3 \n\t"
"movd %%mm3, (%%"REG_a", %1, 2) \n\t"
"movd %%mm2, (%0, %1, 4) \n\t"
"psrlq $32, %%mm2 \n\t"
"movd %%mm2, (%%"REG_d") \n\t"
"movd %%mm1, (%%"REG_d", %1) \n\t"
"psrlq $32, %%mm1 \n\t"
"movd %%mm1, (%%"REG_d", %1, 2) \n\t"
 
 
"movq 64(%2), %%mm0 \n\t" // 12345678
"movq 80(%2), %%mm1 \n\t" // abcdefgh
"movq %%mm0, %%mm2 \n\t" // 12345678
"punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
"punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
 
"movq 96(%2), %%mm1 \n\t"
"movq 112(%2), %%mm3 \n\t"
"movq %%mm1, %%mm4 \n\t"
"punpcklbw %%mm3, %%mm1 \n\t"
"punpckhbw %%mm3, %%mm4 \n\t"
 
"movq %%mm0, %%mm3 \n\t"
"punpcklwd %%mm1, %%mm0 \n\t"
"punpckhwd %%mm1, %%mm3 \n\t"
"movq %%mm2, %%mm1 \n\t"
"punpcklwd %%mm4, %%mm2 \n\t"
"punpckhwd %%mm4, %%mm1 \n\t"
 
"movd %%mm0, 4(%0) \n\t"
"psrlq $32, %%mm0 \n\t"
"movd %%mm0, 4(%%"REG_a") \n\t"
"movd %%mm3, 4(%%"REG_a", %1) \n\t"
"psrlq $32, %%mm3 \n\t"
"movd %%mm3, 4(%%"REG_a", %1, 2) \n\t"
"movd %%mm2, 4(%0, %1, 4) \n\t"
"psrlq $32, %%mm2 \n\t"
"movd %%mm2, 4(%%"REG_d") \n\t"
"movd %%mm1, 4(%%"REG_d", %1) \n\t"
"psrlq $32, %%mm1 \n\t"
"movd %%mm1, 4(%%"REG_d", %1, 2) \n\t"
 
:: "r" (dst), "r" ((x86_reg)dstStride), "r" (src)
: "%"REG_a, "%"REG_d
);
}
#endif //TEMPLATE_PP_MMX
//static long test=0;
 
#if !TEMPLATE_PP_ALTIVEC
static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
{
// to save a register (FIXME do this outside of the loops)
tempBlurredPast[127]= maxNoise[0];
tempBlurredPast[128]= maxNoise[1];
tempBlurredPast[129]= maxNoise[2];
 
#define FAST_L2_DIFF
//#define L1_DIFF // you should change the thresholds too if you try this one
#if TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
__asm__ volatile(
"lea (%2, %2, 2), %%"REG_a" \n\t" // 3*stride
"lea (%2, %2, 4), %%"REG_d" \n\t" // 5*stride
"lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
// 0 1 2 3 4 5 6 7 8 9
// %x %x+%2 %x+2%2 %x+eax %x+4%2 %x+edx %x+2eax %x+ecx %x+8%2
//FIXME reorder?
#ifdef L1_DIFF //needs mmx2
"movq (%0), %%mm0 \n\t" // L0
"psadbw (%1), %%mm0 \n\t" // |L0-R0|
"movq (%0, %2), %%mm1 \n\t" // L1
"psadbw (%1, %2), %%mm1 \n\t" // |L1-R1|
"movq (%0, %2, 2), %%mm2 \n\t" // L2
"psadbw (%1, %2, 2), %%mm2 \n\t" // |L2-R2|
"movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
"psadbw (%1, %%"REG_a"), %%mm3 \n\t" // |L3-R3|
 
"movq (%0, %2, 4), %%mm4 \n\t" // L4
"paddw %%mm1, %%mm0 \n\t"
"psadbw (%1, %2, 4), %%mm4 \n\t" // |L4-R4|
"movq (%0, %%"REG_d"), %%mm5 \n\t" // L5
"paddw %%mm2, %%mm0 \n\t"
"psadbw (%1, %%"REG_d"), %%mm5 \n\t" // |L5-R5|
"movq (%0, %%"REG_a", 2), %%mm6 \n\t" // L6
"paddw %%mm3, %%mm0 \n\t"
"psadbw (%1, %%"REG_a", 2), %%mm6 \n\t" // |L6-R6|
"movq (%0, %%"REG_c"), %%mm7 \n\t" // L7
"paddw %%mm4, %%mm0 \n\t"
"psadbw (%1, %%"REG_c"), %%mm7 \n\t" // |L7-R7|
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm7, %%mm6 \n\t"
"paddw %%mm6, %%mm0 \n\t"
#else //L1_DIFF
#if defined (FAST_L2_DIFF)
"pcmpeqb %%mm7, %%mm7 \n\t"
"movq "MANGLE(b80)", %%mm6 \n\t"
"pxor %%mm0, %%mm0 \n\t"
#define REAL_L2_DIFF_CORE(a, b)\
"movq " #a ", %%mm5 \n\t"\
"movq " #b ", %%mm2 \n\t"\
"pxor %%mm7, %%mm2 \n\t"\
PAVGB(%%mm2, %%mm5)\
"paddb %%mm6, %%mm5 \n\t"\
"movq %%mm5, %%mm2 \n\t"\
"psllw $8, %%mm5 \n\t"\
"pmaddwd %%mm5, %%mm5 \n\t"\
"pmaddwd %%mm2, %%mm2 \n\t"\
"paddd %%mm2, %%mm5 \n\t"\
"psrld $14, %%mm5 \n\t"\
"paddd %%mm5, %%mm0 \n\t"
 
#else //defined (FAST_L2_DIFF)
"pxor %%mm7, %%mm7 \n\t"
"pxor %%mm0, %%mm0 \n\t"
#define REAL_L2_DIFF_CORE(a, b)\
"movq " #a ", %%mm5 \n\t"\
"movq " #b ", %%mm2 \n\t"\
"movq %%mm5, %%mm1 \n\t"\
"movq %%mm2, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm5 \n\t"\
"punpckhbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"psubw %%mm2, %%mm5 \n\t"\
"psubw %%mm3, %%mm1 \n\t"\
"pmaddwd %%mm5, %%mm5 \n\t"\
"pmaddwd %%mm1, %%mm1 \n\t"\
"paddd %%mm1, %%mm5 \n\t"\
"paddd %%mm5, %%mm0 \n\t"
 
#endif //defined (FAST_L2_DIFF)
 
#define L2_DIFF_CORE(a, b) REAL_L2_DIFF_CORE(a, b)
 
L2_DIFF_CORE((%0) , (%1))
L2_DIFF_CORE((%0, %2) , (%1, %2))
L2_DIFF_CORE((%0, %2, 2) , (%1, %2, 2))
L2_DIFF_CORE((%0, %%REGa) , (%1, %%REGa))
L2_DIFF_CORE((%0, %2, 4) , (%1, %2, 4))
L2_DIFF_CORE((%0, %%REGd) , (%1, %%REGd))
L2_DIFF_CORE((%0, %%REGa,2), (%1, %%REGa,2))
L2_DIFF_CORE((%0, %%REGc) , (%1, %%REGc))
 
#endif //L1_DIFF
 
"movq %%mm0, %%mm4 \n\t"
"psrlq $32, %%mm0 \n\t"
"paddd %%mm0, %%mm4 \n\t"
"movd %%mm4, %%ecx \n\t"
"shll $2, %%ecx \n\t"
"mov %3, %%"REG_d" \n\t"
"addl -4(%%"REG_d"), %%ecx \n\t"
"addl 4(%%"REG_d"), %%ecx \n\t"
"addl -1024(%%"REG_d"), %%ecx \n\t"
"addl $4, %%ecx \n\t"
"addl 1024(%%"REG_d"), %%ecx \n\t"
"shrl $3, %%ecx \n\t"
"movl %%ecx, (%%"REG_d") \n\t"
 
// "mov %3, %%"REG_c" \n\t"
// "mov %%"REG_c", test \n\t"
// "jmp 4f \n\t"
"cmpl 512(%%"REG_d"), %%ecx \n\t"
" jb 2f \n\t"
"cmpl 516(%%"REG_d"), %%ecx \n\t"
" jb 1f \n\t"
 
"lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
"lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
"movq (%0), %%mm0 \n\t" // L0
"movq (%0, %2), %%mm1 \n\t" // L1
"movq (%0, %2, 2), %%mm2 \n\t" // L2
"movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
"movq (%0, %2, 4), %%mm4 \n\t" // L4
"movq (%0, %%"REG_d"), %%mm5 \n\t" // L5
"movq (%0, %%"REG_a", 2), %%mm6 \n\t" // L6
"movq (%0, %%"REG_c"), %%mm7 \n\t" // L7
"movq %%mm0, (%1) \n\t" // L0
"movq %%mm1, (%1, %2) \n\t" // L1
"movq %%mm2, (%1, %2, 2) \n\t" // L2
"movq %%mm3, (%1, %%"REG_a") \n\t" // L3
"movq %%mm4, (%1, %2, 4) \n\t" // L4
"movq %%mm5, (%1, %%"REG_d") \n\t" // L5
"movq %%mm6, (%1, %%"REG_a", 2) \n\t" // L6
"movq %%mm7, (%1, %%"REG_c") \n\t" // L7
"jmp 4f \n\t"
 
"1: \n\t"
"lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
"lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
"movq (%0), %%mm0 \n\t" // L0
PAVGB((%1), %%mm0) // L0
"movq (%0, %2), %%mm1 \n\t" // L1
PAVGB((%1, %2), %%mm1) // L1
"movq (%0, %2, 2), %%mm2 \n\t" // L2
PAVGB((%1, %2, 2), %%mm2) // L2
"movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
PAVGB((%1, %%REGa), %%mm3) // L3
"movq (%0, %2, 4), %%mm4 \n\t" // L4
PAVGB((%1, %2, 4), %%mm4) // L4
"movq (%0, %%"REG_d"), %%mm5 \n\t" // L5
PAVGB((%1, %%REGd), %%mm5) // L5
"movq (%0, %%"REG_a", 2), %%mm6 \n\t" // L6
PAVGB((%1, %%REGa, 2), %%mm6) // L6
"movq (%0, %%"REG_c"), %%mm7 \n\t" // L7
PAVGB((%1, %%REGc), %%mm7) // L7
"movq %%mm0, (%1) \n\t" // R0
"movq %%mm1, (%1, %2) \n\t" // R1
"movq %%mm2, (%1, %2, 2) \n\t" // R2
"movq %%mm3, (%1, %%"REG_a") \n\t" // R3
"movq %%mm4, (%1, %2, 4) \n\t" // R4
"movq %%mm5, (%1, %%"REG_d") \n\t" // R5
"movq %%mm6, (%1, %%"REG_a", 2) \n\t" // R6
"movq %%mm7, (%1, %%"REG_c") \n\t" // R7
"movq %%mm0, (%0) \n\t" // L0
"movq %%mm1, (%0, %2) \n\t" // L1
"movq %%mm2, (%0, %2, 2) \n\t" // L2
"movq %%mm3, (%0, %%"REG_a") \n\t" // L3
"movq %%mm4, (%0, %2, 4) \n\t" // L4
"movq %%mm5, (%0, %%"REG_d") \n\t" // L5
"movq %%mm6, (%0, %%"REG_a", 2) \n\t" // L6
"movq %%mm7, (%0, %%"REG_c") \n\t" // L7
"jmp 4f \n\t"
 
"2: \n\t"
"cmpl 508(%%"REG_d"), %%ecx \n\t"
" jb 3f \n\t"
 
"lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
"lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
"movq (%0), %%mm0 \n\t" // L0
"movq (%0, %2), %%mm1 \n\t" // L1
"movq (%0, %2, 2), %%mm2 \n\t" // L2
"movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
"movq (%1), %%mm4 \n\t" // R0
"movq (%1, %2), %%mm5 \n\t" // R1
"movq (%1, %2, 2), %%mm6 \n\t" // R2
"movq (%1, %%"REG_a"), %%mm7 \n\t" // R3
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
"movq %%mm0, (%1) \n\t" // R0
"movq %%mm1, (%1, %2) \n\t" // R1
"movq %%mm2, (%1, %2, 2) \n\t" // R2
"movq %%mm3, (%1, %%"REG_a") \n\t" // R3
"movq %%mm0, (%0) \n\t" // L0
"movq %%mm1, (%0, %2) \n\t" // L1
"movq %%mm2, (%0, %2, 2) \n\t" // L2
"movq %%mm3, (%0, %%"REG_a") \n\t" // L3
 
"movq (%0, %2, 4), %%mm0 \n\t" // L4
"movq (%0, %%"REG_d"), %%mm1 \n\t" // L5
"movq (%0, %%"REG_a", 2), %%mm2 \n\t" // L6
"movq (%0, %%"REG_c"), %%mm3 \n\t" // L7
"movq (%1, %2, 4), %%mm4 \n\t" // R4
"movq (%1, %%"REG_d"), %%mm5 \n\t" // R5
"movq (%1, %%"REG_a", 2), %%mm6 \n\t" // R6
"movq (%1, %%"REG_c"), %%mm7 \n\t" // R7
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
"movq %%mm0, (%1, %2, 4) \n\t" // R4
"movq %%mm1, (%1, %%"REG_d") \n\t" // R5
"movq %%mm2, (%1, %%"REG_a", 2) \n\t" // R6
"movq %%mm3, (%1, %%"REG_c") \n\t" // R7
"movq %%mm0, (%0, %2, 4) \n\t" // L4
"movq %%mm1, (%0, %%"REG_d") \n\t" // L5
"movq %%mm2, (%0, %%"REG_a", 2) \n\t" // L6
"movq %%mm3, (%0, %%"REG_c") \n\t" // L7
"jmp 4f \n\t"
 
"3: \n\t"
"lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
"lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
"movq (%0), %%mm0 \n\t" // L0
"movq (%0, %2), %%mm1 \n\t" // L1
"movq (%0, %2, 2), %%mm2 \n\t" // L2
"movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
"movq (%1), %%mm4 \n\t" // R0
"movq (%1, %2), %%mm5 \n\t" // R1
"movq (%1, %2, 2), %%mm6 \n\t" // R2
"movq (%1, %%"REG_a"), %%mm7 \n\t" // R3
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
"movq %%mm0, (%1) \n\t" // R0
"movq %%mm1, (%1, %2) \n\t" // R1
"movq %%mm2, (%1, %2, 2) \n\t" // R2
"movq %%mm3, (%1, %%"REG_a") \n\t" // R3
"movq %%mm0, (%0) \n\t" // L0
"movq %%mm1, (%0, %2) \n\t" // L1
"movq %%mm2, (%0, %2, 2) \n\t" // L2
"movq %%mm3, (%0, %%"REG_a") \n\t" // L3
 
"movq (%0, %2, 4), %%mm0 \n\t" // L4
"movq (%0, %%"REG_d"), %%mm1 \n\t" // L5
"movq (%0, %%"REG_a", 2), %%mm2 \n\t" // L6
"movq (%0, %%"REG_c"), %%mm3 \n\t" // L7
"movq (%1, %2, 4), %%mm4 \n\t" // R4
"movq (%1, %%"REG_d"), %%mm5 \n\t" // R5
"movq (%1, %%"REG_a", 2), %%mm6 \n\t" // R6
"movq (%1, %%"REG_c"), %%mm7 \n\t" // R7
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
"movq %%mm0, (%1, %2, 4) \n\t" // R4
"movq %%mm1, (%1, %%"REG_d") \n\t" // R5
"movq %%mm2, (%1, %%"REG_a", 2) \n\t" // R6
"movq %%mm3, (%1, %%"REG_c") \n\t" // R7
"movq %%mm0, (%0, %2, 4) \n\t" // L4
"movq %%mm1, (%0, %%"REG_d") \n\t" // L5
"movq %%mm2, (%0, %%"REG_a", 2) \n\t" // L6
"movq %%mm3, (%0, %%"REG_c") \n\t" // L7
 
"4: \n\t"
 
:: "r" (src), "r" (tempBlurred), "r"((x86_reg)stride), "m" (tempBlurredPast)
: "%"REG_a, "%"REG_d, "%"REG_c, "memory"
);
#else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
{
int y;
int d=0;
// int sysd=0;
int i;
 
for(y=0; y<8; y++){
int x;
for(x=0; x<8; x++){
int ref= tempBlurred[ x + y*stride ];
int cur= src[ x + y*stride ];
int d1=ref - cur;
// if(x==0 || x==7) d1+= d1>>1;
// if(y==0 || y==7) d1+= d1>>1;
// d+= FFABS(d1);
d+= d1*d1;
// sysd+= d1;
}
}
i=d;
d= (
4*d
+(*(tempBlurredPast-256))
+(*(tempBlurredPast-1))+ (*(tempBlurredPast+1))
+(*(tempBlurredPast+256))
+4)>>3;
*tempBlurredPast=i;
// ((*tempBlurredPast)*3 + d + 2)>>2;
 
/*
Switch between
1 0 0 0 0 0 0 (0)
64 32 16 8 4 2 1 (1)
64 48 36 27 20 15 11 (33) (approx)
64 56 49 43 37 33 29 (200) (approx)
*/
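/*
Depending on the noise estimate d the block is blended with the temporally
blurred reference: above maxNoise[2] it is treated as real change and simply
copied, between maxNoise[1] and maxNoise[2] it is averaged 1:1, between
maxNoise[0] and maxNoise[1] blended 3:1 and below maxNoise[0] blended 7:1
towards the reference.
*/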
if(d > maxNoise[1]){
if(d < maxNoise[2]){
for(y=0; y<8; y++){
int x;
for(x=0; x<8; x++){
int ref= tempBlurred[ x + y*stride ];
int cur= src[ x + y*stride ];
tempBlurred[ x + y*stride ]=
src[ x + y*stride ]=
(ref + cur + 1)>>1;
}
}
}else{
for(y=0; y<8; y++){
int x;
for(x=0; x<8; x++){
tempBlurred[ x + y*stride ]= src[ x + y*stride ];
}
}
}
}else{
if(d < maxNoise[0]){
for(y=0; y<8; y++){
int x;
for(x=0; x<8; x++){
int ref= tempBlurred[ x + y*stride ];
int cur= src[ x + y*stride ];
tempBlurred[ x + y*stride ]=
src[ x + y*stride ]=
(ref*7 + cur + 4)>>3;
}
}
}else{
for(y=0; y<8; y++){
int x;
for(x=0; x<8; x++){
int ref= tempBlurred[ x + y*stride ];
int cur= src[ x + y*stride ];
tempBlurred[ x + y*stride ]=
src[ x + y*stride ]=
(ref*3 + cur + 2)>>2;
}
}
}
}
}
#endif //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW
}
#endif //TEMPLATE_PP_ALTIVEC
 
#if TEMPLATE_PP_MMX
/**
* Accurate deblocking filter.
*/
static av_always_inline void RENAME(do_a_deblock)(uint8_t *src, int step, int stride, PPContext *c){
int64_t dc_mask, eq_mask, both_masks;
int64_t sums[10*8*2];
src+= step*3; // src points to the beginning of the 8x8 block
//{ START_TIMER
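/* The first asm block computes two per-column masks over the 8 pixels:
   dc_mask - set where the vertical max-min range of the column stays below
             2*QP, and
   eq_mask - set where enough pairs of neighbouring lines are almost equal
             (their difference stays within the DC threshold), i.e. the
             column looks like a smooth ramp.
   Columns with both bits set get the strong averaging filter further down;
   columns where eq_mask is clear get the default deblocking filter. */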
__asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
: : "m" (c->mmxDcOffset[c->nonBQP]), "m" (c->mmxDcThreshold[c->nonBQP])
);
 
__asm__ volatile(
"lea (%2, %3), %%"REG_a" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %1 eax eax+%2 eax+2%2 %1+4%2 ecx ecx+%2 ecx+2%2 %1+8%2 ecx+4%2
 
"movq (%2), %%mm0 \n\t"
"movq (%%"REG_a"), %%mm1 \n\t"
"movq %%mm1, %%mm3 \n\t"
"movq %%mm1, %%mm4 \n\t"
"psubb %%mm1, %%mm0 \n\t" // mm0 = difference
"paddb %%mm7, %%mm0 \n\t"
"pcmpgtb %%mm6, %%mm0 \n\t"
 
"movq (%%"REG_a",%3), %%mm2 \n\t"
PMAXUB(%%mm2, %%mm4)
PMINUB(%%mm2, %%mm3, %%mm5)
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
 
"movq (%%"REG_a", %3, 2), %%mm1 \n\t"
PMAXUB(%%mm1, %%mm4)
PMINUB(%%mm1, %%mm3, %%mm5)
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
 
"lea (%%"REG_a", %3, 4), %%"REG_a" \n\t"
 
"movq (%2, %3, 4), %%mm2 \n\t"
PMAXUB(%%mm2, %%mm4)
PMINUB(%%mm2, %%mm3, %%mm5)
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
 
"movq (%%"REG_a"), %%mm1 \n\t"
PMAXUB(%%mm1, %%mm4)
PMINUB(%%mm1, %%mm3, %%mm5)
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
 
"movq (%%"REG_a", %3), %%mm2 \n\t"
PMAXUB(%%mm2, %%mm4)
PMINUB(%%mm2, %%mm3, %%mm5)
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
 
"movq (%%"REG_a", %3, 2), %%mm1 \n\t"
PMAXUB(%%mm1, %%mm4)
PMINUB(%%mm1, %%mm3, %%mm5)
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
 
"movq (%2, %3, 8), %%mm2 \n\t"
PMAXUB(%%mm2, %%mm4)
PMINUB(%%mm2, %%mm3, %%mm5)
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
 
"movq (%%"REG_a", %3, 4), %%mm1 \n\t"
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
"psubusb %%mm3, %%mm4 \n\t"
 
"pxor %%mm6, %%mm6 \n\t"
"movq %4, %%mm7 \n\t" // QP,..., QP
"paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
"psubusb %%mm4, %%mm7 \n\t" // Diff >=2QP -> 0
"pcmpeqb %%mm6, %%mm7 \n\t" // Diff < 2QP -> 0
"pcmpeqb %%mm6, %%mm7 \n\t" // Diff < 2QP -> 0
"movq %%mm7, %1 \n\t"
 
"movq %5, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"psubb %%mm0, %%mm6 \n\t"
"pcmpgtb %%mm7, %%mm6 \n\t"
"movq %%mm6, %0 \n\t"
 
: "=m" (eq_mask), "=m" (dc_mask)
: "r" (src), "r" ((x86_reg)step), "m" (c->pQPb), "m"(c->ppMode.flatnessThreshold)
: "%"REG_a
);
 
both_masks = dc_mask & eq_mask;
 
if(both_masks){
x86_reg offset= -8*step;
int64_t *temp_sums= sums;
 
__asm__ volatile(
"movq %2, %%mm0 \n\t" // QP,..., QP
"pxor %%mm4, %%mm4 \n\t"
 
"movq (%0), %%mm6 \n\t"
"movq (%0, %1), %%mm5 \n\t"
"movq %%mm5, %%mm1 \n\t"
"movq %%mm6, %%mm2 \n\t"
"psubusb %%mm6, %%mm5 \n\t"
"psubusb %%mm1, %%mm2 \n\t"
"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
"psubusb %%mm2, %%mm0 \n\t" // diff >= QP -> 0
"pcmpeqb %%mm4, %%mm0 \n\t" // diff >= QP -> FF
 
"pxor %%mm6, %%mm1 \n\t"
"pand %%mm0, %%mm1 \n\t"
"pxor %%mm1, %%mm6 \n\t"
// 0:QP 6:First
 
"movq (%0, %1, 8), %%mm5 \n\t"
"add %1, %0 \n\t" // %0 points to line 1 not 0
"movq (%0, %1, 8), %%mm7 \n\t"
"movq %%mm5, %%mm1 \n\t"
"movq %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm5 \n\t"
"psubusb %%mm1, %%mm2 \n\t"
"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
"movq %2, %%mm0 \n\t" // QP,..., QP
"psubusb %%mm2, %%mm0 \n\t" // diff >= QP -> 0
"pcmpeqb %%mm4, %%mm0 \n\t" // diff >= QP -> FF
 
"pxor %%mm7, %%mm1 \n\t"
"pand %%mm0, %%mm1 \n\t"
"pxor %%mm1, %%mm7 \n\t"
 
"movq %%mm6, %%mm5 \n\t"
"punpckhbw %%mm4, %%mm6 \n\t"
"punpcklbw %%mm4, %%mm5 \n\t"
// 4:0 5/6:First 7:Last
 
"movq %%mm5, %%mm0 \n\t"
"movq %%mm6, %%mm1 \n\t"
"psllw $2, %%mm0 \n\t"
"psllw $2, %%mm1 \n\t"
"paddw "MANGLE(w04)", %%mm0 \n\t"
"paddw "MANGLE(w04)", %%mm1 \n\t"
 
#define NEXT\
"movq (%0), %%mm2 \n\t"\
"movq (%0), %%mm3 \n\t"\
"add %1, %0 \n\t"\
"punpcklbw %%mm4, %%mm2 \n\t"\
"punpckhbw %%mm4, %%mm3 \n\t"\
"paddw %%mm2, %%mm0 \n\t"\
"paddw %%mm3, %%mm1 \n\t"
 
#define PREV\
"movq (%0), %%mm2 \n\t"\
"movq (%0), %%mm3 \n\t"\
"add %1, %0 \n\t"\
"punpcklbw %%mm4, %%mm2 \n\t"\
"punpckhbw %%mm4, %%mm3 \n\t"\
"psubw %%mm2, %%mm0 \n\t"\
"psubw %%mm3, %%mm1 \n\t"
 
 
NEXT //0
NEXT //1
NEXT //2
"movq %%mm0, (%3) \n\t"
"movq %%mm1, 8(%3) \n\t"
 
NEXT //3
"psubw %%mm5, %%mm0 \n\t"
"psubw %%mm6, %%mm1 \n\t"
"movq %%mm0, 16(%3) \n\t"
"movq %%mm1, 24(%3) \n\t"
 
NEXT //4
"psubw %%mm5, %%mm0 \n\t"
"psubw %%mm6, %%mm1 \n\t"
"movq %%mm0, 32(%3) \n\t"
"movq %%mm1, 40(%3) \n\t"
 
NEXT //5
"psubw %%mm5, %%mm0 \n\t"
"psubw %%mm6, %%mm1 \n\t"
"movq %%mm0, 48(%3) \n\t"
"movq %%mm1, 56(%3) \n\t"
 
NEXT //6
"psubw %%mm5, %%mm0 \n\t"
"psubw %%mm6, %%mm1 \n\t"
"movq %%mm0, 64(%3) \n\t"
"movq %%mm1, 72(%3) \n\t"
 
"movq %%mm7, %%mm6 \n\t"
"punpckhbw %%mm4, %%mm7 \n\t"
"punpcklbw %%mm4, %%mm6 \n\t"
 
NEXT //7
"mov %4, %0 \n\t"
"add %1, %0 \n\t"
PREV //0
"movq %%mm0, 80(%3) \n\t"
"movq %%mm1, 88(%3) \n\t"
 
PREV //1
"paddw %%mm6, %%mm0 \n\t"
"paddw %%mm7, %%mm1 \n\t"
"movq %%mm0, 96(%3) \n\t"
"movq %%mm1, 104(%3) \n\t"
 
PREV //2
"paddw %%mm6, %%mm0 \n\t"
"paddw %%mm7, %%mm1 \n\t"
"movq %%mm0, 112(%3) \n\t"
"movq %%mm1, 120(%3) \n\t"
 
PREV //3
"paddw %%mm6, %%mm0 \n\t"
"paddw %%mm7, %%mm1 \n\t"
"movq %%mm0, 128(%3) \n\t"
"movq %%mm1, 136(%3) \n\t"
 
PREV //4
"paddw %%mm6, %%mm0 \n\t"
"paddw %%mm7, %%mm1 \n\t"
"movq %%mm0, 144(%3) \n\t"
"movq %%mm1, 152(%3) \n\t"
 
"mov %4, %0 \n\t" //FIXME
 
: "+&r"(src)
: "r" ((x86_reg)step), "m" (c->pQPb), "r"(sums), "g"(src)
);
 
src+= step; // src points to the beginning of the 8x8 block
 
__asm__ volatile(
"movq %4, %%mm6 \n\t"
"pcmpeqb %%mm5, %%mm5 \n\t"
"pxor %%mm6, %%mm5 \n\t"
"pxor %%mm7, %%mm7 \n\t"
 
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq 8(%1), %%mm1 \n\t"
"paddw 32(%1), %%mm0 \n\t"
"paddw 40(%1), %%mm1 \n\t"
"movq (%0, %3), %%mm2 \n\t"
"movq %%mm2, %%mm3 \n\t"
"movq %%mm2, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpckhbw %%mm7, %%mm3 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"paddw %%mm3, %%mm1 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"paddw %%mm3, %%mm1 \n\t"
"psrlw $4, %%mm0 \n\t"
"psrlw $4, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
"pand %%mm6, %%mm0 \n\t"
"pand %%mm5, %%mm4 \n\t"
"por %%mm4, %%mm0 \n\t"
"movq %%mm0, (%0, %3) \n\t"
"add $16, %1 \n\t"
"add %2, %0 \n\t"
" js 1b \n\t"
 
: "+r"(offset), "+r"(temp_sums)
: "r" ((x86_reg)step), "r"(src - offset), "m"(both_masks)
);
}else
src+= step; // src points to the beginning of the 8x8 block
 
if(eq_mask != -1LL){
uint8_t *temp_src= src;
DECLARE_ALIGNED(8, uint64_t, tmp)[4]; // make space for 4 8-byte vars
__asm__ volatile(
"pxor %%mm7, %%mm7 \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %1+8%1 ecx+4%1
 
"movq (%0), %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t" // low part of line 0
"punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0
 
"movq (%0, %1), %%mm2 \n\t"
"lea (%0, %1, 2), %%"REG_a" \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
"punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1
 
"movq (%%"REG_a"), %%mm4 \n\t"
"movq %%mm4, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
"punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2
 
"paddw %%mm0, %%mm0 \n\t" // 2L0
"paddw %%mm1, %%mm1 \n\t" // 2H0
"psubw %%mm4, %%mm2 \n\t" // L1 - L2
"psubw %%mm5, %%mm3 \n\t" // H1 - H2
"psubw %%mm2, %%mm0 \n\t" // 2L0 - L1 + L2
"psubw %%mm3, %%mm1 \n\t" // 2H0 - H1 + H2
 
"psllw $2, %%mm2 \n\t" // 4L1 - 4L2
"psllw $2, %%mm3 \n\t" // 4H1 - 4H2
"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2
 
"movq (%%"REG_a", %1), %%mm2 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t" // L3
"punpckhbw %%mm7, %%mm3 \n\t" // H3
 
"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - L3
"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
"movq %%mm0, (%4) \n\t" // 2L0 - 5L1 + 5L2 - 2L3
"movq %%mm1, 8(%4) \n\t" // 2H0 - 5H1 + 5H2 - 2H3
 
"movq (%%"REG_a", %1, 2), %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t" // L4
"punpckhbw %%mm7, %%mm1 \n\t" // H4
 
"psubw %%mm0, %%mm2 \n\t" // L3 - L4
"psubw %%mm1, %%mm3 \n\t" // H3 - H4
"movq %%mm2, 16(%4) \n\t" // L3 - L4
"movq %%mm3, 24(%4) \n\t" // H3 - H4
"paddw %%mm4, %%mm4 \n\t" // 2L2
"paddw %%mm5, %%mm5 \n\t" // 2H2
"psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
"psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4
 
"lea (%%"REG_a", %1), %0 \n\t"
"psllw $2, %%mm2 \n\t" // 4L3 - 4L4
"psllw $2, %%mm3 \n\t" // 4H3 - 4H4
"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4
//50 opcodes so far
"movq (%0, %1, 2), %%mm2 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t" // L5
"punpckhbw %%mm7, %%mm3 \n\t" // H5
"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - L5
"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - H5
"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5
 
"movq (%%"REG_a", %1, 4), %%mm6 \n\t"
"punpcklbw %%mm7, %%mm6 \n\t" // L6
"psubw %%mm6, %%mm2 \n\t" // L5 - L6
"movq (%%"REG_a", %1, 4), %%mm6 \n\t"
"punpckhbw %%mm7, %%mm6 \n\t" // H6
"psubw %%mm6, %%mm3 \n\t" // H5 - H6
 
"paddw %%mm0, %%mm0 \n\t" // 2L4
"paddw %%mm1, %%mm1 \n\t" // 2H4
"psubw %%mm2, %%mm0 \n\t" // 2L4 - L5 + L6
"psubw %%mm3, %%mm1 \n\t" // 2H4 - H5 + H6
 
"psllw $2, %%mm2 \n\t" // 4L5 - 4L6
"psllw $2, %%mm3 \n\t" // 4H5 - 4H6
"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6
"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6
 
"movq (%0, %1, 4), %%mm2 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t" // L7
"punpckhbw %%mm7, %%mm3 \n\t" // H7
 
"paddw %%mm2, %%mm2 \n\t" // 2L7
"paddw %%mm3, %%mm3 \n\t" // 2H7
"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7
 
"movq (%4), %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
"movq 8(%4), %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
 
#if TEMPLATE_PP_MMXEXT
"movq %%mm7, %%mm6 \n\t" // 0
"psubw %%mm0, %%mm6 \n\t"
"pmaxsw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
"movq %%mm7, %%mm6 \n\t" // 0
"psubw %%mm1, %%mm6 \n\t"
"pmaxsw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
"movq %%mm7, %%mm6 \n\t" // 0
"psubw %%mm2, %%mm6 \n\t"
"pmaxsw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
"movq %%mm7, %%mm6 \n\t" // 0
"psubw %%mm3, %%mm6 \n\t"
"pmaxsw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#else
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm0, %%mm6 \n\t"
"pxor %%mm6, %%mm0 \n\t"
"psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm1, %%mm6 \n\t"
"pxor %%mm6, %%mm1 \n\t"
"psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm2, %%mm6 \n\t"
"pxor %%mm6, %%mm2 \n\t"
"psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm3, %%mm6 \n\t"
"pxor %%mm6, %%mm3 \n\t"
"psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#endif
 
#if TEMPLATE_PP_MMXEXT
"pminsw %%mm2, %%mm0 \n\t"
"pminsw %%mm3, %%mm1 \n\t"
#else
"movq %%mm0, %%mm6 \n\t"
"psubusw %%mm2, %%mm6 \n\t"
"psubw %%mm6, %%mm0 \n\t"
"movq %%mm1, %%mm6 \n\t"
"psubusw %%mm3, %%mm6 \n\t"
"psubw %%mm6, %%mm1 \n\t"
#endif
 
"movd %2, %%mm2 \n\t" // QP
"punpcklbw %%mm7, %%mm2 \n\t"
 
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
"pxor %%mm6, %%mm4 \n\t"
"psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
"pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
"pxor %%mm7, %%mm5 \n\t"
"psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
// 100 opcodes
"psllw $3, %%mm2 \n\t" // 8QP
"movq %%mm2, %%mm3 \n\t" // 8QP
"pcmpgtw %%mm4, %%mm2 \n\t"
"pcmpgtw %%mm5, %%mm3 \n\t"
"pand %%mm2, %%mm4 \n\t"
"pand %%mm3, %%mm5 \n\t"
 
 
"psubusw %%mm0, %%mm4 \n\t" // hd
"psubusw %%mm1, %%mm5 \n\t" // ld
 
 
"movq "MANGLE(w05)", %%mm2 \n\t" // 5
"pmullw %%mm2, %%mm4 \n\t"
"pmullw %%mm2, %%mm5 \n\t"
"movq "MANGLE(w20)", %%mm2 \n\t" // 32
"paddw %%mm2, %%mm4 \n\t"
"paddw %%mm2, %%mm5 \n\t"
"psrlw $6, %%mm4 \n\t"
"psrlw $6, %%mm5 \n\t"
 
"movq 16(%4), %%mm0 \n\t" // L3 - L4
"movq 24(%4), %%mm1 \n\t" // H3 - H4
 
"pxor %%mm2, %%mm2 \n\t"
"pxor %%mm3, %%mm3 \n\t"
 
"pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
"pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"psubw %%mm2, %%mm0 \n\t" // |L3-L4|
"psubw %%mm3, %%mm1 \n\t" // |H3-H4|
"psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
"psrlw $1, %%mm1 \n\t" // |H3 - H4|/2
 
"pxor %%mm6, %%mm2 \n\t"
"pxor %%mm7, %%mm3 \n\t"
"pand %%mm2, %%mm4 \n\t"
"pand %%mm3, %%mm5 \n\t"
 
#if TEMPLATE_PP_MMXEXT
"pminsw %%mm0, %%mm4 \n\t"
"pminsw %%mm1, %%mm5 \n\t"
#else
"movq %%mm4, %%mm2 \n\t"
"psubusw %%mm0, %%mm2 \n\t"
"psubw %%mm2, %%mm4 \n\t"
"movq %%mm5, %%mm2 \n\t"
"psubusw %%mm1, %%mm2 \n\t"
"psubw %%mm2, %%mm5 \n\t"
#endif
"pxor %%mm6, %%mm4 \n\t"
"pxor %%mm7, %%mm5 \n\t"
"psubw %%mm6, %%mm4 \n\t"
"psubw %%mm7, %%mm5 \n\t"
"packsswb %%mm5, %%mm4 \n\t"
"movq %3, %%mm1 \n\t"
"pandn %%mm4, %%mm1 \n\t"
"movq (%0), %%mm0 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"movq %%mm0, (%0) \n\t"
"movq (%0, %1), %%mm0 \n\t"
"psubb %%mm1, %%mm0 \n\t"
"movq %%mm0, (%0, %1) \n\t"
 
: "+r" (temp_src)
: "r" ((x86_reg)step), "m" (c->pQPb), "m"(eq_mask), "r"(tmp)
: "%"REG_a
);
}
/*if(step==16){
STOP_TIMER("step16")
}else{
STOP_TIMER("stepX")
}
} */
}
#endif //TEMPLATE_PP_MMX
 
static void RENAME(postProcess)(const uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
const QP_STORE_T QPs[], int QPStride, int isColor, PPContext *c);
 
/**
* Copy a block from src to dst, fixing the black level.
* levelFix == 0 -> do not touch the brightness & contrast
*/
#undef REAL_SCALED_CPY
#undef SCALED_CPY
 
static inline void RENAME(blockCopy)(uint8_t dst[], int dstStride, const uint8_t src[], int srcStride,
int levelFix, int64_t *packedOffsetAndScale)
{
#if !TEMPLATE_PP_MMX
int i;
#endif
if(levelFix){
#if TEMPLATE_PP_MMX
__asm__ volatile(
"movq (%%"REG_a"), %%mm2 \n\t" // packedYOffset
"movq 8(%%"REG_a"), %%mm3 \n\t" // packedYScale
"lea (%2,%4), %%"REG_a" \n\t"
"lea (%3,%5), %%"REG_d" \n\t"
"pxor %%mm4, %%mm4 \n\t"
#if TEMPLATE_PP_MMXEXT
#define REAL_SCALED_CPY(src1, src2, dst1, dst2) \
"movq " #src1 ", %%mm0 \n\t"\
"movq " #src1 ", %%mm5 \n\t"\
"movq " #src2 ", %%mm1 \n\t"\
"movq " #src2 ", %%mm6 \n\t"\
"punpcklbw %%mm0, %%mm0 \n\t"\
"punpckhbw %%mm5, %%mm5 \n\t"\
"punpcklbw %%mm1, %%mm1 \n\t"\
"punpckhbw %%mm6, %%mm6 \n\t"\
"pmulhuw %%mm3, %%mm0 \n\t"\
"pmulhuw %%mm3, %%mm5 \n\t"\
"pmulhuw %%mm3, %%mm1 \n\t"\
"pmulhuw %%mm3, %%mm6 \n\t"\
"psubw %%mm2, %%mm0 \n\t"\
"psubw %%mm2, %%mm5 \n\t"\
"psubw %%mm2, %%mm1 \n\t"\
"psubw %%mm2, %%mm6 \n\t"\
"packuswb %%mm5, %%mm0 \n\t"\
"packuswb %%mm6, %%mm1 \n\t"\
"movq %%mm0, " #dst1 " \n\t"\
"movq %%mm1, " #dst2 " \n\t"\
 
#else //TEMPLATE_PP_MMXEXT
#define REAL_SCALED_CPY(src1, src2, dst1, dst2) \
"movq " #src1 ", %%mm0 \n\t"\
"movq " #src1 ", %%mm5 \n\t"\
"punpcklbw %%mm4, %%mm0 \n\t"\
"punpckhbw %%mm4, %%mm5 \n\t"\
"psubw %%mm2, %%mm0 \n\t"\
"psubw %%mm2, %%mm5 \n\t"\
"movq " #src2 ", %%mm1 \n\t"\
"psllw $6, %%mm0 \n\t"\
"psllw $6, %%mm5 \n\t"\
"pmulhw %%mm3, %%mm0 \n\t"\
"movq " #src2 ", %%mm6 \n\t"\
"pmulhw %%mm3, %%mm5 \n\t"\
"punpcklbw %%mm4, %%mm1 \n\t"\
"punpckhbw %%mm4, %%mm6 \n\t"\
"psubw %%mm2, %%mm1 \n\t"\
"psubw %%mm2, %%mm6 \n\t"\
"psllw $6, %%mm1 \n\t"\
"psllw $6, %%mm6 \n\t"\
"pmulhw %%mm3, %%mm1 \n\t"\
"pmulhw %%mm3, %%mm6 \n\t"\
"packuswb %%mm5, %%mm0 \n\t"\
"packuswb %%mm6, %%mm1 \n\t"\
"movq %%mm0, " #dst1 " \n\t"\
"movq %%mm1, " #dst2 " \n\t"\
 
#endif //TEMPLATE_PP_MMXEXT
#define SCALED_CPY(src1, src2, dst1, dst2)\
REAL_SCALED_CPY(src1, src2, dst1, dst2)
 
SCALED_CPY((%2) , (%2, %4) , (%3) , (%3, %5))
SCALED_CPY((%2, %4, 2), (%%REGa, %4, 2), (%3, %5, 2), (%%REGd, %5, 2))
SCALED_CPY((%2, %4, 4), (%%REGa, %4, 4), (%3, %5, 4), (%%REGd, %5, 4))
"lea (%%"REG_a",%4,4), %%"REG_a" \n\t"
"lea (%%"REG_d",%5,4), %%"REG_d" \n\t"
SCALED_CPY((%%REGa, %4), (%%REGa, %4, 2), (%%REGd, %5), (%%REGd, %5, 2))
 
 
: "=&a" (packedOffsetAndScale)
: "0" (packedOffsetAndScale),
"r"(src),
"r"(dst),
"r" ((x86_reg)srcStride),
"r" ((x86_reg)dstStride)
: "%"REG_d
);
#else //TEMPLATE_PP_MMX
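/* Note: the brightness/contrast correction is only implemented in the MMX
   paths above; this C fallback copies the block unchanged. */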
for(i=0; i<8; i++)
memcpy( &(dst[dstStride*i]),
&(src[srcStride*i]), BLOCK_SIZE);
#endif //TEMPLATE_PP_MMX
}else{
#if TEMPLATE_PP_MMX
__asm__ volatile(
"lea (%0,%2), %%"REG_a" \n\t"
"lea (%1,%3), %%"REG_d" \n\t"
 
#define REAL_SIMPLE_CPY(src1, src2, dst1, dst2) \
"movq " #src1 ", %%mm0 \n\t"\
"movq " #src2 ", %%mm1 \n\t"\
"movq %%mm0, " #dst1 " \n\t"\
"movq %%mm1, " #dst2 " \n\t"\
 
#define SIMPLE_CPY(src1, src2, dst1, dst2)\
REAL_SIMPLE_CPY(src1, src2, dst1, dst2)
 
SIMPLE_CPY((%0) , (%0, %2) , (%1) , (%1, %3))
SIMPLE_CPY((%0, %2, 2), (%%REGa, %2, 2), (%1, %3, 2), (%%REGd, %3, 2))
SIMPLE_CPY((%0, %2, 4), (%%REGa, %2, 4), (%1, %3, 4), (%%REGd, %3, 4))
"lea (%%"REG_a",%2,4), %%"REG_a" \n\t"
"lea (%%"REG_d",%3,4), %%"REG_d" \n\t"
SIMPLE_CPY((%%REGa, %2), (%%REGa, %2, 2), (%%REGd, %3), (%%REGd, %3, 2))
 
: : "r" (src),
"r" (dst),
"r" ((x86_reg)srcStride),
"r" ((x86_reg)dstStride)
: "%"REG_a, "%"REG_d
);
#else //TEMPLATE_PP_MMX
for(i=0; i<8; i++)
memcpy( &(dst[dstStride*i]),
&(src[srcStride*i]), BLOCK_SIZE);
#endif //TEMPLATE_PP_MMX
}
}
 
/**
* Duplicate the given 8 src pixels 5 times upward (to fill the lines above the block).
*/
static inline void RENAME(duplicate)(uint8_t src[], int stride)
{
#if TEMPLATE_PP_MMX
__asm__ volatile(
"movq (%0), %%mm0 \n\t"
"movq %%mm0, (%0, %1, 4) \n\t"
"add %1, %0 \n\t"
"movq %%mm0, (%0) \n\t"
"movq %%mm0, (%0, %1) \n\t"
"movq %%mm0, (%0, %1, 2) \n\t"
"movq %%mm0, (%0, %1, 4) \n\t"
: "+r" (src)
: "r" ((x86_reg)-stride)
);
#else
int i;
uint8_t *p=src;
for(i=0; i<5; i++){
p-= stride;
memcpy(p, src, 8);
}
#endif
}
 
/**
* Filter an array of bytes (Y, U, or V values).
*/
static void RENAME(postProcess)(const uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
const QP_STORE_T QPs[], int QPStride, int isColor, PPContext *c2)
{
DECLARE_ALIGNED(8, PPContext, c)= *c2; //copy to stack for faster access
int x,y;
#ifdef TEMPLATE_PP_TIME_MODE
const int mode= TEMPLATE_PP_TIME_MODE;
#else
const int mode= isColor ? c.ppMode.chromMode : c.ppMode.lumMode;
#endif
int black=0, white=255; // blackest black and whitest white in the picture
int QPCorrecture= 256*256;
 
int copyAhead;
#if TEMPLATE_PP_MMX
int i;
#endif
 
const int qpHShift= isColor ? 4-c.hChromaSubSample : 4;
const int qpVShift= isColor ? 4-c.vChromaSubSample : 4;
 
//FIXME remove
uint64_t * const yHistogram= c.yHistogram;
uint8_t * const tempSrc= srcStride > 0 ? c.tempSrc : c.tempSrc - 23*srcStride;
uint8_t * const tempDst= (dstStride > 0 ? c.tempDst : c.tempDst - 23*dstStride) + 32;
//const int mbWidth= isColor ? (width+7)>>3 : (width+15)>>4;
 
#if TEMPLATE_PP_MMX
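/* Precompute the per-QP DC offset / threshold constants used by the deblock
   flatness test: each value is biased around 0x7F for the signed-byte compare
   trick and replicated into all 8 bytes of a 64-bit word. */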
for(i=0; i<57; i++){
int offset= ((i*c.ppMode.baseDcDiff)>>8) + 1;
int threshold= offset*2 + 1;
c.mmxDcOffset[i]= 0x7F - offset;
c.mmxDcThreshold[i]= 0x7F - threshold;
c.mmxDcOffset[i]*= 0x0101010101010101LL;
c.mmxDcThreshold[i]*= 0x0101010101010101LL;
}
#endif
 
if(mode & CUBIC_IPOL_DEINT_FILTER) copyAhead=16;
else if( (mode & LINEAR_BLEND_DEINT_FILTER)
|| (mode & FFMPEG_DEINT_FILTER)
|| (mode & LOWPASS5_DEINT_FILTER)) copyAhead=14;
else if( (mode & V_DEBLOCK)
|| (mode & LINEAR_IPOL_DEINT_FILTER)
|| (mode & MEDIAN_DEINT_FILTER)
|| (mode & V_A_DEBLOCK)) copyAhead=13;
else if(mode & V_X1_FILTER) copyAhead=11;
// else if(mode & V_RK1_FILTER) copyAhead=10;
else if(mode & DERING) copyAhead=9;
else copyAhead=8;
 
copyAhead-= 8;
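// copyAhead now holds the number of extra lines below the 8-line block that the
// selected filters need to have available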
 
if(!isColor){
uint64_t sum= 0;
int i;
uint64_t maxClipped;
uint64_t clipped;
double scale;
 
c.frameNum++;
// the first frame is unreliable, so we ignore it
if(c.frameNum == 1) yHistogram[0]= width*(uint64_t)height/64*15/256;
 
for(i=0; i<256; i++){
sum+= yHistogram[i];
}
 
/* We always get a completely black picture first. */
maxClipped= (uint64_t)(sum * c.ppMode.maxClippedThreshold);
 
clipped= sum;
for(black=255; black>0; black--){
if(clipped < maxClipped) break;
clipped-= yHistogram[black];
}
 
clipped= sum;
for(white=0; white<256; white++){
if(clipped < maxClipped) break;
clipped-= yHistogram[white];
}
 
scale= (double)(c.ppMode.maxAllowedY - c.ppMode.minAllowedY) / (double)(white-black);
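// scale maps the measured [black, white] luma range onto the allowed
// [minAllowedY, maxAllowedY] range (level fix)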
 
#if TEMPLATE_PP_MMXEXT
c.packedYScale= (uint16_t)(scale*256.0 + 0.5);
c.packedYOffset= (((black*c.packedYScale)>>8) - c.ppMode.minAllowedY) & 0xFFFF;
#else
c.packedYScale= (uint16_t)(scale*1024.0 + 0.5);
c.packedYOffset= (black - c.ppMode.minAllowedY) & 0xFFFF;
#endif
 
c.packedYOffset|= c.packedYOffset<<32;
c.packedYOffset|= c.packedYOffset<<16;
 
c.packedYScale|= c.packedYScale<<32;
c.packedYScale|= c.packedYScale<<16;
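// offset and scale are now replicated into all four 16-bit lanes of the 64-bit words
// so that blockCopy can apply the level fix to several pixels per SIMD operation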
 
if(mode & LEVEL_FIX) QPCorrecture= (int)(scale*256*256 + 0.5);
else QPCorrecture= 256*256;
}else{
c.packedYScale= 0x0100010001000100LL;
c.packedYOffset= 0;
QPCorrecture= 256*256;
}
 
/* copy & deinterlace first row of blocks */
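// the first row of blocks is processed in tempDst: the top source line is duplicated
// upward (there is no picture data above), the deinterlacers run on the temporary
// buffer, and the result is then copied back to dst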
y=-BLOCK_SIZE;
{
const uint8_t *srcBlock= &(src[y*srcStride]);
uint8_t *dstBlock= tempDst + dstStride;
 
// From this point on it is guaranteed that we can read and write 16 lines downward.
// Finish one block before starting the next, otherwise we might run into trouble
// with the L1 cache of the P4 ... or process only a few blocks at a time or something.
for(x=0; x<width; x+=BLOCK_SIZE){
 
#if TEMPLATE_PP_MMXEXT
/*
prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32);
prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32);
prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32);
prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32);
*/
 
__asm__(
"mov %4, %%"REG_a" \n\t"
"shr $2, %%"REG_a" \n\t"
"and $6, %%"REG_a" \n\t"
"add %5, %%"REG_a" \n\t"
"mov %%"REG_a", %%"REG_d" \n\t"
"imul %1, %%"REG_a" \n\t"
"imul %3, %%"REG_d" \n\t"
"prefetchnta 32(%%"REG_a", %0) \n\t"
"prefetcht0 32(%%"REG_d", %2) \n\t"
"add %1, %%"REG_a" \n\t"
"add %3, %%"REG_d" \n\t"
"prefetchnta 32(%%"REG_a", %0) \n\t"
"prefetcht0 32(%%"REG_d", %2) \n\t"
:: "r" (srcBlock), "r" ((x86_reg)srcStride), "r" (dstBlock), "r" ((x86_reg)dstStride),
"g" ((x86_reg)x), "g" ((x86_reg)copyAhead)
: "%"REG_a, "%"REG_d
);
 
#elif TEMPLATE_PP_3DNOW
//FIXME check if this is faster on a 3DNow! chip, or if it is faster without the prefetch, or ...
/* prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
#endif
 
RENAME(blockCopy)(dstBlock + dstStride*8, dstStride,
srcBlock + srcStride*8, srcStride, mode & LEVEL_FIX, &c.packedYOffset);
 
RENAME(duplicate)(dstBlock + dstStride*8, dstStride);
 
if(mode & LINEAR_IPOL_DEINT_FILTER)
RENAME(deInterlaceInterpolateLinear)(dstBlock, dstStride);
else if(mode & LINEAR_BLEND_DEINT_FILTER)
RENAME(deInterlaceBlendLinear)(dstBlock, dstStride, c.deintTemp + x);
else if(mode & MEDIAN_DEINT_FILTER)
RENAME(deInterlaceMedian)(dstBlock, dstStride);
else if(mode & CUBIC_IPOL_DEINT_FILTER)
RENAME(deInterlaceInterpolateCubic)(dstBlock, dstStride);
else if(mode & FFMPEG_DEINT_FILTER)
RENAME(deInterlaceFF)(dstBlock, dstStride, c.deintTemp + x);
else if(mode & LOWPASS5_DEINT_FILTER)
RENAME(deInterlaceL5)(dstBlock, dstStride, c.deintTemp + x, c.deintTemp + width + x);
/* else if(mode & CUBIC_BLEND_DEINT_FILTER)
RENAME(deInterlaceBlendCubic)(dstBlock, dstStride);
*/
dstBlock+=8;
srcBlock+=8;
}
if(width==FFABS(dstStride))
linecpy(dst, tempDst + 9*dstStride, copyAhead, dstStride);
else{
int i;
for(i=0; i<copyAhead; i++){
memcpy(dst + i*dstStride, tempDst + (9+i)*dstStride, width);
}
}
}
 
for(y=0; y<height; y+=BLOCK_SIZE){
//1% speedup if these are here instead of in the inner loop
const uint8_t *srcBlock= &(src[y*srcStride]);
uint8_t *dstBlock= &(dst[y*dstStride]);
#if TEMPLATE_PP_MMX
uint8_t *tempBlock1= c.tempBlocks;
uint8_t *tempBlock2= c.tempBlocks + 8;
#endif
const int8_t *QPptr= &QPs[(y>>qpVShift)*QPStride];
int8_t *nonBQPptr= &c.nonBQPTable[(y>>qpVShift)*FFABS(QPStride)];
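// the QP tables store one value per macroblock; qpVShift/qpHShift convert pixel
// coordinates to QP-table indices, taking chroma subsampling into account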
int QP=0;
/* can we safely work on an 8x16 block from srcBlock/dstBlock downwards and 1 line upwards?
if not, then use a temporary buffer */
if(y+15 >= height){
int i;
/* copy from line (copyAhead) to (copyAhead+7) of src, these will be copied with
blockcopy to dst later */
linecpy(tempSrc + srcStride*copyAhead, srcBlock + srcStride*copyAhead,
FFMAX(height-y-copyAhead, 0), srcStride);
 
/* duplicate last line of src to fill the void up to line (copyAhead+7) */
for(i=FFMAX(height-y, 8); i<copyAhead+8; i++)
memcpy(tempSrc + srcStride*i, src + srcStride*(height-1), FFABS(srcStride));
 
/* copy up to (copyAhead+1) lines of dst (line -1 to (copyAhead-1))*/
linecpy(tempDst, dstBlock - dstStride, FFMIN(height-y+1, copyAhead+1), dstStride);
 
/* duplicate last line of dst to fill the void up to line (copyAhead) */
for(i=height-y+1; i<=copyAhead; i++)
memcpy(tempDst + dstStride*i, dst + dstStride*(height-1), FFABS(dstStride));
 
dstBlock= tempDst + dstStride;
srcBlock= tempSrc;
}
 
// From this point on it is guaranteed that we can read and write 16 lines downward.
// Finish one block before starting the next, otherwise we might run into trouble
// with the L1 cache of the P4 ... or process only a few blocks at a time or something.
for(x=0; x<width; x+=BLOCK_SIZE){
const int stride= dstStride;
#if TEMPLATE_PP_MMX
uint8_t *tmpXchg;
#endif
if(isColor){
QP= QPptr[x>>qpHShift];
c.nonBQP= nonBQPptr[x>>qpHShift];
}else{
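// for luma, QP and nonBQP are rescaled by QPCorrecture, a 16.16 fixed-point factor
// (identity unless LEVEL_FIX changed the value range); +256*128 rounds to nearest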
QP= QPptr[x>>4];
QP= (QP* QPCorrecture + 256*128)>>16;
c.nonBQP= nonBQPptr[x>>4];
c.nonBQP= (c.nonBQP* QPCorrecture + 256*128)>>16;
yHistogram[ srcBlock[srcStride*12 + 4] ]++;
}
c.QP= QP;
#if TEMPLATE_PP_MMX
__asm__ volatile(
"movd %1, %%mm7 \n\t"
"packuswb %%mm7, %%mm7 \n\t" // 0, 0, 0, QP, 0, 0, 0, QP
"packuswb %%mm7, %%mm7 \n\t" // 0,QP, 0, QP, 0,QP, 0, QP
"packuswb %%mm7, %%mm7 \n\t" // QP,..., QP
"movq %%mm7, %0 \n\t"
: "=m" (c.pQPb)
: "r" (QP)
);
#endif
 
 
#if TEMPLATE_PP_MMXEXT
/*
prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32);
prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32);
prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32);
prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32);
*/
 
__asm__(
"mov %4, %%"REG_a" \n\t"
"shr $2, %%"REG_a" \n\t"
"and $6, %%"REG_a" \n\t"
"add %5, %%"REG_a" \n\t"
"mov %%"REG_a", %%"REG_d" \n\t"
"imul %1, %%"REG_a" \n\t"
"imul %3, %%"REG_d" \n\t"
"prefetchnta 32(%%"REG_a", %0) \n\t"
"prefetcht0 32(%%"REG_d", %2) \n\t"
"add %1, %%"REG_a" \n\t"
"add %3, %%"REG_d" \n\t"
"prefetchnta 32(%%"REG_a", %0) \n\t"
"prefetcht0 32(%%"REG_d", %2) \n\t"
:: "r" (srcBlock), "r" ((x86_reg)srcStride), "r" (dstBlock), "r" ((x86_reg)dstStride),
"g" ((x86_reg)x), "g" ((x86_reg)copyAhead)
: "%"REG_a, "%"REG_d
);
 
#elif TEMPLATE_PP_3DNOW
//FIXME check if this is faster on a 3DNow! chip, or if it is faster without the prefetch, or ...
/* prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
#endif
 
RENAME(blockCopy)(dstBlock + dstStride*copyAhead, dstStride,
srcBlock + srcStride*copyAhead, srcStride, mode & LEVEL_FIX, &c.packedYOffset);
 
if(mode & LINEAR_IPOL_DEINT_FILTER)
RENAME(deInterlaceInterpolateLinear)(dstBlock, dstStride);
else if(mode & LINEAR_BLEND_DEINT_FILTER)
RENAME(deInterlaceBlendLinear)(dstBlock, dstStride, c.deintTemp + x);
else if(mode & MEDIAN_DEINT_FILTER)
RENAME(deInterlaceMedian)(dstBlock, dstStride);
else if(mode & CUBIC_IPOL_DEINT_FILTER)
RENAME(deInterlaceInterpolateCubic)(dstBlock, dstStride);
else if(mode & FFMPEG_DEINT_FILTER)
RENAME(deInterlaceFF)(dstBlock, dstStride, c.deintTemp + x);
else if(mode & LOWPASS5_DEINT_FILTER)
RENAME(deInterlaceL5)(dstBlock, dstStride, c.deintTemp + x, c.deintTemp + width + x);
/* else if(mode & CUBIC_BLEND_DEINT_FILTER)
RENAME(deInterlaceBlendCubic)(dstBlock, dstStride);
*/
 
/* only deblock if we have 2 blocks */
if(y + 8 < height){
if(mode & V_X1_FILTER)
RENAME(vertX1Filter)(dstBlock, stride, &c);
else if(mode & V_DEBLOCK){
const int t= RENAME(vertClassify)(dstBlock, stride, &c);
 
if(t==1)
RENAME(doVertLowPass)(dstBlock, stride, &c);
else if(t==2)
RENAME(doVertDefFilter)(dstBlock, stride, &c);
}else if(mode & V_A_DEBLOCK){
RENAME(do_a_deblock)(dstBlock, stride, 1, &c);
}
}
 
#if TEMPLATE_PP_MMX
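// transpose the current 8x8 block into tempBlock (stride 16) so the horizontal
// deblocking below can reuse the vertical filter implementations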
RENAME(transpose1)(tempBlock1, tempBlock2, dstBlock, dstStride);
#endif
/* check if there is a previous block to deblock together with dstBlock */
if(x - 8 >= 0){
#if TEMPLATE_PP_MMX
if(mode & H_X1_FILTER)
RENAME(vertX1Filter)(tempBlock1, 16, &c);
else if(mode & H_DEBLOCK){
//START_TIMER
const int t= RENAME(vertClassify)(tempBlock1, 16, &c);
//STOP_TIMER("dc & minmax")
if(t==1)
RENAME(doVertLowPass)(tempBlock1, 16, &c);
else if(t==2)
RENAME(doVertDefFilter)(tempBlock1, 16, &c);
}else if(mode & H_A_DEBLOCK){
RENAME(do_a_deblock)(tempBlock1, 16, 1, &c);
}
 
RENAME(transpose2)(dstBlock-4, dstStride, tempBlock1 + 4*16);
 
#else
if(mode & H_X1_FILTER)
horizX1Filter(dstBlock-4, stride, QP);
else if(mode & H_DEBLOCK){
#if TEMPLATE_PP_ALTIVEC
DECLARE_ALIGNED(16, unsigned char, tempBlock)[272];
int t;
transpose_16x8_char_toPackedAlign_altivec(tempBlock, dstBlock - (4 + 1), stride);
 
t = vertClassify_altivec(tempBlock-48, 16, &c);
if(t==1) {
doVertLowPass_altivec(tempBlock-48, 16, &c);
transpose_8x16_char_fromPackedAlign_altivec(dstBlock - (4 + 1), tempBlock, stride);
}
else if(t==2) {
doVertDefFilter_altivec(tempBlock-48, 16, &c);
transpose_8x16_char_fromPackedAlign_altivec(dstBlock - (4 + 1), tempBlock, stride);
}
#else
const int t= RENAME(horizClassify)(dstBlock-4, stride, &c);
 
if(t==1)
RENAME(doHorizLowPass)(dstBlock-4, stride, &c);
else if(t==2)
RENAME(doHorizDefFilter)(dstBlock-4, stride, &c);
#endif
}else if(mode & H_A_DEBLOCK){
RENAME(do_a_deblock)(dstBlock-8, 1, stride, &c);
}
#endif //TEMPLATE_PP_MMX
if(mode & DERING){
//FIXME filter first line
if(y>0) RENAME(dering)(dstBlock - stride - 8, stride, &c);
}
 
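// temporal noise reducer: blend the current block with the co-located block of the
// previous output frame kept in tempBlurred, limited by the maxTmpNoise thresholds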
if(mode & TEMP_NOISE_FILTER)
{
RENAME(tempNoiseReducer)(dstBlock-8, stride,
c.tempBlurred[isColor] + y*dstStride + x,
c.tempBlurredPast[isColor] + (y>>3)*256 + (x>>3) + 256,
c.ppMode.maxTmpNoise);
}
}
 
dstBlock+=8;
srcBlock+=8;
 
#if TEMPLATE_PP_MMX
tmpXchg= tempBlock1;
tempBlock1= tempBlock2;
tempBlock2 = tmpXchg;
#endif
}
 
if(mode & DERING){
if(y > 0) RENAME(dering)(dstBlock - dstStride - 8, dstStride, &c);
}
 
if((mode & TEMP_NOISE_FILTER)){
RENAME(tempNoiseReducer)(dstBlock-8, dstStride,
c.tempBlurred[isColor] + y*dstStride + x,
c.tempBlurredPast[isColor] + (y>>3)*256 + (x>>3) + 256,
c.ppMode.maxTmpNoise);
}
 
/* did we use a tmp buffer for the last lines? */
if(y+15 >= height){
uint8_t *dstBlock= &(dst[y*dstStride]);
if(width==FFABS(dstStride))
linecpy(dstBlock, tempDst + dstStride, height-y, dstStride);
else{
int i;
for(i=0; i<height-y; i++){
memcpy(dstBlock + i*dstStride, tempDst + (i+1)*dstStride, width);
}
}
}
/*
for(x=0; x<width; x+=32){
volatile int i;
i+= dstBlock[x + 7*dstStride] + dstBlock[x + 8*dstStride]
+ dstBlock[x + 9*dstStride] + dstBlock[x +10*dstStride]
+ dstBlock[x +11*dstStride] + dstBlock[x +12*dstStride];
+ dstBlock[x +13*dstStride]
+ dstBlock[x +14*dstStride] + dstBlock[x +15*dstStride];
}*/
}
#if TEMPLATE_PP_3DNOW
__asm__ volatile("femms");
#elif TEMPLATE_PP_MMX
__asm__ volatile("emms");
#endif
 
#ifdef DEBUG_BRIGHTNESS
if(!isColor){
int max=1;
int i;
for(i=0; i<256; i++)
if(yHistogram[i] > max) max=yHistogram[i];
 
for(i=1; i<256; i++){
int x;
int start=yHistogram[i-1]/(max/256+1);
int end=yHistogram[i]/(max/256+1);
int inc= end > start ? 1 : -1;
for(x=start; x!=end+inc; x+=inc)
dst[ i*dstStride + x]+=128;
}
 
for(i=0; i<100; i+=2){
dst[ (white)*dstStride + i]+=128;
dst[ (black)*dstStride + i]+=128;
}
}
#endif
 
*c2= c; //copy local context back
 
}
 
#undef RENAME
#undef TEMPLATE_PP_C
#undef TEMPLATE_PP_ALTIVEC
#undef TEMPLATE_PP_MMX
#undef TEMPLATE_PP_MMXEXT
#undef TEMPLATE_PP_3DNOW
#undef TEMPLATE_PP_SSE2
/contrib/sdk/sources/ffmpeg/libpostproc/version.h
0,0 → 1,45
/*
* Version macros.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef POSTPROC_POSTPROCESS_VERSION_H
#define POSTPROC_POSTPROCESS_VERSION_H
 
/**
* @file
* Libpostproc version macros
*/
 
#include "libavutil/avutil.h"
 
#define LIBPOSTPROC_VERSION_MAJOR 52
#define LIBPOSTPROC_VERSION_MINOR 3
#define LIBPOSTPROC_VERSION_MICRO 100
 
#define LIBPOSTPROC_VERSION_INT AV_VERSION_INT(LIBPOSTPROC_VERSION_MAJOR, \
LIBPOSTPROC_VERSION_MINOR, \
LIBPOSTPROC_VERSION_MICRO)
#define LIBPOSTPROC_VERSION AV_VERSION(LIBPOSTPROC_VERSION_MAJOR, \
LIBPOSTPROC_VERSION_MINOR, \
LIBPOSTPROC_VERSION_MICRO)
#define LIBPOSTPROC_BUILD LIBPOSTPROC_VERSION_INT
 
#define LIBPOSTPROC_IDENT "postproc" AV_STRINGIFY(LIBPOSTPROC_VERSION)
 
#endif /* POSTPROC_POSTPROCESS_VERSION_H */