
/*
 * Copyright (C) 2005 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of MPlayer.
 *
 * MPlayer is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * MPlayer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with MPlayer; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */


#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <math.h>

#include "config.h"

#include "mp_msg.h"
#include "cpudetect.h"

#if HAVE_MALLOC_H
#include <malloc.h>
#endif

#include "libavutil/mem.h"

#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
#include "libvo/fastmemcpy.h"

#define XMIN(a,b) ((a) < (b) ? (a) : (b))
#define XMAX(a,b) ((a) > (b) ? (a) : (b))

//===========================================================================//
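// 8x8 ordered-dither matrix (values 0..63); filter() adds dither[y&7][x&7]
// to the requantized sample before the final >>6 to spread rounding error.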
static const uint8_t  __attribute__((aligned(8))) dither[8][8]={
{  0,  48,  12,  60,   3,  51,  15,  63, },
{ 32,  16,  44,  28,  35,  19,  47,  31, },
{  8,  56,   4,  52,  11,  59,   7,  55, },
{ 40,  24,  36,  20,  43,  27,  39,  23, },
{  2,  50,  14,  62,   1,  49,  13,  61, },
{ 34,  18,  46,  30,  33,  17,  45,  29, },
{ 10,  58,   6,  54,   9,  57,   5,  53, },
{ 42,  26,  38,  22,  41,  25,  37,  21, },
};

struct vf_priv_s {
    int qp;
    int mode;
    int mpeg2;
    int temp_stride;
    uint8_t *src;
};
#if 0
static inline void dct7_c(int16_t *dst, int s0, int s1, int s2, int s3, int step){
    int s, d;
    int dst2[64];
//#define S0 (1024/0.37796447300922719759)
#define C0 ((int)(1024*0.37796447300922719759+0.5)) //sqrt(1/7)
#define C1 ((int)(1024*0.53452248382484879308/6+0.5)) //sqrt(2/7)/6

#define C2 ((int)(1024*0.45221175985034745004/2+0.5))
#define C3 ((int)(1024*0.36264567479870879474/2+0.5))

//0.1962505182412941918 0.0149276808419397944-0.2111781990832339584
#define C4 ((int)(1024*0.1962505182412941918+0.5))
#define C5 ((int)(1024*0.0149276808419397944+0.5))
//#define C6 ((int)(1024*0.2111781990832339584+0.5))
#if 0
    s= s0 + s1 + s2;
    dst[0*step] = ((s + s3)*C0 + 512) >> 10;
    s= (s - 6*s3)*C1 + 512;
    d= (s0-s2)*C4 + (s1-s2)*C5;
    dst[1*step] = (s + 2*d)>>10;
    s -= d;
    d= (s1-s0)*C2 + (s1-s2)*C3;
    dst[2*step] = (s + d)>>10;
    dst[3*step] = (s - d)>>10;
#elif 1
    s = s3+s3;
    s3= s-s0;
    s0= s+s0;
    s = s2+s1;
    s2= s2-s1;
    dst[0*step]= s0 + s;
    dst[2*step]= s0 - s;
    dst[1*step]= 2*s3 +   s2;
    dst[3*step]=   s3 - 2*s2;
#else
    int i,j,n=7;
    for(i=0; i<7; i+=2){
        dst2[i*step/2]= 0;
        for(j=0; j<4; j++)
            dst2[i*step/2] += src[j*step] * cos(i*M_PI/n*(j+0.5)) * sqrt((i?2.0:1.0)/n);
        if(fabs(dst2[i*step/2] - dst[i*step/2]) > 20)
            printf("%d %d %d (%d %d %d %d) -> (%d %d %d %d)\n", i,dst2[i*step/2], dst[i*step/2],src[0*step], src[1*step], src[2*step], src[3*step], dst[0*step], dst[1*step],dst[2*step],dst[3*step]);
    }
#endif
}
#endif

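// dctA_c: vertical pass of pp7's 7-point transform. For 4 adjacent columns,
// the 7-pixel window (3 rows above and below the centre row) is folded
// symmetrically and reduced to 4 coefficients, stored 4 per column in dst.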
static inline void dctA_c(int16_t *dst, uint8_t *src, int stride){
    int i;

    for(i=0; i<4; i++){
        int s0=  src[0*stride] + src[6*stride];
        int s1=  src[1*stride] + src[5*stride];
        int s2=  src[2*stride] + src[4*stride];
        int s3=  src[3*stride];
        int s= s3+s3;
        s3= s-s0;
        s0= s+s0;
        s = s2+s1;
        s2= s2-s1;
        dst[0]= s0 + s;
        dst[2]= s0 - s;
        dst[1]= 2*s3 +   s2;
        dst[3]=   s3 - 2*s2;
        src++;
        dst+=4;
    }
}

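// dctB_c: horizontal pass, combining the per-column coefficients from dctA_c
// across 7 neighbouring columns into the final 4x4 block for one output pixel.
// The dctB function pointer below selects this or the MMX version (vf_open()).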
static void dctB_c(int16_t *dst, int16_t *src){
    int i;

    for(i=0; i<4; i++){
        int s0=  src[0*4] + src[6*4];
        int s1=  src[1*4] + src[5*4];
        int s2=  src[2*4] + src[4*4];
        int s3=  src[3*4];
        int s= s3+s3;
        s3= s-s0;
        s0= s+s0;
        s = s2+s1;
        s2= s2-s1;
        dst[0*4]= s0 + s;
        dst[2*4]= s0 - s;
        dst[1*4]= 2*s3 +   s2;
        dst[3*4]=   s3 - 2*s2;
        src++;
        dst++;
    }
}

#if HAVE_MMX
static void dctB_mmx(int16_t *dst, int16_t *src){
    __asm__ volatile (
        "movq  (%0), %%mm0      \n\t"
        "movq  1*4*2(%0), %%mm1 \n\t"
        "paddw 6*4*2(%0), %%mm0 \n\t"
        "paddw 5*4*2(%0), %%mm1 \n\t"
        "movq  2*4*2(%0), %%mm2 \n\t"
        "movq  3*4*2(%0), %%mm3 \n\t"
        "paddw 4*4*2(%0), %%mm2 \n\t"
        "paddw %%mm3, %%mm3     \n\t" //s
        "movq %%mm3, %%mm4      \n\t" //s
        "psubw %%mm0, %%mm3     \n\t" //s-s0
        "paddw %%mm0, %%mm4     \n\t" //s+s0
        "movq %%mm2, %%mm0      \n\t" //s2
        "psubw %%mm1, %%mm2     \n\t" //s2-s1
        "paddw %%mm1, %%mm0     \n\t" //s2+s1
        "movq %%mm4, %%mm1      \n\t" //s0'
        "psubw %%mm0, %%mm4     \n\t" //s0'-s'
        "paddw %%mm0, %%mm1     \n\t" //s0'+s'
        "movq %%mm3, %%mm0      \n\t" //s3'
        "psubw %%mm2, %%mm3     \n\t"
        "psubw %%mm2, %%mm3     \n\t"
        "paddw %%mm0, %%mm2     \n\t"
        "paddw %%mm0, %%mm2     \n\t"
        "movq %%mm1, (%1)       \n\t"
        "movq %%mm4, 2*4*2(%1)  \n\t"
        "movq %%mm2, 1*4*2(%1)  \n\t"
        "movq %%mm3, 3*4*2(%1)  \n\t"
        :: "r" (src), "r"(dst)
    );
}
#endif

static void (*dctB)(int16_t *dst, int16_t *src)= dctB_c;

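// Per-coefficient normalisation: factor[i] = N/(Ni*Nj) rescales each of the
// 16 transform coefficients; thres2[][] (filled by init_thres2) holds the
// matching per-QP thresholds used by the requantizers (SNx = sqrt(Nx)).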
#define N0 4
#define N1 5
#define N2 10
#define SN0 2
#define SN1 2.2360679775
#define SN2 3.16227766017
#define N (1<<16)

static const int factor[16]={
    N/(N0*N0), N/(N0*N1), N/(N0*N0),N/(N0*N2),
    N/(N1*N0), N/(N1*N1), N/(N1*N0),N/(N1*N2),
    N/(N0*N0), N/(N0*N1), N/(N0*N0),N/(N0*N2),
    N/(N2*N0), N/(N2*N1), N/(N2*N0),N/(N2*N2),
};

static const int thres[16]={
    N/(SN0*SN0), N/(SN0*SN2), N/(SN0*SN0),N/(SN0*SN2),
    N/(SN2*SN0), N/(SN2*SN2), N/(SN2*SN0),N/(SN2*SN2),
    N/(SN0*SN0), N/(SN0*SN2), N/(SN0*SN0),N/(SN0*SN2),
    N/(SN2*SN0), N/(SN2*SN2), N/(SN2*SN0),N/(SN2*SN2),
};

static int thres2[99][16];

static void init_thres2(void){
    int qp, i;
    int bias= 0; //FIXME

    for(qp=0; qp<99; qp++){
        for(i=0; i<16; i++){
            thres2[qp][i]= ((i&1)?SN2:SN0) * ((i&4)?SN2:SN0) * XMAX(1,qp) * (1<<2) - 1 - bias;
        }
    }
}

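// Requantizers: each sums the rescaled coefficients whose magnitude exceeds
// the QP-dependent threshold T. "hard" keeps such coefficients unchanged,
// "soft" shrinks them towards zero by T, and "medium" ramps linearly between
// dropping and keeping over the range (T, 2T). The rounded sum is the
// filtered sample before dithering.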
static int hardthresh_c(int16_t *src, int qp){
    int i;
    int a;

    a= src[0] * factor[0];
    for(i=1; i<16; i++){
        unsigned int threshold1= thres2[qp][i];
        unsigned int threshold2= (threshold1<<1);
        int level= src[i];
        if(((unsigned)(level+threshold1))>threshold2){
            a += level * factor[i];
        }
    }
    return (a + (1<<11))>>12;
}

static int mediumthresh_c(int16_t *src, int qp){
    int i;
    int a;

    a= src[0] * factor[0];
    for(i=1; i<16; i++){
        unsigned int threshold1= thres2[qp][i];
        unsigned int threshold2= (threshold1<<1);
        int level= src[i];
        if(((unsigned)(level+threshold1))>threshold2){
            if(((unsigned)(level+2*threshold1))>2*threshold2){
                a += level * factor[i];
            }else{
                if(level>0) a+= 2*(level - (int)threshold1)*factor[i];
                else        a+= 2*(level + (int)threshold1)*factor[i];
            }
        }
    }
    return (a + (1<<11))>>12;
}

static int softthresh_c(int16_t *src, int qp){
    int i;
    int a;

    a= src[0] * factor[0];
    for(i=1; i<16; i++){
        unsigned int threshold1= thres2[qp][i];
        unsigned int threshold2= (threshold1<<1);
        int level= src[i];
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0) a+= (level - (int)threshold1)*factor[i];
            else        a+= (level + (int)threshold1)*factor[i];
        }
    }
    return (a + (1<<11))>>12;
}

static int (*requantize)(int16_t *src, int qp)= hardthresh_c;

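// filter(): copies the plane into a padded buffer with an 8-pixel mirrored
// border, then for every output pixel runs dctA/dctB on the surrounding
// window, requantizes the coefficients with that block's QP (or the fixed
// qp parameter), and writes back the dithered, clipped result.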
static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, uint8_t *qp_store, int qp_stride, int is_luma){
    int x, y;
    const int stride= is_luma ? p->temp_stride : ((width+16+15)&(~15));
    uint8_t  *p_src= p->src + 8*stride;
    int16_t *block= (int16_t *)p->src;
    int16_t *temp= (int16_t *)(p->src + 32);

    if (!src || !dst) return; // HACK avoid crash for Y8 colourspace
    for(y=0; y<height; y++){
        int index= 8 + 8*stride + y*stride;
        fast_memcpy(p_src + index, src + y*src_stride, width);
        for(x=0; x<8; x++){
            p_src[index         - x - 1]= p_src[index +         x    ];
            p_src[index + width + x    ]= p_src[index + width - x - 1];
        }
    }
    for(y=0; y<8; y++){
        fast_memcpy(p_src + (       7-y)*stride, p_src + (       y+8)*stride, stride);
        fast_memcpy(p_src + (height+8+y)*stride, p_src + (height-y+7)*stride, stride);
    }
    //FIXME (try edge emu)

    for(y=0; y<height; y++){
        for(x=-8; x<0; x+=4){
            const int index= x + y*stride + (8-3)*(1+stride) + 8; //FIXME silly offset
            uint8_t *src  = p_src + index;
            int16_t *tp= temp+4*x;

            dctA_c(tp+4*8, src, stride);
        }
        for(x=0; x<width; ){
            const int qps= 3 + is_luma;
            int qp;
            int end= XMIN(x+8, width);

            if(p->qp)
                qp= p->qp;
            else{
                qp= qp_store[ (XMIN(x, width-1)>>qps) + (XMIN(y, height-1)>>qps) * qp_stride];
                qp=norm_qscale(qp, p->mpeg2);
            }
            for(; x<end; x++){
                const int index= x + y*stride + (8-3)*(1+stride) + 8; //FIXME silly offset
                uint8_t *src  = p_src + index;
                int16_t *tp= temp+4*x;
                int v;

                if((x&3)==0)
                    dctA_c(tp+4*8, src, stride);

                dctB(block, tp);

                v= requantize(block, qp);
                v= (v + dither[y&7][x&7])>>6;
                if((unsigned)v > 255)
                    v= (-v)>>31;
                dst[x + y*dst_stride]= v;
            }
        }
    }
}

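// config(): allocate the intermediate buffer used by filter(); stride and
// height are rounded up to a multiple of 16 after adding the 16-pixel border.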
static int config(struct vf_instance *vf,
    int width, int height, int d_width, int d_height,
    unsigned int flags, unsigned int outfmt){
    int h= (height+16+15)&(~15);

    vf->priv->temp_stride= (width+16+15)&(~15);
    vf->priv->src = av_malloc(vf->priv->temp_stride*(h+8)*sizeof(uint8_t));

    return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
}

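// get_image(): direct-rendering hook. Unless the image must be preserved,
// the decoder is pointed at the next filter's buffer so put_image() can
// filter in place (the source is copied into the padded buffer first).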
static void get_image(struct vf_instance *vf, mp_image_t *mpi){
    if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
    // ok, we can do pp in-place (or pp disabled):
    vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
        mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
    mpi->planes[0]=vf->dmpi->planes[0];
    mpi->stride[0]=vf->dmpi->stride[0];
    mpi->width=vf->dmpi->width;
    if(mpi->flags&MP_IMGFLAG_PLANAR){
        mpi->planes[1]=vf->dmpi->planes[1];
        mpi->planes[2]=vf->dmpi->planes[2];
        mpi->stride[1]=vf->dmpi->stride[1];
        mpi->stride[2]=vf->dmpi->stride[2];
    }
    mpi->flags|=MP_IMGFLAG_DIRECT;
}

static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
    mp_image_t *dmpi;

    if(mpi->flags&MP_IMGFLAG_DIRECT){
        dmpi=vf->dmpi;
    }else{
        // no DR, so get a new image! hope we'll get DR buffer:
        dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
            MP_IMGTYPE_TEMP,
            MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
            mpi->width,mpi->height);
        ff_vf_clone_mpi_attributes(dmpi, mpi);
    }

    vf->priv->mpeg2= mpi->qscale_type;
    if(mpi->qscale || vf->priv->qp){
        filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, mpi->qscale, mpi->qstride, 1);
        filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, mpi->qscale, mpi->qstride, 0);
        filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, mpi->qscale, mpi->qstride, 0);
    }else{
        memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
        memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
        memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
    }

#if HAVE_MMX
    if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
#endif
#if HAVE_MMX2
    if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
#endif

    return ff_vf_next_put_image(vf,dmpi, pts);
}

static void uninit(struct vf_instance *vf){
    if(!vf->priv) return;

    av_free(vf->priv->src);
    vf->priv->src= NULL;

    free(vf->priv);
    vf->priv=NULL;
}

//===========================================================================//
static int query_format(struct vf_instance *vf, unsigned int fmt){
    switch(fmt){
    case IMGFMT_YVU9:
    case IMGFMT_IF09:
    case IMGFMT_YV12:
    case IMGFMT_I420:
    case IMGFMT_IYUV:
    case IMGFMT_CLPL:
    case IMGFMT_Y800:
    case IMGFMT_Y8:
    case IMGFMT_444P:
    case IMGFMT_422P:
    case IMGFMT_411P:
        return ff_vf_next_query_format(vf,fmt);
    }
    return 0;
}

static int control(struct vf_instance *vf, int request, void* data){
    return ff_vf_next_control(vf,request,data);
}

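// vf_open(): parses the "qp:mode" filter arguments (mode 0 = hard,
// 1 = soft, 2/default = medium thresholding), initialises the threshold
// table and switches dctB to the MMX version when available.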
static int vf_open(vf_instance_t *vf, char *args){
    vf->config=config;
    vf->put_image=put_image;
    vf->get_image=get_image;
    vf->query_format=query_format;
    vf->uninit=uninit;
    vf->control= control;
    vf->priv=malloc(sizeof(struct vf_priv_s));
    memset(vf->priv, 0, sizeof(struct vf_priv_s));

    if (args) sscanf(args, "%d:%d", &vf->priv->qp, &vf->priv->mode);

    if(vf->priv->qp < 0)
        vf->priv->qp = 0;

    init_thres2();

    switch(vf->priv->mode){
        case 0: requantize= hardthresh_c; break;
        case 1: requantize= softthresh_c; break;
        default:
        case 2: requantize= mediumthresh_c; break;
    }

#if HAVE_MMX
    if(ff_gCpuCaps.hasMMX){
        dctB= dctB_mmx;
    }
#endif
#if 0
    if(ff_gCpuCaps.hasMMX){
        switch(vf->priv->mode){
            case 0: requantize= hardthresh_mmx; break;
            case 1: requantize= softthresh_mmx; break;
        }
    }
#endif

    return 1;
}

const vf_info_t ff_vf_info_pp7 = {
    "postprocess 7",
    "pp7",
    "Michael Niedermayer",
    "",
    vf_open,
    NULL
};