/*
 * H263/MPEG4 backend for encoder and decoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * H263+ support.
 * Copyright (c) 2001 Juan J. Sierralta P
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * h263/mpeg4 codec.
 */

#include <limits.h>

#include "avcodec.h"
#include "mpegvideo.h"
#include "h263.h"
#include "h263data.h"
#include "mathops.h"
#include "mpegutils.h"
#include "unary.h"
#include "flv.h"
#include "mpeg4video.h"


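/* Propagate the motion information of the current macroblock (skip flag,
 * 16x16 or field motion vectors, field reference indices and, when
 * encoding, the macroblock type) into the per-macroblock tables of the
 * current picture. 8x8 vectors are already stored during parsing. */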
void ff_h263_update_motion_val(MpegEncContext * s){
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
               //FIXME a lot of that is only needed for !low_delay
    const int wrap = s->b8_stride;
    const int xy = s->block_index[0];

    s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;

    if(s->mv_type != MV_TYPE_8X8){
        int motion_x, motion_y;
        if (s->mb_intra) {
            motion_x = 0;
            motion_y = 0;
        } else if (s->mv_type == MV_TYPE_16X16) {
            motion_x = s->mv[0][0][0];
            motion_y = s->mv[0][0][1];
        } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
            int i;
            motion_x = s->mv[0][0][0] + s->mv[0][1][0];
            motion_y = s->mv[0][0][1] + s->mv[0][1][1];
            motion_x = (motion_x>>1) | (motion_x&1);
            for(i=0; i<2; i++){
                s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
                s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
            }
            s->current_picture.ref_index[0][4*mb_xy    ] =
            s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
            s->current_picture.ref_index[0][4*mb_xy + 2] =
            s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
        }

        /* no update if 8X8 because it has been done during parsing */
        s->current_picture.motion_val[0][xy][0]            = motion_x;
        s->current_picture.motion_val[0][xy][1]            = motion_y;
        s->current_picture.motion_val[0][xy + 1][0]        = motion_x;
        s->current_picture.motion_val[0][xy + 1][1]        = motion_y;
        s->current_picture.motion_val[0][xy + wrap][0]     = motion_x;
        s->current_picture.motion_val[0][xy + wrap][1]     = motion_y;
        s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
        s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
    }

    if(s->encoding){ //FIXME encoding MUST be cleaned up
        if (s->mv_type == MV_TYPE_8X8)
            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
        else if(s->mb_intra)
            s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
        else
            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
    }
}

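/* Predict the DC coefficient of block n from its left (A) and top (C)
 * neighbours; the value 1024 marks a neighbour that is unavailable for
 * prediction. Returns the predicted DC value and stores a pointer to
 * this block's slot in the DC table in *dc_val_ptr. */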
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
{
    int x, y, wrap, a, c, pred_dc;
    int16_t *dc_val;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + ((n & 2) >> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
    }
    /* B C
     * A X
     */
    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }
    /* just DC prediction */
    if (a != 1024 && c != 1024)
        pred_dc = (a + c) >> 1;
    else if (a != 1024)
        pred_dc = a;
    else
        pred_dc = c;

    /* we assume pred is positive */
    *dc_val_ptr = &dc_val[x + y * wrap];
    return pred_dc;
}

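/* In-loop deblocking filter (H.263 Annex J): filter the block edges around
 * the current macroblock, picking the quantizer from the current, top,
 * top-left or left macroblock as appropriate and skipping edges that
 * belong entirely to skipped macroblocks. */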
void ff_h263_loop_filter(MpegEncContext * s){
    int qp_c;
    const int linesize  = s->linesize;
    const int uvlinesize= s->uvlinesize;
    const int xy = s->mb_y * s->mb_stride + s->mb_x;
    uint8_t *dest_y = s->dest[0];
    uint8_t *dest_cb= s->dest[1];
    uint8_t *dest_cr= s->dest[2];

//    if(s->pict_type==AV_PICTURE_TYPE_B && !s->readable) return;

    /*
       Diag Top
       Left Center
    */
    if (!IS_SKIP(s->current_picture.mb_type[xy])) {
        qp_c= s->qscale;
        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize,     linesize, qp_c);
        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
    }else
        qp_c= 0;

    if(s->mb_y){
        int qp_dt, qp_tt, qp_tc;

        if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
            qp_tt=0;
        else
            qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];

        if(qp_c)
            qp_tc= qp_c;
        else
            qp_tc= qp_tt;

        if(qp_tc){
            const int chroma_qp= s->chroma_qscale_table[qp_tc];
            s->h263dsp.h263_v_loop_filter(dest_y,     linesize, qp_tc);
            s->h263dsp.h263_v_loop_filter(dest_y + 8, linesize, qp_tc);

            s->h263dsp.h263_v_loop_filter(dest_cb, uvlinesize, chroma_qp);
            s->h263dsp.h263_v_loop_filter(dest_cr, uvlinesize, chroma_qp);
        }

        if(qp_tt)
            s->h263dsp.h263_h_loop_filter(dest_y - 8 * linesize + 8, linesize, qp_tt);

        if(s->mb_x){
            if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
                qp_dt= qp_tt;
            else
                qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];

            if(qp_dt){
                const int chroma_qp= s->chroma_qscale_table[qp_dt];
                s->h263dsp.h263_h_loop_filter(dest_y  - 8 * linesize,   linesize,   qp_dt);
                s->h263dsp.h263_h_loop_filter(dest_cb - 8 * uvlinesize, uvlinesize, chroma_qp);
                s->h263dsp.h263_h_loop_filter(dest_cr - 8 * uvlinesize, uvlinesize, chroma_qp);
            }
        }
    }

    if(qp_c){
        s->h263dsp.h263_h_loop_filter(dest_y + 8, linesize, qp_c);
        if(s->mb_y + 1 == s->mb_height)
            s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
    }

    if(s->mb_x){
        int qp_lc;
        if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
            qp_lc= qp_c;
        else
            qp_lc = s->current_picture.qscale_table[xy - 1];

        if(qp_lc){
            s->h263dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
            if(s->mb_y + 1 == s->mb_height){
                const int chroma_qp= s->chroma_qscale_table[qp_lc];
                s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize, linesize, qp_lc);
                s->h263dsp.h263_h_loop_filter(dest_cb, uvlinesize, chroma_qp);
                s->h263dsp.h263_h_loop_filter(dest_cr, uvlinesize, chroma_qp);
            }
        }
    }
}

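/* AC/DC prediction for advanced intra coding: predict the DC coefficient
 * of block n from its left or top neighbour and, when s->ac_pred is set,
 * also add the neighbour's first column (left prediction) or first row
 * (top prediction) of AC coefficients. The reconstructed DC and the first
 * AC row/column are then written back into the prediction tables. */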
void ff_h263_pred_acdc(MpegEncContext * s, int16_t *block, int n)
{
    int x, y, wrap, a, c, pred_dc, scale, i;
    int16_t *dc_val, *ac_val, *ac_val1;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + (n>> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
        ac_val = s->ac_val[0][0];
        scale = s->y_dc_scale;
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
        ac_val = s->ac_val[n - 4 + 1][0];
        scale = s->c_dc_scale;
    }

    ac_val += ((y) * wrap + (x)) * 16;
    ac_val1 = ac_val;

    /* B C
     * A X
     */
    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }

    if (s->ac_pred) {
        pred_dc = 1024;
        if (s->h263_aic_dir) {
            /* left prediction */
            if (a != 1024) {
                ac_val -= 16;
                for(i=1;i<8;i++) {
                    block[s->idsp.idct_permutation[i << 3]] += ac_val[i];
                }
                pred_dc = a;
            }
        } else {
            /* top prediction */
            if (c != 1024) {
                ac_val -= 16 * wrap;
                for(i=1;i<8;i++) {
                    block[s->idsp.idct_permutation[i]] += ac_val[i + 8];
                }
                pred_dc = c;
            }
        }
    } else {
        /* just DC prediction */
        if (a != 1024 && c != 1024)
            pred_dc = (a + c) >> 1;
        else if (a != 1024)
            pred_dc = a;
        else
            pred_dc = c;
    }

    /* we assume pred is positive */
    block[0]=block[0]*scale + pred_dc;

    if (block[0] < 0)
        block[0] = 0;
    else
        block[0] |= 1;

    /* Update AC/DC tables */
    dc_val[(x) + (y) * wrap] = block[0];

    /* left copy */
    for(i=1;i<8;i++)
        ac_val1[i]     = block[s->idsp.idct_permutation[i << 3]];
    /* top copy */
    for(i=1;i<8;i++)
        ac_val1[8 + i] = block[s->idsp.idct_permutation[i]];
}

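/* Median motion vector prediction: predict the motion vector of the given
 * 8x8 block as the median of the left (A), top (B) and top-right (C)
 * neighbouring vectors, with special cases on the first line of a slice
 * and at resync boundaries. The predicted vector is returned in (*px, *py);
 * the return value points to this block's motion_val entry. */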
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
                             int *px, int *py)
{
    int wrap;
    int16_t *A, *B, *C, (*mot_val)[2];
    static const int off[4]= {2, 1, 1, -1};

    wrap = s->b8_stride;
    mot_val = s->current_picture.motion_val[dir] + s->block_index[block];

    A = mot_val[ - 1];
    /* special case for first (slice) line */
    if (s->first_slice_line && block<3) {
        // we can't just change some MVs to simulate that as we need them for the B frames (and ME)
        // and if we ever support non-rectangular objects then we need to do a few ifs here anyway :(
        if(block==0){ //most common case
            if(s->mb_x  == s->resync_mb_x){ //rare
                *px= *py = 0;
            }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                if(s->mb_x==0){
                    *px = C[0];
                    *py = C[1];
                }else{
                    *px = mid_pred(A[0], 0, C[0]);
                    *py = mid_pred(A[1], 0, C[1]);
                }
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else if(block==1){
            if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                *px = mid_pred(A[0], 0, C[0]);
                *py = mid_pred(A[1], 0, C[1]);
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else{ /* block==2*/
            B = mot_val[ - wrap];
            C = mot_val[off[block] - wrap];
            if(s->mb_x == s->resync_mb_x) //rare
                A[0]=A[1]=0;

            *px = mid_pred(A[0], B[0], C[0]);
            *py = mid_pred(A[1], B[1], C[1]);
        }
    } else {
        B = mot_val[ - wrap];
        C = mot_val[off[block] - wrap];
        *px = mid_pred(A[0], B[0], C[0]);
        *py = mid_pred(A[1], B[1], C[1]);
    }
    return *mot_val;
}