Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Last modification | View Log | RSS feed

  1. /*
  2.  * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
  3.  *
  4.  * FFmpeg is free software; you can redistribute it and/or modify
  5.  * it under the terms of the GNU General Public License as published by
  6.  * the Free Software Foundation; either version 2 of the License, or
  7.  * (at your option) any later version.
  8.  *
  9.  * FFmpeg is distributed in the hope that it will be useful,
  10.  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11.  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12.  * GNU General Public License for more details.
  13.  *
  14.  * You should have received a copy of the GNU General Public License along
  15.  * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
  16.  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  17.  */
  18.  
  19. /**
  20.  * @file
  21.  * Motion Compensation Deinterlacer
  22.  * Ported from MPlayer libmpcodecs/vf_mcdeint.c.
  23.  *
  24.  * Known Issues:
  25.  *
  26.  * The motion estimation is somewhat at the mercy of the input, if the
  27.  * input frames are created purely based on spatial interpolation then
  28.  * for example a thin black line or another random and not
 * interpolatable pattern will cause problems.
  30.  * Note: completely ignoring the "unavailable" lines during motion
  31.  * estimation did not look any better, so the most obvious solution
  32.  * would be to improve tfields or penalize problematic motion vectors.
  33.  *
  34.  * If non iterative ME is used then snow currently ignores the OBMC
  35.  * window and as a result sometimes creates artifacts.
  36.  *
  37.  * Only past frames are used, we should ideally use future frames too,
  38.  * something like filtering the whole movie in forward and then
 * backward direction seems like an interesting idea, but the current
  40.  * filter framework is FAR from supporting such things.
  41.  *
  42.  * Combining the motion compensated image with the input image also is
 * not as trivial as it seems; simply blindly taking even lines from
  44.  * one and odd ones from the other does not work at all as ME/MC
  45.  * sometimes has nothing in the previous frames which matches the
  46.  * current. The current algorithm has been found by trial and error
  47.  * and almost certainly can be improved...
  48.  */
  49.  
  50. #include "libavutil/opt.h"
  51. #include "libavutil/pixdesc.h"
  52. #include "libavcodec/avcodec.h"
  53. #include "avfilter.h"
  54. #include "formats.h"
  55. #include "internal.h"
  56.  
  57. enum MCDeintMode {
  58.     MODE_FAST = 0,
  59.     MODE_MEDIUM,
  60.     MODE_SLOW,
  61.     MODE_EXTRA_SLOW,
  62.     MODE_NB,
  63. };
  64.  
  65. enum MCDeintParity {
  66.     PARITY_TFF  =  0, ///< top field first
  67.     PARITY_BFF  =  1, ///< bottom field first
  68. };
  69.  
/** Per-instance state of the mcdeint filter. */
typedef struct {
    const AVClass *class;       ///< AVOptions class; must remain the first field
    enum MCDeintMode mode;      ///< selected quality/speed mode
    enum MCDeintParity parity;  ///< assumed field parity of the input
    int qp;                     ///< constant quantizer handed to the snow encoder
    AVCodecContext *enc_ctx;    ///< internal snow encoder used for ME/MC
} MCDeintContext;
  77.  
#define OFFSET(x) offsetof(MCDeintContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Helper to declare a named constant belonging to an option "unit". */
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }

/* User-visible options; defaults: mode=fast, parity=bff, qp=1. */
static const AVOption mcdeint_options[] = {
    { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_FAST}, 0, MODE_NB-1, FLAGS, .unit="mode" },
    CONST("fast",       NULL, MODE_FAST,       "mode"),
    CONST("medium",     NULL, MODE_MEDIUM,     "mode"),
    CONST("slow",       NULL, MODE_SLOW,       "mode"),
    CONST("extra_slow", NULL, MODE_EXTRA_SLOW, "mode"),

    { "parity", "set the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=PARITY_BFF}, -1, 1, FLAGS, "parity" },
    CONST("tff", "assume top field first",    PARITY_TFF, "parity"),
    CONST("bff", "assume bottom field first", PARITY_BFF, "parity"),

    { "qp", "set qp", OFFSET(qp), AV_OPT_TYPE_INT, {.i64=1}, INT_MIN, INT_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(mcdeint);
  98.  
/**
 * Configure the input link: allocate and open an internal snow encoder
 * that performs the motion estimation/compensation for the filter.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    MCDeintContext *mcdeint = ctx->priv;
    AVCodec *enc;
    AVCodecContext *enc_ctx;
    AVDictionary *opts = NULL;
    int ret;

    if (!(enc = avcodec_find_encoder(AV_CODEC_ID_SNOW))) {
        av_log(ctx, AV_LOG_ERROR, "Snow encoder is not enabled in libavcodec\n");
        return AVERROR(EINVAL);
    }

    mcdeint->enc_ctx = avcodec_alloc_context3(enc);
    if (!mcdeint->enc_ctx)
        return AVERROR(ENOMEM);
    enc_ctx = mcdeint->enc_ctx;
    enc_ctx->width  = inlink->w;
    enc_ctx->height = inlink->h;
    enc_ctx->time_base = (AVRational){1,25};  // meaningless
    enc_ctx->gop_size = 300;
    enc_ctx->max_b_frames = 0;
    enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
    enc_ctx->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
    enc_ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    enc_ctx->global_quality = 1;
    enc_ctx->me_cmp = enc_ctx->me_sub_cmp = FF_CMP_SAD;
    enc_ctx->mb_cmp = FF_CMP_SSE;
    /* snow private option: only perform motion estimation/compensation */
    av_dict_set(&opts, "memc_only", "1", 0);

    /* Each slower mode adds its settings on top of all faster ones:
     * the fallthrough below is intentional. */
    switch (mcdeint->mode) {
    case MODE_EXTRA_SLOW:
        enc_ctx->refs = 3;
        /* fall through */
    case MODE_SLOW:
        enc_ctx->me_method = ME_ITER;
        /* fall through */
    case MODE_MEDIUM:
        enc_ctx->flags |= CODEC_FLAG_4MV;
        enc_ctx->dia_size = 2;
        /* fall through */
    case MODE_FAST:
        enc_ctx->flags |= CODEC_FLAG_QPEL;
    }

    ret = avcodec_open2(enc_ctx, enc, &opts);
    av_dict_free(&opts);
    if (ret < 0)
        return ret;   /* enc_ctx is released later by uninit() */

    return 0;
}
  149.  
  150. static av_cold void uninit(AVFilterContext *ctx)
  151. {
  152.     MCDeintContext *mcdeint = ctx->priv;
  153.  
  154.     if (mcdeint->enc_ctx) {
  155.         avcodec_close(mcdeint->enc_ctx);
  156.         av_freep(&mcdeint->enc_ctx);
  157.     }
  158. }
  159.  
  160. static int query_formats(AVFilterContext *ctx)
  161. {
  162.     static const enum PixelFormat pix_fmts[] = {
  163.         AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
  164.     };
  165.  
  166.     ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
  167.  
  168.     return 0;
  169. }
  170.  
/**
 * Deinterlace one input frame.
 *
 * The frame is pushed through the internal snow encoder (configured for
 * motion estimation/compensation only); the encoder's reconstructed
 * frame (coded_frame) then provides motion-compensated pixels for the
 * field lines that need interpolation.  The parity of the field to keep
 * alternates on every frame (mcdeint->parity is toggled at the end).
 *
 * Takes ownership of @p inpic (always freed before returning).
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
    MCDeintContext *mcdeint = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *outpic, *frame_dec;
    AVPacket pkt;
    int x, y, i, ret, got_frame = 0;

    outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!outpic) {
        av_frame_free(&inpic);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(outpic, inpic);
    /* Constant quantizer for the snow encode; CODEC_FLAG_QSCALE is set. */
    inpic->quality = mcdeint->qp * FF_QP2LAMBDA;

    av_init_packet(&pkt);
    pkt.data = NULL;    // packet data will be allocated by the encoder
    pkt.size = 0;

    ret = avcodec_encode_video2(mcdeint->enc_ctx, &pkt, inpic, &got_frame);
    if (ret < 0)
        goto end;

    /* NOTE(review): coded_frame is read regardless of got_frame; with
     * max_b_frames=0 and CODEC_FLAG_LOW_DELAY this appears to always be
     * valid here — confirm against the snow encoder's behavior. */
    frame_dec = mcdeint->enc_ctx->coded_frame;

    /* Plane loop: 0 = luma, 1/2 = chroma (subsampled by 1 in each
     * dimension for YUV420P). */
    for (i = 0; i < 3; i++) {
        int is_chroma = !!i;
        int w = FF_CEIL_RSHIFT(inlink->w, is_chroma);
        int h = FF_CEIL_RSHIFT(inlink->h, is_chroma);
        int fils = frame_dec->linesize[i];
        int srcs = inpic    ->linesize[i];
        int dsts = outpic   ->linesize[i];

        /* First pass: rebuild the "missing" field lines by combining the
         * motion-compensated frame with a spatial (edge-directed)
         * interpolation of the source. */
        for (y = 0; y < h; y++) {
            if ((y ^ mcdeint->parity) & 1) {
                for (x = 0; x < w; x++) {
                    uint8_t *filp = &frame_dec->data[i][x + y*fils];
                    uint8_t *srcp = &inpic    ->data[i][x + y*srcs];
                    uint8_t *dstp = &outpic   ->data[i][x + y*dsts];

                    if (y > 0 && y < h-1){
                        int is_edge = x < 3 || x > w-4;
                        int diff0 = filp[-fils] - srcp[-srcs];
                        int diff1 = filp[+fils] - srcp[+srcs];
                        int temp = filp[0];

/* Clamp a horizontal offset so x+DELTA(j) stays inside [0, w-1]. */
#define DELTA(j) av_clip(j, -x, w-1-x)

/* Edge-direction score near the frame border (offsets clamped). */
#define GET_SCORE_EDGE(j)\
   FFABS(srcp[-srcs+DELTA(-1+(j))] - srcp[+srcs+DELTA(-1-(j))])+\
   FFABS(srcp[-srcs+DELTA(j)     ] - srcp[+srcs+DELTA(  -(j))])+\
   FFABS(srcp[-srcs+DELTA(1+(j)) ] - srcp[+srcs+DELTA( 1-(j))])

/* Edge-direction score in the frame interior (no clamping needed). */
#define GET_SCORE(j)\
   FFABS(srcp[-srcs-1+(j)] - srcp[+srcs-1-(j)])+\
   FFABS(srcp[-srcs  +(j)] - srcp[+srcs  -(j)])+\
   FFABS(srcp[-srcs+1+(j)] - srcp[+srcs+1-(j)])

/* NOTE: CHECK/CHECK_EDGE deliberately open two scopes that are closed by
 * the literal "}} }}" sequences at the call sites below — keeping the
 * best-scoring direction's diffs while probing directions -1,-2,+1,+2. */
#define CHECK_EDGE(j)\
    {   int score = GET_SCORE_EDGE(j);\
        if (score < spatial_score){\
            spatial_score = score;\
            diff0 = filp[-fils+DELTA(j)]    - srcp[-srcs+DELTA(j)];\
            diff1 = filp[+fils+DELTA(-(j))] - srcp[+srcs+DELTA(-(j))];\

#define CHECK(j)\
    {   int score = GET_SCORE(j);\
        if (score < spatial_score){\
            spatial_score= score;\
            diff0 = filp[-fils+(j)] - srcp[-srcs+(j)];\
            diff1 = filp[+fils-(j)] - srcp[+srcs-(j)];\

                        if (is_edge) {
                            int spatial_score = GET_SCORE_EDGE(0) - 1;
                            CHECK_EDGE(-1) CHECK_EDGE(-2) }} }}
                            CHECK_EDGE( 1) CHECK_EDGE( 2) }} }}
                        } else {
                            int spatial_score = GET_SCORE(0) - 1;
                            CHECK(-1) CHECK(-2) }} }}
                            CHECK( 1) CHECK( 2) }} }}
                        }


                        /* Blend MC pixel with the directional diffs; the
                         * asymmetric correction limits the adjustment by
                         * the disagreement between the two diffs. */
                        if (diff0 + diff1 > 0)
                            temp -= (diff0 + diff1 - FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
                        else
                            temp -= (diff0 + diff1 + FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
                        /* Branchless clip of temp to [0,255]:
                         * ~(temp>>31) is 0x00 for temp>255... wait —
                         * it is 0xFF for positive overflow, 0x00 for
                         * negative temp (sign bit set). */
                        *filp = *dstp = temp > 255U ? ~(temp>>31) : temp;
                    } else {
                        /* Top/bottom rows: take the MC pixel as-is. */
                        *dstp = *filp;
                    }
                }
            }
        }

        /* Second pass: copy the kept field's lines unchanged from the
         * input (also back into frame_dec, which seeds the encoder's
         * reference for the next frame). */
        for (y = 0; y < h; y++) {
            if (!((y ^ mcdeint->parity) & 1)) {
                for (x = 0; x < w; x++) {
                    frame_dec->data[i][x + y*fils] =
                    outpic   ->data[i][x + y*dsts] = inpic->data[i][x + y*srcs];
                }
            }
        }
    }
    /* Alternate which field is kept on the next frame. */
    mcdeint->parity ^= 1;

end:
    av_free_packet(&pkt);
    av_frame_free(&inpic);
    if (ret < 0) {
        av_frame_free(&outpic);
        return ret;
    }
    return ff_filter_frame(outlink, outpic);
}
  287.  
/* Single video input; frames are processed in filter_frame(). */
static const AVFilterPad mcdeint_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};
  297.  
/* Single video output. */
static const AVFilterPad mcdeint_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
  305.  
/* Filter registration descriptor for "mcdeint". */
AVFilter avfilter_vf_mcdeint = {
    .name          = "mcdeint",
    .description   = NULL_IF_CONFIG_SMALL("Apply motion compensating deinterlacing."),
    .priv_size     = sizeof(MCDeintContext),
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = mcdeint_inputs,
    .outputs       = mcdeint_outputs,
    .priv_class    = &mcdeint_class,
};
  316.