/*
 * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
 *               2010      James Darnley <james.darnley@gmail.com>
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "yadif.h"

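/* Per-slice parameters handed to filter_slice() by the threaded
 * executor: the output frame, the plane index, the plane's dimensions,
 * and the field parity/top-field-first flags. */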
typedef struct ThreadData {
    AVFrame *frame;
    int plane;
    int w, h;
    int parity;
    int tff;
} ThreadData;

#define CHECK(j)\
    {   int score = FFABS(cur[mrefs - 1 + (j)] - cur[prefs - 1 - (j)])\
                  + FFABS(cur[mrefs  +(j)] - cur[prefs  -(j)])\
                  + FFABS(cur[mrefs + 1 + (j)] - cur[prefs + 1 - (j)]);\
        if (score < spatial_score) {\
            spatial_score= score;\
            spatial_pred= (cur[mrefs  +(j)] + cur[prefs  -(j)])>>1;
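/* CHECK deliberately leaves two scopes open: the braces opened here are
 * closed by the "}} }}" sequences inside FILTER below, so each CHECK(n)
 * with a larger |n| only runs if the previous one improved the score. */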
/* The is_not_edge argument here controls when the code will enter a branch
 * which reads up to and including x-3 and x+3. */

#define FILTER(start, end, is_not_edge) \
    for (x = start;  x < end; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0])>>1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
        int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
        int spatial_pred = (c+e) >> 1; \
 \
        if (is_not_edge) {\
            int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c-e) \
                              + FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1; \
            CHECK(-1) CHECK(-2) }} }} \
            CHECK( 1) CHECK( 2) }} }} \
        }\
 \
        if (!(mode&2)) { \
            int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
            int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
            int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
            int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
 \
            diff = FFMAX3(diff, min, -max); \
        } \
 \
        if (spatial_pred > d + diff) \
           spatial_pred = d + diff; \
        else if (spatial_pred < d - diff) \
           spatial_pred = d - diff; \
 \
        dst[0] = spatial_pred; \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }

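/* The FILTER macro above is the yadif core: start from the vertical
 * average of the samples above and below (c, e), refine it with the
 * directional search in CHECK, and, unless the "nospatial" bit (mode&2)
 * is set, clamp the result to the range suggested by the temporal
 * neighbours. The C variants below instantiate it for 8- and 16-bit
 * samples. */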
static void filter_line_c(void *dst1,
                          void *prev1, void *cur1, void *next1,
                          int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    /* The function is called with the pointers already pointing to data[3] and
     * with 6 subtracted from the width.  This allows the FILTER macro to be
     * called so that it processes all the pixels normally.  A constant value of
     * true for is_not_edge lets the compiler ignore the if statement. */
    FILTER(0, w, 1)
}

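/* MAX_ALIGN matches the widest access of the optimized filter_line
 * implementations (presumably an 8-byte SIMD step); filter_slice() stops
 * filter_line short of the right edge, and the edge functions below
 * re-filter that tail together with the true border pixels. */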
#define MAX_ALIGN 8
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
                         int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    /* Only edge pixels need to be processed here.  A constant value of false
     * for is_not_edge should let the compiler ignore the whole branch. */
    FILTER(0, 3, 0)

    dst  = (uint8_t*)dst1  + w - (MAX_ALIGN-1);
    prev = (uint8_t*)prev1 + w - (MAX_ALIGN-1);
    cur  = (uint8_t*)cur1  + w - (MAX_ALIGN-1);
    next = (uint8_t*)next1 + w - (MAX_ALIGN-1);
    prev2 = (uint8_t*)(parity ? prev : cur);
    next2 = (uint8_t*)(parity ? cur  : next);

    FILTER(w - (MAX_ALIGN-1), w - 3, 1)
    FILTER(w - 3, w, 0)
}


static void filter_line_c_16bit(void *dst1,
                                void *prev1, void *cur1, void *next1,
                                int w, int prefs, int mrefs, int parity,
                                int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
    mrefs /= 2;
    prefs /= 2;

    FILTER(0, w, 1)
}

static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
                               int w, int prefs, int mrefs, int parity, int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
    mrefs /= 2;
    prefs /= 2;

    FILTER(0, 3, 0)

    dst   = (uint16_t*)dst1  + w - (MAX_ALIGN/2-1);
    prev  = (uint16_t*)prev1 + w - (MAX_ALIGN/2-1);
    cur   = (uint16_t*)cur1  + w - (MAX_ALIGN/2-1);
    next  = (uint16_t*)next1 + w - (MAX_ALIGN/2-1);
    prev2 = (uint16_t*)(parity ? prev : cur);
    next2 = (uint16_t*)(parity ? cur  : next);

    FILTER(w - (MAX_ALIGN/2-1), w - 3, 1)
    FILTER(w - 3, w, 0)
}

static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    YADIFContext *s = ctx->priv;
    ThreadData *td  = arg;
    int refs = s->cur->linesize[td->plane];
    int df = (s->csp->comp[td->plane].depth_minus1 + 8) / 8;
    int pix_3 = 3 * df;
    int slice_start = (td->h *  jobnr   ) / nb_jobs;
    int slice_end   = (td->h * (jobnr+1)) / nb_jobs;
    int y;

    /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
     * we need to call the c variant which avoids this for border pixels
     */
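    /* Only every other line is interpolated, selected by the parity of
     * the field being reconstructed; the lines of the existing field are
     * copied through unchanged by the memcpy branch below. */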
    for (y = slice_start; y < slice_end; y++) {
        if ((y ^ td->parity) & 1) {
            uint8_t *prev = &s->prev->data[td->plane][y * refs];
            uint8_t *cur  = &s->cur ->data[td->plane][y * refs];
            uint8_t *next = &s->next->data[td->plane][y * refs];
            uint8_t *dst  = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
            int     mode  = y == 1 || y + 2 == td->h ? 2 : s->mode;
            s->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
                           next + pix_3, td->w - (3 + MAX_ALIGN/df-1),
                           y + 1 < td->h ? refs : -refs,
                           y ? -refs : refs,
                           td->parity ^ td->tff, mode);
            s->filter_edges(dst, prev, cur, next, td->w,
                            y + 1 < td->h ? refs : -refs,
                            y ? -refs : refs,
                            td->parity ^ td->tff, mode);
        } else {
            memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]],
                   &s->cur->data[td->plane][y * refs], td->w * df);
        }
    }
    return 0;
}

static void filter(AVFilterContext *ctx, AVFrame *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff };
    int i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->width;
        int h = dstpic->height;

        if (i == 1 || i == 2) {
            w = FF_CEIL_RSHIFT(w, yadif->csp->log2_chroma_w);
            h = FF_CEIL_RSHIFT(h, yadif->csp->log2_chroma_h);
        }

        td.w       = w;
        td.h       = h;
        td.plane   = i;

        ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ctx->graph->nb_threads));
    }

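    /* The optimized filter_line/filter_edges implementations may leave
     * the CPU in MMX state on x86; emms_c() is a no-op elsewhere. */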
    emms_c();
}

static int return_frame(AVFilterContext *ctx, int is_second)
{
    YADIFContext *yadif = ctx->priv;
    AVFilterLink *link  = ctx->outputs[0];
    int tff, ret;

    if (yadif->parity == -1) {
        tff = yadif->cur->interlaced_frame ?
              yadif->cur->top_field_first : 1;
    } else {
        tff = yadif->parity ^ 1;
    }

    if (is_second) {
        yadif->out = ff_get_video_buffer(link, link->w, link->h);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        av_frame_copy_props(yadif->out, yadif->cur);
        yadif->out->interlaced_frame = 0;
    }

    filter(ctx, yadif->out, tff ^ !is_second, tff);

    if (is_second) {
        int64_t cur_pts  = yadif->cur->pts;
        int64_t next_pts = yadif->next->pts;

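        /* The output time base has twice the input resolution (see
         * config_props), so the first field carries pts 2*cur_pts and the
         * second field the midpoint towards the next frame:
         * 2*cur_pts + (next_pts - cur_pts) = cur_pts + next_pts. */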
        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            yadif->out->pts = cur_pts + next_pts;
        } else {
            yadif->out->pts = AV_NOPTS_VALUE;
        }
    }
    ret = ff_filter_frame(ctx->outputs[0], yadif->out);

    yadif->frame_pending = (yadif->mode&1) && !is_second;
    return ret;
}

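/* filter_slice() walks prev/cur/next with the single stride taken from
 * cur, so all three frames must share identical linesizes; frames that
 * differ get copied into freshly allocated buffers by fixstride(). */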
static int checkstride(YADIFContext *yadif, const AVFrame *a, const AVFrame *b)
{
    int i;
    for (i = 0; i < yadif->csp->nb_components; i++)
        if (a->linesize[i] != b->linesize[i])
            return 1;
    return 0;
}

static void fixstride(AVFilterLink *link, AVFrame *f)
{
    AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);
    if (!dst)
        return;
    av_frame_copy_props(dst, f);
    av_image_copy(dst->data, dst->linesize,
                  (const uint8_t **)f->data, f->linesize,
                  dst->format, dst->width, dst->height);
    av_frame_unref(f);
    av_frame_move_ref(f, dst);
    av_frame_free(&dst);
}

static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    YADIFContext *yadif = ctx->priv;

    av_assert0(frame);

    if (yadif->frame_pending)
        return_frame(ctx, 1);

    if (yadif->prev)
        av_frame_free(&yadif->prev);
    yadif->prev = yadif->cur;
    yadif->cur  = yadif->next;
    yadif->next = frame;

    if (!yadif->cur)
        return 0;

    if (checkstride(yadif, yadif->next, yadif->cur)) {
        av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
        fixstride(link, yadif->next);
    }
    if (checkstride(yadif, yadif->next, yadif->cur))
        fixstride(link, yadif->cur);
    if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))
        fixstride(link, yadif->prev);
    if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) {
        av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");
        return -1;
    }

    if ((yadif->deint && !yadif->cur->interlaced_frame) || ctx->is_disabled) {
        yadif->out  = av_frame_clone(yadif->cur);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        av_frame_free(&yadif->prev);
        if (yadif->out->pts != AV_NOPTS_VALUE)
            yadif->out->pts *= 2;
        return ff_filter_frame(ctx->outputs[0], yadif->out);
    }

    if (!yadif->prev &&
        !(yadif->prev = av_frame_clone(yadif->cur)))
        return AVERROR(ENOMEM);

    yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
    if (!yadif->out)
        return AVERROR(ENOMEM);

    av_frame_copy_props(yadif->out, yadif->cur);
    yadif->out->interlaced_frame = 0;

    if (yadif->out->pts != AV_NOPTS_VALUE)
        yadif->out->pts *= 2;

    return return_frame(ctx, 0);
}

static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *yadif = ctx->priv;

    if (yadif->frame_pending) {
        return_frame(ctx, 1);
        return 0;
    }

    do {
        int ret;

        if (yadif->eof)
            return AVERROR_EOF;

        ret = ff_request_frame(link->src->inputs[0]);

        if (ret == AVERROR_EOF && yadif->cur) {
            AVFrame *next = av_frame_clone(yadif->next);

            if (!next)
                return AVERROR(ENOMEM);

            next->pts = yadif->next->pts * 2 - yadif->cur->pts;

            filter_frame(link->src->inputs[0], next);
            yadif->eof = 1;
        } else if (ret < 0) {
            return ret;
        }
    } while (!yadif->cur);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    YADIFContext *yadif = ctx->priv;

    av_frame_free(&yadif->prev);
    av_frame_free(&yadif->cur );
    av_frame_free(&yadif->next);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUV420P9,
        AV_PIX_FMT_YUV422P9,
        AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10,
        AV_PIX_FMT_YUV422P10,
        AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV422P12,
        AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUV420P14,
        AV_PIX_FMT_YUV422P14,
        AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16,
        AV_PIX_FMT_YUV422P16,
        AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUVA422P,
        AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_GBRP,
        AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}

static int config_props(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *s = link->src->priv;

    link->time_base.num = link->src->inputs[0]->time_base.num;
    link->time_base.den = link->src->inputs[0]->time_base.den * 2;
    link->w             = link->src->inputs[0]->w;
    link->h             = link->src->inputs[0]->h;

    if (s->mode & 1)
        link->frame_rate = av_mul_q(link->src->inputs[0]->frame_rate, (AVRational){2,1});

    if (link->w < 3 || link->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
        return AVERROR(EINVAL);
    }

    s->csp = av_pix_fmt_desc_get(link->format);
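    /* depth_minus1 / 8 == 1 holds for any bit depth above 8, i.e. all
     * formats whose samples are stored as uint16_t. */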
    if (s->csp->comp[0].depth_minus1 / 8 == 1) {
        s->filter_line  = filter_line_c_16bit;
        s->filter_edges = filter_edges_16bit;
    } else {
        s->filter_line  = filter_line_c;
        s->filter_edges = filter_edges;
    }

    if (ARCH_X86)
        ff_yadif_init_x86(s);

    return 0;
}


#define OFFSET(x) offsetof(YADIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }

static const AVOption yadif_options[] = {
    { "mode",   "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"},
    CONST("send_frame",           "send one frame for each frame",                                     YADIF_MODE_SEND_FRAME,           "mode"),
    CONST("send_field",           "send one frame for each field",                                     YADIF_MODE_SEND_FIELD,           "mode"),
    CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"),
    CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"),

    { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
    CONST("tff",  "assume top field first",    YADIF_PARITY_TFF,  "parity"),
    CONST("bff",  "assume bottom field first", YADIF_PARITY_BFF,  "parity"),
    CONST("auto", "auto detect parity",        YADIF_PARITY_AUTO, "parity"),

    { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
    CONST("all",        "deinterlace all frames",                       YADIF_DEINT_ALL,         "deint"),
    CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED,  "deint"),

    { NULL }
};
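/* Illustrative use (not part of this file): from an ffmpeg command line
 * the options above are reached as e.g.
 *   -vf yadif=mode=send_field:parity=auto:deint=interlaced
 * which doubles the frame rate (mode&1 set) and touches only frames
 * flagged as interlaced. */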

AVFILTER_DEFINE_CLASS(yadif);

static const AVFilterPad avfilter_vf_yadif_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_yadif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter avfilter_vf_yadif = {
    .name          = "yadif",
    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
    .priv_size     = sizeof(YADIFContext),
    .priv_class    = &yadif_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_yadif_inputs,
    .outputs       = avfilter_vf_yadif_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};