/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * video crop filter
 */
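
/* Illustrative usage (not part of the original file): in a filtergraph the
 * options defined at the end of this file can be given by name, e.g.
 *     crop=w=in_w/2:h=in_h/2:x=in_w/4:y=in_h/4
 * Width and height default to the input size ("iw"/"ih") and x/y default to
 * centering the cropped area, per the defaults in the crop_options table. */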

#include <stdio.h>

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/eval.h"
#include "libavutil/avstring.h"
#include "libavutil/internal.h"
#include "libavutil/libm.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"

static const char *const var_names[] = {
    "in_w", "iw",   ///< width  of the input video
    "in_h", "ih",   ///< height of the input video
    "out_w", "ow",  ///< width  of the cropped video
    "out_h", "oh",  ///< height of the cropped video
    "a",
    "sar",
    "dar",
    "hsub",
    "vsub",
    "x",
    "y",
    "n",            ///< number of frame
    "pos",          ///< position in the file
    "t",            ///< timestamp expressed in seconds
    NULL
};

enum var_name {
    VAR_IN_W,  VAR_IW,
    VAR_IN_H,  VAR_IH,
    VAR_OUT_W, VAR_OW,
    VAR_OUT_H, VAR_OH,
    VAR_A,
    VAR_SAR,
    VAR_DAR,
    VAR_HSUB,
    VAR_VSUB,
    VAR_X,
    VAR_Y,
    VAR_N,
    VAR_POS,
    VAR_T,
    VAR_VARS_NB
};

typedef struct CropContext {
    const AVClass *class;
    int  x;             ///< x offset of the non-cropped area with respect to the input area
    int  y;             ///< y offset of the non-cropped area with respect to the input area
    int  w;             ///< width of the cropped area
    int  h;             ///< height of the cropped area

    AVRational out_sar; ///< output sample aspect ratio
    int keep_aspect;    ///< keep display aspect ratio when cropping

    int max_step[4];    ///< max pixel step for each plane, expressed as a number of bytes
    int hsub, vsub;     ///< chroma subsampling
    char *x_expr, *y_expr, *w_expr, *h_expr;
    AVExpr *x_pexpr, *y_pexpr;  /* parsed expressions for x and y */
    double var_values[VAR_VARS_NB];
} CropContext;

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    int fmt;

    for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
        if (!(desc->flags & (AV_PIX_FMT_FLAG_HWACCEL | AV_PIX_FMT_FLAG_BITSTREAM)) &&
            !((desc->log2_chroma_w || desc->log2_chroma_h) && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR)))
            ff_add_format(&formats, fmt);
    }

    return ff_set_common_formats(ctx, formats);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    CropContext *s = ctx->priv;

    av_expr_free(s->x_pexpr);
    s->x_pexpr = NULL;
    av_expr_free(s->y_pexpr);
    s->y_pexpr = NULL;
}

static inline int normalize_double(int *n, double d)
{
    int ret = 0;

    if (isnan(d)) {
        ret = AVERROR(EINVAL);
    } else if (d > INT_MAX || d < INT_MIN) {
        *n = d > INT_MAX ? INT_MAX : INT_MIN;
        ret = AVERROR(EINVAL);
    } else
        *n = round(d);

    return ret;
}

static int config_input(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    CropContext *s = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(link->format);
    int ret;
    const char *expr;
    double res;

    s->var_values[VAR_IN_W]  = s->var_values[VAR_IW] = ctx->inputs[0]->w;
    s->var_values[VAR_IN_H]  = s->var_values[VAR_IH] = ctx->inputs[0]->h;
    s->var_values[VAR_A]     = (float) link->w / link->h;
    s->var_values[VAR_SAR]   = link->sample_aspect_ratio.num ? av_q2d(link->sample_aspect_ratio) : 1;
    s->var_values[VAR_DAR]   = s->var_values[VAR_A] * s->var_values[VAR_SAR];
    s->var_values[VAR_HSUB]  = 1<<pix_desc->log2_chroma_w;
    s->var_values[VAR_VSUB]  = 1<<pix_desc->log2_chroma_h;
    s->var_values[VAR_X]     = NAN;
    s->var_values[VAR_Y]     = NAN;
    s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = NAN;
    s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = NAN;
    s->var_values[VAR_N]     = 0;
    s->var_values[VAR_T]     = NAN;
    s->var_values[VAR_POS]   = NAN;

    av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
    s->hsub = pix_desc->log2_chroma_w;
    s->vsub = pix_desc->log2_chroma_h;

    if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
                                      var_names, s->var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail_expr;
    s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
                                      var_names, s->var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail_expr;
    s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = res;
    /* evaluate again ow as it may depend on oh */
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
                                      var_names, s->var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail_expr;

    s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = res;
    if (normalize_double(&s->w, s->var_values[VAR_OUT_W]) < 0 ||
        normalize_double(&s->h, s->var_values[VAR_OUT_H]) < 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Too big value or invalid expression for out_w/ow or out_h/oh. "
               "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
               s->w_expr, s->h_expr);
        return AVERROR(EINVAL);
    }
    s->w &= ~((1 << s->hsub) - 1);
    s->h &= ~((1 << s->vsub) - 1);

    av_expr_free(s->x_pexpr);
    av_expr_free(s->y_pexpr);
    s->x_pexpr = s->y_pexpr = NULL;
    if ((ret = av_expr_parse(&s->x_pexpr, s->x_expr, var_names,
                             NULL, NULL, NULL, NULL, 0, ctx)) < 0 ||
        (ret = av_expr_parse(&s->y_pexpr, s->y_expr, var_names,
                             NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return AVERROR(EINVAL);

    if (s->keep_aspect) {
        AVRational dar = av_mul_q(link->sample_aspect_ratio,
                                  (AVRational){ link->w, link->h });
        av_reduce(&s->out_sar.num, &s->out_sar.den,
                  dar.num * s->h, dar.den * s->w, INT_MAX);
    } else
        s->out_sar = link->sample_aspect_ratio;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d -> w:%d h:%d sar:%d/%d\n",
           link->w, link->h, link->sample_aspect_ratio.num, link->sample_aspect_ratio.den,
           s->w, s->h, s->out_sar.num, s->out_sar.den);

    if (s->w <= 0 || s->h <= 0 ||
        s->w > link->w || s->h > link->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid too big or non positive size for width '%d' or height '%d'\n",
               s->w, s->h);
        return AVERROR(EINVAL);
    }

    /* set default, required in the case the first computed value for x/y is NAN */
    s->x = (link->w - s->w) / 2;
    s->y = (link->h - s->h) / 2;
    s->x &= ~((1 << s->hsub) - 1);
    s->y &= ~((1 << s->vsub) - 1);
    return 0;

fail_expr:
    av_log(NULL, AV_LOG_ERROR, "Error when evaluating the expression '%s'\n", expr);
    return ret;
}

static int config_output(AVFilterLink *link)
{
    CropContext *s = link->src->priv;

    link->w = s->w;
    link->h = s->h;
    link->sample_aspect_ratio = s->out_sar;

    return 0;
}

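/* The per-frame crop below works in place: x and y are re-evaluated for each
 * frame, clamped so the crop rectangle stays inside the input, aligned down
 * to the chroma subsampling, and then applied by offsetting the plane data
 * pointers; no pixel data is copied. */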
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    CropContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    int i;

    frame->width  = s->w;
    frame->height = s->h;

    s->var_values[VAR_N] = link->frame_count;
    s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
        NAN : frame->pts * av_q2d(link->time_base);
    s->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ?
        NAN : av_frame_get_pkt_pos(frame);
    s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
    s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
    /* evaluate x again, as it may depend on the just-evaluated value of y */
    s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);

    normalize_double(&s->x, s->var_values[VAR_X]);
    normalize_double(&s->y, s->var_values[VAR_Y]);

    if (s->x < 0)
        s->x = 0;
    if (s->y < 0)
        s->y = 0;
    if ((unsigned)s->x + (unsigned)s->w > link->w)
        s->x = link->w - s->w;
    if ((unsigned)s->y + (unsigned)s->h > link->h)
        s->y = link->h - s->h;
    s->x &= ~((1 << s->hsub) - 1);
    s->y &= ~((1 << s->vsub) - 1);

    av_log(ctx, AV_LOG_TRACE, "n:%d t:%f pos:%f x:%d y:%d x+w:%d y+h:%d\n",
            (int)s->var_values[VAR_N], s->var_values[VAR_T], s->var_values[VAR_POS],
            s->x, s->y, s->x+s->w, s->y+s->h);

    frame->data[0] += s->y * frame->linesize[0];
    frame->data[0] += s->x * s->max_step[0];

    if (!(desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)) {
        for (i = 1; i < 3; i ++) {
            if (frame->data[i]) {
                frame->data[i] += (s->y >> s->vsub) * frame->linesize[i];
                frame->data[i] += (s->x * s->max_step[i]) >> s->hsub;
            }
        }
    }

    /* alpha plane */
    if (frame->data[3]) {
        frame->data[3] += s->y * frame->linesize[3];
        frame->data[3] += s->x * s->max_step[3];
    }

    return ff_filter_frame(link->dst->outputs[0], frame);
}

static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    CropContext *s = ctx->priv;
    int ret;

    if (   !strcmp(cmd, "out_w")  || !strcmp(cmd, "w")
        || !strcmp(cmd, "out_h")  || !strcmp(cmd, "h")
        || !strcmp(cmd, "x")      || !strcmp(cmd, "y")) {

        int old_x = s->x;
        int old_y = s->y;
        int old_w = s->w;
        int old_h = s->h;

        AVFilterLink *outlink = ctx->outputs[0];
        AVFilterLink *inlink  = ctx->inputs[0];

        av_opt_set(s, cmd, args, 0);

        if ((ret = config_input(inlink)) < 0) {
            s->x = old_x;
            s->y = old_y;
            s->w = old_w;
            s->h = old_h;
            return ret;
        }

        ret = config_output(outlink);

    } else
        ret = AVERROR(ENOSYS);

    return ret;
}
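
/* Illustrative runtime use (assumption, not part of the original file): the
 * commands accepted above can be sent while the graph is running, for
 * instance from a sendcmd filter placed before this one in the filtergraph:
 *     sendcmd=c='5.0 crop w 320',crop=w=640:h=480
 * which would narrow the crop to 320 pixels five seconds in. */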

#define OFFSET(x) offsetof(CropContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption crop_options[] = {
    { "out_w",       "set the width crop area expression",   OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "w",           "set the width crop area expression",   OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "out_h",       "set the height crop area expression",  OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "h",           "set the height crop area expression",  OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "x",           "set the x crop area expression",       OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "(in_w-out_w)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "y",           "set the y crop area expression",       OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "(in_h-out_h)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "keep_aspect", "keep aspect ratio",                    OFFSET(keep_aspect), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(crop);

static const AVFilterPad avfilter_vf_crop_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_crop_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_crop = {
    .name            = "crop",
    .description     = NULL_IF_CONFIG_SMALL("Crop the input video."),
    .priv_size       = sizeof(CropContext),
    .priv_class      = &crop_class,
    .query_formats   = query_formats,
    .uninit          = uninit,
    .inputs          = avfilter_vf_crop_inputs,
    .outputs         = avfilter_vf_crop_outputs,
    .process_command = process_command,
};