/*
 * Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define DEFAULT_INPUT_NAME     "transforms.trf"

#include <vid.stab/libvidstab.h>

#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "internal.h"

#include "vidstabutils.h"

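/* Private filter state: the vid.stab transform data and configuration,
 * the list of transforms loaded from the input file, and the filter options. */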
typedef struct {
    const AVClass *class;

    VSTransformData td;
    VSTransformConfig conf;

    VSTransformations trans;    // transformations
    char *input;                // name of transform file
    int tripod;
} TransformContext;

#define OFFSET(x) offsetof(TransformContext, x)
#define OFFSETC(x) (offsetof(TransformContext, conf)+offsetof(VSTransformConfig, x))
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

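/* Filter options. OFFSETC() entries are written straight into the embedded
 * VSTransformConfig, so vid.stab reads them without extra copying.
 *
 * Typical use (pass 2, after vidstabdetect has written transforms.trf):
 *   ffmpeg -i in.mkv -vf vidstabtransform=smoothing=30:input=transforms.trf out.mkv
 */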
static const AVOption vidstabtransform_options[] = {
    {"input",     "path to the file storing the transforms",                        OFFSET(input),
                   AV_OPT_TYPE_STRING, {.str = DEFAULT_INPUT_NAME}, .flags = FLAGS },
    {"smoothing", "number of frames*2 + 1 used for lowpass filtering",              OFFSETC(smoothing),
                   AV_OPT_TYPE_INT,    {.i64 = 10},       1, 1000, FLAGS},
    {"maxshift",  "maximal number of pixels to translate image",                    OFFSETC(maxShift),
                   AV_OPT_TYPE_INT,    {.i64 = -1},      -1, 500,  FLAGS},
    {"maxangle",  "maximal angle in rad to rotate image",                           OFFSETC(maxAngle),
                   AV_OPT_TYPE_DOUBLE, {.dbl = -1.0},  -1.0, 3.14, FLAGS},
    {"crop",      "set cropping mode",                                              OFFSETC(crop),
                   AV_OPT_TYPE_INT,    {.i64 = 0},        0, 1,    FLAGS, "crop"},
    {  "keep",    "keep border",                                                    0,
                   AV_OPT_TYPE_CONST,  {.i64 = VSKeepBorder }, 0, 0, FLAGS, "crop"},
    {  "black",   "black border",                                                   0,
                   AV_OPT_TYPE_CONST,  {.i64 = VSCropBorder }, 0, 0, FLAGS, "crop"},
    {"invert",    "1: invert transforms",                                           OFFSETC(invert),
                   AV_OPT_TYPE_INT,    {.i64 = 0},        0, 1,    FLAGS},
    {"relative",  "consider transforms as 0: absolute, 1: relative",                OFFSETC(relative),
                   AV_OPT_TYPE_INT,    {.i64 = 1},        0, 1,    FLAGS},
    {"zoom",      "percentage to zoom >0: zoom in, <0 zoom out",                    OFFSETC(zoom),
                   AV_OPT_TYPE_DOUBLE, {.dbl = 0},     -100, 100,  FLAGS},
    {"optzoom",   "0: nothing, 1: determine optimal zoom (added to 'zoom')",        OFFSETC(optZoom),
                   AV_OPT_TYPE_INT,    {.i64 = 1},        0, 2,    FLAGS},
    {"interpol",  "type of interpolation",                                          OFFSETC(interpolType),
                   AV_OPT_TYPE_INT,    {.i64 = 2},        0, 3,    FLAGS, "interpol"},
    {  "no",      "no interpolation",                                               0,
                   AV_OPT_TYPE_CONST,  {.i64 = VS_Zero  },  0, 0,  FLAGS, "interpol"},
    {  "linear",  "linear (horizontal)",                                            0,
                   AV_OPT_TYPE_CONST,  {.i64 = VS_Linear }, 0, 0,  FLAGS, "interpol"},
    {  "bilinear","bi-linear",                                                      0,
                   AV_OPT_TYPE_CONST,  {.i64 = VS_BiLinear},0, 0,  FLAGS, "interpol"},
    {  "bicubic", "bi-cubic",                                                       0,
                   AV_OPT_TYPE_CONST,  {.i64 = VS_BiCubic },0, 0,  FLAGS, "interpol"},
    {"tripod",    "if 1: virtual tripod mode (equiv. to relative=0:smoothing=0)",   OFFSET(tripod),
                   AV_OPT_TYPE_INT,    {.i64 = 0},        0, 1,    FLAGS},
    {NULL}
};

AVFILTER_DEFINE_CLASS(vidstabtransform);

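/* init: hook libvidstab's memory and logging functions up to FFmpeg
 * (vs_set_mem_and_log_functions() lives in vidstabutils.c) before any
 * other vid.stab call is made. */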
static av_cold int init(AVFilterContext *ctx)
{
    TransformContext *tc = ctx->priv;
    vs_set_mem_and_log_functions();
    tc->class = &vidstabtransform_class;
    av_log(ctx, AV_LOG_VERBOSE, "vidstabtransform filter: init %s\n", LIBVIDSTAB_VERSION);
    return 0;
}

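/* uninit: release the vid.stab transform data and the loaded transform list. */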
static av_cold void uninit(AVFilterContext *ctx)
{
    TransformContext *tc = ctx->priv;

    vsTransformDataCleanup(&tc->td);
    vsTransformationsCleanup(&tc->trans);
}

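/* query_formats: pixel formats accepted by the filter; the mapping to
 * vid.stab's frame formats is done in vidstabutils.c. */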
static int query_formats(AVFilterContext *ctx)
{
    // If you add something here also add it in vidstabutils.c
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUV440P,  AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA,
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}


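/* config_input: set up vid.stab for the negotiated frame size and pixel
 * format, read the transforms produced by the vidstabdetect pass, and
 * preprocess them for playback. */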
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TransformContext *tc = ctx->priv;
    FILE *f;

    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    VSTransformData *td = &(tc->td);

    VSFrameInfo fi_src;
    VSFrameInfo fi_dest;

    if (!vsFrameInfoInit(&fi_src, inlink->w, inlink->h,
                         av_2_vs_pixel_format(ctx, inlink->format)) ||
        !vsFrameInfoInit(&fi_dest, inlink->w, inlink->h,
                         av_2_vs_pixel_format(ctx, inlink->format))) {
        av_log(ctx, AV_LOG_ERROR, "unknown pixel format: %i (%s)\n",
               inlink->format, desc->name);
        return AVERROR(EINVAL);
    }

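    /* Cross-check vid.stab's idea of the frame layout against FFmpeg's pixel
     * format descriptor; a mismatch means the format mapping is broken. */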
    if (fi_src.bytesPerPixel != av_get_bits_per_pixel(desc)/8 ||
        fi_src.log2ChromaW != desc->log2_chroma_w ||
        fi_src.log2ChromaH != desc->log2_chroma_h) {
        av_log(ctx, AV_LOG_ERROR, "pixel-format error: bpp %i<>%i  ",
               fi_src.bytesPerPixel, av_get_bits_per_pixel(desc)/8);
        av_log(ctx, AV_LOG_ERROR, "chroma_subsampl: w: %i<>%i  h: %i<>%i\n",
               fi_src.log2ChromaW, desc->log2_chroma_w,
               fi_src.log2ChromaH, desc->log2_chroma_h);
        return AVERROR(EINVAL);
    }

    // set values that are not initialized by the options
    tc->conf.modName = "vidstabtransform";
    tc->conf.verbose = 1;
    if (tc->tripod) {
        av_log(ctx, AV_LOG_INFO, "Virtual tripod mode: relative=0, smoothing=0\n");
        tc->conf.relative  = 0;
        tc->conf.smoothing = 0;
    }

    if (vsTransformDataInit(td, &tc->conf, &fi_src, &fi_dest) != VS_OK) {
        av_log(ctx, AV_LOG_ERROR, "initialization of vid.stab transform failed, please report a BUG\n");
        return AVERROR(EINVAL);
    }

    vsTransformGetConfig(&tc->conf, td);
    av_log(ctx, AV_LOG_INFO, "Video transformation/stabilization settings (pass 2/2):\n");
    av_log(ctx, AV_LOG_INFO, "    input     = %s\n", tc->input);
    av_log(ctx, AV_LOG_INFO, "    smoothing = %d\n", tc->conf.smoothing);
    av_log(ctx, AV_LOG_INFO, "    maxshift  = %d\n", tc->conf.maxShift);
    av_log(ctx, AV_LOG_INFO, "    maxangle  = %f\n", tc->conf.maxAngle);
    av_log(ctx, AV_LOG_INFO, "    crop      = %s\n", tc->conf.crop ? "Black" : "Keep");
    av_log(ctx, AV_LOG_INFO, "    relative  = %s\n", tc->conf.relative ? "True": "False");
    av_log(ctx, AV_LOG_INFO, "    invert    = %s\n", tc->conf.invert ? "True" : "False");
    av_log(ctx, AV_LOG_INFO, "    zoom      = %f\n", tc->conf.zoom);
    av_log(ctx, AV_LOG_INFO, "    optzoom   = %s\n", tc->conf.optZoom ? "On" : "Off");
    av_log(ctx, AV_LOG_INFO, "    interpol  = %s\n", getInterpolationTypeName(tc->conf.interpolType));

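    /* Load the transforms file written by vidstabdetect: try the current
     * local-motions format first, then fall back to the old transforms format. */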
    f = fopen(tc->input, "r");
    if (f == NULL) {
        av_log(ctx, AV_LOG_ERROR, "cannot open input file %s\n", tc->input);
        return AVERROR(errno);
    } else {
        VSManyLocalMotions mlms;
        if (vsReadLocalMotionsFile(f, &mlms) == VS_OK) {
            // calculate the actual transforms from the local motions
            if (vsLocalmotions2TransformsSimple(td, &mlms, &tc->trans) != VS_OK) {
                av_log(ctx, AV_LOG_ERROR, "calculating transformations failed\n");
                fclose(f);
                return AVERROR(EINVAL);
            }
        } else { // try to read old format
            if (!vsReadOldTransforms(td, f, &tc->trans)) { /* read input file */
                av_log(ctx, AV_LOG_ERROR, "error parsing input file %s\n", tc->input);
                fclose(f);
                return AVERROR(EINVAL);
            }
        }
    }
    fclose(f);

    if (vsPreprocessTransforms(td, &tc->trans) != VS_OK) {
        av_log(ctx, AV_LOG_ERROR, "error while preprocessing transforms\n");
        return AVERROR(EINVAL);
    }

    // TODO: add sharpening, so far the user needs to call the unsharp filter manually
    return 0;
}


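/* filter_frame: wrap the AVFrame planes in VSFrames and let vid.stab apply
 * the next transform of the sequence; works in place when the input frame is
 * writable, otherwise renders into a newly allocated output frame. */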
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    TransformContext *tc = ctx->priv;
    VSTransformData *td = &(tc->td);

    AVFilterLink *outlink = inlink->dst->outputs[0];
    int direct = 0;
    AVFrame *out;
    VSFrame inframe;
    int plane;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    for (plane = 0; plane < vsTransformGetSrcFrameInfo(td)->planes; plane++) {
        inframe.data[plane] = in->data[plane];
        inframe.linesize[plane] = in->linesize[plane];
    }
    if (direct) {
        vsTransformPrepare(td, &inframe, &inframe);
    } else { // separate frames
        VSFrame outframe;
        for (plane = 0; plane < vsTransformGetDestFrameInfo(td)->planes; plane++) {
            outframe.data[plane] = out->data[plane];
            outframe.linesize[plane] = out->linesize[plane];
        }
        vsTransformPrepare(td, &inframe, &outframe);
    }

    vsDoTransform(td, vsGetNextTransform(td, &tc->trans));

    vsTransformFinish(td);

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

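/* Pad and filter definitions: a single video input, transformed per frame,
 * and a single video output. */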
static const AVFilterPad avfilter_vf_vidstabtransform_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_vidstabtransform_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_vidstabtransform = {
    .name          = "vidstabtransform",
    .description   = NULL_IF_CONFIG_SMALL("Transform the frames, "
                                          "pass 2 of 2 for stabilization "
                                          "(see vidstabdetect for pass 1)."),
    .priv_size     = sizeof(TransformContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_vidstabtransform_inputs,
    .outputs       = avfilter_vf_vidstabtransform_outputs,
    .priv_class    = &vidstabtransform_class,
};