/*
 * Copyright (c) 2015 Paul B. Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "framesync.h"
#include "video.h"

typedef struct StackContext {
    const AVClass *class;
    const AVPixFmtDescriptor *desc;
    int nb_inputs;
    int is_vertical;
    int nb_planes;

    AVFrame **frames;
    FFFrameSync fs;
} StackContext;

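/* Accept every software pixel format that is not paletted, hardware
 * accelerated or bitstream-packed, and negotiate one common format
 * for all inputs and the output. */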
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *pix_fmts = NULL;
    int fmt;

    for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
        if (!(desc->flags & AV_PIX_FMT_FLAG_PAL ||
              desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
              desc->flags & AV_PIX_FMT_FLAG_BITSTREAM))
            ff_add_format(&pix_fmts, fmt);
    }

    return ff_set_common_formats(ctx, pix_fmts);
}

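/* Hand every incoming frame to the framesync machinery; the actual
 * compositing happens later in process_frame(). */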
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    StackContext *s = inlink->dst->priv;
    return ff_framesync_filter_frame(&s->fs, inlink, in);
}

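/* Create one video input pad per requested input. hstack and vstack
 * share this code and differ only in the is_vertical flag derived
 * from the filter name. */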
static av_cold int init(AVFilterContext *ctx)
{
    StackContext *s = ctx->priv;
    int i, ret;

    if (!strcmp(ctx->filter->name, "vstack"))
        s->is_vertical = 1;

    s->frames = av_calloc(s->nb_inputs, sizeof(*s->frames));
    if (!s->frames)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterPad pad = { 0 };

        pad.type = AVMEDIA_TYPE_VIDEO;
        pad.name = av_asprintf("input%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);
        pad.filter_frame = filter_frame;

        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    return 0;
}

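/* Framesync callback: once a frame is available on every input, copy
 * each plane of each input into the output buffer at a running
 * per-plane offset (rows for vstack, bytes within a row for hstack). */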
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    StackContext *s = fs->opaque;
    AVFrame **in = s->frames;
    AVFrame *out;
    int i, p, ret, offset[4] = { 0 };

    for (i = 0; i < s->nb_inputs; i++) {
        if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
            return ret;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];
        int linesize[4];
        int height[4];

        if ((ret = av_image_fill_linesizes(linesize, inlink->format, inlink->w)) < 0) {
            av_frame_free(&out);
            return ret;
        }

        height[1] = height[2] = FF_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
        height[0] = height[3] = inlink->h;

        for (p = 0; p < s->nb_planes; p++) {
            if (s->is_vertical) {
                av_image_copy_plane(out->data[p] + offset[p] * out->linesize[p],
                                    out->linesize[p],
                                    in[i]->data[p],
                                    in[i]->linesize[p],
                                    linesize[p], height[p]);
                offset[p] += height[p];
            } else {
                av_image_copy_plane(out->data[p] + offset[p],
                                    out->linesize[p],
                                    in[i]->data[p],
                                    in[i]->linesize[p],
                                    linesize[p], height[p]);
                offset[p] += linesize[p];
            }
        }
    }

    return ff_filter_frame(outlink, out);
}

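/* Check that all inputs agree on the shared dimension (width for
 * vstack, height for hstack), size the output accordingly, and set up
 * framesync so the inputs are consumed in lockstep. */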
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    StackContext *s = ctx->priv;
    AVRational time_base = ctx->inputs[0]->time_base;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    int height = ctx->inputs[0]->h;
    int width = ctx->inputs[0]->w;
    FFFrameSyncIn *in;
    int i, ret;

    if (s->is_vertical) {
        for (i = 1; i < s->nb_inputs; i++) {
            if (ctx->inputs[i]->w != width) {
                av_log(ctx, AV_LOG_ERROR, "Input %d width %d does not match input %d width %d.\n", i, ctx->inputs[i]->w, 0, width);
                return AVERROR(EINVAL);
            }
            height += ctx->inputs[i]->h;
        }
    } else {
        for (i = 1; i < s->nb_inputs; i++) {
            if (ctx->inputs[i]->h != height) {
                av_log(ctx, AV_LOG_ERROR, "Input %d height %d does not match input %d height %d.\n", i, ctx->inputs[i]->h, 0, height);
                return AVERROR(EINVAL);
            }
            width += ctx->inputs[i]->w;
        }
    }

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        in[i].time_base = inlink->time_base;
        in[i].sync   = 1;
        in[i].before = EXT_STOP;
        in[i].after  = EXT_INFINITY;
    }

    return ff_framesync_configure(&s->fs);
}

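/* Let framesync decide which input needs the next frame. */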
static int request_frame(AVFilterLink *outlink)
{
    StackContext *s = outlink->src->priv;
    return ff_framesync_request_frame(&s->fs, outlink);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    StackContext *s = ctx->priv;
    ff_framesync_uninit(&s->fs);
    av_freep(&s->frames);
}

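/* Common option table: both filters expose a single "inputs" option,
 * the number of streams to stack (at least two). */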
#define OFFSET(x) offsetof(StackContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption stack_options[] = {
    { "inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=2}, 2, INT_MAX, .flags = FLAGS },
    { NULL },
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

#if CONFIG_HSTACK_FILTER

#define hstack_options stack_options
AVFILTER_DEFINE_CLASS(hstack);

AVFilter ff_vf_hstack = {
    .name          = "hstack",
    .description   = NULL_IF_CONFIG_SMALL("Stack video inputs horizontally."),
    .priv_size     = sizeof(StackContext),
    .priv_class    = &hstack_class,
    .query_formats = query_formats,
    .outputs       = outputs,
    .init          = init,
    .uninit        = uninit,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

#endif /* CONFIG_HSTACK_FILTER */

#if CONFIG_VSTACK_FILTER

#define vstack_options stack_options
AVFILTER_DEFINE_CLASS(vstack);

AVFilter ff_vf_vstack = {
    .name          = "vstack",
    .description   = NULL_IF_CONFIG_SMALL("Stack video inputs vertically."),
    .priv_size     = sizeof(StackContext),
    .priv_class    = &vstack_class,
    .query_formats = query_formats,
    .outputs       = outputs,
    .init          = init,
    .uninit        = uninit,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

#endif /* CONFIG_VSTACK_FILTER */