Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /*
  2.  * Copyright (c) 2013 Vittorio Giovara
  3.  *
  4.  * This file is part of FFmpeg.
  5.  *
  6.  * FFmpeg is free software; you can redistribute it and/or
  7.  * modify it under the terms of the GNU Lesser General Public
  8.  * License as published by the Free Software Foundation; either
  9.  * version 2.1 of the License, or (at your option) any later version.
  10.  *
  11.  * FFmpeg is distributed in the hope that it will be useful,
  12.  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13.  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14.  * Lesser General Public License for more details.
  15.  *
  16.  * You should have received a copy of the GNU Lesser General Public
  17.  * License along with FFmpeg; if not, write to the Free Software
  18.  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19.  */
  20.  
  21. /**
  22.  * @file
  23.  * Generate a frame packed video, by combining two views in a single surface.
  24.  */
  25.  
  26. #include <string.h>
  27.  
  28. #include "libavutil/imgutils.h"
  29. #include "libavutil/opt.h"
  30. #include "libavutil/pixdesc.h"
  31. #include "libavutil/rational.h"
  32. #include "libavutil/stereo3d.h"
  33.  
  34. #include "avfilter.h"
  35. #include "formats.h"
  36. #include "internal.h"
  37. #include "video.h"
  38.  
  39. #define LEFT  0
  40. #define RIGHT 1
  41.  
/**
 * Runtime state for the framepack filter: the negotiated pixel format,
 * the requested packing arrangement, and the latest frame buffered from
 * each of the two input pads.
 */
typedef struct FramepackContext {
    const AVClass *class;               ///< class context for AVOptions

    const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format

    enum AVStereo3DType format;         ///< frame pack type output

    AVFrame *input_views[2];            ///< input frames, indexed by LEFT / RIGHT

    int64_t double_pts;                 ///< new pts for frameseq mode
} FramepackContext;
  53.  
/* Only 8-bit planar YUV(A) layouts are listed: the packing routines copy
 * plane data with byte granularity and derive per-plane sizes from the
 * chroma subsampling factors in the pixel format descriptor. */
static const enum AVPixelFormat formats_supported[] = {
    AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_NONE
};
  60.  
  61. static int query_formats(AVFilterContext *ctx)
  62. {
  63.     // this will ensure that formats are the same on all pads
  64.     AVFilterFormats *fmts_list = ff_make_format_list(formats_supported);
  65.     if (!fmts_list)
  66.         return AVERROR(ENOMEM);
  67.     return ff_set_common_formats(ctx, fmts_list);
  68. }
  69.  
  70. static av_cold void framepack_uninit(AVFilterContext *ctx)
  71. {
  72.     FramepackContext *s = ctx->priv;
  73.  
  74.     // clean any leftover frame
  75.     av_frame_free(&s->input_views[LEFT]);
  76.     av_frame_free(&s->input_views[RIGHT]);
  77. }
  78.  
  79. static int config_output(AVFilterLink *outlink)
  80. {
  81.     AVFilterContext *ctx = outlink->src;
  82.     FramepackContext *s  = outlink->src->priv;
  83.  
  84.     int width            = ctx->inputs[LEFT]->w;
  85.     int height           = ctx->inputs[LEFT]->h;
  86.     AVRational time_base = ctx->inputs[LEFT]->time_base;
  87.     AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;
  88.  
  89.     // check size and fps match on the other input
  90.     if (width  != ctx->inputs[RIGHT]->w ||
  91.         height != ctx->inputs[RIGHT]->h) {
  92.         av_log(ctx, AV_LOG_ERROR,
  93.                "Left and right sizes differ (%dx%d vs %dx%d).\n",
  94.                width, height,
  95.                ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
  96.         return AVERROR_INVALIDDATA;
  97.     } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
  98.         av_log(ctx, AV_LOG_ERROR,
  99.                "Left and right time bases differ (%d/%d vs %d/%d).\n",
  100.                time_base.num, time_base.den,
  101.                ctx->inputs[RIGHT]->time_base.num,
  102.                ctx->inputs[RIGHT]->time_base.den);
  103.         return AVERROR_INVALIDDATA;
  104.     } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
  105.         av_log(ctx, AV_LOG_ERROR,
  106.                "Left and right framerates differ (%d/%d vs %d/%d).\n",
  107.                frame_rate.num, frame_rate.den,
  108.                ctx->inputs[RIGHT]->frame_rate.num,
  109.                ctx->inputs[RIGHT]->frame_rate.den);
  110.         return AVERROR_INVALIDDATA;
  111.     }
  112.  
  113.     s->pix_desc = av_pix_fmt_desc_get(outlink->format);
  114.     if (!s->pix_desc)
  115.         return AVERROR_BUG;
  116.  
  117.     // modify output properties as needed
  118.     switch (s->format) {
  119.     case AV_STEREO3D_FRAMESEQUENCE:
  120.         time_base.den *= 2;
  121.         frame_rate.num *= 2;
  122.  
  123.         s->double_pts = AV_NOPTS_VALUE;
  124.         break;
  125.     case AV_STEREO3D_COLUMNS:
  126.     case AV_STEREO3D_SIDEBYSIDE:
  127.         width *= 2;
  128.         break;
  129.     case AV_STEREO3D_LINES:
  130.     case AV_STEREO3D_TOPBOTTOM:
  131.         height *= 2;
  132.         break;
  133.     default:
  134.         av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.");
  135.         return AVERROR_INVALIDDATA;
  136.     }
  137.  
  138.     outlink->w         = width;
  139.     outlink->h         = height;
  140.     outlink->time_base = time_base;
  141.     outlink->frame_rate= frame_rate;
  142.  
  143.     return 0;
  144. }
  145.  
  146. static void horizontal_frame_pack(FramepackContext *s,
  147.                                   AVFrame *dst,
  148.                                   int interleaved)
  149. {
  150.     int plane, i;
  151.     int length = dst->width / 2;
  152.     int lines  = dst->height;
  153.  
  154.     for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
  155.         const uint8_t *leftp  = s->input_views[LEFT]->data[plane];
  156.         const uint8_t *rightp = s->input_views[RIGHT]->data[plane];
  157.         uint8_t *dstp         = dst->data[plane];
  158.  
  159.         if (plane == 1 || plane == 2) {
  160.             length = FF_CEIL_RSHIFT(dst->width / 2, s->pix_desc->log2_chroma_w);
  161.             lines  = FF_CEIL_RSHIFT(dst->height,    s->pix_desc->log2_chroma_h);
  162.         }
  163.  
  164.         if (interleaved) {
  165.             for (i = 0; i < lines; i++) {
  166.                 int j;
  167.                 int k = 0;
  168.  
  169.                 for (j = 0; j < length; j++) {
  170.                     dstp[k++] = leftp[j];
  171.                     dstp[k++] = rightp[j];
  172.                 }
  173.  
  174.                 dstp   += dst->linesize[plane];
  175.                 leftp  += s->input_views[LEFT]->linesize[plane];
  176.                 rightp += s->input_views[RIGHT]->linesize[plane];
  177.             }
  178.         } else {
  179.             av_image_copy_plane(dst->data[plane], dst->linesize[plane],
  180.                                 leftp, s->input_views[LEFT]->linesize[plane],
  181.                                 length, lines);
  182.             av_image_copy_plane(dst->data[plane] + length, dst->linesize[plane],
  183.                                 rightp, s->input_views[RIGHT]->linesize[plane],
  184.                                 length, lines);
  185.         }
  186.     }
  187. }
  188.  
  189. static void vertical_frame_pack(FramepackContext *s,
  190.                                 AVFrame *dst,
  191.                                 int interleaved)
  192. {
  193.     int plane, offset;
  194.     int length = dst->width;
  195.     int lines  = dst->height / 2;
  196.  
  197.     for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
  198.         if (plane == 1 || plane == 2) {
  199.             length = -(-(dst->width)      >> s->pix_desc->log2_chroma_w);
  200.             lines  = -(-(dst->height / 2) >> s->pix_desc->log2_chroma_h);
  201.         }
  202.  
  203.         offset = interleaved ? dst->linesize[plane] : dst->linesize[plane] * lines;
  204.  
  205.         av_image_copy_plane(dst->data[plane],
  206.                             dst->linesize[plane] << interleaved,
  207.                             s->input_views[LEFT]->data[plane],
  208.                             s->input_views[LEFT]->linesize[plane],
  209.                             length, lines);
  210.         av_image_copy_plane(dst->data[plane] + offset,
  211.                             dst->linesize[plane] << interleaved,
  212.                             s->input_views[RIGHT]->data[plane],
  213.                             s->input_views[RIGHT]->linesize[plane],
  214.                             length, lines);
  215.     }
  216. }
  217.  
  218. static av_always_inline void spatial_frame_pack(FramepackContext *s, AVFrame *dst)
  219. {
  220.     switch (s->format) {
  221.     case AV_STEREO3D_SIDEBYSIDE:
  222.         horizontal_frame_pack(s, dst, 0);
  223.         break;
  224.     case AV_STEREO3D_COLUMNS:
  225.         horizontal_frame_pack(s, dst, 1);
  226.         break;
  227.     case AV_STEREO3D_TOPBOTTOM:
  228.         vertical_frame_pack(s, dst, 0);
  229.         break;
  230.     case AV_STEREO3D_LINES:
  231.         vertical_frame_pack(s, dst, 1);
  232.         break;
  233.     }
  234. }
  235.  
  236. static int filter_frame_left(AVFilterLink *inlink, AVFrame *frame)
  237. {
  238.     FramepackContext *s = inlink->dst->priv;
  239.     s->input_views[LEFT] = frame;
  240.     return 0;
  241. }
  242.  
  243. static int filter_frame_right(AVFilterLink *inlink, AVFrame *frame)
  244. {
  245.     FramepackContext *s = inlink->dst->priv;
  246.     s->input_views[RIGHT] = frame;
  247.     return 0;
  248. }
  249.  
/**
 * Produce output: pull one frame from each input that has none buffered,
 * then either forward both views in sequence (frameseq mode) or merge
 * them into a single packed surface (spatial modes).  In both cases the
 * appropriate AVStereo3D side data is attached.
 *
 * NOTE(review): relies on a successful ff_request_frame() leaving a frame
 * in input_views[] via filter_frame_left/right (the input pads use
 * needs_fifo) — confirm against the libavfilter request protocol.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s = ctx->priv;
    AVStereo3D *stereo;
    int ret, i;

    /* get a frame on the either input, stop as soon as a video ends */
    for (i = 0; i < 2; i++) {
        if (!s->input_views[i]) {
            ret = ff_request_frame(ctx->inputs[i]);
            if (ret < 0)
                return ret;
        }
    }

    if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
        /* seed the doubled-rate pts counter from the first left frame;
         * subsequent output frames get consecutive timestamps */
        if (s->double_pts == AV_NOPTS_VALUE)
            s->double_pts = s->input_views[LEFT]->pts;

        for (i = 0; i < 2; i++) {
            // set correct timestamps
            s->input_views[i]->pts = s->double_pts++;

            // set stereo3d side data
            stereo = av_stereo3d_create_side_data(s->input_views[i]);
            if (!stereo)
                return AVERROR(ENOMEM);
            stereo->type = s->format;

            // filter the frame and immediately relinquish its pointer
            ret = ff_filter_frame(outlink, s->input_views[i]);
            s->input_views[i] = NULL;
            if (ret < 0)
                return ret;
        }
        return ret;
    } else {
        /* spatial packing: allocate one output surface and copy both
         * views into it, then release the inputs */
        AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!dst)
            return AVERROR(ENOMEM);

        spatial_frame_pack(s, dst);

        // get any property from the original frame
        ret = av_frame_copy_props(dst, s->input_views[LEFT]);
        if (ret < 0) {
            av_frame_free(&dst);
            return ret;
        }

        for (i = 0; i < 2; i++)
            av_frame_free(&s->input_views[i]);

        // set stereo3d side data
        stereo = av_stereo3d_create_side_data(dst);
        if (!stereo) {
            av_frame_free(&dst);
            return AVERROR(ENOMEM);
        }
        stereo->type = s->format;

        return ff_filter_frame(outlink, dst);
    }
}
  315.  
#define OFFSET(x) offsetof(FramepackContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
/* User-selectable packing arrangements; "format" defaults to side by side. */
static const AVOption framepack_options[] = {
    { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = V, .unit = "format" },
    { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { NULL },
};

AVFILTER_DEFINE_CLASS(framepack);
  335.  
/* Two video inputs, one per view; needs_fifo buffers frames so that
 * request_frame() can pair a left and a right view. */
static const AVFilterPad framepack_inputs[] = {
    {
        .name         = "left",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_left,
        .needs_fifo   = 1,
    },
    {
        .name         = "right",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_right,
        .needs_fifo   = 1,
    },
    { NULL }
};
  351.  
/* Single packed video output; geometry/timing set up in config_output. */
static const AVFilterPad framepack_outputs[] = {
    {
        .name          = "packed",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
  361.  
/* Filter definition registered with libavfilter. */
AVFilter ff_vf_framepack = {
    .name          = "framepack",
    .description   = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
    .priv_size     = sizeof(FramepackContext),
    .priv_class    = &framepack_class,
    .query_formats = query_formats,
    .inputs        = framepack_inputs,
    .outputs       = framepack_outputs,
    .uninit        = framepack_uninit,
};
  372.