/*
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio to video multimedia vectorscope filter
 */
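
/*
 * Example invocation (a sketch only; assumes an ffmpeg binary built with this
 * filter enabled, and the file names are placeholders).  It plots the right
 * channel against the left channel of a stereo input:
 *
 *   ffmpeg -i input.wav -filter_complex avectorscope=mode=lissajous_xy:size=512x512:rate=30 out.mp4
 */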

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
#include "audio.h"
#include "video.h"
#include "internal.h"

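/*
 * How a pair of stereo samples is projected onto the canvas: LISSAJOUS plots
 * the channel difference horizontally against the channel sum vertically
 * (an X-Y display rotated by 45 degrees), LISSAJOUS_XY maps the right channel
 * to X and the left channel to Y directly, and POLAR pushes the pair through
 * a square-to-disc mapping and folds it into a half-disc anchored at the
 * bottom center of the frame.
 */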
enum VectorScopeMode {
    LISSAJOUS,
    LISSAJOUS_XY,
    POLAR,
    MODE_NB,
};

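/*
 * Per-instance state: the canvas that dots accumulate on between output
 * frames, its dimensions (with cached half-width/half-height), and the mode,
 * per-channel contrast/fade, zoom and frame-rate settings exposed through
 * the options table below.
 */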
typedef struct AudioVectorScopeContext {
    const AVClass *class;
    AVFrame *outpicref;
    int w, h;
    int hw, hh;
    int mode;
    int contrast[4];
    int fade[4];
    double zoom;
    AVRational frame_rate;
} AudioVectorScopeContext;

#define OFFSET(x) offsetof(AudioVectorScopeContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption avectorscope_options[] = {
    { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=LISSAJOUS}, 0, MODE_NB-1, FLAGS, "mode" },
    { "m",    "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=LISSAJOUS}, 0, MODE_NB-1, FLAGS, "mode" },
    { "lissajous",    "", 0, AV_OPT_TYPE_CONST, {.i64=LISSAJOUS},    0, 0, FLAGS, "mode" },
    { "lissajous_xy", "", 0, AV_OPT_TYPE_CONST, {.i64=LISSAJOUS_XY}, 0, 0, FLAGS, "mode" },
    { "polar",        "", 0, AV_OPT_TYPE_CONST, {.i64=POLAR},        0, 0, FLAGS, "mode" },
    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, 0, FLAGS },
    { "r",    "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, 0, FLAGS },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="400x400"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="400x400"}, 0, 0, FLAGS },
    { "rc", "set red contrast",   OFFSET(contrast[0]), AV_OPT_TYPE_INT, {.i64=40},  0, 255, FLAGS },
    { "gc", "set green contrast", OFFSET(contrast[1]), AV_OPT_TYPE_INT, {.i64=160}, 0, 255, FLAGS },
    { "bc", "set blue contrast",  OFFSET(contrast[2]), AV_OPT_TYPE_INT, {.i64=80},  0, 255, FLAGS },
    { "ac", "set alpha contrast", OFFSET(contrast[3]), AV_OPT_TYPE_INT, {.i64=255}, 0, 255, FLAGS },
    { "rf", "set red fade",       OFFSET(fade[0]), AV_OPT_TYPE_INT, {.i64=15}, 0, 255, FLAGS },
    { "gf", "set green fade",     OFFSET(fade[1]), AV_OPT_TYPE_INT, {.i64=10}, 0, 255, FLAGS },
    { "bf", "set blue fade",      OFFSET(fade[2]), AV_OPT_TYPE_INT, {.i64=5},  0, 255, FLAGS },
    { "af", "set alpha fade",     OFFSET(fade[3]), AV_OPT_TYPE_INT, {.i64=5},  0, 255, FLAGS },
    { "zoom", "set zoom factor",  OFFSET(zoom), AV_OPT_TYPE_DOUBLE, {.dbl=1},  1, 10, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(avectorscope);

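/*
 * Additively brighten the RGBA pixel at (x, y) by the configured per-channel
 * contrast values, saturating at 255.  When zoom > 1, points falling outside
 * the frame are discarded; otherwise they are clamped to the frame border.
 */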
static void draw_dot(AudioVectorScopeContext *s, unsigned x, unsigned y)
{
    const int linesize = s->outpicref->linesize[0];
    uint8_t *dst;

    if (s->zoom > 1) {
        if (y >= s->h || x >= s->w)
            return;
    } else {
        y = FFMIN(y, s->h - 1);
        x = FFMIN(x, s->w - 1);
    }

    dst = &s->outpicref->data[0][y * linesize + x * 4];
    dst[0] = FFMIN(dst[0] + s->contrast[0], 255);
    dst[1] = FFMIN(dst[1] + s->contrast[1], 255);
    dst[2] = FFMIN(dst[2] + s->contrast[2], 255);
    dst[3] = FFMIN(dst[3] + s->contrast[3], 255);
}

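/*
 * Subtract the configured per-channel fade values from every pixel of the
 * canvas (clamping at 0), so dots from earlier frames decay into trails
 * instead of accumulating indefinitely.  The pass is skipped when all RGB
 * fade values are zero.
 */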
static void fade(AudioVectorScopeContext *s)
{
    const int linesize = s->outpicref->linesize[0];
    int i, j;

    if (s->fade[0] || s->fade[1] || s->fade[2]) {
        uint8_t *d = s->outpicref->data[0];
        for (i = 0; i < s->h; i++) {
            for (j = 0; j < s->w*4; j+=4) {
                d[j+0] = FFMAX(d[j+0] - s->fade[0], 0);
                d[j+1] = FFMAX(d[j+1] - s->fade[1], 0);
                d[j+2] = FFMAX(d[j+2] - s->fade[2], 0);
                d[j+3] = FFMAX(d[j+3] - s->fade[3], 0);
            }
            d += linesize;
        }
    }
}

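/*
 * Advertise the conversion this filter performs: stereo 16-bit or float
 * audio at any sample rate on the input pad, packed RGBA video on the
 * output pad.
 */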
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_formats);

    ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO);
    ff_channel_layouts_ref(layout, &inlink->out_channel_layouts);

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_samplerates);

    formats = ff_make_format_list(pix_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &outlink->in_formats);

    return 0;
}

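/*
 * Request audio in chunks of one video frame's worth of samples
 * (sample_rate / frame_rate, rounded), but never fewer than 1024 samples.
 */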
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioVectorScopeContext *s = ctx->priv;
    int nb_samples;

    nb_samples = FFMAX(1024, ((double)inlink->sample_rate / av_q2d(s->frame_rate)) + 0.5);
    inlink->partial_buf_size =
    inlink->min_samples =
    inlink->max_samples = nb_samples;

    return 0;
}

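/*
 * Propagate the configured size and frame rate to the video output and cache
 * the half-width/half-height used as the plot origin.
 */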
static int config_output(AVFilterLink *outlink)
{
    AudioVectorScopeContext *s = outlink->src->priv;

    outlink->w = s->w;
    outlink->h = s->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->frame_rate = s->frame_rate;

    s->hw = s->w / 2;
    s->hh = s->h / 2;

    return 0;
}

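/*
 * Per audio frame: (re)allocate and clear the canvas if its size changed,
 * apply the fade, plot one dot per stereo sample pair according to the
 * selected mode, then free the input and send a clone of the canvas
 * downstream.
 */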
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioVectorScopeContext *s = ctx->priv;
    const int hw = s->hw;
    const int hh = s->hh;
    unsigned x, y;
    const double zoom = s->zoom;
    int i;

    if (!s->outpicref || s->outpicref->width  != outlink->w ||
                         s->outpicref->height != outlink->h) {
        av_frame_free(&s->outpicref);
        s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!s->outpicref) {
            av_frame_free(&insamples);
            return AVERROR(ENOMEM);
        }

        for (i = 0; i < outlink->h; i++)
            memset(s->outpicref->data[0] + i * s->outpicref->linesize[0], 0, outlink->w * 4);
    }
    s->outpicref->pts = insamples->pts;

    fade(s);

    switch (insamples->format) {
    case AV_SAMPLE_FMT_S16:
        for (i = 0; i < insamples->nb_samples; i++) {
            int16_t *src = (int16_t *)insamples->data[0] + i * 2;

            if (s->mode == LISSAJOUS) {
                x = ((src[1] - src[0]) * zoom / (float)(UINT16_MAX) + 1) * hw;
                y = (1.0 - (src[0] + src[1]) * zoom / (float)UINT16_MAX) * hh;
            } else if (s->mode == LISSAJOUS_XY) {
                x = (src[1] * zoom / (float)INT16_MAX + 1) * hw;
                y = (src[0] * zoom / (float)INT16_MAX + 1) * hh;
            } else {
                float sx, sy, cx, cy;

                sx = src[1] * zoom / (float)INT16_MAX;
                sy = src[0] * zoom / (float)INT16_MAX;
                cx = sx * sqrtf(1 - 0.5*sy*sy);
                cy = sy * sqrtf(1 - 0.5*sx*sx);
                x = hw + hw * FFSIGN(cx + cy) * (cx - cy) * .7;
                y = s->h - s->h * FFABS(cx + cy) * .7;
            }

            draw_dot(s, x, y);
        }
        break;
    case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < insamples->nb_samples; i++) {
            float *src = (float *)insamples->data[0] + i * 2;

            if (s->mode == LISSAJOUS) {
                x = ((src[1] - src[0]) * zoom / 2 + 1) * hw;
                y = (1.0 - (src[0] + src[1]) * zoom / 2) * hh;
            } else if (s->mode == LISSAJOUS_XY){
                x = (src[1] * zoom + 1) * hw;
                y = (src[0] * zoom + 1) * hh;
            } else {
                float sx, sy, cx, cy;

                sx = src[1] * zoom;
                sy = src[0] * zoom;
                cx = sx * sqrtf(1 - 0.5 * sy * sy);
                cy = sy * sqrtf(1 - 0.5 * sx * sx);
                x = hw + hw * FFSIGN(cx + cy) * (cx - cy) * .7;
                y = s->h - s->h * FFABS(cx + cy) * .7;
            }

            draw_dot(s, x, y);
        }
        break;
    }

    av_frame_free(&insamples);

    return ff_filter_frame(outlink, av_frame_clone(s->outpicref));
}

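/* Release the persistent canvas when the filter is torn down. */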
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioVectorScopeContext *s = ctx->priv;

    av_frame_free(&s->outpicref);
}

static const AVFilterPad audiovectorscope_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad audiovectorscope_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_avf_avectorscope = {
    .name          = "avectorscope",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to vectorscope video output."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioVectorScopeContext),
    .inputs        = audiovectorscope_inputs,
    .outputs       = audiovectorscope_outputs,
    .priv_class    = &avectorscope_class,
};