Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /*
  2.  * Copyright (c) 2012 Stefano Sabatini
  3.  *
  4.  * This file is part of FFmpeg.
  5.  *
  6.  * FFmpeg is free software; you can redistribute it and/or
  7.  * modify it under the terms of the GNU Lesser General Public
  8.  * License as published by the Free Software Foundation; either
  9.  * version 2.1 of the License, or (at your option) any later version.
  10.  *
  11.  * FFmpeg is distributed in the hope that it will be useful,
  12.  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13.  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14.  * Lesser General Public License for more details.
  15.  *
  16.  * You should have received a copy of the GNU Lesser General Public
  17.  * License along with FFmpeg; if not, write to the Free Software
  18.  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19.  */
  20.  
  21. /**
  22.  * @file
  23.  * audio to video multimedia filter
  24.  */
  25.  
  26. #include "libavutil/avassert.h"
  27. #include "libavutil/channel_layout.h"
  28. #include "libavutil/opt.h"
  29. #include "libavutil/parseutils.h"
  30. #include "avfilter.h"
  31. #include "formats.h"
  32. #include "audio.h"
  33. #include "video.h"
  34. #include "internal.h"
  35.  
/* Drawing modes for the waveform renderer, selected via the "mode" option. */
enum ShowWavesMode {
    MODE_POINT,         /* one point per sample */
    MODE_LINE,          /* vertical line from the center row to the sample */
    MODE_P2P,           /* line connecting successive samples */
    MODE_CENTERED_LINE, /* line of length |sample| centered on the middle row */
    MODE_NB,            /* number of modes, must be last */
};
  43.  
/* Singly linked list node used by showwavespic to queue the input audio
 * frames until the single output picture is rendered at EOF. */
struct frame_node {
    AVFrame *frame;          /* queued audio frame (owned by the list) */
    struct frame_node *next; /* next queued frame, or NULL for the tail */
};
  48.  
typedef struct {
    const AVClass *class;
    int w, h;                   ///< output video size ("size"/"s" option)
    AVRational rate;            ///< output frame rate ("rate"/"r" option)
    int buf_idx;                ///< x coordinate (column) currently being drawn
    int16_t *buf_idy;    /* y coordinate of previous sample for each channel */
    AVFrame *outpicref;         ///< video frame currently being filled, or NULL
    int req_fullfilled;         ///< set once a frame was pushed during request_frame()
    int n;                      ///< number of input samples drawn into one column
    int sample_count_mod;       ///< samples drawn so far in the current column (wraps at n)
    int mode;                   ///< ShowWavesMode
    int split_channels;         ///< if set, each channel gets its own horizontal band
    void (*draw_sample)(uint8_t *buf, int height, int linesize,
                        int16_t sample, int16_t *prev_y, int intensity);

    /* single picture (showwavespic) */
    int single_pic;                  ///< set by init() for the showwavespic filter
    struct frame_node *audio_frames; ///< FIFO of queued input frames (head)
    struct frame_node *last_frame;   ///< FIFO tail, for O(1) append
    int64_t total_samples;           ///< total queued samples, sets samples per column
    int64_t *sum; /* abs sum of the samples per channel */
} ShowWavesContext;
  71.  
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* Options of the "showwaves" filter (continuous waveform video). */
static const AVOption showwaves_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
        { "point", "draw a point for each sample",         0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT},         .flags=FLAGS, .unit="mode"},
        { "line",  "draw a line for each sample",          0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE},          .flags=FLAGS, .unit="mode"},
        { "p2p",   "draw a line between samples",          0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P},           .flags=FLAGS, .unit="mode"},
        { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
    /* n == 0 means "auto": derived in config_output() from rate, width and sample rate */
    { "n",    "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
    { "r",    "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwaves);
  91.  
  92. static av_cold void uninit(AVFilterContext *ctx)
  93. {
  94.     ShowWavesContext *showwaves = ctx->priv;
  95.  
  96.     av_frame_free(&showwaves->outpicref);
  97.     av_freep(&showwaves->buf_idy);
  98.  
  99.     if (showwaves->single_pic) {
  100.         struct frame_node *node = showwaves->audio_frames;
  101.         while (node) {
  102.             struct frame_node *tmp = node;
  103.  
  104.             node = node->next;
  105.             av_frame_free(&tmp->frame);
  106.             av_freep(&tmp);
  107.         }
  108.         av_freep(&showwaves->sum);
  109.         showwaves->last_frame = NULL;
  110.     }
  111. }
  112.  
  113. static int query_formats(AVFilterContext *ctx)
  114. {
  115.     AVFilterFormats *formats = NULL;
  116.     AVFilterChannelLayouts *layouts = NULL;
  117.     AVFilterLink *inlink = ctx->inputs[0];
  118.     AVFilterLink *outlink = ctx->outputs[0];
  119.     static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
  120.     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
  121.  
  122.     /* set input audio formats */
  123.     formats = ff_make_format_list(sample_fmts);
  124.     if (!formats)
  125.         return AVERROR(ENOMEM);
  126.     ff_formats_ref(formats, &inlink->out_formats);
  127.  
  128.     layouts = ff_all_channel_layouts();
  129.     if (!layouts)
  130.         return AVERROR(ENOMEM);
  131.     ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
  132.  
  133.     formats = ff_all_samplerates();
  134.     if (!formats)
  135.         return AVERROR(ENOMEM);
  136.     ff_formats_ref(formats, &inlink->out_samplerates);
  137.  
  138.     /* set output video format */
  139.     formats = ff_make_format_list(pix_fmts);
  140.     if (!formats)
  141.         return AVERROR(ENOMEM);
  142.     ff_formats_ref(formats, &outlink->in_formats);
  143.  
  144.     return 0;
  145. }
  146.  
  147. static int config_output(AVFilterLink *outlink)
  148. {
  149.     AVFilterContext *ctx = outlink->src;
  150.     AVFilterLink *inlink = ctx->inputs[0];
  151.     ShowWavesContext *showwaves = ctx->priv;
  152.     int nb_channels = inlink->channels;
  153.  
  154.     if (!showwaves->n)
  155.         showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);
  156.  
  157.     showwaves->buf_idx = 0;
  158.     if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
  159.         av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
  160.         return AVERROR(ENOMEM);
  161.     }
  162.     outlink->w = showwaves->w;
  163.     outlink->h = showwaves->h;
  164.     outlink->sample_aspect_ratio = (AVRational){1,1};
  165.  
  166.     outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
  167.                                    (AVRational){showwaves->w,1});
  168.  
  169.     av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
  170.            showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);
  171.     return 0;
  172. }
  173.  
  174. inline static int push_frame(AVFilterLink *outlink)
  175. {
  176.     AVFilterContext *ctx = outlink->src;
  177.     AVFilterLink *inlink = ctx->inputs[0];
  178.     ShowWavesContext *showwaves = outlink->src->priv;
  179.     int nb_channels = inlink->channels;
  180.     int ret, i;
  181.  
  182.     if ((ret = ff_filter_frame(outlink, showwaves->outpicref)) >= 0)
  183.         showwaves->req_fullfilled = 1;
  184.     showwaves->outpicref = NULL;
  185.     showwaves->buf_idx = 0;
  186.     for (i = 0; i < nb_channels; i++)
  187.         showwaves->buf_idy[i] = 0;
  188.     return ret;
  189. }
  190.  
  191. static int push_single_pic(AVFilterLink *outlink)
  192. {
  193.     AVFilterContext *ctx = outlink->src;
  194.     AVFilterLink *inlink = ctx->inputs[0];
  195.     ShowWavesContext *showwaves = ctx->priv;
  196.     int64_t n = 0, max_samples = showwaves->total_samples / outlink->w;
  197.     AVFrame *out = showwaves->outpicref;
  198.     struct frame_node *node;
  199.     const int nb_channels = inlink->channels;
  200.     const int x = 255 / (showwaves->split_channels ? 1 : nb_channels);
  201.     const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
  202.     const int linesize = out->linesize[0];
  203.     int col = 0;
  204.     int64_t *sum = showwaves->sum;
  205.  
  206.     if (max_samples == 0) {
  207.         av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
  208.         return AVERROR(EINVAL);
  209.     }
  210.  
  211.     av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", max_samples);
  212.  
  213.     memset(sum, 0, nb_channels);
  214.  
  215.     for (node = showwaves->audio_frames; node; node = node->next) {
  216.         int i;
  217.         const AVFrame *frame = node->frame;
  218.         const int16_t *p = (const int16_t *)frame->data[0];
  219.  
  220.         for (i = 0; i < frame->nb_samples; i++) {
  221.             int ch;
  222.  
  223.             for (ch = 0; ch < nb_channels; ch++)
  224.                 sum[ch] += abs(p[ch + i*nb_channels]) << 1;
  225.             if (n++ == max_samples) {
  226.                 for (ch = 0; ch < nb_channels; ch++) {
  227.                     int16_t sample = sum[ch] / max_samples;
  228.                     uint8_t *buf = out->data[0] + col;
  229.                     if (showwaves->split_channels)
  230.                         buf += ch*ch_height*linesize;
  231.                     av_assert0(col < outlink->w);
  232.                     showwaves->draw_sample(buf, ch_height, linesize, sample, &showwaves->buf_idy[ch], x);
  233.                     sum[ch] = 0;
  234.                 }
  235.                 col++;
  236.                 n = 0;
  237.             }
  238.         }
  239.     }
  240.  
  241.     return push_frame(outlink);
  242. }
  243.  
  244.  
/**
 * Output request callback: keep pulling audio from the input until a video
 * frame has been pushed downstream, then on EOF flush the pending picture.
 */
static int request_frame(AVFilterLink *outlink)
{
    ShowWavesContext *showwaves = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ret;

    showwaves->req_fullfilled = 0;
    do {
        ret = ff_request_frame(inlink);
    } while (!showwaves->req_fullfilled && ret >= 0);

    /* On EOF, emit the single picture (showwavespic) or the partially
     * filled frame (showwaves), if one exists.
     * NOTE(review): the return values of push_single_pic()/push_frame()
     * are discarded here, so errors during this flush are silently lost --
     * consider propagating them instead of the EOF code. */
    if (ret == AVERROR_EOF && showwaves->outpicref) {
        if (showwaves->single_pic)
            push_single_pic(outlink);
        else
            push_frame(outlink);
    }

    return ret;
}
  265.  
  266. static void draw_sample_point(uint8_t *buf, int height, int linesize,
  267.                               int16_t sample, int16_t *prev_y, int intensity)
  268. {
  269.     const int h = height/2 - av_rescale(sample, height/2, INT16_MAX);
  270.     if (h >= 0 && h < height)
  271.         buf[h * linesize] += intensity;
  272. }
  273.  
  274. static void draw_sample_line(uint8_t *buf, int height, int linesize,
  275.                              int16_t sample, int16_t *prev_y, int intensity)
  276. {
  277.     int k;
  278.     const int h = height/2 - av_rescale(sample, height/2, INT16_MAX);
  279.     int start   = height/2;
  280.     int end     = av_clip(h, 0, height-1);
  281.     if (start > end)
  282.         FFSWAP(int16_t, start, end);
  283.     for (k = start; k < end; k++)
  284.         buf[k * linesize] += intensity;
  285. }
  286.  
  287. static void draw_sample_p2p(uint8_t *buf, int height, int linesize,
  288.                             int16_t sample, int16_t *prev_y, int intensity)
  289. {
  290.     int k;
  291.     const int h = height/2 - av_rescale(sample, height/2, INT16_MAX);
  292.     if (h >= 0 && h < height) {
  293.         buf[h * linesize] += intensity;
  294.         if (*prev_y && h != *prev_y) {
  295.             int start = *prev_y;
  296.             int end = av_clip(h, 0, height-1);
  297.             if (start > end)
  298.                 FFSWAP(int16_t, start, end);
  299.             for (k = start + 1; k < end; k++)
  300.                 buf[k * linesize] += intensity;
  301.         }
  302.     }
  303.     *prev_y = h;
  304. }
  305.  
  306. static void draw_sample_cline(uint8_t *buf, int height, int linesize,
  307.                               int16_t sample, int16_t *prev_y, int intensity)
  308. {
  309.     int k;
  310.     const int h     = av_rescale(abs(sample), height, INT16_MAX);
  311.     const int start = (height - h) / 2;
  312.     const int end   = start + h;
  313.     for (k = start; k < end; k++)
  314.         buf[k * linesize] += intensity;
  315. }
  316.  
/**
 * Lazily allocate and black-fill the output video frame if none is pending.
 *
 * The frame's pts is the input frame's pts plus the offset of the current
 * read position p within in->data[0] (in samples per channel), rescaled
 * from the input sample rate to the output time base.
 *
 * @param p current read position inside in->data[0]
 * @return 0 on success, AVERROR(ENOMEM) if the buffer cannot be allocated
 */
static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
                           const AVFilterLink *inlink, AVFilterLink *outlink,
                           const AVFrame *in)
{
    if (!showwaves->outpicref) {
        int j;
        AVFrame *out = showwaves->outpicref =
            ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        out->width  = outlink->w;
        out->height = outlink->h;
        out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
                                          av_make_q(1, inlink->sample_rate),
                                          outlink->time_base);
        /* start from an all-black picture */
        for (j = 0; j < outlink->h; j++)
            memset(out->data[0] + j*out->linesize[0], 0, outlink->w);
    }
    return 0;
}
  337.  
  338. static av_cold int init(AVFilterContext *ctx)
  339. {
  340.     ShowWavesContext *showwaves = ctx->priv;
  341.  
  342.     if (!strcmp(ctx->filter->name, "showwavespic")) {
  343.         showwaves->single_pic = 1;
  344.         showwaves->mode = MODE_CENTERED_LINE;
  345.     }
  346.  
  347.     switch (showwaves->mode) {
  348.     case MODE_POINT:         showwaves->draw_sample = draw_sample_point; break;
  349.     case MODE_LINE:          showwaves->draw_sample = draw_sample_line;  break;
  350.     case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p;   break;
  351.     case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline; break;
  352.     default:
  353.         return AVERROR_BUG;
  354.     }
  355.     return 0;
  356. }
  357.  
  358. #if CONFIG_SHOWWAVES_FILTER
  359.  
/**
 * Input callback for showwaves: draw every incoming sample into the current
 * output picture, advancing one column every n samples and pushing a frame
 * downstream whenever the picture width is reached. Consumes insamples.
 */
static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    const int nb_samples = insamples->nb_samples;
    AVFrame *outpicref = showwaves->outpicref;
    int16_t *p = (int16_t *)insamples->data[0];
    int nb_channels = inlink->channels;
    int i, j, ret = 0;
    const int n = showwaves->n;
    const int x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * n); /* multiplication factor, pre-computed to avoid in-loop divisions */
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;

    /* draw data in the buffer */
    for (i = 0; i < nb_samples; i++) {

        /* lazily (re)allocate the output picture after each push */
        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;
        outpicref = showwaves->outpicref;

        /* draw one interleaved sample per channel at the current column */
        for (j = 0; j < nb_channels; j++) {
            uint8_t *buf = outpicref->data[0] + showwaves->buf_idx;
            const int linesize = outpicref->linesize[0];
            if (showwaves->split_channels)
                buf += j*ch_height*linesize;
            showwaves->draw_sample(buf, ch_height, linesize, *p++,
                                   &showwaves->buf_idy[j], x);
        }

        /* advance to the next column after n samples */
        showwaves->sample_count_mod++;
        if (showwaves->sample_count_mod == n) {
            showwaves->sample_count_mod = 0;
            showwaves->buf_idx++;
        }
        /* picture full: push it downstream and start a fresh one */
        if (showwaves->buf_idx == showwaves->w)
            if ((ret = push_frame(outlink)) < 0)
                break;
        outpicref = showwaves->outpicref;
    }

end:
    av_frame_free(&insamples);
    return ret;
}
  406.  
/* Audio input pad of the showwaves filter. */
static const AVFilterPad showwaves_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showwaves_filter_frame,
    },
    { NULL }
};

/* Video output pad of the showwaves filter. */
static const AVFilterPad showwaves_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwaves = {
    .name          = "showwaves",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwaves_inputs,
    .outputs       = showwaves_outputs,
    .priv_class    = &showwaves_class,
};
  437.  
  438. #endif // CONFIG_SHOWWAVES_FILTER
  439.  
  440. #if CONFIG_SHOWWAVESPIC_FILTER
  441.  
/* Identical redefinitions of the macros above; benign per C11 6.10.3
 * (same replacement list), kept so this section stands alone when only
 * CONFIG_SHOWWAVESPIC_FILTER is enabled. */
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* Options of the "showwavespic" filter (single-picture waveform). */
static const AVOption showwavespic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwavespic);
  453.  
  454. static int showwavespic_config_input(AVFilterLink *inlink)
  455. {
  456.     AVFilterContext *ctx = inlink->dst;
  457.     ShowWavesContext *showwaves = ctx->priv;
  458.  
  459.     if (showwaves->single_pic) {
  460.         showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
  461.         if (!showwaves->sum)
  462.             return AVERROR(ENOMEM);
  463.     }
  464.  
  465.     return 0;
  466. }
  467.  
/**
 * Input callback for showwavespic: queue each incoming audio frame (and
 * lazily allocate the output picture) until EOF, when push_single_pic()
 * renders everything into one image. On success, ownership of insamples
 * is transferred to the queue; on any other path it is freed here.
 */
static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int16_t *p = (int16_t *)insamples->data[0];
    int ret = 0;

    if (showwaves->single_pic) {
        struct frame_node *f;

        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;

        /* queue the audio frame */
        f = av_malloc(sizeof(*f));
        if (!f) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        f->frame = insamples;
        f->next = NULL;
        /* append at the FIFO tail (audio_frames is the head) */
        if (!showwaves->last_frame) {
            showwaves->audio_frames =
            showwaves->last_frame   = f;
        } else {
            showwaves->last_frame->next = f;
            showwaves->last_frame = f;
        }
        showwaves->total_samples += insamples->nb_samples;

        /* frame now owned by the queue, do not free it */
        return 0;
    }

end:
    av_frame_free(&insamples);
    return ret;
}
  507.  
/* Audio input pad of the showwavespic filter. */
static const AVFilterPad showwavespic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = showwavespic_config_input,
        .filter_frame = showwavespic_filter_frame,
    },
    { NULL }
};

/* Video output pad of the showwavespic filter. */
static const AVFilterPad showwavespic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwavespic = {
    .name          = "showwavespic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwavespic_inputs,
    .outputs       = showwavespic_outputs,
    .priv_class    = &showwavespic_class,
};
  539.  
  540. #endif // CONFIG_SHOWWAVESPIC_FILTER
  541.