/*
 * Copyright (c) 2014 Muhammad Faiz <mfcc64@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavcodec/avfft.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/xga_font_data.h"
#include "libavutil/qsort.h"
#include "libavutil/time.h"
#include "libavutil/eval.h"
#include "avfilter.h"
#include "internal.h"

#include <math.h>
#include <stdlib.h>

#if CONFIG_LIBFREETYPE
#include <ft2build.h>
#include FT_FREETYPE_H
#endif

/* This filter computes a 16 bins/semitone constant Q transform with the Brown-Puckette algorithm,
 * covering E0 to D#10 (10 octaves),
 * so there are 16 bins/semitone * 12 semitones/octave * 10 octaves = 1920 bins,
 * matching the full HD width */

#define VIDEO_WIDTH 1920
#define VIDEO_HEIGHT 1080
#define FONT_HEIGHT 32
#define SPECTOGRAM_HEIGHT ((VIDEO_HEIGHT-FONT_HEIGHT)/2)
#define SPECTOGRAM_START (VIDEO_HEIGHT-SPECTOGRAM_HEIGHT)
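/* BASE_FREQ below appears to be E0 (about 20.60 Hz) lowered by 7.5/192 octave, so that the 16 bins
 * of each semitone straddle that semitone's nominal frequency; this is inferred from the value,
 * not stated in the original source */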
#define BASE_FREQ 20.051392800492
#define COEFF_CLAMP 1.0e-4
#define TLENGTH_MIN 0.001
#define TLENGTH_DEFAULT "384/f*tc/(384/f+tc)"
#define VOLUME_MIN 1e-10
#define VOLUME_MAX 100.0
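/* default font color expression: ld(0) measures octaves above MIDI note 59.5 (just below middle C);
 * within the first octave above that point ld(1) is a raised-cosine bump, so the label color
 * crossfades red -> blue -> red across that octave and stays red elsewhere */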
#define FONTCOLOR_DEFAULT "st(0, (midi(f)-59.5)/12);" \
    "st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));" \
    "r(1-ld(1)) + b(ld(1))"

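/* one entry of the sparse frequency-domain kernel: an FFT bin index and its (real) weight;
 * only the strongest coefficients of each bin's kernel are kept (see config_output) */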
typedef struct {
    FFTSample value;
    int index;
} SparseCoeff;

typedef struct {
    const AVClass *class;
    AVFrame *outpicref;
    FFTContext *fft_context;
    FFTComplex *fft_data;
    FFTComplex *fft_result_left;
    FFTComplex *fft_result_right;
    uint8_t *spectogram;
    SparseCoeff *coeff_sort;
    SparseCoeff *coeffs[VIDEO_WIDTH];
    uint8_t *font_alpha;
    char *fontfile;     /* using freetype */
    int coeffs_len[VIDEO_WIDTH];
    uint8_t fontcolor_value[VIDEO_WIDTH*3];  /* result of fontcolor option */
    int64_t frame_count;
    int spectogram_count;
    int spectogram_index;
    int fft_bits;
    int req_fullfilled;
    int remaining_fill;
    char *tlength;
    char *volume;
    char *fontcolor;
    double timeclamp;   /* lower timeclamp gives better time accuracy, higher gives better frequency accuracy (at low frequencies) */
    float coeffclamp;   /* lower coeffclamp is more precise, higher coeffclamp is faster */
    int fullhd;         /* if true, output video is full HD resolution, otherwise it is halved */
    float gamma;        /* lower gamma gives more contrast, higher gamma gives more range */
    float gamma2;       /* gamma of the bargraph */
    int fps;            /* the fps requirement is strict anyway, so an int is enough, but rates like 24000/1001 cannot be expressed */
    int count;          /* fps * count = transform rate */
} ShowCQTContext;

#define OFFSET(x) offsetof(ShowCQTContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showcqt_options[] = {
    { "volume", "set volume", OFFSET(volume), AV_OPT_TYPE_STRING, { .str = "16" }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "tlength", "set transform length", OFFSET(tlength), AV_OPT_TYPE_STRING, { .str = TLENGTH_DEFAULT }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "timeclamp", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.1, 1.0, FLAGS },
    { "coeffclamp", "set coeffclamp", OFFSET(coeffclamp), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, 0.1, 10, FLAGS },
    { "gamma", "set gamma", OFFSET(gamma), AV_OPT_TYPE_FLOAT, { .dbl = 3 }, 1, 7, FLAGS },
    { "gamma2", "set gamma of bargraph", OFFSET(gamma2), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, 1, 7, FLAGS },
    { "fullhd", "set full HD resolution", OFFSET(fullhd), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS },
    { "fps", "set video fps", OFFSET(fps), AV_OPT_TYPE_INT, { .i64 = 25 }, 10, 100, FLAGS },
    { "count", "set number of transform per frame", OFFSET(count), AV_OPT_TYPE_INT, { .i64 = 6 }, 1, 30, FLAGS },
    { "fontfile", "set font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "fontcolor", "set font color", OFFSET(fontcolor), AV_OPT_TYPE_STRING, { .str = FONTCOLOR_DEFAULT }, CHAR_MIN, CHAR_MAX, FLAGS },
    { NULL }
};
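
/* Illustrative usage of the options above (not part of the original source; adjust names/values):
 *   ffmpeg -i input.flac -filter_complex "showcqt=fullhd=0:fps=30:count=5:gamma=4" -y cqt.mkv
 * Note that the input sample rate must be divisible by fps*count (checked in config_output). */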

AVFILTER_DEFINE_CLASS(showcqt);

static av_cold void uninit(AVFilterContext *ctx)
{
    int k;

    ShowCQTContext *s = ctx->priv;
    av_fft_end(s->fft_context);
    s->fft_context = NULL;
    for (k = 0; k < VIDEO_WIDTH; k++)
        av_freep(&s->coeffs[k]);
    av_freep(&s->fft_data);
    av_freep(&s->fft_result_left);
    av_freep(&s->fft_result_right);
    av_freep(&s->coeff_sort);
    av_freep(&s->spectogram);
    av_freep(&s->font_alpha);
    av_frame_free(&s->outpicref);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE };
    static const int64_t channel_layouts[] = { AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_STEREO_DOWNMIX, -1 };
    static const int samplerates[] = { 44100, 48000, -1 };

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_formats);

    layouts = avfilter_make_format64_list(channel_layouts);
    if (!layouts)
        return AVERROR(ENOMEM);
    ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);

    formats = ff_make_format_list(samplerates);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_samplerates);

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &outlink->in_formats);

    return 0;
}

#if CONFIG_LIBFREETYPE
static void load_freetype_font(AVFilterContext *ctx)
{
    static const char str[] = "EF G A BC D ";
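    /* note letters of one octave starting at E (the lowest displayed note); the spaces mark the
     * positions of the sharps, so each octave of labels occupies 12 glyph cells */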
    ShowCQTContext *s = ctx->priv;
    FT_Library lib = NULL;
    FT_Face face = NULL;
    int video_scale = s->fullhd ? 2 : 1;
    int video_width = (VIDEO_WIDTH/2) * video_scale;
    int font_height = (FONT_HEIGHT/2) * video_scale;
    int font_width = 8 * video_scale;
    int font_repeat = font_width * 12;
    int linear_hori_advance = font_width * 65536;
    int non_monospace_warning = 0;
    int x;

    s->font_alpha = NULL;

    if (!s->fontfile)
        return;

    if (FT_Init_FreeType(&lib))
        goto fail;

    if (FT_New_Face(lib, s->fontfile, 0, &face))
        goto fail;

    if (FT_Set_Char_Size(face, 16*64, 0, 0, 0))
        goto fail;

    if (FT_Load_Char(face, 'A', FT_LOAD_RENDER))
        goto fail;

    if (FT_Set_Char_Size(face, 16*64 * linear_hori_advance / face->glyph->linearHoriAdvance, 0, 0, 0))
        goto fail;

    s->font_alpha = av_malloc_array(font_height, video_width);
    if (!s->font_alpha)
        goto fail;

    memset(s->font_alpha, 0, font_height * video_width);

    for (x = 0; x < 12; x++) {
        int sx, sy, rx, bx, by, dx, dy;

        if (str[x] == ' ')
            continue;

        if (FT_Load_Char(face, str[x], FT_LOAD_RENDER))
            goto fail;

        if (face->glyph->advance.x != font_width*64 && !non_monospace_warning) {
            av_log(ctx, AV_LOG_WARNING, "Font is not monospace\n");
            non_monospace_warning = 1;
        }

        sy = font_height - 4*video_scale - face->glyph->bitmap_top;
        for (rx = 0; rx < 10; rx++) {
            sx = rx * font_repeat + x * font_width + face->glyph->bitmap_left;
            for (by = 0; by < face->glyph->bitmap.rows; by++) {
                dy = by + sy;
                if (dy < 0)
                    continue;
                if (dy >= font_height)
                    break;

                for (bx = 0; bx < face->glyph->bitmap.width; bx++) {
                    dx = bx + sx;
                    if (dx < 0)
                        continue;
                    if (dx >= video_width)
                        break;
                    s->font_alpha[dy*video_width+dx] = face->glyph->bitmap.buffer[by*face->glyph->bitmap.width+bx];
                }
            }
        }
    }

    FT_Done_Face(face);
    FT_Done_FreeType(lib);
    return;

fail:
    av_log(ctx, AV_LOG_WARNING, "Error while loading freetype font, using default font instead\n");
    FT_Done_Face(face);
    FT_Done_FreeType(lib);
    av_freep(&s->font_alpha);
    return;
}
#endif

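/* A/B/C frequency weighting curves (linear gain, not dB), callable from the volume expression;
 * the helpers below them are callable from the fontcolor expression: midi() converts a frequency
 * to a MIDI note number, and r()/g()/b() pack a 0..1 value into the corresponding 8-bit RGB
 * component so the three results can simply be summed */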
static double a_weighting(void *p, double f)
{
    double ret = 12200.0*12200.0 * (f*f*f*f);
    ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) *
           sqrt((f*f + 107.7*107.7) * (f*f + 737.9*737.9));
    return ret;
}

static double b_weighting(void *p, double f)
{
    double ret = 12200.0*12200.0 * (f*f*f);
    ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) * sqrt(f*f + 158.5*158.5);
    return ret;
}

static double c_weighting(void *p, double f)
{
    double ret = 12200.0*12200.0 * (f*f);
    ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0);
    return ret;
}

static double midi(void *p, double f)
{
    return log2(f/440.0) * 12.0 + 69.0;
}

static double r_func(void *p, double x)
{
    x = av_clipd(x, 0.0, 1.0);
    return (int)(x*255.0+0.5) << 16;
}

static double g_func(void *p, double x)
{
    x = av_clipd(x, 0.0, 1.0);
    return (int)(x*255.0+0.5) << 8;
}

static double b_func(void *p, double x)
{
    x = av_clipd(x, 0.0, 1.0);
    return (int)(x*255.0+0.5);
}

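/* ascending sort by magnitude, so the largest kernel coefficients end up at the tail of the array */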
static inline int qsort_sparsecoeff(const SparseCoeff *a, const SparseCoeff *b)
{
    if (fabsf(a->value) >= fabsf(b->value))
        return 1;
    else
        return -1;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowCQTContext *s = ctx->priv;
    AVExpr *tlength_expr = NULL, *volume_expr = NULL, *fontcolor_expr = NULL;
    uint8_t *fontcolor_value = s->fontcolor_value;
    static const char * const expr_vars[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
    static const char * const expr_func_names[] = { "a_weighting", "b_weighting", "c_weighting", NULL };
    static const char * const expr_fontcolor_func_names[] = { "midi", "r", "g", "b", NULL };
    static double (* const expr_funcs[])(void *, double) = { a_weighting, b_weighting, c_weighting, NULL };
    static double (* const expr_fontcolor_funcs[])(void *, double) = { midi, r_func, g_func, b_func, NULL };
    int fft_len, k, x, y, ret;
    int num_coeffs = 0;
    int rate = inlink->sample_rate;
    double max_len = rate * (double) s->timeclamp;
    int64_t start_time, end_time;
    int video_scale = s->fullhd ? 2 : 1;
    int video_width = (VIDEO_WIDTH/2) * video_scale;
    int video_height = (VIDEO_HEIGHT/2) * video_scale;
    int spectogram_height = (SPECTOGRAM_HEIGHT/2) * video_scale;

    s->fft_bits = ceil(log2(max_len));
    fft_len = 1 << s->fft_bits;

    if (rate % (s->fps * s->count)) {
        av_log(ctx, AV_LOG_ERROR, "Rate (%u) is not divisible by fps*count (%u*%u)\n", rate, s->fps, s->count);
        return AVERROR(EINVAL);
    }

    s->fft_data         = av_malloc_array(fft_len, sizeof(*s->fft_data));
    s->coeff_sort       = av_malloc_array(fft_len, sizeof(*s->coeff_sort));
    s->fft_result_left  = av_malloc_array(fft_len, sizeof(*s->fft_result_left));
    s->fft_result_right = av_malloc_array(fft_len, sizeof(*s->fft_result_right));
    s->fft_context      = av_fft_init(s->fft_bits, 0);

    if (!s->fft_data || !s->coeff_sort || !s->fft_result_left || !s->fft_result_right || !s->fft_context)
        return AVERROR(ENOMEM);

#if CONFIG_LIBFREETYPE
    load_freetype_font(ctx);
#else
    if (s->fontfile)
        av_log(ctx, AV_LOG_WARNING, "Freetype is not available, ignoring fontfile option\n");
    s->font_alpha = NULL;
#endif

    av_log(ctx, AV_LOG_INFO, "Calculating spectral kernel, please wait\n");
    start_time = av_gettime_relative();
    ret = av_expr_parse(&tlength_expr, s->tlength, expr_vars, NULL, NULL, NULL, NULL, 0, ctx);
    if (ret < 0)
        goto eval_error;

    ret = av_expr_parse(&volume_expr, s->volume, expr_vars, expr_func_names,
                        expr_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        goto eval_error;

    ret = av_expr_parse(&fontcolor_expr, s->fontcolor, expr_vars, expr_fontcolor_func_names,
                        expr_fontcolor_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        goto eval_error;

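    /* Build the spectral kernel, one bin per full-HD output column (192 bins/octave = 16 per
     * semitone). For each bin, a windowed complex sinusoid of length tlen is written into
     * fft_data and transformed; the smallest FFT coefficients, whose combined magnitude stays
     * below the clamp fraction of the total, are then discarded, leaving a sparse frequency-domain
     * kernel (the Brown-Puckette fast CQT approach named in the header comment). */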
    for (k = 0; k < VIDEO_WIDTH; k++) {
        int hlen = fft_len >> 1;
        float total = 0;
        float partial = 0;
        double freq = BASE_FREQ * exp2(k * (1.0/192.0));
        double tlen, tlength, volume;
        double expr_vars_val[] = { s->timeclamp, s->timeclamp, freq, freq, freq, 0 };
        /* a window function from Albert H. Nuttall,
         * "Some Windows with Very Good Sidelobe Behavior"
         * -93.32 dB peak sidelobe and 18 dB/octave asymptotic decay
         * coefficient normalized to a0 = 1 */
        double a0 = 0.355768;
        double a1 = 0.487396/a0;
        double a2 = 0.144232/a0;
        double a3 = 0.012604/a0;
        double sv_step, cv_step, sv, cv;
        double sw_step, cw_step, sw, cw, w;

        tlength = av_expr_eval(tlength_expr, expr_vars_val, NULL);
        if (isnan(tlength)) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is nan, setting it to %g\n", freq, s->timeclamp);
            tlength = s->timeclamp;
        } else if (tlength < TLENGTH_MIN) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is %g, setting it to %g\n", freq, tlength, TLENGTH_MIN);
            tlength = TLENGTH_MIN;
        } else if (tlength > s->timeclamp) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is %g, setting it to %g\n", freq, tlength, s->timeclamp);
            tlength = s->timeclamp;
        }

        volume = FFABS(av_expr_eval(volume_expr, expr_vars_val, NULL));
        if (isnan(volume)) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: volume is nan, setting it to 0\n", freq);
            volume = VOLUME_MIN;
        } else if (volume < VOLUME_MIN) {
            volume = VOLUME_MIN;
        } else if (volume > VOLUME_MAX) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: volume is %g, setting it to %g\n", freq, volume, VOLUME_MAX);
            volume = VOLUME_MAX;
        }

        if (s->fullhd || !(k & 1)) {
            int fontcolor = av_expr_eval(fontcolor_expr, expr_vars_val, NULL);
            fontcolor_value[0] = (fontcolor >> 16) & 0xFF;
            fontcolor_value[1] = (fontcolor >> 8) & 0xFF;
            fontcolor_value[2] = fontcolor & 0xFF;
            fontcolor_value += 3;
        }

        tlen = tlength * rate;
        s->fft_data[0].re = 0;
        s->fft_data[0].im = 0;
        s->fft_data[hlen].re = (1.0 + a1 + a2 + a3) * (1.0/tlen) * volume * (1.0/fft_len);
        s->fft_data[hlen].im = 0;
        sv_step = sv = sin(2.0*M_PI*freq*(1.0/rate));
        cv_step = cv = cos(2.0*M_PI*freq*(1.0/rate));
        /* also optimizing window func */
        sw_step = sw = sin(2.0*M_PI*(1.0/tlen));
        cw_step = cw = cos(2.0*M_PI*(1.0/tlen));
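        /* the loop below advances cos/sin of both the bin frequency (cv, sv) and the window
         * argument (cw, sw) with complex rotation recurrences instead of calling cos()/sin()
         * per sample; cw2/cw3 are the cosines of twice/three times the window angle */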
        for (x = 1; x < 0.5 * tlen; x++) {
            double cv_tmp, cw_tmp;
            double cw2, cw3, sw2;

            cw2 = cw * cw - sw * sw;
            sw2 = cw * sw + sw * cw;
            cw3 = cw * cw2 - sw * sw2;
            w = (1.0 + a1 * cw + a2 * cw2 + a3 * cw3) * (1.0/tlen) * volume * (1.0/fft_len);
            s->fft_data[hlen + x].re = w * cv;
            s->fft_data[hlen + x].im = w * sv;
            s->fft_data[hlen - x].re = s->fft_data[hlen + x].re;
            s->fft_data[hlen - x].im = -s->fft_data[hlen + x].im;

            cv_tmp = cv * cv_step - sv * sv_step;
            sv = sv * cv_step + cv * sv_step;
            cv = cv_tmp;
            cw_tmp = cw * cw_step - sw * sw_step;
            sw = sw * cw_step + cw * sw_step;
            cw = cw_tmp;
        }
        for (; x < hlen; x++) {
            s->fft_data[hlen + x].re = 0;
            s->fft_data[hlen + x].im = 0;
            s->fft_data[hlen - x].re = 0;
            s->fft_data[hlen - x].im = 0;
        }
        av_fft_permute(s->fft_context, s->fft_data);
        av_fft_calc(s->fft_context, s->fft_data);

        for (x = 0; x < fft_len; x++) {
            s->coeff_sort[x].index = x;
            s->coeff_sort[x].value = s->fft_data[x].re;
        }

        AV_QSORT(s->coeff_sort, fft_len, SparseCoeff, qsort_sparsecoeff);
        for (x = 0; x < fft_len; x++)
            total += fabsf(s->coeff_sort[x].value);

        for (x = 0; x < fft_len; x++) {
            partial += fabsf(s->coeff_sort[x].value);
            if (partial > total * s->coeffclamp * COEFF_CLAMP) {
                s->coeffs_len[k] = fft_len - x;
                num_coeffs += s->coeffs_len[k];
                s->coeffs[k] = av_malloc_array(s->coeffs_len[k], sizeof(*s->coeffs[k]));
                if (!s->coeffs[k]) {
                    ret = AVERROR(ENOMEM);
                    goto eval_error;
                }
                for (y = 0; y < s->coeffs_len[k]; y++)
                    s->coeffs[k][y] = s->coeff_sort[x+y];
                break;
            }
        }
    }
    av_expr_free(fontcolor_expr);
    av_expr_free(volume_expr);
    av_expr_free(tlength_expr);
    end_time = av_gettime_relative();
    av_log(ctx, AV_LOG_INFO, "Elapsed time %.6f s (fft_len=%u, num_coeffs=%u)\n", 1e-6 * (end_time-start_time), fft_len, num_coeffs);

    outlink->w = video_width;
    outlink->h = video_height;

    s->req_fullfilled = 0;
    s->spectogram_index = 0;
    s->frame_count = 0;
    s->spectogram_count = 0;
    s->remaining_fill = fft_len >> 1;
    memset(s->fft_data, 0, fft_len * sizeof(*s->fft_data));

    s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!s->outpicref)
        return AVERROR(ENOMEM);

    s->spectogram = av_calloc(spectogram_height, s->outpicref->linesize[0]);
    if (!s->spectogram)
        return AVERROR(ENOMEM);

    outlink->sample_aspect_ratio = av_make_q(1, 1);
    outlink->time_base = av_make_q(1, s->fps);
    outlink->frame_rate = av_make_q(s->fps, 1);
    return 0;

eval_error:
    av_expr_free(fontcolor_expr);
    av_expr_free(volume_expr);
    av_expr_free(tlength_expr);
    return ret;
}

static int plot_cqt(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowCQTContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int fft_len = 1 << s->fft_bits;
    FFTSample result[VIDEO_WIDTH][4];
    int x, y, ret = 0;
    int linesize = s->outpicref->linesize[0];
    int video_scale = s->fullhd ? 2 : 1;
    int video_width = (VIDEO_WIDTH/2) * video_scale;
    int spectogram_height = (SPECTOGRAM_HEIGHT/2) * video_scale;
    int spectogram_start = (SPECTOGRAM_START/2) * video_scale;
    int font_height = (FONT_HEIGHT/2) * video_scale;

    /* real part contains left samples, imaginary part contains right samples */
    memcpy(s->fft_result_left, s->fft_data, fft_len * sizeof(*s->fft_data));
    av_fft_permute(s->fft_context, s->fft_result_left);
    av_fft_calc(s->fft_context, s->fft_result_left);

    /* separate left and right, (and multiply by 2.0) */
    s->fft_result_right[0].re = 2.0f * s->fft_result_left[0].im;
    s->fft_result_right[0].im = 0;
    s->fft_result_left[0].re = 2.0f * s->fft_result_left[0].re;
    s->fft_result_left[0].im = 0;
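    /* with z[n] = left[n] + i*right[n], the channel spectra follow from the conjugate symmetry
     * L[k] = (Z[k] + conj(Z[N-k]))/2 and R[k] = (Z[k] - conj(Z[N-k]))/(2i); the loop below stores
     * 2*conj(L[k]) and 2*conj(R[k]), which is fine because only squared magnitudes are used later */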
    for (x = 1; x <= fft_len >> 1; x++) {
        FFTSample tmpy = s->fft_result_left[fft_len-x].im - s->fft_result_left[x].im;

        s->fft_result_right[x].re = s->fft_result_left[x].im + s->fft_result_left[fft_len-x].im;
        s->fft_result_right[x].im = s->fft_result_left[x].re - s->fft_result_left[fft_len-x].re;
        s->fft_result_right[fft_len-x].re = s->fft_result_right[x].re;
        s->fft_result_right[fft_len-x].im = -s->fft_result_right[x].im;

        s->fft_result_left[x].re = s->fft_result_left[x].re + s->fft_result_left[fft_len-x].re;
        s->fft_result_left[x].im = tmpy;
        s->fft_result_left[fft_len-x].re = s->fft_result_left[x].re;
        s->fft_result_left[fft_len-x].im = -s->fft_result_left[x].im;
    }

    /* calculating cqt */
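    /* (each bin is the dot product of its sparse spectral kernel with the FFT of the input block;
     * frequency-domain multiplication replaces time-domain convolution with the long CQT window) */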
    for (x = 0; x < VIDEO_WIDTH; x++) {
        int u;
        FFTComplex l = {0,0};
        FFTComplex r = {0,0};

        for (u = 0; u < s->coeffs_len[x]; u++) {
            FFTSample value = s->coeffs[x][u].value;
            int index = s->coeffs[x][u].index;
            l.re += value * s->fft_result_left[index].re;
            l.im += value * s->fft_result_left[index].im;
            r.re += value * s->fft_result_right[index].re;
            r.im += value * s->fft_result_right[index].im;
        }
        /* result is power, not amplitude */
        result[x][0] = l.re * l.re + l.im * l.im;
        result[x][2] = r.re * r.re + r.im * r.im;
        result[x][1] = 0.5f * (result[x][0] + result[x][2]);

        if (s->gamma2 == 1.0f)
            result[x][3] = result[x][1];
        else if (s->gamma2 == 2.0f)
            result[x][3] = sqrtf(result[x][1]);
        else if (s->gamma2 == 3.0f)
            result[x][3] = cbrtf(result[x][1]);
        else if (s->gamma2 == 4.0f)
            result[x][3] = sqrtf(sqrtf(result[x][1]));
        else
            result[x][3] = expf(logf(result[x][1]) * (1.0f / s->gamma2));

        result[x][0] = FFMIN(1.0f, result[x][0]);
        result[x][1] = FFMIN(1.0f, result[x][1]);
        result[x][2] = FFMIN(1.0f, result[x][2]);
        if (s->gamma == 1.0f) {
            result[x][0] = 255.0f * result[x][0];
            result[x][1] = 255.0f * result[x][1];
            result[x][2] = 255.0f * result[x][2];
        } else if (s->gamma == 2.0f) {
            result[x][0] = 255.0f * sqrtf(result[x][0]);
            result[x][1] = 255.0f * sqrtf(result[x][1]);
            result[x][2] = 255.0f * sqrtf(result[x][2]);
        } else if (s->gamma == 3.0f) {
            result[x][0] = 255.0f * cbrtf(result[x][0]);
            result[x][1] = 255.0f * cbrtf(result[x][1]);
            result[x][2] = 255.0f * cbrtf(result[x][2]);
        } else if (s->gamma == 4.0f) {
            result[x][0] = 255.0f * sqrtf(sqrtf(result[x][0]));
            result[x][1] = 255.0f * sqrtf(sqrtf(result[x][1]));
            result[x][2] = 255.0f * sqrtf(sqrtf(result[x][2]));
        } else {
            result[x][0] = 255.0f * expf(logf(result[x][0]) * (1.0f / s->gamma));
            result[x][1] = 255.0f * expf(logf(result[x][1]) * (1.0f / s->gamma));
            result[x][2] = 255.0f * expf(logf(result[x][2]) * (1.0f / s->gamma));
        }
    }

    if (!s->fullhd) {
        for (x = 0; x < video_width; x++) {
            result[x][0] = 0.5f * (result[2*x][0] + result[2*x+1][0]);
            result[x][1] = 0.5f * (result[2*x][1] + result[2*x+1][1]);
            result[x][2] = 0.5f * (result[2*x][2] + result[2*x+1][2]);
            result[x][3] = 0.5f * (result[2*x][3] + result[2*x+1][3]);
        }
    }

    for (x = 0; x < video_width; x++) {
        s->spectogram[s->spectogram_index*linesize + 3*x] = result[x][0] + 0.5f;
        s->spectogram[s->spectogram_index*linesize + 3*x + 1] = result[x][1] + 0.5f;
        s->spectogram[s->spectogram_index*linesize + 3*x + 2] = result[x][2] + 0.5f;
    }

    /* drawing */
    if (!s->spectogram_count) {
        uint8_t *data = (uint8_t*) s->outpicref->data[0];
        float rcp_result[VIDEO_WIDTH];
        int total_length = linesize * spectogram_height;
        int back_length = linesize * s->spectogram_index;

        for (x = 0; x < video_width; x++)
            rcp_result[x] = 1.0f / (result[x][3]+0.0001f);

        /* drawing bar */
        for (y = 0; y < spectogram_height; y++) {
            float height = (spectogram_height - y) * (1.0f/spectogram_height);
            uint8_t *lineptr = data + y * linesize;
            for (x = 0; x < video_width; x++) {
                float mul;
                if (result[x][3] <= height) {
                    *lineptr++ = 0;
                    *lineptr++ = 0;
                    *lineptr++ = 0;
                } else {
                    mul = (result[x][3] - height) * rcp_result[x];
                    *lineptr++ = mul * result[x][0] + 0.5f;
                    *lineptr++ = mul * result[x][1] + 0.5f;
                    *lineptr++ = mul * result[x][2] + 0.5f;
                }
            }
        }

        /* drawing font */
        if (s->font_alpha) {
            for (y = 0; y < font_height; y++) {
                uint8_t *lineptr = data + (spectogram_height + y) * linesize;
                uint8_t *spectogram_src = s->spectogram + s->spectogram_index * linesize;
                uint8_t *fontcolor_value = s->fontcolor_value;
                for (x = 0; x < video_width; x++) {
                    uint8_t alpha = s->font_alpha[y*video_width+x];
                    lineptr[3*x] = (spectogram_src[3*x] * (255-alpha) + fontcolor_value[0] * alpha + 255) >> 8;
                    lineptr[3*x+1] = (spectogram_src[3*x+1] * (255-alpha) + fontcolor_value[1] * alpha + 255) >> 8;
                    lineptr[3*x+2] = (spectogram_src[3*x+2] * (255-alpha) + fontcolor_value[2] * alpha + 255) >> 8;
                    fontcolor_value += 3;
                }
            }
        } else {
            for (y = 0; y < font_height; y++) {
                uint8_t *lineptr = data + (spectogram_height + y) * linesize;
                memcpy(lineptr, s->spectogram + s->spectogram_index * linesize, video_width*3);
            }
            for (x = 0; x < video_width; x += video_width/10) {
                int u;
                static const char str[] = "EF G A BC D ";
                uint8_t *startptr = data + spectogram_height * linesize + x * 3;
                for (u = 0; str[u]; u++) {
                    int v;
                    for (v = 0; v < 16; v++) {
                        uint8_t *p = startptr + v * linesize * video_scale + 8 * 3 * u * video_scale;
                        int ux = x + 8 * u * video_scale;
                        int mask;
                        for (mask = 0x80; mask; mask >>= 1) {
                            if (mask & avpriv_vga16_font[str[u] * 16 + v]) {
                                p[0] = s->fontcolor_value[3*ux];
                                p[1] = s->fontcolor_value[3*ux+1];
                                p[2] = s->fontcolor_value[3*ux+2];
                                if (video_scale == 2) {
                                    p[linesize] = p[0];
                                    p[linesize+1] = p[1];
                                    p[linesize+2] = p[2];
                                    p[3] = p[linesize+3] = s->fontcolor_value[3*ux+3];
                                    p[4] = p[linesize+4] = s->fontcolor_value[3*ux+4];
                                    p[5] = p[linesize+5] = s->fontcolor_value[3*ux+5];
                                }
                            }
                            p  += 3 * video_scale;
                            ux += video_scale;
                        }
                    }
                }
            }
        }

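        /* s->spectogram is a ring buffer of spectogram_height rows whose newest row sits at
         * spectogram_index; unrolling it into the frame below puts the newest line at the top
         * of the scrolling area */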
        /* drawing spectogram/sonogram */
        data += spectogram_start * linesize;
        memcpy(data, s->spectogram + s->spectogram_index*linesize, total_length - back_length);

        data += total_length - back_length;
        if (back_length)
            memcpy(data, s->spectogram, back_length);

        s->outpicref->pts = s->frame_count;
        ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
        s->req_fullfilled = 1;
        s->frame_count++;
    }
    s->spectogram_count = (s->spectogram_count + 1) % s->count;
    s->spectogram_index = (s->spectogram_index + spectogram_height - 1) % spectogram_height;
    return ret;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    ShowCQTContext *s = ctx->priv;
    int step = inlink->sample_rate / (s->fps * s->count);
    int fft_len = 1 << s->fft_bits;
    int remaining;
    float *audio_data;

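    /* fft_data is a sliding window over the interleaved stereo input (left in .re, right in .im);
     * each time it fills up a transform is plotted, then the window advances by `step` samples,
     * giving fps*count transforms per second; a NULL input frame zero-pads and flushes the tail */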
    if (!insamples) {
        while (s->remaining_fill < (fft_len >> 1)) {
            int ret, x;
            memset(&s->fft_data[fft_len - s->remaining_fill], 0, sizeof(*s->fft_data) * s->remaining_fill);
            ret = plot_cqt(inlink);
            if (ret < 0)
                return ret;
            for (x = 0; x < (fft_len-step); x++)
                s->fft_data[x] = s->fft_data[x+step];
            s->remaining_fill += step;
        }
        return AVERROR_EOF;
    }

    remaining = insamples->nb_samples;
    audio_data = (float*) insamples->data[0];

    while (remaining) {
        if (remaining >= s->remaining_fill) {
            int i = insamples->nb_samples - remaining;
            int j = fft_len - s->remaining_fill;
            int m, ret;
            for (m = 0; m < s->remaining_fill; m++) {
                s->fft_data[j+m].re = audio_data[2*(i+m)];
                s->fft_data[j+m].im = audio_data[2*(i+m)+1];
            }
            ret = plot_cqt(inlink);
            if (ret < 0) {
                av_frame_free(&insamples);
                return ret;
            }
            remaining -= s->remaining_fill;
            for (m = 0; m < fft_len-step; m++)
                s->fft_data[m] = s->fft_data[m+step];
            s->remaining_fill = step;
        } else {
            int i = insamples->nb_samples - remaining;
            int j = fft_len - s->remaining_fill;
            int m;
            for (m = 0; m < remaining; m++) {
                s->fft_data[m+j].re = audio_data[2*(i+m)];
                s->fft_data[m+j].im = audio_data[2*(i+m)+1];
            }
            s->remaining_fill -= remaining;
            remaining = 0;
        }
    }
    av_frame_free(&insamples);
    return 0;
}


static int request_frame(AVFilterLink *outlink)
{
    ShowCQTContext *s = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ret;

    s->req_fullfilled = 0;
    do {
        ret = ff_request_frame(inlink);
    } while (!s->req_fullfilled && ret >= 0);

    if (ret == AVERROR_EOF && s->outpicref)
        filter_frame(inlink, NULL);
    return ret;
}

static const AVFilterPad showcqt_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad showcqt_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showcqt = {
    .name          = "showcqt",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant Q Transform) spectrum video output."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowCQTContext),
    .inputs        = showcqt_inputs,
    .outputs       = showcqt_outputs,
    .priv_class    = &showcqt_class,
};