/*
 * Copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @todo try to change to int
 * @todo try lifting based implementation
 * @todo optimize optimize optimize
 * @todo hard thresholding
 * @todo use QP to decide filter strength
 * @todo wavelet normalization / least squares optimal signal vs. noise thresholds
 */

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"

typedef struct {
    const AVClass *class;
    double luma_strength;
    double chroma_strength;
    int depth;              /* number of decomposition levels (clamped to the plane size in filter()) */
    float *plane[16+1][4];  /* [level][subband] float buffers; plane[0][0] is the working copy of the
                             * input plane, plane[0][1..2] are scratch buffers, plane[level][0..3]
                             * hold the four subbands of each level */
    int linesize;           /* stride of the float buffers, aligned to 16 */
    int hsub, vsub;         /* chroma subsampling shifts */
} OWDenoiseContext;

#define OFFSET(x) offsetof(OWDenoiseContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption owdenoise_options[] = {
    { "depth",           "set depth",           OFFSET(depth),           AV_OPT_TYPE_INT,    {.i64 =   8}, 8,   16, FLAGS },
    { "luma_strength",   "set luma strength",   OFFSET(luma_strength),   AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
    { "ls",              "set luma strength",   OFFSET(luma_strength),   AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
    { "chroma_strength", "set chroma strength", OFFSET(chroma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
    { "cs",              "set chroma strength", OFFSET(chroma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(owdenoise);

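/* 8x8 ordered-dither matrix used when rounding the filtered floats back to 8-bit samples. */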
DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
    {  0,  48,  12,  60,   3,  51,  15,  63 },
    { 32,  16,  44,  28,  35,  19,  47,  31 },
    {  8,  56,   4,  52,  11,  59,   7,  55 },
    { 40,  24,  36,  20,  43,  27,  39,  23 },
    {  2,  50,  14,  62,   1,  49,  13,  61 },
    { 34,  18,  46,  30,  33,  17,  45,  29 },
    { 10,  58,   6,  54,   9,  57,   5,  53 },
    { 42,  26,  38,  22,  41,  25,  37,  21 },
};

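/*
 * Analysis (coeff) and synthesis (icoeff) taps of what appears to be a 9/7-tap
 * biorthogonal wavelet pair, with a sqrt(2) normalization folded in. Only one
 * half of each symmetric filter is stored; the mirroring in decompose()/compose()
 * supplies the other half.
 */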
static const double coeff[2][5] = {
    {
         0.6029490182363579  * M_SQRT2,
         0.2668641184428723  * M_SQRT2,
        -0.07822326652898785 * M_SQRT2,
        -0.01686411844287495 * M_SQRT2,
         0.02674875741080976 * M_SQRT2,
    },{
         1.115087052456994   / M_SQRT2,
        -0.5912717631142470  / M_SQRT2,
        -0.05754352622849957 / M_SQRT2,
         0.09127176311424948 / M_SQRT2,
    }
};

static const double icoeff[2][5] = {
    {
         1.115087052456994   / M_SQRT2,
         0.5912717631142470  / M_SQRT2,
        -0.05754352622849957 / M_SQRT2,
        -0.09127176311424948 / M_SQRT2,
    },{
         0.6029490182363579  * M_SQRT2,
        -0.2668641184428723  * M_SQRT2,
        -0.07822326652898785 * M_SQRT2,
         0.01686411844287495 * M_SQRT2,
         0.02674875741080976 * M_SQRT2,
    }
};

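/*
 * Forward 1-D wavelet step: from one row/column of 'src' ('w' samples spaced
 * 'linesize' apart), compute the low-pass band into 'dst_l' and the high-pass
 * band into 'dst_h'. Borders are handled by mirroring (avpriv_mirror); both
 * output bands keep the full length, so the transform is undecimated.
 */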
static inline void decompose(float *dst_l, float *dst_h, const float *src,
                             int linesize, int w)
{
    int x, i;
    for (x = 0; x < w; x++) {
        double sum_l = src[x * linesize] * coeff[0][0];
        double sum_h = src[x * linesize] * coeff[1][0];
        for (i = 1; i <= 4; i++) {
            const double s = src[avpriv_mirror(x - i, w - 1) * linesize]
                           + src[avpriv_mirror(x + i, w - 1) * linesize];

            sum_l += coeff[0][i] * s;
            sum_h += coeff[1][i] * s;
        }
        dst_l[x * linesize] = sum_l;
        dst_h[x * linesize] = sum_h;
    }
}

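/* Inverse of decompose(): reconstruct one row/column from its low- and high-pass bands. */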
static inline void compose(float *dst, const float *src_l, const float *src_h,
                           int linesize, int w)
{
    int x, i;
    for (x = 0; x < w; x++) {
        double sum_l = src_l[x * linesize] * icoeff[0][0];
        double sum_h = src_h[x * linesize] * icoeff[1][0];
        for (i = 1; i <= 4; i++) {
            const int x0 = avpriv_mirror(x - i, w - 1) * linesize;
            const int x1 = avpriv_mirror(x + i, w - 1) * linesize;

            sum_l += icoeff[0][i] * (src_l[x0] + src_l[x1]);
            sum_h += icoeff[1][i] * (src_h[x0] + src_h[x1]);
        }
        dst[x * linesize] = (sum_l + sum_h) * 0.5;
    }
}

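/*
 * Apply the 1-D transform along one dimension of a 2-D plane: one call to
 * decompose()/compose() per line along 'ylinesize'. 'step' (1, 2, 4, ...) is
 * the dilation of the current level, so each of the 'step' interleaved phases
 * along the transform direction is filtered independently with stride
 * step * xlinesize, keeping the transform undecimated.
 */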
static inline void decompose2D(float *dst_l, float *dst_h, const float *src,
                               int xlinesize, int ylinesize,
                               int step, int w, int h)
{
    int y, x;
    for (y = 0; y < h; y++)
        for (x = 0; x < step; x++)
            decompose(dst_l + ylinesize*y + xlinesize*x,
                      dst_h + ylinesize*y + xlinesize*x,
                      src   + ylinesize*y + xlinesize*x,
                      step * xlinesize, (w - x + step - 1) / step);
}

static inline void compose2D(float *dst, const float *src_l, const float *src_h,
                             int xlinesize, int ylinesize,
                             int step, int w, int h)
{
    int y, x;
    for (y = 0; y < h; y++)
        for (x = 0; x < step; x++)
            compose(dst   + ylinesize*y + xlinesize*x,
                    src_l + ylinesize*y + xlinesize*x,
                    src_h + ylinesize*y + xlinesize*x,
                    step * xlinesize, (w - x + step - 1) / step);
}

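/*
 * One full 2-D decomposition level: a horizontal split into temp[0]/temp[1],
 * then a vertical split of each, producing the four subbands dst[0..3].
 * dst[0] is the fully low-pass (LL) band, dst[1..3] are the detail bands.
 * compose2D2() is the exact inverse.
 */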
static void decompose2D2(float *dst[4], float *src, float *temp[2],
                         int linesize, int step, int w, int h)
{
    decompose2D(temp[0], temp[1], src,     1, linesize, step, w, h);
    decompose2D( dst[0],  dst[1], temp[0], linesize, 1, step, h, w);
    decompose2D( dst[2],  dst[3], temp[1], linesize, 1, step, h, w);
}

static void compose2D2(float *dst, float *src[4], float *temp[2],
                       int linesize, int step, int w, int h)
{
    compose2D(temp[0],  src[0],  src[1], linesize, 1, step, h, w);
    compose2D(temp[1],  src[2],  src[3], linesize, 1, step, h, w);
    compose2D(dst,     temp[0], temp[1], 1, linesize, step, w, h);
}

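/*
 * Denoise one plane: decompose it over 'depth' levels, soft-threshold the
 * detail coefficients by 'strength', recompose, then write the result back
 * to 8 bits with ordered dithering.
 */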
static void filter(OWDenoiseContext *s,
                   uint8_t       *dst, int dst_linesize,
                   const uint8_t *src, int src_linesize,
                   int width, int height, double strength)
{
    int x, y, i, j, depth = s->depth;

    /* reduce the number of levels so the coarsest scale still fits inside the plane */
    while (1<<depth > width || 1<<depth > height)
        depth--;

    for (y = 0; y < height; y++)
        for(x = 0; x < width; x++)
            s->plane[0][0][y*s->linesize + x] = src[y*src_linesize + x];

    for (i = 0; i < depth; i++)
        decompose2D2(s->plane[i + 1], s->plane[i][0], s->plane[0] + 1, s->linesize, 1<<i, width, height);

    for (i = 0; i < depth; i++) {
        for (j = 1; j < 4; j++) { /* soft-threshold the detail bands only; the low-pass band (j == 0) is left untouched */
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    double v = s->plane[i + 1][j][y*s->linesize + x];
                    if      (v >  strength) v -= strength;
                    else if (v < -strength) v += strength;
                    else                    v  = 0;
                    s->plane[i + 1][j][x + y*s->linesize] = v;
                }
            }
        }
    }
    for (i = depth-1; i >= 0; i--)
        compose2D2(s->plane[i][0], s->plane[i + 1], s->plane[0] + 1, s->linesize, 1<<i, width, height);

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            i = s->plane[0][0][y*s->linesize + x] + dither[x&7][y&7]*(1.0/64) + 1.0/128; // yes the rounding is insane but optimal :)
            if ((unsigned)i > 255U) i = ~(i >> 31); // branchless clamp: negatives become 0, overflows become 0xFF after the 8-bit store
            dst[y*dst_linesize + x] = i;
        }
    }
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    int direct = 0;
    AVFilterContext *ctx = inlink->dst;
    OWDenoiseContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    const int cw = FF_CEIL_RSHIFT(inlink->w, s->hsub);
    const int ch = FF_CEIL_RSHIFT(inlink->h, s->vsub);

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    filter(s, out->data[0], out->linesize[0], in->data[0], in->linesize[0], inlink->w, inlink->h, s->luma_strength);
    filter(s, out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw,        ch,        s->chroma_strength);
    filter(s, out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw,        ch,        s->chroma_strength);

    if (!direct) {
        if (in->data[3])
            av_image_copy_plane(out->data[3], out->linesize[3],
                                in ->data[3], in ->linesize[3],
                                inlink->w, inlink->h);
        av_frame_free(&in);
    }

    return ff_filter_frame(outlink, out);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P,      AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P,      AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P,      AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVA444P,     AV_PIX_FMT_YUVA422P,
        AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static int config_input(AVFilterLink *inlink)
{
    int i, j;
    OWDenoiseContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int h = FFALIGN(inlink->h, 16);

    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;

    s->linesize = FFALIGN(inlink->w, 16);
    /* one set of four float buffers per decomposition level, plus the level-0 working/scratch set */
    for (j = 0; j < 4; j++) {
        for (i = 0; i <= s->depth; i++) {
            s->plane[i][j] = av_malloc_array(s->linesize, h * sizeof(s->plane[0][0][0]));
            if (!s->plane[i][j])
                return AVERROR(ENOMEM);
        }
    }
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    int i, j;
    OWDenoiseContext *s = ctx->priv;

    for (j = 0; j < 4; j++)
        for (i = 0; i <= s->depth; i++)
            av_freep(&s->plane[i][j]);
}

static const AVFilterPad owdenoise_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad owdenoise_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_owdenoise = {
    .name          = "owdenoise",
    .description   = NULL_IF_CONFIG_SMALL("Denoise using wavelets."),
    .priv_size     = sizeof(OWDenoiseContext),
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = owdenoise_inputs,
    .outputs       = owdenoise_outputs,
    .priv_class    = &owdenoise_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};