Subversion Repositories Kolibri OS

Compare Revisions

Rev 4348 → Rev 4349

/contrib/sdk/sources/ffmpeg/libavfilter/Makefile
0,0 → 1,261
include $(SUBDIR)../config.mak
 
NAME = avfilter
FFLIBS = avutil
FFLIBS-$(CONFIG_ACONVERT_FILTER) += swresample
FFLIBS-$(CONFIG_AMOVIE_FILTER) += avformat avcodec
FFLIBS-$(CONFIG_ARESAMPLE_FILTER) += swresample
FFLIBS-$(CONFIG_ASYNCTS_FILTER) += avresample
FFLIBS-$(CONFIG_ATEMPO_FILTER) += avcodec
FFLIBS-$(CONFIG_DECIMATE_FILTER) += avcodec
FFLIBS-$(CONFIG_DESHAKE_FILTER) += avcodec
FFLIBS-$(CONFIG_MCDEINT_FILTER) += avcodec
FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec
FFLIBS-$(CONFIG_MP_FILTER) += avcodec
FFLIBS-$(CONFIG_PAN_FILTER) += swresample
FFLIBS-$(CONFIG_PP_FILTER) += postproc
FFLIBS-$(CONFIG_REMOVELOGO_FILTER) += avformat avcodec swscale
FFLIBS-$(CONFIG_RESAMPLE_FILTER) += avresample
FFLIBS-$(CONFIG_SAB_FILTER) += swscale
FFLIBS-$(CONFIG_SCALE_FILTER) += swscale
FFLIBS-$(CONFIG_SHOWSPECTRUM_FILTER) += avcodec
FFLIBS-$(CONFIG_SMARTBLUR_FILTER) += swscale
FFLIBS-$(CONFIG_SUBTITLES_FILTER) += avformat avcodec
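 
# How the conditional lists above resolve (illustrative sketch of the FFmpeg
# build convention): config.mak defines each CONFIG_*_FILTER macro as "yes"
# or leaves it empty, so with CONFIG_SCALE_FILTER=yes the line
# "FFLIBS-$(CONFIG_SCALE_FILTER) += swscale" expands to "FFLIBS-yes += swscale",
# and the shared build rules later fold the -yes lists into FFLIBS. The
# OBJS-$(CONFIG_*) lines below work the same way: only enabled filters
# contribute object files.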
 
HEADERS = asrc_abuffer.h \
avcodec.h \
avfilter.h \
avfiltergraph.h \
buffersink.h \
buffersrc.h \
version.h \
 
OBJS = allfilters.o \
audio.o \
avfilter.o \
avfiltergraph.o \
buffer.o \
buffersink.o \
buffersrc.o \
drawutils.o \
fifo.o \
formats.o \
graphdump.o \
graphparser.o \
opencl_allkernels.o \
transform.o \
video.o \
 
 
OBJS-$(CONFIG_AVCODEC) += avcodec.o
OBJS-$(CONFIG_AVFORMAT) += lavfutils.o
OBJS-$(CONFIG_SWSCALE) += lswsutils.o
 
OBJS-$(CONFIG_ACONVERT_FILTER) += af_aconvert.o
OBJS-$(CONFIG_ADELAY_FILTER) += af_adelay.o
OBJS-$(CONFIG_AECHO_FILTER) += af_aecho.o
OBJS-$(CONFIG_AFADE_FILTER) += af_afade.o
OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o
OBJS-$(CONFIG_AINTERLEAVE_FILTER) += f_interleave.o
OBJS-$(CONFIG_ALLPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o
OBJS-$(CONFIG_AMIX_FILTER) += af_amix.o
OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o
OBJS-$(CONFIG_APAD_FILTER) += af_apad.o
OBJS-$(CONFIG_APERMS_FILTER) += f_perms.o
OBJS-$(CONFIG_APHASER_FILTER) += af_aphaser.o
OBJS-$(CONFIG_ARESAMPLE_FILTER) += af_aresample.o
OBJS-$(CONFIG_ASELECT_FILTER) += f_select.o
OBJS-$(CONFIG_ASENDCMD_FILTER) += f_sendcmd.o
OBJS-$(CONFIG_ASETNSAMPLES_FILTER) += af_asetnsamples.o
OBJS-$(CONFIG_ASETPTS_FILTER) += setpts.o
OBJS-$(CONFIG_ASETRATE_FILTER) += af_asetrate.o
OBJS-$(CONFIG_ASETTB_FILTER) += f_settb.o
OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o
OBJS-$(CONFIG_ASPLIT_FILTER) += split.o
OBJS-$(CONFIG_ASTATS_FILTER) += af_astats.o
OBJS-$(CONFIG_ASTREAMSYNC_FILTER) += af_astreamsync.o
OBJS-$(CONFIG_ASYNCTS_FILTER) += af_asyncts.o
OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o
OBJS-$(CONFIG_ATRIM_FILTER) += trim.o
OBJS-$(CONFIG_AZMQ_FILTER) += f_zmq.o
OBJS-$(CONFIG_BANDPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_BANDREJECT_FILTER) += af_biquads.o
OBJS-$(CONFIG_BASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_BIQUAD_FILTER) += af_biquads.o
OBJS-$(CONFIG_CHANNELMAP_FILTER) += af_channelmap.o
OBJS-$(CONFIG_CHANNELSPLIT_FILTER) += af_channelsplit.o
OBJS-$(CONFIG_COMPAND_FILTER) += af_compand.o
OBJS-$(CONFIG_EARWAX_FILTER) += af_earwax.o
OBJS-$(CONFIG_EBUR128_FILTER) += f_ebur128.o
OBJS-$(CONFIG_EQUALIZER_FILTER) += af_biquads.o
OBJS-$(CONFIG_HIGHPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_JOIN_FILTER) += af_join.o
OBJS-$(CONFIG_LADSPA_FILTER) += af_ladspa.o
OBJS-$(CONFIG_LOWPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_PAN_FILTER) += af_pan.o
OBJS-$(CONFIG_REPLAYGAIN_FILTER) += af_replaygain.o
OBJS-$(CONFIG_RESAMPLE_FILTER) += af_resample.o
OBJS-$(CONFIG_SILENCEDETECT_FILTER) += af_silencedetect.o
OBJS-$(CONFIG_TREBLE_FILTER) += af_biquads.o
OBJS-$(CONFIG_VOLUME_FILTER) += af_volume.o
OBJS-$(CONFIG_VOLUMEDETECT_FILTER) += af_volumedetect.o
 
OBJS-$(CONFIG_AEVALSRC_FILTER) += asrc_aevalsrc.o
OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o
OBJS-$(CONFIG_FLITE_FILTER) += asrc_flite.o
OBJS-$(CONFIG_SINE_FILTER) += asrc_sine.o
 
OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o
 
OBJS-$(CONFIG_ASS_FILTER) += vf_subtitles.o
OBJS-$(CONFIG_ALPHAEXTRACT_FILTER) += vf_extractplanes.o
OBJS-$(CONFIG_ALPHAMERGE_FILTER) += vf_alphamerge.o
OBJS-$(CONFIG_BBOX_FILTER) += bbox.o vf_bbox.o
OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o
OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o
OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o dualinput.o framesync.o
OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o
OBJS-$(CONFIG_COLORBALANCE_FILTER) += vf_colorbalance.o
OBJS-$(CONFIG_COLORCHANNELMIXER_FILTER) += vf_colorchannelmixer.o
OBJS-$(CONFIG_COLORMATRIX_FILTER) += vf_colormatrix.o
OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o
OBJS-$(CONFIG_CROP_FILTER) += vf_crop.o
OBJS-$(CONFIG_CROPDETECT_FILTER) += vf_cropdetect.o
OBJS-$(CONFIG_CURVES_FILTER) += vf_curves.o
OBJS-$(CONFIG_DCTDNOIZ_FILTER) += vf_dctdnoiz.o
OBJS-$(CONFIG_DECIMATE_FILTER) += vf_decimate.o
OBJS-$(CONFIG_DELOGO_FILTER) += vf_delogo.o
OBJS-$(CONFIG_DESHAKE_FILTER) += vf_deshake.o
OBJS-$(CONFIG_DRAWBOX_FILTER) += vf_drawbox.o
OBJS-$(CONFIG_DRAWGRID_FILTER) += vf_drawbox.o
OBJS-$(CONFIG_DRAWTEXT_FILTER) += vf_drawtext.o
OBJS-$(CONFIG_EDGEDETECT_FILTER) += vf_edgedetect.o
OBJS-$(CONFIG_EXTRACTPLANES_FILTER) += vf_extractplanes.o
OBJS-$(CONFIG_FADE_FILTER) += vf_fade.o
OBJS-$(CONFIG_FIELD_FILTER) += vf_field.o
OBJS-$(CONFIG_FIELDMATCH_FILTER) += vf_fieldmatch.o
OBJS-$(CONFIG_FIELDORDER_FILTER) += vf_fieldorder.o
OBJS-$(CONFIG_FORMAT_FILTER) += vf_format.o
OBJS-$(CONFIG_FRAMESTEP_FILTER) += vf_framestep.o
OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o
OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o
OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o
OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o dualinput.o framesync.o
OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
OBJS-$(CONFIG_HISTEQ_FILTER) += vf_histeq.o
OBJS-$(CONFIG_HISTOGRAM_FILTER) += vf_histogram.o
OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o
OBJS-$(CONFIG_HUE_FILTER) += vf_hue.o
OBJS-$(CONFIG_IDET_FILTER) += vf_idet.o
OBJS-$(CONFIG_IL_FILTER) += vf_il.o
OBJS-$(CONFIG_INTERLACE_FILTER) += vf_interlace.o
OBJS-$(CONFIG_INTERLEAVE_FILTER) += f_interleave.o
OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o
OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
OBJS-$(CONFIG_MCDEINT_FILTER) += vf_mcdeint.o
OBJS-$(CONFIG_MERGEPLANES_FILTER) += vf_mergeplanes.o framesync.o
OBJS-$(CONFIG_MP_FILTER) += vf_mp.o
OBJS-$(CONFIG_MPDECIMATE_FILTER) += vf_mpdecimate.o
OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o
OBJS-$(CONFIG_NOFORMAT_FILTER) += vf_format.o
OBJS-$(CONFIG_NOISE_FILTER) += vf_noise.o
OBJS-$(CONFIG_NULL_FILTER) += vf_null.o
OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o
OBJS-$(CONFIG_OPENCL) += deshake_opencl.o unsharp_opencl.o
OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o dualinput.o framesync.o
OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o
OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o
OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o
OBJS-$(CONFIG_PERSPECTIVE_FILTER) += vf_perspective.o
OBJS-$(CONFIG_PHASE_FILTER) += vf_phase.o
OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o
OBJS-$(CONFIG_PP_FILTER) += vf_pp.o
OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o dualinput.o framesync.o
OBJS-$(CONFIG_PULLUP_FILTER) += vf_pullup.o
OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o
OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o
OBJS-$(CONFIG_SEPARATEFIELDS_FILTER) += vf_separatefields.o
OBJS-$(CONFIG_SAB_FILTER) += vf_sab.o
OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o
OBJS-$(CONFIG_SELECT_FILTER) += f_select.o
OBJS-$(CONFIG_SENDCMD_FILTER) += f_sendcmd.o
OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o
OBJS-$(CONFIG_SETFIELD_FILTER) += vf_setfield.o
OBJS-$(CONFIG_SETPTS_FILTER) += setpts.o
OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o
OBJS-$(CONFIG_SETTB_FILTER) += f_settb.o
OBJS-$(CONFIG_SHOWINFO_FILTER) += vf_showinfo.o
OBJS-$(CONFIG_SMARTBLUR_FILTER) += vf_smartblur.o
OBJS-$(CONFIG_SPLIT_FILTER) += split.o
OBJS-$(CONFIG_SPP_FILTER) += vf_spp.o
OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o
OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_subtitles.o
OBJS-$(CONFIG_SUPER2XSAI_FILTER) += vf_super2xsai.o
OBJS-$(CONFIG_SWAPUV_FILTER) += vf_swapuv.o
OBJS-$(CONFIG_TELECINE_FILTER) += vf_telecine.o
OBJS-$(CONFIG_THUMBNAIL_FILTER) += vf_thumbnail.o
OBJS-$(CONFIG_TILE_FILTER) += vf_tile.o
OBJS-$(CONFIG_TINTERLACE_FILTER) += vf_tinterlace.o
OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o
OBJS-$(CONFIG_TRIM_FILTER) += trim.o
OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o
OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o
OBJS-$(CONFIG_VIDSTABDETECT_FILTER) += vidstabutils.o vf_vidstabdetect.o
OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER) += vidstabutils.o vf_vidstabtransform.o
OBJS-$(CONFIG_VIGNETTE_FILTER) += vf_vignette.o
OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o
OBJS-$(CONFIG_ZMQ_FILTER) += f_zmq.o
 
OBJS-$(CONFIG_CELLAUTO_FILTER) += vsrc_cellauto.o
OBJS-$(CONFIG_COLOR_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_FREI0R_SRC_FILTER) += vf_frei0r.o
OBJS-$(CONFIG_HALDCLUTSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_LIFE_FILTER) += vsrc_life.o
OBJS-$(CONFIG_MANDELBROT_FILTER) += vsrc_mandelbrot.o
OBJS-$(CONFIG_MPTESTSRC_FILTER) += vsrc_mptestsrc.o
OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_RGBTESTSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_SMPTEBARS_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_SMPTEHDBARS_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_TESTSRC_FILTER) += vsrc_testsrc.o
 
OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o
 
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/mp_image.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/img_format.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_eq2.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_eq.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_fspp.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ilpack.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_pp7.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_softpulldown.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_uspp.o
 
# multimedia filters
OBJS-$(CONFIG_AVECTORSCOPE_FILTER) += avf_avectorscope.o
OBJS-$(CONFIG_CONCAT_FILTER) += avf_concat.o
OBJS-$(CONFIG_SHOWSPECTRUM_FILTER) += avf_showspectrum.o
OBJS-$(CONFIG_SHOWWAVES_FILTER) += avf_showwaves.o
 
# multimedia sources
OBJS-$(CONFIG_AMOVIE_FILTER) += src_movie.o
OBJS-$(CONFIG_MOVIE_FILTER) += src_movie.o
 
SKIPHEADERS-$(CONFIG_LIBVIDSTAB) += vidstabutils.h
SKIPHEADERS-$(CONFIG_OPENCL) += opencl_internal.h deshake_opencl_kernel.h unsharp_opencl_kernel.h
 
OBJS-$(HAVE_THREADS) += pthread.o
 
TOOLS = graph2dot
TESTPROGS = drawutils filtfmts formats
 
TOOLS-$(CONFIG_LIBZMQ) += zmqsend
 
clean::
$(RM) $(CLEANSUFFIXES:%=libavfilter/libmpcodecs/%)
/contrib/sdk/sources/ffmpeg/libavfilter/af_aconvert.c
0,0 → 1,196
/*
* Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2011 Mina Nagy Zaki
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* sample format and channel layout conversion audio filter
*/
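 
/*
 * Illustrative usage (assuming the standard ffmpeg CLI), converting to
 * planar float samples and a stereo layout:
 *
 * ffmpeg -i in.wav -af aconvert=fltp:stereo out.wav
 *
 * The options are parsed positionally as sample_fmt:channel_layout; the
 * aformat filter provides the same functionality and is the recommended
 * replacement (see the deprecation warning emitted in init() below).
 */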
 
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libswresample/swresample.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
enum AVSampleFormat out_sample_fmt;
int64_t out_chlayout;
struct SwrContext *swr;
char *format_str;
char *channel_layout_str;
} AConvertContext;
 
#define OFFSET(x) offsetof(AConvertContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption aconvert_options[] = {
{ "sample_fmt", "", OFFSET(format_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "channel_layout", "", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(aconvert);
 
static av_cold int init(AVFilterContext *ctx)
{
AConvertContext *aconvert = ctx->priv;
int ret = 0;
 
av_log(ctx, AV_LOG_WARNING, "This filter is deprecated, use aformat instead\n");
 
aconvert->out_sample_fmt = AV_SAMPLE_FMT_NONE;
aconvert->out_chlayout = 0;
 
if (aconvert->format_str && strcmp(aconvert->format_str, "auto") &&
(ret = ff_parse_sample_format(&aconvert->out_sample_fmt, aconvert->format_str, ctx)) < 0)
return ret;
if (aconvert->channel_layout_str && strcmp(aconvert->channel_layout_str, "auto"))
return ff_parse_channel_layout(&aconvert->out_chlayout, aconvert->channel_layout_str, ctx);
return ret;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
AConvertContext *aconvert = ctx->priv;
swr_free(&aconvert->swr);
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
AConvertContext *aconvert = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
AVFilterChannelLayouts *layouts;
 
ff_formats_ref(ff_all_formats(AVMEDIA_TYPE_AUDIO),
&inlink->out_formats);
if (aconvert->out_sample_fmt != AV_SAMPLE_FMT_NONE) {
formats = NULL;
ff_add_format(&formats, aconvert->out_sample_fmt);
ff_formats_ref(formats, &outlink->in_formats);
} else
ff_formats_ref(ff_all_formats(AVMEDIA_TYPE_AUDIO),
&outlink->in_formats);
 
ff_channel_layouts_ref(ff_all_channel_layouts(),
&inlink->out_channel_layouts);
if (aconvert->out_chlayout != 0) {
layouts = NULL;
ff_add_channel_layout(&layouts, aconvert->out_chlayout);
ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
} else
ff_channel_layouts_ref(ff_all_channel_layouts(),
&outlink->in_channel_layouts);
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
int ret;
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = ctx->inputs[0];
AConvertContext *aconvert = ctx->priv;
char buf1[64], buf2[64];
 
/* if not specified in args, use the format and layout of the output */
if (aconvert->out_sample_fmt == AV_SAMPLE_FMT_NONE)
aconvert->out_sample_fmt = outlink->format;
if (aconvert->out_chlayout == 0)
aconvert->out_chlayout = outlink->channel_layout;
 
aconvert->swr = swr_alloc_set_opts(aconvert->swr,
aconvert->out_chlayout, aconvert->out_sample_fmt, inlink->sample_rate,
inlink->channel_layout, inlink->format, inlink->sample_rate,
0, ctx);
if (!aconvert->swr)
return AVERROR(ENOMEM);
ret = swr_init(aconvert->swr);
if (ret < 0)
return ret;
 
av_get_channel_layout_string(buf1, sizeof(buf1),
-1, inlink ->channel_layout);
av_get_channel_layout_string(buf2, sizeof(buf2),
-1, outlink->channel_layout);
av_log(ctx, AV_LOG_VERBOSE,
"fmt:%s cl:%s -> fmt:%s cl:%s\n",
av_get_sample_fmt_name(inlink ->format), buf1,
av_get_sample_fmt_name(outlink->format), buf2);
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
{
AConvertContext *aconvert = inlink->dst->priv;
const int n = insamplesref->nb_samples;
AVFilterLink *const outlink = inlink->dst->outputs[0];
AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n);
int ret;
 
if (!outsamplesref)
return AVERROR(ENOMEM);
swr_convert(aconvert->swr, outsamplesref->extended_data, n,
(void *)insamplesref->extended_data, n);
 
av_frame_copy_props(outsamplesref, insamplesref);
av_frame_set_channels(outsamplesref, outlink->channels);
outsamplesref->channel_layout = outlink->channel_layout;
 
ret = ff_filter_frame(outlink, outsamplesref);
av_frame_free(&insamplesref);
return ret;
}
 
static const AVFilterPad aconvert_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad aconvert_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_af_aconvert = {
.name = "aconvert",
.description = NULL_IF_CONFIG_SMALL("Convert the input audio to sample_fmt:channel_layout."),
.priv_size = sizeof(AConvertContext),
.priv_class = &aconvert_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = aconvert_inputs,
.outputs = aconvert_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_adelay.c
0,0 → 1,283
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
 
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
 
typedef struct ChanDelay {
int delay;
unsigned delay_index;
unsigned index;
uint8_t *samples;
} ChanDelay;
 
typedef struct AudioDelayContext {
const AVClass *class;
char *delays;
ChanDelay *chandelay;
int nb_delays;
int block_align;
unsigned max_delay;
int64_t next_pts;
 
void (*delay_channel)(ChanDelay *d, int nb_samples,
const uint8_t *src, uint8_t *dst);
} AudioDelayContext;
 
#define OFFSET(x) offsetof(AudioDelayContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption adelay_options[] = {
{ "delays", "set list of delays for each channel", OFFSET(delays), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
{ NULL }
};
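 
/*
 * Illustrative usage (assuming the standard ffmpeg CLI): delay the first
 * channel by 1.5 s and the third by 0.5 s, leaving the second unchanged:
 *
 * ffmpeg -i in.wav -af "adelay=1500|0|500" out.wav
 *
 * Each "|"-separated entry is a per-channel delay in milliseconds, as
 * parsed in config_input() below.
 */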
 
AVFILTER_DEFINE_CLASS(adelay);
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterChannelLayouts *layouts;
AVFilterFormats *formats;
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
};
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, layouts);
 
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_samplerates(ctx, formats);
 
return 0;
}
 
#define DELAY(name, type, fill) \
static void delay_channel_## name ##p(ChanDelay *d, int nb_samples, \
const uint8_t *ssrc, uint8_t *ddst) \
{ \
const type *src = (type *)ssrc; \
type *dst = (type *)ddst; \
type *samples = (type *)d->samples; \
\
while (nb_samples) { \
if (d->delay_index < d->delay) { \
const int len = FFMIN(nb_samples, d->delay - d->delay_index); \
\
memcpy(&samples[d->delay_index], src, len * sizeof(type)); \
memset(dst, fill, len * sizeof(type)); \
d->delay_index += len; \
src += len; \
dst += len; \
nb_samples -= len; \
} else { \
*dst = samples[d->index]; \
samples[d->index] = *src; \
nb_samples--; \
d->index++; \
src++, dst++; \
d->index = d->index >= d->delay ? 0 : d->index; \
} \
} \
}
 
DELAY(u8, uint8_t, 0x80)
DELAY(s16, int16_t, 0)
DELAY(s32, int32_t, 0)
DELAY(flt, float, 0)
DELAY(dbl, double, 0)
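 
/*
 * Worked example of the ring buffer above, with d->delay = 3 samples: the
 * first 3 input samples are stored in d->samples while silence (the "fill"
 * value) is written to dst; from then on every output sample is the input
 * stored 3 positions earlier, with d->index wrapping back to 0 once it
 * reaches d->delay.
 */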
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
AudioDelayContext *s = ctx->priv;
char *p, *arg, *saveptr = NULL;
int i;
 
s->chandelay = av_calloc(inlink->channels, sizeof(*s->chandelay));
if (!s->chandelay)
return AVERROR(ENOMEM);
s->nb_delays = inlink->channels;
s->block_align = av_get_bytes_per_sample(inlink->format);
 
p = s->delays;
for (i = 0; i < s->nb_delays; i++) {
ChanDelay *d = &s->chandelay[i];
float delay;
 
if (!(arg = av_strtok(p, "|", &saveptr)))
break;
 
p = NULL;
if (sscanf(arg, "%f", &delay) != 1) {
av_log(ctx, AV_LOG_ERROR, "Invalid delay '%s' for channel %d.\n", arg, i);
return AVERROR(EINVAL);
}
 
d->delay = delay * inlink->sample_rate / 1000.0;
if (d->delay < 0) {
av_log(ctx, AV_LOG_ERROR, "Delay must be a non-negative number.\n");
return AVERROR(EINVAL);
}
}
 
for (i = 0; i < s->nb_delays; i++) {
ChanDelay *d = &s->chandelay[i];
 
if (!d->delay)
continue;
 
d->samples = av_malloc_array(d->delay, s->block_align);
if (!d->samples)
return AVERROR(ENOMEM);
 
s->max_delay = FFMAX(s->max_delay, d->delay);
}
 
if (!s->max_delay) {
av_log(ctx, AV_LOG_ERROR, "At least one delay >0 must be specified.\n");
return AVERROR(EINVAL);
}
 
switch (inlink->format) {
case AV_SAMPLE_FMT_U8P : s->delay_channel = delay_channel_u8p ; break;
case AV_SAMPLE_FMT_S16P: s->delay_channel = delay_channel_s16p; break;
case AV_SAMPLE_FMT_S32P: s->delay_channel = delay_channel_s32p; break;
case AV_SAMPLE_FMT_FLTP: s->delay_channel = delay_channel_fltp; break;
case AV_SAMPLE_FMT_DBLP: s->delay_channel = delay_channel_dblp; break;
}
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
AudioDelayContext *s = ctx->priv;
AVFrame *out_frame;
int i;
 
if (ctx->is_disabled || !s->delays)
return ff_filter_frame(ctx->outputs[0], frame);
 
out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
if (!out_frame)
return AVERROR(ENOMEM);
av_frame_copy_props(out_frame, frame);
 
for (i = 0; i < s->nb_delays; i++) {
ChanDelay *d = &s->chandelay[i];
const uint8_t *src = frame->extended_data[i];
uint8_t *dst = out_frame->extended_data[i];
 
if (!d->delay)
memcpy(dst, src, frame->nb_samples * s->block_align);
else
s->delay_channel(d, frame->nb_samples, src, dst);
}
 
s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
av_frame_free(&frame);
return ff_filter_frame(ctx->outputs[0], out_frame);
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AudioDelayContext *s = ctx->priv;
int ret;
 
ret = ff_request_frame(ctx->inputs[0]);
if (ret == AVERROR_EOF && !ctx->is_disabled && s->max_delay) {
int nb_samples = FFMIN(s->max_delay, 2048);
AVFrame *frame;
 
frame = ff_get_audio_buffer(outlink, nb_samples);
if (!frame)
return AVERROR(ENOMEM);
s->max_delay -= nb_samples;
 
av_samples_set_silence(frame->extended_data, 0,
frame->nb_samples,
outlink->channels,
frame->format);
 
frame->pts = s->next_pts;
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
 
ret = filter_frame(ctx->inputs[0], frame);
}
 
return ret;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
AudioDelayContext *s = ctx->priv;
int i;
 
for (i = 0; i < s->nb_delays; i++)
av_free(s->chandelay[i].samples);
av_freep(&s->chandelay);
}
 
static const AVFilterPad adelay_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad adelay_outputs[] = {
{
.name = "default",
.request_frame = request_frame,
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_adelay = {
.name = "adelay",
.description = NULL_IF_CONFIG_SMALL("Delay one or more audio channels."),
.query_formats = query_formats,
.priv_size = sizeof(AudioDelayContext),
.priv_class = &adelay_class,
.uninit = uninit,
.inputs = adelay_inputs,
.outputs = adelay_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_aecho.c
0,0 → 1,359
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
 
typedef struct AudioEchoContext {
const AVClass *class;
float in_gain, out_gain;
char *delays, *decays;
float *delay, *decay;
int nb_echoes;
int delay_index;
uint8_t **delayptrs;
int max_samples, fade_out;
int *samples;
int64_t next_pts;
 
void (*echo_samples)(struct AudioEchoContext *ctx, uint8_t **delayptrs,
uint8_t * const *src, uint8_t **dst,
int nb_samples, int channels);
} AudioEchoContext;
 
#define OFFSET(x) offsetof(AudioEchoContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption aecho_options[] = {
{ "in_gain", "set signal input gain", OFFSET(in_gain), AV_OPT_TYPE_FLOAT, {.dbl=0.6}, 0, 1, A },
{ "out_gain", "set signal output gain", OFFSET(out_gain), AV_OPT_TYPE_FLOAT, {.dbl=0.3}, 0, 1, A },
{ "delays", "set list of signal delays", OFFSET(delays), AV_OPT_TYPE_STRING, {.str="1000"}, 0, 0, A },
{ "decays", "set list of signal decays", OFFSET(decays), AV_OPT_TYPE_STRING, {.str="0.5"}, 0, 0, A },
{ NULL }
};
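 
/*
 * Illustrative usage (assuming the standard ffmpeg CLI): a single echo one
 * second after the input at 0.3 of its amplitude, using the positional
 * option order in_gain:out_gain:delays:decays:
 *
 * ffmpeg -i in.wav -af "aecho=0.8:0.9:1000:0.3" out.wav
 *
 * Multiple echoes are given as "|"-separated lists, e.g. delays=1000|1800.
 */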
 
AVFILTER_DEFINE_CLASS(aecho);
 
static void count_items(char *item_str, int *nb_items)
{
char *p;
 
*nb_items = 1;
for (p = item_str; *p; p++) {
if (*p == '|')
(*nb_items)++;
}
 
}
 
static void fill_items(char *item_str, int *nb_items, float *items)
{
char *p, *saveptr = NULL;
int i, new_nb_items = 0;
 
p = item_str;
for (i = 0; i < *nb_items; i++) {
char *tstr = av_strtok(p, "|", &saveptr);
p = NULL;
new_nb_items += sscanf(tstr, "%f", &items[i]) == 1;
}
 
*nb_items = new_nb_items;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
AudioEchoContext *s = ctx->priv;
 
av_freep(&s->delay);
av_freep(&s->decay);
av_freep(&s->samples);
 
if (s->delayptrs)
av_freep(&s->delayptrs[0]);
av_freep(&s->delayptrs);
}
 
static av_cold int init(AVFilterContext *ctx)
{
AudioEchoContext *s = ctx->priv;
int nb_delays, nb_decays, i;
 
if (!s->delays || !s->decays) {
av_log(ctx, AV_LOG_ERROR, "Missing delays and/or decays.\n");
return AVERROR(EINVAL);
}
 
count_items(s->delays, &nb_delays);
count_items(s->decays, &nb_decays);
 
s->delay = av_realloc_f(s->delay, nb_delays, sizeof(*s->delay));
s->decay = av_realloc_f(s->decay, nb_decays, sizeof(*s->decay));
if (!s->delay || !s->decay)
return AVERROR(ENOMEM);
 
fill_items(s->delays, &nb_delays, s->delay);
fill_items(s->decays, &nb_decays, s->decay);
 
if (nb_delays != nb_decays) {
av_log(ctx, AV_LOG_ERROR, "Number of delays %d differs from number of decays %d.\n", nb_delays, nb_decays);
return AVERROR(EINVAL);
}
 
s->nb_echoes = nb_delays;
if (!s->nb_echoes) {
av_log(ctx, AV_LOG_ERROR, "At least one decay & delay must be set.\n");
return AVERROR(EINVAL);
}
 
s->samples = av_realloc_f(s->samples, nb_delays, sizeof(*s->samples));
if (!s->samples)
return AVERROR(ENOMEM);
 
for (i = 0; i < nb_delays; i++) {
if (s->delay[i] <= 0 || s->delay[i] > 90000) {
av_log(ctx, AV_LOG_ERROR, "delay[%d]: %f is out of allowed range: (0, 90000]\n", i, s->delay[i]);
return AVERROR(EINVAL);
}
if (s->decay[i] <= 0 || s->decay[i] > 1) {
av_log(ctx, AV_LOG_ERROR, "decay[%d]: %f is out of allowed range: (0, 1]\n", i, s->decay[i]);
return AVERROR(EINVAL);
}
}
 
s->next_pts = AV_NOPTS_VALUE;
 
av_log(ctx, AV_LOG_DEBUG, "nb_echoes:%d\n", s->nb_echoes);
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterChannelLayouts *layouts;
AVFilterFormats *formats;
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
};
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, layouts);
 
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_samplerates(ctx, formats);
 
return 0;
}
 
#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
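 
/* MOD(a, b) above is a cheap substitute for "a % b": it is valid only
 * because every value passed to it is already known to be < 2*b, which
 * holds for the ring-buffer indices computed in the ECHO macro below. */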
 
#define ECHO(name, type, min, max) \
static void echo_samples_## name ##p(AudioEchoContext *ctx, \
uint8_t **delayptrs, \
uint8_t * const *src, uint8_t **dst, \
int nb_samples, int channels) \
{ \
const double out_gain = ctx->out_gain; \
const double in_gain = ctx->in_gain; \
const int nb_echoes = ctx->nb_echoes; \
const int max_samples = ctx->max_samples; \
int i, j, chan, av_uninit(index); \
\
av_assert1(channels > 0); /* would corrupt delay_index */ \
\
for (chan = 0; chan < channels; chan++) { \
const type *s = (type *)src[chan]; \
type *d = (type *)dst[chan]; \
type *dbuf = (type *)delayptrs[chan]; \
\
index = ctx->delay_index; \
for (i = 0; i < nb_samples; i++, s++, d++) { \
double out, in; \
\
in = *s; \
out = in * in_gain; \
for (j = 0; j < nb_echoes; j++) { \
int ix = index + max_samples - ctx->samples[j]; \
ix = MOD(ix, max_samples); \
out += dbuf[ix] * ctx->decay[j]; \
} \
out *= out_gain; \
\
*d = av_clipd(out, min, max); \
dbuf[index] = in; \
\
index = MOD(index + 1, max_samples); \
} \
} \
ctx->delay_index = index; \
}
 
ECHO(dbl, double, -1.0, 1.0 )
ECHO(flt, float, -1.0, 1.0 )
ECHO(s16, int16_t, INT16_MIN, INT16_MAX)
ECHO(s32, int32_t, INT32_MIN, INT32_MAX)
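 
/*
 * The per-sample computation above, written out: for input x[n] and echo
 * parameters (delay_j in samples, decay_j),
 *
 * y[n] = out_gain * (in_gain * x[n] + sum_j decay_j * x[n - delay_j])
 *
 * where the past input samples x[n - delay_j] are read back from the
 * per-channel ring buffer dbuf and the result is clipped to [min, max].
 */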
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AudioEchoContext *s = ctx->priv;
float volume = 1.0;
int i;
 
for (i = 0; i < s->nb_echoes; i++) {
s->samples[i] = s->delay[i] * outlink->sample_rate / 1000.0;
s->max_samples = FFMAX(s->max_samples, s->samples[i]);
volume += s->decay[i];
}
 
if (s->max_samples <= 0) {
av_log(ctx, AV_LOG_ERROR, "Nothing to echo - missing delay samples.\n");
return AVERROR(EINVAL);
}
s->fade_out = s->max_samples;
 
if (volume * s->in_gain * s->out_gain > 1.0)
av_log(ctx, AV_LOG_WARNING,
"out_gain %f can cause saturation of output\n", s->out_gain);
 
switch (outlink->format) {
case AV_SAMPLE_FMT_DBLP: s->echo_samples = echo_samples_dblp; break;
case AV_SAMPLE_FMT_FLTP: s->echo_samples = echo_samples_fltp; break;
case AV_SAMPLE_FMT_S16P: s->echo_samples = echo_samples_s16p; break;
case AV_SAMPLE_FMT_S32P: s->echo_samples = echo_samples_s32p; break;
}
 
 
if (s->delayptrs)
av_freep(&s->delayptrs[0]);
av_freep(&s->delayptrs);
 
return av_samples_alloc_array_and_samples(&s->delayptrs, NULL,
outlink->channels,
s->max_samples,
outlink->format, 0);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
AudioEchoContext *s = ctx->priv;
AVFrame *out_frame;
 
if (av_frame_is_writable(frame)) {
out_frame = frame;
} else {
out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
if (!out_frame)
return AVERROR(ENOMEM);
av_frame_copy_props(out_frame, frame);
}
 
s->echo_samples(s, s->delayptrs, frame->extended_data, out_frame->extended_data,
frame->nb_samples, inlink->channels);
 
/* compute next_pts before the input frame may be freed below */
s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);

if (frame != out_frame)
av_frame_free(&frame);

return ff_filter_frame(ctx->outputs[0], out_frame);
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AudioEchoContext *s = ctx->priv;
int ret;
 
ret = ff_request_frame(ctx->inputs[0]);
 
if (ret == AVERROR_EOF && !ctx->is_disabled && s->fade_out) {
int nb_samples = FFMIN(s->fade_out, 2048);
AVFrame *frame;
 
frame = ff_get_audio_buffer(outlink, nb_samples);
if (!frame)
return AVERROR(ENOMEM);
s->fade_out -= nb_samples;
 
av_samples_set_silence(frame->extended_data, 0,
frame->nb_samples,
outlink->channels,
frame->format);
 
s->echo_samples(s, s->delayptrs, frame->extended_data, frame->extended_data,
frame->nb_samples, outlink->channels);
 
frame->pts = s->next_pts;
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
 
return ff_filter_frame(outlink, frame);
}
 
return ret;
}
 
static const AVFilterPad aecho_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad aecho_outputs[] = {
{
.name = "default",
.request_frame = request_frame,
.config_props = config_output,
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_aecho = {
.name = "aecho",
.description = NULL_IF_CONFIG_SMALL("Add echoing to the audio."),
.query_formats = query_formats,
.priv_size = sizeof(AudioEchoContext),
.priv_class = &aecho_class,
.init = init,
.uninit = uninit,
.inputs = aecho_inputs,
.outputs = aecho_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_afade.c
0,0 → 1,300
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* fade audio filter
*/
 
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
int type;
int curve;
int nb_samples;
int64_t start_sample;
int64_t duration;
int64_t start_time;
 
void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
int nb_samples, int channels, int direction,
int64_t start, int range, int curve);
} AudioFadeContext;
 
enum CurveType { TRI, QSIN, ESIN, HSIN, LOG, PAR, QUA, CUB, SQU, CBR };
 
#define OFFSET(x) offsetof(AudioFadeContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption afade_options[] = {
{ "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" },
{ "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" },
{ "in", "fade-in", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "type" },
{ "out", "fade-out", 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "type" },
{ "start_sample", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS },
{ "ss", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS },
{ "nb_samples", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS },
{ "ns", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS },
{ "start_time", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
{ "st", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
{ "duration", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
{ "d", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
{ "curve", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" },
{ "c", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" },
{ "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, "curve" },
{ "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
{ "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
{ "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, FLAGS, "curve" },
{ "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, FLAGS, "curve" },
{ "par", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, FLAGS, "curve" },
{ "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, FLAGS, "curve" },
{ "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, "curve" },
{ "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, "curve" },
{ "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, FLAGS, "curve" },
{ NULL }
};
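 
/*
 * Illustrative usage (assuming the standard ffmpeg CLI): fade the first 15
 * seconds in, or fade out over the 25 seconds starting at 875 s:
 *
 * ffmpeg -i in.wav -af "afade=t=in:ss=0:d=15" out.wav
 * ffmpeg -i in.wav -af "afade=t=out:st=875:d=25" out.wav
 */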
 
AVFILTER_DEFINE_CLASS(afade);
 
static av_cold int init(AVFilterContext *ctx)
{
AudioFadeContext *s = ctx->priv;
 
if (INT64_MAX - s->nb_samples < s->start_sample)
return AVERROR(EINVAL);
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats;
AVFilterChannelLayouts *layouts;
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
};
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, layouts);
 
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_samplerates(ctx, formats);
 
return 0;
}
 
static double fade_gain(int curve, int64_t index, int range)
{
double gain;
 
gain = FFMAX(0.0, FFMIN(1.0, 1.0 * index / range));
 
switch (curve) {
case QSIN:
gain = sin(gain * M_PI / 2.0);
break;
case ESIN:
gain = 1.0 - cos(M_PI / 4.0 * (pow(2.0*gain - 1, 3) + 1));
break;
case HSIN:
gain = (1.0 - cos(gain * M_PI)) / 2.0;
break;
case LOG:
gain = pow(0.1, (1 - gain) * 5.0);
break;
case PAR:
gain = (1 - (1 - gain) * (1 - gain));
break;
case QUA:
gain *= gain;
break;
case CUB:
gain = gain * gain * gain;
break;
case SQU:
gain = sqrt(gain);
break;
case CBR:
gain = cbrt(gain);
break;
}
 
return gain;
}
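 
/*
 * Sample of the curves above at the midpoint (index/range = 0.5), for
 * intuition: tri 0.5, qsin ~0.71, hsin 0.5, esin ~0.29, log ~0.003,
 * par 0.75, qua 0.25, cub 0.125, squ ~0.71, cbr ~0.79. Every curve maps
 * 0 -> 0 and 1 -> 1 (up to rounding), so only the shape of the ramp differs.
 */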
 
#define FADE_PLANAR(name, type) \
static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
int nb_samples, int channels, int dir, \
int64_t start, int range, int curve) \
{ \
int i, c; \
\
for (i = 0; i < nb_samples; i++) { \
double gain = fade_gain(curve, start + i * dir, range); \
for (c = 0; c < channels; c++) { \
type *d = (type *)dst[c]; \
const type *s = (type *)src[c]; \
\
d[i] = s[i] * gain; \
} \
} \
}
 
#define FADE(name, type) \
static void fade_samples_## name (uint8_t **dst, uint8_t * const *src, \
int nb_samples, int channels, int dir, \
int64_t start, int range, int curve) \
{ \
type *d = (type *)dst[0]; \
const type *s = (type *)src[0]; \
int i, c, k = 0; \
\
for (i = 0; i < nb_samples; i++) { \
double gain = fade_gain(curve, start + i * dir, range); \
for (c = 0; c < channels; c++, k++) \
d[k] = s[k] * gain; \
} \
}
 
FADE_PLANAR(dbl, double)
FADE_PLANAR(flt, float)
FADE_PLANAR(s16, int16_t)
FADE_PLANAR(s32, int32_t)
 
FADE(dbl, double)
FADE(flt, float)
FADE(s16, int16_t)
FADE(s32, int32_t)
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
AudioFadeContext *s = ctx->priv;
 
switch (inlink->format) {
case AV_SAMPLE_FMT_DBL: s->fade_samples = fade_samples_dbl; break;
case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp; break;
case AV_SAMPLE_FMT_FLT: s->fade_samples = fade_samples_flt; break;
case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp; break;
case AV_SAMPLE_FMT_S16: s->fade_samples = fade_samples_s16; break;
case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p; break;
case AV_SAMPLE_FMT_S32: s->fade_samples = fade_samples_s32; break;
case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p; break;
}
 
if (s->duration)
s->nb_samples = av_rescale(s->duration, inlink->sample_rate, AV_TIME_BASE);
if (s->start_time)
s->start_sample = av_rescale(s->start_time, inlink->sample_rate, AV_TIME_BASE);
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AudioFadeContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int nb_samples = buf->nb_samples;
AVFrame *out_buf;
int64_t cur_sample = av_rescale_q(buf->pts, (AVRational){1, outlink->sample_rate}, outlink->time_base);
 
if ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
( s->type && (cur_sample + s->nb_samples < s->start_sample)))
return ff_filter_frame(outlink, buf);
 
if (av_frame_is_writable(buf)) {
out_buf = buf;
} else {
out_buf = ff_get_audio_buffer(inlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
av_frame_copy_props(out_buf, buf);
}
 
if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
av_frame_get_channels(out_buf), out_buf->format);
} else {
int64_t start;
 
if (!s->type)
start = cur_sample - s->start_sample;
else
start = s->start_sample + s->nb_samples - cur_sample;
 
s->fade_samples(out_buf->extended_data, buf->extended_data,
nb_samples, av_frame_get_channels(buf),
s->type ? -1 : 1, start,
s->nb_samples, s->curve);
}
 
if (buf != out_buf)
av_frame_free(&buf);
 
return ff_filter_frame(outlink, out_buf);
}
 
static const AVFilterPad avfilter_af_afade_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad avfilter_af_afade_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_afade = {
.name = "afade",
.description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
.query_formats = query_formats,
.priv_size = sizeof(AudioFadeContext),
.init = init,
.inputs = avfilter_af_afade_inputs,
.outputs = avfilter_af_afade_outputs,
.priv_class = &afade_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_aformat.c
0,0 → 1,147
/*
* Copyright (c) 2011 Mina Nagy Zaki
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* format audio filter
*/
 
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
 
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
typedef struct AFormatContext {
const AVClass *class;
 
AVFilterFormats *formats;
AVFilterFormats *sample_rates;
AVFilterChannelLayouts *channel_layouts;
 
char *formats_str;
char *sample_rates_str;
char *channel_layouts_str;
} AFormatContext;
 
#define OFFSET(x) offsetof(AFormatContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption aformat_options[] = {
{ "sample_fmts", "A comma-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "sample_rates", "A comma-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "channel_layouts", "A comma-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ NULL }
};
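 
/*
 * Illustrative usage (assuming the standard ffmpeg CLI): constrain the link
 * to 8- or 16-bit samples with a stereo layout:
 *
 * ffmpeg -i in.wav -af "aformat=sample_fmts=u8|s16:channel_layouts=stereo" out.wav
 */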
 
AVFILTER_DEFINE_CLASS(aformat);
 
#define PARSE_FORMATS(str, type, list, add_to_list, get_fmt, none, desc) \
do { \
char *next, *cur = str, sep; \
\
if (str && strchr(str, ',')) { \
av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use '|' to "\
"separate %s.\n", desc); \
sep = ','; \
} else \
sep = '|'; \
\
while (cur) { \
type fmt; \
next = strchr(cur, sep); \
if (next) \
*next++ = 0; \
\
if ((fmt = get_fmt(cur)) == none) { \
av_log(ctx, AV_LOG_ERROR, "Error parsing " desc ": %s.\n", cur);\
return AVERROR(EINVAL); \
} \
add_to_list(&list, fmt); \
\
cur = next; \
} \
} while (0)
 
static int get_sample_rate(const char *samplerate)
{
int ret = strtol(samplerate, NULL, 0);
return FFMAX(ret, 0);
}
 
static av_cold int init(AVFilterContext *ctx)
{
AFormatContext *s = ctx->priv;
 
PARSE_FORMATS(s->formats_str, enum AVSampleFormat, s->formats,
ff_add_format, av_get_sample_fmt, AV_SAMPLE_FMT_NONE, "sample format");
PARSE_FORMATS(s->sample_rates_str, int, s->sample_rates, ff_add_format,
get_sample_rate, 0, "sample rate");
PARSE_FORMATS(s->channel_layouts_str, uint64_t, s->channel_layouts,
ff_add_channel_layout, av_get_channel_layout, 0,
"channel layout");
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
AFormatContext *s = ctx->priv;
 
ff_set_common_formats(ctx, s->formats ? s->formats :
ff_all_formats(AVMEDIA_TYPE_AUDIO));
ff_set_common_samplerates(ctx, s->sample_rates ? s->sample_rates :
ff_all_samplerates());
ff_set_common_channel_layouts(ctx, s->channel_layouts ? s->channel_layouts :
ff_all_channel_counts());
 
return 0;
}
 
static const AVFilterPad avfilter_af_aformat_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
static const AVFilterPad avfilter_af_aformat_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO
},
{ NULL }
};
 
AVFilter avfilter_af_aformat = {
.name = "aformat",
.description = NULL_IF_CONFIG_SMALL("Convert the input audio to one of the specified formats."),
.init = init,
.query_formats = query_formats,
.priv_size = sizeof(AFormatContext),
.priv_class = &aformat_class,
.inputs = avfilter_af_aformat_inputs,
.outputs = avfilter_af_aformat_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_amerge.c
0,0 → 1,350
/*
* Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Audio merging filter
*/
 
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libswresample/swresample.h" // only for SWR_CH_MAX
#include "avfilter.h"
#include "audio.h"
#include "bufferqueue.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
int nb_inputs;
int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
int bps;
struct amerge_input {
struct FFBufQueue queue;
int nb_ch; /**< number of channels for the input */
int nb_samples;
int pos;
} *in;
} AMergeContext;
 
#define OFFSET(x) offsetof(AMergeContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption amerge_options[] = {
{ "inputs", "specify the number of inputs", OFFSET(nb_inputs),
AV_OPT_TYPE_INT, { .i64 = 2 }, 2, SWR_CH_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(amerge);
 
static av_cold void uninit(AVFilterContext *ctx)
{
AMergeContext *am = ctx->priv;
int i;
 
for (i = 0; i < am->nb_inputs; i++) {
if (am->in)
ff_bufqueue_discard_all(&am->in[i].queue);
if (ctx->input_pads)
av_freep(&ctx->input_pads[i].name);
}
av_freep(&am->in);
}
 
static int query_formats(AVFilterContext *ctx)
{
AMergeContext *am = ctx->priv;
int64_t inlayout[SWR_CH_MAX], outlayout = 0;
AVFilterFormats *formats;
AVFilterChannelLayouts *layouts;
int i, overlap = 0, nb_ch = 0;
 
for (i = 0; i < am->nb_inputs; i++) {
if (!ctx->inputs[i]->in_channel_layouts ||
!ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) {
av_log(ctx, AV_LOG_WARNING,
"No channel layout for input %d\n", i + 1);
return AVERROR(EAGAIN);
}
inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0];
if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) {
char buf[256];
av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
}
am->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]);
if (outlayout & inlayout[i])
overlap++;
outlayout |= inlayout[i];
nb_ch += am->in[i].nb_ch;
}
if (nb_ch > SWR_CH_MAX) {
av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
return AVERROR(EINVAL);
}
if (overlap) {
av_log(ctx, AV_LOG_WARNING,
"Input channel layouts overlap: "
"output layout will be determined by the number of distinct input channels\n");
for (i = 0; i < nb_ch; i++)
am->route[i] = i;
outlayout = av_get_default_channel_layout(nb_ch);
if (!outlayout)
outlayout = ((int64_t)1 << nb_ch) - 1;
} else {
int *route[SWR_CH_MAX];
int c, out_ch_number = 0;
 
route[0] = am->route;
for (i = 1; i < am->nb_inputs; i++)
route[i] = route[i - 1] + am->in[i - 1].nb_ch;
for (c = 0; c < 64; c++)
for (i = 0; i < am->nb_inputs; i++)
if ((inlayout[i] >> c) & 1)
*(route[i]++) = out_ch_number++;
}
formats = ff_make_format_list(ff_packed_sample_fmts_array);
ff_set_common_formats(ctx, formats);
for (i = 0; i < am->nb_inputs; i++) {
layouts = NULL;
ff_add_channel_layout(&layouts, inlayout[i]);
ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
}
layouts = NULL;
ff_add_channel_layout(&layouts, outlayout);
ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
ff_set_common_samplerates(ctx, ff_all_samplerates());
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AMergeContext *am = ctx->priv;
AVBPrint bp;
int i;
 
for (i = 1; i < am->nb_inputs; i++) {
if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) {
av_log(ctx, AV_LOG_ERROR,
"Inputs must have the same sample rate "
"%d for in%d vs %d\n",
ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate);
return AVERROR(EINVAL);
}
}
am->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
outlink->sample_rate = ctx->inputs[0]->sample_rate;
outlink->time_base = ctx->inputs[0]->time_base;
 
av_bprint_init(&bp, 0, 1);
for (i = 0; i < am->nb_inputs; i++) {
av_bprintf(&bp, "%sin%d:", i ? " + " : "", i);
av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout);
}
av_bprintf(&bp, " -> out:");
av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout);
av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str);
 
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AMergeContext *am = ctx->priv;
int i, ret;
 
for (i = 0; i < am->nb_inputs; i++)
if (!am->in[i].nb_samples)
if ((ret = ff_request_frame(ctx->inputs[i])) < 0)
return ret;
return 0;
}
 
/**
* Copy samples from several input streams to one output stream.
* @param nb_inputs number of inputs
* @param in inputs; used only for the nb_ch field;
* @param route routing values;
* input channel i goes to output channel route[i];
* i < in[0].nb_ch are the channels from the first output;
* i >= in[0].nb_ch are the channels from the second output
* @param ins pointer to the samples of each inputs, in packed format;
* will be left at the end of the copied samples
* @param outs pointer to the samples of the output, in packet format;
* must point to a buffer big enough;
* will be left at the end of the copied samples
* @param ns number of samples to copy
* @param bps bytes per sample
*/
static inline void copy_samples(int nb_inputs, struct amerge_input in[],
int *route, uint8_t *ins[],
uint8_t **outs, int ns, int bps)
{
int *route_cur;
int i, c, nb_ch = 0;
 
for (i = 0; i < nb_inputs; i++)
nb_ch += in[i].nb_ch;
while (ns--) {
route_cur = route;
for (i = 0; i < nb_inputs; i++) {
for (c = 0; c < in[i].nb_ch; c++) {
memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
ins[i] += bps;
}
}
*outs += nb_ch * bps;
}
}
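 
/*
 * Routing example tying query_formats() to copy_samples(): with a mono (FC)
 * first input and a stereo (FL+FR) second input, output channels are
 * assigned in ascending channel-bit order across all inputs, so route
 * becomes {2, 0, 1}: the mono channel lands on output channel 2 while FL
 * and FR keep positions 0 and 1 of the merged FL|FR|FC layout.
 */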
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
AMergeContext *am = ctx->priv;
AVFilterLink *const outlink = ctx->outputs[0];
int input_number;
int nb_samples, ns, i;
AVFrame *outbuf, *inbuf[SWR_CH_MAX];
uint8_t *ins[SWR_CH_MAX], *outs;
 
for (input_number = 0; input_number < am->nb_inputs; input_number++)
if (inlink == ctx->inputs[input_number])
break;
av_assert1(input_number < am->nb_inputs);
if (ff_bufqueue_is_full(&am->in[input_number].queue)) {
av_frame_free(&insamples);
return AVERROR(ENOMEM);
}
ff_bufqueue_add(ctx, &am->in[input_number].queue, av_frame_clone(insamples));
am->in[input_number].nb_samples += insamples->nb_samples;
av_frame_free(&insamples);
nb_samples = am->in[0].nb_samples;
for (i = 1; i < am->nb_inputs; i++)
nb_samples = FFMIN(nb_samples, am->in[i].nb_samples);
if (!nb_samples)
return 0;
 
outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
if (!outbuf)
return AVERROR(ENOMEM);
outs = outbuf->data[0];
for (i = 0; i < am->nb_inputs; i++) {
inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
ins[i] = inbuf[i]->data[0] +
am->in[i].pos * am->in[i].nb_ch * am->bps;
}
av_frame_copy_props(outbuf, inbuf[0]);
outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
inbuf[0]->pts +
av_rescale_q(am->in[0].pos,
(AVRational){ 1, ctx->inputs[0]->sample_rate },
ctx->outputs[0]->time_base);
 
outbuf->nb_samples = nb_samples;
outbuf->channel_layout = outlink->channel_layout;
av_frame_set_channels(outbuf, outlink->channels);
 
while (nb_samples) {
ns = nb_samples;
for (i = 0; i < am->nb_inputs; i++)
ns = FFMIN(ns, inbuf[i]->nb_samples - am->in[i].pos);
/* Unroll the most common sample formats: speed +~350% for the loop,
+~13% overall (including two common decoders) */
switch (am->bps) {
case 1:
copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 1);
break;
case 2:
copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 2);
break;
case 4:
copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 4);
break;
default:
copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, am->bps);
break;
}
 
nb_samples -= ns;
for (i = 0; i < am->nb_inputs; i++) {
am->in[i].nb_samples -= ns;
am->in[i].pos += ns;
if (am->in[i].pos == inbuf[i]->nb_samples) {
am->in[i].pos = 0;
av_frame_free(&inbuf[i]);
ff_bufqueue_get(&am->in[i].queue);
inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
ins[i] = inbuf[i] ? inbuf[i]->data[0] : NULL;
}
}
}
return ff_filter_frame(ctx->outputs[0], outbuf);
}
 
static av_cold int init(AVFilterContext *ctx)
{
AMergeContext *am = ctx->priv;
int i;
 
am->in = av_calloc(am->nb_inputs, sizeof(*am->in));
if (!am->in)
return AVERROR(ENOMEM);
for (i = 0; i < am->nb_inputs; i++) {
char *name = av_asprintf("in%d", i);
AVFilterPad pad = {
.name = name,
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
};
if (!name)
return AVERROR(ENOMEM);
ff_insert_inpad(ctx, i, &pad);
}
return 0;
}
 
static const AVFilterPad amerge_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_af_amerge = {
.name = "amerge",
.description = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
"a single multi-channel stream."),
.priv_size = sizeof(AMergeContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = NULL,
.outputs = amerge_outputs,
.priv_class = &amerge_class,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_amix.c
0,0 → 1,560
/*
* Audio Mix Filter
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Audio Mix Filter
*
* Mixes audio from multiple sources into a single output. The channel layout,
* sample rate, and sample format will be the same for all inputs and the
* output.
*/
 
#include "libavutil/attributes.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
 
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
#define INPUT_OFF 0 /**< input has reached EOF */
#define INPUT_ON 1 /**< input is active */
#define INPUT_INACTIVE 2 /**< input is on, but is currently inactive */
 
#define DURATION_LONGEST 0
#define DURATION_SHORTEST 1
#define DURATION_FIRST 2
 
 
typedef struct FrameInfo {
int nb_samples;
int64_t pts;
struct FrameInfo *next;
} FrameInfo;
 
/**
* Linked list used to store timestamps and frame sizes of all frames in the
* FIFO for the first input.
*
* This is needed to keep timestamps synchronized for the case where multiple
* input frames are pushed to the filter for processing before a frame is
* requested by the output link.
*/
typedef struct FrameList {
int nb_frames;
int nb_samples;
FrameInfo *list;
FrameInfo *end;
} FrameList;
 
static void frame_list_clear(FrameList *frame_list)
{
if (frame_list) {
while (frame_list->list) {
FrameInfo *info = frame_list->list;
frame_list->list = info->next;
av_free(info);
}
frame_list->nb_frames = 0;
frame_list->nb_samples = 0;
frame_list->end = NULL;
}
}
 
static int frame_list_next_frame_size(FrameList *frame_list)
{
if (!frame_list->list)
return 0;
return frame_list->list->nb_samples;
}
 
static int64_t frame_list_next_pts(FrameList *frame_list)
{
if (!frame_list->list)
return AV_NOPTS_VALUE;
return frame_list->list->pts;
}
 
static void frame_list_remove_samples(FrameList *frame_list, int nb_samples)
{
if (nb_samples >= frame_list->nb_samples) {
frame_list_clear(frame_list);
} else {
int samples = nb_samples;
while (samples > 0) {
FrameInfo *info = frame_list->list;
av_assert0(info != NULL);
if (info->nb_samples <= samples) {
samples -= info->nb_samples;
frame_list->list = info->next;
if (!frame_list->list)
frame_list->end = NULL;
frame_list->nb_frames--;
frame_list->nb_samples -= info->nb_samples;
av_free(info);
} else {
info->nb_samples -= samples;
info->pts += samples;
frame_list->nb_samples -= samples;
samples = 0;
}
}
}
}
 
static int frame_list_add_frame(FrameList *frame_list, int nb_samples, int64_t pts)
{
FrameInfo *info = av_malloc(sizeof(*info));
if (!info)
return AVERROR(ENOMEM);
info->nb_samples = nb_samples;
info->pts = pts;
info->next = NULL;
 
if (!frame_list->list) {
frame_list->list = info;
frame_list->end = info;
} else {
av_assert0(frame_list->end != NULL);
frame_list->end->next = info;
frame_list->end = info;
}
frame_list->nb_frames++;
frame_list->nb_samples += nb_samples;
 
return 0;
}
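/*
* Illustrative walk-through (not part of the original source): if the first
* input delivers three 1024-sample frames before the output drains, the list
* holds {1024, pts0}, {1024, pts0 + 1024}, {1024, pts0 + 2048} (pts in the
* 1/sample_rate output time base, assuming contiguous input timestamps).
* request_frame() sizes and stamps the next mixed frame from the head entry,
* and frame_list_remove_samples() trims or pops entries as samples are mixed.
*/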
 
 
typedef struct MixContext {
const AVClass *class; /**< class for AVOptions */
AVFloatDSPContext fdsp;
 
int nb_inputs; /**< number of inputs */
int active_inputs; /**< number of inputs currently active */
int duration_mode; /**< mode for determining duration */
float dropout_transition; /**< transition time when an input drops out */
 
int nb_channels; /**< number of channels */
int sample_rate; /**< sample rate */
int planar;
AVAudioFifo **fifos; /**< audio fifo for each input */
uint8_t *input_state; /**< current state of each input */
float *input_scale; /**< mixing scale factor for each input */
float scale_norm; /**< normalization factor for all inputs */
int64_t next_pts; /**< calculated pts for next output frame */
FrameList *frame_list; /**< list of frame info for the first input */
} MixContext;
 
#define OFFSET(x) offsetof(MixContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption amix_options[] = {
{ "inputs", "Number of inputs.",
OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 32, A|F },
{ "duration", "How to determine the end-of-stream.",
OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A|F, "duration" },
{ "longest", "Duration of longest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST }, INT_MIN, INT_MAX, A|F, "duration" },
{ "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, INT_MIN, INT_MAX, A|F, "duration" },
{ "first", "Duration of first input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST }, INT_MIN, INT_MAX, A|F, "duration" },
{ "dropout_transition", "Transition time, in seconds, for volume "
"renormalization when an input stream ends.",
OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A|F },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(amix);
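/*
* Example usage (a sketch based on the options above, using the standard
* filtergraph option syntax): mix three inputs, end when the first input
* ends, and renormalize over 3 seconds when an input drops out:
*
* amix=inputs=3:duration=first:dropout_transition=3
*/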
 
/**
* Update the scaling factors to apply to each input during mixing.
*
* This balances the full volume range between active inputs and handles
* volume transitions when EOF is encountered on an input but mixing continues
* with the remaining inputs.
*/
static void calculate_scales(MixContext *s, int nb_samples)
{
int i;
 
if (s->scale_norm > s->active_inputs) {
s->scale_norm -= nb_samples / (s->dropout_transition * s->sample_rate);
s->scale_norm = FFMAX(s->scale_norm, s->active_inputs);
}
 
for (i = 0; i < s->nb_inputs; i++) {
if (s->input_state[i] == INPUT_ON)
s->input_scale[i] = 1.0f / s->scale_norm;
else
s->input_scale[i] = 0.0f;
}
}
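/*
* Worked example (illustrative): with 3 active inputs each one is scaled by
* 1/3. When one input reaches EOF, active_inputs becomes 2 while scale_norm
* is still 3, so scale_norm decays by nb_samples / (dropout_transition *
* sample_rate) per call and reaches 2 after roughly dropout_transition
* seconds of output, ramping the remaining inputs from 1/3 up to 1/2.
*/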
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
MixContext *s = ctx->priv;
int i;
char buf[64];
 
s->planar = av_sample_fmt_is_planar(outlink->format);
s->sample_rate = outlink->sample_rate;
outlink->time_base = (AVRational){ 1, outlink->sample_rate };
s->next_pts = AV_NOPTS_VALUE;
 
s->frame_list = av_mallocz(sizeof(*s->frame_list));
if (!s->frame_list)
return AVERROR(ENOMEM);
 
s->fifos = av_mallocz(s->nb_inputs * sizeof(*s->fifos));
if (!s->fifos)
return AVERROR(ENOMEM);
 
s->nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
for (i = 0; i < s->nb_inputs; i++) {
s->fifos[i] = av_audio_fifo_alloc(outlink->format, s->nb_channels, 1024);
if (!s->fifos[i])
return AVERROR(ENOMEM);
}
 
s->input_state = av_malloc(s->nb_inputs);
if (!s->input_state)
return AVERROR(ENOMEM);
memset(s->input_state, INPUT_ON, s->nb_inputs);
s->active_inputs = s->nb_inputs;
 
s->input_scale = av_mallocz(s->nb_inputs * sizeof(*s->input_scale));
if (!s->input_scale)
return AVERROR(ENOMEM);
s->scale_norm = s->active_inputs;
calculate_scales(s, 0);
 
av_get_channel_layout_string(buf, sizeof(buf), -1, outlink->channel_layout);
 
av_log(ctx, AV_LOG_VERBOSE,
"inputs:%d fmt:%s srate:%d cl:%s\n", s->nb_inputs,
av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf);
 
return 0;
}
 
/**
* Read samples from the input FIFOs, mix, and write to the output link.
*/
static int output_frame(AVFilterLink *outlink, int nb_samples)
{
AVFilterContext *ctx = outlink->src;
MixContext *s = ctx->priv;
AVFrame *out_buf, *in_buf;
int i;
 
calculate_scales(s, nb_samples);
 
out_buf = ff_get_audio_buffer(outlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
 
in_buf = ff_get_audio_buffer(outlink, nb_samples);
if (!in_buf) {
av_frame_free(&out_buf);
return AVERROR(ENOMEM);
}
 
for (i = 0; i < s->nb_inputs; i++) {
if (s->input_state[i] == INPUT_ON) {
int planes, plane_size, p;
 
av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
nb_samples);
 
planes = s->planar ? s->nb_channels : 1;
plane_size = nb_samples * (s->planar ? 1 : s->nb_channels);
plane_size = FFALIGN(plane_size, 16);
 
for (p = 0; p < planes; p++) {
s->fdsp.vector_fmac_scalar((float *)out_buf->extended_data[p],
(float *) in_buf->extended_data[p],
s->input_scale[i], plane_size);
}
}
}
av_frame_free(&in_buf);
 
out_buf->pts = s->next_pts;
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += nb_samples;
 
return ff_filter_frame(outlink, out_buf);
}
 
/**
* Returns the smallest number of samples available in the input FIFOs,
* excluding the first input.
*/
static int get_available_samples(MixContext *s)
{
int i;
int available_samples = INT_MAX;
 
av_assert0(s->nb_inputs > 1);
 
for (i = 1; i < s->nb_inputs; i++) {
int nb_samples;
if (s->input_state[i] == INPUT_OFF)
continue;
nb_samples = av_audio_fifo_size(s->fifos[i]);
available_samples = FFMIN(available_samples, nb_samples);
}
if (available_samples == INT_MAX)
return 0;
return available_samples;
}
 
/**
* Requests a frame, if needed, from each input link other than the first.
*/
static int request_samples(AVFilterContext *ctx, int min_samples)
{
MixContext *s = ctx->priv;
int i, ret;
 
av_assert0(s->nb_inputs > 1);
 
for (i = 1; i < s->nb_inputs; i++) {
ret = 0;
if (s->input_state[i] == INPUT_OFF)
continue;
while (!ret && av_audio_fifo_size(s->fifos[i]) < min_samples)
ret = ff_request_frame(ctx->inputs[i]);
if (ret == AVERROR_EOF) {
if (av_audio_fifo_size(s->fifos[i]) == 0) {
s->input_state[i] = INPUT_OFF;
continue;
}
} else if (ret < 0)
return ret;
}
return 0;
}
 
/**
* Calculates the number of active inputs and determines EOF based on the
* duration option.
*
* @return 0 if mixing should continue, or AVERROR_EOF if mixing should stop.
*/
static int calc_active_inputs(MixContext *s)
{
int i;
int active_inputs = 0;
for (i = 0; i < s->nb_inputs; i++)
active_inputs += !!(s->input_state[i] != INPUT_OFF);
s->active_inputs = active_inputs;
 
if (!active_inputs ||
(s->duration_mode == DURATION_FIRST && s->input_state[0] == INPUT_OFF) ||
(s->duration_mode == DURATION_SHORTEST && active_inputs != s->nb_inputs))
return AVERROR_EOF;
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
MixContext *s = ctx->priv;
int ret;
int wanted_samples, available_samples;
 
ret = calc_active_inputs(s);
if (ret < 0)
return ret;
 
if (s->input_state[0] == INPUT_OFF) {
ret = request_samples(ctx, 1);
if (ret < 0)
return ret;
 
ret = calc_active_inputs(s);
if (ret < 0)
return ret;
 
available_samples = get_available_samples(s);
if (!available_samples)
return AVERROR(EAGAIN);
 
return output_frame(outlink, available_samples);
}
 
if (s->frame_list->nb_frames == 0) {
ret = ff_request_frame(ctx->inputs[0]);
if (ret == AVERROR_EOF) {
s->input_state[0] = INPUT_OFF;
if (s->nb_inputs == 1)
return AVERROR_EOF;
else
return AVERROR(EAGAIN);
} else if (ret < 0)
return ret;
}
av_assert0(s->frame_list->nb_frames > 0);
 
wanted_samples = frame_list_next_frame_size(s->frame_list);
 
if (s->active_inputs > 1) {
ret = request_samples(ctx, wanted_samples);
if (ret < 0)
return ret;
 
ret = calc_active_inputs(s);
if (ret < 0)
return ret;
}
 
if (s->active_inputs > 1) {
available_samples = get_available_samples(s);
if (!available_samples)
return AVERROR(EAGAIN);
available_samples = FFMIN(available_samples, wanted_samples);
} else {
available_samples = wanted_samples;
}
 
s->next_pts = frame_list_next_pts(s->frame_list);
frame_list_remove_samples(s->frame_list, available_samples);
 
return output_frame(outlink, available_samples);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
MixContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int i, ret = 0;
 
for (i = 0; i < ctx->nb_inputs; i++)
if (ctx->inputs[i] == inlink)
break;
if (i >= ctx->nb_inputs) {
av_log(ctx, AV_LOG_ERROR, "unknown input link\n");
ret = AVERROR(EINVAL);
goto fail;
}
 
if (i == 0) {
int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
outlink->time_base);
ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
if (ret < 0)
goto fail;
}
 
ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
buf->nb_samples);
 
fail:
av_frame_free(&buf);
 
return ret;
}
 
static av_cold int init(AVFilterContext *ctx)
{
MixContext *s = ctx->priv;
int i;
 
for (i = 0; i < s->nb_inputs; i++) {
char name[32];
AVFilterPad pad = { 0 };
 
snprintf(name, sizeof(name), "input%d", i);
pad.type = AVMEDIA_TYPE_AUDIO;
pad.name = av_strdup(name);
pad.filter_frame = filter_frame;
 
ff_insert_inpad(ctx, i, &pad);
}
 
avpriv_float_dsp_init(&s->fdsp, 0);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
int i;
MixContext *s = ctx->priv;
 
if (s->fifos) {
for (i = 0; i < s->nb_inputs; i++)
av_audio_fifo_free(s->fifos[i]);
av_freep(&s->fifos);
}
frame_list_clear(s->frame_list);
av_freep(&s->frame_list);
av_freep(&s->input_state);
av_freep(&s->input_scale);
 
for (i = 0; i < ctx->nb_inputs; i++)
av_freep(&ctx->input_pads[i].name);
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
ff_add_format(&formats, AV_SAMPLE_FMT_FLTP);
ff_set_common_formats(ctx, formats);
ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
ff_set_common_samplerates(ctx, ff_all_samplerates());
return 0;
}
 
static const AVFilterPad avfilter_af_amix_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
.request_frame = request_frame
},
{ NULL }
};
 
AVFilter avfilter_af_amix = {
.name = "amix",
.description = NULL_IF_CONFIG_SMALL("Audio mixing."),
.priv_size = sizeof(MixContext),
.priv_class = &amix_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = NULL,
.outputs = avfilter_af_amix_outputs,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_anull.c
0,0 → 1,52
/*
* Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* null audio filter
*/
 
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "libavutil/internal.h"
 
static const AVFilterPad avfilter_af_anull_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
static const AVFilterPad avfilter_af_anull_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_anull = {
.name = "anull",
.description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
.query_formats = ff_query_formats_all,
.inputs = avfilter_af_anull_inputs,
.outputs = avfilter_af_anull_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_apad.c
0,0 → 1,156
/*
* Copyright (c) 2012 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
 
/**
* @file
* audio pad filter.
*
* Based on af_aresample.c
*/
 
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
int64_t next_pts;
 
int packet_size;
int64_t pad_len;
int64_t whole_len;
} APadContext;
 
#define OFFSET(x) offsetof(APadContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption apad_options[] = {
{ "packet_size", "set silence packet size", OFFSET(packet_size), AV_OPT_TYPE_INT, { .i64 = 4096 }, 0, INT_MAX, A },
{ "pad_len", "number of samples of silence to add", OFFSET(pad_len), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, A },
{ "whole_len", "target number of samples in the audio stream", OFFSET(whole_len), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, A },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(apad);
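/*
* Example usage (a sketch based on the options above): append exactly one
* second of silence at 44.1 kHz, or pad the stream to a total length; the
* two options are mutually exclusive, as enforced in init() below:
*
* apad=pad_len=44100
* apad=whole_len=441000
*/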
 
static av_cold int init(AVFilterContext *ctx)
{
APadContext *apad = ctx->priv;
 
apad->next_pts = AV_NOPTS_VALUE;
if (apad->whole_len && apad->pad_len) {
av_log(ctx, AV_LOG_ERROR, "Both whole and pad length are set, this is not possible\n");
return AVERROR(EINVAL);
}
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
APadContext *apad = ctx->priv;
 
if (apad->whole_len)
apad->whole_len -= frame->nb_samples;
 
apad->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
return ff_filter_frame(ctx->outputs[0], frame);
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
APadContext *apad = ctx->priv;
int ret;
 
ret = ff_request_frame(ctx->inputs[0]);
 
if (ret == AVERROR_EOF && !ctx->is_disabled) {
int n_out = apad->packet_size;
AVFrame *outsamplesref;
 
if (apad->whole_len > 0) {
apad->pad_len = apad->whole_len;
apad->whole_len = 0;
}
if (apad->pad_len > 0) {
n_out = FFMIN(n_out, apad->pad_len);
apad->pad_len -= n_out;
}
 
if (!n_out)
return AVERROR_EOF;
 
outsamplesref = ff_get_audio_buffer(outlink, n_out);
if (!outsamplesref)
return AVERROR(ENOMEM);
 
av_assert0(outsamplesref->sample_rate == outlink->sample_rate);
av_assert0(outsamplesref->nb_samples == n_out);
 
av_samples_set_silence(outsamplesref->extended_data, 0,
n_out,
av_frame_get_channels(outsamplesref),
outsamplesref->format);
 
outsamplesref->pts = apad->next_pts;
if (apad->next_pts != AV_NOPTS_VALUE)
apad->next_pts += av_rescale_q(n_out, (AVRational){1, outlink->sample_rate}, outlink->time_base);
 
return ff_filter_frame(outlink, outsamplesref);
}
return ret;
}
 
static const AVFilterPad apad_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad apad_outputs[] = {
{
.name = "default",
.request_frame = request_frame,
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_apad = {
.name = "apad",
.description = NULL_IF_CONFIG_SMALL("Pad audio with silence."),
.init = init,
.priv_size = sizeof(APadContext),
.inputs = apad_inputs,
.outputs = apad_outputs,
.priv_class = &apad_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_aphaser.c
0,0 → 1,358
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* phaser audio filter
*/
 
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
enum WaveType {
WAVE_SIN,
WAVE_TRI,
WAVE_NB,
};
 
typedef struct AudioPhaserContext {
const AVClass *class;
double in_gain, out_gain;
double delay;
double decay;
double speed;
 
enum WaveType type;
 
int delay_buffer_length;
double *delay_buffer;
 
int modulation_buffer_length;
int32_t *modulation_buffer;
 
int delay_pos, modulation_pos;
 
void (*phaser)(struct AudioPhaserContext *p,
uint8_t * const *src, uint8_t **dst,
int nb_samples, int channels);
} AudioPhaserContext;
 
#define OFFSET(x) offsetof(AudioPhaserContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption aphaser_options[] = {
{ "in_gain", "set input gain", OFFSET(in_gain), AV_OPT_TYPE_DOUBLE, {.dbl=.4}, 0, 1, FLAGS },
{ "out_gain", "set output gain", OFFSET(out_gain), AV_OPT_TYPE_DOUBLE, {.dbl=.74}, 0, 1e9, FLAGS },
{ "delay", "set delay in milliseconds", OFFSET(delay), AV_OPT_TYPE_DOUBLE, {.dbl=3.}, 0, 5, FLAGS },
{ "decay", "set decay", OFFSET(decay), AV_OPT_TYPE_DOUBLE, {.dbl=.4}, 0, .99, FLAGS },
{ "speed", "set modulation speed", OFFSET(speed), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, .1, 2, FLAGS },
{ "type", "set modulation type", OFFSET(type), AV_OPT_TYPE_INT, {.i64=WAVE_TRI}, 0, WAVE_NB-1, FLAGS, "type" },
{ "triangular", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, FLAGS, "type" },
{ "t", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, FLAGS, "type" },
{ "sinusoidal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, FLAGS, "type" },
{ "s", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, FLAGS, "type" },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(aphaser);
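/*
* Example usage (a sketch based on the options above): a slower, deeper
* sinusoidal phaser:
*
* aphaser=type=s:speed=0.2:decay=0.6:delay=4
*/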
 
static av_cold int init(AVFilterContext *ctx)
{
AudioPhaserContext *p = ctx->priv;
 
if (p->in_gain > (1 - p->decay * p->decay))
av_log(ctx, AV_LOG_WARNING, "in_gain may cause clipping\n");
if (p->in_gain / (1 - p->decay) > 1 / p->out_gain)
av_log(ctx, AV_LOG_WARNING, "out_gain may cause clipping\n");
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats;
AVFilterChannelLayouts *layouts;
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_NONE
};
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, layouts);
 
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_samplerates(ctx, formats);
 
return 0;
}
 
static void generate_wave_table(enum WaveType wave_type, enum AVSampleFormat sample_fmt,
void *table, int table_size,
double min, double max, double phase)
{
uint32_t i, phase_offset = phase / M_PI / 2 * table_size + 0.5;
 
for (i = 0; i < table_size; i++) {
uint32_t point = (i + phase_offset) % table_size;
double d;
 
switch (wave_type) {
case WAVE_SIN:
d = (sin((double)point / table_size * 2 * M_PI) + 1) / 2;
break;
case WAVE_TRI:
d = (double)point * 2 / table_size;
switch (4 * point / table_size) {
case 0: d = d + 0.5; break;
case 1:
case 2: d = 1.5 - d; break;
case 3: d = d - 1.5; break;
}
break;
default:
av_assert0(0);
}
 
d = d * (max - min) + min;
switch (sample_fmt) {
case AV_SAMPLE_FMT_FLT: {
float *fp = (float *)table;
*fp++ = (float)d;
table = fp;
continue; }
case AV_SAMPLE_FMT_DBL: {
double *dp = (double *)table;
*dp++ = d;
table = dp;
continue; }
}
 
d += d < 0 ? -0.5 : 0.5;
switch (sample_fmt) {
case AV_SAMPLE_FMT_S16: {
int16_t *sp = table;
*sp++ = (int16_t)d;
table = sp;
continue; }
case AV_SAMPLE_FMT_S32: {
int32_t *ip = table;
*ip++ = (int32_t)d;
table = ip;
continue; }
default:
av_assert0(0);
}
}
}
 
#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
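/*
* Note (added): MOD() uses one conditional subtraction instead of %, so it is
* only correct for 0 <= (a) < 2*(b). The callers below satisfy this: positions
* advance by at most one step per sample, and the modulation table values lie
* in [1, delay_buffer_length].
*/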
 
#define PHASER_PLANAR(name, type) \
static void phaser_## name ##p(AudioPhaserContext *p, \
uint8_t * const *src, uint8_t **dst, \
int nb_samples, int channels) \
{ \
int i, c, delay_pos, modulation_pos; \
\
av_assert0(channels > 0); \
for (c = 0; c < channels; c++) { \
type *s = (type *)src[c]; \
type *d = (type *)dst[c]; \
double *buffer = p->delay_buffer + \
c * p->delay_buffer_length; \
\
delay_pos = p->delay_pos; \
modulation_pos = p->modulation_pos; \
\
for (i = 0; i < nb_samples; i++, s++, d++) { \
double v = *s * p->in_gain + buffer[ \
MOD(delay_pos + p->modulation_buffer[ \
modulation_pos], \
p->delay_buffer_length)] * p->decay; \
\
modulation_pos = MOD(modulation_pos + 1, \
p->modulation_buffer_length); \
delay_pos = MOD(delay_pos + 1, p->delay_buffer_length); \
buffer[delay_pos] = v; \
\
*d = v * p->out_gain; \
} \
} \
\
p->delay_pos = delay_pos; \
p->modulation_pos = modulation_pos; \
}
 
#define PHASER(name, type) \
static void phaser_## name (AudioPhaserContext *p, \
uint8_t * const *src, uint8_t **dst, \
int nb_samples, int channels) \
{ \
int i, c, delay_pos, modulation_pos; \
type *s = (type *)src[0]; \
type *d = (type *)dst[0]; \
double *buffer = p->delay_buffer; \
\
delay_pos = p->delay_pos; \
modulation_pos = p->modulation_pos; \
\
for (i = 0; i < nb_samples; i++) { \
int pos = MOD(delay_pos + p->modulation_buffer[modulation_pos], \
p->delay_buffer_length) * channels; \
int npos; \
\
delay_pos = MOD(delay_pos + 1, p->delay_buffer_length); \
npos = delay_pos * channels; \
for (c = 0; c < channels; c++, s++, d++) { \
double v = *s * p->in_gain + buffer[pos + c] * p->decay; \
\
buffer[npos + c] = v; \
\
*d = v * p->out_gain; \
} \
\
modulation_pos = MOD(modulation_pos + 1, \
p->modulation_buffer_length); \
} \
\
p->delay_pos = delay_pos; \
p->modulation_pos = modulation_pos; \
}
 
PHASER_PLANAR(dbl, double)
PHASER_PLANAR(flt, float)
PHASER_PLANAR(s16, int16_t)
PHASER_PLANAR(s32, int32_t)
 
PHASER(dbl, double)
PHASER(flt, float)
PHASER(s16, int16_t)
PHASER(s32, int32_t)
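/*
* Note (added): both variants implement the same recurrence,
* y[n] = in_gain * x[n] + decay * y[n - d(n)], with the delay tap d(n) swept
* by the precomputed modulation table and the output taken as out_gain * y[n].
* The planar variant keeps one delay buffer per channel; the packed variant
* interleaves all channels in a single buffer.
*/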
 
static int config_output(AVFilterLink *outlink)
{
AudioPhaserContext *p = outlink->src->priv;
AVFilterLink *inlink = outlink->src->inputs[0];
 
p->delay_buffer_length = p->delay * 0.001 * inlink->sample_rate + 0.5;
p->delay_buffer = av_calloc(p->delay_buffer_length, sizeof(*p->delay_buffer) * inlink->channels);
p->modulation_buffer_length = inlink->sample_rate / p->speed + 0.5;
p->modulation_buffer = av_malloc(p->modulation_buffer_length * sizeof(*p->modulation_buffer));
 
if (!p->modulation_buffer || !p->delay_buffer)
return AVERROR(ENOMEM);
 
generate_wave_table(p->type, AV_SAMPLE_FMT_S32,
p->modulation_buffer, p->modulation_buffer_length,
1., p->delay_buffer_length, M_PI / 2.0);
 
p->delay_pos = p->modulation_pos = 0;
 
switch (inlink->format) {
case AV_SAMPLE_FMT_DBL: p->phaser = phaser_dbl; break;
case AV_SAMPLE_FMT_DBLP: p->phaser = phaser_dblp; break;
case AV_SAMPLE_FMT_FLT: p->phaser = phaser_flt; break;
case AV_SAMPLE_FMT_FLTP: p->phaser = phaser_fltp; break;
case AV_SAMPLE_FMT_S16: p->phaser = phaser_s16; break;
case AV_SAMPLE_FMT_S16P: p->phaser = phaser_s16p; break;
case AV_SAMPLE_FMT_S32: p->phaser = phaser_s32; break;
case AV_SAMPLE_FMT_S32P: p->phaser = phaser_s32p; break;
default: av_assert0(0);
}
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
{
AudioPhaserContext *p = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outbuf;
 
if (av_frame_is_writable(inbuf)) {
outbuf = inbuf;
} else {
outbuf = ff_get_audio_buffer(inlink, inbuf->nb_samples);
if (!outbuf)
return AVERROR(ENOMEM);
av_frame_copy_props(outbuf, inbuf);
}
 
p->phaser(p, inbuf->extended_data, outbuf->extended_data,
outbuf->nb_samples, av_frame_get_channels(outbuf));
 
if (inbuf != outbuf)
av_frame_free(&inbuf);
 
return ff_filter_frame(outlink, outbuf);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
AudioPhaserContext *p = ctx->priv;
 
av_freep(&p->delay_buffer);
av_freep(&p->modulation_buffer);
}
 
static const AVFilterPad aphaser_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad aphaser_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_af_aphaser = {
.name = "aphaser",
.description = NULL_IF_CONFIG_SMALL("Add a phasing effect to the audio."),
.query_formats = query_formats,
.priv_size = sizeof(AudioPhaserContext),
.init = init,
.uninit = uninit,
.inputs = aphaser_inputs,
.outputs = aphaser_outputs,
.priv_class = &aphaser_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_aresample.c
0,0 → 1,309
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2011 Mina Nagy Zaki
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* resampling audio filter
*/
 
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "libswresample/swresample.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
int sample_rate_arg;
double ratio;
struct SwrContext *swr;
int64_t next_pts;
int req_fullfilled;
} AResampleContext;
 
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
AResampleContext *aresample = ctx->priv;
int ret = 0;
 
aresample->next_pts = AV_NOPTS_VALUE;
aresample->swr = swr_alloc();
if (!aresample->swr) {
ret = AVERROR(ENOMEM);
goto end;
}
 
if (opts) {
AVDictionaryEntry *e = NULL;
 
while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
if ((ret = av_opt_set(aresample->swr, e->key, e->value, 0)) < 0)
goto end;
}
av_dict_free(opts);
}
if (aresample->sample_rate_arg > 0)
av_opt_set_int(aresample->swr, "osr", aresample->sample_rate_arg, 0);
end:
return ret;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
AResampleContext *aresample = ctx->priv;
swr_free(&aresample->swr);
}
 
static int query_formats(AVFilterContext *ctx)
{
AResampleContext *aresample = ctx->priv;
int out_rate = av_get_int(aresample->swr, "osr", NULL);
uint64_t out_layout = av_get_int(aresample->swr, "ocl", NULL);
enum AVSampleFormat out_format = av_get_int(aresample->swr, "osf", NULL);
 
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
 
AVFilterFormats *in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
AVFilterFormats *out_formats;
AVFilterFormats *in_samplerates = ff_all_samplerates();
AVFilterFormats *out_samplerates;
AVFilterChannelLayouts *in_layouts = ff_all_channel_counts();
AVFilterChannelLayouts *out_layouts;
 
ff_formats_ref (in_formats, &inlink->out_formats);
ff_formats_ref (in_samplerates, &inlink->out_samplerates);
ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts);
 
if (out_rate > 0) {
out_samplerates = ff_make_format_list((int[]){ out_rate, -1 });
} else {
out_samplerates = ff_all_samplerates();
}
ff_formats_ref(out_samplerates, &outlink->in_samplerates);
 
if (out_format != AV_SAMPLE_FMT_NONE) {
out_formats = ff_make_format_list((int[]){ out_format, -1 });
} else
out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
ff_formats_ref(out_formats, &outlink->in_formats);
 
if (out_layout) {
out_layouts = avfilter_make_format64_list((int64_t[]){ out_layout, -1 });
} else
out_layouts = ff_all_channel_counts();
ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts);
 
return 0;
}
 
 
static int config_output(AVFilterLink *outlink)
{
int ret;
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = ctx->inputs[0];
AResampleContext *aresample = ctx->priv;
int out_rate;
uint64_t out_layout;
enum AVSampleFormat out_format;
char inchl_buf[128], outchl_buf[128];
 
aresample->swr = swr_alloc_set_opts(aresample->swr,
outlink->channel_layout, outlink->format, outlink->sample_rate,
inlink->channel_layout, inlink->format, inlink->sample_rate,
0, ctx);
if (!aresample->swr)
return AVERROR(ENOMEM);
if (!inlink->channel_layout)
av_opt_set_int(aresample->swr, "ich", inlink->channels, 0);
if (!outlink->channel_layout)
av_opt_set_int(aresample->swr, "och", outlink->channels, 0);
 
ret = swr_init(aresample->swr);
if (ret < 0)
return ret;
 
out_rate = av_get_int(aresample->swr, "osr", NULL);
out_layout = av_get_int(aresample->swr, "ocl", NULL);
out_format = av_get_int(aresample->swr, "osf", NULL);
outlink->time_base = (AVRational) {1, out_rate};
 
av_assert0(outlink->sample_rate == out_rate);
av_assert0(outlink->channel_layout == out_layout || !outlink->channel_layout);
av_assert0(outlink->format == out_format);
 
aresample->ratio = (double)outlink->sample_rate / inlink->sample_rate;
 
av_get_channel_layout_string(inchl_buf, sizeof(inchl_buf), inlink ->channels, inlink ->channel_layout);
av_get_channel_layout_string(outchl_buf, sizeof(outchl_buf), outlink->channels, outlink->channel_layout);
 
av_log(ctx, AV_LOG_VERBOSE, "ch:%d chl:%s fmt:%s r:%dHz -> ch:%d chl:%s fmt:%s r:%dHz\n",
inlink ->channels, inchl_buf, av_get_sample_fmt_name(inlink->format), inlink->sample_rate,
outlink->channels, outchl_buf, av_get_sample_fmt_name(outlink->format), outlink->sample_rate);
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
{
AResampleContext *aresample = inlink->dst->priv;
const int n_in = insamplesref->nb_samples;
int n_out = n_in * aresample->ratio * 2 + 256;
AVFilterLink *const outlink = inlink->dst->outputs[0];
AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n_out);
int ret;
 
if (!outsamplesref)
return AVERROR(ENOMEM);
 
av_frame_copy_props(outsamplesref, insamplesref);
outsamplesref->format = outlink->format;
av_frame_set_channels(outsamplesref, outlink->channels);
outsamplesref->channel_layout = outlink->channel_layout;
outsamplesref->sample_rate = outlink->sample_rate;
 
if (insamplesref->pts != AV_NOPTS_VALUE) {
int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den);
int64_t outpts= swr_next_pts(aresample->swr, inpts);
aresample->next_pts =
outsamplesref->pts = ROUNDED_DIV(outpts, inlink->sample_rate);
} else {
outsamplesref->pts = AV_NOPTS_VALUE;
}
n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out,
(void *)insamplesref->extended_data, n_in);
if (n_out <= 0) {
av_frame_free(&outsamplesref);
av_frame_free(&insamplesref);
return 0;
}
 
outsamplesref->nb_samples = n_out;
 
ret = ff_filter_frame(outlink, outsamplesref);
aresample->req_fullfilled = 1;
av_frame_free(&insamplesref);
return ret;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AResampleContext *aresample = ctx->priv;
AVFilterLink *const inlink = outlink->src->inputs[0];
int ret;
 
aresample->req_fullfilled = 0;
do {
ret = ff_request_frame(ctx->inputs[0]);
} while (!aresample->req_fullfilled && ret >= 0);
 
if (ret == AVERROR_EOF) {
AVFrame *outsamplesref;
int n_out = 4096;
 
outsamplesref = ff_get_audio_buffer(outlink, n_out);
if (!outsamplesref)
return AVERROR(ENOMEM);
n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, 0, 0);
if (n_out <= 0) {
av_frame_free(&outsamplesref);
return (n_out == 0) ? AVERROR_EOF : n_out;
}
 
outsamplesref->sample_rate = outlink->sample_rate;
outsamplesref->nb_samples = n_out;
#if 0
outsamplesref->pts = aresample->next_pts;
if (aresample->next_pts != AV_NOPTS_VALUE)
aresample->next_pts += av_rescale_q(n_out, (AVRational){1 ,outlink->sample_rate}, outlink->time_base);
#else
outsamplesref->pts = swr_next_pts(aresample->swr, INT64_MIN);
outsamplesref->pts = ROUNDED_DIV(outsamplesref->pts, inlink->sample_rate);
#endif
 
return ff_filter_frame(outlink, outsamplesref);
}
return ret;
}
 
static const AVClass *resample_child_class_next(const AVClass *prev)
{
return prev ? NULL : swr_get_class();
}
 
static void *resample_child_next(void *obj, void *prev)
{
AResampleContext *s = obj;
return prev ? NULL : s->swr;
}
 
#define OFFSET(x) offsetof(AResampleContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption options[] = {
{"sample_rate", NULL, OFFSET(sample_rate_arg), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
{NULL}
};
 
static const AVClass aresample_class = {
.class_name = "aresample",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.child_class_next = resample_child_class_next,
.child_next = resample_child_next,
};
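/*
* Example usage (a sketch): resample to 48 kHz via the filter's sample_rate
* option, optionally passing extra options through to the underlying
* libswresample context, which resample_child_class_next() and
* resample_child_next() expose as a child object (the "async" option below
* is assumed to be handled by libswresample):
*
* aresample=48000
* aresample=48000:async=1
*/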
 
static const AVFilterPad aresample_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad aresample_outputs[] = {
{
.name = "default",
.config_props = config_output,
.request_frame = request_frame,
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_aresample = {
.name = "aresample",
.description = NULL_IF_CONFIG_SMALL("Resample audio data."),
.init_dict = init_dict,
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(AResampleContext),
.priv_class = &aresample_class,
.inputs = aresample_inputs,
.outputs = aresample_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_asetnsamples.c
0,0 → 1,196
/*
* Copyright (c) 2012 Andrey Utkin
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Filter that changes the number of samples in each output frame
*/
 
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
#include "formats.h"
 
typedef struct {
const AVClass *class;
int nb_out_samples; ///< how many samples to output
AVAudioFifo *fifo; ///< samples are queued here
int64_t next_out_pts;
int pad;
} ASNSContext;
 
#define OFFSET(x) offsetof(ASNSContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption asetnsamples_options[] = {
{ "nb_out_samples", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
{ "n", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
{ "pad", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
{ "p", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(asetnsamples);
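/*
* Example usage (a sketch based on the options above): output frames of
* exactly 1234 samples and drop the final partial frame instead of padding
* it with silence:
*
* asetnsamples=n=1234:p=0
*/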
 
static av_cold int init(AVFilterContext *ctx)
{
ASNSContext *asns = ctx->priv;
 
asns->next_out_pts = AV_NOPTS_VALUE;
av_log(ctx, AV_LOG_VERBOSE, "nb_out_samples:%d pad:%d\n", asns->nb_out_samples, asns->pad);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
ASNSContext *asns = ctx->priv;
av_audio_fifo_free(asns->fifo);
}
 
static int config_props_output(AVFilterLink *outlink)
{
ASNSContext *asns = outlink->src->priv;
 
asns->fifo = av_audio_fifo_alloc(outlink->format, outlink->channels, asns->nb_out_samples);
if (!asns->fifo)
return AVERROR(ENOMEM);
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
 
return 0;
}
 
static int push_samples(AVFilterLink *outlink)
{
ASNSContext *asns = outlink->src->priv;
AVFrame *outsamples = NULL;
int ret, nb_out_samples, nb_pad_samples;
 
if (asns->pad) {
nb_out_samples = av_audio_fifo_size(asns->fifo) ? asns->nb_out_samples : 0;
nb_pad_samples = nb_out_samples - FFMIN(nb_out_samples, av_audio_fifo_size(asns->fifo));
} else {
nb_out_samples = FFMIN(asns->nb_out_samples, av_audio_fifo_size(asns->fifo));
nb_pad_samples = 0;
}
 
if (!nb_out_samples)
return 0;
 
outsamples = ff_get_audio_buffer(outlink, nb_out_samples);
if (!outsamples)
return AVERROR(ENOMEM);
 
av_audio_fifo_read(asns->fifo,
(void **)outsamples->extended_data, nb_out_samples);
 
if (nb_pad_samples)
av_samples_set_silence(outsamples->extended_data, nb_out_samples - nb_pad_samples,
nb_pad_samples, outlink->channels,
outlink->format);
outsamples->nb_samples = nb_out_samples;
outsamples->channel_layout = outlink->channel_layout;
outsamples->sample_rate = outlink->sample_rate;
outsamples->pts = asns->next_out_pts;
 
if (asns->next_out_pts != AV_NOPTS_VALUE)
asns->next_out_pts += nb_out_samples;
 
ret = ff_filter_frame(outlink, outsamples);
if (ret < 0)
return ret;
return nb_out_samples;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
ASNSContext *asns = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int ret;
int nb_samples = insamples->nb_samples;
 
if (av_audio_fifo_space(asns->fifo) < nb_samples) {
av_log(ctx, AV_LOG_DEBUG, "No space for %d samples, stretching audio fifo\n", nb_samples);
ret = av_audio_fifo_realloc(asns->fifo, av_audio_fifo_size(asns->fifo) + nb_samples);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR,
"Stretching audio fifo failed, discarded %d samples\n", nb_samples);
return -1;
}
}
av_audio_fifo_write(asns->fifo, (void **)insamples->extended_data, nb_samples);
if (asns->next_out_pts == AV_NOPTS_VALUE)
asns->next_out_pts = insamples->pts;
av_frame_free(&insamples);
 
while (av_audio_fifo_size(asns->fifo) >= asns->nb_out_samples)
push_samples(outlink);
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterLink *inlink = outlink->src->inputs[0];
int ret;
 
ret = ff_request_frame(inlink);
if (ret == AVERROR_EOF) {
ret = push_samples(outlink);
return ret < 0 ? ret : ret > 0 ? 0 : AVERROR_EOF;
}
 
return ret;
}
 
static const AVFilterPad asetnsamples_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad asetnsamples_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.request_frame = request_frame,
.config_props = config_props_output,
},
{ NULL }
};
 
AVFilter avfilter_af_asetnsamples = {
.name = "asetnsamples",
.description = NULL_IF_CONFIG_SMALL("Set the number of samples for each output audio frame."),
.priv_size = sizeof(ASNSContext),
.priv_class = &asetnsamples_class,
.init = init,
.uninit = uninit,
.inputs = asetnsamples_inputs,
.outputs = asetnsamples_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_asetrate.c
0,0 → 1,119
/*
* Copyright (c) 2013 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
int sample_rate;
int rescale_pts;
} ASetRateContext;
 
#define CONTEXT ASetRateContext
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
#define OPT_GENERIC(name, field, def, min, max, descr, type, deffield, ...) \
{ name, descr, offsetof(CONTEXT, field), AV_OPT_TYPE_ ## type, \
{ .deffield = def }, min, max, FLAGS, __VA_ARGS__ }
 
#define OPT_INT(name, field, def, min, max, descr, ...) \
OPT_GENERIC(name, field, def, min, max, descr, INT, i64, __VA_ARGS__)
 
static const AVOption asetrate_options[] = {
OPT_INT("sample_rate", sample_rate, 44100, 1, INT_MAX, "set the sample rate"),
OPT_INT("r", sample_rate, 44100, 1, INT_MAX, "set the sample rate"),
{NULL},
};
 
AVFILTER_DEFINE_CLASS(asetrate);
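/*
* Example usage (a sketch): restamp the stream as 48 kHz without touching
* the samples; since the data is unchanged, playback speed and pitch shift
* by the ratio new_rate / old_rate:
*
* asetrate=48000
*/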
 
static av_cold int query_formats(AVFilterContext *ctx)
{
ASetRateContext *sr = ctx->priv;
int sample_rates[] = { sr->sample_rate, -1 };
 
ff_formats_ref(ff_make_format_list(sample_rates),
&ctx->outputs[0]->in_samplerates);
return 0;
}
 
static av_cold int config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
ASetRateContext *sr = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
AVRational intb = ctx->inputs[0]->time_base;
int inrate = inlink->sample_rate;
 
if (intb.num == 1 && intb.den == inrate) {
outlink->time_base.num = 1;
outlink->time_base.den = outlink->sample_rate;
} else {
outlink->time_base = intb;
sr->rescale_pts = 1;
if (av_q2d(intb) > 1.0 / FFMAX(inrate, outlink->sample_rate))
av_log(ctx, AV_LOG_WARNING, "Time base is inaccurate\n");
}
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
ASetRateContext *sr = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
 
frame->sample_rate = outlink->sample_rate;
if (sr->rescale_pts)
frame->pts = av_rescale(frame->pts, inlink->sample_rate,
outlink->sample_rate);
return ff_filter_frame(outlink, frame);
}
 
static const AVFilterPad asetrate_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad asetrate_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_af_asetrate = {
.name = "asetrate",
.description = NULL_IF_CONFIG_SMALL("Change the sample rate without "
"altering the data."),
.query_formats = query_formats,
.priv_size = sizeof(ASetRateContext),
.inputs = asetrate_inputs,
.outputs = asetrate_outputs,
.priv_class = &asetrate_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_ashowinfo.c
0,0 → 1,126
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* filter for showing textual audio frame information
*/
 
#include <inttypes.h>
#include <stddef.h>
 
#include "libavutil/adler32.h"
#include "libavutil/attributes.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mem.h"
#include "libavutil/timestamp.h"
#include "libavutil/samplefmt.h"
 
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct AShowInfoContext {
/**
* Scratch space for individual plane checksums for planar audio
*/
uint32_t *plane_checksums;
} AShowInfoContext;
 
static av_cold void uninit(AVFilterContext *ctx)
{
AShowInfoContext *s = ctx->priv;
av_freep(&s->plane_checksums);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
AShowInfoContext *s = ctx->priv;
char chlayout_str[128];
uint32_t checksum = 0;
int channels = inlink->channels;
int planar = av_sample_fmt_is_planar(buf->format);
int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
int data_size = buf->nb_samples * block_align;
int planes = planar ? channels : 1;
int i;
void *tmp_ptr = av_realloc(s->plane_checksums, channels * sizeof(*s->plane_checksums));
 
if (!tmp_ptr)
return AVERROR(ENOMEM);
s->plane_checksums = tmp_ptr;
 
for (i = 0; i < planes; i++) {
uint8_t *data = buf->extended_data[i];
 
s->plane_checksums[i] = av_adler32_update(0, data, data_size);
checksum = i ? av_adler32_update(checksum, data, data_size) :
s->plane_checksums[0];
}
 
av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
buf->channel_layout);
 
av_log(ctx, AV_LOG_INFO,
"n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
"fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d "
"checksum:%08X ",
inlink->frame_count,
av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
av_frame_get_pkt_pos(buf),
av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
buf->sample_rate, buf->nb_samples,
checksum);
 
av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
for (i = 0; i < planes; i++)
av_log(ctx, AV_LOG_INFO, "%08X ", s->plane_checksums[i]);
av_log(ctx, AV_LOG_INFO, "]\n");
 
return ff_filter_frame(inlink->dst->outputs[0], buf);
}
 
static const AVFilterPad inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_ashowinfo = {
.name = "ashowinfo",
.description = NULL_IF_CONFIG_SMALL("Show textual information for each audio frame."),
.priv_size = sizeof(AShowInfoContext),
.uninit = uninit,
.inputs = inputs,
.outputs = outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_astats.c
0,0 → 1,274
/*
* Copyright (c) 2009 Rob Sykes <robs@users.sourceforge.net>
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <float.h>
 
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct ChannelStats {
double last;
double sigma_x, sigma_x2;
double avg_sigma_x2, min_sigma_x2, max_sigma_x2;
double min, max;
double min_run, max_run;
double min_runs, max_runs;
uint64_t min_count, max_count;
uint64_t nb_samples;
} ChannelStats;
 
typedef struct {
const AVClass *class;
ChannelStats *chstats;
int nb_channels;
uint64_t tc_samples;
double time_constant;
double mult;
} AudioStatsContext;
 
#define OFFSET(x) offsetof(AudioStatsContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption astats_options[] = {
{ "length", "set the window length", OFFSET(time_constant), AV_OPT_TYPE_DOUBLE, {.dbl=.05}, .01, 10, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(astats);
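/*
* Example usage (a sketch based on the option above): measure short-term RMS
* with a 100 ms window instead of the 50 ms default:
*
* astats=length=0.1
*/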
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats;
AVFilterChannelLayouts *layouts;
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
};
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, layouts);
 
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_samplerates(ctx, formats);
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AudioStatsContext *s = outlink->src->priv;
int c;
 
s->chstats = av_calloc(sizeof(*s->chstats), outlink->channels);
if (!s->chstats)
return AVERROR(ENOMEM);
s->nb_channels = outlink->channels;
s->mult = exp((-1 / s->time_constant / outlink->sample_rate));
s->tc_samples = 5 * s->time_constant * outlink->sample_rate + .5;
 
for (c = 0; c < s->nb_channels; c++) {
ChannelStats *p = &s->chstats[c];
 
p->min = p->min_sigma_x2 = DBL_MAX;
p->max = -DBL_MAX; /* DBL_MIN is the smallest positive double, not the most negative */
p->max_sigma_x2 = DBL_MIN;
}
 
return 0;
}
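/*
* Note (added): avg_sigma_x2 in update_stat() below is an exponentially
* weighted moving average of the squared sample, avg' = m * avg + (1 - m) * d*d
* with m = exp(-1 / (length * sample_rate)), i.e. a one-pole smoother whose
* time constant is the "length" option; tc_samples (five time constants) is
* the warm-up period before min/max windowed RMS values are recorded.
*/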
 
static inline void update_stat(AudioStatsContext *s, ChannelStats *p, double d)
{
if (d < p->min) {
p->min = d;
p->min_run = 1;
p->min_runs = 0;
p->min_count = 1;
} else if (d == p->min) {
p->min_count++;
p->min_run = d == p->last ? p->min_run + 1 : 1;
} else if (p->last == p->min) {
p->min_runs += p->min_run * p->min_run;
}
 
if (d > p->max) {
p->max = d;
p->max_run = 1;
p->max_runs = 0;
p->max_count = 1;
} else if (d == p->max) {
p->max_count++;
p->max_run = d == p->last ? p->max_run + 1 : 1;
} else if (p->last == p->max) {
p->max_runs += p->max_run * p->max_run;
}
 
p->sigma_x += d;
p->sigma_x2 += d * d;
p->avg_sigma_x2 = p->avg_sigma_x2 * s->mult + (1.0 - s->mult) * d * d;
p->last = d;
 
if (p->nb_samples >= s->tc_samples) {
p->max_sigma_x2 = FFMAX(p->max_sigma_x2, p->avg_sigma_x2);
p->min_sigma_x2 = FFMIN(p->min_sigma_x2, p->avg_sigma_x2);
}
p->nb_samples++;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AudioStatsContext *s = inlink->dst->priv;
const int channels = s->nb_channels;
const double *src;
int i, c;
 
switch (inlink->format) {
case AV_SAMPLE_FMT_DBLP:
for (c = 0; c < channels; c++) {
ChannelStats *p = &s->chstats[c];
src = (const double *)buf->extended_data[c];
 
for (i = 0; i < buf->nb_samples; i++, src++)
update_stat(s, p, *src);
}
break;
case AV_SAMPLE_FMT_DBL:
src = (const double *)buf->extended_data[0];
 
for (i = 0; i < buf->nb_samples; i++) {
for (c = 0; c < channels; c++, src++)
update_stat(s, &s->chstats[c], *src);
}
break;
}
 
return ff_filter_frame(inlink->dst->outputs[0], buf);
}
 
#define LINEAR_TO_DB(x) (log10(x) * 20)
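/*
* Examples (added): LINEAR_TO_DB(1.0) = 0 dB, LINEAR_TO_DB(0.5) ~= -6.02 dB,
* LINEAR_TO_DB(0.1) = -20 dB.
*/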
 
static void print_stats(AVFilterContext *ctx)
{
AudioStatsContext *s = ctx->priv;
uint64_t min_count = 0, max_count = 0, nb_samples = 0;
double min_runs = 0, max_runs = 0,
min = DBL_MAX, max = -DBL_MAX,
max_sigma_x = 0,
sigma_x = 0,
sigma_x2 = 0,
min_sigma_x2 = DBL_MAX,
max_sigma_x2 = DBL_MIN;
int c;
 
for (c = 0; c < s->nb_channels; c++) {
ChannelStats *p = &s->chstats[c];
 
if (p->nb_samples < s->tc_samples)
p->min_sigma_x2 = p->max_sigma_x2 = p->sigma_x2 / p->nb_samples;
 
min = FFMIN(min, p->min);
max = FFMAX(max, p->max);
min_sigma_x2 = FFMIN(min_sigma_x2, p->min_sigma_x2);
max_sigma_x2 = FFMAX(max_sigma_x2, p->max_sigma_x2);
sigma_x += p->sigma_x;
sigma_x2 += p->sigma_x2;
min_count += p->min_count;
max_count += p->max_count;
min_runs += p->min_runs;
max_runs += p->max_runs;
nb_samples += p->nb_samples;
if (fabs(p->sigma_x) > fabs(max_sigma_x))
max_sigma_x = p->sigma_x;
 
av_log(ctx, AV_LOG_INFO, "Channel: %d\n", c + 1);
av_log(ctx, AV_LOG_INFO, "DC offset: %f\n", p->sigma_x / p->nb_samples);
av_log(ctx, AV_LOG_INFO, "Min level: %f\n", p->min);
av_log(ctx, AV_LOG_INFO, "Max level: %f\n", p->max);
av_log(ctx, AV_LOG_INFO, "Peak level dB: %f\n", LINEAR_TO_DB(FFMAX(-p->min, p->max)));
av_log(ctx, AV_LOG_INFO, "RMS level dB: %f\n", LINEAR_TO_DB(sqrt(p->sigma_x2 / p->nb_samples)));
av_log(ctx, AV_LOG_INFO, "RMS peak dB: %f\n", LINEAR_TO_DB(sqrt(p->max_sigma_x2)));
if (p->min_sigma_x2 != 1)
av_log(ctx, AV_LOG_INFO, "RMS trough dB: %f\n",LINEAR_TO_DB(sqrt(p->min_sigma_x2)));
av_log(ctx, AV_LOG_INFO, "Crest factor: %f\n", p->sigma_x2 ? FFMAX(-p->min, p->max) / sqrt(p->sigma_x2 / p->nb_samples) : 1);
av_log(ctx, AV_LOG_INFO, "Flat factor: %f\n", LINEAR_TO_DB((p->min_runs + p->max_runs) / (p->min_count + p->max_count)));
av_log(ctx, AV_LOG_INFO, "Peak count: %"PRId64"\n", p->min_count + p->max_count);
}
 
av_log(ctx, AV_LOG_INFO, "Overall\n");
av_log(ctx, AV_LOG_INFO, "DC offset: %f\n", max_sigma_x / (nb_samples / s->nb_channels));
av_log(ctx, AV_LOG_INFO, "Min level: %f\n", min);
av_log(ctx, AV_LOG_INFO, "Max level: %f\n", max);
av_log(ctx, AV_LOG_INFO, "Peak level dB: %f\n", LINEAR_TO_DB(FFMAX(-min, max)));
av_log(ctx, AV_LOG_INFO, "RMS level dB: %f\n", LINEAR_TO_DB(sqrt(sigma_x2 / nb_samples)));
av_log(ctx, AV_LOG_INFO, "RMS peak dB: %f\n", LINEAR_TO_DB(sqrt(max_sigma_x2)));
if (min_sigma_x2 != 1)
av_log(ctx, AV_LOG_INFO, "RMS trough dB: %f\n", LINEAR_TO_DB(sqrt(min_sigma_x2)));
av_log(ctx, AV_LOG_INFO, "Flat factor: %f\n", LINEAR_TO_DB((min_runs + max_runs) / (min_count + max_count)));
av_log(ctx, AV_LOG_INFO, "Peak count: %f\n", (min_count + max_count) / (double)s->nb_channels);
av_log(ctx, AV_LOG_INFO, "Number of samples: %"PRId64"\n", nb_samples / s->nb_channels);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
AudioStatsContext *s = ctx->priv;
 
print_stats(ctx);
av_freep(&s->chstats);
}
 
static const AVFilterPad astats_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad astats_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_af_astats = {
.name = "astats",
.description = NULL_IF_CONFIG_SMALL("Show time domain statistics about audio frames."),
.query_formats = query_formats,
.priv_size = sizeof(AudioStatsContext),
.priv_class = &astats_class,
.uninit = uninit,
.inputs = astats_inputs,
.outputs = astats_outputs,
};
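 
/*
* A usage sketch for this filter (standard ffmpeg CLI; the input file
* name is illustrative):
*
*     ffmpeg -i input.wav -af astats -f null -
*
* The per-channel and overall statistics above are printed from uninit()
* when the stream ends.
*/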
/contrib/sdk/sources/ffmpeg/libavfilter/af_astreamsync.c
0,0 → 1,240
/*
* Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Stream (de)synchronization filter
*/
 
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
 
#define QUEUE_SIZE 16
 
static const char * const var_names[] = {
"b1", "b2",
"s1", "s2",
"t1", "t2",
NULL
};
 
enum var_name {
VAR_B1, VAR_B2,
VAR_S1, VAR_S2,
VAR_T1, VAR_T2,
VAR_NB
};
 
typedef struct {
const AVClass *class;
AVExpr *expr;
char *expr_str;
double var_values[VAR_NB];
struct buf_queue {
AVFrame *buf[QUEUE_SIZE];
unsigned tail, nb;
/* buf[tail] is the oldest,
buf[(tail + nb) % QUEUE_SIZE] is where the next is added */
} queue[2];
int req[2];
int next_out;
int eof; /* bitmask, one bit for each stream */
} AStreamSyncContext;
 
#define OFFSET(x) offsetof(AStreamSyncContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption astreamsync_options[] = {
{ "expr", "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
{ "e", "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
{ NULL }
};
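 
/*
* A usage sketch (filtergraph is illustrative): with the default
* expression "t1-t2", the next buffer is taken from the second stream
* whenever the expression evaluates >= 0 (i.e. stream 1 is ahead in
* time), and from the first stream otherwise, which keeps the two
* outputs roughly in step:
*
*     [a1][a2] astreamsync=expr=t1-t2 [out1][out2]
*
* b1/b2 count buffers sent so far, s1/s2 count samples sent, and t1/t2
* are the timestamps (in seconds) of the next buffer of each stream.
*/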
 
AVFILTER_DEFINE_CLASS(astreamsync);
 
static av_cold int init(AVFilterContext *ctx)
{
AStreamSyncContext *as = ctx->priv;
int r, i;
 
r = av_expr_parse(&as->expr, as->expr_str, var_names,
NULL, NULL, NULL, NULL, 0, ctx);
if (r < 0) {
av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", as->expr_str);
return r;
}
for (i = 0; i < 42; i++)
av_expr_eval(as->expr, as->var_values, NULL); /* exercise the prng */
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
int i;
AVFilterFormats *formats, *rates;
AVFilterChannelLayouts *layouts;
 
for (i = 0; i < 2; i++) {
formats = ctx->inputs[i]->in_formats;
ff_formats_ref(formats, &ctx->inputs[i]->out_formats);
ff_formats_ref(formats, &ctx->outputs[i]->in_formats);
rates = ff_all_samplerates();
ff_formats_ref(rates, &ctx->inputs[i]->out_samplerates);
ff_formats_ref(rates, &ctx->outputs[i]->in_samplerates);
layouts = ctx->inputs[i]->in_channel_layouts;
ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
ff_channel_layouts_ref(layouts, &ctx->outputs[i]->in_channel_layouts);
}
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
int id = outlink == ctx->outputs[1];
 
outlink->sample_rate = ctx->inputs[id]->sample_rate;
outlink->time_base = ctx->inputs[id]->time_base;
return 0;
}
 
static int send_out(AVFilterContext *ctx, int out_id)
{
AStreamSyncContext *as = ctx->priv;
struct buf_queue *queue = &as->queue[out_id];
AVFrame *buf = queue->buf[queue->tail];
int ret;
 
queue->buf[queue->tail] = NULL;
as->var_values[VAR_B1 + out_id]++;
as->var_values[VAR_S1 + out_id] += buf->nb_samples;
if (buf->pts != AV_NOPTS_VALUE)
as->var_values[VAR_T1 + out_id] =
av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
as->var_values[VAR_T1 + out_id] += buf->nb_samples /
(double)ctx->inputs[out_id]->sample_rate;
ret = ff_filter_frame(ctx->outputs[out_id], buf);
queue->nb--;
queue->tail = (queue->tail + 1) % QUEUE_SIZE;
if (as->req[out_id])
as->req[out_id]--;
return ret;
}
 
static void send_next(AVFilterContext *ctx)
{
AStreamSyncContext *as = ctx->priv;
int i;
 
while (1) {
if (!as->queue[as->next_out].nb)
break;
send_out(ctx, as->next_out);
if (!as->eof)
as->next_out = av_expr_eval(as->expr, as->var_values, NULL) >= 0;
}
for (i = 0; i < 2; i++)
if (as->queue[i].nb == QUEUE_SIZE)
send_out(ctx, i);
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AStreamSyncContext *as = ctx->priv;
int id = outlink == ctx->outputs[1];
 
as->req[id]++;
while (as->req[id] && !(as->eof & (1 << id))) {
if (as->queue[as->next_out].nb) {
send_next(ctx);
} else {
as->eof |= 1 << as->next_out;
ff_request_frame(ctx->inputs[as->next_out]);
if (as->eof & (1 << as->next_out))
as->next_out = !as->next_out;
}
}
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
AStreamSyncContext *as = ctx->priv;
int id = inlink == ctx->inputs[1];
 
as->queue[id].buf[(as->queue[id].tail + as->queue[id].nb++) % QUEUE_SIZE] =
insamples;
as->eof &= ~(1 << id);
send_next(ctx);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
AStreamSyncContext *as = ctx->priv;
 
av_expr_free(as->expr);
as->expr = NULL;
}
 
static const AVFilterPad astreamsync_inputs[] = {
{
.name = "in1",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},{
.name = "in2",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad astreamsync_outputs[] = {
{
.name = "out1",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
.request_frame = request_frame,
},{
.name = "out2",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_af_astreamsync = {
.name = "astreamsync",
.description = NULL_IF_CONFIG_SMALL("Copy two streams of audio data "
"in a configurable order."),
.priv_size = sizeof(AStreamSyncContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = astreamsync_inputs,
.outputs = astreamsync_outputs,
.priv_class = &astreamsync_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_asyncts.c
0,0 → 1,321
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavresample/avresample.h"
#include "libavutil/attributes.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
 
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct ASyncContext {
const AVClass *class;
 
AVAudioResampleContext *avr;
int64_t pts; ///< timestamp in samples of the first sample in fifo
int min_delta; ///< pad/trim min threshold in samples
int first_frame; ///< 1 until filter_frame() has processed at least 1 frame with a pts != AV_NOPTS_VALUE
int64_t first_pts; ///< user-specified first expected pts, in samples
int comp; ///< current resample compensation
 
/* options */
int resample;
float min_delta_sec;
int max_comp;
 
/* set by filter_frame() to signal an output frame to request_frame() */
int got_output;
} ASyncContext;
 
#define OFFSET(x) offsetof(ASyncContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption asyncts_options[] = {
{ "compensate", "Stretch/squeeze the data to make it match the timestamps", OFFSET(resample), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, A|F },
{ "min_delta", "Minimum difference between timestamps and audio data "
"(in seconds) to trigger padding/trimmin the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A|F },
{ "max_comp", "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A|F },
{ "first_pts", "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A|F },
{ NULL }
};
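 
/*
* A usage sketch (option values illustrative): make the audio match its
* timestamps, assuming the stream should start at pts 0 and small drift
* should be resampled away:
*
*     ffmpeg -i input.mkv -af asyncts=compensate=1:first_pts=0 out.mkv
*
* With compensate=0 (the default) gaps are filled with silence and
* overlaps are dropped instead of being stretched/squeezed.
*/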
 
AVFILTER_DEFINE_CLASS(asyncts);
 
static av_cold int init(AVFilterContext *ctx)
{
ASyncContext *s = ctx->priv;
 
s->pts = AV_NOPTS_VALUE;
s->first_frame = 1;
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
ASyncContext *s = ctx->priv;
 
if (s->avr) {
avresample_close(s->avr);
avresample_free(&s->avr);
}
}
 
static int config_props(AVFilterLink *link)
{
ASyncContext *s = link->src->priv;
int ret;
 
s->min_delta = s->min_delta_sec * link->sample_rate;
link->time_base = (AVRational){1, link->sample_rate};
 
s->avr = avresample_alloc_context();
if (!s->avr)
return AVERROR(ENOMEM);
 
av_opt_set_int(s->avr, "in_channel_layout", link->channel_layout, 0);
av_opt_set_int(s->avr, "out_channel_layout", link->channel_layout, 0);
av_opt_set_int(s->avr, "in_sample_fmt", link->format, 0);
av_opt_set_int(s->avr, "out_sample_fmt", link->format, 0);
av_opt_set_int(s->avr, "in_sample_rate", link->sample_rate, 0);
av_opt_set_int(s->avr, "out_sample_rate", link->sample_rate, 0);
 
if (s->resample)
av_opt_set_int(s->avr, "force_resampling", 1, 0);
 
if ((ret = avresample_open(s->avr)) < 0)
return ret;
 
return 0;
}
 
/* get amount of data currently buffered, in samples */
static int64_t get_delay(ASyncContext *s)
{
return avresample_available(s->avr) + avresample_get_delay(s->avr);
}
 
static void handle_trimming(AVFilterContext *ctx)
{
ASyncContext *s = ctx->priv;
 
if (s->pts < s->first_pts) {
int delta = FFMIN(s->first_pts - s->pts, avresample_available(s->avr));
av_log(ctx, AV_LOG_VERBOSE, "Trimming %d samples from start\n",
delta);
avresample_read(s->avr, NULL, delta);
s->pts += delta;
} else if (s->first_frame)
s->pts = s->first_pts;
}
 
static int request_frame(AVFilterLink *link)
{
AVFilterContext *ctx = link->src;
ASyncContext *s = ctx->priv;
int ret = 0;
int nb_samples;
 
s->got_output = 0;
while (ret >= 0 && !s->got_output)
ret = ff_request_frame(ctx->inputs[0]);
 
/* flush the fifo */
if (ret == AVERROR_EOF) {
if (s->first_pts != AV_NOPTS_VALUE)
handle_trimming(ctx);
 
if ((nb_samples = get_delay(s))) {
AVFrame *buf = ff_get_audio_buffer(link, nb_samples);
if (!buf)
return AVERROR(ENOMEM);
ret = avresample_convert(s->avr, buf->extended_data,
buf->linesize[0], nb_samples, NULL, 0, 0);
if (ret <= 0) {
av_frame_free(&buf);
return (ret < 0) ? ret : AVERROR_EOF;
}
 
buf->pts = s->pts;
return ff_filter_frame(link, buf);
}
}
 
return ret;
}
 
static int write_to_fifo(ASyncContext *s, AVFrame *buf)
{
int ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
buf->linesize[0], buf->nb_samples);
av_frame_free(&buf);
return ret;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
ASyncContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int nb_channels = av_get_channel_layout_nb_channels(buf->channel_layout);
int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts :
av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
int out_size, ret;
int64_t delta;
int64_t new_pts;
 
/* buffer data until we get the next timestamp */
if (s->pts == AV_NOPTS_VALUE || pts == AV_NOPTS_VALUE) {
if (pts != AV_NOPTS_VALUE) {
s->pts = pts - get_delay(s);
}
return write_to_fifo(s, buf);
}
 
if (s->first_pts != AV_NOPTS_VALUE) {
handle_trimming(ctx);
if (!avresample_available(s->avr))
return write_to_fifo(s, buf);
}
 
/* when we have two timestamps, compute how many samples we would have
* to add/remove to get proper sync between data and timestamps */
delta = pts - s->pts - get_delay(s);
out_size = avresample_available(s->avr);
 
if (llabs(delta) > s->min_delta ||
(s->first_frame && delta && s->first_pts != AV_NOPTS_VALUE)) {
av_log(ctx, AV_LOG_VERBOSE, "Discontinuity - %"PRId64" samples.\n", delta);
out_size = av_clipl_int32((int64_t)out_size + delta);
} else {
if (s->resample) {
// adjust the compensation if delta is non-zero
int delay = get_delay(s);
int comp = s->comp + av_clip(delta * inlink->sample_rate / delay,
-s->max_comp, s->max_comp);
if (comp != s->comp) {
av_log(ctx, AV_LOG_VERBOSE, "Compensating %d samples per second.\n", comp);
if (avresample_set_compensation(s->avr, comp, inlink->sample_rate) == 0) {
s->comp = comp;
}
}
}
// adjust PTS to avoid monotonicity errors with input PTS jitter
pts -= delta;
delta = 0;
}
 
if (out_size > 0) {
AVFrame *buf_out = ff_get_audio_buffer(outlink, out_size);
if (!buf_out) {
ret = AVERROR(ENOMEM);
goto fail;
}
 
if (s->first_frame && delta > 0) {
int planar = av_sample_fmt_is_planar(buf_out->format);
int planes = planar ? nb_channels : 1;
int block_size = av_get_bytes_per_sample(buf_out->format) *
(planar ? 1 : nb_channels);
 
int ch;
 
av_samples_set_silence(buf_out->extended_data, 0, delta,
nb_channels, buf->format);
 
for (ch = 0; ch < planes; ch++)
buf_out->extended_data[ch] += delta * block_size;
 
avresample_read(s->avr, buf_out->extended_data, out_size);
 
for (ch = 0; ch < planes; ch++)
buf_out->extended_data[ch] -= delta * block_size;
} else {
avresample_read(s->avr, buf_out->extended_data, out_size);
 
if (delta > 0) {
av_samples_set_silence(buf_out->extended_data, out_size - delta,
delta, nb_channels, buf->format);
}
}
buf_out->pts = s->pts;
ret = ff_filter_frame(outlink, buf_out);
if (ret < 0)
goto fail;
s->got_output = 1;
} else if (avresample_available(s->avr)) {
av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
"whole buffer.\n");
}
 
/* drain any remaining buffered data */
avresample_read(s->avr, NULL, avresample_available(s->avr));
 
new_pts = pts - avresample_get_delay(s->avr);
/* check for s->pts monotonicity */
if (new_pts > s->pts) {
s->pts = new_pts;
ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
buf->linesize[0], buf->nb_samples);
} else {
av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
"whole buffer.\n");
ret = 0;
}
 
s->first_frame = 0;
fail:
av_frame_free(&buf);
 
return ret;
}
 
static const AVFilterPad avfilter_af_asyncts_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame
},
{ NULL }
};
 
static const AVFilterPad avfilter_af_asyncts_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_props,
.request_frame = request_frame
},
{ NULL }
};
 
AVFilter avfilter_af_asyncts = {
.name = "asyncts",
.description = NULL_IF_CONFIG_SMALL("Sync audio data to timestamps"),
.init = init,
.uninit = uninit,
.priv_size = sizeof(ASyncContext),
.priv_class = &asyncts_class,
.inputs = avfilter_af_asyncts_inputs,
.outputs = avfilter_af_asyncts_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_atempo.c
0,0 → 1,1196
/*
* Copyright (c) 2012 Pavel Koshevoy <pkoshevoy at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* tempo scaling audio filter -- an implementation of WSOLA algorithm
*
* Based on MIT licensed yaeAudioTempoFilter.h and yaeAudioFragment.h
* from Apprentice Video player by Pavel Koshevoy.
* https://sourceforge.net/projects/apprenticevideo/
*
* An explanation of the SOLA algorithm is available at
* http://www.surina.net/article/time-and-pitch-scaling.html
*
* WSOLA is very similar to SOLA; only one major difference exists between
* the two algorithms: SOLA shifts audio fragments along the output stream,
* whereas WSOLA shifts audio fragments along the input stream.
*
* The advantage of the WSOLA algorithm is that the overlap region size is
* always the same, so the blending function is constant and
* can be precomputed.
*/
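 
/*
* A minimal sketch of the fragment bookkeeping this implies, for a
* window of w samples and tempo scale t: consecutive fragments always
* overlap by w/2 samples on the output side, while the input position
* advances by t * w/2, so it is the input-side fragment position that
* is shifted (and later re-aligned):
*
*     in_pos[n+1]  = in_pos[n]  + (int64_t)(t * (w / 2));
*     out_pos[n+1] = out_pos[n] + w / 2;
*
* (cf. yae_advance_to_next_frag() below)
*/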
 
#include <float.h>
#include "libavcodec/avfft.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
 
/**
* A fragment of audio waveform
*/
typedef struct {
// index of the first sample of this fragment in the overall waveform;
// 0: input sample position
// 1: output sample position
int64_t position[2];
 
// original packed multi-channel samples:
uint8_t *data;
 
// number of samples in this fragment:
int nsamples;
 
// rDFT transform of the down-mixed mono fragment, used for
// fast waveform alignment via correlation in frequency domain:
FFTSample *xdat;
} AudioFragment;
 
/**
* Filter state machine states
*/
typedef enum {
YAE_LOAD_FRAGMENT,
YAE_ADJUST_POSITION,
YAE_RELOAD_FRAGMENT,
YAE_OUTPUT_OVERLAP_ADD,
YAE_FLUSH_OUTPUT,
} FilterState;
 
/**
* Filter state machine
*/
typedef struct {
const AVClass *class;
 
// ring buffer of input samples, necessary because the input
// fragment position may sometimes be adjusted backwards:
uint8_t *buffer;
 
// ring-buffer maximum capacity, expressed in samples:
int ring;
 
// ring-buffer housekeeping:
int size;
int head;
int tail;
 
// 0: input sample position corresponding to the ring buffer tail
// 1: output sample position
int64_t position[2];
 
// sample format:
enum AVSampleFormat format;
 
// number of channels:
int channels;
 
// number of bytes to skip from one sample to the next, across multiple channels;
// stride = (number-of-channels * bits-per-sample-per-channel) / 8
int stride;
 
// fragment window size, power-of-two integer:
int window;
 
// Hann window coefficients, for feathering
// (blending) the overlapping fragment region:
float *hann;
 
// tempo scaling factor:
double tempo;
 
// a snapshot of previous fragment input and output position values
// captured when the tempo scale factor was set most recently:
int64_t origin[2];
 
// current/previous fragment ring-buffer:
AudioFragment frag[2];
 
// current fragment index:
uint64_t nfrag;
 
// current state:
FilterState state;
 
// for fast correlation calculation in frequency domain:
RDFTContext *real_to_complex;
RDFTContext *complex_to_real;
FFTSample *correlation;
 
// for managing AVFilterPad.request_frame and AVFilterPad.filter_frame
AVFrame *dst_buffer;
uint8_t *dst;
uint8_t *dst_end;
uint64_t nsamples_in;
uint64_t nsamples_out;
} ATempoContext;
 
#define OFFSET(x) offsetof(ATempoContext, x)
 
static const AVOption atempo_options[] = {
{ "tempo", "set tempo scale factor",
OFFSET(tempo), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 0.5, 2.0,
AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(atempo);
 
inline static AudioFragment *yae_curr_frag(ATempoContext *atempo)
{
return &atempo->frag[atempo->nfrag % 2];
}
 
inline static AudioFragment *yae_prev_frag(ATempoContext *atempo)
{
return &atempo->frag[(atempo->nfrag + 1) % 2];
}
 
/**
* Reset filter to initial state, do not deallocate existing local buffers.
*/
static void yae_clear(ATempoContext *atempo)
{
atempo->size = 0;
atempo->head = 0;
atempo->tail = 0;
 
atempo->nfrag = 0;
atempo->state = YAE_LOAD_FRAGMENT;
 
atempo->position[0] = 0;
atempo->position[1] = 0;
 
atempo->origin[0] = 0;
atempo->origin[1] = 0;
 
atempo->frag[0].position[0] = 0;
atempo->frag[0].position[1] = 0;
atempo->frag[0].nsamples = 0;
 
atempo->frag[1].position[0] = 0;
atempo->frag[1].position[1] = 0;
atempo->frag[1].nsamples = 0;
 
// shift left position of 1st fragment by half a window
// so that no re-normalization would be required for
// the left half of the 1st fragment:
atempo->frag[0].position[0] = -(int64_t)(atempo->window / 2);
atempo->frag[0].position[1] = -(int64_t)(atempo->window / 2);
 
av_frame_free(&atempo->dst_buffer);
atempo->dst = NULL;
atempo->dst_end = NULL;
 
atempo->nsamples_in = 0;
atempo->nsamples_out = 0;
}
 
/**
* Reset filter to initial state and deallocate all buffers.
*/
static void yae_release_buffers(ATempoContext *atempo)
{
yae_clear(atempo);
 
av_freep(&atempo->frag[0].data);
av_freep(&atempo->frag[1].data);
av_freep(&atempo->frag[0].xdat);
av_freep(&atempo->frag[1].xdat);
 
av_freep(&atempo->buffer);
av_freep(&atempo->hann);
av_freep(&atempo->correlation);
 
av_rdft_end(atempo->real_to_complex);
atempo->real_to_complex = NULL;
 
av_rdft_end(atempo->complex_to_real);
atempo->complex_to_real = NULL;
}
 
/* av_realloc is not aligned enough; fortunately, the data does not need to
* be preserved */
#define RE_MALLOC_OR_FAIL(field, field_size) \
do { \
av_freep(&field); \
field = av_malloc(field_size); \
if (!field) { \
yae_release_buffers(atempo); \
return AVERROR(ENOMEM); \
} \
} while (0)
 
/**
* Prepare filter for processing audio data of given format,
* sample rate and number of channels.
*/
static int yae_reset(ATempoContext *atempo,
enum AVSampleFormat format,
int sample_rate,
int channels)
{
const int sample_size = av_get_bytes_per_sample(format);
uint32_t nlevels = 0;
uint32_t pot;
int i;
 
atempo->format = format;
atempo->channels = channels;
atempo->stride = sample_size * channels;
 
// pick a segment window size:
atempo->window = sample_rate / 24;
 
// adjust window size to be a power-of-two integer:
nlevels = av_log2(atempo->window);
pot = 1 << nlevels;
av_assert0(pot <= atempo->window);
 
if (pot < atempo->window) {
atempo->window = pot * 2;
nlevels++;
}
 
// initialize audio fragment buffers:
RE_MALLOC_OR_FAIL(atempo->frag[0].data, atempo->window * atempo->stride);
RE_MALLOC_OR_FAIL(atempo->frag[1].data, atempo->window * atempo->stride);
RE_MALLOC_OR_FAIL(atempo->frag[0].xdat, atempo->window * sizeof(FFTComplex));
RE_MALLOC_OR_FAIL(atempo->frag[1].xdat, atempo->window * sizeof(FFTComplex));
 
// initialize rDFT contexts:
av_rdft_end(atempo->real_to_complex);
atempo->real_to_complex = NULL;
 
av_rdft_end(atempo->complex_to_real);
atempo->complex_to_real = NULL;
 
atempo->real_to_complex = av_rdft_init(nlevels + 1, DFT_R2C);
if (!atempo->real_to_complex) {
yae_release_buffers(atempo);
return AVERROR(ENOMEM);
}
 
atempo->complex_to_real = av_rdft_init(nlevels + 1, IDFT_C2R);
if (!atempo->complex_to_real) {
yae_release_buffers(atempo);
return AVERROR(ENOMEM);
}
 
RE_MALLOC_OR_FAIL(atempo->correlation, atempo->window * sizeof(FFTComplex));
 
atempo->ring = atempo->window * 3;
RE_MALLOC_OR_FAIL(atempo->buffer, atempo->ring * atempo->stride);
 
// initialize the Hann window function:
RE_MALLOC_OR_FAIL(atempo->hann, atempo->window * sizeof(float));
 
for (i = 0; i < atempo->window; i++) {
double t = (double)i / (double)(atempo->window - 1);
double h = 0.5 * (1.0 - cos(2.0 * M_PI * t));
atempo->hann[i] = (float)h;
}
 
yae_clear(atempo);
return 0;
}
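 
/*
* A numeric sketch of the window sizing above, assuming a 44100 Hz
* stream: 44100 / 24 = 1837, av_log2(1837) = 10, so pot = 1024 < 1837
* and the window is rounded up to 2048 samples with nlevels = 11.  The
* rDFT contexts are then created with nlevels + 1 = 12, i.e. they
* operate on 2 * window = 4096 real samples (the fragment, zero-padded
* in xdat).
*/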
 
static int yae_set_tempo(AVFilterContext *ctx, const char *arg_tempo)
{
const AudioFragment *prev;
ATempoContext *atempo = ctx->priv;
char *tail = NULL;
double tempo = av_strtod(arg_tempo, &tail);
 
if (tail && *tail) {
av_log(ctx, AV_LOG_ERROR, "Invalid tempo value '%s'\n", arg_tempo);
return AVERROR(EINVAL);
}
 
if (tempo < 0.5 || tempo > 2.0) {
av_log(ctx, AV_LOG_ERROR, "Tempo value %f exceeds [0.5, 2.0] range\n",
tempo);
return AVERROR(EINVAL);
}
 
prev = yae_prev_frag(atempo);
atempo->origin[0] = prev->position[0] + atempo->window / 2;
atempo->origin[1] = prev->position[1] + atempo->window / 2;
atempo->tempo = tempo;
return 0;
}
 
/**
* A helper macro for initializing complex data buffer with scalar data
* of a given type.
*/
#define yae_init_xdat(scalar_type, scalar_max) \
do { \
const uint8_t *src_end = src + \
frag->nsamples * atempo->channels * sizeof(scalar_type); \
\
FFTSample *xdat = frag->xdat; \
scalar_type tmp; \
\
if (atempo->channels == 1) { \
for (; src < src_end; xdat++) { \
tmp = *(const scalar_type *)src; \
src += sizeof(scalar_type); \
\
*xdat = (FFTSample)tmp; \
} \
} else { \
FFTSample s, max, ti, si; \
int i; \
\
for (; src < src_end; xdat++) { \
tmp = *(const scalar_type *)src; \
src += sizeof(scalar_type); \
\
max = (FFTSample)tmp; \
s = FFMIN((FFTSample)scalar_max, \
(FFTSample)fabsf(max)); \
\
for (i = 1; i < atempo->channels; i++) { \
tmp = *(const scalar_type *)src; \
src += sizeof(scalar_type); \
\
ti = (FFTSample)tmp; \
si = FFMIN((FFTSample)scalar_max, \
(FFTSample)fabsf(ti)); \
\
if (s < si) { \
s = si; \
max = ti; \
} \
} \
\
*xdat = max; \
} \
} \
} while (0)
 
/**
* Initialize complex data buffer of a given audio fragment
* with down-mixed mono data of appropriate scalar type.
*/
static void yae_downmix(ATempoContext *atempo, AudioFragment *frag)
{
// shortcuts:
const uint8_t *src = frag->data;
 
// init complex data buffer used for FFT and Correlation:
memset(frag->xdat, 0, sizeof(FFTComplex) * atempo->window);
 
if (atempo->format == AV_SAMPLE_FMT_U8) {
yae_init_xdat(uint8_t, 127);
} else if (atempo->format == AV_SAMPLE_FMT_S16) {
yae_init_xdat(int16_t, 32767);
} else if (atempo->format == AV_SAMPLE_FMT_S32) {
yae_init_xdat(int, 2147483647);
} else if (atempo->format == AV_SAMPLE_FMT_FLT) {
yae_init_xdat(float, 1);
} else if (atempo->format == AV_SAMPLE_FMT_DBL) {
yae_init_xdat(double, 1);
}
}
 
/**
* Populate the internal data buffer on an as-needed basis.
*
* @return
* 0 if requested data was already available or was successfully loaded,
* AVERROR(EAGAIN) if more input data is required.
*/
static int yae_load_data(ATempoContext *atempo,
const uint8_t **src_ref,
const uint8_t *src_end,
int64_t stop_here)
{
// shortcut:
const uint8_t *src = *src_ref;
const int read_size = stop_here - atempo->position[0];
 
if (stop_here <= atempo->position[0]) {
return 0;
}
 
// samples are not expected to be skipped:
av_assert0(read_size <= atempo->ring);
 
while (atempo->position[0] < stop_here && src < src_end) {
int src_samples = (src_end - src) / atempo->stride;
 
// load data piece-wise, in order to avoid complicating the logic:
int nsamples = FFMIN(read_size, src_samples);
int na;
int nb;
 
nsamples = FFMIN(nsamples, atempo->ring);
na = FFMIN(nsamples, atempo->ring - atempo->tail);
nb = FFMIN(nsamples - na, atempo->ring);
 
if (na) {
uint8_t *a = atempo->buffer + atempo->tail * atempo->stride;
memcpy(a, src, na * atempo->stride);
 
src += na * atempo->stride;
atempo->position[0] += na;
 
atempo->size = FFMIN(atempo->size + na, atempo->ring);
atempo->tail = (atempo->tail + na) % atempo->ring;
atempo->head =
atempo->size < atempo->ring ?
atempo->tail - atempo->size :
atempo->tail;
}
 
if (nb) {
uint8_t *b = atempo->buffer;
memcpy(b, src, nb * atempo->stride);
 
src += nb * atempo->stride;
atempo->position[0] += nb;
 
atempo->size = FFMIN(atempo->size + nb, atempo->ring);
atempo->tail = (atempo->tail + nb) % atempo->ring;
atempo->head =
atempo->size < atempo->ring ?
atempo->tail - atempo->size :
atempo->tail;
}
}
 
// pass back the updated source buffer pointer:
*src_ref = src;
 
// sanity check:
av_assert0(atempo->position[0] <= stop_here);
 
return atempo->position[0] == stop_here ? 0 : AVERROR(EAGAIN);
}
 
/**
* Populate current audio fragment data buffer.
*
* @return
* 0 when the fragment is ready,
* AVERROR(EAGAIN) if more input data is required.
*/
static int yae_load_frag(ATempoContext *atempo,
const uint8_t **src_ref,
const uint8_t *src_end)
{
// shortcuts:
AudioFragment *frag = yae_curr_frag(atempo);
uint8_t *dst;
int64_t missing, start, zeros;
uint32_t nsamples;
const uint8_t *a, *b;
int i0, i1, n0, n1, na, nb;
 
int64_t stop_here = frag->position[0] + atempo->window;
if (src_ref && yae_load_data(atempo, src_ref, src_end, stop_here) != 0) {
return AVERROR(EAGAIN);
}
 
// calculate the number of samples we don't have:
missing =
stop_here > atempo->position[0] ?
stop_here - atempo->position[0] : 0;
 
nsamples =
missing < (int64_t)atempo->window ?
(uint32_t)(atempo->window - missing) : 0;
 
// setup the output buffer:
frag->nsamples = nsamples;
dst = frag->data;
 
start = atempo->position[0] - atempo->size;
zeros = 0;
 
if (frag->position[0] < start) {
// what we don't have we substitute with zeros:
zeros = FFMIN(start - frag->position[0], (int64_t)nsamples);
av_assert0(zeros != nsamples);
 
memset(dst, 0, zeros * atempo->stride);
dst += zeros * atempo->stride;
}
 
if (zeros == nsamples) {
return 0;
}
 
// get the remaining data from the ring buffer:
na = (atempo->head < atempo->tail ?
atempo->tail - atempo->head :
atempo->ring - atempo->head);
 
nb = atempo->head < atempo->tail ? 0 : atempo->tail;
 
// sanity check:
av_assert0(nsamples <= zeros + na + nb);
 
a = atempo->buffer + atempo->head * atempo->stride;
b = atempo->buffer;
 
i0 = frag->position[0] + zeros - start;
i1 = i0 < na ? 0 : i0 - na;
 
n0 = i0 < na ? FFMIN(na - i0, (int)(nsamples - zeros)) : 0;
n1 = nsamples - zeros - n0;
 
if (n0) {
memcpy(dst, a + i0 * atempo->stride, n0 * atempo->stride);
dst += n0 * atempo->stride;
}
 
if (n1) {
memcpy(dst, b + i1 * atempo->stride, n1 * atempo->stride);
}
 
return 0;
}
 
/**
* Prepare for loading next audio fragment.
*/
static void yae_advance_to_next_frag(ATempoContext *atempo)
{
const double fragment_step = atempo->tempo * (double)(atempo->window / 2);
 
const AudioFragment *prev;
AudioFragment *frag;
 
atempo->nfrag++;
prev = yae_prev_frag(atempo);
frag = yae_curr_frag(atempo);
 
frag->position[0] = prev->position[0] + (int64_t)fragment_step;
frag->position[1] = prev->position[1] + atempo->window / 2;
frag->nsamples = 0;
}
 
/**
* Calculate cross-correlation via rDFT.
*
* Multiply two vectors of complex numbers (result of real_to_complex rDFT)
* and transform back via complex_to_real rDFT.
*/
static void yae_xcorr_via_rdft(FFTSample *xcorr,
RDFTContext *complex_to_real,
const FFTComplex *xa,
const FFTComplex *xb,
const int window)
{
FFTComplex *xc = (FFTComplex *)xcorr;
int i;
 
// NOTE: first element requires special care -- Given Y = rDFT(X),
// Im(Y[0]) and Im(Y[N/2]) are always zero, therefore av_rdft_calc
// stores Re(Y[N/2]) in place of Im(Y[0]).
 
xc->re = xa->re * xb->re;
xc->im = xa->im * xb->im;
xa++;
xb++;
xc++;
 
for (i = 1; i < window; i++, xa++, xb++, xc++) {
xc->re = (xa->re * xb->re + xa->im * xb->im);
xc->im = (xa->im * xb->re - xa->re * xb->im);
}
 
// apply inverse rDFT:
av_rdft_calc(complex_to_real, xcorr);
}
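 
/*
* A minimal sketch of why this computes cross-correlation: by the
* correlation theorem, for real sequences a and b,
*
*     xcorr(a, b) = IDFT( DFT(a) * conj(DFT(b)) )
*
* which is what the loop above evaluates bin by bin, so two forward
* rDFTs plus one inverse rDFT replace an O(N^2) time-domain search.
* Element 0 is special-cased because av_rdft_calc() packs the two
* purely real bins Y[0] and Y[N/2] into a single complex slot (see the
* NOTE above).
*/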
 
/**
* Calculate alignment offset for given fragment
* relative to the previous fragment.
*
* @return alignment offset of current fragment relative to previous.
*/
static int yae_align(AudioFragment *frag,
const AudioFragment *prev,
const int window,
const int delta_max,
const int drift,
FFTSample *correlation,
RDFTContext *complex_to_real)
{
int best_offset = -drift;
FFTSample best_metric = -FLT_MAX;
FFTSample *xcorr;
 
int i0;
int i1;
int i;
 
yae_xcorr_via_rdft(correlation,
complex_to_real,
(const FFTComplex *)prev->xdat,
(const FFTComplex *)frag->xdat,
window);
 
// identify search window boundaries:
i0 = FFMAX(window / 2 - delta_max - drift, 0);
i0 = FFMIN(i0, window);
 
i1 = FFMIN(window / 2 + delta_max - drift, window - window / 16);
i1 = FFMAX(i1, 0);
 
// identify cross-correlation peaks within search window:
xcorr = correlation + i0;
 
for (i = i0; i < i1; i++, xcorr++) {
FFTSample metric = *xcorr;
 
// normalize:
FFTSample drifti = (FFTSample)(drift + i);
metric *= drifti * (FFTSample)(i - i0) * (FFTSample)(i1 - i);
 
if (metric > best_metric) {
best_metric = metric;
best_offset = i - window / 2;
}
}
 
return best_offset;
}
 
/**
* Adjust current fragment position for better alignment
* with previous fragment.
*
* @return alignment correction.
*/
static int yae_adjust_position(ATempoContext *atempo)
{
const AudioFragment *prev = yae_prev_frag(atempo);
AudioFragment *frag = yae_curr_frag(atempo);
 
const double prev_output_position =
(double)(prev->position[1] - atempo->origin[1] + atempo->window / 2);
 
const double ideal_output_position =
(double)(prev->position[0] - atempo->origin[0] + atempo->window / 2) /
atempo->tempo;
 
const int drift = (int)(prev_output_position - ideal_output_position);
 
const int delta_max = atempo->window / 2;
const int correction = yae_align(frag,
prev,
atempo->window,
delta_max,
drift,
atempo->correlation,
atempo->complex_to_real);
 
if (correction) {
// adjust fragment position:
frag->position[0] -= correction;
 
// clear so that the fragment can be reloaded:
frag->nsamples = 0;
}
 
return correction;
}
 
/**
* A helper macro for blending the overlap region of previous
* and current audio fragment.
*/
#define yae_blend(scalar_type) \
do { \
const scalar_type *aaa = (const scalar_type *)a; \
const scalar_type *bbb = (const scalar_type *)b; \
\
scalar_type *out = (scalar_type *)dst; \
scalar_type *out_end = (scalar_type *)dst_end; \
int64_t i; \
\
for (i = 0; i < overlap && out < out_end; \
i++, atempo->position[1]++, wa++, wb++) { \
float w0 = *wa; \
float w1 = *wb; \
int j; \
\
for (j = 0; j < atempo->channels; \
j++, aaa++, bbb++, out++) { \
float t0 = (float)*aaa; \
float t1 = (float)*bbb; \
\
*out = \
frag->position[0] + i < 0 ? \
*aaa : \
(scalar_type)(t0 * w0 + t1 * w1); \
} \
} \
dst = (uint8_t *)out; \
} while (0)
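 
/*
* A minimal sketch of why the crossfade above needs no renormalization:
* Hann coefficients at half-window offsets are complementary,
*
*     hann[i] + hann[i + w/2] ~ 1
*
* (exact if yae_reset() divided by w instead of w - 1), and the overlap
* always pairs the fading-out second half of the previous fragment with
* the fading-in first half of the current one, so t0 * w0 + t1 * w1 is
* a unity-gain blend.
*/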
 
/**
* Blend the overlap region of previous and current audio fragment
* and output the results to the given destination buffer.
*
* @return
* 0 if the overlap region was completely stored in the dst buffer,
* AVERROR(EAGAIN) if more destination buffer space is required.
*/
static int yae_overlap_add(ATempoContext *atempo,
uint8_t **dst_ref,
uint8_t *dst_end)
{
// shortcuts:
const AudioFragment *prev = yae_prev_frag(atempo);
const AudioFragment *frag = yae_curr_frag(atempo);
 
const int64_t start_here = FFMAX(atempo->position[1],
frag->position[1]);
 
const int64_t stop_here = FFMIN(prev->position[1] + prev->nsamples,
frag->position[1] + frag->nsamples);
 
const int64_t overlap = stop_here - start_here;
 
const int64_t ia = start_here - prev->position[1];
const int64_t ib = start_here - frag->position[1];
 
const float *wa = atempo->hann + ia;
const float *wb = atempo->hann + ib;
 
const uint8_t *a = prev->data + ia * atempo->stride;
const uint8_t *b = frag->data + ib * atempo->stride;
 
uint8_t *dst = *dst_ref;
 
av_assert0(start_here <= stop_here &&
frag->position[1] <= start_here &&
overlap <= frag->nsamples);
 
if (atempo->format == AV_SAMPLE_FMT_U8) {
yae_blend(uint8_t);
} else if (atempo->format == AV_SAMPLE_FMT_S16) {
yae_blend(int16_t);
} else if (atempo->format == AV_SAMPLE_FMT_S32) {
yae_blend(int);
} else if (atempo->format == AV_SAMPLE_FMT_FLT) {
yae_blend(float);
} else if (atempo->format == AV_SAMPLE_FMT_DBL) {
yae_blend(double);
}
 
// pass-back the updated destination buffer pointer:
*dst_ref = dst;
 
return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
}
 
/**
* Feed as much data to the filter as it is able to consume
* and receive as much processed data in the destination buffer
* as it is able to produce or store.
*/
static void
yae_apply(ATempoContext *atempo,
const uint8_t **src_ref,
const uint8_t *src_end,
uint8_t **dst_ref,
uint8_t *dst_end)
{
while (1) {
if (atempo->state == YAE_LOAD_FRAGMENT) {
// load additional data for the current fragment:
if (yae_load_frag(atempo, src_ref, src_end) != 0) {
break;
}
 
// down-mix to mono:
yae_downmix(atempo, yae_curr_frag(atempo));
 
// apply rDFT:
av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat);
 
// must load the second fragment before alignment can start:
if (!atempo->nfrag) {
yae_advance_to_next_frag(atempo);
continue;
}
 
atempo->state = YAE_ADJUST_POSITION;
}
 
if (atempo->state == YAE_ADJUST_POSITION) {
// adjust position for better alignment:
if (yae_adjust_position(atempo)) {
// reload the fragment at the corrected position, so that the
// Hann window blending would not require normalization:
atempo->state = YAE_RELOAD_FRAGMENT;
} else {
atempo->state = YAE_OUTPUT_OVERLAP_ADD;
}
}
 
if (atempo->state == YAE_RELOAD_FRAGMENT) {
// load additional data if necessary due to position adjustment:
if (yae_load_frag(atempo, src_ref, src_end) != 0) {
break;
}
 
// down-mix to mono:
yae_downmix(atempo, yae_curr_frag(atempo));
 
// apply rDFT:
av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat);
 
atempo->state = YAE_OUTPUT_OVERLAP_ADD;
}
 
if (atempo->state == YAE_OUTPUT_OVERLAP_ADD) {
// overlap-add and output the result:
if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
break;
}
 
// advance to the next fragment, repeat:
yae_advance_to_next_frag(atempo);
atempo->state = YAE_LOAD_FRAGMENT;
}
}
}
 
/**
* Flush any buffered data from the filter.
*
* @return
* 0 if all data was completely stored in the dst buffer,
* AVERROR(EAGAIN) if more destination buffer space is required.
*/
static int yae_flush(ATempoContext *atempo,
uint8_t **dst_ref,
uint8_t *dst_end)
{
AudioFragment *frag = yae_curr_frag(atempo);
int64_t overlap_end;
int64_t start_here;
int64_t stop_here;
int64_t offset;
 
const uint8_t *src;
uint8_t *dst;
 
int src_size;
int dst_size;
int nbytes;
 
atempo->state = YAE_FLUSH_OUTPUT;
 
if (atempo->position[0] == frag->position[0] + frag->nsamples &&
atempo->position[1] == frag->position[1] + frag->nsamples) {
// the current fragment is already flushed:
return 0;
}
 
if (frag->position[0] + frag->nsamples < atempo->position[0]) {
// finish loading the current (possibly partial) fragment:
yae_load_frag(atempo, NULL, NULL);
 
if (atempo->nfrag) {
// down-mix to mono:
yae_downmix(atempo, frag);
 
// apply rDFT:
av_rdft_calc(atempo->real_to_complex, frag->xdat);
 
// align current fragment to previous fragment:
if (yae_adjust_position(atempo)) {
// reload the current fragment due to adjusted position:
yae_load_frag(atempo, NULL, NULL);
}
}
}
 
// flush the overlap region:
overlap_end = frag->position[1] + FFMIN(atempo->window / 2,
frag->nsamples);
 
while (atempo->position[1] < overlap_end) {
if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
return AVERROR(EAGAIN);
}
}
 
// flush the remainder of the current fragment:
start_here = FFMAX(atempo->position[1], overlap_end);
stop_here = frag->position[1] + frag->nsamples;
offset = start_here - frag->position[1];
av_assert0(start_here <= stop_here && frag->position[1] <= start_here);
 
src = frag->data + offset * atempo->stride;
dst = (uint8_t *)*dst_ref;
 
src_size = (int)(stop_here - start_here) * atempo->stride;
dst_size = dst_end - dst;
nbytes = FFMIN(src_size, dst_size);
 
memcpy(dst, src, nbytes);
dst += nbytes;
 
atempo->position[1] += (nbytes / atempo->stride);
 
// pass-back the updated destination buffer pointer:
*dst_ref = (uint8_t *)dst;
 
return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
}
 
static av_cold int init(AVFilterContext *ctx)
{
ATempoContext *atempo = ctx->priv;
atempo->format = AV_SAMPLE_FMT_NONE;
atempo->state = YAE_LOAD_FRAGMENT;
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
ATempoContext *atempo = ctx->priv;
yae_release_buffers(atempo);
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterChannelLayouts *layouts = NULL;
AVFilterFormats *formats = NULL;
 
// WSOLA necessitates an internal sliding-window ring buffer
// for the incoming audio stream.
//
// Planar sample formats are too cumbersome to store in a ring buffer,
// therefore they are not supported.
//
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_U8,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_DBL,
AV_SAMPLE_FMT_NONE
};
 
layouts = ff_all_channel_layouts();
if (!layouts) {
return AVERROR(ENOMEM);
}
ff_set_common_channel_layouts(ctx, layouts);
 
formats = ff_make_format_list(sample_fmts);
if (!formats) {
return AVERROR(ENOMEM);
}
ff_set_common_formats(ctx, formats);
 
formats = ff_all_samplerates();
if (!formats) {
return AVERROR(ENOMEM);
}
ff_set_common_samplerates(ctx, formats);
 
return 0;
}
 
static int config_props(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
ATempoContext *atempo = ctx->priv;
 
enum AVSampleFormat format = inlink->format;
int sample_rate = (int)inlink->sample_rate;
int channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
 
ctx->outputs[0]->flags |= FF_LINK_FLAG_REQUEST_LOOP;
 
return yae_reset(atempo, format, sample_rate, channels);
}
 
static int push_samples(ATempoContext *atempo,
AVFilterLink *outlink,
int n_out)
{
int ret;
 
atempo->dst_buffer->sample_rate = outlink->sample_rate;
atempo->dst_buffer->nb_samples = n_out;
 
// adjust the PTS:
atempo->dst_buffer->pts =
av_rescale_q(atempo->nsamples_out,
(AVRational){ 1, outlink->sample_rate },
outlink->time_base);
 
ret = ff_filter_frame(outlink, atempo->dst_buffer);
if (ret < 0)
return ret;
atempo->dst_buffer = NULL;
atempo->dst = NULL;
atempo->dst_end = NULL;
 
atempo->nsamples_out += n_out;
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
{
AVFilterContext *ctx = inlink->dst;
ATempoContext *atempo = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
 
int ret = 0;
int n_in = src_buffer->nb_samples;
int n_out = (int)(0.5 + ((double)n_in) / atempo->tempo);
 
const uint8_t *src = src_buffer->data[0];
const uint8_t *src_end = src + n_in * atempo->stride;
 
while (src < src_end) {
if (!atempo->dst_buffer) {
atempo->dst_buffer = ff_get_audio_buffer(outlink, n_out);
if (!atempo->dst_buffer)
return AVERROR(ENOMEM);
av_frame_copy_props(atempo->dst_buffer, src_buffer);
 
atempo->dst = atempo->dst_buffer->data[0];
atempo->dst_end = atempo->dst + n_out * atempo->stride;
}
 
yae_apply(atempo, &src, src_end, &atempo->dst, atempo->dst_end);
 
if (atempo->dst == atempo->dst_end) {
int n_samples = ((atempo->dst - atempo->dst_buffer->data[0]) /
atempo->stride);
ret = push_samples(atempo, outlink, n_samples);
if (ret < 0)
goto end;
}
}
 
atempo->nsamples_in += n_in;
end:
av_frame_free(&src_buffer);
return ret;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
ATempoContext *atempo = ctx->priv;
int ret;
 
ret = ff_request_frame(ctx->inputs[0]);
 
if (ret == AVERROR_EOF) {
// flush the filter:
int n_max = atempo->ring;
int n_out;
int err = AVERROR(EAGAIN);
 
while (err == AVERROR(EAGAIN)) {
if (!atempo->dst_buffer) {
atempo->dst_buffer = ff_get_audio_buffer(outlink, n_max);
if (!atempo->dst_buffer)
return AVERROR(ENOMEM);
 
atempo->dst = atempo->dst_buffer->data[0];
atempo->dst_end = atempo->dst + n_max * atempo->stride;
}
 
err = yae_flush(atempo, &atempo->dst, atempo->dst_end);
 
n_out = ((atempo->dst - atempo->dst_buffer->data[0]) /
atempo->stride);
 
if (n_out) {
ret = push_samples(atempo, outlink, n_out);
}
}
 
av_frame_free(&atempo->dst_buffer);
atempo->dst = NULL;
atempo->dst_end = NULL;
 
return AVERROR_EOF;
}
 
return ret;
}
 
static int process_command(AVFilterContext *ctx,
const char *cmd,
const char *arg,
char *res,
int res_len,
int flags)
{
return !strcmp(cmd, "tempo") ? yae_set_tempo(ctx, arg) : AVERROR(ENOSYS);
}
 
static const AVFilterPad atempo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
 
static const AVFilterPad atempo_outputs[] = {
{
.name = "default",
.request_frame = request_frame,
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_atempo = {
.name = "atempo",
.description = NULL_IF_CONFIG_SMALL("Adjust audio tempo."),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.process_command = process_command,
.priv_size = sizeof(ATempoContext),
.priv_class = &atempo_class,
.inputs = atempo_inputs,
.outputs = atempo_outputs,
};
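 
/*
* A usage sketch (values illustrative): play audio 25% faster without
* changing pitch, and change the factor mid-stream through the "tempo"
* command handled by process_command() above:
*
*     ffmpeg -i input.wav -af atempo=1.25 output.wav
*     ffmpeg -i input.wav -af "asendcmd=c='4.0 atempo tempo 1.5',atempo=1.25" output.wav
*
* The factor must stay within [0.5, 2.0]; larger changes can be chained,
* e.g. "atempo=2.0,atempo=2.0" for 4x.
*/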
/contrib/sdk/sources/ffmpeg/libavfilter/af_biquads.c
0,0 → 1,620
/*
* Copyright (c) 2013 Paul B Mahol
* Copyright (c) 2006-2008 Rob Sykes <robs@users.sourceforge.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/*
* 2-pole filters designed by Robert Bristow-Johnson <rbj@audioimagination.com>
* see http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt
*
* 1-pole filters based on code (c) 2000 Chris Bagwell <cbagwell@sprynet.com>
* Algorithms: Recursive single pole low/high pass filter
* Reference: The Scientist and Engineer's Guide to Digital Signal Processing
*
* low-pass: output[N] = input[N] * A + output[N-1] * B
* X = exp(-2.0 * pi * Fc)
* A = 1 - X
* B = X
* Fc = cutoff freq / sample rate
*
* Mimics an RC low-pass filter:
*
*     ---/\/\/\/\----------->
*                 |
*                --- C
*                ---
*                 |
*                 |
*                 V
*
* high-pass: output[N] = A0 * input[N] + A1 * input[N-1] + B1 * output[N-1]
* X = exp(-2.0 * pi * Fc)
* A0 = (1 + X) / 2
* A1 = -(1 + X) / 2
* B1 = X
* Fc = cutoff freq / sample rate
*
* Mimics an RC high-pass filter:
*
*          || C
*     ----||--------->
*          ||    |
*                <
*                > R
*                <
*                |
*                V
*/
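 
/*
* A numeric sketch of the single-pole formulas above, assuming a 1 kHz
* cutoff at a 48 kHz sample rate:
*
*     Fc = 1000.0 / 48000.0      ~ 0.020833
*     X  = exp(-2.0 * M_PI * Fc) ~ 0.8773
*
* low-pass: A = 1 - X ~ 0.1227, B = X ~ 0.8773, i.e. each output sample
* keeps about 87.7% of the previous output and mixes in about 12.3% of
* the new input.
*/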
 
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
enum FilterType {
biquad,
equalizer,
bass,
treble,
band,
bandpass,
bandreject,
allpass,
highpass,
lowpass,
};
 
enum WidthType {
NONE,
HERTZ,
OCTAVE,
QFACTOR,
SLOPE,
};
 
typedef struct ChanCache {
double i1, i2;
double o1, o2;
} ChanCache;
 
typedef struct {
const AVClass *class;
 
enum FilterType filter_type;
enum WidthType width_type;
int poles;
int csg;
 
double gain;
double frequency;
double width;
 
double a0, a1, a2;
double b0, b1, b2;
 
ChanCache *cache;
 
void (*filter)(const void *ibuf, void *obuf, int len,
double *i1, double *i2, double *o1, double *o2,
double b0, double b1, double b2, double a1, double a2);
} BiquadsContext;
 
static av_cold int init(AVFilterContext *ctx)
{
BiquadsContext *p = ctx->priv;
 
if (p->filter_type != biquad) {
if (p->frequency <= 0 || p->width <= 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid frequency %f and/or width %f <= 0\n",
p->frequency, p->width);
return AVERROR(EINVAL);
}
}
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats;
AVFilterChannelLayouts *layouts;
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
};
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, layouts);
 
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_samplerates(ctx, formats);
 
return 0;
}
 
#define BIQUAD_FILTER(name, type, min, max) \
static void biquad_## name (const void *input, void *output, int len, \
double *in1, double *in2, \
double *out1, double *out2, \
double b0, double b1, double b2, \
double a1, double a2) \
{ \
const type *ibuf = input; \
type *obuf = output; \
double i1 = *in1; \
double i2 = *in2; \
double o1 = *out1; \
double o2 = *out2; \
int i; \
a1 = -a1; \
a2 = -a2; \
\
for (i = 0; i+1 < len; i++) { \
o2 = i2 * b2 + i1 * b1 + ibuf[i] * b0 + o2 * a2 + o1 * a1; \
i2 = ibuf[i]; \
if (o2 < min) { \
av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
obuf[i] = min; \
} else if (o2 > max) { \
av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
obuf[i] = max; \
} else { \
obuf[i] = o2; \
} \
i++; \
o1 = i1 * b2 + i2 * b1 + ibuf[i] * b0 + o1 * a2 + o2 * a1; \
i1 = ibuf[i]; \
if (o1 < min) { \
av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
obuf[i] = min; \
} else if (o1 > max) { \
av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
obuf[i] = max; \
} else { \
obuf[i] = o1; \
} \
} \
if (i < len) { \
double o0 = ibuf[i] * b0 + i1 * b1 + i2 * b2 + o1 * a1 + o2 * a2; \
i2 = i1; \
i1 = ibuf[i]; \
o2 = o1; \
o1 = o0; \
if (o0 < min) { \
av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
obuf[i] = min; \
} else if (o0 > max) { \
av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
obuf[i] = max; \
} else { \
obuf[i] = o0; \
} \
} \
*in1 = i1; \
*in2 = i2; \
*out1 = o1; \
*out2 = o2; \
}
 
BIQUAD_FILTER(s16, int16_t, INT16_MIN, INT16_MAX)
BIQUAD_FILTER(s32, int32_t, INT32_MIN, INT32_MAX)
BIQUAD_FILTER(flt, float, -1., 1.)
BIQUAD_FILTER(dbl, double, -1., 1.)
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
BiquadsContext *p = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
double A = exp(p->gain / 40 * log(10.));
double w0 = 2 * M_PI * p->frequency / inlink->sample_rate;
double alpha;
 
if (w0 > M_PI) {
av_log(ctx, AV_LOG_ERROR,
"Invalid frequency %f. Frequency must be less than half the sample-rate %d.\n",
p->frequency, inlink->sample_rate);
return AVERROR(EINVAL);
}
 
switch (p->width_type) {
case NONE:
alpha = 0.0;
break;
case HERTZ:
alpha = sin(w0) / (2 * p->frequency / p->width);
break;
case OCTAVE:
alpha = sin(w0) * sinh(log(2.) / 2 * p->width * w0 / sin(w0));
break;
case QFACTOR:
alpha = sin(w0) / (2 * p->width);
break;
case SLOPE:
alpha = sin(w0) / 2 * sqrt((A + 1 / A) * (1 / p->width - 1) + 2);
break;
default:
av_assert0(0);
}
 
switch (p->filter_type) {
case biquad:
break;
case equalizer:
p->a0 = 1 + alpha / A;
p->a1 = -2 * cos(w0);
p->a2 = 1 - alpha / A;
p->b0 = 1 + alpha * A;
p->b1 = -2 * cos(w0);
p->b2 = 1 - alpha * A;
break;
case bass:
p->a0 = (A + 1) + (A - 1) * cos(w0) + 2 * sqrt(A) * alpha;
p->a1 = -2 * ((A - 1) + (A + 1) * cos(w0));
p->a2 = (A + 1) + (A - 1) * cos(w0) - 2 * sqrt(A) * alpha;
p->b0 = A * ((A + 1) - (A - 1) * cos(w0) + 2 * sqrt(A) * alpha);
p->b1 = 2 * A * ((A - 1) - (A + 1) * cos(w0));
p->b2 = A * ((A + 1) - (A - 1) * cos(w0) - 2 * sqrt(A) * alpha);
break;
case treble:
p->a0 = (A + 1) - (A - 1) * cos(w0) + 2 * sqrt(A) * alpha;
p->a1 = 2 * ((A - 1) - (A + 1) * cos(w0));
p->a2 = (A + 1) - (A - 1) * cos(w0) - 2 * sqrt(A) * alpha;
p->b0 = A * ((A + 1) + (A - 1) * cos(w0) + 2 * sqrt(A) * alpha);
p->b1 =-2 * A * ((A - 1) + (A + 1) * cos(w0));
p->b2 = A * ((A + 1) + (A - 1) * cos(w0) - 2 * sqrt(A) * alpha);
break;
case bandpass:
if (p->csg) {
p->a0 = 1 + alpha;
p->a1 = -2 * cos(w0);
p->a2 = 1 - alpha;
p->b0 = sin(w0) / 2;
p->b1 = 0;
p->b2 = -sin(w0) / 2;
} else {
p->a0 = 1 + alpha;
p->a1 = -2 * cos(w0);
p->a2 = 1 - alpha;
p->b0 = alpha;
p->b1 = 0;
p->b2 = -alpha;
}
break;
case bandreject:
p->a0 = 1 + alpha;
p->a1 = -2 * cos(w0);
p->a2 = 1 - alpha;
p->b0 = 1;
p->b1 = -2 * cos(w0);
p->b2 = 1;
break;
case lowpass:
if (p->poles == 1) {
p->a0 = 1;
p->a1 = -exp(-w0);
p->a2 = 0;
p->b0 = 1 + p->a1;
p->b1 = 0;
p->b2 = 0;
} else {
p->a0 = 1 + alpha;
p->a1 = -2 * cos(w0);
p->a2 = 1 - alpha;
p->b0 = (1 - cos(w0)) / 2;
p->b1 = 1 - cos(w0);
p->b2 = (1 - cos(w0)) / 2;
}
break;
case highpass:
if (p->poles == 1) {
p->a0 = 1;
p->a1 = -exp(-w0);
p->a2 = 0;
p->b0 = (1 - p->a1) / 2;
p->b1 = -p->b0;
p->b2 = 0;
} else {
p->a0 = 1 + alpha;
p->a1 = -2 * cos(w0);
p->a2 = 1 - alpha;
p->b0 = (1 + cos(w0)) / 2;
p->b1 = -(1 + cos(w0));
p->b2 = (1 + cos(w0)) / 2;
}
break;
case allpass:
p->a0 = 1 + alpha;
p->a1 = -2 * cos(w0);
p->a2 = 1 - alpha;
p->b0 = 1 - alpha;
p->b1 = -2 * cos(w0);
p->b2 = 1 + alpha;
break;
default:
av_assert0(0);
}
 
p->a1 /= p->a0;
p->a2 /= p->a0;
p->b0 /= p->a0;
p->b1 /= p->a0;
p->b2 /= p->a0;
 
p->cache = av_realloc_f(p->cache, sizeof(ChanCache), inlink->channels);
if (!p->cache)
return AVERROR(ENOMEM);
memset(p->cache, 0, sizeof(ChanCache) * inlink->channels);
 
switch (inlink->format) {
case AV_SAMPLE_FMT_S16P: p->filter = biquad_s16; break;
case AV_SAMPLE_FMT_S32P: p->filter = biquad_s32; break;
case AV_SAMPLE_FMT_FLTP: p->filter = biquad_flt; break;
case AV_SAMPLE_FMT_DBLP: p->filter = biquad_dbl; break;
default: av_assert0(0);
}
 
return 0;
}
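 
/*
* A worked numeric sketch of the coefficients above (values
* illustrative): two-pole low-pass, frequency = 1000 Hz, Q-factor
* width = 0.7071, sample rate 48000 Hz:
*
*     w0    = 2*pi*1000/48000 ~ 0.130900
*     alpha = sin(w0)/(2*Q)   ~ 0.092297
*     a0    = 1 + alpha       ~ 1.092297
*
* and after dividing through by a0:
*
*     b0 = b2 ~ 0.003916, b1 ~ 0.007832, a1 ~ -1.815342, a2 ~ 0.831004
*
* which the filter loop applies as
*
*     y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]
*/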
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
BiquadsContext *p = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out_buf;
int nb_samples = buf->nb_samples;
int ch;
 
if (av_frame_is_writable(buf)) {
out_buf = buf;
} else {
out_buf = ff_get_audio_buffer(inlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
av_frame_copy_props(out_buf, buf);
}
 
for (ch = 0; ch < av_frame_get_channels(buf); ch++)
p->filter(buf->extended_data[ch],
out_buf->extended_data[ch], nb_samples,
&p->cache[ch].i1, &p->cache[ch].i2,
&p->cache[ch].o1, &p->cache[ch].o2,
p->b0, p->b1, p->b2, p->a1, p->a2);
 
if (buf != out_buf)
av_frame_free(&buf);
 
return ff_filter_frame(outlink, out_buf);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
BiquadsContext *p = ctx->priv;
 
av_freep(&p->cache);
}
 
static const AVFilterPad inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
},
{ NULL }
};
 
#define OFFSET(x) offsetof(BiquadsContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
#define DEFINE_BIQUAD_FILTER(name_, description_) \
AVFILTER_DEFINE_CLASS(name_); \
static av_cold int name_##_init(AVFilterContext *ctx) \
{ \
BiquadsContext *p = ctx->priv; \
p->class = &name_##_class; \
p->filter_type = name_; \
return init(ctx); \
} \
\
AVFilter avfilter_af_##name_ = { \
.name = #name_, \
.description = NULL_IF_CONFIG_SMALL(description_), \
.priv_size = sizeof(BiquadsContext), \
.init = name_##_init, \
.uninit = uninit, \
.query_formats = query_formats, \
.inputs = inputs, \
.outputs = outputs, \
.priv_class = &name_##_class, \
}
 
#if CONFIG_EQUALIZER_FILTER
static const AVOption equalizer_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS},
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{NULL}
};
 
DEFINE_BIQUAD_FILTER(equalizer, "Apply two-pole peaking equalization (EQ) filter.");
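 
/* Illustrative usage (example values only):
 *   equalizer=f=1000:width_type=q:w=1.5:g=-5
 * cuts 5 dB in a Q=1.5 band centred on 1 kHz. */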
#endif /* CONFIG_EQUALIZER_FILTER */
#if CONFIG_BASS_FILTER
static const AVOption bass_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{NULL}
};
 
DEFINE_BIQUAD_FILTER(bass, "Boost or cut lower frequencies.");
#endif /* CONFIG_BASS_FILTER */
#if CONFIG_TREBLE_FILTER
static const AVOption treble_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{NULL}
};
 
DEFINE_BIQUAD_FILTER(treble, "Boost or cut upper frequencies.");
#endif /* CONFIG_TREBLE_FILTER */
#if CONFIG_BANDPASS_FILTER
static const AVOption bandpass_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
{"csg", "use constant skirt gain", OFFSET(csg), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
{NULL}
};
 
DEFINE_BIQUAD_FILTER(bandpass, "Apply a two-pole Butterworth band-pass filter.");
#endif /* CONFIG_BANDPASS_FILTER */
#if CONFIG_BANDREJECT_FILTER
static const AVOption bandreject_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
{NULL}
};
 
DEFINE_BIQUAD_FILTER(bandreject, "Apply a two-pole Butterworth band-reject filter.");
#endif /* CONFIG_BANDREJECT_FILTER */
#if CONFIG_LOWPASS_FILTER
static const AVOption lowpass_options[] = {
{"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
{"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
{"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
{NULL}
};
 
DEFINE_BIQUAD_FILTER(lowpass, "Apply a low-pass filter with 3dB point frequency.");
#endif /* CONFIG_LOWPASS_FILTER */
#if CONFIG_HIGHPASS_FILTER
static const AVOption highpass_options[] = {
{"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
{"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
{NULL}
};
 
DEFINE_BIQUAD_FILTER(highpass, "Apply a high-pass filter with 3dB point frequency.");
#endif /* CONFIG_HIGHPASS_FILTER */
#if CONFIG_ALLPASS_FILTER
static const AVOption allpass_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=HERTZ}, HERTZ, SLOPE, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
{"w", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
{NULL}
};
 
DEFINE_BIQUAD_FILTER(allpass, "Apply a two-pole all-pass filter.");
#endif /* CONFIG_ALLPASS_FILTER */
#if CONFIG_BIQUAD_FILTER
static const AVOption biquad_options[] = {
{"a0", NULL, OFFSET(a0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
{"a1", NULL, OFFSET(a1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
{"a2", NULL, OFFSET(a2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
{"b0", NULL, OFFSET(b0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
{"b1", NULL, OFFSET(b1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
{"b2", NULL, OFFSET(b2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
{NULL}
};
 
DEFINE_BIQUAD_FILTER(biquad, "Apply a biquad IIR filter with the given coefficients.");
#endif /* CONFIG_BIQUAD_FILTER */
/contrib/sdk/sources/ffmpeg/libavfilter/af_channelmap.c
0,0 → 1,409
/*
* Copyright (c) 2012 Google, Inc.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* audio channel mapping filter
*/
 
#include <ctype.h>
 
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
 
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
struct ChannelMap {
uint64_t in_channel;
uint64_t out_channel;
int in_channel_idx;
int out_channel_idx;
};
 
enum MappingMode {
MAP_NONE,
MAP_ONE_INT,
MAP_ONE_STR,
MAP_PAIR_INT_INT,
MAP_PAIR_INT_STR,
MAP_PAIR_STR_INT,
MAP_PAIR_STR_STR
};
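 
/* Illustrative map strings for each mode (examples only; entries are
 * '|'-separated and in/out pairs use '-', as parsed in channelmap_init()):
 *   MAP_ONE_INT:      map=0|1         input channel indices, in output order
 *   MAP_ONE_STR:      map=FL|FR       input channel names, in output order
 *   MAP_PAIR_INT_INT: map=0-1|1-0     input index - output index
 *   MAP_PAIR_STR_STR: map=FL-FR|FR-FL swap front left and front right
 */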
 
#define MAX_CH 64
typedef struct ChannelMapContext {
const AVClass *class;
AVFilterChannelLayouts *channel_layouts;
char *mapping_str;
char *channel_layout_str;
uint64_t output_layout;
struct ChannelMap map[MAX_CH];
int nch;
enum MappingMode mode;
} ChannelMapContext;
 
#define OFFSET(x) offsetof(ChannelMapContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption channelmap_options[] = {
{ "map", "A comma-separated list of input channel numbers in output order.",
OFFSET(mapping_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "channel_layout", "Output channel layout.",
OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(channelmap);
 
static char* split(char *message, char delim) {
char *next = strchr(message, delim);
if (next)
*next++ = '\0';
return next;
}
 
static int get_channel_idx(char **map, int *ch, char delim, int max_ch)
{
char *next = split(*map, delim);
int len;
int n = 0;
if (!next && delim == '-')
return AVERROR(EINVAL);
len = strlen(*map);
sscanf(*map, "%d%n", ch, &n);
if (n != len)
return AVERROR(EINVAL);
if (*ch < 0 || *ch > max_ch)
return AVERROR(EINVAL);
*map = next;
return 0;
}
 
static int get_channel(char **map, uint64_t *ch, char delim)
{
char *next = split(*map, delim);
if (!next && delim == '-')
return AVERROR(EINVAL);
*ch = av_get_channel_layout(*map);
if (av_get_channel_layout_nb_channels(*ch) != 1)
return AVERROR(EINVAL);
*map = next;
return 0;
}
 
static av_cold int channelmap_init(AVFilterContext *ctx)
{
ChannelMapContext *s = ctx->priv;
char *mapping, separator = '|';
int map_entries = 0;
char buf[256];
enum MappingMode mode;
uint64_t out_ch_mask = 0;
int i;
 
mapping = s->mapping_str;
 
if (!mapping) {
mode = MAP_NONE;
} else {
char *dash = strchr(mapping, '-');
if (!dash) { // short mapping
if (av_isdigit(*mapping))
mode = MAP_ONE_INT;
else
mode = MAP_ONE_STR;
} else if (av_isdigit(*mapping)) {
if (av_isdigit(*(dash+1)))
mode = MAP_PAIR_INT_INT;
else
mode = MAP_PAIR_INT_STR;
} else {
if (av_isdigit(*(dash+1)))
mode = MAP_PAIR_STR_INT;
else
mode = MAP_PAIR_STR_STR;
}
#if FF_API_OLD_FILTER_OPTS
if (strchr(mapping, ',')) {
av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use "
"'|' to separate the mappings.\n");
separator = ',';
}
#endif
}
 
if (mode != MAP_NONE) {
char *sep = mapping;
map_entries = 1;
while ((sep = strchr(sep, separator))) {
if (*++sep) // allow a trailing separator
map_entries++;
}
}
 
if (map_entries > MAX_CH) {
av_log(ctx, AV_LOG_ERROR, "Too many channels mapped: '%d'.\n", map_entries);
return AVERROR(EINVAL);
}
 
for (i = 0; i < map_entries; i++) {
int in_ch_idx = -1, out_ch_idx = -1;
uint64_t in_ch = 0, out_ch = 0;
static const char err[] = "Failed to parse channel map\n";
switch (mode) {
case MAP_ONE_INT:
if (get_channel_idx(&mapping, &in_ch_idx, separator, MAX_CH) < 0) {
av_log(ctx, AV_LOG_ERROR, err);
return AVERROR(EINVAL);
}
s->map[i].in_channel_idx = in_ch_idx;
s->map[i].out_channel_idx = i;
break;
case MAP_ONE_STR:
if (get_channel(&mapping, &in_ch, separator) < 0) {
av_log(ctx, AV_LOG_ERROR, err);
return AVERROR(EINVAL);
}
s->map[i].in_channel = in_ch;
s->map[i].out_channel_idx = i;
break;
case MAP_PAIR_INT_INT:
if (get_channel_idx(&mapping, &in_ch_idx, '-', MAX_CH) < 0 ||
get_channel_idx(&mapping, &out_ch_idx, separator, MAX_CH) < 0) {
av_log(ctx, AV_LOG_ERROR, err);
return AVERROR(EINVAL);
}
s->map[i].in_channel_idx = in_ch_idx;
s->map[i].out_channel_idx = out_ch_idx;
break;
case MAP_PAIR_INT_STR:
if (get_channel_idx(&mapping, &in_ch_idx, '-', MAX_CH) < 0 ||
get_channel(&mapping, &out_ch, separator) < 0 ||
out_ch & out_ch_mask) {
av_log(ctx, AV_LOG_ERROR, err);
return AVERROR(EINVAL);
}
s->map[i].in_channel_idx = in_ch_idx;
s->map[i].out_channel = out_ch;
out_ch_mask |= out_ch;
break;
case MAP_PAIR_STR_INT:
if (get_channel(&mapping, &in_ch, '-') < 0 ||
get_channel_idx(&mapping, &out_ch_idx, separator, MAX_CH) < 0) {
av_log(ctx, AV_LOG_ERROR, err);
return AVERROR(EINVAL);
}
s->map[i].in_channel = in_ch;
s->map[i].out_channel_idx = out_ch_idx;
break;
case MAP_PAIR_STR_STR:
if (get_channel(&mapping, &in_ch, '-') < 0 ||
get_channel(&mapping, &out_ch, separator) < 0 ||
out_ch & out_ch_mask) {
av_log(ctx, AV_LOG_ERROR, err);
return AVERROR(EINVAL);
}
s->map[i].in_channel = in_ch;
s->map[i].out_channel = out_ch;
out_ch_mask |= out_ch;
break;
}
}
s->mode = mode;
s->nch = map_entries;
s->output_layout = out_ch_mask ? out_ch_mask :
av_get_default_channel_layout(map_entries);
 
if (s->channel_layout_str) {
uint64_t fmt;
if ((fmt = av_get_channel_layout(s->channel_layout_str)) == 0) {
av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout: '%s'.\n",
s->channel_layout_str);
return AVERROR(EINVAL);
}
if (mode == MAP_NONE) {
int i;
s->nch = av_get_channel_layout_nb_channels(fmt);
for (i = 0; i < s->nch; i++) {
s->map[i].in_channel_idx = i;
s->map[i].out_channel_idx = i;
}
} else if (out_ch_mask && out_ch_mask != fmt) {
av_get_channel_layout_string(buf, sizeof(buf), 0, out_ch_mask);
av_log(ctx, AV_LOG_ERROR,
"Output channel layout '%s' does not match the list of channel mapped: '%s'.\n",
s->channel_layout_str, buf);
return AVERROR(EINVAL);
} else if (s->nch != av_get_channel_layout_nb_channels(fmt)) {
av_log(ctx, AV_LOG_ERROR,
"Output channel layout %s does not match the number of channels mapped %d.\n",
s->channel_layout_str, s->nch);
return AVERROR(EINVAL);
}
s->output_layout = fmt;
}
if (!s->output_layout) {
av_log(ctx, AV_LOG_ERROR, "Output channel layout is not set and "
"cannot be guessed from the maps.\n");
return AVERROR(EINVAL);
}
 
ff_add_channel_layout(&s->channel_layouts, s->output_layout);
 
if (mode == MAP_PAIR_INT_STR || mode == MAP_PAIR_STR_STR) {
for (i = 0; i < s->nch; i++) {
s->map[i].out_channel_idx = av_get_channel_layout_channel_index(
s->output_layout, s->map[i].out_channel);
}
}
 
return 0;
}
 
static int channelmap_query_formats(AVFilterContext *ctx)
{
ChannelMapContext *s = ctx->priv;
 
ff_set_common_formats(ctx, ff_planar_sample_fmts());
ff_set_common_samplerates(ctx, ff_all_samplerates());
ff_channel_layouts_ref(ff_all_channel_layouts(), &ctx->inputs[0]->out_channel_layouts);
ff_channel_layouts_ref(s->channel_layouts, &ctx->outputs[0]->in_channel_layouts);
 
return 0;
}
 
static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
const ChannelMapContext *s = ctx->priv;
const int nch_in = av_get_channel_layout_nb_channels(inlink->channel_layout);
const int nch_out = s->nch;
int ch;
uint8_t *source_planes[MAX_CH];
 
memcpy(source_planes, buf->extended_data,
nch_in * sizeof(source_planes[0]));
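 
/* Descriptive note: when the map produces more channels than the frame's
 * fixed data[] array can hold, the plane pointers are moved into a newly
 * allocated extended_data array; otherwise the in-frame array is reused. */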
 
if (nch_out > nch_in) {
if (nch_out > FF_ARRAY_ELEMS(buf->data)) {
uint8_t **new_extended_data =
av_mallocz(nch_out * sizeof(*buf->extended_data));
if (!new_extended_data) {
av_frame_free(&buf);
return AVERROR(ENOMEM);
}
if (buf->extended_data == buf->data) {
buf->extended_data = new_extended_data;
} else {
av_free(buf->extended_data);
buf->extended_data = new_extended_data;
}
} else if (buf->extended_data != buf->data) {
av_free(buf->extended_data);
buf->extended_data = buf->data;
}
}
 
for (ch = 0; ch < nch_out; ch++) {
buf->extended_data[s->map[ch].out_channel_idx] =
source_planes[s->map[ch].in_channel_idx];
}
 
if (buf->data != buf->extended_data)
memcpy(buf->data, buf->extended_data,
FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0]));
 
return ff_filter_frame(outlink, buf);
}
 
static int channelmap_config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
ChannelMapContext *s = ctx->priv;
int nb_channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
int i, err = 0;
const char *channel_name;
char layout_name[256];
 
for (i = 0; i < s->nch; i++) {
struct ChannelMap *m = &s->map[i];
 
if (s->mode == MAP_PAIR_STR_INT || s->mode == MAP_PAIR_STR_STR) {
m->in_channel_idx = av_get_channel_layout_channel_index(
inlink->channel_layout, m->in_channel);
}
 
if (m->in_channel_idx < 0 || m->in_channel_idx >= nb_channels) {
av_get_channel_layout_string(layout_name, sizeof(layout_name),
0, inlink->channel_layout);
if (m->in_channel) {
channel_name = av_get_channel_name(m->in_channel);
av_log(ctx, AV_LOG_ERROR,
"input channel '%s' not available from input layout '%s'\n",
channel_name, layout_name);
} else {
av_log(ctx, AV_LOG_ERROR,
"input channel #%d not available from input layout '%s'\n",
m->in_channel_idx, layout_name);
}
err = AVERROR(EINVAL);
}
}
 
return err;
}
 
static const AVFilterPad avfilter_af_channelmap_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = channelmap_filter_frame,
.config_props = channelmap_config_input,
.needs_writable = 1,
},
{ NULL }
};
 
static const AVFilterPad avfilter_af_channelmap_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO
},
{ NULL }
};
 
AVFilter avfilter_af_channelmap = {
.name = "channelmap",
.description = NULL_IF_CONFIG_SMALL("Remap audio channels."),
.init = channelmap_init,
.query_formats = channelmap_query_formats,
.priv_size = sizeof(ChannelMapContext),
.priv_class = &channelmap_class,
.inputs = avfilter_af_channelmap_inputs,
.outputs = avfilter_af_channelmap_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_channelsplit.c
0,0 → 1,149
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Channel split filter
*
* Split an audio stream into per-channel streams.
*/
 
#include "libavutil/attributes.h"
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
 
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
typedef struct ChannelSplitContext {
const AVClass *class;
 
uint64_t channel_layout;
char *channel_layout_str;
} ChannelSplitContext;
 
#define OFFSET(x) offsetof(ChannelSplitContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption channelsplit_options[] = {
{ "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A|F },
{ NULL }
};
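 
/* Illustrative usage (example only): channelsplit=channel_layout=5.1
 * creates one output pad per channel of the given layout, each pad named
 * after its channel by init() below. */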
 
AVFILTER_DEFINE_CLASS(channelsplit);
 
static av_cold int init(AVFilterContext *ctx)
{
ChannelSplitContext *s = ctx->priv;
int nb_channels;
int ret = 0, i;
 
if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
s->channel_layout_str);
ret = AVERROR(EINVAL);
goto fail;
}
 
nb_channels = av_get_channel_layout_nb_channels(s->channel_layout);
for (i = 0; i < nb_channels; i++) {
uint64_t channel = av_channel_layout_extract_channel(s->channel_layout, i);
AVFilterPad pad = { 0 };
 
pad.type = AVMEDIA_TYPE_AUDIO;
pad.name = av_get_channel_name(channel);
 
ff_insert_outpad(ctx, i, &pad);
}
 
fail:
return ret;
}
 
static int query_formats(AVFilterContext *ctx)
{
ChannelSplitContext *s = ctx->priv;
AVFilterChannelLayouts *in_layouts = NULL;
int i;
 
ff_set_common_formats (ctx, ff_planar_sample_fmts());
ff_set_common_samplerates(ctx, ff_all_samplerates());
 
ff_add_channel_layout(&in_layouts, s->channel_layout);
ff_channel_layouts_ref(in_layouts, &ctx->inputs[0]->out_channel_layouts);
 
for (i = 0; i < ctx->nb_outputs; i++) {
AVFilterChannelLayouts *out_layouts = NULL;
uint64_t channel = av_channel_layout_extract_channel(s->channel_layout, i);
 
ff_add_channel_layout(&out_layouts, channel);
ff_channel_layouts_ref(out_layouts, &ctx->outputs[i]->in_channel_layouts);
}
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
int i, ret = 0;
 
for (i = 0; i < ctx->nb_outputs; i++) {
AVFrame *buf_out = av_frame_clone(buf);
 
if (!buf_out) {
ret = AVERROR(ENOMEM);
break;
}
 
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
buf_out->channel_layout =
av_channel_layout_extract_channel(buf->channel_layout, i);
av_frame_set_channels(buf_out, 1);
 
ret = ff_filter_frame(ctx->outputs[i], buf_out);
if (ret < 0)
break;
}
av_frame_free(&buf);
return ret;
}
 
static const AVFilterPad avfilter_af_channelsplit_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
AVFilter avfilter_af_channelsplit = {
.name = "channelsplit",
.description = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams."),
.priv_size = sizeof(ChannelSplitContext),
.priv_class = &channelsplit_class,
.init = init,
.query_formats = query_formats,
.inputs = avfilter_af_channelsplit_inputs,
.outputs = NULL,
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_compand.c
0,0 → 1,518
/*
* Copyright (c) 1999 Chris Bagwell
* Copyright (c) 1999 Nick Bailey
* Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
 
typedef struct ChanParam {
double attack;
double decay;
double volume;
} ChanParam;
 
typedef struct CompandSegment {
double x, y;
double a, b;
} CompandSegment;
 
typedef struct CompandContext {
const AVClass *class;
char *attacks, *decays, *points;
CompandSegment *segments;
ChanParam *channels;
double in_min_lin;
double out_min_lin;
double curve_dB;
double gain_dB;
double initial_volume;
double delay;
uint8_t **delayptrs;
int delay_samples;
int delay_count;
int delay_index;
int64_t pts;
 
int (*compand)(AVFilterContext *ctx, AVFrame *frame);
} CompandContext;
 
#define OFFSET(x) offsetof(CompandContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption compand_options[] = {
{ "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
{ "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
{ "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
{ "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, {.dbl=0.01}, 0.01, 900, A },
{ "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, A },
{ "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 0, A },
{ "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 20, A },
{ NULL }
};
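 
/* Illustrative usage (example values only; count_items() below splits the
 * attack/decay and point lists on spaces, and each point is an
 * in_dB/out_dB pair parsed as "%lf/%lf"):
 *   compand=attacks=0.3:decays=0.8:points=-70/-70 -60/-20 0/-10
 */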
 
AVFILTER_DEFINE_CLASS(compand);
 
static av_cold int init(AVFilterContext *ctx)
{
CompandContext *s = ctx->priv;
 
if (!s->attacks || !s->decays || !s->points) {
av_log(ctx, AV_LOG_ERROR, "Missing attacks and/or decays and/or points.\n");
return AVERROR(EINVAL);
}
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
CompandContext *s = ctx->priv;
 
av_freep(&s->channels);
av_freep(&s->segments);
if (s->delayptrs)
av_freep(&s->delayptrs[0]);
av_freep(&s->delayptrs);
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterChannelLayouts *layouts;
AVFilterFormats *formats;
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
};
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, layouts);
 
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_samplerates(ctx, formats);
 
return 0;
}
 
static void count_items(char *item_str, int *nb_items)
{
char *p;
 
*nb_items = 1;
for (p = item_str; *p; p++) {
if (*p == ' ')
(*nb_items)++;
}
 
}
 
static void update_volume(ChanParam *cp, double in)
{
double delta = in - cp->volume;
 
if (delta > 0.0)
cp->volume += delta * cp->attack;
else
cp->volume += delta * cp->decay;
}
 
static double get_volume(CompandContext *s, double in_lin)
{
CompandSegment *cs;
double in_log, out_log;
int i;
 
if (in_lin < s->in_min_lin)
return s->out_min_lin;
 
in_log = log(in_lin);
 
for (i = 1;; i++)
if (in_log <= s->segments[i + 1].x)
break;
 
cs = &s->segments[i];
in_log -= cs->x;
out_log = cs->y + in_log * (cs->a * in_log + cs->b);
 
return exp(out_log);
}
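 
/* Descriptive note: each CompandSegment stores a quadratic in the log
 * domain; get_volume() evaluates out_log = y + b*t + a*t*t with
 * t = in_log - x, making the transfer curve piecewise quadratic between
 * the user-supplied points. */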
 
static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
{
CompandContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
const int channels = inlink->channels;
const int nb_samples = frame->nb_samples;
AVFrame *out_frame;
int chan, i;
 
if (av_frame_is_writable(frame)) {
out_frame = frame;
} else {
out_frame = ff_get_audio_buffer(inlink, nb_samples);
if (!out_frame)
return AVERROR(ENOMEM);
av_frame_copy_props(out_frame, frame);
}
 
for (chan = 0; chan < channels; chan++) {
const double *src = (double *)frame->extended_data[chan];
double *dst = (double *)out_frame->extended_data[chan];
ChanParam *cp = &s->channels[chan];
 
for (i = 0; i < nb_samples; i++) {
update_volume(cp, fabs(src[i]));
 
dst[i] = av_clipd(src[i] * get_volume(s, cp->volume), -1, 1);
}
}
 
if (frame != out_frame)
av_frame_free(&frame);
 
return ff_filter_frame(ctx->outputs[0], out_frame);
}
 
#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
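 
/* Descriptive note: MOD(a, b) is a cheap substitute for (a) % (b) that is
 * only valid while a < 2*b; that holds here because the circular delay
 * index advances by one step at a time. */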
 
static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
{
CompandContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
const int channels = inlink->channels;
const int nb_samples = frame->nb_samples;
int chan, i, av_uninit(dindex), oindex, av_uninit(count);
AVFrame *out_frame = NULL;
 
av_assert1(channels > 0); /* would corrupt delay_count and delay_index */
 
for (chan = 0; chan < channels; chan++) {
const double *src = (double *)frame->extended_data[chan];
double *dbuf = (double *)s->delayptrs[chan];
ChanParam *cp = &s->channels[chan];
double *dst;
 
count = s->delay_count;
dindex = s->delay_index;
for (i = 0, oindex = 0; i < nb_samples; i++) {
const double in = src[i];
update_volume(cp, fabs(in));
 
if (count >= s->delay_samples) {
if (!out_frame) {
out_frame = ff_get_audio_buffer(inlink, nb_samples - i);
if (!out_frame)
return AVERROR(ENOMEM);
av_frame_copy_props(out_frame, frame);
out_frame->pts = s->pts;
s->pts += av_rescale_q(nb_samples - i, (AVRational){1, inlink->sample_rate}, inlink->time_base);
}
 
dst = (double *)out_frame->extended_data[chan];
dst[oindex++] = av_clipd(dbuf[dindex] * get_volume(s, cp->volume), -1, 1);
} else {
count++;
}
 
dbuf[dindex] = in;
dindex = MOD(dindex + 1, s->delay_samples);
}
}
 
s->delay_count = count;
s->delay_index = dindex;
 
av_frame_free(&frame);
return out_frame ? ff_filter_frame(ctx->outputs[0], out_frame) : 0;
}
 
static int compand_drain(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
CompandContext *s = ctx->priv;
const int channels = outlink->channels;
int chan, i, dindex;
AVFrame *frame = NULL;
 
frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
if (!frame)
return AVERROR(ENOMEM);
frame->pts = s->pts;
s->pts += av_rescale_q(frame->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
 
for (chan = 0; chan < channels; chan++) {
double *dbuf = (double *)s->delayptrs[chan];
double *dst = (double *)frame->extended_data[chan];
ChanParam *cp = &s->channels[chan];
 
dindex = s->delay_index;
for (i = 0; i < frame->nb_samples; i++) {
dst[i] = av_clipd(dbuf[dindex] * get_volume(s, cp->volume), -1, 1);
dindex = MOD(dindex + 1, s->delay_samples);
}
}
s->delay_count -= frame->nb_samples;
s->delay_index = dindex;
 
return ff_filter_frame(outlink, frame);
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
CompandContext *s = ctx->priv;
const int sample_rate = outlink->sample_rate;
double radius = s->curve_dB * M_LN10 / 20;
int nb_attacks, nb_decays, nb_points;
char *p, *saveptr = NULL;
int new_nb_items, num;
int i;
 
count_items(s->attacks, &nb_attacks);
count_items(s->decays, &nb_decays);
count_items(s->points, &nb_points);
 
if ((nb_attacks > outlink->channels) || (nb_decays > outlink->channels)) {
av_log(ctx, AV_LOG_ERROR, "Number of attacks/decays bigger than number of channels.\n");
return AVERROR(EINVAL);
}
 
uninit(ctx);
 
s->channels = av_mallocz_array(outlink->channels, sizeof(*s->channels));
s->segments = av_mallocz_array((nb_points + 4) * 2, sizeof(*s->segments));
 
if (!s->channels || !s->segments)
return AVERROR(ENOMEM);
 
p = s->attacks;
for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
char *tstr = av_strtok(p, " ", &saveptr);
p = NULL;
new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
if (s->channels[i].attack < 0)
return AVERROR(EINVAL);
}
nb_attacks = new_nb_items;
 
p = s->decays;
for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
char *tstr = av_strtok(p, " ", &saveptr);
p = NULL;
new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
if (s->channels[i].decay < 0)
return AVERROR(EINVAL);
}
nb_decays = new_nb_items;
 
if (nb_attacks != nb_decays) {
av_log(ctx, AV_LOG_ERROR, "Number of attacks %d differs from number of decays %d.\n", nb_attacks, nb_decays);
return AVERROR(EINVAL);
}
 
#define S(x) s->segments[2 * ((x) + 1)]
p = s->points;
for (i = 0, new_nb_items = 0; i < nb_points; i++) {
char *tstr = av_strtok(p, " ", &saveptr);
p = NULL;
if (sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
av_log(ctx, AV_LOG_ERROR, "Invalid and/or missing input/output value.\n");
return AVERROR(EINVAL);
}
if (i && S(i - 1).x > S(i).x) {
av_log(ctx, AV_LOG_ERROR, "Transfer function input values must be increasing.\n");
return AVERROR(EINVAL);
}
S(i).y -= S(i).x;
av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
new_nb_items++;
}
num = new_nb_items;
 
/* Add 0,0 if necessary */
if (num == 0 || S(num - 1).x)
num++;
 
#undef S
#define S(x) s->segments[2 * (x)]
/* Add a tail off segment at the start */
S(0).x = S(1).x - 2 * s->curve_dB;
S(0).y = S(1).y;
num++;
 
/* Join adjacent colinear segments */
for (i = 2; i < num; i++) {
double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
int j;
 
if (fabs(g1 - g2))
continue;
num--;
for (j = --i; j < num; j++)
S(j) = S(j + 1);
}
 
for (i = 0; !i || s->segments[i - 2].x; i += 2) {
s->segments[i].y += s->gain_dB;
s->segments[i].x *= M_LN10 / 20;
s->segments[i].y *= M_LN10 / 20;
}
 
#define L(x) s->segments[i - (x)]
for (i = 4; s->segments[i - 2].x; i += 2) {
double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;
 
L(4).a = 0;
L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);
 
L(2).a = 0;
L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);
 
theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
len = sqrt(pow(L(2).x - L(4).x, 2.) + pow(L(2).y - L(4).y, 2.));
r = FFMIN(radius, len);
L(3).x = L(2).x - r * cos(theta);
L(3).y = L(2).y - r * sin(theta);
 
theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
len = sqrt(pow(L(0).x - L(2).x, 2.) + pow(L(0).y - L(2).y, 2.));
r = FFMIN(radius, len / 2);
x = L(2).x + r * cos(theta);
y = L(2).y + r * sin(theta);
 
cx = (L(3).x + L(2).x + x) / 3;
cy = (L(3).y + L(2).y + y) / 3;
 
L(2).x = x;
L(2).y = y;
 
in1 = cx - L(3).x;
out1 = cy - L(3).y;
in2 = L(2).x - L(3).x;
out2 = L(2).y - L(3).y;
L(3).a = (out2 / in2 - out1 / in1) / (in2-in1);
L(3).b = out1 / in1 - L(3).a * in1;
}
L(3).x = 0;
L(3).y = L(2).y;
 
s->in_min_lin = exp(s->segments[1].x);
s->out_min_lin = exp(s->segments[1].y);
 
for (i = 0; i < outlink->channels; i++) {
ChanParam *cp = &s->channels[i];
 
if (cp->attack > 1.0 / sample_rate)
cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
else
cp->attack = 1.0;
if (cp->decay > 1.0 / sample_rate)
cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
else
cp->decay = 1.0;
cp->volume = pow(10.0, s->initial_volume / 20);
}
 
s->delay_samples = s->delay * sample_rate;
if (s->delay_samples > 0) {
int ret;
if ((ret = av_samples_alloc_array_and_samples(&s->delayptrs, NULL,
outlink->channels,
s->delay_samples,
outlink->format, 0)) < 0)
return ret;
s->compand = compand_delay;
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
} else {
s->compand = compand_nodelay;
}
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
CompandContext *s = ctx->priv;
 
return s->compand(ctx, frame);
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
CompandContext *s = ctx->priv;
int ret;
 
ret = ff_request_frame(ctx->inputs[0]);
 
if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
ret = compand_drain(outlink);
 
return ret;
}
 
static const AVFilterPad compand_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad compand_outputs[] = {
{
.name = "default",
.request_frame = request_frame,
.config_props = config_output,
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_compand = {
.name = "compand",
.description = NULL_IF_CONFIG_SMALL("Compress or expand audio dynamic range."),
.query_formats = query_formats,
.priv_size = sizeof(CompandContext),
.priv_class = &compand_class,
.init = init,
.uninit = uninit,
.inputs = compand_inputs,
.outputs = compand_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_earwax.c
0,0 → 1,172
/*
* Copyright (c) 2011 Mina Nagy Zaki
* Copyright (c) 2000 Edward Beingessner And Sundry Contributors.
* This source code is freely redistributable and may be used for any purpose.
* This copyright notice must be maintained. Edward Beingessner And Sundry
* Contributors are not responsible for the consequences of using this
* software.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Stereo Widening Effect. Adds audio cues to move stereo image in
* front of the listener. Adapted from the libsox earwax effect.
*/
 
#include "libavutil/channel_layout.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
 
#define NUMTAPS 64
 
static const int8_t filt[NUMTAPS] = {
/* 30° 330° */
4, -6, /* 32 tap stereo FIR filter. */
4, -11, /* One side filters as if the */
-1, -5, /* signal was from 30 degrees */
3, 3, /* from the ear, the other as */
-2, 5, /* if 330 degrees. */
-5, 0,
9, 1,
6, 3, /* Input */
-4, -1, /* Left Right */
-5, -3, /* __________ __________ */
-2, -5, /* | | | | */
-7, 1, /* .---| Hh,0(f) | | Hh,0(f) |---. */
6, -7, /* / |__________| |__________| \ */
30, -29, /* / \ / \ */
12, -3, /* / X \ */
-11, 4, /* / / \ \ */
-3, 7, /* ____V_____ __________V V__________ _____V____ */
-20, 23, /* | | | | | | | | */
2, 0, /* | Hh,30(f) | | Hh,330(f)| | Hh,330(f)| | Hh,30(f) | */
1, -6, /* |__________| |__________| |__________| |__________| */
-14, -5, /* \ ___ / \ ___ / */
15, -18, /* \ / \ / _____ \ / \ / */
6, 7, /* `->| + |<--' / \ `-->| + |<-' */
15, -10, /* \___/ _/ \_ \___/ */
-14, 22, /* \ / \ / \ / */
-7, -2, /* `--->| | | |<---' */
-4, 9, /* \_/ \_/ */
6, -12, /* */
6, -6, /* Headphones */
0, -11,
0, -5,
4, 0};
 
typedef struct {
int16_t taps[NUMTAPS * 2];
} EarwaxContext;
 
static int query_formats(AVFilterContext *ctx)
{
static const int sample_rates[] = { 44100, -1 };
 
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layout = NULL;
 
ff_add_format(&formats, AV_SAMPLE_FMT_S16);
ff_set_common_formats(ctx, formats);
ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO);
ff_set_common_channel_layouts(ctx, layout);
ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
 
return 0;
}
 
//FIXME: replace with DSPContext.scalarproduct_int16
static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, int16_t *out)
{
int32_t sample;
int16_t j;
 
while (in < endin) {
sample = 0;
for (j = 0; j < NUMTAPS; j++)
sample += in[j] * filt[j];
*out = av_clip_int16(sample >> 6);
out++;
in++;
}
 
return out;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterLink *outlink = inlink->dst->outputs[0];
int16_t *taps, *endin, *in, *out;
AVFrame *outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples);
int len;
 
if (!outsamples) {
av_frame_free(&insamples);
return AVERROR(ENOMEM);
}
av_frame_copy_props(outsamples, insamples);
 
taps = ((EarwaxContext *)inlink->dst->priv)->taps;
out = (int16_t *)outsamples->data[0];
in = (int16_t *)insamples ->data[0];
 
len = FFMIN(NUMTAPS, 2*insamples->nb_samples);
// copy part of new input and process with saved input
memcpy(taps+NUMTAPS, in, len * sizeof(*taps));
out = scalarproduct(taps, taps + len, out);
 
// process current input
if (2 * insamples->nb_samples >= NUMTAPS) {
endin = in + insamples->nb_samples * 2 - NUMTAPS;
scalarproduct(in, endin, out);
 
// save part of input for next round
memcpy(taps, endin, NUMTAPS * sizeof(*taps));
} else
memmove(taps, taps + 2*insamples->nb_samples, NUMTAPS * sizeof(*taps));
 
av_frame_free(&insamples);
return ff_filter_frame(outlink, outsamples);
}
 
static const AVFilterPad earwax_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad earwax_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_earwax = {
.name = "earwax",
.description = NULL_IF_CONFIG_SMALL("Widen the stereo image."),
.query_formats = query_formats,
.priv_size = sizeof(EarwaxContext),
.inputs = earwax_inputs,
.outputs = earwax_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_join.c
0,0 → 1,516
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Audio join filter
*
* Join multiple audio inputs as different channels in
* a single output
*/
 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
 
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
typedef struct ChannelMap {
int input; ///< input stream index
int in_channel_idx; ///< index of in_channel in the input stream data
uint64_t in_channel; ///< layout describing the input channel
uint64_t out_channel; ///< layout describing the output channel
} ChannelMap;
 
typedef struct JoinContext {
const AVClass *class;
 
int inputs;
char *map;
char *channel_layout_str;
uint64_t channel_layout;
 
int nb_channels;
ChannelMap *channels;
 
/**
* Temporary storage for input frames, until we get one on each input.
*/
AVFrame **input_frames;
 
/**
* Temporary storage for buffer references, for assembling the output frame.
*/
AVBufferRef **buffers;
} JoinContext;
 
#define OFFSET(x) offsetof(JoinContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption join_options[] = {
{ "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A|F },
{ "channel_layout", "Channel layout of the "
"output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A|F },
{ "map", "A comma-separated list of channels maps in the format "
"'input_stream.input_channel-output_channel.",
OFFSET(map), AV_OPT_TYPE_STRING, .flags = A|F },
{ NULL }
};
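 
/* Illustrative usage (example only): join two stereo inputs into one quad
 * stream, using 'input_stream.input_channel-output_channel' entries:
 *   join=inputs=2:channel_layout=quad:map=0.0-FL|0.1-FR|1.0-BL|1.1-BR
 */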
 
AVFILTER_DEFINE_CLASS(join);
 
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
JoinContext *s = ctx->priv;
int i;
 
for (i = 0; i < ctx->nb_inputs; i++)
if (link == ctx->inputs[i])
break;
av_assert0(i < ctx->nb_inputs);
av_assert0(!s->input_frames[i]);
s->input_frames[i] = frame;
 
return 0;
}
 
static int parse_maps(AVFilterContext *ctx)
{
JoinContext *s = ctx->priv;
char separator = '|';
char *cur = s->map;
 
#if FF_API_OLD_FILTER_OPTS
if (cur && strchr(cur, ',')) {
av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use '|' to "
"separate the mappings.\n");
separator = ',';
}
#endif
 
while (cur && *cur) {
char *sep, *next, *p;
uint64_t in_channel = 0, out_channel = 0;
int input_idx, out_ch_idx, in_ch_idx;
 
next = strchr(cur, separator);
if (next)
*next++ = 0;
 
/* split the map into input and output parts */
if (!(sep = strchr(cur, '-'))) {
av_log(ctx, AV_LOG_ERROR, "Missing separator '-' in channel "
"map '%s'\n", cur);
return AVERROR(EINVAL);
}
*sep++ = 0;
 
#define PARSE_CHANNEL(str, var, inout) \
if (!(var = av_get_channel_layout(str))) { \
av_log(ctx, AV_LOG_ERROR, "Invalid " inout " channel: %s.\n", str);\
return AVERROR(EINVAL); \
} \
if (av_get_channel_layout_nb_channels(var) != 1) { \
av_log(ctx, AV_LOG_ERROR, "Channel map describes more than one " \
inout " channel.\n"); \
return AVERROR(EINVAL); \
}
 
/* parse output channel */
PARSE_CHANNEL(sep, out_channel, "output");
if (!(out_channel & s->channel_layout)) {
av_log(ctx, AV_LOG_ERROR, "Output channel '%s' is not present in "
"requested channel layout.\n", sep);
return AVERROR(EINVAL);
}
 
out_ch_idx = av_get_channel_layout_channel_index(s->channel_layout,
out_channel);
if (s->channels[out_ch_idx].input >= 0) {
av_log(ctx, AV_LOG_ERROR, "Multiple maps for output channel "
"'%s'.\n", sep);
return AVERROR(EINVAL);
}
 
/* parse input channel */
input_idx = strtol(cur, &cur, 0);
if (input_idx < 0 || input_idx >= s->inputs) {
av_log(ctx, AV_LOG_ERROR, "Invalid input stream index: %d.\n",
input_idx);
return AVERROR(EINVAL);
}
 
if (*cur)
cur++;
 
in_ch_idx = strtol(cur, &p, 0);
if (p == cur) {
/* channel specifier is not a number,
* try to parse as channel name */
PARSE_CHANNEL(cur, in_channel, "input");
}
 
s->channels[out_ch_idx].input = input_idx;
if (in_channel)
s->channels[out_ch_idx].in_channel = in_channel;
else
s->channels[out_ch_idx].in_channel_idx = in_ch_idx;
 
cur = next;
}
return 0;
}
 
static av_cold int join_init(AVFilterContext *ctx)
{
JoinContext *s = ctx->priv;
int ret, i;
 
if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
s->channel_layout_str);
return AVERROR(EINVAL);
}
 
s->nb_channels = av_get_channel_layout_nb_channels(s->channel_layout);
s->channels = av_mallocz(sizeof(*s->channels) * s->nb_channels);
s->buffers = av_mallocz(sizeof(*s->buffers) * s->nb_channels);
s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs);
if (!s->channels || !s->buffers|| !s->input_frames)
return AVERROR(ENOMEM);
 
for (i = 0; i < s->nb_channels; i++) {
s->channels[i].out_channel = av_channel_layout_extract_channel(s->channel_layout, i);
s->channels[i].input = -1;
}
 
if ((ret = parse_maps(ctx)) < 0)
return ret;
 
for (i = 0; i < s->inputs; i++) {
char name[32];
AVFilterPad pad = { 0 };
 
snprintf(name, sizeof(name), "input%d", i);
pad.type = AVMEDIA_TYPE_AUDIO;
pad.name = av_strdup(name);
pad.filter_frame = filter_frame;
 
pad.needs_fifo = 1;
 
ff_insert_inpad(ctx, i, &pad);
}
 
return 0;
}
 
static av_cold void join_uninit(AVFilterContext *ctx)
{
JoinContext *s = ctx->priv;
int i;
 
for (i = 0; i < ctx->nb_inputs; i++) {
av_freep(&ctx->input_pads[i].name);
av_frame_free(&s->input_frames[i]);
}
 
av_freep(&s->channels);
av_freep(&s->buffers);
av_freep(&s->input_frames);
}
 
static int join_query_formats(AVFilterContext *ctx)
{
JoinContext *s = ctx->priv;
AVFilterChannelLayouts *layouts = NULL;
int i;
 
ff_add_channel_layout(&layouts, s->channel_layout);
ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
 
for (i = 0; i < ctx->nb_inputs; i++)
ff_channel_layouts_ref(ff_all_channel_layouts(),
&ctx->inputs[i]->out_channel_layouts);
 
ff_set_common_formats (ctx, ff_planar_sample_fmts());
ff_set_common_samplerates(ctx, ff_all_samplerates());
 
return 0;
}
 
static void guess_map_matching(AVFilterContext *ctx, ChannelMap *ch,
uint64_t *inputs)
{
int i;
 
for (i = 0; i < ctx->nb_inputs; i++) {
AVFilterLink *link = ctx->inputs[i];
 
if (ch->out_channel & link->channel_layout &&
!(ch->out_channel & inputs[i])) {
ch->input = i;
ch->in_channel = ch->out_channel;
inputs[i] |= ch->out_channel;
return;
}
}
}
 
static void guess_map_any(AVFilterContext *ctx, ChannelMap *ch,
uint64_t *inputs)
{
int i;
 
for (i = 0; i < ctx->nb_inputs; i++) {
AVFilterLink *link = ctx->inputs[i];
 
if ((inputs[i] & link->channel_layout) != link->channel_layout) {
uint64_t unused = link->channel_layout & ~inputs[i];
 
ch->input = i;
ch->in_channel = av_channel_layout_extract_channel(unused, 0);
inputs[i] |= ch->in_channel;
return;
}
}
}
 
static int join_config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
JoinContext *s = ctx->priv;
uint64_t *inputs; // nth element tracks which channels are used from nth input
int i, ret = 0;
 
/* initialize inputs to user-specified mappings */
if (!(inputs = av_mallocz(sizeof(*inputs) * ctx->nb_inputs)))
return AVERROR(ENOMEM);
for (i = 0; i < s->nb_channels; i++) {
ChannelMap *ch = &s->channels[i];
AVFilterLink *inlink;
 
if (ch->input < 0)
continue;
 
inlink = ctx->inputs[ch->input];
 
if (!ch->in_channel)
ch->in_channel = av_channel_layout_extract_channel(inlink->channel_layout,
ch->in_channel_idx);
 
if (!(ch->in_channel & inlink->channel_layout)) {
av_log(ctx, AV_LOG_ERROR, "Requested channel %s is not present in "
"input stream #%d.\n", av_get_channel_name(ch->in_channel),
ch->input);
ret = AVERROR(EINVAL);
goto fail;
}
 
inputs[ch->input] |= ch->in_channel;
}
 
/* guess channel maps when not explicitly defined */
/* first try unused matching channels */
for (i = 0; i < s->nb_channels; i++) {
ChannelMap *ch = &s->channels[i];
 
if (ch->input < 0)
guess_map_matching(ctx, ch, inputs);
}
 
/* if the above failed, try to find _any_ unused input channel */
for (i = 0; i < s->nb_channels; i++) {
ChannelMap *ch = &s->channels[i];
 
if (ch->input < 0)
guess_map_any(ctx, ch, inputs);
 
if (ch->input < 0) {
av_log(ctx, AV_LOG_ERROR, "Could not find input channel for "
"output channel '%s'.\n",
av_get_channel_name(ch->out_channel));
goto fail;
}
 
ch->in_channel_idx = av_get_channel_layout_channel_index(ctx->inputs[ch->input]->channel_layout,
ch->in_channel);
}
 
/* print mappings */
av_log(ctx, AV_LOG_VERBOSE, "mappings: ");
for (i = 0; i < s->nb_channels; i++) {
ChannelMap *ch = &s->channels[i];
av_log(ctx, AV_LOG_VERBOSE, "%d.%s => %s ", ch->input,
av_get_channel_name(ch->in_channel),
av_get_channel_name(ch->out_channel));
}
av_log(ctx, AV_LOG_VERBOSE, "\n");
 
for (i = 0; i < ctx->nb_inputs; i++) {
if (!inputs[i])
av_log(ctx, AV_LOG_WARNING, "No channels are used from input "
"stream %d.\n", i);
}
 
fail:
av_freep(&inputs);
return ret;
}
 
static int join_request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
JoinContext *s = ctx->priv;
AVFrame *frame;
int linesize = INT_MAX;
int nb_samples = 0;
int nb_buffers = 0;
int i, j, ret;
 
/* get a frame on each input */
for (i = 0; i < ctx->nb_inputs; i++) {
AVFilterLink *inlink = ctx->inputs[i];
 
if (!s->input_frames[i] &&
(ret = ff_request_frame(inlink)) < 0)
return ret;
 
/* request the same number of samples on all inputs */
if (i == 0) {
nb_samples = s->input_frames[0]->nb_samples;
 
for (j = 1; !i && j < ctx->nb_inputs; j++)
ctx->inputs[j]->request_samples = nb_samples;
}
}
 
/* setup the output frame */
frame = av_frame_alloc();
if (!frame)
return AVERROR(ENOMEM);
if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
frame->extended_data = av_mallocz(s->nb_channels *
sizeof(*frame->extended_data));
if (!frame->extended_data) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
 
/* copy the data pointers */
for (i = 0; i < s->nb_channels; i++) {
ChannelMap *ch = &s->channels[i];
AVFrame *cur = s->input_frames[ch->input];
AVBufferRef *buf;
 
frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
linesize = FFMIN(linesize, cur->linesize[0]);
 
/* add the buffer where this plane is stored to the list if it's
* not already there */
buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx);
if (!buf) {
ret = AVERROR(EINVAL);
goto fail;
}
for (j = 0; j < nb_buffers; j++)
if (s->buffers[j]->buffer == buf->buffer)
break;
if (j == nb_buffers)
s->buffers[nb_buffers++] = buf;
}
 
/* create references to the buffers we copied to output */
if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
frame->nb_extended_buf);
if (!frame->extended_buf) {
frame->nb_extended_buf = 0;
ret = AVERROR(ENOMEM);
goto fail;
}
}
for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
frame->buf[i] = av_buffer_ref(s->buffers[i]);
if (!frame->buf[i]) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
for (i = 0; i < frame->nb_extended_buf; i++) {
frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
FF_ARRAY_ELEMS(frame->buf)]);
if (!frame->extended_buf[i]) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
 
frame->nb_samples = nb_samples;
frame->channel_layout = outlink->channel_layout;
av_frame_set_channels(frame, outlink->channels);
frame->format = outlink->format;
frame->sample_rate = outlink->sample_rate;
frame->pts = s->input_frames[0]->pts;
frame->linesize[0] = linesize;
if (frame->data != frame->extended_data) {
memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
}
 
ret = ff_filter_frame(outlink, frame);
 
for (i = 0; i < ctx->nb_inputs; i++)
av_frame_free(&s->input_frames[i]);
 
return ret;
 
fail:
av_frame_free(&frame);
return ret;
}
 
static const AVFilterPad avfilter_af_join_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = join_config_output,
.request_frame = join_request_frame,
},
{ NULL }
};
 
AVFilter avfilter_af_join = {
.name = "join",
.description = NULL_IF_CONFIG_SMALL("Join multiple audio streams into "
"multi-channel output."),
.priv_size = sizeof(JoinContext),
.priv_class = &join_class,
.init = join_init,
.uninit = join_uninit,
.query_formats = join_query_formats,
.inputs = NULL,
.outputs = avfilter_af_join_outputs,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_ladspa.c
0,0 → 1,703
/*
* Copyright (c) 2013 Paul B Mahol
* Copyright (c) 2011 Mina Nagy Zaki
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* LADSPA wrapper
*/
 
#include <dlfcn.h>
#include <ladspa.h>
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct LADSPAContext {
const AVClass *class;
char *dl_name;
char *plugin;
char *options;
void *dl_handle;
 
unsigned long nb_inputs;
unsigned long *ipmap; /* map input number to port number */
 
unsigned long nb_inputcontrols;
unsigned long *icmap; /* map input control number to port number */
LADSPA_Data *ictlv; /* input controls values */
 
unsigned long nb_outputs;
unsigned long *opmap; /* map output number to port number */
 
unsigned long nb_outputcontrols;
unsigned long *ocmap; /* map output control number to port number */
LADSPA_Data *octlv; /* output controls values */
 
const LADSPA_Descriptor *desc;
int *ctl_needs_value;
int nb_handles;
LADSPA_Handle *handles;
 
int sample_rate;
int nb_samples;
int64_t pts;
int64_t duration;
} LADSPAContext;
 
#define OFFSET(x) offsetof(LADSPAContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption ladspa_options[] = {
{ "file", "set library name or full path", OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "f", "set library name or full path", OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "plugin", "set plugin name", OFFSET(plugin), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "p", "set plugin name", OFFSET(plugin), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "controls", "set plugin options", OFFSET(options), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "c", "set plugin options", OFFSET(options), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT32_MAX, FLAGS },
{ "s", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT32_MAX, FLAGS },
{ "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
{ "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
{ "duration", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
{ "d", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(ladspa);
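/*
* Typical invocations (illustrative; the plugin and control names depend on
* which LADSPA libraries are installed and found via LADSPA_PATH):
*
*   ffmpeg -i in.wav -af ladspa=file=cmt out.wav                    # list plugins, then exit
*   ffmpeg -i in.wav -af ladspa=f=cmt:p=lofi:c=help out.wav         # list input controls, then exit
*   ffmpeg -i in.wav -af "ladspa=f=cmt:p=lofi:c=c0=22|c1=12|c2=12" out.wav
*/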
 
static void print_ctl_info(AVFilterContext *ctx, int level,
LADSPAContext *s, int ctl, unsigned long *map,
LADSPA_Data *values, int print)
{
const LADSPA_PortRangeHint *h = s->desc->PortRangeHints + map[ctl];
 
av_log(ctx, level, "c%i: %s [", ctl, s->desc->PortNames[map[ctl]]);
 
if (LADSPA_IS_HINT_TOGGLED(h->HintDescriptor)) {
av_log(ctx, level, "toggled (1 or 0)");
 
if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
av_log(ctx, level, " (default %i)", (int)values[ctl]);
} else {
if (LADSPA_IS_HINT_INTEGER(h->HintDescriptor)) {
av_log(ctx, level, "<int>");
 
if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor))
av_log(ctx, level, ", min: %i", (int)h->LowerBound);
 
if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor))
av_log(ctx, level, ", max: %i", (int)h->UpperBound);
 
if (print)
av_log(ctx, level, " (value %d)", (int)values[ctl]);
else if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
av_log(ctx, level, " (default %d)", (int)values[ctl]);
} else {
av_log(ctx, level, "<float>");
 
if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor))
av_log(ctx, level, ", min: %f", h->LowerBound);
 
if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor))
av_log(ctx, level, ", max: %f", h->UpperBound);
 
if (print)
av_log(ctx, level, " (value %f)", values[ctl]);
else if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
av_log(ctx, level, " (default %f)", values[ctl]);
}
 
if (LADSPA_IS_HINT_SAMPLE_RATE(h->HintDescriptor))
av_log(ctx, level, ", multiple of sample rate");
 
if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
av_log(ctx, level, ", logarithmic scale");
}
 
av_log(ctx, level, "]\n");
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
LADSPAContext *s = ctx->priv;
AVFrame *out;
int i, h;
 
if (!s->nb_outputs ||
(av_frame_is_writable(in) && s->nb_inputs == s->nb_outputs &&
!(s->desc->Properties & LADSPA_PROPERTY_INPLACE_BROKEN))) {
out = in;
} else {
out = ff_get_audio_buffer(ctx->outputs[0], in->nb_samples);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
 
for (h = 0; h < s->nb_handles; h++) {
for (i = 0; i < s->nb_inputs; i++) {
s->desc->connect_port(s->handles[h], s->ipmap[i],
(LADSPA_Data*)in->extended_data[i]);
}
 
for (i = 0; i < s->nb_outputs; i++) {
s->desc->connect_port(s->handles[h], s->opmap[i],
(LADSPA_Data*)out->extended_data[i]);
}
 
s->desc->run(s->handles[h], in->nb_samples);
}
 
for (i = 0; i < s->nb_outputcontrols; i++)
print_ctl_info(ctx, AV_LOG_VERBOSE, s, i, s->ocmap, s->octlv, 1);
 
if (out != in)
av_frame_free(&in);
 
return ff_filter_frame(ctx->outputs[0], out);
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
LADSPAContext *s = ctx->priv;
AVFrame *out;
int64_t t;
int i;
 
if (ctx->nb_inputs)
return ff_request_frame(ctx->inputs[0]);
 
t = av_rescale(s->pts, AV_TIME_BASE, s->sample_rate);
if (s->duration >= 0 && t >= s->duration)
return AVERROR_EOF;
 
out = ff_get_audio_buffer(outlink, s->nb_samples);
if (!out)
return AVERROR(ENOMEM);
 
for (i = 0; i < s->nb_outputs; i++)
s->desc->connect_port(s->handles[0], s->opmap[i],
(LADSPA_Data*)out->extended_data[i]);
 
s->desc->run(s->handles[0], s->nb_samples);
 
for (i = 0; i < s->nb_outputcontrols; i++)
print_ctl_info(ctx, AV_LOG_INFO, s, i, s->ocmap, s->octlv, 1);
 
out->sample_rate = s->sample_rate;
out->pts = s->pts;
s->pts += s->nb_samples;
 
return ff_filter_frame(outlink, out);
}
 
static void set_default_ctl_value(LADSPAContext *s, int ctl,
unsigned long *map, LADSPA_Data *values)
{
const LADSPA_PortRangeHint *h = s->desc->PortRangeHints + map[ctl];
const LADSPA_Data lower = h->LowerBound;
const LADSPA_Data upper = h->UpperBound;
 
if (LADSPA_IS_HINT_DEFAULT_MINIMUM(h->HintDescriptor)) {
values[ctl] = lower;
} else if (LADSPA_IS_HINT_DEFAULT_MAXIMUM(h->HintDescriptor)) {
values[ctl] = upper;
} else if (LADSPA_IS_HINT_DEFAULT_0(h->HintDescriptor)) {
values[ctl] = 0.0;
} else if (LADSPA_IS_HINT_DEFAULT_1(h->HintDescriptor)) {
values[ctl] = 1.0;
} else if (LADSPA_IS_HINT_DEFAULT_100(h->HintDescriptor)) {
values[ctl] = 100.0;
} else if (LADSPA_IS_HINT_DEFAULT_440(h->HintDescriptor)) {
values[ctl] = 440.0;
} else if (LADSPA_IS_HINT_DEFAULT_LOW(h->HintDescriptor)) {
if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
values[ctl] = exp(log(lower) * 0.75 + log(upper) * 0.25);
else
values[ctl] = lower * 0.75 + upper * 0.25;
} else if (LADSPA_IS_HINT_DEFAULT_MIDDLE(h->HintDescriptor)) {
if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
values[ctl] = exp(log(lower) * 0.5 + log(upper) * 0.5);
else
values[ctl] = lower * 0.5 + upper * 0.5;
} else if (LADSPA_IS_HINT_DEFAULT_HIGH(h->HintDescriptor)) {
if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
values[ctl] = exp(log(lower) * 0.25 + log(upper) * 0.75);
else
values[ctl] = lower * 0.25 + upper * 0.75;
}
}
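/*
* Worked example for set_default_ctl_value() above: for a logarithmic port
* with LowerBound = 100 and UpperBound = 10000, LADSPA_HINT_DEFAULT_MIDDLE
* yields exp(0.5 * log(100) + 0.5 * log(10000)) = exp(log(1000)) = 1000,
* i.e. the geometric rather than the arithmetic mean of the bounds.
*/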
 
static int connect_ports(AVFilterContext *ctx, AVFilterLink *link)
{
LADSPAContext *s = ctx->priv;
int i, j;
 
s->nb_handles = s->nb_inputs == 1 && s->nb_outputs == 1 ? link->channels : 1;
s->handles = av_calloc(s->nb_handles, sizeof(*s->handles));
if (!s->handles)
return AVERROR(ENOMEM);
 
for (i = 0; i < s->nb_handles; i++) {
s->handles[i] = s->desc->instantiate(s->desc, link->sample_rate);
if (!s->handles[i]) {
av_log(ctx, AV_LOG_ERROR, "Could not instantiate plugin.\n");
return AVERROR_EXTERNAL;
}
 
// Connect the input control ports
for (j = 0; j < s->nb_inputcontrols; j++)
s->desc->connect_port(s->handles[i], s->icmap[j], s->ictlv + j);
 
// Connect the output control ports
for (j = 0; j < s->nb_outputcontrols; j++)
s->desc->connect_port(s->handles[i], s->ocmap[j], &s->octlv[j]);
 
if (s->desc->activate)
s->desc->activate(s->handles[i]);
}
 
av_log(ctx, AV_LOG_DEBUG, "handles: %d\n", s->nb_handles);
 
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
 
return connect_ports(ctx, inlink);
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
int ret;
 
if (ctx->nb_inputs) {
AVFilterLink *inlink = ctx->inputs[0];
 
outlink->format = inlink->format;
outlink->sample_rate = inlink->sample_rate;
 
ret = 0;
} else {
LADSPAContext *s = ctx->priv;
 
outlink->sample_rate = s->sample_rate;
outlink->time_base = (AVRational){1, s->sample_rate};
 
ret = connect_ports(ctx, outlink);
}
 
return ret;
}
 
static void count_ports(const LADSPA_Descriptor *desc,
unsigned long *nb_inputs, unsigned long *nb_outputs)
{
LADSPA_PortDescriptor pd;
int i;
 
for (i = 0; i < desc->PortCount; i++) {
pd = desc->PortDescriptors[i];
 
if (LADSPA_IS_PORT_AUDIO(pd)) {
if (LADSPA_IS_PORT_INPUT(pd)) {
(*nb_inputs)++;
} else if (LADSPA_IS_PORT_OUTPUT(pd)) {
(*nb_outputs)++;
}
}
}
}
 
static void *try_load(const char *dir, const char *soname)
{
char *path = av_asprintf("%s/%s.so", dir, soname);
void *ret = NULL;
 
if (path) {
ret = dlopen(path, RTLD_LOCAL|RTLD_NOW);
av_free(path);
}
 
return ret;
}
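/*
* init() below probes, in order: every directory listed in $LADSPA_PATH,
* then $HOME/.ladspa/lib, /usr/local/lib/ladspa and finally /usr/lib/ladspa,
* appending ".so" to the library name each time.
*/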
 
static int set_control(AVFilterContext *ctx, unsigned long port, LADSPA_Data value)
{
LADSPAContext *s = ctx->priv;
const char *label = s->desc->Label;
LADSPA_PortRangeHint *h;

if (port >= s->nb_inputcontrols) {
av_log(ctx, AV_LOG_ERROR, "Control c%lu is out of range [0 - %lu].\n",
port, s->nb_inputcontrols);
return AVERROR(EINVAL);
}

/* only index icmap[] once port is known to be valid */
h = (LADSPA_PortRangeHint *)s->desc->PortRangeHints + s->icmap[port];
 
if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor) &&
value < h->LowerBound) {
av_log(ctx, AV_LOG_ERROR,
"%s: input control c%ld is below lower boundary of %0.4f.\n",
label, port, h->LowerBound);
return AVERROR(EINVAL);
}
 
if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor) &&
value > h->UpperBound) {
av_log(ctx, AV_LOG_ERROR,
"%s: input control c%ld is above upper boundary of %0.4f.\n",
label, port, h->UpperBound);
return AVERROR(EINVAL);
}
 
s->ictlv[port] = value;
 
return 0;
}
 
static av_cold int init(AVFilterContext *ctx)
{
LADSPAContext *s = ctx->priv;
LADSPA_Descriptor_Function descriptor_fn;
const LADSPA_Descriptor *desc;
LADSPA_PortDescriptor pd;
AVFilterPad pad = { NULL };
char *p, *arg, *saveptr = NULL;
unsigned long nb_ports;
int i;
 
if (!s->dl_name) {
av_log(ctx, AV_LOG_ERROR, "No plugin name provided\n");
return AVERROR(EINVAL);
}
 
if (s->dl_name[0] == '/' || s->dl_name[0] == '.') {
// argument is a path
s->dl_handle = dlopen(s->dl_name, RTLD_LOCAL|RTLD_NOW);
} else {
// argument is a shared object name
char *paths = av_strdup(getenv("LADSPA_PATH"));
const char *separator = ":";
 
if (paths) {
p = paths;
while ((arg = av_strtok(p, separator, &saveptr)) && !s->dl_handle) {
s->dl_handle = try_load(arg, s->dl_name);
p = NULL;
}
}
 
av_free(paths);
if (!s->dl_handle && getenv("HOME") &&
(paths = av_asprintf("%s/.ladspa/lib", getenv("HOME")))) {
s->dl_handle = try_load(paths, s->dl_name);
av_free(paths);
}
 
if (!s->dl_handle)
s->dl_handle = try_load("/usr/local/lib/ladspa", s->dl_name);
 
if (!s->dl_handle)
s->dl_handle = try_load("/usr/lib/ladspa", s->dl_name);
}
if (!s->dl_handle) {
av_log(ctx, AV_LOG_ERROR, "Failed to load '%s'\n", s->dl_name);
return AVERROR(EINVAL);
}
 
descriptor_fn = dlsym(s->dl_handle, "ladspa_descriptor");
if (!descriptor_fn) {
av_log(ctx, AV_LOG_ERROR, "Could not find ladspa_descriptor: %s\n", dlerror());
return AVERROR(EINVAL);
}
 
// Find the requested plugin, or list plugins
if (!s->plugin) {
av_log(ctx, AV_LOG_INFO, "The '%s' library contains the following plugins:\n", s->dl_name);
av_log(ctx, AV_LOG_INFO, "I = Input Channels\n");
av_log(ctx, AV_LOG_INFO, "O = Output Channels\n");
av_log(ctx, AV_LOG_INFO, "I:O %-25s %s\n", "Plugin", "Description");
av_log(ctx, AV_LOG_INFO, "\n");
for (i = 0; (desc = descriptor_fn(i)); i++) {
unsigned long inputs = 0, outputs = 0;
 
count_ports(desc, &inputs, &outputs);
av_log(ctx, AV_LOG_INFO, "%lu:%lu %-25s %s\n", inputs, outputs, desc->Label,
av_x_if_null(desc->Name, "?"));
av_log(ctx, AV_LOG_VERBOSE, "Maker: %s\n", av_x_if_null(desc->Maker, "?"));
av_log(ctx, AV_LOG_VERBOSE, "Copyright: %s\n", av_x_if_null(desc->Copyright, "?"));
}
return AVERROR_EXIT;
} else {
for (i = 0;; i++) {
desc = descriptor_fn(i);
if (!desc) {
av_log(ctx, AV_LOG_ERROR, "Could not find plugin: %s\n", s->plugin);
return AVERROR(EINVAL);
}
 
if (desc->Label && !strcmp(desc->Label, s->plugin))
break;
}
}
 
s->desc = desc;
nb_ports = desc->PortCount;
 
s->ipmap = av_calloc(nb_ports, sizeof(*s->ipmap));
s->opmap = av_calloc(nb_ports, sizeof(*s->opmap));
s->icmap = av_calloc(nb_ports, sizeof(*s->icmap));
s->ocmap = av_calloc(nb_ports, sizeof(*s->ocmap));
s->ictlv = av_calloc(nb_ports, sizeof(*s->ictlv));
s->octlv = av_calloc(nb_ports, sizeof(*s->octlv));
s->ctl_needs_value = av_calloc(nb_ports, sizeof(*s->ctl_needs_value));
if (!s->ipmap || !s->opmap || !s->icmap ||
!s->ocmap || !s->ictlv || !s->octlv || !s->ctl_needs_value)
return AVERROR(ENOMEM);
 
for (i = 0; i < nb_ports; i++) {
pd = desc->PortDescriptors[i];
 
if (LADSPA_IS_PORT_AUDIO(pd)) {
if (LADSPA_IS_PORT_INPUT(pd)) {
s->ipmap[s->nb_inputs] = i;
s->nb_inputs++;
} else if (LADSPA_IS_PORT_OUTPUT(pd)) {
s->opmap[s->nb_outputs] = i;
s->nb_outputs++;
}
} else if (LADSPA_IS_PORT_CONTROL(pd)) {
if (LADSPA_IS_PORT_INPUT(pd)) {
s->icmap[s->nb_inputcontrols] = i;
 
if (LADSPA_IS_HINT_HAS_DEFAULT(desc->PortRangeHints[i].HintDescriptor))
set_default_ctl_value(s, s->nb_inputcontrols, s->icmap, s->ictlv);
else
s->ctl_needs_value[s->nb_inputcontrols] = 1;
 
s->nb_inputcontrols++;
} else if (LADSPA_IS_PORT_OUTPUT(pd)) {
s->ocmap[s->nb_outputcontrols] = i;
s->nb_outputcontrols++;
}
}
}
 
// List Control Ports if "help" is specified
if (s->options && !strcmp(s->options, "help")) {
if (!s->nb_inputcontrols) {
av_log(ctx, AV_LOG_INFO,
"The '%s' plugin does not have any input controls.\n",
desc->Label);
} else {
av_log(ctx, AV_LOG_INFO,
"The '%s' plugin has the following input controls:\n",
desc->Label);
for (i = 0; i < s->nb_inputcontrols; i++)
print_ctl_info(ctx, AV_LOG_INFO, s, i, s->icmap, s->ictlv, 0);
}
return AVERROR_EXIT;
}
 
// Parse control parameters
p = s->options;
while (s->options) {
LADSPA_Data val;
int ret;
 
if (!(arg = av_strtok(p, "|", &saveptr)))
break;
p = NULL;
 
if (sscanf(arg, "c%d=%f", &i, &val) != 2) {
av_log(ctx, AV_LOG_ERROR, "Invalid syntax.\n");
return AVERROR(EINVAL);
}
 
if ((ret = set_control(ctx, i, val)) < 0)
return ret;
s->ctl_needs_value[i] = 0;
}
 
// Check if any controls are not set
for (i = 0; i < s->nb_inputcontrols; i++) {
if (s->ctl_needs_value[i]) {
av_log(ctx, AV_LOG_ERROR, "Control c%d must be set.\n", i);
print_ctl_info(ctx, AV_LOG_ERROR, s, i, s->icmap, s->ictlv, 0);
return AVERROR(EINVAL);
}
}
 
pad.type = AVMEDIA_TYPE_AUDIO;
 
if (s->nb_inputs) {
pad.name = av_asprintf("in0:%s%lu", desc->Label, s->nb_inputs);
if (!pad.name)
return AVERROR(ENOMEM);
 
pad.filter_frame = filter_frame;
pad.config_props = config_input;
if (ff_insert_inpad(ctx, ctx->nb_inputs, &pad) < 0) {
av_freep(&pad.name);
return AVERROR(ENOMEM);
}
}
 
av_log(ctx, AV_LOG_DEBUG, "ports: %lu\n", nb_ports);
av_log(ctx, AV_LOG_DEBUG, "inputs: %lu outputs: %lu\n",
s->nb_inputs, s->nb_outputs);
av_log(ctx, AV_LOG_DEBUG, "input controls: %lu output controls: %lu\n",
s->nb_inputcontrols, s->nb_outputcontrols);
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
LADSPAContext *s = ctx->priv;
AVFilterFormats *formats;
AVFilterChannelLayouts *layouts;
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
 
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
 
if (s->nb_inputs) {
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
 
ff_set_common_samplerates(ctx, formats);
} else {
int sample_rates[] = { s->sample_rate, -1 };
 
ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
}
 
if (s->nb_inputs == 1 && s->nb_outputs == 1) {
// We will instantiate multiple LADSPA_Handle instances, one per channel
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
 
ff_set_common_channel_layouts(ctx, layouts);
} else {
AVFilterLink *outlink = ctx->outputs[0];
 
if (s->nb_inputs >= 1) {
AVFilterLink *inlink = ctx->inputs[0];
int64_t inlayout = FF_COUNT2LAYOUT(s->nb_inputs);
 
layouts = NULL;
ff_add_channel_layout(&layouts, inlayout);
ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
 
if (!s->nb_outputs)
ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
}
 
if (s->nb_outputs >= 1) {
int64_t outlayout = FF_COUNT2LAYOUT(s->nb_outputs);
 
layouts = NULL;
ff_add_channel_layout(&layouts, outlayout);
ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
}
}
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
LADSPAContext *s = ctx->priv;
int i;
 
for (i = 0; i < s->nb_handles; i++) {
if (s->desc->deactivate)
s->desc->deactivate(s->handles[i]);
if (s->desc->cleanup)
s->desc->cleanup(s->handles[i]);
}
 
if (s->dl_handle)
dlclose(s->dl_handle);
 
av_freep(&s->ipmap);
av_freep(&s->opmap);
av_freep(&s->icmap);
av_freep(&s->ocmap);
av_freep(&s->ictlv);
av_freep(&s->octlv);
av_freep(&s->handles);
av_freep(&s->ctl_needs_value);
 
if (ctx->nb_inputs)
av_freep(&ctx->input_pads[0].name);
}
 
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
char *res, int res_len, int flags)
{
LADSPA_Data value;
unsigned long port;
 
if (sscanf(cmd, "c%ld", &port) + sscanf(args, "%f", &value) != 2)
return AVERROR(EINVAL);
 
return set_control(ctx, port, value);
}
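/*
* This hook lets input controls be changed mid-stream, e.g. (an illustrative
* sketch; the control index and value depend on the loaded plugin) from the
* sendcmd filter placed earlier in the same graph:
*
*   sendcmd=c='5.0 ladspa c0 0.75',ladspa=f=...:p=...
*
* which sets control c0 to 0.75 at t = 5 s.
*/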
 
static const AVFilterPad ladspa_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_af_ladspa = {
.name = "ladspa",
.description = NULL_IF_CONFIG_SMALL("Apply LADSPA effect."),
.priv_size = sizeof(LADSPAContext),
.priv_class = &ladspa_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.process_command = process_command,
.inputs = NULL,
.outputs = ladspa_outputs,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_pan.c
0,0 → 1,422
/*
* Copyright (c) 2002 Anders Johansson <ajh@atri.curtin.edu.au>
* Copyright (c) 2011 Clément Bœsch <u pkh me>
* Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Audio panning filter (channels mixing)
* Original code written by Anders Johansson for MPlayer,
* reimplemented for FFmpeg.
*/
 
#include <stdio.h>
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libswresample/swresample.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
#define MAX_CHANNELS 63
 
typedef struct PanContext {
const AVClass *class;
char *args;
int64_t out_channel_layout;
double gain[MAX_CHANNELS][MAX_CHANNELS];
int64_t need_renorm;
int need_renumber;
int nb_input_channels;
int nb_output_channels;
 
int pure_gains;
/* channel mapping specific */
int channel_map[SWR_CH_MAX];
struct SwrContext *swr;
} PanContext;
 
static void skip_spaces(char **arg)
{
int len = 0;
 
sscanf(*arg, " %n", &len);
*arg += len;
}
 
static int parse_channel_name(char **arg, int *rchannel, int *rnamed)
{
char buf[8];
int len, i, channel_id = 0;
int64_t layout, layout0;
 
skip_spaces(arg);
/* try to parse a channel name, e.g. "FL" */
if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
layout0 = layout = av_get_channel_layout(buf);
/* channel_id <- first set bit in layout */
for (i = 32; i > 0; i >>= 1) {
if (layout >= (int64_t)1 << i) {
channel_id += i;
layout >>= i;
}
}
/* reject layouts that are not a single channel */
if (channel_id >= MAX_CHANNELS || layout0 != (int64_t)1 << channel_id)
return AVERROR(EINVAL);
*rchannel = channel_id;
*rnamed = 1;
*arg += len;
return 0;
}
/* try to parse a channel number, e.g. "c2" */
if (sscanf(*arg, "c%d%n", &channel_id, &len) &&
channel_id >= 0 && channel_id < MAX_CHANNELS) {
*rchannel = channel_id;
*rnamed = 0;
*arg += len;
return 0;
}
return AVERROR(EINVAL);
}
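/*
* Worked example for parse_channel_name() above: "FL" parses to layout 0x1,
* so the halving scan (i = 32, 16, ..., 1) never shifts and leaves
* channel_id = 0; "LFE" parses to layout 0x8 and accumulates
* channel_id = 2 + 1 = 3. The final layout0 comparison guarantees the name
* denoted exactly one channel.
*/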
 
static av_cold int init(AVFilterContext *ctx)
{
PanContext *const pan = ctx->priv;
char *arg, *arg0, *tokenizer, *args = av_strdup(pan->args);
int out_ch_id, in_ch_id, len, named, ret;
int nb_in_channels[2] = { 0, 0 }; // number of unnamed and named input channels
double gain;
 
if (!pan->args) {
av_log(ctx, AV_LOG_ERROR,
"pan filter needs a channel layout and a set "
"of channels definitions as parameter\n");
return AVERROR(EINVAL);
}
if (!args)
return AVERROR(ENOMEM);
arg = av_strtok(args, "|", &tokenizer);
ret = ff_parse_channel_layout(&pan->out_channel_layout, arg, ctx);
if (ret < 0)
goto fail;
pan->nb_output_channels = av_get_channel_layout_nb_channels(pan->out_channel_layout);
 
/* parse channel specifications */
while ((arg = arg0 = av_strtok(NULL, "|", &tokenizer))) {
/* channel name */
if (parse_channel_name(&arg, &out_ch_id, &named)) {
av_log(ctx, AV_LOG_ERROR,
"Expected out channel name, got \"%.8s\"\n", arg);
ret = AVERROR(EINVAL);
goto fail;
}
if (named) {
if (!((pan->out_channel_layout >> out_ch_id) & 1)) {
av_log(ctx, AV_LOG_ERROR,
"Channel \"%.8s\" does not exist in the chosen layout\n", arg0);
ret = AVERROR(EINVAL);
goto fail;
}
/* get the channel number in the output channel layout:
* out_channel_layout & ((1 << out_ch_id) - 1) are all the
* channels that come before out_ch_id,
* so their count is the index of out_ch_id */
out_ch_id = av_get_channel_layout_nb_channels(pan->out_channel_layout & (((int64_t)1 << out_ch_id) - 1));
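/* e.g. with out_channel_layout = FL|FR (0x3) and FR (bit 1):
* 0x3 & ((1 << 1) - 1) = 0x1 holds one channel, so FR becomes
* output index 1, while FL (empty mask) becomes index 0 */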
}
if (out_ch_id < 0 || out_ch_id >= pan->nb_output_channels) {
av_log(ctx, AV_LOG_ERROR,
"Invalid out channel name \"%.8s\"\n", arg0);
ret = AVERROR(EINVAL);
goto fail;
}
skip_spaces(&arg);
if (*arg == '=') {
arg++;
} else if (*arg == '<') {
pan->need_renorm |= (int64_t)1 << out_ch_id;
arg++;
} else {
av_log(ctx, AV_LOG_ERROR,
"Syntax error after channel name in \"%.8s\"\n", arg0);
ret = AVERROR(EINVAL);
goto fail;
}
/* gains */
while (1) {
gain = 1;
if (sscanf(arg, "%lf%n *%n", &gain, &len, &len))
arg += len;
if (parse_channel_name(&arg, &in_ch_id, &named)){
av_log(ctx, AV_LOG_ERROR,
"Expected in channel name, got \"%.8s\"\n", arg);
ret = AVERROR(EINVAL);
goto fail;
}
nb_in_channels[named]++;
if (nb_in_channels[!named]) {
av_log(ctx, AV_LOG_ERROR,
"Can not mix named and numbered channels\n");
ret = AVERROR(EINVAL);
goto fail;
}
pan->gain[out_ch_id][in_ch_id] = gain;
skip_spaces(&arg);
if (!*arg)
break;
if (*arg != '+') {
av_log(ctx, AV_LOG_ERROR, "Syntax error near \"%.8s\"\n", arg);
ret = AVERROR(EINVAL);
goto fail;
}
arg++;
}
}
pan->need_renumber = !!nb_in_channels[1];
 
ret = 0;
fail:
av_free(args);
return ret;
}
 
static int are_gains_pure(const PanContext *pan)
{
int i, j;
 
for (i = 0; i < MAX_CHANNELS; i++) {
int nb_gain = 0;
 
for (j = 0; j < MAX_CHANNELS; j++) {
double gain = pan->gain[i][j];
 
/* channel mapping is effective only if 0% or 100% of a channel is
* selected... */
if (gain != 0. && gain != 1.)
return 0;
/* ...and if the output channel is only composed of one input */
if (gain && nb_gain++)
return 0;
}
}
return 1;
}
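/*
* e.g. the matrix { {0, 1}, {1, 0} } (swap the two channels) is pure, while
* { {0.5, 0.5}, {0, 1} } is not: its first output mixes two inputs with
* non-unity gains, so the full rematrixing path is needed instead of the
* cheap channel-map path below.
*/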
 
static int query_formats(AVFilterContext *ctx)
{
PanContext *pan = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layouts;
 
pan->pure_gains = are_gains_pure(pan);
/* libswr supports any sample and packing formats */
ff_set_common_formats(ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO));
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_samplerates(ctx, formats);
 
// inlink supports any channel layout
layouts = ff_all_channel_layouts();
ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
 
// outlink supports only requested output channel layout
layouts = NULL;
ff_add_channel_layout(&layouts, pan->out_channel_layout);
ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
return 0;
}
 
static int config_props(AVFilterLink *link)
{
AVFilterContext *ctx = link->dst;
PanContext *pan = ctx->priv;
char buf[1024], *cur;
int i, j, k, r;
double t;
 
pan->nb_input_channels = av_get_channel_layout_nb_channels(link->channel_layout);
if (pan->need_renumber) {
// input channels were given by their name: renumber them
for (i = j = 0; i < MAX_CHANNELS; i++) {
if ((link->channel_layout >> i) & 1) {
for (k = 0; k < pan->nb_output_channels; k++)
pan->gain[k][j] = pan->gain[k][i];
j++;
}
}
}
 
// sanity check; can't be done in query_formats since the inlink
// channel layout is unknown at that time
if (pan->nb_input_channels > SWR_CH_MAX ||
pan->nb_output_channels > SWR_CH_MAX) {
av_log(ctx, AV_LOG_ERROR,
"libswresample support a maximum of %d channels. "
"Feel free to ask for a higher limit.\n", SWR_CH_MAX);
return AVERROR_PATCHWELCOME;
}
 
// init libswresample context
pan->swr = swr_alloc_set_opts(pan->swr,
pan->out_channel_layout, link->format, link->sample_rate,
link->channel_layout, link->format, link->sample_rate,
0, ctx);
if (!pan->swr)
return AVERROR(ENOMEM);
 
// gains are pure, init the channel mapping
if (pan->pure_gains) {
 
// get channel map from the pure gains
for (i = 0; i < pan->nb_output_channels; i++) {
int ch_id = -1;
for (j = 0; j < pan->nb_input_channels; j++) {
if (pan->gain[i][j]) {
ch_id = j;
break;
}
}
pan->channel_map[i] = ch_id;
}
 
av_opt_set_int(pan->swr, "icl", pan->out_channel_layout, 0);
av_opt_set_int(pan->swr, "uch", pan->nb_output_channels, 0);
swr_set_channel_mapping(pan->swr, pan->channel_map);
} else {
// renormalize
for (i = 0; i < pan->nb_output_channels; i++) {
if (!((pan->need_renorm >> i) & 1))
continue;
t = 0;
for (j = 0; j < pan->nb_input_channels; j++)
t += pan->gain[i][j];
if (t > -1E-5 && t < 1E-5) {
// t is almost 0 but not exactly; this is probably a mistake
if (t)
av_log(ctx, AV_LOG_WARNING,
"Degenerate coefficients while renormalizing\n");
continue;
}
for (j = 0; j < pan->nb_input_channels; j++)
pan->gain[i][j] /= t;
}
av_opt_set_int(pan->swr, "icl", link->channel_layout, 0);
av_opt_set_int(pan->swr, "ocl", pan->out_channel_layout, 0);
swr_set_matrix(pan->swr, pan->gain[0], pan->gain[1] - pan->gain[0]);
}
 
r = swr_init(pan->swr);
if (r < 0)
return r;
 
// summary
for (i = 0; i < pan->nb_output_channels; i++) {
cur = buf;
for (j = 0; j < pan->nb_input_channels; j++) {
r = snprintf(cur, buf + sizeof(buf) - cur, "%s%.3g i%d",
j ? " + " : "", pan->gain[i][j], j);
cur += FFMIN(buf + sizeof(buf) - cur, r);
}
av_log(ctx, AV_LOG_VERBOSE, "o%d = %s\n", i, buf);
}
// add channel mapping summary if possible
if (pan->pure_gains) {
av_log(ctx, AV_LOG_INFO, "Pure channel mapping detected:");
for (i = 0; i < pan->nb_output_channels; i++)
if (pan->channel_map[i] < 0)
av_log(ctx, AV_LOG_INFO, " M");
else
av_log(ctx, AV_LOG_INFO, " %d", pan->channel_map[i]);
av_log(ctx, AV_LOG_INFO, "\n");
return 0;
}
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
int ret;
int n = insamples->nb_samples;
AVFilterLink *const outlink = inlink->dst->outputs[0];
AVFrame *outsamples = ff_get_audio_buffer(outlink, n);
PanContext *pan = inlink->dst->priv;
 
if (!outsamples)
return AVERROR(ENOMEM);
swr_convert(pan->swr, outsamples->extended_data, n,
(void *)insamples->extended_data, n);
av_frame_copy_props(outsamples, insamples);
outsamples->channel_layout = outlink->channel_layout;
av_frame_set_channels(outsamples, outlink->channels);
 
ret = ff_filter_frame(outlink, outsamples);
av_frame_free(&insamples);
return ret;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
PanContext *pan = ctx->priv;
swr_free(&pan->swr);
}
 
#define OFFSET(x) offsetof(PanContext, x)
 
static const AVOption pan_options[] = {
{ "args", NULL, OFFSET(args), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(pan);
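/*
* Example usage (illustrative): downmix 5.1 to stereo with explicit
* coefficients, letting '<' renormalize each output channel:
*
*   ffmpeg -i in.wav -af "pan=stereo|FL<FL+0.5*FC+0.6*BL|FR<FR+0.5*FC+0.6*BR" out.wav
*/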
 
static const AVFilterPad pan_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_props,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad pan_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_pan = {
.name = "pan",
.description = NULL_IF_CONFIG_SMALL("Remix channels with coefficients (panning)."),
.priv_size = sizeof(PanContext),
.priv_class = &pan_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = pan_inputs,
.outputs = pan_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_replaygain.c
0,0 → 1,613
/*
* Copyright (c) 1998 - 2009 Conifer Software
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* ReplayGain scanner
*/
 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
#define HISTOGRAM_SLOTS 12000
#define BUTTER_ORDER 2
#define YULE_ORDER 10
 
typedef struct ReplayGainFreqInfo {
int sample_rate;
double BYule[YULE_ORDER + 1];
double AYule[YULE_ORDER + 1];
double BButter[BUTTER_ORDER + 1];
double AButter[BUTTER_ORDER + 1];
} ReplayGainFreqInfo;
 
static const ReplayGainFreqInfo freqinfos[] =
{
{
192000,
{ 0.01184742123123, -0.04631092400086, 0.06584226961238,
-0.02165588522478, -0.05656260778952, 0.08607493592760,
-0.03375544339786, -0.04216579932754, 0.06416711490648,
-0.03444708260844, 0.00697275872241 },
{ 1.00000000000000, -5.24727318348167, 10.60821585192244,
-8.74127665810413, -1.33906071371683, 8.07972882096606,
-5.46179918950847, 0.54318070652536, 0.87450969224280,
-0.34656083539754, 0.03034796843589 },
{ 0.99653501465135, -1.99307002930271, 0.99653501465135 },
{ 1.00000000000000, -1.99305802314321, 0.99308203546221 },
},
{
176400,
{ 0.00268568524529, -0.00852379426080, 0.00852704191347,
0.00146116310295, -0.00950855828762, 0.00625449515499,
0.00116183868722, -0.00362461417136, 0.00203961000134,
-0.00050664587933, 0.00004327455427 },
{ 1.00000000000000, -5.57512782763045, 12.44291056065794,
-12.87462799681221, 3.08554846961576, 6.62493459880692,
-7.07662766313248, 2.51175542736441, 0.06731510802735,
-0.24567753819213, 0.03961404162376 },
{ 0.99622916581118, -1.99245833162236, 0.99622916581118 },
{ 1.00000000000000, -1.99244411238133, 0.99247255086339 },
},
{
144000,
{ 0.00639682359450, -0.02556437970955, 0.04230854400938,
-0.03722462201267, 0.01718514827295, 0.00610592243009,
-0.03065965747365, 0.04345745003539, -0.03298592681309,
0.01320937236809, -0.00220304127757 },
{ 1.00000000000000, -6.14814623523425, 15.80002457141566,
-20.78487587686937, 11.98848552310315, 3.36462015062606,
-10.22419868359470, 6.65599702146473, -1.67141861110485,
-0.05417956536718, 0.07374767867406 },
{ 0.99538268958706, -1.99076537917413, 0.99538268958706 },
{ 1.00000000000000, -1.99074405950505, 0.99078669884321 },
},
{
128000,
{ 0.00553120584305, -0.02112620545016, 0.03549076243117,
-0.03362498312306, 0.01425867248183, 0.01344686928787,
-0.03392770787836, 0.03464136459530, -0.02039116051549,
0.00667420794705, -0.00093763762995 },
{ 1.00000000000000, -6.14581710839925, 16.04785903675838,
-22.19089131407749, 15.24756471580286, -0.52001440400238,
-8.00488641699940, 6.60916094768855, -2.37856022810923,
0.33106947986101, 0.00459820832036 },
{ 0.99480702681278, -1.98961405362557, 0.99480702681278 },
{ 1.00000000000000, -1.98958708647324, 0.98964102077790 },
},
{
112000,
{ 0.00528778718259, -0.01893240907245, 0.03185982561867,
-0.02926260297838, 0.00715743034072, 0.01985743355827,
-0.03222614850941, 0.02565681978192, -0.01210662313473,
0.00325436284541, -0.00044173593001 },
{ 1.00000000000000, -6.24932108456288, 17.42344320538476,
-27.86819709054896, 26.79087344681326,-13.43711081485123,
-0.66023612948173, 6.03658091814935, -4.24926577030310,
1.40829268709186, -0.19480852628112 },
{ 0.99406737810867, -1.98813475621734, 0.99406737810867 },
{ 1.00000000000000, -1.98809955990514, 0.98816995252954 },
},
{
96000,
{ 0.00588138296683, -0.01613559730421, 0.02184798954216,
-0.01742490405317, 0.00464635643780, 0.01117772513205,
-0.02123865824368, 0.01959354413350, -0.01079720643523,
0.00352183686289, -0.00063124341421 },
{ 1.00000000000000, -5.97808823642008, 16.21362507964068,
-25.72923730652599, 25.40470663139513,-14.66166287771134,
2.81597484359752, 2.51447125969733, -2.23575306985286,
0.75788151036791, -0.10078025199029 },
{ 0.99308203517541, -1.98616407035082, 0.99308203517541 },
{ 1.00000000000000, -1.98611621154089, 0.98621192916075 },
},
{
88200,
{ 0.02667482047416, -0.11377479336097, 0.23063167910965,
-0.30726477945593, 0.33188520686529, -0.33862680249063,
0.31807161531340, -0.23730796929880, 0.12273894790371,
-0.03840017967282, 0.00549673387936 },
{ 1.00000000000000, -6.31836451657302, 18.31351310801799,
-31.88210014815921, 36.53792146976740,-28.23393036467559,
14.24725258227189, -4.04670980012854, 0.18865757280515,
0.25420333563908, -0.06012333531065 },
{ 0.99247255046129, -1.98494510092259, 0.99247255046129 },
{ 1.00000000000000, -1.98488843762335, 0.98500176422183 },
},
{
64000,
{ 0.02613056568174, -0.08128786488109, 0.14937282347325,
-0.21695711675126, 0.25010286673402, -0.23162283619278,
0.17424041833052, -0.10299599216680, 0.04258696481981,
-0.00977952936493, 0.00105325558889 },
{ 1.00000000000000, -5.73625477092119, 16.15249794355035,
-29.68654912464508, 39.55706155674083,-39.82524556246253,
30.50605345013009,-17.43051772821245, 7.05154573908017,
-1.80783839720514, 0.22127840210813 },
{ 0.98964101933472, -1.97928203866944, 0.98964101933472 },
{ 1.00000000000000, -1.97917472731009, 0.97938935002880 },
},
{
56000,
{ 0.03144914734085, -0.06151729206963, 0.08066788708145,
-0.09737939921516, 0.08943210803999, -0.06989984672010,
0.04926972841044, -0.03161257848451, 0.01456837493506,
-0.00316015108496, 0.00132807215875 },
{ 1.00000000000000, -4.87377313090032, 12.03922160140209,
-20.10151118381395, 25.10388534415171,-24.29065560815903,
18.27158469090663,-10.45249552560593, 4.30319491872003,
-1.13716992070185, 0.14510733527035 },
{ 0.98816995007392, -1.97633990014784, 0.98816995007392 },
{ 1.00000000000000, -1.97619994516973, 0.97647985512594 },
},
{
48000,
{ 0.03857599435200, -0.02160367184185, -0.00123395316851,
-0.00009291677959, -0.01655260341619, 0.02161526843274,
-0.02074045215285, 0.00594298065125, 0.00306428023191,
0.00012025322027, 0.00288463683916 },
{ 1.00000000000000, -3.84664617118067, 7.81501653005538,
-11.34170355132042, 13.05504219327545,-12.28759895145294,
9.48293806319790, -5.87257861775999, 2.75465861874613,
-0.86984376593551, 0.13919314567432 },
{ 0.98621192462708, -1.97242384925416, 0.98621192462708 },
{ 1.00000000000000, -1.97223372919527, 0.97261396931306 },
},
{
44100,
{ 0.05418656406430, -0.02911007808948, -0.00848709379851,
-0.00851165645469, -0.00834990904936, 0.02245293253339,
-0.02596338512915, 0.01624864962975, -0.00240879051584,
0.00674613682247, -0.00187763777362 },
{ 1.00000000000000, -3.47845948550071, 6.36317777566148,
-8.54751527471874, 9.47693607801280, -8.81498681370155,
6.85401540936998, -4.39470996079559, 2.19611684890774,
-0.75104302451432, 0.13149317958808 },
{ 0.98500175787242, -1.97000351574484, 0.98500175787242 },
{ 1.00000000000000, -1.96977855582618, 0.97022847566350 },
},
{
37800,
{ 0.08717879977844, -0.01000374016172, -0.06265852122368,
-0.01119328800950, -0.00114279372960, 0.02081333954769,
-0.01603261863207, 0.01936763028546, 0.00760044736442,
-0.00303979112271, -0.00075088605788 },
{ 1.00000000000000, -2.62816311472146, 3.53734535817992,
-3.81003448678921, 3.91291636730132, -3.53518605896288,
2.71356866157873, -1.86723311846592, 1.12075382367659,
-0.48574086886890, 0.11330544663849 },
{ 0.98252400815195, -1.96504801630391, 0.98252400815195 },
{ 1.00000000000000, -1.96474258269041, 0.96535344991740 },
},
{
32000,
{ 0.15457299681924, -0.09331049056315, -0.06247880153653,
0.02163541888798, -0.05588393329856, 0.04781476674921,
0.00222312597743, 0.03174092540049, -0.01390589421898,
0.00651420667831, -0.00881362733839 },
{ 1.00000000000000, -2.37898834973084, 2.84868151156327,
-2.64577170229825, 2.23697657451713, -1.67148153367602,
1.00595954808547, -0.45953458054983, 0.16378164858596,
-0.05032077717131, 0.02347897407020 },
{ 0.97938932735214, -1.95877865470428, 0.97938932735214 },
{ 1.00000000000000, -1.95835380975398, 0.95920349965459 },
},
{
24000,
{ 0.30296907319327, -0.22613988682123, -0.08587323730772,
0.03282930172664, -0.00915702933434, -0.02364141202522,
-0.00584456039913, 0.06276101321749, -0.00000828086748,
0.00205861885564, -0.02950134983287 },
{ 1.00000000000000, -1.61273165137247, 1.07977492259970,
-0.25656257754070, -0.16276719120440, -0.22638893773906,
0.39120800788284, -0.22138138954925, 0.04500235387352,
0.02005851806501, 0.00302439095741 },
{ 0.97531843204928, -1.95063686409857, 0.97531843204928 },
{ 1.00000000000000, -1.95002759149878, 0.95124613669835 },
},
{
22050,
{ 0.33642304856132, -0.25572241425570, -0.11828570177555,
0.11921148675203, -0.07834489609479, -0.00469977914380,
-0.00589500224440, 0.05724228140351, 0.00832043980773,
-0.01635381384540, -0.01760176568150 },
{ 1.00000000000000, -1.49858979367799, 0.87350271418188,
0.12205022308084, -0.80774944671438, 0.47854794562326,
-0.12453458140019, -0.04067510197014, 0.08333755284107,
-0.04237348025746, 0.02977207319925 },
{ 0.97316523498161, -1.94633046996323, 0.97316523498161 },
{ 1.00000000000000, -1.94561023566527, 0.94705070426118 },
},
{
18900,
{ 0.38524531015142, -0.27682212062067, -0.09980181488805,
0.09951486755646, -0.08934020156622, -0.00322369330199,
-0.00110329090689, 0.03784509844682, 0.01683906213303,
-0.01147039862572, -0.01941767987192 },
{ 1.00000000000000, -1.29708918404534, 0.90399339674203,
-0.29613799017877, -0.42326645916207, 0.37934887402200,
-0.37919795944938, 0.23410283284785, -0.03892971758879,
0.00403009552351, 0.03640166626278 },
{ 0.96535326815829, -1.93070653631658, 0.96535326815829 },
{ 1.00000000000000, -1.92950577983524, 0.93190729279793 },
},
{
16000,
{ 0.44915256608450, -0.14351757464547, -0.22784394429749,
-0.01419140100551, 0.04078262797139, -0.12398163381748,
0.04097565135648, 0.10478503600251, -0.01863887810927,
-0.03193428438915, 0.00541907748707 },
{ 1.00000000000000, -0.62820619233671, 0.29661783706366,
-0.37256372942400, 0.00213767857124, -0.42029820170918,
0.22199650564824, 0.00613424350682, 0.06747620744683,
0.05784820375801, 0.03222754072173 },
{ 0.96454515552826, -1.92909031105652, 0.96454515552826 },
{ 1.00000000000000, -1.92783286977036, 0.93034775234268 },
},
{
12000,
{ 0.56619470757641, -0.75464456939302, 0.16242137742230,
0.16744243493672, -0.18901604199609, 0.30931782841830,
-0.27562961986224, 0.00647310677246, 0.08647503780351,
-0.03788984554840, -0.00588215443421 },
{ 1.00000000000000, -1.04800335126349, 0.29156311971249,
-0.26806001042947, 0.00819999645858, 0.45054734505008,
-0.33032403314006, 0.06739368333110, -0.04784254229033,
0.01639907836189, 0.01807364323573 },
{ 0.96009142950541, -1.92018285901082, 0.96009142950541 },
{ 1.00000000000000, -1.91858953033784, 0.92177618768381 },
},
{
11025,
{ 0.58100494960553, -0.53174909058578, -0.14289799034253,
0.17520704835522, 0.02377945217615, 0.15558449135573,
-0.25344790059353, 0.01628462406333, 0.06920467763959,
-0.03721611395801, -0.00749618797172 },
{ 1.00000000000000, -0.51035327095184, -0.31863563325245,
-0.20256413484477, 0.14728154134330, 0.38952639978999,
-0.23313271880868, -0.05246019024463, -0.02505961724053,
0.02442357316099, 0.01818801111503 },
{ 0.95856916599601, -1.91713833199203, 0.95856916599601 },
{ 1.00000000000000, -1.91542108074780, 0.91885558323625 },
},
{
8000,
{ 0.53648789255105, -0.42163034350696, -0.00275953611929,
0.04267842219415, -0.10214864179676, 0.14590772289388,
-0.02459864859345, -0.11202315195388, -0.04060034127000,
0.04788665548180, -0.02217936801134 },
{ 1.00000000000000, -0.25049871956020, -0.43193942311114,
-0.03424681017675, -0.04678328784242, 0.26408300200955,
0.15113130533216, -0.17556493366449, -0.18823009262115,
0.05477720428674, 0.04704409688120 },
{ 0.94597685600279, -1.89195371200558, 0.94597685600279 },
{ 1.00000000000000, -1.88903307939452, 0.89487434461664 },
},
};
 
typedef struct ReplayGainContext {
uint32_t histogram[HISTOGRAM_SLOTS];
float peak;
int yule_hist_i, butter_hist_i;
const double *yule_coeff_a;
const double *yule_coeff_b;
const double *butter_coeff_a;
const double *butter_coeff_b;
float yule_hist_a[256];
float yule_hist_b[256];
float butter_hist_a[256];
float butter_hist_b[256];
} ReplayGainContext;
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layout = NULL;
int i;
 
ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
ff_set_common_formats(ctx, formats);
ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO);
ff_set_common_channel_layouts(ctx, layout);
 
formats = NULL;
for (i = 0; i < FF_ARRAY_ELEMS(freqinfos); i++)
ff_add_format(&formats, freqinfos[i].sample_rate);
ff_set_common_samplerates(ctx, formats);
 
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
ReplayGainContext *s = ctx->priv;
int i;
 
for (i = 0; i < FF_ARRAY_ELEMS(freqinfos); i++) {
if (freqinfos[i].sample_rate == inlink->sample_rate)
break;
}
av_assert0(i < FF_ARRAY_ELEMS(freqinfos));
 
s->yule_coeff_a = freqinfos[i].AYule;
s->yule_coeff_b = freqinfos[i].BYule;
s->butter_coeff_a = freqinfos[i].AButter;
s->butter_coeff_b = freqinfos[i].BButter;
 
s->yule_hist_i = 20;
s->butter_hist_i = 4;
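/* feed the filter in 50 ms chunks (sample_rate / 20), the RMS window
* size used by the ReplayGain proposal */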
inlink->partial_buf_size =
inlink->min_samples =
inlink->max_samples = inlink->sample_rate / 20;
 
return 0;
}
 
/*
* Update largest absolute sample value.
*/
static void calc_stereo_peak(const float *samples, int nb_samples,
float *peak_p)
{
float peak = 0.0;
 
while (nb_samples--) {
if (samples[0] > peak)
peak = samples[0];
else if (-samples[0] > peak)
peak = -samples[0];
 
if (samples[1] > peak)
peak = samples[1];
else if (-samples[1] > peak)
peak = -samples[1];
 
samples += 2;
}
 
*peak_p = FFMAX(peak, *peak_p);
}
 
/*
* Calculate stereo RMS level. Minimum value is about -100 dB for
* digital silence. The 90 dB offset is to compensate for the
* normalized float range and 3 dB is for stereo samples.
*/
static double calc_stereo_rms(const float *samples, int nb_samples)
{
int count = nb_samples;
double sum = 1e-16;
 
while (count--) {
sum += samples[0] * samples[0] + samples[1] * samples[1];
samples += 2;
}
 
return 10 * log10(sum / nb_samples) + 90.0 - 3.0;
}
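/*
* Worked example for calc_stereo_rms() above: a full-scale stereo square
* wave (every sample +/-1.0) gives sum = 2 * nb_samples, hence
* 10 * log10(2) + 87 = about 90 dB, while digital silence leaves only the
* 1e-16 guard term, roughly -100 dB for typical window lengths.
*/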
 
/*
* Optimized implementation of 2nd-order IIR stereo filter.
*/
static void butter_filter_stereo_samples(ReplayGainContext *s,
float *samples, int nb_samples)
{
const double *coeff_a = s->butter_coeff_a;
const double *coeff_b = s->butter_coeff_b;
float *hist_a = s->butter_hist_a;
float *hist_b = s->butter_hist_b;
double left, right;
int i, j;
 
i = s->butter_hist_i;
 
// If filter history is very small magnitude, clear it completely
// to prevent denormals from rattling around in there forever
// (slowing us down).
 
for (j = -4; j < 0; ++j)
if (fabs(hist_a[i + j]) > 1e-10 || fabs(hist_b[i + j]) > 1e-10)
break;
 
if (!j) {
memset(s->butter_hist_a, 0, sizeof(s->butter_hist_a));
memset(s->butter_hist_b, 0, sizeof(s->butter_hist_b));
}
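/* The loop below evaluates a direct-form-I biquad per channel:
*   y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]
* with left and right interleaved in the history buffers, which is why the
* taps step by 2 (i - 2, i - 4) rather than by 1. */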
 
while (nb_samples--) {
left = (hist_b[i ] = samples[0]) * coeff_b[0];
right = (hist_b[i + 1] = samples[1]) * coeff_b[0];
left += hist_b[i - 2] * coeff_b[1] - hist_a[i - 2] * coeff_a[1];
right += hist_b[i - 1] * coeff_b[1] - hist_a[i - 1] * coeff_a[1];
left += hist_b[i - 4] * coeff_b[2] - hist_a[i - 4] * coeff_a[2];
right += hist_b[i - 3] * coeff_b[2] - hist_a[i - 3] * coeff_a[2];
samples[0] = hist_a[i ] = (float) left;
samples[1] = hist_a[i + 1] = (float) right;
samples += 2;
 
if ((i += 2) == 256) {
memcpy(hist_a, hist_a + 252, sizeof(*hist_a) * 4);
memcpy(hist_b, hist_b + 252, sizeof(*hist_b) * 4);
i = 4;
}
}
 
s->butter_hist_i = i;
}
 
/*
* Optimized implementation of 10th-order IIR stereo filter.
*/
static void yule_filter_stereo_samples(ReplayGainContext *s, const float *src,
float *dst, int nb_samples)
{
const double *coeff_a = s->yule_coeff_a;
const double *coeff_b = s->yule_coeff_b;
float *hist_a = s->yule_hist_a;
float *hist_b = s->yule_hist_b;
double left, right;
int i, j;
 
i = s->yule_hist_i;
 
// If filter history is very small magnitude, clear it completely to
// prevent denormals from rattling around in there forever
// (slowing us down).
 
for (j = -20; j < 0; ++j)
if (fabs(hist_a[i + j]) > 1e-10 || fabs(hist_b[i + j]) > 1e-10)
break;
 
if (!j) {
memset(s->yule_hist_a, 0, sizeof(s->yule_hist_a));
memset(s->yule_hist_b, 0, sizeof(s->yule_hist_b));
}
 
while (nb_samples--) {
left = (hist_b[i] = src[0]) * coeff_b[0];
right = (hist_b[i + 1] = src[1]) * coeff_b[0];
left += hist_b[i - 2] * coeff_b[ 1] - hist_a[i - 2] * coeff_a[1 ];
right += hist_b[i - 1] * coeff_b[ 1] - hist_a[i - 1] * coeff_a[1 ];
left += hist_b[i - 4] * coeff_b[ 2] - hist_a[i - 4] * coeff_a[2 ];
right += hist_b[i - 3] * coeff_b[ 2] - hist_a[i - 3] * coeff_a[2 ];
left += hist_b[i - 6] * coeff_b[ 3] - hist_a[i - 6] * coeff_a[3 ];
right += hist_b[i - 5] * coeff_b[ 3] - hist_a[i - 5] * coeff_a[3 ];
left += hist_b[i - 8] * coeff_b[ 4] - hist_a[i - 8] * coeff_a[4 ];
right += hist_b[i - 7] * coeff_b[ 4] - hist_a[i - 7] * coeff_a[4 ];
left += hist_b[i - 10] * coeff_b[ 5] - hist_a[i - 10] * coeff_a[5 ];
right += hist_b[i - 9] * coeff_b[ 5] - hist_a[i - 9] * coeff_a[5 ];
left += hist_b[i - 12] * coeff_b[ 6] - hist_a[i - 12] * coeff_a[6 ];
right += hist_b[i - 11] * coeff_b[ 6] - hist_a[i - 11] * coeff_a[6 ];
left += hist_b[i - 14] * coeff_b[ 7] - hist_a[i - 14] * coeff_a[7 ];
right += hist_b[i - 13] * coeff_b[ 7] - hist_a[i - 13] * coeff_a[7 ];
left += hist_b[i - 16] * coeff_b[ 8] - hist_a[i - 16] * coeff_a[8 ];
right += hist_b[i - 15] * coeff_b[ 8] - hist_a[i - 15] * coeff_a[8 ];
left += hist_b[i - 18] * coeff_b[ 9] - hist_a[i - 18] * coeff_a[9 ];
right += hist_b[i - 17] * coeff_b[ 9] - hist_a[i - 17] * coeff_a[9 ];
left += hist_b[i - 20] * coeff_b[10] - hist_a[i - 20] * coeff_a[10];
right += hist_b[i - 19] * coeff_b[10] - hist_a[i - 19] * coeff_a[10];
dst[0] = hist_a[i ] = (float)left;
dst[1] = hist_a[i + 1] = (float)right;
src += 2;
dst += 2;
 
if ((i += 2) == 256) {
memcpy(hist_a, hist_a + 236, sizeof(*hist_a) * 20);
memcpy(hist_b, hist_b + 236, sizeof(*hist_b) * 20);
i = 20;
}
}
 
s->yule_hist_i = i;
}
 
/*
* Calculate the ReplayGain value from the specified loudness histogram;
* clip to -24 / +64 dB.
*/
static float calc_replaygain(uint32_t *histogram)
{
uint32_t loud_count = 0, total_windows = 0;
float gain;
int i;
 
for (i = 0; i < HISTOGRAM_SLOTS; i++)
total_windows += histogram[i];

while (i--)
if ((loud_count += histogram[i]) * 20 >= total_windows)
break;
 
gain = (float)(64.54 - i / 100.0);
 
return av_clipf(gain, -24.0, 64.0);
}
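/*
* Worked example: the backwards scan above stops at the loudness exceeded
* by the loudest 5% of windows (loud_count * 20 >= total_windows). If that
* slot is i = 8454, i.e. 84.54 dB, the suggested gain is
* 64.54 - 8454 / 100 = -20.0 dB relative to the ReplayGain reference level.
*/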
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
ReplayGainContext *s = ctx->priv;
uint32_t level;
AVFrame *out;
 
out = ff_get_audio_buffer(inlink, in->nb_samples);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
 
calc_stereo_peak((float *)in->data[0],
in->nb_samples, &s->peak);
yule_filter_stereo_samples(s, (const float *)in->data[0],
(float *)out->data[0],
out->nb_samples);
butter_filter_stereo_samples(s, (float *)out->data[0],
out->nb_samples);
level = (uint32_t)floor(100 * calc_stereo_rms((float *)out->data[0],
out->nb_samples));
level = av_clip(level, 0, HISTOGRAM_SLOTS - 1);
 
s->histogram[level]++;
 
av_frame_free(&out);
return ff_filter_frame(outlink, in);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
ReplayGainContext *s = ctx->priv;
float gain = calc_replaygain(s->histogram);
 
av_log(ctx, AV_LOG_INFO, "track_gain = %+.2f dB\n", gain);
av_log(ctx, AV_LOG_INFO, "track_peak = %.6f\n", s->peak);
}
 
static const AVFilterPad replaygain_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad replaygain_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_replaygain = {
.name = "replaygain",
.description = NULL_IF_CONFIG_SMALL("ReplayGain scanner."),
.query_formats = query_formats,
.uninit = uninit,
.priv_size = sizeof(ReplayGainContext),
.inputs = replaygain_inputs,
.outputs = replaygain_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_resample.c
0,0 → 1,327
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* sample format and channel layout conversion audio filter
*/
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
 
#include "libavresample/avresample.h"
 
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
typedef struct ResampleContext {
const AVClass *class;
AVAudioResampleContext *avr;
AVDictionary *options;
 
int64_t next_pts;
 
/* set by filter_frame() to signal an output frame to request_frame() */
int got_output;
} ResampleContext;
 
static av_cold int init(AVFilterContext *ctx, AVDictionary **opts)
{
ResampleContext *s = ctx->priv;
const AVClass *avr_class = avresample_get_class();
AVDictionaryEntry *e = NULL;
 
while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
if (av_opt_find(&avr_class, e->key, NULL, 0,
AV_OPT_SEARCH_FAKE_OBJ | AV_OPT_SEARCH_CHILDREN))
av_dict_set(&s->options, e->key, e->value, 0);
}
 
e = NULL;
while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
av_dict_set(opts, e->key, NULL, 0);
 
/* do not allow the user to override basic format options */
av_dict_set(&s->options, "in_channel_layout", NULL, 0);
av_dict_set(&s->options, "out_channel_layout", NULL, 0);
av_dict_set(&s->options, "in_sample_fmt", NULL, 0);
av_dict_set(&s->options, "out_sample_fmt", NULL, 0);
av_dict_set(&s->options, "in_sample_rate", NULL, 0);
av_dict_set(&s->options, "out_sample_rate", NULL, 0);
 
return 0;
}
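/*
* Any remaining option known to avresample (e.g. "filter_size" or
* "linear_interp") is forwarded verbatim in config_output(); the six basic
* in/out format options are always derived from the filter links instead,
* hence the deletions above.
*/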
 
static av_cold void uninit(AVFilterContext *ctx)
{
ResampleContext *s = ctx->priv;
 
if (s->avr) {
avresample_close(s->avr);
avresample_free(&s->avr);
}
av_dict_free(&s->options);
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
 
AVFilterFormats *in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
AVFilterFormats *out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
AVFilterFormats *in_samplerates = ff_all_samplerates();
AVFilterFormats *out_samplerates = ff_all_samplerates();
AVFilterChannelLayouts *in_layouts = ff_all_channel_layouts();
AVFilterChannelLayouts *out_layouts = ff_all_channel_layouts();
 
ff_formats_ref(in_formats, &inlink->out_formats);
ff_formats_ref(out_formats, &outlink->in_formats);
 
ff_formats_ref(in_samplerates, &inlink->out_samplerates);
ff_formats_ref(out_samplerates, &outlink->in_samplerates);
 
ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts);
ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts);
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = ctx->inputs[0];
ResampleContext *s = ctx->priv;
char buf1[64], buf2[64];
int ret;
 
if (s->avr) {
avresample_close(s->avr);
avresample_free(&s->avr);
}
 
if (inlink->channel_layout == outlink->channel_layout &&
inlink->sample_rate == outlink->sample_rate &&
(inlink->format == outlink->format ||
(av_get_channel_layout_nb_channels(inlink->channel_layout) == 1 &&
av_get_channel_layout_nb_channels(outlink->channel_layout) == 1 &&
av_get_planar_sample_fmt(inlink->format) ==
av_get_planar_sample_fmt(outlink->format))))
return 0;
 
if (!(s->avr = avresample_alloc_context()))
return AVERROR(ENOMEM);
 
if (s->options) {
AVDictionaryEntry *e = NULL;
while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
av_log(ctx, AV_LOG_VERBOSE, "lavr option: %s=%s\n", e->key, e->value);
 
av_opt_set_dict(s->avr, &s->options);
}
 
av_opt_set_int(s->avr, "in_channel_layout", inlink ->channel_layout, 0);
av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0);
av_opt_set_int(s->avr, "in_sample_fmt", inlink ->format, 0);
av_opt_set_int(s->avr, "out_sample_fmt", outlink->format, 0);
av_opt_set_int(s->avr, "in_sample_rate", inlink ->sample_rate, 0);
av_opt_set_int(s->avr, "out_sample_rate", outlink->sample_rate, 0);
 
if ((ret = avresample_open(s->avr)) < 0)
return ret;
 
outlink->time_base = (AVRational){ 1, outlink->sample_rate };
s->next_pts = AV_NOPTS_VALUE;
 
av_get_channel_layout_string(buf1, sizeof(buf1),
-1, inlink ->channel_layout);
av_get_channel_layout_string(buf2, sizeof(buf2),
-1, outlink->channel_layout);
av_log(ctx, AV_LOG_VERBOSE,
"fmt:%s srate:%d cl:%s -> fmt:%s srate:%d cl:%s\n",
av_get_sample_fmt_name(inlink ->format), inlink ->sample_rate, buf1,
av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf2);
 
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
ResampleContext *s = ctx->priv;
int ret = 0;
 
s->got_output = 0;
while (ret >= 0 && !s->got_output)
ret = ff_request_frame(ctx->inputs[0]);
 
/* flush the lavr delay buffer */
if (ret == AVERROR_EOF && s->avr) {
AVFrame *frame;
int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
outlink->sample_rate,
ctx->inputs[0]->sample_rate,
AV_ROUND_UP);
 
if (!nb_samples)
return ret;
 
frame = ff_get_audio_buffer(outlink, nb_samples);
if (!frame)
return AVERROR(ENOMEM);
 
ret = avresample_convert(s->avr, frame->extended_data,
frame->linesize[0], nb_samples,
NULL, 0, 0);
if (ret <= 0) {
av_frame_free(&frame);
return (ret == 0) ? AVERROR_EOF : ret;
}
 
frame->pts = s->next_pts;
return ff_filter_frame(outlink, frame);
}
return ret;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
ResampleContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int ret;
 
if (s->avr) {
AVFrame *out;
int delay, nb_samples;
 
/* maximum possible samples lavr can output */
delay = avresample_get_delay(s->avr);
nb_samples = av_rescale_rnd(in->nb_samples + delay,
outlink->sample_rate, inlink->sample_rate,
AV_ROUND_UP);
 
out = ff_get_audio_buffer(outlink, nb_samples);
if (!out) {
ret = AVERROR(ENOMEM);
goto fail;
}
 
ret = avresample_convert(s->avr, out->extended_data, out->linesize[0],
nb_samples, in->extended_data, in->linesize[0],
in->nb_samples);
if (ret <= 0) {
av_frame_free(&out);
if (ret < 0)
goto fail;
}
 
av_assert0(!avresample_available(s->avr));
 
if (s->next_pts == AV_NOPTS_VALUE) {
if (in->pts == AV_NOPTS_VALUE) {
av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
"assuming 0.\n");
s->next_pts = 0;
} else
s->next_pts = av_rescale_q(in->pts, inlink->time_base,
outlink->time_base);
}
 
if (ret > 0) {
out->nb_samples = ret;
if (in->pts != AV_NOPTS_VALUE) {
out->pts = av_rescale_q(in->pts, inlink->time_base,
outlink->time_base) -
av_rescale(delay, outlink->sample_rate,
inlink->sample_rate);
} else
out->pts = s->next_pts;
 
s->next_pts = out->pts + out->nb_samples;
 
ret = ff_filter_frame(outlink, out);
s->got_output = 1;
}
 
fail:
av_frame_free(&in);
} else {
in->format = outlink->format;
ret = ff_filter_frame(outlink, in);
s->got_output = 1;
}
 
return ret;
}
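/*
* In the code above, e.g. converting 44100 -> 48000 Hz: an input frame of
* 1024 samples with d samples of resampler delay may yield up to
* ceil((1024 + d) * 48000 / 44100) output samples; out->pts is the rescaled
* input pts minus that delay expressed in output samples, so consecutive
* frames stay contiguous (next_pts = pts + nb_samples).
*/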
 
static const AVClass *resample_child_class_next(const AVClass *prev)
{
return prev ? NULL : avresample_get_class();
}
 
static void *resample_child_next(void *obj, void *prev)
{
ResampleContext *s = obj;
return prev ? NULL : s->avr;
}
 
static const AVClass resample_class = {
.class_name = "resample",
.item_name = av_default_item_name,
.version = LIBAVUTIL_VERSION_INT,
.child_class_next = resample_child_class_next,
.child_next = resample_child_next,
};
 
static const AVFilterPad avfilter_af_resample_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_af_resample_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
.request_frame = request_frame
},
{ NULL }
};
 
AVFilter avfilter_af_resample = {
.name = "resample",
.description = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."),
.priv_size = sizeof(ResampleContext),
.priv_class = &resample_class,
.init_dict = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = avfilter_af_resample_inputs,
.outputs = avfilter_af_resample_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_silencedetect.c
0,0 → 1,212
/*
* Copyright (c) 2012 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Audio silence detector
*/
 
#include <float.h> /* DBL_MAX */
 
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "audio.h"
#include "formats.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct SilenceDetectContext {
const AVClass *class;
double noise; ///< noise amplitude ratio
double duration; ///< minimum duration of silence until notification
int64_t nb_null_samples; ///< current number of continuous zero samples
int64_t start; ///< if silence is detected, this value contains the time of the first zero sample
int last_sample_rate; ///< last sample rate to check for sample rate changes
 
void (*silencedetect)(struct SilenceDetectContext *s, AVFrame *insamples,
int nb_samples, int64_t nb_samples_notify,
AVRational time_base);
} SilenceDetectContext;
 
#define OFFSET(x) offsetof(SilenceDetectContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
static const AVOption silencedetect_options[] = {
{ "n", "set noise tolerance", OFFSET(noise), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS },
{ "noise", "set noise tolerance", OFFSET(noise), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS },
{ "d", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS },
{ "duration", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(silencedetect);
 
static char *get_metadata_val(AVFrame *insamples, const char *key)
{
AVDictionaryEntry *e = av_dict_get(insamples->metadata, key, NULL, 0);
return e && e->value ? e->value : NULL;
}
 
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples,
int is_silence, int64_t nb_samples_notify,
AVRational time_base)
{
if (is_silence) {
if (!s->start) {
s->nb_null_samples++;
if (s->nb_null_samples >= nb_samples_notify) {
s->start = insamples->pts - (int64_t)(s->duration / av_q2d(time_base) + .5);
av_dict_set(&insamples->metadata, "lavfi.silence_start",
av_ts2timestr(s->start, &time_base), 0);
av_log(s, AV_LOG_INFO, "silence_start: %s\n",
get_metadata_val(insamples, "lavfi.silence_start"));
}
}
} else {
if (s->start) {
av_dict_set(&insamples->metadata, "lavfi.silence_end",
av_ts2timestr(insamples->pts, &time_base), 0);
av_dict_set(&insamples->metadata, "lavfi.silence_duration",
av_ts2timestr(insamples->pts - s->start, &time_base), 0);
av_log(s, AV_LOG_INFO,
"silence_end: %s | silence_duration: %s\n",
get_metadata_val(insamples, "lavfi.silence_end"),
get_metadata_val(insamples, "lavfi.silence_duration"));
}
s->nb_null_samples = s->start = 0;
}
}
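 
/* Timing sketch: with an assumed time base of 1/44100 and the default
 * duration of 2 s, the start reported above is pushed back by
 * s->duration / av_q2d((AVRational){1,44100}) = 2 * 44100 = 88200 ticks,
 * i.e. from the notification point back to the first silent sample. */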
 
#define SILENCE_DETECT(name, type) \
static void silencedetect_##name(SilenceDetectContext *s, AVFrame *insamples, \
int nb_samples, int64_t nb_samples_notify, \
AVRational time_base) \
{ \
const type *p = (const type *)insamples->data[0]; \
const type noise = s->noise; \
int i; \
\
for (i = 0; i < nb_samples; i++, p++) \
update(s, insamples, *p < noise && *p > -noise, \
nb_samples_notify, time_base); \
}
 
SILENCE_DETECT(dbl, double)
SILENCE_DETECT(flt, float)
SILENCE_DETECT(s32, int32_t)
SILENCE_DETECT(s16, int16_t)
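 
/* For reference, the template above expands for the s16 case to roughly: */
#if 0
static void silencedetect_s16(SilenceDetectContext *s, AVFrame *insamples,
                              int nb_samples, int64_t nb_samples_notify,
                              AVRational time_base)
{
    const int16_t *p = (const int16_t *)insamples->data[0];
    const int16_t noise = s->noise; /* pre-scaled by INT16_MAX in config_input() */
    int i;

    for (i = 0; i < nb_samples; i++, p++)
        update(s, insamples, *p < noise && *p > -noise,
               nb_samples_notify, time_base);
}
#endif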
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
SilenceDetectContext *s = ctx->priv;
 
switch (inlink->format) {
case AV_SAMPLE_FMT_DBL: s->silencedetect = silencedetect_dbl; break;
case AV_SAMPLE_FMT_FLT: s->silencedetect = silencedetect_flt; break;
case AV_SAMPLE_FMT_S32:
s->noise *= INT32_MAX;
s->silencedetect = silencedetect_s32;
break;
case AV_SAMPLE_FMT_S16:
s->noise *= INT16_MAX;
s->silencedetect = silencedetect_s16;
break;
}
 
return 0;
}
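 
/* Threshold sketch: with the default noise ratio of 0.001 and s16 input, the
 * comparison value becomes 0.001 * INT16_MAX = 32.767, so samples strictly
 * inside (-32.767, 32.767) are counted as silent. */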
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
SilenceDetectContext *s = inlink->dst->priv;
const int nb_channels = inlink->channels;
const int srate = inlink->sample_rate;
const int nb_samples = insamples->nb_samples * nb_channels;
const int64_t nb_samples_notify = srate * s->duration * nb_channels;
 
// scale number of null samples to the new sample rate
if (s->last_sample_rate && s->last_sample_rate != srate)
s->nb_null_samples = srate * s->nb_null_samples / s->last_sample_rate;
s->last_sample_rate = srate;
 
// TODO: document metadata
s->silencedetect(s, insamples, nb_samples, nb_samples_notify,
inlink->time_base);
 
return ff_filter_frame(inlink->dst->outputs[0], insamples);
}
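 
/* Rescaling sketch (assumed rates): if 10000 null samples were counted at
 * 44100 Hz and the stream switches to 48000 Hz, the counter becomes
 * 48000 * 10000 / 44100 = 10884, keeping the elapsed silence duration
 * consistent across the rate change. */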
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layouts = NULL;
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_DBL,
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE
};
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, layouts);
 
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_samplerates(ctx, formats);
 
return 0;
}
 
static const AVFilterPad silencedetect_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad silencedetect_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_silencedetect = {
.name = "silencedetect",
.description = NULL_IF_CONFIG_SMALL("Detect silence."),
.priv_size = sizeof(SilenceDetectContext),
.query_formats = query_formats,
.inputs = silencedetect_inputs,
.outputs = silencedetect_outputs,
.priv_class = &silencedetect_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_volume.c
0,0 → 1,300
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* audio volume filter
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "af_volume.h"
 
static const char *precision_str[] = {
"fixed", "float", "double"
};
 
#define OFFSET(x) offsetof(VolumeContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption volume_options[] = {
{ "volume", "set volume adjustment",
OFFSET(volume), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 0, 0x7fffff, A|F },
{ "precision", "select mathematical precision",
OFFSET(precision), AV_OPT_TYPE_INT, { .i64 = PRECISION_FLOAT }, PRECISION_FIXED, PRECISION_DOUBLE, A|F, "precision" },
{ "fixed", "select 8-bit fixed-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A|F, "precision" },
{ "float", "select 32-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FLOAT }, INT_MIN, INT_MAX, A|F, "precision" },
{ "double", "select 64-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_DOUBLE }, INT_MIN, INT_MAX, A|F, "precision" },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(volume);
 
static av_cold int init(AVFilterContext *ctx)
{
VolumeContext *vol = ctx->priv;
 
if (vol->precision == PRECISION_FIXED) {
vol->volume_i = (int)(vol->volume * 256 + 0.5);
vol->volume = vol->volume_i / 256.0;
av_log(ctx, AV_LOG_VERBOSE, "volume:(%d/256)(%f)(%1.2fdB) precision:fixed\n",
vol->volume_i, vol->volume, 20.0*log(vol->volume)/M_LN10);
} else {
av_log(ctx, AV_LOG_VERBOSE, "volume:(%f)(%1.2fdB) precision:%s\n",
vol->volume, 20.0*log(vol->volume)/M_LN10,
precision_str[vol->precision]);
}
 
return 0;
}
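 
/* Rounding sketch for the fixed-point path: a requested volume of 0.3 maps
 * to volume_i = (int)(0.3 * 256 + 0.5) = 77, which is re-derived as
 * 77 / 256.0 = 0.30078125, i.e. roughly -10.43 dB instead of the exact
 * -10.46 dB; the log message above makes this quantization visible. */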
 
static int query_formats(AVFilterContext *ctx)
{
VolumeContext *vol = ctx->priv;
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layouts;
static const enum AVSampleFormat sample_fmts[][7] = {
[PRECISION_FIXED] = {
AV_SAMPLE_FMT_U8,
AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE
},
[PRECISION_FLOAT] = {
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE
},
[PRECISION_DOUBLE] = {
AV_SAMPLE_FMT_DBL,
AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
}
};
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, layouts);
 
formats = ff_make_format_list(sample_fmts[vol->precision]);
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_samplerates(ctx, formats);
 
return 0;
}
 
static inline void scale_samples_u8(uint8_t *dst, const uint8_t *src,
int nb_samples, int volume)
{
int i;
for (i = 0; i < nb_samples; i++)
dst[i] = av_clip_uint8(((((int64_t)src[i] - 128) * volume + 128) >> 8) + 128);
}
 
static inline void scale_samples_u8_small(uint8_t *dst, const uint8_t *src,
int nb_samples, int volume)
{
int i;
for (i = 0; i < nb_samples; i++)
dst[i] = av_clip_uint8((((src[i] - 128) * volume + 128) >> 8) + 128);
}
 
static inline void scale_samples_s16(uint8_t *dst, const uint8_t *src,
int nb_samples, int volume)
{
int i;
int16_t *smp_dst = (int16_t *)dst;
const int16_t *smp_src = (const int16_t *)src;
for (i = 0; i < nb_samples; i++)
smp_dst[i] = av_clip_int16(((int64_t)smp_src[i] * volume + 128) >> 8);
}
 
static inline void scale_samples_s16_small(uint8_t *dst, const uint8_t *src,
int nb_samples, int volume)
{
int i;
int16_t *smp_dst = (int16_t *)dst;
const int16_t *smp_src = (const int16_t *)src;
for (i = 0; i < nb_samples; i++)
smp_dst[i] = av_clip_int16((smp_src[i] * volume + 128) >> 8);
}
 
static inline void scale_samples_s32(uint8_t *dst, const uint8_t *src,
int nb_samples, int volume)
{
int i;
int32_t *smp_dst = (int32_t *)dst;
const int32_t *smp_src = (const int32_t *)src;
for (i = 0; i < nb_samples; i++)
smp_dst[i] = av_clipl_int32((((int64_t)smp_src[i] * volume + 128) >> 8));
}
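 
/* Q8 fixed-point sketch: volume_i stores the gain as an 8.8 fixed-point
 * factor (256 == 1.0). For an assumed s16 sample of 1000 at volume 1.5
 * (volume_i = 384): (1000 * 384 + 128) >> 8 = 1500, then clipped to the
 * int16_t range; the "+ 128" rounds to nearest before the shift. */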
 
static av_cold void volume_init(VolumeContext *vol)
{
vol->samples_align = 1;
 
switch (av_get_packed_sample_fmt(vol->sample_fmt)) {
case AV_SAMPLE_FMT_U8:
if (vol->volume_i < 0x1000000)
vol->scale_samples = scale_samples_u8_small;
else
vol->scale_samples = scale_samples_u8;
break;
case AV_SAMPLE_FMT_S16:
if (vol->volume_i < 0x10000)
vol->scale_samples = scale_samples_s16_small;
else
vol->scale_samples = scale_samples_s16;
break;
case AV_SAMPLE_FMT_S32:
vol->scale_samples = scale_samples_s32;
break;
case AV_SAMPLE_FMT_FLT:
avpriv_float_dsp_init(&vol->fdsp, 0);
vol->samples_align = 4;
break;
case AV_SAMPLE_FMT_DBL:
avpriv_float_dsp_init(&vol->fdsp, 0);
vol->samples_align = 8;
break;
}
 
if (ARCH_X86)
ff_volume_init_x86(vol);
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
VolumeContext *vol = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
 
vol->sample_fmt = inlink->format;
vol->channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
vol->planes = av_sample_fmt_is_planar(inlink->format) ? vol->channels : 1;
 
volume_init(vol);
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
VolumeContext *vol = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int nb_samples = buf->nb_samples;
AVFrame *out_buf;
 
if (vol->volume == 1.0 || vol->volume_i == 256)
return ff_filter_frame(outlink, buf);
 
/* do volume scaling in-place if input buffer is writable */
if (av_frame_is_writable(buf)) {
out_buf = buf;
} else {
out_buf = ff_get_audio_buffer(inlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
av_frame_copy_props(out_buf, buf);
}
 
if (vol->precision != PRECISION_FIXED || vol->volume_i > 0) {
int p, plane_samples;
 
if (av_sample_fmt_is_planar(buf->format))
plane_samples = FFALIGN(nb_samples, vol->samples_align);
else
plane_samples = FFALIGN(nb_samples * vol->channels, vol->samples_align);
 
if (vol->precision == PRECISION_FIXED) {
for (p = 0; p < vol->planes; p++) {
vol->scale_samples(out_buf->extended_data[p],
buf->extended_data[p], plane_samples,
vol->volume_i);
}
} else if (av_get_packed_sample_fmt(vol->sample_fmt) == AV_SAMPLE_FMT_FLT) {
for (p = 0; p < vol->planes; p++) {
vol->fdsp.vector_fmul_scalar((float *)out_buf->extended_data[p],
(const float *)buf->extended_data[p],
vol->volume, plane_samples);
}
} else {
for (p = 0; p < vol->planes; p++) {
vol->fdsp.vector_dmul_scalar((double *)out_buf->extended_data[p],
(const double *)buf->extended_data[p],
vol->volume, plane_samples);
}
}
}
 
if (buf != out_buf)
av_frame_free(&buf);
 
return ff_filter_frame(outlink, out_buf);
}
 
static const AVFilterPad avfilter_af_volume_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_af_volume_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_af_volume = {
.name = "volume",
.description = NULL_IF_CONFIG_SMALL("Change input volume."),
.query_formats = query_formats,
.priv_size = sizeof(VolumeContext),
.priv_class = &volume_class,
.init = init,
.inputs = avfilter_af_volume_inputs,
.outputs = avfilter_af_volume_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/af_volume.h
0,0 → 1,55
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* audio volume filter
*/
 
#ifndef AVFILTER_AF_VOLUME_H
#define AVFILTER_AF_VOLUME_H
 
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
 
enum PrecisionType {
PRECISION_FIXED = 0,
PRECISION_FLOAT,
PRECISION_DOUBLE,
};
 
typedef struct VolumeContext {
const AVClass *class;
AVFloatDSPContext fdsp;
enum PrecisionType precision;
double volume;
int volume_i;
int channels;
int planes;
enum AVSampleFormat sample_fmt;
 
void (*scale_samples)(uint8_t *dst, const uint8_t *src, int nb_samples,
int volume);
int samples_align;
} VolumeContext;
 
void ff_volume_init_x86(VolumeContext *vol);
 
#endif /* AVFILTER_AF_VOLUME_H */
/contrib/sdk/sources/ffmpeg/libavfilter/af_volumedetect.c
0,0 → 1,159
/*
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/avassert.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct {
/**
* Number of samples at each PCM value.
* histogram[0x8000 + i] is the number of samples at value i.
* The extra element is there for symmetry.
*/
uint64_t histogram[0x10001];
} VolDetectContext;
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_NONE
};
AVFilterFormats *formats;
 
if (!(formats = ff_make_format_list(sample_fmts)))
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *samples)
{
AVFilterContext *ctx = inlink->dst;
VolDetectContext *vd = ctx->priv;
int64_t layout = samples->channel_layout;
int nb_samples = samples->nb_samples;
int nb_channels = av_get_channel_layout_nb_channels(layout);
int nb_planes = nb_channels;
int plane, i;
int16_t *pcm;
 
if (!av_sample_fmt_is_planar(samples->format)) {
nb_samples *= nb_channels;
nb_planes = 1;
}
for (plane = 0; plane < nb_planes; plane++) {
pcm = (int16_t *)samples->extended_data[plane];
for (i = 0; i < nb_samples; i++)
vd->histogram[pcm[i] + 0x8000]++;
}
 
return ff_filter_frame(inlink->dst->outputs[0], samples);
}
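 
/* Indexing sketch: pcm[i] + 0x8000 maps the s16 range onto the histogram,
 * so a sample of -32768 lands in histogram[0], 0 in histogram[0x8000] and
 * +32767 in histogram[0xffff]. */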
 
#define MAX_DB 91
 
static inline double logdb(uint64_t v)
{
double d = v / (double)(0x8000 * 0x8000);
if (!v)
return MAX_DB;
return log(d) * -4.3429448190325182765112891891660508229; /* -10/log(10) */
}
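 
/* Worked example: for an assumed peak sample of 0x4000 (half scale),
 * d = (0x4000 * 0x4000) / (double)(0x8000 * 0x8000) = 0.25 and
 * logdb(v) = log(0.25) * -10/log(10) ~= 6.02, so print_stats() below
 * reports max_volume: -6.0 dB. */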
 
static void print_stats(AVFilterContext *ctx)
{
VolDetectContext *vd = ctx->priv;
int i, max_volume, shift;
uint64_t nb_samples = 0, power = 0, nb_samples_shift = 0, sum = 0;
uint64_t histdb[MAX_DB + 1] = { 0 };
 
for (i = 0; i < 0x10000; i++)
nb_samples += vd->histogram[i];
av_log(ctx, AV_LOG_INFO, "n_samples: %"PRId64"\n", nb_samples);
if (!nb_samples)
return;
 
/* If nb_samples > 1<<34, there is a risk of overflow in the
multiplication or the sum: shift all histogram values to avoid that.
The total number of samples must be recomputed to avoid rounding
errors. */
shift = av_log2(nb_samples >> 33);
for (i = 0; i < 0x10000; i++) {
nb_samples_shift += vd->histogram[i] >> shift;
power += (i - 0x8000) * (i - 0x8000) * (vd->histogram[i] >> shift);
}
if (!nb_samples_shift)
return;
power = (power + nb_samples_shift / 2) / nb_samples_shift;
av_assert0(power <= 0x8000 * 0x8000);
av_log(ctx, AV_LOG_INFO, "mean_volume: %.1f dB\n", -logdb(power));
 
max_volume = 0x8000;
while (max_volume > 0 && !vd->histogram[0x8000 + max_volume] &&
!vd->histogram[0x8000 - max_volume])
max_volume--;
av_log(ctx, AV_LOG_INFO, "max_volume: %.1f dB\n", -logdb(max_volume * max_volume));
 
for (i = 0; i < 0x10000; i++)
histdb[(int)logdb((i - 0x8000) * (i - 0x8000))] += vd->histogram[i];
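/* the first loop skips empty buckets at the loud end (note the empty body);
 * the second prints buckets until 0.1% of all samples are covered */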
for (i = 0; i <= MAX_DB && !histdb[i]; i++);
for (; i <= MAX_DB && sum < nb_samples / 1000; i++) {
av_log(ctx, AV_LOG_INFO, "histogram_%ddb: %"PRId64"\n", i, histdb[i]);
sum += histdb[i];
}
}
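 
/* Overflow sketch: for an assumed nb_samples of 1 << 36, shift =
 * av_log2((1 << 36) >> 33) = av_log2(8) = 3, and the worst-case power sum is
 * then bounded by (1 << 33) * 0x8000 * 0x8000 = 1 << 63, which still fits in
 * the uint64_t accumulator. */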
 
static av_cold void uninit(AVFilterContext *ctx)
{
print_stats(ctx);
}
 
static const AVFilterPad volumedetect_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad volumedetect_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_volumedetect = {
.name = "volumedetect",
.description = NULL_IF_CONFIG_SMALL("Detect audio volume."),
.priv_size = sizeof(VolDetectContext),
.query_formats = query_formats,
.uninit = uninit,
.inputs = volumedetect_inputs,
.outputs = volumedetect_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/all_channel_layouts.inc
0,0 → 1,68
AV_CH_FRONT_CENTER,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY,
AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER,
AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
/contrib/sdk/sources/ffmpeg/libavfilter/allfilters.c
0,0 → 1,249
/*
* filter registration
* Copyright (c) 2008 Vitor Sessak
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avfilter.h"
#include "config.h"
#include "opencl_allkernels.h"
 
 
#define REGISTER_FILTER(X, x, y) \
{ \
extern AVFilter avfilter_##y##_##x; \
if (CONFIG_##X##_FILTER) \
avfilter_register(&avfilter_##y##_##x); \
}
 
#define REGISTER_FILTER_UNCONDITIONAL(x) \
{ \
extern AVFilter avfilter_##x; \
avfilter_register(&avfilter_##x); \
}
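 
/* Expansion sketch: REGISTER_FILTER(VOLUME, volume, af) becomes roughly: */
#if 0
{
    extern AVFilter avfilter_af_volume;
    if (CONFIG_VOLUME_FILTER) /* 0 or 1, from config.h */
        avfilter_register(&avfilter_af_volume);
}
#endif
/* so filters disabled at configure time reduce to dead code the compiler can
 * drop, while the extern declaration keeps the block compilable. */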
 
void avfilter_register_all(void)
{
static int initialized;
 
if (initialized)
return;
initialized = 1;
 
#if FF_API_ACONVERT_FILTER
REGISTER_FILTER(ACONVERT, aconvert, af);
#endif
REGISTER_FILTER(ADELAY, adelay, af);
REGISTER_FILTER(AECHO, aecho, af);
REGISTER_FILTER(AFADE, afade, af);
REGISTER_FILTER(AFORMAT, aformat, af);
REGISTER_FILTER(AINTERLEAVE, ainterleave, af);
REGISTER_FILTER(ALLPASS, allpass, af);
REGISTER_FILTER(AMERGE, amerge, af);
REGISTER_FILTER(AMIX, amix, af);
REGISTER_FILTER(ANULL, anull, af);
REGISTER_FILTER(APAD, apad, af);
REGISTER_FILTER(APERMS, aperms, af);
REGISTER_FILTER(APHASER, aphaser, af);
REGISTER_FILTER(ARESAMPLE, aresample, af);
REGISTER_FILTER(ASELECT, aselect, af);
REGISTER_FILTER(ASENDCMD, asendcmd, af);
REGISTER_FILTER(ASETNSAMPLES, asetnsamples, af);
REGISTER_FILTER(ASETPTS, asetpts, af);
REGISTER_FILTER(ASETRATE, asetrate, af);
REGISTER_FILTER(ASETTB, asettb, af);
REGISTER_FILTER(ASHOWINFO, ashowinfo, af);
REGISTER_FILTER(ASPLIT, asplit, af);
REGISTER_FILTER(ASTATS, astats, af);
REGISTER_FILTER(ASTREAMSYNC, astreamsync, af);
REGISTER_FILTER(ASYNCTS, asyncts, af);
REGISTER_FILTER(ATEMPO, atempo, af);
REGISTER_FILTER(ATRIM, atrim, af);
REGISTER_FILTER(AZMQ, azmq, af);
REGISTER_FILTER(BANDPASS, bandpass, af);
REGISTER_FILTER(BANDREJECT, bandreject, af);
REGISTER_FILTER(BASS, bass, af);
REGISTER_FILTER(BIQUAD, biquad, af);
REGISTER_FILTER(CHANNELMAP, channelmap, af);
REGISTER_FILTER(CHANNELSPLIT, channelsplit, af);
REGISTER_FILTER(COMPAND, compand, af);
REGISTER_FILTER(EARWAX, earwax, af);
REGISTER_FILTER(EBUR128, ebur128, af);
REGISTER_FILTER(EQUALIZER, equalizer, af);
REGISTER_FILTER(HIGHPASS, highpass, af);
REGISTER_FILTER(JOIN, join, af);
REGISTER_FILTER(LADSPA, ladspa, af);
REGISTER_FILTER(LOWPASS, lowpass, af);
REGISTER_FILTER(PAN, pan, af);
REGISTER_FILTER(REPLAYGAIN, replaygain, af);
REGISTER_FILTER(RESAMPLE, resample, af);
REGISTER_FILTER(SILENCEDETECT, silencedetect, af);
REGISTER_FILTER(TREBLE, treble, af);
REGISTER_FILTER(VOLUME, volume, af);
REGISTER_FILTER(VOLUMEDETECT, volumedetect, af);
 
REGISTER_FILTER(AEVALSRC, aevalsrc, asrc);
REGISTER_FILTER(ANULLSRC, anullsrc, asrc);
REGISTER_FILTER(FLITE, flite, asrc);
REGISTER_FILTER(SINE, sine, asrc);
 
REGISTER_FILTER(ANULLSINK, anullsink, asink);
 
REGISTER_FILTER(ALPHAEXTRACT, alphaextract, vf);
REGISTER_FILTER(ALPHAMERGE, alphamerge, vf);
REGISTER_FILTER(ASS, ass, vf);
REGISTER_FILTER(BBOX, bbox, vf);
REGISTER_FILTER(BLACKDETECT, blackdetect, vf);
REGISTER_FILTER(BLACKFRAME, blackframe, vf);
REGISTER_FILTER(BLEND, blend, vf);
REGISTER_FILTER(BOXBLUR, boxblur, vf);
REGISTER_FILTER(COLORBALANCE, colorbalance, vf);
REGISTER_FILTER(COLORCHANNELMIXER, colorchannelmixer, vf);
REGISTER_FILTER(COLORMATRIX, colormatrix, vf);
REGISTER_FILTER(COPY, copy, vf);
REGISTER_FILTER(CROP, crop, vf);
REGISTER_FILTER(CROPDETECT, cropdetect, vf);
REGISTER_FILTER(CURVES, curves, vf);
REGISTER_FILTER(DCTDNOIZ, dctdnoiz, vf);
REGISTER_FILTER(DECIMATE, decimate, vf);
REGISTER_FILTER(DELOGO, delogo, vf);
REGISTER_FILTER(DESHAKE, deshake, vf);
REGISTER_FILTER(DRAWBOX, drawbox, vf);
REGISTER_FILTER(DRAWGRID, drawgrid, vf);
REGISTER_FILTER(DRAWTEXT, drawtext, vf);
REGISTER_FILTER(EDGEDETECT, edgedetect, vf);
REGISTER_FILTER(EXTRACTPLANES, extractplanes, vf);
REGISTER_FILTER(FADE, fade, vf);
REGISTER_FILTER(FIELD, field, vf);
REGISTER_FILTER(FIELDMATCH, fieldmatch, vf);
REGISTER_FILTER(FIELDORDER, fieldorder, vf);
REGISTER_FILTER(FORMAT, format, vf);
REGISTER_FILTER(FPS, fps, vf);
REGISTER_FILTER(FRAMESTEP, framestep, vf);
REGISTER_FILTER(FREI0R, frei0r, vf);
REGISTER_FILTER(GEQ, geq, vf);
REGISTER_FILTER(GRADFUN, gradfun, vf);
REGISTER_FILTER(HALDCLUT, haldclut, vf);
REGISTER_FILTER(HFLIP, hflip, vf);
REGISTER_FILTER(HISTEQ, histeq, vf);
REGISTER_FILTER(HISTOGRAM, histogram, vf);
REGISTER_FILTER(HQDN3D, hqdn3d, vf);
REGISTER_FILTER(HUE, hue, vf);
REGISTER_FILTER(IDET, idet, vf);
REGISTER_FILTER(IL, il, vf);
REGISTER_FILTER(INTERLACE, interlace, vf);
REGISTER_FILTER(INTERLEAVE, interleave, vf);
REGISTER_FILTER(KERNDEINT, kerndeint, vf);
REGISTER_FILTER(LUT3D, lut3d, vf);
REGISTER_FILTER(LUT, lut, vf);
REGISTER_FILTER(LUTRGB, lutrgb, vf);
REGISTER_FILTER(LUTYUV, lutyuv, vf);
REGISTER_FILTER(MCDEINT, mcdeint, vf);
REGISTER_FILTER(MERGEPLANES, mergeplanes, vf);
REGISTER_FILTER(MP, mp, vf);
REGISTER_FILTER(MPDECIMATE, mpdecimate, vf);
REGISTER_FILTER(NEGATE, negate, vf);
REGISTER_FILTER(NOFORMAT, noformat, vf);
REGISTER_FILTER(NOISE, noise, vf);
REGISTER_FILTER(NULL, null, vf);
REGISTER_FILTER(OCV, ocv, vf);
REGISTER_FILTER(OVERLAY, overlay, vf);
REGISTER_FILTER(OWDENOISE, owdenoise, vf);
REGISTER_FILTER(PAD, pad, vf);
REGISTER_FILTER(PERMS, perms, vf);
REGISTER_FILTER(PERSPECTIVE, perspective, vf);
REGISTER_FILTER(PHASE, phase, vf);
REGISTER_FILTER(PIXDESCTEST, pixdesctest, vf);
REGISTER_FILTER(PP, pp, vf);
REGISTER_FILTER(PSNR, psnr, vf);
REGISTER_FILTER(PULLUP, pullup, vf);
REGISTER_FILTER(REMOVELOGO, removelogo, vf);
REGISTER_FILTER(ROTATE, rotate, vf);
REGISTER_FILTER(SAB, sab, vf);
REGISTER_FILTER(SCALE, scale, vf);
REGISTER_FILTER(SELECT, select, vf);
REGISTER_FILTER(SENDCMD, sendcmd, vf);
REGISTER_FILTER(SEPARATEFIELDS, separatefields, vf);
REGISTER_FILTER(SETDAR, setdar, vf);
REGISTER_FILTER(SETFIELD, setfield, vf);
REGISTER_FILTER(SETPTS, setpts, vf);
REGISTER_FILTER(SETSAR, setsar, vf);
REGISTER_FILTER(SETTB, settb, vf);
REGISTER_FILTER(SHOWINFO, showinfo, vf);
REGISTER_FILTER(SMARTBLUR, smartblur, vf);
REGISTER_FILTER(SPLIT, split, vf);
REGISTER_FILTER(SPP, spp, vf);
REGISTER_FILTER(STEREO3D, stereo3d, vf);
REGISTER_FILTER(SUBTITLES, subtitles, vf);
REGISTER_FILTER(SUPER2XSAI, super2xsai, vf);
REGISTER_FILTER(SWAPUV, swapuv, vf);
REGISTER_FILTER(TELECINE, telecine, vf);
REGISTER_FILTER(THUMBNAIL, thumbnail, vf);
REGISTER_FILTER(TILE, tile, vf);
REGISTER_FILTER(TINTERLACE, tinterlace, vf);
REGISTER_FILTER(TRANSPOSE, transpose, vf);
REGISTER_FILTER(TRIM, trim, vf);
REGISTER_FILTER(UNSHARP, unsharp, vf);
REGISTER_FILTER(VFLIP, vflip, vf);
REGISTER_FILTER(VIDSTABDETECT, vidstabdetect, vf);
REGISTER_FILTER(VIDSTABTRANSFORM, vidstabtransform, vf);
REGISTER_FILTER(VIGNETTE, vignette, vf);
REGISTER_FILTER(W3FDIF, w3fdif, vf);
REGISTER_FILTER(YADIF, yadif, vf);
REGISTER_FILTER(ZMQ, zmq, vf);
 
REGISTER_FILTER(CELLAUTO, cellauto, vsrc);
REGISTER_FILTER(COLOR, color, vsrc);
REGISTER_FILTER(FREI0R, frei0r_src, vsrc);
REGISTER_FILTER(HALDCLUTSRC, haldclutsrc, vsrc);
REGISTER_FILTER(LIFE, life, vsrc);
REGISTER_FILTER(MANDELBROT, mandelbrot, vsrc);
REGISTER_FILTER(MPTESTSRC, mptestsrc, vsrc);
REGISTER_FILTER(NULLSRC, nullsrc, vsrc);
REGISTER_FILTER(RGBTESTSRC, rgbtestsrc, vsrc);
REGISTER_FILTER(SMPTEBARS, smptebars, vsrc);
REGISTER_FILTER(SMPTEHDBARS, smptehdbars, vsrc);
REGISTER_FILTER(TESTSRC, testsrc, vsrc);
 
REGISTER_FILTER(NULLSINK, nullsink, vsink);
 
/* multimedia filters */
REGISTER_FILTER(AVECTORSCOPE, avectorscope, avf);
REGISTER_FILTER(CONCAT, concat, avf);
REGISTER_FILTER(SHOWSPECTRUM, showspectrum, avf);
REGISTER_FILTER(SHOWWAVES, showwaves, avf);
 
/* multimedia sources */
REGISTER_FILTER(AMOVIE, amovie, avsrc);
REGISTER_FILTER(MOVIE, movie, avsrc);
 
#if FF_API_AVFILTERBUFFER
REGISTER_FILTER_UNCONDITIONAL(vsink_ffbuffersink);
REGISTER_FILTER_UNCONDITIONAL(asink_ffabuffersink);
#endif
 
/* those filters are part of public or internal API => registered
* unconditionally */
REGISTER_FILTER_UNCONDITIONAL(asrc_abuffer);
REGISTER_FILTER_UNCONDITIONAL(vsrc_buffer);
REGISTER_FILTER_UNCONDITIONAL(asink_abuffer);
REGISTER_FILTER_UNCONDITIONAL(vsink_buffer);
REGISTER_FILTER_UNCONDITIONAL(af_afifo);
REGISTER_FILTER_UNCONDITIONAL(vf_fifo);
ff_opencl_register_filter_kernel_code_all();
}
/contrib/sdk/sources/ffmpeg/libavfilter/asink_anullsink.c
0,0 → 1,48
/*
* Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/internal.h"
#include "avfilter.h"
#include "internal.h"
 
static int null_filter_frame(AVFilterLink *link, AVFrame *frame)
{
av_frame_free(&frame);
return 0;
}
 
static const AVFilterPad avfilter_asink_anullsink_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = null_filter_frame,
},
{ NULL },
};
 
AVFilter avfilter_asink_anullsink = {
.name = "anullsink",
.description = NULL_IF_CONFIG_SMALL("Do absolutely nothing with the input audio."),
 
.priv_size = 0,
 
.inputs = avfilter_asink_anullsink_inputs,
.outputs = NULL,
};
/contrib/sdk/sources/ffmpeg/libavfilter/asrc_abuffer.h
0,0 → 1,91
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_ASRC_ABUFFER_H
#define AVFILTER_ASRC_ABUFFER_H
 
#include "avfilter.h"
 
/**
* @file
* memory buffer source for audio
*
* @deprecated use buffersrc.h instead.
*/
 
/**
* Queue an audio buffer to the audio buffer source.
*
* @param abuffersrc audio source buffer context
* @param data pointers to the samples planes
* @param linesize linesizes of each audio buffer plane
* @param nb_samples number of samples per channel
* @param sample_fmt sample format of the audio data
* @param ch_layout channel layout of the audio data
* @param planar flag to indicate if audio data is planar or packed
* @param pts presentation timestamp of the audio buffer
* @param flags unused
*
* @deprecated use av_buffersrc_add_ref() instead.
*/
attribute_deprecated
int av_asrc_buffer_add_samples(AVFilterContext *abuffersrc,
uint8_t *data[8], int linesize[8],
int nb_samples, int sample_rate,
int sample_fmt, int64_t ch_layout, int planar,
int64_t pts, int av_unused flags);
 
/**
* Queue an audio buffer to the audio buffer source.
*
* This is similar to av_asrc_buffer_add_samples(), but the samples
* are stored in a buffer with known size.
*
* @param abuffersrc audio source buffer context
* @param buf pointer to the samples data, packed is assumed
* @param size the size in bytes of the buffer, it must contain an
* integer number of samples
* @param sample_fmt sample format of the audio data
* @param ch_layout channel layout of the audio data
* @param pts presentation timestamp of the audio buffer
* @param flags unused
*
* @deprecated use av_buffersrc_add_ref() instead.
*/
attribute_deprecated
int av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc,
uint8_t *buf, int buf_size,
int sample_rate,
int sample_fmt, int64_t ch_layout, int planar,
int64_t pts, int av_unused flags);
 
/**
* Queue an audio buffer to the audio buffer source.
*
* @param abuffersrc audio source buffer context
* @param samplesref buffer ref to queue
* @param flags unused
*
* @deprecated use av_buffersrc_add_ref() instead.
*/
attribute_deprecated
int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc,
AVFilterBufferRef *samplesref,
int av_unused flags);
 
#endif /* AVFILTER_ASRC_ABUFFER_H */
/contrib/sdk/sources/ffmpeg/libavfilter/asrc_aevalsrc.c
0,0 → 1,242
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* eval audio source
*/
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
 
static const char * const var_names[] = {
"n", ///< number of frame
"t", ///< timestamp expressed in seconds
"s", ///< sample rate
NULL
};
 
enum var_name {
VAR_N,
VAR_T,
VAR_S,
VAR_VARS_NB
};
 
typedef struct {
const AVClass *class;
char *sample_rate_str;
int sample_rate;
int64_t chlayout;
char *chlayout_str;
int nb_channels;
int64_t pts;
AVExpr **expr;
char *exprs;
int nb_samples; ///< number of samples per requested frame
int64_t duration;
uint64_t n;
double var_values[VAR_VARS_NB];
} EvalContext;
 
#define OFFSET(x) offsetof(EvalContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption aevalsrc_options[]= {
{ "exprs", "set the '|'-separated list of channels expressions", OFFSET(exprs), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
{ "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
{ "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
{ "sample_rate", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "s", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "duration", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
{ "d", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
{ "channel_layout", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "c", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ NULL }
};
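 
/* Usage sketch (hypothetical command line): a stereo tone with 440 Hz on the
 * left channel and 880 Hz on the right could be generated with
 *   ffmpeg -f lavfi -i "aevalsrc=sin(440*2*PI*t)|sin(880*2*PI*t):c=stereo" out.wav
 * where 't' is the per-sample timestamp variable declared above. */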
 
AVFILTER_DEFINE_CLASS(aevalsrc);
 
static av_cold int init(AVFilterContext *ctx)
{
EvalContext *eval = ctx->priv;
char *args1 = av_strdup(eval->exprs);
char *expr, *buf;
int ret;
 
if (!args1) {
av_log(ctx, AV_LOG_ERROR, "Channels expressions list is empty\n");
ret = eval->exprs ? AVERROR(ENOMEM) : AVERROR(EINVAL);
goto end;
}
 
/* parse expressions */
buf = args1;
while ((expr = av_strtok(buf, "|", &buf))) {
if (!av_dynarray2_add((void **)&eval->expr, &eval->nb_channels, sizeof(*eval->expr), NULL)) {
ret = AVERROR(ENOMEM);
goto end;
}
ret = av_expr_parse(&eval->expr[eval->nb_channels - 1], expr, var_names,
NULL, NULL, NULL, NULL, 0, ctx);
if (ret < 0)
goto end;
}
 
if (eval->chlayout_str) {
int n;
ret = ff_parse_channel_layout(&eval->chlayout, eval->chlayout_str, ctx);
if (ret < 0)
goto end;
 
n = av_get_channel_layout_nb_channels(eval->chlayout);
if (n != eval->nb_channels) {
av_log(ctx, AV_LOG_ERROR,
"Mismatch between the specified number of channels '%d' "
"and the number of channels '%d' in the specified channel layout '%s'\n",
eval->nb_channels, n, eval->chlayout_str);
ret = AVERROR(EINVAL);
goto end;
}
} else {
/* guess channel layout from nb expressions/channels */
eval->chlayout = av_get_default_channel_layout(eval->nb_channels);
if (!eval->chlayout && eval->nb_channels <= 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid number of channels '%d' provided\n",
eval->nb_channels);
ret = AVERROR(EINVAL);
goto end;
}
}
 
if ((ret = ff_parse_sample_rate(&eval->sample_rate, eval->sample_rate_str, ctx)))
goto end;
eval->n = 0;
 
end:
av_free(args1);
return ret;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
EvalContext *eval = ctx->priv;
int i;
 
for (i = 0; i < eval->nb_channels; i++) {
av_expr_free(eval->expr[i]);
eval->expr[i] = NULL;
}
av_freep(&eval->expr);
}
 
static int config_props(AVFilterLink *outlink)
{
EvalContext *eval = outlink->src->priv;
char buf[128];
 
outlink->time_base = (AVRational){1, eval->sample_rate};
outlink->sample_rate = eval->sample_rate;
 
eval->var_values[VAR_S] = eval->sample_rate;
 
av_get_channel_layout_string(buf, sizeof(buf), 0, eval->chlayout);
 
av_log(outlink->src, AV_LOG_VERBOSE,
"sample_rate:%d chlayout:%s duration:%"PRId64"\n",
eval->sample_rate, buf, eval->duration);
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
EvalContext *eval = ctx->priv;
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE };
int64_t chlayouts[] = { eval->chlayout ? eval->chlayout : FF_COUNT2LAYOUT(eval->nb_channels) , -1 };
int sample_rates[] = { eval->sample_rate, -1 };
 
ff_set_common_formats (ctx, ff_make_format_list(sample_fmts));
ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
 
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
EvalContext *eval = outlink->src->priv;
AVFrame *samplesref;
int i, j;
int64_t t = av_rescale(eval->n, AV_TIME_BASE, eval->sample_rate);
 
if (eval->duration >= 0 && t >= eval->duration)
return AVERROR_EOF;
 
samplesref = ff_get_audio_buffer(outlink, eval->nb_samples);
if (!samplesref)
return AVERROR(ENOMEM);
 
/* evaluate expression for each single sample and for each channel */
for (i = 0; i < eval->nb_samples; i++, eval->n++) {
eval->var_values[VAR_N] = eval->n;
eval->var_values[VAR_T] = eval->var_values[VAR_N] * (double)1/eval->sample_rate;
 
for (j = 0; j < eval->nb_channels; j++) {
*((double *) samplesref->extended_data[j] + i) =
av_expr_eval(eval->expr[j], eval->var_values, NULL);
}
}
 
samplesref->pts = eval->pts;
samplesref->sample_rate = eval->sample_rate;
eval->pts += eval->nb_samples;
 
return ff_filter_frame(outlink, samplesref);
}
 
static const AVFilterPad aevalsrc_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_props,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_asrc_aevalsrc = {
.name = "aevalsrc",
.description = NULL_IF_CONFIG_SMALL("Generate an audio signal generated by an expression."),
.query_formats = query_formats,
.init = init,
.uninit = uninit,
.priv_size = sizeof(EvalContext),
.inputs = NULL,
.outputs = aevalsrc_outputs,
.priv_class = &aevalsrc_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/asrc_anullsrc.c
0,0 → 1,146
/*
* Copyright 2010 S.N. Hemanth Meenakshisundaram <smeenaks ucsd edu>
* Copyright 2010 Stefano Sabatini <stefano.sabatini-lala poste it>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* null audio source
*/
 
#include <inttypes.h>
#include <stdio.h>
 
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
char *channel_layout_str;
uint64_t channel_layout;
char *sample_rate_str;
int sample_rate;
int nb_samples; ///< number of samples per requested frame
int64_t pts;
} ANullContext;
 
#define OFFSET(x) offsetof(ANullContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption anullsrc_options[]= {
{ "channel_layout", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS },
{ "cl", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS },
{ "sample_rate", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
{ "r", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
{ "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
{ "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(anullsrc);
 
static av_cold int init(AVFilterContext *ctx)
{
ANullContext *null = ctx->priv;
int ret;
 
if ((ret = ff_parse_sample_rate(&null->sample_rate,
null->sample_rate_str, ctx)) < 0)
return ret;
 
if ((ret = ff_parse_channel_layout(&null->channel_layout,
null->channel_layout_str, ctx)) < 0)
return ret;
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
ANullContext *null = ctx->priv;
int64_t chlayouts[] = { null->channel_layout, -1 };
int sample_rates[] = { null->sample_rate, -1 };
 
ff_set_common_formats (ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO));
ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
ff_set_common_samplerates (ctx, ff_make_format_list(sample_rates));
 
return 0;
}
 
static int config_props(AVFilterLink *outlink)
{
ANullContext *null = outlink->src->priv;
char buf[128];
 
av_get_channel_layout_string(buf, sizeof(buf), 0, null->channel_layout);
av_log(outlink->src, AV_LOG_VERBOSE,
"sample_rate:%d channel_layout:'%s' nb_samples:%d\n",
null->sample_rate, buf, null->nb_samples);
 
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
int ret;
ANullContext *null = outlink->src->priv;
AVFrame *samplesref;
 
samplesref = ff_get_audio_buffer(outlink, null->nb_samples);
if (!samplesref)
return AVERROR(ENOMEM);
 
samplesref->pts = null->pts;
samplesref->channel_layout = null->channel_layout;
samplesref->sample_rate = outlink->sample_rate;
 
ret = ff_filter_frame(outlink, av_frame_clone(samplesref));
av_frame_free(&samplesref);
if (ret < 0)
return ret;
 
null->pts += null->nb_samples;
return ret;
}
 
static const AVFilterPad avfilter_asrc_anullsrc_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_props,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_asrc_anullsrc = {
.name = "anullsrc",
.description = NULL_IF_CONFIG_SMALL("Null audio source, return empty audio frames."),
.init = init,
.query_formats = query_formats,
.priv_size = sizeof(ANullContext),
.inputs = NULL,
.outputs = avfilter_asrc_anullsrc_outputs,
.priv_class = &anullsrc_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/asrc_flite.c
0,0 → 1,283
/*
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* flite voice synth source
*/
 
#include <flite/flite.h>
#include "libavutil/channel_layout.h"
#include "libavutil/file.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
char *voice_str;
char *textfile;
char *text;
cst_wave *wave;
int16_t *wave_samples;
int wave_nb_samples;
int list_voices;
cst_voice *voice;
struct voice_entry *voice_entry;
int64_t pts;
int frame_nb_samples; ///< number of samples per frame
} FliteContext;
 
#define OFFSET(x) offsetof(FliteContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption flite_options[] = {
{ "list_voices", "list voices and exit", OFFSET(list_voices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
{ "nb_samples", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS },
{ "n", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS },
{ "text", "set text to speak", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "textfile", "set filename of the text to speak", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "v", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "voice", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(flite);
 
static volatile int flite_inited = 0;
 
/* declare functions for all the supported voices */
#define DECLARE_REGISTER_VOICE_FN(name) \
cst_voice *register_cmu_us_## name(const char *); \
void unregister_cmu_us_## name(cst_voice *);
DECLARE_REGISTER_VOICE_FN(awb);
DECLARE_REGISTER_VOICE_FN(kal);
DECLARE_REGISTER_VOICE_FN(kal16);
DECLARE_REGISTER_VOICE_FN(rms);
DECLARE_REGISTER_VOICE_FN(slt);
 
struct voice_entry {
const char *name;
cst_voice * (*register_fn)(const char *);
void (*unregister_fn)(cst_voice *);
cst_voice *voice;
unsigned usage_count;
} voice_entry;
 
#define MAKE_VOICE_STRUCTURE(voice_name) { \
.name = #voice_name, \
.register_fn = register_cmu_us_ ## voice_name, \
.unregister_fn = unregister_cmu_us_ ## voice_name, \
}
static struct voice_entry voice_entries[] = {
MAKE_VOICE_STRUCTURE(awb),
MAKE_VOICE_STRUCTURE(kal),
MAKE_VOICE_STRUCTURE(kal16),
MAKE_VOICE_STRUCTURE(rms),
MAKE_VOICE_STRUCTURE(slt),
};
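 
/* Expansion sketch: MAKE_VOICE_STRUCTURE(kal) produces roughly: */
#if 0
{
    .name          = "kal",
    .register_fn   = register_cmu_us_kal,
    .unregister_fn = unregister_cmu_us_kal,
}
#endif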
 
static void list_voices(void *log_ctx, const char *sep)
{
int i, n = FF_ARRAY_ELEMS(voice_entries);
for (i = 0; i < n; i++)
av_log(log_ctx, AV_LOG_INFO, "%s%s",
voice_entries[i].name, i < (n-1) ? sep : "\n");
}
 
static int select_voice(struct voice_entry **entry_ret, const char *voice_name, void *log_ctx)
{
int i;
 
for (i = 0; i < FF_ARRAY_ELEMS(voice_entries); i++) {
struct voice_entry *entry = &voice_entries[i];
if (!strcmp(entry->name, voice_name)) {
if (!entry->voice)
entry->voice = entry->register_fn(NULL);
if (!entry->voice) {
av_log(log_ctx, AV_LOG_ERROR,
"Could not register voice '%s'\n", voice_name);
return AVERROR_UNKNOWN;
}
entry->usage_count++;
*entry_ret = entry;
return 0;
}
}
 
av_log(log_ctx, AV_LOG_ERROR, "Could not find voice '%s'\n", voice_name);
av_log(log_ctx, AV_LOG_INFO, "Choose between the voices: ");
list_voices(log_ctx, ", ");
 
return AVERROR(EINVAL);
}
 
static av_cold int init(AVFilterContext *ctx)
{
FliteContext *flite = ctx->priv;
int ret = 0;
 
if (flite->list_voices) {
list_voices(ctx, "\n");
return AVERROR_EXIT;
}
 
if (!flite_inited) {
if (flite_init() < 0) {
av_log(ctx, AV_LOG_ERROR, "flite initialization failed\n");
return AVERROR_UNKNOWN;
}
flite_inited++;
}
 
if ((ret = select_voice(&flite->voice_entry, flite->voice_str, ctx)) < 0)
return ret;
flite->voice = flite->voice_entry->voice;
 
if (flite->textfile && flite->text) {
av_log(ctx, AV_LOG_ERROR,
"Both text and textfile options set: only one must be specified\n");
return AVERROR(EINVAL);
}
 
if (flite->textfile) {
uint8_t *textbuf;
size_t textbuf_size;
 
if ((ret = av_file_map(flite->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) {
av_log(ctx, AV_LOG_ERROR,
"The text file '%s' could not be read: %s\n",
flite->textfile, av_err2str(ret));
return ret;
}
 
if (!(flite->text = av_malloc(textbuf_size+1)))
return AVERROR(ENOMEM);
memcpy(flite->text, textbuf, textbuf_size);
flite->text[textbuf_size] = 0;
av_file_unmap(textbuf, textbuf_size);
}
 
if (!flite->text) {
av_log(ctx, AV_LOG_ERROR,
"No speech text specified, specify the 'text' or 'textfile' option\n");
return AVERROR(EINVAL);
}
 
/* synthesize all the text in one block */
flite->wave = flite_text_to_wave(flite->text, flite->voice);
flite->wave_samples = flite->wave->samples;
flite->wave_nb_samples = flite->wave->num_samples;
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
FliteContext *flite = ctx->priv;
 
if (flite->voice_entry && !--flite->voice_entry->usage_count)
flite->voice_entry->unregister_fn(flite->voice);
flite->voice = NULL;
flite->voice_entry = NULL;
delete_wave(flite->wave);
flite->wave = NULL;
}
 
static int query_formats(AVFilterContext *ctx)
{
FliteContext *flite = ctx->priv;
 
AVFilterChannelLayouts *chlayouts = NULL;
int64_t chlayout = av_get_default_channel_layout(flite->wave->num_channels);
AVFilterFormats *sample_formats = NULL;
AVFilterFormats *sample_rates = NULL;
 
ff_add_channel_layout(&chlayouts, chlayout);
ff_set_common_channel_layouts(ctx, chlayouts);
ff_add_format(&sample_formats, AV_SAMPLE_FMT_S16);
ff_set_common_formats(ctx, sample_formats);
ff_add_format(&sample_rates, flite->wave->sample_rate);
ff_set_common_samplerates (ctx, sample_rates);
 
return 0;
}
 
static int config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
FliteContext *flite = ctx->priv;
 
outlink->sample_rate = flite->wave->sample_rate;
outlink->time_base = (AVRational){1, flite->wave->sample_rate};
 
av_log(ctx, AV_LOG_VERBOSE, "voice:%s fmt:%s sample_rate:%d\n",
flite->voice_str,
av_get_sample_fmt_name(outlink->format), outlink->sample_rate);
return 0;
}
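 
/* Frame slicing (illustrative note): request_frame below emits the
* pre-synthesized wave in chunks of at most frame_nb_samples. E.g. with
* nb_samples=512 and a 12000-sample wave, 23 full 512-sample frames are
* sent, then one 224-sample frame, and the next request returns
* AVERROR_EOF. The memcpy copies nb_samples * num_channels 16-bit
* samples, hence the factor of 2 bytes per sample. */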
 
static int request_frame(AVFilterLink *outlink)
{
AVFrame *samplesref;
FliteContext *flite = outlink->src->priv;
int nb_samples = FFMIN(flite->wave_nb_samples, flite->frame_nb_samples);
 
if (!nb_samples)
return AVERROR_EOF;
 
samplesref = ff_get_audio_buffer(outlink, nb_samples);
if (!samplesref)
return AVERROR(ENOMEM);
 
memcpy(samplesref->data[0], flite->wave_samples,
nb_samples * flite->wave->num_channels * 2);
samplesref->pts = flite->pts;
av_frame_set_pkt_pos(samplesref, -1);
av_frame_set_sample_rate(samplesref, flite->wave->sample_rate);
flite->pts += nb_samples;
flite->wave_samples += nb_samples * flite->wave->num_channels;
flite->wave_nb_samples -= nb_samples;
 
return ff_filter_frame(outlink, samplesref);
}
 
static const AVFilterPad flite_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_props,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_asrc_flite = {
.name = "flite",
.description = NULL_IF_CONFIG_SMALL("Synthesize voice from text using libflite."),
.query_formats = query_formats,
.init = init,
.uninit = uninit,
.priv_size = sizeof(FliteContext),
.inputs = NULL,
.outputs = flite_outputs,
.priv_class = &flite_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/asrc_sine.c
0,0 → 1,223
/*
* Copyright (c) 2013 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <float.h>
 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
double frequency;
double beep_factor;
int samples_per_frame;
int sample_rate;
int64_t duration;
int16_t *sin;
int64_t pts;
uint32_t phi; ///< current phase of the sine (2pi = 1<<32)
uint32_t dphi; ///< phase increment between two samples
unsigned beep_period;
unsigned beep_index;
unsigned beep_length;
uint32_t phi_beep; ///< current phase of the beep
uint32_t dphi_beep; ///< phase increment of the beep
} SineContext;
 
#define CONTEXT SineContext
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
#define OPT_GENERIC(name, field, def, min, max, descr, type, deffield, ...) \
{ name, descr, offsetof(CONTEXT, field), AV_OPT_TYPE_ ## type, \
{ .deffield = def }, min, max, FLAGS, __VA_ARGS__ }
 
#define OPT_INT(name, field, def, min, max, descr, ...) \
OPT_GENERIC(name, field, def, min, max, descr, INT, i64, __VA_ARGS__)
 
#define OPT_DBL(name, field, def, min, max, descr, ...) \
OPT_GENERIC(name, field, def, min, max, descr, DOUBLE, dbl, __VA_ARGS__)
 
#define OPT_DUR(name, field, def, min, max, descr, ...) \
OPT_GENERIC(name, field, def, min, max, descr, DURATION, str, __VA_ARGS__)
 
static const AVOption sine_options[] = {
OPT_DBL("frequency", frequency, 440, 0, DBL_MAX, "set the sine frequency"),
OPT_DBL("f", frequency, 440, 0, DBL_MAX, "set the sine frequency"),
OPT_DBL("beep_factor", beep_factor, 0, 0, DBL_MAX, "set the beep fequency factor"),
OPT_DBL("b", beep_factor, 0, 0, DBL_MAX, "set the beep fequency factor"),
OPT_INT("sample_rate", sample_rate, 44100, 1, INT_MAX, "set the sample rate"),
OPT_INT("r", sample_rate, 44100, 1, INT_MAX, "set the sample rate"),
OPT_DUR("duration", duration, 0, 0, INT64_MAX, "set the audio duration"),
OPT_DUR("d", duration, 0, 0, INT64_MAX, "set the audio duration"),
OPT_INT("samples_per_frame", samples_per_frame, 1024, 0, INT_MAX, "set the number of samples per frame"),
{NULL}
};
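 
/* Usage sketch (illustrative): five seconds of a 220 Hz tone with a
* harmonic beep once per second:
*
* ffmpeg -f lavfi -i "sine=f=220:b=4:d=5" beep.wav
*/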
 
AVFILTER_DEFINE_CLASS(sine);
 
#define LOG_PERIOD 15
#define AMPLITUDE 4095
#define AMPLITUDE_SHIFT 3
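 
/* Phase bookkeeping (illustrative note): the table built below holds one
* full period in 1 << LOG_PERIOD = 32768 entries, and the 32-bit phase
* accumulator is mapped to a table index with phi >> (32 - LOG_PERIOD).
* For f = 440 Hz at 44100 Hz, dphi = round(440 / 44100 * 2^32) ~= 42852281,
* and uint32_t wrap-around provides the periodicity for free. */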
 
static void make_sin_table(int16_t *sin)
{
unsigned half_pi = 1 << (LOG_PERIOD - 2);
unsigned ampls = AMPLITUDE << AMPLITUDE_SHIFT;
uint64_t unit2 = (uint64_t)(ampls * ampls) << 32;
unsigned step, i, c, s, k, new_k, n2;
 
/* Principle: if u = exp(i*a1) and v = exp(i*a2), then
exp(i*(a1+a2)/2) = (u+v) / length(u+v) */
sin[0] = 0;
sin[half_pi] = ampls;
for (step = half_pi; step > 1; step /= 2) {
/* k = (1 << 16) * amplitude / length(u+v)
In exact values, k is constant at a given step */
k = 0x10000;
for (i = 0; i < half_pi / 2; i += step) {
s = sin[i] + sin[i + step];
c = sin[half_pi - i] + sin[half_pi - i - step];
n2 = s * s + c * c;
/* Newton's method to solve n² * k² = unit² */
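/* (Babylonian iteration: new_k = (k + unit2 / (k * n2)) / 2 with rounding;
it converges to k = unit / sqrt(n2), the wanted normalization factor) */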
while (1) {
new_k = (k + unit2 / ((uint64_t)k * n2) + 1) >> 1;
if (k == new_k)
break;
k = new_k;
}
sin[i + step / 2] = (k * s + 0x7FFF) >> 16;
sin[half_pi - i - step / 2] = (k * c + 0x8000) >> 16;
}
}
/* Unshift amplitude */
for (i = 0; i <= half_pi; i++)
sin[i] = (sin[i] + (1 << (AMPLITUDE_SHIFT - 1))) >> AMPLITUDE_SHIFT;
/* Use symmetries to fill the other three quarters */
for (i = 0; i < half_pi; i++)
sin[half_pi * 2 - i] = sin[i];
for (i = 0; i < 2 * half_pi; i++)
sin[i + 2 * half_pi] = -sin[i];
}
 
static av_cold int init(AVFilterContext *ctx)
{
SineContext *sine = ctx->priv;
 
if (!(sine->sin = av_malloc(sizeof(*sine->sin) << LOG_PERIOD)))
return AVERROR(ENOMEM);
sine->dphi = ldexp(sine->frequency, 32) / sine->sample_rate + 0.5;
make_sin_table(sine->sin);
 
if (sine->beep_factor) {
sine->beep_period = sine->sample_rate;
sine->beep_length = sine->beep_period / 25;
sine->dphi_beep = ldexp(sine->beep_factor * sine->frequency, 32) /
sine->sample_rate + 0.5;
}
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
SineContext *sine = ctx->priv;
 
av_freep(&sine->sin);
}
 
static av_cold int query_formats(AVFilterContext *ctx)
{
SineContext *sine = ctx->priv;
static const int64_t chlayouts[] = { AV_CH_LAYOUT_MONO, -1 };
int sample_rates[] = { sine->sample_rate, -1 };
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE };
 
ff_set_common_formats (ctx, ff_make_format_list(sample_fmts));
ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
return 0;
}
 
static av_cold int config_props(AVFilterLink *outlink)
{
SineContext *sine = outlink->src->priv;
sine->duration = av_rescale(sine->duration, sine->sample_rate, AV_TIME_BASE);
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
SineContext *sine = outlink->src->priv;
AVFrame *frame;
int i, nb_samples = sine->samples_per_frame;
int16_t *samples;
 
if (sine->duration) {
nb_samples = FFMIN(nb_samples, sine->duration - sine->pts);
av_assert1(nb_samples >= 0);
if (!nb_samples)
return AVERROR_EOF;
}
if (!(frame = ff_get_audio_buffer(outlink, nb_samples)))
return AVERROR(ENOMEM);
samples = (int16_t *)frame->data[0];
 
for (i = 0; i < nb_samples; i++) {
samples[i] = sine->sin[sine->phi >> (32 - LOG_PERIOD)];
sine->phi += sine->dphi;
if (sine->beep_index < sine->beep_length) {
samples[i] += sine->sin[sine->phi_beep >> (32 - LOG_PERIOD)] << 1;
sine->phi_beep += sine->dphi_beep;
}
if (++sine->beep_index == sine->beep_period)
sine->beep_index = 0;
}
 
frame->pts = sine->pts;
sine->pts += nb_samples;
return ff_filter_frame(outlink, frame);
}
 
static const AVFilterPad sine_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.request_frame = request_frame,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_asrc_sine = {
.name = "sine",
.description = NULL_IF_CONFIG_SMALL("Generate sine wave audio signal."),
.query_formats = query_formats,
.init = init,
.uninit = uninit,
.priv_size = sizeof(SineContext),
.inputs = NULL,
.outputs = sine_outputs,
.priv_class = &sine_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/audio.c
0,0 → 1,170
/*
* Copyright (c) Stefano Sabatini | stefasab at gmail.com
* Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavcodec/avcodec.h"
 
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
int avfilter_ref_get_channels(AVFilterBufferRef *ref)
{
return ref->audio ? ref->audio->channels : 0;
}
 
AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
return ff_get_audio_buffer(link->dst->outputs[0], nb_samples);
}
 
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
AVFrame *frame = av_frame_alloc();
int channels = link->channels;
int ret;
 
av_assert0(channels == av_get_channel_layout_nb_channels(link->channel_layout) || !av_get_channel_layout_nb_channels(link->channel_layout));
 
if (!frame)
return NULL;
 
frame->nb_samples = nb_samples;
frame->format = link->format;
av_frame_set_channels(frame, link->channels);
frame->channel_layout = link->channel_layout;
frame->sample_rate = link->sample_rate;
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
av_frame_free(&frame);
return NULL;
}
 
av_samples_set_silence(frame->extended_data, 0, nb_samples, channels,
link->format);
 
 
return frame;
}
 
AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
AVFrame *ret = NULL;
 
if (link->dstpad->get_audio_buffer)
ret = link->dstpad->get_audio_buffer(link, nb_samples);
 
if (!ret)
ret = ff_default_get_audio_buffer(link, nb_samples);
 
return ret;
}
 
#if FF_API_AVFILTERBUFFER
AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,
int linesize,int perms,
int nb_samples,
enum AVSampleFormat sample_fmt,
int channels,
uint64_t channel_layout)
{
int planes;
AVFilterBuffer *samples = av_mallocz(sizeof(*samples));
AVFilterBufferRef *samplesref = av_mallocz(sizeof(*samplesref));
 
if (!samples || !samplesref)
goto fail;
 
av_assert0(channels);
av_assert0(channel_layout == 0 ||
channels == av_get_channel_layout_nb_channels(channel_layout));
 
samplesref->buf = samples;
samplesref->buf->free = ff_avfilter_default_free_buffer;
if (!(samplesref->audio = av_mallocz(sizeof(*samplesref->audio))))
goto fail;
 
samplesref->audio->nb_samples = nb_samples;
samplesref->audio->channel_layout = channel_layout;
samplesref->audio->channels = channels;
 
planes = av_sample_fmt_is_planar(sample_fmt) ? channels : 1;
 
/* make sure the buffer gets read permission or it's useless for output */
samplesref->perms = perms | AV_PERM_READ;
 
samples->refcount = 1;
samplesref->type = AVMEDIA_TYPE_AUDIO;
samplesref->format = sample_fmt;
 
memcpy(samples->data, data,
FFMIN(FF_ARRAY_ELEMS(samples->data), planes)*sizeof(samples->data[0]));
memcpy(samplesref->data, samples->data, sizeof(samples->data));
 
samples->linesize[0] = samplesref->linesize[0] = linesize;
 
if (planes > FF_ARRAY_ELEMS(samples->data)) {
samples-> extended_data = av_mallocz(sizeof(*samples->extended_data) *
planes);
samplesref->extended_data = av_mallocz(sizeof(*samplesref->extended_data) *
planes);
 
if (!samples->extended_data || !samplesref->extended_data)
goto fail;
 
memcpy(samples-> extended_data, data, sizeof(*data)*planes);
memcpy(samplesref->extended_data, data, sizeof(*data)*planes);
} else {
samples->extended_data = samples->data;
samplesref->extended_data = samplesref->data;
}
 
samplesref->pts = AV_NOPTS_VALUE;
 
return samplesref;
 
fail:
if (samples && samples->extended_data != samples->data)
av_freep(&samples->extended_data);
if (samplesref) {
av_freep(&samplesref->audio);
if (samplesref->extended_data != samplesref->data)
av_freep(&samplesref->extended_data);
}
av_freep(&samplesref);
av_freep(&samples);
return NULL;
}
 
AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
int linesize,int perms,
int nb_samples,
enum AVSampleFormat sample_fmt,
uint64_t channel_layout)
{
int channels = av_get_channel_layout_nb_channels(channel_layout);
return avfilter_get_audio_buffer_ref_from_arrays_channels(data, linesize, perms,
nb_samples, sample_fmt,
channels, channel_layout);
}
#endif
/contrib/sdk/sources/ffmpeg/libavfilter/audio.h
0,0 → 1,83
/*
* Copyright (c) Stefano Sabatini | stefasab at gmail.com
* Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_AUDIO_H
#define AVFILTER_AUDIO_H
 
#include "avfilter.h"
#include "internal.h"
 
static const enum AVSampleFormat ff_packed_sample_fmts_array[] = {
AV_SAMPLE_FMT_U8,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_DBL,
AV_SAMPLE_FMT_NONE
};
 
static const enum AVSampleFormat ff_planar_sample_fmts_array[] = {
AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
};
 
/** default handler for get_audio_buffer() for audio inputs */
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples);
 
/** get_audio_buffer() handler for filters which simply pass audio along */
AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples);
 
/**
* Request a buffer of audio samples.
*
* @param link the output link to the filter from which the buffer will
* be requested
* @param nb_samples the number of samples per channel
* @return A reference to the samples. This must be freed with
* av_frame_free() when you are finished with it.
*/
AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples);
 
/**
* Send a buffer of audio samples to the next filter.
*
* @param link the output link over which the audio samples are being sent
* @param samplesref a reference to the buffer of audio samples being sent. The
* receiving filter will free this reference when it no longer
* needs it or pass it on to the next filter.
*
* @return >= 0 on success, a negative AVERROR on error. The receiving filter
* is responsible for unreferencing samplesref in case of error.
*/
int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
 
/**
* Send a buffer of audio samples to the next link, without checking
* min_samples.
*/
int ff_filter_samples_framed(AVFilterLink *link,
AVFilterBufferRef *samplesref);
 
#endif /* AVFILTER_AUDIO_H */
/contrib/sdk/sources/ffmpeg/libavfilter/avcodec.c
0,0 → 1,157
/*
* Copyright 2011 Stefano Sabatini | stefasab at gmail.com
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* libavcodec/libavfilter gluing utilities
*/
 
#include "avcodec.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
 
#if FF_API_AVFILTERBUFFER
AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame,
int perms)
{
AVFilterBufferRef *picref =
avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize, perms,
frame->width, frame->height,
frame->format);
if (!picref)
return NULL;
if (avfilter_copy_frame_props(picref, frame) < 0) {
picref->buf->data[0] = NULL;
avfilter_unref_bufferp(&picref);
}
return picref;
}
 
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,
int perms)
{
AVFilterBufferRef *samplesref;
int channels = av_frame_get_channels(frame);
int64_t layout = av_frame_get_channel_layout(frame);
 
if (layout && av_get_channel_layout_nb_channels(layout) != av_frame_get_channels(frame)) {
av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
return NULL;
}
 
samplesref = avfilter_get_audio_buffer_ref_from_arrays_channels(
(uint8_t **)frame->extended_data, frame->linesize[0], perms,
frame->nb_samples, frame->format, channels, layout);
if (!samplesref)
return NULL;
if (avfilter_copy_frame_props(samplesref, frame) < 0) {
samplesref->buf->data[0] = NULL;
avfilter_unref_bufferp(&samplesref);
}
return samplesref;
}
 
AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type,
const AVFrame *frame,
int perms)
{
switch (type) {
case AVMEDIA_TYPE_VIDEO:
return avfilter_get_video_buffer_ref_from_frame(frame, perms);
case AVMEDIA_TYPE_AUDIO:
return avfilter_get_audio_buffer_ref_from_frame(frame, perms);
default:
return NULL;
}
}
 
int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src)
{
int planes, nb_channels;
 
if (!dst)
return AVERROR(EINVAL);
/* abort in case the src is NULL and dst is not, avoid inconsistent state in dst */
av_assert0(src);
 
memcpy(dst->data, src->data, sizeof(dst->data));
memcpy(dst->linesize, src->linesize, sizeof(dst->linesize));
 
dst->pts = src->pts;
dst->format = src->format;
av_frame_set_pkt_pos(dst, src->pos);
 
switch (src->type) {
case AVMEDIA_TYPE_VIDEO:
av_assert0(src->video);
dst->width = src->video->w;
dst->height = src->video->h;
dst->sample_aspect_ratio = src->video->sample_aspect_ratio;
dst->interlaced_frame = src->video->interlaced;
dst->top_field_first = src->video->top_field_first;
dst->key_frame = src->video->key_frame;
dst->pict_type = src->video->pict_type;
break;
case AVMEDIA_TYPE_AUDIO:
av_assert0(src->audio);
nb_channels = av_get_channel_layout_nb_channels(src->audio->channel_layout);
planes = av_sample_fmt_is_planar(src->format) ? nb_channels : 1;
 
if (planes > FF_ARRAY_ELEMS(dst->data)) {
dst->extended_data = av_mallocz(planes * sizeof(*dst->extended_data));
if (!dst->extended_data)
return AVERROR(ENOMEM);
memcpy(dst->extended_data, src->extended_data,
planes * sizeof(*dst->extended_data));
} else
dst->extended_data = dst->data;
dst->nb_samples = src->audio->nb_samples;
av_frame_set_sample_rate (dst, src->audio->sample_rate);
av_frame_set_channel_layout(dst, src->audio->channel_layout);
av_frame_set_channels (dst, src->audio->channels);
break;
default:
return AVERROR(EINVAL);
}
 
return 0;
}
#endif
 
#if FF_API_FILL_FRAME
int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame,
const AVFilterBufferRef *samplesref)
{
return avfilter_copy_buf_props(frame, samplesref);
}
 
int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame,
const AVFilterBufferRef *picref)
{
return avfilter_copy_buf_props(frame, picref);
}
 
int avfilter_fill_frame_from_buffer_ref(AVFrame *frame,
const AVFilterBufferRef *ref)
{
return avfilter_copy_buf_props(frame, ref);
}
#endif
/contrib/sdk/sources/ffmpeg/libavfilter/avcodec.h
0,0 → 1,110
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_AVCODEC_H
#define AVFILTER_AVCODEC_H
 
/**
* @file
* libavcodec/libavfilter gluing utilities
*
* This should be included in an application ONLY if the installed
* libavfilter has been compiled with libavcodec support, otherwise
* symbols defined below will not be available.
*/
 
#include "avfilter.h"
 
#if FF_API_AVFILTERBUFFER
/**
* Create and return a picref reference from the data and properties
* contained in frame.
*
* @param perms permissions to assign to the new buffer reference
* @deprecated avfilter APIs work natively with AVFrame instead.
*/
attribute_deprecated
AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms);
 
 
/**
* Create and return a picref reference from the data and properties
* contained in frame.
*
* @param perms permissions to assign to the new buffer reference
* @deprecated avfilter APIs work natively with AVFrame instead.
*/
attribute_deprecated
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,
int perms);
 
/**
* Create and return a buffer reference from the data and properties
* contained in frame.
*
* @param perms permissions to assign to the new buffer reference
* @deprecated avfilter APIs work natively with AVFrame instead.
*/
attribute_deprecated
AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type,
const AVFrame *frame,
int perms);
#endif
 
#if FF_API_FILL_FRAME
/**
* Fill an AVFrame with the information stored in samplesref.
*
* @param frame an already allocated AVFrame
* @param samplesref an audio buffer reference
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure
* @deprecated Use avfilter_copy_buf_props() instead.
*/
attribute_deprecated
int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame,
const AVFilterBufferRef *samplesref);
 
/**
* Fill an AVFrame with the information stored in picref.
*
* @param frame an already allocated AVFrame
* @param picref a video buffer reference
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure
* @deprecated Use avfilter_copy_buf_props() instead.
*/
attribute_deprecated
int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame,
const AVFilterBufferRef *picref);
 
/**
* Fill an AVFrame with information stored in ref.
*
* @param frame an already allocated AVFrame
* @param ref a video or audio buffer reference
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure
* @deprecated Use avfilter_copy_buf_props() instead.
*/
attribute_deprecated
int avfilter_fill_frame_from_buffer_ref(AVFrame *frame,
const AVFilterBufferRef *ref);
#endif
 
#endif /* AVFILTER_AVCODEC_H */
/contrib/sdk/sources/ffmpeg/libavfilter/avf_avectorscope.c
0,0 → 1,273
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* audio to video multimedia vectorscope filter
*/
 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
#include "audio.h"
#include "video.h"
#include "internal.h"
 
enum VectorScopeMode {
LISSAJOUS,
LISSAJOUS_XY,
MODE_NB,
};
 
typedef struct AudioVectorScopeContext {
const AVClass *class;
AVFrame *outpicref;
int w, h;
int hw, hh;
enum VectorScopeMode mode;
int contrast[3];
int fade[3];
double zoom;
AVRational frame_rate;
} AudioVectorScopeContext;
 
#define OFFSET(x) offsetof(AudioVectorScopeContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption avectorscope_options[] = {
{ "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=LISSAJOUS}, 0, MODE_NB-1, FLAGS, "mode" },
{ "m", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=LISSAJOUS}, 0, MODE_NB-1, FLAGS, "mode" },
{ "lissajous", "", 0, AV_OPT_TYPE_CONST, {.i64=LISSAJOUS}, 0, 0, FLAGS, "mode" },
{ "lissajous_xy", "", 0, AV_OPT_TYPE_CONST, {.i64=LISSAJOUS_XY}, 0, 0, FLAGS, "mode" },
{ "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, 0, FLAGS },
{ "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, 0, FLAGS },
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="400x400"}, 0, 0, FLAGS },
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="400x400"}, 0, 0, FLAGS },
{ "rc", "set red contrast", OFFSET(contrast[0]), AV_OPT_TYPE_INT, {.i64=40}, 0, 255, FLAGS },
{ "gc", "set green contrast", OFFSET(contrast[1]), AV_OPT_TYPE_INT, {.i64=160}, 0, 255, FLAGS },
{ "bc", "set blue contrast", OFFSET(contrast[2]), AV_OPT_TYPE_INT, {.i64=80}, 0, 255, FLAGS },
{ "rf", "set red fade", OFFSET(fade[0]), AV_OPT_TYPE_INT, {.i64=15}, 0, 255, FLAGS },
{ "gf", "set green fade", OFFSET(fade[1]), AV_OPT_TYPE_INT, {.i64=10}, 0, 255, FLAGS },
{ "bf", "set blue fade", OFFSET(fade[2]), AV_OPT_TYPE_INT, {.i64=5}, 0, 255, FLAGS },
{ "zoom", "set zoom factor", OFFSET(zoom), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 10, FLAGS },
{ NULL }
};
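 
/* Usage sketch (illustrative): render the scope of a stereo file to video:
*
* ffmpeg -i in.wav -filter_complex \
* "avectorscope=mode=lissajous_xy:zoom=1.5:rc=0:gc=200" out.mp4
*/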
 
AVFILTER_DEFINE_CLASS(avectorscope);
 
static void draw_dot(AudioVectorScopeContext *p, unsigned x, unsigned y)
{
const int linesize = p->outpicref->linesize[0];
uint8_t *dst;
 
if (p->zoom > 1) {
if (y >= p->h || x >= p->w)
return;
} else {
y = FFMIN(y, p->h - 1);
x = FFMIN(x, p->w - 1);
}
 
dst = &p->outpicref->data[0][y * linesize + x * 4];
dst[0] = FFMIN(dst[0] + p->contrast[0], 255);
dst[1] = FFMIN(dst[1] + p->contrast[1], 255);
dst[2] = FFMIN(dst[2] + p->contrast[2], 255);
}
 
static void fade(AudioVectorScopeContext *p)
{
const int linesize = p->outpicref->linesize[0];
int i, j;
 
if (p->fade[0] || p->fade[1] || p->fade[2]) {
uint8_t *d = p->outpicref->data[0];
for (i = 0; i < p->h; i++) {
for (j = 0; j < p->w*4; j+=4) {
d[j+0] = FFMAX(d[j+0] - p->fade[0], 0);
d[j+1] = FFMAX(d[j+1] - p->fade[1], 0);
d[j+2] = FFMAX(d[j+2] - p->fade[2], 0);
}
d += linesize;
}
}
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layout = NULL;
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
 
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &inlink->out_formats);
 
ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO);
ff_channel_layouts_ref(layout, &inlink->out_channel_layouts);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &inlink->out_samplerates);
 
formats = ff_make_format_list(pix_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &outlink->in_formats);
 
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
AudioVectorScopeContext *p = ctx->priv;
int nb_samples;
 
nb_samples = FFMAX(1024, ((double)inlink->sample_rate / av_q2d(p->frame_rate)) + 0.5);
inlink->partial_buf_size =
inlink->min_samples =
inlink->max_samples = nb_samples;
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AudioVectorScopeContext *p = outlink->src->priv;
 
outlink->w = p->w;
outlink->h = p->h;
outlink->sample_aspect_ratio = (AVRational){1,1};
outlink->frame_rate = p->frame_rate;
 
p->hw = p->w / 2;
p->hh = p->h / 2;
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
AudioVectorScopeContext *p = ctx->priv;
const int hw = p->hw;
const int hh = p->hh;
unsigned x, y;
const double zoom = p->zoom;
int i;
 
if (!p->outpicref || p->outpicref->width != outlink->w ||
p->outpicref->height != outlink->h) {
av_frame_free(&p->outpicref);
p->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!p->outpicref) {
av_frame_free(&insamples);
return AVERROR(ENOMEM);
}
 
for (i = 0; i < outlink->h; i++)
memset(p->outpicref->data[0] + i * p->outpicref->linesize[0], 0, outlink->w * 4);
}
p->outpicref->pts = insamples->pts;
 
fade(p);
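 
/* Coordinate mapping (illustrative note): in lissajous mode the channel
* difference (right - left) drives x and the sum drives y, so a mono
* signal (left == right) collapses to a vertical line; in lissajous_xy,
* right maps directly to x and left to y. Both modes scale by zoom and
* center on (hw, hh). */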
 
switch (insamples->format) {
case AV_SAMPLE_FMT_S16:
for (i = 0; i < insamples->nb_samples; i++) {
int16_t *src = (int16_t *)insamples->data[0] + i * 2;
 
if (p->mode == LISSAJOUS) {
x = ((src[1] - src[0]) * zoom / (float)(UINT16_MAX) + 1) * hw;
y = (1.0 - (src[0] + src[1]) * zoom / (float)UINT16_MAX) * hh;
} else {
x = (src[1] * zoom / (float)INT16_MAX + 1) * hw;
y = (src[0] * zoom / (float)INT16_MAX + 1) * hh;
}
 
draw_dot(p, x, y);
}
break;
case AV_SAMPLE_FMT_FLT:
for (i = 0; i < insamples->nb_samples; i++) {
float *src = (float *)insamples->data[0] + i * 2;
 
if (p->mode == LISSAJOUS) {
x = ((src[1] - src[0]) * zoom / 2 + 1) * hw;
y = (1.0 - (src[0] + src[1]) * zoom / 2) * hh;
} else {
x = (src[1] * zoom + 1) * hw;
y = (src[0] * zoom + 1) * hh;
}
 
draw_dot(p, x, y);
}
break;
}
 
av_frame_free(&insamples);
 
return ff_filter_frame(outlink, av_frame_clone(p->outpicref));
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
AudioVectorScopeContext *p = ctx->priv;
 
av_frame_free(&p->outpicref);
}
 
static const AVFilterPad audiovectorscope_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad audiovectorscope_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_avf_avectorscope = {
.name = "avectorscope",
.description = NULL_IF_CONFIG_SMALL("Convert input audio to vectorscope video output."),
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(AudioVectorScopeContext),
.inputs = audiovectorscope_inputs,
.outputs = audiovectorscope_outputs,
.priv_class = &avectorscope_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/avf_concat.c
0,0 → 1,426
/*
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* concat audio-video filter
*/
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#define FF_BUFQUEUE_SIZE 256
#include "bufferqueue.h"
#include "internal.h"
#include "video.h"
#include "audio.h"
 
#define TYPE_ALL 2
 
typedef struct {
const AVClass *class;
unsigned nb_streams[TYPE_ALL]; /**< number of out streams of each type */
unsigned nb_segments;
unsigned cur_idx; /**< index of the first input of current segment */
int64_t delta_ts; /**< timestamp to add to produce output timestamps */
unsigned nb_in_active; /**< number of active inputs in current segment */
unsigned unsafe;
struct concat_in {
int64_t pts;
int64_t nb_frames;
unsigned eof;
struct FFBufQueue queue;
} *in;
} ConcatContext;
 
#define OFFSET(x) offsetof(ConcatContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption concat_options[] = {
{ "n", "specify the number of segments", OFFSET(nb_segments),
AV_OPT_TYPE_INT, { .i64 = 2 }, 2, INT_MAX, V|A|F},
{ "v", "specify the number of video streams",
OFFSET(nb_streams[AVMEDIA_TYPE_VIDEO]),
AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, V|F },
{ "a", "specify the number of audio streams",
OFFSET(nb_streams[AVMEDIA_TYPE_AUDIO]),
AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|F},
{ "unsafe", "enable unsafe mode",
OFFSET(unsafe),
AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V|A|F},
{ NULL }
};
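 
/* Usage sketch (illustrative): concatenate two files having one video and
* one audio stream each:
*
* ffmpeg -i part1.mkv -i part2.mkv -filter_complex \
* "[0:v][0:a][1:v][1:a]concat=n=2:v=1:a=1[v][a]" \
* -map "[v]" -map "[a]" out.mkv
*/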
 
AVFILTER_DEFINE_CLASS(concat);
 
static int query_formats(AVFilterContext *ctx)
{
ConcatContext *cat = ctx->priv;
unsigned type, nb_str, idx0 = 0, idx, str, seg;
AVFilterFormats *formats, *rates = NULL;
AVFilterChannelLayouts *layouts = NULL;
 
for (type = 0; type < TYPE_ALL; type++) {
nb_str = cat->nb_streams[type];
for (str = 0; str < nb_str; str++) {
idx = idx0;
 
/* Set the output formats */
formats = ff_all_formats(type);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &ctx->outputs[idx]->in_formats);
if (type == AVMEDIA_TYPE_AUDIO) {
rates = ff_all_samplerates();
if (!rates)
return AVERROR(ENOMEM);
ff_formats_ref(rates, &ctx->outputs[idx]->in_samplerates);
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_channel_layouts_ref(layouts, &ctx->outputs[idx]->in_channel_layouts);
}
 
/* Set the same formats for each corresponding input */
for (seg = 0; seg < cat->nb_segments; seg++) {
ff_formats_ref(formats, &ctx->inputs[idx]->out_formats);
if (type == AVMEDIA_TYPE_AUDIO) {
ff_formats_ref(rates, &ctx->inputs[idx]->out_samplerates);
ff_channel_layouts_ref(layouts, &ctx->inputs[idx]->out_channel_layouts);
}
idx += ctx->nb_outputs;
}
 
idx0++;
}
}
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
ConcatContext *cat = ctx->priv;
unsigned out_no = FF_OUTLINK_IDX(outlink);
unsigned in_no = out_no, seg;
AVFilterLink *inlink = ctx->inputs[in_no];
 
/* enhancement: find a common one */
outlink->time_base = AV_TIME_BASE_Q;
outlink->w = inlink->w;
outlink->h = inlink->h;
outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
outlink->format = inlink->format;
for (seg = 1; seg < cat->nb_segments; seg++) {
inlink = ctx->inputs[in_no += ctx->nb_outputs];
if (!outlink->sample_aspect_ratio.num)
outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
/* possible enhancement: unsafe mode, do not check */
if (outlink->w != inlink->w ||
outlink->h != inlink->h ||
outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num &&
inlink->sample_aspect_ratio.num ||
outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
av_log(ctx, AV_LOG_ERROR, "Input link %s parameters "
"(size %dx%d, SAR %d:%d) do not match the corresponding "
"output link %s parameters (%dx%d, SAR %d:%d)\n",
ctx->input_pads[in_no].name, inlink->w, inlink->h,
inlink->sample_aspect_ratio.num,
inlink->sample_aspect_ratio.den,
ctx->input_pads[out_no].name, outlink->w, outlink->h,
outlink->sample_aspect_ratio.num,
outlink->sample_aspect_ratio.den);
if (!cat->unsafe)
return AVERROR(EINVAL);
}
}
 
return 0;
}
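 
/* Timestamp bookkeeping (illustrative note): delta_ts accumulates the
* total duration of all finished segments, so frames of the current
* segment keep their own rescaled timestamps and are only shifted by
* delta_ts on output. E.g. if segment 0 ends at pts 10s, the first frame
* of segment 1 (local pts 0) is emitted with pts 10s. */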
 
static int push_frame(AVFilterContext *ctx, unsigned in_no, AVFrame *buf)
{
ConcatContext *cat = ctx->priv;
unsigned out_no = in_no % ctx->nb_outputs;
AVFilterLink * inlink = ctx-> inputs[ in_no];
AVFilterLink *outlink = ctx->outputs[out_no];
struct concat_in *in = &cat->in[in_no];
 
buf->pts = av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
in->pts = buf->pts;
in->nb_frames++;
/* add duration to input PTS */
if (inlink->sample_rate)
/* use number of audio samples */
in->pts += av_rescale_q(buf->nb_samples,
(AVRational){ 1, inlink->sample_rate },
outlink->time_base);
else if (in->nb_frames >= 2)
/* use mean duration */
in->pts = av_rescale(in->pts, in->nb_frames, in->nb_frames - 1);
 
buf->pts += cat->delta_ts;
return ff_filter_frame(outlink, buf);
}
 
static int process_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
ConcatContext *cat = ctx->priv;
unsigned in_no = FF_INLINK_IDX(inlink);
 
if (in_no < cat->cur_idx) {
av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n",
ctx->input_pads[in_no].name);
av_frame_free(&buf);
} else if (in_no >= cat->cur_idx + ctx->nb_outputs) {
ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf);
} else {
return push_frame(ctx, in_no, buf);
}
return 0;
}
 
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
AVFilterContext *ctx = inlink->dst;
unsigned in_no = FF_INLINK_IDX(inlink);
AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];
 
return ff_get_video_buffer(outlink, w, h);
}
 
static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples)
{
AVFilterContext *ctx = inlink->dst;
unsigned in_no = FF_INLINK_IDX(inlink);
AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];
 
return ff_get_audio_buffer(outlink, nb_samples);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
return process_frame(inlink, buf);
}
 
static void close_input(AVFilterContext *ctx, unsigned in_no)
{
ConcatContext *cat = ctx->priv;
 
cat->in[in_no].eof = 1;
cat->nb_in_active--;
av_log(ctx, AV_LOG_VERBOSE, "EOF on %s, %d streams left in segment.\n",
ctx->input_pads[in_no].name, cat->nb_in_active);
}
 
static void find_next_delta_ts(AVFilterContext *ctx, int64_t *seg_delta)
{
ConcatContext *cat = ctx->priv;
unsigned i = cat->cur_idx;
unsigned imax = i + ctx->nb_outputs;
int64_t pts;
 
pts = cat->in[i++].pts;
for (; i < imax; i++)
pts = FFMAX(pts, cat->in[i].pts);
cat->delta_ts += pts;
*seg_delta = pts;
}
 
static int send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no,
int64_t seg_delta)
{
ConcatContext *cat = ctx->priv;
AVFilterLink *outlink = ctx->outputs[out_no];
int64_t base_pts = cat->in[in_no].pts + cat->delta_ts - seg_delta;
int64_t nb_samples, sent = 0;
int frame_nb_samples, ret;
AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate };
AVFrame *buf;
int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
 
if (!rate_tb.den)
return AVERROR_BUG;
nb_samples = av_rescale_q(seg_delta - cat->in[in_no].pts,
outlink->time_base, rate_tb);
frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */
while (nb_samples) {
frame_nb_samples = FFMIN(frame_nb_samples, nb_samples);
buf = ff_get_audio_buffer(outlink, frame_nb_samples);
if (!buf)
return AVERROR(ENOMEM);
av_samples_set_silence(buf->extended_data, 0, frame_nb_samples,
nb_channels, outlink->format);
buf->pts = base_pts + av_rescale_q(sent, rate_tb, outlink->time_base);
ret = ff_filter_frame(outlink, buf);
if (ret < 0)
return ret;
sent += frame_nb_samples;
nb_samples -= frame_nb_samples;
}
return 0;
}
 
static int flush_segment(AVFilterContext *ctx)
{
int ret;
ConcatContext *cat = ctx->priv;
unsigned str, str_max;
int64_t seg_delta;
 
find_next_delta_ts(ctx, &seg_delta);
cat->cur_idx += ctx->nb_outputs;
cat->nb_in_active = ctx->nb_outputs;
av_log(ctx, AV_LOG_VERBOSE, "Segment finished at pts=%"PRId64"\n",
cat->delta_ts);
 
if (cat->cur_idx < ctx->nb_inputs) {
/* pad audio streams with silence */
str = cat->nb_streams[AVMEDIA_TYPE_VIDEO];
str_max = str + cat->nb_streams[AVMEDIA_TYPE_AUDIO];
for (; str < str_max; str++) {
ret = send_silence(ctx, cat->cur_idx - ctx->nb_outputs + str, str,
seg_delta);
if (ret < 0)
return ret;
}
/* flush queued buffers */
/* possible enhancement: flush in PTS order */
str_max = cat->cur_idx + ctx->nb_outputs;
for (str = cat->cur_idx; str < str_max; str++) {
while (cat->in[str].queue.available) {
ret = push_frame(ctx, str, ff_bufqueue_get(&cat->in[str].queue));
if (ret < 0)
return ret;
}
}
}
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
ConcatContext *cat = ctx->priv;
unsigned out_no = FF_OUTLINK_IDX(outlink);
unsigned in_no = out_no + cat->cur_idx;
unsigned str, str_max;
int ret;
 
while (1) {
if (in_no >= ctx->nb_inputs)
return AVERROR_EOF;
if (!cat->in[in_no].eof) {
ret = ff_request_frame(ctx->inputs[in_no]);
if (ret != AVERROR_EOF)
return ret;
close_input(ctx, in_no);
}
/* cycle on all inputs to finish the segment */
/* possible enhancement: request in PTS order */
str_max = cat->cur_idx + ctx->nb_outputs - 1;
for (str = cat->cur_idx; cat->nb_in_active;
str = str == str_max ? cat->cur_idx : str + 1) {
if (cat->in[str].eof)
continue;
ret = ff_request_frame(ctx->inputs[str]);
if (ret == AVERROR_EOF)
close_input(ctx, str);
else if (ret < 0)
return ret;
}
ret = flush_segment(ctx);
if (ret < 0)
return ret;
in_no += ctx->nb_outputs;
}
}
 
static av_cold int init(AVFilterContext *ctx)
{
ConcatContext *cat = ctx->priv;
unsigned seg, type, str;
 
/* create input pads */
for (seg = 0; seg < cat->nb_segments; seg++) {
for (type = 0; type < TYPE_ALL; type++) {
for (str = 0; str < cat->nb_streams[type]; str++) {
AVFilterPad pad = {
.type = type,
.get_video_buffer = get_video_buffer,
.get_audio_buffer = get_audio_buffer,
.filter_frame = filter_frame,
};
pad.name = av_asprintf("in%d:%c%d", seg, "va"[type], str);
ff_insert_inpad(ctx, ctx->nb_inputs, &pad);
}
}
}
/* create output pads */
for (type = 0; type < TYPE_ALL; type++) {
for (str = 0; str < cat->nb_streams[type]; str++) {
AVFilterPad pad = {
.type = type,
.config_props = config_output,
.request_frame = request_frame,
};
pad.name = av_asprintf("out:%c%d", "va"[type], str);
ff_insert_outpad(ctx, ctx->nb_outputs, &pad);
}
}
 
cat->in = av_calloc(ctx->nb_inputs, sizeof(*cat->in));
if (!cat->in)
return AVERROR(ENOMEM);
cat->nb_in_active = ctx->nb_outputs;
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
ConcatContext *cat = ctx->priv;
unsigned i;
 
for (i = 0; i < ctx->nb_inputs; i++) {
av_freep(&ctx->input_pads[i].name);
ff_bufqueue_discard_all(&cat->in[i].queue);
}
for (i = 0; i < ctx->nb_outputs; i++)
av_freep(&ctx->output_pads[i].name);
av_free(cat->in);
}
 
AVFilter avfilter_avf_concat = {
.name = "concat",
.description = NULL_IF_CONFIG_SMALL("Concatenate audio and video streams."),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(ConcatContext),
.inputs = NULL,
.outputs = NULL,
.priv_class = &concat_class,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/avf_showspectrum.c
0,0 → 1,502
/*
* Copyright (c) 2012 Clément Bœsch
* Copyright (c) 2013 Rudolf Polzer <divverent@xonotic.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* audio to spectrum (video) transmedia filter, based on ffplay rdft showmode
* (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini).
*/
 
#include <math.h>
 
#include "libavcodec/avfft.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
 
enum DisplayMode { COMBINED, SEPARATE, NB_MODES };
enum DisplayScale { LINEAR, SQRT, CBRT, LOG, NB_SCALES };
enum ColorMode { CHANNEL, INTENSITY, NB_CLMODES };
 
typedef struct {
const AVClass *class;
int w, h;
AVFrame *outpicref;
int req_fullfilled;
int nb_display_channels;
int channel_height;
int sliding; ///< 1 if sliding mode, 0 otherwise
enum DisplayMode mode; ///< channel display mode
enum ColorMode color_mode; ///< display color scheme
enum DisplayScale scale;
float saturation; ///< color saturation multiplier
int xpos; ///< x position (current column)
RDFTContext *rdft; ///< Real Discrete Fourier Transform context
int rdft_bits; ///< number of bits (RDFT window size = 1<<rdft_bits)
FFTSample **rdft_data; ///< bins holder for each (displayed) channels
int filled; ///< number of samples (per channel) filled in current rdft_buffer
int consumed; ///< number of samples (per channel) consumed from the input frame
float *window_func_lut; ///< Window function LUT
float *combine_buffer; ///< color combining buffer (3 * h items)
} ShowSpectrumContext;
 
#define OFFSET(x) offsetof(ShowSpectrumContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption showspectrum_options[] = {
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
{ "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
{ "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" },
{ "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
{ "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
{ "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" },
{ "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
{ "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
{ "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
{ "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
{ "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
{ "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
{ "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
{ "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
{ NULL }
};
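 
/* Usage sketch (illustrative): sliding, intensity-colored spectrum of an
* audio file:
*
* ffmpeg -i in.mp3 -filter_complex \
* "showspectrum=s=640x512:slide=1:mode=separate:color=intensity" \
* spectrum.mp4
*/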
 
AVFILTER_DEFINE_CLASS(showspectrum);
 
static const struct {
float a, y, u, v;
} intensity_color_table[] = {
{ 0, 0, 0, 0 },
{ 0.13, .03587126228984074, .1573300977624594, -.02548747583751842 },
{ 0.30, .18572281794568020, .1772436246393981, .17475554840414750 },
{ 0.60, .28184980583656130, -.1593064119945782, .47132074554608920 },
{ 0.73, .65830621175547810, -.3716070802232764, .24352759331252930 },
{ 0.78, .76318535758242900, -.4307467689263783, .16866496622310430 },
{ 0.91, .95336363636363640, -.2045454545454546, .03313636363636363 },
{ 1, 1, 0, 0 }
};
 
static av_cold void uninit(AVFilterContext *ctx)
{
ShowSpectrumContext *s = ctx->priv;
int i;
 
av_freep(&s->combine_buffer);
av_rdft_end(s->rdft);
for (i = 0; i < s->nb_display_channels; i++)
av_freep(&s->rdft_data[i]);
av_freep(&s->rdft_data);
av_freep(&s->window_func_lut);
av_frame_free(&s->outpicref);
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layouts = NULL;
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_NONE };
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE };
 
/* set input audio formats */
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &inlink->out_formats);
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &inlink->out_samplerates);
 
/* set output video format */
formats = ff_make_format_list(pix_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &outlink->in_formats);
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = ctx->inputs[0];
ShowSpectrumContext *s = ctx->priv;
int i, rdft_bits, win_size, h;
 
outlink->w = s->w;
outlink->h = s->h;
 
h = (s->mode == COMBINED) ? outlink->h : outlink->h / inlink->channels;
s->channel_height = h;
 
/* RDFT window size (precision) according to the requested output frame height */
for (rdft_bits = 1; 1 << rdft_bits < 2 * h; rdft_bits++);
win_size = 1 << rdft_bits;
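/* (Illustrative: for the default 640x512 output in combined mode h = 512,
so rdft_bits = 10, win_size = 1024 and the transform yields 512 frequency
bins, one per output row.) */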
 
/* (re-)configuration if the video output changed (or first init) */
if (rdft_bits != s->rdft_bits) {
size_t rdft_size, rdft_listsize;
AVFrame *outpicref;
 
av_rdft_end(s->rdft);
s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
s->rdft_bits = rdft_bits;
 
/* RDFT buffers: x2 for each (display) channel buffer.
* Note: we use free and malloc instead of a realloc-like function to
* make sure the buffer is aligned in memory for the FFT functions. */
for (i = 0; i < s->nb_display_channels; i++)
av_freep(&s->rdft_data[i]);
av_freep(&s->rdft_data);
s->nb_display_channels = inlink->channels;
 
if (av_size_mult(sizeof(*s->rdft_data),
s->nb_display_channels, &rdft_listsize) < 0)
return AVERROR(EINVAL);
if (av_size_mult(sizeof(**s->rdft_data),
win_size, &rdft_size) < 0)
return AVERROR(EINVAL);
s->rdft_data = av_malloc(rdft_listsize);
if (!s->rdft_data)
return AVERROR(ENOMEM);
for (i = 0; i < s->nb_display_channels; i++) {
s->rdft_data[i] = av_malloc(rdft_size);
if (!s->rdft_data[i])
return AVERROR(ENOMEM);
}
s->filled = 0;
 
/* pre-calc windowing function (hann here) */
s->window_func_lut =
av_realloc_f(s->window_func_lut, win_size,
sizeof(*s->window_func_lut));
if (!s->window_func_lut)
return AVERROR(ENOMEM);
for (i = 0; i < win_size; i++)
s->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1)));
 
/* prepare the initial picref buffer (black frame) */
av_frame_free(&s->outpicref);
s->outpicref = outpicref =
ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpicref)
return AVERROR(ENOMEM);
outlink->sample_aspect_ratio = (AVRational){1,1};
for (i = 0; i < outlink->h; i++) {
memset(outpicref->data[0] + i * outpicref->linesize[0], 0, outlink->w);
memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
}
}
 
if (s->xpos >= outlink->w)
s->xpos = 0;
 
s->combine_buffer =
av_realloc_f(s->combine_buffer, outlink->h * 3,
sizeof(*s->combine_buffer));
 
av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d RDFT window size:%d\n",
s->w, s->h, win_size);
return 0;
}
 
inline static int push_frame(AVFilterLink *outlink)
{
ShowSpectrumContext *s = outlink->src->priv;
 
s->xpos++;
if (s->xpos >= outlink->w)
s->xpos = 0;
s->filled = 0;
s->req_fullfilled = 1;
 
return ff_filter_frame(outlink, av_frame_clone(s->outpicref));
}
 
static int request_frame(AVFilterLink *outlink)
{
ShowSpectrumContext *s = outlink->src->priv;
AVFilterLink *inlink = outlink->src->inputs[0];
int ret;
 
s->req_fullfilled = 0;
do {
ret = ff_request_frame(inlink);
} while (!s->req_fullfilled && ret >= 0);
 
if (ret == AVERROR_EOF && s->outpicref)
push_frame(outlink);
return ret;
}
 
static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb_samples)
{
int ret;
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
ShowSpectrumContext *s = ctx->priv;
AVFrame *outpicref = s->outpicref;
 
/* nb_freq is the power of two greater than or equal to the output image
* height (it is also half the RDFT window size) */
const int nb_freq = 1 << (s->rdft_bits - 1);
const int win_size = nb_freq << 1;
const double w = 1. / (sqrt(nb_freq) * 32768.);
 
int ch, plane, n, y;
const int start = s->filled;
const int add_samples = FFMIN(win_size - start, nb_samples);
 
/* fill RDFT input with the number of samples available */
for (ch = 0; ch < s->nb_display_channels; ch++) {
const int16_t *p = (int16_t *)insamples->extended_data[ch];
 
p += s->consumed;
for (n = 0; n < add_samples; n++)
s->rdft_data[ch][start + n] = p[n] * s->window_func_lut[start + n];
}
s->filled += add_samples;
 
/* complete RDFT window size? */
if (s->filled == win_size) {
 
/* channel height */
int h = s->channel_height;
 
/* run the RDFT on each channel's sample set */
for (ch = 0; ch < s->nb_display_channels; ch++)
av_rdft_calc(s->rdft, s->rdft_data[ch]);
 
/* fill a new spectrum column */
#define RE(y, ch) s->rdft_data[ch][2 * y + 0]
#define IM(y, ch) s->rdft_data[ch][2 * y + 1]
#define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch))
 
/* initialize buffer for combining to black */
for (y = 0; y < outlink->h; y++) {
s->combine_buffer[3 * y ] = 0;
s->combine_buffer[3 * y + 1] = 127.5;
s->combine_buffer[3 * y + 2] = 127.5;
}
 
for (ch = 0; ch < s->nb_display_channels; ch++) {
float yf, uf, vf;
 
/* decide color range */
switch (s->mode) {
case COMBINED:
// reduce range by channel count
yf = 256.0f / s->nb_display_channels;
switch (s->color_mode) {
case INTENSITY:
uf = yf;
vf = yf;
break;
case CHANNEL:
/* adjust saturation for mixed UV coloring */
/* this factor is correct for infinite channels, an approximation otherwise */
uf = yf * M_PI;
vf = yf * M_PI;
break;
default:
av_assert0(0);
}
break;
case SEPARATE:
// full range
yf = 256.0f;
uf = 256.0f;
vf = 256.0f;
break;
default:
av_assert0(0);
}
 
if (s->color_mode == CHANNEL) {
if (s->nb_display_channels > 1) {
uf *= 0.5 * sin((2 * M_PI * ch) / s->nb_display_channels);
vf *= 0.5 * cos((2 * M_PI * ch) / s->nb_display_channels);
} else {
uf = 0.0f;
vf = 0.0f;
}
}
uf *= s->saturation;
vf *= s->saturation;
 
/* draw the channel */
for (y = 0; y < h; y++) {
int row = (s->mode == COMBINED) ? y : ch * h + y;
float *out = &s->combine_buffer[3 * row];
 
/* get magnitude */
float a = w * MAGNITUDE(y, ch);
 
/* apply scale */
switch (s->scale) {
case LINEAR:
break;
case SQRT:
a = sqrt(a);
break;
case CBRT:
a = cbrt(a);
break;
case LOG:
a = 1 - log(FFMAX(FFMIN(1, a), 1e-6)) / log(1e-6); // zero = -120dBFS
break;
default:
av_assert0(0);
}
 
if (s->color_mode == INTENSITY) {
float y, u, v;
int i;
 
for (i = 1; i < sizeof(intensity_color_table) / sizeof(*intensity_color_table) - 1; i++)
if (intensity_color_table[i].a >= a)
break;
// i is now the index of the first table entry with intensity >= a,
// so we interpolate between entries i - 1 and i
if (a <= intensity_color_table[i - 1].a) {
y = intensity_color_table[i - 1].y;
u = intensity_color_table[i - 1].u;
v = intensity_color_table[i - 1].v;
} else if (a >= intensity_color_table[i].a) {
y = intensity_color_table[i].y;
u = intensity_color_table[i].u;
v = intensity_color_table[i].v;
} else {
float start = intensity_color_table[i - 1].a;
float end = intensity_color_table[i].a;
float lerpfrac = (a - start) / (end - start);
y = intensity_color_table[i - 1].y * (1.0f - lerpfrac)
+ intensity_color_table[i].y * lerpfrac;
u = intensity_color_table[i - 1].u * (1.0f - lerpfrac)
+ intensity_color_table[i].u * lerpfrac;
v = intensity_color_table[i - 1].v * (1.0f - lerpfrac)
+ intensity_color_table[i].v * lerpfrac;
}
 
out[0] += y * yf;
out[1] += u * uf;
out[2] += v * vf;
} else {
out[0] += a * yf;
out[1] += a * uf;
out[2] += a * vf;
}
}
}
 
/* copy to output */
if (s->sliding) {
for (plane = 0; plane < 3; plane++) {
for (y = 0; y < outlink->h; y++) {
uint8_t *p = outpicref->data[plane] +
y * outpicref->linesize[plane];
memmove(p, p + 1, outlink->w - 1);
}
}
s->xpos = outlink->w - 1;
}
for (plane = 0; plane < 3; plane++) {
uint8_t *p = outpicref->data[plane] +
(outlink->h - 1) * outpicref->linesize[plane] +
s->xpos;
for (y = 0; y < outlink->h; y++) {
*p = rint(FFMAX(0, FFMIN(s->combine_buffer[3 * y + plane], 255)));
p -= outpicref->linesize[plane];
}
}
 
outpicref->pts = insamples->pts +
av_rescale_q(s->consumed,
(AVRational){ 1, inlink->sample_rate },
outlink->time_base);
ret = push_frame(outlink);
if (ret < 0)
return ret;
}
 
return add_samples;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
ShowSpectrumContext *s = ctx->priv;
int ret = 0, left_samples = insamples->nb_samples;
 
s->consumed = 0;
while (left_samples) {
int ret = plot_spectrum_column(inlink, insamples, left_samples);
if (ret < 0)
break;
s->consumed += ret;
left_samples -= ret;
}
 
av_frame_free(&insamples);
return ret;
}
 
static const AVFilterPad showspectrum_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad showspectrum_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_avf_showspectrum = {
.name = "showspectrum",
.description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(ShowSpectrumContext),
.inputs = showspectrum_inputs,
.outputs = showspectrum_outputs,
.priv_class = &showspectrum_class,
};
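/* Editor's note: a hedged usage example, not part of the original file.
 * Option names are assumed from the showspectrum_options table earlier in
 * this file; a typical lavfi graph string would be
 *
 *     amovie=input.mp3,showspectrum=s=1024x512:scale=log
 *
 * which renders the (hypothetical) file input.mp3 as a log-scaled
 * spectrum video. */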
/contrib/sdk/sources/ffmpeg/libavfilter/avf_showwaves.c
0,0 → 1,256
/*
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* audio to video multimedia filter
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
#include "audio.h"
#include "video.h"
#include "internal.h"
 
enum ShowWavesMode {
MODE_POINT,
MODE_LINE,
MODE_NB,
};
 
typedef struct {
const AVClass *class;
int w, h;
AVRational rate;
int buf_idx;
AVFrame *outpicref;
int req_fullfilled;
int n;
int sample_count_mod;
enum ShowWavesMode mode;
} ShowWavesContext;
 
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption showwaves_options[] = {
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
{ "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
{ "point", "draw a point for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"},
{ "line", "draw a line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE}, .flags=FLAGS, .unit="mode"},
{ "n", "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
{ "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
{ "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(showwaves);
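/* Editor's note: a hedged usage sketch, not part of the original file.
 * It applies the options declared above through the public API; the
 * avfilter_open()/avfilter_init_str() pair is the constructor available
 * in this tree (deprecated upstream), the instance name is arbitrary,
 * and error handling is elided. */
static av_unused void showwaves_options_sketch(void)
{
    AVFilterContext *ctx = NULL;
    if (avfilter_open(&ctx, avfilter_get_by_name("showwaves"), "waves") >= 0)
        avfilter_init_str(ctx, "s=640x240:mode=line:r=25");
    avfilter_free(ctx); /* safe on NULL */
}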
 
static av_cold void uninit(AVFilterContext *ctx)
{
ShowWavesContext *showwaves = ctx->priv;
 
av_frame_free(&showwaves->outpicref);
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layouts = NULL;
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
 
/* set input audio formats */
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &inlink->out_formats);
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
 
formats = ff_all_samplerates();
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &inlink->out_samplerates);
 
/* set output video format */
formats = ff_make_format_list(pix_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &outlink->in_formats);
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = ctx->inputs[0];
ShowWavesContext *showwaves = ctx->priv;
 
if (!showwaves->n)
showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);
 
showwaves->buf_idx = 0;
outlink->w = showwaves->w;
outlink->h = showwaves->h;
outlink->sample_aspect_ratio = (AVRational){1,1};
 
outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
(AVRational){showwaves->w,1});
 
av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);
return 0;
}
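/* Editor's note: illustrative arithmetic, not part of the original file.
 * With a 44100 Hz input and the defaults s=600x240, r=25, the formula above
 * gives n = max(1, round(44100 / (600 * 25))) = 3 samples per column, and
 * frame_rate = (44100 / 3) / 600 = 24.5 frames per second. */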
 
inline static int push_frame(AVFilterLink *outlink)
{
ShowWavesContext *showwaves = outlink->src->priv;
int ret;
 
if ((ret = ff_filter_frame(outlink, showwaves->outpicref)) >= 0)
showwaves->req_fullfilled = 1;
showwaves->outpicref = NULL;
showwaves->buf_idx = 0;
return ret;
}
 
static int request_frame(AVFilterLink *outlink)
{
ShowWavesContext *showwaves = outlink->src->priv;
AVFilterLink *inlink = outlink->src->inputs[0];
int ret;
 
showwaves->req_fullfilled = 0;
do {
ret = ff_request_frame(inlink);
} while (!showwaves->req_fullfilled && ret >= 0);
 
if (ret == AVERROR_EOF && showwaves->outpicref)
push_frame(outlink);
return ret;
}
 
#define MAX_INT16 ((1<<15) -1)
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
ShowWavesContext *showwaves = ctx->priv;
const int nb_samples = insamples->nb_samples;
AVFrame *outpicref = showwaves->outpicref;
int linesize = outpicref ? outpicref->linesize[0] : 0;
int16_t *p = (int16_t *)insamples->data[0];
int nb_channels = inlink->channels;
int i, j, k, h, ret = 0;
const int n = showwaves->n;
const int x = 255 / (nb_channels * n); /* multiplication factor, pre-computed to avoid in-loop divisions */
 
/* draw data in the buffer */
for (i = 0; i < nb_samples; i++) {
if (!showwaves->outpicref) {
showwaves->outpicref = outpicref =
ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpicref)
return AVERROR(ENOMEM);
outpicref->width = outlink->w;
outpicref->height = outlink->h;
outpicref->pts = insamples->pts +
av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels,
(AVRational){ 1, inlink->sample_rate },
outlink->time_base);
linesize = outpicref->linesize[0];
for (j = 0; j < outlink->h; j++)
memset(outpicref->data[0] + j * linesize, 0, outlink->w);
}
for (j = 0; j < nb_channels; j++) {
h = showwaves->h/2 - av_rescale(*p++, showwaves->h/2, MAX_INT16);
switch (showwaves->mode) {
case MODE_POINT:
if (h >= 0 && h < outlink->h)
*(outpicref->data[0] + showwaves->buf_idx + h * linesize) += x;
break;
 
case MODE_LINE:
{
int start = showwaves->h/2, end = av_clip(h, 0, outlink->h-1);
if (start > end) FFSWAP(int, start, end); /* start/end are ints; swapping through int16_t would truncate */
for (k = start; k < end; k++)
*(outpicref->data[0] + showwaves->buf_idx + k * linesize) += x;
break;
}
}
}
 
showwaves->sample_count_mod++;
if (showwaves->sample_count_mod == n) {
showwaves->sample_count_mod = 0;
showwaves->buf_idx++;
}
if (showwaves->buf_idx == showwaves->w)
if ((ret = push_frame(outlink)) < 0)
break;
outpicref = showwaves->outpicref;
}
 
av_frame_free(&insamples);
return ret;
}
 
static const AVFilterPad showwaves_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad showwaves_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_avf_showwaves = {
.name = "showwaves",
.description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(ShowWavesContext),
.inputs = showwaves_inputs,
.outputs = showwaves_outputs,
.priv_class = &showwaves_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/avfilter.c
0,0 → 1,1151
/*
* filter layer
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/atomic.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/rational.h"
#include "libavutil/samplefmt.h"
 
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame);
 
void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
{
av_unused char buf[16];
ff_tlog(ctx,
"ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
ref, ref->buf, ref->data[0],
ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
ref->pts, av_frame_get_pkt_pos(ref));
 
if (ref->width) {
ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
ref->width, ref->height,
!ref->interlaced_frame ? 'P' : /* Progressive */
ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
ref->key_frame,
av_get_picture_type_char(ref->pict_type));
}
if (ref->nb_samples) {
ff_tlog(ctx, " cl:%"PRId64" n:%d r:%d",
ref->channel_layout,
ref->nb_samples,
ref->sample_rate);
}
 
ff_tlog(ctx, "]%s", end ? "\n" : "");
}
 
unsigned avfilter_version(void)
{
av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
return LIBAVFILTER_VERSION_INT;
}
 
const char *avfilter_configuration(void)
{
return FFMPEG_CONFIGURATION;
}
 
const char *avfilter_license(void)
{
#define LICENSE_PREFIX "libavfilter license: "
return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}
 
void ff_command_queue_pop(AVFilterContext *filter)
{
AVFilterCommand *c= filter->command_queue;
av_freep(&c->arg);
av_freep(&c->command);
filter->command_queue= c->next;
av_free(c);
}
 
int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad)
{
AVFilterLink **newlinks;
AVFilterPad *newpads;
unsigned i;
 
idx = FFMIN(idx, *count);
 
newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
if (newpads)
*pads = newpads;
if (newlinks)
*links = newlinks;
if (!newpads || !newlinks)
return AVERROR(ENOMEM);
 
memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
(*links)[idx] = NULL;
 
(*count)++;
for (i = idx + 1; i < *count; i++)
if ((*links)[i])
(*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
 
return 0;
}
 
int avfilter_link(AVFilterContext *src, unsigned srcpad,
AVFilterContext *dst, unsigned dstpad)
{
AVFilterLink *link;
 
if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
src->outputs[srcpad] || dst->inputs[dstpad])
return -1;
 
if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
av_log(src, AV_LOG_ERROR,
"Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
return AVERROR(EINVAL);
}
 
link = av_mallocz(sizeof(*link));
if (!link)
return AVERROR(ENOMEM);
 
src->outputs[srcpad] = dst->inputs[dstpad] = link;
 
link->src = src;
link->dst = dst;
link->srcpad = &src->output_pads[srcpad];
link->dstpad = &dst->input_pads[dstpad];
link->type = src->output_pads[srcpad].type;
av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
link->format = -1;
 
return 0;
}
 
void avfilter_link_free(AVFilterLink **link)
{
if (!*link)
return;
 
av_frame_free(&(*link)->partial_buf);
 
av_freep(link);
}
 
int avfilter_link_get_channels(AVFilterLink *link)
{
return link->channels;
}
 
void avfilter_link_set_closed(AVFilterLink *link, int closed)
{
link->closed = closed;
}
 
int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
{
int ret;
unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
 
av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
"between the filter '%s' and the filter '%s'\n",
filt->name, link->src->name, link->dst->name);
 
link->dst->inputs[dstpad_idx] = NULL;
if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
/* failed to link output filter to new filter */
link->dst->inputs[dstpad_idx] = link;
return ret;
}
 
/* re-hookup the link to the new destination filter we inserted */
link->dst = filt;
link->dstpad = &filt->input_pads[filt_srcpad_idx];
filt->inputs[filt_srcpad_idx] = link;
 
/* if any information on supported media formats already exists on the
* link, we need to preserve that */
if (link->out_formats)
ff_formats_changeref(&link->out_formats,
&filt->outputs[filt_dstpad_idx]->out_formats);
if (link->out_samplerates)
ff_formats_changeref(&link->out_samplerates,
&filt->outputs[filt_dstpad_idx]->out_samplerates);
if (link->out_channel_layouts)
ff_channel_layouts_changeref(&link->out_channel_layouts,
&filt->outputs[filt_dstpad_idx]->out_channel_layouts);
 
return 0;
}
 
int avfilter_config_links(AVFilterContext *filter)
{
int (*config_link)(AVFilterLink *);
unsigned i;
int ret;
 
for (i = 0; i < filter->nb_inputs; i ++) {
AVFilterLink *link = filter->inputs[i];
AVFilterLink *inlink;
 
if (!link) continue;
 
inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
link->current_pts = AV_NOPTS_VALUE;
 
switch (link->init_state) {
case AVLINK_INIT:
continue;
case AVLINK_STARTINIT:
av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
return 0;
case AVLINK_UNINIT:
link->init_state = AVLINK_STARTINIT;
 
if ((ret = avfilter_config_links(link->src)) < 0)
return ret;
 
if (!(config_link = link->srcpad->config_props)) {
if (link->src->nb_inputs != 1) {
av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
"with more than one input "
"must set config_props() "
"callbacks on all outputs\n");
return AVERROR(EINVAL);
}
} else if ((ret = config_link(link)) < 0) {
av_log(link->src, AV_LOG_ERROR,
"Failed to configure output pad on %s\n",
link->src->name);
return ret;
}
 
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
if (!link->time_base.num && !link->time_base.den)
link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
 
if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
link->sample_aspect_ratio = inlink ?
inlink->sample_aspect_ratio : (AVRational){1,1};
 
if (inlink && !link->frame_rate.num && !link->frame_rate.den)
link->frame_rate = inlink->frame_rate;
 
if (inlink) {
if (!link->w)
link->w = inlink->w;
if (!link->h)
link->h = inlink->h;
} else if (!link->w || !link->h) {
av_log(link->src, AV_LOG_ERROR,
"Video source filters must set their output link's "
"width and height\n");
return AVERROR(EINVAL);
}
break;
 
case AVMEDIA_TYPE_AUDIO:
if (inlink) {
if (!link->time_base.num && !link->time_base.den)
link->time_base = inlink->time_base;
}
 
if (!link->time_base.num && !link->time_base.den)
link->time_base = (AVRational) {1, link->sample_rate};
}
 
if ((config_link = link->dstpad->config_props))
if ((ret = config_link(link)) < 0) {
av_log(link->src, AV_LOG_ERROR,
"Failed to configure input pad on %s\n",
link->dst->name);
return ret;
}
 
link->init_state = AVLINK_INIT;
}
}
 
return 0;
}
 
void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
{
if (link->type == AVMEDIA_TYPE_VIDEO) {
ff_tlog(ctx,
"link[%p s:%dx%d fmt:%s %s->%s]%s",
link, link->w, link->h,
av_get_pix_fmt_name(link->format),
link->src ? link->src->filter->name : "",
link->dst ? link->dst->filter->name : "",
end ? "\n" : "");
} else {
char buf[128];
av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
 
ff_tlog(ctx,
"link[%p r:%d cl:%s fmt:%s %s->%s]%s",
link, (int)link->sample_rate, buf,
av_get_sample_fmt_name(link->format),
link->src ? link->src->filter->name : "",
link->dst ? link->dst->filter->name : "",
end ? "\n" : "");
}
}
 
int ff_request_frame(AVFilterLink *link)
{
int ret = -1;
FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
 
if (link->closed)
return AVERROR_EOF;
av_assert0(!link->frame_requested);
link->frame_requested = 1;
while (link->frame_requested) {
if (link->srcpad->request_frame)
ret = link->srcpad->request_frame(link);
else if (link->src->inputs[0])
ret = ff_request_frame(link->src->inputs[0]);
if (ret == AVERROR_EOF && link->partial_buf) {
AVFrame *pbuf = link->partial_buf;
link->partial_buf = NULL;
ret = ff_filter_frame_framed(link, pbuf);
}
if (ret < 0) {
link->frame_requested = 0;
if (ret == AVERROR_EOF)
link->closed = 1;
} else {
av_assert0(!link->frame_requested ||
link->flags & FF_LINK_FLAG_REQUEST_LOOP);
}
}
return ret;
}
 
int ff_poll_frame(AVFilterLink *link)
{
int i, min = INT_MAX;
 
if (link->srcpad->poll_frame)
return link->srcpad->poll_frame(link);
 
for (i = 0; i < link->src->nb_inputs; i++) {
int val;
if (!link->src->inputs[i])
return -1;
val = ff_poll_frame(link->src->inputs[i]);
min = FFMIN(min, val);
}
 
return min;
}
 
static const char *const var_names[] = { "t", "n", "pos", NULL };
enum { VAR_T, VAR_N, VAR_POS, VAR_VARS_NB };
 
static int set_enable_expr(AVFilterContext *ctx, const char *expr)
{
int ret;
char *expr_dup;
AVExpr *old = ctx->enable;
 
if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
"with filter '%s'\n", ctx->filter->name);
return AVERROR_PATCHWELCOME;
}
 
expr_dup = av_strdup(expr);
if (!expr_dup)
return AVERROR(ENOMEM);
 
if (!ctx->var_values) {
ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
if (!ctx->var_values) {
av_free(expr_dup);
return AVERROR(ENOMEM);
}
}
 
ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
NULL, NULL, NULL, NULL, 0, ctx->priv);
if (ret < 0) {
av_log(ctx->priv, AV_LOG_ERROR,
"Error when evaluating the expression '%s' for enable\n",
expr_dup);
av_free(expr_dup);
return ret;
}
 
av_expr_free(old);
av_free(ctx->enable_str);
ctx->enable_str = expr_dup;
return 0;
}
 
void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
{
if (pts == AV_NOPTS_VALUE)
return;
link->current_pts = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
/* TODO use duration */
if (link->graph && link->age_index >= 0)
ff_avfilter_graph_update_heap(link->graph, link);
}
 
int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
{
if(!strcmp(cmd, "ping")){
char local_res[256] = {0};
 
if (!res) {
res = local_res;
res_len = sizeof(local_res);
}
av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
if (res == local_res)
av_log(filter, AV_LOG_INFO, "%s", res);
return 0;
}else if(!strcmp(cmd, "enable")) {
return set_enable_expr(filter, arg);
}else if(filter->filter->process_command) {
return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
}
return AVERROR(ENOSYS);
}
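/* Editor's note: a hedged usage sketch, not part of the original file.
 * It exercises the generic "ping" command handled above; "filter" is an
 * assumed, already-initialized filter instance. */
static av_unused void process_command_sketch(AVFilterContext *filter)
{
    char res[256] = { 0 };
    if (avfilter_process_command(filter, "ping", NULL, res, sizeof(res), 0) >= 0)
        av_log(filter, AV_LOG_INFO, "%s", res); /* "pong from:..." */
}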
 
static AVFilter *first_filter;
 
AVFilter *avfilter_get_by_name(const char *name)
{
const AVFilter *f = NULL;
 
if (!name)
return NULL;
 
while ((f = avfilter_next(f)))
if (!strcmp(f->name, name))
return (AVFilter *)f;
 
return NULL;
}
 
int avfilter_register(AVFilter *filter)
{
AVFilter **f = &first_filter;
int i;
 
/* the filter must select generic or internal exclusively */
av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);
 
for(i=0; filter->inputs && filter->inputs[i].name; i++) {
const AVFilterPad *input = &filter->inputs[i];
av_assert0( !input->filter_frame
|| (!input->start_frame && !input->end_frame));
}
 
filter->next = NULL;
 
while(avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
f = &(*f)->next;
 
return 0;
}
 
const AVFilter *avfilter_next(const AVFilter *prev)
{
return prev ? prev->next : first_filter;
}
 
#if FF_API_OLD_FILTER_REGISTER
AVFilter **av_filter_next(AVFilter **filter)
{
return filter ? &(*filter)->next : &first_filter;
}
 
void avfilter_uninit(void)
{
}
#endif
 
int avfilter_pad_count(const AVFilterPad *pads)
{
int count;
 
if (!pads)
return 0;
 
for (count = 0; pads->name; count++)
pads++;
return count;
}
 
static const char *default_filter_name(void *filter_ctx)
{
AVFilterContext *ctx = filter_ctx;
return ctx->name ? ctx->name : ctx->filter->name;
}
 
static void *filter_child_next(void *obj, void *prev)
{
AVFilterContext *ctx = obj;
if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
return ctx->priv;
return NULL;
}
 
static const AVClass *filter_child_class_next(const AVClass *prev)
{
const AVFilter *f = NULL;
 
/* find the filter that corresponds to prev */
while (prev && (f = avfilter_next(f)))
if (f->priv_class == prev)
break;
 
/* could not find filter corresponding to prev */
if (prev && !f)
return NULL;
 
/* find next filter with specific options */
while ((f = avfilter_next(f)))
if (f->priv_class)
return f->priv_class;
 
return NULL;
}
 
#define OFFSET(x) offsetof(AVFilterContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
static const AVOption avfilter_options[] = {
{ "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
{ .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
{ "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
{ "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ NULL },
};
 
static const AVClass avfilter_class = {
.class_name = "AVFilter",
.item_name = default_filter_name,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_FILTER,
.child_next = filter_child_next,
.child_class_next = filter_child_class_next,
.option = avfilter_options,
};
 
static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
int *ret, int nb_jobs)
{
int i;
 
for (i = 0; i < nb_jobs; i++) {
int r = func(ctx, arg, i, nb_jobs);
if (ret)
ret[i] = r;
}
return 0;
}
 
AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
{
AVFilterContext *ret;
 
if (!filter)
return NULL;
 
ret = av_mallocz(sizeof(AVFilterContext));
if (!ret)
return NULL;
 
ret->av_class = &avfilter_class;
ret->filter = filter;
ret->name = inst_name ? av_strdup(inst_name) : NULL;
if (filter->priv_size) {
ret->priv = av_mallocz(filter->priv_size);
if (!ret->priv)
goto err;
}
 
av_opt_set_defaults(ret);
if (filter->priv_class) {
*(const AVClass**)ret->priv = filter->priv_class;
av_opt_set_defaults(ret->priv);
}
 
ret->internal = av_mallocz(sizeof(*ret->internal));
if (!ret->internal)
goto err;
ret->internal->execute = default_execute;
 
ret->nb_inputs = avfilter_pad_count(filter->inputs);
if (ret->nb_inputs ) {
ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs);
if (!ret->input_pads)
goto err;
memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_inputs);
if (!ret->inputs)
goto err;
}
 
ret->nb_outputs = avfilter_pad_count(filter->outputs);
if (ret->nb_outputs) {
ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs);
if (!ret->output_pads)
goto err;
memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_outputs);
if (!ret->outputs)
goto err;
}
#if FF_API_FOO_COUNT
FF_DISABLE_DEPRECATION_WARNINGS
ret->output_count = ret->nb_outputs;
ret->input_count = ret->nb_inputs;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
 
return ret;
 
err:
av_freep(&ret->inputs);
av_freep(&ret->input_pads);
ret->nb_inputs = 0;
av_freep(&ret->outputs);
av_freep(&ret->output_pads);
ret->nb_outputs = 0;
av_freep(&ret->priv);
av_freep(&ret->internal);
av_free(ret);
return NULL;
}
 
#if FF_API_AVFILTER_OPEN
int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name)
{
*filter_ctx = ff_filter_alloc(filter, inst_name);
return *filter_ctx ? 0 : AVERROR(ENOMEM);
}
#endif
 
static void free_link(AVFilterLink *link)
{
if (!link)
return;
 
if (link->src)
link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
if (link->dst)
link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
 
ff_formats_unref(&link->in_formats);
ff_formats_unref(&link->out_formats);
ff_formats_unref(&link->in_samplerates);
ff_formats_unref(&link->out_samplerates);
ff_channel_layouts_unref(&link->in_channel_layouts);
ff_channel_layouts_unref(&link->out_channel_layouts);
avfilter_link_free(&link);
}
 
void avfilter_free(AVFilterContext *filter)
{
int i;
 
if (!filter)
return;
 
if (filter->graph)
ff_filter_graph_remove_filter(filter->graph, filter);
 
if (filter->filter->uninit)
filter->filter->uninit(filter);
 
for (i = 0; i < filter->nb_inputs; i++) {
free_link(filter->inputs[i]);
}
for (i = 0; i < filter->nb_outputs; i++) {
free_link(filter->outputs[i]);
}
 
if (filter->filter->priv_class)
av_opt_free(filter->priv);
 
av_freep(&filter->name);
av_freep(&filter->input_pads);
av_freep(&filter->output_pads);
av_freep(&filter->inputs);
av_freep(&filter->outputs);
av_freep(&filter->priv);
while(filter->command_queue){
ff_command_queue_pop(filter);
}
av_opt_free(filter);
av_expr_free(filter->enable);
filter->enable = NULL;
av_freep(&filter->var_values);
av_freep(&filter->internal);
av_free(filter);
}
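/* Editor's note: a hedged lifecycle sketch, not part of the original file.
 * In practice a filter context is created and destroyed through the graph
 * API declared in avfiltergraph.h (assumed available), which ends up in the
 * avfilter_free() logic above:
 *
 *     AVFilterGraph *g = avfilter_graph_alloc();
 *     AVFilterContext *ctx = NULL;
 *     avfilter_graph_create_filter(&ctx, avfilter_get_by_name("volume"),
 *                                  "vol", "volume=0.5", NULL, g);
 *     ...
 *     avfilter_graph_free(&g); // also frees the contained filter contexts
 */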
 
static int process_options(AVFilterContext *ctx, AVDictionary **options,
const char *args)
{
const AVOption *o = NULL;
int ret, count = 0;
char *av_uninit(parsed_key), *av_uninit(value);
const char *key;
int offset= -1;
 
if (!args)
return 0;
 
while (*args) {
const char *shorthand = NULL;
 
o = av_opt_next(ctx->priv, o);
if (o) {
if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
continue;
offset = o->offset;
shorthand = o->name;
}
 
ret = av_opt_get_key_value(&args, "=", ":",
shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
&parsed_key, &value);
if (ret < 0) {
if (ret == AVERROR(EINVAL))
av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
else
av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
av_err2str(ret));
return ret;
}
if (*args)
args++;
if (parsed_key) {
key = parsed_key;
while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
} else {
key = shorthand;
}
 
av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
 
if (av_opt_find(ctx, key, NULL, 0, 0)) {
ret = av_opt_set(ctx, key, value, 0);
if (ret < 0) {
av_free(value);
av_free(parsed_key);
return ret;
}
} else {
av_dict_set(options, key, value, 0);
if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
if (ret == AVERROR_OPTION_NOT_FOUND)
av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
av_free(value);
av_free(parsed_key);
return ret;
}
}
}
 
av_free(value);
av_free(parsed_key);
count++;
}
 
if (ctx->enable_str) {
ret = set_enable_expr(ctx, ctx->enable_str);
if (ret < 0)
return ret;
}
return count;
}
 
#if FF_API_AVFILTER_INIT_FILTER
int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque)
{
return avfilter_init_str(filter, args);
}
#endif
 
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
{
int ret = 0;
 
ret = av_opt_set_dict(ctx, options);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
return ret;
}
 
if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
ctx->graph->internal->thread_execute) {
ctx->thread_type = AVFILTER_THREAD_SLICE;
ctx->internal->execute = ctx->graph->internal->thread_execute;
} else {
ctx->thread_type = 0;
}
 
if (ctx->filter->priv_class) {
ret = av_opt_set_dict(ctx->priv, options);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
return ret;
}
}
 
if (ctx->filter->init_opaque)
ret = ctx->filter->init_opaque(ctx, NULL);
else if (ctx->filter->init)
ret = ctx->filter->init(ctx);
else if (ctx->filter->init_dict)
ret = ctx->filter->init_dict(ctx, options);
 
return ret;
}
 
int avfilter_init_str(AVFilterContext *filter, const char *args)
{
AVDictionary *options = NULL;
AVDictionaryEntry *e;
int ret = 0;
 
if (args && *args) {
if (!filter->filter->priv_class) {
av_log(filter, AV_LOG_ERROR, "This filter does not take any "
"options, but options were provided: %s.\n", args);
return AVERROR(EINVAL);
}
 
#if FF_API_OLD_FILTER_OPTS
if ( !strcmp(filter->filter->name, "format") ||
!strcmp(filter->filter->name, "noformat") ||
!strcmp(filter->filter->name, "frei0r") ||
!strcmp(filter->filter->name, "frei0r_src") ||
!strcmp(filter->filter->name, "ocv") ||
!strcmp(filter->filter->name, "pan") ||
!strcmp(filter->filter->name, "pp") ||
!strcmp(filter->filter->name, "aevalsrc")) {
/* a hack for compatibility with the old syntax
* replace colons with |s */
char *copy = av_strdup(args);
char *p = copy;
int nb_leading = 0; // number of leading colons to skip
int deprecated = 0;
 
if (!copy) {
ret = AVERROR(ENOMEM);
goto fail;
}
 
if (!strcmp(filter->filter->name, "frei0r") ||
!strcmp(filter->filter->name, "ocv"))
nb_leading = 1;
else if (!strcmp(filter->filter->name, "frei0r_src"))
nb_leading = 3;
 
while (nb_leading--) {
p = strchr(p, ':');
if (!p) {
p = copy + strlen(copy);
break;
}
p++;
}
 
deprecated = strchr(p, ':') != NULL;
 
if (!strcmp(filter->filter->name, "aevalsrc")) {
deprecated = 0;
while ((p = strchr(p, ':')) && p[1] != ':') {
const char *epos = strchr(p + 1, '=');
const char *spos = strchr(p + 1, ':');
const int next_token_is_opt = epos && (!spos || epos < spos);
if (next_token_is_opt) {
p++;
break;
}
/* next token does not contain a '=', assume a channel expression */
deprecated = 1;
*p++ = '|';
}
if (p && *p == ':') { // double sep '::' found
deprecated = 1;
memmove(p, p + 1, strlen(p));
}
} else
while ((p = strchr(p, ':')))
*p++ = '|';
 
if (deprecated)
av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
"'|' to separate the list items.\n");
 
av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy);
ret = process_options(filter, &options, copy);
av_freep(&copy);
 
if (ret < 0)
goto fail;
#endif
} else {
#if CONFIG_MP_FILTER
if (!strcmp(filter->filter->name, "mp")) {
char *escaped;
 
if (!strncmp(args, "filter=", 7))
args += 7;
ret = av_escape(&escaped, args, ":=", AV_ESCAPE_MODE_BACKSLASH, 0);
if (ret < 0) {
av_log(filter, AV_LOG_ERROR, "Unable to escape MPlayer filters arg '%s'\n", args);
goto fail;
}
ret = process_options(filter, &options, escaped);
av_free(escaped);
} else
#endif
ret = process_options(filter, &options, args);
if (ret < 0)
goto fail;
}
}
 
ret = avfilter_init_dict(filter, &options);
if (ret < 0)
goto fail;
 
if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
ret = AVERROR_OPTION_NOT_FOUND;
goto fail;
}
 
fail:
av_dict_free(&options);
 
return ret;
}
 
const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
{
return pads[pad_idx].name;
}
 
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
{
return pads[pad_idx].type;
}
 
static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
{
return ff_filter_frame(link->dst->outputs[0], frame);
}
 
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
int (*filter_frame)(AVFilterLink *, AVFrame *);
AVFilterContext *dstctx = link->dst;
AVFilterPad *dst = link->dstpad;
AVFrame *out;
int ret;
AVFilterCommand *cmd= link->dst->command_queue;
int64_t pts;
 
if (link->closed) {
av_frame_free(&frame);
return AVERROR_EOF;
}
 
if (!(filter_frame = dst->filter_frame))
filter_frame = default_filter_frame;
 
/* copy the frame if needed */
if (dst->needs_writable && !av_frame_is_writable(frame)) {
av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
 
/* Maybe use ff_copy_buffer_ref instead? */
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
out = ff_get_video_buffer(link, link->w, link->h);
break;
case AVMEDIA_TYPE_AUDIO:
out = ff_get_audio_buffer(link, frame->nb_samples);
break;
default: return AVERROR(EINVAL);
}
if (!out) {
av_frame_free(&frame);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, frame);
 
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
frame->format, frame->width, frame->height);
break;
case AVMEDIA_TYPE_AUDIO:
av_samples_copy(out->extended_data, frame->extended_data,
0, 0, frame->nb_samples,
av_get_channel_layout_nb_channels(frame->channel_layout),
frame->format);
break;
default: return AVERROR(EINVAL);
}
 
av_frame_free(&frame);
} else
out = frame;
 
while(cmd && cmd->time <= out->pts * av_q2d(link->time_base)){
av_log(link->dst, AV_LOG_DEBUG,
"Processing command time:%f command:%s arg:%s\n",
cmd->time, cmd->command, cmd->arg);
avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
ff_command_queue_pop(link->dst);
cmd= link->dst->command_queue;
}
 
pts = out->pts;
if (dstctx->enable_str) {
int64_t pos = av_frame_get_pkt_pos(out);
dstctx->var_values[VAR_N] = link->frame_count;
dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
 
dstctx->is_disabled = fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) < 0.5;
if (dstctx->is_disabled &&
(dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
filter_frame = default_filter_frame;
}
ret = filter_frame(link, out);
link->frame_count++;
link->frame_requested = 0;
ff_update_link_current_pts(link, pts);
return ret;
}
 
static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFrame *frame)
{
int insamples = frame->nb_samples, inpos = 0, nb_samples;
AVFrame *pbuf = link->partial_buf;
int nb_channels = av_frame_get_channels(frame);
int ret = 0;
 
link->flags |= FF_LINK_FLAG_REQUEST_LOOP;
/* Handle framing (min_samples, max_samples) */
while (insamples) {
if (!pbuf) {
AVRational samples_tb = { 1, link->sample_rate };
pbuf = ff_get_audio_buffer(link, link->partial_buf_size);
if (!pbuf) {
av_log(link->dst, AV_LOG_WARNING,
"Samples dropped due to memory allocation failure.\n");
return 0;
}
av_frame_copy_props(pbuf, frame);
pbuf->pts = frame->pts;
if (pbuf->pts != AV_NOPTS_VALUE)
pbuf->pts += av_rescale_q(inpos, samples_tb, link->time_base);
pbuf->nb_samples = 0;
}
nb_samples = FFMIN(insamples,
link->partial_buf_size - pbuf->nb_samples);
av_samples_copy(pbuf->extended_data, frame->extended_data,
pbuf->nb_samples, inpos,
nb_samples, nb_channels, link->format);
inpos += nb_samples;
insamples -= nb_samples;
pbuf->nb_samples += nb_samples;
if (pbuf->nb_samples >= link->min_samples) {
ret = ff_filter_frame_framed(link, pbuf);
pbuf = NULL;
}
}
av_frame_free(&frame);
link->partial_buf = pbuf;
return ret;
}
 
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
{
FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
 
/* Consistency checks */
if (link->type == AVMEDIA_TYPE_VIDEO) {
if (strcmp(link->dst->filter->name, "scale")) {
av_assert1(frame->format == link->format);
av_assert1(frame->width == link->w);
av_assert1(frame->height == link->h);
}
} else {
av_assert1(frame->format == link->format);
av_assert1(av_frame_get_channels(frame) == link->channels);
av_assert1(frame->channel_layout == link->channel_layout);
av_assert1(frame->sample_rate == link->sample_rate);
}
 
/* Go directly to actual filtering if possible */
if (link->type == AVMEDIA_TYPE_AUDIO &&
link->min_samples &&
(link->partial_buf ||
frame->nb_samples < link->min_samples ||
frame->nb_samples > link->max_samples)) {
return ff_filter_frame_needs_framing(link, frame);
} else {
return ff_filter_frame_framed(link, frame);
}
}
 
const AVClass *avfilter_get_class(void)
{
return &avfilter_class;
}
/contrib/sdk/sources/ffmpeg/libavfilter/avfilter.h
0,0 → 1,1520
/*
* filter layer
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_AVFILTER_H
#define AVFILTER_AVFILTER_H
 
/**
* @file
* @ingroup lavfi
* Main libavfilter public API header
*/
 
/**
* @defgroup lavfi Libavfilter - graph-based frame editing library
* @{
*/
 
#include <stddef.h>
 
#include "libavutil/attributes.h"
#include "libavutil/avutil.h"
#include "libavutil/dict.h"
#include "libavutil/frame.h"
#include "libavutil/log.h"
#include "libavutil/samplefmt.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
 
#include "libavfilter/version.h"
 
/**
* Return the LIBAVFILTER_VERSION_INT constant.
*/
unsigned avfilter_version(void);
 
/**
* Return the libavfilter build-time configuration.
*/
const char *avfilter_configuration(void);
 
/**
* Return the libavfilter license.
*/
const char *avfilter_license(void);
 
typedef struct AVFilterContext AVFilterContext;
typedef struct AVFilterLink AVFilterLink;
typedef struct AVFilterPad AVFilterPad;
typedef struct AVFilterFormats AVFilterFormats;
 
#if FF_API_AVFILTERBUFFER
/**
* A reference-counted buffer data type used by the filter system. Filters
* should not store pointers to this structure directly, but instead use the
* AVFilterBufferRef structure below.
*/
typedef struct AVFilterBuffer {
uint8_t *data[8]; ///< buffer data for each plane/channel
 
/**
* pointers to the data planes/channels.
*
* For video, this should simply point to data[].
*
* For planar audio, each channel has a separate data pointer, and
* linesize[0] contains the size of each channel buffer.
* For packed audio, there is just one data pointer, and linesize[0]
* contains the total size of the buffer for all channels.
*
* Note: Both data and extended_data will always be set, but for planar
* audio with more channels than can fit in data, extended_data must be used
* in order to access all channels.
*/
uint8_t **extended_data;
int linesize[8]; ///< number of bytes per line
 
/** private data to be used by a custom free function */
void *priv;
/**
* A pointer to the function to deallocate this buffer if the default
* function is not sufficient. This could, for example, add the memory
* back into a memory pool to be reused later without the overhead of
* reallocating it from scratch.
*/
void (*free)(struct AVFilterBuffer *buf);
 
int format; ///< media format
int w, h; ///< width and height of the allocated buffer
unsigned refcount; ///< number of references to this buffer
} AVFilterBuffer;
 
#define AV_PERM_READ 0x01 ///< can read from the buffer
#define AV_PERM_WRITE 0x02 ///< can write to the buffer
#define AV_PERM_PRESERVE 0x04 ///< nobody else can overwrite the buffer
#define AV_PERM_REUSE 0x08 ///< can output the buffer multiple times, with the same contents each time
#define AV_PERM_REUSE2 0x10 ///< can output the buffer multiple times, modified each time
#define AV_PERM_NEG_LINESIZES 0x20 ///< the buffer requested can have negative linesizes
#define AV_PERM_ALIGN 0x40 ///< the buffer must be aligned
 
#define AVFILTER_ALIGN 16 //not part of ABI
 
/**
* Audio specific properties in a reference to an AVFilterBuffer. Since
* AVFilterBufferRef is common to different media formats, audio specific
* per reference properties must be separated out.
*/
typedef struct AVFilterBufferRefAudioProps {
uint64_t channel_layout; ///< channel layout of audio buffer
int nb_samples; ///< number of audio samples per channel
int sample_rate; ///< audio buffer sample rate
int channels; ///< number of channels (do not access directly)
} AVFilterBufferRefAudioProps;
 
/**
* Video specific properties in a reference to an AVFilterBuffer. Since
* AVFilterBufferRef is common to different media formats, video specific
* per reference properties must be separated out.
*/
typedef struct AVFilterBufferRefVideoProps {
int w; ///< image width
int h; ///< image height
AVRational sample_aspect_ratio; ///< sample aspect ratio
int interlaced; ///< is frame interlaced
int top_field_first; ///< field order
enum AVPictureType pict_type; ///< picture type of the frame
int key_frame; ///< 1 -> keyframe, 0-> not
int qp_table_linesize; ///< qp_table stride
int qp_table_size; ///< qp_table size
int8_t *qp_table; ///< array of Quantization Parameters
} AVFilterBufferRefVideoProps;
 
/**
* A reference to an AVFilterBuffer. Since filters can manipulate the origin of
* a buffer to, for example, crop image without any memcpy, the buffer origin
* and dimensions are per-reference properties. Linesize is also useful for
* image flipping, frame to field filters, etc, and so is also per-reference.
*
* TODO: add anything necessary for frame reordering
*/
typedef struct AVFilterBufferRef {
AVFilterBuffer *buf; ///< the buffer that this is a reference to
uint8_t *data[8]; ///< picture/audio data for each plane
/**
* pointers to the data planes/channels.
*
* For video, this should simply point to data[].
*
* For planar audio, each channel has a separate data pointer, and
* linesize[0] contains the size of each channel buffer.
* For packed audio, there is just one data pointer, and linesize[0]
* contains the total size of the buffer for all channels.
*
* Note: Both data and extended_data will always be set, but for planar
* audio with more channels than can fit in data, extended_data must be used
* in order to access all channels.
*/
uint8_t **extended_data;
int linesize[8]; ///< number of bytes per line
 
AVFilterBufferRefVideoProps *video; ///< video buffer specific properties
AVFilterBufferRefAudioProps *audio; ///< audio buffer specific properties
 
/**
* presentation timestamp. The time unit may change during
* filtering, as it is specified in the link and the filter code
* may need to rescale the PTS accordingly.
*/
int64_t pts;
int64_t pos; ///< byte position in stream, -1 if unknown
 
int format; ///< media format
 
int perms; ///< permissions, see the AV_PERM_* flags
 
enum AVMediaType type; ///< media type of buffer data
 
AVDictionary *metadata; ///< dictionary containing metadata key=value tags
} AVFilterBufferRef;
 
/**
* Copy properties of src to dst, without copying the actual data
*/
attribute_deprecated
void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src);
 
/**
* Add a new reference to a buffer.
*
* @param ref an existing reference to the buffer
* @param pmask a bitmask containing the allowable permissions in the new
* reference
* @return a new reference to the buffer with the same properties as the
* old, excluding any permissions denied by pmask
*/
attribute_deprecated
AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);
 
/**
* Remove a reference to a buffer. If this is the last reference to the
* buffer, the buffer itself is also automatically freed.
*
* @param ref reference to the buffer, may be NULL
*
* @note it is recommended to use avfilter_unref_bufferp() instead of this
* function
*/
attribute_deprecated
void avfilter_unref_buffer(AVFilterBufferRef *ref);
 
/**
* Remove a reference to a buffer and set the pointer to NULL.
* If this is the last reference to the buffer, the buffer itself
* is also automatically freed.
*
* @param ref pointer to the buffer reference
*/
attribute_deprecated
void avfilter_unref_bufferp(AVFilterBufferRef **ref);
#endif
 
/**
* Get the number of channels of a buffer reference.
*/
attribute_deprecated
int avfilter_ref_get_channels(AVFilterBufferRef *ref);
 
#if FF_API_AVFILTERPAD_PUBLIC
/**
* A filter pad used for either input or output.
*
* See doc/filter_design.txt for details on how to implement the methods.
*
* @warning This struct might be removed from the public API.
* Users should call avfilter_pad_get_name() and avfilter_pad_get_type()
* to access the name and type fields; there should be no need to access
* any other fields from outside of libavfilter.
*/
struct AVFilterPad {
/**
* Pad name. The name is unique among inputs and among outputs, but an
* input may have the same name as an output. This may be NULL if this
* pad has no need to ever be referenced by name.
*/
const char *name;
 
/**
* AVFilterPad type.
*/
enum AVMediaType type;
 
/**
* Input pads:
* Minimum required permissions on incoming buffers. Any buffer with
* insufficient permissions will be automatically copied by the filter
* system to a new buffer which provides the needed access permissions.
*
* Output pads:
* Guaranteed permissions on outgoing buffers. Any buffer pushed on the
* link must have at least these permissions; this fact is checked by
* asserts. It can be used to optimize buffer allocation.
*/
attribute_deprecated int min_perms;
 
/**
* Input pads:
* Permissions which are not accepted on incoming buffers. Any buffer
* which has any of these permissions set will be automatically copied
* by the filter system to a new buffer which does not have those
* permissions. This can be used to easily disallow buffers with
* AV_PERM_REUSE.
*
* Output pads:
* Permissions which are automatically removed on outgoing buffers. It
* can be used to optimize buffer allocation.
*/
attribute_deprecated int rej_perms;
 
/**
* @deprecated unused
*/
int (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref);
 
/**
* Callback function to get a video buffer. If NULL, the filter system will
* use ff_default_get_video_buffer().
*
* Input video pads only.
*/
AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);
 
/**
* Callback function to get an audio buffer. If NULL, the filter system will
* use ff_default_get_audio_buffer().
*
* Input audio pads only.
*/
AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);
 
/**
* @deprecated unused
*/
int (*end_frame)(AVFilterLink *link);
 
/**
* @deprecated unused
*/
int (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir);
 
/**
* Filtering callback. This is where a filter receives a frame with
* audio/video data and should do its processing.
*
* Input pads only.
*
* @return >= 0 on success, a negative AVERROR on error. This function
* must ensure that frame is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
int (*filter_frame)(AVFilterLink *link, AVFrame *frame);
 
/**
* Frame poll callback. This returns the number of immediately available
* samples. It should return a positive value if the next request_frame()
* is guaranteed to return one frame (with no delay).
*
* Defaults to just calling the source poll_frame() method.
*
* Output pads only.
*/
int (*poll_frame)(AVFilterLink *link);
 
/**
* Frame request callback. A call to this should result in at least one
* frame being output over the given link. This should return zero on
* success, and another value on error.
* See ff_request_frame() for the error codes with a specific
* meaning.
*
* Output pads only.
*/
int (*request_frame)(AVFilterLink *link);
 
/**
* Link configuration callback.
*
* For output pads, this should set the following link properties:
* video: width, height, sample_aspect_ratio, time_base
* audio: sample_rate.
*
* This should NOT set properties such as format, channel_layout, etc which
* are negotiated between filters by the filter system using the
* query_formats() callback before this function is called.
*
* For input pads, this should check the properties of the link, and update
* the filter's internal state as necessary.
*
* For both input and output pads, this should return zero on success,
* and another value on error.
*/
int (*config_props)(AVFilterLink *link);
 
/**
* The filter expects a fifo to be inserted on its input link,
* typically because it has a delay.
*
* input pads only.
*/
int needs_fifo;
 
int needs_writable;
};
#endif
 
/**
* Get the number of elements in a NULL-terminated array of AVFilterPads (e.g.
* AVFilter.inputs/outputs).
*/
int avfilter_pad_count(const AVFilterPad *pads);
 
/**
* Get the name of an AVFilterPad.
*
* @param pads an array of AVFilterPads
* @param pad_idx index of the pad in the array; it is the caller's
* responsibility to ensure the index is valid
*
* @return name of the pad_idx'th pad in pads
*/
const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx);
 
/**
* Get the type of an AVFilterPad.
*
* @param pads an array of AVFilterPads
* @param pad_idx index of the pad in the array; it is the caller's
* responsibility to ensure the index is valid
*
* @return type of the pad_idx'th pad in pads
*/
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx);
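/**
 * Editor's note: a hedged sketch, not part of the original header, showing
 * the pad accessors above used together; "overlay" is only an example
 * filter name and stdio.h is assumed for printf():
 *
 *     AVFilter *f = avfilter_get_by_name("overlay");
 *     int i, n = f ? avfilter_pad_count(f->inputs) : 0;
 *     for (i = 0; i < n; i++)
 *         printf("input pad %d: %s\n", i,
 *                avfilter_pad_get_name(f->inputs, i));
 */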
 
/**
* The number of the filter inputs is not determined just by AVFilter.inputs.
* The filter might add additional inputs during initialization depending on the
* options supplied to it.
*/
#define AVFILTER_FLAG_DYNAMIC_INPUTS (1 << 0)
/**
* The number of the filter outputs is not determined just by AVFilter.outputs.
* The filter might add additional outputs during initialization depending on
* the options supplied to it.
*/
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS (1 << 1)
/**
* The filter supports multithreading by splitting frames into multiple parts
* and processing them concurrently.
*/
#define AVFILTER_FLAG_SLICE_THREADS (1 << 2)
/**
* Some filters support a generic "enable" expression option that can be used
* to enable or disable a filter in the timeline. Filters supporting this
* option have this flag set. When the enable expression is false, the default
* no-op filter_frame() function is called in place of the filter_frame()
* callback defined on each input pad, thus the frame is passed unchanged to
* the next filters.
*/
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16)
/**
* Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will
* have its filter_frame() callback(s) called as usual even when the enable
* expression is false. The filter will disable filtering within the
* filter_frame() callback(s) itself, for example executing code depending on
* the AVFilterContext->is_disabled value.
*/
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17)
/**
* Handy mask to test whether the filter supports the timeline feature
* (internally or generically).
*/
#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL)
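/**
 * Editor's note: a hedged sketch, not part of the original header. A caller
 * can gate the timeline "enable" command on this mask; "ctx" is an assumed,
 * already-initialized filter instance and between() is an av_expr function:
 *
 *     if (ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)
 *         avfilter_process_command(ctx, "enable", "between(t,10,20)",
 *                                  NULL, 0, 0);
 */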
 
/**
* Filter definition. This defines the pads a filter contains, and all the
* callback functions used to interact with the filter.
*/
typedef struct AVFilter {
/**
* Filter name. Must be non-NULL and unique among filters.
*/
const char *name;
 
/**
* A description of the filter. May be NULL.
*
* You should use the NULL_IF_CONFIG_SMALL() macro to define it.
*/
const char *description;
 
/**
* List of inputs, terminated by a zeroed element.
*
* NULL if there are no (static) inputs. Instances of filters with
* AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in
* this list.
*/
const AVFilterPad *inputs;
/**
* List of outputs, terminated by a zeroed element.
*
* NULL if there are no (static) outputs. Instances of filters with
* AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in
* this list.
*/
const AVFilterPad *outputs;
 
/**
* A class for the private data, used to declare filter private AVOptions.
* This field is NULL for filters that do not declare any options.
*
* If this field is non-NULL, the first member of the filter private data
* must be a pointer to AVClass, which will be set by libavfilter generic
* code to this class.
*/
const AVClass *priv_class;
 
/**
* A combination of AVFILTER_FLAG_*
*/
int flags;
 
/*****************************************************************
* All fields below this line are not part of the public API. They
* may not be used outside of libavfilter and can be changed and
* removed at will.
* New public fields should be added right above.
*****************************************************************
*/
 
/**
* Filter initialization function.
*
* This callback will be called only once during the filter lifetime, after
* all the options have been set, but before links between filters are
* established and format negotiation is done.
*
* Basic filter initialization should be done here. Filters with dynamic
* inputs and/or outputs should create those inputs/outputs here based on
* provided options. No more changes to this filter's inputs/outputs can be
* done after this callback.
*
* This callback must not assume that the filter links exist or frame
* parameters are known.
*
* @ref AVFilter.uninit "uninit" is guaranteed to be called even if
* initialization fails, so this callback does not have to clean up on
* failure.
*
* @return 0 on success, a negative AVERROR on failure
*/
int (*init)(AVFilterContext *ctx);
 
/**
* Should be set instead of @ref AVFilter.init "init" by the filters that
* want to pass a dictionary of AVOptions to nested contexts that are
* allocated during init.
*
* On return, the options dict should be freed and replaced with one that
* contains all the options which could not be processed by this filter (or
* with NULL if all the options were processed).
*
* Otherwise the semantics is the same as for @ref AVFilter.init "init".
*/
int (*init_dict)(AVFilterContext *ctx, AVDictionary **options);
 
/**
* Filter uninitialization function.
*
* Called only once right before the filter is freed. Should deallocate any
* memory held by the filter, release any buffer references, etc. It does
* not need to deallocate the AVFilterContext.priv memory itself.
*
* This callback may be called even if @ref AVFilter.init "init" was not
* called or failed, so it must be prepared to handle such a situation.
*/
void (*uninit)(AVFilterContext *ctx);
 
/**
* Query formats supported by the filter on its inputs and outputs.
*
* This callback is called after the filter is initialized (so the inputs
* and outputs are fixed), shortly before the format negotiation. This
* callback may be called more than once.
*
* This callback must set AVFilterLink.out_formats on every input link and
* AVFilterLink.in_formats on every output link to a list of pixel/sample
* formats that the filter supports on that link. For audio links, this
* filter must also set @ref AVFilterLink.in_samplerates "in_samplerates" /
* @ref AVFilterLink.out_samplerates "out_samplerates" and
* @ref AVFilterLink.in_channel_layouts "in_channel_layouts" /
* @ref AVFilterLink.out_channel_layouts "out_channel_layouts" analogously.
*
* This callback may be NULL for filters with one input, in which case
* libavfilter assumes that it supports all input formats and preserves
* them on output.
*
* @return zero on success, a negative value corresponding to an
* AVERROR code otherwise
*/
int (*query_formats)(AVFilterContext *);
 
int priv_size; ///< size of private data to allocate for the filter
 
/**
* Used by the filter registration system. Must not be touched by any other
* code.
*/
struct AVFilter *next;
 
/**
* Make the filter instance process a command.
*
* @param cmd the command to process; for handling simplicity all commands must be alphanumeric only
* @param arg the argument for the command
* @param res a buffer of size res_len where the filter(s) can return a response. It must not be changed if the command is not supported.
* @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be
* time-consuming, then the filter should treat it as an unsupported command
*
* @returns >=0 on success, otherwise an error code.
* AVERROR(ENOSYS) on unsupported commands
*/
int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags);
 
/**
* Filter initialization function, alternative to the init()
* callback. Args contains the user-supplied parameters, opaque is
* used for providing binary data.
*/
int (*init_opaque)(AVFilterContext *ctx, void *opaque);
} AVFilter;
 
/**
* Process multiple parts of the frame concurrently.
*/
#define AVFILTER_THREAD_SLICE (1 << 0)
 
typedef struct AVFilterInternal AVFilterInternal;
 
/** An instance of a filter */
struct AVFilterContext {
const AVClass *av_class; ///< needed for av_log() and filters common options
 
const AVFilter *filter; ///< the AVFilter of which this is an instance
 
char *name; ///< name of this filter instance
 
AVFilterPad *input_pads; ///< array of input pads
AVFilterLink **inputs; ///< array of pointers to input links
#if FF_API_FOO_COUNT
attribute_deprecated unsigned input_count; ///< @deprecated use nb_inputs
#endif
unsigned nb_inputs; ///< number of input pads
 
AVFilterPad *output_pads; ///< array of output pads
AVFilterLink **outputs; ///< array of pointers to output links
#if FF_API_FOO_COUNT
attribute_deprecated unsigned output_count; ///< @deprecated use nb_outputs
#endif
unsigned nb_outputs; ///< number of output pads
 
void *priv; ///< private data for use by the filter
 
struct AVFilterGraph *graph; ///< filtergraph this filter belongs to
 
/**
* Type of multithreading being allowed/used. A combination of
* AVFILTER_THREAD_* flags.
*
* May be set by the caller before initializing the filter to forbid some
* or all kinds of multithreading for this filter. The default is allowing
* everything.
*
* When the filter is initialized, this field is combined using bit AND with
* AVFilterGraph.thread_type to get the final mask used for determining
* allowed threading types. I.e. a threading type needs to be set in both
* to be allowed.
*
* After the filter is initialized, libavfilter sets this field to the
* threading type that is actually used (0 for no multithreading).
*/
int thread_type;
 
/**
* An opaque struct for libavfilter internal use.
*/
AVFilterInternal *internal;
 
struct AVFilterCommand *command_queue;
 
char *enable_str; ///< enable expression string
void *enable; ///< parsed expression (AVExpr*)
double *var_values; ///< variable values for the enable expression
int is_disabled; ///< the enabled state from the last expression evaluation
};
 
/**
* A link between two filters. This contains pointers to the source and
* destination filters between which this link exists, and the indexes of
* the pads involved. In addition, this link also contains the parameters
* which have been negotiated and agreed upon between the filters, such as
* image dimensions, format, etc.
*/
struct AVFilterLink {
AVFilterContext *src; ///< source filter
AVFilterPad *srcpad; ///< output pad on the source filter
 
AVFilterContext *dst; ///< dest filter
AVFilterPad *dstpad; ///< input pad on the dest filter
 
enum AVMediaType type; ///< filter media type
 
/* These parameters apply only to video */
int w; ///< agreed upon image width
int h; ///< agreed upon image height
AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio
/* These parameters apply only to audio */
uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h)
int sample_rate; ///< samples per second
 
int format; ///< agreed upon media format
 
/**
* Define the time base used by the PTS of the frames/samples
* which will pass through this link.
* During the configuration stage, each filter is supposed to
* change only the output timebase, while the timebase of the
* input link is assumed to be an unchangeable property.
*/
AVRational time_base;
 
/*****************************************************************
* All fields below this line are not part of the public API. They
* may not be used outside of libavfilter and can be changed and
* removed at will.
* New public fields should be added right above.
*****************************************************************
*/
/**
* Lists of formats and channel layouts supported by the input and output
* filters respectively. These lists are used for negotiating the format
* to actually be used, which will be loaded into the format and
* channel_layout members, above, when chosen.
*
*/
AVFilterFormats *in_formats;
AVFilterFormats *out_formats;
 
/**
* Lists of channel layouts and sample rates used for automatic
* negotiation.
*/
AVFilterFormats *in_samplerates;
AVFilterFormats *out_samplerates;
struct AVFilterChannelLayouts *in_channel_layouts;
struct AVFilterChannelLayouts *out_channel_layouts;
 
/**
* Audio only, the destination filter sets this to a non-zero value to
* request that buffers with the given number of samples should be sent to
* it. AVFilterPad.needs_fifo must also be set on the corresponding input
* pad.
* Last buffer before EOF will be padded with silence.
*/
int request_samples;
 
/** stage of the initialization of the link properties (dimensions, etc) */
enum {
AVLINK_UNINIT = 0, ///< not started
AVLINK_STARTINIT, ///< started, but incomplete
AVLINK_INIT ///< complete
} init_state;
 
struct AVFilterPool *pool;
 
/**
* Graph the filter belongs to.
*/
struct AVFilterGraph *graph;
 
/**
* Current timestamp of the link, as defined by the most recent
* frame(s), in AV_TIME_BASE units.
*/
int64_t current_pts;
 
/**
* Index in the age array.
*/
int age_index;
 
/**
* Frame rate of the stream on the link, or 1/0 if unknown;
* if left to 0/0, it will automatically be copied from the first input
* of the source filter if it exists.
*
* Sources should set it to the best estimation of the real frame rate.
* Filters should update it if necessary depending on their function.
* Sinks can use it to set a default output frame rate.
* It is similar to the r_frame_rate field in AVStream.
*/
AVRational frame_rate;
 
/**
* Buffer partially filled with samples to achieve a fixed/minimum size.
*/
AVFrame *partial_buf;
 
/**
* Size of the partial buffer to allocate.
* Must be between min_samples and max_samples.
*/
int partial_buf_size;
 
/**
* Minimum number of samples to filter at once. If filter_frame() is
* called with fewer samples, it will accumulate them in partial_buf.
* This field and the related ones must not be changed after filtering
* has started.
* If 0, all related fields are ignored.
*/
int min_samples;
 
/**
* Maximum number of samples to filter at once. If filter_frame() is
* called with more samples, it will split them.
*/
int max_samples;
 
/**
* The buffer reference currently being received across the link by the
* destination filter. This is used internally by the filter system to
* allow automatic copying of buffers which do not have sufficient
* permissions for the destination. This should not be accessed directly
* by the filters.
*/
AVFilterBufferRef *cur_buf_copy;
 
/**
* True if the link is closed.
* If set, all attempts of start_frame, filter_frame or request_frame
* will fail with AVERROR_EOF, and if necessary the reference will be
* destroyed.
* If request_frame returns AVERROR_EOF, this flag is set on the
* corresponding link.
* It can also be set by either the source or the destination
* filter.
*/
int closed;
 
/**
* Number of channels.
*/
int channels;
 
/**
* True if a frame is being requested on the link.
* Used internally by the framework.
*/
unsigned frame_requested;
 
/**
* Link processing flags.
*/
unsigned flags;
 
/**
* Number of past frames sent through the link.
*/
int64_t frame_count;
};
 
/**
* Link two filters together.
*
* @param src the source filter
* @param srcpad index of the output pad on the source filter
* @param dst the destination filter
* @param dstpad index of the input pad on the destination filter
* @return zero on success
*/
int avfilter_link(AVFilterContext *src, unsigned srcpad,
AVFilterContext *dst, unsigned dstpad);
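 
/* A minimal usage sketch, assuming two already created filter contexts
 * src_ctx and dst_ctx in the same graph (error handling abbreviated):
 *
 * @code
 * int err = avfilter_link(src_ctx, 0, dst_ctx, 0); // output pad 0 -> input pad 0
 * if (err >= 0)
 *     err = avfilter_graph_config(graph, NULL);    // negotiate formats, dimensions, ...
 * @endcode
 */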
 
/**
* Free the link in *link, and set its pointer to NULL.
*/
void avfilter_link_free(AVFilterLink **link);
 
/**
* Get the number of channels of a link.
*/
int avfilter_link_get_channels(AVFilterLink *link);
 
/**
* Set the closed field of a link.
*/
void avfilter_link_set_closed(AVFilterLink *link, int closed);
 
/**
* Negotiate the media format, dimensions, etc of all inputs to a filter.
*
* @param filter the filter to negotiate the properties for its inputs
* @return zero on successful negotiation
*/
int avfilter_config_links(AVFilterContext *filter);
 
#if FF_API_AVFILTERBUFFER
/**
* Create a buffer reference wrapped around an already allocated image
* buffer.
*
* @param data pointers to the planes of the image to reference
* @param linesize linesizes for the planes of the image to reference
* @param perms the required access permissions
* @param w the width of the image specified by the data and linesize arrays
* @param h the height of the image specified by the data and linesize arrays
* @param format the pixel format of the image specified by the data and linesize arrays
*/
attribute_deprecated
AVFilterBufferRef *
avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
int w, int h, enum AVPixelFormat format);
 
/**
* Create an audio buffer reference wrapped around an already
* allocated samples buffer.
*
* See avfilter_get_audio_buffer_ref_from_arrays_channels() for a version
* that can handle unknown channel layouts.
*
* @param data pointers to the samples plane buffers
* @param linesize linesize for the samples plane buffers
* @param perms the required access permissions
* @param nb_samples number of samples per channel
* @param sample_fmt the format of each sample in the buffer to allocate
* @param channel_layout the channel layout of the buffer
*/
attribute_deprecated
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
int linesize,
int perms,
int nb_samples,
enum AVSampleFormat sample_fmt,
uint64_t channel_layout);
/**
* Create an audio buffer reference wrapped around an already
* allocated samples buffer.
*
* @param data pointers to the samples plane buffers
* @param linesize linesize for the samples plane buffers
* @param perms the required access permissions
* @param nb_samples number of samples per channel
* @param sample_fmt the format of each sample in the buffer to allocate
* @param channels the number of channels of the buffer
* @param channel_layout the channel layout of the buffer,
* must be either 0 or consistent with channels
*/
attribute_deprecated
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,
int linesize,
int perms,
int nb_samples,
enum AVSampleFormat sample_fmt,
int channels,
uint64_t channel_layout);
 
#endif
 
 
#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example); fast filters are favored automatically
#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when it is fast (like a video out that supports contrast adjustment in hw)
 
/**
* Make the filter instance process a command.
* It is recommended to use avfilter_graph_send_command().
*/
int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags);
 
/** Initialize the filter system. Register all builtin filters. */
void avfilter_register_all(void);
 
#if FF_API_OLD_FILTER_REGISTER
/** Uninitialize the filter system. Unregister all filters. */
attribute_deprecated
void avfilter_uninit(void);
#endif
 
/**
* Register a filter. This is only needed if you plan to use
* avfilter_get_by_name later to look up the AVFilter structure by name. A
* filter can still be instantiated with avfilter_graph_alloc_filter even if it
* is not registered.
*
* @param filter the filter to register
* @return 0 if the registration was successful, a negative value
* otherwise
*/
int avfilter_register(AVFilter *filter);
 
/**
* Get a filter definition matching the given name.
*
* @param name the filter name to find
* @return the filter definition, if any matching one is registered.
* NULL if none found.
*/
AVFilter *avfilter_get_by_name(const char *name);
 
/**
* Iterate over all registered filters.
* @return If prev is non-NULL, next registered filter after prev or NULL if
* prev is the last filter. If prev is NULL, return the first registered filter.
*/
const AVFilter *avfilter_next(const AVFilter *prev);
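 
/* A minimal sketch of iterating over all registered filters:
 *
 * @code
 * const AVFilter *f = NULL;
 *
 * avfilter_register_all();
 * while ((f = avfilter_next(f)))
 *     av_log(NULL, AV_LOG_INFO, "%s\n", f->name);
 * @endcode
 */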
 
#if FF_API_OLD_FILTER_REGISTER
/**
* If filter is NULL, returns a pointer to the first registered filter pointer;
* if filter is non-NULL, returns the next pointer after filter.
* If the returned pointer points to NULL, the last registered filter
* was already reached.
* @deprecated use avfilter_next()
*/
attribute_deprecated
AVFilter **av_filter_next(AVFilter **filter);
#endif
 
#if FF_API_AVFILTER_OPEN
/**
* Create a filter instance.
*
* @param filter_ctx put here a pointer to the created filter context
* on success, NULL on failure
* @param filter the filter to create an instance of
* @param inst_name Name to give to the new instance. Can be NULL for none.
* @return >= 0 in case of success, a negative error code otherwise
* @deprecated use avfilter_graph_alloc_filter() instead
*/
attribute_deprecated
int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name);
#endif
 
 
#if FF_API_AVFILTER_INIT_FILTER
/**
* Initialize a filter.
*
* @param filter the filter to initialize
* @param args A string of parameters to use when initializing the filter.
* The format and meaning of this string varies by filter.
* @param opaque Any extra non-string data needed by the filter. The meaning
* of this parameter varies by filter.
* @return zero on success
*/
attribute_deprecated
int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque);
#endif
 
/**
* Initialize a filter with the supplied parameters.
*
* @param ctx uninitialized filter context to initialize
* @param args Options to initialize the filter with. This must be a
* ':'-separated list of options in the 'key=value' form.
* May be NULL if the options have been set directly using the
* AVOptions API or there are no options that need to be set.
* @return 0 on success, a negative AVERROR on failure
*/
int avfilter_init_str(AVFilterContext *ctx, const char *args);
 
/**
* Initialize a filter with the supplied dictionary of options.
*
* @param ctx uninitialized filter context to initialize
* @param options An AVDictionary filled with options for this filter. On
* return this parameter will be destroyed and replaced with
* a dict containing options that were not found. This dictionary
* must be freed by the caller.
* May be NULL, then this function is equivalent to
* avfilter_init_str() with the second parameter set to NULL.
* @return 0 on success, a negative AVERROR on failure
*
* @note This function and avfilter_init_str() do essentially the same thing;
* the difference is in the manner in which the options are passed. It is up
* to the calling code to choose whichever is preferable. The two functions also
* behave differently when some of the provided options are not declared as
* supported by the filter. In such a case, avfilter_init_str() will fail, but
* this function will leave those extra options in the options AVDictionary and
* continue as usual.
*/
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options);
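 
/* A minimal sketch of the dictionary variant, assuming ctx is an
 * uninitialized instance of some filter accepting a "w" option (the option
 * name is purely illustrative):
 *
 * @code
 * AVDictionary *opts = NULL;
 * int err;
 *
 * av_dict_set(&opts, "w", "640", 0);
 * err = avfilter_init_dict(ctx, &opts);
 * av_dict_free(&opts); // holds the options the filter did not consume
 * @endcode
 */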
 
/**
* Free a filter context. This will also remove the filter from its
* filtergraph's list of filters.
*
* @param filter the filter to free
*/
void avfilter_free(AVFilterContext *filter);
 
/**
* Insert a filter in the middle of an existing link.
*
* @param link the link into which the filter should be inserted
* @param filt the filter to be inserted
* @param filt_srcpad_idx the input pad on the filter to connect
* @param filt_dstpad_idx the output pad on the filter to connect
* @return zero on success
*/
int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
unsigned filt_srcpad_idx, unsigned filt_dstpad_idx);
 
#if FF_API_AVFILTERBUFFER
/**
* Copy the frame properties of src to dst, without copying the actual
* image data.
*
* @return 0 on success, a negative number on error.
*/
attribute_deprecated
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
 
/**
* Copy the frame properties and data pointers of src to dst, without copying
* the actual data.
*
* @return 0 on success, a negative number on error.
*/
attribute_deprecated
int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src);
#endif
 
/**
* @return AVClass for AVFilterContext.
*
* @see av_opt_find().
*/
const AVClass *avfilter_get_class(void);
 
typedef struct AVFilterGraphInternal AVFilterGraphInternal;
 
/**
* A function pointer passed to the @ref AVFilterGraph.execute callback to be
* executed multiple times, possibly in parallel.
*
* @param ctx the filter context the job belongs to
* @param arg an opaque parameter passed through from @ref
* AVFilterGraph.execute
* @param jobnr the index of the job being executed
* @param nb_jobs the total number of jobs
*
* @return 0 on success, a negative AVERROR on error
*/
typedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
 
/**
* A function executing multiple jobs, possibly in parallel.
*
* @param ctx the filter context to which the jobs belong
* @param func the function to be called multiple times
* @param arg the argument to be passed to func
* @param ret a nb_jobs-sized array to be filled with return values from each
* invocation of func
* @param nb_jobs the number of jobs to execute
*
* @return 0 on success, a negative AVERROR on error
*/
typedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func,
void *arg, int *ret, int nb_jobs);
 
typedef struct AVFilterGraph {
const AVClass *av_class;
#if FF_API_FOO_COUNT
attribute_deprecated
unsigned filter_count_unused;
#endif
AVFilterContext **filters;
#if !FF_API_FOO_COUNT
unsigned nb_filters;
#endif
 
char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters
char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters
#if FF_API_FOO_COUNT
unsigned nb_filters;
#endif
 
/**
* Type of multithreading allowed for filters in this graph. A combination
* of AVFILTER_THREAD_* flags.
*
* May be set by the caller at any point, the setting will apply to all
* filters initialized after that. The default is allowing everything.
*
* When a filter in this graph is initialized, this field is combined using
* bit AND with AVFilterContext.thread_type to get the final mask used for
* determining allowed threading types. I.e. a threading type needs to be
* set in both to be allowed.
*/
int thread_type;
 
/**
* Maximum number of threads used by filters in this graph. May be set by
* the caller before adding any filters to the filtergraph. Zero (the
* default) means that the number of threads is determined automatically.
*/
int nb_threads;
 
/**
* Opaque object for libavfilter internal use.
*/
AVFilterGraphInternal *internal;
 
/**
* Opaque user data. May be set by the caller to an arbitrary value, e.g. to
* be used from callbacks like @ref AVFilterGraph.execute.
* Libavfilter will not touch this field in any way.
*/
void *opaque;
 
/**
* This callback may be set by the caller immediately after allocating the
* graph and before adding any filters to it, to provide a custom
* multithreading implementation.
*
* If set, filters with slice threading capability will call this callback
* to execute multiple jobs in parallel.
*
* If this field is left unset, libavfilter will use its internal
* implementation, which may or may not be multithreaded depending on the
* platform and build options.
*/
avfilter_execute_func *execute;
 
char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters; access ONLY through AVOptions
 
/**
* Private fields
*
* The following fields are for internal use only.
* Their type, offset, number and semantic can change without notice.
*/
 
AVFilterLink **sink_links;
int sink_links_count;
 
unsigned disable_auto_convert;
} AVFilterGraph;
 
/**
* Allocate a filter graph.
*/
AVFilterGraph *avfilter_graph_alloc(void);
 
/**
* Create a new filter instance in a filter graph.
*
* @param graph graph in which the new filter will be used
* @param filter the filter to create an instance of
* @param name Name to give to the new instance (will be copied to
* AVFilterContext.name). This may be used by the caller to identify
* different filters, libavfilter itself assigns no semantics to
* this parameter. May be NULL.
*
* @return the context of the newly created filter instance (note that it is
* also retrievable directly through AVFilterGraph.filters or with
* avfilter_graph_get_filter()) on success or NULL on failure.
*/
AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
const AVFilter *filter,
const char *name);
 
/**
* Get a filter instance with name name from graph.
*
* @return the pointer to the found filter instance or NULL if it
* cannot be found.
*/
AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name);
 
#if FF_API_AVFILTER_OPEN
/**
* Add an existing filter instance to a filter graph.
*
* @param graphctx the filter graph
* @param filter the filter to be added
*
* @deprecated use avfilter_graph_alloc_filter() to allocate a filter in a
* filter graph
*/
attribute_deprecated
int avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter);
#endif
 
/**
* Create and add a filter instance into an existing graph.
* The filter instance is created from the filter filt and initialized
* with the parameters args and opaque.
*
* In case of success put in *filt_ctx the pointer to the created
* filter instance, otherwise set *filt_ctx to NULL.
*
* @param name the instance name to give to the created filter instance
* @param graph_ctx the filter graph
* @return a negative AVERROR error code in case of failure, a
* non-negative value otherwise
*/
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
const char *name, const char *args, void *opaque,
AVFilterGraph *graph_ctx);
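 
/* A typical call pattern, sketched with a "scale" instance and string
 * options (graph assumed already allocated, error handling abbreviated):
 *
 * @code
 * AVFilterContext *scale_ctx = NULL;
 * int err = avfilter_graph_create_filter(&scale_ctx,
 *                                        avfilter_get_by_name("scale"),
 *                                        "my_scaler", "320:240", NULL, graph);
 * if (err < 0)
 *     av_log(NULL, AV_LOG_ERROR, "cannot create the scale filter\n");
 * @endcode
 */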
 
/**
* Enable or disable automatic format conversion inside the graph.
*
* Note that format conversion can still happen inside explicitly inserted
* scale and aresample filters.
*
* @param flags any of the AVFILTER_AUTO_CONVERT_* constants
*/
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags);
 
enum {
AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */
AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */
};
 
/**
* Check validity and configure all the links and formats in the graph.
*
* @param graphctx the filter graph
* @param log_ctx context used for logging
* @return >= 0 in case of success, a negative AVERROR code otherwise
*/
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx);
 
/**
* Free a graph, destroy its links, and set *graph to NULL.
* If *graph is NULL, do nothing.
*/
void avfilter_graph_free(AVFilterGraph **graph);
 
/**
* A linked-list of the inputs/outputs of the filter chain.
*
* This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(),
* where it is used to communicate open (unlinked) inputs and outputs from and
* to the caller.
* This struct specifies, for each unconnected pad contained in the graph, the
* filter context and the pad index required for establishing a link.
*/
typedef struct AVFilterInOut {
/** unique name for this input/output in the list */
char *name;
 
/** filter context associated to this input/output */
AVFilterContext *filter_ctx;
 
/** index of the filt_ctx pad to use for linking */
int pad_idx;
 
/** next input/output in the list, NULL if this is the last */
struct AVFilterInOut *next;
} AVFilterInOut;
 
/**
* Allocate a single AVFilterInOut entry.
* Must be freed with avfilter_inout_free().
* @return allocated AVFilterInOut on success, NULL on failure.
*/
AVFilterInOut *avfilter_inout_alloc(void);
 
/**
* Free the supplied list of AVFilterInOut and set *inout to NULL.
* If *inout is NULL, do nothing.
*/
void avfilter_inout_free(AVFilterInOut **inout);
 
#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE
/**
* Add a graph described by a string to a graph.
*
* @note The caller must provide the lists of inputs and outputs,
* which therefore must be known before calling the function.
*
* @note The inputs parameter describes inputs of the already existing
* part of the graph; i.e. from the point of view of the newly created
* part, they are outputs. Similarly the outputs parameter describes
* outputs of the already existing filters, which are provided as
* inputs to the parsed filters.
*
* @param graph the filter graph where to link the parsed graph context
* @param filters string to be parsed
* @param inputs linked list to the inputs of the graph
* @param outputs linked list to the outputs of the graph
* @param log_ctx context used for logging
* @return zero on success, a negative AVERROR code on error
*/
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
AVFilterInOut *inputs, AVFilterInOut *outputs,
void *log_ctx);
#else
/**
* Add a graph described by a string to a graph.
*
* @param graph the filter graph where to link the parsed graph context
* @param filters string to be parsed
* @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
* If non-NULL, *inputs is updated to contain the list of open inputs
* after the parsing, should be freed with avfilter_inout_free().
* @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
* If non-NULL, *outputs is updated to contain the list of open outputs
* after the parsing, should be freed with avfilter_inout_free().
* @return non negative on success, a negative AVERROR code on error
* @deprecated Use avfilter_graph_parse_ptr() instead.
*/
attribute_deprecated
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs, AVFilterInOut **outputs,
void *log_ctx);
#endif
 
/**
* Add a graph described by a string to a graph.
*
* @param graph the filter graph where to link the parsed graph context
* @param filters string to be parsed
* @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
* If non-NULL, *inputs is updated to contain the list of open inputs
* after the parsing, should be freed with avfilter_inout_free().
* @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
* If non-NULL, *outputs is updated to contain the list of open outputs
* after the parsing, should be freed with avfilter_inout_free().
* @return non negative on success, a negative AVERROR code on error
*/
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs, AVFilterInOut **outputs,
void *log_ctx);
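 
/* A minimal sketch of splicing a textual chain between two existing
 * endpoints, assuming buffersrc_ctx and buffersink_ctx were created
 * beforehand; "in"/"out" name the open pads of the parsed string:
 *
 * @code
 * AVFilterInOut *outputs = avfilter_inout_alloc();
 * AVFilterInOut *inputs  = avfilter_inout_alloc();
 * int err;
 *
 * outputs->name       = av_strdup("in");
 * outputs->filter_ctx = buffersrc_ctx;
 * outputs->pad_idx    = 0;
 * outputs->next       = NULL;
 *
 * inputs->name        = av_strdup("out");
 * inputs->filter_ctx  = buffersink_ctx;
 * inputs->pad_idx     = 0;
 * inputs->next        = NULL;
 *
 * err = avfilter_graph_parse_ptr(graph, "scale=78:24",
 *                                &inputs, &outputs, NULL);
 * avfilter_inout_free(&inputs);
 * avfilter_inout_free(&outputs);
 * @endcode
 */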
 
/**
* Add a graph described by a string to a graph.
*
* @param[in] graph the filter graph where to link the parsed graph context
* @param[in] filters string to be parsed
* @param[out] inputs a linked list of all free (unlinked) inputs of the
* parsed graph will be returned here. It is to be freed
* by the caller using avfilter_inout_free().
* @param[out] outputs a linked list of all free (unlinked) outputs of the
* parsed graph will be returned here. It is to be freed by the
* caller using avfilter_inout_free().
* @return zero on success, a negative AVERROR code on error
*
* @note This function returns the inputs and outputs that are left
* unlinked after parsing the graph and the caller then deals with
* them.
* @note This function makes no reference whatsoever to already
* existing parts of the graph and the inputs parameter will on return
* contain inputs of the newly parsed part of the graph. Analogously
* the outputs parameter will contain outputs of the newly created
* filters.
*/
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs,
AVFilterInOut **outputs);
 
/**
* Send a command to one or more filter instances.
*
* @param graph the filter graph
* @param target the filter(s) to which the command should be sent
* "all" sends to all filters
* otherwise it can be a filter or filter instance name
* which will send the command to all matching filters.
* @param cmd the command to send; for handling simplicity all commands must be alphanumeric only
* @param arg the argument for the command
* @param res a buffer of size res_len where the filter(s) can return a response.
*
* @returns >=0 on success otherwise an error code.
* AVERROR(ENOSYS) on unsupported commands
*/
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags);
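 
/* A minimal sketch, assuming the graph contains a "volume" filter that
 * supports the "volume" command:
 *
 * @code
 * char res[128] = { 0 };
 * int err = avfilter_graph_send_command(graph, "volume", "volume", "0.5",
 *                                       res, sizeof(res), 0);
 * if (err == AVERROR(ENOSYS))
 *     av_log(NULL, AV_LOG_WARNING, "command not supported\n");
 * @endcode
 */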
 
/**
* Queue a command for one or more filter instances.
*
* @param graph the filter graph
* @param target the filter(s) to which the command should be sent
* "all" sends to all filters
* otherwise it can be a filter or filter instance name
* which will send the command to all matching filters.
* @param cmd the command to send; for handling simplicity all commands must be alphanumeric only
* @param arg the argument for the command
* @param ts time at which the command should be sent to the filter
*
* @note As the commands are executed after this function returns, no return
* code from the filter is provided; AVFILTER_CMD_FLAG_ONE is likewise not
* supported.
*/
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts);
 
 
/**
* Dump a graph into a human-readable string representation.
*
* @param graph the graph to dump
* @param options formatting options; currently ignored
* @return a string, or NULL in case of memory allocation failure;
* the string must be freed using av_free
*/
char *avfilter_graph_dump(AVFilterGraph *graph, const char *options);
 
/**
* Request a frame on the oldest sink link.
*
* If the request returns AVERROR_EOF, try the next one.
*
* Note that this function is not meant to be the sole scheduling mechanism
* of a filtergraph, only a convenience function to help drain a filtergraph
* in a balanced way under normal circumstances.
*
* Also note that AVERROR_EOF does not mean that frames did not arrive on
* some of the sinks during the process.
* When there are multiple sink links, in case the requested link
* returns an EOF, this may cause a filter to flush pending frames
* which are sent to another sink link, although unrequested.
*
* @return the return value of ff_request_frame(),
* or AVERROR_EOF if all links returned AVERROR_EOF
*/
int avfilter_graph_request_oldest(AVFilterGraph *graph);
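 
/* A minimal drain loop built on this helper, assuming a fully configured
 * graph (illustrative only):
 *
 * @code
 * int err = 0;
 *
 * while (err >= 0)
 *     err = avfilter_graph_request_oldest(graph);
 * if (err != AVERROR_EOF)
 *     av_log(NULL, AV_LOG_ERROR, "draining the graph failed\n");
 * @endcode
 */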
 
/**
* @}
*/
 
#endif /* AVFILTER_AVFILTER_H */
/contrib/sdk/sources/ffmpeg/libavfilter/avfiltergraph.c
0,0 → 1,1321
/*
* filter graphs
* Copyright (c) 2008 Vitor Sessak
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "config.h"
 
#include <string.h>
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavcodec/avcodec.h" // avcodec_find_best_pix_fmt_of_2()
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "thread.h"
 
#define OFFSET(x) offsetof(AVFilterGraph, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption filtergraph_options[] = {
{ "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
{ .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
{ "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
{ "threads", "Maximum number of threads", OFFSET(nb_threads),
AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
{"scale_sws_opts" , "default scale filter options" , OFFSET(scale_sws_opts) ,
AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{"aresample_swr_opts" , "default aresample filter options" , OFFSET(aresample_swr_opts) ,
AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ NULL },
};
 
static const AVClass filtergraph_class = {
.class_name = "AVFilterGraph",
.item_name = av_default_item_name,
.version = LIBAVUTIL_VERSION_INT,
.option = filtergraph_options,
.category = AV_CLASS_CATEGORY_FILTER,
};
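 
/* Since AVFilterGraph carries this AVClass as its first member, the options
 * above can be set through the generic AVOptions API; a minimal sketch,
 * assuming a freshly allocated graph:
 *
 *     AVFilterGraph *g = avfilter_graph_alloc();
 *     av_opt_set_int(g, "threads", 4, 0);
 *     av_opt_set(g, "scale_sws_opts", "flags=bicubic", 0);
 */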
 
#if !HAVE_THREADS
void ff_graph_thread_free(AVFilterGraph *graph)
{
}
 
int ff_graph_thread_init(AVFilterGraph *graph)
{
graph->thread_type = 0;
graph->nb_threads = 1;
return 0;
}
#endif
 
AVFilterGraph *avfilter_graph_alloc(void)
{
AVFilterGraph *ret = av_mallocz(sizeof(*ret));
if (!ret)
return NULL;
 
ret->internal = av_mallocz(sizeof(*ret->internal));
if (!ret->internal) {
av_freep(&ret);
return NULL;
}
 
ret->av_class = &filtergraph_class;
av_opt_set_defaults(ret);
 
return ret;
}
 
void ff_filter_graph_remove_filter(AVFilterGraph *graph, AVFilterContext *filter)
{
int i;
for (i = 0; i < graph->nb_filters; i++) {
if (graph->filters[i] == filter) {
FFSWAP(AVFilterContext*, graph->filters[i],
graph->filters[graph->nb_filters - 1]);
graph->nb_filters--;
return;
}
}
}
 
void avfilter_graph_free(AVFilterGraph **graph)
{
if (!*graph)
return;
 
while ((*graph)->nb_filters)
avfilter_free((*graph)->filters[0]);
 
ff_graph_thread_free(*graph);
 
av_freep(&(*graph)->sink_links);
 
av_freep(&(*graph)->scale_sws_opts);
av_freep(&(*graph)->aresample_swr_opts);
av_freep(&(*graph)->resample_lavr_opts);
av_freep(&(*graph)->filters);
av_freep(&(*graph)->internal);
av_freep(graph);
}
 
#if FF_API_AVFILTER_OPEN
int avfilter_graph_add_filter(AVFilterGraph *graph, AVFilterContext *filter)
{
AVFilterContext **filters = av_realloc(graph->filters,
sizeof(*filters) * (graph->nb_filters + 1));
if (!filters)
return AVERROR(ENOMEM);
 
graph->filters = filters;
graph->filters[graph->nb_filters++] = filter;
 
#if FF_API_FOO_COUNT
FF_DISABLE_DEPRECATION_WARNINGS
graph->filter_count_unused = graph->nb_filters;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
 
filter->graph = graph;
 
return 0;
}
#endif
 
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
const char *name, const char *args, void *opaque,
AVFilterGraph *graph_ctx)
{
int ret;
 
*filt_ctx = avfilter_graph_alloc_filter(graph_ctx, filt, name);
if (!*filt_ctx)
return AVERROR(ENOMEM);
 
ret = avfilter_init_str(*filt_ctx, args);
if (ret < 0)
goto fail;
 
return 0;
 
fail:
if (*filt_ctx)
avfilter_free(*filt_ctx);
*filt_ctx = NULL;
return ret;
}
 
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
{
graph->disable_auto_convert = flags;
}
 
AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
const AVFilter *filter,
const char *name)
{
AVFilterContext **filters, *s;
 
if (graph->thread_type && !graph->internal->thread_execute) {
if (graph->execute) {
graph->internal->thread_execute = graph->execute;
} else {
int ret = ff_graph_thread_init(graph);
if (ret < 0) {
av_log(graph, AV_LOG_ERROR, "Error initializing threading.\n");
return NULL;
}
}
}
 
s = ff_filter_alloc(filter, name);
if (!s)
return NULL;
 
filters = av_realloc(graph->filters, sizeof(*filters) * (graph->nb_filters + 1));
if (!filters) {
avfilter_free(s);
return NULL;
}
 
graph->filters = filters;
graph->filters[graph->nb_filters++] = s;
 
#if FF_API_FOO_COUNT
FF_DISABLE_DEPRECATION_WARNINGS
graph->filter_count_unused = graph->nb_filters;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
 
s->graph = graph;
 
return s;
}
 
/**
* Check for the validity of graph.
*
* A graph is considered valid if all its input and output pads are
* connected.
*
* @return >= 0 in case of success, a negative value otherwise
*/
static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
{
AVFilterContext *filt;
int i, j;
 
for (i = 0; i < graph->nb_filters; i++) {
const AVFilterPad *pad;
filt = graph->filters[i];
 
for (j = 0; j < filt->nb_inputs; j++) {
if (!filt->inputs[j] || !filt->inputs[j]->src) {
pad = &filt->input_pads[j];
av_log(log_ctx, AV_LOG_ERROR,
"Input pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any source\n",
pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name);
return AVERROR(EINVAL);
}
}
 
for (j = 0; j < filt->nb_outputs; j++) {
if (!filt->outputs[j] || !filt->outputs[j]->dst) {
pad = &filt->output_pads[j];
av_log(log_ctx, AV_LOG_ERROR,
"Output pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any destination\n",
pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name);
return AVERROR(EINVAL);
}
}
}
 
return 0;
}
 
/**
* Configure all the links of graphctx.
*
* @return >= 0 in case of success, a negative value otherwise
*/
static int graph_config_links(AVFilterGraph *graph, AVClass *log_ctx)
{
AVFilterContext *filt;
int i, ret;
 
for (i = 0; i < graph->nb_filters; i++) {
filt = graph->filters[i];
 
if (!filt->nb_outputs) {
if ((ret = avfilter_config_links(filt)))
return ret;
}
}
 
return 0;
}
 
AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name)
{
int i;
 
for (i = 0; i < graph->nb_filters; i++)
if (graph->filters[i]->name && !strcmp(name, graph->filters[i]->name))
return graph->filters[i];
 
return NULL;
}
 
static void sanitize_channel_layouts(void *log, AVFilterChannelLayouts *l)
{
if (!l)
return;
if (l->nb_channel_layouts) {
if (l->all_layouts || l->all_counts)
av_log(log, AV_LOG_WARNING, "All layouts set on non-empty list\n");
l->all_layouts = l->all_counts = 0;
} else {
if (l->all_counts && !l->all_layouts)
av_log(log, AV_LOG_WARNING, "All counts without all layouts\n");
l->all_layouts = 1;
}
}
 
static int filter_query_formats(AVFilterContext *ctx)
{
int ret, i;
AVFilterFormats *formats;
AVFilterChannelLayouts *chlayouts;
AVFilterFormats *samplerates;
enum AVMediaType type = ctx->inputs && ctx->inputs [0] ? ctx->inputs [0]->type :
ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
AVMEDIA_TYPE_VIDEO;
 
if ((ret = ctx->filter->query_formats(ctx)) < 0) {
if (ret != AVERROR(EAGAIN))
av_log(ctx, AV_LOG_ERROR, "Query format failed for '%s': %s\n",
ctx->name, av_err2str(ret));
return ret;
}
 
for (i = 0; i < ctx->nb_inputs; i++)
sanitize_channel_layouts(ctx, ctx->inputs[i]->out_channel_layouts);
for (i = 0; i < ctx->nb_outputs; i++)
sanitize_channel_layouts(ctx, ctx->outputs[i]->in_channel_layouts);
 
formats = ff_all_formats(type);
if (!formats)
return AVERROR(ENOMEM);
ff_set_common_formats(ctx, formats);
if (type == AVMEDIA_TYPE_AUDIO) {
samplerates = ff_all_samplerates();
if (!samplerates)
return AVERROR(ENOMEM);
ff_set_common_samplerates(ctx, samplerates);
chlayouts = ff_all_channel_layouts();
if (!chlayouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, chlayouts);
}
return 0;
}
 
static int formats_declared(AVFilterContext *f)
{
int i;
 
for (i = 0; i < f->nb_inputs; i++) {
if (!f->inputs[i]->out_formats)
return 0;
if (f->inputs[i]->type == AVMEDIA_TYPE_AUDIO &&
!(f->inputs[i]->out_samplerates &&
f->inputs[i]->out_channel_layouts))
return 0;
}
for (i = 0; i < f->nb_outputs; i++) {
if (!f->outputs[i]->in_formats)
return 0;
if (f->outputs[i]->type == AVMEDIA_TYPE_AUDIO &&
!(f->outputs[i]->in_samplerates &&
f->outputs[i]->in_channel_layouts))
return 0;
}
return 1;
}
 
static AVFilterFormats *clone_filter_formats(AVFilterFormats *arg)
{
AVFilterFormats *a = av_memdup(arg, sizeof(*arg));
if (a) {
a->refcount = 0;
a->refs = NULL;
a->formats = av_memdup(a->formats, sizeof(*a->formats) * a->nb_formats);
if (!a->formats && arg->formats)
av_freep(&a);
}
return a;
}
 
static int can_merge_formats(AVFilterFormats *a_arg,
AVFilterFormats *b_arg,
enum AVMediaType type,
int is_sample_rate)
{
AVFilterFormats *a, *b, *ret;
if (a_arg == b_arg)
return 1;
a = clone_filter_formats(a_arg);
b = clone_filter_formats(b_arg);
 
if (!a || !b) {
if (a)
av_freep(&a->formats);
if (b)
av_freep(&b->formats);
 
av_freep(&a);
av_freep(&b);
 
return 0;
}
 
if (is_sample_rate) {
ret = ff_merge_samplerates(a, b);
} else {
ret = ff_merge_formats(a, b, type);
}
if (ret) {
av_freep(&ret->formats);
av_freep(&ret->refs);
av_freep(&ret);
return 1;
} else {
av_freep(&a->formats);
av_freep(&b->formats);
av_freep(&a);
av_freep(&b);
return 0;
}
}
 
/**
* Perform one round of query_formats() and merging formats lists on the
* filter graph.
* @return >=0 if all links formats lists could be queried and merged;
* AVERROR(EAGAIN) some progress was made in the queries or merging
* and a later call may succeed;
* AVERROR(EIO) (may be changed) plus a log message if no progress
* was made and the negotiation is stuck;
* a negative error code if some other error happened
*/
static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
int i, j, ret;
int scaler_count = 0, resampler_count = 0;
int count_queried = 0; /* successful calls to query_formats() */
int count_merged = 0; /* successful merge of formats lists */
int count_already_merged = 0; /* lists already merged */
int count_delayed = 0; /* lists that need to be merged later */
 
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *f = graph->filters[i];
if (formats_declared(f))
continue;
if (f->filter->query_formats)
ret = filter_query_formats(f);
else
ret = ff_default_query_formats(f);
if (ret < 0 && ret != AVERROR(EAGAIN))
return ret;
/* note: EAGAIN could indicate a partial success, not counted yet */
count_queried += ret >= 0;
}
 
/* go through and merge as many format lists as possible */
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *filter = graph->filters[i];
 
for (j = 0; j < filter->nb_inputs; j++) {
AVFilterLink *link = filter->inputs[j];
int convert_needed = 0;
 
if (!link)
continue;
 
if (link->in_formats != link->out_formats
&& link->in_formats && link->out_formats)
if (!can_merge_formats(link->in_formats, link->out_formats,
link->type, 0))
convert_needed = 1;
if (link->type == AVMEDIA_TYPE_AUDIO) {
if (link->in_samplerates != link->out_samplerates
&& link->in_samplerates && link->out_samplerates)
if (!can_merge_formats(link->in_samplerates,
link->out_samplerates,
0, 1))
convert_needed = 1;
}
 
#define MERGE_DISPATCH(field, statement) \
if (!(link->in_ ## field && link->out_ ## field)) { \
count_delayed++; \
} else if (link->in_ ## field == link->out_ ## field) { \
count_already_merged++; \
} else if (!convert_needed) { \
count_merged++; \
statement \
}
 
if (link->type == AVMEDIA_TYPE_AUDIO) {
MERGE_DISPATCH(channel_layouts,
if (!ff_merge_channel_layouts(link->in_channel_layouts,
link->out_channel_layouts))
convert_needed = 1;
)
MERGE_DISPATCH(samplerates,
if (!ff_merge_samplerates(link->in_samplerates,
link->out_samplerates))
convert_needed = 1;
)
}
MERGE_DISPATCH(formats,
if (!ff_merge_formats(link->in_formats, link->out_formats,
link->type))
convert_needed = 1;
)
#undef MERGE_DISPATCH
 
if (convert_needed) {
AVFilterContext *convert;
AVFilter *filter;
AVFilterLink *inlink, *outlink;
char scale_args[256];
char inst_name[30];
 
/* couldn't merge format lists. auto-insert conversion filter */
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
if (!(filter = avfilter_get_by_name("scale"))) {
av_log(log_ctx, AV_LOG_ERROR, "'scale' filter "
"not present, cannot convert pixel formats.\n");
return AVERROR(EINVAL);
}
 
snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d",
scaler_count++);
 
if ((ret = avfilter_graph_create_filter(&convert, filter,
inst_name, graph->scale_sws_opts, NULL,
graph)) < 0)
return ret;
break;
case AVMEDIA_TYPE_AUDIO:
if (!(filter = avfilter_get_by_name("aresample"))) {
av_log(log_ctx, AV_LOG_ERROR, "'aresample' filter "
"not present, cannot convert audio formats.\n");
return AVERROR(EINVAL);
}
 
snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d",
resampler_count++);
scale_args[0] = '\0';
if (graph->aresample_swr_opts)
snprintf(scale_args, sizeof(scale_args), "%s",
graph->aresample_swr_opts);
if ((ret = avfilter_graph_create_filter(&convert, filter,
inst_name, graph->aresample_swr_opts,
NULL, graph)) < 0)
return ret;
break;
default:
return AVERROR(EINVAL);
}
 
if ((ret = avfilter_insert_filter(link, convert, 0, 0)) < 0)
return ret;
 
filter_query_formats(convert);
inlink = convert->inputs[0];
outlink = convert->outputs[0];
if (!ff_merge_formats( inlink->in_formats, inlink->out_formats, inlink->type) ||
!ff_merge_formats(outlink->in_formats, outlink->out_formats, outlink->type))
ret |= AVERROR(ENOSYS);
if (inlink->type == AVMEDIA_TYPE_AUDIO &&
(!ff_merge_samplerates(inlink->in_samplerates,
inlink->out_samplerates) ||
!ff_merge_channel_layouts(inlink->in_channel_layouts,
inlink->out_channel_layouts)))
ret |= AVERROR(ENOSYS);
if (outlink->type == AVMEDIA_TYPE_AUDIO &&
(!ff_merge_samplerates(outlink->in_samplerates,
outlink->out_samplerates) ||
!ff_merge_channel_layouts(outlink->in_channel_layouts,
outlink->out_channel_layouts)))
ret |= AVERROR(ENOSYS);
 
if (ret < 0) {
av_log(log_ctx, AV_LOG_ERROR,
"Impossible to convert between the formats supported by the filter "
"'%s' and the filter '%s'\n", link->src->name, link->dst->name);
return ret;
}
}
}
}
 
av_log(graph, AV_LOG_DEBUG, "query_formats: "
"%d queried, %d merged, %d already done, %d delayed\n",
count_queried, count_merged, count_already_merged, count_delayed);
if (count_delayed) {
AVBPrint bp;
 
/* if count_queried > 0, one filter at least did set its formats,
that will give additional information to its neighbour;
if count_merged > 0, one pair of formats lists at least was merged,
that will give additional information to all connected filters;
in both cases, progress was made and a new round must be done */
if (count_queried || count_merged)
return AVERROR(EAGAIN);
av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
for (i = 0; i < graph->nb_filters; i++)
if (!formats_declared(graph->filters[i]))
av_bprintf(&bp, "%s%s", bp.len ? ", " : "",
graph->filters[i]->name);
av_log(graph, AV_LOG_ERROR,
"The following filters could not choose their formats: %s\n"
"Consider inserting the (a)format filter near their input or "
"output.\n", bp.str);
return AVERROR(EIO);
}
return 0;
}
 
static int pick_format(AVFilterLink *link, AVFilterLink *ref)
{
if (!link || !link->in_formats)
return 0;
 
if (link->type == AVMEDIA_TYPE_VIDEO) {
if(ref && ref->type == AVMEDIA_TYPE_VIDEO){
int has_alpha= av_pix_fmt_desc_get(ref->format)->nb_components % 2 == 0;
enum AVPixelFormat best= AV_PIX_FMT_NONE;
int i;
for (i=0; i<link->in_formats->nb_formats; i++) {
enum AVPixelFormat p = link->in_formats->formats[i];
best= avcodec_find_best_pix_fmt_of_2(best, p, ref->format, has_alpha, NULL);
}
av_log(link->src,AV_LOG_DEBUG, "picking %s out of %d ref:%s alpha:%d\n",
av_get_pix_fmt_name(best), link->in_formats->nb_formats,
av_get_pix_fmt_name(ref->format), has_alpha);
link->in_formats->formats[0] = best;
}
}
 
link->in_formats->nb_formats = 1;
link->format = link->in_formats->formats[0];
 
if (link->type == AVMEDIA_TYPE_AUDIO) {
if (!link->in_samplerates->nb_formats) {
av_log(link->src, AV_LOG_ERROR, "Cannot select sample rate for"
" the link between filters %s and %s.\n", link->src->name,
link->dst->name);
return AVERROR(EINVAL);
}
link->in_samplerates->nb_formats = 1;
link->sample_rate = link->in_samplerates->formats[0];
 
if (link->in_channel_layouts->all_layouts) {
av_log(link->src, AV_LOG_ERROR, "Cannot select channel layout for"
" the link between filters %s and %s.\n", link->src->name,
link->dst->name);
return AVERROR(EINVAL);
}
link->in_channel_layouts->nb_channel_layouts = 1;
link->channel_layout = link->in_channel_layouts->channel_layouts[0];
if ((link->channels = FF_LAYOUT2COUNT(link->channel_layout)))
link->channel_layout = 0;
else
link->channels = av_get_channel_layout_nb_channels(link->channel_layout);
}
 
ff_formats_unref(&link->in_formats);
ff_formats_unref(&link->out_formats);
ff_formats_unref(&link->in_samplerates);
ff_formats_unref(&link->out_samplerates);
ff_channel_layouts_unref(&link->in_channel_layouts);
ff_channel_layouts_unref(&link->out_channel_layouts);
 
return 0;
}
 
#define REDUCE_FORMATS(fmt_type, list_type, list, var, nb, add_format) \
do { \
for (i = 0; i < filter->nb_inputs; i++) { \
AVFilterLink *link = filter->inputs[i]; \
fmt_type fmt; \
\
if (!link->out_ ## list || link->out_ ## list->nb != 1) \
continue; \
fmt = link->out_ ## list->var[0]; \
\
for (j = 0; j < filter->nb_outputs; j++) { \
AVFilterLink *out_link = filter->outputs[j]; \
list_type *fmts; \
\
if (link->type != out_link->type || \
out_link->in_ ## list->nb == 1) \
continue; \
fmts = out_link->in_ ## list; \
\
if (!out_link->in_ ## list->nb) { \
add_format(&out_link->in_ ##list, fmt); \
ret = 1; \
break; \
} \
\
for (k = 0; k < out_link->in_ ## list->nb; k++) \
if (fmts->var[k] == fmt) { \
fmts->var[0] = fmt; \
fmts->nb = 1; \
ret = 1; \
break; \
} \
} \
} \
} while (0)
 
static int reduce_formats_on_filter(AVFilterContext *filter)
{
int i, j, k, ret = 0;
 
REDUCE_FORMATS(int, AVFilterFormats, formats, formats,
nb_formats, ff_add_format);
REDUCE_FORMATS(int, AVFilterFormats, samplerates, formats,
nb_formats, ff_add_format);
 
/* reduce channel layouts */
for (i = 0; i < filter->nb_inputs; i++) {
AVFilterLink *inlink = filter->inputs[i];
uint64_t fmt;
 
if (!inlink->out_channel_layouts ||
inlink->out_channel_layouts->nb_channel_layouts != 1)
continue;
fmt = inlink->out_channel_layouts->channel_layouts[0];
 
for (j = 0; j < filter->nb_outputs; j++) {
AVFilterLink *outlink = filter->outputs[j];
AVFilterChannelLayouts *fmts;
 
fmts = outlink->in_channel_layouts;
if (inlink->type != outlink->type || fmts->nb_channel_layouts == 1)
continue;
 
if (fmts->all_layouts) {
/* Turn the infinite list into a singleton */
fmts->all_layouts = fmts->all_counts = 0;
ff_add_channel_layout(&outlink->in_channel_layouts, fmt);
break;
}
 
for (k = 0; k < outlink->in_channel_layouts->nb_channel_layouts; k++) {
if (fmts->channel_layouts[k] == fmt) {
fmts->channel_layouts[0] = fmt;
fmts->nb_channel_layouts = 1;
ret = 1;
break;
}
}
}
}
 
return ret;
}
 
static void reduce_formats(AVFilterGraph *graph)
{
int i, reduced;
 
do {
reduced = 0;
 
for (i = 0; i < graph->nb_filters; i++)
reduced |= reduce_formats_on_filter(graph->filters[i]);
} while (reduced);
}
 
static void swap_samplerates_on_filter(AVFilterContext *filter)
{
AVFilterLink *link = NULL;
int sample_rate;
int i, j;
 
for (i = 0; i < filter->nb_inputs; i++) {
link = filter->inputs[i];
 
if (link->type == AVMEDIA_TYPE_AUDIO &&
link->out_samplerates->nb_formats == 1)
break;
}
if (i == filter->nb_inputs)
return;
 
sample_rate = link->out_samplerates->formats[0];
 
for (i = 0; i < filter->nb_outputs; i++) {
AVFilterLink *outlink = filter->outputs[i];
int best_idx = 0, best_diff = INT_MAX;
 
if (outlink->type != AVMEDIA_TYPE_AUDIO ||
outlink->in_samplerates->nb_formats < 2)
continue;
 
for (j = 0; j < outlink->in_samplerates->nb_formats; j++) {
int diff = abs(sample_rate - outlink->in_samplerates->formats[j]);
 
if (diff < best_diff) {
best_diff = diff;
best_idx = j;
}
}
FFSWAP(int, outlink->in_samplerates->formats[0],
outlink->in_samplerates->formats[best_idx]);
}
}
 
static void swap_samplerates(AVFilterGraph *graph)
{
int i;
 
for (i = 0; i < graph->nb_filters; i++)
swap_samplerates_on_filter(graph->filters[i]);
}
 
#define CH_CENTER_PAIR (AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER)
#define CH_FRONT_PAIR (AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT)
#define CH_STEREO_PAIR (AV_CH_STEREO_LEFT | AV_CH_STEREO_RIGHT)
#define CH_WIDE_PAIR (AV_CH_WIDE_LEFT | AV_CH_WIDE_RIGHT)
#define CH_SIDE_PAIR (AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT)
#define CH_DIRECT_PAIR (AV_CH_SURROUND_DIRECT_LEFT | AV_CH_SURROUND_DIRECT_RIGHT)
#define CH_BACK_PAIR (AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT)
 
/* allowable substitutions for channel pairs when comparing layouts,
* ordered by priority for both values */
static const uint64_t ch_subst[][2] = {
{ CH_FRONT_PAIR, CH_CENTER_PAIR },
{ CH_FRONT_PAIR, CH_WIDE_PAIR },
{ CH_FRONT_PAIR, AV_CH_FRONT_CENTER },
{ CH_CENTER_PAIR, CH_FRONT_PAIR },
{ CH_CENTER_PAIR, CH_WIDE_PAIR },
{ CH_CENTER_PAIR, AV_CH_FRONT_CENTER },
{ CH_WIDE_PAIR, CH_FRONT_PAIR },
{ CH_WIDE_PAIR, CH_CENTER_PAIR },
{ CH_WIDE_PAIR, AV_CH_FRONT_CENTER },
{ AV_CH_FRONT_CENTER, CH_FRONT_PAIR },
{ AV_CH_FRONT_CENTER, CH_CENTER_PAIR },
{ AV_CH_FRONT_CENTER, CH_WIDE_PAIR },
{ CH_SIDE_PAIR, CH_DIRECT_PAIR },
{ CH_SIDE_PAIR, CH_BACK_PAIR },
{ CH_SIDE_PAIR, AV_CH_BACK_CENTER },
{ CH_BACK_PAIR, CH_DIRECT_PAIR },
{ CH_BACK_PAIR, CH_SIDE_PAIR },
{ CH_BACK_PAIR, AV_CH_BACK_CENTER },
{ AV_CH_BACK_CENTER, CH_BACK_PAIR },
{ AV_CH_BACK_CENTER, CH_DIRECT_PAIR },
{ AV_CH_BACK_CENTER, CH_SIDE_PAIR },
};
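 
/* Example of one substitution: comparing a stereo (FL+FR) input against a
 * mono (FC) output candidate, the { CH_FRONT_PAIR, AV_CH_FRONT_CENTER }
 * entry lets the front pair match the single center channel, earning the
 * match bonus minus a small deduction for the substitution. */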
 
static void swap_channel_layouts_on_filter(AVFilterContext *filter)
{
AVFilterLink *link = NULL;
int i, j, k;
 
for (i = 0; i < filter->nb_inputs; i++) {
link = filter->inputs[i];
 
if (link->type == AVMEDIA_TYPE_AUDIO &&
link->out_channel_layouts->nb_channel_layouts == 1)
break;
}
if (i == filter->nb_inputs)
return;
 
for (i = 0; i < filter->nb_outputs; i++) {
AVFilterLink *outlink = filter->outputs[i];
int best_idx = -1, best_score = INT_MIN, best_count_diff = INT_MAX;
 
if (outlink->type != AVMEDIA_TYPE_AUDIO ||
outlink->in_channel_layouts->nb_channel_layouts < 2)
continue;
 
for (j = 0; j < outlink->in_channel_layouts->nb_channel_layouts; j++) {
uint64_t in_chlayout = link->out_channel_layouts->channel_layouts[0];
uint64_t out_chlayout = outlink->in_channel_layouts->channel_layouts[j];
int in_channels = av_get_channel_layout_nb_channels(in_chlayout);
int out_channels = av_get_channel_layout_nb_channels(out_chlayout);
int count_diff = out_channels - in_channels;
int matched_channels, extra_channels;
int score = 100000;
 
if (FF_LAYOUT2COUNT(in_chlayout) || FF_LAYOUT2COUNT(out_chlayout)) {
/* Compute score in case the input or output layout encodes
a channel count; in this case the score is not altered by
the computation afterwards, as in_chlayout and
out_chlayout have both been set to 0 */
if (FF_LAYOUT2COUNT(in_chlayout))
in_channels = FF_LAYOUT2COUNT(in_chlayout);
if (FF_LAYOUT2COUNT(out_chlayout))
out_channels = FF_LAYOUT2COUNT(out_chlayout);
score -= 10000 + FFABS(out_channels - in_channels) +
(in_channels > out_channels ? 10000 : 0);
in_chlayout = out_chlayout = 0;
/* Let the remaining computation run, even if the score
value is not altered */
}
 
/* channel substitution */
for (k = 0; k < FF_ARRAY_ELEMS(ch_subst); k++) {
uint64_t cmp0 = ch_subst[k][0];
uint64_t cmp1 = ch_subst[k][1];
if (( in_chlayout & cmp0) && (!(out_chlayout & cmp0)) &&
(out_chlayout & cmp1) && (!( in_chlayout & cmp1))) {
in_chlayout &= ~cmp0;
out_chlayout &= ~cmp1;
/* add score for channel match, minus a deduction for
having to do the substitution */
score += 10 * av_get_channel_layout_nb_channels(cmp1) - 2;
}
}
 
/* no penalty for LFE channel mismatch */
if ( (in_chlayout & AV_CH_LOW_FREQUENCY) &&
(out_chlayout & AV_CH_LOW_FREQUENCY))
score += 10;
in_chlayout &= ~AV_CH_LOW_FREQUENCY;
out_chlayout &= ~AV_CH_LOW_FREQUENCY;
 
matched_channels = av_get_channel_layout_nb_channels(in_chlayout &
out_chlayout);
extra_channels = av_get_channel_layout_nb_channels(out_chlayout &
(~in_chlayout));
score += 10 * matched_channels - 5 * extra_channels;
 
if (score > best_score ||
(count_diff < best_count_diff && score == best_score)) {
best_score = score;
best_idx = j;
best_count_diff = count_diff;
}
}
av_assert0(best_idx >= 0);
FFSWAP(uint64_t, outlink->in_channel_layouts->channel_layouts[0],
outlink->in_channel_layouts->channel_layouts[best_idx]);
}
 
}
 
static void swap_channel_layouts(AVFilterGraph *graph)
{
int i;
 
for (i = 0; i < graph->nb_filters; i++)
swap_channel_layouts_on_filter(graph->filters[i]);
}
 
static void swap_sample_fmts_on_filter(AVFilterContext *filter)
{
AVFilterLink *link = NULL;
int format, bps;
int i, j;
 
for (i = 0; i < filter->nb_inputs; i++) {
link = filter->inputs[i];
 
if (link->type == AVMEDIA_TYPE_AUDIO &&
link->out_formats->nb_formats == 1)
break;
}
if (i == filter->nb_inputs)
return;
 
format = link->out_formats->formats[0];
bps = av_get_bytes_per_sample(format);
 
for (i = 0; i < filter->nb_outputs; i++) {
AVFilterLink *outlink = filter->outputs[i];
int best_idx = -1, best_score = INT_MIN;
 
if (outlink->type != AVMEDIA_TYPE_AUDIO ||
outlink->in_formats->nb_formats < 2)
continue;
 
for (j = 0; j < outlink->in_formats->nb_formats; j++) {
int out_format = outlink->in_formats->formats[j];
int out_bps = av_get_bytes_per_sample(out_format);
int score;
 
if (av_get_packed_sample_fmt(out_format) == format ||
av_get_planar_sample_fmt(out_format) == format) {
best_idx = j;
break;
}
 
/* for s32 and float prefer double to prevent loss of information */
if (bps == 4 && out_bps == 8) {
best_idx = j;
break;
}
 
/* prefer closest higher or equal bps */
score = -abs(out_bps - bps);
if (out_bps >= bps)
score += INT_MAX/2;
 
if (score > best_score) {
best_score = score;
best_idx = j;
}
}
av_assert0(best_idx >= 0);
FFSWAP(int, outlink->in_formats->formats[0],
outlink->in_formats->formats[best_idx]);
}
}
 
static void swap_sample_fmts(AVFilterGraph *graph)
{
int i;
 
for (i = 0; i < graph->nb_filters; i++)
swap_sample_fmts_on_filter(graph->filters[i]);
}
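 
/* Two passes: first repeatedly propagate links whose format list has already
 * collapsed to a single entry (and copy the first input's format to still
 * undecided outputs) until a fixed point is reached, then force a choice on
 * every remaining link. */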
 
static int pick_formats(AVFilterGraph *graph)
{
int i, j, ret;
int change;
 
do {
change = 0;
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *filter = graph->filters[i];
if (filter->nb_inputs) {
for (j = 0; j < filter->nb_inputs; j++) {
if (filter->inputs[j]->in_formats && filter->inputs[j]->in_formats->nb_formats == 1) {
if ((ret = pick_format(filter->inputs[j], NULL)) < 0)
return ret;
change = 1;
}
}
}
if (filter->nb_outputs) {
for (j = 0; j < filter->nb_outputs; j++) {
if (filter->outputs[j]->in_formats && filter->outputs[j]->in_formats->nb_formats == 1) {
if ((ret = pick_format(filter->outputs[j], NULL)) < 0)
return ret;
change = 1;
}
}
}
if (filter->nb_inputs && filter->nb_outputs && filter->inputs[0]->format >= 0) {
for (j = 0; j < filter->nb_outputs; j++) {
if (filter->outputs[j]->format < 0) {
if ((ret = pick_format(filter->outputs[j], filter->inputs[0])) < 0)
return ret;
change = 1;
}
}
}
}
} while (change);
 
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *filter = graph->filters[i];
 
for (j = 0; j < filter->nb_inputs; j++)
if ((ret = pick_format(filter->inputs[j], NULL)) < 0)
return ret;
for (j = 0; j < filter->nb_outputs; j++)
if ((ret = pick_format(filter->outputs[j], NULL)) < 0)
return ret;
}
return 0;
}
 
/**
* Configure the formats of all the links in the graph.
*/
static int graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
int ret;
 
/* find supported formats from sub-filters, and merge along links */
while ((ret = query_formats(graph, log_ctx)) == AVERROR(EAGAIN))
av_log(graph, AV_LOG_DEBUG, "query_formats not finished\n");
if (ret < 0)
return ret;
 
/* Once everything is merged, it's possible that we'll still have
* multiple valid media format choices. We try to minimize the amount
* of format conversion inside filters */
reduce_formats(graph);
 
/* for audio filters, ensure the best format, sample rate and channel layout
* is selected */
swap_sample_fmts(graph);
swap_samplerates(graph);
swap_channel_layouts(graph);
 
if ((ret = pick_formats(graph)) < 0)
return ret;
 
return 0;
}
 
static int ff_avfilter_graph_config_pointers(AVFilterGraph *graph,
AVClass *log_ctx)
{
unsigned i, j;
int sink_links_count = 0, n = 0;
AVFilterContext *f;
AVFilterLink **sinks;
 
for (i = 0; i < graph->nb_filters; i++) {
f = graph->filters[i];
for (j = 0; j < f->nb_inputs; j++) {
f->inputs[j]->graph = graph;
f->inputs[j]->age_index = -1;
}
for (j = 0; j < f->nb_outputs; j++) {
f->outputs[j]->graph = graph;
f->outputs[j]->age_index= -1;
}
if (!f->nb_outputs) {
if (f->nb_inputs > INT_MAX - sink_links_count)
return AVERROR(EINVAL);
sink_links_count += f->nb_inputs;
}
}
sinks = av_calloc(sink_links_count, sizeof(*sinks));
if (!sinks)
return AVERROR(ENOMEM);
for (i = 0; i < graph->nb_filters; i++) {
f = graph->filters[i];
if (!f->nb_outputs) {
for (j = 0; j < f->nb_inputs; j++) {
sinks[n] = f->inputs[j];
f->inputs[j]->age_index = n++;
}
}
}
av_assert0(n == sink_links_count);
graph->sink_links = sinks;
graph->sink_links_count = sink_links_count;
return 0;
}
 
static int graph_insert_fifos(AVFilterGraph *graph, AVClass *log_ctx)
{
AVFilterContext *f;
int i, j, ret;
int fifo_count = 0;
 
for (i = 0; i < graph->nb_filters; i++) {
f = graph->filters[i];
 
for (j = 0; j < f->nb_inputs; j++) {
AVFilterLink *link = f->inputs[j];
AVFilterContext *fifo_ctx;
AVFilter *fifo;
char name[32];
 
if (!link->dstpad->needs_fifo)
continue;
 
fifo = f->inputs[j]->type == AVMEDIA_TYPE_VIDEO ?
avfilter_get_by_name("fifo") :
avfilter_get_by_name("afifo");
 
snprintf(name, sizeof(name), "auto-inserted fifo %d", fifo_count++);
 
ret = avfilter_graph_create_filter(&fifo_ctx, fifo, name, NULL,
NULL, graph);
if (ret < 0)
return ret;
 
ret = avfilter_insert_filter(link, fifo_ctx, 0, 0);
if (ret < 0)
return ret;
}
}
 
return 0;
}
 
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
{
int ret;
 
if ((ret = graph_check_validity(graphctx, log_ctx)))
return ret;
if ((ret = graph_insert_fifos(graphctx, log_ctx)) < 0)
return ret;
if ((ret = graph_config_formats(graphctx, log_ctx)))
return ret;
if ((ret = graph_config_links(graphctx, log_ctx)))
return ret;
if ((ret = ff_avfilter_graph_config_pointers(graphctx, log_ctx)))
return ret;
 
return 0;
}
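 
/* Configuration sketch (assumes the graph was just built with
 * avfilter_graph_create_filter() and avfilter_link(); error handling
 * abbreviated):
 *
 *     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
 *         goto fail; // invalid graph, or format negotiation failed
 */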
 
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
{
int i, r = AVERROR(ENOSYS);
 
if (!graph)
return r;
 
if ((flags & AVFILTER_CMD_FLAG_ONE) && !(flags & AVFILTER_CMD_FLAG_FAST)) {
r = avfilter_graph_send_command(graph, target, cmd, arg, res, res_len, flags | AVFILTER_CMD_FLAG_FAST);
if (r != AVERROR(ENOSYS))
return r;
}
 
if (res_len && res)
res[0] = 0;
 
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *filter = graph->filters[i];
if (!strcmp(target, "all") || (filter->name && !strcmp(target, filter->name)) || !strcmp(target, filter->filter->name)) {
r = avfilter_process_command(filter, cmd, arg, res, res_len, flags);
if (r != AVERROR(ENOSYS)) {
if ((flags & AVFILTER_CMD_FLAG_ONE) || r < 0)
return r;
}
}
}
 
return r;
}
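 
/* Usage sketch: send a command to one filter instance at runtime. The target
 * "volume" and the command/argument pair are example values; any filter that
 * implements process_command() can be targeted, or "all" to broadcast.
 *
 *     char res[128];
 *     int err = avfilter_graph_send_command(graph, "volume", "volume", "0.5",
 *                                           res, sizeof(res),
 *                                           AVFILTER_CMD_FLAG_ONE);
 *     if (err < 0 && err != AVERROR(ENOSYS))
 *         av_log(NULL, AV_LOG_ERROR, "command failed: %d\n", err);
 */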
 
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *command, const char *arg, int flags, double ts)
{
int i;
 
if (!graph)
return 0;
 
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *filter = graph->filters[i];
if (filter && (!strcmp(target, "all") || !strcmp(target, filter->name) || !strcmp(target, filter->filter->name))) {
AVFilterCommand **queue = &filter->command_queue, *next;
while (*queue && (*queue)->time <= ts)
queue = &(*queue)->next;
next = *queue;
*queue = av_mallocz(sizeof(AVFilterCommand));
if (!*queue)
return AVERROR(ENOMEM);
(*queue)->command = av_strdup(command);
(*queue)->arg = av_strdup(arg);
(*queue)->time = ts;
(*queue)->flags = flags;
(*queue)->next = next;
if (flags & AVFILTER_CMD_FLAG_ONE)
return 0;
}
}
 
return 0;
}
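 
/* graph->sink_links is kept organized as a binary heap on current_pts, with
 * the least-advanced sink at index 0 and the children of node i at indices
 * 2*i+1 and 2*i+2; age_index records each link's position so it can be
 * re-sorted in O(log n) when its timestamp changes. */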
 
static void heap_bubble_up(AVFilterGraph *graph,
AVFilterLink *link, int index)
{
AVFilterLink **links = graph->sink_links;
 
while (index) {
int parent = (index - 1) >> 1;
if (links[parent]->current_pts >= link->current_pts)
break;
links[index] = links[parent];
links[index]->age_index = index;
index = parent;
}
links[index] = link;
link->age_index = index;
}
 
static void heap_bubble_down(AVFilterGraph *graph,
AVFilterLink *link, int index)
{
AVFilterLink **links = graph->sink_links;
 
while (1) {
int child = 2 * index + 1;
if (child >= graph->sink_links_count)
break;
if (child + 1 < graph->sink_links_count &&
links[child + 1]->current_pts < links[child]->current_pts)
child++;
if (link->current_pts < links[child]->current_pts)
break;
links[index] = links[child];
links[index]->age_index = index;
index = child;
}
links[index] = link;
link->age_index = index;
}
 
void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link)
{
heap_bubble_up (graph, link, link->age_index);
heap_bubble_down(graph, link, link->age_index);
}
 
 
int avfilter_graph_request_oldest(AVFilterGraph *graph)
{
while (graph->sink_links_count) {
AVFilterLink *oldest = graph->sink_links[0];
int r = ff_request_frame(oldest);
if (r != AVERROR_EOF)
return r;
av_log(oldest->dst, AV_LOG_DEBUG, "EOF on sink link %s:%s.\n",
oldest->dst ? oldest->dst->name : "unknown",
oldest->dstpad ? oldest->dstpad->name : "unknown");
/* EOF: remove the link from the heap */
if (oldest->age_index < --graph->sink_links_count)
heap_bubble_down(graph, graph->sink_links[graph->sink_links_count],
oldest->age_index);
oldest->age_index = -1;
}
return AVERROR_EOF;
}
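 
/* Drain sketch (assumes "sink" is a buffersink context in the graph and
 * "frame" a pre-allocated AVFrame): repeatedly advance the least-advanced
 * sink, then collect whatever it buffered.
 *
 *     for (;;) {
 *         int ret = avfilter_graph_request_oldest(graph);
 *         if (ret == AVERROR_EOF)
 *             break;                  // every sink reached EOF
 *         if (ret < 0 && ret != AVERROR(EAGAIN))
 *             return ret;
 *         while (av_buffersink_get_frame_flags(sink, frame,
 *                        AV_BUFFERSINK_FLAG_NO_REQUEST) >= 0)
 *             av_frame_unref(frame);  // process the frame here
 *     }
 */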
/contrib/sdk/sources/ffmpeg/libavfilter/avfiltergraph.h
0,0 → 1,28
/*
* Filter graphs
* copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_AVFILTERGRAPH_H
#define AVFILTER_AVFILTERGRAPH_H
 
#include "avfilter.h"
#include "libavutil/log.h"
 
#endif /* AVFILTER_AVFILTERGRAPH_H */
/contrib/sdk/sources/ffmpeg/libavfilter/bbox.c
0,0 → 1,75
/*
* Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "bbox.h"
 
int ff_calculate_bounding_box(FFBoundingBox *bbox,
const uint8_t *data, int linesize, int w, int h,
int min_val)
{
int x, y;
int start_x;
int start_y;
int end_x;
int end_y;
const uint8_t *line;
 
/* left bound */
for (start_x = 0; start_x < w; start_x++)
for (y = 0; y < h; y++)
if ((data[y * linesize + start_x] > min_val))
goto outl;
outl:
if (start_x == w) /* no points found */
return 0;
 
/* right bound */
for (end_x = w - 1; end_x >= start_x; end_x--)
for (y = 0; y < h; y++)
if ((data[y * linesize + end_x] > min_val))
goto outr;
outr:
 
/* top bound */
line = data;
for (start_y = 0; start_y < h; start_y++) {
for (x = 0; x < w; x++)
if (line[x] > min_val)
goto outt;
line += linesize;
}
outt:
 
/* bottom bound */
line = data + (h-1)*linesize;
for (end_y = h - 1; end_y >= start_y; end_y--) {
for (x = 0; x < w; x++)
if (line[x] > min_val)
goto outb;
line -= linesize;
}
outb:
 
bbox->x1 = start_x;
bbox->y1 = start_y;
bbox->x2 = end_x;
bbox->y2 = end_y;
return 1;
}
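 
/* Usage sketch (assumes "frame" holds a planar YUV picture): find the
 * bounding box of luma samples brighter than 16.
 *
 *     FFBoundingBox box;
 *     if (ff_calculate_bounding_box(&box, frame->data[0], frame->linesize[0],
 *                                   frame->width, frame->height, 16))
 *         av_log(NULL, AV_LOG_INFO, "bbox: (%d,%d)-(%d,%d)\n",
 *                box.x1, box.y1, box.x2, box.y2);
 */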
/contrib/sdk/sources/ffmpeg/libavfilter/bbox.h
0,0 → 1,44
/*
* Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_BBOX_H
#define AVFILTER_BBOX_H
 
#include <stdint.h>
 
typedef struct {
int x1, x2, y1, y2;
} FFBoundingBox;
 
/**
* Calculate the smallest rectangle that will encompass the
* region with values > min_val.
*
* @param bbox bounding box structure which is updated with the found values.
* If no pixels could be found with value > min_val, the
* structure is not modified.
* @return 1 in case at least one pixel with value > min_val was found,
* 0 otherwise
*/
int ff_calculate_bounding_box(FFBoundingBox *bbox,
const uint8_t *data, int linesize,
int w, int h, int min_val);
 
#endif /* AVFILTER_BBOX_H */
/contrib/sdk/sources/ffmpeg/libavfilter/buffer.c
0,0 → 1,170
/*
* Copyright Stefano Sabatini <stefasab gmail com>
* Copyright Anton Khirnov <anton khirnov net>
* Copyright Michael Niedermayer <michaelni gmx at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavcodec/avcodec.h"
 
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
#include "avcodec.h"
#include "version.h"
 
#if FF_API_AVFILTERBUFFER
void ff_avfilter_default_free_buffer(AVFilterBuffer *ptr)
{
if (ptr->extended_data != ptr->data)
av_freep(&ptr->extended_data);
av_free(ptr->data[0]);
av_free(ptr);
}
 
static void copy_video_props(AVFilterBufferRefVideoProps *dst, AVFilterBufferRefVideoProps *src) {
*dst = *src;
if (src->qp_table) {
int qsize = src->qp_table_size;
dst->qp_table = av_malloc(qsize);
memcpy(dst->qp_table, src->qp_table, qsize);
}
}
 
AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask)
{
AVFilterBufferRef *ret = av_malloc(sizeof(AVFilterBufferRef));
if (!ret)
return NULL;
*ret = *ref;
 
ret->metadata = NULL;
av_dict_copy(&ret->metadata, ref->metadata, 0);
 
if (ref->type == AVMEDIA_TYPE_VIDEO) {
ret->video = av_malloc(sizeof(AVFilterBufferRefVideoProps));
if (!ret->video) {
av_free(ret);
return NULL;
}
copy_video_props(ret->video, ref->video);
ret->extended_data = ret->data;
} else if (ref->type == AVMEDIA_TYPE_AUDIO) {
ret->audio = av_malloc(sizeof(AVFilterBufferRefAudioProps));
if (!ret->audio) {
av_free(ret);
return NULL;
}
*ret->audio = *ref->audio;
 
if (ref->extended_data && ref->extended_data != ref->data) {
int nb_channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
if (!(ret->extended_data = av_malloc(sizeof(*ret->extended_data) *
nb_channels))) {
av_freep(&ret->audio);
av_freep(&ret);
return NULL;
}
memcpy(ret->extended_data, ref->extended_data,
sizeof(*ret->extended_data) * nb_channels);
} else
ret->extended_data = ret->data;
}
ret->perms &= pmask;
ret->buf->refcount ++;
return ret;
}
 
void avfilter_unref_buffer(AVFilterBufferRef *ref)
{
if (!ref)
return;
av_assert0(ref->buf->refcount > 0);
if (!(--ref->buf->refcount))
ref->buf->free(ref->buf);
if (ref->extended_data != ref->data)
av_freep(&ref->extended_data);
if (ref->video)
av_freep(&ref->video->qp_table);
av_freep(&ref->video);
av_freep(&ref->audio);
av_dict_free(&ref->metadata);
av_free(ref);
}
 
void avfilter_unref_bufferp(AVFilterBufferRef **ref)
{
avfilter_unref_buffer(*ref);
*ref = NULL;
}
 
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
{
dst->pts = src->pts;
dst->pos = av_frame_get_pkt_pos(src);
dst->format = src->format;
 
av_dict_free(&dst->metadata);
av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0);
 
switch (dst->type) {
case AVMEDIA_TYPE_VIDEO:
dst->video->w = src->width;
dst->video->h = src->height;
dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
dst->video->interlaced = src->interlaced_frame;
dst->video->top_field_first = src->top_field_first;
dst->video->key_frame = src->key_frame;
dst->video->pict_type = src->pict_type;
break;
case AVMEDIA_TYPE_AUDIO:
dst->audio->sample_rate = src->sample_rate;
dst->audio->channel_layout = src->channel_layout;
break;
default:
return AVERROR(EINVAL);
}
 
return 0;
}
 
void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src)
{
// copy common properties
dst->pts = src->pts;
dst->pos = src->pos;
 
switch (src->type) {
case AVMEDIA_TYPE_VIDEO: {
if (dst->video->qp_table)
av_freep(&dst->video->qp_table);
copy_video_props(dst->video, src->video);
break;
}
case AVMEDIA_TYPE_AUDIO: *dst->audio = *src->audio; break;
default: break;
}
 
av_dict_free(&dst->metadata);
av_dict_copy(&dst->metadata, src->metadata, 0);
}
#endif /* FF_API_AVFILTERBUFFER */
/contrib/sdk/sources/ffmpeg/libavfilter/bufferqueue.h
0,0 → 1,121
/*
* Generic buffer queue
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_BUFFERQUEUE_H
#define AVFILTER_BUFFERQUEUE_H
 
/**
* FFBufQueue: simple AVFrame queue API
*
* Note: this API is not thread-safe. Concurrent access to the same queue
* must be protected by a mutex or some other synchronization mechanism.
*/
 
/**
* Maximum size of the queue.
*
* This value can be overridden by defining it before including this
* header.
* Powers of 2 are recommended.
*/
#ifndef FF_BUFQUEUE_SIZE
#define FF_BUFQUEUE_SIZE 32
#endif
 
#include "avfilter.h"
#include "libavutil/avassert.h"
 
/**
* Structure holding the queue
*/
struct FFBufQueue {
AVFrame *queue[FF_BUFQUEUE_SIZE];
unsigned short head;
unsigned short available; /**< number of available buffers */
};
 
#define BUCKET(i) queue->queue[(queue->head + (i)) % FF_BUFQUEUE_SIZE]
 
/**
* Test if a buffer queue is full.
*/
static inline int ff_bufqueue_is_full(struct FFBufQueue *queue)
{
return queue->available == FF_BUFQUEUE_SIZE;
}
 
/**
* Add a buffer to the queue.
*
* If the queue is already full, then the current last buffer is dropped
* (and unrefed) with a warning before adding the new buffer.
*/
static inline void ff_bufqueue_add(void *log, struct FFBufQueue *queue,
AVFrame *buf)
{
if (ff_bufqueue_is_full(queue)) {
av_log(log, AV_LOG_WARNING, "Buffer queue overflow, dropping.\n");
av_frame_free(&BUCKET(--queue->available));
}
BUCKET(queue->available++) = buf;
}
 
/**
* Get a buffer from the queue without altering it.
*
* Buffer with index 0 is the first buffer in the queue.
* Return NULL if the queue does not contain enough buffers.
*/
static inline AVFrame *ff_bufqueue_peek(struct FFBufQueue *queue,
unsigned index)
{
return index < queue->available ? BUCKET(index) : NULL;
}
 
/**
* Get the first buffer from the queue and remove it.
*
* Do not use on an empty queue.
*/
static inline AVFrame *ff_bufqueue_get(struct FFBufQueue *queue)
{
AVFrame *ret = queue->queue[queue->head];
av_assert0(queue->available);
queue->available--;
queue->queue[queue->head] = NULL;
queue->head = (queue->head + 1) % FF_BUFQUEUE_SIZE;
return ret;
}
 
/**
* Unref and remove all buffers from the queue.
*/
static inline void ff_bufqueue_discard_all(struct FFBufQueue *queue)
{
while (queue->available) {
AVFrame *buf = ff_bufqueue_get(queue);
av_frame_free(&buf);
}
}
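 
/* Usage sketch (process() stands in for the caller's own handling):
 *
 *     struct FFBufQueue q = { 0 };
 *     ff_bufqueue_add(ctx, &q, in_frame);   // queue takes ownership
 *     if (ff_bufqueue_peek(&q, 0))          // inspect without removing
 *         process(ff_bufqueue_get(&q));     // pop the oldest frame
 *     ff_bufqueue_discard_all(&q);          // on uninit
 */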
 
#undef BUCKET
 
#endif /* AVFILTER_BUFFERQUEUE_H */
/contrib/sdk/sources/ffmpeg/libavfilter/buffersink.c
0,0 → 1,609
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* buffer sink
*/
 
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
 
#include "audio.h"
#include "avfilter.h"
#include "buffersink.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
AVFifoBuffer *fifo; ///< FIFO buffer of frame references
unsigned warning_limit;
 
/* only used for video */
enum AVPixelFormat *pixel_fmts; ///< list of accepted pixel formats, must be terminated with -1
int pixel_fmts_size;
 
/* only used for audio */
enum AVSampleFormat *sample_fmts; ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
int sample_fmts_size;
int64_t *channel_layouts; ///< list of accepted channel layouts, terminated by -1
int channel_layouts_size;
int *channel_counts; ///< list of accepted channel counts, terminated by -1
int channel_counts_size;
int all_channel_counts;
int *sample_rates; ///< list of accepted sample rates, terminated by -1
int sample_rates_size;
 
/* only used for compat API */
AVAudioFifo *audio_fifo; ///< FIFO for audio samples
int64_t next_pts; ///< interpolating audio pts
} BufferSinkContext;
 
#define NB_ITEMS(list) (list ## _size / sizeof(*list))
 
static av_cold void uninit(AVFilterContext *ctx)
{
BufferSinkContext *sink = ctx->priv;
AVFrame *frame;
 
if (sink->audio_fifo)
av_audio_fifo_free(sink->audio_fifo);
 
if (sink->fifo) {
while (av_fifo_size(sink->fifo) >= sizeof(AVFilterBufferRef *)) {
av_fifo_generic_read(sink->fifo, &frame, sizeof(frame), NULL);
av_frame_free(&frame);
}
av_fifo_free(sink->fifo);
sink->fifo = NULL;
}
}
 
static int add_buffer_ref(AVFilterContext *ctx, AVFrame *ref)
{
BufferSinkContext *buf = ctx->priv;
 
if (av_fifo_space(buf->fifo) < sizeof(AVFilterBufferRef *)) {
/* realloc fifo size */
if (av_fifo_realloc2(buf->fifo, av_fifo_size(buf->fifo) * 2) < 0) {
av_log(ctx, AV_LOG_ERROR,
"Cannot buffer more frames. Consume some available frames "
"before adding new ones.\n");
return AVERROR(ENOMEM);
}
}
 
/* cache frame */
av_fifo_generic_write(buf->fifo, &ref, sizeof(AVFilterBufferRef *), NULL);
return 0;
}
 
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
BufferSinkContext *buf = link->dst->priv;
int ret;
 
if ((ret = add_buffer_ref(ctx, frame)) < 0)
return ret;
if (buf->warning_limit &&
av_fifo_size(buf->fifo) / sizeof(AVFilterBufferRef *) >= buf->warning_limit) {
av_log(ctx, AV_LOG_WARNING,
"%d buffers queued in %s, something may be wrong.\n",
buf->warning_limit,
(char *)av_x_if_null(ctx->name, ctx->filter->name));
buf->warning_limit *= 10;
}
return 0;
}
 
int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
{
return av_buffersink_get_frame_flags(ctx, frame, 0);
}
 
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
BufferSinkContext *buf = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
int ret;
AVFrame *cur_frame;
 
/* no picref available, fetch it from the filterchain */
if (!av_fifo_size(buf->fifo)) {
if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST)
return AVERROR(EAGAIN);
if ((ret = ff_request_frame(inlink)) < 0)
return ret;
}
 
if (!av_fifo_size(buf->fifo))
return AVERROR(EINVAL);
 
if (flags & AV_BUFFERSINK_FLAG_PEEK) {
cur_frame = *((AVFrame **)av_fifo_peek2(buf->fifo, 0));
if ((ret = av_frame_ref(frame, cur_frame)) < 0)
return ret;
} else {
av_fifo_generic_read(buf->fifo, &cur_frame, sizeof(cur_frame), NULL);
av_frame_move_ref(frame, cur_frame);
av_frame_free(&cur_frame);
}
 
return 0;
}
 
static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
int nb_samples)
{
BufferSinkContext *s = ctx->priv;
AVFilterLink *link = ctx->inputs[0];
AVFrame *tmp;
 
if (!(tmp = ff_get_audio_buffer(link, nb_samples)))
return AVERROR(ENOMEM);
av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);
 
tmp->pts = s->next_pts;
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
link->time_base);
 
av_frame_move_ref(frame, tmp);
av_frame_free(&tmp);
 
return 0;
}
 
int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx,
AVFrame *frame, int nb_samples)
{
BufferSinkContext *s = ctx->priv;
AVFilterLink *link = ctx->inputs[0];
AVFrame *cur_frame;
int ret = 0;
 
if (!s->audio_fifo) {
int nb_channels = link->channels;
if (!(s->audio_fifo = av_audio_fifo_alloc(link->format, nb_channels, nb_samples)))
return AVERROR(ENOMEM);
}
 
while (ret >= 0) {
if (av_audio_fifo_size(s->audio_fifo) >= nb_samples)
return read_from_fifo(ctx, frame, nb_samples);
 
if (!(cur_frame = av_frame_alloc()))
return AVERROR(ENOMEM);
ret = av_buffersink_get_frame_flags(ctx, cur_frame, 0);
if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo)) {
av_frame_free(&cur_frame);
return read_from_fifo(ctx, frame, av_audio_fifo_size(s->audio_fifo));
} else if (ret < 0) {
av_frame_free(&cur_frame);
return ret;
}
 
if (cur_frame->pts != AV_NOPTS_VALUE) {
s->next_pts = cur_frame->pts -
av_rescale_q(av_audio_fifo_size(s->audio_fifo),
(AVRational){ 1, link->sample_rate },
link->time_base);
}
 
ret = av_audio_fifo_write(s->audio_fifo, (void**)cur_frame->extended_data,
cur_frame->nb_samples);
av_frame_free(&cur_frame);
}
 
return ret;
}
 
AVBufferSinkParams *av_buffersink_params_alloc(void)
{
static const int pixel_fmts[] = { AV_PIX_FMT_NONE };
AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams));
if (!params)
return NULL;
 
params->pixel_fmts = pixel_fmts;
return params;
}
 
AVABufferSinkParams *av_abuffersink_params_alloc(void)
{
AVABufferSinkParams *params = av_mallocz(sizeof(AVABufferSinkParams));
 
if (!params)
return NULL;
return params;
}
 
#define FIFO_INIT_SIZE 8
 
static av_cold int common_init(AVFilterContext *ctx)
{
BufferSinkContext *buf = ctx->priv;
 
buf->fifo = av_fifo_alloc(FIFO_INIT_SIZE*sizeof(AVFilterBufferRef *));
if (!buf->fifo) {
av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n");
return AVERROR(ENOMEM);
}
buf->warning_limit = 100;
buf->next_pts = AV_NOPTS_VALUE;
return 0;
}
 
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
{
AVFilterLink *inlink = ctx->inputs[0];
 
inlink->min_samples = inlink->max_samples =
inlink->partial_buf_size = frame_size;
}
 
#if FF_API_AVFILTERBUFFER
FF_DISABLE_DEPRECATION_WARNINGS
static void compat_free_buffer(AVFilterBuffer *buf)
{
AVFrame *frame = buf->priv;
av_frame_free(&frame);
av_free(buf);
}
 
static int compat_read(AVFilterContext *ctx,
AVFilterBufferRef **pbuf, int nb_samples, int flags)
{
AVFilterBufferRef *buf;
AVFrame *frame;
int ret;
 
if (!pbuf)
return ff_poll_frame(ctx->inputs[0]);
 
frame = av_frame_alloc();
if (!frame)
return AVERROR(ENOMEM);
 
if (!nb_samples)
ret = av_buffersink_get_frame_flags(ctx, frame, flags);
else
ret = av_buffersink_get_samples(ctx, frame, nb_samples);
 
if (ret < 0)
goto fail;
 
AV_NOWARN_DEPRECATED(
if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
AV_PERM_READ,
frame->width, frame->height,
frame->format);
} else {
buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data,
frame->linesize[0], AV_PERM_READ,
frame->nb_samples,
frame->format,
frame->channel_layout);
}
if (!buf) {
ret = AVERROR(ENOMEM);
goto fail;
}
 
avfilter_copy_frame_props(buf, frame);
)
 
buf->buf->priv = frame;
buf->buf->free = compat_free_buffer;
 
*pbuf = buf;
 
return 0;
fail:
av_frame_free(&frame);
return ret;
}
 
int attribute_align_arg av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{
return compat_read(ctx, buf, 0, 0);
}
 
int attribute_align_arg av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
int nb_samples)
{
return compat_read(ctx, buf, nb_samples, 0);
}
 
int attribute_align_arg av_buffersink_get_buffer_ref(AVFilterContext *ctx,
AVFilterBufferRef **bufref, int flags)
{
*bufref = NULL;
 
av_assert0( !strcmp(ctx->filter->name, "buffersink")
|| !strcmp(ctx->filter->name, "abuffersink")
|| !strcmp(ctx->filter->name, "ffbuffersink")
|| !strcmp(ctx->filter->name, "ffabuffersink"));
 
return compat_read(ctx, bufref, 0, flags);
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
 
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
{
av_assert0( !strcmp(ctx->filter->name, "buffersink")
|| !strcmp(ctx->filter->name, "ffbuffersink"));
 
return ctx->inputs[0]->frame_rate;
}
 
int attribute_align_arg av_buffersink_poll_frame(AVFilterContext *ctx)
{
BufferSinkContext *buf = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
 
av_assert0( !strcmp(ctx->filter->name, "buffersink")
|| !strcmp(ctx->filter->name, "abuffersink")
|| !strcmp(ctx->filter->name, "ffbuffersink")
|| !strcmp(ctx->filter->name, "ffabuffersink"));
 
return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + ff_poll_frame(inlink);
}
 
static av_cold int vsink_init(AVFilterContext *ctx, void *opaque)
{
BufferSinkContext *buf = ctx->priv;
AVBufferSinkParams *params = opaque;
int ret;
 
if (params) {
if ((ret = av_opt_set_int_list(buf, "pix_fmts", params->pixel_fmts, AV_PIX_FMT_NONE, 0)) < 0)
return ret;
}
 
return common_init(ctx);
}
 
#define CHECK_LIST_SIZE(field) \
if (buf->field ## _size % sizeof(*buf->field)) { \
av_log(ctx, AV_LOG_ERROR, "Invalid size for " #field ": %d, " \
"should be multiple of %d\n", \
buf->field ## _size, (int)sizeof(*buf->field)); \
return AVERROR(EINVAL); \
}
static int vsink_query_formats(AVFilterContext *ctx)
{
BufferSinkContext *buf = ctx->priv;
AVFilterFormats *formats = NULL;
unsigned i;
int ret;
 
CHECK_LIST_SIZE(pixel_fmts)
if (buf->pixel_fmts_size) {
for (i = 0; i < NB_ITEMS(buf->pixel_fmts); i++)
if ((ret = ff_add_format(&formats, buf->pixel_fmts[i])) < 0) {
ff_formats_unref(&formats);
return ret;
}
ff_set_common_formats(ctx, formats);
} else {
ff_default_query_formats(ctx);
}
 
return 0;
}
 
static av_cold int asink_init(AVFilterContext *ctx, void *opaque)
{
BufferSinkContext *buf = ctx->priv;
AVABufferSinkParams *params = opaque;
int ret;
 
if (params) {
if ((ret = av_opt_set_int_list(buf, "sample_fmts", params->sample_fmts, AV_SAMPLE_FMT_NONE, 0)) < 0 ||
(ret = av_opt_set_int_list(buf, "sample_rates", params->sample_rates, -1, 0)) < 0 ||
(ret = av_opt_set_int_list(buf, "channel_layouts", params->channel_layouts, -1, 0)) < 0 ||
(ret = av_opt_set_int_list(buf, "channel_counts", params->channel_counts, -1, 0)) < 0 ||
(ret = av_opt_set_int(buf, "all_channel_counts", params->all_channel_counts, 0)) < 0)
return ret;
}
return common_init(ctx);
}
 
static int asink_query_formats(AVFilterContext *ctx)
{
BufferSinkContext *buf = ctx->priv;
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layouts = NULL;
unsigned i;
int ret;
 
CHECK_LIST_SIZE(sample_fmts)
CHECK_LIST_SIZE(sample_rates)
CHECK_LIST_SIZE(channel_layouts)
CHECK_LIST_SIZE(channel_counts)
 
if (buf->sample_fmts_size) {
for (i = 0; i < NB_ITEMS(buf->sample_fmts); i++)
if ((ret = ff_add_format(&formats, buf->sample_fmts[i])) < 0) {
ff_formats_unref(&formats);
return ret;
}
ff_set_common_formats(ctx, formats);
}
 
if (buf->channel_layouts_size || buf->channel_counts_size ||
buf->all_channel_counts) {
for (i = 0; i < NB_ITEMS(buf->channel_layouts); i++)
if ((ret = ff_add_channel_layout(&layouts, buf->channel_layouts[i])) < 0) {
ff_channel_layouts_unref(&layouts);
return ret;
}
for (i = 0; i < NB_ITEMS(buf->channel_counts); i++)
if ((ret = ff_add_channel_layout(&layouts, FF_COUNT2LAYOUT(buf->channel_counts[i]))) < 0) {
ff_channel_layouts_unref(&layouts);
return ret;
}
if (buf->all_channel_counts) {
if (layouts)
av_log(ctx, AV_LOG_WARNING,
"Conflicting all_channel_counts and list in options\n");
else if (!(layouts = ff_all_channel_counts()))
return AVERROR(ENOMEM);
}
ff_set_common_channel_layouts(ctx, layouts);
}
 
if (buf->sample_rates_size) {
formats = NULL;
for (i = 0; i < NB_ITEMS(buf->sample_rates); i++)
if ((ret = ff_add_format(&formats, buf->sample_rates[i])) < 0) {
ff_formats_unref(&formats);
return ret;
}
ff_set_common_samplerates(ctx, formats);
}
 
return 0;
}
 
#define OFFSET(x) offsetof(BufferSinkContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption buffersink_options[] = {
{ "pix_fmts", "set the supported pixel formats", OFFSET(pixel_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
{ NULL },
};
#undef FLAGS
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption abuffersink_options[] = {
{ "sample_fmts", "set the supported sample formats", OFFSET(sample_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
{ "sample_rates", "set the supported sample rates", OFFSET(sample_rates), AV_OPT_TYPE_BINARY, .flags = FLAGS },
{ "channel_layouts", "set the supported channel layouts", OFFSET(channel_layouts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
{ "channel_counts", "set the supported channel counts", OFFSET(channel_counts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
{ "all_channel_counts", "accept all channel counts", OFFSET(all_channel_counts), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
{ NULL },
};
#undef FLAGS
 
AVFILTER_DEFINE_CLASS(buffersink);
AVFILTER_DEFINE_CLASS(abuffersink);
 
#if FF_API_AVFILTERBUFFER
 
#define ffbuffersink_options buffersink_options
#define ffabuffersink_options abuffersink_options
AVFILTER_DEFINE_CLASS(ffbuffersink);
AVFILTER_DEFINE_CLASS(ffabuffersink);
 
static const AVFilterPad ffbuffersink_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL },
};
 
AVFilter avfilter_vsink_ffbuffersink = {
.name = "ffbuffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
.priv_size = sizeof(BufferSinkContext),
.priv_class = &ffbuffersink_class,
.init_opaque = vsink_init,
.uninit = uninit,
 
.query_formats = vsink_query_formats,
.inputs = ffbuffersink_inputs,
.outputs = NULL,
};
 
static const AVFilterPad ffabuffersink_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL },
};
 
AVFilter avfilter_asink_ffabuffersink = {
.name = "ffabuffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
.init_opaque = asink_init,
.uninit = uninit,
.priv_size = sizeof(BufferSinkContext),
.priv_class = &ffabuffersink_class,
.query_formats = asink_query_formats,
.inputs = ffabuffersink_inputs,
.outputs = NULL,
};
#endif /* FF_API_AVFILTERBUFFER */
 
static const AVFilterPad avfilter_vsink_buffer_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
AVFilter avfilter_vsink_buffer = {
.name = "buffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
.priv_size = sizeof(BufferSinkContext),
.priv_class = &buffersink_class,
.init_opaque = vsink_init,
.uninit = uninit,
 
.query_formats = vsink_query_formats,
.inputs = avfilter_vsink_buffer_inputs,
.outputs = NULL,
};
 
static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
AVFilter avfilter_asink_abuffer = {
.name = "abuffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
.priv_class = &abuffersink_class,
.priv_size = sizeof(BufferSinkContext),
.init_opaque = asink_init,
.uninit = uninit,
 
.query_formats = asink_query_formats,
.inputs = avfilter_asink_abuffer_inputs,
.outputs = NULL,
};
/contrib/sdk/sources/ffmpeg/libavfilter/buffersink.h
0,0 → 1,186
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_BUFFERSINK_H
#define AVFILTER_BUFFERSINK_H
 
/**
* @file
* memory buffer sink API for audio and video
*/
 
#include "avfilter.h"
 
#if FF_API_AVFILTERBUFFER
/**
* Get audio/video buffer data from buffer_sink and put it in bufref.
*
* This function works with both audio and video buffer sinks.
*
* @param buffer_sink pointer to a buffersink or abuffersink context
* @param flags a combination of AV_BUFFERSINK_FLAG_* flags
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure
*/
attribute_deprecated
int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,
AVFilterBufferRef **bufref, int flags);
 
/**
* Get the number of immediately available frames.
*/
attribute_deprecated
int av_buffersink_poll_frame(AVFilterContext *ctx);
 
/**
* Get a buffer with filtered data from sink and put it in buf.
*
* @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
* @param buf a reference to the buffer will be written here if buf is non-NULL.
* It must be freed by the caller using avfilter_unref_buffer().
* Buf may also be NULL to query whether a buffer is ready to be
* output.
*
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure.
*/
attribute_deprecated
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);
 
/**
* Same as av_buffersink_read, but with the ability to specify the number of
* samples read. This function is less efficient than av_buffersink_read(),
* because it copies the data around.
*
* @param ctx pointer to a context of the abuffersink AVFilter.
* @param buf a reference to the buffer will be written here if buf is non-NULL.
* It must be freed by the caller using avfilter_unref_buffer(). buf
* will contain exactly nb_samples audio samples, except at the end
* of stream, when it can contain fewer than nb_samples.
* Buf may also be NULL to query whether a buffer is ready to be
* output.
*
* @warning do not mix this function with av_buffersink_read(). Use only one or
* the other with a single sink, not both.
*/
attribute_deprecated
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
int nb_samples);
#endif
 
/**
* Get a frame with filtered data from sink and put it in frame.
*
* @param ctx pointer to a buffersink or abuffersink filter context.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
* @param flags a combination of AV_BUFFERSINK_FLAG_* flags
*
* @return >= 0 in case of success, a negative AVERROR code in case of failure.
*/
int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);
 
/**
* Tell av_buffersink_get_buffer_ref() to read the video/samples buffer
* reference, but not to remove it from the buffer. This is useful when you
* only need to inspect a buffer without consuming it.
*/
#define AV_BUFFERSINK_FLAG_PEEK 1
 
/**
* Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
* If a frame is already buffered, it is read (and removed from the buffer),
* but if no frame is present, return AVERROR(EAGAIN).
*/
#define AV_BUFFERSINK_FLAG_NO_REQUEST 2
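 
/* Non-blocking poll sketch: with AV_BUFFERSINK_FLAG_NO_REQUEST set,
 * av_buffersink_get_frame_flags() returns AVERROR(EAGAIN) instead of pulling
 * a new frame through the filterchain when nothing is buffered:
 *
 *     ret = av_buffersink_get_frame_flags(ctx, frame,
 *                                         AV_BUFFERSINK_FLAG_NO_REQUEST);
 */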
 
/**
* Struct to use for initializing a buffersink context.
*/
typedef struct {
const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE
} AVBufferSinkParams;
 
/**
* Create an AVBufferSinkParams structure.
*
* Must be freed with av_free().
*/
AVBufferSinkParams *av_buffersink_params_alloc(void);
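 
/* Init sketch (the pixel format list is an example choice; error checks
 * omitted): restrict a buffersink to yuv420p by passing the params struct as
 * the opaque argument of avfilter_graph_create_filter().
 *
 *     static const enum AVPixelFormat fmts[] = { AV_PIX_FMT_YUV420P,
 *                                                AV_PIX_FMT_NONE };
 *     AVBufferSinkParams *params = av_buffersink_params_alloc();
 *     params->pixel_fmts = fmts;
 *     ret = avfilter_graph_create_filter(&sink,
 *                                        avfilter_get_by_name("buffersink"),
 *                                        "out", NULL, params, graph);
 *     av_free(params);
 */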
 
/**
* Struct to use for initializing an abuffersink context.
*/
typedef struct {
const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE
const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1
const int *channel_counts; ///< list of allowed channel counts, terminated by -1
int all_channel_counts; ///< if not 0, accept any channel count or layout
int *sample_rates; ///< list of allowed sample rates, terminated by -1
} AVABufferSinkParams;
 
/**
* Create an AVABufferSinkParams structure.
*
* Must be freed with av_free().
*/
AVABufferSinkParams *av_abuffersink_params_alloc(void);
 
/**
* Set the frame size for an audio buffer sink.
*
* All calls to av_buffersink_get_buffer_ref will return a buffer with
* exactly the specified number of samples, or AVERROR(EAGAIN) if there is
* not enough. The last buffer at EOF will be padded with 0.
*/
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size);
 
/**
* Get the frame rate of the input.
*/
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx);
 
/**
* Get a frame with filtered data from sink and put it in frame.
*
* @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
*
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure.
*/
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);
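 
/* Pull-loop sketch (per-frame error handling omitted):
 *
 *     AVFrame *frame = av_frame_alloc();
 *     while (av_buffersink_get_frame(sink_ctx, frame) >= 0) {
 *         // ... use the frame ...
 *         av_frame_unref(frame);
 *     }
 *     av_frame_free(&frame);
 */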
 
/**
* Same as av_buffersink_get_frame(), but with the ability to specify the number
* of samples read. This function is less efficient than
* av_buffersink_get_frame(), because it copies the data around.
*
* @param ctx pointer to a context of the abuffersink AVFilter.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
* frame will contain exactly nb_samples audio samples, except at
* the end of stream, when it can contain fewer than nb_samples.
*
* @warning do not mix this function with av_buffersink_get_frame(). Use only one or
* the other with a single sink, not both.
*/
int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples);
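 
/* Fixed-size audio sketch: request exactly 1024 samples per call (an
 * arbitrary example size); the last call before EOF may return fewer.
 *
 *     while (av_buffersink_get_samples(abuffersink_ctx, frame, 1024) >= 0) {
 *         // frame->nb_samples == 1024, except possibly on the final frame
 *         av_frame_unref(frame);
 *     }
 */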
 
#endif /* AVFILTER_BUFFERSINK_H */
/contrib/sdk/sources/ffmpeg/libavfilter/buffersrc.c
0,0 → 1,551
/*
* Copyright (c) 2008 Vitor Sessak
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* memory buffer source filter
*/
 
#include <float.h>
 
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
#include "buffersrc.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "avcodec.h"
 
typedef struct {
const AVClass *class;
AVFifoBuffer *fifo;
AVRational time_base; ///< time_base to set in the output link
AVRational frame_rate; ///< frame_rate to set in the output link
unsigned nb_failed_requests;
unsigned warning_limit;
 
/* video only */
int w, h;
enum AVPixelFormat pix_fmt;
AVRational pixel_aspect;
char *sws_param;
 
/* audio only */
int sample_rate;
enum AVSampleFormat sample_fmt;
char *sample_fmt_str;
int channels;
uint64_t channel_layout;
char *channel_layout_str;
 
int eof;
} BufferSourceContext;
 
#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
if (c->w != width || c->h != height || c->pix_fmt != format) {\
av_log(s, AV_LOG_INFO, "Changing frame properties on the fly is not supported by all filters.\n");\
}
 
#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, ch_count, format)\
if (c->sample_fmt != format || c->sample_rate != srate ||\
c->channel_layout != ch_layout || c->channels != ch_count) {\
av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
return AVERROR(EINVAL);\
}
 
int attribute_align_arg av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame)
{
return av_buffersrc_add_frame_flags(ctx, (AVFrame *)frame,
AV_BUFFERSRC_FLAG_KEEP_REF);
}
 
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
{
return av_buffersrc_add_frame_flags(ctx, frame, 0);
}
 
static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
AVFrame *frame, int flags);
 
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
AVFrame *copy = NULL;
int ret = 0;
 
if (frame && frame->channel_layout &&
av_get_channel_layout_nb_channels(frame->channel_layout) != av_frame_get_channels(frame)) {
av_log(ctx, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
return AVERROR(EINVAL);
}
 
if (!(flags & AV_BUFFERSRC_FLAG_KEEP_REF) || !frame)
return av_buffersrc_add_frame_internal(ctx, frame, flags);
 
if (!(copy = av_frame_alloc()))
return AVERROR(ENOMEM);
ret = av_frame_ref(copy, frame);
if (ret >= 0)
ret = av_buffersrc_add_frame_internal(ctx, copy, flags);
 
av_frame_free(&copy);
return ret;
}
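 
/* Feeding sketch: push each decoded frame while keeping the caller's
 * reference, then signal end of stream with a NULL frame.
 *
 *     ret = av_buffersrc_add_frame_flags(src_ctx, decoded,
 *                                        AV_BUFFERSRC_FLAG_KEEP_REF);
 *     ...
 *     av_buffersrc_add_frame(src_ctx, NULL);   // EOF
 */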
 
static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
AVFrame *frame, int flags)
{
BufferSourceContext *s = ctx->priv;
AVFrame *copy;
int ret;
 
s->nb_failed_requests = 0;
 
if (!frame) {
s->eof = 1;
return 0;
} else if (s->eof)
return AVERROR(EINVAL);
 
if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
 
switch (ctx->outputs[0]->type) {
case AVMEDIA_TYPE_VIDEO:
CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height,
frame->format);
break;
case AVMEDIA_TYPE_AUDIO:
/* For layouts unknown on input but known on link after negotiation. */
if (!frame->channel_layout)
frame->channel_layout = s->channel_layout;
CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
av_frame_get_channels(frame), frame->format);
break;
default:
return AVERROR(EINVAL);
}
 
}
 
if (!av_fifo_space(s->fifo) &&
(ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) +
sizeof(copy))) < 0)
return ret;
 
if (!(copy = av_frame_alloc()))
return AVERROR(ENOMEM);
av_frame_move_ref(copy, frame);
 
if ((ret = av_fifo_generic_write(s->fifo, &copy, sizeof(copy), NULL)) < 0) {
av_frame_move_ref(frame, copy);
av_frame_free(&copy);
return ret;
}
 
if ((flags & AV_BUFFERSRC_FLAG_PUSH))
if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0)
return ret;
 
return 0;
}
 
#if FF_API_AVFILTERBUFFER
FF_DISABLE_DEPRECATION_WARNINGS
static void compat_free_buffer(void *opaque, uint8_t *data)
{
AVFilterBufferRef *buf = opaque;
AV_NOWARN_DEPRECATED(
avfilter_unref_buffer(buf);
)
}
 
static void compat_unref_buffer(void *opaque, uint8_t *data)
{
AVBufferRef *buf = opaque;
AV_NOWARN_DEPRECATED(
av_buffer_unref(&buf);
)
}
 
int av_buffersrc_add_ref(AVFilterContext *ctx, AVFilterBufferRef *buf,
int flags)
{
BufferSourceContext *s = ctx->priv;
AVFrame *frame = NULL;
AVBufferRef *dummy_buf = NULL;
int ret = 0, planes, i;
 
if (!buf) {
s->eof = 1;
return 0;
} else if (s->eof)
return AVERROR(EINVAL);
 
frame = av_frame_alloc();
if (!frame)
return AVERROR(ENOMEM);
 
dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, buf,
(buf->perms & AV_PERM_WRITE) ? 0 : AV_BUFFER_FLAG_READONLY);
if (!dummy_buf) {
ret = AVERROR(ENOMEM);
goto fail;
}
 
AV_NOWARN_DEPRECATED(
if ((ret = avfilter_copy_buf_props(frame, buf)) < 0)
goto fail;
)
 
#define WRAP_PLANE(ref_out, data, data_size) \
do { \
AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf); \
if (!dummy_ref) { \
ret = AVERROR(ENOMEM); \
goto fail; \
} \
ref_out = av_buffer_create(data, data_size, compat_unref_buffer, \
dummy_ref, (buf->perms & AV_PERM_WRITE) ? 0 : AV_BUFFER_FLAG_READONLY); \
if (!ref_out) { \
av_frame_unref(frame); \
ret = AVERROR(ENOMEM); \
goto fail; \
} \
} while (0)
 
if (ctx->outputs[0]->type == AVMEDIA_TYPE_VIDEO) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
 
planes = av_pix_fmt_count_planes(frame->format);
if (!desc || planes <= 0) {
ret = AVERROR(EINVAL);
goto fail;
}
 
for (i = 0; i < planes; i++) {
int v_shift = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
int plane_size = (frame->height >> v_shift) * frame->linesize[i];
 
WRAP_PLANE(frame->buf[i], frame->data[i], plane_size);
}
} else {
int planar = av_sample_fmt_is_planar(frame->format);
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
 
planes = planar ? channels : 1;
 
if (planes > FF_ARRAY_ELEMS(frame->buf)) {
frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf);
frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
frame->nb_extended_buf);
if (!frame->extended_buf) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
 
for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++)
WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]);
 
for (i = 0; i < planes - FF_ARRAY_ELEMS(frame->buf); i++)
WRAP_PLANE(frame->extended_buf[i],
frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)],
frame->linesize[0]);
}
 
ret = av_buffersrc_add_frame_flags(ctx, frame, flags);
 
fail:
av_buffer_unref(&dummy_buf);
av_frame_free(&frame);
 
return ret;
}
FF_ENABLE_DEPRECATION_WARNINGS
 
int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf)
{
return av_buffersrc_add_ref(ctx, buf, 0);
}
#endif
 
static av_cold int init_video(AVFilterContext *ctx)
{
BufferSourceContext *c = ctx->priv;
 
if (c->pix_fmt == AV_PIX_FMT_NONE || !c->w || !c->h || av_q2d(c->time_base) <= 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid parameters provided.\n");
return AVERROR(EINVAL);
}
 
if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*))))
return AVERROR(ENOMEM);
 
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
c->w, c->h, av_get_pix_fmt_name(c->pix_fmt),
c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
c->warning_limit = 100;
return 0;
}
 
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}
 
#define OFFSET(x) offsetof(BufferSourceContext, x)
#define A AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
#define V AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption buffer_options[] = {
{ "width", NULL, OFFSET(w), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
{ "video_size", NULL, OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, .flags = V },
{ "height", NULL, OFFSET(h), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
{ "pix_fmt", NULL, OFFSET(pix_fmt), AV_OPT_TYPE_PIXEL_FMT, .flags = V },
#if FF_API_OLD_FILTER_OPTS
/* those 4 are for compatibility with the old option passing system where each filter
* did its own parsing */
{ "time_base_num", "deprecated, do not use", OFFSET(time_base.num), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
{ "time_base_den", "deprecated, do not use", OFFSET(time_base.den), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
{ "sar_num", "deprecated, do not use", OFFSET(pixel_aspect.num), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
{ "sar_den", "deprecated, do not use", OFFSET(pixel_aspect.den), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
#endif
{ "sar", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 1 }, 0, DBL_MAX, V },
{ "pixel_aspect", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 1 }, 0, DBL_MAX, V },
{ "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
{ "frame_rate", NULL, OFFSET(frame_rate), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
{ "sws_param", NULL, OFFSET(sws_param), AV_OPT_TYPE_STRING, .flags = V },
{ NULL },
};
 
AVFILTER_DEFINE_CLASS(buffer);
 
static const AVOption abuffer_options[] = {
{ "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, A },
{ "sample_rate", NULL, OFFSET(sample_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A },
{ "sample_fmt", NULL, OFFSET(sample_fmt_str), AV_OPT_TYPE_STRING, .flags = A },
{ "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A },
{ "channels", NULL, OFFSET(channels), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A },
{ NULL },
};
 
AVFILTER_DEFINE_CLASS(abuffer);
 
static av_cold int init_audio(AVFilterContext *ctx)
{
BufferSourceContext *s = ctx->priv;
int ret = 0;
 
s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
av_log(ctx, AV_LOG_ERROR, "Invalid sample format %s\n",
s->sample_fmt_str);
return AVERROR(EINVAL);
}
 
if (s->channel_layout_str) {
int n;
/* TODO reindent */
s->channel_layout = av_get_channel_layout(s->channel_layout_str);
if (!s->channel_layout) {
av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
s->channel_layout_str);
return AVERROR(EINVAL);
}
n = av_get_channel_layout_nb_channels(s->channel_layout);
if (s->channels) {
if (n != s->channels) {
av_log(ctx, AV_LOG_ERROR,
"Mismatching channel count %d and layout '%s' "
"(%d channels)\n",
s->channels, s->channel_layout_str, n);
return AVERROR(EINVAL);
}
}
s->channels = n;
} else if (!s->channels) {
av_log(ctx, AV_LOG_ERROR, "Neither number of channels nor "
"channel layout specified\n");
return AVERROR(EINVAL);
}
 
if (!(s->fifo = av_fifo_alloc(sizeof(AVFrame*))))
return AVERROR(ENOMEM);
 
if (!s->time_base.num)
s->time_base = (AVRational){1, s->sample_rate};
 
av_log(ctx, AV_LOG_VERBOSE,
"tb:%d/%d samplefmt:%s samplerate:%d chlayout:%s\n",
s->time_base.num, s->time_base.den, s->sample_fmt_str,
s->sample_rate, s->channel_layout_str);
s->warning_limit = 100;
 
return ret;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
BufferSourceContext *s = ctx->priv;
while (s->fifo && av_fifo_size(s->fifo)) {
AVFrame *frame;
av_fifo_generic_read(s->fifo, &frame, sizeof(frame), NULL);
av_frame_free(&frame);
}
av_fifo_free(s->fifo);
s->fifo = NULL;
}
 
static int query_formats(AVFilterContext *ctx)
{
BufferSourceContext *c = ctx->priv;
AVFilterChannelLayouts *channel_layouts = NULL;
AVFilterFormats *formats = NULL;
AVFilterFormats *samplerates = NULL;
 
switch (ctx->outputs[0]->type) {
case AVMEDIA_TYPE_VIDEO:
ff_add_format(&formats, c->pix_fmt);
ff_set_common_formats(ctx, formats);
break;
case AVMEDIA_TYPE_AUDIO:
ff_add_format(&formats, c->sample_fmt);
ff_set_common_formats(ctx, formats);
 
ff_add_format(&samplerates, c->sample_rate);
ff_set_common_samplerates(ctx, samplerates);
 
ff_add_channel_layout(&channel_layouts,
c->channel_layout ? c->channel_layout :
FF_COUNT2LAYOUT(c->channels));
ff_set_common_channel_layouts(ctx, channel_layouts);
break;
default:
return AVERROR(EINVAL);
}
 
return 0;
}
 
static int config_props(AVFilterLink *link)
{
BufferSourceContext *c = link->src->priv;
 
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
link->w = c->w;
link->h = c->h;
link->sample_aspect_ratio = c->pixel_aspect;
break;
case AVMEDIA_TYPE_AUDIO:
if (!c->channel_layout)
c->channel_layout = link->channel_layout;
break;
default:
return AVERROR(EINVAL);
}
 
link->time_base = c->time_base;
link->frame_rate = c->frame_rate;
return 0;
}
 
static int request_frame(AVFilterLink *link)
{
BufferSourceContext *c = link->src->priv;
AVFrame *frame;
 
if (!av_fifo_size(c->fifo)) {
if (c->eof)
return AVERROR_EOF;
c->nb_failed_requests++;
return AVERROR(EAGAIN);
}
av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL);
 
return ff_filter_frame(link, frame);
}
 
static int poll_frame(AVFilterLink *link)
{
BufferSourceContext *c = link->src->priv;
int size = av_fifo_size(c->fifo);
if (!size && c->eof)
return AVERROR_EOF;
return size/sizeof(AVFrame*);
}
 
static const AVFilterPad avfilter_vsrc_buffer_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.poll_frame = poll_frame,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_vsrc_buffer = {
.name = "buffer",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),
.query_formats = query_formats,
 
.init = init_video,
.uninit = uninit,
 
.inputs = NULL,
.outputs = avfilter_vsrc_buffer_outputs,
.priv_class = &buffer_class,
};
 
static const AVFilterPad avfilter_asrc_abuffer_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.request_frame = request_frame,
.poll_frame = poll_frame,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_asrc_abuffer = {
.name = "abuffer",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),
.query_formats = query_formats,
 
.init = init_audio,
.uninit = uninit,
 
.inputs = NULL,
.outputs = avfilter_asrc_abuffer_outputs,
.priv_class = &abuffer_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/buffersrc.h
0,0 → 1,148
/*
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_BUFFERSRC_H
#define AVFILTER_BUFFERSRC_H
 
/**
* @file
* Memory buffer source API.
*/
 
#include "libavcodec/avcodec.h"
#include "avfilter.h"
 
enum {
 
/**
* Do not check for format changes.
*/
AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1,
 
#if FF_API_AVFILTERBUFFER
/**
* Ignored
*/
AV_BUFFERSRC_FLAG_NO_COPY = 2,
#endif
 
/**
* Immediately push the frame to the output.
*/
AV_BUFFERSRC_FLAG_PUSH = 4,
 
/**
* Keep a reference to the frame.
* If the frame is reference-counted, create a new reference; otherwise
* copy the frame data.
*/
AV_BUFFERSRC_FLAG_KEEP_REF = 8,
 
};
 
/**
* Add buffer data in picref to buffer_src.
*
* @param buffer_src pointer to a buffer source context
* @param picref a buffer reference, or NULL to mark EOF
* @param flags a combination of AV_BUFFERSRC_FLAG_*
* @return >= 0 in case of success, a negative AVERROR code
* in case of failure
*/
int av_buffersrc_add_ref(AVFilterContext *buffer_src,
AVFilterBufferRef *picref, int flags);
 
/**
* Get the number of failed requests.
*
* A failed request occurs when the request_frame method is called while no
* frame is present in the buffer.
* The number is reset when a frame is added.
*/
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src);
 
#if FF_API_AVFILTERBUFFER
/**
* Add a buffer to the filtergraph s.
*
* @param buf buffer containing frame data to be passed down the filtergraph.
* This function will take ownership of buf, the user must not free it.
* A NULL buf signals EOF -- i.e. no more frames will be sent to this filter.
*
* @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame()
*/
attribute_deprecated
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf);
#endif
 
/**
* Add a frame to the buffer source.
*
* @param s an instance of the buffersrc filter.
* @param frame frame to be added. If the frame is reference counted, this
* function will make a new reference to it. Otherwise the frame data will be
* copied.
*
* @return 0 on success, a negative AVERROR on error
*
* This function is equivalent to av_buffersrc_add_frame_flags() with the
* AV_BUFFERSRC_FLAG_KEEP_REF flag.
*/
int av_buffersrc_write_frame(AVFilterContext *s, const AVFrame *frame);
 
/**
* Add a frame to the buffer source.
*
* @param s an instance of the buffersrc filter.
* @param frame frame to be added. If the frame is reference counted, this
* function will take ownership of the reference(s) and reset the frame.
* Otherwise the frame data will be copied. If this function returns an error,
* the input frame is not touched.
*
* @return 0 on success, a negative AVERROR on error.
*
* @note the difference between this function and av_buffersrc_write_frame() is
* that av_buffersrc_write_frame() creates a new reference to the input frame,
* while this function takes ownership of the reference passed to it.
*
* This function is equivalent to av_buffersrc_add_frame_flags() without the
* AV_BUFFERSRC_FLAG_KEEP_REF flag.
*/
int av_buffersrc_add_frame(AVFilterContext *s, AVFrame *frame);
 
/**
* Add a frame to the buffer source.
*
* By default, if the frame is reference-counted, this function will take
* ownership of the reference(s) and reset the frame. This can be controlled
* using the flags.
*
* If this function returns an error, the input frame is not touched.
*
* @param buffer_src pointer to a buffer source context
* @param frame a frame, or NULL to mark EOF
* @param flags a combination of AV_BUFFERSRC_FLAG_*
* @return >= 0 in case of success, a negative AVERROR code
* in case of failure
*/
int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src,
AVFrame *frame, int flags);
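 
/*
 * Illustrative usage sketch (an editorial addition, not part of the original
 * header): pushing decoded frames into a configured buffer source and
 * signalling EOF. "buffersrc_ctx", "frame" and "ret" are assumed to exist.
 *
 *     // the filter takes a new reference; we keep ours
 *     ret = av_buffersrc_add_frame_flags(buffersrc_ctx, frame,
 *                                        AV_BUFFERSRC_FLAG_KEEP_REF);
 *     if (ret < 0)
 *         return ret;
 *     // a NULL frame marks end of stream
 *     av_buffersrc_add_frame_flags(buffersrc_ctx, NULL, 0);
 */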
 
 
#endif /* AVFILTER_BUFFERSRC_H */
/contrib/sdk/sources/ffmpeg/libavfilter/deshake.h
0,0 → 1,104
/*
* Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_DESHAKE_H
#define AVFILTER_DESHAKE_H
 
#include "config.h"
#include "avfilter.h"
#include "libavcodec/dsputil.h"
#include "transform.h"
#if CONFIG_OPENCL
#include "libavutil/opencl.h"
#endif
 
 
enum SearchMethod {
EXHAUSTIVE, ///< Search all possible positions
SMART_EXHAUSTIVE, ///< Search only the most promising positions (faster)
SEARCH_COUNT
};
 
typedef struct {
int x; ///< Horizontal shift
int y; ///< Vertical shift
} IntMotionVector;
 
typedef struct {
double x; ///< Horizontal shift
double y; ///< Vertical shift
} MotionVector;
 
typedef struct {
MotionVector vector; ///< Motion vector
double angle; ///< Angle of rotation
double zoom; ///< Zoom percentage
} Transform;
 
#if CONFIG_OPENCL
 
typedef struct {
size_t matrix_size;
float matrix_y[9];
float matrix_uv[9];
cl_mem cl_matrix_y;
cl_mem cl_matrix_uv;
int in_plane_size[8];
int out_plane_size[8];
int plane_num;
cl_mem cl_inbuf;
size_t cl_inbuf_size;
cl_mem cl_outbuf;
size_t cl_outbuf_size;
AVOpenCLKernelEnv kernel_env;
} DeshakeOpenclContext;
 
#endif
 
typedef struct {
const AVClass *class;
AVFrame *ref; ///< Previous frame
int rx; ///< Maximum horizontal shift
int ry; ///< Maximum vertical shift
int edge; ///< Edge fill method
int blocksize; ///< Size of blocks to compare
int contrast; ///< Contrast threshold
int search; ///< Motion search method
AVCodecContext *avctx;
DSPContext c; ///< Context providing optimized SAD methods
Transform last; ///< Transform from last frame
int refcount; ///< Number of reference frames (defines averaging window)
FILE *fp;
Transform avg;
int cw; ///< Crop motion search to this box
int ch;
int cx;
int cy;
char *filename; ///< Motion search detailed log filename
int opencl;
#if CONFIG_OPENCL
DeshakeOpenclContext opencl_ctx;
#endif
int (* transform)(AVFilterContext *ctx, int width, int height, int cw, int ch,
const float *matrix_y, const float *matrix_uv, enum InterpolateMethod interpolate,
enum FillMethod fill, AVFrame *in, AVFrame *out);
} DeshakeContext;
 
#endif /* AVFILTER_DESHAKE_H */
/contrib/sdk/sources/ffmpeg/libavfilter/deshake_opencl.c
0,0 → 1,176
/*
* Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* transform input video using OpenCL
*/
 
#include "libavutil/common.h"
#include "libavutil/dict.h"
#include "libavutil/pixdesc.h"
#include "deshake_opencl.h"
#include "libavutil/opencl_internal.h"
 
#define MATRIX_SIZE 6
#define PLANE_NUM 3
 
int ff_opencl_transform(AVFilterContext *ctx,
int width, int height, int cw, int ch,
const float *matrix_y, const float *matrix_uv,
enum InterpolateMethod interpolate,
enum FillMethod fill, AVFrame *in, AVFrame *out)
{
int ret = 0;
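/* one work-item per luma pixel plus one per chroma pixel
 * (two chroma planes of cw x ch each) */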
const size_t global_work_size = width * height + 2 * ch * cw;
cl_int status;
DeshakeContext *deshake = ctx->priv;
FFOpenclParam opencl_param = {0};
 
opencl_param.ctx = ctx;
opencl_param.kernel = deshake->opencl_ctx.kernel_env.kernel;
ret = av_opencl_buffer_write(deshake->opencl_ctx.cl_matrix_y, (uint8_t *)matrix_y, deshake->opencl_ctx.matrix_size * sizeof(cl_float));
if (ret < 0)
return ret;
ret = av_opencl_buffer_write(deshake->opencl_ctx.cl_matrix_uv, (uint8_t *)matrix_uv, deshake->opencl_ctx.matrix_size * sizeof(cl_float));
if (ret < 0)
return ret;
 
if ((unsigned int)interpolate > INTERPOLATE_BIQUADRATIC) {
av_log(ctx, AV_LOG_ERROR, "Selected interpolate method is invalid\n");
return AVERROR(EINVAL);
}
ret = ff_opencl_set_parameter(&opencl_param,
FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_inbuf),
FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_outbuf),
FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_matrix_y),
FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_matrix_uv),
FF_OPENCL_PARAM_INFO(interpolate),
FF_OPENCL_PARAM_INFO(fill),
FF_OPENCL_PARAM_INFO(in->linesize[0]),
FF_OPENCL_PARAM_INFO(out->linesize[0]),
FF_OPENCL_PARAM_INFO(in->linesize[1]),
FF_OPENCL_PARAM_INFO(out->linesize[1]),
FF_OPENCL_PARAM_INFO(height),
FF_OPENCL_PARAM_INFO(width),
FF_OPENCL_PARAM_INFO(ch),
FF_OPENCL_PARAM_INFO(cw),
NULL);
if (ret < 0)
return ret;
status = clEnqueueNDRangeKernel(deshake->opencl_ctx.kernel_env.command_queue,
deshake->opencl_ctx.kernel_env.kernel, 1, NULL,
&global_work_size, NULL, 0, NULL, NULL);
if (status != CL_SUCCESS) {
av_log(ctx, AV_LOG_ERROR, "OpenCL run kernel error occurred: %s\n", av_opencl_errstr(status));
return AVERROR_EXTERNAL;
}
clFinish(deshake->opencl_ctx.kernel_env.command_queue);
ret = av_opencl_buffer_read_image(out->data, deshake->opencl_ctx.out_plane_size,
deshake->opencl_ctx.plane_num, deshake->opencl_ctx.cl_outbuf,
deshake->opencl_ctx.cl_outbuf_size);
if (ret < 0)
return ret;
return ret;
}
 
int ff_opencl_deshake_init(AVFilterContext *ctx)
{
int ret = 0;
DeshakeContext *deshake = ctx->priv;
ret = av_opencl_init(NULL);
if (ret < 0)
return ret;
deshake->opencl_ctx.matrix_size = MATRIX_SIZE;
deshake->opencl_ctx.plane_num = PLANE_NUM;
ret = av_opencl_buffer_create(&deshake->opencl_ctx.cl_matrix_y,
deshake->opencl_ctx.matrix_size*sizeof(cl_float), CL_MEM_READ_ONLY, NULL);
if (ret < 0)
return ret;
ret = av_opencl_buffer_create(&deshake->opencl_ctx.cl_matrix_uv,
deshake->opencl_ctx.matrix_size*sizeof(cl_float), CL_MEM_READ_ONLY, NULL);
if (ret < 0)
return ret;
if (!deshake->opencl_ctx.kernel_env.kernel) {
ret = av_opencl_create_kernel(&deshake->opencl_ctx.kernel_env, "avfilter_transform");
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel for name 'avfilter_transform'\n");
return ret;
}
}
return ret;
}
 
void ff_opencl_deshake_uninit(AVFilterContext *ctx)
{
DeshakeContext *deshake = ctx->priv;
av_opencl_buffer_release(&deshake->opencl_ctx.cl_inbuf);
av_opencl_buffer_release(&deshake->opencl_ctx.cl_outbuf);
av_opencl_buffer_release(&deshake->opencl_ctx.cl_matrix_y);
av_opencl_buffer_release(&deshake->opencl_ctx.cl_matrix_uv);
av_opencl_release_kernel(&deshake->opencl_ctx.kernel_env);
av_opencl_uninit();
}
 
 
int ff_opencl_deshake_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
{
int ret = 0;
AVFilterLink *link = ctx->inputs[0];
DeshakeContext *deshake = ctx->priv;
const int hshift = av_pix_fmt_desc_get(link->format)->log2_chroma_h;
int chroma_height = FF_CEIL_RSHIFT(link->h, hshift);
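/* The OpenCL input/output buffers pack the three planes contiguously; plane
 * sizes and total buffer sizes are computed once, when the buffers are first
 * created. */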
 
if ((!deshake->opencl_ctx.cl_inbuf) || (!deshake->opencl_ctx.cl_outbuf)) {
deshake->opencl_ctx.in_plane_size[0] = (in->linesize[0] * in->height);
deshake->opencl_ctx.in_plane_size[1] = (in->linesize[1] * chroma_height);
deshake->opencl_ctx.in_plane_size[2] = (in->linesize[2] * chroma_height);
deshake->opencl_ctx.out_plane_size[0] = (out->linesize[0] * out->height);
deshake->opencl_ctx.out_plane_size[1] = (out->linesize[1] * chroma_height);
deshake->opencl_ctx.out_plane_size[2] = (out->linesize[2] * chroma_height);
deshake->opencl_ctx.cl_inbuf_size = deshake->opencl_ctx.in_plane_size[0] +
deshake->opencl_ctx.in_plane_size[1] +
deshake->opencl_ctx.in_plane_size[2];
deshake->opencl_ctx.cl_outbuf_size = deshake->opencl_ctx.out_plane_size[0] +
deshake->opencl_ctx.out_plane_size[1] +
deshake->opencl_ctx.out_plane_size[2];
if (!deshake->opencl_ctx.cl_inbuf) {
ret = av_opencl_buffer_create(&deshake->opencl_ctx.cl_inbuf,
deshake->opencl_ctx.cl_inbuf_size,
CL_MEM_READ_ONLY, NULL);
if (ret < 0)
return ret;
}
if (!deshake->opencl_ctx.cl_outbuf) {
ret = av_opencl_buffer_create(&deshake->opencl_ctx.cl_outbuf,
deshake->opencl_ctx.cl_outbuf_size,
CL_MEM_READ_WRITE, NULL);
if (ret < 0)
return ret;
}
}
ret = av_opencl_buffer_write_image(deshake->opencl_ctx.cl_inbuf,
deshake->opencl_ctx.cl_inbuf_size,
0, in->data, deshake->opencl_ctx.in_plane_size,
deshake->opencl_ctx.plane_num);
if (ret < 0)
return ret;
return ret;
}
/contrib/sdk/sources/ffmpeg/libavfilter/deshake_opencl.h
0,0 → 1,38
/*
* Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_DESHAKE_OPENCL_H
#define AVFILTER_DESHAKE_OPENCL_H
 
#include "deshake.h"
 
int ff_opencl_deshake_init(AVFilterContext *ctx);
 
void ff_opencl_deshake_uninit(AVFilterContext *ctx);
 
int ff_opencl_deshake_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
 
int ff_opencl_transform(AVFilterContext *ctx,
int width, int height, int cw, int ch,
const float *matrix_y, const float *matrix_uv,
enum InterpolateMethod interpolate,
enum FillMethod fill, AVFrame *in, AVFrame *out);
 
#endif /* AVFILTER_DESHAKE_OPENCL_H */
/contrib/sdk/sources/ffmpeg/libavfilter/deshake_opencl_kernel.h
0,0 → 1,217
/*
* Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
*
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_DESHAKE_OPENCL_KERNEL_H
#define AVFILTER_DESHAKE_OPENCL_KERNEL_H
 
#include "libavutil/opencl.h"
 
const char *ff_kernel_deshake_opencl = AV_OPENCL_KERNEL(
 
inline unsigned char pixel(global const unsigned char *src, float x, float y,
int w, int h,int stride, unsigned char def)
{
return (x < 0 || y < 0 || x >= w || y >= h) ? def : src[(int)x + (int)y * stride];
}
unsigned char interpolate_nearest(float x, float y, global const unsigned char *src,
int width, int height, int stride, unsigned char def)
{
return pixel(src, (int)(x + 0.5), (int)(y + 0.5), width, height, stride, def);
}
 
unsigned char interpolate_bilinear(float x, float y, global const unsigned char *src,
int width, int height, int stride, unsigned char def)
{
int x_c, x_f, y_c, y_f;
int v1, v2, v3, v4;
 
if (x < -1 || x > width || y < -1 || y > height) {
return def;
} else {
x_f = (int)x;
x_c = x_f + 1;
 
y_f = (int)y;
y_c = y_f + 1;
 
v1 = pixel(src, x_c, y_c, width, height, stride, def);
v2 = pixel(src, x_c, y_f, width, height, stride, def);
v3 = pixel(src, x_f, y_c, width, height, stride, def);
v4 = pixel(src, x_f, y_f, width, height, stride, def);
 
return (v1*(x - x_f)*(y - y_f) + v2*((x - x_f)*(y_c - y)) +
v3*(x_c - x)*(y - y_f) + v4*((x_c - x)*(y_c - y)));
}
}
 
unsigned char interpolate_biquadratic(float x, float y, global const unsigned char *src,
int width, int height, int stride, unsigned char def)
{
int x_c, x_f, y_c, y_f;
unsigned char v1, v2, v3, v4;
float f1, f2, f3, f4;
 
if (x < - 1 || x > width || y < -1 || y > height)
return def;
else {
x_f = (int)x;
x_c = x_f + 1;
y_f = (int)y;
y_c = y_f + 1;
 
v1 = pixel(src, x_c, y_c, width, height, stride, def);
v2 = pixel(src, x_c, y_f, width, height, stride, def);
v3 = pixel(src, x_f, y_c, width, height, stride, def);
v4 = pixel(src, x_f, y_f, width, height, stride, def);
 
f1 = 1 - sqrt((x_c - x) * (y_c - y));
f2 = 1 - sqrt((x_c - x) * (y - y_f));
f3 = 1 - sqrt((x - x_f) * (y_c - y));
f4 = 1 - sqrt((x - x_f) * (y - y_f));
return (v1 * f1 + v2 * f2 + v3 * f3 + v4 * f4) / (f1 + f2 + f3 + f4);
}
}
 
inline float clipf(float a, float amin, float amax)
{
if (a < amin) return amin;
else if (a > amax) return amax;
else return a;
}
 
inline int mirror(int v, int m)
{
while ((unsigned)v > (unsigned)m) {
v = -v;
if (v < 0)
v += 2 * m;
}
return v;
}
 
kernel void avfilter_transform(global unsigned char *src,
global unsigned char *dst,
global float *matrix,
global float *matrix2,
int interpolate,
int fillmethod,
int src_stride_lu,
int dst_stride_lu,
int src_stride_ch,
int dst_stride_ch,
int height,
int width,
int ch,
int cw)
{
int global_id = get_global_id(0);
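// Work-item partition: ids in [0, width*height) transform the luma plane,
// the next ch*cw ids the first chroma plane, and the remaining ch*cw ids
// the second chroma plane.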
 
global unsigned char *dst_y = dst;
global unsigned char *dst_u = dst_y + height * dst_stride_lu;
global unsigned char *dst_v = dst_u + ch * dst_stride_ch;
 
global unsigned char *src_y = src;
global unsigned char *src_u = src_y + height * src_stride_lu;
global unsigned char *src_v = src_u + ch * src_stride_ch;
 
global unsigned char *tempdst;
global unsigned char *tempsrc;
 
int x;
int y;
float x_s;
float y_s;
int tempsrc_stride;
int tempdst_stride;
int temp_height;
int temp_width;
int curpos;
unsigned char def = 0;
if (global_id < width*height) {
y = global_id/width;
x = global_id%width;
x_s = x * matrix[0] + y * matrix[1] + matrix[2];
y_s = x * matrix[3] + y * matrix[4] + matrix[5];
tempdst = dst_y;
tempsrc = src_y;
tempsrc_stride = src_stride_lu;
tempdst_stride = dst_stride_lu;
temp_height = height;
temp_width = width;
} else if (global_id >= width*height && global_id < width*height + ch*cw) {
y = (global_id - width*height)/cw;
x = (global_id - width*height)%cw;
x_s = x * matrix2[0] + y * matrix2[1] + matrix2[2];
y_s = x * matrix2[3] + y * matrix2[4] + matrix2[5];
tempdst = dst_u;
tempsrc = src_u;
tempsrc_stride = src_stride_ch;
tempdst_stride = dst_stride_ch;
temp_height = ch;
temp_width = cw;
} else {
y = (global_id - width*height - ch*cw)/cw;
x = (global_id - width*height - ch*cw)%cw;
x_s = x * matrix2[0] + y * matrix2[1] + matrix2[2];
y_s = x * matrix2[3] + y * matrix2[4] + matrix2[5];
tempdst = dst_v;
tempsrc = src_v;
tempsrc_stride = src_stride_ch;
tempdst_stride = dst_stride_ch;
temp_height = ch;
temp_width = cw;
}
curpos = y * tempdst_stride + x;
switch (fillmethod) {
case 0: //FILL_BLANK
def = 0;
break;
case 1: //FILL_ORIGINAL
def = tempsrc[y*tempsrc_stride+x];
break;
case 2: //FILL_CLAMP
y_s = clipf(y_s, 0, temp_height - 1);
x_s = clipf(x_s, 0, temp_width - 1);
def = tempsrc[(int)y_s * tempsrc_stride + (int)x_s];
break;
case 3: //FILL_MIRROR
y_s = mirror(y_s, temp_height - 1);
x_s = mirror(x_s, temp_width - 1);
def = tempsrc[(int)y_s * tempsrc_stride + (int)x_s];
break;
}
switch (interpolate) {
case 0: //INTERPOLATE_NEAREST
tempdst[curpos] = interpolate_nearest(x_s, y_s, tempsrc, temp_width, temp_height, tempsrc_stride, def);
break;
case 1: //INTERPOLATE_BILINEAR
tempdst[curpos] = interpolate_bilinear(x_s, y_s, tempsrc, temp_width, temp_height, tempsrc_stride, def);
break;
case 2: //INTERPOLATE_BIQUADRATIC
tempdst[curpos] = interpolate_biquadratic(x_s, y_s, tempsrc, temp_width, temp_height, tempsrc_stride, def);
break;
default:
return;
}
}
);
 
#endif /* AVFILTER_DESHAKE_OPENCL_KERNEL_H */
/contrib/sdk/sources/ffmpeg/libavfilter/drawutils.c
0,0 → 1,569
/*
* Copyright 2011 Stefano Sabatini <stefano.sabatini-lala poste it>
* Copyright 2012 Nicolas George <nicolas.george normalesup org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <string.h>
 
#include "libavutil/avutil.h"
#include "libavutil/colorspace.h"
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "drawutils.h"
#include "formats.h"
 
enum { RED = 0, GREEN, BLUE, ALPHA };
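 
/* rgba_map maps the logical components above (RED, GREEN, BLUE, ALPHA) to
 * their byte offset within a packed pixel, or to their plane index for
 * planar RGB formats such as GBRP. */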
 
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
{
switch (pix_fmt) {
case AV_PIX_FMT_0RGB:
case AV_PIX_FMT_ARGB: rgba_map[ALPHA] = 0; rgba_map[RED ] = 1; rgba_map[GREEN] = 2; rgba_map[BLUE ] = 3; break;
case AV_PIX_FMT_0BGR:
case AV_PIX_FMT_ABGR: rgba_map[ALPHA] = 0; rgba_map[BLUE ] = 1; rgba_map[GREEN] = 2; rgba_map[RED ] = 3; break;
case AV_PIX_FMT_RGB48LE:
case AV_PIX_FMT_RGB48BE:
case AV_PIX_FMT_RGBA64BE:
case AV_PIX_FMT_RGBA64LE:
case AV_PIX_FMT_RGB0:
case AV_PIX_FMT_RGBA:
case AV_PIX_FMT_RGB24: rgba_map[RED ] = 0; rgba_map[GREEN] = 1; rgba_map[BLUE ] = 2; rgba_map[ALPHA] = 3; break;
case AV_PIX_FMT_BGR48LE:
case AV_PIX_FMT_BGR48BE:
case AV_PIX_FMT_BGRA64BE:
case AV_PIX_FMT_BGRA64LE:
case AV_PIX_FMT_BGRA:
case AV_PIX_FMT_BGR0:
case AV_PIX_FMT_BGR24: rgba_map[BLUE ] = 0; rgba_map[GREEN] = 1; rgba_map[RED ] = 2; rgba_map[ALPHA] = 3; break;
case AV_PIX_FMT_GBRAP:
case AV_PIX_FMT_GBRP: rgba_map[GREEN] = 0; rgba_map[BLUE ] = 1; rgba_map[RED ] = 2; rgba_map[ALPHA] = 3; break;
default: /* unsupported */
return AVERROR(EINVAL);
}
return 0;
}
 
int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t dst_color[4],
enum AVPixelFormat pix_fmt, uint8_t rgba_color[4],
int *is_packed_rgba, uint8_t rgba_map_ptr[4])
{
uint8_t rgba_map[4] = {0};
int i;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt);
int hsub = pix_desc->log2_chroma_w;
 
*is_packed_rgba = ff_fill_rgba_map(rgba_map, pix_fmt) >= 0;
 
if (*is_packed_rgba) {
pixel_step[0] = (av_get_bits_per_pixel(pix_desc))>>3;
for (i = 0; i < 4; i++)
dst_color[rgba_map[i]] = rgba_color[i];
 
line[0] = av_malloc(w * pixel_step[0]);
for (i = 0; i < w; i++)
memcpy(line[0] + i * pixel_step[0], dst_color, pixel_step[0]);
if (rgba_map_ptr)
memcpy(rgba_map_ptr, rgba_map, sizeof(rgba_map[0]) * 4);
} else {
int plane;
 
dst_color[0] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]);
dst_color[1] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
dst_color[2] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
dst_color[3] = rgba_color[3];
 
for (plane = 0; plane < 4; plane++) {
int line_size;
int hsub1 = (plane == 1 || plane == 2) ? hsub : 0;
 
pixel_step[plane] = 1;
line_size = FF_CEIL_RSHIFT(w, hsub1) * pixel_step[plane];
line[plane] = av_malloc(line_size);
memset(line[plane], dst_color[plane], line_size);
}
}
 
return 0;
}
 
void ff_draw_rectangle(uint8_t *dst[4], int dst_linesize[4],
uint8_t *src[4], int pixelstep[4],
int hsub, int vsub, int x, int y, int w, int h)
{
int i, plane;
uint8_t *p;
 
for (plane = 0; plane < 4 && dst[plane]; plane++) {
int hsub1 = plane == 1 || plane == 2 ? hsub : 0;
int vsub1 = plane == 1 || plane == 2 ? vsub : 0;
int width = FF_CEIL_RSHIFT(w, hsub1);
int height = FF_CEIL_RSHIFT(h, vsub1);
 
p = dst[plane] + (y >> vsub1) * dst_linesize[plane];
for (i = 0; i < height; i++) {
memcpy(p + (x >> hsub1) * pixelstep[plane],
src[plane], width * pixelstep[plane]);
p += dst_linesize[plane];
}
}
}
 
void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4],
uint8_t *src[4], int src_linesize[4], int pixelstep[4],
int hsub, int vsub, int x, int y, int y2, int w, int h)
{
int i, plane;
uint8_t *p;
 
for (plane = 0; plane < 4 && dst[plane]; plane++) {
int hsub1 = plane == 1 || plane == 2 ? hsub : 0;
int vsub1 = plane == 1 || plane == 2 ? vsub : 0;
int width = FF_CEIL_RSHIFT(w, hsub1);
int height = FF_CEIL_RSHIFT(h, vsub1);
 
p = dst[plane] + (y >> vsub1) * dst_linesize[plane];
for (i = 0; i < height; i++) {
memcpy(p + (x >> hsub1) * pixelstep[plane],
src[plane] + src_linesize[plane]*(i+(y2>>vsub1)), width * pixelstep[plane]);
p += dst_linesize[plane];
}
}
}
 
int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
const AVComponentDescriptor *c;
unsigned i, nb_planes = 0;
int pixelstep[MAX_PLANES] = { 0 };
 
if (!desc->name)
return AVERROR(EINVAL);
if (desc->flags & ~(AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_PSEUDOPAL | AV_PIX_FMT_FLAG_ALPHA))
return AVERROR(ENOSYS);
for (i = 0; i < desc->nb_components; i++) {
c = &desc->comp[i];
/* for now, only 8-bits formats */
if (c->depth_minus1 != 8 - 1)
return AVERROR(ENOSYS);
if (c->plane >= MAX_PLANES)
return AVERROR(ENOSYS);
/* strange interleaving */
if (pixelstep[c->plane] != 0 &&
pixelstep[c->plane] != c->step_minus1 + 1)
return AVERROR(ENOSYS);
pixelstep[c->plane] = c->step_minus1 + 1;
if (pixelstep[c->plane] >= 8)
return AVERROR(ENOSYS);
nb_planes = FFMAX(nb_planes, c->plane + 1);
}
if ((desc->log2_chroma_w || desc->log2_chroma_h) && nb_planes < 3)
return AVERROR(ENOSYS); /* exclude NV12 and NV21 */
memset(draw, 0, sizeof(*draw));
draw->desc = desc;
draw->format = format;
draw->nb_planes = nb_planes;
memcpy(draw->pixelstep, pixelstep, sizeof(draw->pixelstep));
draw->hsub[1] = draw->hsub[2] = draw->hsub_max = desc->log2_chroma_w;
draw->vsub[1] = draw->vsub[2] = draw->vsub_max = desc->log2_chroma_h;
for (i = 0; i < ((desc->nb_components - 1) | 1); i++)
draw->comp_mask[desc->comp[i].plane] |=
1 << (desc->comp[i].offset_plus1 - 1);
return 0;
}
 
void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4])
{
unsigned i;
uint8_t rgba_map[4];
 
if (rgba != color->rgba)
memcpy(color->rgba, rgba, sizeof(color->rgba));
if ((draw->desc->flags & AV_PIX_FMT_FLAG_RGB) &&
ff_fill_rgba_map(rgba_map, draw->format) >= 0) {
if (draw->nb_planes == 1) {
for (i = 0; i < 4; i++)
color->comp[0].u8[rgba_map[i]] = rgba[i];
} else {
for (i = 0; i < 4; i++)
color->comp[rgba_map[i]].u8[0] = rgba[i];
}
} else if (draw->nb_planes == 3 || draw->nb_planes == 4) {
/* assume YUV */
color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
color->comp[1].u8[0] = RGB_TO_U_CCIR(rgba[0], rgba[1], rgba[2], 0);
color->comp[2].u8[0] = RGB_TO_V_CCIR(rgba[0], rgba[1], rgba[2], 0);
color->comp[3].u8[0] = rgba[3];
} else if (draw->format == AV_PIX_FMT_GRAY8 || draw->format == AV_PIX_FMT_GRAY8A) {
color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
color->comp[1].u8[0] = rgba[3];
} else {
av_log(NULL, AV_LOG_WARNING,
"Color conversion not implemented for %s\n", draw->desc->name);
memset(color, 128, sizeof(*color));
}
}
 
static uint8_t *pointer_at(FFDrawContext *draw, uint8_t *data[], int linesize[],
int plane, int x, int y)
{
return data[plane] +
(y >> draw->vsub[plane]) * linesize[plane] +
(x >> draw->hsub[plane]) * draw->pixelstep[plane];
}
 
void ff_copy_rectangle2(FFDrawContext *draw,
uint8_t *dst[], int dst_linesize[],
uint8_t *src[], int src_linesize[],
int dst_x, int dst_y, int src_x, int src_y,
int w, int h)
{
int plane, y, wp, hp;
uint8_t *p, *q;
 
for (plane = 0; plane < draw->nb_planes; plane++) {
p = pointer_at(draw, src, src_linesize, plane, src_x, src_y);
q = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y);
wp = FF_CEIL_RSHIFT(w, draw->hsub[plane]) * draw->pixelstep[plane];
hp = FF_CEIL_RSHIFT(h, draw->vsub[plane]);
for (y = 0; y < hp; y++) {
memcpy(q, p, wp);
p += src_linesize[plane];
q += dst_linesize[plane];
}
}
}
 
void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color,
uint8_t *dst[], int dst_linesize[],
int dst_x, int dst_y, int w, int h)
{
int plane, x, y, wp, hp;
uint8_t *p0, *p;
 
for (plane = 0; plane < draw->nb_planes; plane++) {
p0 = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y);
wp = FF_CEIL_RSHIFT(w, draw->hsub[plane]);
hp = FF_CEIL_RSHIFT(h, draw->vsub[plane]);
if (!hp)
return;
p = p0;
/* copy first line from color */
for (x = 0; x < wp; x++) {
memcpy(p, color->comp[plane].u8, draw->pixelstep[plane]);
p += draw->pixelstep[plane];
}
wp *= draw->pixelstep[plane];
/* copy next lines from first line */
p = p0 + dst_linesize[plane];
for (y = 1; y < hp; y++) {
memcpy(p, p0, wp);
p += dst_linesize[plane];
}
}
}
 
/**
* Clip interval [x; x+w[ within [0; wmax[.
* The resulting w may be negative if the final interval is empty.
* If dx is not null, it receives the difference between the input and output values of x.
*/
static void clip_interval(int wmax, int *x, int *w, int *dx)
{
if (dx)
*dx = 0;
if (*x < 0) {
if (dx)
*dx = -*x;
*w += *x;
*x = 0;
}
if (*x + *w > wmax)
*w = wmax - *x;
}
 
/**
* Decompose w pixels starting at x
* into start + (w starting at x) + end
* with x and w aligned on multiples of 1<<sub.
*/
static void subsampling_bounds(int sub, int *x, int *w, int *start, int *end)
{
int mask = (1 << sub) - 1;
 
*start = (-*x) & mask;
*x += *start;
*start = FFMIN(*start, *w);
*w -= *start;
*end = *w & mask;
*w >>= sub;
}
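 
/* Worked example: with sub = 1 (2x subsampling), x = 3, w = 10:
 * start = 1 (one unaligned leading pixel) and x becomes 4, leaving w = 9;
 * then end = 1 (one trailing pixel) and w = 4 full subsampled samples,
 * i.e. 1 + 4*2 + 1 = 10 pixels in total. */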
 
static int component_used(FFDrawContext *draw, int plane, int comp)
{
return (draw->comp_mask[plane] >> comp) & 1;
}
 
/* If alpha is in the [ 0 ; 0x1010101 ] range,
then alpha * value is in the [ 0 ; 0xFFFFFFFF ] range,
and >> 24 gives a correct rounding. */
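/* Worked check: with alpha = 0x1010101 (fully opaque) and src = 255,
 * asrc = 0xFFFFFFFF and tau = 0, so *dst = 0xFFFFFFFF >> 24 = 255:
 * the destination becomes pure src. */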
static void blend_line(uint8_t *dst, unsigned src, unsigned alpha,
int dx, int w, unsigned hsub, int left, int right)
{
unsigned asrc = alpha * src;
unsigned tau = 0x1010101 - alpha;
int x;
 
if (left) {
unsigned suba = (left * alpha) >> hsub;
*dst = (*dst * (0x1010101 - suba) + src * suba) >> 24;
dst += dx;
}
for (x = 0; x < w; x++) {
*dst = (*dst * tau + asrc) >> 24;
dst += dx;
}
if (right) {
unsigned suba = (right * alpha) >> hsub;
*dst = (*dst * (0x1010101 - suba) + src * suba) >> 24;
}
}
 
void ff_blend_rectangle(FFDrawContext *draw, FFDrawColor *color,
uint8_t *dst[], int dst_linesize[],
int dst_w, int dst_h,
int x0, int y0, int w, int h)
{
unsigned alpha, nb_planes, nb_comp, plane, comp;
int w_sub, h_sub, x_sub, y_sub, left, right, top, bottom, y;
uint8_t *p0, *p;
 
/* TODO optimize if alpha = 0xFF */
clip_interval(dst_w, &x0, &w, NULL);
clip_interval(dst_h, &y0, &h, NULL);
if (w <= 0 || h <= 0 || !color->rgba[3])
return;
/* 0x10203 * alpha + 2 is in the [ 2 ; 0x1010101 - 2 ] range */
alpha = 0x10203 * color->rgba[3] + 0x2;
nb_planes = (draw->nb_planes - 1) | 1; /* eliminate alpha */
for (plane = 0; plane < nb_planes; plane++) {
nb_comp = draw->pixelstep[plane];
p0 = pointer_at(draw, dst, dst_linesize, plane, x0, y0);
w_sub = w;
h_sub = h;
x_sub = x0;
y_sub = y0;
subsampling_bounds(draw->hsub[plane], &x_sub, &w_sub, &left, &right);
subsampling_bounds(draw->vsub[plane], &y_sub, &h_sub, &top, &bottom);
for (comp = 0; comp < nb_comp; comp++) {
if (!component_used(draw, plane, comp))
continue;
p = p0 + comp;
if (top) {
blend_line(p, color->comp[plane].u8[comp], alpha >> 1,
draw->pixelstep[plane], w_sub,
draw->hsub[plane], left, right);
p += dst_linesize[plane];
}
for (y = 0; y < h_sub; y++) {
blend_line(p, color->comp[plane].u8[comp], alpha,
draw->pixelstep[plane], w_sub,
draw->hsub[plane], left, right);
p += dst_linesize[plane];
}
if (bottom)
blend_line(p, color->comp[plane].u8[comp], alpha >> 1,
draw->pixelstep[plane], w_sub,
draw->hsub[plane], left, right);
}
}
}
 
static void blend_pixel(uint8_t *dst, unsigned src, unsigned alpha,
uint8_t *mask, int mask_linesize, int l2depth,
unsigned w, unsigned h, unsigned shift, unsigned xm0)
{
unsigned xm, x, y, t = 0;
unsigned xmshf = 3 - l2depth;
unsigned xmmod = 7 >> l2depth;
unsigned mbits = (1 << (1 << l2depth)) - 1;
unsigned mmult = 255 / mbits;
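 
/* xmshf selects the mask byte holding sample xm and xmmod its position
 * inside that byte; mbits is the maximum sample value and mmult rescales it
 * to the 0..255 range (l2depth = 0: 8 one-bit samples per byte, mmult = 255;
 * l2depth = 3: one 8-bit sample per byte, mmult = 1). */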
 
for (y = 0; y < h; y++) {
xm = xm0;
for (x = 0; x < w; x++) {
t += ((mask[xm >> xmshf] >> ((~xm & xmmod) << l2depth)) & mbits)
* mmult;
xm++;
}
mask += mask_linesize;
}
alpha = (t >> shift) * alpha;
*dst = ((0x1010101 - alpha) * *dst + alpha * src) >> 24;
}
 
static void blend_line_hv(uint8_t *dst, int dst_delta,
unsigned src, unsigned alpha,
uint8_t *mask, int mask_linesize, int l2depth, int w,
unsigned hsub, unsigned vsub,
int xm, int left, int right, int hband)
{
int x;
 
if (left) {
blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth,
left, hband, hsub + vsub, xm);
dst += dst_delta;
xm += left;
}
for (x = 0; x < w; x++) {
blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth,
1 << hsub, hband, hsub + vsub, xm);
dst += dst_delta;
xm += 1 << hsub;
}
if (right)
blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth,
right, hband, hsub + vsub, xm);
}
 
void ff_blend_mask(FFDrawContext *draw, FFDrawColor *color,
uint8_t *dst[], int dst_linesize[], int dst_w, int dst_h,
uint8_t *mask, int mask_linesize, int mask_w, int mask_h,
int l2depth, unsigned endianness, int x0, int y0)
{
unsigned alpha, nb_planes, nb_comp, plane, comp;
int xm0, ym0, w_sub, h_sub, x_sub, y_sub, left, right, top, bottom, y;
uint8_t *p0, *p, *m;
 
clip_interval(dst_w, &x0, &mask_w, &xm0);
clip_interval(dst_h, &y0, &mask_h, &ym0);
mask += ym0 * mask_linesize;
if (mask_w <= 0 || mask_h <= 0 || !color->rgba[3])
return;
/* alpha is in the [ 0 ; 0x10203 ] range,
alpha * mask is in the [ 0 ; 0x1010101 - 4 ] range */
alpha = (0x10307 * color->rgba[3] + 0x3) >> 8;
nb_planes = (draw->nb_planes - 1) | 1; /* eliminate alpha */
for (plane = 0; plane < nb_planes; plane++) {
nb_comp = draw->pixelstep[plane];
p0 = pointer_at(draw, dst, dst_linesize, plane, x0, y0);
w_sub = mask_w;
h_sub = mask_h;
x_sub = x0;
y_sub = y0;
subsampling_bounds(draw->hsub[plane], &x_sub, &w_sub, &left, &right);
subsampling_bounds(draw->vsub[plane], &y_sub, &h_sub, &top, &bottom);
for (comp = 0; comp < nb_comp; comp++) {
if (!component_used(draw, plane, comp))
continue;
p = p0 + comp;
m = mask;
if (top) {
blend_line_hv(p, draw->pixelstep[plane],
color->comp[plane].u8[comp], alpha,
m, mask_linesize, l2depth, w_sub,
draw->hsub[plane], draw->vsub[plane],
xm0, left, right, top);
p += dst_linesize[plane];
m += top * mask_linesize;
}
for (y = 0; y < h_sub; y++) {
blend_line_hv(p, draw->pixelstep[plane],
color->comp[plane].u8[comp], alpha,
m, mask_linesize, l2depth, w_sub,
draw->hsub[plane], draw->vsub[plane],
xm0, left, right, 1 << draw->vsub[plane]);
p += dst_linesize[plane];
m += mask_linesize << draw->vsub[plane];
}
if (bottom)
blend_line_hv(p, draw->pixelstep[plane],
color->comp[plane].u8[comp], alpha,
m, mask_linesize, l2depth, w_sub,
draw->hsub[plane], draw->vsub[plane],
xm0, left, right, bottom);
}
}
}
 
int ff_draw_round_to_sub(FFDrawContext *draw, int sub_dir, int round_dir,
int value)
{
unsigned shift = sub_dir ? draw->vsub_max : draw->hsub_max;
 
if (!shift)
return value;
if (round_dir >= 0)
value += round_dir ? (1 << shift) - 1 : 1 << (shift - 1);
return (value >> shift) << shift;
}
 
AVFilterFormats *ff_draw_supported_pixel_formats(unsigned flags)
{
enum AVPixelFormat i, pix_fmts[AV_PIX_FMT_NB + 1];
unsigned n = 0;
FFDrawContext draw;
 
for (i = 0; i < AV_PIX_FMT_NB; i++)
if (ff_draw_init(&draw, i, flags) >= 0)
pix_fmts[n++] = i;
pix_fmts[n++] = AV_PIX_FMT_NONE;
return ff_make_format_list(pix_fmts);
}
 
#ifdef TEST
 
#undef printf
 
int main(void)
{
enum AVPixelFormat f;
const AVPixFmtDescriptor *desc;
FFDrawContext draw;
FFDrawColor color;
int r, i;
 
for (f = 0; f < AV_PIX_FMT_NB; f++) {
desc = av_pix_fmt_desc_get(f);
if (!desc->name)
continue;
printf("Testing %s...%*s", desc->name,
(int)(16 - strlen(desc->name)), "");
r = ff_draw_init(&draw, f, 0);
if (r < 0) {
char buf[128];
av_strerror(r, buf, sizeof(buf));
printf("no: %s\n", buf);
continue;
}
ff_draw_color(&draw, &color, (uint8_t[]) { 1, 0, 0, 1 });
for (i = 0; i < sizeof(color); i++)
if (((uint8_t *)&color)[i] != 128)
break;
if (i == sizeof(color)) {
printf("fallback color\n");
continue;
}
printf("ok\n");
}
return 0;
}
 
#endif
/contrib/sdk/sources/ffmpeg/libavfilter/drawutils.h
0,0 → 1,155
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_DRAWUTILS_H
#define AVFILTER_DRAWUTILS_H
 
/**
* @file
* misc drawing utilities
*/
 
#include <stdint.h>
#include "avfilter.h"
#include "libavutil/pixfmt.h"
 
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt);
 
int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w,
uint8_t dst_color[4],
enum AVPixelFormat pix_fmt, uint8_t rgba_color[4],
int *is_packed_rgba, uint8_t rgba_map[4]);
 
void ff_draw_rectangle(uint8_t *dst[4], int dst_linesize[4],
uint8_t *src[4], int pixelstep[4],
int hsub, int vsub, int x, int y, int w, int h);
 
void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4],
uint8_t *src[4], int src_linesize[4], int pixelstep[4],
int hsub, int vsub, int x, int y, int y2, int w, int h);
 
#define MAX_PLANES 4
 
typedef struct FFDrawContext {
const struct AVPixFmtDescriptor *desc;
enum AVPixelFormat format;
unsigned nb_planes;
int pixelstep[MAX_PLANES]; /*< offset between pixels */
uint8_t comp_mask[MAX_PLANES]; /*< bitmask of used non-alpha components */
uint8_t hsub[MAX_PLANES]; /*< horizontal subsampling */
uint8_t vsub[MAX_PLANES]; /*< vertical subsampling */
uint8_t hsub_max;
uint8_t vsub_max;
} FFDrawContext;
 
typedef struct FFDrawColor {
uint8_t rgba[4];
union {
uint32_t u32;
uint16_t u16;
uint8_t u8[4];
} comp[MAX_PLANES];
} FFDrawColor;
 
/**
* Init a draw context.
*
* Only a limited number of pixel formats are supported, if format is not
* supported the function will return an error.
* No flags currently defined.
* @return 0 for success, < 0 for error
*/
int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags);
 
/**
* Prepare a color.
*/
void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4]);
 
/**
* Copy a rectangle from an image to another.
*
* The coordinates must be as even as the subsampling requires.
*/
void ff_copy_rectangle2(FFDrawContext *draw,
uint8_t *dst[], int dst_linesize[],
uint8_t *src[], int src_linesize[],
int dst_x, int dst_y, int src_x, int src_y,
int w, int h);
 
/**
* Fill a rectangle with a uniform color.
*
* The coordinates must be as even as the subsampling requires.
* The color needs to be initialized with ff_draw_color().
*/
void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color,
uint8_t *dst[], int dst_linesize[],
int dst_x, int dst_y, int w, int h);
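 
/*
 * Illustrative usage sketch (an editorial addition, not part of the original
 * header): draw an opaque red 64x64 square at (16,16); "frame" is an assumed
 * AVFrame in a pixel format supported by ff_draw_init().
 *
 *     FFDrawContext draw;
 *     FFDrawColor color;
 *     if (ff_draw_init(&draw, frame->format, 0) >= 0) {
 *         ff_draw_color(&draw, &color, (uint8_t[]){ 255, 0, 0, 255 });
 *         ff_fill_rectangle(&draw, &color, frame->data, frame->linesize,
 *                           16, 16, 64, 64);
 *     }
 */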
 
/**
* Blend a rectangle with a uniform color.
*/
void ff_blend_rectangle(FFDrawContext *draw, FFDrawColor *color,
uint8_t *dst[], int dst_linesize[],
int dst_w, int dst_h,
int x0, int y0, int w, int h);
 
/**
* Blend an alpha mask with a uniform color.
*
* @param draw draw context
* @param color color for the overlay;
* @param dst destination image
* @param dst_linesize line stride of the destination
* @param dst_w width of the destination image
* @param dst_h height of the destination image
* @param mask mask
* @param mask_linesize line stride of the mask
* @param mask_w width of the mask
* @param mask_h height of the mask
* @param l2depth log2 of depth of the mask (0 for 1bpp, 3 for 8bpp)
* @param endianness bit order of the mask (0: MSB to the left)
* @param x0 horizontal position of the overlay
* @param y0 vertical position of the overlay
*/
void ff_blend_mask(FFDrawContext *draw, FFDrawColor *color,
uint8_t *dst[], int dst_linesize[], int dst_w, int dst_h,
uint8_t *mask, int mask_linesize, int mask_w, int mask_h,
int l2depth, unsigned endianness, int x0, int y0);
 
/**
* Round a dimension according to subsampling.
*
* @param draw draw context
* @param sub_dir 0 for horizontal, 1 for vertical
* @param round_dir 0 nearest, -1 round down, +1 round up
* @param value value to round
* @return the rounded value
*/
int ff_draw_round_to_sub(FFDrawContext *draw, int sub_dir, int round_dir,
int value);
 
/**
* Return the list of pixel formats supported by the draw functions.
*
* The flags are the same as ff_draw_init, i.e., none currently.
*/
AVFilterFormats *ff_draw_supported_pixel_formats(unsigned flags);
 
#endif /* AVFILTER_DRAWUTILS_H */
/contrib/sdk/sources/ffmpeg/libavfilter/dualinput.c
0,0 → 1,83
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "dualinput.h"
#include "libavutil/timestamp.h"
 
static int process_frame(FFFrameSync *fs)
{
AVFilterContext *ctx = fs->parent;
FFDualInputContext *s = fs->opaque;
AVFrame *mainpic = NULL, *secondpic = NULL;
int ret = 0;
 
if ((ret = ff_framesync_get_frame(&s->fs, 0, &mainpic, 1)) < 0 ||
(ret = ff_framesync_get_frame(&s->fs, 1, &secondpic, 0)) < 0) {
av_frame_free(&mainpic);
return ret;
}
av_assert0(mainpic);
mainpic->pts = av_rescale_q(mainpic->pts, s->fs.time_base, ctx->outputs[0]->time_base);
if (secondpic && !ctx->is_disabled)
mainpic = s->process(ctx, mainpic, secondpic);
ret = ff_filter_frame(ctx->outputs[0], mainpic);
av_assert1(ret != AVERROR(EAGAIN));
return ret;
}
 
int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s)
{
FFFrameSyncIn *in = s->fs.in;
 
ff_framesync_init(&s->fs, ctx, 2);
s->fs.opaque = s;
s->fs.on_event = process_frame;
in[0].time_base = ctx->inputs[0]->time_base;
in[1].time_base = ctx->inputs[1]->time_base;
in[0].sync = 2;
in[0].before = EXT_STOP;
in[0].after = EXT_INFINITY;
in[1].sync = 1;
in[1].before = EXT_NULL;
in[1].after = EXT_INFINITY;
 
if (s->shortest)
in[1].after = EXT_STOP;
if (!s->repeatlast) {
in[0].after = EXT_STOP;
in[1].sync = 0;
}
 
return ff_framesync_configure(&s->fs);
}
 
int ff_dualinput_filter_frame(FFDualInputContext *s,
AVFilterLink *inlink, AVFrame *in)
{
return ff_framesync_filter_frame(&s->fs, inlink, in);
}
 
int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink)
{
return ff_framesync_request_frame(&s->fs, outlink);
}
 
void ff_dualinput_uninit(FFDualInputContext *s)
{
ff_framesync_uninit(&s->fs);
}
/contrib/sdk/sources/ffmpeg/libavfilter/dualinput.h
0,0 → 1,46
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Double input streams helper for filters
*/
 
#ifndef AVFILTER_DUALINPUT_H
#define AVFILTER_DUALINPUT_H
 
#include <stdint.h>
#include "bufferqueue.h"
#include "framesync.h"
#include "internal.h"
 
typedef struct {
FFFrameSync fs;
FFFrameSyncIn second_input; /* must be immediately after fs */
 
AVFrame *(*process)(AVFilterContext *ctx, AVFrame *main, const AVFrame *second);
int shortest; ///< terminate stream when the second input terminates
int repeatlast; ///< repeat the last frame of the second input after it ends
} FFDualInputContext;
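 
/* Typical use (sketch): a two-input filter embeds FFDualInputContext in its
 * private context, sets ->process to its frame-combining callback, calls
 * ff_dualinput_init() once the input links are configured, feeds incoming
 * frames through ff_dualinput_filter_frame(), and drives its output with
 * ff_dualinput_request_frame(). */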
 
int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s);
int ff_dualinput_filter_frame(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink);
void ff_dualinput_uninit(FFDualInputContext *s);
 
#endif /* AVFILTER_DUALINPUT_H */
/contrib/sdk/sources/ffmpeg/libavfilter/f_ebur128.c
0,0 → 1,799
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* EBU R.128 implementation
* @see http://tech.ebu.ch/loudness
* @see https://www.youtube.com/watch?v=iuEtQqC-Sqo "EBU R128 Introduction - Florian Camerer"
* @todo True Peak
* @todo implement start/stop/reset through filter command injection
* @todo support other frequencies to avoid resampling
*/
 
#include <math.h>
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/dict.h"
#include "libavutil/xga_font_data.h"
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
#define MAX_CHANNELS 63
 
/* pre-filter coefficients */
#define PRE_B0 1.53512485958697
#define PRE_B1 -2.69169618940638
#define PRE_B2 1.19839281085285
#define PRE_A1 -1.69065929318241
#define PRE_A2 0.73248077421585
 
/* RLB-filter coefficients */
#define RLB_B0 1.0
#define RLB_B1 -2.0
#define RLB_B2 1.0
#define RLB_A1 -1.99004745483398
#define RLB_A2 0.99007225036621
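 
/* Both stages are direct-form biquads operating on the per-channel caches
 * declared further below:
 *   y[n] = B0*x[n] + B1*x[n-1] + B2*x[n-2] - A1*y[n-1] - A2*y[n-2]
 * with the ITU-R BS.1770 coefficients above, specified for 48 kHz input. */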
 
#define ABS_THRES -70 ///< silence gate: we discard anything below this absolute (LUFS) threshold
#define ABS_UP_THRES 10 ///< upper loud limit to consider (ABS_THRES being the minimum)
#define HIST_GRAIN 100 ///< defines histogram precision
#define HIST_SIZE ((ABS_UP_THRES - ABS_THRES) * HIST_GRAIN + 1)
 
/**
* A histogram is an array of HIST_SIZE hist_entry elements storing all the
* energies recorded (with an accuracy of 1/HIST_GRAIN) for loudnesses from
* ABS_THRES (at index 0) to ABS_UP_THRES (at index HIST_SIZE-1).
* This fixed-size scheme avoids the need for a list of energies growing
* indefinitely over time and is thus more scalable.
*/
struct hist_entry {
int count; ///< how many times the corresponding value occurred
double energy; ///< E = 10^((L + 0.691) / 10)
double loudness; ///< L = -0.691 + 10 * log10(E)
};
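 
/* Worked example: the bin holding L = -23 LUFS stores
 * E = 10^((-23 + 0.691) / 10) = 10^(-2.2309) ~= 5.88e-3. */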
 
struct integrator {
double *cache[MAX_CHANNELS]; ///< window of filtered samples (N ms)
int cache_pos; ///< focus on the last added bin in the cache array
double sum[MAX_CHANNELS]; ///< sum of the last N ms filtered samples (cache content)
int filled; ///< 1 if the cache is completely filled, 0 otherwise
double rel_threshold; ///< relative threshold
double sum_kept_powers; ///< sum of the powers (weighted sums) above absolute threshold
int nb_kept_powers; ///< number of sums above the absolute threshold
struct hist_entry *histogram; ///< histogram of the powers, used to compute LRA and I
};
 
struct rect { int x, y, w, h; };
 
typedef struct {
const AVClass *class; ///< AVClass context for log and options purpose
 
/* video */
int do_video; ///< 1 if video output enabled, 0 otherwise
int w, h; ///< size of the video output
struct rect text; ///< rectangle for the LU legend on the left
struct rect graph; ///< rectangle for the main graph in the center
struct rect gauge; ///< rectangle for the gauge on the right
AVFrame *outpicref; ///< output picture reference, updated regularly
int meter; ///< selected EBU scale meter, between +9 and +18
int scale_range; ///< the range of LU values according to the meter
int y_zero_lu; ///< the y value (pixel position) for 0 LU
int *y_line_ref; ///< y reference values for drawing the LU lines in the graph and the gauge
 
/* audio */
int nb_channels; ///< number of channels in the input
double *ch_weighting; ///< channel weighting mapping
int sample_count; ///< sample count used for refresh frequency, reset at refresh
 
/* Filter caches.
* The multiplication by 3 in the following is for X[i], X[i-1] and X[i-2] */
double x[MAX_CHANNELS * 3]; ///< 3 input samples cache for each channel
double y[MAX_CHANNELS * 3]; ///< 3 pre-filter samples cache for each channel
double z[MAX_CHANNELS * 3]; ///< 3 RLB-filter samples cache for each channel
 
#define I400_BINS (48000 * 4 / 10)
#define I3000_BINS (48000 * 3)
struct integrator i400; ///< 400ms integrator, used for Momentary loudness (M), and Integrated loudness (I)
struct integrator i3000; ///< 3s integrator, used for Short term loudness (S), and Loudness Range (LRA)
 
/* I and LRA specific */
double integrated_loudness; ///< integrated loudness in LUFS (I)
double loudness_range; ///< loudness range in LU (LRA)
double lra_low, lra_high; ///< low and high LRA values
 
/* misc */
int loglevel; ///< log level for frame logging
int metadata; ///< whether or not to inject loudness results in frames
} EBUR128Context;
 
#define OFFSET(x) offsetof(EBUR128Context, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption ebur128_options[] = {
{ "video", "set video output", OFFSET(do_video), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, V|F },
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x480"}, 0, 0, V|F },
{ "meter", "set scale meter (+9 to +18)", OFFSET(meter), AV_OPT_TYPE_INT, {.i64 = 9}, 9, 18, V|F },
{ "framelog", "force frame logging level", OFFSET(loglevel), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, A|V|F, "level" },
{ "info", "information logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_INFO}, INT_MIN, INT_MAX, A|V|F, "level" },
{ "verbose", "verbose logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_VERBOSE}, INT_MIN, INT_MAX, A|V|F, "level" },
{ "metadata", "inject metadata in the filtergraph", OFFSET(metadata), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, A|V|F },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(ebur128);
 
static const uint8_t graph_colors[] = {
0xdd, 0x66, 0x66, // value above 0LU not reached
0x66, 0x66, 0xdd, // value below 0LU not reached
0x96, 0x33, 0x33, // value above 0LU reached
0x33, 0x33, 0x96, // value below 0LU reached
0xdd, 0x96, 0x96, // value above 0LU line not reached
0x96, 0x96, 0xdd, // value below 0LU line not reached
0xdd, 0x33, 0x33, // value above 0LU line reached
0x33, 0x33, 0xdd, // value below 0LU line reached
};
 
static const uint8_t *get_graph_color(const EBUR128Context *ebur128, int v, int y)
{
const int below0 = y > ebur128->y_zero_lu;
const int reached = y >= v;
const int line = ebur128->y_line_ref[y] || y == ebur128->y_zero_lu;
const int colorid = 4*line + 2*reached + below0;
return graph_colors + 3*colorid;
}
 
static inline int lu_to_y(const EBUR128Context *ebur128, double v)
{
v += 2 * ebur128->meter; // make it in range [0;...]
v = av_clipf(v, 0, ebur128->scale_range); // make sure it's in the graph scale
v = ebur128->scale_range - v; // invert value (y=0 is on top)
return v * ebur128->graph.h / ebur128->scale_range; // rescale from scale range to px height
}
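
/* Example: with the default meter=9 (scale_range=27), v=0 LU is shifted
 * to 18, clipped, inverted to 27-18=9 and mapped to graph.h/3, i.e. one
 * third down from the top of the +9..-18 LU scale. */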
 
#define FONT8 0
#define FONT16 1
 
static const uint8_t font_colors[] = {
0xdd, 0xdd, 0x00,
0x00, 0x96, 0x96,
};
 
static void drawtext(AVFrame *pic, int x, int y, int ftid, const uint8_t *color, const char *fmt, ...)
{
int i;
char buf[128] = {0};
const uint8_t *font;
int font_height;
va_list vl;
 
if (ftid == FONT16) font = avpriv_vga16_font, font_height = 16;
else if (ftid == FONT8) font = avpriv_cga_font, font_height = 8;
else return;
 
va_start(vl, fmt);
vsnprintf(buf, sizeof(buf), fmt, vl);
va_end(vl);
 
for (i = 0; buf[i]; i++) {
int char_y, mask;
uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*3;
 
for (char_y = 0; char_y < font_height; char_y++) {
for (mask = 0x80; mask; mask >>= 1) {
if (font[buf[i] * font_height + char_y] & mask)
memcpy(p, color, 3);
else
memcpy(p, "\x00\x00\x00", 3);
p += 3;
}
p += pic->linesize[0] - 8*3;
}
}
}
 
static void drawline(AVFrame *pic, int x, int y, int len, int step)
{
int i;
uint8_t *p = pic->data[0] + y*pic->linesize[0] + x*3;
 
for (i = 0; i < len; i++) {
memcpy(p, "\x00\xff\x00", 3);
p += step;
}
}
 
static int config_video_output(AVFilterLink *outlink)
{
int i, x, y;
uint8_t *p;
AVFilterContext *ctx = outlink->src;
EBUR128Context *ebur128 = ctx->priv;
AVFrame *outpicref;
 
/* check if there is enough space to represent everything decently */
if (ebur128->w < 640 || ebur128->h < 480) {
av_log(ctx, AV_LOG_ERROR, "Video size %dx%d is too small, "
"minimum size is 640x480\n", ebur128->w, ebur128->h);
return AVERROR(EINVAL);
}
outlink->w = ebur128->w;
outlink->h = ebur128->h;
 
#define PAD 8
 
/* configure text area position and size */
ebur128->text.x = PAD;
ebur128->text.y = 40;
ebur128->text.w = 3 * 8; // 3 characters
ebur128->text.h = ebur128->h - PAD - ebur128->text.y;
 
/* configure gauge position and size */
ebur128->gauge.w = 20;
ebur128->gauge.h = ebur128->text.h;
ebur128->gauge.x = ebur128->w - PAD - ebur128->gauge.w;
ebur128->gauge.y = ebur128->text.y;
 
/* configure graph position and size */
ebur128->graph.x = ebur128->text.x + ebur128->text.w + PAD;
ebur128->graph.y = ebur128->gauge.y;
ebur128->graph.w = ebur128->gauge.x - ebur128->graph.x - PAD;
ebur128->graph.h = ebur128->gauge.h;
 
/* graph and gauge share the LU-to-pixel code */
av_assert0(ebur128->graph.h == ebur128->gauge.h);
 
/* prepare the initial picref buffer */
av_frame_free(&ebur128->outpicref);
ebur128->outpicref = outpicref =
ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpicref)
return AVERROR(ENOMEM);
outlink->sample_aspect_ratio = (AVRational){1,1};
 
/* init y reference values (to draw LU lines) */
ebur128->y_line_ref = av_calloc(ebur128->graph.h + 1, sizeof(*ebur128->y_line_ref));
if (!ebur128->y_line_ref)
return AVERROR(ENOMEM);
 
/* black background */
memset(outpicref->data[0], 0, ebur128->h * outpicref->linesize[0]);
 
/* draw LU legends */
drawtext(outpicref, PAD, PAD+16, FONT8, font_colors+3, " LU");
for (i = ebur128->meter; i >= -ebur128->meter * 2; i--) {
y = lu_to_y(ebur128, i);
x = PAD + (i < 10 && i > -10) * 8;
ebur128->y_line_ref[y] = i;
y -= 4; // -4 to center vertically
drawtext(outpicref, x, y + ebur128->graph.y, FONT8, font_colors+3,
"%c%d", i < 0 ? '-' : i > 0 ? '+' : ' ', FFABS(i));
}
 
/* draw graph */
ebur128->y_zero_lu = lu_to_y(ebur128, 0);
p = outpicref->data[0] + ebur128->graph.y * outpicref->linesize[0]
+ ebur128->graph.x * 3;
for (y = 0; y < ebur128->graph.h; y++) {
const uint8_t *c = get_graph_color(ebur128, INT_MAX, y);
 
for (x = 0; x < ebur128->graph.w; x++)
memcpy(p + x*3, c, 3);
p += outpicref->linesize[0];
}
 
/* draw fancy rectangles around the graph and the gauge */
#define DRAW_RECT(r) do { \
drawline(outpicref, r.x, r.y - 1, r.w, 3); \
drawline(outpicref, r.x, r.y + r.h, r.w, 3); \
drawline(outpicref, r.x - 1, r.y, r.h, outpicref->linesize[0]); \
drawline(outpicref, r.x + r.w, r.y, r.h, outpicref->linesize[0]); \
} while (0)
DRAW_RECT(ebur128->graph);
DRAW_RECT(ebur128->gauge);
 
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
 
return 0;
}
 
static int config_audio_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
EBUR128Context *ebur128 = ctx->priv;
 
/* force 100ms framing in case of metadata injection: the frames must
* match the granularity of the window overlap to be exploited accurately */
if (ebur128->metadata)
inlink->min_samples =
inlink->max_samples =
inlink->partial_buf_size = inlink->sample_rate / 10;
return 0;
}
 
static int config_audio_output(AVFilterLink *outlink)
{
int i;
int idx_bitposn = 0;
AVFilterContext *ctx = outlink->src;
EBUR128Context *ebur128 = ctx->priv;
const int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
 
#define BACK_MASK (AV_CH_BACK_LEFT |AV_CH_BACK_CENTER |AV_CH_BACK_RIGHT| \
AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_BACK_RIGHT| \
AV_CH_SIDE_LEFT |AV_CH_SIDE_RIGHT| \
AV_CH_SURROUND_DIRECT_LEFT |AV_CH_SURROUND_DIRECT_RIGHT)
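
/* The channel weights below follow BS.1770: LFE channels are excluded
 * from the measurement, back/side channels get a +1.5dB power weight
 * (10^(1.5/10) ~= 1.41), and all other channels are weighted 1.0. */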
 
ebur128->nb_channels = nb_channels;
ebur128->ch_weighting = av_calloc(nb_channels, sizeof(*ebur128->ch_weighting));
if (!ebur128->ch_weighting)
return AVERROR(ENOMEM);
 
for (i = 0; i < nb_channels; i++) {
 
/* find the next bit that is set starting from the right */
while ((outlink->channel_layout & 1ULL<<idx_bitposn) == 0 && idx_bitposn < 63)
idx_bitposn++;
 
/* channel weighting */
if ((1ULL<<idx_bitposn & AV_CH_LOW_FREQUENCY) ||
(1ULL<<idx_bitposn & AV_CH_LOW_FREQUENCY_2)) {
ebur128->ch_weighting[i] = 0;
} else if (1ULL<<idx_bitposn & BACK_MASK) {
ebur128->ch_weighting[i] = 1.41;
} else {
ebur128->ch_weighting[i] = 1.0;
}
 
idx_bitposn++;
 
if (!ebur128->ch_weighting[i])
continue;
 
/* bins buffers for the two integration windows (400ms and 3s) */
ebur128->i400.cache[i] = av_calloc(I400_BINS, sizeof(*ebur128->i400.cache[0]));
ebur128->i3000.cache[i] = av_calloc(I3000_BINS, sizeof(*ebur128->i3000.cache[0]));
if (!ebur128->i400.cache[i] || !ebur128->i3000.cache[i])
return AVERROR(ENOMEM);
}
 
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
 
return 0;
}
 
#define ENERGY(loudness) (pow(10, ((loudness) + 0.691) / 10.))
#define LOUDNESS(energy) (-0.691 + 10 * log10(energy))
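
/* These two macros are inverses of each other: LOUDNESS(ENERGY(l)) == l.
 * For instance a -23 LUFS program corresponds to an energy of
 * 10^((-23 + 0.691) / 10) ~= 5.87e-3. */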
 
static struct hist_entry *get_histogram(void)
{
int i;
struct hist_entry *h = av_calloc(HIST_SIZE, sizeof(*h));
 
if (!h)
return NULL;
for (i = 0; i < HIST_SIZE; i++) {
h[i].loudness = i / (double)HIST_GRAIN + ABS_THRES;
h[i].energy = ENERGY(h[i].loudness);
}
return h;
}
 
static av_cold int init(AVFilterContext *ctx)
{
EBUR128Context *ebur128 = ctx->priv;
AVFilterPad pad;
 
if (ebur128->loglevel != AV_LOG_INFO &&
ebur128->loglevel != AV_LOG_VERBOSE) {
if (ebur128->do_video || ebur128->metadata)
ebur128->loglevel = AV_LOG_VERBOSE;
else
ebur128->loglevel = AV_LOG_INFO;
}
 
// if meter is +9 scale, scale range is from -18 LU to +9 LU (or 3*9)
// if meter is +18 scale, scale range is from -36 LU to +18 LU (or 3*18)
ebur128->scale_range = 3 * ebur128->meter;
 
ebur128->i400.histogram = get_histogram();
ebur128->i3000.histogram = get_histogram();
if (!ebur128->i400.histogram || !ebur128->i3000.histogram)
return AVERROR(ENOMEM);
 
ebur128->integrated_loudness = ABS_THRES;
ebur128->loudness_range = 0;
 
/* insert output pads */
if (ebur128->do_video) {
pad = (AVFilterPad){
.name = av_strdup("out0"),
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_video_output,
};
if (!pad.name)
return AVERROR(ENOMEM);
ff_insert_outpad(ctx, 0, &pad);
}
pad = (AVFilterPad){
.name = av_asprintf("out%d", ebur128->do_video),
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_audio_output,
};
if (!pad.name)
return AVERROR(ENOMEM);
ff_insert_outpad(ctx, ebur128->do_video, &pad);
 
/* summary */
av_log(ctx, AV_LOG_VERBOSE, "EBU +%d scale\n", ebur128->meter);
 
return 0;
}
 
#define HIST_POS(power) (int)(((power) - ABS_THRES) * HIST_GRAIN)
 
/* loudness and power should be set such that loudness = -0.691 +
* 10*log10(power); we just avoid doing that computation twice */
static int gate_update(struct integrator *integ, double power,
double loudness, int gate_thres)
{
int ipower;
double relative_threshold;
int gate_hist_pos;
 
/* update powers histograms by incrementing current power count */
ipower = av_clip(HIST_POS(loudness), 0, HIST_SIZE - 1);
integ->histogram[ipower].count++;
 
/* compute relative threshold and get its position in the histogram */
integ->sum_kept_powers += power;
integ->nb_kept_powers++;
relative_threshold = integ->sum_kept_powers / integ->nb_kept_powers;
if (!relative_threshold)
relative_threshold = 1e-12;
integ->rel_threshold = LOUDNESS(relative_threshold) + gate_thres;
gate_hist_pos = av_clip(HIST_POS(integ->rel_threshold), 0, HIST_SIZE - 1);
 
return gate_hist_pos;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
int i, ch, idx_insample;
AVFilterContext *ctx = inlink->dst;
EBUR128Context *ebur128 = ctx->priv;
const int nb_channels = ebur128->nb_channels;
const int nb_samples = insamples->nb_samples;
const double *samples = (double *)insamples->data[0];
AVFrame *pic = ebur128->outpicref;
 
for (idx_insample = 0; idx_insample < nb_samples; idx_insample++) {
const int bin_id_400 = ebur128->i400.cache_pos;
const int bin_id_3000 = ebur128->i3000.cache_pos;
 
#define MOVE_TO_NEXT_CACHED_ENTRY(time) do { \
ebur128->i##time.cache_pos++; \
if (ebur128->i##time.cache_pos == I##time##_BINS) { \
ebur128->i##time.filled = 1; \
ebur128->i##time.cache_pos = 0; \
} \
} while (0)
 
MOVE_TO_NEXT_CACHED_ENTRY(400);
MOVE_TO_NEXT_CACHED_ENTRY(3000);
 
for (ch = 0; ch < nb_channels; ch++) {
double bin;
 
ebur128->x[ch * 3] = *samples++; // set X[i]
 
if (!ebur128->ch_weighting[ch])
continue;
 
/* Y[i] = X[i]*b0 + X[i-1]*b1 + X[i-2]*b2 - Y[i-1]*a1 - Y[i-2]*a2 */
#define FILTER(Y, X, name) do { \
double *dst = ebur128->Y + ch*3; \
double *src = ebur128->X + ch*3; \
dst[2] = dst[1]; \
dst[1] = dst[0]; \
dst[0] = src[0]*name##_B0 + src[1]*name##_B1 + src[2]*name##_B2 \
- dst[1]*name##_A1 - dst[2]*name##_A2; \
} while (0)
 
// TODO: merge both filters in one?
FILTER(y, x, PRE); // apply pre-filter
ebur128->x[ch * 3 + 2] = ebur128->x[ch * 3 + 1];
ebur128->x[ch * 3 + 1] = ebur128->x[ch * 3 ];
FILTER(z, y, RLB); // apply RLB-filter
 
bin = ebur128->z[ch * 3] * ebur128->z[ch * 3];
 
/* add the new value, and limit the sum to the cache size (400ms or 3s)
* by removing the oldest one */
ebur128->i400.sum [ch] = ebur128->i400.sum [ch] + bin - ebur128->i400.cache [ch][bin_id_400];
ebur128->i3000.sum[ch] = ebur128->i3000.sum[ch] + bin - ebur128->i3000.cache[ch][bin_id_3000];
 
/* override old cache entry with the new value */
ebur128->i400.cache [ch][bin_id_400 ] = bin;
ebur128->i3000.cache[ch][bin_id_3000] = bin;
}
 
/* For integrated loudness, gating blocks are 400ms long with 75%
* overlap (see BS.1770-2 p5), so a re-computation is needed every 100ms
* (4800 samples at 48kHz). */
if (++ebur128->sample_count == 4800) {
double loudness_400, loudness_3000;
double power_400 = 1e-12, power_3000 = 1e-12;
AVFilterLink *outlink = ctx->outputs[0];
const int64_t pts = insamples->pts +
av_rescale_q(idx_insample, (AVRational){ 1, inlink->sample_rate },
outlink->time_base);
 
ebur128->sample_count = 0;
 
#define COMPUTE_LOUDNESS(m, time) do { \
if (ebur128->i##time.filled) { \
/* weighting sum of the last <time> ms */ \
for (ch = 0; ch < nb_channels; ch++) \
power_##time += ebur128->ch_weighting[ch] * ebur128->i##time.sum[ch]; \
power_##time /= I##time##_BINS; \
} \
loudness_##time = LOUDNESS(power_##time); \
} while (0)
 
COMPUTE_LOUDNESS(M, 400);
COMPUTE_LOUDNESS(S, 3000);
 
/* Integrated loudness */
#define I_GATE_THRES -10 // initially defined as -8 LU in the first EBU standard
 
if (loudness_400 >= ABS_THRES) {
double integrated_sum = 0;
int nb_integrated = 0;
int gate_hist_pos = gate_update(&ebur128->i400, power_400,
loudness_400, I_GATE_THRES);
 
/* compute integrated loudness by summing the histogram values
* above the relative threshold */
for (i = gate_hist_pos; i < HIST_SIZE; i++) {
const int nb_v = ebur128->i400.histogram[i].count;
nb_integrated += nb_v;
integrated_sum += nb_v * ebur128->i400.histogram[i].energy;
}
if (nb_integrated)
ebur128->integrated_loudness = LOUDNESS(integrated_sum / nb_integrated);
}
 
/* LRA */
#define LRA_GATE_THRES -20
#define LRA_LOWER_PRC 10
#define LRA_HIGHER_PRC 95
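
/* In other words LRA is the spread between the 10th and 95th percentiles
 * of the gated short-term (3s) loudness distribution, per EBU Tech 3342. */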
 
/* XXX: example code in EBU 3342 is ">=" but formula in BS.1770
* specs is ">" */
if (loudness_3000 >= ABS_THRES) {
int nb_powers = 0;
int gate_hist_pos = gate_update(&ebur128->i3000, power_3000,
loudness_3000, LRA_GATE_THRES);
 
for (i = gate_hist_pos; i < HIST_SIZE; i++)
nb_powers += ebur128->i3000.histogram[i].count;
if (nb_powers) {
int n, nb_pow;
 
/* get lower loudness to consider */
n = 0;
nb_pow = LRA_LOWER_PRC * nb_powers / 100. + 0.5;
for (i = gate_hist_pos; i < HIST_SIZE; i++) {
n += ebur128->i3000.histogram[i].count;
if (n >= nb_pow) {
ebur128->lra_low = ebur128->i3000.histogram[i].loudness;
break;
}
}
 
/* get higher loudness to consider */
n = nb_powers;
nb_pow = LRA_HIGHER_PRC * nb_powers / 100. + 0.5;
for (i = HIST_SIZE - 1; i >= 0; i--) {
n -= ebur128->i3000.histogram[i].count;
if (n < nb_pow) {
ebur128->lra_high = ebur128->i3000.histogram[i].loudness;
break;
}
}
 
// XXX: show low & high on the graph?
ebur128->loudness_range = ebur128->lra_high - ebur128->lra_low;
}
}
 
#define LOG_FMT "M:%6.1f S:%6.1f I:%6.1f LUFS LRA:%6.1f LU"
 
/* push one video frame */
if (ebur128->do_video) {
int x, y, ret;
uint8_t *p;
 
const int y_loudness_lu_graph = lu_to_y(ebur128, loudness_3000 + 23);
const int y_loudness_lu_gauge = lu_to_y(ebur128, loudness_400 + 23);
 
/* draw the graph using the short-term loudness */
p = pic->data[0] + ebur128->graph.y*pic->linesize[0] + ebur128->graph.x*3;
for (y = 0; y < ebur128->graph.h; y++) {
const uint8_t *c = get_graph_color(ebur128, y_loudness_lu_graph, y);
 
memmove(p, p + 3, (ebur128->graph.w - 1) * 3);
memcpy(p + (ebur128->graph.w - 1) * 3, c, 3);
p += pic->linesize[0];
}
 
/* draw the gauge using the momentary loudness */
p = pic->data[0] + ebur128->gauge.y*pic->linesize[0] + ebur128->gauge.x*3;
for (y = 0; y < ebur128->gauge.h; y++) {
const uint8_t *c = get_graph_color(ebur128, y_loudness_lu_gauge, y);
 
for (x = 0; x < ebur128->gauge.w; x++)
memcpy(p + x*3, c, 3);
p += pic->linesize[0];
}
 
/* draw textual info */
drawtext(pic, PAD, PAD - PAD/2, FONT16, font_colors,
LOG_FMT " ", // padding to erase trailing characters
loudness_400, loudness_3000,
ebur128->integrated_loudness, ebur128->loudness_range);
 
/* set pts and push frame */
pic->pts = pts;
ret = ff_filter_frame(outlink, av_frame_clone(pic));
if (ret < 0)
return ret;
}
 
if (ebur128->metadata) { /* happens only once per filter_frame call */
char metabuf[128];
#define SET_META(name, var) do { \
snprintf(metabuf, sizeof(metabuf), "%.3f", var); \
av_dict_set(&insamples->metadata, "lavfi.r128." name, metabuf, 0); \
} while (0)
SET_META("M", loudness_400);
SET_META("S", loudness_3000);
SET_META("I", ebur128->integrated_loudness);
SET_META("LRA", ebur128->loudness_range);
SET_META("LRA.low", ebur128->lra_low);
SET_META("LRA.high", ebur128->lra_high);
}
 
av_log(ctx, ebur128->loglevel, "t: %-10s " LOG_FMT "\n",
av_ts2timestr(pts, &outlink->time_base),
loudness_400, loudness_3000,
ebur128->integrated_loudness, ebur128->loudness_range);
}
}
 
return ff_filter_frame(ctx->outputs[ebur128->do_video], insamples);
}
 
static int query_formats(AVFilterContext *ctx)
{
EBUR128Context *ebur128 = ctx->priv;
AVFilterFormats *formats;
AVFilterChannelLayouts *layouts;
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
 
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE };
static const int input_srate[] = {48000, -1}; // ITU-R BS.1770 provides coefficients only for 48kHz
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE };
 
/* set optional output video format */
if (ebur128->do_video) {
formats = ff_make_format_list(pix_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &outlink->in_formats);
outlink = ctx->outputs[1];
}
 
/* set input and output audio formats
* Note: ff_set_common_* functions are not used because they affect all the
* links, and thus break the video format negotiation */
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &inlink->out_formats);
ff_formats_ref(formats, &outlink->in_formats);
 
layouts = ff_all_channel_layouts();
if (!layouts)
return AVERROR(ENOMEM);
ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
 
formats = ff_make_format_list(input_srate);
if (!formats)
return AVERROR(ENOMEM);
ff_formats_ref(formats, &inlink->out_samplerates);
ff_formats_ref(formats, &outlink->in_samplerates);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
int i;
EBUR128Context *ebur128 = ctx->priv;
 
av_log(ctx, AV_LOG_INFO, "Summary:\n\n"
" Integrated loudness:\n"
" I: %5.1f LUFS\n"
" Threshold: %5.1f LUFS\n\n"
" Loudness range:\n"
" LRA: %5.1f LU\n"
" Threshold: %5.1f LUFS\n"
" LRA low: %5.1f LUFS\n"
" LRA high: %5.1f LUFS\n",
ebur128->integrated_loudness, ebur128->i400.rel_threshold,
ebur128->loudness_range, ebur128->i3000.rel_threshold,
ebur128->lra_low, ebur128->lra_high);
 
av_freep(&ebur128->y_line_ref);
av_freep(&ebur128->ch_weighting);
av_freep(&ebur128->i400.histogram);
av_freep(&ebur128->i3000.histogram);
for (i = 0; i < ebur128->nb_channels; i++) {
av_freep(&ebur128->i400.cache[i]);
av_freep(&ebur128->i3000.cache[i]);
}
for (i = 0; i < ctx->nb_outputs; i++)
av_freep(&ctx->output_pads[i].name);
av_frame_free(&ebur128->outpicref);
}
 
static const AVFilterPad ebur128_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.config_props = config_audio_input,
},
{ NULL }
};
 
AVFilter avfilter_af_ebur128 = {
.name = "ebur128",
.description = NULL_IF_CONFIG_SMALL("EBU R128 scanner."),
.priv_size = sizeof(EBUR128Context),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = ebur128_inputs,
.outputs = NULL,
.priv_class = &ebur128_class,
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/f_interleave.c
0,0 → 1,259
/*
* Copyright (c) 2013 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* audio and video interleaver
*/
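
/* A minimal usage sketch, assuming the ffmpeg command-line front-end and
 * two video inputs with increasing timestamps:
 *
 *   ffmpeg -i a.mkv -i b.mkv -filter_complex \
 *     "[0:v][1:v]interleave=nb_inputs=2[v]" -map "[v]" out.mkv
 *
 * Each call to push_frame() below forwards the queued frame with the
 * smallest pts, so the output is temporally interleaved. */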
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "bufferqueue.h"
#include "formats.h"
#include "internal.h"
#include "audio.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
int nb_inputs;
struct FFBufQueue *queues;
} InterleaveContext;
 
#define OFFSET(x) offsetof(InterleaveContext, x)
 
#define DEFINE_OPTIONS(filt_name, flags_) \
static const AVOption filt_name##_options[] = { \
{ "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
{ "n", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
{ NULL } \
}
 
inline static int push_frame(AVFilterContext *ctx)
{
InterleaveContext *s = ctx->priv;
AVFrame *frame;
int i, queue_idx = -1;
int64_t pts_min = INT64_MAX;
 
/* look for oldest frame */
for (i = 0; i < ctx->nb_inputs; i++) {
struct FFBufQueue *q = &s->queues[i];
 
if (!q->available && !ctx->inputs[i]->closed)
return 0;
if (q->available) {
frame = ff_bufqueue_peek(q, 0);
if (frame->pts < pts_min) {
pts_min = frame->pts;
queue_idx = i;
}
}
}
 
/* all inputs are closed */
if (queue_idx < 0)
return AVERROR_EOF;
 
frame = ff_bufqueue_get(&s->queues[queue_idx]);
av_log(ctx, AV_LOG_DEBUG, "queue:%d -> frame time:%f\n",
queue_idx, frame->pts * av_q2d(AV_TIME_BASE_Q));
return ff_filter_frame(ctx->outputs[0], frame);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
InterleaveContext *s = ctx->priv;
unsigned in_no = FF_INLINK_IDX(inlink);
 
if (frame->pts == AV_NOPTS_VALUE) {
av_log(ctx, AV_LOG_WARNING,
"NOPTS value for input frame cannot be accepted, frame discarded\n");
av_frame_free(&frame);
return AVERROR_INVALIDDATA;
}
 
/* queue frame */
frame->pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
av_log(ctx, AV_LOG_DEBUG, "frame pts:%f -> queue idx:%d available:%d\n",
frame->pts * av_q2d(AV_TIME_BASE_Q), in_no, s->queues[in_no].available);
ff_bufqueue_add(ctx, &s->queues[in_no], frame);
 
return push_frame(ctx);
}
 
static av_cold int init(AVFilterContext *ctx)
{
InterleaveContext *s = ctx->priv;
const AVFilterPad *outpad = &ctx->filter->outputs[0];
int i;
 
s->queues = av_calloc(s->nb_inputs, sizeof(s->queues[0]));
if (!s->queues)
return AVERROR(ENOMEM);
 
for (i = 0; i < s->nb_inputs; i++) {
AVFilterPad inpad = { 0 };
 
inpad.name = av_asprintf("input%d", i);
if (!inpad.name)
return AVERROR(ENOMEM);
inpad.type = outpad->type;
inpad.filter_frame = filter_frame;
 
switch (outpad->type) {
case AVMEDIA_TYPE_VIDEO:
inpad.get_video_buffer = ff_null_get_video_buffer; break;
case AVMEDIA_TYPE_AUDIO:
inpad.get_audio_buffer = ff_null_get_audio_buffer; break;
default:
av_assert0(0);
}
ff_insert_inpad(ctx, i, &inpad);
}
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
InterleaveContext *s = ctx->priv;
int i;
 
for (i = 0; i < ctx->nb_inputs; i++) {
ff_bufqueue_discard_all(&s->queues[i]);
av_freep(&s->queues[i]);
av_freep(&ctx->input_pads[i].name);
}
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink0 = ctx->inputs[0];
int i;
 
if (outlink->type == AVMEDIA_TYPE_VIDEO) {
outlink->time_base = AV_TIME_BASE_Q;
outlink->w = inlink0->w;
outlink->h = inlink0->h;
outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
outlink->format = inlink0->format;
outlink->frame_rate = (AVRational) {1, 0};
for (i = 1; i < ctx->nb_inputs; i++) {
AVFilterLink *inlink = ctx->inputs[i];
 
if (outlink->w != inlink->w ||
outlink->h != inlink->h ||
outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
av_log(ctx, AV_LOG_ERROR, "Parameters for input link %s "
"(size %dx%d, SAR %d:%d) do not match the corresponding "
"output link parameters (%dx%d, SAR %d:%d)\n",
ctx->input_pads[i].name, inlink->w, inlink->h,
inlink->sample_aspect_ratio.num,
inlink->sample_aspect_ratio.den,
outlink->w, outlink->h,
outlink->sample_aspect_ratio.num,
outlink->sample_aspect_ratio.den);
return AVERROR(EINVAL);
}
}
}
 
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
InterleaveContext *s = ctx->priv;
int i, ret;
 
for (i = 0; i < ctx->nb_inputs; i++) {
if (!s->queues[i].available && !ctx->inputs[i]->closed) {
ret = ff_request_frame(ctx->inputs[i]);
if (ret != AVERROR_EOF)
return ret;
}
}
 
return push_frame(ctx);
}
 
#if CONFIG_INTERLEAVE_FILTER
 
DEFINE_OPTIONS(interleave, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(interleave);
 
static const AVFilterPad interleave_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_interleave = {
.name = "interleave",
.description = NULL_IF_CONFIG_SMALL("Temporally interleave video inputs."),
.priv_size = sizeof(InterleaveContext),
.init = init,
.uninit = uninit,
.outputs = interleave_outputs,
.priv_class = &interleave_class,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
 
#endif
 
#if CONFIG_AINTERLEAVE_FILTER
 
DEFINE_OPTIONS(ainterleave, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(ainterleave);
 
static const AVFilterPad ainterleave_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_af_ainterleave = {
.name = "ainterleave",
.description = NULL_IF_CONFIG_SMALL("Temporally interleave audio inputs."),
.priv_size = sizeof(InterleaveContext),
.init = init,
.uninit = uninit,
.outputs = ainterleave_outputs,
.priv_class = &ainterleave_class,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
 
#endif
/contrib/sdk/sources/ffmpeg/libavfilter/f_perms.c
0,0 → 1,178
/*
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/lfg.h"
#include "libavutil/opt.h"
#include "libavutil/random_seed.h"
#include "audio.h"
#include "video.h"
 
enum mode {
MODE_NONE,
MODE_RO,
MODE_RW,
MODE_TOGGLE,
MODE_RANDOM,
NB_MODES
};
 
typedef struct {
const AVClass *class;
AVLFG lfg;
int64_t random_seed;
enum mode mode;
} PermsContext;
 
#define OFFSET(x) offsetof(PermsContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption options[] = {
{ "mode", "select permissions mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_NONE}, MODE_NONE, NB_MODES-1, FLAGS, "mode" },
{ "none", "do nothing", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_NONE}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "ro", "set all output frames read-only", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_RO}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "rw", "set all output frames writable", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_RW}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "toggle", "switch permissions", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_TOGGLE}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "random", "set permissions randomly", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_RANDOM}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "seed", "set the seed for the random mode", OFFSET(random_seed), AV_OPT_TYPE_INT64, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
{ NULL }
};
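
/* Usage sketch (relying only on the options declared above): inserting
 *   perms=mode=random:seed=42
 * in front of a filter forces a reproducible mix of read-only and
 * writable frames, which is handy for exercising both the in-place and
 * the copy code paths of the next filter in the chain. */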
 
static av_cold int init(AVFilterContext *ctx)
{
PermsContext *perms = ctx->priv;
 
if (perms->mode == MODE_RANDOM) {
uint32_t seed;
 
if (perms->random_seed == -1)
perms->random_seed = av_get_random_seed();
seed = perms->random_seed;
av_log(ctx, AV_LOG_INFO, "random seed: 0x%08x\n", seed);
av_lfg_init(&perms->lfg, seed);
}
 
return 0;
}
 
enum perm { RO, RW };
static const char *perm_str[2] = { "RO", "RW" };
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
int ret;
AVFilterContext *ctx = inlink->dst;
PermsContext *perms = ctx->priv;
AVFrame *out = frame;
enum perm in_perm = av_frame_is_writable(frame) ? RW : RO;
enum perm out_perm;
 
switch (perms->mode) {
case MODE_TOGGLE: out_perm = in_perm == RO ? RW : RO; break;
case MODE_RANDOM: out_perm = av_lfg_get(&perms->lfg) & 1 ? RW : RO; break;
case MODE_RO: out_perm = RO; break;
case MODE_RW: out_perm = RW; break;
default: out_perm = in_perm; break;
}
 
av_log(ctx, AV_LOG_VERBOSE, "%s -> %s%s\n",
perm_str[in_perm], perm_str[out_perm],
in_perm == out_perm ? " (no-op)" : "");
 
if (in_perm == RO && out_perm == RW) {
if ((ret = av_frame_make_writable(frame)) < 0)
return ret;
} else if (in_perm == RW && out_perm == RO) {
out = av_frame_clone(frame);
if (!out)
return AVERROR(ENOMEM);
}
 
ret = ff_filter_frame(ctx->outputs[0], out);
 
if (in_perm == RW && out_perm == RO)
av_frame_free(&frame);
return ret;
}
 
#if CONFIG_APERMS_FILTER
 
#define aperms_options options
AVFILTER_DEFINE_CLASS(aperms);
 
static const AVFilterPad aperms_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad aperms_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_aperms = {
.name = "aperms",
.description = NULL_IF_CONFIG_SMALL("Set permissions for the output audio frame."),
.init = init,
.priv_size = sizeof(PermsContext),
.inputs = aperms_inputs,
.outputs = aperms_outputs,
.priv_class = &aperms_class,
};
#endif /* CONFIG_APERMS_FILTER */
 
#if CONFIG_PERMS_FILTER
 
#define perms_options options
AVFILTER_DEFINE_CLASS(perms);
 
static const AVFilterPad perms_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad perms_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_perms = {
.name = "perms",
.description = NULL_IF_CONFIG_SMALL("Set permissions for the output video frame."),
.init = init,
.priv_size = sizeof(PermsContext),
.inputs = perms_inputs,
.outputs = perms_outputs,
.priv_class = &perms_class,
};
#endif /* CONFIG_PERMS_FILTER */
/contrib/sdk/sources/ffmpeg/libavfilter/f_select.c
0,0 → 1,533
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* filter for selecting which frames pass through the filterchain
*/
 
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/fifo.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
#if CONFIG_AVCODEC
#include "libavcodec/dsputil.h"
#endif
 
static const char *const var_names[] = {
"TB", ///< timebase
 
"pts", ///< original pts in the file of the frame
"start_pts", ///< first PTS in the stream, expressed in TB units
"prev_pts", ///< previous frame PTS
"prev_selected_pts", ///< previous selected frame PTS
 
"t", ///< first PTS in seconds
"start_t", ///< first PTS in the stream, expressed in seconds
"prev_t", ///< previous frame time
"prev_selected_t", ///< previously selected time
 
"pict_type", ///< the type of picture in the movie
"I",
"P",
"B",
"S",
"SI",
"SP",
"BI",
"PICT_TYPE_I",
"PICT_TYPE_P",
"PICT_TYPE_B",
"PICT_TYPE_S",
"PICT_TYPE_SI",
"PICT_TYPE_SP",
"PICT_TYPE_BI",
 
"interlace_type", ///< the frame interlace type
"PROGRESSIVE",
"TOPFIRST",
"BOTTOMFIRST",
 
"consumed_samples_n",///< number of samples consumed by the filter (only audio)
"samples_n", ///< number of samples in the current frame (only audio)
"sample_rate", ///< sample rate (only audio)
 
"n", ///< frame number (starting from zero)
"selected_n", ///< selected frame number (starting from zero)
"prev_selected_n", ///< number of the last selected frame
 
"key", ///< tell if the frame is a key frame
"pos", ///< original position in the file of the frame
 
"scene",
 
NULL
};
 
enum var_name {
VAR_TB,
 
VAR_PTS,
VAR_START_PTS,
VAR_PREV_PTS,
VAR_PREV_SELECTED_PTS,
 
VAR_T,
VAR_START_T,
VAR_PREV_T,
VAR_PREV_SELECTED_T,
 
VAR_PICT_TYPE,
VAR_I,
VAR_P,
VAR_B,
VAR_S,
VAR_SI,
VAR_SP,
VAR_BI,
VAR_PICT_TYPE_I,
VAR_PICT_TYPE_P,
VAR_PICT_TYPE_B,
VAR_PICT_TYPE_S,
VAR_PICT_TYPE_SI,
VAR_PICT_TYPE_SP,
VAR_PICT_TYPE_BI,
 
VAR_INTERLACE_TYPE,
VAR_INTERLACE_TYPE_P,
VAR_INTERLACE_TYPE_T,
VAR_INTERLACE_TYPE_B,
 
VAR_CONSUMED_SAMPLES_N,
VAR_SAMPLES_N,
VAR_SAMPLE_RATE,
 
VAR_N,
VAR_SELECTED_N,
VAR_PREV_SELECTED_N,
 
VAR_KEY,
VAR_POS,
 
VAR_SCENE,
 
VAR_VARS_NB
};
 
typedef struct {
const AVClass *class;
char *expr_str;
AVExpr *expr;
double var_values[VAR_VARS_NB];
int do_scene_detect; ///< 1 if the expression requires scene detection variables, 0 otherwise
#if CONFIG_AVCODEC
AVCodecContext *avctx; ///< codec context required for the DSPContext (scene detect only)
DSPContext c; ///< context providing optimized SAD methods (scene detect only)
double prev_mafd; ///< previous MAFD (scene detect only)
#endif
AVFrame *prev_picref; ///< previous frame (scene detect only)
double select;
int select_out; ///< mark the selected output pad index
int nb_outputs;
} SelectContext;
 
#define OFFSET(x) offsetof(SelectContext, x)
#define DEFINE_OPTIONS(filt_name, FLAGS) \
static const AVOption filt_name##_options[] = { \
{ "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
{ "e", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
{ "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
{ "n", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
{ NULL } \
}
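
/* Expression sketches built only from the constants and variables listed
 * above (shell quoting and filtergraph comma escaping left aside):
 *   select=eq(pict_type\,I)      pass only I-frames
 *   select=gt(scene\,0.4)        pass frames likely starting a new scene
 *   select=not(mod(n\,100))      pass one frame out of every 100
 * A zero result drops the frame, NaN or a negative value goes to the
 * first output, and a positive value N is routed to output
 * min(ceil(N)-1, nb_outputs-1), as implemented in select_frame() below. */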
 
static int request_frame(AVFilterLink *outlink);
 
static av_cold int init(AVFilterContext *ctx)
{
SelectContext *select = ctx->priv;
int i, ret;
 
if ((ret = av_expr_parse(&select->expr, select->expr_str,
var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
select->expr_str);
return ret;
}
select->do_scene_detect = !!strstr(select->expr_str, "scene");
 
for (i = 0; i < select->nb_outputs; i++) {
AVFilterPad pad = { 0 };
 
pad.name = av_asprintf("output%d", i);
if (!pad.name)
return AVERROR(ENOMEM);
pad.type = ctx->filter->inputs[0].type;
pad.request_frame = request_frame;
ff_insert_outpad(ctx, i, &pad);
}
 
return 0;
}
 
#define INTERLACE_TYPE_P 0
#define INTERLACE_TYPE_T 1
#define INTERLACE_TYPE_B 2
 
static int config_input(AVFilterLink *inlink)
{
SelectContext *select = inlink->dst->priv;
 
select->var_values[VAR_N] = 0.0;
select->var_values[VAR_SELECTED_N] = 0.0;
 
select->var_values[VAR_TB] = av_q2d(inlink->time_base);
 
select->var_values[VAR_PREV_PTS] = NAN;
select->var_values[VAR_PREV_SELECTED_PTS] = NAN;
select->var_values[VAR_PREV_SELECTED_T] = NAN;
select->var_values[VAR_PREV_T] = NAN;
select->var_values[VAR_START_PTS] = NAN;
select->var_values[VAR_START_T] = NAN;
 
select->var_values[VAR_I] = AV_PICTURE_TYPE_I;
select->var_values[VAR_P] = AV_PICTURE_TYPE_P;
select->var_values[VAR_B] = AV_PICTURE_TYPE_B;
select->var_values[VAR_SI] = AV_PICTURE_TYPE_SI;
select->var_values[VAR_SP] = AV_PICTURE_TYPE_SP;
select->var_values[VAR_BI] = AV_PICTURE_TYPE_BI;
select->var_values[VAR_PICT_TYPE_I] = AV_PICTURE_TYPE_I;
select->var_values[VAR_PICT_TYPE_P] = AV_PICTURE_TYPE_P;
select->var_values[VAR_PICT_TYPE_B] = AV_PICTURE_TYPE_B;
select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;
select->var_values[VAR_PICT_TYPE_BI] = AV_PICTURE_TYPE_BI;
 
select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;
 
select->var_values[VAR_PICT_TYPE] = NAN;
select->var_values[VAR_INTERLACE_TYPE] = NAN;
select->var_values[VAR_SCENE] = NAN;
select->var_values[VAR_CONSUMED_SAMPLES_N] = NAN;
select->var_values[VAR_SAMPLES_N] = NAN;
 
select->var_values[VAR_SAMPLE_RATE] =
inlink->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
 
#if CONFIG_AVCODEC
if (select->do_scene_detect) {
select->avctx = avcodec_alloc_context3(NULL);
if (!select->avctx)
return AVERROR(ENOMEM);
avpriv_dsputil_init(&select->c, select->avctx);
}
#endif
return 0;
}
 
#if CONFIG_AVCODEC
static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
{
double ret = 0;
SelectContext *select = ctx->priv;
AVFrame *prev_picref = select->prev_picref;
 
if (prev_picref &&
frame->height == prev_picref->height &&
frame->width == prev_picref->width &&
frame->linesize[0] == prev_picref->linesize[0]) {
int x, y, nb_sad = 0;
int64_t sad = 0;
double mafd, diff;
uint8_t *p1 = frame->data[0];
uint8_t *p2 = prev_picref->data[0];
const int linesize = frame->linesize[0];
 
for (y = 0; y < frame->height - 8; y += 8) {
for (x = 0; x < frame->width*3 - 8; x += 8) {
sad += select->c.sad[1](select, p1 + x, p2 + x,
linesize, 8);
nb_sad += 8 * 8;
}
p1 += 8 * linesize;
p2 += 8 * linesize;
}
emms_c();
mafd = nb_sad ? sad / nb_sad : 0;
diff = fabs(mafd - select->prev_mafd);
ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
select->prev_mafd = mafd;
av_frame_free(&prev_picref);
}
select->prev_picref = av_frame_clone(frame);
return ret;
}
#endif
 
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
 
static void select_frame(AVFilterContext *ctx, AVFrame *frame)
{
SelectContext *select = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
double res;
 
if (isnan(select->var_values[VAR_START_PTS]))
select->var_values[VAR_START_PTS] = TS2D(frame->pts);
if (isnan(select->var_values[VAR_START_T]))
select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
 
select->var_values[VAR_N ] = inlink->frame_count;
select->var_values[VAR_PTS] = TS2D(frame->pts);
select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
 
switch (inlink->type) {
case AVMEDIA_TYPE_AUDIO:
select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
break;
 
case AVMEDIA_TYPE_VIDEO:
select->var_values[VAR_INTERLACE_TYPE] =
!frame->interlaced_frame ? INTERLACE_TYPE_P :
frame->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
select->var_values[VAR_PICT_TYPE] = frame->pict_type;
#if CONFIG_AVCODEC
if (select->do_scene_detect) {
char buf[32];
select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
// TODO: document metadata
snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
}
#endif
break;
}
 
select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
av_log(inlink->dst, AV_LOG_DEBUG,
"n:%f pts:%f t:%f key:%d",
select->var_values[VAR_N],
select->var_values[VAR_PTS],
select->var_values[VAR_T],
(int)select->var_values[VAR_KEY]);
 
switch (inlink->type) {
case AVMEDIA_TYPE_VIDEO:
av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_P ? 'P' :
select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_T ? 'T' :
select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_B ? 'B' : '?',
av_get_picture_type_char(select->var_values[VAR_PICT_TYPE]),
select->var_values[VAR_SCENE]);
break;
case AVMEDIA_TYPE_AUDIO:
av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%d",
(int)select->var_values[VAR_SAMPLES_N],
(int)select->var_values[VAR_CONSUMED_SAMPLES_N]);
break;
}
 
if (res == 0) {
select->select_out = -1; /* drop */
} else if (isnan(res) || res < 0) {
select->select_out = 0; /* first output */
} else {
select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
}
 
av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);
 
if (res) {
select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
select->var_values[VAR_SELECTED_N] += 1.0;
if (inlink->type == AVMEDIA_TYPE_AUDIO)
select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
}
 
select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
select->var_values[VAR_PREV_T] = select->var_values[VAR_T];
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
SelectContext *select = ctx->priv;
 
select_frame(ctx, frame);
if (select->select)
return ff_filter_frame(ctx->outputs[select->select_out], frame);
 
av_frame_free(&frame);
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
SelectContext *select = ctx->priv;
AVFilterLink *inlink = outlink->src->inputs[0];
int out_no = FF_OUTLINK_IDX(outlink);
 
do {
int ret = ff_request_frame(inlink);
if (ret < 0)
return ret;
} while (select->select_out != out_no);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
SelectContext *select = ctx->priv;
int i;
 
av_expr_free(select->expr);
select->expr = NULL;
 
for (i = 0; i < ctx->nb_outputs; i++)
av_freep(&ctx->output_pads[i].name);
 
#if CONFIG_AVCODEC
if (select->do_scene_detect) {
av_frame_free(&select->prev_picref);
if (select->avctx) {
avcodec_close(select->avctx);
av_freep(&select->avctx);
}
}
#endif
}
 
static int query_formats(AVFilterContext *ctx)
{
SelectContext *select = ctx->priv;
 
if (!select->do_scene_detect) {
return ff_default_query_formats(ctx);
} else {
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}
return 0;
}
 
#if CONFIG_ASELECT_FILTER
 
DEFINE_OPTIONS(aselect, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(aselect);
 
static av_cold int aselect_init(AVFilterContext *ctx)
{
SelectContext *select = ctx->priv;
int ret;
 
if ((ret = init(ctx)) < 0)
return ret;
 
if (select->do_scene_detect) {
av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n");
return AVERROR(EINVAL);
}
 
return 0;
}
 
static const AVFilterPad avfilter_af_aselect_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
AVFilter avfilter_af_aselect = {
.name = "aselect",
.description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
.init = aselect_init,
.uninit = uninit,
.priv_size = sizeof(SelectContext),
.inputs = avfilter_af_aselect_inputs,
.priv_class = &aselect_class,
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_ASELECT_FILTER */
 
#if CONFIG_SELECT_FILTER
 
DEFINE_OPTIONS(select, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(select);
 
static av_cold int select_init(AVFilterContext *ctx)
{
SelectContext *select = ctx->priv;
int ret;
 
if ((ret = init(ctx)) < 0)
return ret;
 
if (select->do_scene_detect && !CONFIG_AVCODEC) {
av_log(ctx, AV_LOG_ERROR, "Scene detection is not available without libavcodec.\n");
return AVERROR(EINVAL);
}
 
return 0;
}
 
static const AVFilterPad avfilter_vf_select_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_select = {
.name = "select",
.description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
.init = select_init,
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(SelectContext),
.priv_class = &select_class,
.inputs = avfilter_vf_select_inputs,
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_SELECT_FILTER */
/contrib/sdk/sources/ffmpeg/libavfilter/f_sendcmd.c
0,0 → 1,576
/*
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* send commands filter
*/
 
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/file.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "internal.h"
#include "avfiltergraph.h"
#include "audio.h"
#include "video.h"
 
#define COMMAND_FLAG_ENTER 1
#define COMMAND_FLAG_LEAVE 2
 
static inline char *make_command_flags_str(AVBPrint *pbuf, int flags)
{
static const char * const flag_strings[] = { "enter", "leave" };
int i, is_first = 1;
 
av_bprint_init(pbuf, 0, AV_BPRINT_SIZE_AUTOMATIC);
for (i = 0; i < FF_ARRAY_ELEMS(flag_strings); i++) {
if (flags & 1<<i) {
if (!is_first)
av_bprint_chars(pbuf, '+', 1);
av_bprintf(pbuf, "%s", flag_strings[i]);
is_first = 0;
}
}
 
return pbuf->str;
}
 
typedef struct {
int flags;
char *target, *command, *arg;
int index;
} Command;
 
typedef struct {
int64_t start_ts; ///< start timestamp, expressed in microseconds
int64_t end_ts; ///< end timestamp, expressed in microseconds
int index; ///< unique index for these interval commands
Command *commands;
int nb_commands;
int enabled; ///< 1 while the current timestamp is inside this interval, 0 otherwise
} Interval;
 
typedef struct {
const AVClass *class;
Interval *intervals;
int nb_intervals;
 
char *commands_filename;
char *commands_str;
} SendCmdContext;
 
#define OFFSET(x) offsetof(SendCmdContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
{ "commands", "set commands", OFFSET(commands_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "c", "set commands", OFFSET(commands_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "filename", "set commands file", OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "f", "set commands file", OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ NULL }
};
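
/* Commands file sketch, matching the grammar parsed below ('#' starts a
 * comment, ',' separates commands, ';' terminates an interval; the
 * drawtext target and its "reinit" command are only illustrative):
 *
 *   # swap the displayed text in at 2s and out again at 6s
 *   2.0-6.0 [enter] drawtext reinit text=hello,
 *           [leave] drawtext reinit text=bye;
 */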
 
#define SPACES " \f\t\n\r"
 
static void skip_comments(const char **buf)
{
while (**buf) {
/* skip leading spaces */
*buf += strspn(*buf, SPACES);
if (**buf != '#')
break;
 
(*buf)++;
 
/* skip comment until the end of line */
*buf += strcspn(*buf, "\n");
if (**buf)
(*buf)++;
}
}
 
#define COMMAND_DELIMS " \f\t\n\r,;"
 
static int parse_command(Command *cmd, int cmd_count, int interval_count,
const char **buf, void *log_ctx)
{
int ret;
 
memset(cmd, 0, sizeof(Command));
cmd->index = cmd_count;
 
/* format: [FLAGS] target command arg */
*buf += strspn(*buf, SPACES);
 
/* parse flags */
if (**buf == '[') {
(*buf)++; /* skip "[" */
 
while (**buf) {
int len = strcspn(*buf, "|+]");
 
if (!strncmp(*buf, "enter", strlen("enter"))) cmd->flags |= COMMAND_FLAG_ENTER;
else if (!strncmp(*buf, "leave", strlen("leave"))) cmd->flags |= COMMAND_FLAG_LEAVE;
else {
char flag_buf[64];
av_strlcpy(flag_buf, *buf, sizeof(flag_buf));
av_log(log_ctx, AV_LOG_ERROR,
"Unknown flag '%s' in interval #%d, command #%d\n",
flag_buf, interval_count, cmd_count);
return AVERROR(EINVAL);
}
*buf += len;
if (**buf == ']')
break;
if (!strspn(*buf, "+|")) {
av_log(log_ctx, AV_LOG_ERROR,
"Invalid flags char '%c' in interval #%d, command #%d\n",
**buf, interval_count, cmd_count);
return AVERROR(EINVAL);
}
if (**buf)
(*buf)++;
}
 
if (**buf != ']') {
av_log(log_ctx, AV_LOG_ERROR,
"Missing flag terminator or extraneous data found at the end of flags "
"in interval #%d, command #%d\n", interval_count, cmd_count);
return AVERROR(EINVAL);
}
(*buf)++; /* skip "]" */
} else {
cmd->flags = COMMAND_FLAG_ENTER;
}
 
*buf += strspn(*buf, SPACES);
cmd->target = av_get_token(buf, COMMAND_DELIMS);
if (!cmd->target || !cmd->target[0]) {
av_log(log_ctx, AV_LOG_ERROR,
"No target specified in interval #%d, command #%d\n",
interval_count, cmd_count);
ret = AVERROR(EINVAL);
goto fail;
}
 
*buf += strspn(*buf, SPACES);
cmd->command = av_get_token(buf, COMMAND_DELIMS);
if (!cmd->command || !cmd->command[0]) {
av_log(log_ctx, AV_LOG_ERROR,
"No command specified in interval #%d, command #%d\n",
interval_count, cmd_count);
ret = AVERROR(EINVAL);
goto fail;
}
 
*buf += strspn(*buf, SPACES);
cmd->arg = av_get_token(buf, COMMAND_DELIMS);
 
return 1;
 
fail:
av_freep(&cmd->target);
av_freep(&cmd->command);
av_freep(&cmd->arg);
return ret;
}
 
static int parse_commands(Command **cmds, int *nb_cmds, int interval_count,
const char **buf, void *log_ctx)
{
int cmd_count = 0;
int ret, n = 0;
AVBPrint pbuf;
 
*cmds = NULL;
*nb_cmds = 0;
 
while (**buf) {
Command cmd;
 
if ((ret = parse_command(&cmd, cmd_count, interval_count, buf, log_ctx)) < 0)
return ret;
cmd_count++;
 
/* (re)allocate commands array if required */
if (*nb_cmds == n) {
n = FFMAX(16, 2*n); /* first allocation = 16, or double the number */
*cmds = av_realloc_f(*cmds, n, 2*sizeof(Command));
if (!*cmds) {
av_log(log_ctx, AV_LOG_ERROR,
"Could not (re)allocate command array\n");
return AVERROR(ENOMEM);
}
}
 
(*cmds)[(*nb_cmds)++] = cmd;
 
*buf += strspn(*buf, SPACES);
if (**buf && **buf != ';' && **buf != ',') {
av_log(log_ctx, AV_LOG_ERROR,
"Missing separator or extraneous data found at the end of "
"interval #%d, in command #%d\n",
interval_count, cmd_count);
av_log(log_ctx, AV_LOG_ERROR,
"Command was parsed as: flags:[%s] target:%s command:%s arg:%s\n",
make_command_flags_str(&pbuf, cmd.flags), cmd.target, cmd.command, cmd.arg);
return AVERROR(EINVAL);
}
if (**buf == ';')
break;
if (**buf == ',')
(*buf)++;
}
 
return 0;
}
 
#define DELIMS " \f\t\n\r,;"
 
static int parse_interval(Interval *interval, int interval_count,
const char **buf, void *log_ctx)
{
char *intervalstr;
int ret;
 
*buf += strspn(*buf, SPACES);
if (!**buf)
return 0;
 
/* reset data */
memset(interval, 0, sizeof(Interval));
interval->index = interval_count;
 
/* format: INTERVAL COMMANDS */
 
/* parse interval */
intervalstr = av_get_token(buf, DELIMS);
if (intervalstr && intervalstr[0]) {
char *start, *end;
 
start = av_strtok(intervalstr, "-", &end);
if ((ret = av_parse_time(&interval->start_ts, start, 1)) < 0) {
av_log(log_ctx, AV_LOG_ERROR,
"Invalid start time specification '%s' in interval #%d\n",
start, interval_count);
goto end;
}
 
if (end) {
if ((ret = av_parse_time(&interval->end_ts, end, 1)) < 0) {
av_log(log_ctx, AV_LOG_ERROR,
"Invalid end time specification '%s' in interval #%d\n",
end, interval_count);
goto end;
}
} else {
interval->end_ts = INT64_MAX;
}
if (interval->end_ts < interval->start_ts) {
av_log(log_ctx, AV_LOG_ERROR,
"Invalid end time '%s' in interval #%d: "
"cannot be lesser than start time '%s'\n",
end, interval_count, start);
ret = AVERROR(EINVAL);
goto end;
}
} else {
av_log(log_ctx, AV_LOG_ERROR,
"No interval specified for interval #%d\n", interval_count);
ret = AVERROR(EINVAL);
goto end;
}
 
/* parse commands */
ret = parse_commands(&interval->commands, &interval->nb_commands,
interval_count, buf, log_ctx);
 
end:
av_free(intervalstr);
return ret;
}
 
static int parse_intervals(Interval **intervals, int *nb_intervals,
const char *buf, void *log_ctx)
{
int interval_count = 0;
int ret, n = 0;
 
*intervals = NULL;
*nb_intervals = 0;
 
while (1) {
Interval interval;
 
skip_comments(&buf);
if (!(*buf))
break;
 
if ((ret = parse_interval(&interval, interval_count, &buf, log_ctx)) < 0)
return ret;
 
buf += strspn(buf, SPACES);
if (*buf) {
if (*buf != ';') {
av_log(log_ctx, AV_LOG_ERROR,
"Missing terminator or extraneous data found at the end of interval #%d\n",
interval_count);
return AVERROR(EINVAL);
}
buf++; /* skip ';' */
}
interval_count++;
 
/* (re)allocate commands array if required */
if (*nb_intervals == n) {
n = FFMAX(16, 2*n); /* first allocation = 16, or double the number */
*intervals = av_realloc_f(*intervals, n, 2*sizeof(Interval));
if (!*intervals) {
av_log(log_ctx, AV_LOG_ERROR,
"Could not (re)allocate intervals array\n");
return AVERROR(ENOMEM);
}
}
 
(*intervals)[(*nb_intervals)++] = interval;
}
 
return 0;
}
 
static int cmp_intervals(const void *a, const void *b)
{
const Interval *i1 = a;
const Interval *i2 = b;
int64_t ts_diff = i1->start_ts - i2->start_ts;
int ret;
 
ret = ts_diff > 0 ? 1 : ts_diff < 0 ? -1 : 0;
return ret == 0 ? i1->index - i2->index : ret;
}
 
static av_cold int init(AVFilterContext *ctx)
{
SendCmdContext *sendcmd = ctx->priv;
int ret, i, j;
 
if (sendcmd->commands_filename && sendcmd->commands_str) {
av_log(ctx, AV_LOG_ERROR,
"Only one of the filename or commands options must be specified\n");
return AVERROR(EINVAL);
}
 
if (sendcmd->commands_filename) {
uint8_t *file_buf, *buf;
size_t file_bufsize;
ret = av_file_map(sendcmd->commands_filename,
&file_buf, &file_bufsize, 0, ctx);
if (ret < 0)
return ret;
 
/* create a 0-terminated string based on the read file */
buf = av_malloc(file_bufsize + 1);
if (!buf) {
av_file_unmap(file_buf, file_bufsize);
return AVERROR(ENOMEM);
}
memcpy(buf, file_buf, file_bufsize);
buf[file_bufsize] = 0;
av_file_unmap(file_buf, file_bufsize);
sendcmd->commands_str = buf;
}
 
if ((ret = parse_intervals(&sendcmd->intervals, &sendcmd->nb_intervals,
sendcmd->commands_str, ctx)) < 0)
return ret;
 
qsort(sendcmd->intervals, sendcmd->nb_intervals, sizeof(Interval), cmp_intervals);
 
av_log(ctx, AV_LOG_DEBUG, "Parsed commands:\n");
for (i = 0; i < sendcmd->nb_intervals; i++) {
AVBPrint pbuf;
Interval *interval = &sendcmd->intervals[i];
av_log(ctx, AV_LOG_VERBOSE, "start_time:%f end_time:%f index:%d\n",
(double)interval->start_ts/1000000, (double)interval->end_ts/1000000, interval->index);
for (j = 0; j < interval->nb_commands; j++) {
Command *cmd = &interval->commands[j];
av_log(ctx, AV_LOG_VERBOSE,
" [%s] target:%s command:%s arg:%s index:%d\n",
make_command_flags_str(&pbuf, cmd->flags), cmd->target, cmd->command, cmd->arg, cmd->index);
}
}
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
SendCmdContext *sendcmd = ctx->priv;
int i, j;
 
for (i = 0; i < sendcmd->nb_intervals; i++) {
Interval *interval = &sendcmd->intervals[i];
for (j = 0; j < interval->nb_commands; j++) {
Command *cmd = &interval->commands[j];
av_free(cmd->target);
av_free(cmd->command);
av_free(cmd->arg);
}
av_free(interval->commands);
}
av_freep(&sendcmd->intervals);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
{
AVFilterContext *ctx = inlink->dst;
SendCmdContext *sendcmd = ctx->priv;
int64_t ts;
int i, j, ret;
 
if (ref->pts == AV_NOPTS_VALUE)
goto end;
 
ts = av_rescale_q(ref->pts, inlink->time_base, AV_TIME_BASE_Q);
 
#define WITHIN_INTERVAL(ts, start_ts, end_ts) ((ts) >= (start_ts) && (ts) < (end_ts))
 
for (i = 0; i < sendcmd->nb_intervals; i++) {
Interval *interval = &sendcmd->intervals[i];
int flags = 0;
 
if (!interval->enabled && WITHIN_INTERVAL(ts, interval->start_ts, interval->end_ts)) {
flags += COMMAND_FLAG_ENTER;
interval->enabled = 1;
}
if (interval->enabled && !WITHIN_INTERVAL(ts, interval->start_ts, interval->end_ts)) {
flags += COMMAND_FLAG_LEAVE;
interval->enabled = 0;
}
 
if (flags) {
AVBPrint pbuf;
av_log(ctx, AV_LOG_VERBOSE,
"[%s] interval #%d start_ts:%f end_ts:%f ts:%f\n",
make_command_flags_str(&pbuf, flags), interval->index,
(double)interval->start_ts/1000000, (double)interval->end_ts/1000000,
(double)ts/1000000);
 
for (j = 0; flags && j < interval->nb_commands; j++) {
Command *cmd = &interval->commands[j];
char buf[1024];
 
if (cmd->flags & flags) {
av_log(ctx, AV_LOG_VERBOSE,
"Processing command #%d target:%s command:%s arg:%s\n",
cmd->index, cmd->target, cmd->command, cmd->arg);
ret = avfilter_graph_send_command(inlink->graph,
cmd->target, cmd->command, cmd->arg,
buf, sizeof(buf),
AVFILTER_CMD_FLAG_ONE);
av_log(ctx, AV_LOG_VERBOSE,
"Command reply for command #%d: ret:%s res:%s\n",
cmd->index, av_err2str(ret), buf);
}
}
}
}
 
end:
switch (inlink->type) {
case AVMEDIA_TYPE_VIDEO:
case AVMEDIA_TYPE_AUDIO:
return ff_filter_frame(inlink->dst->outputs[0], ref);
}
 
return AVERROR(ENOSYS);
}
 
#if CONFIG_SENDCMD_FILTER
 
#define sendcmd_options options
AVFILTER_DEFINE_CLASS(sendcmd);
 
static const AVFilterPad sendcmd_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad sendcmd_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_sendcmd = {
.name = "sendcmd",
.description = NULL_IF_CONFIG_SMALL("Send commands to filters."),
.init = init,
.uninit = uninit,
.priv_size = sizeof(SendCmdContext),
.inputs = sendcmd_inputs,
.outputs = sendcmd_outputs,
.priv_class = &sendcmd_class,
};
 
#endif
 
#if CONFIG_ASENDCMD_FILTER
 
#define asendcmd_options options
AVFILTER_DEFINE_CLASS(asendcmd);
 
static const AVFilterPad asendcmd_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad asendcmd_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_asendcmd = {
.name = "asendcmd",
.description = NULL_IF_CONFIG_SMALL("Send commands to filters."),
.init = init,
.uninit = uninit,
.priv_size = sizeof(SendCmdContext),
.inputs = asendcmd_inputs,
.outputs = asendcmd_outputs,
.priv_class = &asendcmd_class,
};
 
#endif
/contrib/sdk/sources/ffmpeg/libavfilter/f_settb.c
0,0 → 1,187
/*
* Copyright (c) 2010 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Set timebase for the output link.
*/
 
#include <inttypes.h>
#include <stdio.h>
 
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/rational.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
#include "video.h"
 
static const char *const var_names[] = {
"AVTB", /* default timebase 1/AV_TIME_BASE */
"intb", /* input timebase */
"sr", /* sample rate */
NULL
};
 
enum var_name {
VAR_AVTB,
VAR_INTB,
VAR_SR,
VAR_VARS_NB
};
 
typedef struct {
const AVClass *class;
char *tb_expr;
double var_values[VAR_VARS_NB];
} SetTBContext;
 
#define OFFSET(x) offsetof(SetTBContext, x)
#define DEFINE_OPTIONS(filt_name, filt_type) \
static const AVOption filt_name##_options[] = { \
{ "expr", "set expression determining the output timebase", OFFSET(tb_expr), AV_OPT_TYPE_STRING, {.str="intb"}, \
.flags=AV_OPT_FLAG_##filt_type##_PARAM|AV_OPT_FLAG_FILTERING_PARAM }, \
{ "tb", "set expression determining the output timebase", OFFSET(tb_expr), AV_OPT_TYPE_STRING, {.str="intb"}, \
.flags=AV_OPT_FLAG_##filt_type##_PARAM|AV_OPT_FLAG_FILTERING_PARAM }, \
{ NULL } \
}
 
static int config_output_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
SetTBContext *settb = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
AVRational time_base;
int ret;
double res;
 
settb->var_values[VAR_AVTB] = av_q2d(AV_TIME_BASE_Q);
settb->var_values[VAR_INTB] = av_q2d(inlink->time_base);
settb->var_values[VAR_SR] = inlink->sample_rate;
 
outlink->w = inlink->w;
outlink->h = inlink->h;
 
if ((ret = av_expr_parse_and_eval(&res, settb->tb_expr, var_names, settb->var_values,
NULL, NULL, NULL, NULL, NULL, 0, NULL)) < 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid expression '%s' for timebase.\n", settb->tb_expr);
return ret;
}
time_base = av_d2q(res, INT_MAX);
if (time_base.num <= 0 || time_base.den <= 0) {
av_log(ctx, AV_LOG_ERROR,
"Invalid non-positive values for the timebase num:%d or den:%d.\n",
time_base.num, time_base.den);
return AVERROR(EINVAL);
}
 
outlink->time_base = time_base;
av_log(outlink->src, AV_LOG_VERBOSE, "tb:%d/%d -> tb:%d/%d\n",
inlink ->time_base.num, inlink ->time_base.den,
outlink->time_base.num, outlink->time_base.den);
 
return 0;
}
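/*
 * Worked example (illustrative): with the default tb_expr "intb" and an
 * input time base of 1/25, res evaluates to 0.04 and av_d2q() yields
 * 1/25 again; with tb_expr "AVTB" the output time base becomes
 * 1/AV_TIME_BASE, i.e. 1/1000000. Frame timestamps are rescaled
 * accordingly in filter_frame() below.
 */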
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
 
if (av_cmp_q(inlink->time_base, outlink->time_base)) {
int64_t orig_pts = frame->pts;
frame->pts = av_rescale_q(frame->pts, inlink->time_base, outlink->time_base);
av_log(ctx, AV_LOG_DEBUG, "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
inlink ->time_base.num, inlink ->time_base.den, orig_pts,
outlink->time_base.num, outlink->time_base.den, frame->pts);
}
 
return ff_filter_frame(outlink, frame);
}
 
#if CONFIG_SETTB_FILTER
 
DEFINE_OPTIONS(settb, VIDEO);
AVFILTER_DEFINE_CLASS(settb);
 
static const AVFilterPad avfilter_vf_settb_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_settb_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output_props,
},
{ NULL }
};
 
AVFilter avfilter_vf_settb = {
.name = "settb",
.description = NULL_IF_CONFIG_SMALL("Set timebase for the video output link."),
.priv_size = sizeof(SetTBContext),
.priv_class = &settb_class,
.inputs = avfilter_vf_settb_inputs,
.outputs = avfilter_vf_settb_outputs,
};
#endif
 
#if CONFIG_ASETTB_FILTER
 
DEFINE_OPTIONS(asettb, AUDIO);
AVFILTER_DEFINE_CLASS(asettb);
 
static const AVFilterPad avfilter_af_asettb_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_af_asettb_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output_props,
},
{ NULL }
};
 
AVFilter avfilter_af_asettb = {
.name = "asettb",
.description = NULL_IF_CONFIG_SMALL("Set timebase for the audio output link."),
.priv_size = sizeof(SetTBContext),
.inputs = avfilter_af_asettb_inputs,
.outputs = avfilter_af_asettb_outputs,
.priv_class = &asettb_class,
};
#endif
/contrib/sdk/sources/ffmpeg/libavfilter/f_zmq.c
0,0 → 1,275
/*
* Copyright (c) 2013 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* receive commands through libzeromq and broker them to filters
*/
 
#include <zmq.h>
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
#include "avfiltergraph.h"
#include "audio.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
void *zmq;
void *responder;
char *bind_address;
int command_count;
} ZMQContext;
 
#define OFFSET(x) offsetof(ZMQContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
{ "bind_address", "set bind address", OFFSET(bind_address), AV_OPT_TYPE_STRING, {.str = "tcp://*:5555"}, 0, 0, FLAGS },
{ "b", "set bind address", OFFSET(bind_address), AV_OPT_TYPE_STRING, {.str = "tcp://*:5555"}, 0, 0, FLAGS },
{ NULL }
};
 
static av_cold int init(AVFilterContext *ctx)
{
ZMQContext *zmq = ctx->priv;
 
zmq->zmq = zmq_ctx_new();
if (!zmq->zmq) {
av_log(ctx, AV_LOG_ERROR,
"Could not create ZMQ context: %s\n", zmq_strerror(errno));
return AVERROR_EXTERNAL;
}
 
zmq->responder = zmq_socket(zmq->zmq, ZMQ_REP);
if (!zmq->responder) {
av_log(ctx, AV_LOG_ERROR,
"Could not create ZMQ socket: %s\n", zmq_strerror(errno));
return AVERROR_EXTERNAL;
}
 
if (zmq_bind(zmq->responder, zmq->bind_address) == -1) {
av_log(ctx, AV_LOG_ERROR,
"Could not bind ZMQ socket to address '%s': %s\n",
zmq->bind_address, zmq_strerror(errno));
return AVERROR_EXTERNAL;
}
 
zmq->command_count = -1;
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
ZMQContext *zmq = ctx->priv;
 
zmq_close(zmq->responder);
zmq_ctx_destroy(zmq->zmq);
}
 
typedef struct {
char *target, *command, *arg;
} Command;
 
#define SPACES " \f\t\n\r"
 
static int parse_command(Command *cmd, const char *command_str, void *log_ctx)
{
const char **buf = &command_str;
 
cmd->target = av_get_token(buf, SPACES);
if (!cmd->target || !cmd->target[0]) {
av_log(log_ctx, AV_LOG_ERROR,
"No target specified in command '%s'\n", command_str);
return AVERROR(EINVAL);
}
 
cmd->command = av_get_token(buf, SPACES);
if (!cmd->command || !cmd->command[0]) {
av_log(log_ctx, AV_LOG_ERROR,
"No command specified in command '%s'\n", command_str);
return AVERROR(EINVAL);
}
 
cmd->arg = av_get_token(buf, SPACES);
return 0;
}
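/*
 * Protocol sketch (illustrative; the instance name below is assumed):
 * a client connects a ZMQ REQ socket to bind_address and sends one
 * message per command, e.g.
 *   Parsed_volume_0 volume 0.5
 * The reply built in filter_frame() below is "%d %s[\n%s]": the negated
 * AVERROR code (0 on success), its description, and any command response.
 */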
 
static int recv_msg(AVFilterContext *ctx, char **buf, int *buf_size)
{
ZMQContext *zmq = ctx->priv;
zmq_msg_t msg;
int ret = 0;
 
if (zmq_msg_init(&msg) == -1) {
av_log(ctx, AV_LOG_WARNING,
"Could not initialize receive message: %s\n", zmq_strerror(errno));
return AVERROR_EXTERNAL;
}
 
if (zmq_msg_recv(&msg, zmq->responder, ZMQ_DONTWAIT) == -1) {
if (errno != EAGAIN)
av_log(ctx, AV_LOG_WARNING,
"Could not receive message: %s\n", zmq_strerror(errno));
ret = AVERROR_EXTERNAL;
goto end;
}
 
*buf_size = zmq_msg_size(&msg) + 1;
*buf = av_malloc(*buf_size);
if (!*buf) {
ret = AVERROR(ENOMEM);
goto end;
}
memcpy(*buf, zmq_msg_data(&msg), *buf_size - 1); /* the terminating 0 is written below */
(*buf)[*buf_size-1] = 0;
 
end:
zmq_msg_close(&msg);
return ret;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
{
AVFilterContext *ctx = inlink->dst;
ZMQContext *zmq = ctx->priv;
 
while (1) {
char cmd_buf[1024];
char *recv_buf, *send_buf;
int recv_buf_size;
Command cmd = {0};
int ret;
 
/* receive command */
if (recv_msg(ctx, &recv_buf, &recv_buf_size) < 0)
break;
zmq->command_count++;
 
/* parse command */
if (parse_command(&cmd, recv_buf, ctx) < 0) {
av_log(ctx, AV_LOG_ERROR, "Could not parse command #%d\n", zmq->command_count);
goto end;
}
 
/* process command */
av_log(ctx, AV_LOG_VERBOSE,
"Processing command #%d target:%s command:%s arg:%s\n",
zmq->command_count, cmd.target, cmd.command, cmd.arg);
ret = avfilter_graph_send_command(inlink->graph,
cmd.target, cmd.command, cmd.arg,
cmd_buf, sizeof(cmd_buf),
AVFILTER_CMD_FLAG_ONE);
send_buf = av_asprintf("%d %s%s%s",
-ret, av_err2str(ret), cmd_buf[0] ? "\n" : "", cmd_buf);
if (!send_buf) {
ret = AVERROR(ENOMEM);
goto end;
}
av_log(ctx, AV_LOG_VERBOSE,
"Sending command reply for command #%d:\n%s\n",
zmq->command_count, send_buf);
if (zmq_send(zmq->responder, send_buf, strlen(send_buf), 0) == -1)
av_log(ctx, AV_LOG_ERROR, "Failed to send reply for command #%d: %s\n",
zmq->command_count, zmq_strerror(errno));
 
end:
av_freep(&send_buf);
av_freep(&recv_buf);
recv_buf_size = 0;
av_freep(&cmd.target);
av_freep(&cmd.command);
av_freep(&cmd.arg);
}
 
return ff_filter_frame(ctx->outputs[0], ref);
}
 
#if CONFIG_ZMQ_FILTER
 
#define zmq_options options
AVFILTER_DEFINE_CLASS(zmq);
 
static const AVFilterPad zmq_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad zmq_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_zmq = {
.name = "zmq",
.description = NULL_IF_CONFIG_SMALL("Receive commands through ZMQ and broker them to filters."),
.init = init,
.uninit = uninit,
.priv_size = sizeof(ZMQContext),
.inputs = zmq_inputs,
.outputs = zmq_outputs,
.priv_class = &zmq_class,
};
 
#endif
 
#if CONFIG_AZMQ_FILTER
 
#define azmq_options options
AVFILTER_DEFINE_CLASS(azmq);
 
static const AVFilterPad azmq_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad azmq_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_azmq = {
.name = "azmq",
.description = NULL_IF_CONFIG_SMALL("Receive commands through ZMQ and broker them to filters."),
.init = init,
.uninit = uninit,
.priv_size = sizeof(ZMQContext),
.inputs = azmq_inputs,
.outputs = azmq_outputs,
.priv_class = &azmq_class,
};
 
#endif
/contrib/sdk/sources/ffmpeg/libavfilter/fifo.c
0,0 → 1,313
/*
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* FIFO buffering filter
*/
 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
 
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
typedef struct Buf {
AVFrame *frame;
struct Buf *next;
} Buf;
 
typedef struct {
Buf root;
Buf *last; ///< last buffered frame
 
/**
* When a specific number of output samples is requested, the partial
* buffer is stored here
*/
AVFrame *out;
int allocated_samples; ///< number of samples out was allocated for
} FifoContext;
 
static av_cold int init(AVFilterContext *ctx)
{
FifoContext *fifo = ctx->priv;
fifo->last = &fifo->root;
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
FifoContext *fifo = ctx->priv;
Buf *buf, *tmp;
 
for (buf = fifo->root.next; buf; buf = tmp) {
tmp = buf->next;
av_frame_free(&buf->frame);
av_free(buf);
}
 
av_frame_free(&fifo->out);
}
 
static int add_to_queue(AVFilterLink *inlink, AVFrame *frame)
{
FifoContext *fifo = inlink->dst->priv;
 
fifo->last->next = av_mallocz(sizeof(Buf));
if (!fifo->last->next) {
av_frame_free(&frame);
return AVERROR(ENOMEM);
}
 
fifo->last = fifo->last->next;
fifo->last->frame = frame;
 
return 0;
}
 
static void queue_pop(FifoContext *s)
{
Buf *tmp = s->root.next->next;
if (s->last == s->root.next)
s->last = &s->root;
av_freep(&s->root.next);
s->root.next = tmp;
}
 
/**
 * Advance the frame's data pointers and its pts by offset samples.
 */
static void buffer_offset(AVFilterLink *link, AVFrame *frame,
int offset)
{
int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
int planar = av_sample_fmt_is_planar(link->format);
int planes = planar ? nb_channels : 1;
int block_align = av_get_bytes_per_sample(link->format) * (planar ? 1 : nb_channels);
int i;
 
av_assert0(frame->nb_samples > offset);
 
for (i = 0; i < planes; i++)
frame->extended_data[i] += block_align * offset;
if (frame->data != frame->extended_data)
memcpy(frame->data, frame->extended_data,
FFMIN(planes, FF_ARRAY_ELEMS(frame->data)) * sizeof(*frame->data));
frame->linesize[0] -= block_align*offset;
frame->nb_samples -= offset;
 
if (frame->pts != AV_NOPTS_VALUE) {
frame->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
link->time_base);
}
}
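/*
 * Worked example (illustrative): for packed s16 stereo, offset = 1024
 * advances each data pointer by 1024 * 2 bytes * 2 channels = 4096
 * bytes; with sample_rate = 48000 and a 1/48000 link time base the pts
 * advances by exactly 1024.
 */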
 
static int calc_ptr_alignment(AVFrame *frame)
{
int planes = av_sample_fmt_is_planar(frame->format) ?
av_get_channel_layout_nb_channels(frame->channel_layout) : 1;
int min_align = 128;
int p;
 
for (p = 0; p < planes; p++) {
int cur_align = 128;
while ((intptr_t)frame->extended_data[p] % cur_align)
cur_align >>= 1;
if (cur_align < min_align)
min_align = cur_align;
}
return min_align;
}
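/* e.g. a plane pointer ending in 0x60 is divisible by 32 but not by 64,
 * so cur_align settles at 32; the minimum across all planes is returned. */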
 
static int return_audio_frame(AVFilterContext *ctx)
{
AVFilterLink *link = ctx->outputs[0];
FifoContext *s = ctx->priv;
AVFrame *head = s->root.next ? s->root.next->frame : NULL;
AVFrame *out;
int ret;
 
/* if head is NULL then we're flushing the remaining samples in out */
if (!head && !s->out)
return AVERROR_EOF;
 
if (!s->out &&
head->nb_samples >= link->request_samples &&
calc_ptr_alignment(head) >= 32) {
if (head->nb_samples == link->request_samples) {
out = head;
queue_pop(s);
} else {
out = av_frame_clone(head);
if (!out)
return AVERROR(ENOMEM);
 
out->nb_samples = link->request_samples;
buffer_offset(link, head, link->request_samples);
}
} else {
int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
 
if (!s->out) {
s->out = ff_get_audio_buffer(link, link->request_samples);
if (!s->out)
return AVERROR(ENOMEM);
 
s->out->nb_samples = 0;
s->out->pts = head->pts;
s->allocated_samples = link->request_samples;
} else if (link->request_samples != s->allocated_samples) {
av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "
"buffer was returned.\n");
return AVERROR(EINVAL);
}
 
while (s->out->nb_samples < s->allocated_samples) {
int len;
 
if (!s->root.next) {
ret = ff_request_frame(ctx->inputs[0]);
if (ret == AVERROR_EOF) {
av_samples_set_silence(s->out->extended_data,
s->out->nb_samples,
s->allocated_samples -
s->out->nb_samples,
nb_channels, link->format);
s->out->nb_samples = s->allocated_samples;
break;
} else if (ret < 0)
return ret;
av_assert0(s->root.next); // If ff_request_frame() succeeded then we should have a frame
}
head = s->root.next->frame;
 
len = FFMIN(s->allocated_samples - s->out->nb_samples,
head->nb_samples);
 
av_samples_copy(s->out->extended_data, head->extended_data,
s->out->nb_samples, 0, len, nb_channels,
link->format);
s->out->nb_samples += len;
 
if (len == head->nb_samples) {
av_frame_free(&head);
queue_pop(s);
} else {
buffer_offset(link, head, len);
}
}
out = s->out;
s->out = NULL;
}
return ff_filter_frame(link, out);
}
 
static int request_frame(AVFilterLink *outlink)
{
FifoContext *fifo = outlink->src->priv;
int ret = 0;
 
if (!fifo->root.next) {
if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0) {
if (ret == AVERROR_EOF && outlink->request_samples)
return return_audio_frame(outlink->src);
return ret;
}
av_assert0(fifo->root.next);
}
 
if (outlink->request_samples) {
return return_audio_frame(outlink->src);
} else {
ret = ff_filter_frame(outlink, fifo->root.next->frame);
queue_pop(fifo);
}
 
return ret;
}
 
static const AVFilterPad avfilter_vf_fifo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = add_to_queue,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_fifo_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_fifo = {
.name = "fifo",
.description = NULL_IF_CONFIG_SMALL("Buffer input images and send them when they are requested."),
 
.init = init,
.uninit = uninit,
 
.priv_size = sizeof(FifoContext),
 
.inputs = avfilter_vf_fifo_inputs,
.outputs = avfilter_vf_fifo_outputs,
};
 
static const AVFilterPad avfilter_af_afifo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = add_to_queue,
},
{ NULL }
};
 
static const AVFilterPad avfilter_af_afifo_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_af_afifo = {
.name = "afifo",
.description = NULL_IF_CONFIG_SMALL("Buffer input frames and send them when they are requested."),
 
.init = init,
.uninit = uninit,
 
.priv_size = sizeof(FifoContext),
 
.inputs = avfilter_af_afifo_inputs,
.outputs = avfilter_af_afifo_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/filtfmts.c
0,0 → 1,136
/*
* Copyright (c) 2009 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdio.h>
 
#include "libavformat/avformat.h"
#include "libavutil/pixdesc.h"
#include "libavutil/samplefmt.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/formats.h"
 
static void print_formats(AVFilterContext *filter_ctx)
{
int i, j;
 
#define PRINT_FMTS(inout, outin, INOUT) \
for (i = 0; i < filter_ctx->nb_##inout##puts; i++) { \
if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_VIDEO) { \
AVFilterFormats *fmts = \
filter_ctx->inout##puts[i]->outin##_formats; \
for (j = 0; j < fmts->nb_formats; j++) \
if(av_get_pix_fmt_name(fmts->formats[j])) \
printf(#INOUT "PUT[%d] %s: fmt:%s\n", \
i, filter_ctx->filter->inout##puts[i].name, \
av_get_pix_fmt_name(fmts->formats[j])); \
} else if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_AUDIO) { \
AVFilterFormats *fmts; \
AVFilterChannelLayouts *layouts; \
\
fmts = filter_ctx->inout##puts[i]->outin##_formats; \
for (j = 0; j < fmts->nb_formats; j++) \
printf(#INOUT "PUT[%d] %s: fmt:%s\n", \
i, filter_ctx->filter->inout##puts[i].name, \
av_get_sample_fmt_name(fmts->formats[j])); \
\
layouts = filter_ctx->inout##puts[i]->outin##_channel_layouts; \
for (j = 0; j < layouts->nb_channel_layouts; j++) { \
char buf[256]; \
av_get_channel_layout_string(buf, sizeof(buf), -1, \
layouts->channel_layouts[j]); \
printf(#INOUT "PUT[%d] %s: chlayout:%s\n", \
i, filter_ctx->filter->inout##puts[i].name, buf); \
} \
} \
} \
 
PRINT_FMTS(in, out, IN);
PRINT_FMTS(out, in, OUT);
}
 
int main(int argc, char **argv)
{
AVFilter *filter;
AVFilterContext *filter_ctx;
AVFilterGraph *graph_ctx;
const char *filter_name;
const char *filter_args = NULL;
int i;
 
av_log_set_level(AV_LOG_DEBUG);
 
if (argc < 2) {
fprintf(stderr, "Missing filter name as argument\n");
return 1;
}
 
filter_name = argv[1];
if (argc > 2)
filter_args = argv[2];
 
/* allocate graph */
graph_ctx = avfilter_graph_alloc();
if (!graph_ctx)
return 1;
 
avfilter_register_all();
 
/* get a corresponding filter and open it */
if (!(filter = avfilter_get_by_name(filter_name))) {
fprintf(stderr, "Unrecognized filter with name '%s'\n", filter_name);
return 1;
}
 
/* open filter and add it to the graph */
if (!(filter_ctx = avfilter_graph_alloc_filter(graph_ctx, filter, filter_name))) {
fprintf(stderr, "Impossible to open filter with name '%s'\n",
filter_name);
return 1;
}
if (avfilter_init_str(filter_ctx, filter_args) < 0) {
fprintf(stderr, "Impossible to init filter '%s' with arguments '%s'\n",
filter_name, filter_args);
return 1;
}
 
/* create a link for each of the input pads */
for (i = 0; i < filter_ctx->nb_inputs; i++) {
AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
link->type = filter_ctx->filter->inputs[i].type;
filter_ctx->inputs[i] = link;
}
for (i = 0; i < filter_ctx->nb_outputs; i++) {
AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
link->type = filter_ctx->filter->outputs[i].type;
filter_ctx->outputs[i] = link;
}
 
if (filter->query_formats)
filter->query_formats(filter_ctx);
else
ff_default_query_formats(filter_ctx);
 
print_formats(filter_ctx);
 
avfilter_free(filter_ctx);
avfilter_graph_free(&graph_ctx);
fflush(stdout);
return 0;
}
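/*
 * Usage sketch (illustrative): built as a standalone test tool,
 *   ./filtfmts scale
 * prints one "INPUT[i] ..." / "OUTPUT[i] ..." line per supported format,
 * as produced by print_formats() above.
 */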
/contrib/sdk/sources/ffmpeg/libavfilter/formats.c
0,0 → 1,651
/*
* Filter layer - format negotiation
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "internal.h"
#include "formats.h"
 
#define KNOWN(l) (!FF_LAYOUT2COUNT(l)) /* for readability */
 
/**
* Add all refs from a to ret and destroy a.
*/
#define MERGE_REF(ret, a, fmts, type, fail) \
do { \
type ***tmp; \
int i; \
\
if (!(tmp = av_realloc(ret->refs, \
sizeof(*tmp) * (ret->refcount + a->refcount)))) \
goto fail; \
ret->refs = tmp; \
\
for (i = 0; i < a->refcount; i ++) { \
ret->refs[ret->refcount] = a->refs[i]; \
*ret->refs[ret->refcount++] = ret; \
} \
\
av_freep(&a->refs); \
av_freep(&a->fmts); \
av_freep(&a); \
} while (0)
 
/**
* Add all formats common for a and b to ret, copy the refs and destroy
* a and b.
*/
#define MERGE_FORMATS(ret, a, b, fmts, nb, type, fail) \
do { \
int i, j, k = 0, count = FFMIN(a->nb, b->nb); \
\
if (!(ret = av_mallocz(sizeof(*ret)))) \
goto fail; \
\
if (count) { \
if (!(ret->fmts = av_malloc(sizeof(*ret->fmts) * count))) \
goto fail; \
for (i = 0; i < a->nb; i++) \
for (j = 0; j < b->nb; j++) \
if (a->fmts[i] == b->fmts[j]) { \
if(k >= FFMIN(a->nb, b->nb)){ \
av_log(NULL, AV_LOG_ERROR, "Duplicate formats in avfilter_merge_formats() detected\n"); \
av_free(ret->fmts); \
av_free(ret); \
return NULL; \
} \
ret->fmts[k++] = a->fmts[i]; \
} \
} \
ret->nb = k; \
/* check that there was at least one common format */ \
if (!ret->nb) \
goto fail; \
\
MERGE_REF(ret, a, fmts, type, fail); \
MERGE_REF(ret, b, fmts, type, fail); \
} while (0)
 
AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b,
enum AVMediaType type)
{
AVFilterFormats *ret = NULL;
int i, j;
int alpha1=0, alpha2=0;
int chroma1=0, chroma2=0;
 
if (a == b)
return a;
 
/* Do not lose chroma or alpha in merging.
It happens if both lists have formats with chroma (resp. alpha), but
the only formats in common do not have it (e.g. YUV+gray vs.
RGB+gray): in that case, the merging would select the gray format,
possibly causing a lossy conversion elsewhere in the graph.
To avoid that, pretend that there are no common formats to force the
insertion of a conversion filter. */
if (type == AVMEDIA_TYPE_VIDEO)
for (i = 0; i < a->nb_formats; i++)
for (j = 0; j < b->nb_formats; j++) {
const AVPixFmtDescriptor *adesc = av_pix_fmt_desc_get(a->formats[i]);
const AVPixFmtDescriptor *bdesc = av_pix_fmt_desc_get(b->formats[j]);
alpha2 |= adesc->flags & bdesc->flags & AV_PIX_FMT_FLAG_ALPHA;
chroma2|= adesc->nb_components > 1 && bdesc->nb_components > 1;
if (a->formats[i] == b->formats[j]) {
alpha1 |= adesc->flags & AV_PIX_FMT_FLAG_ALPHA;
chroma1|= adesc->nb_components > 1;
}
}
 
// If chroma or alpha can be lost through merging then do not merge
if (alpha2 > alpha1 || chroma2 > chroma1)
return NULL;
 
MERGE_FORMATS(ret, a, b, formats, nb_formats, AVFilterFormats, fail);
 
return ret;
fail:
if (ret) {
av_freep(&ret->refs);
av_freep(&ret->formats);
}
av_freep(&ret);
return NULL;
}
 
AVFilterFormats *ff_merge_samplerates(AVFilterFormats *a,
AVFilterFormats *b)
{
AVFilterFormats *ret = NULL;
 
if (a == b) return a;
 
if (a->nb_formats && b->nb_formats) {
MERGE_FORMATS(ret, a, b, formats, nb_formats, AVFilterFormats, fail);
} else if (a->nb_formats) {
MERGE_REF(a, b, formats, AVFilterFormats, fail);
ret = a;
} else {
MERGE_REF(b, a, formats, AVFilterFormats, fail);
ret = b;
}
 
return ret;
fail:
if (ret) {
av_freep(&ret->refs);
av_freep(&ret->formats);
}
av_freep(&ret);
return NULL;
}
 
AVFilterChannelLayouts *ff_merge_channel_layouts(AVFilterChannelLayouts *a,
AVFilterChannelLayouts *b)
{
AVFilterChannelLayouts *ret = NULL;
unsigned a_all = a->all_layouts + a->all_counts;
unsigned b_all = b->all_layouts + b->all_counts;
int ret_max, ret_nb = 0, i, j, round;
 
if (a == b) return a;
 
/* Put the most generic set in a, to avoid doing everything twice */
if (a_all < b_all) {
FFSWAP(AVFilterChannelLayouts *, a, b);
FFSWAP(unsigned, a_all, b_all);
}
if (a_all) {
if (a_all == 1 && !b_all) {
/* keep only known layouts in b; works also for b_all = 1 */
for (i = j = 0; i < b->nb_channel_layouts; i++)
if (KNOWN(b->channel_layouts[i]))
b->channel_layouts[j++] = b->channel_layouts[i];
/* Not optimal: the unknown layouts of b may become known after
another merge. */
if (!j)
return NULL;
b->nb_channel_layouts = j;
}
MERGE_REF(b, a, channel_layouts, AVFilterChannelLayouts, fail);
return b;
}
 
ret_max = a->nb_channel_layouts + b->nb_channel_layouts;
if (!(ret = av_mallocz(sizeof(*ret))) ||
!(ret->channel_layouts = av_malloc(sizeof(*ret->channel_layouts) *
ret_max)))
goto fail;
 
/* a[known] intersect b[known] */
for (i = 0; i < a->nb_channel_layouts; i++) {
if (!KNOWN(a->channel_layouts[i]))
continue;
for (j = 0; j < b->nb_channel_layouts; j++) {
if (a->channel_layouts[i] == b->channel_layouts[j]) {
ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
a->channel_layouts[i] = b->channel_layouts[j] = 0;
}
}
}
/* 1st round: a[known] intersect b[generic]
2nd round: a[generic] intersect b[known] */
for (round = 0; round < 2; round++) {
for (i = 0; i < a->nb_channel_layouts; i++) {
uint64_t fmt = a->channel_layouts[i], bfmt;
if (!fmt || !KNOWN(fmt))
continue;
bfmt = FF_COUNT2LAYOUT(av_get_channel_layout_nb_channels(fmt));
for (j = 0; j < b->nb_channel_layouts; j++)
if (b->channel_layouts[j] == bfmt)
ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
}
/* 1st round: swap to prepare 2nd round; 2nd round: put it back */
FFSWAP(AVFilterChannelLayouts *, a, b);
}
/* a[generic] intersect b[generic] */
for (i = 0; i < a->nb_channel_layouts; i++) {
if (KNOWN(a->channel_layouts[i]))
continue;
for (j = 0; j < b->nb_channel_layouts; j++)
if (a->channel_layouts[i] == b->channel_layouts[j])
ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
}
 
ret->nb_channel_layouts = ret_nb;
if (!ret->nb_channel_layouts)
goto fail;
MERGE_REF(ret, a, channel_layouts, AVFilterChannelLayouts, fail);
MERGE_REF(ret, b, channel_layouts, AVFilterChannelLayouts, fail);
return ret;
 
fail:
if (ret) {
av_freep(&ret->refs);
av_freep(&ret->channel_layouts);
}
av_freep(&ret);
return NULL;
}
 
int ff_fmt_is_in(int fmt, const int *fmts)
{
const int *p;
 
for (p = fmts; *p != -1; p++) {
if (fmt == *p)
return 1;
}
return 0;
}
 
#define COPY_INT_LIST(list_copy, list, type) { \
int count = 0; \
if (list) \
for (count = 0; list[count] != -1; count++) \
; \
list_copy = av_calloc(count+1, sizeof(type)); \
if (list_copy) { \
memcpy(list_copy, list, sizeof(type) * count); \
list_copy[count] = -1; \
} \
}
 
#define MAKE_FORMAT_LIST(type, field, count_field) \
type *formats; \
int count = 0; \
if (fmts) \
for (count = 0; fmts[count] != -1; count++) \
; \
formats = av_mallocz(sizeof(*formats)); \
if (!formats) return NULL; \
formats->count_field = count; \
if (count) { \
formats->field = av_malloc(sizeof(*formats->field)*count); \
if (!formats->field) { \
av_free(formats); \
return NULL; \
} \
}
 
AVFilterFormats *ff_make_format_list(const int *fmts)
{
MAKE_FORMAT_LIST(AVFilterFormats, formats, nb_formats);
while (count--)
formats->formats[count] = fmts[count];
 
return formats;
}
 
AVFilterChannelLayouts *avfilter_make_format64_list(const int64_t *fmts)
{
MAKE_FORMAT_LIST(AVFilterChannelLayouts,
channel_layouts, nb_channel_layouts);
if (count)
memcpy(formats->channel_layouts, fmts,
sizeof(*formats->channel_layouts) * count);
 
return formats;
}
 
#define ADD_FORMAT(f, fmt, type, list, nb) \
do { \
type *fmts; \
\
if (!(*f) && !(*f = av_mallocz(sizeof(**f)))) \
return AVERROR(ENOMEM); \
\
fmts = av_realloc((*f)->list, \
sizeof(*(*f)->list) * ((*f)->nb + 1));\
if (!fmts) \
return AVERROR(ENOMEM); \
\
(*f)->list = fmts; \
(*f)->list[(*f)->nb++] = fmt; \
} while (0)
 
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
{
ADD_FORMAT(avff, fmt, int, formats, nb_formats);
return 0;
}
 
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
{
av_assert1(!(*l && (*l)->all_layouts));
ADD_FORMAT(l, channel_layout, uint64_t, channel_layouts, nb_channel_layouts);
return 0;
}
 
AVFilterFormats *ff_all_formats(enum AVMediaType type)
{
AVFilterFormats *ret = NULL;
int fmt;
int num_formats = type == AVMEDIA_TYPE_VIDEO ? AV_PIX_FMT_NB :
type == AVMEDIA_TYPE_AUDIO ? AV_SAMPLE_FMT_NB : 0;
 
for (fmt = 0; fmt < num_formats; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
if ((type != AVMEDIA_TYPE_VIDEO) ||
(type == AVMEDIA_TYPE_VIDEO && !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)))
ff_add_format(&ret, fmt);
}
 
return ret;
}
 
const int64_t avfilter_all_channel_layouts[] = {
#include "all_channel_layouts.inc"
-1
};
 
// AVFilterFormats *avfilter_make_all_channel_layouts(void)
// {
// return avfilter_make_format64_list(avfilter_all_channel_layouts);
// }
 
AVFilterFormats *ff_planar_sample_fmts(void)
{
AVFilterFormats *ret = NULL;
int fmt;
 
for (fmt = 0; fmt < AV_SAMPLE_FMT_NB; fmt++)
if (av_sample_fmt_is_planar(fmt))
ff_add_format(&ret, fmt);
 
return ret;
}
 
AVFilterFormats *ff_all_samplerates(void)
{
AVFilterFormats *ret = av_mallocz(sizeof(*ret));
return ret;
}
 
AVFilterChannelLayouts *ff_all_channel_layouts(void)
{
AVFilterChannelLayouts *ret = av_mallocz(sizeof(*ret));
if (!ret)
return NULL;
ret->all_layouts = 1;
return ret;
}
 
AVFilterChannelLayouts *ff_all_channel_counts(void)
{
AVFilterChannelLayouts *ret = av_mallocz(sizeof(*ret));
if (!ret)
return NULL;
ret->all_layouts = ret->all_counts = 1;
return ret;
}
 
#define FORMATS_REF(f, ref) \
do { \
*ref = f; \
f->refs = av_realloc(f->refs, sizeof(*f->refs) * ++f->refcount); \
f->refs[f->refcount-1] = ref; \
} while (0)
 
void ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
{
FORMATS_REF(f, ref);
}
 
void ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
{
FORMATS_REF(f, ref);
}
 
#define FIND_REF_INDEX(ref, idx) \
do { \
int i; \
for (i = 0; i < (*ref)->refcount; i ++) \
if((*ref)->refs[i] == ref) { \
idx = i; \
break; \
} \
} while (0)
 
#define FORMATS_UNREF(ref, list) \
do { \
int idx = -1; \
\
if (!*ref) \
return; \
\
FIND_REF_INDEX(ref, idx); \
\
if (idx >= 0) \
memmove((*ref)->refs + idx, (*ref)->refs + idx + 1, \
sizeof(*(*ref)->refs) * ((*ref)->refcount - idx - 1)); \
\
if(!--(*ref)->refcount) { \
av_free((*ref)->list); \
av_free((*ref)->refs); \
av_free(*ref); \
} \
*ref = NULL; \
} while (0)
 
void ff_formats_unref(AVFilterFormats **ref)
{
FORMATS_UNREF(ref, formats);
}
 
void ff_channel_layouts_unref(AVFilterChannelLayouts **ref)
{
FORMATS_UNREF(ref, channel_layouts);
}
 
#define FORMATS_CHANGEREF(oldref, newref) \
do { \
int idx = -1; \
\
FIND_REF_INDEX(oldref, idx); \
\
if (idx >= 0) { \
(*oldref)->refs[idx] = newref; \
*newref = *oldref; \
*oldref = NULL; \
} \
} while (0)
 
void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref,
AVFilterChannelLayouts **newref)
{
FORMATS_CHANGEREF(oldref, newref);
}
 
void ff_formats_changeref(AVFilterFormats **oldref, AVFilterFormats **newref)
{
FORMATS_CHANGEREF(oldref, newref);
}
 
#define SET_COMMON_FORMATS(ctx, fmts, in_fmts, out_fmts, ref, list) \
{ \
int count = 0, i; \
\
for (i = 0; i < ctx->nb_inputs; i++) { \
if (ctx->inputs[i] && !ctx->inputs[i]->out_fmts) { \
ref(fmts, &ctx->inputs[i]->out_fmts); \
count++; \
} \
} \
for (i = 0; i < ctx->nb_outputs; i++) { \
if (ctx->outputs[i] && !ctx->outputs[i]->in_fmts) { \
ref(fmts, &ctx->outputs[i]->in_fmts); \
count++; \
} \
} \
\
if (!count) { \
av_freep(&fmts->list); \
av_freep(&fmts->refs); \
av_freep(&fmts); \
} \
}
 
void ff_set_common_channel_layouts(AVFilterContext *ctx,
AVFilterChannelLayouts *layouts)
{
SET_COMMON_FORMATS(ctx, layouts, in_channel_layouts, out_channel_layouts,
ff_channel_layouts_ref, channel_layouts);
}
 
void ff_set_common_samplerates(AVFilterContext *ctx,
AVFilterFormats *samplerates)
{
SET_COMMON_FORMATS(ctx, samplerates, in_samplerates, out_samplerates,
ff_formats_ref, formats);
}
 
/**
* A helper for query_formats() which sets all links to the same list of
* formats. If there are no links hooked to this filter, the list of formats is
* freed.
*/
void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
{
SET_COMMON_FORMATS(ctx, formats, in_formats, out_formats,
ff_formats_ref, formats);
}
 
static int default_query_formats_common(AVFilterContext *ctx,
AVFilterChannelLayouts *(layouts)(void))
{
enum AVMediaType type = ctx->inputs && ctx->inputs [0] ? ctx->inputs [0]->type :
ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
AVMEDIA_TYPE_VIDEO;
 
ff_set_common_formats(ctx, ff_all_formats(type));
if (type == AVMEDIA_TYPE_AUDIO) {
ff_set_common_channel_layouts(ctx, layouts());
ff_set_common_samplerates(ctx, ff_all_samplerates());
}
 
return 0;
}
 
int ff_default_query_formats(AVFilterContext *ctx)
{
return default_query_formats_common(ctx, ff_all_channel_layouts);
}
 
int ff_query_formats_all(AVFilterContext *ctx)
{
return default_query_formats_common(ctx, ff_all_channel_counts);
}
 
/* internal functions for parsing audio format arguments */
 
int ff_parse_pixel_format(enum AVPixelFormat *ret, const char *arg, void *log_ctx)
{
char *tail;
int pix_fmt = av_get_pix_fmt(arg);
if (pix_fmt == AV_PIX_FMT_NONE) {
pix_fmt = strtol(arg, &tail, 0);
if (*tail || (unsigned)pix_fmt >= AV_PIX_FMT_NB) {
av_log(log_ctx, AV_LOG_ERROR, "Invalid pixel format '%s'\n", arg);
return AVERROR(EINVAL);
}
}
*ret = pix_fmt;
return 0;
}
 
int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx)
{
char *tail;
int sfmt = av_get_sample_fmt(arg);
if (sfmt == AV_SAMPLE_FMT_NONE) {
sfmt = strtol(arg, &tail, 0);
if (*tail || (unsigned)sfmt >= AV_SAMPLE_FMT_NB) {
av_log(log_ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n", arg);
return AVERROR(EINVAL);
}
}
*ret = sfmt;
return 0;
}
 
int ff_parse_time_base(AVRational *ret, const char *arg, void *log_ctx)
{
AVRational r;
if (av_parse_ratio(&r, arg, INT_MAX, 0, log_ctx) < 0 || r.num <= 0 || r.den <= 0) {
av_log(log_ctx, AV_LOG_ERROR, "Invalid time base '%s'\n", arg);
return AVERROR(EINVAL);
}
*ret = r;
return 0;
}
 
int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx)
{
char *tail;
double srate = av_strtod(arg, &tail);
if (*tail || srate < 1 || (int)srate != srate || srate > INT_MAX) {
av_log(log_ctx, AV_LOG_ERROR, "Invalid sample rate '%s'\n", arg);
return AVERROR(EINVAL);
}
*ret = srate;
return 0;
}
 
int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx)
{
char *tail;
int64_t chlayout = av_get_channel_layout(arg);
if (chlayout == 0) {
chlayout = strtol(arg, &tail, 10);
if (*tail || chlayout == 0) {
av_log(log_ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n", arg);
return AVERROR(EINVAL);
}
}
*ret = chlayout;
return 0;
}
 
#ifdef TEST
 
#undef printf
 
int main(void)
{
const int64_t *cl;
char buf[512];
 
for (cl = avfilter_all_channel_layouts; *cl != -1; cl++) {
av_get_channel_layout_string(buf, sizeof(buf), -1, *cl);
printf("%s\n", buf);
}
 
return 0;
}
 
#endif
 
/contrib/sdk/sources/ffmpeg/libavfilter/formats.h
0,0 → 1,270
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_FORMATS_H
#define AVFILTER_FORMATS_H
 
#include "avfilter.h"
 
/**
* A list of supported formats for one end of a filter link. This is used
* during the format negotiation process to try to pick the best format to
* use to minimize the number of necessary conversions. Each filter gives a
* list of the formats supported by each input and output pad. The list
* given for each pad need not be distinct - they may be references to the
* same list of formats, as is often the case when a filter supports multiple
* formats but always outputs the same format it receives as input.
*
* In this way, a list of possible input formats and a list of possible
* output formats are associated with each link. When a set of formats is
* negotiated over a link, the input and output lists are merged to form a
* new list containing only the common elements of each list. In the case
* that there were no common elements, a format conversion is necessary.
* Otherwise, the lists are merged, and all other links which reference
* either of the format lists involved in the merge are also affected.
*
* For example, consider the filter chain:
* filter (a) --> (b) filter (b) --> (c) filter
*
* where the letters in parenthesis indicate a list of formats supported on
* the input or output of the link. Suppose the lists are as follows:
* (a) = {A, B}
* (b) = {A, B, C}
* (c) = {B, C}
*
* First, the first link's lists are merged, yielding:
* filter (a) --> (a) filter (a) --> (c) filter
*
* Notice that format list (b) now refers to the same list as filter list (a).
* Next, the lists for the second link are merged, yielding:
* filter (a) --> (a) filter (a) --> (a) filter
*
* where (a) = {B}.
*
* Unfortunately, when the format lists at the two ends of a link are merged,
* we must ensure that all links which reference either pre-merge format list
* get updated as well. Therefore, we have the format list structure store a
* pointer to each of the pointers to itself.
*/
struct AVFilterFormats {
unsigned nb_formats; ///< number of formats
int *formats; ///< list of media formats
 
unsigned refcount; ///< number of references to this list
struct AVFilterFormats ***refs; ///< references to this list
};
 
/**
* A list of supported channel layouts.
*
* The list works the same as AVFilterFormats, except for the following
* differences:
* - A list with all_layouts = 1 means all channel layouts with a known
* disposition; nb_channel_layouts must then be 0.
* - A list with all_counts = 1 means all channel counts, with a known or
* unknown disposition; nb_channel_layouts must then be 0 and all_layouts 1.
* - The list must not contain a layout with a known disposition and a
* channel count with unknown disposition with the same number of channels
* (e.g. AV_CH_LAYOUT_STEREO and FF_COUNT2LAYOUT(2)).
*/
typedef struct AVFilterChannelLayouts {
uint64_t *channel_layouts; ///< list of channel layouts
int nb_channel_layouts; ///< number of channel layouts
char all_layouts; ///< accept any known channel layout
char all_counts; ///< accept any channel layout or count
 
unsigned refcount; ///< number of references to this list
struct AVFilterChannelLayouts ***refs; ///< references to this list
} AVFilterChannelLayouts;
 
/**
* Encode a channel count as a channel layout.
* FF_COUNT2LAYOUT(c) means any channel layout with c channels, with a known
* or unknown disposition.
* The result is only valid inside AVFilterChannelLayouts and immediately
* related functions.
*/
#define FF_COUNT2LAYOUT(c) (0x8000000000000000ULL | (c))
 
/**
* Decode a channel count encoded as a channel layout.
* Return 0 if the channel layout was a real one.
*/
#define FF_LAYOUT2COUNT(l) (((l) & 0x8000000000000000ULL) ? \
(int)((l) & 0x7FFFFFFF) : 0)
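/*
 * Worked example (illustrative):
 *   FF_COUNT2LAYOUT(2)                   = 0x8000000000000002
 *   FF_LAYOUT2COUNT(FF_COUNT2LAYOUT(2))  = 2
 *   FF_LAYOUT2COUNT(AV_CH_LAYOUT_STEREO) = 0 (a real layout)
 */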
 
/**
* Return a channel layouts/samplerates list which contains the intersection of
* the layouts/samplerates of a and b. Also, all the references of a, all the
* references of b, and a and b themselves will be deallocated.
*
* If a and b do not share any common elements, neither is modified, and NULL
* is returned.
*/
AVFilterChannelLayouts *ff_merge_channel_layouts(AVFilterChannelLayouts *a,
AVFilterChannelLayouts *b);
AVFilterFormats *ff_merge_samplerates(AVFilterFormats *a,
AVFilterFormats *b);
 
/**
* Construct an empty AVFilterChannelLayouts/AVFilterFormats struct --
* representing any channel layout (with known disposition)/sample rate.
*/
AVFilterChannelLayouts *ff_all_channel_layouts(void);
AVFilterFormats *ff_all_samplerates(void);
 
/**
* Construct an AVFilterChannelLayouts coding for any channel layout, with
* known or unknown disposition.
*/
AVFilterChannelLayouts *ff_all_channel_counts(void);
 
AVFilterChannelLayouts *avfilter_make_format64_list(const int64_t *fmts);
 
 
/**
* A helper for query_formats() which sets all links to the same list of channel
* layouts/sample rates. If there are no links hooked to this filter, the list
* is freed.
*/
void ff_set_common_channel_layouts(AVFilterContext *ctx,
AVFilterChannelLayouts *layouts);
void ff_set_common_samplerates(AVFilterContext *ctx,
AVFilterFormats *samplerates);
 
/**
* A helper for query_formats() which sets all links to the same list of
* formats. If there are no links hooked to this filter, the list of formats is
* freed.
*/
void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats);
 
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout);
 
/**
* Add *ref as a new reference to f.
*/
void ff_channel_layouts_ref(AVFilterChannelLayouts *f,
AVFilterChannelLayouts **ref);
 
/**
* Remove a reference to a channel layouts list.
*/
void ff_channel_layouts_unref(AVFilterChannelLayouts **ref);
 
void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref,
AVFilterChannelLayouts **newref);
 
int ff_default_query_formats(AVFilterContext *ctx);
 
/**
* Set the formats list to all existing formats.
* This function behaves like ff_default_query_formats(), except it also
* accepts channel layouts with unknown disposition. It should only be used
* with audio filters.
*/
int ff_query_formats_all(AVFilterContext *ctx);
 
 
/**
* Create a list of supported formats. This is intended for use in
* AVFilter->query_formats().
*
* @param fmts list of media formats, terminated by -1
* @return the format list, with no existing references
*/
AVFilterFormats *ff_make_format_list(const int *fmts);
 
/**
* Add fmt to the list of media formats contained in *avff.
* If *avff is NULL the function allocates the filter formats struct
* and puts its pointer in *avff.
*
* @return a non-negative value in case of success, or a negative
* value corresponding to an AVERROR code in case of error
*/
int ff_add_format(AVFilterFormats **avff, int64_t fmt);
 
/**
* Return a list of all formats supported by FFmpeg for the given media type.
*/
AVFilterFormats *ff_all_formats(enum AVMediaType type);
 
/**
* Construct a formats list containing all planar sample formats.
*/
AVFilterFormats *ff_planar_sample_fmts(void);
 
/**
* Return a format list which contains the intersection of the formats of
* a and b. Also, all the references of a, all the references of b, and
* a and b themselves will be deallocated.
*
* If a and b do not share any common formats, neither is modified, and NULL
* is returned.
*/
AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b,
enum AVMediaType type);
 
/**
* Add *ref as a new reference to formats.
* That is the pointers will point like in the ascii art below:
* ________
* |formats |<--------.
* | ____ | ____|___________________
* | |refs| | | __|_
* | |* * | | | | | | AVFilterLink
* | |* *--------->|*ref|
* | |____| | | |____|
* |________| |________________________
*/
void ff_formats_ref(AVFilterFormats *formats, AVFilterFormats **ref);
 
/**
* If *ref is non-NULL, remove *ref as a reference to the format list
* it currently points to, deallocates that list if this was the last
* reference, and sets *ref to NULL.
*
* Before After
* ________ ________ NULL
* |formats |<--------. |formats | ^
* | ____ | ____|________________ | ____ | ____|________________
* | |refs| | | __|_ | |refs| | | __|_
* | |* * | | | | | | AVFilterLink | |* * | | | | | | AVFilterLink
* | |* *--------->|*ref| | |* | | | |*ref|
* | |____| | | |____| | |____| | | |____|
* |________| |_____________________ |________| |_____________________
*/
void ff_formats_unref(AVFilterFormats **ref);
 
/**
*
* Before After
* ________ ________
* |formats |<---------. |formats |<---------.
* | ____ | ___|___ | ____ | ___|___
* | |refs| | | | | | |refs| | | | | NULL
* | |* *--------->|*oldref| | |* *--------->|*newref| ^
* | |* * | | |_______| | |* * | | |_______| ___|___
* | |____| | | |____| | | | |
* |________| |________| |*oldref|
* |_______|
*/
void ff_formats_changeref(AVFilterFormats **oldref, AVFilterFormats **newref);
 
#endif /* AVFILTER_FORMATS_H */
/contrib/sdk/sources/ffmpeg/libavfilter/framesync.c
0,0 → 1,329
/*
* Copyright (c) 2013 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "avfilter.h"
#include "bufferqueue.h"
#include "framesync.h"
#include "internal.h"
 
#define OFFSET(member) offsetof(FFFrameSync, member)
 
static const char *framesync_name(void *ptr)
{
return "framesync";
}
 
static const AVClass framesync_class = {
.version = LIBAVUTIL_VERSION_INT,
.class_name = "framesync",
.item_name = framesync_name,
.category = AV_CLASS_CATEGORY_FILTER,
.option = NULL,
.parent_log_context_offset = OFFSET(parent),
};
 
enum {
STATE_BOF,
STATE_RUN,
STATE_EOF,
};
 
void ff_framesync_init(FFFrameSync *fs, void *parent, unsigned nb_in)
{
fs->class = &framesync_class;
fs->parent = parent;
fs->nb_in = nb_in;
}
 
static void framesync_sync_level_update(FFFrameSync *fs)
{
unsigned i, level = 0;
 
for (i = 0; i < fs->nb_in; i++)
if (fs->in[i].state != STATE_EOF)
level = FFMAX(level, fs->in[i].sync);
av_assert0(level <= fs->sync_level);
if (level < fs->sync_level)
av_log(fs, AV_LOG_VERBOSE, "Sync level %u\n", level);
if (level)
fs->sync_level = level;
else
fs->eof = 1;
}
 
int ff_framesync_configure(FFFrameSync *fs)
{
unsigned i;
int64_t gcd, lcm;
 
if (!fs->time_base.num) {
for (i = 0; i < fs->nb_in; i++) {
if (fs->in[i].sync) {
if (fs->time_base.num) {
gcd = av_gcd(fs->time_base.den, fs->in[i].time_base.den);
lcm = (fs->time_base.den / gcd) * fs->in[i].time_base.den;
if (lcm < AV_TIME_BASE / 2) {
fs->time_base.den = lcm;
fs->time_base.num = av_gcd(fs->time_base.num,
fs->in[i].time_base.num);
} else {
fs->time_base.num = 1;
fs->time_base.den = AV_TIME_BASE;
break;
}
} else {
fs->time_base = fs->in[i].time_base;
}
}
}
if (!fs->time_base.num) {
av_log(fs, AV_LOG_ERROR, "Impossible to set time base\n");
return AVERROR(EINVAL);
}
av_log(fs, AV_LOG_VERBOSE, "Selected %d/%d time base\n",
fs->time_base.num, fs->time_base.den);
}
 
for (i = 0; i < fs->nb_in; i++)
fs->in[i].pts = fs->in[i].pts_next = AV_NOPTS_VALUE;
fs->sync_level = UINT_MAX;
framesync_sync_level_update(fs);
 
return 0;
}
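/*
 * Worked example (illustrative): two sync inputs with time bases 1/30
 * and 1/25 give gcd = 5 and lcm = (30/5)*25 = 150 < AV_TIME_BASE/2, so
 * the loop above selects a common time base of 1/150.
 */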
 
static void framesync_advance(FFFrameSync *fs)
{
int latest;
unsigned i;
int64_t pts;
 
if (fs->eof)
return;
while (!fs->frame_ready) {
latest = -1;
for (i = 0; i < fs->nb_in; i++) {
if (!fs->in[i].have_next) {
if (latest < 0 || fs->in[i].pts < fs->in[latest].pts)
latest = i;
}
}
if (latest >= 0) {
fs->in_request = latest;
break;
}
 
pts = fs->in[0].pts_next;
for (i = 1; i < fs->nb_in; i++)
if (fs->in[i].pts_next < pts)
pts = fs->in[i].pts_next;
if (pts == INT64_MAX) {
fs->eof = 1;
break;
}
for (i = 0; i < fs->nb_in; i++) {
if (fs->in[i].pts_next == pts ||
(fs->in[i].before == EXT_INFINITY &&
fs->in[i].state == STATE_BOF)) {
av_frame_free(&fs->in[i].frame);
fs->in[i].frame = fs->in[i].frame_next;
fs->in[i].pts = fs->in[i].pts_next;
fs->in[i].frame_next = NULL;
fs->in[i].pts_next = AV_NOPTS_VALUE;
fs->in[i].have_next = 0;
fs->in[i].state = fs->in[i].frame ? STATE_RUN : STATE_EOF;
if (fs->in[i].sync == fs->sync_level && fs->in[i].frame)
fs->frame_ready = 1;
if (fs->in[i].state == STATE_EOF &&
fs->in[i].after == EXT_STOP)
fs->eof = 1;
}
}
if (fs->eof)
fs->frame_ready = 0;
if (fs->frame_ready)
for (i = 0; i < fs->nb_in; i++)
if ((fs->in[i].state == STATE_BOF &&
fs->in[i].before == EXT_STOP))
fs->frame_ready = 0;
fs->pts = pts;
}
}
 
static int64_t framesync_pts_extrapolate(FFFrameSync *fs, unsigned in,
int64_t pts)
{
/* Possible enhancement: use the link's frame rate */
return pts + 1;
}
 
static void framesync_inject_frame(FFFrameSync *fs, unsigned in, AVFrame *frame)
{
int64_t pts;
 
av_assert0(!fs->in[in].have_next);
if (frame) {
pts = av_rescale_q(frame->pts, fs->in[in].time_base, fs->time_base);
frame->pts = pts;
} else {
pts = fs->in[in].state != STATE_RUN || fs->in[in].after == EXT_INFINITY
? INT64_MAX : framesync_pts_extrapolate(fs, in, fs->in[in].pts);
fs->in[in].sync = 0;
framesync_sync_level_update(fs);
}
fs->in[in].frame_next = frame;
fs->in[in].pts_next = pts;
fs->in[in].have_next = 1;
}
 
int ff_framesync_add_frame(FFFrameSync *fs, unsigned in, AVFrame *frame)
{
av_assert1(in < fs->nb_in);
if (!fs->in[in].have_next)
framesync_inject_frame(fs, in, frame);
else
ff_bufqueue_add(fs, &fs->in[in].queue, frame);
return 0;
}
 
void ff_framesync_next(FFFrameSync *fs)
{
unsigned i;
 
av_assert0(!fs->frame_ready);
for (i = 0; i < fs->nb_in; i++)
if (!fs->in[i].have_next && fs->in[i].queue.available)
framesync_inject_frame(fs, i, ff_bufqueue_get(&fs->in[i].queue));
fs->frame_ready = 0;
framesync_advance(fs);
}
 
void ff_framesync_drop(FFFrameSync *fs)
{
fs->frame_ready = 0;
}
 
int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
unsigned get)
{
AVFrame *frame;
unsigned need_copy = 0, i;
int64_t pts_next;
int ret;
 
if (!fs->in[in].frame) {
*rframe = NULL;
return 0;
}
frame = fs->in[in].frame;
if (get) {
/* Find out if we need to copy the frame: is there another sync
stream, and do we know if its current frame will outlast this one? */
pts_next = fs->in[in].have_next ? fs->in[in].pts_next : INT64_MAX;
for (i = 0; i < fs->nb_in && !need_copy; i++)
if (i != in && fs->in[i].sync &&
(!fs->in[i].have_next || fs->in[i].pts_next < pts_next))
need_copy = 1;
if (need_copy) {
if (!(frame = av_frame_clone(frame)))
return AVERROR(ENOMEM);
if ((ret = av_frame_make_writable(frame)) < 0) {
av_frame_free(&frame);
return ret;
}
} else {
fs->in[in].frame = NULL;
}
fs->frame_ready = 0;
}
*rframe = frame;
return 0;
}
 
void ff_framesync_uninit(FFFrameSync *fs)
{
unsigned i;
 
for (i = 0; i < fs->nb_in; i++) {
av_frame_free(&fs->in[i].frame);
av_frame_free(&fs->in[i].frame_next);
ff_bufqueue_discard_all(&fs->in[i].queue);
}
}
 
int ff_framesync_process_frame(FFFrameSync *fs, unsigned all)
{
int ret, count = 0;
 
av_assert0(fs->on_event);
while (1) {
ff_framesync_next(fs);
if (fs->eof || !fs->frame_ready)
break;
if ((ret = fs->on_event(fs)) < 0)
return ret;
ff_framesync_drop(fs);
count++;
if (!all)
break;
}
if (!count && fs->eof)
return AVERROR_EOF;
return count;
}
 
int ff_framesync_filter_frame(FFFrameSync *fs, AVFilterLink *inlink,
AVFrame *in)
{
int ret;
 
if ((ret = ff_framesync_process_frame(fs, 1)) < 0)
return ret;
if ((ret = ff_framesync_add_frame(fs, FF_INLINK_IDX(inlink), in)) < 0)
return ret;
if ((ret = ff_framesync_process_frame(fs, 0)) < 0)
return ret;
return 0;
}
 
int ff_framesync_request_frame(FFFrameSync *fs, AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
int input, ret;
 
if ((ret = ff_framesync_process_frame(fs, 0)) < 0)
return ret;
if (ret > 0)
return 0;
if (fs->eof)
return AVERROR_EOF;
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
input = fs->in_request;
ret = ff_request_frame(ctx->inputs[input]);
if (ret == AVERROR_EOF) {
if ((ret = ff_framesync_add_frame(fs, input, NULL)) < 0)
return ret;
if ((ret = ff_framesync_process_frame(fs, 0)) < 0)
return ret;
ret = 0;
}
return ret;
}
/contrib/sdk/sources/ffmpeg/libavfilter/framesync.h
0,0 → 1,296
/*
* Copyright (c) 2013 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_FRAMESYNC_H
#define AVFILTER_FRAMESYNC_H
 
#include "bufferqueue.h"
 
/*
* TODO
* Callback-based API similar to dualinput.
* Export convenient options.
*/
 
/**
* This API is intended as a helper for filters that have several video
* inputs and need to combine them somehow. If the inputs have different or
* variable frame rates, getting the input frames to match requires rather
* complex logic and a few user-tunable options.
*
* In this API, the moment when a set of synchronized input frames is ready
* to be processed is called a frame event. Frame events can be generated in
* response to input frames on any or all inputs, and the handling of
* situations where some streams extend beyond the beginning or the end of
* others can be configured.
*
* The basic workflow of this API is as follows:
*
* - When a frame is available on any input, add it using
* ff_framesync_add_frame().
*
* - When a frame event is ready to be processed (i.e. after adding a frame
* or when requested on input):
* - call ff_framesync_next();
* - if fs->frame_ready is true, process the frames;
* - call ff_framesync_drop().
*/
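
/*
 * A minimal usage sketch for a two-input filter (illustrative only: the
 * MixContext type, its fs field and the process_frame callback are
 * assumptions, not part of this API):
 *
 *     static int process_frame(FFFrameSync *fs)
 *     {
 *         AVFrame *main_frame, *second;
 *         int ret;
 *         // take ownership of input 0's frame, only peek at input 1
 *         if ((ret = ff_framesync_get_frame(fs, 0, &main_frame, 1)) < 0 ||
 *             (ret = ff_framesync_get_frame(fs, 1, &second,     0)) < 0)
 *             return ret;
 *         // ... combine the two frames and output the result ...
 *         return 0;
 *     }
 *
 *     // at filter init time:
 *     ff_framesync_init(&s->fs, ctx, 2);
 *     s->fs.on_event = process_frame;
 *     // fill in s->fs.in[0] / s->fs.in[1]: time_base, sync, before, after
 *     ff_framesync_configure(&s->fs);
 */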
 
/**
* Stream extrapolation mode
*
* Describes how the frames of a stream are extrapolated before the first one
* and after EOF, to keep sync with other, possibly longer, streams.
*/
enum FFFrameSyncExtMode {
 
/**
* Completely stop all streams with this one.
*/
EXT_STOP,
 
/**
* Ignore this stream and continue processing the other ones.
*/
EXT_NULL,
 
/**
* Extend the frame to infinity.
*/
EXT_INFINITY,
};
 
/**
* Input stream structure
*/
typedef struct FFFrameSyncIn {
 
/**
* Queue of incoming AVFrames; a NULL entry marks EOF
*/
struct FFBufQueue queue;
 
/**
* Extrapolation mode for timestamps before the first frame
*/
enum FFFrameSyncExtMode before;
 
/**
* Extrapolation mode for timestamps after the last frame
*/
enum FFFrameSyncExtMode after;
 
/**
* Time base for the incoming frames
*/
AVRational time_base;
 
/**
* Current frame, may be NULL before the first one or after EOF
*/
AVFrame *frame;
 
/**
* Next frame, for internal use
*/
AVFrame *frame_next;
 
/**
* PTS of the current frame
*/
int64_t pts;
 
/**
* PTS of the next frame, for internal use
*/
int64_t pts_next;
 
/**
* Boolean flag indicating whether the next frame is set, for internal use
*/
uint8_t have_next;
 
/**
* State: before first, in stream or after EOF, for internal use
*/
uint8_t state;
 
/**
* Synchronization level: frames on input at the highest sync level will
* generate output frame events.
*
* For example, if inputs #0 and #1 have sync level 2 and input #2 has
* sync level 1, then a frame on either input #0 or #1 will generate a
* frame event, but not a frame on input #2 until both inputs #0 and #1
* have reached EOF.
*
* If sync is 0, no frame event will be generated.
*/
unsigned sync;
 
} FFFrameSyncIn;
 
/**
* Frame sync structure.
*/
typedef struct FFFrameSync {
const AVClass *class;
void *parent;
 
/**
* Number of input streams
*/
unsigned nb_in;
 
/**
* Time base for the output events
*/
AVRational time_base;
 
/**
* Timestamp of the current event
*/
int64_t pts;
 
/**
* Callback called when a frame event is ready
*/
int (*on_event)(struct FFFrameSync *fs);
 
/**
* Opaque pointer, not used by the API
*/
void *opaque;
 
/**
* Index of the input that requires a request
*/
unsigned in_request;
 
/**
* Synchronization level: only inputs with the same sync level are sync
* sources.
*/
unsigned sync_level;
 
/**
* Flag indicating that a frame event is ready
*/
uint8_t frame_ready;
 
/**
* Flag indicating that output has reached EOF.
*/
uint8_t eof;
 
/**
* Array of inputs; all inputs must be in consecutive memory
*/
FFFrameSyncIn in[1]; /* must be the last field */
 
} FFFrameSync;
 
/**
* Initialize a frame sync structure.
*
* The entire structure is expected to be already set to 0.
*
* @param fs frame sync structure to initialize
* @param parent parent object, used for logging
* @param nb_in number of inputs
*/
void ff_framesync_init(FFFrameSync *fs, void *parent, unsigned nb_in);
 
/**
* Configure a frame sync structure.
*
* Must be called after all options are set, but before any use.
*
* @return >= 0 for success or a negative error code
*/
int ff_framesync_configure(FFFrameSync *fs);
 
/**
* Free all memory currently allocated.
*/
void ff_framesync_uninit(FFFrameSync *fs);
 
/**
* Add a frame to an input.
*
* Typically called from the filter_frame() method.
*
* @param fs frame sync structure
* @param in index of the input
* @param frame input frame, or NULL for EOF
*/
int ff_framesync_add_frame(FFFrameSync *fs, unsigned in, AVFrame *frame);
 
/**
* Prepare the next frame event.
*
* The status of the operation can be found in fs->frame_ready and fs->eof.
*/
void ff_framesync_next(FFFrameSync *fs);
 
/**
* Drop the current frame event.
*/
void ff_framesync_drop(FFFrameSync *fs);
 
/**
* Get the current frame in an input.
*
* @param fs frame sync structure
* @param in index of the input
* @param rframe used to return the current frame (or NULL)
* @param get if not zero, the calling code needs to get ownership of
* the returned frame; the current frame will either be
* duplicated or removed from the framesync structure
*/
int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
unsigned get);
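
/*
 * For example (a sketch, not normative): inside an on_event callback,
 * ff_framesync_get_frame(fs, 0, &f, 0) only peeks at the current frame of
 * input 0, while passing get = 1 hands ownership to the caller, who must
 * eventually release the frame with av_frame_free().
 */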
 
/**
* Process one or several frame events using the on_event callback.
*
* @param all if non-zero, process all pending frame events; if zero,
* process at most one
* @return number of frame events processed, or a negative error code
*/
int ff_framesync_process_frame(FFFrameSync *fs, unsigned all);
 
 
/**
* Accept a frame on a filter input.
*
* This function can be the complete implementation of all filter_frame
* methods of a filter using framesync.
*/
int ff_framesync_filter_frame(FFFrameSync *fs, AVFilterLink *inlink,
AVFrame *in);
 
/**
* Request a frame on the filter output.
*
* This function can be the complete implementation of the request_frame
* method of a filter using framesync, provided it has only one output.
*/
int ff_framesync_request_frame(FFFrameSync *fs, AVFilterLink *outlink);
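
/*
 * Illustrative wiring (MixContext and its fs member are assumed names):
 * with the two helpers above, a filter's pad callbacks can be one-liners:
 *
 *     static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 *     {
 *         MixContext *s = inlink->dst->priv;
 *         return ff_framesync_filter_frame(&s->fs, inlink, frame);
 *     }
 *
 *     static int request_frame(AVFilterLink *outlink)
 *     {
 *         MixContext *s = outlink->src->priv;
 *         return ff_framesync_request_frame(&s->fs, outlink);
 *     }
 */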
 
#endif /* AVFILTER_FRAMESYNC_H */
/contrib/sdk/sources/ffmpeg/libavfilter/gradfun.h
0,0 → 1,47
/*
* Copyright (c) 2010 Nolan Lum <nol888@gmail.com>
* Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_GRADFUN_H
#define AVFILTER_GRADFUN_H
 
#include "avfilter.h"
 
/// Holds instance-specific information for gradfun.
typedef struct GradFunContext {
const AVClass *class;
float strength;
int thresh; ///< threshold for gradient algorithm
int radius; ///< blur radius
int chroma_w; ///< width of the chroma planes
int chroma_h; ///< height of the chroma planes
int chroma_r; ///< blur radius for the chroma planes
uint16_t *buf; ///< holds image data for the blur algorithm passed into the filter
/// DSP functions.
void (*filter_line) (uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers);
void (*blur_line) (uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width);
} GradFunContext;
 
void ff_gradfun_init_x86(GradFunContext *gf);
 
void ff_gradfun_filter_line_c(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers);
void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width);
 
#endif /* AVFILTER_GRADFUN_H */
/contrib/sdk/sources/ffmpeg/libavfilter/graphdump.c
0,0 → 1,164
/*
* Filter graphs to bad ASCII-art
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <string.h>
 
#include "libavutil/channel_layout.h"
#include "libavutil/bprint.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "avfiltergraph.h"
 
static int print_link_prop(AVBPrint *buf, AVFilterLink *link)
{
char *format;
char layout[64];
 
if (!buf)
buf = &(AVBPrint){ 0 }; /* dummy buffer */
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
format = av_x_if_null(av_get_pix_fmt_name(link->format), "?");
av_bprintf(buf, "[%dx%d %d:%d %s]", link->w, link->h,
link->sample_aspect_ratio.num,
link->sample_aspect_ratio.den,
format);
break;
 
case AVMEDIA_TYPE_AUDIO:
av_get_channel_layout_string(layout, sizeof(layout),
link->channels, link->channel_layout);
format = av_x_if_null(av_get_sample_fmt_name(link->format), "?");
av_bprintf(buf, "[%dHz %s:%s]",
(int)link->sample_rate, format, layout);
break;
 
default:
av_bprintf(buf, "?");
break;
}
return buf->len;
}
 
static void avfilter_graph_dump_to_buf(AVBPrint *buf, AVFilterGraph *graph)
{
unsigned i, j, x, e;
 
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *filter = graph->filters[i];
unsigned max_src_name = 0, max_dst_name = 0;
unsigned max_in_name = 0, max_out_name = 0;
unsigned max_in_fmt = 0, max_out_fmt = 0;
unsigned width, height, in_indent;
unsigned lname = strlen(filter->name);
unsigned ltype = strlen(filter->filter->name);
 
for (j = 0; j < filter->nb_inputs; j++) {
AVFilterLink *l = filter->inputs[j];
unsigned ln = strlen(l->src->name) + 1 + strlen(l->srcpad->name);
max_src_name = FFMAX(max_src_name, ln);
max_in_name = FFMAX(max_in_name, strlen(l->dstpad->name));
max_in_fmt = FFMAX(max_in_fmt, print_link_prop(NULL, l));
}
for (j = 0; j < filter->nb_outputs; j++) {
AVFilterLink *l = filter->outputs[j];
unsigned ln = strlen(l->dst->name) + 1 + strlen(l->dstpad->name);
max_dst_name = FFMAX(max_dst_name, ln);
max_out_name = FFMAX(max_out_name, strlen(l->srcpad->name));
max_out_fmt = FFMAX(max_out_fmt, print_link_prop(NULL, l));
}
in_indent = max_src_name + max_in_name + max_in_fmt;
in_indent += in_indent ? 4 : 0;
width = FFMAX(lname + 2, ltype + 4);
height = FFMAX3(2, filter->nb_inputs, filter->nb_outputs);
av_bprint_chars(buf, ' ', in_indent);
av_bprintf(buf, "+");
av_bprint_chars(buf, '-', width);
av_bprintf(buf, "+\n");
for (j = 0; j < height; j++) {
unsigned in_no = j - (height - filter->nb_inputs ) / 2;
unsigned out_no = j - (height - filter->nb_outputs) / 2;
 
/* Input link */
if (in_no < filter->nb_inputs) {
AVFilterLink *l = filter->inputs[in_no];
e = buf->len + max_src_name + 2;
av_bprintf(buf, "%s:%s", l->src->name, l->srcpad->name);
av_bprint_chars(buf, '-', e - buf->len);
e = buf->len + max_in_fmt + 2 +
max_in_name - strlen(l->dstpad->name);
print_link_prop(buf, l);
av_bprint_chars(buf, '-', e - buf->len);
av_bprintf(buf, "%s", l->dstpad->name);
} else {
av_bprint_chars(buf, ' ', in_indent);
}
 
/* Filter */
av_bprintf(buf, "|");
if (j == (height - 2) / 2) {
x = (width - lname) / 2;
av_bprintf(buf, "%*s%-*s", x, "", width - x, filter->name);
} else if (j == (height - 2) / 2 + 1) {
x = (width - ltype - 2) / 2;
av_bprintf(buf, "%*s(%s)%*s", x, "", filter->filter->name,
width - ltype - 2 - x, "");
} else {
av_bprint_chars(buf, ' ', width);
}
av_bprintf(buf, "|");
 
/* Output link */
if (out_no < filter->nb_outputs) {
AVFilterLink *l = filter->outputs[out_no];
unsigned ln = strlen(l->dst->name) + 1 +
strlen(l->dstpad->name);
e = buf->len + max_out_name + 2;
av_bprintf(buf, "%s", l->srcpad->name);
av_bprint_chars(buf, '-', e - buf->len);
e = buf->len + max_out_fmt + 2 +
max_dst_name - ln;
print_link_prop(buf, l);
av_bprint_chars(buf, '-', e - buf->len);
av_bprintf(buf, "%s:%s", l->dst->name, l->dstpad->name);
}
av_bprintf(buf, "\n");
}
av_bprint_chars(buf, ' ', in_indent);
av_bprintf(buf, "+");
av_bprint_chars(buf, '-', width);
av_bprintf(buf, "+\n");
av_bprintf(buf, "\n");
}
}
 
char *avfilter_graph_dump(AVFilterGraph *graph, const char *options)
{
AVBPrint buf;
char *dump;
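
/* The first, count-only pass (size limit 0) just measures the length of
 * the dump; the second pass allocates exactly that and renders it. */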
 
av_bprint_init(&buf, 0, 0);
avfilter_graph_dump_to_buf(&buf, graph);
av_bprint_init(&buf, buf.len + 1, buf.len + 1);
avfilter_graph_dump_to_buf(&buf, graph);
av_bprint_finalize(&buf, &dump);
return dump;
}
/contrib/sdk/sources/ffmpeg/libavfilter/graphparser.c
0,0 → 1,605
/*
* filter graph parser
* Copyright (c) 2008 Vitor Sessak
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <string.h>
#include <stdio.h>
 
#include "libavutil/avstring.h"
#include "libavutil/mem.h"
#include "avfilter.h"
 
#define WHITESPACES " \n\t"
 
/**
* Link two filters together.
*
* @see avfilter_link()
*/
static int link_filter(AVFilterContext *src, int srcpad,
AVFilterContext *dst, int dstpad,
void *log_ctx)
{
int ret;
if ((ret = avfilter_link(src, srcpad, dst, dstpad))) {
av_log(log_ctx, AV_LOG_ERROR,
"Cannot create the link %s:%d -> %s:%d\n",
src->filter->name, srcpad, dst->filter->name, dstpad);
return ret;
}
 
return 0;
}
 
/**
* Parse the name of a link, which has the format "[linkname]".
*
* @return a pointer (which needs to be freed after use) to the name
* between brackets
*/
static char *parse_link_name(const char **buf, void *log_ctx)
{
const char *start = *buf;
char *name;
(*buf)++;
 
name = av_get_token(buf, "]");
 
if (!name[0]) {
av_log(log_ctx, AV_LOG_ERROR,
"Bad (empty?) label found in the following: \"%s\".\n", start);
goto fail;
}
 
if (*(*buf)++ != ']') {
av_log(log_ctx, AV_LOG_ERROR,
"Mismatched '[' found in the following: \"%s\".\n", start);
fail:
av_freep(&name);
}
 
return name;
}
 
/**
* Create an instance of a filter, initialize and insert it in the
* filtergraph in *ctx.
*
* @param filt_ctx put here a filter context in case of successful creation and configuration, NULL otherwise.
* @param ctx the filtergraph context
* @param index an index which is supposed to be unique for each filter instance added to the filtergraph
* @param filt_name the name of the filter to create
* @param args the arguments provided to the filter during its initialization
* @param log_ctx the log context to use
* @return >= 0 in case of success, a negative AVERROR code otherwise
*/
static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int index,
const char *filt_name, const char *args, void *log_ctx)
{
AVFilter *filt;
char inst_name[30];
char *tmp_args = NULL;
int ret;
 
snprintf(inst_name, sizeof(inst_name), "Parsed_%s_%d", filt_name, index);
 
filt = avfilter_get_by_name(filt_name);
 
if (!filt) {
av_log(log_ctx, AV_LOG_ERROR,
"No such filter: '%s'\n", filt_name);
return AVERROR(EINVAL);
}
 
*filt_ctx = avfilter_graph_alloc_filter(ctx, filt, inst_name);
if (!*filt_ctx) {
av_log(log_ctx, AV_LOG_ERROR,
"Error creating filter '%s'\n", filt_name);
return AVERROR(ENOMEM);
}
 
if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags") &&
ctx->scale_sws_opts) {
tmp_args = av_asprintf("%s:%s",
args, ctx->scale_sws_opts);
if (!tmp_args)
return AVERROR(ENOMEM);
args = tmp_args;
}
 
ret = avfilter_init_str(*filt_ctx, args);
if (ret < 0) {
av_log(log_ctx, AV_LOG_ERROR,
"Error initializing filter '%s'", filt_name);
if (args)
av_log(log_ctx, AV_LOG_ERROR, " with args '%s'", args);
av_log(log_ctx, AV_LOG_ERROR, "\n");
}
 
av_free(tmp_args);
return ret;
}
 
/**
* Parse a string of the form FILTER_NAME[=PARAMS], and create a
* corresponding filter instance which is added to graph with
* create_filter().
*
* @param filt_ctx pointer that is set to the created and configured filter
* context on success, set to NULL on failure
* @param buf pointer to the buffer to parse, *buf will be updated to
* point to the char next after the parsed string
* @param index an index which is assigned to the created filter
* instance, and which is supposed to be unique for each filter
* instance added to the filtergraph
* @return >= 0 in case of success, a negative AVERROR code otherwise
*/
static int parse_filter(AVFilterContext **filt_ctx, const char **buf, AVFilterGraph *graph,
int index, void *log_ctx)
{
char *opts = NULL;
char *name = av_get_token(buf, "=,;[\n");
int ret;
 
if (**buf == '=') {
(*buf)++;
opts = av_get_token(buf, "[],;\n");
}
 
ret = create_filter(filt_ctx, graph, index, name, opts, log_ctx);
av_free(name);
av_free(opts);
return ret;
}
 
AVFilterInOut *avfilter_inout_alloc(void)
{
return av_mallocz(sizeof(AVFilterInOut));
}
 
void avfilter_inout_free(AVFilterInOut **inout)
{
while (*inout) {
AVFilterInOut *next = (*inout)->next;
av_freep(&(*inout)->name);
av_freep(inout);
*inout = next;
}
}
 
static AVFilterInOut *extract_inout(const char *label, AVFilterInOut **links)
{
AVFilterInOut *ret;
 
while (*links && (!(*links)->name || strcmp((*links)->name, label)))
links = &((*links)->next);
 
ret = *links;
 
if (ret) {
*links = ret->next;
ret->next = NULL;
}
 
return ret;
}
 
static void insert_inout(AVFilterInOut **inouts, AVFilterInOut *element)
{
element->next = *inouts;
*inouts = element;
}
 
static void append_inout(AVFilterInOut **inouts, AVFilterInOut **element)
{
while (*inouts && (*inouts)->next)
inouts = &((*inouts)->next);
 
if (!*inouts)
*inouts = *element;
else
(*inouts)->next = *element;
*element = NULL;
}
 
static int link_filter_inouts(AVFilterContext *filt_ctx,
AVFilterInOut **curr_inputs,
AVFilterInOut **open_inputs, void *log_ctx)
{
int pad, ret;
 
for (pad = 0; pad < filt_ctx->nb_inputs; pad++) {
AVFilterInOut *p = *curr_inputs;
 
if (p) {
*curr_inputs = (*curr_inputs)->next;
p->next = NULL;
} else if (!(p = av_mallocz(sizeof(*p))))
return AVERROR(ENOMEM);
 
if (p->filter_ctx) {
ret = link_filter(p->filter_ctx, p->pad_idx, filt_ctx, pad, log_ctx);
av_free(p->name);
av_free(p);
if (ret < 0)
return ret;
} else {
p->filter_ctx = filt_ctx;
p->pad_idx = pad;
append_inout(open_inputs, &p);
}
}
 
if (*curr_inputs) {
av_log(log_ctx, AV_LOG_ERROR,
"Too many inputs specified for the \"%s\" filter.\n",
filt_ctx->filter->name);
return AVERROR(EINVAL);
}
 
pad = filt_ctx->nb_outputs;
while (pad--) {
AVFilterInOut *currlinkn = av_mallocz(sizeof(AVFilterInOut));
if (!currlinkn)
return AVERROR(ENOMEM);
currlinkn->filter_ctx = filt_ctx;
currlinkn->pad_idx = pad;
insert_inout(curr_inputs, currlinkn);
}
 
return 0;
}
 
static int parse_inputs(const char **buf, AVFilterInOut **curr_inputs,
AVFilterInOut **open_outputs, void *log_ctx)
{
AVFilterInOut *parsed_inputs = NULL;
int pad = 0;
 
while (**buf == '[') {
char *name = parse_link_name(buf, log_ctx);
AVFilterInOut *match;
 
if (!name)
return AVERROR(EINVAL);
 
/* First check if the label is not in the open_outputs list */
match = extract_inout(name, open_outputs);
 
if (match) {
av_free(name);
} else {
/* Not in the list, so add it as an input */
if (!(match = av_mallocz(sizeof(AVFilterInOut)))) {
av_free(name);
return AVERROR(ENOMEM);
}
match->name = name;
match->pad_idx = pad;
}
 
append_inout(&parsed_inputs, &match);
 
*buf += strspn(*buf, WHITESPACES);
pad++;
}
 
append_inout(&parsed_inputs, curr_inputs);
*curr_inputs = parsed_inputs;
 
return pad;
}
 
static int parse_outputs(const char **buf, AVFilterInOut **curr_inputs,
AVFilterInOut **open_inputs,
AVFilterInOut **open_outputs, void *log_ctx)
{
int ret, pad = 0;
 
while (**buf == '[') {
char *name = parse_link_name(buf, log_ctx);
AVFilterInOut *match;
 
AVFilterInOut *input = *curr_inputs;
 
if (!name)
return AVERROR(EINVAL);
 
if (!input) {
av_log(log_ctx, AV_LOG_ERROR,
"No output pad can be associated to link label '%s'.\n", name);
av_free(name);
return AVERROR(EINVAL);
}
*curr_inputs = (*curr_inputs)->next;
 
/* First check if the label is not in the open_inputs list */
match = extract_inout(name, open_inputs);
 
if (match) {
if ((ret = link_filter(input->filter_ctx, input->pad_idx,
match->filter_ctx, match->pad_idx, log_ctx)) < 0) {
av_free(name);
return ret;
}
av_free(match->name);
av_free(name);
av_free(match);
av_free(input);
} else {
/* Not in the list, so add the first input as an open_output */
input->name = name;
insert_inout(open_outputs, input);
}
*buf += strspn(*buf, WHITESPACES);
pad++;
}
 
return pad;
}
 
static int parse_sws_flags(const char **buf, AVFilterGraph *graph)
{
char *p = strchr(*buf, ';');
 
if (strncmp(*buf, "sws_flags=", 10))
return 0;
 
if (!p) {
av_log(graph, AV_LOG_ERROR, "sws_flags not terminated with ';'.\n");
return AVERROR(EINVAL);
}
 
*buf += 4; // keep the 'flags=' part
 
av_freep(&graph->scale_sws_opts);
if (!(graph->scale_sws_opts = av_mallocz(p - *buf + 1)))
return AVERROR(ENOMEM);
av_strlcpy(graph->scale_sws_opts, *buf, p - *buf + 1);
 
*buf = p + 1;
return 0;
}
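
/*
 * For reference, a graph description accepted by the parsers below looks
 * like this (an illustrative string, not taken from this file):
 *
 *     "sws_flags=bicubic; [in] split [a][b]; [b] scale=320:240 [c];
 *      [a][c] overlay [out]"
 *
 * i.e. ','/';'-separated FILTER_NAME=PARAMS instances, each optionally
 * preceded and followed by "[label]" link names.
 */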
 
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs,
AVFilterInOut **outputs)
{
int index = 0, ret = 0;
char chr = 0;
 
AVFilterInOut *curr_inputs = NULL, *open_inputs = NULL, *open_outputs = NULL;
 
filters += strspn(filters, WHITESPACES);
 
if ((ret = parse_sws_flags(&filters, graph)) < 0)
goto fail;
 
do {
AVFilterContext *filter;
filters += strspn(filters, WHITESPACES);
 
if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, graph)) < 0)
goto end;
if ((ret = parse_filter(&filter, &filters, graph, index, graph)) < 0)
goto end;
 
 
if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, graph)) < 0)
goto end;
 
if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs,
graph)) < 0)
goto end;
 
filters += strspn(filters, WHITESPACES);
chr = *filters++;
 
if (chr == ';' && curr_inputs)
append_inout(&open_outputs, &curr_inputs);
index++;
} while (chr == ',' || chr == ';');
 
if (chr) {
av_log(graph, AV_LOG_ERROR,
"Unable to parse graph description substring: \"%s\"\n",
filters - 1);
ret = AVERROR(EINVAL);
goto end;
}
 
append_inout(&open_outputs, &curr_inputs);
 
 
*inputs = open_inputs;
*outputs = open_outputs;
return 0;
 
fail:
end:
while (graph->nb_filters)
avfilter_free(graph->filters[0]);
av_freep(&graph->filters);
avfilter_inout_free(&open_inputs);
avfilter_inout_free(&open_outputs);
avfilter_inout_free(&curr_inputs);
 
*inputs = NULL;
*outputs = NULL;
 
return ret;
}
 
#if HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
AVFilterInOut *open_inputs,
AVFilterInOut *open_outputs, void *log_ctx)
{
int ret;
AVFilterInOut *cur, *match, *inputs = NULL, *outputs = NULL;
 
if ((ret = avfilter_graph_parse2(graph, filters, &inputs, &outputs)) < 0)
goto fail;
 
/* First input can be omitted if it is "[in]" */
if (inputs && !inputs->name)
inputs->name = av_strdup("in");
for (cur = inputs; cur; cur = cur->next) {
if (!cur->name) {
av_log(log_ctx, AV_LOG_ERROR,
"Not enough inputs specified for the \"%s\" filter.\n",
cur->filter_ctx->filter->name);
ret = AVERROR(EINVAL);
goto fail;
}
if (!(match = extract_inout(cur->name, &open_outputs)))
continue;
ret = avfilter_link(match->filter_ctx, match->pad_idx,
cur->filter_ctx, cur->pad_idx);
avfilter_inout_free(&match);
if (ret < 0)
goto fail;
}
 
/* Last output can be omitted if it is "[out]" */
if (outputs && !outputs->name)
outputs->name = av_strdup("out");
for (cur = outputs; cur; cur = cur->next) {
if (!cur->name) {
av_log(log_ctx, AV_LOG_ERROR,
"Invalid filterchain containing an unlabelled output pad: \"%s\"\n",
filters);
ret = AVERROR(EINVAL);
goto fail;
}
if (!(match = extract_inout(cur->name, &open_inputs)))
continue;
ret = avfilter_link(cur->filter_ctx, cur->pad_idx,
match->filter_ctx, match->pad_idx);
avfilter_inout_free(&match);
if (ret < 0)
goto fail;
}
 
fail:
if (ret < 0) {
while (graph->nb_filters)
avfilter_free(graph->filters[0]);
av_freep(&graph->filters);
}
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
avfilter_inout_free(&open_inputs);
avfilter_inout_free(&open_outputs);
return ret;
#else
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs, AVFilterInOut **outputs,
void *log_ctx)
{
return avfilter_graph_parse_ptr(graph, filters, inputs, outputs, log_ctx);
#endif
}
 
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
AVFilterInOut **open_inputs_ptr, AVFilterInOut **open_outputs_ptr,
void *log_ctx)
{
int index = 0, ret = 0;
char chr = 0;
 
AVFilterInOut *curr_inputs = NULL;
AVFilterInOut *open_inputs = open_inputs_ptr ? *open_inputs_ptr : NULL;
AVFilterInOut *open_outputs = open_outputs_ptr ? *open_outputs_ptr : NULL;
 
if ((ret = parse_sws_flags(&filters, graph)) < 0)
goto end;
 
do {
AVFilterContext *filter;
const char *filterchain = filters;
filters += strspn(filters, WHITESPACES);
 
if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, log_ctx)) < 0)
goto end;
 
if ((ret = parse_filter(&filter, &filters, graph, index, log_ctx)) < 0)
goto end;
 
if (filter->nb_inputs == 1 && !curr_inputs && !index) {
/* First input pad, assume it is "[in]" if not specified */
const char *tmp = "[in]";
if ((ret = parse_inputs(&tmp, &curr_inputs, &open_outputs, log_ctx)) < 0)
goto end;
}
 
if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, log_ctx)) < 0)
goto end;
 
if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs,
log_ctx)) < 0)
goto end;
 
filters += strspn(filters, WHITESPACES);
chr = *filters++;
 
if (chr == ';' && curr_inputs) {
av_log(log_ctx, AV_LOG_ERROR,
"Invalid filterchain containing an unlabelled output pad: \"%s\"\n",
filterchain);
ret = AVERROR(EINVAL);
goto end;
}
index++;
} while (chr == ',' || chr == ';');
 
if (chr) {
av_log(log_ctx, AV_LOG_ERROR,
"Unable to parse graph description substring: \"%s\"\n",
filters - 1);
ret = AVERROR(EINVAL);
goto end;
}
 
if (curr_inputs) {
/* Last output pad, assume it is "[out]" if not specified */
const char *tmp = "[out]";
if ((ret = parse_outputs(&tmp, &curr_inputs, &open_inputs, &open_outputs,
log_ctx)) < 0)
goto end;
}
 
end:
/* clear open_in/outputs only if not passed as parameters */
if (open_inputs_ptr) *open_inputs_ptr = open_inputs;
else avfilter_inout_free(&open_inputs);
if (open_outputs_ptr) *open_outputs_ptr = open_outputs;
else avfilter_inout_free(&open_outputs);
avfilter_inout_free(&curr_inputs);
 
if (ret < 0) {
while (graph->nb_filters)
avfilter_free(graph->filters[0]);
av_freep(&graph->filters);
}
return ret;
}
/contrib/sdk/sources/ffmpeg/libavfilter/internal.h
0,0 → 1,366
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_INTERNAL_H
#define AVFILTER_INTERNAL_H
 
/**
* @file
* internal API functions
*/
 
#include "libavutil/internal.h"
#include "avfilter.h"
#include "avfiltergraph.h"
#include "formats.h"
#include "thread.h"
#include "version.h"
#include "video.h"
 
#define POOL_SIZE 32
typedef struct AVFilterPool {
AVFilterBufferRef *pic[POOL_SIZE];
int count;
int refcount;
int draining;
} AVFilterPool;
 
typedef struct AVFilterCommand {
double time; ///< time expressed in seconds
char *command; ///< command
char *arg; ///< optional argument for the command
int flags;
struct AVFilterCommand *next;
} AVFilterCommand;
 
/**
* Update the position of a link in the age heap.
*/
void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link);
 
#if !FF_API_AVFILTERPAD_PUBLIC
/**
* A filter pad used for either input or output.
*/
struct AVFilterPad {
/**
* Pad name. The name is unique among inputs and among outputs, but an
* input may have the same name as an output. This may be NULL if this
* pad has no need to ever be referenced by name.
*/
const char *name;
 
/**
* AVFilterPad type.
*/
enum AVMediaType type;
 
/**
* Callback function to get a video buffer. If NULL, the filter system will
* use ff_default_get_video_buffer().
*
* Input video pads only.
*/
AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);
 
/**
* Callback function to get an audio buffer. If NULL, the filter system will
* use ff_default_get_audio_buffer().
*
* Input audio pads only.
*/
AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);
 
/**
* Filtering callback. This is where a filter receives a frame with
* audio/video data and should do its processing.
*
* Input pads only.
*
* @return >= 0 on success, a negative AVERROR on error. This function
* must ensure that the frame is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
int (*filter_frame)(AVFilterLink *link, AVFrame *frame);
 
/**
* Frame poll callback. This returns the number of immediately available
* samples. It should return a positive value if the next request_frame()
* is guaranteed to return one frame (with no delay).
*
* Defaults to just calling the source poll_frame() method.
*
* Output pads only.
*/
int (*poll_frame)(AVFilterLink *link);
 
/**
* Frame request callback. A call to this should result in at least one
* frame being output over the given link. This should return zero on
* success, and another value on error.
*
* Output pads only.
*/
int (*request_frame)(AVFilterLink *link);
 
/**
* Link configuration callback.
*
* For output pads, this should set the link properties such as
* width/height. This should NOT set the format property - that is
* negotiated between filters by the filter system using the
* query_formats() callback before this function is called.
*
* For input pads, this should check the properties of the link, and update
* the filter's internal state as necessary.
*
* For both input and output filters, this should return zero on success,
* and another value on error.
*/
int (*config_props)(AVFilterLink *link);
 
/**
* The filter expects a fifo to be inserted on its input link,
* typically because it has a delay.
*
Input pads only.
*/
int needs_fifo;
};
#endif
 
struct AVFilterGraphInternal {
void *thread;
avfilter_execute_func *thread_execute;
};
 
struct AVFilterInternal {
avfilter_execute_func *execute;
};
 
#if FF_API_AVFILTERBUFFER
/** default handler for freeing audio/video buffer when there are no references left */
void ff_avfilter_default_free_buffer(AVFilterBuffer *buf);
#endif
 
/** Tell if a format is contained in the provided list, which is terminated by -1. */
int ff_fmt_is_in(int fmt, const int *fmts);
 
/* Functions to parse audio and video format arguments */
 
/**
* Parse a pixel format.
*
* @param ret pixel format pointer to where the value should be written
* @param arg string to parse
* @param log_ctx log context
* @return >= 0 in case of success, a negative AVERROR code on error
*/
int ff_parse_pixel_format(enum AVPixelFormat *ret, const char *arg, void *log_ctx);
 
/**
* Parse a sample rate.
*
* @param ret unsigned integer pointer to where the value should be written
* @param arg string to parse
* @param log_ctx log context
* @return >= 0 in case of success, a negative AVERROR code on error
*/
int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx);
 
/**
* Parse a time base.
*
* @param ret unsigned AVRational pointer to where the value should be written
* @param arg string to parse
* @param log_ctx log context
* @return >= 0 in case of success, a negative AVERROR code on error
*/
int ff_parse_time_base(AVRational *ret, const char *arg, void *log_ctx);
 
/**
* Parse a sample format name or a corresponding integer representation.
*
* @param ret integer pointer to where the value should be written
* @param arg string to parse
* @param log_ctx log context
* @return >= 0 in case of success, a negative AVERROR code on error
*/
int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx);
 
/**
* Parse a channel layout or a corresponding integer representation.
*
* @param ret 64bit integer pointer to where the value should be written.
* @param arg string to parse
* @param log_ctx log context
* @return >= 0 in case of success, a negative AVERROR code on error
*/
int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx);
 
void ff_update_link_current_pts(AVFilterLink *link, int64_t pts);
 
void ff_command_queue_pop(AVFilterContext *filter);
 
/* misc trace functions */
 
/* #define FF_AVFILTER_TRACE */
 
#ifdef FF_AVFILTER_TRACE
# define ff_tlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
#else
# define ff_tlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0)
#endif
 
#define FF_TPRINTF_START(ctx, func) ff_tlog(NULL, "%-16s: ", #func)
 
char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms);
 
void ff_tlog_ref(void *ctx, AVFrame *ref, int end);
 
void ff_tlog_link(void *ctx, AVFilterLink *link, int end);
 
/**
* Insert a new pad.
*
* @param idx Insertion point. Pad is inserted at the end if this point
* is beyond the end of the list of pads.
* @param count Pointer to the number of pads in the list
* @param padidx_off Offset within an AVFilterLink structure to the element
* to increment when inserting a new pad causes link
* numbering to change
* @param pads Pointer to the pointer to the beginning of the list of pads
* @param links Pointer to the pointer to the beginning of the list of links
* @param newpad The new pad to add. A copy is made when adding.
* @return >= 0 in case of success, a negative AVERROR code on error
*/
int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad);
 
/** Insert a new input pad for the filter. */
static inline int ff_insert_inpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
int ret = ff_insert_pad(index, &f->nb_inputs, offsetof(AVFilterLink, dstpad),
&f->input_pads, &f->inputs, p);
#if FF_API_FOO_COUNT
FF_DISABLE_DEPRECATION_WARNINGS
f->input_count = f->nb_inputs;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
return ret;
}
 
/** Insert a new output pad for the filter. */
static inline int ff_insert_outpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
int ret = ff_insert_pad(index, &f->nb_outputs, offsetof(AVFilterLink, srcpad),
&f->output_pads, &f->outputs, p);
#if FF_API_FOO_COUNT
FF_DISABLE_DEPRECATION_WARNINGS
f->output_count = f->nb_outputs;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
return ret;
}
 
/**
* Poll a frame from the filter chain.
*
* @param link the input link
* @return the number of immediately available frames, a negative
* number in case of error
*/
int ff_poll_frame(AVFilterLink *link);
 
/**
* Request an input frame from the filter at the other end of the link.
*
* @param link the input link
* @return zero on success
*/
int ff_request_frame(AVFilterLink *link);
 
#define AVFILTER_DEFINE_CLASS(fname) \
static const AVClass fname##_class = { \
.class_name = #fname, \
.item_name = av_default_item_name, \
.option = fname##_options, \
.version = LIBAVUTIL_VERSION_INT, \
.category = AV_CLASS_CATEGORY_FILTER, \
}
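
/*
 * For instance (a sketch: "foo" and foo_options are hypothetical names),
 *
 *     static const AVOption foo_options[] = { ... };
 *     AVFILTER_DEFINE_CLASS(foo);
 *
 * defines a static AVClass called foo_class wired to foo_options, suitable
 * as the priv_class of the "foo" filter.
 */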
 
AVFilterBufferRef *ff_copy_buffer_ref(AVFilterLink *outlink,
AVFilterBufferRef *ref);
 
/**
* Find the index of a link.
*
* I.e. find i such that link == ctx->(in|out)puts[i]
*/
#define FF_INLINK_IDX(link) ((int)((link)->dstpad - (link)->dst->input_pads))
#define FF_OUTLINK_IDX(link) ((int)((link)->srcpad - (link)->src->output_pads))
 
int ff_buffersink_read_compat(AVFilterContext *ctx, AVFilterBufferRef **buf);
int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
int nb_samples);
/**
* Send a frame of data to the next filter.
*
* @param link the output link over which the data is being sent
* @param frame a reference to the buffer of data being sent. The
* receiving filter will free this reference when it no longer
* needs it or pass it on to the next filter.
*
* @return >= 0 on success, a negative AVERROR on error. The receiving filter
* is responsible for unreferencing frame in case of error.
*/
int ff_filter_frame(AVFilterLink *link, AVFrame *frame);
 
/**
* Flags for AVFilterLink.flags.
*/
enum {
 
/**
* Frame requests may need to loop in order to be fulfilled.
* A filter must set this flag on an output link if it may return 0 in
* request_frame() without filtering a frame.
*/
FF_LINK_FLAG_REQUEST_LOOP = 1,
 
};
 
/**
* Allocate a new filter context and return it.
*
* @param filter what filter to create an instance of
* @param inst_name name to give to the new filter context
*
* @return newly created filter context or NULL on failure
*/
AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name);
 
/**
* Remove a filter from a graph.
*/
void ff_filter_graph_remove_filter(AVFilterGraph *graph, AVFilterContext *filter);
 
#endif /* AVFILTER_INTERNAL_H */
/contrib/sdk/sources/ffmpeg/libavfilter/lavfutils.c
0,0 → 1,98
/*
* Copyright 2012 Stefano Sabatini <stefasab gmail com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/imgutils.h"
#include "lavfutils.h"
 
int ff_load_image(uint8_t *data[4], int linesize[4],
int *w, int *h, enum AVPixelFormat *pix_fmt,
const char *filename, void *log_ctx)
{
AVInputFormat *iformat = NULL;
AVFormatContext *format_ctx = NULL;
AVCodec *codec;
AVCodecContext *codec_ctx;
AVFrame *frame;
int frame_decoded, ret = 0;
AVPacket pkt;
 
av_init_packet(&pkt);
 
av_register_all();
 
iformat = av_find_input_format("image2");
if ((ret = avformat_open_input(&format_ctx, filename, iformat, NULL)) < 0) {
av_log(log_ctx, AV_LOG_ERROR,
"Failed to open input file '%s'\n", filename);
return ret;
}
 
codec_ctx = format_ctx->streams[0]->codec;
codec = avcodec_find_decoder(codec_ctx->codec_id);
if (!codec) {
av_log(log_ctx, AV_LOG_ERROR, "Failed to find codec\n");
ret = AVERROR(EINVAL);
goto end;
}
 
if ((ret = avcodec_open2(codec_ctx, codec, NULL)) < 0) {
av_log(log_ctx, AV_LOG_ERROR, "Failed to open codec\n");
goto end;
}
 
if (!(frame = avcodec_alloc_frame()) ) {
av_log(log_ctx, AV_LOG_ERROR, "Failed to alloc frame\n");
ret = AVERROR(ENOMEM);
goto end;
}
 
ret = av_read_frame(format_ctx, &pkt);
if (ret < 0) {
av_log(log_ctx, AV_LOG_ERROR, "Failed to read frame from file\n");
goto end;
}
 
ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt);
if (ret < 0 || !frame_decoded) {
av_log(log_ctx, AV_LOG_ERROR, "Failed to decode image from file\n");
goto end;
}
ret = 0;
 
*w = frame->width;
*h = frame->height;
*pix_fmt = frame->format;
 
if ((ret = av_image_alloc(data, linesize, *w, *h, *pix_fmt, 16)) < 0)
goto end;
ret = 0;
 
av_image_copy(data, linesize, (const uint8_t **)frame->data, frame->linesize, *pix_fmt, *w, *h);
 
end:
av_free_packet(&pkt);
avcodec_close(codec_ctx);
avformat_close_input(&format_ctx);
av_freep(&frame);
 
if (ret < 0)
av_log(log_ctx, AV_LOG_ERROR, "Error loading image file '%s'\n", filename);
return ret;
}
/contrib/sdk/sources/ffmpeg/libavfilter/lavfutils.h
0,0 → 1,43
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Miscellaneous utilities which make use of the libavformat library
*/
 
#ifndef AVFILTER_LAVFUTILS_H
#define AVFILTER_LAVFUTILS_H
 
#include "libavformat/avformat.h"
 
/**
* Load image from filename and put the resulting image in data.
*
* @param w pointer to the width of the loaded image
* @param h pointer to the height of the loaded image
* @param pix_fmt pointer to the pixel format of the loaded image
* @param filename the name of the image file to load
* @param log_ctx log context
* @return >= 0 in case of success, a negative error code otherwise.
*/
int ff_load_image(uint8_t *data[4], int linesize[4],
int *w, int *h, enum AVPixelFormat *pix_fmt,
const char *filename, void *log_ctx);
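
/*
 * A minimal calling sketch (error handling elided; "logo.png" is only an
 * illustrative file name):
 *
 *     uint8_t *data[4];
 *     int linesize[4], w, h;
 *     enum AVPixelFormat pix_fmt;
 *     if (ff_load_image(data, linesize, &w, &h, &pix_fmt,
 *                       "logo.png", NULL) >= 0) {
 *         // ... use the image ...
 *         av_freep(&data[0]); // single buffer allocated by av_image_alloc()
 *     }
 */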
 
#endif /* AVFILTER_LAVFUTILS_H */
/contrib/sdk/sources/ffmpeg/libavfilter/libavfilter.v
0,0 → 1,5
LIBAVFILTER_$MAJOR {
global: avfilter_*; av_*;
ff_default_query_formats;
local: *;
};
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/av_helpers.h
0,0 → 1,27
/*
* Generic libav* helpers
*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef MPLAYER_AV_HELPERS_H
#define MPLAYER_AV_HELPERS_H
 
void ff_init_avcodec(void);
void ff_init_avformat(void);
 
#endif /* MPLAYER_AV_HELPERS_H */
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/cpudetect.h
0,0 → 1,60
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef MPLAYER_CPUDETECT_H
#define MPLAYER_CPUDETECT_H
 
#define CPUTYPE_I386 3
#define CPUTYPE_I486 4
#define CPUTYPE_I586 5
#define CPUTYPE_I686 6
 
#include "libavutil/x86_cpu.h"
 
typedef struct cpucaps_s {
int cpuType;
int cpuModel;
int cpuStepping;
int hasMMX;
int hasMMX2;
int has3DNow;
int has3DNowExt;
int hasSSE;
int hasSSE2;
int hasSSE3;
int hasSSSE3;
int hasSSE4;
int hasSSE42;
int hasSSE4a;
int hasAVX;
int isX86;
unsigned cl_size; /* size of cache line */
int hasAltiVec;
int hasTSC;
} CpuCaps;
 
extern CpuCaps ff_gCpuCaps;
 
void ff_do_cpuid(unsigned int ax, unsigned int *p);
 
void ff_GetCpuCaps(CpuCaps *caps);
 
/* returned value is malloc()'ed so free() it after use */
char *ff_GetCpuFriendlyName(unsigned int regs[], unsigned int regs2[]);
 
#endif /* MPLAYER_CPUDETECT_H */
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/img_format.c
0,0 → 1,233
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#include "config.h"
#include "img_format.h"
#include "stdio.h"
#include "libavutil/bswap.h"
 
const char *ff_vo_format_name(int format)
{
static char unknown_format[20];
switch(format)
{
case IMGFMT_RGB1: return "RGB 1-bit";
case IMGFMT_RGB4: return "RGB 4-bit";
case IMGFMT_RG4B: return "RGB 4-bit per byte";
case IMGFMT_RGB8: return "RGB 8-bit";
case IMGFMT_RGB12: return "RGB 12-bit";
case IMGFMT_RGB15: return "RGB 15-bit";
case IMGFMT_RGB16: return "RGB 16-bit";
case IMGFMT_RGB24: return "RGB 24-bit";
// case IMGFMT_RGB32: return "RGB 32-bit";
case IMGFMT_RGB48LE: return "RGB 48-bit LE";
case IMGFMT_RGB48BE: return "RGB 48-bit BE";
case IMGFMT_RGB64LE: return "RGB 64-bit LE";
case IMGFMT_RGB64BE: return "RGB 64-bit BE";
case IMGFMT_BGR1: return "BGR 1-bit";
case IMGFMT_BGR4: return "BGR 4-bit";
case IMGFMT_BG4B: return "BGR 4-bit per byte";
case IMGFMT_BGR8: return "BGR 8-bit";
case IMGFMT_BGR12: return "BGR 12-bit";
case IMGFMT_BGR15: return "BGR 15-bit";
case IMGFMT_BGR16: return "BGR 16-bit";
case IMGFMT_BGR24: return "BGR 24-bit";
// case IMGFMT_BGR32: return "BGR 32-bit";
case IMGFMT_ABGR: return "ABGR";
case IMGFMT_BGRA: return "BGRA";
case IMGFMT_ARGB: return "ARGB";
case IMGFMT_RGBA: return "RGBA";
case IMGFMT_GBR24P: return "Planar GBR 24-bit";
case IMGFMT_GBR12P: return "Planar GBR 36-bit";
case IMGFMT_GBR14P: return "Planar GBR 42-bit";
case IMGFMT_YVU9: return "Planar YVU9";
case IMGFMT_IF09: return "Planar IF09";
case IMGFMT_YV12: return "Planar YV12";
case IMGFMT_I420: return "Planar I420";
case IMGFMT_IYUV: return "Planar IYUV";
case IMGFMT_CLPL: return "Planar CLPL";
case IMGFMT_Y800: return "Planar Y800";
case IMGFMT_Y8: return "Planar Y8";
case IMGFMT_Y8A: return "Planar Y8 with alpha";
case IMGFMT_Y16_LE: return "Planar Y16 little-endian";
case IMGFMT_Y16_BE: return "Planar Y16 big-endian";
case IMGFMT_420P16_LE: return "Planar 420P 16-bit little-endian";
case IMGFMT_420P16_BE: return "Planar 420P 16-bit big-endian";
case IMGFMT_420P14_LE: return "Planar 420P 14-bit little-endian";
case IMGFMT_420P14_BE: return "Planar 420P 14-bit big-endian";
case IMGFMT_420P12_LE: return "Planar 420P 12-bit little-endian";
case IMGFMT_420P12_BE: return "Planar 420P 12-bit big-endian";
case IMGFMT_420P10_LE: return "Planar 420P 10-bit little-endian";
case IMGFMT_420P10_BE: return "Planar 420P 10-bit big-endian";
case IMGFMT_420P9_LE: return "Planar 420P 9-bit little-endian";
case IMGFMT_420P9_BE: return "Planar 420P 9-bit big-endian";
case IMGFMT_422P16_LE: return "Planar 422P 16-bit little-endian";
case IMGFMT_422P16_BE: return "Planar 422P 16-bit big-endian";
case IMGFMT_422P14_LE: return "Planar 422P 14-bit little-endian";
case IMGFMT_422P14_BE: return "Planar 422P 14-bit big-endian";
case IMGFMT_422P12_LE: return "Planar 422P 12-bit little-endian";
case IMGFMT_422P12_BE: return "Planar 422P 12-bit big-endian";
case IMGFMT_422P10_LE: return "Planar 422P 10-bit little-endian";
case IMGFMT_422P10_BE: return "Planar 422P 10-bit big-endian";
case IMGFMT_422P9_LE: return "Planar 422P 9-bit little-endian";
case IMGFMT_422P9_BE: return "Planar 422P 9-bit big-endian";
case IMGFMT_444P16_LE: return "Planar 444P 16-bit little-endian";
case IMGFMT_444P16_BE: return "Planar 444P 16-bit big-endian";
case IMGFMT_444P14_LE: return "Planar 444P 14-bit little-endian";
case IMGFMT_444P14_BE: return "Planar 444P 14-bit big-endian";
case IMGFMT_444P12_LE: return "Planar 444P 12-bit little-endian";
case IMGFMT_444P12_BE: return "Planar 444P 12-bit big-endian";
case IMGFMT_444P10_LE: return "Planar 444P 10-bit little-endian";
case IMGFMT_444P10_BE: return "Planar 444P 10-bit big-endian";
case IMGFMT_444P9_LE: return "Planar 444P 9-bit little-endian";
case IMGFMT_444P9_BE: return "Planar 444P 9-bit big-endian";
case IMGFMT_420A: return "Planar 420P with alpha";
case IMGFMT_444P: return "Planar 444P";
case IMGFMT_444A: return "Planar 444P with alpha";
case IMGFMT_422P: return "Planar 422P";
case IMGFMT_422A: return "Planar 422P with alpha";
case IMGFMT_411P: return "Planar 411P";
case IMGFMT_NV12: return "Planar NV12";
case IMGFMT_NV21: return "Planar NV21";
case IMGFMT_HM12: return "Planar NV12 Macroblock";
case IMGFMT_IUYV: return "Packed IUYV";
case IMGFMT_IY41: return "Packed IY41";
case IMGFMT_IYU1: return "Packed IYU1";
case IMGFMT_IYU2: return "Packed IYU2";
case IMGFMT_UYVY: return "Packed UYVY";
case IMGFMT_UYNV: return "Packed UYNV";
case IMGFMT_cyuv: return "Packed CYUV";
case IMGFMT_Y422: return "Packed Y422";
case IMGFMT_YUY2: return "Packed YUY2";
case IMGFMT_YUNV: return "Packed YUNV";
case IMGFMT_YVYU: return "Packed YVYU";
case IMGFMT_Y41P: return "Packed Y41P";
case IMGFMT_Y211: return "Packed Y211";
case IMGFMT_Y41T: return "Packed Y41T";
case IMGFMT_Y42T: return "Packed Y42T";
case IMGFMT_V422: return "Packed V422";
case IMGFMT_V655: return "Packed V655";
case IMGFMT_CLJR: return "Packed CLJR";
case IMGFMT_YUVP: return "Packed YUVP";
case IMGFMT_UYVP: return "Packed UYVP";
case IMGFMT_MPEGPES: return "Mpeg PES";
case IMGFMT_ZRMJPEGNI: return "Zoran MJPEG non-interlaced";
case IMGFMT_ZRMJPEGIT: return "Zoran MJPEG top field first";
case IMGFMT_ZRMJPEGIB: return "Zoran MJPEG bottom field first";
case IMGFMT_XVMC_MOCO_MPEG2: return "MPEG1/2 Motion Compensation";
case IMGFMT_XVMC_IDCT_MPEG2: return "MPEG1/2 Motion Compensation and IDCT";
case IMGFMT_VDPAU_MPEG1: return "MPEG1 VDPAU acceleration";
case IMGFMT_VDPAU_MPEG2: return "MPEG2 VDPAU acceleration";
case IMGFMT_VDPAU_H264: return "H.264 VDPAU acceleration";
case IMGFMT_VDPAU_MPEG4: return "MPEG-4 Part 2 VDPAU acceleration";
case IMGFMT_VDPAU_WMV3: return "WMV3 VDPAU acceleration";
case IMGFMT_VDPAU_VC1: return "VC1 VDPAU acceleration";
}
snprintf(unknown_format,20,"Unknown 0x%04x",format);
return unknown_format;
}
 
int ff_mp_get_chroma_shift(int format, int *x_shift, int *y_shift, int *component_bits)
{
int xs = 0, ys = 0;
int bpp;
int err = 0;
int bits = 8;
if ((format & 0xff0000f0) == 0x34000050)
format = av_bswap32(format);
if ((format & 0xf00000ff) == 0x50000034) {
switch (format >> 24) {
case 0x50:
break;
case 0x51:
bits = 16;
break;
case 0x52:
bits = 10;
break;
case 0x53:
bits = 9;
break;
default:
err = 1;
break;
}
switch (format & 0x00ffffff) {
case 0x00343434: // 444
xs = 0;
ys = 0;
break;
case 0x00323234: // 422
xs = 1;
ys = 0;
break;
case 0x00303234: // 420
xs = 1;
ys = 1;
break;
case 0x00313134: // 411
xs = 2;
ys = 0;
break;
case 0x00303434: // 440
xs = 0;
ys = 1;
break;
default:
err = 1;
break;
}
} else switch (format) {
case IMGFMT_444A:
xs = 0;
ys = 0;
break;
case IMGFMT_422A:
xs = 1;
ys = 0;
break;
case IMGFMT_420A:
case IMGFMT_I420:
case IMGFMT_IYUV:
case IMGFMT_YV12:
xs = 1;
ys = 1;
break;
case IMGFMT_IF09:
case IMGFMT_YVU9:
xs = 2;
ys = 2;
break;
case IMGFMT_Y8:
case IMGFMT_Y800:
xs = 31;
ys = 31;
break;
default:
err = 1;
break;
}
if (x_shift) *x_shift = xs;
if (y_shift) *y_shift = ys;
if (component_bits) *component_bits = bits;
bpp = 8 + ((16 >> xs) >> ys);
if (format == IMGFMT_420A || format == IMGFMT_422A || format == IMGFMT_444A)
bpp += 8;
bpp *= (bits + 7) >> 3;
return err ? 0 : bpp;
}
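
/* Worked example (a sketch, not from the original source): for
 * IMGFMT_420P10_LE (0x52303234) the prefix byte 0x52 selects 10-bit
 * components and "420" gives xs = ys = 1, so the function stores
 * x_shift = y_shift = 1, component_bits = 10 and returns
 * bpp = (8 + ((16 >> 1) >> 1)) * ((10 + 7) >> 3) = 12 * 2 = 24. */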
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/img_format.h
0,0 → 1,300
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef MPLAYER_IMG_FORMAT_H
#define MPLAYER_IMG_FORMAT_H
 
#include "config.h"
 
/* RGB/BGR Formats */
 
#define IMGFMT_RGB_MASK 0xFFFFFF00
#define IMGFMT_RGB (('R'<<24)|('G'<<16)|('B'<<8))
#define IMGFMT_RGB1 (IMGFMT_RGB|1)
#define IMGFMT_RGB4 (IMGFMT_RGB|4)
#define IMGFMT_RGB4_CHAR (IMGFMT_RGB|4|128) // RGB4 with 1 pixel per byte
#define IMGFMT_RGB8 (IMGFMT_RGB|8)
#define IMGFMT_RGB12 (IMGFMT_RGB|12)
#define IMGFMT_RGB15 (IMGFMT_RGB|15)
#define IMGFMT_RGB16 (IMGFMT_RGB|16)
#define IMGFMT_RGB24 (IMGFMT_RGB|24)
#define IMGFMT_RGB32 (IMGFMT_RGB|32)
#define IMGFMT_RGB48LE (IMGFMT_RGB|48)
#define IMGFMT_RGB48BE (IMGFMT_RGB|48|128)
#define IMGFMT_RGB64LE (IMGFMT_RGB|64)
#define IMGFMT_RGB64BE (IMGFMT_RGB|64|128)
 
#define IMGFMT_BGR_MASK 0xFFFFFF00
#define IMGFMT_BGR (('B'<<24)|('G'<<16)|('R'<<8))
#define IMGFMT_BGR1 (IMGFMT_BGR|1)
#define IMGFMT_BGR4 (IMGFMT_BGR|4)
#define IMGFMT_BGR4_CHAR (IMGFMT_BGR|4|128) // BGR4 with 1 pixel per byte
#define IMGFMT_BGR8 (IMGFMT_BGR|8)
#define IMGFMT_BGR12 (IMGFMT_BGR|12)
#define IMGFMT_BGR15 (IMGFMT_BGR|15)
#define IMGFMT_BGR16 (IMGFMT_BGR|16)
#define IMGFMT_BGR24 (IMGFMT_BGR|24)
#define IMGFMT_BGR32 (IMGFMT_BGR|32)
 
#define IMGFMT_GBR24P (('G'<<24)|('B'<<16)|('R'<<8)|24)
#define IMGFMT_GBR12PLE (('G'<<24)|('B'<<16)|('R'<<8)|36)
#define IMGFMT_GBR12PBE (('G'<<24)|('B'<<16)|('R'<<8)|36|128)
#define IMGFMT_GBR14PLE (('G'<<24)|('B'<<16)|('R'<<8)|42)
#define IMGFMT_GBR14PBE (('G'<<24)|('B'<<16)|('R'<<8)|42|128)
 
#if HAVE_BIGENDIAN
#define IMGFMT_ABGR IMGFMT_RGB32
#define IMGFMT_BGRA (IMGFMT_RGB32|128)
#define IMGFMT_ARGB IMGFMT_BGR32
#define IMGFMT_RGBA (IMGFMT_BGR32|128)
#define IMGFMT_RGB64NE IMGFMT_RGB64BE
#define IMGFMT_RGB48NE IMGFMT_RGB48BE
#define IMGFMT_RGB12BE IMGFMT_RGB12
#define IMGFMT_RGB12LE (IMGFMT_RGB12|128)
#define IMGFMT_RGB15BE IMGFMT_RGB15
#define IMGFMT_RGB15LE (IMGFMT_RGB15|128)
#define IMGFMT_RGB16BE IMGFMT_RGB16
#define IMGFMT_RGB16LE (IMGFMT_RGB16|128)
#define IMGFMT_BGR12BE IMGFMT_BGR12
#define IMGFMT_BGR12LE (IMGFMT_BGR12|128)
#define IMGFMT_BGR15BE IMGFMT_BGR15
#define IMGFMT_BGR15LE (IMGFMT_BGR15|128)
#define IMGFMT_BGR16BE IMGFMT_BGR16
#define IMGFMT_BGR16LE (IMGFMT_BGR16|128)
#define IMGFMT_GBR12P IMGFMT_GBR12PBE
#define IMGFMT_GBR14P IMGFMT_GBR14PBE
#else
#define IMGFMT_ABGR (IMGFMT_BGR32|128)
#define IMGFMT_BGRA IMGFMT_BGR32
#define IMGFMT_ARGB (IMGFMT_RGB32|128)
#define IMGFMT_RGBA IMGFMT_RGB32
#define IMGFMT_RGB64NE IMGFMT_RGB64LE
#define IMGFMT_RGB48NE IMGFMT_RGB48LE
#define IMGFMT_RGB12BE (IMGFMT_RGB12|128)
#define IMGFMT_RGB12LE IMGFMT_RGB12
#define IMGFMT_RGB15BE (IMGFMT_RGB15|128)
#define IMGFMT_RGB15LE IMGFMT_RGB15
#define IMGFMT_RGB16BE (IMGFMT_RGB16|128)
#define IMGFMT_RGB16LE IMGFMT_RGB16
#define IMGFMT_BGR12BE (IMGFMT_BGR12|128)
#define IMGFMT_BGR12LE IMGFMT_BGR12
#define IMGFMT_BGR15BE (IMGFMT_BGR15|128)
#define IMGFMT_BGR15LE IMGFMT_BGR15
#define IMGFMT_BGR16BE (IMGFMT_BGR16|128)
#define IMGFMT_BGR16LE IMGFMT_BGR16
#define IMGFMT_GBR12P IMGFMT_GBR12PLE
#define IMGFMT_GBR14P IMGFMT_GBR14PLE
#endif
 
/* old names for compatibility */
#define IMGFMT_RG4B IMGFMT_RGB4_CHAR
#define IMGFMT_BG4B IMGFMT_BGR4_CHAR
 
#define IMGFMT_IS_RGB(fmt) (((fmt)&IMGFMT_RGB_MASK)==IMGFMT_RGB)
#define IMGFMT_IS_BGR(fmt) (((fmt)&IMGFMT_BGR_MASK)==IMGFMT_BGR)
 
#define IMGFMT_RGB_DEPTH(fmt) ((fmt)&0x7F)
#define IMGFMT_BGR_DEPTH(fmt) ((fmt)&0x7F)
 
 
/* Planar YUV Formats */
 
#define IMGFMT_YVU9 0x39555659
#define IMGFMT_IF09 0x39304649
#define IMGFMT_YV12 0x32315659
#define IMGFMT_I420 0x30323449
#define IMGFMT_IYUV 0x56555949
#define IMGFMT_CLPL 0x4C504C43
#define IMGFMT_Y800 0x30303859
#define IMGFMT_Y8 0x20203859
#define IMGFMT_NV12 0x3231564E
#define IMGFMT_NV21 0x3132564E
#define IMGFMT_Y16_LE 0x20363159
 
/* unofficial Planar Formats, FIXME if official 4CC exists */
#define IMGFMT_444P 0x50343434
#define IMGFMT_422P 0x50323234
#define IMGFMT_411P 0x50313134
#define IMGFMT_440P 0x50303434
#define IMGFMT_HM12 0x32314D48
#define IMGFMT_Y16_BE 0x59313620
 
// Gray with alpha
#define IMGFMT_Y8A 0x59320008
// 4:2:0 planar with alpha
#define IMGFMT_420A 0x41303234
// 4:2:2 planar with alpha
#define IMGFMT_422A 0x41323234
// 4:4:4 planar with alpha
#define IMGFMT_444A 0x41343434
 
#define IMGFMT_444P16_LE 0x51343434
#define IMGFMT_444P16_BE 0x34343451
#define IMGFMT_444P14_LE 0x54343434
#define IMGFMT_444P14_BE 0x34343454
#define IMGFMT_444P12_LE 0x55343434
#define IMGFMT_444P12_BE 0x34343455
#define IMGFMT_444P10_LE 0x52343434
#define IMGFMT_444P10_BE 0x34343452
#define IMGFMT_444P9_LE 0x53343434
#define IMGFMT_444P9_BE 0x34343453
#define IMGFMT_422P16_LE 0x51323234
#define IMGFMT_422P16_BE 0x34323251
#define IMGFMT_422P14_LE 0x54323234
#define IMGFMT_422P14_BE 0x34323254
#define IMGFMT_422P12_LE 0x55323234
#define IMGFMT_422P12_BE 0x34323255
#define IMGFMT_422P10_LE 0x52323234
#define IMGFMT_422P10_BE 0x34323252
#define IMGFMT_422P9_LE 0x53323234
#define IMGFMT_422P9_BE 0x34323253
#define IMGFMT_420P16_LE 0x51303234
#define IMGFMT_420P16_BE 0x34323051
#define IMGFMT_420P14_LE 0x54303234
#define IMGFMT_420P14_BE 0x34323054
#define IMGFMT_420P12_LE 0x55303234
#define IMGFMT_420P12_BE 0x34323055
#define IMGFMT_420P10_LE 0x52303234
#define IMGFMT_420P10_BE 0x34323052
#define IMGFMT_420P9_LE 0x53303234
#define IMGFMT_420P9_BE 0x34323053
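/* Pattern behind the constants above: the high byte of the _LE constants
 * selects the depth variant ('P' 0x50 = 8 bit, 'Q' 0x51 = 16 bit,
 * 'R' 0x52 = 10 bit, 'S' 0x53 = 9 bit, 'T' 0x54 = 14 bit,
 * 'U' 0x55 = 12 bit); each _BE constant is the byte-swapped _LE FourCC. */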
#if HAVE_BIGENDIAN
#define IMGFMT_444P16 IMGFMT_444P16_BE
#define IMGFMT_444P14 IMGFMT_444P14_BE
#define IMGFMT_444P12 IMGFMT_444P12_BE
#define IMGFMT_444P10 IMGFMT_444P10_BE
#define IMGFMT_444P9 IMGFMT_444P9_BE
#define IMGFMT_422P16 IMGFMT_422P16_BE
#define IMGFMT_422P14 IMGFMT_422P14_BE
#define IMGFMT_422P12 IMGFMT_422P12_BE
#define IMGFMT_422P10 IMGFMT_422P10_BE
#define IMGFMT_422P9 IMGFMT_422P9_BE
#define IMGFMT_420P16 IMGFMT_420P16_BE
#define IMGFMT_420P14 IMGFMT_420P14_BE
#define IMGFMT_420P12 IMGFMT_420P12_BE
#define IMGFMT_420P10 IMGFMT_420P10_BE
#define IMGFMT_420P9 IMGFMT_420P9_BE
#define IMGFMT_Y16 IMGFMT_Y16_BE
#define IMGFMT_IS_YUVP16_NE(fmt) IMGFMT_IS_YUVP16_BE(fmt)
#else
#define IMGFMT_444P16 IMGFMT_444P16_LE
#define IMGFMT_444P14 IMGFMT_444P14_LE
#define IMGFMT_444P12 IMGFMT_444P12_LE
#define IMGFMT_444P10 IMGFMT_444P10_LE
#define IMGFMT_444P9 IMGFMT_444P9_LE
#define IMGFMT_422P16 IMGFMT_422P16_LE
#define IMGFMT_422P14 IMGFMT_422P14_LE
#define IMGFMT_422P12 IMGFMT_422P12_LE
#define IMGFMT_422P10 IMGFMT_422P10_LE
#define IMGFMT_422P9 IMGFMT_422P9_LE
#define IMGFMT_420P16 IMGFMT_420P16_LE
#define IMGFMT_420P14 IMGFMT_420P14_LE
#define IMGFMT_420P12 IMGFMT_420P12_LE
#define IMGFMT_420P10 IMGFMT_420P10_LE
#define IMGFMT_420P9 IMGFMT_420P9_LE
#define IMGFMT_Y16 IMGFMT_Y16_LE
#define IMGFMT_IS_YUVP16_NE(fmt) IMGFMT_IS_YUVP16_LE(fmt)
#endif
 
#define IMGFMT_IS_YUVP16_LE(fmt) (((fmt - 0x51000034) & 0xfc0000ff) == 0)
#define IMGFMT_IS_YUVP16_BE(fmt) (((fmt - 0x34000051) & 0xff0000fc) == 0)
#define IMGFMT_IS_YUVP16(fmt) (IMGFMT_IS_YUVP16_LE(fmt) || IMGFMT_IS_YUVP16_BE(fmt))
 
/**
* \brief Find the corresponding full 16 bit format, e.g. IMGFMT_420P10_LE -> IMGFMT_420P16_LE
* \return normalized format ID or 0 if none exists.
*/
static inline int normalize_yuvp16(int fmt) {
if (IMGFMT_IS_YUVP16_LE(fmt))
return (fmt & 0x00ffffff) | 0x51000000;
if (IMGFMT_IS_YUVP16_BE(fmt))
return (fmt & 0xffffff00) | 0x00000051;
return 0;
}
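
/* Example (sketch): normalize_yuvp16(IMGFMT_420P10_LE) masks out the 0x52
 * depth byte and substitutes 0x51, yielding IMGFMT_420P16_LE; formats that
 * are not high-depth planar YUV return 0. */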
 
/* Packed YUV Formats */
 
#define IMGFMT_IUYV 0x56595549 // Interlaced UYVY
#define IMGFMT_IY41 0x31435949 // Interlaced Y41P
#define IMGFMT_IYU1 0x31555949
#define IMGFMT_IYU2 0x32555949
#define IMGFMT_UYVY 0x59565955
#define IMGFMT_UYNV 0x564E5955 // Exactly same as UYVY
#define IMGFMT_cyuv 0x76757963 // upside-down UYVY
#define IMGFMT_Y422 0x32323459 // Exactly same as UYVY
#define IMGFMT_YUY2 0x32595559
#define IMGFMT_YUNV 0x564E5559 // Exactly same as YUY2
#define IMGFMT_YVYU 0x55595659
#define IMGFMT_Y41P 0x50313459
#define IMGFMT_Y211 0x31313259
#define IMGFMT_Y41T 0x54313459 // Y41P, Y lsb = transparency
#define IMGFMT_Y42T 0x54323459 // UYVY, Y lsb = transparency
#define IMGFMT_V422 0x32323456 // upside-down UYVY?
#define IMGFMT_V655 0x35353656
#define IMGFMT_CLJR 0x524A4C43
#define IMGFMT_YUVP 0x50565559 // 10-bit YUYV
#define IMGFMT_UYVP 0x50565955 // 10-bit UYVY
 
/* Compressed Formats */
#define IMGFMT_MPEGPES (('M'<<24)|('P'<<16)|('E'<<8)|('S'))
#define IMGFMT_MJPEG (('M')|('J'<<8)|('P'<<16)|('G'<<24))
/* Formats that are understood by zoran chips, we include
* non-interlaced, interlaced top-first, interlaced bottom-first */
#define IMGFMT_ZRMJPEGNI (('Z'<<24)|('R'<<16)|('N'<<8)|('I'))
#define IMGFMT_ZRMJPEGIT (('Z'<<24)|('R'<<16)|('I'<<8)|('T'))
#define IMGFMT_ZRMJPEGIB (('Z'<<24)|('R'<<16)|('I'<<8)|('B'))
 
// I think that this code could not be used by any other codec/format
#define IMGFMT_XVMC 0x1DC70000
#define IMGFMT_XVMC_MASK 0xFFFF0000
#define IMGFMT_IS_XVMC(fmt) (((fmt)&IMGFMT_XVMC_MASK)==IMGFMT_XVMC)
//these are chroma420
#define IMGFMT_XVMC_MOCO_MPEG2 (IMGFMT_XVMC|0x02)
#define IMGFMT_XVMC_IDCT_MPEG2 (IMGFMT_XVMC|0x82)
 
// VDPAU specific format.
#define IMGFMT_VDPAU 0x1DC80000
#define IMGFMT_VDPAU_MASK 0xFFFF0000
#define IMGFMT_IS_VDPAU(fmt) (((fmt)&IMGFMT_VDPAU_MASK)==IMGFMT_VDPAU)
#define IMGFMT_VDPAU_MPEG1 (IMGFMT_VDPAU|0x01)
#define IMGFMT_VDPAU_MPEG2 (IMGFMT_VDPAU|0x02)
#define IMGFMT_VDPAU_H264 (IMGFMT_VDPAU|0x03)
#define IMGFMT_VDPAU_WMV3 (IMGFMT_VDPAU|0x04)
#define IMGFMT_VDPAU_VC1 (IMGFMT_VDPAU|0x05)
#define IMGFMT_VDPAU_MPEG4 (IMGFMT_VDPAU|0x06)
 
#define IMGFMT_IS_HWACCEL(fmt) (IMGFMT_IS_VDPAU(fmt) || IMGFMT_IS_XVMC(fmt))
 
typedef struct {
void* data;
int size;
int id; // stream id. usually 0x1E0
int timestamp; // pts, 90000 Hz counter based
} vo_mpegpes_t;
 
const char *ff_vo_format_name(int format);
 
/**
* Calculates the scale shifts for the chroma planes for planar YUV
*
* \param component_bits bits per component
* \return bits-per-pixel for format if successful (i.e. format is 3- or 4-plane planar YUV), 0 otherwise
*/
int ff_mp_get_chroma_shift(int format, int *x_shift, int *y_shift, int *component_bits);
 
#endif /* MPLAYER_IMG_FORMAT_H */
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/libvo/fastmemcpy.h
0,0 → 1,99
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with MPlayer; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef MPLAYER_FASTMEMCPY_H
#define MPLAYER_FASTMEMCPY_H
 
#include <inttypes.h>
#include <string.h>
#include <stddef.h>
 
void * fast_memcpy(void * to, const void * from, size_t len);
void * mem2agpcpy(void * to, const void * from, size_t len);
 
#if ! defined(CONFIG_FASTMEMCPY) || ! (HAVE_MMX || HAVE_MMX2 || HAVE_AMD3DNOW /* || HAVE_SSE || HAVE_SSE2 */)
#define mem2agpcpy(a,b,c) memcpy(a,b,c)
#define fast_memcpy(a,b,c) memcpy(a,b,c)
#endif
 
static inline void * mem2agpcpy_pic(void * dst, const void * src, int bytesPerLine, int height, int dstStride, int srcStride)
{
int i;
void *retval=dst;
 
if(dstStride == srcStride)
{
if (srcStride < 0) {
src = (const uint8_t*)src + (height-1)*srcStride;
dst = (uint8_t*)dst + (height-1)*dstStride;
srcStride = -srcStride;
}
 
mem2agpcpy(dst, src, srcStride*height);
}
else
{
for(i=0; i<height; i++)
{
mem2agpcpy(dst, src, bytesPerLine);
src = (const uint8_t*)src + srcStride;
dst = (uint8_t*)dst + dstStride;
}
}
 
return retval;
}
 
#define memcpy_pic(d, s, b, h, ds, ss) memcpy_pic2(d, s, b, h, ds, ss, 0)
#define my_memcpy_pic(d, s, b, h, ds, ss) memcpy_pic2(d, s, b, h, ds, ss, 1)
 
/**
* \param limit2width always skip data between end of line and start of next
* instead of copying the full block when strides are the same
*/
static inline void * memcpy_pic2(void * dst, const void * src,
int bytesPerLine, int height,
int dstStride, int srcStride, int limit2width)
{
int i;
void *retval=dst;
 
if(!limit2width && dstStride == srcStride)
{
if (srcStride < 0) {
src = (const uint8_t*)src + (height-1)*srcStride;
dst = (uint8_t*)dst + (height-1)*dstStride;
srcStride = -srcStride;
}
 
fast_memcpy(dst, src, srcStride*height);
}
else
{
for(i=0; i<height; i++)
{
fast_memcpy(dst, src, bytesPerLine);
src = (const uint8_t*)src + srcStride;
dst = (uint8_t*)dst + dstStride;
}
}
 
return retval;
}
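
/* Usage sketch (values are illustrative): copy the visible WxH region of a
 * Y plane whose buffers were allocated with wider strides:
 *
 *     memcpy_pic(dst_y, src_y, width, height, dst_stride, src_stride);
 *
 * With equal strides (and limit2width == 0) the block is copied in one
 * fast_memcpy() call, including the inter-line padding; otherwise it falls
 * back to one fast_memcpy() per line. */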
 
#endif /* MPLAYER_FASTMEMCPY_H */
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/libvo/video_out.h
0,0 → 1,281
/*
* Copyright (C) Aaron Holtzman - Aug 1999
* Strongly modified, most parts rewritten: A'rpi/ESP-team - 2000-2001
* (C) MPlayer developers
*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef MPLAYER_VIDEO_OUT_H
#define MPLAYER_VIDEO_OUT_H
 
#include <inttypes.h>
#include <stdarg.h>
 
//#include "sub/font_load.h"
#include "../img_format.h"
//#include "vidix/vidix.h"
 
#define VO_EVENT_EXPOSE 1
#define VO_EVENT_RESIZE 2
#define VO_EVENT_KEYPRESS 4
#define VO_EVENT_REINIT 8
#define VO_EVENT_MOVE 16
 
/* Obsolete: VOCTRL_QUERY_VAA 1 */
/* does the device support the required format */
#define VOCTRL_QUERY_FORMAT 2
/* signal a device reset seek */
#define VOCTRL_RESET 3
/* true if vo driver can use GUI created windows */
#define VOCTRL_GUISUPPORT 4
#define VOCTRL_GUI_NOWINDOW 19
/* used to switch to fullscreen */
#define VOCTRL_FULLSCREEN 5
/* signal a device pause */
#define VOCTRL_PAUSE 7
/* start/resume playback */
#define VOCTRL_RESUME 8
/* libmpcodecs direct rendering: */
#define VOCTRL_GET_IMAGE 9
#define VOCTRL_DRAW_IMAGE 13
#define VOCTRL_SET_SPU_PALETTE 14
/* decoding ahead: */
#define VOCTRL_GET_NUM_FRAMES 10
#define VOCTRL_GET_FRAME_NUM 11
#define VOCTRL_SET_FRAME_NUM 12
#define VOCTRL_GET_PANSCAN 15
#define VOCTRL_SET_PANSCAN 16
/* equalizer controls */
#define VOCTRL_SET_EQUALIZER 17
#define VOCTRL_GET_EQUALIZER 18
//#define VOCTRL_GUI_NOWINDOW 19
/* Frame duplication */
#define VOCTRL_DUPLICATE_FRAME 20
// ... 21
#define VOCTRL_START_SLICE 21
 
#define VOCTRL_ONTOP 25
#define VOCTRL_ROOTWIN 26
#define VOCTRL_BORDER 27
#define VOCTRL_DRAW_EOSD 28
#define VOCTRL_GET_EOSD_RES 29
 
#define VOCTRL_SET_DEINTERLACE 30
#define VOCTRL_GET_DEINTERLACE 31
 
#define VOCTRL_UPDATE_SCREENINFO 32
 
// Vo can be used by xover
#define VOCTRL_XOVERLAY_SUPPORT 22
 
#define VOCTRL_XOVERLAY_SET_COLORKEY 24
typedef struct {
uint32_t x11; // The raw x11 color
uint16_t r,g,b;
} mp_colorkey_t;
 
#define VOCTRL_XOVERLAY_SET_WIN 23
typedef struct {
int x,y;
int w,h;
} mp_win_t;
 
#define VO_TRUE 1
#define VO_FALSE 0
#define VO_ERROR -1
#define VO_NOTAVAIL -2
#define VO_NOTIMPL -3
 
#define VOFLAG_FULLSCREEN 0x01
#define VOFLAG_MODESWITCHING 0x02
#define VOFLAG_SWSCALE 0x04
#define VOFLAG_FLIPPING 0x08
#define VOFLAG_HIDDEN 0x10 //< Use to create a hidden window
#define VOFLAG_STEREO 0x20 //< Use to create a stereo-capable window
#define VOFLAG_XOVERLAY_SUB_VO 0x10000
 
typedef struct vo_info_s
{
/* driver name ("Matrox Millennium G200/G400" */
const char *name;
/* short name (for config strings) ("mga") */
const char *short_name;
/* author ("Aaron Holtzman <aholtzma@ess.engr.uvic.ca>") */
const char *author;
/* any additional comments */
const char *comment;
} vo_info_t;
 
typedef struct vo_functions_s
{
const vo_info_t *info;
/*
* Preinitializes driver (real INITIALIZATION)
* arg - currently it's vo_subdevice
* returns: zero on successful initialization, non-zero on error.
*/
int (*preinit)(const char *arg);
/*
* Initialize (means CONFIGURE) the display driver.
* params:
* width,height: image source size
* d_width,d_height: requested window size, just a hint
* fullscreen: flag, 0=windowed 1=fullscreen, just a hint
* title: window title, if available
* format: fourcc of pixel format
* returns: zero on successful initialization, non-zero on error.
*/
int (*config)(uint32_t width, uint32_t height, uint32_t d_width,
uint32_t d_height, uint32_t fullscreen, char *title,
uint32_t format);
 
/*
* Control interface
*/
int (*control)(uint32_t request, void *data, ...);
 
/*
* Display a new RGB/BGR frame of the video to the screen.
* params:
* src[0] - pointer to the image
*/
int (*draw_frame)(uint8_t *src[]);
 
/*
* Draw a planar YUV slice to the buffer:
* params:
* src[3] = source image planes (Y,U,V)
* stride[3] = source image planes line widths (in bytes)
* w,h = width*height of area to be copied (in Y pixels)
* x,y = position at the destination image (in Y pixels)
*/
int (*draw_slice)(uint8_t *src[], int stride[], int w,int h, int x,int y);
 
/*
* Draws OSD to the screen buffer
*/
void (*draw_osd)(void);
 
/*
* Blit/Flip buffer to the screen. Must be called after each frame!
*/
void (*flip_page)(void);
 
/*
* This func is called after every frame to handle keyboard and
* other events. It's called in PAUSE mode too!
*/
void (*check_events)(void);
 
/*
* Closes driver. Should restore the original state of the system.
*/
void (*uninit)(void);
} vo_functions_t;
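
/* Typical driver lifecycle (a sketch; error handling omitted):
 *
 *     vo->preinit(NULL);
 *     vo->config(w, h, w, h, 0, "title", IMGFMT_YV12);
 *     // per frame:
 *     vo->draw_slice(planes, strides, w, h, 0, 0);
 *     vo->draw_osd();
 *     vo->flip_page();
 *     vo->check_events();
 *     // teardown:
 *     vo->uninit();
 */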
 
const vo_functions_t* init_best_video_out(char** vo_list);
int config_video_out(const vo_functions_t *vo, uint32_t width, uint32_t height,
uint32_t d_width, uint32_t d_height, uint32_t flags,
char *title, uint32_t format);
void list_video_out(void);
 
// NULL terminated array of all drivers
extern const vo_functions_t* const video_out_drivers[];
 
extern int vo_flags;
 
extern int vo_config_count;
 
extern int xinerama_screen;
extern int xinerama_x;
extern int xinerama_y;
 
// correct resolution/bpp on screen: (should be autodetected by vo_init())
extern int vo_depthonscreen;
extern int vo_screenwidth;
extern int vo_screenheight;
 
// requested resolution/bpp: (-x -y -bpp options)
extern int vo_dx;
extern int vo_dy;
extern int vo_dwidth;
extern int vo_dheight;
extern int vo_dbpp;
 
extern int vo_grabpointer;
extern int vo_doublebuffering;
extern int vo_directrendering;
extern int vo_vsync;
extern int vo_fsmode;
extern float vo_panscan;
extern int vo_adapter_num;
extern int vo_refresh_rate;
extern int vo_keepaspect;
extern int vo_rootwin;
extern int vo_ontop;
extern int vo_border;
 
extern int vo_gamma_gamma;
extern int vo_gamma_brightness;
extern int vo_gamma_saturation;
extern int vo_gamma_contrast;
extern int vo_gamma_hue;
extern int vo_gamma_red_intensity;
extern int vo_gamma_green_intensity;
extern int vo_gamma_blue_intensity;
 
extern int vo_nomouse_input;
extern int enable_mouse_movements;
 
extern int vo_pts;
extern float vo_fps;
 
extern char *vo_subdevice;
 
extern int vo_colorkey;
 
extern char *vo_winname;
extern char *vo_wintitle;
 
extern int64_t WinID;
 
typedef struct {
float min;
float max;
} range_t;
 
float range_max(range_t *r);
int in_range(range_t *r, float f);
range_t *str2range(char *s);
extern char *monitor_hfreq_str;
extern char *monitor_vfreq_str;
extern char *monitor_dotclock_str;
 
struct mp_keymap {
int from;
int to;
};
int lookup_keymap_table(const struct mp_keymap *map, int key);
struct vo_rect {
int left, right, top, bottom, width, height;
};
void calc_src_dst_rects(int src_width, int src_height, struct vo_rect *src, struct vo_rect *dst,
struct vo_rect *borders, const struct vo_rect *crop);
void vo_mouse_movement(int posx, int posy);
 
#endif /* MPLAYER_VIDEO_OUT_H */
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/mp_image.c
0,0 → 1,253
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#include "config.h"
 
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
 
#if HAVE_MALLOC_H
#include <malloc.h>
#endif
 
#include "img_format.h"
#include "mp_image.h"
 
#include "libvo/fastmemcpy.h"
//#include "libavutil/mem.h"
#include "libavutil/imgutils.h"
 
void ff_mp_image_alloc_planes(mp_image_t *mpi) {
uint32_t temp[256];
if (avpriv_set_systematic_pal2(temp, ff_mp2ff_pix_fmt(mpi->imgfmt)) >= 0)
mpi->flags |= MP_IMGFLAG_RGB_PALETTE;
 
// IF09 - allocate space for the 4th plane's delta info - unused
if (mpi->imgfmt == IMGFMT_IF09) {
mpi->planes[0]=av_malloc(mpi->bpp*mpi->width*(mpi->height+2)/8+
mpi->chroma_width*mpi->chroma_height);
} else
mpi->planes[0]=av_malloc(mpi->bpp*mpi->width*(mpi->height+2)/8);
if (mpi->flags&MP_IMGFLAG_PLANAR) {
int bpp = IMGFMT_IS_YUVP16(mpi->imgfmt)? 2 : 1;
// YV12/I420/YVU9/IF09. feel free to add other planar formats here...
mpi->stride[0]=mpi->stride[3]=bpp*mpi->width;
if(mpi->num_planes > 2){
mpi->stride[1]=mpi->stride[2]=bpp*mpi->chroma_width;
if(mpi->flags&MP_IMGFLAG_SWAPPED){
// I420/IYUV (Y,U,V)
mpi->planes[1]=mpi->planes[0]+mpi->stride[0]*mpi->height;
mpi->planes[2]=mpi->planes[1]+mpi->stride[1]*mpi->chroma_height;
if (mpi->num_planes > 3)
mpi->planes[3]=mpi->planes[2]+mpi->stride[2]*mpi->chroma_height;
} else {
// YV12,YVU9,IF09 (Y,V,U)
mpi->planes[2]=mpi->planes[0]+mpi->stride[0]*mpi->height;
mpi->planes[1]=mpi->planes[2]+mpi->stride[1]*mpi->chroma_height;
if (mpi->num_planes > 3)
mpi->planes[3]=mpi->planes[1]+mpi->stride[1]*mpi->chroma_height;
}
} else {
// NV12/NV21
mpi->stride[1]=mpi->chroma_width;
mpi->planes[1]=mpi->planes[0]+mpi->stride[0]*mpi->height;
}
} else {
mpi->stride[0]=mpi->width*mpi->bpp/8;
if (mpi->flags & MP_IMGFLAG_RGB_PALETTE) {
mpi->planes[1] = av_malloc(1024);
memcpy(mpi->planes[1], temp, 1024);
}
}
mpi->flags|=MP_IMGFLAG_ALLOCATED;
}
 
mp_image_t* ff_alloc_mpi(int w, int h, unsigned long int fmt) {
mp_image_t* mpi = ff_new_mp_image(w,h);
 
ff_mp_image_setfmt(mpi,fmt);
ff_mp_image_alloc_planes(mpi);
 
return mpi;
}
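
/* Example (sketch): allocate and release a 640x480 YV12 image; the planes
 * live in a single av_malloc() block, so ff_free_mp_image() frees only
 * planes[0] (plus the palette, when MP_IMGFLAG_RGB_PALETTE is set):
 *
 *     mp_image_t *mpi = ff_alloc_mpi(640, 480, IMGFMT_YV12);
 *     ...
 *     ff_free_mp_image(mpi);
 */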
 
void ff_copy_mpi(mp_image_t *dmpi, mp_image_t *mpi) {
if(mpi->flags&MP_IMGFLAG_PLANAR){
memcpy_pic(dmpi->planes[0],mpi->planes[0], mpi->w, mpi->h,
dmpi->stride[0],mpi->stride[0]);
memcpy_pic(dmpi->planes[1],mpi->planes[1], mpi->chroma_width, mpi->chroma_height,
dmpi->stride[1],mpi->stride[1]);
memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->chroma_width, mpi->chroma_height,
dmpi->stride[2],mpi->stride[2]);
} else {
memcpy_pic(dmpi->planes[0],mpi->planes[0],
mpi->w*(dmpi->bpp/8), mpi->h,
dmpi->stride[0],mpi->stride[0]);
}
}
 
void ff_mp_image_setfmt(mp_image_t* mpi,unsigned int out_fmt){
mpi->flags&=~(MP_IMGFLAG_PLANAR|MP_IMGFLAG_YUV|MP_IMGFLAG_SWAPPED);
mpi->imgfmt=out_fmt;
// compressed formats
if(out_fmt == IMGFMT_MPEGPES ||
out_fmt == IMGFMT_ZRMJPEGNI || out_fmt == IMGFMT_ZRMJPEGIT || out_fmt == IMGFMT_ZRMJPEGIB ||
IMGFMT_IS_HWACCEL(out_fmt)){
mpi->bpp=0;
return;
}
mpi->num_planes=1;
if (IMGFMT_IS_RGB(out_fmt)) {
if (IMGFMT_RGB_DEPTH(out_fmt) < 8 && !(out_fmt&128))
mpi->bpp = IMGFMT_RGB_DEPTH(out_fmt);
else
mpi->bpp=(IMGFMT_RGB_DEPTH(out_fmt)+7)&(~7);
return;
}
if (IMGFMT_IS_BGR(out_fmt)) {
if (IMGFMT_BGR_DEPTH(out_fmt) < 8 && !(out_fmt&128))
mpi->bpp = IMGFMT_BGR_DEPTH(out_fmt);
else
mpi->bpp=(IMGFMT_BGR_DEPTH(out_fmt)+7)&(~7);
mpi->flags|=MP_IMGFLAG_SWAPPED;
return;
}
mpi->num_planes=3;
if (out_fmt == IMGFMT_GBR24P) {
mpi->bpp=24;
mpi->flags|=MP_IMGFLAG_PLANAR;
return;
} else if (out_fmt == IMGFMT_GBR12P) {
mpi->bpp=36;
mpi->flags|=MP_IMGFLAG_PLANAR;
return;
} else if (out_fmt == IMGFMT_GBR14P) {
mpi->bpp=42;
mpi->flags|=MP_IMGFLAG_PLANAR;
return;
}
mpi->flags|=MP_IMGFLAG_YUV;
if (ff_mp_get_chroma_shift(out_fmt, NULL, NULL, NULL)) {
mpi->flags|=MP_IMGFLAG_PLANAR;
mpi->bpp = ff_mp_get_chroma_shift(out_fmt, &mpi->chroma_x_shift, &mpi->chroma_y_shift, NULL);
mpi->chroma_width = mpi->width >> mpi->chroma_x_shift;
mpi->chroma_height = mpi->height >> mpi->chroma_y_shift;
}
switch(out_fmt){
case IMGFMT_I420:
case IMGFMT_IYUV:
mpi->flags|=MP_IMGFLAG_SWAPPED;
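/* fall through */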
case IMGFMT_YV12:
return;
case IMGFMT_420A:
case IMGFMT_422A:
case IMGFMT_444A:
case IMGFMT_IF09:
mpi->num_planes=4;
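/* fall through */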
case IMGFMT_YVU9:
case IMGFMT_444P:
case IMGFMT_422P:
case IMGFMT_411P:
case IMGFMT_440P:
case IMGFMT_444P16_LE:
case IMGFMT_444P16_BE:
case IMGFMT_444P14_LE:
case IMGFMT_444P14_BE:
case IMGFMT_444P12_LE:
case IMGFMT_444P12_BE:
case IMGFMT_444P10_LE:
case IMGFMT_444P10_BE:
case IMGFMT_444P9_LE:
case IMGFMT_444P9_BE:
case IMGFMT_422P16_LE:
case IMGFMT_422P16_BE:
case IMGFMT_422P14_LE:
case IMGFMT_422P14_BE:
case IMGFMT_422P12_LE:
case IMGFMT_422P12_BE:
case IMGFMT_422P10_LE:
case IMGFMT_422P10_BE:
case IMGFMT_422P9_LE:
case IMGFMT_422P9_BE:
case IMGFMT_420P16_LE:
case IMGFMT_420P16_BE:
case IMGFMT_420P14_LE:
case IMGFMT_420P14_BE:
case IMGFMT_420P12_LE:
case IMGFMT_420P12_BE:
case IMGFMT_420P10_LE:
case IMGFMT_420P10_BE:
case IMGFMT_420P9_LE:
case IMGFMT_420P9_BE:
return;
case IMGFMT_Y16_LE:
case IMGFMT_Y16_BE:
mpi->bpp=16;
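/* fall through */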
case IMGFMT_Y800:
case IMGFMT_Y8:
/* they're planar ones, but for easier handling use them as packed */
mpi->flags&=~MP_IMGFLAG_PLANAR;
mpi->num_planes=1;
return;
case IMGFMT_Y8A:
mpi->num_planes=2;
return;
case IMGFMT_UYVY:
mpi->flags|=MP_IMGFLAG_SWAPPED;
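/* fall through */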
case IMGFMT_YUY2:
mpi->chroma_x_shift = 1;
mpi->bpp=16;
mpi->num_planes=1;
return;
case IMGFMT_NV12:
mpi->flags|=MP_IMGFLAG_SWAPPED;
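/* fall through */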
case IMGFMT_NV21:
mpi->flags|=MP_IMGFLAG_PLANAR;
mpi->bpp=12;
mpi->num_planes=2;
mpi->chroma_width=(mpi->width>>0);
mpi->chroma_height=(mpi->height>>1);
mpi->chroma_x_shift=0;
mpi->chroma_y_shift=1;
return;
}
ff_mp_msg(MSGT_DECVIDEO,MSGL_WARN,"mp_image: unknown out_fmt: 0x%X\n",out_fmt);
mpi->bpp=0;
}
 
mp_image_t* ff_new_mp_image(int w,int h){
mp_image_t* mpi = malloc(sizeof(mp_image_t));
if(!mpi) return NULL; // error!
memset(mpi,0,sizeof(mp_image_t));
mpi->width=mpi->w=w;
mpi->height=mpi->h=h;
return mpi;
}
 
void ff_free_mp_image(mp_image_t* mpi){
if(!mpi) return;
if(mpi->flags&MP_IMGFLAG_ALLOCATED){
/* because we allocate the whole image at once */
av_free(mpi->planes[0]);
if (mpi->flags & MP_IMGFLAG_RGB_PALETTE)
av_free(mpi->planes[1]);
}
free(mpi);
}
 
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/mp_image.h
0,0 → 1,159
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef MPLAYER_MP_IMAGE_H
#define MPLAYER_MP_IMAGE_H
 
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#undef printf //FIXME
#undef fprintf //FIXME
#include "mp_msg.h"
#include "libavutil/avutil.h"
#include "libavutil/avassert.h"
#undef realloc
#undef malloc
#undef free
#undef rand
#undef srand
#undef printf
#undef strncpy
#define ASMALIGN(ZEROBITS) ".p2align " #ZEROBITS "\n\t"
#define CODEC_FLAG2_MEMC_ONLY 0x00001000 ///< Only do ME/MC (I frames -> ref, P frame -> ME+MC).
 
enum AVPixelFormat ff_mp2ff_pix_fmt(int mp);
 
//--------- codec's requirements (filled by the codec/vf) ---------
 
//--- buffer content restrictions:
// set if buffer content shouldn't be modified:
#define MP_IMGFLAG_PRESERVE 0x01
// set if buffer content will be READ.
// This can be e.g. for next frame's MC: (I/P mpeg frames) -
// then in combination with MP_IMGFLAG_PRESERVE - or it
// can be because a video filter or codec will read a significant
// amount of data while processing that frame (e.g. blending something
// onto the frame, MV based intra prediction).
// A frame marked like this should not be placed into uncacheable
// video RAM for example.
#define MP_IMGFLAG_READABLE 0x02
 
//--- buffer width/stride/plane restrictions: (used for direct rendering)
// stride _have_to_ be aligned to MB boundary: [for DR restrictions]
#define MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE 0x4
// stride should be aligned to MB boundary: [for buffer allocation]
#define MP_IMGFLAG_PREFER_ALIGNED_STRIDE 0x8
// codec accept any stride (>=width):
#define MP_IMGFLAG_ACCEPT_STRIDE 0x10
// codec accept any width (width*bpp=stride -> stride%bpp==0) (>=width):
#define MP_IMGFLAG_ACCEPT_WIDTH 0x20
//--- for planar formats only:
// uses only stride[0], and stride[1]=stride[2]=stride[0]>>mpi->chroma_x_shift
#define MP_IMGFLAG_COMMON_STRIDE 0x40
// uses only planes[0], and calculates planes[1,2] from width,height,imgfmt
#define MP_IMGFLAG_COMMON_PLANE 0x80
 
#define MP_IMGFLAGMASK_RESTRICTIONS 0xFF
 
//--------- color info (filled by ff_mp_image_setfmt() ) -----------
// set if number of planes > 1
#define MP_IMGFLAG_PLANAR 0x100
// set if it's YUV colorspace
#define MP_IMGFLAG_YUV 0x200
// set if it's swapped (BGR or YVU) plane/byteorder
#define MP_IMGFLAG_SWAPPED 0x400
// set if you want memory for palette allocated and managed by ff_vf_get_image etc.
#define MP_IMGFLAG_RGB_PALETTE 0x800
 
#define MP_IMGFLAGMASK_COLORS 0xF00
 
// codec uses drawing/rendering callbacks (draw_slice()-like thing, DR method 2)
// [the codec will set this flag if it supports callbacks, and the vo _may_
// clear it in get_image() if draw_slice() not implemented]
#define MP_IMGFLAG_DRAW_CALLBACK 0x1000
// set if it's in video buffer/memory: [set by vo/vf's get_image() !!!]
#define MP_IMGFLAG_DIRECT 0x2000
// set if buffer is allocated (used in destination images):
#define MP_IMGFLAG_ALLOCATED 0x4000
 
// buffer type was printed (do NOT set this flag - it's for INTERNAL USE!!!)
#define MP_IMGFLAG_TYPE_DISPLAYED 0x8000
 
// codec doesn't support any form of direct rendering - it has own buffer
// allocation. so we just export its buffer pointers:
#define MP_IMGTYPE_EXPORT 0
// codec requires a static WO buffer, but it does only partial updates later:
#define MP_IMGTYPE_STATIC 1
// codec just needs some WO memory, where it writes/copies the whole frame to:
#define MP_IMGTYPE_TEMP 2
// I+P type, requires 2+ independent static R/W buffers
#define MP_IMGTYPE_IP 3
// I+P+B type, requires 2+ independent static R/W and 1+ temp WO buffers
#define MP_IMGTYPE_IPB 4
// Upper 16 bits give desired buffer number, -1 means get next available
#define MP_IMGTYPE_NUMBERED 5
// Doesn't need any buffer, incomplete image (probably a first field only)
// we need this type to be able to differentiate between half frames and
// all other cases
#define MP_IMGTYPE_INCOMPLETE 6
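
/* Usage sketch: a filter that merely re-points planes (vf_eq.c in this
 * tree does this) requests MP_IMGTYPE_EXPORT from ff_vf_get_image(), while
 * a filter that renders a complete new frame would ask for
 * MP_IMGTYPE_TEMP instead. */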
 
#define MP_MAX_PLANES 4
 
#define MP_IMGFIELD_ORDERED 0x01
#define MP_IMGFIELD_TOP_FIRST 0x02
#define MP_IMGFIELD_REPEAT_FIRST 0x04
#define MP_IMGFIELD_TOP 0x08
#define MP_IMGFIELD_BOTTOM 0x10
#define MP_IMGFIELD_INTERLACED 0x20
 
typedef struct mp_image {
unsigned int flags;
unsigned char type;
int number;
unsigned char bpp; // bits/pixel. NOT depth! for RGB it will be n*8
unsigned int imgfmt;
int width,height; // stored dimensions
int x,y,w,h; // visible dimensions
unsigned char* planes[MP_MAX_PLANES];
int stride[MP_MAX_PLANES];
char * qscale;
int qstride;
int pict_type; // 0->unknown, 1->I, 2->P, 3->B
int fields;
int qscale_type; // 0->mpeg1/4/h263, 1->mpeg2
int num_planes;
/* these are only used by planar formats Y,U(Cb),V(Cr) */
int chroma_width;
int chroma_height;
int chroma_x_shift; // horizontal
int chroma_y_shift; // vertical
int usage_count;
/* for private use by filter or vo driver (to store buffer id or dmpi) */
void* priv;
} mp_image_t;
 
void ff_mp_image_setfmt(mp_image_t* mpi,unsigned int out_fmt);
mp_image_t* ff_new_mp_image(int w,int h);
void ff_free_mp_image(mp_image_t* mpi);
 
mp_image_t* ff_alloc_mpi(int w, int h, unsigned long int fmt);
void ff_mp_image_alloc_planes(mp_image_t *mpi);
void ff_copy_mpi(mp_image_t *dmpi, mp_image_t *mpi);
 
#endif /* MPLAYER_MP_IMAGE_H */
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/mp_msg.h
0,0 → 1,166
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef MPLAYER_MP_MSG_H
#define MPLAYER_MP_MSG_H
 
#include <stdarg.h>
 
// defined in mplayer.c and mencoder.c
extern int verbose;
 
// verbosity level:
 
/* Only messages of level MSGL_FATAL-MSGL_STATUS should be translated;
* messages of level MSGL_V and above should not be translated. */
 
#define MSGL_FATAL 0 // will exit/abort
#define MSGL_ERR 1 // continues
#define MSGL_WARN 2 // only warning
#define MSGL_HINT 3 // short help message
#define MSGL_INFO 4 // -quiet
#define MSGL_STATUS 5 // v=0
#define MSGL_V 6 // v=1
#define MSGL_DBG2 7 // v=2
#define MSGL_DBG3 8 // v=3
#define MSGL_DBG4 9 // v=4
#define MSGL_DBG5 10 // v=5
 
#define MSGL_FIXME 1 // for conversions from printf where the appropriate MSGL is not known; set equal to ERR for obtrusiveness
#define MSGT_FIXME 0 // for conversions from printf where the appropriate MSGT is not known; set equal to GLOBAL for obtrusiveness
 
// code/module:
 
#define MSGT_GLOBAL 0 // common player stuff errors
#define MSGT_CPLAYER 1 // console player (mplayer.c)
#define MSGT_GPLAYER 2 // gui player
 
#define MSGT_VO 3 // libvo
#define MSGT_AO 4 // libao
 
#define MSGT_DEMUXER 5 // demuxer.c (general stuff)
#define MSGT_DS 6 // demux stream (add/read packet etc)
#define MSGT_DEMUX 7 // fileformat-specific stuff (demux_*.c)
#define MSGT_HEADER 8 // fileformat-specific header (*header.c)
 
#define MSGT_AVSYNC 9 // mplayer.c timer stuff
#define MSGT_AUTOQ 10 // mplayer.c auto-quality stuff
 
#define MSGT_CFGPARSER 11 // cfgparser.c
 
#define MSGT_DECAUDIO 12 // av decoder
#define MSGT_DECVIDEO 13
 
#define MSGT_SEEK 14 // seeking code
#define MSGT_WIN32 15 // win32 dll stuff
#define MSGT_OPEN 16 // open.c (stream opening)
#define MSGT_DVD 17 // open.c (DVD init/read/seek)
 
#define MSGT_PARSEES 18 // parse_es.c (mpeg stream parser)
#define MSGT_LIRC 19 // lirc_mp.c and input lirc driver
 
#define MSGT_STREAM 20 // stream.c
#define MSGT_CACHE 21 // cache2.c
 
#define MSGT_MENCODER 22
 
#define MSGT_XACODEC 23 // XAnim codecs
 
#define MSGT_TV 24 // TV input subsystem
 
#define MSGT_OSDEP 25 // OS-dependent parts
 
#define MSGT_SPUDEC 26 // spudec.c
 
#define MSGT_PLAYTREE 27 // Playtree handling (playtree.c, playtreeparser.c)
 
#define MSGT_INPUT 28
 
#define MSGT_VFILTER 29
 
#define MSGT_OSD 30
 
#define MSGT_NETWORK 31
 
#define MSGT_CPUDETECT 32
 
#define MSGT_CODECCFG 33
 
#define MSGT_SWS 34
 
#define MSGT_VOBSUB 35
#define MSGT_SUBREADER 36
 
#define MSGT_AFILTER 37 // Audio filter messages
 
#define MSGT_NETST 38 // Netstream
 
#define MSGT_MUXER 39 // muxer layer
 
#define MSGT_OSD_MENU 40
 
#define MSGT_IDENTIFY 41 // -identify output
 
#define MSGT_RADIO 42
 
#define MSGT_ASS 43 // libass messages
 
#define MSGT_LOADER 44 // dll loader messages
 
#define MSGT_STATUSLINE 45 // playback/encoding status line
 
#define MSGT_TELETEXT 46 // Teletext decoder
 
#define MSGT_MAX 64
 
 
extern char *ff_mp_msg_charset;
extern int ff_mp_msg_color;
extern int ff_mp_msg_module;
 
extern int ff_mp_msg_levels[MSGT_MAX];
extern int ff_mp_msg_level_all;
 
 
void ff_mp_msg_init(void);
int ff_mp_msg_test(int mod, int lev);
 
#include "config.h"
 
void ff_mp_msg_va(int mod, int lev, const char *format, va_list va);
#ifdef __GNUC__
void ff_mp_msg(int mod, int lev, const char *format, ... ) __attribute__ ((format (printf, 3, 4)));
# ifdef MP_DEBUG
# define mp_dbg(mod,lev, args... ) ff_mp_msg(mod, lev, ## args )
# else
// only useful for developers, disable but check syntax
# define mp_dbg(mod,lev, args... ) do { if (0) ff_mp_msg(mod, lev, ## args ); } while (0)
# endif
#else // not GNU C
void ff_mp_msg(int mod, int lev, const char *format, ... );
# ifdef MP_DEBUG
# define mp_dbg(mod,lev, ... ) ff_mp_msg(mod, lev, __VA_ARGS__)
# else
// only useful for developers, disable but check syntax
# define mp_dbg(mod,lev, ... ) do { if (0) ff_mp_msg(mod, lev, __VA_ARGS__); } while (0)
# endif
#endif /* __GNUC__ */
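
/* Usage sketch: a filter warning at verbosity level MSGL_WARN:
 *
 *     ff_mp_msg(MSGT_VFILTER, MSGL_WARN, "unsupported format 0x%X\n", fmt);
 */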
 
const char* ff_filename_recode(const char* filename);
 
#endif /* MPLAYER_MP_MSG_H */
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/mpc_info.h
0,0 → 1,43
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef MPLAYER_MPC_INFO_H
#define MPLAYER_MPC_INFO_H
 
typedef struct mp_codec_info_s
{
/* codec long name ("Autodesk FLI/FLC Animation decoder") */
const char *name;
/* short name (same as driver name in codecs.conf) ("dshow") */
const char *short_name;
/* interface author/maintainer */
const char *maintainer;
/* codec author ("Aaron Holtzman <aholtzma@ess.engr.uvic.ca>") */
const char *author;
/* any additional comments */
const char *comment;
} mp_codec_info_t;
 
#define CONTROL_OK 1
#define CONTROL_TRUE 1
#define CONTROL_FALSE 0
#define CONTROL_UNKNOWN -1
#define CONTROL_ERROR -2
#define CONTROL_NA -3
 
#endif /* MPLAYER_MPC_INFO_H */
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/vf.h
0,0 → 1,169
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef MPLAYER_VF_H
#define MPLAYER_VF_H
 
//#include "m_option.h"
#include "mp_image.h"
 
//extern m_obj_settings_t* vf_settings;
//extern const m_obj_list_t vf_obj_list;
 
struct vf_instance;
struct vf_priv_s;
 
typedef struct vf_info_s {
const char *info;
const char *name;
const char *author;
const char *comment;
int (*vf_open)(struct vf_instance *vf,char* args);
// Ptr to a struct describing the options
const void* opts;
} vf_info_t;
 
#define NUM_NUMBERED_MPI 50
 
typedef struct vf_image_context_s {
mp_image_t* static_images[2];
mp_image_t* temp_images[1];
mp_image_t* export_images[1];
mp_image_t* numbered_images[NUM_NUMBERED_MPI];
int static_idx;
} vf_image_context_t;
 
typedef struct vf_format_context_t {
int have_configured;
int orig_width, orig_height, orig_fmt;
} vf_format_context_t;
 
typedef struct vf_instance {
const vf_info_t* info;
// funcs:
int (*config)(struct vf_instance *vf,
int width, int height, int d_width, int d_height,
unsigned int flags, unsigned int outfmt);
int (*control)(struct vf_instance *vf,
int request, void* data);
int (*query_format)(struct vf_instance *vf,
unsigned int fmt);
void (*get_image)(struct vf_instance *vf,
mp_image_t *mpi);
int (*put_image)(struct vf_instance *vf,
mp_image_t *mpi, double pts);
void (*start_slice)(struct vf_instance *vf,
mp_image_t *mpi);
void (*draw_slice)(struct vf_instance *vf,
unsigned char** src, int* stride, int w,int h, int x, int y);
void (*uninit)(struct vf_instance *vf);
 
int (*continue_buffered_image)(struct vf_instance *vf);
// caps:
unsigned int default_caps; // used by default query_format()
unsigned int default_reqs; // used by default config()
// data:
int w, h;
vf_image_context_t imgctx;
vf_format_context_t fmt;
struct vf_instance *next;
mp_image_t *dmpi;
struct vf_priv_s* priv;
} vf_instance_t;
 
// control codes:
#include "mpc_info.h"
 
typedef struct vf_seteq_s
{
const char *item;
int value;
} vf_equalizer_t;
 
#define VFCTRL_QUERY_MAX_PP_LEVEL 4 /* test for postprocessing support (max level) */
#define VFCTRL_SET_PP_LEVEL 5 /* set postprocessing level */
#define VFCTRL_SET_EQUALIZER 6 /* set color options (brightness,contrast etc) */
#define VFCTRL_GET_EQUALIZER 8 /* get color options (brightness,contrast etc) */
#define VFCTRL_DRAW_OSD 7
#define VFCTRL_CHANGE_RECTANGLE 9 /* Change the rectangle boundaries */
#define VFCTRL_FLIP_PAGE 10 /* Tell the vo to flip pages */
#define VFCTRL_DUPLICATE_FRAME 11 /* For encoding - encode zero-change frame */
#define VFCTRL_SKIP_NEXT_FRAME 12 /* For encoding - drop the next frame that passes thru */
#define VFCTRL_FLUSH_FRAMES 13 /* For encoding - flush delayed frames */
#define VFCTRL_SCREENSHOT 14 /* Make a screenshot */
#define VFCTRL_INIT_EOSD 15 /* Select EOSD renderer */
#define VFCTRL_DRAW_EOSD 16 /* Render EOSD */
#define VFCTRL_GET_PTS 17 /* Return last pts value that reached vf_vo*/
#define VFCTRL_SET_DEINTERLACE 18 /* Set deinterlacing status */
#define VFCTRL_GET_DEINTERLACE 19 /* Get deinterlacing status */
 
#include "vfcap.h"
 
//FIXME this should be in a common header, but I don't know which one
#define MP_NOPTS_VALUE (-1LL<<63) //both int64_t and double should be able to represent this exactly
 
 
// functions:
void ff_vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h);
mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h);
 
vf_instance_t* vf_open_plugin(const vf_info_t* const* filter_list, vf_instance_t* next, const char *name, char **args);
vf_instance_t* vf_open_filter(vf_instance_t* next, const char *name, char **args);
vf_instance_t* ff_vf_add_before_vo(vf_instance_t **vf, char *name, char **args);
vf_instance_t* vf_open_encoder(vf_instance_t* next, const char *name, char *args);
 
unsigned int ff_vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred);
void ff_vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src);
void ff_vf_queue_frame(vf_instance_t *vf, int (*)(vf_instance_t *));
int ff_vf_output_queued_frame(vf_instance_t *vf);
 
// default wrappers:
int ff_vf_next_config(struct vf_instance *vf,
int width, int height, int d_width, int d_height,
unsigned int flags, unsigned int outfmt);
int ff_vf_next_control(struct vf_instance *vf, int request, void* data);
void ff_vf_extra_flip(struct vf_instance *vf);
int ff_vf_next_query_format(struct vf_instance *vf, unsigned int fmt);
int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts);
void ff_vf_next_draw_slice (struct vf_instance *vf, unsigned char** src, int* stride, int w,int h, int x, int y);
 
vf_instance_t* ff_append_filters(vf_instance_t* last);
 
void ff_vf_uninit_filter(vf_instance_t* vf);
void ff_vf_uninit_filter_chain(vf_instance_t* vf);
 
int ff_vf_config_wrapper(struct vf_instance *vf,
int width, int height, int d_width, int d_height,
unsigned int flags, unsigned int outfmt);
 
static inline int norm_qscale(int qscale, int type)
{
switch (type) {
case 0: // MPEG-1
return qscale;
case 1: // MPEG-2
return qscale >> 1;
case 2: // H264
return qscale >> 2;
case 3: // VP56
return (63 - qscale + 2) >> 2;
}
return qscale;
}
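
/* Example (sketch): an H.264 qscale of 40 (type 2) normalizes to
 * 40 >> 2 = 10 on the shared MPEG-1 scale; unknown types pass through. */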
 
#endif /* MPLAYER_VF_H */
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/vf_eq.c
0,0 → 1,240
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
 
#include "config.h"
#include "mp_msg.h"
#include "cpudetect.h"
 
#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
 
#include "libvo/video_out.h"
 
struct vf_priv_s {
unsigned char *buf;
int brightness;
int contrast;
};
 
#if HAVE_MMX
static void process_MMX(unsigned char *dest, int dstride, unsigned char *src, int sstride,
int w, int h, int brightness, int contrast)
{
int i;
int pel;
int dstep = dstride-w;
int sstep = sstride-w;
short brvec[4];
short contvec[4];
 
contrast = ((contrast+100)*256*16)/100;
brightness = ((brightness+100)*511)/200-128 - contrast/32;
 
brvec[0] = brvec[1] = brvec[2] = brvec[3] = brightness;
contvec[0] = contvec[1] = contvec[2] = contvec[3] = contrast;
 
while (h--) {
__asm__ volatile (
"movq (%5), %%mm3 \n\t"
"movq (%6), %%mm4 \n\t"
"pxor %%mm0, %%mm0 \n\t"
"movl %4, %%eax\n\t"
ASMALIGN(4)
"1: \n\t"
"movq (%0), %%mm1 \n\t"
"movq (%0), %%mm2 \n\t"
"punpcklbw %%mm0, %%mm1 \n\t"
"punpckhbw %%mm0, %%mm2 \n\t"
"psllw $4, %%mm1 \n\t"
"psllw $4, %%mm2 \n\t"
"pmulhw %%mm4, %%mm1 \n\t"
"pmulhw %%mm4, %%mm2 \n\t"
"paddw %%mm3, %%mm1 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"packuswb %%mm2, %%mm1 \n\t"
"add $8, %0 \n\t"
"movq %%mm1, (%1) \n\t"
"add $8, %1 \n\t"
"decl %%eax \n\t"
"jnz 1b \n\t"
: "=r" (src), "=r" (dest)
: "0" (src), "1" (dest), "r" (w>>3), "r" (brvec), "r" (contvec)
: "%eax"
);
 
for (i = w&7; i; i--)
{
pel = ((*src++* contrast)>>12) + brightness;
if(pel&768) pel = (-pel)>>31;
*dest++ = pel;
}
 
src += sstep;
dest += dstep;
}
__asm__ volatile ( "emms \n\t" ::: "memory" );
}
#endif
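
/* Both the MMX and C paths apply the same fixed-point mapping: the user
 * range [-100,100] becomes a gain (65536 == identity in the C path) plus
 * an offset. The branchless clamp "if(pel&768) pel = (-pel)>>31" works
 * because every out-of-range result reachable here has bit 8 or 9 set;
 * assuming an arithmetic right shift, positive overflow gives -1 (255 as
 * an unsigned char) and negative values give 0. */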
 
static void process_C(unsigned char *dest, int dstride, unsigned char *src, int sstride,
int w, int h, int brightness, int contrast)
{
int i;
int pel;
int dstep = dstride-w;
int sstep = sstride-w;
 
contrast = ((contrast+100)*256*256)/100;
brightness = ((brightness+100)*511)/200-128 - contrast/512;
 
while (h--) {
for (i = w; i; i--)
{
pel = ((*src++* contrast)>>16) + brightness;
if(pel&768) pel = (-pel)>>31;
*dest++ = pel;
}
src += sstep;
dest += dstep;
}
}
 
static void (*process)(unsigned char *dest, int dstride, unsigned char *src, int sstride,
int w, int h, int brightness, int contrast);
 
/* FIXME: add packed yuv version of process */
 
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
{
mp_image_t *dmpi;
 
dmpi=ff_vf_get_image(vf->next, mpi->imgfmt,
MP_IMGTYPE_EXPORT, 0,
mpi->w, mpi->h);
 
dmpi->stride[0] = mpi->stride[0];
dmpi->planes[1] = mpi->planes[1];
dmpi->planes[2] = mpi->planes[2];
dmpi->stride[1] = mpi->stride[1];
dmpi->stride[2] = mpi->stride[2];
 
if (!vf->priv->buf) vf->priv->buf = malloc(mpi->stride[0]*mpi->h);
 
if ((vf->priv->brightness == 0) && (vf->priv->contrast == 0))
dmpi->planes[0] = mpi->planes[0];
else {
dmpi->planes[0] = vf->priv->buf;
process(dmpi->planes[0], dmpi->stride[0],
mpi->planes[0], mpi->stride[0],
mpi->w, mpi->h, vf->priv->brightness,
vf->priv->contrast);
}
 
return ff_vf_next_put_image(vf,dmpi, pts);
}
 
static int control(struct vf_instance *vf, int request, void* data)
{
vf_equalizer_t *eq;
 
switch (request) {
case VFCTRL_SET_EQUALIZER:
eq = data;
if (!strcmp(eq->item,"brightness")) {
vf->priv->brightness = eq->value;
return CONTROL_TRUE;
}
else if (!strcmp(eq->item,"contrast")) {
vf->priv->contrast = eq->value;
return CONTROL_TRUE;
}
break;
case VFCTRL_GET_EQUALIZER:
eq = data;
if (!strcmp(eq->item,"brightness")) {
eq->value = vf->priv->brightness;
return CONTROL_TRUE;
}
else if (!strcmp(eq->item,"contrast")) {
eq->value = vf->priv->contrast;
return CONTROL_TRUE;
}
break;
}
return ff_vf_next_control(vf, request, data);
}
 
static int query_format(struct vf_instance *vf, unsigned int fmt)
{
switch (fmt) {
case IMGFMT_YVU9:
case IMGFMT_IF09:
case IMGFMT_YV12:
case IMGFMT_I420:
case IMGFMT_IYUV:
case IMGFMT_CLPL:
case IMGFMT_Y800:
case IMGFMT_Y8:
case IMGFMT_NV12:
case IMGFMT_NV21:
case IMGFMT_444P:
case IMGFMT_422P:
case IMGFMT_411P:
return ff_vf_next_query_format(vf, fmt);
}
return 0;
}
 
static void uninit(struct vf_instance *vf)
{
free(vf->priv->buf);
free(vf->priv);
}
 
static int vf_open(vf_instance_t *vf, char *args)
{
vf->control=control;
vf->query_format=query_format;
vf->put_image=put_image;
vf->uninit=uninit;
 
vf->priv = malloc(sizeof(struct vf_priv_s));
memset(vf->priv, 0, sizeof(struct vf_priv_s));
if (args) sscanf(args, "%d:%d", &vf->priv->brightness, &vf->priv->contrast);
 
process = process_C;
#if HAVE_MMX
if(ff_gCpuCaps.hasMMX) process = process_MMX;
#endif
 
return 1;
}
 
const vf_info_t ff_vf_info_eq = {
"soft video equalizer",
"eq",
"Richard Felker",
"",
vf_open,
};
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/vf_eq2.c
0,0 → 1,519
/*
* Software equalizer (brightness, contrast, gamma, saturation)
*
* Hampa Hug <hampa@hampa.ch> (original LUT gamma/contrast/brightness filter)
* Daniel Moreno <comac@comac.darktech.org> (saturation, R/G/B gamma support)
* Richard Felker (original MMX contrast/brightness code (vf_eq.c))
* Michael Niedermayer <michaelni@gmx.at> (LUT16)
*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <inttypes.h>
 
#include "config.h"
#include "mp_msg.h"
#include "cpudetect.h"
 
#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
 
#define LUT16
 
/* Per channel parameters */
typedef struct eq2_param_t {
unsigned char lut[256];
#ifdef LUT16
uint16_t lut16[256*256];
#endif
int lut_clean;
 
void (*adjust) (struct eq2_param_t *par, unsigned char *dst, unsigned char *src,
unsigned w, unsigned h, unsigned dstride, unsigned sstride);
 
double c;
double b;
double g;
double w;
} eq2_param_t;
 
typedef struct vf_priv_s {
eq2_param_t param[3];
 
double contrast;
double brightness;
double saturation;
 
double gamma;
double gamma_weight;
double rgamma;
double ggamma;
double bgamma;
 
unsigned buf_w[3];
unsigned buf_h[3];
unsigned char *buf[3];
} vf_eq2_t;
 
 
static
void create_lut (eq2_param_t *par)
{
unsigned i;
double g, v;
double lw, gw;
 
g = par->g;
gw = par->w;
lw = 1.0 - gw;
 
if ((g < 0.001) || (g > 1000.0)) {
g = 1.0;
}
 
g = 1.0 / g;
 
for (i = 0; i < 256; i++) {
v = (double) i / 255.0;
v = par->c * (v - 0.5) + 0.5 + par->b;
 
if (v <= 0.0) {
par->lut[i] = 0;
}
else {
v = v*lw + pow(v, g)*gw;
 
if (v >= 1.0) {
par->lut[i] = 255;
}
else {
par->lut[i] = (unsigned char) (256.0 * v);
}
}
}
 
#ifdef LUT16
for(i=0; i<256*256; i++){
par->lut16[i]= par->lut[i&0xFF] + (par->lut[i>>8]<<8);
}
#endif
 
par->lut_clean = 1;
}
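
/* Example (sketch): with c = 1, b = 0, g = 2 and gamma weight w = 1 the
 * table reduces to a pure gamma curve lut[i] = 256 * (i/255)^(1/2), so
 * mid-gray 128 maps to roughly 181. */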
 
#if HAVE_MMX
static
void affine_1d_MMX (eq2_param_t *par, unsigned char *dst, unsigned char *src,
unsigned w, unsigned h, unsigned dstride, unsigned sstride)
{
unsigned i;
int contrast, brightness;
unsigned dstep, sstep;
int pel;
short brvec[4];
short contvec[4];
 
// printf("\nmmx: src=%p dst=%p w=%d h=%d ds=%d ss=%d\n",src,dst,w,h,dstride,sstride);
 
contrast = (int) (par->c * 256 * 16);
brightness = ((int) (100.0 * par->b + 100.0) * 511) / 200 - 128 - contrast / 32;
 
brvec[0] = brvec[1] = brvec[2] = brvec[3] = brightness;
contvec[0] = contvec[1] = contvec[2] = contvec[3] = contrast;
 
sstep = sstride - w;
dstep = dstride - w;
 
while (h-- > 0) {
__asm__ volatile (
"movq (%5), %%mm3 \n\t"
"movq (%6), %%mm4 \n\t"
"pxor %%mm0, %%mm0 \n\t"
"movl %4, %%eax\n\t"
ASMALIGN(4)
"1: \n\t"
"movq (%0), %%mm1 \n\t"
"movq (%0), %%mm2 \n\t"
"punpcklbw %%mm0, %%mm1 \n\t"
"punpckhbw %%mm0, %%mm2 \n\t"
"psllw $4, %%mm1 \n\t"
"psllw $4, %%mm2 \n\t"
"pmulhw %%mm4, %%mm1 \n\t"
"pmulhw %%mm4, %%mm2 \n\t"
"paddw %%mm3, %%mm1 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"packuswb %%mm2, %%mm1 \n\t"
"add $8, %0 \n\t"
"movq %%mm1, (%1) \n\t"
"add $8, %1 \n\t"
"decl %%eax \n\t"
"jnz 1b \n\t"
: "=r" (src), "=r" (dst)
: "0" (src), "1" (dst), "r" (w >> 3), "r" (brvec), "r" (contvec)
: "%eax"
);
 
for (i = w & 7; i > 0; i--) {
pel = ((*src++ * contrast) >> 12) + brightness;
if (pel & 768) {
pel = (-pel) >> 31;
}
*dst++ = pel;
}
 
src += sstep;
dst += dstep;
}
 
__asm__ volatile ( "emms \n\t" ::: "memory" );
}
#endif
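
/* The LUT16 variant packs two adjacent 8-bit pixels into one 16-bit index,
 * so each lookup in the 128 KiB lut16[] built by create_lut() translates a
 * pixel pair; the trailing scalar loop covers leftover pixels when the
 * width is not a multiple of 8. */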
 
static
void apply_lut (eq2_param_t *par, unsigned char *dst, unsigned char *src,
unsigned w, unsigned h, unsigned dstride, unsigned sstride)
{
unsigned i, j, w2;
unsigned char *lut;
uint16_t *lut16;
 
if (!par->lut_clean) {
create_lut (par);
}
 
lut = par->lut;
#ifdef LUT16
lut16 = par->lut16;
w2= (w>>3)<<2;
for (j = 0; j < h; j++) {
uint16_t *src16= (uint16_t*)src;
uint16_t *dst16= (uint16_t*)dst;
for (i = 0; i < w2; i+=4) {
dst16[i+0] = lut16[src16[i+0]];
dst16[i+1] = lut16[src16[i+1]];
dst16[i+2] = lut16[src16[i+2]];
dst16[i+3] = lut16[src16[i+3]];
}
i <<= 1; /* back from 16-bit-word index to byte index for the scalar tail loop */
#else
w2= (w>>3)<<3;
for (j = 0; j < h; j++) {
for (i = 0; i < w2; i+=8) {
dst[i+0] = lut[src[i+0]];
dst[i+1] = lut[src[i+1]];
dst[i+2] = lut[src[i+2]];
dst[i+3] = lut[src[i+3]];
dst[i+4] = lut[src[i+4]];
dst[i+5] = lut[src[i+5]];
dst[i+6] = lut[src[i+6]];
dst[i+7] = lut[src[i+7]];
}
#endif
for (; i < w; i++) {
dst[i] = lut[src[i]];
}
 
src += sstride;
dst += dstride;
}
}
 
static
int put_image (vf_instance_t *vf, mp_image_t *src, double pts)
{
unsigned i;
vf_eq2_t *eq2;
mp_image_t *dst;
unsigned long img_n,img_c;
 
eq2 = vf->priv;
 
if ((eq2->buf_w[0] != src->w) || (eq2->buf_h[0] != src->h)) {
eq2->buf_w[0] = src->w;
eq2->buf_h[0] = src->h;
eq2->buf_w[1] = eq2->buf_w[2] = src->w >> src->chroma_x_shift;
eq2->buf_h[1] = eq2->buf_h[2] = src->h >> src->chroma_y_shift;
img_n = eq2->buf_w[0]*eq2->buf_h[0];
if(src->num_planes>1){
img_c = eq2->buf_w[1]*eq2->buf_h[1];
eq2->buf[0] = realloc (eq2->buf[0], img_n + 2*img_c);
eq2->buf[1] = eq2->buf[0] + img_n;
eq2->buf[2] = eq2->buf[1] + img_c;
} else
eq2->buf[0] = realloc (eq2->buf[0], img_n);
}
 
dst = ff_vf_get_image (vf->next, src->imgfmt, MP_IMGTYPE_EXPORT, 0, src->w, src->h);
 
for (i = 0; i < ((src->num_planes>1)?3:1); i++) {
if (eq2->param[i].adjust != NULL) {
dst->planes[i] = eq2->buf[i];
dst->stride[i] = eq2->buf_w[i];
 
eq2->param[i].adjust (&eq2->param[i], dst->planes[i], src->planes[i],
eq2->buf_w[i], eq2->buf_h[i], dst->stride[i], src->stride[i]);
}
else {
dst->planes[i] = src->planes[i];
dst->stride[i] = src->stride[i];
}
}
 
return ff_vf_next_put_image (vf, dst, pts);
}
 
static
void check_values (eq2_param_t *par)
{
/* yuck! floating point comparisons... */
 
if ((par->c == 1.0) && (par->b == 0.0) && (par->g == 1.0)) {
par->adjust = NULL;
}
#if HAVE_MMX
else if (par->g == 1.0 && ff_gCpuCaps.hasMMX) {
par->adjust = &affine_1d_MMX;
}
#endif
else {
par->adjust = &apply_lut;
}
}
 
static
void print_values (vf_eq2_t *eq2)
{
ff_mp_msg (MSGT_VFILTER, MSGL_V, "vf_eq2: c=%.2f b=%.2f g=%.4f s=%.2f \n",
eq2->contrast, eq2->brightness, eq2->gamma, eq2->saturation
);
}
 
static
void set_contrast (vf_eq2_t *eq2, double c)
{
eq2->contrast = c;
eq2->param[0].c = c;
eq2->param[0].lut_clean = 0;
check_values (&eq2->param[0]);
print_values (eq2);
}
 
static
void set_brightness (vf_eq2_t *eq2, double b)
{
eq2->brightness = b;
eq2->param[0].b = b;
eq2->param[0].lut_clean = 0;
check_values (&eq2->param[0]);
print_values (eq2);
}
 
static
void set_gamma (vf_eq2_t *eq2, double g)
{
eq2->gamma = g;
 
eq2->param[0].g = eq2->gamma * eq2->ggamma;
eq2->param[1].g = sqrt (eq2->bgamma / eq2->ggamma);
eq2->param[2].g = sqrt (eq2->rgamma / eq2->ggamma);
eq2->param[0].w = eq2->param[1].w = eq2->param[2].w = eq2->gamma_weight;
 
eq2->param[0].lut_clean = 0;
eq2->param[1].lut_clean = 0;
eq2->param[2].lut_clean = 0;
 
check_values (&eq2->param[0]);
check_values (&eq2->param[1]);
check_values (&eq2->param[2]);
 
print_values (eq2);
}
 
static
void set_saturation (vf_eq2_t *eq2, double s)
{
eq2->saturation = s;
 
eq2->param[1].c = s;
eq2->param[2].c = s;
 
eq2->param[1].lut_clean = 0;
eq2->param[2].lut_clean = 0;
 
check_values (&eq2->param[1]);
check_values (&eq2->param[2]);
 
print_values (eq2);
}
 
static
int control (vf_instance_t *vf, int request, void *data)
{
vf_equalizer_t *eq;
 
switch (request) {
case VFCTRL_SET_EQUALIZER:
eq = (vf_equalizer_t *) data;
 
if (strcmp (eq->item, "gamma") == 0) {
set_gamma (vf->priv, exp (log (8.0) * eq->value / 100.0));
return CONTROL_TRUE;
}
else if (strcmp (eq->item, "contrast") == 0) {
set_contrast (vf->priv, (1.0 / 100.0) * (eq->value + 100));
return CONTROL_TRUE;
}
else if (strcmp (eq->item, "brightness") == 0) {
set_brightness (vf->priv, (1.0 / 100.0) * eq->value);
return CONTROL_TRUE;
}
else if (strcmp (eq->item, "saturation") == 0) {
set_saturation (vf->priv, (double) (eq->value + 100) / 100.0);
return CONTROL_TRUE;
}
break;
 
case VFCTRL_GET_EQUALIZER:
eq = (vf_equalizer_t *) data;
if (strcmp (eq->item, "gamma") == 0) {
eq->value = (int) (100.0 * log (vf->priv->gamma) / log (8.0));
return CONTROL_TRUE;
}
else if (strcmp (eq->item, "contrast") == 0) {
eq->value = (int) (100.0 * vf->priv->contrast) - 100;
return CONTROL_TRUE;
}
else if (strcmp (eq->item, "brightness") == 0) {
eq->value = (int) (100.0 * vf->priv->brightness);
return CONTROL_TRUE;
}
else if (strcmp (eq->item, "saturation") == 0) {
eq->value = (int) (100.0 * vf->priv->saturation) - 100;
return CONTROL_TRUE;
}
break;
}
 
return ff_vf_next_control (vf, request, data);
}
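 
/* A minimal sketch (illustrative only, not used by the filter) of the slider
   mapping above: an equalizer value in [-100,100] maps exponentially onto a
   gamma in [1/8, 8], while contrast/brightness/saturation map linearly. */
static inline double eq2_slider_to_gamma (int value) /* value in [-100, 100] */
{
return exp (log (8.0) * value / 100.0); /* -100 -> 0.125, 0 -> 1.0, 100 -> 8.0 */
}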
 
static
int query_format (vf_instance_t *vf, unsigned fmt)
{
switch (fmt) {
case IMGFMT_YVU9:
case IMGFMT_IF09:
case IMGFMT_YV12:
case IMGFMT_I420:
case IMGFMT_IYUV:
case IMGFMT_Y800:
case IMGFMT_Y8:
case IMGFMT_444P:
case IMGFMT_422P:
case IMGFMT_411P:
return ff_vf_next_query_format (vf, fmt);
}
 
return 0;
}
 
static
void uninit (vf_instance_t *vf)
{
if (vf->priv != NULL) {
free (vf->priv->buf[0]);
free (vf->priv);
}
}
 
static
int vf_open(vf_instance_t *vf, char *args)
{
unsigned i;
vf_eq2_t *eq2;
double par[8];
 
vf->control = control;
vf->query_format = query_format;
vf->put_image = put_image;
vf->uninit = uninit;
 
vf->priv = malloc (sizeof (vf_eq2_t));
eq2 = vf->priv;
 
for (i = 0; i < 3; i++) {
eq2->buf[i] = NULL;
eq2->buf_w[i] = 0;
eq2->buf_h[i] = 0;
 
eq2->param[i].adjust = NULL;
eq2->param[i].c = 1.0;
eq2->param[i].b = 0.0;
eq2->param[i].g = 1.0;
eq2->param[i].lut_clean = 0;
}
 
eq2->contrast = 1.0;
eq2->brightness = 0.0;
eq2->saturation = 1.0;
 
eq2->gamma = 1.0;
eq2->gamma_weight = 1.0;
eq2->rgamma = 1.0;
eq2->ggamma = 1.0;
eq2->bgamma = 1.0;
 
if (args != NULL) {
par[0] = 1.0;
par[1] = 1.0;
par[2] = 0.0;
par[3] = 1.0;
par[4] = 1.0;
par[5] = 1.0;
par[6] = 1.0;
par[7] = 1.0;
sscanf (args, "%lf:%lf:%lf:%lf:%lf:%lf:%lf:%lf",
par, par + 1, par + 2, par + 3, par + 4, par + 5, par + 6, par + 7
);
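/* argument layout (as consumed below):
   gamma:contrast:brightness:saturation:rgamma:ggamma:bgamma:weight */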
 
eq2->rgamma = par[4];
eq2->ggamma = par[5];
eq2->bgamma = par[6];
eq2->gamma_weight = par[7];
 
set_gamma (eq2, par[0]);
set_contrast (eq2, par[1]);
set_brightness (eq2, par[2]);
set_saturation (eq2, par[3]);
}
 
return 1;
}
 
const vf_info_t ff_vf_info_eq2 = {
"Software equalizer",
"eq2",
"Hampa Hug, Daniel Moreno, Richard Felker",
"",
&vf_open,
NULL
};
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/vf_fspp.c
0,0 → 1,2118
/*
* Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at>
* Copyright (C) 2005 Nikolaj Poroshin <porosh3@psu.ru>
*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/*
* This implementation is based on an algorithm described in
* "Aria Nosratinia Embedded Post-Processing for
* Enhancement of Compressed Images (1999)"
* (http://citeseer.nj.nec.com/nosratinia99embedded.html)
* Further, by splitting the (i)dct into horizontal/vertical passes, one of them
* can be performed once per block rather than once per pixel, which allows for
* much better speed.
*/
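 
/* Separability sketch (illustrative only, not the filter's actual code path):
   a 2-D (i)dct factors into independent 1-D row and column passes, which is
   what lets one of the passes be amortized over a whole block: */
static inline void separable_2d_sketch (short blk[8][8],
void (*row_pass) (short *row),
void (*col_pass) (short (*cols)[8], int col))
{
int i;
for (i = 0; i < 8; i++)
row_pass (blk[i]); /* horizontal pass: done once per row of the block */
for (i = 0; i < 8; i++)
col_pass (blk, i); /* vertical pass: repeated per shifted position */
}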
 
/*
Heavily optimized version of SPP filter by Nikolaj
*/
 
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <math.h>
 
#include "config.h"
 
#include "mp_msg.h"
#include "cpudetect.h"
#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
#include "av_helpers.h"
#include "libvo/fastmemcpy.h"
 
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
 
#undef free
#undef malloc
 
//===========================================================================//
#define BLOCKSZ 12
 
static const short custom_threshold[64]=
// values (e.g. 296) can't be too high:
// that causes too strong a dependence on the quantizer,
// or possibly overflow (unverified), which results in some flashing
{ 71, 296, 295, 237, 71, 40, 38, 19,
245, 193, 185, 121, 102, 73, 53, 27,
158, 129, 141, 107, 97, 73, 50, 26,
102, 116, 109, 98, 82, 66, 45, 23,
71, 94, 95, 81, 70, 56, 38, 20,
56, 77, 74, 66, 56, 44, 30, 15,
38, 53, 50, 45, 38, 30, 21, 11,
20, 27, 26, 23, 20, 15, 11, 5
};
 
static const uint8_t __attribute__((aligned(32))) dither[8][8]={
{ 0, 48, 12, 60, 3, 51, 15, 63, },
{ 32, 16, 44, 28, 35, 19, 47, 31, },
{ 8, 56, 4, 52, 11, 59, 7, 55, },
{ 40, 24, 36, 20, 43, 27, 39, 23, },
{ 2, 50, 14, 62, 1, 49, 13, 61, },
{ 34, 18, 46, 30, 33, 17, 45, 29, },
{ 10, 58, 6, 54, 9, 57, 5, 53, },
{ 42, 26, 38, 22, 41, 25, 37, 21, },
};
 
struct vf_priv_s { //align 16 !
uint64_t threshold_mtx_noq[8*2];
uint64_t threshold_mtx[8*2];//used in both C & MMX (& later SSE2) versions
 
int log2_count;
int temp_stride;
int qp;
int mpeg2;
int prev_q;
uint8_t *src;
int16_t *temp;
int bframes;
char *non_b_qp;
};
 
 
#if !HAVE_MMX
 
//This func reads from slice 1 and clears slices 0 & 1
static void store_slice_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale)
{int y, x;
#define STORE(pos) \
temp= (src[x + pos] + (d[pos]>>log2_scale))>>(6-log2_scale); \
src[x + pos]=src[x + pos - 8*src_stride]=0; \
if(temp & 0x100) temp= ~(temp>>31); \
dst[x + pos]= temp;
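/* (temp & 0x100) is set both for overflow (256..511) and for negative values;
   ~(temp>>31) then gives 0 for negatives and 255 (as a byte) for overflow */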
 
for(y=0; y<height; y++){
const uint8_t *d= dither[y];
for(x=0; x<width; x+=8){
int temp;
STORE(0);
STORE(1);
STORE(2);
STORE(3);
STORE(4);
STORE(5);
STORE(6);
STORE(7);
}
src+=src_stride;
dst+=dst_stride;
}
}
 
//This func reads from 2 slices, 0 & 2, and clears the 2nd
static void store_slice2_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale)
{int y, x;
#define STORE2(pos) \
temp= (src[x + pos] + src[x + pos + 16*src_stride] + (d[pos]>>log2_scale))>>(6-log2_scale); \
src[x + pos + 16*src_stride]=0; \
if(temp & 0x100) temp= ~(temp>>31); \
dst[x + pos]= temp;
 
for(y=0; y<height; y++){
const uint8_t *d= dither[y];
for(x=0; x<width; x+=8){
int temp;
STORE2(0);
STORE2(1);
STORE2(2);
STORE2(3);
STORE2(4);
STORE2(5);
STORE2(6);
STORE2(7);
}
src+=src_stride;
dst+=dst_stride;
}
}
 
static void mul_thrmat_c(struct vf_priv_s *p,int q)
{
int a;
for(a=0;a<64;a++)
((short*)p->threshold_mtx)[a]=q * ((short*)p->threshold_mtx_noq)[a];//ints faster in C
}
 
static void column_fidct_c(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt);
static void row_idct_c(int16_t* workspace,
int16_t* output_adr, int output_stride, int cnt);
static void row_fdct_c(int16_t *data, const uint8_t *pixels, int line_size, int cnt);
 
//this is rather ugly, but there is no need for function pointers
#define store_slice_s store_slice_c
#define store_slice2_s store_slice2_c
#define mul_thrmat_s mul_thrmat_c
#define column_fidct_s column_fidct_c
#define row_idct_s row_idct_c
#define row_fdct_s row_fdct_c
 
#else /* HAVE_MMX */
 
//This func reads from slice 1 and clears slices 0 & 1
static void store_slice_mmx(uint8_t *dst, int16_t *src, long dst_stride, long src_stride, long width, long height, long log2_scale)
{
const uint8_t *od=&dither[0][0];
const uint8_t *end=&dither[height][0];
width = (width+7)&~7;
dst_stride-=width;
//src_stride=(src_stride-width)*2;
__asm__ volatile(
"mov %5, %%"REG_d" \n\t"
"mov %6, %%"REG_S" \n\t"
"mov %7, %%"REG_D" \n\t"
"mov %1, %%"REG_a" \n\t"
"movd %%"REG_d", %%mm5 \n\t"
"xor $-1, %%"REG_d" \n\t"
"mov %%"REG_a", %%"REG_c" \n\t"
"add $7, %%"REG_d" \n\t"
"neg %%"REG_a" \n\t"
"sub %0, %%"REG_c" \n\t"
"add %%"REG_c", %%"REG_c" \n\t"
"movd %%"REG_d", %%mm2 \n\t"
"mov %%"REG_c", %1 \n\t"
"mov %2, %%"REG_d" \n\t"
"shl $4, %%"REG_a" \n\t"
 
"2: \n\t"
"movq (%%"REG_d"), %%mm3 \n\t"
"movq %%mm3, %%mm4 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"punpckhbw %%mm7, %%mm4 \n\t"
"mov %0, %%"REG_c" \n\t"
"psraw %%mm5, %%mm3 \n\t"
"psraw %%mm5, %%mm4 \n\t"
"1: \n\t"
"movq %%mm7, (%%"REG_S",%%"REG_a",) \n\t"
"movq (%%"REG_S"), %%mm0 \n\t"
"movq 8(%%"REG_S"), %%mm1 \n\t"
 
"movq %%mm7, 8(%%"REG_S",%%"REG_a",) \n\t"
"paddw %%mm3, %%mm0 \n\t"
"paddw %%mm4, %%mm1 \n\t"
 
"movq %%mm7, (%%"REG_S") \n\t"
"psraw %%mm2, %%mm0 \n\t"
"psraw %%mm2, %%mm1 \n\t"
 
"movq %%mm7, 8(%%"REG_S") \n\t"
"packuswb %%mm1, %%mm0 \n\t"
"add $16, %%"REG_S" \n\t"
 
"movq %%mm0, (%%"REG_D") \n\t"
"add $8, %%"REG_D" \n\t"
"sub $8, %%"REG_c" \n\t"
"jg 1b \n\t"
"add %1, %%"REG_S" \n\t"
"add $8, %%"REG_d" \n\t"
"add %3, %%"REG_D" \n\t"
"cmp %4, %%"REG_d" \n\t"
"jl 2b \n\t"
 
:
: "m" (width), "m" (src_stride), "erm" (od), "m" (dst_stride), "erm" (end),
"m" (log2_scale), "m" (src), "m" (dst) //input
: "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
);
}
 
//This func reads from 2 slices, 0 & 2, and clears the 2nd
static void store_slice2_mmx(uint8_t *dst, int16_t *src, long dst_stride, long src_stride, long width, long height, long log2_scale)
{
const uint8_t *od=&dither[0][0];
const uint8_t *end=&dither[height][0];
width = (width+7)&~7;
dst_stride-=width;
//src_stride=(src_stride-width)*2;
__asm__ volatile(
"mov %5, %%"REG_d" \n\t"
"mov %6, %%"REG_S" \n\t"
"mov %7, %%"REG_D" \n\t"
"mov %1, %%"REG_a" \n\t"
"movd %%"REG_d", %%mm5 \n\t"
"xor $-1, %%"REG_d" \n\t"
"mov %%"REG_a", %%"REG_c" \n\t"
"add $7, %%"REG_d" \n\t"
"sub %0, %%"REG_c" \n\t"
"add %%"REG_c", %%"REG_c" \n\t"
"movd %%"REG_d", %%mm2 \n\t"
"mov %%"REG_c", %1 \n\t"
"mov %2, %%"REG_d" \n\t"
"shl $5, %%"REG_a" \n\t"
 
"2: \n\t"
"movq (%%"REG_d"), %%mm3 \n\t"
"movq %%mm3, %%mm4 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"punpckhbw %%mm7, %%mm4 \n\t"
"mov %0, %%"REG_c" \n\t"
"psraw %%mm5, %%mm3 \n\t"
"psraw %%mm5, %%mm4 \n\t"
"1: \n\t"
"movq (%%"REG_S"), %%mm0 \n\t"
"movq 8(%%"REG_S"), %%mm1 \n\t"
"paddw %%mm3, %%mm0 \n\t"
 
"paddw (%%"REG_S",%%"REG_a",), %%mm0 \n\t"
"paddw %%mm4, %%mm1 \n\t"
"movq 8(%%"REG_S",%%"REG_a",), %%mm6 \n\t"
 
"movq %%mm7, (%%"REG_S",%%"REG_a",) \n\t"
"psraw %%mm2, %%mm0 \n\t"
"paddw %%mm6, %%mm1 \n\t"
 
"movq %%mm7, 8(%%"REG_S",%%"REG_a",) \n\t"
"psraw %%mm2, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
 
"movq %%mm0, (%%"REG_D") \n\t"
"add $16, %%"REG_S" \n\t"
"add $8, %%"REG_D" \n\t"
"sub $8, %%"REG_c" \n\t"
"jg 1b \n\t"
"add %1, %%"REG_S" \n\t"
"add $8, %%"REG_d" \n\t"
"add %3, %%"REG_D" \n\t"
"cmp %4, %%"REG_d" \n\t"
"jl 2b \n\t"
 
:
: "m" (width), "m" (src_stride), "erm" (od), "m" (dst_stride), "erm" (end),
"m" (log2_scale), "m" (src), "m" (dst) //input
: "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_D, "%"REG_S
);
}
 
static void mul_thrmat_mmx(struct vf_priv_s *p, int q)
{
uint64_t *adr=&p->threshold_mtx_noq[0];
__asm__ volatile(
"movd %0, %%mm7 \n\t"
"add $8*8*2, %%"REG_D" \n\t"
"movq 0*8(%%"REG_S"), %%mm0 \n\t"
"punpcklwd %%mm7, %%mm7 \n\t"
"movq 1*8(%%"REG_S"), %%mm1 \n\t"
"punpckldq %%mm7, %%mm7 \n\t"
"pmullw %%mm7, %%mm0 \n\t"
 
"movq 2*8(%%"REG_S"), %%mm2 \n\t"
"pmullw %%mm7, %%mm1 \n\t"
 
"movq 3*8(%%"REG_S"), %%mm3 \n\t"
"pmullw %%mm7, %%mm2 \n\t"
 
"movq %%mm0, 0*8(%%"REG_D") \n\t"
"movq 4*8(%%"REG_S"), %%mm4 \n\t"
"pmullw %%mm7, %%mm3 \n\t"
 
"movq %%mm1, 1*8(%%"REG_D") \n\t"
"movq 5*8(%%"REG_S"), %%mm5 \n\t"
"pmullw %%mm7, %%mm4 \n\t"
 
"movq %%mm2, 2*8(%%"REG_D") \n\t"
"movq 6*8(%%"REG_S"), %%mm6 \n\t"
"pmullw %%mm7, %%mm5 \n\t"
 
"movq %%mm3, 3*8(%%"REG_D") \n\t"
"movq 7*8+0*8(%%"REG_S"), %%mm0 \n\t"
"pmullw %%mm7, %%mm6 \n\t"
 
"movq %%mm4, 4*8(%%"REG_D") \n\t"
"movq 7*8+1*8(%%"REG_S"), %%mm1 \n\t"
"pmullw %%mm7, %%mm0 \n\t"
 
"movq %%mm5, 5*8(%%"REG_D") \n\t"
"movq 7*8+2*8(%%"REG_S"), %%mm2 \n\t"
"pmullw %%mm7, %%mm1 \n\t"
 
"movq %%mm6, 6*8(%%"REG_D") \n\t"
"movq 7*8+3*8(%%"REG_S"), %%mm3 \n\t"
"pmullw %%mm7, %%mm2 \n\t"
 
"movq %%mm0, 7*8+0*8(%%"REG_D") \n\t"
"movq 7*8+4*8(%%"REG_S"), %%mm4 \n\t"
"pmullw %%mm7, %%mm3 \n\t"
 
"movq %%mm1, 7*8+1*8(%%"REG_D") \n\t"
"movq 7*8+5*8(%%"REG_S"), %%mm5 \n\t"
"pmullw %%mm7, %%mm4 \n\t"
 
"movq %%mm2, 7*8+2*8(%%"REG_D") \n\t"
"movq 7*8+6*8(%%"REG_S"), %%mm6 \n\t"
"pmullw %%mm7, %%mm5 \n\t"
 
"movq %%mm3, 7*8+3*8(%%"REG_D") \n\t"
"movq 14*8+0*8(%%"REG_S"), %%mm0 \n\t"
"pmullw %%mm7, %%mm6 \n\t"
 
"movq %%mm4, 7*8+4*8(%%"REG_D") \n\t"
"movq 14*8+1*8(%%"REG_S"), %%mm1 \n\t"
"pmullw %%mm7, %%mm0 \n\t"
 
"movq %%mm5, 7*8+5*8(%%"REG_D") \n\t"
"pmullw %%mm7, %%mm1 \n\t"
 
"movq %%mm6, 7*8+6*8(%%"REG_D") \n\t"
"movq %%mm0, 14*8+0*8(%%"REG_D") \n\t"
"movq %%mm1, 14*8+1*8(%%"REG_D") \n\t"
 
: "+g" (q), "+S" (adr), "+D" (adr)
:
);
}
 
static void column_fidct_mmx(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt);
static void row_idct_mmx(int16_t* workspace,
int16_t* output_adr, int output_stride, int cnt);
static void row_fdct_mmx(int16_t *data, const uint8_t *pixels, int line_size, int cnt);
 
#define store_slice_s store_slice_mmx
#define store_slice2_s store_slice2_mmx
#define mul_thrmat_s mul_thrmat_mmx
#define column_fidct_s column_fidct_mmx
#define row_idct_s row_idct_mmx
#define row_fdct_s row_fdct_mmx
#endif // HAVE_MMX
 
static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src,
int dst_stride, int src_stride,
int width, int height,
uint8_t *qp_store, int qp_stride, int is_luma)
{
int x, x0, y, es, qy, t;
const int stride= is_luma ? p->temp_stride : (width+16);//((width+16+15)&(~15))
const int step=6-p->log2_count;
const int qps= 3 + is_luma;
int32_t __attribute__((aligned(32))) block_align[4*8*BLOCKSZ+ 4*8*BLOCKSZ];
int16_t *block= (int16_t *)block_align;
int16_t *block3=(int16_t *)(block_align+4*8*BLOCKSZ);
 
memset(block3, 0, 4*8*BLOCKSZ);
 
//p->src=src-src_stride*8-8;//!
if (!src || !dst) return; // HACK avoid crash for Y8 colourspace
for(y=0; y<height; y++){
int index= 8 + 8*stride + y*stride;
fast_memcpy(p->src + index, src + y*src_stride, width);//this line can be avoided by using DR & user frame buffers
for(x=0; x<8; x++){
p->src[index - x - 1]= p->src[index + x ];
p->src[index + width + x ]= p->src[index + width - x - 1];
}
}
for(y=0; y<8; y++){
fast_memcpy(p->src + ( 7-y)*stride, p->src + ( y+8)*stride, stride);
fast_memcpy(p->src + (height+8+y)*stride, p->src + (height-y+7)*stride, stride);
}
//FIXME (try edge emu)
 
for(y=8; y<24; y++)
memset(p->temp+ 8 +y*stride, 0,width*sizeof(int16_t));
 
for(y=step; y<height+8; y+=step){ //step= 1,2
qy=y-4;
if (qy>height-1) qy=height-1;
if (qy<0) qy=0;
qy=(qy>>qps)*qp_stride;
row_fdct_s(block, p->src + y*stride +2-(y&1), stride, 2);
for(x0=0; x0<width+8-8*(BLOCKSZ-1); x0+=8*(BLOCKSZ-1)){
row_fdct_s(block+8*8, p->src + y*stride+8+x0 +2-(y&1), stride, 2*(BLOCKSZ-1));
if(p->qp)
column_fidct_s((int16_t*)(&p->threshold_mtx[0]), block+0*8, block3+0*8, 8*(BLOCKSZ-1)); //yes, this is a HOTSPOT
else
for (x=0; x<8*(BLOCKSZ-1); x+=8) {
t=x+x0-2; //the correct value is t=x+x0-2-(y&1), but it's the same
if (t<0) t=0;//t always < width-2
t=qp_store[qy+(t>>qps)];
t=norm_qscale(t, p->mpeg2);
if (t!=p->prev_q) p->prev_q=t, mul_thrmat_s(p, t);
column_fidct_s((int16_t*)(&p->threshold_mtx[0]), block+x*8, block3+x*8, 8); //yes, this is a HOTSPOT
}
row_idct_s(block3+0*8, p->temp + (y&15)*stride+x0+2-(y&1), stride, 2*(BLOCKSZ-1));
memmove(block, block+(BLOCKSZ-1)*64, 8*8*sizeof(int16_t)); //cycling
memmove(block3, block3+(BLOCKSZ-1)*64, 6*8*sizeof(int16_t));
}
//
es=width+8-x0; // 8, ...
if (es>8)
row_fdct_s(block+8*8, p->src + y*stride+8+x0 +2-(y&1), stride, (es-4)>>2);
column_fidct_s((int16_t*)(&p->threshold_mtx[0]), block, block3, es&(~1));
row_idct_s(block3+0*8, p->temp + (y&15)*stride+x0+2-(y&1), stride, es>>2);
{const int y1=y-8+step;//l5-7 l4-6
if (!(y1&7) && y1) {
if (y1&8) store_slice_s(dst + (y1-8)*dst_stride, p->temp+ 8 +8*stride,
dst_stride, stride, width, 8, 5-p->log2_count);
else store_slice2_s(dst + (y1-8)*dst_stride, p->temp+ 8 +0*stride,
dst_stride, stride, width, 8, 5-p->log2_count);
} }
}
 
if (y&7) { // == height & 7
if (y&8) store_slice_s(dst + ((y-8)&~7)*dst_stride, p->temp+ 8 +8*stride,
dst_stride, stride, width, y&7, 5-p->log2_count);
else store_slice2_s(dst + ((y-8)&~7)*dst_stride, p->temp+ 8 +0*stride,
dst_stride, stride, width, y&7, 5-p->log2_count);
}
}
 
static int config(struct vf_instance *vf,
int width, int height, int d_width, int d_height,
unsigned int flags, unsigned int outfmt)
{
int h= (height+16+15)&(~15);
 
vf->priv->temp_stride= (width+16+15)&(~15);
vf->priv->temp= (int16_t*)av_mallocz(vf->priv->temp_stride*3*8*sizeof(int16_t));
//this can also be avoided, see above
vf->priv->src = (uint8_t*)av_malloc(vf->priv->temp_stride*h*sizeof(uint8_t));
 
return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
}
 
static void get_image(struct vf_instance *vf, mp_image_t *mpi)
{
if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
// ok, we can do pp in-place (or pp disabled):
vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
mpi->type, mpi->flags, mpi->width, mpi->height);
mpi->planes[0]=vf->dmpi->planes[0];
mpi->stride[0]=vf->dmpi->stride[0];
mpi->width=vf->dmpi->width;
if(mpi->flags&MP_IMGFLAG_PLANAR){
mpi->planes[1]=vf->dmpi->planes[1];
mpi->planes[2]=vf->dmpi->planes[2];
mpi->stride[1]=vf->dmpi->stride[1];
mpi->stride[2]=vf->dmpi->stride[2];
}
mpi->flags|=MP_IMGFLAG_DIRECT;
}
 
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
{
mp_image_t *dmpi;
if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
// no DR, so get a new image! hopefully we'll get a DR buffer:
dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
MP_IMGTYPE_TEMP,
MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
mpi->width,mpi->height);
ff_vf_clone_mpi_attributes(dmpi, mpi);
}else{
dmpi=vf->dmpi;
}
 
vf->priv->mpeg2= mpi->qscale_type;
if(mpi->pict_type != 3 && mpi->qscale && !vf->priv->qp){
int w = mpi->qstride;
int h = (mpi->h + 15) >> 4;
if (!w) {
w = (mpi->w + 15) >> 4;
h = 1;
}
if(!vf->priv->non_b_qp)
vf->priv->non_b_qp= malloc(w*h);
fast_memcpy(vf->priv->non_b_qp, mpi->qscale, w*h);
}
if(vf->priv->log2_count || !(mpi->flags&MP_IMGFLAG_DIRECT)){
char *qp_tab= vf->priv->non_b_qp;
if(vf->priv->bframes || !qp_tab)
qp_tab= mpi->qscale;
 
if(qp_tab || vf->priv->qp){
filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0],
mpi->w, mpi->h, qp_tab, mpi->qstride, 1);
filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1],
mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2],
mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
}else{
memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
}
}
 
#if HAVE_MMX
if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
#endif
#if HAVE_MMX2
if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
#endif
return ff_vf_next_put_image(vf,dmpi, pts);
}
 
static void uninit(struct vf_instance *vf)
{
if(!vf->priv) return;
 
av_free(vf->priv->temp);
vf->priv->temp= NULL;
av_free(vf->priv->src);
vf->priv->src= NULL;
//free(vf->priv->avctx);
//vf->priv->avctx= NULL;
free(vf->priv->non_b_qp);
vf->priv->non_b_qp= NULL;
 
av_free(vf->priv);
vf->priv=NULL;
}
 
//===========================================================================//
 
static int query_format(struct vf_instance *vf, unsigned int fmt)
{
switch(fmt){
case IMGFMT_YVU9:
case IMGFMT_IF09:
case IMGFMT_YV12:
case IMGFMT_I420:
case IMGFMT_IYUV:
case IMGFMT_CLPL:
case IMGFMT_Y800:
case IMGFMT_Y8:
case IMGFMT_444P:
case IMGFMT_422P:
case IMGFMT_411P:
return ff_vf_next_query_format(vf,fmt);
}
return 0;
}
 
static int control(struct vf_instance *vf, int request, void* data)
{
switch(request){
case VFCTRL_QUERY_MAX_PP_LEVEL:
return 5;
case VFCTRL_SET_PP_LEVEL:
vf->priv->log2_count= *((unsigned int*)data);
if (vf->priv->log2_count < 4) vf->priv->log2_count=4;
return CONTROL_TRUE;
}
return ff_vf_next_control(vf,request,data);
}
 
static int vf_open(vf_instance_t *vf, char *args)
{
int i=0, bias;
int custom_threshold_m[64];
int log2c=-1;
 
vf->config=config;
vf->put_image=put_image;
vf->get_image=get_image;
vf->query_format=query_format;
vf->uninit=uninit;
vf->control= control;
vf->priv=av_mallocz(sizeof(struct vf_priv_s));//assumes align 16 !
 
ff_init_avcodec();
 
//vf->priv->avctx= avcodec_alloc_context();
//dsputil_init(&vf->priv->dsp, vf->priv->avctx);
 
vf->priv->log2_count= 4;
vf->priv->bframes = 0;
 
if (args) sscanf(args, "%d:%d:%d:%d", &log2c, &vf->priv->qp, &i, &vf->priv->bframes);
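/* argument layout (as parsed above): log2c:qp:bias-offset:bframes;
   the third field shifts the threshold bias around 1<<4 */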
 
if( log2c >=4 && log2c <=5 )
vf->priv->log2_count = log2c;
else if( log2c >= 6 )
vf->priv->log2_count = 5;
 
if(vf->priv->qp < 0)
vf->priv->qp = 0;
 
if (i < -15) i = -15;
if (i > 32) i = 32;
 
bias= (1<<4)+i; //adjustable
vf->priv->prev_q=0;
//
for(i=0;i<64;i++) //FIXME: tune custom_threshold[] and remove this !
custom_threshold_m[i]=(int)(custom_threshold[i]*(bias/71.)+ 0.5);
for(i=0;i<8;i++){
vf->priv->threshold_mtx_noq[2*i]=(uint64_t)custom_threshold_m[i*8+2]
|(((uint64_t)custom_threshold_m[i*8+6])<<16)
|(((uint64_t)custom_threshold_m[i*8+0])<<32)
|(((uint64_t)custom_threshold_m[i*8+4])<<48);
vf->priv->threshold_mtx_noq[2*i+1]=(uint64_t)custom_threshold_m[i*8+5]
|(((uint64_t)custom_threshold_m[i*8+3])<<16)
|(((uint64_t)custom_threshold_m[i*8+1])<<32)
|(((uint64_t)custom_threshold_m[i*8+7])<<48);
}
 
if (vf->priv->qp) vf->priv->prev_q=vf->priv->qp, mul_thrmat_s(vf->priv, vf->priv->qp);
 
return 1;
}
 
const vf_info_t ff_vf_info_fspp = {
"fast simple postprocess",
"fspp",
"Michael Niedermayer, Nikolaj Poroshin",
"",
vf_open,
NULL
};
 
//====================================================================
//SPP-specific dct, idct and threshold functions
//I'd prefer to have them in a separate file.
 
//#define MANGLE(a) #a
 
//typedef int16_t int16_t; //! only int16_t
 
#define DCTSIZE 8
#define DCTSIZE_S "8"
 
#define FIX(x,s) ((int) ((x) * (1<<s) + 0.5)&0xffff)
#define C64(x) ((uint64_t)((x)|(x)<<16))<<32 | (uint64_t)(x) | (uint64_t)(x)<<16
#define FIX64(x,s) C64(FIX(x,s))
 
#define MULTIPLY16H(x,k) (((x)*(k))>>16)
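/* the dead-zone threshold below keeps x only when |x| > t; the unsigned
   compare of (x+t) against t*2 folds both sign checks into a single test */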
#define THRESHOLD(r,x,t) if(((unsigned)((x)+t))>t*2) r=(x);else r=0;
#define DESCALE(x,n) (((x) + (1 << ((n)-1))) >> n)
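 
/* Worked example of the fixed-point helpers (illustrative, not used by the
   filter): FIX(0.707106781,14) rounds 0.707106781*2^14 to 11585, and since
   MULTIPLY16H() shifts right by 16, pre-shifting the operand left by 2
   restores the scale: ((x<<2)*k)>>16 == (x*k)>>14 ~= x*0.707106781. */
static inline int fixmul_sketch (int x)
{
return MULTIPLY16H (x << 2, FIX (0.707106781, 14)); /* ~= (int)(x * 0.707106781) */
}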
 
#if HAVE_MMX
 
DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_382683433)=FIX64(0.382683433, 14);
DECLARE_ALIGNED(8, uint64_t, ff_MM_FIX_0_541196100)=FIX64(0.541196100, 14);
DECLARE_ALIGNED(8, uint64_t, ff_MM_FIX_0_707106781)=FIX64(0.707106781, 14);
DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_306562965)=FIX64(1.306562965, 14);
 
DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_414213562_A)=FIX64(1.414213562, 14);
 
DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_847759065)=FIX64(1.847759065, 13);
DECLARE_ASM_CONST(8, uint64_t, MM_FIX_2_613125930)=FIX64(-2.613125930, 13); //-
DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_414213562)=FIX64(1.414213562, 13);
DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_082392200)=FIX64(1.082392200, 13);
//for t3,t5,t7 == 0 shortcut
DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_847759065)=FIX64(0.847759065, 14);
DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_566454497)=FIX64(0.566454497, 14);
DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_198912367)=FIX64(0.198912367, 14);
 
DECLARE_ASM_CONST(8, uint64_t, MM_DESCALE_RND)=C64(4);
DECLARE_ASM_CONST(8, uint64_t, MM_2)=C64(2);
 
#else /* !HAVE_MMX */
 
typedef int32_t int_simd16_t;
static const int16_t FIX_0_382683433=FIX(0.382683433, 14);
static const int16_t FIX_0_541196100=FIX(0.541196100, 14);
static const int16_t FIX_0_707106781=FIX(0.707106781, 14);
static const int16_t FIX_1_306562965=FIX(1.306562965, 14);
static const int16_t FIX_1_414213562_A=FIX(1.414213562, 14);
static const int16_t FIX_1_847759065=FIX(1.847759065, 13);
static const int16_t FIX_2_613125930=FIX(-2.613125930, 13); //-
static const int16_t FIX_1_414213562=FIX(1.414213562, 13);
static const int16_t FIX_1_082392200=FIX(1.082392200, 13);
 
#endif
 
#if !HAVE_MMX
 
static void column_fidct_c(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt)
{
int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
int_simd16_t tmp10, tmp11, tmp12, tmp13;
int_simd16_t z1,z2,z3,z4,z5, z10, z11, z12, z13;
int_simd16_t d0, d1, d2, d3, d4, d5, d6, d7;
 
int16_t* dataptr;
int16_t* wsptr;
int16_t *threshold;
int ctr;
 
dataptr = data;
wsptr = output;
 
for (; cnt > 0; cnt-=2) { //start positions
threshold=(int16_t*)thr_adr;//threshold_mtx
for (ctr = DCTSIZE; ctr > 0; ctr--) {
// Process columns from input, add to output.
tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7];
tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7];
 
tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6];
tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6];
 
tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5];
tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5];
 
tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4];
tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4];
 
// Even part of FDCT
 
tmp10 = tmp0 + tmp3;
tmp13 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp12 = tmp1 - tmp2;
 
d0 = tmp10 + tmp11;
d4 = tmp10 - tmp11;
 
z1 = MULTIPLY16H((tmp12 + tmp13) <<2, FIX_0_707106781);
d2 = tmp13 + z1;
d6 = tmp13 - z1;
 
// Even part of IDCT
 
THRESHOLD(tmp0, d0, threshold[0*8]);
THRESHOLD(tmp1, d2, threshold[2*8]);
THRESHOLD(tmp2, d4, threshold[4*8]);
THRESHOLD(tmp3, d6, threshold[6*8]);
tmp0+=2;
tmp10 = (tmp0 + tmp2)>>2;
tmp11 = (tmp0 - tmp2)>>2;
 
tmp13 = (tmp1 + tmp3)>>2; //+2 ! (psnr decides)
tmp12 = MULTIPLY16H((tmp1 - tmp3), FIX_1_414213562_A) - tmp13; //<<2
 
tmp0 = tmp10 + tmp13; //->temps
tmp3 = tmp10 - tmp13; //->temps
tmp1 = tmp11 + tmp12; //->temps
tmp2 = tmp11 - tmp12; //->temps
 
// Odd part of FDCT
 
tmp10 = tmp4 + tmp5;
tmp11 = tmp5 + tmp6;
tmp12 = tmp6 + tmp7;
 
z5 = MULTIPLY16H((tmp10 - tmp12)<<2, FIX_0_382683433);
z2 = MULTIPLY16H(tmp10 <<2, FIX_0_541196100) + z5;
z4 = MULTIPLY16H(tmp12 <<2, FIX_1_306562965) + z5;
z3 = MULTIPLY16H(tmp11 <<2, FIX_0_707106781);
 
z11 = tmp7 + z3;
z13 = tmp7 - z3;
 
d5 = z13 + z2;
d3 = z13 - z2;
d1 = z11 + z4;
d7 = z11 - z4;
 
// Odd part of IDCT
 
THRESHOLD(tmp4, d1, threshold[1*8]);
THRESHOLD(tmp5, d3, threshold[3*8]);
THRESHOLD(tmp6, d5, threshold[5*8]);
THRESHOLD(tmp7, d7, threshold[7*8]);
 
//The SIMD version uses a shortcut here for the tmp5,tmp6,tmp7 == 0 case
z13 = tmp6 + tmp5;
z10 = (tmp6 - tmp5)<<1;
z11 = tmp4 + tmp7;
z12 = (tmp4 - tmp7)<<1;
 
tmp7 = (z11 + z13)>>2; //+2 !
tmp11 = MULTIPLY16H((z11 - z13)<<1, FIX_1_414213562);
z5 = MULTIPLY16H(z10 + z12, FIX_1_847759065);
tmp10 = MULTIPLY16H(z12, FIX_1_082392200) - z5;
tmp12 = MULTIPLY16H(z10, FIX_2_613125930) + z5; // - !!
 
tmp6 = tmp12 - tmp7;
tmp5 = tmp11 - tmp6;
tmp4 = tmp10 + tmp5;
 
wsptr[DCTSIZE*0]+= (tmp0 + tmp7);
wsptr[DCTSIZE*1]+= (tmp1 + tmp6);
wsptr[DCTSIZE*2]+= (tmp2 + tmp5);
wsptr[DCTSIZE*3]+= (tmp3 - tmp4);
wsptr[DCTSIZE*4]+= (tmp3 + tmp4);
wsptr[DCTSIZE*5]+= (tmp2 - tmp5);
wsptr[DCTSIZE*6]= (tmp1 - tmp6);
wsptr[DCTSIZE*7]= (tmp0 - tmp7);
//
dataptr++; //next column
wsptr++;
threshold++;
}
dataptr+=8; //skip each second start pos
wsptr +=8;
}
}
 
#else /* HAVE_MMX */
 
static void column_fidct_mmx(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt)
{
uint64_t __attribute__((aligned(8))) temps[4];
__asm__ volatile(
ASMALIGN(4)
"1: \n\t"
"movq "DCTSIZE_S"*0*2(%%"REG_S"), %%mm1 \n\t"
//
"movq "DCTSIZE_S"*3*2(%%"REG_S"), %%mm7 \n\t"
"movq %%mm1, %%mm0 \n\t"
 
"paddw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm1 \n\t" //t0
"movq %%mm7, %%mm3 \n\t"
 
"paddw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm7 \n\t" //t3
"movq %%mm1, %%mm5 \n\t"
 
"movq "DCTSIZE_S"*1*2(%%"REG_S"), %%mm6 \n\t"
"psubw %%mm7, %%mm1 \n\t" //t13
 
"movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t"
"movq %%mm6, %%mm4 \n\t"
 
"paddw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm6 \n\t" //t1
"paddw %%mm7, %%mm5 \n\t" //t10
 
"paddw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t2
"movq %%mm6, %%mm7 \n\t"
 
"paddw %%mm2, %%mm6 \n\t" //t11
"psubw %%mm2, %%mm7 \n\t" //t12
 
"movq %%mm5, %%mm2 \n\t"
"paddw %%mm6, %%mm5 \n\t" //d0
// i0 t13 t12 i3 i1 d0 - d4
"psubw %%mm6, %%mm2 \n\t" //d4
"paddw %%mm1, %%mm7 \n\t"
 
"movq 4*16(%%"REG_d"), %%mm6 \n\t"
"psllw $2, %%mm7 \n\t"
 
"psubw 0*16(%%"REG_d"), %%mm5 \n\t"
"psubw %%mm6, %%mm2 \n\t"
 
"paddusw 0*16(%%"REG_d"), %%mm5 \n\t"
"paddusw %%mm6, %%mm2 \n\t"
 
"pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm7 \n\t"
//
"paddw 0*16(%%"REG_d"), %%mm5 \n\t"
"paddw %%mm6, %%mm2 \n\t"
 
"psubusw 0*16(%%"REG_d"), %%mm5 \n\t"
"psubusw %%mm6, %%mm2 \n\t"
 
//This func is totally compute-bound and operates at huge speed, so a DC shortcut
// at this place isn't worthwhile due to the BTB miss penalty (checked on a Pentium 3).
//Typical numbers: nondc - 29%%, dc - 46%%, zero - 25%%. The all-nonzero case is very rare.
"paddw "MANGLE(MM_2)", %%mm5 \n\t"
"movq %%mm2, %%mm6 \n\t"
 
"paddw %%mm5, %%mm2 \n\t"
"psubw %%mm6, %%mm5 \n\t"
 
"movq %%mm1, %%mm6 \n\t"
"paddw %%mm7, %%mm1 \n\t" //d2
 
"psubw 2*16(%%"REG_d"), %%mm1 \n\t"
"psubw %%mm7, %%mm6 \n\t" //d6
 
"movq 6*16(%%"REG_d"), %%mm7 \n\t"
"psraw $2, %%mm5 \n\t"
 
"paddusw 2*16(%%"REG_d"), %%mm1 \n\t"
"psubw %%mm7, %%mm6 \n\t"
// t7 d2 /t11 t4 t6 - d6 /t10
 
"paddw 2*16(%%"REG_d"), %%mm1 \n\t"
"paddusw %%mm7, %%mm6 \n\t"
 
"psubusw 2*16(%%"REG_d"), %%mm1 \n\t"
"paddw %%mm7, %%mm6 \n\t"
 
"psubw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm3 \n\t"
"psubusw %%mm7, %%mm6 \n\t"
 
//movq [edi+"DCTSIZE_S"*2*2], mm1
//movq [edi+"DCTSIZE_S"*6*2], mm6
"movq %%mm1, %%mm7 \n\t"
"psraw $2, %%mm2 \n\t"
 
"psubw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm4 \n\t"
"psubw %%mm6, %%mm1 \n\t"
 
"psubw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm0 \n\t"
"paddw %%mm7, %%mm6 \n\t" //'t13
 
"psraw $2, %%mm6 \n\t" //paddw mm6, MM_2 !! ---
"movq %%mm2, %%mm7 \n\t"
 
"pmulhw "MANGLE(MM_FIX_1_414213562_A)", %%mm1 \n\t"
"paddw %%mm6, %%mm2 \n\t" //'t0
 
"movq %%mm2, 0*8+%3 \n\t" //!
"psubw %%mm6, %%mm7 \n\t" //'t3
 
"movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t"
"psubw %%mm6, %%mm1 \n\t" //'t12
 
"psubw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t5
"movq %%mm5, %%mm6 \n\t"
 
"movq %%mm7, 3*8+%3 \n\t"
"paddw %%mm2, %%mm3 \n\t" //t10
 
"paddw %%mm4, %%mm2 \n\t" //t11
"paddw %%mm0, %%mm4 \n\t" //t12
 
"movq %%mm3, %%mm7 \n\t"
"psubw %%mm4, %%mm3 \n\t"
 
"psllw $2, %%mm3 \n\t"
"psllw $2, %%mm7 \n\t" //opt for P6
 
"pmulhw "MANGLE(MM_FIX_0_382683433)", %%mm3 \n\t"
"psllw $2, %%mm4 \n\t"
 
"pmulhw "MANGLE(ff_MM_FIX_0_541196100)", %%mm7 \n\t"
"psllw $2, %%mm2 \n\t"
 
"pmulhw "MANGLE(MM_FIX_1_306562965)", %%mm4 \n\t"
"paddw %%mm1, %%mm5 \n\t" //'t1
 
"pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm2 \n\t"
"psubw %%mm1, %%mm6 \n\t" //'t2
// t7 't12 't11 t4 t6 - 't13 't10 ---
 
"paddw %%mm3, %%mm7 \n\t" //z2
 
"movq %%mm5, 1*8+%3 \n\t"
"paddw %%mm3, %%mm4 \n\t" //z4
 
"movq 3*16(%%"REG_d"), %%mm3 \n\t"
"movq %%mm0, %%mm1 \n\t"
 
"movq %%mm6, 2*8+%3 \n\t"
"psubw %%mm2, %%mm1 \n\t" //z13
 
//===
"paddw %%mm2, %%mm0 \n\t" //z11
"movq %%mm1, %%mm5 \n\t"
 
"movq 5*16(%%"REG_d"), %%mm2 \n\t"
"psubw %%mm7, %%mm1 \n\t" //d3
 
"paddw %%mm7, %%mm5 \n\t" //d5
"psubw %%mm3, %%mm1 \n\t"
 
"movq 1*16(%%"REG_d"), %%mm7 \n\t"
"psubw %%mm2, %%mm5 \n\t"
 
"movq %%mm0, %%mm6 \n\t"
"paddw %%mm4, %%mm0 \n\t" //d1
 
"paddusw %%mm3, %%mm1 \n\t"
"psubw %%mm4, %%mm6 \n\t" //d7
 
// d1 d3 - - - d5 d7 -
"movq 7*16(%%"REG_d"), %%mm4 \n\t"
"psubw %%mm7, %%mm0 \n\t"
 
"psubw %%mm4, %%mm6 \n\t"
"paddusw %%mm2, %%mm5 \n\t"
 
"paddusw %%mm4, %%mm6 \n\t"
"paddw %%mm3, %%mm1 \n\t"
 
"paddw %%mm2, %%mm5 \n\t"
"paddw %%mm4, %%mm6 \n\t"
 
"psubusw %%mm3, %%mm1 \n\t"
"psubusw %%mm2, %%mm5 \n\t"
 
"psubusw %%mm4, %%mm6 \n\t"
"movq %%mm1, %%mm4 \n\t"
 
"por %%mm5, %%mm4 \n\t"
"paddusw %%mm7, %%mm0 \n\t"
 
"por %%mm6, %%mm4 \n\t"
"paddw %%mm7, %%mm0 \n\t"
 
"packssdw %%mm4, %%mm4 \n\t"
"psubusw %%mm7, %%mm0 \n\t"
 
"movd %%mm4, %%"REG_a" \n\t"
"or %%"REG_a", %%"REG_a" \n\t"
"jnz 2f \n\t"
//movq [edi+"DCTSIZE_S"*3*2], mm1
//movq [edi+"DCTSIZE_S"*5*2], mm5
//movq [edi+"DCTSIZE_S"*1*2], mm0
//movq [edi+"DCTSIZE_S"*7*2], mm6
// t4 t5 - - - t6 t7 -
//--- t4 (mm0) may be <>0; mm1, mm5, mm6 == 0
//Typical numbers: nondc - 19%%, dc - 26%%, zero - 55%%. The zero case alone isn't worthwhile
"movq 0*8+%3, %%mm4 \n\t"
"movq %%mm0, %%mm1 \n\t"
 
"pmulhw "MANGLE(MM_FIX_0_847759065)", %%mm0 \n\t" //tmp6
"movq %%mm1, %%mm2 \n\t"
 
"movq "DCTSIZE_S"*0*2(%%"REG_D"), %%mm5 \n\t"
"movq %%mm2, %%mm3 \n\t"
 
"pmulhw "MANGLE(MM_FIX_0_566454497)", %%mm1 \n\t" //tmp5
"paddw %%mm4, %%mm5 \n\t"
 
"movq 1*8+%3, %%mm6 \n\t"
//paddw mm3, MM_2
"psraw $2, %%mm3 \n\t" //tmp7
 
"pmulhw "MANGLE(MM_FIX_0_198912367)", %%mm2 \n\t" //-tmp4
"psubw %%mm3, %%mm4 \n\t"
 
"movq "DCTSIZE_S"*1*2(%%"REG_D"), %%mm7 \n\t"
"paddw %%mm3, %%mm5 \n\t"
 
"movq %%mm4, "DCTSIZE_S"*7*2(%%"REG_D") \n\t"
"paddw %%mm6, %%mm7 \n\t"
 
"movq 2*8+%3, %%mm3 \n\t"
"psubw %%mm0, %%mm6 \n\t"
 
"movq "DCTSIZE_S"*2*2(%%"REG_D"), %%mm4 \n\t"
"paddw %%mm0, %%mm7 \n\t"
 
"movq %%mm5, "DCTSIZE_S"*0*2(%%"REG_D") \n\t"
"paddw %%mm3, %%mm4 \n\t"
 
"movq %%mm6, "DCTSIZE_S"*6*2(%%"REG_D") \n\t"
"psubw %%mm1, %%mm3 \n\t"
 
"movq "DCTSIZE_S"*5*2(%%"REG_D"), %%mm5 \n\t"
"paddw %%mm1, %%mm4 \n\t"
 
"movq "DCTSIZE_S"*3*2(%%"REG_D"), %%mm6 \n\t"
"paddw %%mm3, %%mm5 \n\t"
 
"movq 3*8+%3, %%mm0 \n\t"
"add $8, %%"REG_S" \n\t"
 
"movq %%mm7, "DCTSIZE_S"*1*2(%%"REG_D") \n\t"
"paddw %%mm0, %%mm6 \n\t"
 
"movq %%mm4, "DCTSIZE_S"*2*2(%%"REG_D") \n\t"
"psubw %%mm2, %%mm0 \n\t"
 
"movq "DCTSIZE_S"*4*2(%%"REG_D"), %%mm7 \n\t"
"paddw %%mm2, %%mm6 \n\t"
 
"movq %%mm5, "DCTSIZE_S"*5*2(%%"REG_D") \n\t"
"paddw %%mm0, %%mm7 \n\t"
 
"movq %%mm6, "DCTSIZE_S"*3*2(%%"REG_D") \n\t"
 
"movq %%mm7, "DCTSIZE_S"*4*2(%%"REG_D") \n\t"
"add $8, %%"REG_D" \n\t"
"jmp 4f \n\t"
 
"2: \n\t"
//--- non DC2
//psraw mm1, 2 w/o it -> offset. thr1, thr1, thr1 (actually thr1, thr1, thr1-1)
//psraw mm5, 2
//psraw mm0, 2
//psraw mm6, 2
"movq %%mm5, %%mm3 \n\t"
"psubw %%mm1, %%mm5 \n\t"
 
"psllw $1, %%mm5 \n\t" //'z10
"paddw %%mm1, %%mm3 \n\t" //'z13
 
"movq %%mm0, %%mm2 \n\t"
"psubw %%mm6, %%mm0 \n\t"
 
"movq %%mm5, %%mm1 \n\t"
"psllw $1, %%mm0 \n\t" //'z12
 
"pmulhw "MANGLE(MM_FIX_2_613125930)", %%mm1 \n\t" //-
"paddw %%mm0, %%mm5 \n\t"
 
"pmulhw "MANGLE(MM_FIX_1_847759065)", %%mm5 \n\t" //'z5
"paddw %%mm6, %%mm2 \n\t" //'z11
 
"pmulhw "MANGLE(MM_FIX_1_082392200)", %%mm0 \n\t"
"movq %%mm2, %%mm7 \n\t"
 
//---
"movq 0*8+%3, %%mm4 \n\t"
"psubw %%mm3, %%mm2 \n\t"
 
"psllw $1, %%mm2 \n\t"
"paddw %%mm3, %%mm7 \n\t" //'t7
 
"pmulhw "MANGLE(MM_FIX_1_414213562)", %%mm2 \n\t" //'t11
"movq %%mm4, %%mm6 \n\t"
//paddw mm7, MM_2
"psraw $2, %%mm7 \n\t"
 
"paddw "DCTSIZE_S"*0*2(%%"REG_D"), %%mm4 \n\t"
"psubw %%mm7, %%mm6 \n\t"
 
"movq 1*8+%3, %%mm3 \n\t"
"paddw %%mm7, %%mm4 \n\t"
 
"movq %%mm6, "DCTSIZE_S"*7*2(%%"REG_D") \n\t"
"paddw %%mm5, %%mm1 \n\t" //'t12
 
"movq %%mm4, "DCTSIZE_S"*0*2(%%"REG_D") \n\t"
"psubw %%mm7, %%mm1 \n\t" //'t6
 
"movq 2*8+%3, %%mm7 \n\t"
"psubw %%mm5, %%mm0 \n\t" //'t10
 
"movq 3*8+%3, %%mm6 \n\t"
"movq %%mm3, %%mm5 \n\t"
 
"paddw "DCTSIZE_S"*1*2(%%"REG_D"), %%mm3 \n\t"
"psubw %%mm1, %%mm5 \n\t"
 
"psubw %%mm1, %%mm2 \n\t" //'t5
"paddw %%mm1, %%mm3 \n\t"
 
"movq %%mm5, "DCTSIZE_S"*6*2(%%"REG_D") \n\t"
"movq %%mm7, %%mm4 \n\t"
 
"paddw "DCTSIZE_S"*2*2(%%"REG_D"), %%mm7 \n\t"
"psubw %%mm2, %%mm4 \n\t"
 
"paddw "DCTSIZE_S"*5*2(%%"REG_D"), %%mm4 \n\t"
"paddw %%mm2, %%mm7 \n\t"
 
"movq %%mm3, "DCTSIZE_S"*1*2(%%"REG_D") \n\t"
"paddw %%mm2, %%mm0 \n\t" //'t4
 
// 't4 't6 't5 - - - - 't7
"movq %%mm7, "DCTSIZE_S"*2*2(%%"REG_D") \n\t"
"movq %%mm6, %%mm1 \n\t"
 
"paddw "DCTSIZE_S"*4*2(%%"REG_D"), %%mm6 \n\t"
"psubw %%mm0, %%mm1 \n\t"
 
"paddw "DCTSIZE_S"*3*2(%%"REG_D"), %%mm1 \n\t"
"paddw %%mm0, %%mm6 \n\t"
 
"movq %%mm4, "DCTSIZE_S"*5*2(%%"REG_D") \n\t"
"add $8, %%"REG_S" \n\t"
 
"movq %%mm6, "DCTSIZE_S"*4*2(%%"REG_D") \n\t"
 
"movq %%mm1, "DCTSIZE_S"*3*2(%%"REG_D") \n\t"
"add $8, %%"REG_D" \n\t"
 
"4: \n\t"
//=part 2 (the same)===========================================================
"movq "DCTSIZE_S"*0*2(%%"REG_S"), %%mm1 \n\t"
//
"movq "DCTSIZE_S"*3*2(%%"REG_S"), %%mm7 \n\t"
"movq %%mm1, %%mm0 \n\t"
 
"paddw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm1 \n\t" //t0
"movq %%mm7, %%mm3 \n\t"
 
"paddw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm7 \n\t" //t3
"movq %%mm1, %%mm5 \n\t"
 
"movq "DCTSIZE_S"*1*2(%%"REG_S"), %%mm6 \n\t"
"psubw %%mm7, %%mm1 \n\t" //t13
 
"movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t"
"movq %%mm6, %%mm4 \n\t"
 
"paddw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm6 \n\t" //t1
"paddw %%mm7, %%mm5 \n\t" //t10
 
"paddw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t2
"movq %%mm6, %%mm7 \n\t"
 
"paddw %%mm2, %%mm6 \n\t" //t11
"psubw %%mm2, %%mm7 \n\t" //t12
 
"movq %%mm5, %%mm2 \n\t"
"paddw %%mm6, %%mm5 \n\t" //d0
// i0 t13 t12 i3 i1 d0 - d4
"psubw %%mm6, %%mm2 \n\t" //d4
"paddw %%mm1, %%mm7 \n\t"
 
"movq 1*8+4*16(%%"REG_d"), %%mm6 \n\t"
"psllw $2, %%mm7 \n\t"
 
"psubw 1*8+0*16(%%"REG_d"), %%mm5 \n\t"
"psubw %%mm6, %%mm2 \n\t"
 
"paddusw 1*8+0*16(%%"REG_d"), %%mm5 \n\t"
"paddusw %%mm6, %%mm2 \n\t"
 
"pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm7 \n\t"
//
"paddw 1*8+0*16(%%"REG_d"), %%mm5 \n\t"
"paddw %%mm6, %%mm2 \n\t"
 
"psubusw 1*8+0*16(%%"REG_d"), %%mm5 \n\t"
"psubusw %%mm6, %%mm2 \n\t"
 
//This func is totally compute-bound and operates at huge speed, so a DC shortcut
// at this place isn't worthwhile due to the BTB miss penalty (checked on a Pentium 3).
//Typical numbers: nondc - 29%%, dc - 46%%, zero - 25%%. The all-nonzero case is very rare.
"paddw "MANGLE(MM_2)", %%mm5 \n\t"
"movq %%mm2, %%mm6 \n\t"
 
"paddw %%mm5, %%mm2 \n\t"
"psubw %%mm6, %%mm5 \n\t"
 
"movq %%mm1, %%mm6 \n\t"
"paddw %%mm7, %%mm1 \n\t" //d2
 
"psubw 1*8+2*16(%%"REG_d"), %%mm1 \n\t"
"psubw %%mm7, %%mm6 \n\t" //d6
 
"movq 1*8+6*16(%%"REG_d"), %%mm7 \n\t"
"psraw $2, %%mm5 \n\t"
 
"paddusw 1*8+2*16(%%"REG_d"), %%mm1 \n\t"
"psubw %%mm7, %%mm6 \n\t"
// t7 d2 /t11 t4 t6 - d6 /t10
 
"paddw 1*8+2*16(%%"REG_d"), %%mm1 \n\t"
"paddusw %%mm7, %%mm6 \n\t"
 
"psubusw 1*8+2*16(%%"REG_d"), %%mm1 \n\t"
"paddw %%mm7, %%mm6 \n\t"
 
"psubw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm3 \n\t"
"psubusw %%mm7, %%mm6 \n\t"
 
//movq [edi+"DCTSIZE_S"*2*2], mm1
//movq [edi+"DCTSIZE_S"*6*2], mm6
"movq %%mm1, %%mm7 \n\t"
"psraw $2, %%mm2 \n\t"
 
"psubw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm4 \n\t"
"psubw %%mm6, %%mm1 \n\t"
 
"psubw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm0 \n\t"
"paddw %%mm7, %%mm6 \n\t" //'t13
 
"psraw $2, %%mm6 \n\t" //paddw mm6, MM_2 !! ---
"movq %%mm2, %%mm7 \n\t"
 
"pmulhw "MANGLE(MM_FIX_1_414213562_A)", %%mm1 \n\t"
"paddw %%mm6, %%mm2 \n\t" //'t0
 
"movq %%mm2, 0*8+%3 \n\t" //!
"psubw %%mm6, %%mm7 \n\t" //'t3
 
"movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t"
"psubw %%mm6, %%mm1 \n\t" //'t12
 
"psubw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t5
"movq %%mm5, %%mm6 \n\t"
 
"movq %%mm7, 3*8+%3 \n\t"
"paddw %%mm2, %%mm3 \n\t" //t10
 
"paddw %%mm4, %%mm2 \n\t" //t11
"paddw %%mm0, %%mm4 \n\t" //t12
 
"movq %%mm3, %%mm7 \n\t"
"psubw %%mm4, %%mm3 \n\t"
 
"psllw $2, %%mm3 \n\t"
"psllw $2, %%mm7 \n\t" //opt for P6
 
"pmulhw "MANGLE(MM_FIX_0_382683433)", %%mm3 \n\t"
"psllw $2, %%mm4 \n\t"
 
"pmulhw "MANGLE(ff_MM_FIX_0_541196100)", %%mm7 \n\t"
"psllw $2, %%mm2 \n\t"
 
"pmulhw "MANGLE(MM_FIX_1_306562965)", %%mm4 \n\t"
"paddw %%mm1, %%mm5 \n\t" //'t1
 
"pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm2 \n\t"
"psubw %%mm1, %%mm6 \n\t" //'t2
// t7 't12 't11 t4 t6 - 't13 't10 ---
 
"paddw %%mm3, %%mm7 \n\t" //z2
 
"movq %%mm5, 1*8+%3 \n\t"
"paddw %%mm3, %%mm4 \n\t" //z4
 
"movq 1*8+3*16(%%"REG_d"), %%mm3 \n\t"
"movq %%mm0, %%mm1 \n\t"
 
"movq %%mm6, 2*8+%3 \n\t"
"psubw %%mm2, %%mm1 \n\t" //z13
 
//===
"paddw %%mm2, %%mm0 \n\t" //z11
"movq %%mm1, %%mm5 \n\t"
 
"movq 1*8+5*16(%%"REG_d"), %%mm2 \n\t"
"psubw %%mm7, %%mm1 \n\t" //d3
 
"paddw %%mm7, %%mm5 \n\t" //d5
"psubw %%mm3, %%mm1 \n\t"
 
"movq 1*8+1*16(%%"REG_d"), %%mm7 \n\t"
"psubw %%mm2, %%mm5 \n\t"
 
"movq %%mm0, %%mm6 \n\t"
"paddw %%mm4, %%mm0 \n\t" //d1
 
"paddusw %%mm3, %%mm1 \n\t"
"psubw %%mm4, %%mm6 \n\t" //d7
 
// d1 d3 - - - d5 d7 -
"movq 1*8+7*16(%%"REG_d"), %%mm4 \n\t"
"psubw %%mm7, %%mm0 \n\t"
 
"psubw %%mm4, %%mm6 \n\t"
"paddusw %%mm2, %%mm5 \n\t"
 
"paddusw %%mm4, %%mm6 \n\t"
"paddw %%mm3, %%mm1 \n\t"
 
"paddw %%mm2, %%mm5 \n\t"
"paddw %%mm4, %%mm6 \n\t"
 
"psubusw %%mm3, %%mm1 \n\t"
"psubusw %%mm2, %%mm5 \n\t"
 
"psubusw %%mm4, %%mm6 \n\t"
"movq %%mm1, %%mm4 \n\t"
 
"por %%mm5, %%mm4 \n\t"
"paddusw %%mm7, %%mm0 \n\t"
 
"por %%mm6, %%mm4 \n\t"
"paddw %%mm7, %%mm0 \n\t"
 
"packssdw %%mm4, %%mm4 \n\t"
"psubusw %%mm7, %%mm0 \n\t"
 
"movd %%mm4, %%"REG_a" \n\t"
"or %%"REG_a", %%"REG_a" \n\t"
"jnz 3f \n\t"
//movq [edi+"DCTSIZE_S"*3*2], mm1
//movq [edi+"DCTSIZE_S"*5*2], mm5
//movq [edi+"DCTSIZE_S"*1*2], mm0
//movq [edi+"DCTSIZE_S"*7*2], mm6
// t4 t5 - - - t6 t7 -
//--- t4 (mm0) may be <>0; mm1, mm5, mm6 == 0
//Typical numbers: nondc - 19%%, dc - 26%%, zero - 55%%. The zero case alone isn't worthwhile
"movq 0*8+%3, %%mm4 \n\t"
"movq %%mm0, %%mm1 \n\t"
 
"pmulhw "MANGLE(MM_FIX_0_847759065)", %%mm0 \n\t" //tmp6
"movq %%mm1, %%mm2 \n\t"
 
"movq "DCTSIZE_S"*0*2(%%"REG_D"), %%mm5 \n\t"
"movq %%mm2, %%mm3 \n\t"
 
"pmulhw "MANGLE(MM_FIX_0_566454497)", %%mm1 \n\t" //tmp5
"paddw %%mm4, %%mm5 \n\t"
 
"movq 1*8+%3, %%mm6 \n\t"
//paddw mm3, MM_2
"psraw $2, %%mm3 \n\t" //tmp7
 
"pmulhw "MANGLE(MM_FIX_0_198912367)", %%mm2 \n\t" //-tmp4
"psubw %%mm3, %%mm4 \n\t"
 
"movq "DCTSIZE_S"*1*2(%%"REG_D"), %%mm7 \n\t"
"paddw %%mm3, %%mm5 \n\t"
 
"movq %%mm4, "DCTSIZE_S"*7*2(%%"REG_D") \n\t"
"paddw %%mm6, %%mm7 \n\t"
 
"movq 2*8+%3, %%mm3 \n\t"
"psubw %%mm0, %%mm6 \n\t"
 
"movq "DCTSIZE_S"*2*2(%%"REG_D"), %%mm4 \n\t"
"paddw %%mm0, %%mm7 \n\t"
 
"movq %%mm5, "DCTSIZE_S"*0*2(%%"REG_D") \n\t"
"paddw %%mm3, %%mm4 \n\t"
 
"movq %%mm6, "DCTSIZE_S"*6*2(%%"REG_D") \n\t"
"psubw %%mm1, %%mm3 \n\t"
 
"movq "DCTSIZE_S"*5*2(%%"REG_D"), %%mm5 \n\t"
"paddw %%mm1, %%mm4 \n\t"
 
"movq "DCTSIZE_S"*3*2(%%"REG_D"), %%mm6 \n\t"
"paddw %%mm3, %%mm5 \n\t"
 
"movq 3*8+%3, %%mm0 \n\t"
"add $24, %%"REG_S" \n\t"
 
"movq %%mm7, "DCTSIZE_S"*1*2(%%"REG_D") \n\t"
"paddw %%mm0, %%mm6 \n\t"
 
"movq %%mm4, "DCTSIZE_S"*2*2(%%"REG_D") \n\t"
"psubw %%mm2, %%mm0 \n\t"
 
"movq "DCTSIZE_S"*4*2(%%"REG_D"), %%mm7 \n\t"
"paddw %%mm2, %%mm6 \n\t"
 
"movq %%mm5, "DCTSIZE_S"*5*2(%%"REG_D") \n\t"
"paddw %%mm0, %%mm7 \n\t"
 
"movq %%mm6, "DCTSIZE_S"*3*2(%%"REG_D") \n\t"
 
"movq %%mm7, "DCTSIZE_S"*4*2(%%"REG_D") \n\t"
"add $24, %%"REG_D" \n\t"
"sub $2, %%"REG_c" \n\t"
"jnz 1b \n\t"
"jmp 5f \n\t"
 
"3: \n\t"
//--- non DC2
//psraw mm1, 2 w/o it -> offset. thr1, thr1, thr1 (actually thr1, thr1, thr1-1)
//psraw mm5, 2
//psraw mm0, 2
//psraw mm6, 2
"movq %%mm5, %%mm3 \n\t"
"psubw %%mm1, %%mm5 \n\t"
 
"psllw $1, %%mm5 \n\t" //'z10
"paddw %%mm1, %%mm3 \n\t" //'z13
 
"movq %%mm0, %%mm2 \n\t"
"psubw %%mm6, %%mm0 \n\t"
 
"movq %%mm5, %%mm1 \n\t"
"psllw $1, %%mm0 \n\t" //'z12
 
"pmulhw "MANGLE(MM_FIX_2_613125930)", %%mm1 \n\t" //-
"paddw %%mm0, %%mm5 \n\t"
 
"pmulhw "MANGLE(MM_FIX_1_847759065)", %%mm5 \n\t" //'z5
"paddw %%mm6, %%mm2 \n\t" //'z11
 
"pmulhw "MANGLE(MM_FIX_1_082392200)", %%mm0 \n\t"
"movq %%mm2, %%mm7 \n\t"
 
//---
"movq 0*8+%3, %%mm4 \n\t"
"psubw %%mm3, %%mm2 \n\t"
 
"psllw $1, %%mm2 \n\t"
"paddw %%mm3, %%mm7 \n\t" //'t7
 
"pmulhw "MANGLE(MM_FIX_1_414213562)", %%mm2 \n\t" //'t11
"movq %%mm4, %%mm6 \n\t"
//paddw mm7, MM_2
"psraw $2, %%mm7 \n\t"
 
"paddw "DCTSIZE_S"*0*2(%%"REG_D"), %%mm4 \n\t"
"psubw %%mm7, %%mm6 \n\t"
 
"movq 1*8+%3, %%mm3 \n\t"
"paddw %%mm7, %%mm4 \n\t"
 
"movq %%mm6, "DCTSIZE_S"*7*2(%%"REG_D") \n\t"
"paddw %%mm5, %%mm1 \n\t" //'t12
 
"movq %%mm4, "DCTSIZE_S"*0*2(%%"REG_D") \n\t"
"psubw %%mm7, %%mm1 \n\t" //'t6
 
"movq 2*8+%3, %%mm7 \n\t"
"psubw %%mm5, %%mm0 \n\t" //'t10
 
"movq 3*8+%3, %%mm6 \n\t"
"movq %%mm3, %%mm5 \n\t"
 
"paddw "DCTSIZE_S"*1*2(%%"REG_D"), %%mm3 \n\t"
"psubw %%mm1, %%mm5 \n\t"
 
"psubw %%mm1, %%mm2 \n\t" //'t5
"paddw %%mm1, %%mm3 \n\t"
 
"movq %%mm5, "DCTSIZE_S"*6*2(%%"REG_D") \n\t"
"movq %%mm7, %%mm4 \n\t"
 
"paddw "DCTSIZE_S"*2*2(%%"REG_D"), %%mm7 \n\t"
"psubw %%mm2, %%mm4 \n\t"
 
"paddw "DCTSIZE_S"*5*2(%%"REG_D"), %%mm4 \n\t"
"paddw %%mm2, %%mm7 \n\t"
 
"movq %%mm3, "DCTSIZE_S"*1*2(%%"REG_D") \n\t"
"paddw %%mm2, %%mm0 \n\t" //'t4
 
// 't4 't6 't5 - - - - 't7
"movq %%mm7, "DCTSIZE_S"*2*2(%%"REG_D") \n\t"
"movq %%mm6, %%mm1 \n\t"
 
"paddw "DCTSIZE_S"*4*2(%%"REG_D"), %%mm6 \n\t"
"psubw %%mm0, %%mm1 \n\t"
 
"paddw "DCTSIZE_S"*3*2(%%"REG_D"), %%mm1 \n\t"
"paddw %%mm0, %%mm6 \n\t"
 
"movq %%mm4, "DCTSIZE_S"*5*2(%%"REG_D") \n\t"
"add $24, %%"REG_S" \n\t"
 
"movq %%mm6, "DCTSIZE_S"*4*2(%%"REG_D") \n\t"
 
"movq %%mm1, "DCTSIZE_S"*3*2(%%"REG_D") \n\t"
"add $24, %%"REG_D" \n\t"
"sub $2, %%"REG_c" \n\t"
"jnz 1b \n\t"
"5: \n\t"
 
: "+S"(data), "+D"(output), "+c"(cnt), "=o"(temps)
: "d"(thr_adr)
: "%"REG_a
);
}
 
#endif // HAVE_MMX
 
#if !HAVE_MMX
 
static void row_idct_c(int16_t* workspace,
int16_t* output_adr, int output_stride, int cnt)
{
int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
int_simd16_t tmp10, tmp11, tmp12, tmp13;
int_simd16_t z5, z10, z11, z12, z13;
int16_t* outptr;
int16_t* wsptr;
 
cnt*=4;
wsptr = workspace;
outptr = output_adr;
for (; cnt > 0; cnt--) {
// Even part
//The SIMD version reads a 4x4 block and transposes it
tmp10 = ( wsptr[2] + wsptr[3]);
tmp11 = ( wsptr[2] - wsptr[3]);
 
tmp13 = ( wsptr[0] + wsptr[1]);
tmp12 = (MULTIPLY16H( wsptr[0] - wsptr[1], FIX_1_414213562_A)<<2) - tmp13;//this shift order avoids overflow
 
tmp0 = tmp10 + tmp13; //->temps
tmp3 = tmp10 - tmp13; //->temps
tmp1 = tmp11 + tmp12;
tmp2 = tmp11 - tmp12;
 
// Odd part
//Also transpose, with previous:
// ---- ---- ||||
// ---- ---- idct ||||
// ---- ---- ---> ||||
// ---- ---- ||||
z13 = wsptr[4] + wsptr[5];
z10 = wsptr[4] - wsptr[5];
z11 = wsptr[6] + wsptr[7];
z12 = wsptr[6] - wsptr[7];
 
tmp7 = z11 + z13;
tmp11 = MULTIPLY16H(z11 - z13, FIX_1_414213562);
 
z5 = MULTIPLY16H(z10 + z12, FIX_1_847759065);
tmp10 = MULTIPLY16H(z12, FIX_1_082392200) - z5;
tmp12 = MULTIPLY16H(z10, FIX_2_613125930) + z5; // - FIX_
 
tmp6 = (tmp12<<3) - tmp7;
tmp5 = (tmp11<<3) - tmp6;
tmp4 = (tmp10<<3) + tmp5;
 
// Final output stage: descale and write column
outptr[0*output_stride]+= DESCALE(tmp0 + tmp7, 3);
outptr[1*output_stride]+= DESCALE(tmp1 + tmp6, 3);
outptr[2*output_stride]+= DESCALE(tmp2 + tmp5, 3);
outptr[3*output_stride]+= DESCALE(tmp3 - tmp4, 3);
outptr[4*output_stride]+= DESCALE(tmp3 + tmp4, 3);
outptr[5*output_stride]+= DESCALE(tmp2 - tmp5, 3);
outptr[6*output_stride]+= DESCALE(tmp1 - tmp6, 3); //no += ?
outptr[7*output_stride]+= DESCALE(tmp0 - tmp7, 3); //no += ?
outptr++;
 
wsptr += DCTSIZE; // advance pointer to next row
}
}
 
#else /* HAVE_MMX */
 
static void row_idct_mmx (int16_t* workspace,
int16_t* output_adr, int output_stride, int cnt)
{
uint64_t __attribute__((aligned(8))) temps[4];
__asm__ volatile(
"lea (%%"REG_a",%%"REG_a",2), %%"REG_d" \n\t"
"1: \n\t"
"movq "DCTSIZE_S"*0*2(%%"REG_S"), %%mm0 \n\t"
//
 
"movq "DCTSIZE_S"*1*2(%%"REG_S"), %%mm1 \n\t"
"movq %%mm0, %%mm4 \n\t"
 
"movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t"
"punpcklwd %%mm1, %%mm0 \n\t"
 
"movq "DCTSIZE_S"*3*2(%%"REG_S"), %%mm3 \n\t"
"punpckhwd %%mm1, %%mm4 \n\t"
 
//transpose 4x4
"movq %%mm2, %%mm7 \n\t"
"punpcklwd %%mm3, %%mm2 \n\t"
 
"movq %%mm0, %%mm6 \n\t"
"punpckldq %%mm2, %%mm0 \n\t" //0
 
"punpckhdq %%mm2, %%mm6 \n\t" //1
"movq %%mm0, %%mm5 \n\t"
 
"punpckhwd %%mm3, %%mm7 \n\t"
"psubw %%mm6, %%mm0 \n\t"
 
"pmulhw "MANGLE(MM_FIX_1_414213562_A)", %%mm0 \n\t"
"movq %%mm4, %%mm2 \n\t"
 
"punpckldq %%mm7, %%mm4 \n\t" //2
"paddw %%mm6, %%mm5 \n\t"
 
"punpckhdq %%mm7, %%mm2 \n\t" //3
"movq %%mm4, %%mm1 \n\t"
 
"psllw $2, %%mm0 \n\t"
"paddw %%mm2, %%mm4 \n\t" //t10
 
"movq "DCTSIZE_S"*0*2+"DCTSIZE_S"(%%"REG_S"), %%mm3 \n\t"
"psubw %%mm2, %%mm1 \n\t" //t11
 
"movq "DCTSIZE_S"*1*2+"DCTSIZE_S"(%%"REG_S"), %%mm2 \n\t"
"psubw %%mm5, %%mm0 \n\t"
 
"movq %%mm4, %%mm6 \n\t"
"paddw %%mm5, %%mm4 \n\t" //t0
 
"psubw %%mm5, %%mm6 \n\t" //t3
"movq %%mm1, %%mm7 \n\t"
 
"movq "DCTSIZE_S"*2*2+"DCTSIZE_S"(%%"REG_S"), %%mm5 \n\t"
"paddw %%mm0, %%mm1 \n\t" //t1
 
"movq %%mm4, 0*8+%3 \n\t" //t0
"movq %%mm3, %%mm4 \n\t"
 
"movq %%mm6, 1*8+%3 \n\t" //t3
"punpcklwd %%mm2, %%mm3 \n\t"
 
//transpose 4x4
"movq "DCTSIZE_S"*3*2+"DCTSIZE_S"(%%"REG_S"), %%mm6 \n\t"
"punpckhwd %%mm2, %%mm4 \n\t"
 
"movq %%mm5, %%mm2 \n\t"
"punpcklwd %%mm6, %%mm5 \n\t"
 
"psubw %%mm0, %%mm7 \n\t" //t2
"punpckhwd %%mm6, %%mm2 \n\t"
 
"movq %%mm3, %%mm0 \n\t"
"punpckldq %%mm5, %%mm3 \n\t" //4
 
"punpckhdq %%mm5, %%mm0 \n\t" //5
"movq %%mm4, %%mm5 \n\t"
 
//
"movq %%mm3, %%mm6 \n\t"
"punpckldq %%mm2, %%mm4 \n\t" //6
 
"psubw %%mm0, %%mm3 \n\t" //z10
"punpckhdq %%mm2, %%mm5 \n\t" //7
 
"paddw %%mm0, %%mm6 \n\t" //z13
"movq %%mm4, %%mm2 \n\t"
 
"movq %%mm3, %%mm0 \n\t"
"psubw %%mm5, %%mm4 \n\t" //z12
 
"pmulhw "MANGLE(MM_FIX_2_613125930)", %%mm0 \n\t" //-
"paddw %%mm4, %%mm3 \n\t"
 
"pmulhw "MANGLE(MM_FIX_1_847759065)", %%mm3 \n\t" //z5
"paddw %%mm5, %%mm2 \n\t" //z11 >
 
"pmulhw "MANGLE(MM_FIX_1_082392200)", %%mm4 \n\t"
"movq %%mm2, %%mm5 \n\t"
 
"psubw %%mm6, %%mm2 \n\t"
"paddw %%mm6, %%mm5 \n\t" //t7
 
"pmulhw "MANGLE(MM_FIX_1_414213562)", %%mm2 \n\t" //t11
"paddw %%mm3, %%mm0 \n\t" //t12
 
"psllw $3, %%mm0 \n\t"
"psubw %%mm3, %%mm4 \n\t" //t10
 
"movq 0*8+%3, %%mm6 \n\t"
"movq %%mm1, %%mm3 \n\t"
 
"psllw $3, %%mm4 \n\t"
"psubw %%mm5, %%mm0 \n\t" //t6
 
"psllw $3, %%mm2 \n\t"
"paddw %%mm0, %%mm1 \n\t" //d1
 
"psubw %%mm0, %%mm2 \n\t" //t5
"psubw %%mm0, %%mm3 \n\t" //d6
 
"paddw %%mm2, %%mm4 \n\t" //t4
"movq %%mm7, %%mm0 \n\t"
 
"paddw %%mm2, %%mm7 \n\t" //d2
"psubw %%mm2, %%mm0 \n\t" //d5
 
"movq "MANGLE(MM_DESCALE_RND)", %%mm2 \n\t" //4
"psubw %%mm5, %%mm6 \n\t" //d7
 
"paddw 0*8+%3, %%mm5 \n\t" //d0
"paddw %%mm2, %%mm1 \n\t"
 
"paddw %%mm2, %%mm5 \n\t"
"psraw $3, %%mm1 \n\t"
 
"paddw %%mm2, %%mm7 \n\t"
"psraw $3, %%mm5 \n\t"
 
"paddw (%%"REG_D"), %%mm5 \n\t"
"psraw $3, %%mm7 \n\t"
 
"paddw (%%"REG_D",%%"REG_a",), %%mm1 \n\t"
"paddw %%mm2, %%mm0 \n\t"
 
"paddw (%%"REG_D",%%"REG_a",2), %%mm7 \n\t"
"paddw %%mm2, %%mm3 \n\t"
 
"movq %%mm5, (%%"REG_D") \n\t"
"paddw %%mm2, %%mm6 \n\t"
 
"movq %%mm1, (%%"REG_D",%%"REG_a",) \n\t"
"psraw $3, %%mm0 \n\t"
 
"movq %%mm7, (%%"REG_D",%%"REG_a",2) \n\t"
"add %%"REG_d", %%"REG_D" \n\t" //3*ls
 
"movq 1*8+%3, %%mm5 \n\t" //t3
"psraw $3, %%mm3 \n\t"
 
"paddw (%%"REG_D",%%"REG_a",2), %%mm0 \n\t"
"psubw %%mm4, %%mm5 \n\t" //d3
 
"paddw (%%"REG_D",%%"REG_d",), %%mm3 \n\t"
"psraw $3, %%mm6 \n\t"
 
"paddw 1*8+%3, %%mm4 \n\t" //d4
"paddw %%mm2, %%mm5 \n\t"
 
"paddw (%%"REG_D",%%"REG_a",4), %%mm6 \n\t"
"paddw %%mm2, %%mm4 \n\t"
 
"movq %%mm0, (%%"REG_D",%%"REG_a",2) \n\t"
"psraw $3, %%mm5 \n\t"
 
"paddw (%%"REG_D"), %%mm5 \n\t"
"psraw $3, %%mm4 \n\t"
 
"paddw (%%"REG_D",%%"REG_a",), %%mm4 \n\t"
"add $"DCTSIZE_S"*2*4, %%"REG_S" \n\t" //4 rows
 
"movq %%mm3, (%%"REG_D",%%"REG_d",) \n\t"
"movq %%mm6, (%%"REG_D",%%"REG_a",4) \n\t"
"movq %%mm5, (%%"REG_D") \n\t"
"movq %%mm4, (%%"REG_D",%%"REG_a",) \n\t"
 
"sub %%"REG_d", %%"REG_D" \n\t"
"add $8, %%"REG_D" \n\t"
"dec %%"REG_c" \n\t"
"jnz 1b \n\t"
 
: "+S"(workspace), "+D"(output_adr), "+c"(cnt), "=o"(temps)
: "a"(output_stride*sizeof(short))
: "%"REG_d
);
}
 
#endif // HAVE_MMX
 
#if !HAVE_MMX
 
static void row_fdct_c(int16_t *data, const uint8_t *pixels, int line_size, int cnt)
{
int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
int_simd16_t tmp10, tmp11, tmp12, tmp13;
int_simd16_t z1, z2, z3, z4, z5, z11, z13;
int16_t *dataptr;
 
cnt*=4;
// Pass 1: process rows.
 
dataptr = data;
for (; cnt > 0; cnt--) {
tmp0 = pixels[line_size*0] + pixels[line_size*7];
tmp7 = pixels[line_size*0] - pixels[line_size*7];
tmp1 = pixels[line_size*1] + pixels[line_size*6];
tmp6 = pixels[line_size*1] - pixels[line_size*6];
tmp2 = pixels[line_size*2] + pixels[line_size*5];
tmp5 = pixels[line_size*2] - pixels[line_size*5];
tmp3 = pixels[line_size*3] + pixels[line_size*4];
tmp4 = pixels[line_size*3] - pixels[line_size*4];
 
// Even part
 
tmp10 = tmp0 + tmp3;
tmp13 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp12 = tmp1 - tmp2;
//Even columns are written first; this leads to a different order of columns
//in column_fidct(), but they are processed independently, so all is ok.
//Later, in row_idct(), the columns are read in the same order.
dataptr[2] = tmp10 + tmp11;
dataptr[3] = tmp10 - tmp11;
 
z1 = MULTIPLY16H((tmp12 + tmp13)<<2, FIX_0_707106781);
dataptr[0] = tmp13 + z1;
dataptr[1] = tmp13 - z1;
 
// Odd part
 
tmp10 = (tmp4 + tmp5) <<2;
tmp11 = (tmp5 + tmp6) <<2;
tmp12 = (tmp6 + tmp7) <<2;
 
z5 = MULTIPLY16H(tmp10 - tmp12, FIX_0_382683433);
z2 = MULTIPLY16H(tmp10, FIX_0_541196100) + z5;
z4 = MULTIPLY16H(tmp12, FIX_1_306562965) + z5;
z3 = MULTIPLY16H(tmp11, FIX_0_707106781);
 
z11 = tmp7 + z3;
z13 = tmp7 - z3;
 
dataptr[4] = z13 + z2;
dataptr[5] = z13 - z2;
dataptr[6] = z11 + z4;
dataptr[7] = z11 - z4;
 
pixels++; // advance pointer to next column
dataptr += DCTSIZE;
}
}
 
#else /* HAVE_MMX */
 
static void row_fdct_mmx(int16_t *data, const uint8_t *pixels, int line_size, int cnt)
{
uint64_t __attribute__((aligned(8))) temps[4];
__asm__ volatile(
"lea (%%"REG_a",%%"REG_a",2), %%"REG_d" \n\t"
"6: \n\t"
"movd (%%"REG_S"), %%mm0 \n\t"
"pxor %%mm7, %%mm7 \n\t"
 
"movd (%%"REG_S",%%"REG_a",), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
 
"movd (%%"REG_S",%%"REG_a",2), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
 
"punpcklbw %%mm7, %%mm2 \n\t"
"add %%"REG_d", %%"REG_S" \n\t"
 
"movq %%mm0, %%mm5 \n\t"
//
 
"movd (%%"REG_S",%%"REG_a",4), %%mm3 \n\t" //7 ;prefetch!
"movq %%mm1, %%mm6 \n\t"
 
"movd (%%"REG_S",%%"REG_d",), %%mm4 \n\t" //6
"punpcklbw %%mm7, %%mm3 \n\t"
 
"psubw %%mm3, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
 
"paddw %%mm3, %%mm0 \n\t"
"psubw %%mm4, %%mm6 \n\t"
 
"movd (%%"REG_S",%%"REG_a",2), %%mm3 \n\t" //5
"paddw %%mm4, %%mm1 \n\t"
 
"movq %%mm5, 0*8+%3 \n\t" //t7
"punpcklbw %%mm7, %%mm3 \n\t"
 
"movq %%mm6, 1*8+%3 \n\t" //t6
"movq %%mm2, %%mm4 \n\t"
 
"movd (%%"REG_S"), %%mm5 \n\t" //3
"paddw %%mm3, %%mm2 \n\t"
 
"movd (%%"REG_S",%%"REG_a",), %%mm6 \n\t" //4
"punpcklbw %%mm7, %%mm5 \n\t"
 
"psubw %%mm3, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm6 \n\t"
 
"movq %%mm5, %%mm3 \n\t"
"paddw %%mm6, %%mm5 \n\t" //t3
 
"psubw %%mm6, %%mm3 \n\t" //t4 ; t0 t1 t2 t4 t5 t3 - -
"movq %%mm0, %%mm6 \n\t"
 
"movq %%mm1, %%mm7 \n\t"
"psubw %%mm5, %%mm0 \n\t" //t13
 
"psubw %%mm2, %%mm1 \n\t"
"paddw %%mm2, %%mm7 \n\t" //t11
 
"paddw %%mm0, %%mm1 \n\t"
"movq %%mm7, %%mm2 \n\t"
 
"psllw $2, %%mm1 \n\t"
"paddw %%mm5, %%mm6 \n\t" //t10
 
"pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm1 \n\t"
"paddw %%mm6, %%mm7 \n\t" //d2
 
"psubw %%mm2, %%mm6 \n\t" //d3
"movq %%mm0, %%mm5 \n\t"
 
//transpose 4x4
"movq %%mm7, %%mm2 \n\t"
"punpcklwd %%mm6, %%mm7 \n\t"
 
"paddw %%mm1, %%mm0 \n\t" //d0
"punpckhwd %%mm6, %%mm2 \n\t"
 
"psubw %%mm1, %%mm5 \n\t" //d1
"movq %%mm0, %%mm6 \n\t"
 
"movq 1*8+%3, %%mm1 \n\t"
"punpcklwd %%mm5, %%mm0 \n\t"
 
"punpckhwd %%mm5, %%mm6 \n\t"
"movq %%mm0, %%mm5 \n\t"
 
"punpckldq %%mm7, %%mm0 \n\t" //0
"paddw %%mm4, %%mm3 \n\t"
 
"punpckhdq %%mm7, %%mm5 \n\t" //1
"movq %%mm6, %%mm7 \n\t"
 
"movq %%mm0, "DCTSIZE_S"*0*2(%%"REG_D") \n\t"
"punpckldq %%mm2, %%mm6 \n\t" //2
 
"movq %%mm5, "DCTSIZE_S"*1*2(%%"REG_D") \n\t"
"punpckhdq %%mm2, %%mm7 \n\t" //3
 
"movq %%mm6, "DCTSIZE_S"*2*2(%%"REG_D") \n\t"
"paddw %%mm1, %%mm4 \n\t"
 
"movq %%mm7, "DCTSIZE_S"*3*2(%%"REG_D") \n\t"
"psllw $2, %%mm3 \n\t" //t10
 
"movq 0*8+%3, %%mm2 \n\t"
"psllw $2, %%mm4 \n\t" //t11
 
"pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm4 \n\t" //z3
"paddw %%mm2, %%mm1 \n\t"
 
"psllw $2, %%mm1 \n\t" //t12
"movq %%mm3, %%mm0 \n\t"
 
"pmulhw "MANGLE(ff_MM_FIX_0_541196100)", %%mm0 \n\t"
"psubw %%mm1, %%mm3 \n\t"
 
"pmulhw "MANGLE(MM_FIX_0_382683433)", %%mm3 \n\t" //z5
"movq %%mm2, %%mm5 \n\t"
 
"pmulhw "MANGLE(MM_FIX_1_306562965)", %%mm1 \n\t"
"psubw %%mm4, %%mm2 \n\t" //z13
 
"paddw %%mm4, %%mm5 \n\t" //z11
"movq %%mm2, %%mm6 \n\t"
 
"paddw %%mm3, %%mm0 \n\t" //z2
"movq %%mm5, %%mm7 \n\t"
 
"paddw %%mm0, %%mm2 \n\t" //d4
"psubw %%mm0, %%mm6 \n\t" //d5
 
"movq %%mm2, %%mm4 \n\t"
"paddw %%mm3, %%mm1 \n\t" //z4
 
//transpose 4x4
"punpcklwd %%mm6, %%mm2 \n\t"
"paddw %%mm1, %%mm5 \n\t" //d6
 
"punpckhwd %%mm6, %%mm4 \n\t"
"psubw %%mm1, %%mm7 \n\t" //d7
 
"movq %%mm5, %%mm6 \n\t"
"punpcklwd %%mm7, %%mm5 \n\t"
 
"punpckhwd %%mm7, %%mm6 \n\t"
"movq %%mm2, %%mm7 \n\t"
 
"punpckldq %%mm5, %%mm2 \n\t" //4
"sub %%"REG_d", %%"REG_S" \n\t"
 
"punpckhdq %%mm5, %%mm7 \n\t" //5
"movq %%mm4, %%mm5 \n\t"
 
"movq %%mm2, "DCTSIZE_S"*0*2+"DCTSIZE_S"(%%"REG_D") \n\t"
"punpckldq %%mm6, %%mm4 \n\t" //6
 
"movq %%mm7, "DCTSIZE_S"*1*2+"DCTSIZE_S"(%%"REG_D") \n\t"
"punpckhdq %%mm6, %%mm5 \n\t" //7
 
"movq %%mm4, "DCTSIZE_S"*2*2+"DCTSIZE_S"(%%"REG_D") \n\t"
"add $4, %%"REG_S" \n\t"
 
"movq %%mm5, "DCTSIZE_S"*3*2+"DCTSIZE_S"(%%"REG_D") \n\t"
"add $"DCTSIZE_S"*2*4, %%"REG_D" \n\t" //4 rows
"dec %%"REG_c" \n\t"
"jnz 6b \n\t"
 
: "+S"(pixels), "+D"(data), "+c"(cnt), "=o"(temps)
: "a"(line_size)
: "%"REG_d);
}
 
#endif // HAVE_MMX
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/vf_ilpack.c
0,0 → 1,458
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
 
#include "config.h"
#include "mp_msg.h"
#include "cpudetect.h"
 
#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
#include "libavutil/attributes.h"
#include "libavutil/x86/asm.h"
 
typedef void (pack_func_t)(unsigned char *dst, unsigned char *y,
unsigned char *u, unsigned char *v, int w, int us, int vs);
 
struct vf_priv_s {
int mode;
pack_func_t *pack[2];
};
 
static void pack_nn_C(unsigned char *dst, unsigned char *y,
unsigned char *u, unsigned char *v, int w,
int av_unused us, int av_unused vs)
{
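/* Nearest-neighbour packing: interleave one line of planar 4:2:0 as
 * Y U Y V (YUY2) without any chroma interpolation. */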
int j;
for (j = w/2; j; j--) {
*dst++ = *y++;
*dst++ = *u++;
*dst++ = *y++;
*dst++ = *v++;
}
}
 
static void pack_li_0_C(unsigned char *dst, unsigned char *y,
unsigned char *u, unsigned char *v, int w, int us, int vs)
{
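/* Phase-0 linear interpolation: each chroma output is a 7/8:1/8 blend of the
 * current chroma line and the one two source lines below (us/vs are the
 * chroma strides). */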
int j;
for (j = w/2; j; j--) {
*dst++ = *y++;
*dst++ = (u[us+us] + 7*u[0])>>3;
*dst++ = *y++;
*dst++ = (v[vs+vs] + 7*v[0])>>3;
u++; v++;
}
}
 
static void pack_li_1_C(unsigned char *dst, unsigned char *y,
unsigned char *u, unsigned char *v, int w, int us, int vs)
{
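/* Phase-1 linear interpolation: the complementary 5/8:3/8 blend. */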
int j;
for (j = w/2; j; j--) {
*dst++ = *y++;
*dst++ = (3*u[us+us] + 5*u[0])>>3;
*dst++ = *y++;
*dst++ = (3*v[vs+vs] + 5*v[0])>>3;
u++; v++;
}
}
 
#if HAVE_MMX
static void pack_nn_MMX(unsigned char *dst, unsigned char *y,
unsigned char *u, unsigned char *v, int w,
int av_unused us, int av_unused vs)
{
__asm__ volatile (""
ASMALIGN(4)
"1: \n\t"
"movq (%0), %%mm1 \n\t"
"movq (%0), %%mm2 \n\t"
"movq (%1), %%mm4 \n\t"
"movq (%2), %%mm6 \n\t"
"punpcklbw %%mm6, %%mm4 \n\t"
"punpcklbw %%mm4, %%mm1 \n\t"
"punpckhbw %%mm4, %%mm2 \n\t"
 
"add $8, %0 \n\t"
"add $4, %1 \n\t"
"add $4, %2 \n\t"
"movq %%mm1, (%3) \n\t"
"movq %%mm2, 8(%3) \n\t"
"add $16, %3 \n\t"
"decl %4 \n\t"
"jnz 1b \n\t"
"emms \n\t"
:
: "r" (y), "r" (u), "r" (v), "r" (dst), "r" (w/8)
: "memory"
);
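/* Note: the asm above advances only its register copies, so this tail call
 * sees the original (unadvanced) pointers and re-packs the first w&7 pixels
 * rather than the last ones; for the usual widths that are multiples of 8
 * the call is a no-op. */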
pack_nn_C(dst, y, u, v, (w&7), 0, 0);
}
 
#if HAVE_EBX_AVAILABLE
static void pack_li_0_MMX(unsigned char *dst, unsigned char *y,
unsigned char *u, unsigned char *v, int w, int us, int vs)
{
__asm__ volatile (""
"push %%"REG_BP" \n\t"
#if ARCH_X86_64
"mov %6, %%"REG_BP" \n\t"
#else
"movl 4(%%"REG_d"), %%"REG_BP" \n\t"
"movl (%%"REG_d"), %%"REG_d" \n\t"
#endif
"pxor %%mm0, %%mm0 \n\t"
 
ASMALIGN(4)
".Lli0: \n\t"
"movq (%%"REG_S"), %%mm1 \n\t"
"movq (%%"REG_S"), %%mm2 \n\t"
 
"movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
"movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
"punpcklbw %%mm0, %%mm4 \n\t"
"punpcklbw %%mm0, %%mm6 \n\t"
"movq (%%"REG_a"), %%mm3 \n\t"
"movq (%%"REG_b"), %%mm5 \n\t"
"punpcklbw %%mm0, %%mm3 \n\t"
"punpcklbw %%mm0, %%mm5 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"psrlw $3, %%mm4 \n\t"
"psrlw $3, %%mm6 \n\t"
"packuswb %%mm4, %%mm4 \n\t"
"packuswb %%mm6, %%mm6 \n\t"
"punpcklbw %%mm6, %%mm4 \n\t"
"punpcklbw %%mm4, %%mm1 \n\t"
"punpckhbw %%mm4, %%mm2 \n\t"
 
"movq %%mm1, (%%"REG_D") \n\t"
"movq %%mm2, 8(%%"REG_D") \n\t"
 
"movq 8(%%"REG_S"), %%mm1 \n\t"
"movq 8(%%"REG_S"), %%mm2 \n\t"
 
"movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
"movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
"punpckhbw %%mm0, %%mm4 \n\t"
"punpckhbw %%mm0, %%mm6 \n\t"
"movq (%%"REG_a"), %%mm3 \n\t"
"movq (%%"REG_b"), %%mm5 \n\t"
"punpckhbw %%mm0, %%mm3 \n\t"
"punpckhbw %%mm0, %%mm5 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"psrlw $3, %%mm4 \n\t"
"psrlw $3, %%mm6 \n\t"
"packuswb %%mm4, %%mm4 \n\t"
"packuswb %%mm6, %%mm6 \n\t"
"punpcklbw %%mm6, %%mm4 \n\t"
"punpcklbw %%mm4, %%mm1 \n\t"
"punpckhbw %%mm4, %%mm2 \n\t"
 
"add $16, %%"REG_S" \n\t"
"add $8, %%"REG_a" \n\t"
"add $8, %%"REG_b" \n\t"
 
"movq %%mm1, 16(%%"REG_D") \n\t"
"movq %%mm2, 24(%%"REG_D") \n\t"
"add $32, %%"REG_D" \n\t"
 
"decl %%ecx \n\t"
"jnz .Lli0 \n\t"
"emms \n\t"
"pop %%"REG_BP" \n\t"
:
: "S" (y), "D" (dst), "a" (u), "b" (v), "c" (w/16),
#if ARCH_X86_64
"d" ((x86_reg)us), "r" ((x86_reg)vs)
#else
"d" (&us)
#endif
: "memory"
);
pack_li_0_C(dst, y, u, v, (w&15), us, vs);
}
 
static void pack_li_1_MMX(unsigned char *dst, unsigned char *y,
unsigned char *u, unsigned char *v, int w, int us, int vs)
{
__asm__ volatile (""
"push %%"REG_BP" \n\t"
#if ARCH_X86_64
"mov %6, %%"REG_BP" \n\t"
#else
"movl 4(%%"REG_d"), %%"REG_BP" \n\t"
"movl (%%"REG_d"), %%"REG_d" \n\t"
#endif
"pxor %%mm0, %%mm0 \n\t"
 
ASMALIGN(4)
".Lli1: \n\t"
"movq (%%"REG_S"), %%mm1 \n\t"
"movq (%%"REG_S"), %%mm2 \n\t"
 
"movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
"movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
"punpcklbw %%mm0, %%mm4 \n\t"
"punpcklbw %%mm0, %%mm6 \n\t"
"movq (%%"REG_a"), %%mm3 \n\t"
"movq (%%"REG_b"), %%mm5 \n\t"
"punpcklbw %%mm0, %%mm3 \n\t"
"punpcklbw %%mm0, %%mm5 \n\t"
"movq %%mm4, %%mm7 \n\t"
"paddw %%mm4, %%mm4 \n\t"
"paddw %%mm7, %%mm4 \n\t"
"movq %%mm6, %%mm7 \n\t"
"paddw %%mm6, %%mm6 \n\t"
"paddw %%mm7, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"psrlw $3, %%mm4 \n\t"
"psrlw $3, %%mm6 \n\t"
"packuswb %%mm4, %%mm4 \n\t"
"packuswb %%mm6, %%mm6 \n\t"
"punpcklbw %%mm6, %%mm4 \n\t"
"punpcklbw %%mm4, %%mm1 \n\t"
"punpckhbw %%mm4, %%mm2 \n\t"
 
"movq %%mm1, (%%"REG_D") \n\t"
"movq %%mm2, 8(%%"REG_D") \n\t"
 
"movq 8(%%"REG_S"), %%mm1 \n\t"
"movq 8(%%"REG_S"), %%mm2 \n\t"
 
"movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
"movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
"punpckhbw %%mm0, %%mm4 \n\t"
"punpckhbw %%mm0, %%mm6 \n\t"
"movq (%%"REG_a"), %%mm3 \n\t"
"movq (%%"REG_b"), %%mm5 \n\t"
"punpckhbw %%mm0, %%mm3 \n\t"
"punpckhbw %%mm0, %%mm5 \n\t"
"movq %%mm4, %%mm7 \n\t"
"paddw %%mm4, %%mm4 \n\t"
"paddw %%mm7, %%mm4 \n\t"
"movq %%mm6, %%mm7 \n\t"
"paddw %%mm6, %%mm6 \n\t"
"paddw %%mm7, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm3, %%mm4 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"psrlw $3, %%mm4 \n\t"
"psrlw $3, %%mm6 \n\t"
"packuswb %%mm4, %%mm4 \n\t"
"packuswb %%mm6, %%mm6 \n\t"
"punpcklbw %%mm6, %%mm4 \n\t"
"punpcklbw %%mm4, %%mm1 \n\t"
"punpckhbw %%mm4, %%mm2 \n\t"
 
"add $16, %%"REG_S" \n\t"
"add $8, %%"REG_a" \n\t"
"add $8, %%"REG_b" \n\t"
 
"movq %%mm1, 16(%%"REG_D") \n\t"
"movq %%mm2, 24(%%"REG_D") \n\t"
"add $32, %%"REG_D" \n\t"
 
"decl %%ecx \n\t"
"jnz .Lli1 \n\t"
"emms \n\t"
"pop %%"REG_BP" \n\t"
:
: "S" (y), "D" (dst), "a" (u), "b" (v), "c" (w/16),
#if ARCH_X86_64
"d" ((x86_reg)us), "r" ((x86_reg)vs)
#else
"d" (&us)
#endif
: "memory"
);
pack_li_1_C(dst, y, u, v, (w&15), us, vs);
}
#endif /* HAVE_EBX_AVAILABLE */
#endif
 
static pack_func_t *pack_nn;
static pack_func_t *pack_li_0;
static pack_func_t *pack_li_1;
 
static void ilpack(unsigned char *dst, unsigned char *src[3],
int dststride, int srcstride[3], int w, int h, pack_func_t *pack[2])
{
int i;
unsigned char *y, *u, *v;
int ys = srcstride[0], us = srcstride[1], vs = srcstride[2];
int a, b;
 
y = src[0];
u = src[1];
v = src[2];
 
pack_nn(dst, y, u, v, w, 0, 0);
y += ys; dst += dststride;
pack_nn(dst, y, u+us, v+vs, w, 0, 0);
y += ys; dst += dststride;
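/* 4-line cycle: a flips the chroma interpolation direction (us/vs negated)
 * every other pair of lines and b alternates between the 7/8:1/8 and
 * 5/8:3/8 blends, so each luma line is paired with chroma interpolated from
 * the field it belongs to. */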
for (i=2; i<h-2; i++) {
a = (i&2) ? 1 : -1;
b = (i&1) ^ ((i&2)>>1);
pack[b](dst, y, u, v, w, us*a, vs*a);
y += ys;
if ((i&3) == 1) {
u -= us;
v -= vs;
} else {
u += us;
v += vs;
}
dst += dststride;
}
pack_nn(dst, y, u, v, w, 0, 0);
y += ys; dst += dststride; u += us; v += vs;
pack_nn(dst, y, u, v, w, 0, 0);
}
 
 
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
{
mp_image_t *dmpi;
 
// hope we'll get DR buffer:
dmpi=ff_vf_get_image(vf->next, IMGFMT_YUY2,
MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
mpi->w, mpi->h);
 
ilpack(dmpi->planes[0], mpi->planes, dmpi->stride[0], mpi->stride, mpi->w, mpi->h, vf->priv->pack);
 
return ff_vf_next_put_image(vf,dmpi, pts);
}
 
static int config(struct vf_instance *vf,
int width, int height, int d_width, int d_height,
unsigned int flags, unsigned int outfmt)
{
/* FIXME - also support UYVY output? */
return ff_vf_next_config(vf, width, height, d_width, d_height, flags, IMGFMT_YUY2);
}
 
 
static int query_format(struct vf_instance *vf, unsigned int fmt)
{
/* FIXME - really any YUV 4:2:0 input format should work */
switch (fmt) {
case IMGFMT_YV12:
case IMGFMT_IYUV:
case IMGFMT_I420:
return ff_vf_next_query_format(vf,IMGFMT_YUY2);
}
return 0;
}
 
static int vf_open(vf_instance_t *vf, char *args)
{
vf->config=config;
vf->query_format=query_format;
vf->put_image=put_image;
vf->priv = calloc(1, sizeof(struct vf_priv_s));
vf->priv->mode = 1;
if (args) sscanf(args, "%d", &vf->priv->mode);
 
pack_nn = pack_nn_C;
pack_li_0 = pack_li_0_C;
pack_li_1 = pack_li_1_C;
#if HAVE_MMX
if(ff_gCpuCaps.hasMMX) {
pack_nn = pack_nn_MMX;
#if HAVE_EBX_AVAILABLE
pack_li_0 = pack_li_0_MMX;
pack_li_1 = pack_li_1_MMX;
#endif
}
#endif
 
switch(vf->priv->mode) {
case 0:
vf->priv->pack[0] = vf->priv->pack[1] = pack_nn;
break;
default:
ff_mp_msg(MSGT_VFILTER, MSGL_WARN,
"ilpack: unknown mode %d (fallback to linear)\n",
vf->priv->mode);
/* Fallthrough */
case 1:
vf->priv->pack[0] = pack_li_0;
vf->priv->pack[1] = pack_li_1;
break;
}
 
return 1;
}
 
const vf_info_t ff_vf_info_ilpack = {
"4:2:0 planar -> 4:2:2 packed reinterlacer",
"ilpack",
"Richard Felker",
"",
vf_open,
NULL
};
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/vf_pp7.c
0,0 → 1,491
/*
* Copyright (C) 2005 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
 
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <math.h>
 
#include "config.h"
 
#include "mp_msg.h"
#include "cpudetect.h"
 
#if HAVE_MALLOC_H
#include <malloc.h>
#endif
 
#include "libavutil/mem.h"
 
#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
#include "libvo/fastmemcpy.h"
 
#define XMIN(a,b) ((a) < (b) ? (a) : (b))
#define XMAX(a,b) ((a) > (b) ? (a) : (b))
 
//===========================================================================//
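// 8x8 ordered-dither matrix applied when the requantized result is scaled
// back to 8 bits in filter().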
static const uint8_t __attribute__((aligned(8))) dither[8][8]={
{ 0, 48, 12, 60, 3, 51, 15, 63, },
{ 32, 16, 44, 28, 35, 19, 47, 31, },
{ 8, 56, 4, 52, 11, 59, 7, 55, },
{ 40, 24, 36, 20, 43, 27, 39, 23, },
{ 2, 50, 14, 62, 1, 49, 13, 61, },
{ 34, 18, 46, 30, 33, 17, 45, 29, },
{ 10, 58, 6, 54, 9, 57, 5, 53, },
{ 42, 26, 38, 22, 41, 25, 37, 21, },
};
 
struct vf_priv_s {
int qp;
int mode;
int mpeg2;
int temp_stride;
uint8_t *src;
};
#if 0
static inline void dct7_c(int16_t *dst, int s0, int s1, int s2, int s3, int step){
int s, d;
int dst2[64];
//#define S0 (1024/0.37796447300922719759)
#define C0 ((int)(1024*0.37796447300922719759+0.5)) //sqrt(1/7)
#define C1 ((int)(1024*0.53452248382484879308/6+0.5)) //sqrt(2/7)/6
 
#define C2 ((int)(1024*0.45221175985034745004/2+0.5))
#define C3 ((int)(1024*0.36264567479870879474/2+0.5))
 
//0.1962505182412941918 0.0149276808419397944-0.2111781990832339584
#define C4 ((int)(1024*0.1962505182412941918+0.5))
#define C5 ((int)(1024*0.0149276808419397944+0.5))
//#define C6 ((int)(1024*0.2111781990832339584+0.5))
#if 0
s= s0 + s1 + s2;
dst[0*step] = ((s + s3)*C0 + 512) >> 10;
s= (s - 6*s3)*C1 + 512;
d= (s0-s2)*C4 + (s1-s2)*C5;
dst[1*step] = (s + 2*d)>>10;
s -= d;
d= (s1-s0)*C2 + (s1-s2)*C3;
dst[2*step] = (s + d)>>10;
dst[3*step] = (s - d)>>10;
#elif 1
s = s3+s3;
s3= s-s0;
s0= s+s0;
s = s2+s1;
s2= s2-s1;
dst[0*step]= s0 + s;
dst[2*step]= s0 - s;
dst[1*step]= 2*s3 + s2;
dst[3*step]= s3 - 2*s2;
#else
int i,j,n=7;
for(i=0; i<7; i+=2){
dst2[i*step/2]= 0;
for(j=0; j<4; j++)
dst2[i*step/2] += src[j*step] * cos(i*M_PI/n*(j+0.5)) * sqrt((i?2.0:1.0)/n);
if(fabs(dst2[i*step/2] - dst[i*step/2]) > 20)
printf("%d %d %d (%d %d %d %d) -> (%d %d %d %d)\n", i,dst2[i*step/2], dst[i*step/2],src[0*step], src[1*step], src[2*step], src[3*step], dst[0*step], dst[1*step],dst[2*step],dst[3*step]);
}
#endif
}
#endif
 
static inline void dctA_c(int16_t *dst, uint8_t *src, int stride){
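/* Fold the symmetric 7-tap window (src[0..6*stride]) into four values and
 * apply a small 4-point transform down the column; dctB then applies the
 * same transform across the rows of the resulting 4x4 block. */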
int i;
 
for(i=0; i<4; i++){
int s0= src[0*stride] + src[6*stride];
int s1= src[1*stride] + src[5*stride];
int s2= src[2*stride] + src[4*stride];
int s3= src[3*stride];
int s= s3+s3;
s3= s-s0;
s0= s+s0;
s = s2+s1;
s2= s2-s1;
dst[0]= s0 + s;
dst[2]= s0 - s;
dst[1]= 2*s3 + s2;
dst[3]= s3 - 2*s2;
src++;
dst+=4;
}
}
 
static void dctB_c(int16_t *dst, int16_t *src){
int i;
 
for(i=0; i<4; i++){
int s0= src[0*4] + src[6*4];
int s1= src[1*4] + src[5*4];
int s2= src[2*4] + src[4*4];
int s3= src[3*4];
int s= s3+s3;
s3= s-s0;
s0= s+s0;
s = s2+s1;
s2= s2-s1;
dst[0*4]= s0 + s;
dst[2*4]= s0 - s;
dst[1*4]= 2*s3 + s2;
dst[3*4]= s3 - 2*s2;
src++;
dst++;
}
}
 
#if HAVE_MMX
static void dctB_mmx(int16_t *dst, int16_t *src){
__asm__ volatile (
"movq (%0), %%mm0 \n\t"
"movq 1*4*2(%0), %%mm1 \n\t"
"paddw 6*4*2(%0), %%mm0 \n\t"
"paddw 5*4*2(%0), %%mm1 \n\t"
"movq 2*4*2(%0), %%mm2 \n\t"
"movq 3*4*2(%0), %%mm3 \n\t"
"paddw 4*4*2(%0), %%mm2 \n\t"
"paddw %%mm3, %%mm3 \n\t" //s
"movq %%mm3, %%mm4 \n\t" //s
"psubw %%mm0, %%mm3 \n\t" //s-s0
"paddw %%mm0, %%mm4 \n\t" //s+s0
"movq %%mm2, %%mm0 \n\t" //s2
"psubw %%mm1, %%mm2 \n\t" //s2-s1
"paddw %%mm1, %%mm0 \n\t" //s2+s1
"movq %%mm4, %%mm1 \n\t" //s0'
"psubw %%mm0, %%mm4 \n\t" //s0'-s'
"paddw %%mm0, %%mm1 \n\t" //s0'+s'
"movq %%mm3, %%mm0 \n\t" //s3'
"psubw %%mm2, %%mm3 \n\t"
"psubw %%mm2, %%mm3 \n\t"
"paddw %%mm0, %%mm2 \n\t"
"paddw %%mm0, %%mm2 \n\t"
"movq %%mm1, (%1) \n\t"
"movq %%mm4, 2*4*2(%1) \n\t"
"movq %%mm2, 1*4*2(%1) \n\t"
"movq %%mm3, 3*4*2(%1) \n\t"
:: "r" (src), "r"(dst)
);
}
#endif
 
static void (*dctB)(int16_t *dst, int16_t *src)= dctB_c;
 
#define N0 4
#define N1 5
#define N2 10
#define SN0 2
#define SN1 2.2360679775
#define SN2 3.16227766017
#define N (1<<16)
 
static const int factor[16]={
N/(N0*N0), N/(N0*N1), N/(N0*N0),N/(N0*N2),
N/(N1*N0), N/(N1*N1), N/(N1*N0),N/(N1*N2),
N/(N0*N0), N/(N0*N1), N/(N0*N0),N/(N0*N2),
N/(N2*N0), N/(N2*N1), N/(N2*N0),N/(N2*N2),
};
 
static const int thres[16]={
N/(SN0*SN0), N/(SN0*SN2), N/(SN0*SN0),N/(SN0*SN2),
N/(SN2*SN0), N/(SN2*SN2), N/(SN2*SN0),N/(SN2*SN2),
N/(SN0*SN0), N/(SN0*SN2), N/(SN0*SN0),N/(SN0*SN2),
N/(SN2*SN0), N/(SN2*SN2), N/(SN2*SN0),N/(SN2*SN2),
};
 
static int thres2[99][16];
 
static void init_thres2(void){
int qp, i;
int bias= 0; //FIXME
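/* Per-(qp, coefficient) dead-zone half-width; SN0/SN2 appear to be the norms
 * of the corresponding 4-point transform basis vectors, so each threshold
 * scales with qp and with the coefficient's natural magnitude. */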
 
for(qp=0; qp<99; qp++){
for(i=0; i<16; i++){
thres2[qp][i]= ((i&1)?SN2:SN0) * ((i&4)?SN2:SN0) * XMAX(1,qp) * (1<<2) - 1 - bias;
}
}
}
 
static int hardthresh_c(int16_t *src, int qp){
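/* Hard thresholding: drop any AC coefficient inside the qp-dependent dead
 * zone and keep the rest at full amplitude; factor[] folds in the inverse
 * transform normalization. The medium/soft variants below shrink borderline
 * coefficients instead of dropping them. */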
int i;
int a;
 
a= src[0] * factor[0];
for(i=1; i<16; i++){
unsigned int threshold1= thres2[qp][i];
unsigned int threshold2= (threshold1<<1);
int level= src[i];
if(((unsigned)(level+threshold1))>threshold2){
a += level * factor[i];
}
}
return (a + (1<<11))>>12;
}
 
static int mediumthresh_c(int16_t *src, int qp){
int i;
int a;
 
a= src[0] * factor[0];
for(i=1; i<16; i++){
unsigned int threshold1= thres2[qp][i];
unsigned int threshold2= (threshold1<<1);
int level= src[i];
if(((unsigned)(level+threshold1))>threshold2){
if(((unsigned)(level+2*threshold1))>2*threshold2){
a += level * factor[i];
}else{
if(level>0) a+= 2*(level - (int)threshold1)*factor[i];
else a+= 2*(level + (int)threshold1)*factor[i];
}
}
}
return (a + (1<<11))>>12;
}
 
static int softthresh_c(int16_t *src, int qp){
int i;
int a;
 
a= src[0] * factor[0];
for(i=1; i<16; i++){
unsigned int threshold1= thres2[qp][i];
unsigned int threshold2= (threshold1<<1);
int level= src[i];
if(((unsigned)(level+threshold1))>threshold2){
if(level>0) a+= (level - (int)threshold1)*factor[i];
else a+= (level + (int)threshold1)*factor[i];
}
}
return (a + (1<<11))>>12;
}
 
static int (*requantize)(int16_t *src, int qp)= hardthresh_c;
 
static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, uint8_t *qp_store, int qp_stride, int is_luma){
int x, y;
const int stride= is_luma ? p->temp_stride : ((width+16+15)&(~15));
uint8_t *p_src= p->src + 8*stride;
int16_t *block= (int16_t *)p->src;
int16_t *temp= (int16_t *)(p->src + 32);
 
if (!src || !dst) return; // HACK avoid crash for Y8 colourspace
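/* Copy the plane into the padded work buffer, mirroring 8 pixels on each
 * side so the sliding DCT window never reads outside the image. */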
for(y=0; y<height; y++){
int index= 8 + 8*stride + y*stride;
fast_memcpy(p_src + index, src + y*src_stride, width);
for(x=0; x<8; x++){
p_src[index - x - 1]= p_src[index + x ];
p_src[index + width + x ]= p_src[index + width - x - 1];
}
}
for(y=0; y<8; y++){
fast_memcpy(p_src + ( 7-y)*stride, p_src + ( y+8)*stride, stride);
fast_memcpy(p_src + (height+8+y)*stride, p_src + (height-y+7)*stride, stride);
}
//FIXME (try edge emu)
 
for(y=0; y<height; y++){
for(x=-8; x<0; x+=4){
const int index= x + y*stride + (8-3)*(1+stride) + 8; //FIXME silly offset
uint8_t *src = p_src + index;
int16_t *tp= temp+4*x;
 
dctA_c(tp+4*8, src, stride);
}
for(x=0; x<width; ){
const int qps= 3 + is_luma;
int qp;
int end= XMIN(x+8, width);
 
if(p->qp)
qp= p->qp;
else{
qp= qp_store[ (XMIN(x, width-1)>>qps) + (XMIN(y, height-1)>>qps) * qp_stride];
qp=norm_qscale(qp, p->mpeg2);
}
for(; x<end; x++){
const int index= x + y*stride + (8-3)*(1+stride) + 8; //FIXME silly offset
uint8_t *src = p_src + index;
int16_t *tp= temp+4*x;
int v;
 
if((x&3)==0)
dctA_c(tp+4*8, src, stride);
 
dctB(block, tp);
 
v= requantize(block, qp);
v= (v + dither[y&7][x&7])>>6;
if((unsigned)v > 255)
v= (-v)>>31;
dst[x + y*dst_stride]= v;
}
}
}
}
 
static int config(struct vf_instance *vf,
int width, int height, int d_width, int d_height,
unsigned int flags, unsigned int outfmt){
int h= (height+16+15)&(~15);
 
vf->priv->temp_stride= (width+16+15)&(~15);
vf->priv->src = av_malloc(vf->priv->temp_stride*(h+8)*sizeof(uint8_t));
 
return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
}
 
static void get_image(struct vf_instance *vf, mp_image_t *mpi){
if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
// ok, we can do pp in-place (or pp disabled):
vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
mpi->planes[0]=vf->dmpi->planes[0];
mpi->stride[0]=vf->dmpi->stride[0];
mpi->width=vf->dmpi->width;
if(mpi->flags&MP_IMGFLAG_PLANAR){
mpi->planes[1]=vf->dmpi->planes[1];
mpi->planes[2]=vf->dmpi->planes[2];
mpi->stride[1]=vf->dmpi->stride[1];
mpi->stride[2]=vf->dmpi->stride[2];
}
mpi->flags|=MP_IMGFLAG_DIRECT;
}
 
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
mp_image_t *dmpi;
 
if(mpi->flags&MP_IMGFLAG_DIRECT){
dmpi=vf->dmpi;
}else{
// no DR, so get a new image! hope we'll get DR buffer:
dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
MP_IMGTYPE_TEMP,
MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
mpi->width,mpi->height);
ff_vf_clone_mpi_attributes(dmpi, mpi);
}
 
vf->priv->mpeg2= mpi->qscale_type;
if(mpi->qscale || vf->priv->qp){
filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, mpi->qscale, mpi->qstride, 1);
filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, mpi->qscale, mpi->qstride, 0);
filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, mpi->qscale, mpi->qstride, 0);
}else{
memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
}
 
#if HAVE_MMX
if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
#endif
#if HAVE_MMX2
if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
#endif
 
return ff_vf_next_put_image(vf,dmpi, pts);
}
 
static void uninit(struct vf_instance *vf){
if(!vf->priv) return;
 
av_free(vf->priv->src);
vf->priv->src= NULL;
 
free(vf->priv);
vf->priv=NULL;
}
 
//===========================================================================//
static int query_format(struct vf_instance *vf, unsigned int fmt){
switch(fmt){
case IMGFMT_YVU9:
case IMGFMT_IF09:
case IMGFMT_YV12:
case IMGFMT_I420:
case IMGFMT_IYUV:
case IMGFMT_CLPL:
case IMGFMT_Y800:
case IMGFMT_Y8:
case IMGFMT_444P:
case IMGFMT_422P:
case IMGFMT_411P:
return ff_vf_next_query_format(vf,fmt);
}
return 0;
}
 
static int control(struct vf_instance *vf, int request, void* data){
return ff_vf_next_control(vf,request,data);
}
 
static int vf_open(vf_instance_t *vf, char *args){
vf->config=config;
vf->put_image=put_image;
vf->get_image=get_image;
vf->query_format=query_format;
vf->uninit=uninit;
vf->control= control;
vf->priv=malloc(sizeof(struct vf_priv_s));
memset(vf->priv, 0, sizeof(struct vf_priv_s));
 
if (args) sscanf(args, "%d:%d", &vf->priv->qp, &vf->priv->mode);
 
if(vf->priv->qp < 0)
vf->priv->qp = 0;
 
init_thres2();
 
switch(vf->priv->mode){
case 0: requantize= hardthresh_c; break;
case 1: requantize= softthresh_c; break;
default:
case 2: requantize= mediumthresh_c; break;
}
 
#if HAVE_MMX
if(ff_gCpuCaps.hasMMX){
dctB= dctB_mmx;
}
#endif
#if 0
if(ff_gCpuCaps.hasMMX){
switch(vf->priv->mode){
case 0: requantize= hardthresh_mmx; break;
case 1: requantize= softthresh_mmx; break;
}
}
#endif
 
return 1;
}
 
const vf_info_t ff_vf_info_pp7 = {
"postprocess 7",
"pp7",
"Michael Niedermayer",
"",
vf_open,
NULL
};
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/vf_softpulldown.c
0,0 → 1,163
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
 
#include "config.h"
#include "mp_msg.h"
 
#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
 
#include "libvo/fastmemcpy.h"
 
struct vf_priv_s {
int state;
long long in;
long long out;
};
 
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
{
mp_image_t *dmpi;
int ret = 0;
int flags = mpi->fields;
int state = vf->priv->state;
 
dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE |
MP_IMGFLAG_PRESERVE, mpi->width, mpi->height);
 
vf->priv->in++;
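 
/* Soft-telecine reassembly: in state 0 the frame is passed through and, if
 * the repeat-first-field flag is set, its top field is stashed in dmpi; in
 * state 1 the stashed top field is completed with the current frame's bottom
 * field and the recombined progressive frame is emitted. */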
 
if ((state == 0 &&
!(flags & MP_IMGFIELD_TOP_FIRST)) ||
(state == 1 &&
flags & MP_IMGFIELD_TOP_FIRST)) {
ff_mp_msg(MSGT_VFILTER, MSGL_WARN,
"softpulldown: Unexpected field flags: state=%d top_field_first=%d repeat_first_field=%d\n",
state,
(flags & MP_IMGFIELD_TOP_FIRST) != 0,
(flags & MP_IMGFIELD_REPEAT_FIRST) != 0);
state ^= 1;
}
 
if (state == 0) {
ret = ff_vf_next_put_image(vf, mpi, MP_NOPTS_VALUE);
vf->priv->out++;
if (flags & MP_IMGFIELD_REPEAT_FIRST) {
my_memcpy_pic(dmpi->planes[0],
mpi->planes[0], mpi->w, mpi->h/2,
dmpi->stride[0]*2, mpi->stride[0]*2);
if (mpi->flags & MP_IMGFLAG_PLANAR) {
my_memcpy_pic(dmpi->planes[1],
mpi->planes[1],
mpi->chroma_width,
mpi->chroma_height/2,
dmpi->stride[1]*2,
mpi->stride[1]*2);
my_memcpy_pic(dmpi->planes[2],
mpi->planes[2],
mpi->chroma_width,
mpi->chroma_height/2,
dmpi->stride[2]*2,
mpi->stride[2]*2);
}
state=1;
}
} else {
my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0],
mpi->planes[0]+mpi->stride[0], mpi->w, mpi->h/2,
dmpi->stride[0]*2, mpi->stride[0]*2);
if (mpi->flags & MP_IMGFLAG_PLANAR) {
my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1],
mpi->planes[1]+mpi->stride[1],
mpi->chroma_width, mpi->chroma_height/2,
dmpi->stride[1]*2, mpi->stride[1]*2);
my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2],
mpi->planes[2]+mpi->stride[2],
mpi->chroma_width, mpi->chroma_height/2,
dmpi->stride[2]*2, mpi->stride[2]*2);
}
ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
vf->priv->out++;
if (flags & MP_IMGFIELD_REPEAT_FIRST) {
ret |= ff_vf_next_put_image(vf, mpi, MP_NOPTS_VALUE);
vf->priv->out++;
state=0;
} else {
my_memcpy_pic(dmpi->planes[0],
mpi->planes[0], mpi->w, mpi->h/2,
dmpi->stride[0]*2, mpi->stride[0]*2);
if (mpi->flags & MP_IMGFLAG_PLANAR) {
my_memcpy_pic(dmpi->planes[1],
mpi->planes[1],
mpi->chroma_width,
mpi->chroma_height/2,
dmpi->stride[1]*2,
mpi->stride[1]*2);
my_memcpy_pic(dmpi->planes[2],
mpi->planes[2],
mpi->chroma_width,
mpi->chroma_height/2,
dmpi->stride[2]*2,
mpi->stride[2]*2);
}
}
}
 
vf->priv->state = state;
 
return ret;
}
 
static int config(struct vf_instance *vf,
int width, int height, int d_width, int d_height,
unsigned int flags, unsigned int outfmt)
{
return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
}
 
static void uninit(struct vf_instance *vf)
{
ff_mp_msg(MSGT_VFILTER, MSGL_INFO, "softpulldown: %lld frames in, %lld frames out\n", vf->priv->in, vf->priv->out);
free(vf->priv);
}
 
static int vf_open(vf_instance_t *vf, char *args)
{
vf->config = config;
vf->put_image = put_image;
vf->uninit = uninit;
vf->default_reqs = VFCAP_ACCEPT_STRIDE;
vf->priv = calloc(1, sizeof(struct vf_priv_s));
vf->priv->state = 0;
return 1;
}
 
const vf_info_t ff_vf_info_softpulldown = {
"mpeg2 soft 3:2 pulldown",
"softpulldown",
"Tobias Diedrich <ranma+mplayer@tdiedrich.de>",
"",
vf_open,
NULL
};
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/vf_uspp.c
0,0 → 1,393
/*
* Copyright (C) 2005 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <math.h>
#include <assert.h>
 
#include "config.h"
 
#include "mp_msg.h"
#include "cpudetect.h"
 
#include "libavutil/mem.h"
#include "libavcodec/avcodec.h"
 
#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
#include "av_helpers.h"
#include "libvo/fastmemcpy.h"
 
#define XMIN(a,b) ((a) < (b) ? (a) : (b))
 
#define BLOCK 16
 
//===========================================================================//
static const uint8_t __attribute__((aligned(8))) dither[8][8]={
{ 0*4, 48*4, 12*4, 60*4, 3*4, 51*4, 15*4, 63*4, },
{ 32*4, 16*4, 44*4, 28*4, 35*4, 19*4, 47*4, 31*4, },
{ 8*4, 56*4, 4*4, 52*4, 11*4, 59*4, 7*4, 55*4, },
{ 40*4, 24*4, 36*4, 20*4, 43*4, 27*4, 39*4, 23*4, },
{ 2*4, 50*4, 14*4, 62*4, 1*4, 49*4, 13*4, 61*4, },
{ 34*4, 18*4, 46*4, 30*4, 33*4, 17*4, 45*4, 29*4, },
{ 10*4, 58*4, 6*4, 54*4, 9*4, 57*4, 5*4, 53*4, },
{ 42*4, 26*4, 38*4, 22*4, 41*4, 25*4, 37*4, 21*4, },
};
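 
/* (x,y) shifts for the averaged re-encodes: the table concatenates the
 * offsets of every level (1+2+4+...+256 = 511 entries); level log2_count
 * uses the count = 1<<log2_count entries starting at offset[count-1]. */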
 
static const uint8_t offset[511][2]= {
{ 0, 0},
{ 0, 0}, { 8, 8},
{ 0, 0}, { 4, 4}, {12, 8}, { 8,12},
{ 0, 0}, {10, 2}, { 4, 4}, {14, 6}, { 8, 8}, { 2,10}, {12,12}, { 6,14},
 
{ 0, 0}, {10, 2}, { 4, 4}, {14, 6}, { 8, 8}, { 2,10}, {12,12}, { 6,14},
{ 5, 1}, {15, 3}, { 9, 5}, { 3, 7}, {13, 9}, { 7,11}, { 1,13}, {11,15},
 
{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9},
{ 2, 2}, {10, 2}, { 2,10}, {10,10}, { 7, 3}, {15, 3}, { 7,11}, {15,11},
{ 4, 4}, {12, 4}, { 4,12}, {12,12}, { 1, 5}, { 9, 5}, { 1,13}, { 9,13},
{ 6, 6}, {14, 6}, { 6,14}, {14,14}, { 3, 7}, {11, 7}, { 3,15}, {11,15},
 
{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 4, 0}, {12, 0}, { 4, 8}, {12, 8},
{ 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9},
{ 3, 2}, {11, 2}, { 3,10}, {11,10}, { 7, 2}, {15, 2}, { 7,10}, {15,10},
{ 2, 3}, {10, 3}, { 2,11}, {10,11}, { 6, 3}, {14, 3}, { 6,11}, {14,11},
{ 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 4, 4}, {12, 4}, { 4,12}, {12,12},
{ 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 5, 5}, {13, 5}, { 5,13}, {13,13},
{ 3, 6}, {11, 6}, { 3,14}, {11,14}, { 7, 6}, {15, 6}, { 7,14}, {15,14},
{ 2, 7}, {10, 7}, { 2,15}, {10,15}, { 6, 7}, {14, 7}, { 6,15}, {14,15},
 
{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 0, 2}, { 8, 2}, { 0,10}, { 8,10},
{ 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 0, 6}, { 8, 6}, { 0,14}, { 8,14},
{ 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 1, 3}, { 9, 3}, { 1,11}, { 9,11},
{ 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 1, 7}, { 9, 7}, { 1,15}, { 9,15},
{ 2, 0}, {10, 0}, { 2, 8}, {10, 8}, { 2, 2}, {10, 2}, { 2,10}, {10,10},
{ 2, 4}, {10, 4}, { 2,12}, {10,12}, { 2, 6}, {10, 6}, { 2,14}, {10,14},
{ 3, 1}, {11, 1}, { 3, 9}, {11, 9}, { 3, 3}, {11, 3}, { 3,11}, {11,11},
{ 3, 5}, {11, 5}, { 3,13}, {11,13}, { 3, 7}, {11, 7}, { 3,15}, {11,15},
{ 4, 0}, {12, 0}, { 4, 8}, {12, 8}, { 4, 2}, {12, 2}, { 4,10}, {12,10},
{ 4, 4}, {12, 4}, { 4,12}, {12,12}, { 4, 6}, {12, 6}, { 4,14}, {12,14},
{ 5, 1}, {13, 1}, { 5, 9}, {13, 9}, { 5, 3}, {13, 3}, { 5,11}, {13,11},
{ 5, 5}, {13, 5}, { 5,13}, {13,13}, { 5, 7}, {13, 7}, { 5,15}, {13,15},
{ 6, 0}, {14, 0}, { 6, 8}, {14, 8}, { 6, 2}, {14, 2}, { 6,10}, {14,10},
{ 6, 4}, {14, 4}, { 6,12}, {14,12}, { 6, 6}, {14, 6}, { 6,14}, {14,14},
{ 7, 1}, {15, 1}, { 7, 9}, {15, 9}, { 7, 3}, {15, 3}, { 7,11}, {15,11},
{ 7, 5}, {15, 5}, { 7,13}, {15,13}, { 7, 7}, {15, 7}, { 7,15}, {15,15},
 
{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 4, 4}, {12, 4}, { 4,12}, {12,12},
{ 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 4, 0}, {12, 0}, { 4, 8}, {12, 8},
{ 2, 2}, {10, 2}, { 2,10}, {10,10}, { 6, 6}, {14, 6}, { 6,14}, {14,14},
{ 2, 6}, {10, 6}, { 2,14}, {10,14}, { 6, 2}, {14, 2}, { 6,10}, {14,10},
{ 0, 2}, { 8, 2}, { 0,10}, { 8,10}, { 4, 6}, {12, 6}, { 4,14}, {12,14},
{ 0, 6}, { 8, 6}, { 0,14}, { 8,14}, { 4, 2}, {12, 2}, { 4,10}, {12,10},
{ 2, 0}, {10, 0}, { 2, 8}, {10, 8}, { 6, 4}, {14, 4}, { 6,12}, {14,12},
{ 2, 4}, {10, 4}, { 2,12}, {10,12}, { 6, 0}, {14, 0}, { 6, 8}, {14, 8},
{ 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 5, 5}, {13, 5}, { 5,13}, {13,13},
{ 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9},
{ 3, 3}, {11, 3}, { 3,11}, {11,11}, { 7, 7}, {15, 7}, { 7,15}, {15,15},
{ 3, 7}, {11, 7}, { 3,15}, {11,15}, { 7, 3}, {15, 3}, { 7,11}, {15,11},
{ 1, 3}, { 9, 3}, { 1,11}, { 9,11}, { 5, 7}, {13, 7}, { 5,15}, {13,15},
{ 1, 7}, { 9, 7}, { 1,15}, { 9,15}, { 5, 3}, {13, 3}, { 5,11}, {13,11},
{ 3, 1}, {11, 1}, { 3, 9}, {11, 9}, { 7, 5}, {15, 5}, { 7,13}, {15,13},
{ 3, 5}, {11, 5}, { 3,13}, {11,13}, { 7, 1}, {15, 1}, { 7, 9}, {15, 9},
{ 0, 1}, { 8, 1}, { 0, 9}, { 8, 9}, { 4, 5}, {12, 5}, { 4,13}, {12,13},
{ 0, 5}, { 8, 5}, { 0,13}, { 8,13}, { 4, 1}, {12, 1}, { 4, 9}, {12, 9},
{ 2, 3}, {10, 3}, { 2,11}, {10,11}, { 6, 7}, {14, 7}, { 6,15}, {14,15},
{ 2, 7}, {10, 7}, { 2,15}, {10,15}, { 6, 3}, {14, 3}, { 6,11}, {14,11},
{ 0, 3}, { 8, 3}, { 0,11}, { 8,11}, { 4, 7}, {12, 7}, { 4,15}, {12,15},
{ 0, 7}, { 8, 7}, { 0,15}, { 8,15}, { 4, 3}, {12, 3}, { 4,11}, {12,11},
{ 2, 1}, {10, 1}, { 2, 9}, {10, 9}, { 6, 5}, {14, 5}, { 6,13}, {14,13},
{ 2, 5}, {10, 5}, { 2,13}, {10,13}, { 6, 1}, {14, 1}, { 6, 9}, {14, 9},
{ 1, 0}, { 9, 0}, { 1, 8}, { 9, 8}, { 5, 4}, {13, 4}, { 5,12}, {13,12},
{ 1, 4}, { 9, 4}, { 1,12}, { 9,12}, { 5, 0}, {13, 0}, { 5, 8}, {13, 8},
{ 3, 2}, {11, 2}, { 3,10}, {11,10}, { 7, 6}, {15, 6}, { 7,14}, {15,14},
{ 3, 6}, {11, 6}, { 3,14}, {11,14}, { 7, 2}, {15, 2}, { 7,10}, {15,10},
{ 1, 2}, { 9, 2}, { 1,10}, { 9,10}, { 5, 6}, {13, 6}, { 5,14}, {13,14},
{ 1, 6}, { 9, 6}, { 1,14}, { 9,14}, { 5, 2}, {13, 2}, { 5,10}, {13,10},
{ 3, 0}, {11, 0}, { 3, 8}, {11, 8}, { 7, 4}, {15, 4}, { 7,12}, {15,12},
{ 3, 4}, {11, 4}, { 3,12}, {11,12}, { 7, 0}, {15, 0}, { 7, 8}, {15, 8},
};
 
struct vf_priv_s {
int log2_count;
int qp;
int mode;
int mpeg2;
int temp_stride[3];
uint8_t *src[3];
int16_t *temp[3];
int outbuf_size;
uint8_t *outbuf;
AVCodecContext *avctx_enc[BLOCK*BLOCK];
AVFrame *frame;
AVFrame *frame_dec;
};
 
static void store_slice_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){
int y, x;
 
#define STORE(pos) \
temp= ((src[x + y*src_stride + pos]<<log2_scale) + d[pos])>>8;\
if(temp & 0x100) temp= ~(temp>>31);\
dst[x + y*dst_stride + pos]= temp;
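/* (temp & 0x100) is set on both underflow and overflow of the 8-bit range;
 * ~(temp>>31) then yields 0 for negatives and 0xFF (after the implicit
 * truncation to uint8_t) for overflows. */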
 
for(y=0; y<height; y++){
const uint8_t *d= dither[y&7];
for(x=0; x<width; x+=8){
int temp;
STORE(0);
STORE(1);
STORE(2);
STORE(3);
STORE(4);
STORE(5);
STORE(6);
STORE(7);
}
}
}
 
static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height, uint8_t *qp_store, int qp_stride){
int x, y, i, j;
const int count= 1<<p->log2_count;
 
for(i=0; i<3; i++){
int is_chroma= !!i;
int w= width >>is_chroma;
int h= height>>is_chroma;
int stride= p->temp_stride[i];
int block= BLOCK>>is_chroma;
 
if (!src[i] || !dst[i])
continue; // HACK avoid crash for Y8 colourspace
for(y=0; y<h; y++){
int index= block + block*stride + y*stride;
fast_memcpy(p->src[i] + index, src[i] + y*src_stride[i], w);
for(x=0; x<block; x++){
p->src[i][index - x - 1]= p->src[i][index + x ];
p->src[i][index + w + x ]= p->src[i][index + w - x - 1];
}
}
for(y=0; y<block; y++){
fast_memcpy(p->src[i] + ( block-1-y)*stride, p->src[i] + ( y+block )*stride, stride);
fast_memcpy(p->src[i] + (h+block +y)*stride, p->src[i] + (h-y+block-1)*stride, stride);
}
 
p->frame->linesize[i]= stride;
memset(p->temp[i], 0, (h+2*block)*stride*sizeof(int16_t));
}
 
if(p->qp)
p->frame->quality= p->qp * FF_QP2LAMBDA;
else
p->frame->quality= norm_qscale(qp_store[0], p->mpeg2) * FF_QP2LAMBDA;
// init per MB qscale stuff FIXME
 
for(i=0; i<count; i++){
const int x1= offset[i+count-1][0];
const int y1= offset[i+count-1][1];
int offset;
p->frame->data[0]= p->src[0] + x1 + y1 * p->frame->linesize[0];
p->frame->data[1]= p->src[1] + x1/2 + y1/2 * p->frame->linesize[1];
p->frame->data[2]= p->src[2] + x1/2 + y1/2 * p->frame->linesize[2];
 
avcodec_encode_video(p->avctx_enc[i], p->outbuf, p->outbuf_size, p->frame);
p->frame_dec = p->avctx_enc[i]->coded_frame;
 
offset= (BLOCK-x1) + (BLOCK-y1)*p->frame_dec->linesize[0];
//FIXME optimize
for(y=0; y<height; y++){
for(x=0; x<width; x++){
p->temp[0][ x + y*p->temp_stride[0] ] += p->frame_dec->data[0][ x + y*p->frame_dec->linesize[0] + offset ];
}
}
offset= (BLOCK/2-x1/2) + (BLOCK/2-y1/2)*p->frame_dec->linesize[1];
for(y=0; y<height/2; y++){
for(x=0; x<width/2; x++){
p->temp[1][ x + y*p->temp_stride[1] ] += p->frame_dec->data[1][ x + y*p->frame_dec->linesize[1] + offset ];
p->temp[2][ x + y*p->temp_stride[2] ] += p->frame_dec->data[2][ x + y*p->frame_dec->linesize[2] + offset ];
}
}
}
 
for(j=0; j<3; j++){
int is_chroma= !!j;
if (!dst[j])
continue; // HACK avoid crash for Y8 colourspace
store_slice_c(dst[j], p->temp[j], dst_stride[j], p->temp_stride[j], width>>is_chroma, height>>is_chroma, 8-p->log2_count);
}
}
 
static int config(struct vf_instance *vf,
int width, int height, int d_width, int d_height,
unsigned int flags, unsigned int outfmt){
int i;
AVCodec *enc= avcodec_find_encoder(AV_CODEC_ID_SNOW);
 
for(i=0; i<3; i++){
int is_chroma= !!i;
int w= ((width + 4*BLOCK-1) & (~(2*BLOCK-1)))>>is_chroma;
int h= ((height + 4*BLOCK-1) & (~(2*BLOCK-1)))>>is_chroma;
 
vf->priv->temp_stride[i]= w;
vf->priv->temp[i]= malloc(vf->priv->temp_stride[i]*h*sizeof(int16_t));
vf->priv->src [i]= malloc(vf->priv->temp_stride[i]*h*sizeof(uint8_t));
}
for(i=0; i< (1<<vf->priv->log2_count); i++){
AVCodecContext *avctx_enc;
AVDictionary *opts = NULL;
 
avctx_enc=
vf->priv->avctx_enc[i]= avcodec_alloc_context3(NULL);
avctx_enc->width = width + BLOCK;
avctx_enc->height = height + BLOCK;
avctx_enc->time_base= (AVRational){1,25}; // meaningless
avctx_enc->gop_size = 300;
avctx_enc->max_b_frames= 0;
avctx_enc->pix_fmt = AV_PIX_FMT_YUV420P;
avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
avctx_enc->global_quality= 123;
av_dict_set(&opts, "no_bitstream", "1", 0);
avcodec_open2(avctx_enc, enc, &opts);
av_dict_free(&opts);
assert(avctx_enc->codec);
}
vf->priv->frame= avcodec_alloc_frame();
vf->priv->frame_dec= avcodec_alloc_frame();
 
vf->priv->outbuf_size= (width + BLOCK)*(height + BLOCK)*10;
vf->priv->outbuf= malloc(vf->priv->outbuf_size);
 
return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
}
 
static void get_image(struct vf_instance *vf, mp_image_t *mpi){
if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
// ok, we can do pp in-place (or pp disabled):
vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
mpi->planes[0]=vf->dmpi->planes[0];
mpi->stride[0]=vf->dmpi->stride[0];
mpi->width=vf->dmpi->width;
if(mpi->flags&MP_IMGFLAG_PLANAR){
mpi->planes[1]=vf->dmpi->planes[1];
mpi->planes[2]=vf->dmpi->planes[2];
mpi->stride[1]=vf->dmpi->stride[1];
mpi->stride[2]=vf->dmpi->stride[2];
}
mpi->flags|=MP_IMGFLAG_DIRECT;
}
 
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
mp_image_t *dmpi;
 
if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
// no DR, so get a new image! hope we'll get DR buffer:
dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
MP_IMGTYPE_TEMP,
MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
mpi->width,mpi->height);
ff_vf_clone_mpi_attributes(dmpi, mpi);
}else{
dmpi=vf->dmpi;
}
 
vf->priv->mpeg2= mpi->qscale_type;
if(vf->priv->log2_count || !(mpi->flags&MP_IMGFLAG_DIRECT)){
if(mpi->qscale || vf->priv->qp){
filter(vf->priv, dmpi->planes, mpi->planes, dmpi->stride, mpi->stride, mpi->w, mpi->h, mpi->qscale, mpi->qstride);
}else{
memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
}
}
 
#if HAVE_MMX
if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
#endif
#if HAVE_MMX2
if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
#endif
 
return ff_vf_next_put_image(vf,dmpi, pts);
}
 
static void uninit(struct vf_instance *vf){
int i;
if(!vf->priv) return;
 
for(i=0; i<3; i++){
free(vf->priv->temp[i]);
vf->priv->temp[i]= NULL;
free(vf->priv->src[i]);
vf->priv->src[i]= NULL;
}
for(i=0; i<BLOCK*BLOCK; i++){
av_freep(&vf->priv->avctx_enc[i]);
}
 
free(vf->priv);
vf->priv=NULL;
}
 
//===========================================================================//
static int query_format(struct vf_instance *vf, unsigned int fmt){
switch(fmt){
case IMGFMT_YV12:
case IMGFMT_I420:
case IMGFMT_IYUV:
case IMGFMT_Y800:
case IMGFMT_Y8:
return ff_vf_next_query_format(vf,fmt);
}
return 0;
}
 
static int control(struct vf_instance *vf, int request, void* data){
switch(request){
case VFCTRL_QUERY_MAX_PP_LEVEL:
return 8;
case VFCTRL_SET_PP_LEVEL:
vf->priv->log2_count= *((unsigned int*)data);
//FIXME we have to realloc a few things here
return CONTROL_TRUE;
}
return ff_vf_next_control(vf,request,data);
}
 
static int vf_open(vf_instance_t *vf, char *args){
 
int log2c=-1;
 
vf->config=config;
vf->put_image=put_image;
vf->get_image=get_image;
vf->query_format=query_format;
vf->uninit=uninit;
vf->control= control;
vf->priv=malloc(sizeof(struct vf_priv_s));
memset(vf->priv, 0, sizeof(struct vf_priv_s));
 
ff_init_avcodec();
 
vf->priv->log2_count= 4;
 
if (args) sscanf(args, "%d:%d:%d", &log2c, &vf->priv->qp, &vf->priv->mode);
 
if( log2c >=0 && log2c <=8 )
vf->priv->log2_count = log2c;
 
if(vf->priv->qp < 0)
vf->priv->qp = 0;
 
// #if HAVE_MMX
// if(ff_gCpuCaps.hasMMX){
// store_slice= store_slice_mmx;
// }
// #endif
 
return 1;
}
 
const vf_info_t ff_vf_info_uspp = {
"ultra simple/slow postprocess",
"uspp",
"Michael Niedermayer",
"",
vf_open,
NULL
};
/contrib/sdk/sources/ffmpeg/libavfilter/libmpcodecs/vfcap.h
0,0 → 1,56
/* VFCAP_* values: they are flags, returned by query_format():
*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef MPLAYER_VFCAP_H
#define MPLAYER_VFCAP_H
 
// set, if the given colorspace is supported (with or without conversion)
#define VFCAP_CSP_SUPPORTED 0x1
// set, if the given colorspace is supported _without_ conversion
#define VFCAP_CSP_SUPPORTED_BY_HW 0x2
// set if the driver/filter can draw OSD
#define VFCAP_OSD 0x4
// set if the driver/filter can handle compressed SPU stream
#define VFCAP_SPU 0x8
// scaling up/down by hardware, or software:
#define VFCAP_HWSCALE_UP 0x10
#define VFCAP_HWSCALE_DOWN 0x20
#define VFCAP_SWSCALE 0x40
// driver/filter can do vertical flip (upside-down)
#define VFCAP_FLIP 0x80
 
// driver/hardware handles timing (blocking)
#define VFCAP_TIMER 0x100
// driver _always_ flip image upside-down (for ve_vfw)
#define VFCAP_FLIPPED 0x200
// vf filter: accepts stride (put_image)
// vo driver: has draw_slice() support for the given csp
#define VFCAP_ACCEPT_STRIDE 0x400
// filter does postprocessing (so you shouldn't scale/filter image before it)
#define VFCAP_POSTPROC 0x800
// filter cannot be reconfigured to different size & format
#define VFCAP_CONSTANT 0x1000
// filter can draw EOSD
#define VFCAP_EOSD 0x2000
// filter will draw EOSD at screen resolution (without scaling)
#define VFCAP_EOSD_UNSCALED 0x4000
// used by libvo and vf_vo, indicates the VO does not support draw_slice for this format
#define VOCAP_NOSLICES 0x8000
 
#endif /* MPLAYER_VFCAP_H */
/contrib/sdk/sources/ffmpeg/libavfilter/lswsutils.c
0,0 → 1,50
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/imgutils.h"
#include "lswsutils.h"
 
int ff_scale_image(uint8_t *dst_data[4], int dst_linesize[4],
int dst_w, int dst_h, enum AVPixelFormat dst_pix_fmt,
uint8_t * const src_data[4], int src_linesize[4],
int src_w, int src_h, enum AVPixelFormat src_pix_fmt,
void *log_ctx)
{
int ret;
struct SwsContext *sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
dst_w, dst_h, dst_pix_fmt,
0, NULL, NULL, NULL);
if (!sws_ctx) {
av_log(log_ctx, AV_LOG_ERROR,
"Impossible to create scale context for the conversion "
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
ret = AVERROR(EINVAL);
goto end;
}
 
if ((ret = av_image_alloc(dst_data, dst_linesize, dst_w, dst_h, dst_pix_fmt, 16)) < 0)
goto end;
ret = 0;
sws_scale(sws_ctx, (const uint8_t * const*)src_data, src_linesize, 0, src_h, dst_data, dst_linesize);
 
end:
sws_freeContext(sws_ctx);
return ret;
}
/contrib/sdk/sources/ffmpeg/libavfilter/lswsutils.h
0,0 → 1,38
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Miscellaneous utilities which make use of the libswscale library
*/
 
#ifndef AVFILTER_LSWSUTILS_H
#define AVFILTER_LSWSUTILS_H
 
#include "libswscale/swscale.h"
 
/**
* Scale image using libswscale.
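 *
 * A minimal usage sketch (src_data/src_linesize/src_w/src_h/src_pix_fmt are
 * hypothetical caller variables; on success the buffer must be released with
 * av_freep(&dst_data[0])):
 * @code
 * uint8_t *dst_data[4]; int dst_linesize[4];
 * int ret = ff_scale_image(dst_data, dst_linesize,
 *                          320, 240, AV_PIX_FMT_YUV420P,
 *                          src_data, src_linesize, src_w, src_h, src_pix_fmt,
 *                          NULL);
 * if (ret >= 0)
 *     av_freep(&dst_data[0]);
 * @endcode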
*/
int ff_scale_image(uint8_t *dst_data[4], int dst_linesize[4],
int dst_w, int dst_h, enum AVPixelFormat dst_pix_fmt,
uint8_t *const src_data[4], int src_linesize[4],
int src_w, int src_h, enum AVPixelFormat src_pix_fmt,
void *log_ctx);
 
#endif /* AVFILTER_LSWSUTILS_H */
/contrib/sdk/sources/ffmpeg/libavfilter/opencl_allkernels.c
0,0 → 1,41
/*
* Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "opencl_allkernels.h"
#if CONFIG_OPENCL
#include "libavutil/opencl.h"
#include "deshake_opencl_kernel.h"
#include "unsharp_opencl_kernel.h"
#endif
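 
/* CONFIG_<X>_FILTER expands to a compile-time 0/1 config constant, so the
 * kernels of disabled filters compile away to an empty if-body. */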
 
#define OPENCL_REGISTER_KERNEL_CODE(X, x) \
{ \
if (CONFIG_##X##_FILTER) { \
av_opencl_register_kernel_code(ff_kernel_##x##_opencl); \
} \
}
 
void ff_opencl_register_filter_kernel_code_all(void)
{
#if CONFIG_OPENCL
OPENCL_REGISTER_KERNEL_CODE(DESHAKE, deshake);
OPENCL_REGISTER_KERNEL_CODE(UNSHARP, unsharp);
#endif
}
/contrib/sdk/sources/ffmpeg/libavfilter/opencl_allkernels.h
0,0 → 1,29
/*
* Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_OPENCL_ALLKERNEL_H
#define AVFILTER_OPENCL_ALLKERNEL_H
 
#include "avfilter.h"
#include "config.h"
 
void ff_opencl_register_filter_kernel_code_all(void);
 
#endif /* AVFILTER_OPENCL_ALLKERNEL_H */
/contrib/sdk/sources/ffmpeg/libavfilter/pthread.c
0,0 → 1,237
/*
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Libavfilter multithreading support
*/
 
#include "config.h"
 
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
 
#include "avfilter.h"
#include "internal.h"
#include "thread.h"
 
#if HAVE_PTHREADS
#include <pthread.h>
#elif HAVE_OS2THREADS
#include "compat/os2threads.h"
#elif HAVE_W32THREADS
#include "compat/w32pthreads.h"
#endif
 
typedef struct ThreadContext {
AVFilterGraph *graph;
 
int nb_threads;
pthread_t *workers;
avfilter_action_func *func;
 
/* per-execute parameters */
AVFilterContext *ctx;
void *arg;
int *rets;
int nb_rets;
int nb_jobs;
 
pthread_cond_t last_job_cond;
pthread_cond_t current_job_cond;
pthread_mutex_t current_job_lock;
int current_job;
unsigned int current_execute;
int done;
} ThreadContext;
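 
/* Worker protocol: current_job counts jobs handed out plus workers that have
 * gone idle. Each worker first takes the job equal to its fixed self_id,
 * then pulls further indices from current_job; once the counter reaches
 * nb_threads + nb_jobs every job is done and last_job_cond wakes the
 * dispatching thread parked in slice_thread_park_workers(). */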
 
static void* attribute_align_arg worker(void *v)
{
ThreadContext *c = v;
int our_job = c->nb_jobs;
int nb_threads = c->nb_threads;
unsigned int last_execute = 0;
int self_id;
 
pthread_mutex_lock(&c->current_job_lock);
self_id = c->current_job++;
for (;;) {
while (our_job >= c->nb_jobs) {
if (c->current_job == nb_threads + c->nb_jobs)
pthread_cond_signal(&c->last_job_cond);
 
while (last_execute == c->current_execute && !c->done)
pthread_cond_wait(&c->current_job_cond, &c->current_job_lock);
last_execute = c->current_execute;
our_job = self_id;
 
if (c->done) {
pthread_mutex_unlock(&c->current_job_lock);
return NULL;
}
}
pthread_mutex_unlock(&c->current_job_lock);
 
c->rets[our_job % c->nb_rets] = c->func(c->ctx, c->arg, our_job, c->nb_jobs);
 
pthread_mutex_lock(&c->current_job_lock);
our_job = c->current_job++;
}
}
 
static void slice_thread_uninit(ThreadContext *c)
{
int i;
 
pthread_mutex_lock(&c->current_job_lock);
c->done = 1;
pthread_cond_broadcast(&c->current_job_cond);
pthread_mutex_unlock(&c->current_job_lock);
 
for (i = 0; i < c->nb_threads; i++)
pthread_join(c->workers[i], NULL);
 
pthread_mutex_destroy(&c->current_job_lock);
pthread_cond_destroy(&c->current_job_cond);
pthread_cond_destroy(&c->last_job_cond);
av_freep(&c->workers);
}
 
static void slice_thread_park_workers(ThreadContext *c)
{
while (c->current_job != c->nb_threads + c->nb_jobs)
pthread_cond_wait(&c->last_job_cond, &c->current_job_lock);
pthread_mutex_unlock(&c->current_job_lock);
}
 
static int thread_execute(AVFilterContext *ctx, avfilter_action_func *func,
void *arg, int *ret, int nb_jobs)
{
ThreadContext *c = ctx->graph->internal->thread;
int dummy_ret;
 
if (nb_jobs <= 0)
return 0;
 
pthread_mutex_lock(&c->current_job_lock);
 
c->current_job = c->nb_threads;
c->nb_jobs = nb_jobs;
c->ctx = ctx;
c->arg = arg;
c->func = func;
if (ret) {
c->rets = ret;
c->nb_rets = nb_jobs;
} else {
c->rets = &dummy_ret;
c->nb_rets = 1;
}
c->current_execute++;
 
pthread_cond_broadcast(&c->current_job_cond);
 
slice_thread_park_workers(c);
 
return 0;
}
 
static int thread_init_internal(ThreadContext *c, int nb_threads)
{
int i, ret;
 
if (!nb_threads) {
int nb_cpus = av_cpu_count();
// use number of cores + 1 as thread count if there is more than one
if (nb_cpus > 1)
nb_threads = nb_cpus + 1;
else
nb_threads = 1;
}
 
if (nb_threads <= 1)
return 1;
 
c->nb_threads = nb_threads;
c->workers = av_mallocz(sizeof(*c->workers) * nb_threads);
if (!c->workers)
return AVERROR(ENOMEM);
 
c->current_job = 0;
c->nb_jobs = 0;
c->done = 0;
 
pthread_cond_init(&c->current_job_cond, NULL);
pthread_cond_init(&c->last_job_cond, NULL);
 
pthread_mutex_init(&c->current_job_lock, NULL);
pthread_mutex_lock(&c->current_job_lock);
for (i = 0; i < nb_threads; i++) {
ret = pthread_create(&c->workers[i], NULL, worker, c);
if (ret) {
pthread_mutex_unlock(&c->current_job_lock);
c->nb_threads = i;
slice_thread_uninit(c);
return AVERROR(ret);
}
}
 
slice_thread_park_workers(c);
 
return c->nb_threads;
}
 
int ff_graph_thread_init(AVFilterGraph *graph)
{
int ret;
 
#if HAVE_W32THREADS
w32thread_init();
#endif
 
if (graph->nb_threads == 1) {
graph->thread_type = 0;
return 0;
}
 
graph->internal->thread = av_mallocz(sizeof(ThreadContext));
if (!graph->internal->thread)
return AVERROR(ENOMEM);
 
ret = thread_init_internal(graph->internal->thread, graph->nb_threads);
if (ret <= 1) {
av_freep(&graph->internal->thread);
graph->thread_type = 0;
graph->nb_threads = 1;
return (ret < 0) ? ret : 0;
}
graph->nb_threads = ret;
 
graph->internal->thread_execute = thread_execute;
 
return 0;
}
 
void ff_graph_thread_free(AVFilterGraph *graph)
{
if (graph->internal->thread)
slice_thread_uninit(graph->internal->thread);
av_freep(&graph->internal->thread);
}
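 
/* A minimal sketch (not part of the original file) of the avfilter_action_func
 * contract dispatched by thread_execute(): each invocation receives a job
 * number jobnr in [0, nb_jobs) and must touch only its own disjoint slice.
 * The SliceArg type and invert_slice() below are illustrative assumptions. */
#if 0
typedef struct SliceArg {
    const uint8_t *src;
    uint8_t *dst;
    int stride;
    int width;
    int height;
} SliceArg;

static int invert_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    SliceArg *s = arg;
    /* divide the rows evenly: job jobnr handles [start, end) */
    int start = s->height *  jobnr      / nb_jobs;
    int end   = s->height * (jobnr + 1) / nb_jobs;
    int x, y;

    for (y = start; y < end; y++)
        for (x = 0; x < s->width; x++)
            s->dst[y * s->stride + x] = 255 - s->src[y * s->stride + x];
    return 0;
}
#endif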
/contrib/sdk/sources/ffmpeg/libavfilter/setpts.c
0,0 → 1,296
/*
* Copyright (c) 2010 Stefano Sabatini
* Copyright (c) 2008 Victor Paesa
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* video presentation timestamp (PTS) modification filter
*/
 
#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
static const char *const var_names[] = {
"FRAME_RATE", ///< defined only for constant frame-rate video
"INTERLACED", ///< tell if the current frame is interlaced
"N", ///< frame / sample number (starting at zero)
"NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio)
"NB_SAMPLES", ///< number of samples in the current frame (only audio)
"POS", ///< original position in the file of the frame
"PREV_INPTS", ///< previous input PTS
"PREV_INT", ///< previous input time in seconds
"PREV_OUTPTS", ///< previous output PTS
"PREV_OUTT", ///< previous output time in seconds
"PTS", ///< original pts in the file of the frame
"SAMPLE_RATE", ///< sample rate (only audio)
"STARTPTS", ///< PTS at start of movie
"STARTT", ///< time at start of movie
"T", ///< original time in the file of the frame
"TB", ///< timebase
"RTCTIME", ///< wallclock (RTC) time in micro seconds
"RTCSTART", ///< wallclock (RTC) time at the start of the movie in micro seconds
"S", // Number of samples in the current frame
"SR", // Audio sample rate
NULL
};
 
enum var_name {
VAR_FRAME_RATE,
VAR_INTERLACED,
VAR_N,
VAR_NB_CONSUMED_SAMPLES,
VAR_NB_SAMPLES,
VAR_POS,
VAR_PREV_INPTS,
VAR_PREV_INT,
VAR_PREV_OUTPTS,
VAR_PREV_OUTT,
VAR_PTS,
VAR_SAMPLE_RATE,
VAR_STARTPTS,
VAR_STARTT,
VAR_T,
VAR_TB,
VAR_RTCTIME,
VAR_RTCSTART,
VAR_S,
VAR_SR,
VAR_VARS_NB
};
 
typedef struct {
const AVClass *class;
char *expr_str;
AVExpr *expr;
double var_values[VAR_VARS_NB];
enum AVMediaType type;
} SetPTSContext;
 
static av_cold int init(AVFilterContext *ctx)
{
SetPTSContext *setpts = ctx->priv;
int ret;
 
if ((ret = av_expr_parse(&setpts->expr, setpts->expr_str,
var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", setpts->expr_str);
return ret;
}
 
setpts->var_values[VAR_N] = 0.0;
setpts->var_values[VAR_S] = 0.0;
setpts->var_values[VAR_PREV_INPTS] = NAN;
setpts->var_values[VAR_PREV_INT] = NAN;
setpts->var_values[VAR_PREV_OUTPTS] = NAN;
setpts->var_values[VAR_PREV_OUTT] = NAN;
setpts->var_values[VAR_STARTPTS] = NAN;
setpts->var_values[VAR_STARTT] = NAN;
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
SetPTSContext *setpts = ctx->priv;
 
setpts->type = inlink->type;
setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
setpts->var_values[VAR_RTCSTART] = av_gettime();
 
setpts->var_values[VAR_SR] =
setpts->var_values[VAR_SAMPLE_RATE] =
setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
 
setpts->var_values[VAR_FRAME_RATE] = inlink->frame_rate.num && inlink->frame_rate.den ?
av_q2d(inlink->frame_rate) : NAN;
 
av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
setpts->var_values[VAR_TB],
setpts->var_values[VAR_FRAME_RATE],
setpts->var_values[VAR_SAMPLE_RATE]);
return 0;
}
 
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
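 
/* For example, with tb = (AVRational){1, 90000}: TS2T(90000, tb) == 1.0,
 * while TS2D(AV_NOPTS_VALUE) == NAN and D2TS(NAN) == AV_NOPTS_VALUE, i.e.
 * NAN is the double-domain stand-in for an unset timestamp. */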
 
#define BUF_SIZE 64
 
static inline char *double2int64str(char *buf, double v)
{
if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
else snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
return buf;
}
 
#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
SetPTSContext *setpts = inlink->dst->priv;
int64_t in_pts = frame->pts;
double d;
 
if (isnan(setpts->var_values[VAR_STARTPTS])) {
setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
setpts->var_values[VAR_STARTT ] = TS2T(frame->pts, inlink->time_base);
}
setpts->var_values[VAR_PTS ] = TS2D(frame->pts);
setpts->var_values[VAR_T ] = TS2T(frame->pts, inlink->time_base);
setpts->var_values[VAR_POS ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
setpts->var_values[VAR_RTCTIME ] = av_gettime();
 
if (inlink->type == AVMEDIA_TYPE_VIDEO) {
setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
} else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
setpts->var_values[VAR_S] = frame->nb_samples;
setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
}
 
d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
frame->pts = D2TS(d);
 
av_log(inlink->dst, AV_LOG_DEBUG,
"N:%"PRId64" PTS:%s T:%f POS:%s",
(int64_t)setpts->var_values[VAR_N],
d2istr(setpts->var_values[VAR_PTS]),
setpts->var_values[VAR_T],
d2istr(setpts->var_values[VAR_POS]));
switch (inlink->type) {
case AVMEDIA_TYPE_VIDEO:
av_log(inlink->dst, AV_LOG_DEBUG, " INTERLACED:%"PRId64,
(int64_t)setpts->var_values[VAR_INTERLACED]);
break;
case AVMEDIA_TYPE_AUDIO:
av_log(inlink->dst, AV_LOG_DEBUG, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
(int64_t)setpts->var_values[VAR_NB_SAMPLES],
(int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
break;
}
av_log(inlink->dst, AV_LOG_DEBUG, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));
 
if (inlink->type == AVMEDIA_TYPE_VIDEO) {
setpts->var_values[VAR_N] += 1.0;
} else {
setpts->var_values[VAR_N] += frame->nb_samples;
}
 
setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
setpts->var_values[VAR_PREV_INT ] = TS2T(in_pts, inlink->time_base);
setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
setpts->var_values[VAR_PREV_OUTT] = TS2T(frame->pts, inlink->time_base);
if (setpts->type == AVMEDIA_TYPE_AUDIO) {
setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
}
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
SetPTSContext *setpts = ctx->priv;
av_expr_free(setpts->expr);
setpts->expr = NULL;
}
 
#define OFFSET(x) offsetof(SetPTSContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
{ "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = FLAGS },
{ NULL }
};
 
#if CONFIG_SETPTS_FILTER
#define setpts_options options
AVFILTER_DEFINE_CLASS(setpts);
 
static const AVFilterPad avfilter_vf_setpts_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_setpts_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_setpts = {
.name = "setpts",
.description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
.init = init,
.uninit = uninit,
 
.priv_size = sizeof(SetPTSContext),
.priv_class = &setpts_class,
 
.inputs = avfilter_vf_setpts_inputs,
.outputs = avfilter_vf_setpts_outputs,
};
#endif /* CONFIG_SETPTS_FILTER */
 
#if CONFIG_ASETPTS_FILTER
 
#define asetpts_options options
AVFILTER_DEFINE_CLASS(asetpts);
 
static const AVFilterPad asetpts_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad asetpts_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
 
AVFilter avfilter_af_asetpts = {
.name = "asetpts",
.description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
.init = init,
.uninit = uninit,
.priv_size = sizeof(SetPTSContext),
.priv_class = &asetpts_class,
.inputs = asetpts_inputs,
.outputs = asetpts_outputs,
};
#endif /* CONFIG_ASETPTS_FILTER */
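 
/* Common usage (from the filter documentation; values illustrative):
 *   setpts=PTS-STARTPTS     start counting timestamps from zero
 *   setpts=0.5*PTS          fast motion: halve every timestamp
 *   setpts='N/(25*TB)'      fixed 25 fps timestamps from the frame count
 *   asetpts=N/SR/TB         audio timestamps derived from the sample count
 */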
/contrib/sdk/sources/ffmpeg/libavfilter/split.c
0,0 → 1,147
/*
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* audio and video splitter
*/
 
#include <stdio.h>
 
#include "libavutil/attributes.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
 
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
#include "video.h"
 
typedef struct SplitContext {
const AVClass *class;
int nb_outputs;
} SplitContext;
 
static av_cold int split_init(AVFilterContext *ctx)
{
SplitContext *s = ctx->priv;
int i;
 
for (i = 0; i < s->nb_outputs; i++) {
char name[32];
AVFilterPad pad = { 0 };
 
snprintf(name, sizeof(name), "output%d", i);
pad.type = ctx->filter->inputs[0].type;
pad.name = av_strdup(name);
 
ff_insert_outpad(ctx, i, &pad);
}
 
return 0;
}
 
static av_cold void split_uninit(AVFilterContext *ctx)
{
int i;
 
for (i = 0; i < ctx->nb_outputs; i++)
av_freep(&ctx->output_pads[i].name);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
int i, ret = AVERROR_EOF;
 
for (i = 0; i < ctx->nb_outputs; i++) {
AVFrame *buf_out;
 
if (ctx->outputs[i]->closed)
continue;
buf_out = av_frame_clone(frame);
if (!buf_out) {
ret = AVERROR(ENOMEM);
break;
}
 
ret = ff_filter_frame(ctx->outputs[i], buf_out);
if (ret < 0)
break;
}
av_frame_free(&frame);
return ret;
}
 
#define OFFSET(x) offsetof(SplitContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
{ "outputs", "set number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, FLAGS },
{ NULL }
};
 
#define split_options options
AVFILTER_DEFINE_CLASS(split);
 
#define asplit_options options
AVFILTER_DEFINE_CLASS(asplit);
 
static const AVFilterPad avfilter_vf_split_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_split = {
.name = "split",
.description = NULL_IF_CONFIG_SMALL("Pass on the input to N video outputs."),
.priv_size = sizeof(SplitContext),
.priv_class = &split_class,
.init = split_init,
.uninit = split_uninit,
.inputs = avfilter_vf_split_inputs,
.outputs = NULL,
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
 
static const AVFilterPad avfilter_af_asplit_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
AVFilter avfilter_af_asplit = {
.name = "asplit",
.description = NULL_IF_CONFIG_SMALL("Pass on the audio input to N audio outputs."),
.priv_size = sizeof(SplitContext),
.priv_class = &asplit_class,
.init = split_init,
.uninit = split_uninit,
.inputs = avfilter_af_asplit_inputs,
.outputs = NULL,
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
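 
/* Common usage: duplicate one stream so it can feed several filter chains,
 * e.g.
 *   [in] split=2 [out0][out1]
 *   asplit=3 [a][b][c]
 */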
/contrib/sdk/sources/ffmpeg/libavfilter/src_movie.c
0,0 → 1,603
/*
* Copyright (c) 2010 Stefano Sabatini
* Copyright (c) 2008 Victor Paesa
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* movie video source
*
* @todo use direct rendering (no allocation of a new frame)
* @todo support a PTS correction mechanism
*/
 
#include <float.h>
 
#include "libavutil/attributes.h"
#include "libavutil/avstring.h"
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/timestamp.h"
#include "libavformat/avformat.h"
#include "audio.h"
#include "avcodec.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
AVStream *st;
int done;
} MovieStream;
 
typedef struct {
/* common A/V fields */
const AVClass *class;
int64_t seek_point; ///< seekpoint in microseconds
double seek_point_d;
char *format_name;
char *file_name;
char *stream_specs; /**< user-provided list of streams, separated by + */
int stream_index; /**< for compatibility */
int loop_count;
 
AVFormatContext *format_ctx;
int eof;
AVPacket pkt, pkt0;
AVFrame *frame; ///< frame to store the decoded video or audio data in
 
int max_stream_index; /**< max stream # actually used for output */
MovieStream *st; /**< array of all streams, one per output */
int *out_index; /**< stream number -> output number map, or -1 */
} MovieContext;
 
#define OFFSET(x) offsetof(MovieContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption movie_options[]= {
{ "filename", NULL, OFFSET(file_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "format_name", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "f", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
{ "sp", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
{ "streams", "set streams", OFFSET(stream_specs), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, FLAGS },
{ "s", "set streams", OFFSET(stream_specs), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, FLAGS },
{ "loop", "set loop count", OFFSET(loop_count), AV_OPT_TYPE_INT, {.i64 = 1}, 0, INT_MAX, FLAGS },
{ NULL },
};
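 
/* The "streams" option takes a '+'-separated list of specifiers; "dv" and
 * "da" select the default video and audio streams, e.g. streams=dv+da.
 * See find_stream() below for the parsing. */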
 
static int movie_config_output_props(AVFilterLink *outlink);
static int movie_request_frame(AVFilterLink *outlink);
 
static AVStream *find_stream(void *log, AVFormatContext *avf, const char *spec)
{
int i, ret, already = 0, stream_id = -1;
char type_char[2], dummy;
AVStream *found = NULL;
enum AVMediaType type;
 
ret = sscanf(spec, "d%1[av]%d%c", type_char, &stream_id, &dummy);
if (ret >= 1 && ret <= 2) {
type = type_char[0] == 'v' ? AVMEDIA_TYPE_VIDEO : AVMEDIA_TYPE_AUDIO;
ret = av_find_best_stream(avf, type, stream_id, -1, NULL, 0);
if (ret < 0) {
av_log(log, AV_LOG_ERROR, "No %s stream with index '%d' found\n",
av_get_media_type_string(type), stream_id);
return NULL;
}
return avf->streams[ret];
}
for (i = 0; i < avf->nb_streams; i++) {
ret = avformat_match_stream_specifier(avf, avf->streams[i], spec);
if (ret < 0) {
av_log(log, AV_LOG_ERROR,
"Invalid stream specifier \"%s\"\n", spec);
return NULL;
}
if (!ret)
continue;
if (avf->streams[i]->discard != AVDISCARD_ALL) {
already++;
continue;
}
if (found) {
av_log(log, AV_LOG_WARNING,
"Ambiguous stream specifier \"%s\", using #%d\n", spec, i);
break;
}
found = avf->streams[i];
}
if (!found) {
av_log(log, AV_LOG_WARNING, "Stream specifier \"%s\" %s\n", spec,
already ? "matched only already used streams" :
"did not match any stream");
return NULL;
}
if (found->codec->codec_type != AVMEDIA_TYPE_VIDEO &&
found->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
av_log(log, AV_LOG_ERROR, "Stream specifier \"%s\" matched a %s stream,"
"currently unsupported by libavfilter\n", spec,
av_get_media_type_string(found->codec->codec_type));
return NULL;
}
return found;
}
 
static int open_stream(void *log, MovieStream *st)
{
AVCodec *codec;
int ret;
 
codec = avcodec_find_decoder(st->st->codec->codec_id);
if (!codec) {
av_log(log, AV_LOG_ERROR, "Failed to find any codec\n");
return AVERROR(EINVAL);
}
 
st->st->codec->refcounted_frames = 1;
 
if ((ret = avcodec_open2(st->st->codec, codec, NULL)) < 0) {
av_log(log, AV_LOG_ERROR, "Failed to open codec\n");
return ret;
}
 
return 0;
}
 
static int guess_channel_layout(MovieStream *st, int st_index, void *log_ctx)
{
AVCodecContext *dec_ctx = st->st->codec;
char buf[256];
int64_t chl = av_get_default_channel_layout(dec_ctx->channels);
 
if (!chl) {
av_log(log_ctx, AV_LOG_ERROR,
"Channel layout is not set in stream %d, and could not "
"be guessed from the number of channels (%d)\n",
st_index, dec_ctx->channels);
return AVERROR(EINVAL);
}
 
av_get_channel_layout_string(buf, sizeof(buf), dec_ctx->channels, chl);
av_log(log_ctx, AV_LOG_WARNING,
"Channel layout is not set in output stream %d, "
"guessed channel layout is '%s'\n",
st_index, buf);
dec_ctx->channel_layout = chl;
return 0;
}
 
static av_cold int movie_common_init(AVFilterContext *ctx)
{
MovieContext *movie = ctx->priv;
AVInputFormat *iformat = NULL;
int64_t timestamp;
int nb_streams, ret, i;
char default_streams[16], *stream_specs, *spec, *cursor;
char name[16];
AVStream *st;
 
if (!movie->file_name) {
av_log(ctx, AV_LOG_ERROR, "No filename provided!\n");
return AVERROR(EINVAL);
}
 
movie->seek_point = movie->seek_point_d * 1000000 + 0.5;
 
stream_specs = movie->stream_specs;
if (!stream_specs) {
snprintf(default_streams, sizeof(default_streams), "d%c%d",
!strcmp(ctx->filter->name, "amovie") ? 'a' : 'v',
movie->stream_index);
stream_specs = default_streams;
}
for (cursor = stream_specs, nb_streams = 1; *cursor; cursor++)
if (*cursor == '+')
nb_streams++;
 
if (movie->loop_count != 1 && nb_streams != 1) {
av_log(ctx, AV_LOG_ERROR,
"Loop with several streams is currently unsupported\n");
return AVERROR_PATCHWELCOME;
}
 
av_register_all();
 
// Try to find the movie format (container)
iformat = movie->format_name ? av_find_input_format(movie->format_name) : NULL;
 
movie->format_ctx = NULL;
if ((ret = avformat_open_input(&movie->format_ctx, movie->file_name, iformat, NULL)) < 0) {
av_log(ctx, AV_LOG_ERROR,
"Failed to avformat_open_input '%s'\n", movie->file_name);
return ret;
}
if ((ret = avformat_find_stream_info(movie->format_ctx, NULL)) < 0)
av_log(ctx, AV_LOG_WARNING, "Failed to find stream info\n");
 
// if a seek was requested, execute it
if (movie->seek_point > 0) {
timestamp = movie->seek_point;
// add the stream start time, should it exist
if (movie->format_ctx->start_time != AV_NOPTS_VALUE) {
if (timestamp > INT64_MAX - movie->format_ctx->start_time) {
av_log(ctx, AV_LOG_ERROR,
"%s: seek value overflow with start_time:%"PRId64" seek_point:%"PRId64"\n",
movie->file_name, movie->format_ctx->start_time, movie->seek_point);
return AVERROR(EINVAL);
}
timestamp += movie->format_ctx->start_time;
}
if ((ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD)) < 0) {
av_log(ctx, AV_LOG_ERROR, "%s: could not seek to position %"PRId64"\n",
movie->file_name, timestamp);
return ret;
}
}
 
for (i = 0; i < movie->format_ctx->nb_streams; i++)
movie->format_ctx->streams[i]->discard = AVDISCARD_ALL;
 
movie->st = av_calloc(nb_streams, sizeof(*movie->st));
if (!movie->st)
return AVERROR(ENOMEM);
 
for (i = 0; i < nb_streams; i++) {
spec = av_strtok(stream_specs, "+", &cursor);
if (!spec)
return AVERROR_BUG;
stream_specs = NULL; /* for next strtok */
st = find_stream(ctx, movie->format_ctx, spec);
if (!st)
return AVERROR(EINVAL);
st->discard = AVDISCARD_DEFAULT;
movie->st[i].st = st;
movie->max_stream_index = FFMAX(movie->max_stream_index, st->index);
}
if (av_strtok(NULL, "+", &cursor))
return AVERROR_BUG;
 
movie->out_index = av_calloc(movie->max_stream_index + 1,
sizeof(*movie->out_index));
if (!movie->out_index)
return AVERROR(ENOMEM);
for (i = 0; i <= movie->max_stream_index; i++)
movie->out_index[i] = -1;
for (i = 0; i < nb_streams; i++)
movie->out_index[movie->st[i].st->index] = i;
 
for (i = 0; i < nb_streams; i++) {
AVFilterPad pad = { 0 };
snprintf(name, sizeof(name), "out%d", i);
pad.type = movie->st[i].st->codec->codec_type;
pad.name = av_strdup(name);
pad.config_props = movie_config_output_props;
pad.request_frame = movie_request_frame;
ff_insert_outpad(ctx, i, &pad);
ret = open_stream(ctx, &movie->st[i]);
if (ret < 0)
return ret;
if ( movie->st[i].st->codec->codec->type == AVMEDIA_TYPE_AUDIO &&
!movie->st[i].st->codec->channel_layout) {
ret = guess_channel_layout(&movie->st[i], i, ctx);
if (ret < 0)
return ret;
}
}
 
av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n",
movie->seek_point, movie->format_name, movie->file_name,
movie->stream_index);
 
return 0;
}
 
static av_cold void movie_uninit(AVFilterContext *ctx)
{
MovieContext *movie = ctx->priv;
int i;
 
for (i = 0; i < ctx->nb_outputs; i++) {
av_freep(&ctx->output_pads[i].name);
if (movie->st[i].st)
avcodec_close(movie->st[i].st->codec);
}
av_freep(&movie->st);
av_freep(&movie->out_index);
av_frame_free(&movie->frame);
if (movie->format_ctx)
avformat_close_input(&movie->format_ctx);
}
 
static int movie_query_formats(AVFilterContext *ctx)
{
MovieContext *movie = ctx->priv;
int list[] = { 0, -1 };
int64_t list64[] = { 0, -1 };
int i;
 
for (i = 0; i < ctx->nb_outputs; i++) {
MovieStream *st = &movie->st[i];
AVCodecContext *c = st->st->codec;
AVFilterLink *outlink = ctx->outputs[i];
 
switch (c->codec_type) {
case AVMEDIA_TYPE_VIDEO:
list[0] = c->pix_fmt;
ff_formats_ref(ff_make_format_list(list), &outlink->in_formats);
break;
case AVMEDIA_TYPE_AUDIO:
list[0] = c->sample_fmt;
ff_formats_ref(ff_make_format_list(list), &outlink->in_formats);
list[0] = c->sample_rate;
ff_formats_ref(ff_make_format_list(list), &outlink->in_samplerates);
list64[0] = c->channel_layout;
ff_channel_layouts_ref(avfilter_make_format64_list(list64),
&outlink->in_channel_layouts);
break;
}
}
 
return 0;
}
 
static int movie_config_output_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
MovieContext *movie = ctx->priv;
unsigned out_id = FF_OUTLINK_IDX(outlink);
MovieStream *st = &movie->st[out_id];
AVCodecContext *c = st->st->codec;
 
outlink->time_base = st->st->time_base;
 
switch (c->codec_type) {
case AVMEDIA_TYPE_VIDEO:
outlink->w = c->width;
outlink->h = c->height;
outlink->frame_rate = st->st->r_frame_rate;
break;
case AVMEDIA_TYPE_AUDIO:
break;
}
 
return 0;
}
 
static char *describe_frame_to_str(char *dst, size_t dst_size,
AVFrame *frame,
AVFilterLink *link)
{
switch (frame->type) {
case AVMEDIA_TYPE_VIDEO:
snprintf(dst, dst_size,
"video pts:%s time:%s size:%dx%d aspect:%d/%d",
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base),
frame->width, frame->height,
frame->sample_aspect_ratio.num,
frame->sample_aspect_ratio.den);
break;
case AVMEDIA_TYPE_AUDIO:
snprintf(dst, dst_size,
"audio pts:%s time:%s samples:%d",
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base),
frame->nb_samples);
break;
default:
snprintf(dst, dst_size, "%s BUG", av_get_media_type_string(frame->type));
break;
}
return dst;
}
 
#define describe_frameref(f, link) \
describe_frame_to_str((char[1024]){0}, 1024, f, link)
 
static int rewind_file(AVFilterContext *ctx)
{
MovieContext *movie = ctx->priv;
int64_t timestamp = movie->seek_point;
int ret, i;
 
if (movie->format_ctx->start_time != AV_NOPTS_VALUE)
timestamp += movie->format_ctx->start_time;
ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR, "Unable to loop: %s\n", av_err2str(ret));
movie->loop_count = 1; /* do not try again */
return ret;
}
 
for (i = 0; i < ctx->nb_outputs; i++) {
avcodec_flush_buffers(movie->st[i].st->codec);
movie->st[i].done = 0;
}
movie->eof = 0;
return 0;
}
 
/**
* Try to push a frame to the requested output.
*
* @param ctx filter context
* @param out_id number of output where a frame is wanted;
* if the frame is read from file, used to set the return value;
* if the codec is being flushed, flush the corresponding stream
* @return 1 if a frame was pushed on the requested output,
* 0 if another attempt is possible,
* <0 AVERROR code
*/
static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
{
MovieContext *movie = ctx->priv;
AVPacket *pkt = &movie->pkt;
MovieStream *st;
int ret, got_frame = 0, pkt_out_id;
AVFilterLink *outlink;
 
if (!pkt->size) {
if (movie->eof) {
if (movie->st[out_id].done) {
if (movie->loop_count != 1) {
ret = rewind_file(ctx);
if (ret < 0)
return ret;
movie->loop_count -= movie->loop_count > 1;
av_log(ctx, AV_LOG_VERBOSE, "Stream finished, looping.\n");
return 0; /* retry */
}
return AVERROR_EOF;
}
pkt->stream_index = movie->st[out_id].st->index;
/* packet is already ready for flushing */
} else {
ret = av_read_frame(movie->format_ctx, &movie->pkt0);
if (ret < 0) {
av_init_packet(&movie->pkt0); /* ready for flushing */
*pkt = movie->pkt0;
if (ret == AVERROR_EOF) {
movie->eof = 1;
return 0; /* start flushing */
}
return ret;
}
*pkt = movie->pkt0;
}
}
 
pkt_out_id = pkt->stream_index > movie->max_stream_index ? -1 :
movie->out_index[pkt->stream_index];
if (pkt_out_id < 0) {
av_free_packet(&movie->pkt0);
pkt->size = 0; /* ready for next run */
pkt->data = NULL;
return 0;
}
st = &movie->st[pkt_out_id];
outlink = ctx->outputs[pkt_out_id];
 
movie->frame = av_frame_alloc();
if (!movie->frame)
return AVERROR(ENOMEM);
 
switch (st->st->codec->codec_type) {
case AVMEDIA_TYPE_VIDEO:
ret = avcodec_decode_video2(st->st->codec, movie->frame, &got_frame, pkt);
break;
case AVMEDIA_TYPE_AUDIO:
ret = avcodec_decode_audio4(st->st->codec, movie->frame, &got_frame, pkt);
break;
default:
ret = AVERROR(ENOSYS);
break;
}
if (ret < 0) {
av_log(ctx, AV_LOG_WARNING, "Decode error: %s\n", av_err2str(ret));
av_frame_free(&movie->frame);
av_free_packet(&movie->pkt0);
movie->pkt.size = 0;
movie->pkt.data = NULL;
return 0;
}
if (!ret || st->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
ret = pkt->size;
 
pkt->data += ret;
pkt->size -= ret;
if (pkt->size <= 0) {
av_free_packet(&movie->pkt0);
pkt->size = 0; /* ready for next run */
pkt->data = NULL;
}
if (!got_frame) {
if (!ret)
st->done = 1;
av_frame_free(&movie->frame);
return 0;
}
 
av_dlog(ctx, "movie_push_frame(): file:'%s' %s\n", movie->file_name,
describe_frameref(movie->frame, outlink));
 
movie->frame->pts = av_frame_get_best_effort_timestamp(movie->frame);
ret = ff_filter_frame(outlink, movie->frame);
movie->frame = NULL;
 
if (ret < 0)
return ret;
return pkt_out_id == out_id;
}
 
static int movie_request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
unsigned out_id = FF_OUTLINK_IDX(outlink);
int ret;
 
while (1) {
ret = movie_push_frame(ctx, out_id);
if (ret)
return FFMIN(ret, 0);
}
}
 
#if CONFIG_MOVIE_FILTER
 
AVFILTER_DEFINE_CLASS(movie);
 
AVFilter avfilter_avsrc_movie = {
.name = "movie",
.description = NULL_IF_CONFIG_SMALL("Read from a movie source."),
.priv_size = sizeof(MovieContext),
.priv_class = &movie_class,
.init = movie_common_init,
.uninit = movie_uninit,
.query_formats = movie_query_formats,
 
.inputs = NULL,
.outputs = NULL,
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
 
#endif /* CONFIG_MOVIE_FILTER */
 
#if CONFIG_AMOVIE_FILTER
 
#define amovie_options movie_options
AVFILTER_DEFINE_CLASS(amovie);
 
AVFilter avfilter_avsrc_amovie = {
.name = "amovie",
.description = NULL_IF_CONFIG_SMALL("Read audio from a movie source."),
.priv_size = sizeof(MovieContext),
.init = movie_common_init,
.uninit = movie_uninit,
.query_formats = movie_query_formats,
 
.inputs = NULL,
.outputs = NULL,
.priv_class = &amovie_class,
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
 
#endif /* CONFIG_AMOVIE_FILTER */
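 
/* Common usage (file names illustrative): read a secondary input inside a
 * filtergraph, e.g. overlay a logo decoded by the movie source:
 *   movie=logo.png [logo]; [in][logo] overlay=10:10 [out]
 */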
/contrib/sdk/sources/ffmpeg/libavfilter/thread.h
0,0 → 1,29
/*
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_THREAD_H
#define AVFILTER_THREAD_H
 
#include "avfilter.h"
 
int ff_graph_thread_init(AVFilterGraph *graph);
 
void ff_graph_thread_free(AVFilterGraph *graph);
 
#endif /* AVFILTER_THREAD_H */
/contrib/sdk/sources/ffmpeg/libavfilter/transform.c
0,0 → 1,201
/*
* Copyright (C) 2010 Georg Martius <georg.martius@web.de>
* Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* transform input video
*/
 
#include "libavutil/common.h"
#include "libavutil/avassert.h"
 
#include "transform.h"
 
#define INTERPOLATE_METHOD(name) \
static uint8_t name(float x, float y, const uint8_t *src, \
int width, int height, int stride, uint8_t def)
 
#define PIXEL(img, x, y, w, h, stride, def) \
((x) < 0 || (y) < 0) ? (def) : \
(((x) >= (w) || (y) >= (h)) ? (def) : \
img[(x) + (y) * (stride)])
 
/**
* Nearest neighbor interpolation
*/
INTERPOLATE_METHOD(interpolate_nearest)
{
return PIXEL(src, (int)(x + 0.5), (int)(y + 0.5), width, height, stride, def);
}
 
/**
* Bilinear interpolation
*/
INTERPOLATE_METHOD(interpolate_bilinear)
{
int x_c, x_f, y_c, y_f;
int v1, v2, v3, v4;
 
if (x < -1 || x > width || y < -1 || y > height) {
return def;
} else {
x_f = (int)x;
x_c = x_f + 1;
 
y_f = (int)y;
y_c = y_f + 1;
 
v1 = PIXEL(src, x_c, y_c, width, height, stride, def);
v2 = PIXEL(src, x_c, y_f, width, height, stride, def);
v3 = PIXEL(src, x_f, y_c, width, height, stride, def);
v4 = PIXEL(src, x_f, y_f, width, height, stride, def);
 
return (v1*(x - x_f)*(y - y_f) + v2*((x - x_f)*(y_c - y)) +
v3*(x_c - x)*(y - y_f) + v4*((x_c - x)*(y_c - y)));
}
}
 
/**
* Biquadratic interpolation
*/
INTERPOLATE_METHOD(interpolate_biquadratic)
{
int x_c, x_f, y_c, y_f;
uint8_t v1, v2, v3, v4;
float f1, f2, f3, f4;
 
if (x < -1 || x > width || y < -1 || y > height)
return def;
else {
x_f = (int)x;
x_c = x_f + 1;
y_f = (int)y;
y_c = y_f + 1;
 
v1 = PIXEL(src, x_c, y_c, width, height, stride, def);
v2 = PIXEL(src, x_c, y_f, width, height, stride, def);
v3 = PIXEL(src, x_f, y_c, width, height, stride, def);
v4 = PIXEL(src, x_f, y_f, width, height, stride, def);
 
f1 = 1 - sqrt((x_c - x) * (y_c - y));
f2 = 1 - sqrt((x_c - x) * (y - y_f));
f3 = 1 - sqrt((x - x_f) * (y_c - y));
f4 = 1 - sqrt((x - x_f) * (y - y_f));
return (v1 * f1 + v2 * f2 + v3 * f3 + v4 * f4) / (f1 + f2 + f3 + f4);
}
}
 
void avfilter_get_matrix(float x_shift, float y_shift, float angle, float zoom, float *matrix) {
matrix[0] = zoom * cos(angle);
matrix[1] = -sin(angle);
matrix[2] = x_shift;
matrix[3] = -matrix[1];
matrix[4] = matrix[0];
matrix[5] = y_shift;
matrix[6] = 0;
matrix[7] = 0;
matrix[8] = 1;
}
 
void avfilter_add_matrix(const float *m1, const float *m2, float *result)
{
int i;
for (i = 0; i < 9; i++)
result[i] = m1[i] + m2[i];
}
 
void avfilter_sub_matrix(const float *m1, const float *m2, float *result)
{
int i;
for (i = 0; i < 9; i++)
result[i] = m1[i] - m2[i];
}
 
void avfilter_mul_matrix(const float *m1, float scalar, float *result)
{
int i;
for (i = 0; i < 9; i++)
result[i] = m1[i] * scalar;
}
 
static inline int mirror(int v, int m)
{
while ((unsigned)v > (unsigned)m) {
v = -v;
if (v < 0)
v += 2 * m;
}
return v;
}
 
int avfilter_transform(const uint8_t *src, uint8_t *dst,
int src_stride, int dst_stride,
int width, int height, const float *matrix,
enum InterpolateMethod interpolate,
enum FillMethod fill)
{
int x, y;
float x_s, y_s;
uint8_t def = 0;
uint8_t (*func)(float, float, const uint8_t *, int, int, int, uint8_t) = NULL;
 
switch(interpolate) {
case INTERPOLATE_NEAREST:
func = interpolate_nearest;
break;
case INTERPOLATE_BILINEAR:
func = interpolate_bilinear;
break;
case INTERPOLATE_BIQUADRATIC:
func = interpolate_biquadratic;
break;
default:
return AVERROR(EINVAL);
}
 
for (y = 0; y < height; y++) {
for(x = 0; x < width; x++) {
x_s = x * matrix[0] + y * matrix[1] + matrix[2];
y_s = x * matrix[3] + y * matrix[4] + matrix[5];
 
switch(fill) {
case FILL_ORIGINAL:
def = src[y * src_stride + x];
break;
case FILL_CLAMP:
y_s = av_clipf(y_s, 0, height - 1);
x_s = av_clipf(x_s, 0, width - 1);
def = src[(int)y_s * src_stride + (int)x_s];
break;
case FILL_MIRROR:
x_s = mirror(x_s, width-1);
y_s = mirror(y_s, height-1);
 
av_assert2(x_s >= 0 && y_s >= 0);
av_assert2(x_s < width && y_s < height);
def = src[(int)y_s * src_stride + (int)x_s];
}
 
dst[y * dst_stride + x] = func(x_s, y_s, src, width, height, src_stride, def);
}
}
return 0;
}
/contrib/sdk/sources/ffmpeg/libavfilter/transform.h
0,0 → 1,127
/*
* Copyright (C) 2010 Georg Martius <georg.martius@web.de>
* Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_TRANSFORM_H
#define AVFILTER_TRANSFORM_H
 
#include <stdint.h>
 
/**
* @file
* transform input video
*
* All matrices are defined as a single 9-item block of contiguous memory. For
* example, the identity matrix would be:
*
* float *matrix = {1, 0, 0,
* 0, 1, 0,
* 0, 0, 1};
*/
 
enum InterpolateMethod {
INTERPOLATE_NEAREST, ///< Nearest-neighbor (fast)
INTERPOLATE_BILINEAR, ///< Bilinear
INTERPOLATE_BIQUADRATIC, ///< Biquadratic (best)
INTERPOLATE_COUNT, ///< Number of interpolation methods
};
 
// Shortcuts for the fastest and best interpolation methods
#define INTERPOLATE_DEFAULT INTERPOLATE_BILINEAR
#define INTERPOLATE_FAST INTERPOLATE_NEAREST
#define INTERPOLATE_BEST INTERPOLATE_BIQUADRATIC
 
enum FillMethod {
FILL_BLANK, ///< Fill zeroes at blank locations
FILL_ORIGINAL, ///< Original image at blank locations
FILL_CLAMP, ///< Extruded edge value at blank locations
FILL_MIRROR, ///< Mirrored edge at blank locations
FILL_COUNT, ///< Number of edge fill methods
};
 
// Shortcuts for fill methods
#define FILL_DEFAULT FILL_ORIGINAL
 
/**
* Get an affine transformation matrix from a given translation, rotation, and
* zoom factor. The matrix will look like:
*
* [ zoom * cos(angle), -sin(angle), x_shift,
* sin(angle), zoom * cos(angle), y_shift,
* 0, 0, 1 ]
*
* @param x_shift horizontal translation
* @param y_shift vertical translation
* @param angle rotation in radians
* @param zoom scale percent (1.0 = 100%)
* @param matrix 9-item affine transformation matrix
*/
void avfilter_get_matrix(float x_shift, float y_shift, float angle, float zoom, float *matrix);
 
/**
* Add two matrices together. result = m1 + m2.
*
* @param m1 9-item transformation matrix
* @param m2 9-item transformation matrix
* @param result 9-item transformation matrix
*/
void avfilter_add_matrix(const float *m1, const float *m2, float *result);
 
/**
* Subtract one matrix from another. result = m1 - m2.
*
* @param m1 9-item transformation matrix
* @param m2 9-item transformation matrix
* @param result 9-item transformation matrix
*/
void avfilter_sub_matrix(const float *m1, const float *m2, float *result);
 
/**
* Multiply a matrix by a scalar value. result = m1 * scalar.
*
* @param m1 9-item transformation matrix
* @param scalar a number
* @param result 9-item transformation matrix
*/
void avfilter_mul_matrix(const float *m1, float scalar, float *result);
 
/**
* Do an affine transformation with the given interpolation method. This
* multiplies each vector [x,y,1] by the matrix and then interpolates to
* get the final value.
*
* @param src source image
* @param dst destination image
* @param src_stride source image line size in bytes
* @param dst_stride destination image line size in bytes
* @param width image width in pixels
* @param height image height in pixels
* @param matrix 9-item affine transformation matrix
* @param interpolate pixel interpolation method
* @param fill edge fill method
* @return negative on error
*/
int avfilter_transform(const uint8_t *src, uint8_t *dst,
int src_stride, int dst_stride,
int width, int height, const float *matrix,
enum InterpolateMethod interpolate,
enum FillMethod fill);
 
#endif /* AVFILTER_TRANSFORM_H */
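 
/* A minimal sketch (not part of the original header) of the intended call
 * sequence: build a matrix with avfilter_get_matrix(), then warp a plane
 * with avfilter_transform(). Buffer sizes and values are illustrative. */
#if 0
#include <string.h>

static void transform_example(void)
{
    uint8_t src[8 * 8], dst[8 * 8];
    float m[9];

    memset(src, 128, sizeof(src));
    /* pure translation: each destination pixel samples the source
     * 2 pixels to its right (the matrix maps dst to src coordinates) */
    avfilter_get_matrix(2.0f, 0.0f, 0.0f, 1.0f, m);
    avfilter_transform(src, dst,
                       8, 8,  /* src_stride, dst_stride */
                       8, 8,  /* width, height */
                       m, INTERPOLATE_DEFAULT, FILL_BLANK);
}
#endif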
/contrib/sdk/sources/ffmpeg/libavfilter/trim.c
0,0 → 1,394
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <float.h>
#include <math.h>
 
#include "config.h"
 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
 
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct TrimContext {
const AVClass *class;
 
/*
* AVOptions
*/
int64_t duration;
int64_t start_time, end_time;
int64_t start_frame, end_frame;
 
double duration_dbl;
double start_time_dbl, end_time_dbl;
/*
* in the link timebase for video,
* in 1/samplerate for audio
*/
int64_t start_pts, end_pts;
int64_t start_sample, end_sample;
 
/*
* number of video frames that arrived on this filter so far
*/
int64_t nb_frames;
/*
* number of audio samples that arrived on this filter so far
*/
int64_t nb_samples;
/*
* timestamp of the first frame in the output, in the timebase units
*/
int64_t first_pts;
/*
* duration in the timebase units
*/
int64_t duration_tb;
 
int64_t next_pts;
 
int eof;
} TrimContext;
 
static av_cold int init(AVFilterContext *ctx)
{
TrimContext *s = ctx->priv;
 
s->first_pts = AV_NOPTS_VALUE;
 
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
TrimContext *s = ctx->priv;
AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
inlink->time_base : (AVRational){ 1, inlink->sample_rate };
 
if (s->start_time_dbl != DBL_MAX)
s->start_time = s->start_time_dbl * 1e6;
if (s->end_time_dbl != DBL_MAX)
s->end_time = s->end_time_dbl * 1e6;
if (s->duration_dbl != 0)
s->duration = s->duration_dbl * 1e6;
 
if (s->start_time != INT64_MAX) {
int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
s->start_pts = start_pts;
}
if (s->end_time != INT64_MAX) {
int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
s->end_pts = end_pts;
}
if (s->duration)
s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
return 0;
}
 
#define OFFSET(x) offsetof(TrimContext, x)
#define COMMON_OPTS \
{ "starti", "Timestamp of the first frame that " \
"should be passed", OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
{ "endi", "Timestamp of the first frame that " \
"should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
{ "start_pts", "Timestamp of the first frame that should be " \
" passed", OFFSET(start_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
{ "end_pts", "Timestamp of the first frame that should be " \
"dropped again", OFFSET(end_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
{ "durationi", "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },
 
#define COMPAT_OPTS \
{ "start", "Timestamp in seconds of the first frame that " \
"should be passed", OFFSET(start_time_dbl),AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
{ "end", "Timestamp in seconds of the first frame that " \
"should be dropped again", OFFSET(end_time_dbl), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
{ "duration", "Maximum duration of the output in seconds", OFFSET(duration_dbl), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, DBL_MAX, FLAGS },
 
 
#if CONFIG_TRIM_FILTER
static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
TrimContext *s = ctx->priv;
int drop;
 
/* drop everything if EOF has already been returned */
if (s->eof) {
av_frame_free(&frame);
return 0;
}
 
if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
drop = 1;
if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
drop = 0;
if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
frame->pts >= s->start_pts)
drop = 0;
if (drop)
goto drop;
}
 
if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
s->first_pts = frame->pts;
 
if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
drop = 1;
 
if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
drop = 0;
if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
frame->pts < s->end_pts)
drop = 0;
if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
frame->pts - s->first_pts < s->duration_tb)
drop = 0;
 
if (drop) {
s->eof = inlink->closed = 1;
goto drop;
}
}
 
s->nb_frames++;
 
return ff_filter_frame(ctx->outputs[0], frame);
 
drop:
s->nb_frames++;
av_frame_free(&frame);
return 0;
}
 
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption trim_options[] = {
COMMON_OPTS
{ "start_frame", "Number of the first frame that should be passed "
"to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
{ "end_frame", "Number of the first frame that should be dropped "
"again", OFFSET(end_frame), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
COMPAT_OPTS
{ NULL }
};
#undef FLAGS
 
AVFILTER_DEFINE_CLASS(trim);
 
static const AVFilterPad trim_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = trim_filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad trim_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_trim = {
.name = "trim",
.description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
.init = init,
.priv_size = sizeof(TrimContext),
.priv_class = &trim_class,
.inputs = trim_inputs,
.outputs = trim_outputs,
};
#endif // CONFIG_TRIM_FILTER
 
#if CONFIG_ATRIM_FILTER
static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
TrimContext *s = ctx->priv;
int64_t start_sample, end_sample = frame->nb_samples;
int64_t pts;
int drop;
 
/* drop everything if EOF has already been returned */
if (s->eof) {
av_frame_free(&frame);
return 0;
}
 
if (frame->pts != AV_NOPTS_VALUE)
pts = av_rescale_q(frame->pts, inlink->time_base,
(AVRational){ 1, inlink->sample_rate });
else
pts = s->next_pts;
s->next_pts = pts + frame->nb_samples;
 
/* check if at least a part of the frame is after the start time */
if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
start_sample = 0;
} else {
drop = 1;
start_sample = frame->nb_samples;
 
if (s->start_sample >= 0 &&
s->nb_samples + frame->nb_samples > s->start_sample) {
drop = 0;
start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
}
 
if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
pts + frame->nb_samples > s->start_pts) {
drop = 0;
start_sample = FFMIN(start_sample, s->start_pts - pts);
}
 
if (drop)
goto drop;
}
 
if (s->first_pts == AV_NOPTS_VALUE)
s->first_pts = pts + start_sample;
 
/* check if at least a part of the frame is before the end time */
if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
end_sample = frame->nb_samples;
} else {
drop = 1;
end_sample = 0;
 
if (s->end_sample != INT64_MAX &&
s->nb_samples < s->end_sample) {
drop = 0;
end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
}
 
if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
pts < s->end_pts) {
drop = 0;
end_sample = FFMAX(end_sample, s->end_pts - pts);
}
 
if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
drop = 0;
end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
}
 
if (drop) {
s->eof = inlink->closed = 1;
goto drop;
}
}
 
s->nb_samples += frame->nb_samples;
start_sample = FFMAX(0, start_sample);
end_sample = FFMIN(frame->nb_samples, end_sample);
av_assert0(start_sample < end_sample || (start_sample == end_sample && !frame->nb_samples));
 
if (start_sample) {
AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
if (!out) {
av_frame_free(&frame);
return AVERROR(ENOMEM);
}
 
av_frame_copy_props(out, frame);
av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
out->nb_samples, inlink->channels,
frame->format);
if (out->pts != AV_NOPTS_VALUE)
out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
inlink->time_base);
 
av_frame_free(&frame);
frame = out;
} else
frame->nb_samples = end_sample;
 
return ff_filter_frame(ctx->outputs[0], frame);
 
drop:
s->nb_samples += frame->nb_samples;
av_frame_free(&frame);
return 0;
}
 
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption atrim_options[] = {
COMMON_OPTS
{ "start_sample", "Number of the first audio sample that should be "
"passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
{ "end_sample", "Number of the first audio sample that should be "
"dropped again", OFFSET(end_sample), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
COMPAT_OPTS
{ NULL }
};
#undef FLAGS
 
AVFILTER_DEFINE_CLASS(atrim);
 
static const AVFilterPad atrim_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = atrim_filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad atrim_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_af_atrim = {
.name = "atrim",
.description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
.init = init,
.priv_size = sizeof(TrimContext),
.priv_class = &atrim_class,
.inputs = atrim_inputs,
.outputs = atrim_outputs,
};
#endif // CONFIG_ATRIM_FILTER
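 
/* Common usage (from the filter documentation; values illustrative):
 *   trim=60:120               keep the second minute of the input
 *   trim=duration=1           keep only the first second
 *   atrim=end_sample=44100    keep only the first 44100 audio samples
 */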
/contrib/sdk/sources/ffmpeg/libavfilter/unsharp.h
0,0 → 1,75
/*
* Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_UNSHARP_H
#define AVFILTER_UNSHARP_H
 
#include "config.h"
#include "avfilter.h"
#if CONFIG_OPENCL
#include "libavutil/opencl.h"
#endif
 
#define MIN_MATRIX_SIZE 3
#define MAX_MATRIX_SIZE 63
 
#if CONFIG_OPENCL
 
typedef struct {
cl_mem cl_luma_mask;
cl_mem cl_chroma_mask;
int in_plane_size[8];
int out_plane_size[8];
int plane_num;
cl_mem cl_inbuf;
size_t cl_inbuf_size;
cl_mem cl_outbuf;
size_t cl_outbuf_size;
AVOpenCLKernelEnv kernel_env;
} UnsharpOpenclContext;
 
#endif
 
typedef struct UnsharpFilterParam {
int msize_x; ///< matrix width
int msize_y; ///< matrix height
int amount; ///< effect amount
int steps_x; ///< horizontal step count
int steps_y; ///< vertical step count
int scalebits; ///< bits to shift pixel
int32_t halfscale; ///< amount to add to pixel
uint32_t *sc[MAX_MATRIX_SIZE - 1]; ///< finite state machine storage
} UnsharpFilterParam;
 
typedef struct {
const AVClass *class;
int lmsize_x, lmsize_y, cmsize_x, cmsize_y;
float lamount, camount;
UnsharpFilterParam luma; ///< luma parameters (width, height, amount)
UnsharpFilterParam chroma; ///< chroma parameters (width, height, amount)
int hsub, vsub;
int opencl;
#if CONFIG_OPENCL
UnsharpOpenclContext opencl_ctx;
#endif
int (* apply_unsharp)(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
} UnsharpContext;
 
#endif /* AVFILTER_UNSHARP_H */
/contrib/sdk/sources/ffmpeg/libavfilter/unsharp_opencl.c
0,0 → 1,283
/*
* Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* unsharp input video
*/
 
#include "unsharp_opencl.h"
#include "libavutil/common.h"
#include "libavutil/opencl_internal.h"
 
#define PLANE_NUM 3
 
static inline void add_mask_counter(uint32_t *dst, uint32_t *counter1, uint32_t *counter2, int len)
{
int i;
for (i = 0; i < len; i++) {
dst[i] = counter1[i] + counter2[i];
}
}
 
static int compute_mask(int step, uint32_t *mask)
{
int i, z, ret = 0;
int counter_size = sizeof(uint32_t) * (2 * step + 1);
uint32_t *temp1_counter = NULL, *temp2_counter = NULL, **counter = NULL;
temp1_counter = av_mallocz(counter_size);
if (!temp1_counter) {
ret = AVERROR(ENOMEM);
goto end;
}
temp2_counter = av_mallocz(counter_size);
if (!temp2_counter) {
ret = AVERROR(ENOMEM);
goto end;
}
counter = av_mallocz(sizeof(uint32_t *) * (2 * step + 1));
if (!counter) {
ret = AVERROR(ENOMEM);
goto end;
}
for (i = 0; i < 2 * step + 1; i++) {
counter[i] = av_mallocz(counter_size);
if (!counter[i]) {
ret = AVERROR(ENOMEM);
goto end;
}
}
for (i = 0; i < 2 * step + 1; i++) {
memset(temp1_counter, 0, counter_size);
temp1_counter[i] = 1;
for (z = 0; z < step * 2; z += 2) {
add_mask_counter(temp2_counter, counter[z], temp1_counter, step * 2);
memcpy(counter[z], temp1_counter, counter_size);
add_mask_counter(temp1_counter, counter[z + 1], temp2_counter, step * 2);
memcpy(counter[z + 1], temp2_counter, counter_size);
}
}
memcpy(mask, temp1_counter, counter_size);
end:
av_freep(&temp1_counter);
av_freep(&temp2_counter);
if (counter) {
for (i = 0; i < 2 * step + 1; i++)
av_freep(&counter[i]);
av_freep(&counter);
}
return ret;
}
 
static int compute_mask_matrix(cl_mem cl_mask_matrix, int step_x, int step_y)
{
int i, j, ret = 0;
uint32_t *mask_matrix, *mask_x, *mask_y;
size_t size_matrix = sizeof(uint32_t) * (2 * step_x + 1) * (2 * step_y + 1);
mask_x = av_mallocz(sizeof(uint32_t) * (2 * step_x + 1));
if (!mask_x) {
ret = AVERROR(ENOMEM);
goto end;
}
mask_y = av_mallocz(sizeof(uint32_t) * (2 * step_y + 1));
if (!mask_y) {
ret = AVERROR(ENOMEM);
goto end;
}
mask_matrix = av_mallocz(size_matrix);
if (!mask_matrix) {
ret = AVERROR(ENOMEM);
goto end;
}
ret = compute_mask(step_x, mask_x);
if (ret < 0)
goto end;
ret = compute_mask(step_y, mask_y);
if (ret < 0)
goto end;
for (j = 0; j < 2 * step_y + 1; j++) {
for (i = 0; i < 2 * step_x + 1; i++) {
mask_matrix[i + j * (2 * step_x + 1)] = mask_y[j] * mask_x[i];
}
}
ret = av_opencl_buffer_write(cl_mask_matrix, (uint8_t *)mask_matrix, size_matrix);
end:
av_freep(&mask_x);
av_freep(&mask_y);
av_freep(&mask_matrix);
return ret;
}
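
/*
 * Note (editor): the 2D mask is separable -- mask_matrix[j][i] =
 * mask_y[j] * mask_x[i]. For step_x = step_y = 1 (assuming the binomial
 * 1D masks sketched above) that gives:
 *     1 2 1
 *     2 4 2
 *     1 2 1
 * with total weight 16, still a power of two, so normalization remains a
 * shift rather than a division.
 */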
 
static int generate_mask(AVFilterContext *ctx)
{
UnsharpContext *unsharp = ctx->priv;
int i, ret = 0, step_x[2], step_y[2];
cl_mem mask_matrix[2];
mask_matrix[0] = unsharp->opencl_ctx.cl_luma_mask;
mask_matrix[1] = unsharp->opencl_ctx.cl_chroma_mask;
step_x[0] = unsharp->luma.steps_x;
step_x[1] = unsharp->chroma.steps_x;
step_y[0] = unsharp->luma.steps_y;
step_y[1] = unsharp->chroma.steps_y;
if (!mask_matrix[0] || !mask_matrix[1]) {
av_log(ctx, AV_LOG_ERROR, "Luma mask and chroma mask should not be NULL\n");
return AVERROR(EINVAL);
}
for (i = 0; i < 2; i++) {
ret = compute_mask_matrix(mask_matrix[i], step_x[i], step_y[i]);
if (ret < 0)
return ret;
}
return ret;
}
 
int ff_opencl_apply_unsharp(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
{
int ret;
AVFilterLink *link = ctx->inputs[0];
UnsharpContext *unsharp = ctx->priv;
cl_int status;
int cw = FF_CEIL_RSHIFT(link->w, unsharp->hsub);
int ch = FF_CEIL_RSHIFT(link->h, unsharp->vsub);
const size_t global_work_size = link->w * link->h + 2 * ch * cw;
FFOpenclParam opencl_param = {0};
 
opencl_param.ctx = ctx;
opencl_param.kernel = unsharp->opencl_ctx.kernel_env.kernel;
ret = ff_opencl_set_parameter(&opencl_param,
FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_inbuf),
FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_outbuf),
FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_luma_mask),
FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_chroma_mask),
FF_OPENCL_PARAM_INFO(unsharp->luma.amount),
FF_OPENCL_PARAM_INFO(unsharp->chroma.amount),
FF_OPENCL_PARAM_INFO(unsharp->luma.steps_x),
FF_OPENCL_PARAM_INFO(unsharp->luma.steps_y),
FF_OPENCL_PARAM_INFO(unsharp->chroma.steps_x),
FF_OPENCL_PARAM_INFO(unsharp->chroma.steps_y),
FF_OPENCL_PARAM_INFO(unsharp->luma.scalebits),
FF_OPENCL_PARAM_INFO(unsharp->chroma.scalebits),
FF_OPENCL_PARAM_INFO(unsharp->luma.halfscale),
FF_OPENCL_PARAM_INFO(unsharp->chroma.halfscale),
FF_OPENCL_PARAM_INFO(in->linesize[0]),
FF_OPENCL_PARAM_INFO(in->linesize[1]),
FF_OPENCL_PARAM_INFO(out->linesize[0]),
FF_OPENCL_PARAM_INFO(out->linesize[1]),
FF_OPENCL_PARAM_INFO(link->h),
FF_OPENCL_PARAM_INFO(link->w),
FF_OPENCL_PARAM_INFO(ch),
FF_OPENCL_PARAM_INFO(cw),
NULL);
if (ret < 0)
return ret;
status = clEnqueueNDRangeKernel(unsharp->opencl_ctx.kernel_env.command_queue,
unsharp->opencl_ctx.kernel_env.kernel, 1, NULL,
&global_work_size, NULL, 0, NULL, NULL);
if (status != CL_SUCCESS) {
av_log(ctx, AV_LOG_ERROR, "OpenCL run kernel error occurred: %s\n", av_opencl_errstr(status));
return AVERROR_EXTERNAL;
}
clFinish(unsharp->opencl_ctx.kernel_env.command_queue);
return av_opencl_buffer_read_image(out->data, unsharp->opencl_ctx.out_plane_size,
unsharp->opencl_ctx.plane_num, unsharp->opencl_ctx.cl_outbuf,
unsharp->opencl_ctx.cl_outbuf_size);
}
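
/*
 * Note (editor): the launch above uses a 1D NDRange of
 * link->w * link->h + 2 * ch * cw work-items, one per output pixel, laid
 * out as [luma | chroma U | chroma V]; the kernel recovers the plane and
 * (x, y) from get_global_id(0) alone. E.g. for 1920x1080 YUV420 (hsub =
 * vsub = 1): cw = 960, ch = 540, so 2073600 luma items followed by
 * 2 * 518400 chroma items.
 */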
 
int ff_opencl_unsharp_init(AVFilterContext *ctx)
{
int ret = 0;
UnsharpContext *unsharp = ctx->priv;
ret = av_opencl_init(NULL);
if (ret < 0)
return ret;
ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_luma_mask,
sizeof(uint32_t) * (2 * unsharp->luma.steps_x + 1) * (2 * unsharp->luma.steps_y + 1),
CL_MEM_READ_ONLY, NULL);
if (ret < 0)
return ret;
ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_chroma_mask,
sizeof(uint32_t) * (2 * unsharp->chroma.steps_x + 1) * (2 * unsharp->chroma.steps_y + 1),
CL_MEM_READ_ONLY, NULL);
if (ret < 0)
return ret;
ret = generate_mask(ctx);
if (ret < 0)
return ret;
unsharp->opencl_ctx.plane_num = PLANE_NUM;
if (!unsharp->opencl_ctx.kernel_env.kernel) {
ret = av_opencl_create_kernel(&unsharp->opencl_ctx.kernel_env, "unsharp");
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel with name 'unsharp'\n");
return ret;
}
}
return ret;
}
 
void ff_opencl_unsharp_uninit(AVFilterContext *ctx)
{
UnsharpContext *unsharp = ctx->priv;
av_opencl_buffer_release(&unsharp->opencl_ctx.cl_inbuf);
av_opencl_buffer_release(&unsharp->opencl_ctx.cl_outbuf);
av_opencl_buffer_release(&unsharp->opencl_ctx.cl_luma_mask);
av_opencl_buffer_release(&unsharp->opencl_ctx.cl_chroma_mask);
av_opencl_release_kernel(&unsharp->opencl_ctx.kernel_env);
av_opencl_uninit();
}
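
/*
 * Note (editor): the helper below packs all three planes back to back
 * into a single OpenCL buffer, sizing each plane as linesize * height
 * rather than width * height, so any row padding in the AVFrame is
 * carried into the device buffer and the kernel's stride arithmetic
 * stays valid.
 */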
 
int ff_opencl_unsharp_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
{
int ret = 0;
AVFilterLink *link = ctx->inputs[0];
UnsharpContext *unsharp = ctx->priv;
int ch = FF_CEIL_RSHIFT(link->h, unsharp->vsub);
 
if ((!unsharp->opencl_ctx.cl_inbuf) || (!unsharp->opencl_ctx.cl_outbuf)) {
unsharp->opencl_ctx.in_plane_size[0] = (in->linesize[0] * in->height);
unsharp->opencl_ctx.in_plane_size[1] = (in->linesize[1] * ch);
unsharp->opencl_ctx.in_plane_size[2] = (in->linesize[2] * ch);
unsharp->opencl_ctx.out_plane_size[0] = (out->linesize[0] * out->height);
unsharp->opencl_ctx.out_plane_size[1] = (out->linesize[1] * ch);
unsharp->opencl_ctx.out_plane_size[2] = (out->linesize[2] * ch);
unsharp->opencl_ctx.cl_inbuf_size = unsharp->opencl_ctx.in_plane_size[0] +
unsharp->opencl_ctx.in_plane_size[1] +
unsharp->opencl_ctx.in_plane_size[2];
unsharp->opencl_ctx.cl_outbuf_size = unsharp->opencl_ctx.out_plane_size[0] +
unsharp->opencl_ctx.out_plane_size[1] +
unsharp->opencl_ctx.out_plane_size[2];
if (!unsharp->opencl_ctx.cl_inbuf) {
ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_inbuf,
unsharp->opencl_ctx.cl_inbuf_size,
CL_MEM_READ_ONLY, NULL);
if (ret < 0)
return ret;
}
if (!unsharp->opencl_ctx.cl_outbuf) {
ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_outbuf,
unsharp->opencl_ctx.cl_outbuf_size,
CL_MEM_READ_WRITE, NULL);
if (ret < 0)
return ret;
}
}
return av_opencl_buffer_write_image(unsharp->opencl_ctx.cl_inbuf,
unsharp->opencl_ctx.cl_inbuf_size,
0, in->data, unsharp->opencl_ctx.in_plane_size,
unsharp->opencl_ctx.plane_num);
}
/contrib/sdk/sources/ffmpeg/libavfilter/unsharp_opencl.h
0,0 → 1,34
/*
* Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_UNSHARP_OPENCL_H
#define AVFILTER_UNSHARP_OPENCL_H
 
#include "unsharp.h"
 
int ff_opencl_unsharp_init(AVFilterContext *ctx);
 
void ff_opencl_unsharp_uninit(AVFilterContext *ctx);
 
int ff_opencl_unsharp_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
 
int ff_opencl_apply_unsharp(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
 
#endif /* AVFILTER_UNSHARP_OPENCL_H */
/contrib/sdk/sources/ffmpeg/libavfilter/unsharp_opencl_kernel.h
0,0 → 1,137
/*
* Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_UNSHARP_OPENCL_KERNEL_H
#define AVFILTER_UNSHARP_OPENCL_KERNEL_H
 
#include "libavutil/opencl.h"
 
const char *ff_kernel_unsharp_opencl = AV_OPENCL_KERNEL(
inline unsigned char clip_uint8(int a)
{
if (a & (~0xFF))
return (-a)>>31;
else
return a;
}
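
/*
 * Note (editor): branchless clamp to [0, 255] -- if any bit above the
 * low byte is set, a is out of range: for a < 0, (-a) is positive so
 * (-a) >> 31 == 0; for a > 255, (-a) is negative and the arithmetic
 * shift yields -1, which truncates to 255 in the unsigned char return.
 */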
 
kernel void unsharp(global unsigned char *src,
global unsigned char *dst,
const global unsigned int *mask_lu,
const global unsigned int *mask_ch,
int amount_lu,
int amount_ch,
int step_x_lu,
int step_y_lu,
int step_x_ch,
int step_y_ch,
int scalebits_lu,
int scalebits_ch,
int halfscale_lu,
int halfscale_ch,
int src_stride_lu,
int src_stride_ch,
int dst_stride_lu,
int dst_stride_ch,
int height,
int width,
int ch,
int cw)
{
global unsigned char *dst_y = dst;
global unsigned char *dst_u = dst_y + height * dst_stride_lu;
global unsigned char *dst_v = dst_u + ch * dst_stride_ch;
 
global unsigned char *src_y = src;
global unsigned char *src_u = src_y + height * src_stride_lu;
global unsigned char *src_v = src_u + ch * src_stride_ch;
 
global unsigned char *temp_dst;
global unsigned char *temp_src;
const global unsigned int *temp_mask;
int global_id = get_global_id(0);
int i, j, x, y, temp_src_stride, temp_dst_stride, temp_height, temp_width, temp_steps_x, temp_steps_y,
temp_amount, temp_scalebits, temp_halfscale, sum, idx_x, idx_y, temp, res;
if (global_id < width * height) {
y = global_id / width;
x = global_id % width;
temp_dst = dst_y;
temp_src = src_y;
temp_src_stride = src_stride_lu;
temp_dst_stride = dst_stride_lu;
temp_height = height;
temp_width = width;
temp_steps_x = step_x_lu;
temp_steps_y = step_y_lu;
temp_mask = mask_lu;
temp_amount = amount_lu;
temp_scalebits = scalebits_lu;
temp_halfscale = halfscale_lu;
} else if ((global_id >= width * height) && (global_id < width * height + ch * cw)) {
y = (global_id - width * height) / cw;
x = (global_id - width * height) % cw;
temp_dst = dst_u;
temp_src = src_u;
temp_src_stride = src_stride_ch;
temp_dst_stride = dst_stride_ch;
temp_height = ch;
temp_width = cw;
temp_steps_x = step_x_ch;
temp_steps_y = step_y_ch;
temp_mask = mask_ch;
temp_amount = amount_ch;
temp_scalebits = scalebits_ch;
temp_halfscale = halfscale_ch;
} else {
y = (global_id - width * height - ch * cw) / cw;
x = (global_id - width * height - ch * cw) % cw;
temp_dst = dst_v;
temp_src = src_v;
temp_src_stride = src_stride_ch;
temp_dst_stride = dst_stride_ch;
temp_height = ch;
temp_width = cw;
temp_steps_x = step_x_ch;
temp_steps_y = step_y_ch;
temp_mask = mask_ch;
temp_amount = amount_ch;
temp_scalebits = scalebits_ch;
temp_halfscale = halfscale_ch;
}
if (temp_amount) {
sum = 0;
for (j = 0; j <= 2 * temp_steps_y; j++) {
idx_y = (y - temp_steps_y + j) <= 0 ? 0 : (y - temp_steps_y + j) >= temp_height ? temp_height-1 : y - temp_steps_y + j;
for (i = 0; i <= 2 * temp_steps_x; i++) {
idx_x = (x - temp_steps_x + i) <= 0 ? 0 : (x - temp_steps_x + i) >= temp_width ? temp_width-1 : x - temp_steps_x + i;
sum += temp_mask[i + j * (2 * temp_steps_x + 1)] * temp_src[idx_x + idx_y * temp_src_stride];
}
}
temp = (int)temp_src[x + y * temp_src_stride];
res = temp + (((temp - (int)((sum + temp_halfscale) >> temp_scalebits)) * temp_amount) >> 16);
temp_dst[x + y * temp_dst_stride] = clip_uint8(res);
} else {
temp_dst[x + y * temp_dst_stride] = temp_src[x + y * temp_src_stride];
}
}
 
);
 
#endif /* AVFILTER_UNSHARP_OPENCL_KERNEL_H */
/contrib/sdk/sources/ffmpeg/libavfilter/version.h
0,0 → 1,89
/*
* Version macros.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_VERSION_H
#define AVFILTER_VERSION_H
 
/**
* @file
* @ingroup lavfi
* Libavfilter version macros
*/
 
#include "libavutil/avutil.h"
 
#define LIBAVFILTER_VERSION_MAJOR 3
#define LIBAVFILTER_VERSION_MINOR 90
#define LIBAVFILTER_VERSION_MICRO 100
 
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \
LIBAVFILTER_VERSION_MICRO)
#define LIBAVFILTER_VERSION AV_VERSION(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \
LIBAVFILTER_VERSION_MICRO)
#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT
 
#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION)
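
/*
 * Note (editor): AV_VERSION_INT packs the three components as
 * (major << 16) | (minor << 8) | micro, so 3.90.100 becomes
 * (3 << 16) | (90 << 8) | 100 = 0x035A64, making version checks a
 * single integer comparison.
 */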
 
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*/
 
#ifndef FF_API_AVFILTERPAD_PUBLIC
#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_FOO_COUNT
#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_FILL_FRAME
#define FF_API_FILL_FRAME (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_BUFFERSRC_BUFFER
#define FF_API_BUFFERSRC_BUFFER (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_AVFILTERBUFFER
#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_OLD_FILTER_OPTS
#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_ACONVERT_FILTER
#define FF_API_ACONVERT_FILTER (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_AVFILTER_OPEN
#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_AVFILTER_INIT_FILTER
#define FF_API_AVFILTER_INIT_FILTER (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_OLD_FILTER_REGISTER
#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_OLD_GRAPH_PARSE
#define FF_API_OLD_GRAPH_PARSE (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_DRAWTEXT_OLD_TIMELINE
#define FF_API_DRAWTEXT_OLD_TIMELINE (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
 
#endif /* AVFILTER_VERSION_H */
/contrib/sdk/sources/ffmpeg/libavfilter/vf_alphamerge.c
0,0 → 1,207
/*
* Copyright (c) 2012 Steven Robertson
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* copy the luma values of a second input into the alpha component of the first
*/
 
#include <string.h>
 
#include "libavutil/pixfmt.h"
#include "avfilter.h"
#include "bufferqueue.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
enum { Y, U, V, A };
 
typedef struct {
int frame_requested;
int is_packed_rgb;
uint8_t rgba_map[4];
struct FFBufQueue queue_main;
struct FFBufQueue queue_alpha;
} AlphaMergeContext;
 
static av_cold void uninit(AVFilterContext *ctx)
{
AlphaMergeContext *merge = ctx->priv;
ff_bufqueue_discard_all(&merge->queue_main);
ff_bufqueue_discard_all(&merge->queue_alpha);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat main_fmts[] = {
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
AV_PIX_FMT_NONE
};
static const enum AVPixelFormat alpha_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
AVFilterFormats *main_formats = ff_make_format_list(main_fmts);
AVFilterFormats *alpha_formats = ff_make_format_list(alpha_fmts);
ff_formats_ref(main_formats, &ctx->inputs[0]->out_formats);
ff_formats_ref(alpha_formats, &ctx->inputs[1]->out_formats);
ff_formats_ref(main_formats, &ctx->outputs[0]->in_formats);
return 0;
}
 
static int config_input_main(AVFilterLink *inlink)
{
AlphaMergeContext *merge = inlink->dst->priv;
merge->is_packed_rgb =
ff_fill_rgba_map(merge->rgba_map, inlink->format) >= 0;
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *mainlink = ctx->inputs[0];
AVFilterLink *alphalink = ctx->inputs[1];
if (mainlink->w != alphalink->w || mainlink->h != alphalink->h) {
av_log(ctx, AV_LOG_ERROR,
"Input frame sizes do not match (%dx%d vs %dx%d).\n",
mainlink->w, mainlink->h,
alphalink->w, alphalink->h);
return AVERROR(EINVAL);
}
 
outlink->w = mainlink->w;
outlink->h = mainlink->h;
outlink->time_base = mainlink->time_base;
outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
outlink->frame_rate = mainlink->frame_rate;
return 0;
}
 
static void draw_frame(AVFilterContext *ctx,
AVFrame *main_buf,
AVFrame *alpha_buf)
{
AlphaMergeContext *merge = ctx->priv;
int h = main_buf->height;
 
if (merge->is_packed_rgb) {
int x, y;
uint8_t *pin, *pout;
for (y = 0; y < h; y++) {
pin = alpha_buf->data[0] + y * alpha_buf->linesize[0];
pout = main_buf->data[0] + y * main_buf->linesize[0] + merge->rgba_map[A];
for (x = 0; x < main_buf->width; x++) {
*pout = *pin;
pin += 1;
pout += 4;
}
}
} else {
int y;
const int main_linesize = main_buf->linesize[A];
const int alpha_linesize = alpha_buf->linesize[Y];
for (y = 0; y < h && y < alpha_buf->height; y++) {
memcpy(main_buf->data[A] + y * main_linesize,
alpha_buf->data[Y] + y * alpha_linesize,
FFMIN(main_linesize, alpha_linesize));
}
}
}
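
/*
 * Note (editor): two write paths above. For packed RGB the alpha byte of
 * every pixel sits at offset rgba_map[A] within each 4-byte group (e.g.
 * offset 3 for RGBA/BGRA, offset 0 for ARGB/ABGR), hence the pout += 4
 * stride. For planar YUVA the whole gray input plane is memcpy'd into
 * the alpha plane row by row.
 */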
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
AlphaMergeContext *merge = ctx->priv;
 
int ret = 0;
int is_alpha = (inlink == ctx->inputs[1]);
struct FFBufQueue *queue =
(is_alpha ? &merge->queue_alpha : &merge->queue_main);
ff_bufqueue_add(ctx, queue, buf);
 
do {
AVFrame *main_buf, *alpha_buf;
 
if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
!ff_bufqueue_peek(&merge->queue_alpha, 0)) break;
 
main_buf = ff_bufqueue_get(&merge->queue_main);
alpha_buf = ff_bufqueue_get(&merge->queue_alpha);
 
merge->frame_requested = 0;
draw_frame(ctx, main_buf, alpha_buf);
ret = ff_filter_frame(ctx->outputs[0], main_buf);
av_frame_free(&alpha_buf);
} while (ret >= 0);
return ret;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AlphaMergeContext *merge = ctx->priv;
int in, ret;
 
merge->frame_requested = 1;
while (merge->frame_requested) {
/* pull from the alpha input once main already has a queued frame, else from main */
in = ff_bufqueue_peek(&merge->queue_main, 0) ? 1 : 0;
ret = ff_request_frame(ctx->inputs[in]);
if (ret < 0)
return ret;
}
return 0;
}
 
static const AVFilterPad alphamerge_inputs[] = {
{
.name = "main",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_main,
.filter_frame = filter_frame,
.needs_writable = 1,
},{
.name = "alpha",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad alphamerge_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_alphamerge = {
.name = "alphamerge",
.description = NULL_IF_CONFIG_SMALL("Copy the luma value of the second "
"input into the alpha channel of the first input."),
.uninit = uninit,
.priv_size = sizeof(AlphaMergeContext),
.query_formats = query_formats,
.inputs = alphamerge_inputs,
.outputs = alphamerge_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_aspect.c
0,0 → 1,231
/*
* Copyright (c) 2010 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* aspect ratio modification video filters
*/
 
#include <float.h>
 
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
 
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
AVRational dar;
AVRational sar;
int max;
#if FF_API_OLD_FILTER_OPTS
float aspect_den;
#endif
char *ratio_str;
} AspectContext;
 
static av_cold int init(AVFilterContext *ctx)
{
AspectContext *s = ctx->priv;
int ret;
 
#if FF_API_OLD_FILTER_OPTS
if (s->ratio_str && s->aspect_den > 0) {
double num;
av_log(ctx, AV_LOG_WARNING,
"num:den syntax is deprecated, please use num/den or named options instead\n");
ret = av_expr_parse_and_eval(&num, s->ratio_str, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, 0, ctx);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR, "Unable to parse ratio numerator \"%s\"\n", s->ratio_str);
return AVERROR(EINVAL);
}
s->sar = s->dar = av_d2q(num / s->aspect_den, s->max);
} else
#endif
if (s->ratio_str) {
ret = av_parse_ratio(&s->sar, s->ratio_str, s->max, 0, ctx);
if (ret < 0 || s->sar.num < 0 || s->sar.den <= 0) {
av_log(ctx, AV_LOG_ERROR,
"Invalid string '%s' for aspect ratio\n", s->ratio_str);
return AVERROR(EINVAL);
}
s->dar = s->sar;
}
return 0;
}
 
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AspectContext *s = link->dst->priv;
 
frame->sample_aspect_ratio = s->sar;
return ff_filter_frame(link->dst->outputs[0], frame);
}
 
#define OFFSET(x) offsetof(AspectContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static inline void compute_dar(AVRational *dar, AVRational sar, int w, int h)
{
if (sar.num && sar.den) {
av_reduce(&dar->num, &dar->den, sar.num * w, sar.den * h, INT_MAX);
} else {
av_reduce(&dar->num, &dar->den, w, h, INT_MAX);
}
}
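
/*
 * Note (editor): worked example of compute_dar(): a 1440x1080 frame with
 * SAR 4:3 gives dar = reduce(4 * 1440, 3 * 1080) = 5760/3240 = 16/9;
 * with an unset SAR the frame dimensions are reduced directly, e.g.
 * 1920x1080 -> 16/9.
 */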
 
#if CONFIG_SETDAR_FILTER
 
static int setdar_config_props(AVFilterLink *inlink)
{
AspectContext *s = inlink->dst->priv;
AVRational dar;
AVRational old_dar;
AVRational old_sar = inlink->sample_aspect_ratio;
 
if (s->dar.num && s->dar.den) {
av_reduce(&s->sar.num, &s->sar.den,
s->dar.num * inlink->h,
s->dar.den * inlink->w, INT_MAX);
inlink->sample_aspect_ratio = s->sar;
dar = s->dar;
} else {
inlink->sample_aspect_ratio = (AVRational){ 1, 1 };
dar = (AVRational){ inlink->w, inlink->h };
}
 
compute_dar(&old_dar, old_sar, inlink->w, inlink->h);
av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d dar:%d/%d sar:%d/%d -> dar:%d/%d sar:%d/%d\n",
inlink->w, inlink->h, old_dar.num, old_dar.den, old_sar.num, old_sar.den,
dar.num, dar.den, inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den);
 
return 0;
}
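
/*
 * Note (editor): setdar derives the SAR from the requested DAR and the
 * frame geometry: sar = dar * h / w. E.g. setdar=16/9 on a 720x576 frame
 * gives sar = reduce(16 * 576, 9 * 720) = 9216/6480 = 64/45, the
 * familiar PAL widescreen pixel aspect.
 */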
 
static const AVOption setdar_options[] = {
{ "dar", "set display aspect ratio", OFFSET(ratio_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags=FLAGS },
{ "ratio", "set display aspect ratio", OFFSET(ratio_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags=FLAGS },
{ "r", "set display aspect ratio", OFFSET(ratio_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags=FLAGS },
#if FF_API_OLD_FILTER_OPTS
{ "dar_den", NULL, OFFSET(aspect_den), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
#endif
{ "max", "set max value for nominator or denominator in the ratio", OFFSET(max), AV_OPT_TYPE_INT, {.i64=100}, 1, INT_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(setdar);
 
static const AVFilterPad avfilter_vf_setdar_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = setdar_config_props,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_setdar_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_setdar = {
.name = "setdar",
.description = NULL_IF_CONFIG_SMALL("Set the frame display aspect ratio."),
.init = init,
.priv_size = sizeof(AspectContext),
.priv_class = &setdar_class,
.inputs = avfilter_vf_setdar_inputs,
.outputs = avfilter_vf_setdar_outputs,
};
 
#endif /* CONFIG_SETDAR_FILTER */
 
#if CONFIG_SETSAR_FILTER
 
static int setsar_config_props(AVFilterLink *inlink)
{
AspectContext *s = inlink->dst->priv;
AVRational old_sar = inlink->sample_aspect_ratio;
AVRational old_dar, dar;
 
inlink->sample_aspect_ratio = s->sar;
 
compute_dar(&old_dar, old_sar, inlink->w, inlink->h);
compute_dar(&dar, s->sar, inlink->w, inlink->h);
av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d dar:%d/%d -> sar:%d/%d dar:%d/%d\n",
inlink->w, inlink->h, old_sar.num, old_sar.den, old_dar.num, old_dar.den,
inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den, dar.num, dar.den);
 
return 0;
}
 
static const AVOption setsar_options[] = {
{ "sar", "set sample (pixel) aspect ratio", OFFSET(ratio_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags=FLAGS },
{ "ratio", "set sample (pixel) aspect ratio", OFFSET(ratio_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags=FLAGS },
{ "r", "set sample (pixel) aspect ratio", OFFSET(ratio_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags=FLAGS },
#if FF_API_OLD_FILTER_OPTS
{ "sar_den", NULL, OFFSET(aspect_den), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
#endif
{ "max", "set max value for nominator or denominator in the ratio", OFFSET(max), AV_OPT_TYPE_INT, {.i64=100}, 1, INT_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(setsar);
 
static const AVFilterPad avfilter_vf_setsar_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = setsar_config_props,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_setsar_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_setsar = {
.name = "setsar",
.description = NULL_IF_CONFIG_SMALL("Set the pixel sample aspect ratio."),
.init = init,
.priv_size = sizeof(AspectContext),
.priv_class = &setsar_class,
.inputs = avfilter_vf_setsar_inputs,
.outputs = avfilter_vf_setsar_outputs,
};
 
#endif /* CONFIG_SETSAR_FILTER */
/contrib/sdk/sources/ffmpeg/libavfilter/vf_bbox.c
0,0 → 1,134
/*
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* bounding box detection filter
*/
 
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "bbox.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
int min_val;
} BBoxContext;
 
#define OFFSET(x) offsetof(BBoxContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption bbox_options[] = {
{ "min_val", "set minimum luminance value for bounding box", OFFSET(min_val), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, 254, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(bbox);
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV411P,
AV_PIX_FMT_NONE,
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
#define SET_META(key, value) \
snprintf(buf, sizeof(buf), "%d", value); \
av_dict_set(metadata, key, buf, 0);
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
BBoxContext *bbox = ctx->priv;
FFBoundingBox box;
int has_bbox, w, h;
char buf[32];
 
has_bbox =
ff_calculate_bounding_box(&box,
frame->data[0], frame->linesize[0],
inlink->w, inlink->h, bbox->min_val);
w = box.x2 - box.x1 + 1;
h = box.y2 - box.y1 + 1;
 
av_log(ctx, AV_LOG_INFO,
"n:%"PRId64" pts:%s pts_time:%s", inlink->frame_count,
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
 
if (has_bbox) {
AVDictionary **metadata = avpriv_frame_get_metadatap(frame);
 
SET_META("lavfi.bbox.x1", box.x1)
SET_META("lavfi.bbox.x2", box.x2)
SET_META("lavfi.bbox.y1", box.y1)
SET_META("lavfi.bbox.y2", box.y2)
SET_META("lavfi.bbox.w", w)
SET_META("lavfi.bbox.h", h)
 
av_log(ctx, AV_LOG_INFO,
" x1:%d x2:%d y1:%d y2:%d w:%d h:%d"
" crop=%d:%d:%d:%d drawbox=%d:%d:%d:%d",
box.x1, box.x2, box.y1, box.y2, w, h,
w, h, box.x1, box.y1, /* crop params */
box.x1, box.y1, w, h); /* drawbox params */
}
av_log(ctx, AV_LOG_INFO, "\n");
 
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
 
static const AVFilterPad bbox_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad bbox_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_bbox = {
.name = "bbox",
.description = NULL_IF_CONFIG_SMALL("Compute bounding box for each frame."),
.priv_size = sizeof(BBoxContext),
.priv_class = &bbox_class,
.query_formats = query_formats,
.inputs = bbox_inputs,
.outputs = bbox_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_blackdetect.c
0,0 → 1,204
/*
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Video black detector, loosely based on blackframe with extended
* syntax and features
*/
 
#include <float.h>
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
double black_min_duration_time; ///< minimum duration of detected black, in seconds
int64_t black_min_duration; ///< minimum duration of detected black, expressed in timebase units
int64_t black_start; ///< pts start time of the first black picture
int64_t black_end; ///< pts end time of the last black picture
int64_t last_picref_pts; ///< pts of the last input picture
int black_started;
 
double picture_black_ratio_th;
double pixel_black_th;
unsigned int pixel_black_th_i;
 
unsigned int nb_black_pixels; ///< number of black pixels counted so far
} BlackDetectContext;
 
#define OFFSET(x) offsetof(BlackDetectContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption blackdetect_options[] = {
{ "d", "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX, FLAGS },
{ "black_min_duration", "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX, FLAGS },
{ "picture_black_ratio_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS },
{ "pic_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS },
{ "pixel_black_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
{ "pix_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(blackdetect);
 
#define YUVJ_FORMATS \
AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P
 
static enum AVPixelFormat yuvj_formats[] = {
YUVJ_FORMATS, AV_PIX_FMT_NONE
};
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
YUVJ_FORMATS,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
BlackDetectContext *blackdetect = ctx->priv;
 
blackdetect->black_min_duration =
blackdetect->black_min_duration_time / av_q2d(inlink->time_base);
 
/* threshold = luminance_minimum_value + pixel_black_th * luminance_range_size */
blackdetect->pixel_black_th_i = ff_fmt_is_in(inlink->format, yuvj_formats) ?
blackdetect->pixel_black_th * 255 : /* full-range (JPEG) YUV: 0..255 */
16 + blackdetect->pixel_black_th * (235 - 16); /* limited-range: 16..235 */
 
av_log(blackdetect, AV_LOG_VERBOSE,
"black_min_duration:%s pixel_black_th:%f pixel_black_th_i:%d picture_black_ratio_th:%f\n",
av_ts2timestr(blackdetect->black_min_duration, &inlink->time_base),
blackdetect->pixel_black_th, blackdetect->pixel_black_th_i,
blackdetect->picture_black_ratio_th);
return 0;
}
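
/*
 * Note (editor): with the default pixel_black_th = 0.10 the integer
 * threshold works out to 0.10 * 255 = 25 for full-range (YUVJ) input and
 * 16 + 0.10 * 219 = 37 for limited-range input (both truncated toward
 * zero when stored in pixel_black_th_i).
 */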
 
static void check_black_end(AVFilterContext *ctx)
{
BlackDetectContext *blackdetect = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
 
if ((blackdetect->black_end - blackdetect->black_start) >= blackdetect->black_min_duration) {
av_log(blackdetect, AV_LOG_INFO,
"black_start:%s black_end:%s black_duration:%s\n",
av_ts2timestr(blackdetect->black_start, &inlink->time_base),
av_ts2timestr(blackdetect->black_end, &inlink->time_base),
av_ts2timestr(blackdetect->black_end - blackdetect->black_start, &inlink->time_base));
}
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
BlackDetectContext *blackdetect = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
int ret = ff_request_frame(inlink);
 
if (ret == AVERROR_EOF && blackdetect->black_started) {
// FIXME: black_end should be set to last_picref_pts + last_picref_duration
blackdetect->black_end = blackdetect->last_picref_pts;
check_black_end(ctx);
}
return ret;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
AVFilterContext *ctx = inlink->dst;
BlackDetectContext *blackdetect = ctx->priv;
double picture_black_ratio = 0;
const uint8_t *p = picref->data[0];
int x, i;
 
for (i = 0; i < inlink->h; i++) {
for (x = 0; x < inlink->w; x++)
blackdetect->nb_black_pixels += p[x] <= blackdetect->pixel_black_th_i;
p += picref->linesize[0];
}
 
picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h);
 
av_log(ctx, AV_LOG_DEBUG,
"frame:%"PRId64" picture_black_ratio:%f pts:%s t:%s type:%c\n",
inlink->frame_count, picture_black_ratio,
av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
av_get_picture_type_char(picref->pict_type));
 
if (picture_black_ratio >= blackdetect->picture_black_ratio_th) {
if (!blackdetect->black_started) {
/* black starts here */
blackdetect->black_started = 1;
blackdetect->black_start = picref->pts;
}
} else if (blackdetect->black_started) {
/* black ends here */
blackdetect->black_started = 0;
blackdetect->black_end = picref->pts;
check_black_end(ctx);
}
 
blackdetect->last_picref_pts = picref->pts;
blackdetect->nb_black_pixels = 0;
return ff_filter_frame(inlink->dst->outputs[0], picref);
}
 
static const AVFilterPad blackdetect_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad blackdetect_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_blackdetect = {
.name = "blackdetect",
.description = NULL_IF_CONFIG_SMALL("Detect video intervals that are (almost) black."),
.priv_size = sizeof(BlackDetectContext),
.query_formats = query_formats,
.inputs = blackdetect_inputs,
.outputs = blackdetect_outputs,
.priv_class = &blackdetect_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_blackframe.c
0,0 → 1,130
/*
* Copyright (c) 2010 Stefano Sabatini
* Copyright (c) 2006 Ivo van Poorten
* Copyright (c) 2006 Julian Hall
* Copyright (c) 2002-2003 Brian J. Murrell
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* Search for black frames to detect scene transitions.
* Ported from MPlayer libmpcodecs/vf_blackframe.c.
*/
 
#include <stdio.h>
#include <inttypes.h>
 
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
int bamount; ///< black amount
int bthresh; ///< black threshold
unsigned int frame; ///< frame number
unsigned int nblack; ///< number of black pixels counted so far
unsigned int last_keyframe; ///< frame number of the last received key-frame
} BlackFrameContext;
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NV12,
AV_PIX_FMT_NV21, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
BlackFrameContext *s = ctx->priv;
int x, i;
int pblack = 0;
uint8_t *p = frame->data[0];
 
for (i = 0; i < frame->height; i++) {
for (x = 0; x < inlink->w; x++)
s->nblack += p[x] < s->bthresh;
p += frame->linesize[0];
}
 
if (frame->key_frame)
s->last_keyframe = s->frame;
 
pblack = s->nblack * 100 / (inlink->w * inlink->h);
if (pblack >= s->bamount)
av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f "
"type:%c last_keyframe:%d\n",
s->frame, pblack, frame->pts,
frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
av_get_picture_type_char(frame->pict_type), s->last_keyframe);
 
s->frame++;
s->nblack = 0;
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
 
#define OFFSET(x) offsetof(BlackFrameContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption blackframe_options[] = {
{ "amount", "Percentage of the pixels that have to be below the threshold "
"for the frame to be considered black.", OFFSET(bamount), AV_OPT_TYPE_INT, { .i64 = 98 }, 0, 100, FLAGS },
{ "threshold", "threshold below which a pixel value is considered black",
OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 255, FLAGS },
{ "thresh", "threshold below which a pixel value is considered black",
OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 255, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(blackframe);
 
static const AVFilterPad avfilter_vf_blackframe_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_blackframe_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO
},
{ NULL }
};
 
AVFilter avfilter_vf_blackframe = {
.name = "blackframe",
.description = NULL_IF_CONFIG_SMALL("Detect frames that are (almost) black."),
.priv_size = sizeof(BlackFrameContext),
.priv_class = &blackframe_class,
.query_formats = query_formats,
.inputs = avfilter_vf_blackframe_inputs,
.outputs = avfilter_vf_blackframe_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_blend.c
0,0 → 1,465
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/imgutils.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixfmt.h"
#include "avfilter.h"
#include "bufferqueue.h"
#include "formats.h"
#include "internal.h"
#include "dualinput.h"
#include "video.h"
 
#define TOP 0
#define BOTTOM 1
 
enum BlendMode {
BLEND_UNSET = -1,
BLEND_NORMAL,
BLEND_ADDITION,
BLEND_AND,
BLEND_AVERAGE,
BLEND_BURN,
BLEND_DARKEN,
BLEND_DIFFERENCE,
BLEND_DIVIDE,
BLEND_DODGE,
BLEND_EXCLUSION,
BLEND_HARDLIGHT,
BLEND_LIGHTEN,
BLEND_MULTIPLY,
BLEND_NEGATION,
BLEND_OR,
BLEND_OVERLAY,
BLEND_PHOENIX,
BLEND_PINLIGHT,
BLEND_REFLECT,
BLEND_SCREEN,
BLEND_SOFTLIGHT,
BLEND_SUBTRACT,
BLEND_VIVIDLIGHT,
BLEND_XOR,
BLEND_NB
};
 
static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL };
enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };
 
typedef struct FilterParams {
enum BlendMode mode;
double opacity;
AVExpr *e;
char *expr_str;
void (*blend)(const uint8_t *top, int top_linesize,
const uint8_t *bottom, int bottom_linesize,
uint8_t *dst, int dst_linesize,
int width, int start, int end,
struct FilterParams *param, double *values);
} FilterParams;
 
typedef struct ThreadData {
const AVFrame *top, *bottom;
AVFrame *dst;
AVFilterLink *inlink;
int plane;
int w, h;
FilterParams *param;
} ThreadData;
 
typedef struct {
const AVClass *class;
FFDualInputContext dinput;
int hsub, vsub; ///< chroma subsampling values
int nb_planes;
char *all_expr;
enum BlendMode all_mode;
double all_opacity;
 
FilterParams params[4];
} BlendContext;
 
#define OFFSET(x) offsetof(BlendContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption blend_options[] = {
{ "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
{ "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
{ "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
{ "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
{ "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, "mode"},
{ "addition", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION}, 0, 0, FLAGS, "mode" },
{ "and", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND}, 0, 0, FLAGS, "mode" },
{ "average", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE}, 0, 0, FLAGS, "mode" },
{ "burn", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN}, 0, 0, FLAGS, "mode" },
{ "darken", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN}, 0, 0, FLAGS, "mode" },
{ "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, "mode" },
{ "divide", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE}, 0, 0, FLAGS, "mode" },
{ "dodge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE}, 0, 0, FLAGS, "mode" },
{ "exclusion", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION}, 0, 0, FLAGS, "mode" },
{ "hardlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT}, 0, 0, FLAGS, "mode" },
{ "lighten", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN}, 0, 0, FLAGS, "mode" },
{ "multiply", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY}, 0, 0, FLAGS, "mode" },
{ "negation", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION}, 0, 0, FLAGS, "mode" },
{ "normal", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL}, 0, 0, FLAGS, "mode" },
{ "or", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR}, 0, 0, FLAGS, "mode" },
{ "overlay", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY}, 0, 0, FLAGS, "mode" },
{ "phoenix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX}, 0, 0, FLAGS, "mode" },
{ "pinlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT}, 0, 0, FLAGS, "mode" },
{ "reflect", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT}, 0, 0, FLAGS, "mode" },
{ "screen", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN}, 0, 0, FLAGS, "mode" },
{ "softlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT}, 0, 0, FLAGS, "mode" },
{ "subtract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT}, 0, 0, FLAGS, "mode" },
{ "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, "mode" },
{ "xor", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR}, 0, 0, FLAGS, "mode" },
{ "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
{ "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
{ "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
{ "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
{ "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{ "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
{ "repeatlast", "repeat last bottom frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(blend);
 
static void blend_normal(const uint8_t *top, int top_linesize,
const uint8_t *bottom, int bottom_linesize,
uint8_t *dst, int dst_linesize,
int width, int start, int end,
FilterParams *param, double *values)
{
av_image_copy_plane(dst, dst_linesize, top, top_linesize, width, end - start);
}
 
#define DEFINE_BLEND(name, expr) \
static void blend_## name(const uint8_t *top, int top_linesize, \
const uint8_t *bottom, int bottom_linesize, \
uint8_t *dst, int dst_linesize, \
int width, int start, int end, \
FilterParams *param, double *values) \
{ \
double opacity = param->opacity; \
int i, j; \
\
for (i = start; i < end; i++) { \
for (j = 0; j < width; j++) { \
dst[j] = top[j] + ((expr) - top[j]) * opacity; \
} \
dst += dst_linesize; \
top += top_linesize; \
bottom += bottom_linesize; \
} \
}
 
#define A top[j]
#define B bottom[j]
 
#define MULTIPLY(x, a, b) (x * ((a * b) / 255))
#define SCREEN(x, a, b) (255 - x * ((255 - a) * (255 - b) / 255))
#define BURN(a, b) ((a == 0) ? a : FFMAX(0, 255 - ((255 - b) << 8) / a))
#define DODGE(a, b) ((a == 255) ? a : FFMIN(255, ((b << 8) / (255 - a))))
 
DEFINE_BLEND(addition, FFMIN(255, A + B))
DEFINE_BLEND(average, (A + B) / 2)
DEFINE_BLEND(subtract, FFMAX(0, A - B))
DEFINE_BLEND(multiply, MULTIPLY(1, A, B))
DEFINE_BLEND(negation, 255 - FFABS(255 - A - B))
DEFINE_BLEND(difference, FFABS(A - B))
DEFINE_BLEND(screen, SCREEN(1, A, B))
DEFINE_BLEND(overlay, (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
DEFINE_BLEND(hardlight, (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
DEFINE_BLEND(darken, FFMIN(A, B))
DEFINE_BLEND(lighten, FFMAX(A, B))
DEFINE_BLEND(divide, ((float)A / ((float)B) * 255))
DEFINE_BLEND(dodge, DODGE(A, B))
DEFINE_BLEND(burn, BURN(A, B))
DEFINE_BLEND(softlight, (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - FFABS(B - 127.5) / 255): B - B * ((127.5 - A) / 127.5) * (0.5 - FFABS(B - 127.5)/255))
DEFINE_BLEND(exclusion, A + B - 2 * A * B / 255)
DEFINE_BLEND(pinlight, (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128)))
DEFINE_BLEND(phoenix, FFMIN(A, B) - FFMAX(A, B) + 255)
DEFINE_BLEND(reflect, (B == 255) ? B : FFMIN(255, (A * A / (255 - B))))
DEFINE_BLEND(and, A & B)
DEFINE_BLEND(or, A | B)
DEFINE_BLEND(xor, A ^ B)
DEFINE_BLEND(vividlight, (B < 128) ? BURN(A, 2 * B) : DODGE(A, 2 * (B - 128)))
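
/*
 * Note (editor): every generated blend_<mode>() computes the mode
 * expression per pixel and then cross-fades it with the top input:
 *     dst = A + (expr - A) * opacity
 * so opacity = 1 yields the pure mode and opacity = 0 passes the top
 * layer through. E.g. addition at opacity 0.5 with A = 100, B = 80:
 * expr = 180, dst = 100 + (180 - 100) * 0.5 = 140.
 */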
 
static void blend_expr(const uint8_t *top, int top_linesize,
const uint8_t *bottom, int bottom_linesize,
uint8_t *dst, int dst_linesize,
int width, int start, int end,
FilterParams *param, double *values)
{
AVExpr *e = param->e;
int y, x;
 
for (y = start; y < end; y++) {
values[VAR_Y] = y;
for (x = 0; x < width; x++) {
values[VAR_X] = x;
values[VAR_TOP] = values[VAR_A] = top[x];
values[VAR_BOTTOM] = values[VAR_B] = bottom[x];
dst[x] = av_expr_eval(e, values, NULL);
}
dst += dst_linesize;
top += top_linesize;
bottom += bottom_linesize;
}
}
 
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
ThreadData *td = arg;
int slice_start = (td->h * jobnr ) / nb_jobs;
int slice_end = (td->h * (jobnr+1)) / nb_jobs;
const uint8_t *top = td->top->data[td->plane];
const uint8_t *bottom = td->bottom->data[td->plane];
uint8_t *dst = td->dst->data[td->plane];
double values[VAR_VARS_NB];
 
values[VAR_N] = td->inlink->frame_count;
values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
values[VAR_W] = td->w;
values[VAR_H] = td->h;
values[VAR_SW] = td->w / (double)td->dst->width;
values[VAR_SH] = td->h / (double)td->dst->height;
 
td->param->blend(top + slice_start * td->top->linesize[td->plane],
td->top->linesize[td->plane],
bottom + slice_start * td->bottom->linesize[td->plane],
td->bottom->linesize[td->plane],
dst + slice_start * td->dst->linesize[td->plane],
td->dst->linesize[td->plane],
td->w, slice_start, slice_end, td->param, &values[0]);
return 0;
}
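
/*
 * Note (editor): filter_slice() partitions rows, not pixels: job n of
 * nb_jobs covers rows [h*n/nb_jobs, h*(n+1)/nb_jobs). For h = 100 and
 * nb_jobs = 4 that is rows 0-24, 25-49, 50-74, 75-99, so slices are
 * contiguous, non-overlapping and cover every row even when nb_jobs
 * does not divide h.
 */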
 
static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf,
const AVFrame *bottom_buf)
{
BlendContext *b = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *dst_buf;
int plane;
 
dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!dst_buf)
return top_buf;
av_frame_copy_props(dst_buf, top_buf);
 
for (plane = 0; plane < b->nb_planes; plane++) {
int hsub = plane == 1 || plane == 2 ? b->hsub : 0;
int vsub = plane == 1 || plane == 2 ? b->vsub : 0;
int outw = FF_CEIL_RSHIFT(dst_buf->width, hsub);
int outh = FF_CEIL_RSHIFT(dst_buf->height, vsub);
FilterParams *param = &b->params[plane];
ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf,
.w = outw, .h = outh, .param = param, .plane = plane,
.inlink = inlink };
 
ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ctx->graph->nb_threads));
}
 
av_frame_free(&top_buf);
 
return dst_buf;
}
 
static av_cold int init(AVFilterContext *ctx)
{
BlendContext *b = ctx->priv;
int ret, plane;
 
for (plane = 0; plane < FF_ARRAY_ELEMS(b->params); plane++) {
FilterParams *param = &b->params[plane];
 
if (b->all_mode >= 0)
param->mode = b->all_mode;
if (b->all_opacity < 1)
param->opacity = b->all_opacity;
 
switch (param->mode) {
case BLEND_ADDITION: param->blend = blend_addition; break;
case BLEND_AND: param->blend = blend_and; break;
case BLEND_AVERAGE: param->blend = blend_average; break;
case BLEND_BURN: param->blend = blend_burn; break;
case BLEND_DARKEN: param->blend = blend_darken; break;
case BLEND_DIFFERENCE: param->blend = blend_difference; break;
case BLEND_DIVIDE: param->blend = blend_divide; break;
case BLEND_DODGE: param->blend = blend_dodge; break;
case BLEND_EXCLUSION: param->blend = blend_exclusion; break;
case BLEND_HARDLIGHT: param->blend = blend_hardlight; break;
case BLEND_LIGHTEN: param->blend = blend_lighten; break;
case BLEND_MULTIPLY: param->blend = blend_multiply; break;
case BLEND_NEGATION: param->blend = blend_negation; break;
case BLEND_NORMAL: param->blend = blend_normal; break;
case BLEND_OR: param->blend = blend_or; break;
case BLEND_OVERLAY: param->blend = blend_overlay; break;
case BLEND_PHOENIX: param->blend = blend_phoenix; break;
case BLEND_PINLIGHT: param->blend = blend_pinlight; break;
case BLEND_REFLECT: param->blend = blend_reflect; break;
case BLEND_SCREEN: param->blend = blend_screen; break;
case BLEND_SOFTLIGHT: param->blend = blend_softlight; break;
case BLEND_SUBTRACT: param->blend = blend_subtract; break;
case BLEND_VIVIDLIGHT: param->blend = blend_vividlight; break;
case BLEND_XOR: param->blend = blend_xor; break;
}
 
if (b->all_expr && !param->expr_str) {
param->expr_str = av_strdup(b->all_expr);
if (!param->expr_str)
return AVERROR(ENOMEM);
}
if (param->expr_str) {
ret = av_expr_parse(&param->e, param->expr_str, var_names,
NULL, NULL, NULL, NULL, 0, ctx);
if (ret < 0)
return ret;
param->blend = blend_expr;
}
}
 
b->dinput.process = blend_frame;
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *toplink = ctx->inputs[TOP];
AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
BlendContext *b = ctx->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
int ret;
 
if (toplink->format != bottomlink->format) {
av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
return AVERROR(EINVAL);
}
if (toplink->w != bottomlink->w ||
toplink->h != bottomlink->h ||
toplink->sample_aspect_ratio.num != bottomlink->sample_aspect_ratio.num ||
toplink->sample_aspect_ratio.den != bottomlink->sample_aspect_ratio.den) {
av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
"(size %dx%d, SAR %d:%d) do not match the corresponding "
"second input link %s parameters (%dx%d, SAR %d:%d)\n",
ctx->input_pads[TOP].name, toplink->w, toplink->h,
toplink->sample_aspect_ratio.num,
toplink->sample_aspect_ratio.den,
ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h,
bottomlink->sample_aspect_ratio.num,
bottomlink->sample_aspect_ratio.den);
return AVERROR(EINVAL);
}
 
outlink->w = toplink->w;
outlink->h = toplink->h;
outlink->time_base = toplink->time_base;
outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
outlink->frame_rate = toplink->frame_rate;
 
b->hsub = pix_desc->log2_chroma_w;
b->vsub = pix_desc->log2_chroma_h;
b->nb_planes = av_pix_fmt_count_planes(toplink->format);
 
if ((ret = ff_dualinput_init(ctx, &b->dinput)) < 0)
return ret;
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
BlendContext *b = ctx->priv;
int i;
 
ff_dualinput_uninit(&b->dinput);
for (i = 0; i < FF_ARRAY_ELEMS(b->params); i++)
av_expr_free(b->params[i].e);
}
 
static int request_frame(AVFilterLink *outlink)
{
BlendContext *b = outlink->src->priv;
return ff_dualinput_request_frame(&b->dinput, outlink);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
BlendContext *b = inlink->dst->priv;
return ff_dualinput_filter_frame(&b->dinput, inlink, buf);
}
 
static const AVFilterPad blend_inputs[] = {
{
.name = "top",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},{
.name = "bottom",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad blend_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_blend = {
.name = "blend",
.description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
.init = init,
.uninit = uninit,
.priv_size = sizeof(BlendContext),
.query_formats = query_formats,
.inputs = blend_inputs,
.outputs = blend_outputs,
.priv_class = &blend_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_boxblur.c
0,0 → 1,386
/*
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* Apply a boxblur filter to the input video.
* Ported from MPlayer libmpcodecs/vf_boxblur.c.
*/
 
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
static const char *const var_names[] = {
"w",
"h",
"cw",
"ch",
"hsub",
"vsub",
NULL
};
 
enum var_name {
VAR_W,
VAR_H,
VAR_CW,
VAR_CH,
VAR_HSUB,
VAR_VSUB,
VARS_NB
};
 
typedef struct {
int radius;
int power;
char *radius_expr;
} FilterParam;
 
typedef struct {
const AVClass *class;
FilterParam luma_param;
FilterParam chroma_param;
FilterParam alpha_param;
 
int hsub, vsub;
int radius[4];
int power[4];
uint8_t *temp[2]; ///< temporary buffer used in blur_power()
} BoxBlurContext;
 
#define Y 0
#define U 1
#define V 2
#define A 3
 
static av_cold int init(AVFilterContext *ctx)
{
BoxBlurContext *s = ctx->priv;
 
if (!s->luma_param.radius_expr) {
av_log(ctx, AV_LOG_ERROR, "Luma radius expression is not set.\n");
return AVERROR(EINVAL);
}
 
/* fill missing params */
if (!s->chroma_param.radius_expr) {
s->chroma_param.radius_expr = av_strdup(s->luma_param.radius_expr);
if (!s->chroma_param.radius_expr)
return AVERROR(ENOMEM);
}
if (s->chroma_param.power < 0)
s->chroma_param.power = s->luma_param.power;
 
if (!s->alpha_param.radius_expr) {
s->alpha_param.radius_expr = av_strdup(s->luma_param.radius_expr);
if (!s->alpha_param.radius_expr)
return AVERROR(ENOMEM);
}
if (s->alpha_param.power < 0)
s->alpha_param.power = s->luma_param.power;
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
BoxBlurContext *s = ctx->priv;
 
av_freep(&s->temp[0]);
av_freep(&s->temp[1]);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFilterContext *ctx = inlink->dst;
BoxBlurContext *s = ctx->priv;
int w = inlink->w, h = inlink->h;
int cw, ch;
double var_values[VARS_NB], res;
char *expr;
int ret;
 
if (!(s->temp[0] = av_malloc(FFMAX(w, h))) ||
!(s->temp[1] = av_malloc(FFMAX(w, h))))
return AVERROR(ENOMEM);
 
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
 
var_values[VAR_W] = inlink->w;
var_values[VAR_H] = inlink->h;
var_values[VAR_CW] = cw = w>>s->hsub;
var_values[VAR_CH] = ch = h>>s->vsub;
var_values[VAR_HSUB] = 1<<s->hsub;
var_values[VAR_VSUB] = 1<<s->vsub;
 
#define EVAL_RADIUS_EXPR(comp) \
expr = s->comp##_param.radius_expr; \
ret = av_expr_parse_and_eval(&res, expr, var_names, var_values, \
NULL, NULL, NULL, NULL, NULL, 0, ctx); \
s->comp##_param.radius = res; \
if (ret < 0) { \
av_log(NULL, AV_LOG_ERROR, \
"Error when evaluating " #comp " radius expression '%s'\n", expr); \
return ret; \
}
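/* Editor's note (illustrative, not from the original source): the radius
 * expressions may use the w, h, cw, ch, hsub and vsub variables declared
 * above. For a 1920x1080 YUV420P input, "min(w,h)/20" evaluates to 54,
 * with cw=960 and ch=540 available for the chroma planes. */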
EVAL_RADIUS_EXPR(luma);
EVAL_RADIUS_EXPR(chroma);
EVAL_RADIUS_EXPR(alpha);
 
av_log(ctx, AV_LOG_VERBOSE,
"luma_radius:%d luma_power:%d "
"chroma_radius:%d chroma_power:%d "
"alpha_radius:%d alpha_power:%d "
"w:%d chroma_w:%d h:%d chroma_h:%d\n",
s->luma_param .radius, s->luma_param .power,
s->chroma_param.radius, s->chroma_param.power,
s->alpha_param .radius, s->alpha_param .power,
w, cw, h, ch);
 
#define CHECK_RADIUS_VAL(w_, h_, comp) \
if (s->comp##_param.radius < 0 || \
2*s->comp##_param.radius > FFMIN(w_, h_)) { \
av_log(ctx, AV_LOG_ERROR, \
"Invalid " #comp " radius value %d, must be >= 0 and <= %d\n", \
s->comp##_param.radius, FFMIN(w_, h_)/2); \
return AVERROR(EINVAL); \
}
CHECK_RADIUS_VAL(w, h, luma);
CHECK_RADIUS_VAL(cw, ch, chroma);
CHECK_RADIUS_VAL(w, h, alpha);
 
s->radius[Y] = s->luma_param.radius;
s->radius[U] = s->radius[V] = s->chroma_param.radius;
s->radius[A] = s->alpha_param.radius;
 
s->power[Y] = s->luma_param.power;
s->power[U] = s->power[V] = s->chroma_param.power;
s->power[A] = s->alpha_param.power;
 
return 0;
}
 
static inline void blur(uint8_t *dst, int dst_step, const uint8_t *src, int src_step,
int len, int radius)
{
/* Naive boxblur would sum source pixels from x-radius .. x+radius
* for destination pixel x. That would be O(radius*width).
* If you now look at what source pixels represent 2 consecutive
* output pixels, then you see they are almost identical and only
* differ by 2 pixels, like:
* src0 111111111
* dst0 1
* src1 111111111
* dst1 1
* src0-src1 1 -1
* so when you know one output pixel you can find the next by just adding
* and subtracting 1 input pixel.
* The following code adopts this faster variant.
*/
const int length = radius*2 + 1;
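/* Editor's note: 'inv' is a rounded 16.16 fixed-point reciprocal of the
 * window length, so sum/length is computed below as
 * (sum*inv + (1<<15)) >> 16 without a per-pixel division. Worked example
 * (assumed values): radius=2 -> length=5, inv=(65536+2)/5=13107, and a
 * window sum of 500 gives (500*13107 + 32768)>>16 = 100 = 500/5. */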
const int inv = ((1<<16) + length/2)/length;
int x, sum = 0;
 
for (x = 0; x < radius; x++)
sum += src[x*src_step]<<1;
sum += src[radius*src_step];
 
for (x = 0; x <= radius; x++) {
sum += src[(radius+x)*src_step] - src[(radius-x)*src_step];
dst[x*dst_step] = (sum*inv + (1<<15))>>16;
}
 
for (; x < len-radius; x++) {
sum += src[(radius+x)*src_step] - src[(x-radius-1)*src_step];
dst[x*dst_step] = (sum*inv + (1<<15))>>16;
}
 
for (; x < len; x++) {
sum += src[(2*len-radius-x-1)*src_step] - src[(x-radius-1)*src_step];
dst[x*dst_step] = (sum*inv + (1<<15))>>16;
}
}
 
static inline void blur_power(uint8_t *dst, int dst_step, const uint8_t *src, int src_step,
int len, int radius, int power, uint8_t *temp[2])
{
uint8_t *a = temp[0], *b = temp[1];
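/* Editor's note: 'a' and 'b' ping-pong between passes below; each
 * additional power applies one more box pass, and repeated box passes
 * approach a Gaussian-shaped kernel. */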
 
if (radius && power) {
blur(a, 1, src, src_step, len, radius);
for (; power > 2; power--) {
uint8_t *c;
blur(b, 1, a, 1, len, radius);
c = a; a = b; b = c;
}
if (power > 1) {
blur(dst, dst_step, a, 1, len, radius);
} else {
int i;
for (i = 0; i < len; i++)
dst[i*dst_step] = a[i];
}
} else {
int i;
for (i = 0; i < len; i++)
dst[i*dst_step] = src[i*src_step];
}
}
 
static void hblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize,
int w, int h, int radius, int power, uint8_t *temp[2])
{
int y;
 
if (radius == 0 && dst == src)
return;
 
for (y = 0; y < h; y++)
blur_power(dst + y*dst_linesize, 1, src + y*src_linesize, 1,
w, radius, power, temp);
}
 
static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize,
int w, int h, int radius, int power, uint8_t *temp[2])
{
int x;
 
if (radius == 0 && dst == src)
return;
 
for (x = 0; x < w; x++)
blur_power(dst + x, dst_linesize, src + x, src_linesize,
h, radius, power, temp);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
BoxBlurContext *s = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int plane;
int cw = FF_CEIL_RSHIFT(inlink->w, s->hsub), ch = FF_CEIL_RSHIFT(in->height, s->vsub);
int w[4] = { inlink->w, cw, cw, inlink->w };
int h[4] = { in->height, ch, ch, in->height };
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
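/* Editor's note: the 2-D box blur is separable; a horizontal pass writes
 * from 'in' to 'out', then a vertical pass runs in place on 'out'. */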
 
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
hblur(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
w[plane], h[plane], s->radius[plane], s->power[plane],
s->temp);
 
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
vblur(out->data[plane], out->linesize[plane],
out->data[plane], out->linesize[plane],
w[plane], h[plane], s->radius[plane], s->power[plane],
s->temp);
 
av_frame_free(&in);
 
return ff_filter_frame(outlink, out);
}
 
#define OFFSET(x) offsetof(BoxBlurContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption boxblur_options[] = {
{ "luma_radius", "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
{ "lr", "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
{ "luma_power", "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
{ "lp", "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
 
{ "chroma_radius", "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "cr", "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "chroma_power", "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
{ "cp", "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
 
{ "alpha_radius", "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "ar", "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "alpha_power", "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
{ "ap", "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
 
{ NULL }
};
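/* Editor's note (usage sketch adapted from the FFmpeg documentation):
 * "boxblur=luma_radius=2:luma_power=1", or
 * "boxblur=min(h\,w)/10:1:min(cw\,ch)/10:1" to scale the radii with the
 * frame size (commas inside expressions are escaped in a filtergraph). */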
 
AVFILTER_DEFINE_CLASS(boxblur);
 
static const AVFilterPad avfilter_vf_boxblur_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_boxblur_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_boxblur = {
.name = "boxblur",
.description = NULL_IF_CONFIG_SMALL("Blur the input."),
.priv_size = sizeof(BoxBlurContext),
.priv_class = &boxblur_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = avfilter_vf_boxblur_inputs,
.outputs = avfilter_vf_boxblur_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_colorbalance.c
0,0 → 1,213
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
#define R 0
#define G 1
#define B 2
#define A 3
 
typedef struct {
double shadows;
double midtones;
double highlights;
} Range;
 
typedef struct {
const AVClass *class;
Range cyan_red;
Range magenta_green;
Range yellow_blue;
 
uint8_t lut[3][256];
 
uint8_t rgba_map[4];
int step;
} ColorBalanceContext;
 
#define OFFSET(x) offsetof(ColorBalanceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption colorbalance_options[] = {
{ "rs", "set red shadows", OFFSET(cyan_red.shadows), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
{ "gs", "set green shadows", OFFSET(magenta_green.shadows), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
{ "bs", "set blue shadows", OFFSET(yellow_blue.shadows), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
{ "rm", "set red midtones", OFFSET(cyan_red.midtones), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
{ "gm", "set green midtones", OFFSET(magenta_green.midtones), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
{ "bm", "set blue midtones", OFFSET(yellow_blue.midtones), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
{ "rh", "set red highlights", OFFSET(cyan_red.highlights), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
{ "gh", "set green highlights", OFFSET(magenta_green.highlights), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
{ "bh", "set blue highlights", OFFSET(yellow_blue.highlights), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
{ NULL }
};
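/* Editor's note (usage sketch): e.g. "colorbalance=rs=.3" shifts shadows
 * toward red; negative values shift toward the complementary color
 * (cyan/magenta/yellow for the r/g/b options respectively). */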
 
AVFILTER_DEFINE_CLASS(colorbalance);
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_ABGR, AV_PIX_FMT_ARGB,
AV_PIX_FMT_0BGR, AV_PIX_FMT_0RGB,
AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
ColorBalanceContext *cb = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
double *shadows, *midtones, *highlights, *buffer;
int i, r, g, b;
 
buffer = av_malloc(256 * 3 * sizeof(*buffer));
if (!buffer)
return AVERROR(ENOMEM);
 
shadows = buffer + 256 * 0;
midtones = buffer + 256 * 1;
highlights = buffer + 256 * 2;
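/* Editor's note on the curves built below: 'low' is a ramp worth 178.5
 * at i=0 that falls to 0 by i=117; 'mid' is the product of a rising and
 * a falling ramp, peaking in the middle of the range; highlights reuse
 * 'low' mirrored via highlights[255 - i]. The balance parameters in
 * [-1,1] then scale these weights per channel in the LUT build. */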
 
for (i = 0; i < 256; i++) {
double low = av_clipd((i - 85.0) / -64.0 + 0.5, 0, 1) * 178.5;
double mid = av_clipd((i - 85.0) / 64.0 + 0.5, 0, 1) *
av_clipd((i + 85.0 - 255.0) / -64.0 + 0.5, 0, 1) * 178.5;
 
shadows[i] = low;
midtones[i] = mid;
highlights[255 - i] = low;
}
 
for (i = 0; i < 256; i++) {
r = g = b = i;
 
r = av_clip_uint8(r + cb->cyan_red.shadows * shadows[r]);
r = av_clip_uint8(r + cb->cyan_red.midtones * midtones[r]);
r = av_clip_uint8(r + cb->cyan_red.highlights * highlights[r]);
 
g = av_clip_uint8(g + cb->magenta_green.shadows * shadows[g]);
g = av_clip_uint8(g + cb->magenta_green.midtones * midtones[g]);
g = av_clip_uint8(g + cb->magenta_green.highlights * highlights[g]);
 
b = av_clip_uint8(b + cb->yellow_blue.shadows * shadows[b]);
b = av_clip_uint8(b + cb->yellow_blue.midtones * midtones[b]);
b = av_clip_uint8(b + cb->yellow_blue.highlights * highlights[b]);
 
cb->lut[R][i] = r;
cb->lut[G][i] = g;
cb->lut[B][i] = b;
}
 
av_free(buffer);
 
ff_fill_rgba_map(cb->rgba_map, outlink->format);
cb->step = av_get_padded_bits_per_pixel(desc) >> 3;
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
ColorBalanceContext *cb = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
const uint8_t roffset = cb->rgba_map[R];
const uint8_t goffset = cb->rgba_map[G];
const uint8_t boffset = cb->rgba_map[B];
const uint8_t aoffset = cb->rgba_map[A];
const int step = cb->step;
const uint8_t *srcrow = in->data[0];
uint8_t *dstrow;
AVFrame *out;
int i, j;
 
if (av_frame_is_writable(in)) {
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
 
dstrow = out->data[0];
for (i = 0; i < outlink->h; i++) {
const uint8_t *src = srcrow;
uint8_t *dst = dstrow;
 
for (j = 0; j < outlink->w * step; j += step) {
dst[j + roffset] = cb->lut[R][src[j + roffset]];
dst[j + goffset] = cb->lut[G][src[j + goffset]];
dst[j + boffset] = cb->lut[B][src[j + boffset]];
if (in != out && step == 4)
dst[j + aoffset] = src[j + aoffset];
}
 
srcrow += in->linesize[0];
dstrow += out->linesize[0];
}
 
if (in != out)
av_frame_free(&in);
return ff_filter_frame(ctx->outputs[0], out);
}
 
static const AVFilterPad colorbalance_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad colorbalance_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_colorbalance = {
.name = "colorbalance",
.description = NULL_IF_CONFIG_SMALL("Adjust the color balance."),
.priv_size = sizeof(ColorBalanceContext),
.priv_class = &colorbalance_class,
.query_formats = query_formats,
.inputs = colorbalance_inputs,
.outputs = colorbalance_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_colorchannelmixer.c
0,0 → 1,360
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/opt.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
#define R 0
#define G 1
#define B 2
#define A 3
 
typedef struct {
const AVClass *class;
double rr, rg, rb, ra;
double gr, gg, gb, ga;
double br, bg, bb, ba;
double ar, ag, ab, aa;
 
int *lut[4][4];
 
int *buffer;
 
uint8_t rgba_map[4];
} ColorChannelMixerContext;
 
#define OFFSET(x) offsetof(ColorChannelMixerContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption colorchannelmixer_options[] = {
{ "rr", "set the red gain for the red channel", OFFSET(rr), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
{ "rg", "set the green gain for the red channel", OFFSET(rg), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "rb", "set the blue gain for the red channel", OFFSET(rb), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "ra", "set the alpha gain for the red channel", OFFSET(ra), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "gr", "set the red gain for the green channel", OFFSET(gr), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "gg", "set the green gain for the green channel", OFFSET(gg), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
{ "gb", "set the blue gain for the green channel", OFFSET(gb), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "ga", "set the alpha gain for the green channel", OFFSET(ga), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "br", "set the red gain for the blue channel", OFFSET(br), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "bg", "set the green gain for the blue channel", OFFSET(bg), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "bb", "set the blue gain for the blue channel", OFFSET(bb), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
{ "ba", "set the alpha gain for the blue channel", OFFSET(ba), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "ar", "set the red gain for the alpha channel", OFFSET(ar), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "ag", "set the green gain for the alpha channel", OFFSET(ag), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "ab", "set the blue gain for the alpha channel", OFFSET(ab), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
{ "aa", "set the alpha gain for the alpha channel", OFFSET(aa), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
{ NULL }
};
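/* Editor's note (usage sketch): giving every output channel the same
 * r/g/b gains approximates grayscale, e.g.
 * "colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3". */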
 
AVFILTER_DEFINE_CLASS(colorchannelmixer);
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
ColorChannelMixerContext *cm = ctx->priv;
int i, j, size, *buffer;
 
ff_fill_rgba_map(cm->rgba_map, outlink->format);
 
switch (outlink->format) {
case AV_PIX_FMT_RGB48:
case AV_PIX_FMT_BGR48:
case AV_PIX_FMT_RGBA64:
case AV_PIX_FMT_BGRA64:
size = 65536;
break;
default:
size = 256;
}
 
cm->buffer = buffer = av_malloc(16 * size * sizeof(*cm->buffer));
if (!cm->buffer)
return AVERROR(ENOMEM);
 
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++, buffer += size)
cm->lut[i][j] = buffer;
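/* Editor's note: lut[out][in][v] holds round(v * gain), so in
 * filter_frame() each output channel is the clipped sum of three or four
 * table lookups, e.g.
 * R_out = clip(lut[R][R][r] + lut[R][G][g] + lut[R][B][b] + lut[R][A][a]). */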
 
for (i = 0; i < size; i++) {
cm->lut[R][R][i] = round(i * cm->rr);
cm->lut[R][G][i] = round(i * cm->rg);
cm->lut[R][B][i] = round(i * cm->rb);
cm->lut[R][A][i] = round(i * cm->ra);
 
cm->lut[G][R][i] = round(i * cm->gr);
cm->lut[G][G][i] = round(i * cm->gg);
cm->lut[G][B][i] = round(i * cm->gb);
cm->lut[G][A][i] = round(i * cm->ga);
 
cm->lut[B][R][i] = round(i * cm->br);
cm->lut[B][G][i] = round(i * cm->bg);
cm->lut[B][B][i] = round(i * cm->bb);
cm->lut[B][A][i] = round(i * cm->ba);
 
cm->lut[A][R][i] = round(i * cm->ar);
cm->lut[A][G][i] = round(i * cm->ag);
cm->lut[A][B][i] = round(i * cm->ab);
cm->lut[A][A][i] = round(i * cm->aa);
}
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
ColorChannelMixerContext *cm = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
const uint8_t roffset = cm->rgba_map[R];
const uint8_t goffset = cm->rgba_map[G];
const uint8_t boffset = cm->rgba_map[B];
const uint8_t aoffset = cm->rgba_map[A];
const uint8_t *srcrow = in->data[0];
uint8_t *dstrow;
AVFrame *out;
int i, j;
 
if (av_frame_is_writable(in)) {
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
 
dstrow = out->data[0];
switch (outlink->format) {
case AV_PIX_FMT_BGR24:
case AV_PIX_FMT_RGB24:
for (i = 0; i < outlink->h; i++) {
const uint8_t *src = srcrow;
uint8_t *dst = dstrow;
 
for (j = 0; j < outlink->w * 3; j += 3) {
const uint8_t rin = src[j + roffset];
const uint8_t gin = src[j + goffset];
const uint8_t bin = src[j + boffset];
 
dst[j + roffset] = av_clip_uint8(cm->lut[R][R][rin] +
cm->lut[R][G][gin] +
cm->lut[R][B][bin]);
dst[j + goffset] = av_clip_uint8(cm->lut[G][R][rin] +
cm->lut[G][G][gin] +
cm->lut[G][B][bin]);
dst[j + boffset] = av_clip_uint8(cm->lut[B][R][rin] +
cm->lut[B][G][gin] +
cm->lut[B][B][bin]);
}
 
srcrow += in->linesize[0];
dstrow += out->linesize[0];
}
break;
case AV_PIX_FMT_0BGR:
case AV_PIX_FMT_0RGB:
case AV_PIX_FMT_BGR0:
case AV_PIX_FMT_RGB0:
for (i = 0; i < outlink->h; i++) {
const uint8_t *src = srcrow;
uint8_t *dst = dstrow;
 
for (j = 0; j < outlink->w * 4; j += 4) {
const uint8_t rin = src[j + roffset];
const uint8_t gin = src[j + goffset];
const uint8_t bin = src[j + boffset];
 
dst[j + roffset] = av_clip_uint8(cm->lut[R][R][rin] +
cm->lut[R][G][gin] +
cm->lut[R][B][bin]);
dst[j + goffset] = av_clip_uint8(cm->lut[G][R][rin] +
cm->lut[G][G][gin] +
cm->lut[G][B][bin]);
dst[j + boffset] = av_clip_uint8(cm->lut[B][R][rin] +
cm->lut[B][G][gin] +
cm->lut[B][B][bin]);
if (in != out)
dst[j + aoffset] = 0;
}
 
srcrow += in->linesize[0];
dstrow += out->linesize[0];
}
break;
case AV_PIX_FMT_ABGR:
case AV_PIX_FMT_ARGB:
case AV_PIX_FMT_BGRA:
case AV_PIX_FMT_RGBA:
for (i = 0; i < outlink->h; i++) {
const uint8_t *src = srcrow;
uint8_t *dst = dstrow;
 
for (j = 0; j < outlink->w * 4; j += 4) {
const uint8_t rin = src[j + roffset];
const uint8_t gin = src[j + goffset];
const uint8_t bin = src[j + boffset];
const uint8_t ain = src[j + aoffset];
 
dst[j + roffset] = av_clip_uint8(cm->lut[R][R][rin] +
cm->lut[R][G][gin] +
cm->lut[R][B][bin] +
cm->lut[R][A][ain]);
dst[j + goffset] = av_clip_uint8(cm->lut[G][R][rin] +
cm->lut[G][G][gin] +
cm->lut[G][B][bin] +
cm->lut[G][A][ain]);
dst[j + boffset] = av_clip_uint8(cm->lut[B][R][rin] +
cm->lut[B][G][gin] +
cm->lut[B][B][bin] +
cm->lut[B][A][ain]);
dst[j + aoffset] = av_clip_uint8(cm->lut[A][R][rin] +
cm->lut[A][G][gin] +
cm->lut[A][B][bin] +
cm->lut[A][A][ain]);
}
 
srcrow += in->linesize[0];
dstrow += out->linesize[0];
}
break;
case AV_PIX_FMT_BGR48:
case AV_PIX_FMT_RGB48:
for (i = 0; i < outlink->h; i++) {
const uint16_t *src = (const uint16_t *)srcrow;
uint16_t *dst = (uint16_t *)dstrow;
 
for (j = 0; j < outlink->w * 3; j += 3) {
const uint16_t rin = src[j + roffset];
const uint16_t gin = src[j + goffset];
const uint16_t bin = src[j + boffset];
 
dst[j + roffset] = av_clip_uint16(cm->lut[R][R][rin] +
cm->lut[R][G][gin] +
cm->lut[R][B][bin]);
dst[j + goffset] = av_clip_uint16(cm->lut[G][R][rin] +
cm->lut[G][G][gin] +
cm->lut[G][B][bin]);
dst[j + boffset] = av_clip_uint16(cm->lut[B][R][rin] +
cm->lut[B][G][gin] +
cm->lut[B][B][bin]);
}
 
srcrow += in->linesize[0];
dstrow += out->linesize[0];
}
break;
case AV_PIX_FMT_BGRA64:
case AV_PIX_FMT_RGBA64:
for (i = 0; i < outlink->h; i++) {
const uint16_t *src = (const uint16_t *)srcrow;
uint16_t *dst = (uint16_t *)dstrow;
 
for (j = 0; j < outlink->w * 4; j += 4) {
const uint16_t rin = src[j + roffset];
const uint16_t gin = src[j + goffset];
const uint16_t bin = src[j + boffset];
const uint16_t ain = src[j + aoffset];
 
dst[j + roffset] = av_clip_uint16(cm->lut[R][R][rin] +
cm->lut[R][G][gin] +
cm->lut[R][B][bin] +
cm->lut[R][A][ain]);
dst[j + goffset] = av_clip_uint16(cm->lut[G][R][rin] +
cm->lut[G][G][gin] +
cm->lut[G][B][bin] +
cm->lut[G][A][ain]);
dst[j + boffset] = av_clip_uint16(cm->lut[B][R][rin] +
cm->lut[B][G][gin] +
cm->lut[B][B][bin] +
cm->lut[B][A][ain]);
dst[j + aoffset] = av_clip_uint16(cm->lut[A][R][rin] +
cm->lut[A][G][gin] +
cm->lut[A][B][bin] +
cm->lut[A][A][ain]);
}
 
srcrow += in->linesize[0];
dstrow += out->linesize[0];
}
}
 
if (in != out)
av_frame_free(&in);
return ff_filter_frame(ctx->outputs[0], out);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
ColorChannelMixerContext *cm = ctx->priv;
 
av_freep(&cm->buffer);
}
 
static const AVFilterPad colorchannelmixer_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad colorchannelmixer_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_colorchannelmixer = {
.name = "colorchannelmixer",
.description = NULL_IF_CONFIG_SMALL("Adjust colors by mixing color channels."),
.priv_size = sizeof(ColorChannelMixerContext),
.priv_class = &colorchannelmixer_class,
.uninit = uninit,
.query_formats = query_formats,
.inputs = colorchannelmixer_inputs,
.outputs = colorchannelmixer_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_colormatrix.c
0,0 → 1,388
/*
* ColorMatrix v2.2 for Avisynth 2.5.x
*
* Copyright (C) 2006-2007 Kevin Stone
*
* ColorMatrix 1.x is Copyright (C) Wilbert Dijkhof
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
* License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
 
/**
* @file
* ColorMatrix 2.0 is based on the original ColorMatrix filter by Wilbert
* Dijkhof. It adds the ability to convert between any of: Rec.709, FCC,
* Rec.601, and SMPTE 240M. It also makes pre- and post-clipping optional,
* adds an option to use scaled or non-scaled coefficients, and more...
*/
 
#include <float.h>
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
 
#define NS(n) n < 0 ? (int)(n*65536.0-0.5+DBL_EPSILON) : (int)(n*65536.0+0.5)
#define CB(n) av_clip_uint8(n)
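/* Editor's note: NS() converts a coefficient to rounded 16.16 fixed
 * point (e.g. NS(0.5) == 32768, NS(-0.115) == -7537); CB() clips the
 * >>16 result back to 8 bits. */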
 
static const double yuv_coeff[4][3][3] = {
{ { +0.7152, +0.0722, +0.2126 }, // Rec.709 (0)
{ -0.3850, +0.5000, -0.1150 },
{ -0.4540, -0.0460, +0.5000 } },
{ { +0.5900, +0.1100, +0.3000 }, // FCC (1)
{ -0.3310, +0.5000, -0.1690 },
{ -0.4210, -0.0790, +0.5000 } },
{ { +0.5870, +0.1140, +0.2990 }, // Rec.601 (ITU-R BT.470-2/SMPTE 170M) (2)
{ -0.3313, +0.5000, -0.1687 },
{ -0.4187, -0.0813, +0.5000 } },
{ { +0.7010, +0.0870, +0.2120 }, // SMPTE 240M (3)
{ -0.3840, +0.5000, -0.1160 },
{ -0.4450, -0.0550, +0.5000 } },
};
 
enum ColorMode {
COLOR_MODE_NONE = -1,
COLOR_MODE_BT709,
COLOR_MODE_FCC,
COLOR_MODE_BT601,
COLOR_MODE_SMPTE240M,
COLOR_MODE_COUNT
};
 
typedef struct {
const AVClass *class;
int yuv_convert[16][3][3];
int interlaced;
enum ColorMode source, dest;
int mode;
int hsub, vsub;
} ColorMatrixContext;
 
#define OFFSET(x) offsetof(ColorMatrixContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption colormatrix_options[] = {
{ "src", "set source color matrix", OFFSET(source), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
{ "dst", "set destination color matrix", OFFSET(dest), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
{ "bt709", "set BT.709 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT709}, .flags=FLAGS, .unit="color_mode" },
{ "fcc", "set FCC colorspace ", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_FCC}, .flags=FLAGS, .unit="color_mode" },
{ "bt601", "set BT.601 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
{ "smpte240m", "set SMPTE-240M colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_SMPTE240M}, .flags=FLAGS, .unit="color_mode" },
{ NULL }
};
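/* Editor's note (usage sketch): e.g. "colormatrix=src=bt601:dst=bt709";
 * both src and dst are required and must differ (enforced in init()). */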
 
AVFILTER_DEFINE_CLASS(colormatrix);
 
#define ma m[0][0]
#define mb m[0][1]
#define mc m[0][2]
#define md m[1][0]
#define me m[1][1]
#define mf m[1][2]
#define mg m[2][0]
#define mh m[2][1]
#define mi m[2][2]
 
#define ima im[0][0]
#define imb im[0][1]
#define imc im[0][2]
#define imd im[1][0]
#define ime im[1][1]
#define imf im[1][2]
#define img im[2][0]
#define imh im[2][1]
#define imi im[2][2]
 
static void inverse3x3(double im[3][3], const double m[3][3])
{
double det = ma * (me * mi - mf * mh) - mb * (md * mi - mf * mg) + mc * (md * mh - me * mg);
det = 1.0 / det;
ima = det * (me * mi - mf * mh);
imb = det * (mc * mh - mb * mi);
imc = det * (mb * mf - mc * me);
imd = det * (mf * mg - md * mi);
ime = det * (ma * mi - mc * mg);
imf = det * (mc * md - ma * mf);
img = det * (md * mh - me * mg);
imh = det * (mb * mg - ma * mh);
imi = det * (ma * me - mb * md);
}
 
static void solve_coefficients(double cm[3][3], double rgb[3][3], const double yuv[3][3])
{
int i, j;
for (i = 0; i < 3; i++)
for (j = 0; j < 3; j++)
cm[i][j] = yuv[i][0] * rgb[0][j] + yuv[i][1] * rgb[1][j] + yuv[i][2] * rgb[2][j];
}
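/* Editor's note: calc_coefficients() below builds, for every (src,dst)
 * pair, yuv_convert[src*4 + dst] = yuv_coeff[dst] * inverse(yuv_coeff[src])
 * in 16.16 fixed point: decode source YUV to RGB with the inverse, then
 * re-encode with the destination coefficients. */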
 
static void calc_coefficients(AVFilterContext *ctx)
{
ColorMatrixContext *color = ctx->priv;
double rgb_coeffd[4][3][3];
double yuv_convertd[16][3][3];
int v = 0;
int i, j, k;
 
for (i = 0; i < 4; i++)
inverse3x3(rgb_coeffd[i], yuv_coeff[i]);
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
solve_coefficients(yuv_convertd[v], rgb_coeffd[i], yuv_coeff[j]);
for (k = 0; k < 3; k++) {
color->yuv_convert[v][k][0] = NS(yuv_convertd[v][k][0]);
color->yuv_convert[v][k][1] = NS(yuv_convertd[v][k][1]);
color->yuv_convert[v][k][2] = NS(yuv_convertd[v][k][2]);
}
if (color->yuv_convert[v][0][0] != 65536 || color->yuv_convert[v][1][0] != 0 ||
color->yuv_convert[v][2][0] != 0) {
av_log(ctx, AV_LOG_ERROR, "error calculating conversion coefficients\n");
}
v++;
}
}
}
 
static const char *color_modes[] = {"bt709", "fcc", "bt601", "smpte240m"};
 
static av_cold int init(AVFilterContext *ctx)
{
ColorMatrixContext *color = ctx->priv;
 
if (color->source == COLOR_MODE_NONE || color->dest == COLOR_MODE_NONE) {
av_log(ctx, AV_LOG_ERROR, "Unspecified source or destination color space\n");
return AVERROR(EINVAL);
}
 
if (color->source == color->dest) {
av_log(ctx, AV_LOG_ERROR, "Source and destination color space must not be identical\n");
return AVERROR(EINVAL);
}
 
color->mode = color->source * 4 + color->dest;
 
calc_coefficients(ctx);
 
return 0;
}
 
static void process_frame_uyvy422(ColorMatrixContext *color,
AVFrame *dst, AVFrame *src)
{
const unsigned char *srcp = src->data[0];
const int src_pitch = src->linesize[0];
const int height = src->height;
const int width = src->width*2;
unsigned char *dstp = dst->data[0];
const int dst_pitch = dst->linesize[0];
const int c2 = color->yuv_convert[color->mode][0][1];
const int c3 = color->yuv_convert[color->mode][0][2];
const int c4 = color->yuv_convert[color->mode][1][1];
const int c5 = color->yuv_convert[color->mode][1][2];
const int c6 = color->yuv_convert[color->mode][2][1];
const int c7 = color->yuv_convert[color->mode][2][2];
int x, y;
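/* Editor's note: 1081344 == 16.5*65536 and 8421376 == 128.5*65536, i.e.
 * the luma (16) and chroma (128) offsets are re-added together with a
 * 0.5 rounding term before shifting back from 16.16 fixed point. */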
 
for (y = 0; y < height; y++) {
for (x = 0; x < width; x += 4) {
const int u = srcp[x + 0] - 128;
const int v = srcp[x + 2] - 128;
const int uvval = c2 * u + c3 * v + 1081344;
dstp[x + 0] = CB((c4 * u + c5 * v + 8421376) >> 16);
dstp[x + 1] = CB((65536 * (srcp[x + 1] - 16) + uvval) >> 16);
dstp[x + 2] = CB((c6 * u + c7 * v + 8421376) >> 16);
dstp[x + 3] = CB((65536 * (srcp[x + 3] - 16) + uvval) >> 16);
}
srcp += src_pitch;
dstp += dst_pitch;
}
}
 
static void process_frame_yuv422p(ColorMatrixContext *color,
AVFrame *dst, AVFrame *src)
{
const unsigned char *srcpU = src->data[1];
const unsigned char *srcpV = src->data[2];
const unsigned char *srcpY = src->data[0];
const int src_pitchY = src->linesize[0];
const int src_pitchUV = src->linesize[1];
const int height = src->height;
const int width = src->width;
unsigned char *dstpU = dst->data[1];
unsigned char *dstpV = dst->data[2];
unsigned char *dstpY = dst->data[0];
const int dst_pitchY = dst->linesize[0];
const int dst_pitchUV = dst->linesize[1];
const int c2 = color->yuv_convert[color->mode][0][1];
const int c3 = color->yuv_convert[color->mode][0][2];
const int c4 = color->yuv_convert[color->mode][1][1];
const int c5 = color->yuv_convert[color->mode][1][2];
const int c6 = color->yuv_convert[color->mode][2][1];
const int c7 = color->yuv_convert[color->mode][2][2];
int x, y;
 
for (y = 0; y < height; y++) {
for (x = 0; x < width; x += 2) {
const int u = srcpU[x >> 1] - 128;
const int v = srcpV[x >> 1] - 128;
const int uvval = c2 * u + c3 * v + 1081344;
dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
}
srcpY += src_pitchY;
dstpY += dst_pitchY;
srcpU += src_pitchUV;
srcpV += src_pitchUV;
dstpU += dst_pitchUV;
dstpV += dst_pitchUV;
}
}
 
static void process_frame_yuv420p(ColorMatrixContext *color,
AVFrame *dst, AVFrame *src)
{
const unsigned char *srcpU = src->data[1];
const unsigned char *srcpV = src->data[2];
const unsigned char *srcpY = src->data[0];
const unsigned char *srcpN = src->data[0] + src->linesize[0];
const int src_pitchY = src->linesize[0];
const int src_pitchUV = src->linesize[1];
const int height = src->height;
const int width = src->width;
unsigned char *dstpU = dst->data[1];
unsigned char *dstpV = dst->data[2];
unsigned char *dstpY = dst->data[0];
unsigned char *dstpN = dst->data[0] + dst->linesize[0];
const int dst_pitchY = dst->linesize[0];
const int dst_pitchUV = dst->linesize[1];
const int c2 = color->yuv_convert[color->mode][0][1];
const int c3 = color->yuv_convert[color->mode][0][2];
const int c4 = color->yuv_convert[color->mode][1][1];
const int c5 = color->yuv_convert[color->mode][1][2];
const int c6 = color->yuv_convert[color->mode][2][1];
const int c7 = color->yuv_convert[color->mode][2][2];
int x, y;
 
for (y = 0; y < height; y += 2) {
for (x = 0; x < width; x += 2) {
const int u = srcpU[x >> 1] - 128;
const int v = srcpV[x >> 1] - 128;
const int uvval = c2 * u + c3 * v + 1081344;
dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
dstpN[x + 0] = CB((65536 * (srcpN[x + 0] - 16) + uvval) >> 16);
dstpN[x + 1] = CB((65536 * (srcpN[x + 1] - 16) + uvval) >> 16);
dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
}
srcpY += src_pitchY << 1;
dstpY += dst_pitchY << 1;
srcpN += src_pitchY << 1;
dstpN += dst_pitchY << 1;
srcpU += src_pitchUV;
srcpV += src_pitchUV;
dstpU += dst_pitchUV;
dstpV += dst_pitchUV;
}
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
ColorMatrixContext *color = ctx->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
 
color->hsub = pix_desc->log2_chroma_w;
color->vsub = pix_desc->log2_chroma_h;
 
av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n",
color_modes[color->source], color_modes[color->dest]);
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_UYVY422,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
AVFilterContext *ctx = link->dst;
ColorMatrixContext *color = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
 
if (in->format == AV_PIX_FMT_YUV422P)
process_frame_yuv422p(color, out, in);
else if (in->format == AV_PIX_FMT_YUV420P)
process_frame_yuv420p(color, out, in);
else
process_frame_uyvy422(color, out, in);
 
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
 
static const AVFilterPad colormatrix_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad colormatrix_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_colormatrix = {
.name = "colormatrix",
.description = NULL_IF_CONFIG_SMALL("Convert color matrix."),
.priv_size = sizeof(ColorMatrixContext),
.init = init,
.query_formats = query_formats,
.inputs = colormatrix_inputs,
.outputs = colormatrix_outputs,
.priv_class = &colormatrix_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_copy.c
0,0 → 1,69
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* copy video filter
*/
 
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out = ff_get_video_buffer(outlink, in->width, in->height);
 
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
av_image_copy(out->data, out->linesize, (const uint8_t**) in->data, in->linesize,
in->format, in->width, in->height);
 
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
 
static const AVFilterPad avfilter_vf_copy_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_copy_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_copy = {
.name = "copy",
.description = NULL_IF_CONFIG_SMALL("Copy the input video unchanged to the output."),
.inputs = avfilter_vf_copy_inputs,
.outputs = avfilter_vf_copy_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_crop.c
0,0 → 1,344
/*
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* video crop filter
*/
 
#include <stdio.h>
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/eval.h"
#include "libavutil/avstring.h"
#include "libavutil/internal.h"
#include "libavutil/libm.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
 
static const char *const var_names[] = {
"in_w", "iw", ///< width of the input video
"in_h", "ih", ///< height of the input video
"out_w", "ow", ///< width of the cropped video
"out_h", "oh", ///< height of the cropped video
"a",
"sar",
"dar",
"hsub",
"vsub",
"x",
"y",
"n", ///< number of frame
"pos", ///< position in the file
"t", ///< timestamp expressed in seconds
NULL
};
 
enum var_name {
VAR_IN_W, VAR_IW,
VAR_IN_H, VAR_IH,
VAR_OUT_W, VAR_OW,
VAR_OUT_H, VAR_OH,
VAR_A,
VAR_SAR,
VAR_DAR,
VAR_HSUB,
VAR_VSUB,
VAR_X,
VAR_Y,
VAR_N,
VAR_POS,
VAR_T,
VAR_VARS_NB
};
 
typedef struct {
const AVClass *class;
int x; ///< x offset of the non-cropped area with respect to the input area
int y; ///< y offset of the non-cropped area with respect to the input area
int w; ///< width of the cropped area
int h; ///< height of the cropped area
 
AVRational out_sar; ///< output sample aspect ratio
int keep_aspect; ///< keep display aspect ratio when cropping
 
int max_step[4]; ///< max pixel step for each plane, expressed as a number of bytes
int hsub, vsub; ///< chroma subsampling
char *x_expr, *y_expr, *w_expr, *h_expr;
AVExpr *x_pexpr, *y_pexpr; /* parsed expressions for x and y */
double var_values[VAR_VARS_NB];
} CropContext;
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
int fmt;
 
for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
if (!(desc->flags & (AV_PIX_FMT_FLAG_HWACCEL | AV_PIX_FMT_FLAG_BITSTREAM)) &&
!((desc->log2_chroma_w || desc->log2_chroma_h) && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR)))
ff_add_format(&formats, fmt);
}
 
ff_set_common_formats(ctx, formats);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
CropContext *s = ctx->priv;
 
av_expr_free(s->x_pexpr);
s->x_pexpr = NULL;
av_expr_free(s->y_pexpr);
s->y_pexpr = NULL;
}
 
static inline int normalize_double(int *n, double d)
{
int ret = 0;
 
if (isnan(d)) {
ret = AVERROR(EINVAL);
} else if (d > INT_MAX || d < INT_MIN) {
*n = d > INT_MAX ? INT_MAX : INT_MIN;
ret = AVERROR(EINVAL);
} else
*n = round(d);
 
return ret;
}
 
static int config_input(AVFilterLink *link)
{
AVFilterContext *ctx = link->dst;
CropContext *s = ctx->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(link->format);
int ret;
const char *expr;
double res;
 
s->var_values[VAR_IN_W] = s->var_values[VAR_IW] = ctx->inputs[0]->w;
s->var_values[VAR_IN_H] = s->var_values[VAR_IH] = ctx->inputs[0]->h;
s->var_values[VAR_A] = (float) link->w / link->h;
s->var_values[VAR_SAR] = link->sample_aspect_ratio.num ? av_q2d(link->sample_aspect_ratio) : 1;
s->var_values[VAR_DAR] = s->var_values[VAR_A] * s->var_values[VAR_SAR];
s->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
s->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
s->var_values[VAR_X] = NAN;
s->var_values[VAR_Y] = NAN;
s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = NAN;
s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = NAN;
s->var_values[VAR_N] = 0;
s->var_values[VAR_T] = NAN;
s->var_values[VAR_POS] = NAN;
 
av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
s->hsub = pix_desc->log2_chroma_w;
s->vsub = pix_desc->log2_chroma_h;
 
if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
var_names, s->var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto fail_expr;
s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = res;
if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
var_names, s->var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto fail_expr;
s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = res;
/* evaluate again ow as it may depend on oh */
if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
var_names, s->var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto fail_expr;
 
s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = res;
if (normalize_double(&s->w, s->var_values[VAR_OUT_W]) < 0 ||
normalize_double(&s->h, s->var_values[VAR_OUT_H]) < 0) {
av_log(ctx, AV_LOG_ERROR,
"Too big value or invalid expression for out_w/ow or out_h/oh. "
"Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
s->w_expr, s->h_expr);
return AVERROR(EINVAL);
}
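/* Editor's note: round the crop size down to the chroma alignment; for
 * YUV420P (hsub = vsub = 1) this masks w and h to even values. */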
s->w &= ~((1 << s->hsub) - 1);
s->h &= ~((1 << s->vsub) - 1);
 
av_expr_free(s->x_pexpr);
av_expr_free(s->y_pexpr);
s->x_pexpr = s->y_pexpr = NULL;
if ((ret = av_expr_parse(&s->x_pexpr, s->x_expr, var_names,
NULL, NULL, NULL, NULL, 0, ctx)) < 0 ||
(ret = av_expr_parse(&s->y_pexpr, s->y_expr, var_names,
NULL, NULL, NULL, NULL, 0, ctx)) < 0)
return AVERROR(EINVAL);
 
if (s->keep_aspect) {
AVRational dar = av_mul_q(link->sample_aspect_ratio,
(AVRational){ link->w, link->h });
av_reduce(&s->out_sar.num, &s->out_sar.den,
dar.num * s->h, dar.den * s->w, INT_MAX);
} else
s->out_sar = link->sample_aspect_ratio;
 
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d -> w:%d h:%d sar:%d/%d\n",
link->w, link->h, link->sample_aspect_ratio.num, link->sample_aspect_ratio.den,
s->w, s->h, s->out_sar.num, s->out_sar.den);
 
if (s->w <= 0 || s->h <= 0 ||
s->w > link->w || s->h > link->h) {
av_log(ctx, AV_LOG_ERROR,
"Invalid too big or non positive size for width '%d' or height '%d'\n",
s->w, s->h);
return AVERROR(EINVAL);
}
 
/* set default, required in the case the first computed value for x/y is NAN */
s->x = (link->w - s->w) / 2;
s->y = (link->h - s->h) / 2;
s->x &= ~((1 << s->hsub) - 1);
s->y &= ~((1 << s->vsub) - 1);
return 0;
 
fail_expr:
av_log(NULL, AV_LOG_ERROR, "Error when evaluating the expression '%s'\n", expr);
return ret;
}
 
static int config_output(AVFilterLink *link)
{
CropContext *s = link->src->priv;
 
link->w = s->w;
link->h = s->h;
link->sample_aspect_ratio = s->out_sar;
 
return 0;
}
 
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
CropContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
int i;
 
frame->width = s->w;
frame->height = s->h;
 
s->var_values[VAR_N] = link->frame_count;
s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(link->time_base);
s->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ?
NAN : av_frame_get_pkt_pos(frame);
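/* Editor's note: x is evaluated a second time so that the x and y
 * expressions may refer to each other's result. */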
s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
 
normalize_double(&s->x, s->var_values[VAR_X]);
normalize_double(&s->y, s->var_values[VAR_Y]);
 
if (s->x < 0)
s->x = 0;
if (s->y < 0)
s->y = 0;
if ((unsigned)s->x + (unsigned)s->w > link->w)
s->x = link->w - s->w;
if ((unsigned)s->y + (unsigned)s->h > link->h)
s->y = link->h - s->h;
s->x &= ~((1 << s->hsub) - 1);
s->y &= ~((1 << s->vsub) - 1);
 
av_dlog(ctx, "n:%d t:%f pos:%f x:%d y:%d x+w:%d y+h:%d\n",
(int)s->var_values[VAR_N], s->var_values[VAR_T], s->var_values[VAR_POS],
s->x, s->y, s->x+s->w, s->y+s->h);
 
frame->data[0] += s->y * frame->linesize[0];
frame->data[0] += s->x * s->max_step[0];
 
if (!(desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)) {
for (i = 1; i < 3; i ++) {
if (frame->data[i]) {
frame->data[i] += (s->y >> s->vsub) * frame->linesize[i];
frame->data[i] += (s->x * s->max_step[i]) >> s->hsub;
}
}
}
 
/* alpha plane */
if (frame->data[3]) {
frame->data[3] += s->y * frame->linesize[3];
frame->data[3] += s->x * s->max_step[3];
}
 
return ff_filter_frame(link->dst->outputs[0], frame);
}
 
#define OFFSET(x) offsetof(CropContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption crop_options[] = {
{ "out_w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "out_h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "x", "set the x crop area expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "(in_w-out_w)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "y", "set the y crop area expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "(in_h-out_h)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "keep_aspect", "keep aspect ratio", OFFSET(keep_aspect), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
{ NULL }
};
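/* Editor's note (usage sketch): "crop=w=in_w-100:h=in_h-100" trims 50
 * pixels from each edge, since the default x/y expressions above center
 * the crop area. */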
 
AVFILTER_DEFINE_CLASS(crop);
 
static const AVFilterPad avfilter_vf_crop_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_crop_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_crop = {
.name = "crop",
.description = NULL_IF_CONFIG_SMALL("Crop the input video."),
.priv_size = sizeof(CropContext),
.priv_class = &crop_class,
.query_formats = query_formats,
.uninit = uninit,
.inputs = avfilter_vf_crop_inputs,
.outputs = avfilter_vf_crop_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_cropdetect.c
0,0 → 1,252
/*
* Copyright (c) 2002 A'rpi
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* border detection filter
* Ported from MPlayer libmpcodecs/vf_cropdetect.c.
*/
 
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
int x1, y1, x2, y2;
int limit;
int round;
int reset_count;
int frame_nb;
int max_pixsteps[4];
} CropDetectContext;
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
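/* Editor's note: checkline() returns the mean sample value along one row
 * or column ('stride' selects the direction); a line whose mean exceeds
 * 'limit' is treated as picture content rather than black border. */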
 
static int checkline(void *ctx, const unsigned char *src, int stride, int len, int bpp)
{
int total = 0;
int div = len;
 
switch (bpp) {
case 1:
while (--len >= 0) {
total += src[0];
src += stride;
}
break;
case 3:
case 4:
while (--len >= 0) {
total += src[0] + src[1] + src[2];
src += stride;
}
div *= 3;
break;
}
total /= div;
 
av_log(ctx, AV_LOG_DEBUG, "total:%d\n", total);
return total;
}
 
static av_cold int init(AVFilterContext *ctx)
{
CropDetectContext *s = ctx->priv;
 
s->frame_nb = -2;
 
av_log(ctx, AV_LOG_VERBOSE, "limit:%d round:%d reset_count:%d\n",
s->limit, s->round, s->reset_count);
 
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
CropDetectContext *s = ctx->priv;
 
av_image_fill_max_pixsteps(s->max_pixsteps, NULL,
av_pix_fmt_desc_get(inlink->format));
 
s->x1 = inlink->w - 1;
s->y1 = inlink->h - 1;
s->x2 = 0;
s->y2 = 0;
 
return 0;
}
 
#define SET_META(key, value) \
snprintf(buf, sizeof(buf), "%d", value); \
av_dict_set(metadata, key, buf, 0)
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
CropDetectContext *s = ctx->priv;
int bpp = s->max_pixsteps[0];
int w, h, x, y, shrink_by;
AVDictionary **metadata;
char buf[32];
 
// ignore first 2 frames - they may be empty
if (++s->frame_nb > 0) {
metadata = avpriv_frame_get_metadatap(frame);
 
// Reset the crop area every reset_count frames, if reset_count is > 0
if (s->reset_count > 0 && s->frame_nb > s->reset_count) {
s->x1 = frame->width - 1;
s->y1 = frame->height - 1;
s->x2 = 0;
s->y2 = 0;
s->frame_nb = 1;
}
 
for (y = 0; y < s->y1; y++) {
if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > s->limit) {
s->y1 = y;
break;
}
}
 
for (y = frame->height - 1; y > s->y2; y--) {
if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > s->limit) {
s->y2 = y;
break;
}
}
 
for (y = 0; y < s->x1; y++) {
if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > s->limit) {
s->x1 = y;
break;
}
}
 
for (y = frame->width - 1; y > s->x2; y--) {
if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > s->limit) {
s->x2 = y;
break;
}
}
 
// round x and y (up), important for yuv colorspaces
// make sure they stay rounded!
x = (s->x1+1) & ~1;
y = (s->y1+1) & ~1;
 
w = s->x2 - x + 1;
h = s->y2 - y + 1;
 
// w and h must be divisible by 2 as well because of yuv
// colorspace problems.
if (s->round <= 1)
s->round = 16;
if (s->round % 2)
s->round *= 2;
 
shrink_by = w % s->round;
w -= shrink_by;
x += (shrink_by/2 + 1) & ~1;
 
shrink_by = h % s->round;
h -= shrink_by;
y += (shrink_by/2 + 1) & ~1;
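// Worked example (illustrative): with x1=5, x2=1003 and round=16,
// x=(5+1)&~1=6 and w=1003-6+1=998; shrink_by=998%16=6, so w becomes 992
// and x advances by (6/2+1)&~1=4 to 10, keeping w a multiple of round
// and x even.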
 
SET_META("lavfi.cropdetect.x1", s->x1);
SET_META("lavfi.cropdetect.x2", s->x2);
SET_META("lavfi.cropdetect.y1", s->y1);
SET_META("lavfi.cropdetect.y2", s->y2);
SET_META("lavfi.cropdetect.w", w);
SET_META("lavfi.cropdetect.h", h);
SET_META("lavfi.cropdetect.x", x);
SET_META("lavfi.cropdetect.y", y);
 
av_log(ctx, AV_LOG_INFO,
"x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
s->x1, s->x2, s->y1, s->y2, w, h, x, y, frame->pts,
frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
w, h, x, y);
}
 
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
 
#define OFFSET(x) offsetof(CropDetectContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption cropdetect_options[] = {
{ "limit", "Threshold below which the pixel is considered black", OFFSET(limit), AV_OPT_TYPE_INT, { .i64 = 24 }, 0, 255, FLAGS },
{ "round", "Value by which the width/height should be divisible", OFFSET(round), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, INT_MAX, FLAGS },
{ "reset", "Recalculate the crop area after this many frames", OFFSET(reset_count), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
{ "reset_count", "Recalculate the crop area after this many frames",OFFSET(reset_count),AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(cropdetect);
 
static const AVFilterPad avfilter_vf_cropdetect_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_cropdetect_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO
},
{ NULL }
};
 
AVFilter avfilter_vf_cropdetect = {
.name = "cropdetect",
.description = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),
.priv_size = sizeof(CropDetectContext),
.priv_class = &cropdetect_class,
.init = init,
.query_formats = query_formats,
.inputs = avfilter_vf_cropdetect_inputs,
.outputs = avfilter_vf_cropdetect_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_curves.c
0,0 → 1,552
/*
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/opt.h"
#include "libavutil/bprint.h"
#include "libavutil/eval.h"
#include "libavutil/file.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
#define R 0
#define G 1
#define B 2
#define A 3
 
struct keypoint {
double x, y;
struct keypoint *next;
};
 
#define NB_COMP 3
 
enum preset {
PRESET_NONE,
PRESET_COLOR_NEGATIVE,
PRESET_CROSS_PROCESS,
PRESET_DARKER,
PRESET_INCREASE_CONTRAST,
PRESET_LIGHTER,
PRESET_LINEAR_CONTRAST,
PRESET_MEDIUM_CONTRAST,
PRESET_NEGATIVE,
PRESET_STRONG_CONTRAST,
PRESET_VINTAGE,
NB_PRESETS,
};
 
typedef struct {
const AVClass *class;
enum preset preset;
char *comp_points_str[NB_COMP + 1];
char *comp_points_str_all;
uint8_t graph[NB_COMP + 1][256];
char *psfile;
uint8_t rgba_map[4];
int step;
} CurvesContext;
 
#define OFFSET(x) offsetof(CurvesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption curves_options[] = {
{ "preset", "select a color curves preset", OFFSET(preset), AV_OPT_TYPE_INT, {.i64=PRESET_NONE}, PRESET_NONE, NB_PRESETS-1, FLAGS, "preset_name" },
{ "none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_NONE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
{ "color_negative", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_COLOR_NEGATIVE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
{ "cross_process", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_CROSS_PROCESS}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
{ "darker", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_DARKER}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
{ "increase_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_INCREASE_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
{ "lighter", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_LIGHTER}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
{ "linear_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_LINEAR_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
{ "medium_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_MEDIUM_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
{ "negative", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_NEGATIVE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
{ "strong_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_STRONG_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
{ "vintage", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_VINTAGE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
{ "master","set master points coordinates",OFFSET(comp_points_str[NB_COMP]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "m", "set master points coordinates",OFFSET(comp_points_str[NB_COMP]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "red", "set red points coordinates", OFFSET(comp_points_str[0]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "r", "set red points coordinates", OFFSET(comp_points_str[0]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "green", "set green points coordinates", OFFSET(comp_points_str[1]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "g", "set green points coordinates", OFFSET(comp_points_str[1]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "blue", "set blue points coordinates", OFFSET(comp_points_str[2]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "b", "set blue points coordinates", OFFSET(comp_points_str[2]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "all", "set points coordinates for all components", OFFSET(comp_points_str_all), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "psfile", "set Photoshop curves file name", OFFSET(psfile), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(curves);
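/*
 * Usage sketch (illustrative): pick a preset or give explicit "x/y" key
 * points with both coordinates in the [0,1] range, e.g.
 *   -vf curves=preset=vintage
 *   -vf curves=m='0/0 0.5/0.6 1/1'
 */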
 
static const struct {
const char *r;
const char *g;
const char *b;
const char *master;
} curves_presets[] = {
[PRESET_COLOR_NEGATIVE] = {
"0/1 0.129/1 0.466/0.498 0.725/0 1/0",
"0/1 0.109/1 0.301/0.498 0.517/0 1/0",
"0/1 0.098/1 0.235/0.498 0.423/0 1/0",
},
[PRESET_CROSS_PROCESS] = {
"0.25/0.156 0.501/0.501 0.686/0.745",
"0.25/0.188 0.38/0.501 0.745/0.815 1/0.815",
"0.231/0.094 0.709/0.874",
},
[PRESET_DARKER] = { .master = "0.5/0.4" },
[PRESET_INCREASE_CONTRAST] = { .master = "0.149/0.066 0.831/0.905 0.905/0.98" },
[PRESET_LIGHTER] = { .master = "0.4/0.5" },
[PRESET_LINEAR_CONTRAST] = { .master = "0.305/0.286 0.694/0.713" },
[PRESET_MEDIUM_CONTRAST] = { .master = "0.286/0.219 0.639/0.643" },
[PRESET_NEGATIVE] = { .master = "0/1 1/0" },
[PRESET_STRONG_CONTRAST] = { .master = "0.301/0.196 0.592/0.6 0.686/0.737" },
[PRESET_VINTAGE] = {
"0/0.11 0.42/0.51 1/0.95",
"0.50/0.48",
"0/0.22 0.49/0.44 1/0.8",
}
};
 
static struct keypoint *make_point(double x, double y, struct keypoint *next)
{
struct keypoint *point = av_mallocz(sizeof(*point));
 
if (!point)
return NULL;
point->x = x;
point->y = y;
point->next = next;
return point;
}
 
static int parse_points_str(AVFilterContext *ctx, struct keypoint **points, const char *s)
{
char *p = (char *)s; // strtod won't alter the string
struct keypoint *last = NULL;
 
/* construct a linked list based on the key points string */
while (p && *p) {
struct keypoint *point = make_point(0, 0, NULL);
if (!point)
return AVERROR(ENOMEM);
point->x = av_strtod(p, &p); if (p && *p) p++;
point->y = av_strtod(p, &p); if (p && *p) p++;
if (point->x < 0 || point->x > 1 || point->y < 0 || point->y > 1) {
av_log(ctx, AV_LOG_ERROR, "Invalid key point coordinates (%f;%f), "
"x and y must be in the [0;1] range.\n", point->x, point->y);
return AVERROR(EINVAL);
}
if (!*points)
*points = point;
if (last) {
if ((int)(last->x * 255) >= (int)(point->x * 255)) {
av_log(ctx, AV_LOG_ERROR, "Key point coordinates (%f;%f) "
"and (%f;%f) are too close from each other or not "
"strictly increasing on the x-axis\n",
last->x, last->y, point->x, point->y);
return AVERROR(EINVAL);
}
last->next = point;
}
last = point;
}
 
/* auto insert first key point if missing at x=0 */
if (!*points) {
last = make_point(0, 0, NULL);
if (!last)
return AVERROR(ENOMEM);
last->x = last->y = 0;
*points = last;
} else if ((*points)->x != 0.) {
struct keypoint *newfirst = make_point(0, 0, *points);
if (!newfirst)
return AVERROR(ENOMEM);
*points = newfirst;
}
 
av_assert0(last);
 
/* auto insert last key point if missing at x=1 */
if (last->x != 1.) {
struct keypoint *point = make_point(1, 1, NULL);
if (!point)
return AVERROR(ENOMEM);
last->next = point;
}
 
return 0;
}
 
static int get_nb_points(const struct keypoint *d)
{
int n = 0;
while (d) {
n++;
d = d->next;
}
return n;
}
 
/**
* Natural cubic spline interpolation
* Based on the "Finding Curves using Cubic Splines" notes by Steven Rauch and John Stockie.
* @see http://people.math.sfu.ca/~stockie/teaching/macm316/notes/splines.pdf
*/
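/*
 * Math sketch of the code below: on each interval the curve is the cubic
 *   S_i(t) = a + b*t + c*t^2 + d*t^3,  t = x - x_i,
 * with a = y_i, c = r_i/2, d = (r_{i+1} - r_i) / (6*h_i) and
 * b = (y_{i+1} - y_i)/h_i - h_i*r_i/2 - h_i*(r_{i+1} - r_i)/6,
 * where the second derivatives r_i solve a tridiagonal system built from
 * the interval widths h_i, with natural boundaries r_0 = r_{n-1} = 0.
 */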
static int interpolate(AVFilterContext *ctx, uint8_t *y, const struct keypoint *points)
{
int i, ret = 0;
const struct keypoint *point;
double xprev = 0;
 
int n = get_nb_points(points); // number of splines
 
double (*matrix)[3] = av_calloc(n, sizeof(*matrix));
double *h = av_malloc((n - 1) * sizeof(*h));
double *r = av_calloc(n, sizeof(*r));
 
if (!matrix || !h || !r) {
ret = AVERROR(ENOMEM);
goto end;
}
 
/* h(i) = x(i+1) - x(i) */
i = -1;
for (point = points; point; point = point->next) {
if (i != -1)
h[i] = point->x - xprev;
xprev = point->x;
i++;
}
 
/* right side of the polynomials, will be modified to contain the solution */
point = points;
for (i = 1; i < n - 1; i++) {
double yp = point->y,
yc = point->next->y,
yn = point->next->next->y;
r[i] = 6 * ((yn-yc)/h[i] - (yc-yp)/h[i-1]);
point = point->next;
}
 
#define BD 0 /* sub diagonal (below main) */
#define MD 1 /* main diagonal (center) */
#define AD 2 /* sup diagonal (above main) */
 
/* left side of the polynomials into a tridiagonal matrix. */
matrix[0][MD] = matrix[n - 1][MD] = 1;
for (i = 1; i < n - 1; i++) {
matrix[i][BD] = h[i-1];
matrix[i][MD] = 2 * (h[i-1] + h[i]);
matrix[i][AD] = h[i];
}
 
/* tridiagonal solving of the linear system */
for (i = 1; i < n; i++) {
double den = matrix[i][MD] - matrix[i][BD] * matrix[i-1][AD];
double k = den ? 1./den : 1.;
matrix[i][AD] *= k;
r[i] = (r[i] - matrix[i][BD] * r[i - 1]) * k;
}
for (i = n - 2; i >= 0; i--)
r[i] = r[i] - matrix[i][AD] * r[i + 1];
 
/* compute the graph with x=[0..255] */
i = 0;
point = points;
av_assert0(point->next); // always at least 2 key points
while (point->next) {
double yc = point->y;
double yn = point->next->y;
 
double a = yc;
double b = (yn-yc)/h[i] - h[i]*r[i]/2. - h[i]*(r[i+1]-r[i])/6.;
double c = r[i] / 2.;
double d = (r[i+1] - r[i]) / (6.*h[i]);
 
int x;
int x_start = point->x * 255;
int x_end = point->next->x * 255;
 
av_assert0(x_start >= 0 && x_start <= 255 &&
x_end >= 0 && x_end <= 255);
 
for (x = x_start; x <= x_end; x++) {
double xx = (x - x_start) * 1/255.;
double yy = a + b*xx + c*xx*xx + d*xx*xx*xx;
y[x] = av_clipf(yy, 0, 1) * 255;
av_log(ctx, AV_LOG_DEBUG, "f(%f)=%f -> y[%d]=%d\n", xx, yy, x, y[x]);
}
 
point = point->next;
i++;
}
 
end:
av_free(matrix);
av_free(h);
av_free(r);
return ret;
}
 
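/*
 * Parse a Photoshop curves (.acv-style) file, as implied by the reads
 * below: big-endian 16-bit version and curve count, then per curve a
 * 16-bit point count followed by (y, x) pairs, rescaled from [0,255] to
 * the [0,1] "x/y" string format used by this filter.
 */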
static int parse_psfile(AVFilterContext *ctx, const char *fname)
{
CurvesContext *curves = ctx->priv;
uint8_t *buf;
size_t size;
int i, ret, av_unused(version), nb_curves;
AVBPrint ptstr;
static const int comp_ids[] = {3, 0, 1, 2};
 
av_bprint_init(&ptstr, 0, AV_BPRINT_SIZE_AUTOMATIC);
 
ret = av_file_map(fname, &buf, &size, 0, NULL);
if (ret < 0)
return ret;
 
#define READ16(dst) do { \
if (size < 2) \
return AVERROR_INVALIDDATA; \
dst = AV_RB16(buf); \
buf += 2; \
size -= 2; \
} while (0)
 
READ16(version);
READ16(nb_curves);
for (i = 0; i < FFMIN(nb_curves, FF_ARRAY_ELEMS(comp_ids)); i++) {
int nb_points, n;
av_bprint_clear(&ptstr);
READ16(nb_points);
for (n = 0; n < nb_points; n++) {
int y, x;
READ16(y);
READ16(x);
av_bprintf(&ptstr, "%f/%f ", x / 255., y / 255.);
}
if (*ptstr.str) {
char **pts = &curves->comp_points_str[comp_ids[i]];
if (!*pts) {
*pts = av_strdup(ptstr.str);
av_log(ctx, AV_LOG_DEBUG, "curves %d (intid=%d) [%d points]: [%s]\n",
i, comp_ids[i], nb_points, *pts);
if (!*pts) {
ret = AVERROR(ENOMEM);
goto end;
}
}
}
}
end:
av_bprint_finalize(&ptstr, NULL);
av_file_unmap(buf, size);
return ret;
}
 
static av_cold int init(AVFilterContext *ctx)
{
int i, j, ret;
CurvesContext *curves = ctx->priv;
struct keypoint *comp_points[NB_COMP + 1] = {0};
char **pts = curves->comp_points_str;
const char *allp = curves->comp_points_str_all;
 
//if (!allp && curves->preset != PRESET_NONE && curves_presets[curves->preset].all)
// allp = curves_presets[curves->preset].all;
 
if (allp) {
for (i = 0; i < NB_COMP; i++) {
if (!pts[i])
pts[i] = av_strdup(allp);
if (!pts[i])
return AVERROR(ENOMEM);
}
}
 
if (curves->psfile) {
ret = parse_psfile(ctx, curves->psfile);
if (ret < 0)
return ret;
}
 
if (curves->preset != PRESET_NONE) {
#define SET_COMP_IF_NOT_SET(n, name) do { \
if (!pts[n] && curves_presets[curves->preset].name) { \
pts[n] = av_strdup(curves_presets[curves->preset].name); \
if (!pts[n]) \
return AVERROR(ENOMEM); \
} \
} while (0)
SET_COMP_IF_NOT_SET(0, r);
SET_COMP_IF_NOT_SET(1, g);
SET_COMP_IF_NOT_SET(2, b);
SET_COMP_IF_NOT_SET(3, master);
}
 
for (i = 0; i < NB_COMP + 1; i++) {
ret = parse_points_str(ctx, comp_points + i, curves->comp_points_str[i]);
if (ret < 0)
return ret;
ret = interpolate(ctx, curves->graph[i], comp_points[i]);
if (ret < 0)
return ret;
}
 
if (pts[NB_COMP]) {
for (i = 0; i < NB_COMP; i++)
for (j = 0; j < 256; j++)
curves->graph[i][j] = curves->graph[NB_COMP][curves->graph[i][j]];
}
 
if (av_log_get_level() >= AV_LOG_VERBOSE) {
for (i = 0; i < NB_COMP; i++) {
struct keypoint *point = comp_points[i];
av_log(ctx, AV_LOG_VERBOSE, "#%d points:", i);
while (point) {
av_log(ctx, AV_LOG_VERBOSE, " (%f;%f)", point->x, point->y);
point = point->next;
}
av_log(ctx, AV_LOG_VERBOSE, "\n");
av_log(ctx, AV_LOG_VERBOSE, "#%d values:", i);
for (j = 0; j < 256; j++)
av_log(ctx, AV_LOG_VERBOSE, " %02X", curves->graph[i][j]);
av_log(ctx, AV_LOG_VERBOSE, "\n");
}
}
 
for (i = 0; i < NB_COMP + 1; i++) {
struct keypoint *point = comp_points[i];
while (point) {
struct keypoint *next = point->next;
av_free(point);
point = next;
}
}
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
CurvesContext *curves = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
ff_fill_rgba_map(curves->rgba_map, inlink->format);
curves->step = av_get_padded_bits_per_pixel(desc) >> 3;
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
int x, y, direct = 0;
AVFilterContext *ctx = inlink->dst;
CurvesContext *curves = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
uint8_t *dst;
const uint8_t *src;
const int step = curves->step;
const uint8_t r = curves->rgba_map[R];
const uint8_t g = curves->rgba_map[G];
const uint8_t b = curves->rgba_map[B];
const uint8_t a = curves->rgba_map[A];
 
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
 
dst = out->data[0];
src = in ->data[0];
 
for (y = 0; y < inlink->h; y++) {
for (x = 0; x < inlink->w * step; x += step) {
dst[x + r] = curves->graph[R][src[x + r]];
dst[x + g] = curves->graph[G][src[x + g]];
dst[x + b] = curves->graph[B][src[x + b]];
if (!direct && step == 4)
dst[x + a] = src[x + a];
}
dst += out->linesize[0];
src += in ->linesize[0];
}
 
if (!direct)
av_frame_free(&in);
 
return ff_filter_frame(outlink, out);
}
 
static const AVFilterPad curves_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad curves_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_curves = {
.name = "curves",
.description = NULL_IF_CONFIG_SMALL("Adjust components curves."),
.priv_size = sizeof(CurvesContext),
.init = init,
.query_formats = query_formats,
.inputs = curves_inputs,
.outputs = curves_outputs,
.priv_class = &curves_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_dctdnoiz.c
0,0 → 1,433
/*
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* A simple, relatively efficient and extremely slow DCT image denoiser.
* @see http://www.ipol.im/pub/art/2011/ys-dct/
*/
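/*
 * Method sketch (from the code below): every overlapping BSIZE x BSIZE
 * block is DCT-transformed, coefficients with |c| < th = 3*sigma are
 * zeroed (or scaled by the user expression), the block is inverse
 * transformed, and the overlapping results are averaged with the
 * precomputed weights.
 */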
 
#include "libavcodec/avfft.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "drawutils.h"
#include "internal.h"
 
#define NBITS 4
#define BSIZE (1<<(NBITS))
 
static const char *const var_names[] = { "c", NULL };
enum { VAR_C, VAR_VARS_NB };
 
typedef struct {
const AVClass *class;
 
/* coefficient factor expression */
char *expr_str;
AVExpr *expr;
double var_values[VAR_VARS_NB];
 
int pr_width, pr_height; // width and height to process
float sigma; // used when no expression is set
float th; // threshold (3*sigma)
float color_dct[3][3]; // 3x3 DCT for color decorrelation
float *cbuf[2][3]; // two planar rgb color buffers
float *weights; // DCT coefficients are accumulated with overlapping; these values are used for averaging
int p_linesize; // line sizes for color and weights
int overlap; // number of block overlapping pixels
int step; // block step increment (BSIZE - overlap)
DCTContext *dct, *idct; // DCT and inverse DCT contexts
float *block, *tmp_block; // two BSIZE x BSIZE block buffers
} DCTdnoizContext;
 
#define OFFSET(x) offsetof(DCTdnoizContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption dctdnoiz_options[] = {
{ "sigma", "set noise sigma constant", OFFSET(sigma), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 999, .flags = FLAGS },
{ "s", "set noise sigma constant", OFFSET(sigma), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 999, .flags = FLAGS },
{ "overlap", "set number of block overlapping pixels", OFFSET(overlap), AV_OPT_TYPE_INT, {.i64=(1<<NBITS)-1}, 0, (1<<NBITS)-1, .flags = FLAGS },
{ "expr", "set coefficient factor expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "e", "set coefficient factor expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(dctdnoiz);
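/*
 * Usage sketch (illustrative):
 *   -vf dctdnoiz=sigma=4.5
 * or, with an expression roughly equivalent to the default hard threshold,
 *   -vf dctdnoiz=e='gte(c, 4.5*3)'
 */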
 
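/*
 * Forward 2D DCT of one block, implemented as a row pass then a column
 * pass; av_dct_calc(DCT_II) output is rescaled by 1/sqrt(BSIZE) for the
 * DC term and sqrt(2/BSIZE) for the AC terms (judging from the factors
 * below) so that idct_block() can undo the exact same scaling.
 */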
static float *dct_block(DCTdnoizContext *ctx, const float *src, int src_linesize)
{
int x, y;
float *column;
 
for (y = 0; y < BSIZE; y++) {
float *line = ctx->block;
 
memcpy(line, src, BSIZE * sizeof(*line));
src += src_linesize;
av_dct_calc(ctx->dct, line);
 
column = ctx->tmp_block + y;
column[0] = line[0] * (1. / sqrt(BSIZE));
column += BSIZE;
for (x = 1; x < BSIZE; x++) {
*column = line[x] * sqrt(2. / BSIZE);
column += BSIZE;
}
}
 
column = ctx->tmp_block;
for (x = 0; x < BSIZE; x++) {
av_dct_calc(ctx->dct, column);
column[0] *= 1. / sqrt(BSIZE);
for (y = 1; y < BSIZE; y++)
column[y] *= sqrt(2. / BSIZE);
column += BSIZE;
}
 
for (y = 0; y < BSIZE; y++)
for (x = 0; x < BSIZE; x++)
ctx->block[y*BSIZE + x] = ctx->tmp_block[x*BSIZE + y];
 
return ctx->block;
}
 
static void idct_block(DCTdnoizContext *ctx, float *dst, int dst_linesize)
{
int x, y;
float *block = ctx->block;
float *tmp = ctx->tmp_block;
 
for (y = 0; y < BSIZE; y++) {
block[0] *= sqrt(BSIZE);
for (x = 1; x < BSIZE; x++)
block[x] *= 1./sqrt(2. / BSIZE);
av_dct_calc(ctx->idct, block);
block += BSIZE;
}
 
block = ctx->block;
for (y = 0; y < BSIZE; y++) {
tmp[0] = block[y] * sqrt(BSIZE);
for (x = 1; x < BSIZE; x++)
tmp[x] = block[x*BSIZE + y] * (1./sqrt(2. / BSIZE));
av_dct_calc(ctx->idct, tmp);
for (x = 0; x < BSIZE; x++)
dst[x*dst_linesize + y] += tmp[x];
}
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
DCTdnoizContext *s = ctx->priv;
int i, x, y, bx, by, linesize, *iweights;
const float dct_3x3[3][3] = {
{ 1./sqrt(3), 1./sqrt(3), 1./sqrt(3) },
{ 1./sqrt(2), 0, -1./sqrt(2) },
{ 1./sqrt(6), -2./sqrt(6), 1./sqrt(6) },
};
uint8_t rgba_map[4];
 
ff_fill_rgba_map(rgba_map, inlink->format);
for (y = 0; y < 3; y++)
for (x = 0; x < 3; x++)
s->color_dct[y][x] = dct_3x3[rgba_map[y]][rgba_map[x]];
 
s->pr_width = inlink->w - (inlink->w - BSIZE) % s->step;
s->pr_height = inlink->h - (inlink->h - BSIZE) % s->step;
if (s->pr_width != inlink->w)
av_log(ctx, AV_LOG_WARNING, "The last %d horizontal pixels won't be denoised\n",
inlink->w - s->pr_width);
if (s->pr_height != inlink->h)
av_log(ctx, AV_LOG_WARNING, "The last %d vertical pixels won't be denoised\n",
inlink->h - s->pr_height);
 
s->p_linesize = linesize = FFALIGN(s->pr_width, 32);
for (i = 0; i < 2; i++) {
s->cbuf[i][0] = av_malloc(linesize * s->pr_height * sizeof(*s->cbuf[i][0]));
s->cbuf[i][1] = av_malloc(linesize * s->pr_height * sizeof(*s->cbuf[i][1]));
s->cbuf[i][2] = av_malloc(linesize * s->pr_height * sizeof(*s->cbuf[i][2]));
if (!s->cbuf[i][0] || !s->cbuf[i][1] || !s->cbuf[i][2])
return AVERROR(ENOMEM);
}
 
s->weights = av_malloc(s->pr_height * linesize * sizeof(*s->weights));
if (!s->weights)
return AVERROR(ENOMEM);
iweights = av_calloc(s->pr_height, linesize * sizeof(*iweights));
if (!iweights)
return AVERROR(ENOMEM);
for (y = 0; y < s->pr_height - BSIZE + 1; y += s->step)
for (x = 0; x < s->pr_width - BSIZE + 1; x += s->step)
for (by = 0; by < BSIZE; by++)
for (bx = 0; bx < BSIZE; bx++)
iweights[(y + by)*linesize + x + bx]++;
for (y = 0; y < s->pr_height; y++)
for (x = 0; x < s->pr_width; x++)
s->weights[y*linesize + x] = 1. / iweights[y*linesize + x];
av_free(iweights);
 
return 0;
}
 
static av_cold int init(AVFilterContext *ctx)
{
DCTdnoizContext *s = ctx->priv;
 
if (s->expr_str) {
int ret = av_expr_parse(&s->expr, s->expr_str, var_names,
NULL, NULL, NULL, NULL, 0, ctx);
if (ret < 0)
return ret;
}
 
s->th = s->sigma * 3.;
s->step = BSIZE - s->overlap;
s->dct = av_dct_init(NBITS, DCT_II);
s->idct = av_dct_init(NBITS, DCT_III);
s->block = av_malloc(BSIZE * BSIZE * sizeof(*s->block));
s->tmp_block = av_malloc(BSIZE * BSIZE * sizeof(*s->tmp_block));
 
if (!s->dct || !s->idct || !s->tmp_block || !s->block)
return AVERROR(ENOMEM);
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB24,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static void color_decorrelation(float dct3ch[3][3], float **dst, int dst_linesize,
const uint8_t *src, int src_linesize, int w, int h)
{
int x, y;
float *dstp_r = dst[0];
float *dstp_g = dst[1];
float *dstp_b = dst[2];
 
for (y = 0; y < h; y++) {
const uint8_t *srcp = src;
 
for (x = 0; x < w; x++) {
dstp_r[x] = srcp[0] * dct3ch[0][0] + srcp[1] * dct3ch[0][1] + srcp[2] * dct3ch[0][2];
dstp_g[x] = srcp[0] * dct3ch[1][0] + srcp[1] * dct3ch[1][1] + srcp[2] * dct3ch[1][2];
dstp_b[x] = srcp[0] * dct3ch[2][0] + srcp[1] * dct3ch[2][1] + srcp[2] * dct3ch[2][2];
srcp += 3;
}
src += src_linesize;
dstp_r += dst_linesize;
dstp_g += dst_linesize;
dstp_b += dst_linesize;
}
}
 
static void color_correlation(float dct3ch[3][3], uint8_t *dst, int dst_linesize,
float **src, int src_linesize, int w, int h)
{
int x, y;
const float *src_r = src[0];
const float *src_g = src[1];
const float *src_b = src[2];
 
for (y = 0; y < h; y++) {
uint8_t *dstp = dst;
 
for (x = 0; x < w; x++) {
dstp[0] = av_clip_uint8(src_r[x] * dct3ch[0][0] + src_g[x] * dct3ch[1][0] + src_b[x] * dct3ch[2][0]);
dstp[1] = av_clip_uint8(src_r[x] * dct3ch[0][1] + src_g[x] * dct3ch[1][1] + src_b[x] * dct3ch[2][1]);
dstp[2] = av_clip_uint8(src_r[x] * dct3ch[0][2] + src_g[x] * dct3ch[1][2] + src_b[x] * dct3ch[2][2]);
dstp += 3;
}
dst += dst_linesize;
src_r += src_linesize;
src_g += src_linesize;
src_b += src_linesize;
}
}
 
static void filter_plane(AVFilterContext *ctx,
float *dst, int dst_linesize,
const float *src, int src_linesize,
int w, int h)
{
int x, y, bx, by;
DCTdnoizContext *s = ctx->priv;
float *dst0 = dst;
const float *weights = s->weights;
 
// reset block sums
memset(dst, 0, h * dst_linesize * sizeof(*dst));
 
// block dct sums
for (y = 0; y < h - BSIZE + 1; y += s->step) {
for (x = 0; x < w - BSIZE + 1; x += s->step) {
float *ftb = dct_block(s, src + x, src_linesize);
 
if (s->expr) {
for (by = 0; by < BSIZE; by++) {
for (bx = 0; bx < BSIZE; bx++) {
s->var_values[VAR_C] = FFABS(*ftb);
*ftb++ *= av_expr_eval(s->expr, s->var_values, s);
}
}
} else {
for (by = 0; by < BSIZE; by++) {
for (bx = 0; bx < BSIZE; bx++) {
if (FFABS(*ftb) < s->th)
*ftb = 0;
ftb++;
}
}
}
idct_block(s, dst + x, dst_linesize);
}
src += s->step * src_linesize;
dst += s->step * dst_linesize;
}
 
// average blocks
dst = dst0;
for (y = 0; y < h; y++) {
for (x = 0; x < w; x++)
dst[x] *= weights[x];
dst += dst_linesize;
weights += dst_linesize;
}
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
DCTdnoizContext *s = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int direct, plane;
AVFrame *out;
 
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
direct = 0;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
 
color_decorrelation(s->color_dct, s->cbuf[0], s->p_linesize,
in->data[0], in->linesize[0], s->pr_width, s->pr_height);
for (plane = 0; plane < 3; plane++)
filter_plane(ctx, s->cbuf[1][plane], s->p_linesize,
s->cbuf[0][plane], s->p_linesize,
s->pr_width, s->pr_height);
color_correlation(s->color_dct, out->data[0], out->linesize[0],
s->cbuf[1], s->p_linesize, s->pr_width, s->pr_height);
 
if (!direct) {
int y;
uint8_t *dst = out->data[0];
const uint8_t *src = in->data[0];
const int dst_linesize = out->linesize[0];
const int src_linesize = in->linesize[0];
const int hpad = (inlink->w - s->pr_width) * 3;
const int vpad = (inlink->h - s->pr_height);
 
if (hpad) {
uint8_t *dstp = dst + s->pr_width * 3;
const uint8_t *srcp = src + s->pr_width * 3;
 
for (y = 0; y < s->pr_height; y++) {
memcpy(dstp, srcp, hpad);
dstp += dst_linesize;
srcp += src_linesize;
}
}
if (vpad) {
uint8_t *dstp = dst + s->pr_height * dst_linesize;
const uint8_t *srcp = src + s->pr_height * src_linesize;
 
for (y = 0; y < vpad; y++) {
memcpy(dstp, srcp, inlink->w * 3);
dstp += dst_linesize;
srcp += src_linesize;
}
}
 
av_frame_free(&in);
}
 
return ff_filter_frame(outlink, out);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
int i;
DCTdnoizContext *s = ctx->priv;
 
av_dct_end(s->dct);
av_dct_end(s->idct);
av_free(s->block);
av_free(s->tmp_block);
av_free(s->weights);
for (i = 0; i < 2; i++) {
av_free(s->cbuf[i][0]);
av_free(s->cbuf[i][1]);
av_free(s->cbuf[i][2]);
}
av_expr_free(s->expr);
}
 
static const AVFilterPad dctdnoiz_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad dctdnoiz_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_dctdnoiz = {
.name = "dctdnoiz",
.description = NULL_IF_CONFIG_SMALL("Denoise frames using 2D DCT."),
.priv_size = sizeof(DCTdnoizContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = dctdnoiz_inputs,
.outputs = dctdnoiz_outputs,
.priv_class = &dctdnoiz_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_decimate.c
0,0 → 1,397
/*
* Copyright (c) 2012 Fredrik Mellbin
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "internal.h"
 
#define INPUT_MAIN 0
#define INPUT_CLEANSRC 1
 
struct qitem {
AVFrame *frame;
int64_t maxbdiff;
int64_t totdiff;
};
 
typedef struct {
const AVClass *class;
struct qitem *queue; ///< window of cycle frames and the associated data diff
int fid; ///< current frame id in the queue
int filled; ///< 1 if the queue is filled, 0 otherwise
AVFrame *last; ///< last frame from the previous queue
AVFrame **clean_src; ///< frame queue for the clean source
int got_frame[2]; ///< frame request flag for each input stream
double ts_unit; ///< timestamp units for the output frames
uint32_t eof; ///< bitmask for end of stream
int hsub, vsub; ///< chroma subsampling values
int depth;
int nxblocks, nyblocks;
int bdiffsize;
int64_t *bdiffs;
 
/* options */
int cycle;
double dupthresh_flt;
double scthresh_flt;
int64_t dupthresh;
int64_t scthresh;
int blockx, blocky;
int ppsrc;
int chroma;
} DecimateContext;
 
#define OFFSET(x) offsetof(DecimateContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption decimate_options[] = {
{ "cycle", "set the number of frame from which one will be dropped", OFFSET(cycle), AV_OPT_TYPE_INT, {.i64 = 5}, 2, 25, FLAGS },
{ "dupthresh", "set duplicate threshold", OFFSET(dupthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 1.1}, 0, 100, FLAGS },
{ "scthresh", "set scene change threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 15.0}, 0, 100, FLAGS },
{ "blockx", "set the size of the x-axis blocks used during metric calculations", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
{ "blocky", "set the size of the y-axis blocks used during metric calculations", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
{ "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
{ "chroma", "set whether or not chroma is considered in the metric calculations", OFFSET(chroma), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(decimate);
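/*
 * Usage sketch (illustrative): with the default cycle=5 one frame in five
 * is dropped and the output rate becomes fps*(cycle-1)/cycle, e.g.
 * 30000/1001 -> 24000/1001, the usual step after field matching
 * telecined material.
 */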
 
static void calc_diffs(const DecimateContext *dm, struct qitem *q,
const AVFrame *f1, const AVFrame *f2)
{
int64_t maxdiff = -1;
int64_t *bdiffs = dm->bdiffs;
int plane, i, j;
 
memset(bdiffs, 0, dm->bdiffsize * sizeof(*bdiffs));
 
for (plane = 0; plane < (dm->chroma && f1->data[2] ? 3 : 1); plane++) {
int x, y, xl;
const int linesize1 = f1->linesize[plane];
const int linesize2 = f2->linesize[plane];
const uint8_t *f1p = f1->data[plane];
const uint8_t *f2p = f2->data[plane];
int width = plane ? FF_CEIL_RSHIFT(f1->width, dm->hsub) : f1->width;
int height = plane ? FF_CEIL_RSHIFT(f1->height, dm->vsub) : f1->height;
int hblockx = dm->blockx / 2;
int hblocky = dm->blocky / 2;
 
if (plane) {
hblockx >>= dm->hsub;
hblocky >>= dm->vsub;
}
 
for (y = 0; y < height; y++) {
int ydest = y / hblocky;
int xdest = 0;
 
#define CALC_DIFF(nbits) do { \
for (x = 0; x < width; x += hblockx) { \
int64_t acc = 0; \
int m = FFMIN(width, x + hblockx); \
for (xl = x; xl < m; xl++) \
acc += abs(((const uint##nbits##_t *)f1p)[xl] - \
((const uint##nbits##_t *)f2p)[xl]); \
bdiffs[ydest * dm->nxblocks + xdest] += acc; \
xdest++; \
} \
} while (0)
if (dm->depth == 8) CALC_DIFF(8);
else CALC_DIFF(16);
 
f1p += linesize1;
f2p += linesize2;
}
}
 
for (i = 0; i < dm->nyblocks - 1; i++) {
for (j = 0; j < dm->nxblocks - 1; j++) {
int64_t tmp = bdiffs[ i * dm->nxblocks + j ]
+ bdiffs[ i * dm->nxblocks + j + 1]
+ bdiffs[(i + 1) * dm->nxblocks + j ]
+ bdiffs[(i + 1) * dm->nxblocks + j + 1];
if (tmp > maxdiff)
maxdiff = tmp;
}
}
 
q->totdiff = 0;
for (i = 0; i < dm->bdiffsize; i++)
q->totdiff += bdiffs[i];
q->maxbdiff = maxdiff;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
int scpos = -1, duppos = -1;
int drop = INT_MIN, i, lowest = 0, ret;
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
DecimateContext *dm = ctx->priv;
AVFrame *prv;
 
/* update frames queue(s) */
if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
dm->queue[dm->fid].frame = in;
dm->got_frame[INPUT_MAIN] = 1;
} else {
dm->clean_src[dm->fid] = in;
dm->got_frame[INPUT_CLEANSRC] = 1;
}
if (!dm->got_frame[INPUT_MAIN] || (dm->ppsrc && !dm->got_frame[INPUT_CLEANSRC]))
return 0;
dm->got_frame[INPUT_MAIN] = dm->got_frame[INPUT_CLEANSRC] = 0;
 
if (in) {
/* update frame metrics */
prv = dm->fid ? dm->queue[dm->fid - 1].frame : dm->last;
if (!prv)
prv = in;
calc_diffs(dm, &dm->queue[dm->fid], prv, in);
if (++dm->fid != dm->cycle)
return 0;
av_frame_free(&dm->last);
dm->last = av_frame_clone(in);
dm->fid = 0;
 
/* we have a complete cycle, select the frame to drop */
lowest = 0;
for (i = 0; i < dm->cycle; i++) {
if (dm->queue[i].totdiff > dm->scthresh)
scpos = i;
if (dm->queue[i].maxbdiff < dm->queue[lowest].maxbdiff)
lowest = i;
}
if (dm->queue[lowest].maxbdiff < dm->dupthresh)
duppos = lowest;
drop = scpos >= 0 && duppos < 0 ? scpos : lowest;
}
 
/* metrics debug */
if (av_log_get_level() >= AV_LOG_DEBUG) {
av_log(ctx, AV_LOG_DEBUG, "1/%d frame drop:\n", dm->cycle);
for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
av_log(ctx, AV_LOG_DEBUG," #%d: totdiff=%08"PRIx64" maxbdiff=%08"PRIx64"%s%s%s%s\n",
i + 1, dm->queue[i].totdiff, dm->queue[i].maxbdiff,
i == scpos ? " sc" : "",
i == duppos ? " dup" : "",
i == lowest ? " lowest" : "",
i == drop ? " [DROP]" : "");
}
}
 
/* push all frames except the drop */
ret = 0;
for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
if (i == drop) {
if (dm->ppsrc)
av_frame_free(&dm->clean_src[i]);
av_frame_free(&dm->queue[i].frame);
} else {
AVFrame *frame = dm->queue[i].frame;
if (dm->ppsrc) {
av_frame_free(&frame);
frame = dm->clean_src[i];
}
frame->pts = outlink->frame_count * dm->ts_unit;
ret = ff_filter_frame(outlink, frame);
if (ret < 0)
break;
}
}
 
return ret;
}
 
static int config_input(AVFilterLink *inlink)
{
int max_value;
AVFilterContext *ctx = inlink->dst;
DecimateContext *dm = ctx->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
const int w = inlink->w;
const int h = inlink->h;
 
dm->hsub = pix_desc->log2_chroma_w;
dm->vsub = pix_desc->log2_chroma_h;
dm->depth = pix_desc->comp[0].depth_minus1 + 1;
max_value = (1 << dm->depth) - 1;
dm->scthresh = (int64_t)(((int64_t)max_value * w * h * dm->scthresh_flt) / 100);
dm->dupthresh = (int64_t)(((int64_t)max_value * dm->blockx * dm->blocky * dm->dupthresh_flt) / 100);
dm->nxblocks = (w + dm->blockx/2 - 1) / (dm->blockx/2);
dm->nyblocks = (h + dm->blocky/2 - 1) / (dm->blocky/2);
dm->bdiffsize = dm->nxblocks * dm->nyblocks;
dm->bdiffs = av_malloc(dm->bdiffsize * sizeof(*dm->bdiffs));
dm->queue = av_calloc(dm->cycle, sizeof(*dm->queue));
 
if (!dm->bdiffs || !dm->queue)
return AVERROR(ENOMEM);
 
if (dm->ppsrc) {
dm->clean_src = av_calloc(dm->cycle, sizeof(*dm->clean_src));
if (!dm->clean_src)
return AVERROR(ENOMEM);
}
 
return 0;
}
 
static av_cold int decimate_init(AVFilterContext *ctx)
{
const DecimateContext *dm = ctx->priv;
AVFilterPad pad = {
.name = av_strdup("main"),
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
};
 
if (!pad.name)
return AVERROR(ENOMEM);
ff_insert_inpad(ctx, INPUT_MAIN, &pad);
 
if (dm->ppsrc) {
pad.name = av_strdup("clean_src");
pad.config_props = NULL;
if (!pad.name)
return AVERROR(ENOMEM);
ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad);
}
 
if ((dm->blockx & (dm->blockx - 1)) ||
(dm->blocky & (dm->blocky - 1))) {
av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
return AVERROR(EINVAL);
}
 
return 0;
}
 
static av_cold void decimate_uninit(AVFilterContext *ctx)
{
int i;
DecimateContext *dm = ctx->priv;
 
av_frame_free(&dm->last);
av_freep(&dm->bdiffs);
av_freep(&dm->queue);
av_freep(&dm->clean_src);
for (i = 0; i < ctx->nb_inputs; i++)
av_freep(&ctx->input_pads[i].name);
}
 
static int request_inlink(AVFilterContext *ctx, int lid)
{
int ret = 0;
DecimateContext *dm = ctx->priv;
 
if (!dm->got_frame[lid]) {
AVFilterLink *inlink = ctx->inputs[lid];
ret = ff_request_frame(inlink);
if (ret == AVERROR_EOF) { // flushing
dm->eof |= 1 << lid;
ret = filter_frame(inlink, NULL);
}
}
return ret;
}
 
static int request_frame(AVFilterLink *outlink)
{
int ret;
AVFilterContext *ctx = outlink->src;
DecimateContext *dm = ctx->priv;
const uint32_t eof_mask = 1<<INPUT_MAIN | dm->ppsrc<<INPUT_CLEANSRC;
 
if ((dm->eof & eof_mask) == eof_mask) // flush done?
return AVERROR_EOF;
if ((ret = request_inlink(ctx, INPUT_MAIN)) < 0)
return ret;
if (dm->ppsrc && (ret = request_inlink(ctx, INPUT_CLEANSRC)) < 0)
return ret;
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
#define PF_ALPHA(suf) AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
#define PF(suf) PF_NOALPHA(suf), PF_ALPHA(suf)
PF(P), PF(P9), PF(P10), PF_NOALPHA(P12), PF_NOALPHA(P14), PF(P16),
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
DecimateContext *dm = ctx->priv;
const AVFilterLink *inlink =
ctx->inputs[dm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
AVRational fps = inlink->frame_rate;
 
if (!fps.num || !fps.den) {
av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
"current rate of %d/%d is invalid\n", fps.num, fps.den);
return AVERROR(EINVAL);
}
fps = av_mul_q(fps, (AVRational){dm->cycle - 1, dm->cycle});
av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
outlink->time_base = inlink->time_base;
outlink->frame_rate = fps;
outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
outlink->w = inlink->w;
outlink->h = inlink->h;
dm->ts_unit = av_q2d(av_inv_q(av_mul_q(fps, outlink->time_base)));
return 0;
}
 
static const AVFilterPad decimate_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_decimate = {
.name = "decimate",
.description = NULL_IF_CONFIG_SMALL("Decimate frames (post field matching filter)."),
.init = decimate_init,
.uninit = decimate_uninit,
.priv_size = sizeof(DecimateContext),
.query_formats = query_formats,
.outputs = decimate_outputs,
.priv_class = &decimate_class,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_delogo.c
0,0 → 1,290
/*
* Copyright (c) 2002 Jindrich Makovicka <makovick@gmail.com>
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2013 Jean Delvare <khali@linux-fr.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* A very simple tv station logo remover
* Originally imported from MPlayer libmpcodecs/vf_delogo.c,
* the algorithm was later improved.
*/
 
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
/**
* Apply a simple delogo algorithm to the image in src and put the
* result in dst.
*
* The algorithm is only applied to the region specified by the logo
* parameters.
*
* @param w width of the input image
* @param h height of the input image
* @param logo_x x coordinate of the top left corner of the logo region
* @param logo_y y coordinate of the top left corner of the logo region
* @param logo_w width of the logo
* @param logo_h height of the logo
* @param band the size of the band around the processed area
* @param show show a rectangle around the processed area, useful for
* parameters tweaking
* @param direct if non-zero perform in-place processing
*/
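/*
 * Interpolation sketch (from the code below): each interior pixel becomes
 * a weighted mix of three-sample sums taken on the four edges of the logo
 * rectangle; an edge's weight grows with the pixel's distance from the
 * opposite edge (times the distances along the perpendicular axis), with
 * sar.num/sar.den factors keeping the metric aspect-ratio aware. Within
 * the outer band the result is blended back toward the source pixel, the
 * more so the closer the pixel is to the rectangle border.
 */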
static void apply_delogo(uint8_t *dst, int dst_linesize,
uint8_t *src, int src_linesize,
int w, int h, AVRational sar,
int logo_x, int logo_y, int logo_w, int logo_h,
unsigned int band, int show, int direct)
{
int x, y;
uint64_t interp, weightl, weightr, weightt, weightb;
uint8_t *xdst, *xsrc;
 
uint8_t *topleft, *botleft, *topright;
unsigned int left_sample, right_sample;
int xclipl, xclipr, yclipt, yclipb;
int logo_x1, logo_x2, logo_y1, logo_y2;
 
xclipl = FFMAX(-logo_x, 0);
xclipr = FFMAX(logo_x+logo_w-w, 0);
yclipt = FFMAX(-logo_y, 0);
yclipb = FFMAX(logo_y+logo_h-h, 0);
 
logo_x1 = logo_x + xclipl;
logo_x2 = logo_x + logo_w - xclipr;
logo_y1 = logo_y + yclipt;
logo_y2 = logo_y + logo_h - yclipb;
 
topleft = src+logo_y1 * src_linesize+logo_x1;
topright = src+logo_y1 * src_linesize+logo_x2-1;
botleft = src+(logo_y2-1) * src_linesize+logo_x1;
 
if (!direct)
av_image_copy_plane(dst, dst_linesize, src, src_linesize, w, h);
 
dst += (logo_y1 + 1) * dst_linesize;
src += (logo_y1 + 1) * src_linesize;
 
for (y = logo_y1+1; y < logo_y2-1; y++) {
left_sample = topleft[src_linesize*(y-logo_y1)] +
topleft[src_linesize*(y-logo_y1-1)] +
topleft[src_linesize*(y-logo_y1+1)];
right_sample = topright[src_linesize*(y-logo_y1)] +
topright[src_linesize*(y-logo_y1-1)] +
topright[src_linesize*(y-logo_y1+1)];
 
for (x = logo_x1+1,
xdst = dst+logo_x1+1,
xsrc = src+logo_x1+1; x < logo_x2-1; x++, xdst++, xsrc++) {
 
/* Weighted interpolation based on relative distances, taking SAR into account */
weightl = (uint64_t) (logo_x2-1-x) * (y-logo_y1) * (logo_y2-1-y) * sar.den;
weightr = (uint64_t)(x-logo_x1) * (y-logo_y1) * (logo_y2-1-y) * sar.den;
weightt = (uint64_t)(x-logo_x1) * (logo_x2-1-x) * (logo_y2-1-y) * sar.num;
weightb = (uint64_t)(x-logo_x1) * (logo_x2-1-x) * (y-logo_y1) * sar.num;
 
interp =
left_sample * weightl
+
right_sample * weightr
+
(topleft[x-logo_x1] +
topleft[x-logo_x1-1] +
topleft[x-logo_x1+1]) * weightt
+
(botleft[x-logo_x1] +
botleft[x-logo_x1-1] +
botleft[x-logo_x1+1]) * weightb;
interp /= (weightl + weightr + weightt + weightb) * 3U;
 
if (y >= logo_y+band && y < logo_y+logo_h-band &&
x >= logo_x+band && x < logo_x+logo_w-band) {
*xdst = interp;
} else {
unsigned dist = 0;
 
if (x < logo_x+band)
dist = FFMAX(dist, logo_x-x+band);
else if (x >= logo_x+logo_w-band)
dist = FFMAX(dist, x-(logo_x+logo_w-1-band));
 
if (y < logo_y+band)
dist = FFMAX(dist, logo_y-y+band);
else if (y >= logo_y+logo_h-band)
dist = FFMAX(dist, y-(logo_y+logo_h-1-band));
 
*xdst = (*xsrc*dist + interp*(band-dist))/band;
if (show && (dist == band-1))
*xdst = 0;
}
}
 
dst += dst_linesize;
src += src_linesize;
}
}
 
typedef struct {
const AVClass *class;
int x, y, w, h, band, show;
} DelogoContext;
 
#define OFFSET(x) offsetof(DelogoContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption delogo_options[]= {
{ "x", "set logo x position", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "y", "set logo y position", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "w", "set logo width", OFFSET(w), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "h", "set logo height", OFFSET(h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS },
{ "t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS },
{ "show", "show delogo area", OFFSET(show), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(delogo);
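/*
 * Usage sketch (illustrative): position the rectangle with show=1 first,
 *   -vf delogo=x=10:y=10:w=100:h=50:show=1
 * then drop show=1 for the final pass.
 */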
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static av_cold int init(AVFilterContext *ctx)
{
DelogoContext *s = ctx->priv;
 
#define CHECK_UNSET_OPT(opt) \
if (s->opt == -1) { \
av_log(s, AV_LOG_ERROR, "Option %s was not set.\n", #opt); \
return AVERROR(EINVAL); \
}
CHECK_UNSET_OPT(x);
CHECK_UNSET_OPT(y);
CHECK_UNSET_OPT(w);
CHECK_UNSET_OPT(h);
 
av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d, w:%d h:%d band:%d show:%d\n",
s->x, s->y, s->w, s->h, s->band, s->show);
 
s->w += s->band*2;
s->h += s->band*2;
s->x -= s->band;
s->y -= s->band;
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
DelogoContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFrame *out;
int hsub0 = desc->log2_chroma_w;
int vsub0 = desc->log2_chroma_h;
int direct = 0;
int plane;
AVRational sar;
 
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
 
av_frame_copy_props(out, in);
}
 
sar = in->sample_aspect_ratio;
/* Assume square pixels if SAR is unknown */
if (!sar.num)
sar.num = sar.den = 1;
 
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
int hsub = plane == 1 || plane == 2 ? hsub0 : 0;
int vsub = plane == 1 || plane == 2 ? vsub0 : 0;
 
apply_delogo(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
FF_CEIL_RSHIFT(inlink->w, hsub),
FF_CEIL_RSHIFT(inlink->h, vsub),
sar, s->x>>hsub, s->y>>vsub,
/* Up and left borders were rounded down, inject lost bits
* into width and height to avoid error accumulation */
FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub),
FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub),
s->band>>FFMIN(hsub, vsub),
s->show, direct);
}
 
if (!direct)
av_frame_free(&in);
 
return ff_filter_frame(outlink, out);
}
 
static const AVFilterPad avfilter_vf_delogo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_delogo_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_delogo = {
.name = "delogo",
.description = NULL_IF_CONFIG_SMALL("Remove logo from input video."),
.priv_size = sizeof(DelogoContext),
.priv_class = &delogo_class,
.init = init,
.query_formats = query_formats,
.inputs = avfilter_vf_delogo_inputs,
.outputs = avfilter_vf_delogo_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_deshake.c
0,0 → 1,580
/*
* Copyright (C) 2010 Georg Martius <georg.martius@web.de>
* Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* fast deshake / depan video filter
*
* SAD block-matching motion compensation to fix small changes in
* horizontal and/or vertical shift. This filter helps remove camera shake
* from hand-holding a camera, bumping a tripod, moving on a vehicle, etc.
*
* Algorithm:
* - For each frame with one previous reference frame
* - For each block in the frame
* - If contrast > threshold then find likely motion vector
* - For all found motion vectors
* - Find most common, store as global motion vector
* - Find most likely rotation angle
* - Transform image along global motion
*
* TODO:
* - Fill frame edges based on previous/next reference frames
* - Fill frame edges by stretching image near the edges?
* - Can this be done quickly and look decent?
*
* Dark Shikari links to http://wiki.videolan.org/SoC_x264_2010#GPU_Motion_Estimation_2
* for an algorithm similar to what could be used here to get the gmv.
* It requires only a couple of diamond searches + fast downscaling.
*
* Special thanks to Jason Kotenko for his help with the algorithm and my
* inability to see simple errors in C code.
*/
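/*
 * Reference sketch of the SAD metric driving the block matching below
 * (illustrative only; the filter uses the optimized dsputil sad[0],
 * whose real signature also takes a codec context argument):
 */
#if 0
static int sad_ref(const uint8_t *a, const uint8_t *b, int stride, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h; y++) {          /* blocksize rows */
        for (x = 0; x < 16; x++)       /* fixed 16-pixel-wide block */
            sum += abs(a[x] - b[x]);
        a += stride;
        b += stride;
    }
    return sum;
}
#endif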
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/common.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavcodec/dsputil.h"
 
#include "deshake.h"
#include "deshake_opencl.h"
 
#define CHROMA_WIDTH(link) -((-link->w) >> av_pix_fmt_desc_get(link->format)->log2_chroma_w)
#define CHROMA_HEIGHT(link) -((-link->h) >> av_pix_fmt_desc_get(link->format)->log2_chroma_h)
 
#define OFFSET(x) offsetof(DeshakeContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
#define MAX_R 64
 
static const AVOption deshake_options[] = {
{ "x", "set x for the rectangular search area", OFFSET(cx), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
{ "y", "set y for the rectangular search area", OFFSET(cy), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
{ "w", "set width for the rectangular search area", OFFSET(cw), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
{ "h", "set height for the rectangular search area", OFFSET(ch), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
{ "rx", "set x for the rectangular search area", OFFSET(rx), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
{ "ry", "set y for the rectangular search area", OFFSET(ry), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
{ "edge", "set edge mode", OFFSET(edge), AV_OPT_TYPE_INT, {.i64=FILL_MIRROR}, FILL_BLANK, FILL_COUNT-1, FLAGS, "edge"},
{ "blank", "fill zeroes at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_BLANK}, INT_MIN, INT_MAX, FLAGS, "edge" },
{ "original", "original image at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_ORIGINAL}, INT_MIN, INT_MAX, FLAGS, "edge" },
{ "clamp", "extruded edge value at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_CLAMP}, INT_MIN, INT_MAX, FLAGS, "edge" },
{ "mirror", "mirrored edge at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_MIRROR}, INT_MIN, INT_MAX, FLAGS, "edge" },
{ "blocksize", "set motion search blocksize", OFFSET(blocksize), AV_OPT_TYPE_INT, {.i64=8}, 4, 128, .flags = FLAGS },
{ "contrast", "set contrast threshold for blocks", OFFSET(contrast), AV_OPT_TYPE_INT, {.i64=125}, 1, 255, .flags = FLAGS },
{ "search", "set search strategy", OFFSET(search), AV_OPT_TYPE_INT, {.i64=EXHAUSTIVE}, EXHAUSTIVE, SEARCH_COUNT-1, FLAGS, "smode" },
{ "exhaustive", "exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
{ "less", "less exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=SMART_EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
{ "filename", "set motion search detailed log file name", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "opencl", "use OpenCL filtering capabilities", OFFSET(opencl), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, .flags = FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(deshake);
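/*
 * Usage sketch (illustrative): enlarge the search radius and mirror the
 * pixels exposed at the edges:
 *   -vf deshake=rx=32:ry=32:edge=mirror
 */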
 
static int cmp(const double *a, const double *b)
{
return *a < *b ? -1 : ( *a > *b ? 1 : 0 );
}
 
/**
* Cleaned mean (cuts off 20% of values to remove outliers and then averages)
*/
static double clean_mean(double *values, int count)
{
double mean = 0;
int cut = count / 5;
int x;
 
qsort(values, count, sizeof(double), (void*)cmp);
 
for (x = cut; x < count - cut; x++) {
mean += values[x];
}
 
return mean / (count - cut * 2);
}
 
/**
* Find the most likely shift in motion between two frames for a given
* macroblock. Test each block against several shifts given by the rx
* and ry attributes. Searches using a simple matrix of those shifts and
* chooses the most likely shift by the smallest difference in blocks.
*/
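/*
 * Cost note (derived from the loops below): EXHAUSTIVE tries
 * (2*rx+1)*(2*ry+1) shifts per block, 33*33 = 1089 SADs at the default
 * rx=ry=16; SMART_EXHAUSTIVE samples every other shift (about rx*ry of
 * them) and then refines around the best hit with a 3x3 search.
 */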
static void find_block_motion(DeshakeContext *deshake, uint8_t *src1,
uint8_t *src2, int cx, int cy, int stride,
IntMotionVector *mv)
{
int x, y;
int diff;
int smallest = INT_MAX;
int tmp, tmp2;
 
#define CMP(i, j) deshake->c.sad[0](deshake, src1 + cy * stride + cx, \
src2 + (j) * stride + (i), stride, \
deshake->blocksize)
 
if (deshake->search == EXHAUSTIVE) {
// Compare every possible position - this is sloooow!
for (y = -deshake->ry; y <= deshake->ry; y++) {
for (x = -deshake->rx; x <= deshake->rx; x++) {
diff = CMP(cx - x, cy - y);
if (diff < smallest) {
smallest = diff;
mv->x = x;
mv->y = y;
}
}
}
} else if (deshake->search == SMART_EXHAUSTIVE) {
// Compare every other possible position and find the best match
for (y = -deshake->ry + 1; y < deshake->ry; y += 2) {
for (x = -deshake->rx + 1; x < deshake->rx; x += 2) {
diff = CMP(cx - x, cy - y);
if (diff < smallest) {
smallest = diff;
mv->x = x;
mv->y = y;
}
}
}
 
// Home in on the specific best match around the match we found above
tmp = mv->x;
tmp2 = mv->y;
 
for (y = tmp2 - 1; y <= tmp2 + 1; y++) {
for (x = tmp - 1; x <= tmp + 1; x++) {
if (x == tmp && y == tmp2)
continue;
 
diff = CMP(cx - x, cy - y);
if (diff < smallest) {
smallest = diff;
mv->x = x;
mv->y = y;
}
}
}
}
 
if (smallest > 512) {
mv->x = -1;
mv->y = -1;
}
emms_c();
//av_log(NULL, AV_LOG_ERROR, "%d\n", smallest);
//av_log(NULL, AV_LOG_ERROR, "Final: (%d, %d) = %d x %d\n", cx, cy, mv->x, mv->y);
}
 
/**
* Find the contrast of a given block. When searching for global motion we
* really only care about the high contrast blocks, so using this method we
* can actually skip blocks we don't care much about.
*/
static int block_contrast(uint8_t *src, int x, int y, int stride, int blocksize)
{
int highest = 0;
int lowest = 255;
int i, j, pos;
 
for (i = 0; i <= blocksize * 2; i++) {
// We use a width of 16 here to match the libavcodec sad functions
for (j = 0; j <= 15; j++) {
pos = (y - i) * stride + (x - j);
if (src[pos] < lowest)
lowest = src[pos];
else if (src[pos] > highest) {
highest = src[pos];
}
}
}
 
return highest - lowest;
}
 
/**
* Find the rotation for a given block.
*/
static double block_angle(int x, int y, int cx, int cy, IntMotionVector *shift)
{
double a1, a2, diff;
 
a1 = atan2(y - cy, x - cx);
a2 = atan2(y - cy + shift->y, x - cx + shift->x);
 
diff = a2 - a1;
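// The raw difference may wrap around +/-M_PI (e.g. +179 deg to -179 deg);
// the expression below folds it back into the [-M_PI, M_PI] range.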
 
return (diff > M_PI) ? diff - 2 * M_PI :
(diff < -M_PI) ? diff + 2 * M_PI :
diff;
}
 
/**
* Find the estimated global motion for a scene given the most likely shift
* for each block in the frame. The global motion is estimated to be the
* same as the motion from most blocks in the frame, so if most blocks
* move one pixel to the right and two pixels down, this would yield a
* motion vector (1, -2).
*/
static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
int width, int height, int stride, Transform *t)
{
int x, y;
IntMotionVector mv = {0, 0};
int counts[2*MAX_R+1][2*MAX_R+1];
int count_max_value = 0;
int contrast;
 
int pos;
double *angles = av_malloc(sizeof(*angles) * width * height / (16 * deshake->blocksize));
int center_x = 0, center_y = 0;
double p_x, p_y;
 
// Reset counts to zero
for (x = 0; x < deshake->rx * 2 + 1; x++) {
for (y = 0; y < deshake->ry * 2 + 1; y++) {
counts[x][y] = 0;
}
}
 
pos = 0;
// Find motion for every block and store the motion vector in the counts
for (y = deshake->ry; y < height - deshake->ry - (deshake->blocksize * 2); y += deshake->blocksize * 2) {
// We use a width of 16 here to match the libavcodec sad functions
for (x = deshake->rx; x < width - deshake->rx - 16; x += 16) {
// If the contrast is too low, just skip this block as it probably
// won't be very useful to us.
contrast = block_contrast(src2, x, y, stride, deshake->blocksize);
if (contrast > deshake->contrast) {
//av_log(NULL, AV_LOG_ERROR, "%d\n", contrast);
find_block_motion(deshake, src1, src2, x, y, stride, &mv);
if (mv.x != -1 && mv.y != -1) {
counts[mv.x + deshake->rx][mv.y + deshake->ry] += 1;
if (x > deshake->rx && y > deshake->ry)
angles[pos++] = block_angle(x, y, 0, 0, &mv);
 
center_x += mv.x;
center_y += mv.y;
}
}
}
}
 
if (pos) {
center_x /= pos;
center_y /= pos;
t->angle = clean_mean(angles, pos);
if (t->angle < 0.001)
t->angle = 0;
} else {
t->angle = 0;
}
 
// Find the most common motion vector in the frame and use it as the gmv
for (y = deshake->ry * 2; y >= 0; y--) {
for (x = 0; x < deshake->rx * 2 + 1; x++) {
//av_log(NULL, AV_LOG_ERROR, "%5d ", counts[x][y]);
if (counts[x][y] > count_max_value) {
t->vector.x = x - deshake->rx;
t->vector.y = y - deshake->ry;
count_max_value = counts[x][y];
}
}
//av_log(NULL, AV_LOG_ERROR, "\n");
}
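
// Compensate the translation for the rotation: rotating a point
// p = (p_x, p_y) about the frame centre by angle a displaces it by
// ((cos(a)-1)*p_x - sin(a)*p_y, sin(a)*p_x + (cos(a)-1)*p_y); that
// displacement is added to the global motion vector below.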
 
p_x = (center_x - width / 2);
p_y = (center_y - height / 2);
t->vector.x += (cos(t->angle)-1)*p_x - sin(t->angle)*p_y;
t->vector.y += sin(t->angle)*p_x + (cos(t->angle)-1)*p_y;
 
// Clamp max shift & rotation?
t->vector.x = av_clipf(t->vector.x, -deshake->rx * 2, deshake->rx * 2);
t->vector.y = av_clipf(t->vector.y, -deshake->ry * 2, deshake->ry * 2);
t->angle = av_clipf(t->angle, -0.1, 0.1);
 
//av_log(NULL, AV_LOG_ERROR, "%d x %d\n", avg->x, avg->y);
av_free(angles);
}
 
static int deshake_transform_c(AVFilterContext *ctx,
int width, int height, int cw, int ch,
const float *matrix_y, const float *matrix_uv,
enum InterpolateMethod interpolate,
enum FillMethod fill, AVFrame *in, AVFrame *out)
{
int i = 0, ret = 0;
const float *matrixs[3];
int plane_w[3], plane_h[3];
matrixs[0] = matrix_y;
matrixs[1] = matrixs[2] = matrix_uv;
plane_w[0] = width;
plane_w[1] = plane_w[2] = cw;
plane_h[0] = height;
plane_h[1] = plane_h[2] = ch;
 
for (i = 0; i < 3; i++) {
// Transform the luma and chroma planes
ret = avfilter_transform(in->data[i], out->data[i], in->linesize[i], out->linesize[i],
plane_w[i], plane_h[i], matrixs[i], interpolate, fill);
if (ret < 0)
return ret;
}
return ret;
}
 
static av_cold int init(AVFilterContext *ctx)
{
int ret;
DeshakeContext *deshake = ctx->priv;
 
deshake->refcount = 20; // XXX: add to options?
deshake->blocksize /= 2;
deshake->blocksize = av_clip(deshake->blocksize, 4, 128);
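// blocksize is stored halved from here on; the verbose log below reports
// the user-visible value again as blocksize * 2.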
 
if (deshake->rx % 16) {
av_log(ctx, AV_LOG_ERROR, "rx must be a multiple of 16\n");
return AVERROR_PATCHWELCOME;
}
 
if (deshake->filename)
deshake->fp = fopen(deshake->filename, "w");
if (deshake->fp)
fwrite("Ori x, Avg x, Fin x, Ori y, Avg y, Fin y, Ori angle, Avg angle, Fin angle, Ori zoom, Avg zoom, Fin zoom\n", sizeof(char), 104, deshake->fp);
 
// Quadword align left edge of box for MMX code, adjust width if necessary
// to keep right margin
if (deshake->cx > 0) {
deshake->cw += deshake->cx - (deshake->cx & ~15);
deshake->cx &= ~15;
}
deshake->transform = deshake_transform_c;
if (!CONFIG_OPENCL && deshake->opencl) {
av_log(ctx, AV_LOG_ERROR, "OpenCL support was not enabled in this build, cannot be selected\n");
return AVERROR(EINVAL);
}
 
if (CONFIG_OPENCL && deshake->opencl) {
deshake->transform = ff_opencl_transform;
ret = ff_opencl_deshake_init(ctx);
if (ret < 0)
return ret;
}
av_log(ctx, AV_LOG_VERBOSE, "cx: %d, cy: %d, cw: %d, ch: %d, rx: %d, ry: %d, edge: %d blocksize: %d contrast: %d search: %d\n",
deshake->cx, deshake->cy, deshake->cw, deshake->ch,
deshake->rx, deshake->ry, deshake->edge, deshake->blocksize * 2, deshake->contrast, deshake->search);
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int config_props(AVFilterLink *link)
{
DeshakeContext *deshake = link->dst->priv;
 
deshake->ref = NULL;
deshake->last.vector.x = 0;
deshake->last.vector.y = 0;
deshake->last.angle = 0;
deshake->last.zoom = 0;
 
deshake->avctx = avcodec_alloc_context3(NULL);
avpriv_dsputil_init(&deshake->c, deshake->avctx);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
DeshakeContext *deshake = ctx->priv;
if (CONFIG_OPENCL && deshake->opencl) {
ff_opencl_deshake_uninit(ctx);
}
av_frame_free(&deshake->ref);
if (deshake->fp)
fclose(deshake->fp);
if (deshake->avctx)
avcodec_close(deshake->avctx);
av_freep(&deshake->avctx);
}
 
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
DeshakeContext *deshake = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0];
AVFrame *out;
Transform t = {{0},0}, orig = {{0},0};
float matrix_y[9], matrix_uv[9];
float alpha = 2.0 / deshake->refcount;
char tmp[256];
int ret = 0;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
 
if (CONFIG_OPENCL && deshake->opencl) {
ret = ff_opencl_deshake_process_inout_buf(link->dst, in, out);
if (ret < 0)
return ret;
}
 
if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
// Find the most likely global motion for the current frame
find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0], in->data[0], link->w, link->h, in->linesize[0], &t);
} else {
uint8_t *src1 = (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0];
uint8_t *src2 = in->data[0];
 
deshake->cx = FFMIN(deshake->cx, link->w);
deshake->cy = FFMIN(deshake->cy, link->h);
 
if ((unsigned)deshake->cx + (unsigned)deshake->cw > link->w) deshake->cw = link->w - deshake->cx;
if ((unsigned)deshake->cy + (unsigned)deshake->ch > link->h) deshake->ch = link->h - deshake->cy;
 
// Quadword align right margin
deshake->cw &= ~15;
 
src1 += deshake->cy * in->linesize[0] + deshake->cx;
src2 += deshake->cy * in->linesize[0] + deshake->cx;
 
find_motion(deshake, src1, src2, deshake->cw, deshake->ch, in->linesize[0], &t);
}
 
 
// Copy transform so we can output it later to compare to the smoothed value
orig.vector.x = t.vector.x;
orig.vector.y = t.vector.y;
orig.angle = t.angle;
orig.zoom = t.zoom;
 
// Generate a one-sided moving exponential average
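// i.e. avg = alpha * current + (1 - alpha) * avg with alpha = 2 / refcount;
// the hard-coded refcount of 20 gives alpha = 0.1.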
deshake->avg.vector.x = alpha * t.vector.x + (1.0 - alpha) * deshake->avg.vector.x;
deshake->avg.vector.y = alpha * t.vector.y + (1.0 - alpha) * deshake->avg.vector.y;
deshake->avg.angle = alpha * t.angle + (1.0 - alpha) * deshake->avg.angle;
deshake->avg.zoom = alpha * t.zoom + (1.0 - alpha) * deshake->avg.zoom;
 
// Remove the average from the current motion to isolate the motion that
// is not on purpose, such as jitter from bumping the camera
t.vector.x -= deshake->avg.vector.x;
t.vector.y -= deshake->avg.vector.y;
t.angle -= deshake->avg.angle;
t.zoom -= deshake->avg.zoom;
 
// Invert the motion to undo it
t.vector.x *= -1;
t.vector.y *= -1;
t.angle *= -1;
 
// Write statistics to file
if (deshake->fp) {
snprintf(tmp, 256, "%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n", orig.vector.x, deshake->avg.vector.x, t.vector.x, orig.vector.y, deshake->avg.vector.y, t.vector.y, orig.angle, deshake->avg.angle, t.angle, orig.zoom, deshake->avg.zoom, t.zoom);
fwrite(tmp, sizeof(char), strlen(tmp), deshake->fp);
}
 
// Turn relative current frame motion into absolute by adding it to the
// last absolute motion
t.vector.x += deshake->last.vector.x;
t.vector.y += deshake->last.vector.y;
t.angle += deshake->last.angle;
t.zoom += deshake->last.zoom;
 
// Shrink motion by 10% to keep things centered in the camera frame
t.vector.x *= 0.9;
t.vector.y *= 0.9;
t.angle *= 0.9;
 
// Store the last absolute motion information
deshake->last.vector.x = t.vector.x;
deshake->last.vector.y = t.vector.y;
deshake->last.angle = t.angle;
deshake->last.zoom = t.zoom;
 
// Generate a luma transformation matrix
avfilter_get_matrix(t.vector.x, t.vector.y, t.angle, 1.0 + t.zoom / 100.0, matrix_y);
// Generate a chroma transformation matrix
avfilter_get_matrix(t.vector.x / (link->w / CHROMA_WIDTH(link)), t.vector.y / (link->h / CHROMA_HEIGHT(link)), t.angle, 1.0 + t.zoom / 100.0, matrix_uv);
// Transform the luma and chroma planes
ret = deshake->transform(link->dst, link->w, link->h, CHROMA_WIDTH(link), CHROMA_HEIGHT(link),
matrix_y, matrix_uv, INTERPOLATE_BILINEAR, deshake->edge, in, out);
 
// Cleanup the old reference frame
av_frame_free(&deshake->ref);
 
if (ret < 0)
return ret;
 
// Store the current frame as the reference frame for calculating the
// motion of the next frame
deshake->ref = in;
 
return ff_filter_frame(outlink, out);
}
 
static const AVFilterPad deshake_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
 
static const AVFilterPad deshake_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_deshake = {
.name = "deshake",
.description = NULL_IF_CONFIG_SMALL("Stabilize shaky video."),
.priv_size = sizeof(DeshakeContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = deshake_inputs,
.outputs = deshake_outputs,
.priv_class = &deshake_class,
};
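
/* Example usage (illustrative; the option names come from the
 * deshake_options table above and the values shown are the defaults):
 *   ffmpeg -i input.avi -vf deshake=rx=16:ry=16:edge=mirror output.avi
 */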
/contrib/sdk/sources/ffmpeg/libavfilter/vf_drawbox.c
0,0 → 1,392
/*
* Copyright (c) 2008 Affine Systems, Inc (Michael Sullivan, Bobby Impollonia)
* Copyright (c) 2013 Andrey Utkin <andrey.krieger.utkin gmail com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Box and grid drawing filters. Also a nice template for a filter
* that needs to write in the input frame.
*/
 
#include "libavutil/colorspace.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
static const char *const var_names[] = {
"dar",
"hsub", "vsub",
"in_h", "ih", ///< height of the input video
"in_w", "iw", ///< width of the input video
"sar",
"x",
"y",
"h", ///< height of the rendered box
"w", ///< width of the rendered box
"t",
NULL
};
 
enum { Y, U, V, A };
 
enum var_name {
VAR_DAR,
VAR_HSUB, VAR_VSUB,
VAR_IN_H, VAR_IH,
VAR_IN_W, VAR_IW,
VAR_SAR,
VAR_X,
VAR_Y,
VAR_H,
VAR_W,
VAR_T,
VARS_NB
};
 
typedef struct {
const AVClass *class;
int x, y, w, h;
int thickness;
char *color_str;
unsigned char yuv_color[4];
int invert_color; ///< invert luma color
int vsub, hsub; ///< chroma subsampling
char *x_expr, *y_expr; ///< expression for x and y
char *w_expr, *h_expr; ///< expression for width and height
char *t_expr; ///< expression for thickness
} DrawBoxContext;
 
static const int NUM_EXPR_EVALS = 5;
 
static av_cold int init(AVFilterContext *ctx)
{
DrawBoxContext *s = ctx->priv;
uint8_t rgba_color[4];
 
if (!strcmp(s->color_str, "invert"))
s->invert_color = 1;
else if (av_parse_color(rgba_color, s->color_str, -1, ctx) < 0)
return AVERROR(EINVAL);
 
if (!s->invert_color) {
s->yuv_color[Y] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]);
s->yuv_color[U] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
s->yuv_color[V] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
s->yuv_color[A] = rgba_color[3];
}
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
DrawBoxContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
double var_values[VARS_NB], res;
char *expr;
int ret;
int i;
 
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
 
var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
var_values[VAR_DAR] = (double)inlink->w / inlink->h * var_values[VAR_SAR];
var_values[VAR_HSUB] = s->hsub;
var_values[VAR_VSUB] = s->vsub;
var_values[VAR_X] = NAN;
var_values[VAR_Y] = NAN;
var_values[VAR_H] = NAN;
var_values[VAR_W] = NAN;
var_values[VAR_T] = NAN;
 
for (i = 0; i <= NUM_EXPR_EVALS; i++) {
/* evaluate expressions, fail on last iteration */
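/* The x/y/w/h/t expressions may reference one another, so they are
 * evaluated NUM_EXPR_EVALS + 1 times to let interdependent values
 * settle; an error aborts only on the final pass. */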
if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
goto fail;
s->x = var_values[VAR_X] = res;
 
if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
goto fail;
s->y = var_values[VAR_Y] = res;
 
if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
goto fail;
s->w = var_values[VAR_W] = res;
 
if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
goto fail;
s->h = var_values[VAR_H] = res;
 
if ((ret = av_expr_parse_and_eval(&res, (expr = s->t_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
goto fail;
s->thickness = var_values[VAR_T] = res;
}
 
/* if w or h are zero, use the input w/h */
s->w = (s->w > 0) ? s->w : inlink->w;
s->h = (s->h > 0) ? s->h : inlink->h;
 
/* sanity check width and height */
if (s->w < 0 || s->h < 0) {
av_log(ctx, AV_LOG_ERROR, "Size values less than 0 are not acceptable.\n");
return AVERROR(EINVAL);
}
 
av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d w:%d h:%d color:0x%02X%02X%02X%02X\n",
s->x, s->y, s->w, s->h,
s->yuv_color[Y], s->yuv_color[U], s->yuv_color[V], s->yuv_color[A]);
 
return 0;
 
fail:
av_log(ctx, AV_LOG_ERROR,
"Error when evaluating the expression '%s'.\n",
expr);
return ret;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
DrawBoxContext *s = inlink->dst->priv;
int plane, x, y, xb = s->x, yb = s->y;
unsigned char *row[4];
 
for (y = FFMAX(yb, 0); y < frame->height && y < (yb + s->h); y++) {
row[0] = frame->data[0] + y * frame->linesize[0];
 
for (plane = 1; plane < 3; plane++)
row[plane] = frame->data[plane] +
frame->linesize[plane] * (y >> s->vsub);
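
/* A pixel belongs to the box border when it lies within `thickness`
 * pixels of any of the four edges; both branches below apply this test. */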
 
if (s->invert_color) {
for (x = FFMAX(xb, 0); x < xb + s->w && x < frame->width; x++)
if ((y - yb < s->thickness) || (yb + s->h - 1 - y < s->thickness) ||
(x - xb < s->thickness) || (xb + s->w - 1 - x < s->thickness))
row[0][x] = 0xff - row[0][x];
} else {
for (x = FFMAX(xb, 0); x < xb + s->w && x < frame->width; x++) {
double alpha = (double)s->yuv_color[A] / 255;
 
if ((y - yb < s->thickness) || (yb + s->h - 1 - y < s->thickness) ||
(x - xb < s->thickness) || (xb + s->w - 1 - x < s->thickness)) {
row[0][x ] = (1 - alpha) * row[0][x ] + alpha * s->yuv_color[Y];
row[1][x >> s->hsub] = (1 - alpha) * row[1][x >> s->hsub] + alpha * s->yuv_color[U];
row[2][x >> s->hsub] = (1 - alpha) * row[2][x >> s->hsub] + alpha * s->yuv_color[V];
}
}
}
}
 
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
 
#define OFFSET(x) offsetof(DrawBoxContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
#if CONFIG_DRAWBOX_FILTER
 
static const AVOption drawbox_options[] = {
{ "x", "set horizontal position of the left box edge", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "y", "set vertical position of the top box edge", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "width", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "w", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "height", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "h", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "color", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "c", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "thickness", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "t", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(drawbox);
 
static const AVFilterPad drawbox_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
.needs_writable = 1,
},
{ NULL }
};
 
static const AVFilterPad drawbox_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_drawbox = {
.name = "drawbox",
.description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
.priv_size = sizeof(DrawBoxContext),
.priv_class = &drawbox_class,
.init = init,
.query_formats = query_formats,
.inputs = drawbox_inputs,
.outputs = drawbox_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
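
/* Example usage (illustrative; option names from drawbox_options above):
 *   ffmpeg -i input.avi -vf drawbox=x=10:y=10:w=100:h=100:color=red:t=4 output.avi
 */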
#endif /* CONFIG_DRAWBOX_FILTER */
 
#if CONFIG_DRAWGRID_FILTER
static av_pure av_always_inline int pixel_belongs_to_grid(DrawBoxContext *drawgrid, int x, int y)
{
// x is horizontal (width) coord,
// y is vertical (height) coord
int x_modulo;
int y_modulo;
 
// Abstract from the offset
x -= drawgrid->x;
y -= drawgrid->y;
 
x_modulo = x % drawgrid->w;
y_modulo = y % drawgrid->h;
 
// If x or y went negative, wrap the values to keep the modulo logic correct
if (x_modulo < 0)
x_modulo += drawgrid->w;
if (y_modulo < 0)
y_modulo += drawgrid->h;
 
return x_modulo < drawgrid->thickness // Belongs to vertical line
|| y_modulo < drawgrid->thickness; // Belongs to horizontal line
}
 
static int drawgrid_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
DrawBoxContext *drawgrid = inlink->dst->priv;
int plane, x, y;
uint8_t *row[4];
 
for (y = 0; y < frame->height; y++) {
row[0] = frame->data[0] + y * frame->linesize[0];
 
for (plane = 1; plane < 3; plane++)
row[plane] = frame->data[plane] +
frame->linesize[plane] * (y >> drawgrid->vsub);
 
if (drawgrid->invert_color) {
for (x = 0; x < frame->width; x++)
if (pixel_belongs_to_grid(drawgrid, x, y))
row[0][x] = 0xff - row[0][x];
} else {
for (x = 0; x < frame->width; x++) {
double alpha = (double)drawgrid->yuv_color[A] / 255;
 
if (pixel_belongs_to_grid(drawgrid, x, y)) {
row[0][x ] = (1 - alpha) * row[0][x ] + alpha * drawgrid->yuv_color[Y];
row[1][x >> drawgrid->hsub] = (1 - alpha) * row[1][x >> drawgrid->hsub] + alpha * drawgrid->yuv_color[U];
row[2][x >> drawgrid->hsub] = (1 - alpha) * row[2][x >> drawgrid->hsub] + alpha * drawgrid->yuv_color[V];
}
}
}
}
 
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
 
static const AVOption drawgrid_options[] = {
{ "x", "set horizontal offset", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "y", "set vertical offset", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "width", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "w", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "height", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "h", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "color", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "c", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
{ "thickness", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "t", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(drawgrid);
 
static const AVFilterPad drawgrid_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = drawgrid_filter_frame,
.needs_writable = 1,
},
{ NULL }
};
 
static const AVFilterPad drawgrid_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_drawgrid = {
.name = "drawgrid",
.description = NULL_IF_CONFIG_SMALL("Draw a colored grid on the input video."),
.priv_size = sizeof(DrawBoxContext),
.priv_class = &drawgrid_class,
.init = init,
.query_formats = query_formats,
.inputs = drawgrid_inputs,
.outputs = drawgrid_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
 
#endif /* CONFIG_DRAWGRID_FILTER */
/contrib/sdk/sources/ffmpeg/libavfilter/vf_drawtext.c
0,0 → 1,1075
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
* Copyright (c) 2003 Gustavo Sverzut Barbieri <gsbarbieri@yahoo.com.br>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* drawtext filter, based on the original vhook/drawtext.c
* filter by Gustavo Sverzut Barbieri
*/
 
#include <sys/time.h>
#include <time.h>
 
#include "config.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/common.h"
#include "libavutil/file.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/random_seed.h"
#include "libavutil/parseutils.h"
#include "libavutil/timecode.h"
#include "libavutil/tree.h"
#include "libavutil/lfg.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
#include <ft2build.h>
#include <freetype/config/ftheader.h>
#include FT_FREETYPE_H
#include FT_GLYPH_H
#if CONFIG_FONTCONFIG
#include <fontconfig/fontconfig.h>
#endif
 
static const char *const var_names[] = {
"dar",
"hsub", "vsub",
"line_h", "lh", ///< line height, same as max_glyph_h
"main_h", "h", "H", ///< height of the input video
"main_w", "w", "W", ///< width of the input video
"max_glyph_a", "ascent", ///< max glyph ascent
"max_glyph_d", "descent", ///< min glyph descent
"max_glyph_h", ///< max glyph height
"max_glyph_w", ///< max glyph width
"n", ///< number of frame
"sar",
"t", ///< timestamp expressed in seconds
"text_h", "th", ///< height of the rendered text
"text_w", "tw", ///< width of the rendered text
"x",
"y",
"pict_type",
NULL
};
 
static const char *const fun2_names[] = {
"rand"
};
 
static double drand(void *opaque, double min, double max)
{
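/* av_lfg_get() returns a 32-bit value in [0, UINT_MAX]; scale it linearly
 * onto the requested [min, max] interval. */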
return min + (max-min) / UINT_MAX * av_lfg_get(opaque);
}
 
typedef double (*eval_func2)(void *, double a, double b);
 
static const eval_func2 fun2[] = {
drand,
NULL
};
 
enum var_name {
VAR_DAR,
VAR_HSUB, VAR_VSUB,
VAR_LINE_H, VAR_LH,
VAR_MAIN_H, VAR_h, VAR_H,
VAR_MAIN_W, VAR_w, VAR_W,
VAR_MAX_GLYPH_A, VAR_ASCENT,
VAR_MAX_GLYPH_D, VAR_DESCENT,
VAR_MAX_GLYPH_H,
VAR_MAX_GLYPH_W,
VAR_N,
VAR_SAR,
VAR_T,
VAR_TEXT_H, VAR_TH,
VAR_TEXT_W, VAR_TW,
VAR_X,
VAR_Y,
VAR_PICT_TYPE,
VAR_VARS_NB
};
 
enum expansion_mode {
EXP_NONE,
EXP_NORMAL,
EXP_STRFTIME,
};
 
typedef struct {
const AVClass *class;
enum expansion_mode exp_mode; ///< expansion mode to use for the text
int reinit; ///< tells if the filter is being reinited
uint8_t *fontfile; ///< font to be used
uint8_t *text; ///< text to be drawn
AVBPrint expanded_text; ///< used to contain the expanded text
int ft_load_flags; ///< flags used for loading fonts, see FT_LOAD_*
FT_Vector *positions; ///< positions for each element in the text
size_t nb_positions; ///< number of elements of positions array
char *textfile; ///< file with text to be drawn
int x; ///< x position to start drawing text
int y; ///< y position to start drawing text
int max_glyph_w; ///< max glyph width
int max_glyph_h; ///< max glyph height
int shadowx, shadowy;
unsigned int fontsize; ///< font size to use
 
short int draw_box; ///< draw box around text - true or false
int use_kerning; ///< font kerning is used - true/false
int tabsize; ///< tab size
int fix_bounds; ///< do we let it go out of frame bounds - t/f
 
FFDrawContext dc;
FFDrawColor fontcolor; ///< foreground color
FFDrawColor shadowcolor; ///< shadow color
FFDrawColor boxcolor; ///< background color
 
FT_Library library; ///< freetype font library handle
FT_Face face; ///< freetype font face handle
struct AVTreeNode *glyphs; ///< rendered glyphs, stored using the UTF-32 char code
char *x_expr; ///< expression for x position
char *y_expr; ///< expression for y position
AVExpr *x_pexpr, *y_pexpr; ///< parsed expressions for x and y
int64_t basetime; ///< base pts time in the real world for display
double var_values[VAR_VARS_NB];
#if FF_API_DRAWTEXT_OLD_TIMELINE
char *draw_expr; ///< expression for draw
AVExpr *draw_pexpr; ///< parsed expression for draw
int draw; ///< set to zero to prevent drawing
#endif
AVLFG prng; ///< random
char *tc_opt_string; ///< specified timecode option string
AVRational tc_rate; ///< frame rate for timecode
AVTimecode tc; ///< timecode context
int tc24hmax; ///< 1 if timecode is wrapped to 24 hours, 0 otherwise
int reload; ///< reload text file for each frame
int start_number; ///< starting frame number for n/frame_num var
AVDictionary *metadata;
} DrawTextContext;
 
#define OFFSET(x) offsetof(DrawTextContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption drawtext_options[]= {
{"fontfile", "set font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
{"text", "set text", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
{"textfile", "set text file", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
{"fontcolor", "set foreground color", OFFSET(fontcolor.rgba), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
{"boxcolor", "set box color", OFFSET(boxcolor.rgba), AV_OPT_TYPE_COLOR, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS},
{"shadowcolor", "set shadow color", OFFSET(shadowcolor.rgba), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
{"box", "set box", OFFSET(draw_box), AV_OPT_TYPE_INT, {.i64=0}, 0, 1 , FLAGS},
{"fontsize", "set font size", OFFSET(fontsize), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX , FLAGS},
{"x", "set x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS},
{"y", "set y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS},
{"shadowx", "set x", OFFSET(shadowx), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
{"shadowy", "set y", OFFSET(shadowy), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
{"tabsize", "set tab size", OFFSET(tabsize), AV_OPT_TYPE_INT, {.i64=4}, 0, INT_MAX , FLAGS},
{"basetime", "set base time", OFFSET(basetime), AV_OPT_TYPE_INT64, {.i64=AV_NOPTS_VALUE}, INT64_MIN, INT64_MAX , FLAGS},
#if FF_API_DRAWTEXT_OLD_TIMELINE
{"draw", "if false do not draw (deprecated)", OFFSET(draw_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
#endif
 
{"expansion", "set the expansion mode", OFFSET(exp_mode), AV_OPT_TYPE_INT, {.i64=EXP_NORMAL}, 0, 2, FLAGS, "expansion"},
{"none", "set no expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NONE}, 0, 0, FLAGS, "expansion"},
{"normal", "set normal expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NORMAL}, 0, 0, FLAGS, "expansion"},
{"strftime", "set strftime expansion (deprecated)", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_STRFTIME}, 0, 0, FLAGS, "expansion"},
 
{"timecode", "set initial timecode", OFFSET(tc_opt_string), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
{"tc24hmax", "set 24 hours max (timecode only)", OFFSET(tc24hmax), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
{"timecode_rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
{"r", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
{"rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
{"reload", "reload text file for each frame", OFFSET(reload), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
{"fix_bounds", "if true, check and fix text coords to avoid clipping", OFFSET(fix_bounds), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS},
{"start_number", "start frame number for n/frame_num variable", OFFSET(start_number), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS},
 
/* FT_LOAD_* flags */
{ "ft_load_flags", "set font loading flags for libfreetype", OFFSET(ft_load_flags), AV_OPT_TYPE_FLAGS, { .i64 = FT_LOAD_DEFAULT | FT_LOAD_RENDER}, 0, INT_MAX, FLAGS, "ft_load_flags" },
{ "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_DEFAULT }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_scale", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_SCALE }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_hinting", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_HINTING }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "render", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_RENDER }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_bitmap", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_BITMAP }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "vertical_layout", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_VERTICAL_LAYOUT }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "force_autohint", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_FORCE_AUTOHINT }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "crop_bitmap", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_CROP_BITMAP }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "pedantic", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_PEDANTIC }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "ignore_global_advance_width", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_recurse", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_RECURSE }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "ignore_transform", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_IGNORE_TRANSFORM }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "monochrome", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_MONOCHROME }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "linear_design", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_LINEAR_DESIGN }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_autohint", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_AUTOHINT }, .flags = FLAGS, .unit = "ft_load_flags" },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(drawtext);
 
#undef __FTERRORS_H__
#define FT_ERROR_START_LIST {
#define FT_ERRORDEF(e, v, s) { (e), (s) },
#define FT_ERROR_END_LIST { 0, NULL } };
 
struct ft_error
{
int err;
const char *err_msg;
} static ft_errors[] =
#include FT_ERRORS_H
 
#define FT_ERRMSG(e) ft_errors[e].err_msg
 
typedef struct {
FT_Glyph *glyph;
uint32_t code;
FT_Bitmap bitmap; ///< array holding bitmaps of font
FT_BBox bbox;
int advance;
int bitmap_left;
int bitmap_top;
} Glyph;
 
static int glyph_cmp(void *key, const void *b)
{
const Glyph *a = key, *bb = b;
int64_t diff = (int64_t)a->code - (int64_t)bb->code;
return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}
 
/**
* Load glyphs corresponding to the UTF-32 codepoint code.
*/
static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
{
DrawTextContext *s = ctx->priv;
Glyph *glyph;
struct AVTreeNode *node = NULL;
int ret;
 
/* load glyph into s->face->glyph */
if (FT_Load_Char(s->face, code, s->ft_load_flags))
return AVERROR(EINVAL);
 
/* save glyph */
if (!(glyph = av_mallocz(sizeof(*glyph))) ||
!(glyph->glyph = av_mallocz(sizeof(*glyph->glyph)))) {
ret = AVERROR(ENOMEM);
goto error;
}
glyph->code = code;
 
if (FT_Get_Glyph(s->face->glyph, glyph->glyph)) {
ret = AVERROR(EINVAL);
goto error;
}
 
glyph->bitmap = s->face->glyph->bitmap;
glyph->bitmap_left = s->face->glyph->bitmap_left;
glyph->bitmap_top = s->face->glyph->bitmap_top;
glyph->advance = s->face->glyph->advance.x >> 6;
 
/* measure text height to calculate text_height (or the maximum text height) */
FT_Glyph_Get_CBox(*glyph->glyph, ft_glyph_bbox_pixels, &glyph->bbox);
 
/* cache the newly created glyph */
if (!(node = av_tree_node_alloc())) {
ret = AVERROR(ENOMEM);
goto error;
}
av_tree_insert(&s->glyphs, glyph, glyph_cmp, &node);
 
if (glyph_ptr)
*glyph_ptr = glyph;
return 0;
 
error:
if (glyph)
av_freep(&glyph->glyph);
av_freep(&glyph);
av_freep(&node);
return ret;
}
 
static int load_font_file(AVFilterContext *ctx, const char *path, int index,
const char **error)
{
DrawTextContext *s = ctx->priv;
int err;
 
err = FT_New_Face(s->library, path, index, &s->face);
if (err) {
*error = FT_ERRMSG(err);
return AVERROR(EINVAL);
}
return 0;
}
 
#if CONFIG_FONTCONFIG
static int load_font_fontconfig(AVFilterContext *ctx, const char **error)
{
DrawTextContext *s = ctx->priv;
FcConfig *fontconfig;
FcPattern *pattern, *fpat;
FcResult result = FcResultMatch;
FcChar8 *filename;
int err, index;
double size;
 
fontconfig = FcInitLoadConfigAndFonts();
if (!fontconfig) {
*error = "impossible to init fontconfig\n";
return AVERROR(EINVAL);
}
pattern = FcNameParse(s->fontfile ? s->fontfile :
(uint8_t *)(intptr_t)"default");
if (!pattern) {
*error = "could not parse fontconfig pattern";
return AVERROR(EINVAL);
}
if (!FcConfigSubstitute(fontconfig, pattern, FcMatchPattern)) {
*error = "could not substitue fontconfig options"; /* very unlikely */
return AVERROR(EINVAL);
}
FcDefaultSubstitute(pattern);
fpat = FcFontMatch(fontconfig, pattern, &result);
if (!fpat || result != FcResultMatch) {
*error = "impossible to find a matching font";
return AVERROR(EINVAL);
}
if (FcPatternGetString (fpat, FC_FILE, 0, &filename) != FcResultMatch ||
FcPatternGetInteger(fpat, FC_INDEX, 0, &index ) != FcResultMatch ||
FcPatternGetDouble (fpat, FC_SIZE, 0, &size ) != FcResultMatch) {
*error = "impossible to find font information";
return AVERROR(EINVAL);
}
av_log(ctx, AV_LOG_INFO, "Using \"%s\"\n", filename);
if (!s->fontsize)
s->fontsize = size + 0.5;
err = load_font_file(ctx, filename, index, error);
if (err)
return err;
FcPatternDestroy(fpat);
FcPatternDestroy(pattern);
FcConfigDestroy(fontconfig);
return 0;
}
#endif
 
static int load_font(AVFilterContext *ctx)
{
DrawTextContext *s = ctx->priv;
int err;
const char *error = "unknown error\n";
 
/* load the face, and set up the encoding, which is by default UTF-8 */
err = load_font_file(ctx, s->fontfile, 0, &error);
if (!err)
return 0;
#if CONFIG_FONTCONFIG
err = load_font_fontconfig(ctx, &error);
if (!err)
return 0;
#endif
av_log(ctx, AV_LOG_ERROR, "Could not load font \"%s\": %s\n",
s->fontfile, error);
return err;
}
 
static int load_textfile(AVFilterContext *ctx)
{
DrawTextContext *s = ctx->priv;
int err;
uint8_t *textbuf;
size_t textbuf_size;
 
if ((err = av_file_map(s->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) {
av_log(ctx, AV_LOG_ERROR,
"The text file '%s' could not be read or is empty\n",
s->textfile);
return err;
}
 
if (!(s->text = av_realloc(s->text, textbuf_size + 1)))
return AVERROR(ENOMEM);
memcpy(s->text, textbuf, textbuf_size);
s->text[textbuf_size] = 0;
av_file_unmap(textbuf, textbuf_size);
 
return 0;
}
 
static av_cold int init(AVFilterContext *ctx)
{
int err;
DrawTextContext *s = ctx->priv;
Glyph *glyph;
 
#if FF_API_DRAWTEXT_OLD_TIMELINE
if (s->draw_expr)
av_log(ctx, AV_LOG_WARNING, "'draw' option is deprecated and will be removed soon, "
"you are encouraged to use the generic timeline support through the 'enable' option\n");
#endif
 
if (!s->fontfile && !CONFIG_FONTCONFIG) {
av_log(ctx, AV_LOG_ERROR, "No font filename provided\n");
return AVERROR(EINVAL);
}
 
if (s->textfile) {
if (s->text) {
av_log(ctx, AV_LOG_ERROR,
"Both text and text file provided. Please provide only one\n");
return AVERROR(EINVAL);
}
if ((err = load_textfile(ctx)) < 0)
return err;
}
 
if (s->reload && !s->textfile)
av_log(ctx, AV_LOG_WARNING, "No file to reload\n");
 
if (s->tc_opt_string) {
int ret = av_timecode_init_from_string(&s->tc, s->tc_rate,
s->tc_opt_string, ctx);
if (ret < 0)
return ret;
if (s->tc24hmax)
s->tc.flags |= AV_TIMECODE_FLAG_24HOURSMAX;
if (!s->text)
s->text = av_strdup("");
}
 
if (!s->text) {
av_log(ctx, AV_LOG_ERROR,
"Either text, a valid file or a timecode must be provided\n");
return AVERROR(EINVAL);
}
 
if ((err = FT_Init_FreeType(&(s->library)))) {
av_log(ctx, AV_LOG_ERROR,
"Could not load FreeType: %s\n", FT_ERRMSG(err));
return AVERROR(EINVAL);
}
 
err = load_font(ctx);
if (err)
return err;
if (!s->fontsize)
s->fontsize = 16;
if ((err = FT_Set_Pixel_Sizes(s->face, 0, s->fontsize))) {
av_log(ctx, AV_LOG_ERROR, "Could not set font size to %d pixels: %s\n",
s->fontsize, FT_ERRMSG(err));
return AVERROR(EINVAL);
}
 
s->use_kerning = FT_HAS_KERNING(s->face);
 
/* load the fallback glyph with code 0 */
load_glyph(ctx, NULL, 0);
 
/* set the tabsize in pixels */
if ((err = load_glyph(ctx, &glyph, ' ')) < 0) {
av_log(ctx, AV_LOG_ERROR, "Could not set tabsize.\n");
return err;
}
s->tabsize *= glyph->advance;
 
if (s->exp_mode == EXP_STRFTIME &&
(strchr(s->text, '%') || strchr(s->text, '\\')))
av_log(ctx, AV_LOG_WARNING, "expansion=strftime is deprecated.\n");
 
av_bprint_init(&s->expanded_text, 0, AV_BPRINT_SIZE_UNLIMITED);
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
return 0;
}
 
static int glyph_enu_free(void *opaque, void *elem)
{
Glyph *glyph = elem;
 
FT_Done_Glyph(*glyph->glyph);
av_freep(&glyph->glyph);
av_free(elem);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
DrawTextContext *s = ctx->priv;
 
av_expr_free(s->x_pexpr);
av_expr_free(s->y_pexpr);
#if FF_API_DRAWTEXT_OLD_TIMELINE
av_expr_free(s->draw_pexpr);
s->x_pexpr = s->y_pexpr = s->draw_pexpr = NULL;
#endif
av_freep(&s->positions);
s->nb_positions = 0;
 
 
av_tree_enumerate(s->glyphs, NULL, NULL, glyph_enu_free);
av_tree_destroy(s->glyphs);
s->glyphs = NULL;
 
FT_Done_Face(s->face);
FT_Done_FreeType(s->library);
 
av_bprint_finalize(&s->expanded_text, NULL);
}
 
static inline int is_newline(uint32_t c)
{
return c == '\n' || c == '\r' || c == '\f' || c == '\v';
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
DrawTextContext *s = ctx->priv;
int ret;
 
ff_draw_init(&s->dc, inlink->format, 0);
ff_draw_color(&s->dc, &s->fontcolor, s->fontcolor.rgba);
ff_draw_color(&s->dc, &s->shadowcolor, s->shadowcolor.rgba);
ff_draw_color(&s->dc, &s->boxcolor, s->boxcolor.rgba);
 
s->var_values[VAR_w] = s->var_values[VAR_W] = s->var_values[VAR_MAIN_W] = inlink->w;
s->var_values[VAR_h] = s->var_values[VAR_H] = s->var_values[VAR_MAIN_H] = inlink->h;
s->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
s->var_values[VAR_DAR] = (double)inlink->w / inlink->h * s->var_values[VAR_SAR];
s->var_values[VAR_HSUB] = 1 << s->dc.hsub_max;
s->var_values[VAR_VSUB] = 1 << s->dc.vsub_max;
s->var_values[VAR_X] = NAN;
s->var_values[VAR_Y] = NAN;
s->var_values[VAR_T] = NAN;
 
av_lfg_init(&s->prng, av_get_random_seed());
 
av_expr_free(s->x_pexpr);
av_expr_free(s->y_pexpr);
#if FF_API_DRAWTEXT_OLD_TIMELINE
av_expr_free(s->draw_pexpr);
s->x_pexpr = s->y_pexpr = s->draw_pexpr = NULL;
#else
s->x_pexpr = s->y_pexpr = NULL;
#endif
 
if ((ret = av_expr_parse(&s->x_pexpr, s->x_expr, var_names,
NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 ||
(ret = av_expr_parse(&s->y_pexpr, s->y_expr, var_names,
NULL, NULL, fun2_names, fun2, 0, ctx)) < 0)
 
return AVERROR(EINVAL);
#if FF_API_DRAWTEXT_OLD_TIMELINE
if (s->draw_expr &&
(ret = av_expr_parse(&s->draw_pexpr, s->draw_expr, var_names,
NULL, NULL, fun2_names, fun2, 0, ctx)) < 0)
return ret;
#endif
 
return 0;
}
 
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
{
DrawTextContext *s = ctx->priv;
 
if (!strcmp(cmd, "reinit")) {
int ret;
uninit(ctx);
s->reinit = 1;
if ((ret = init(ctx)) < 0)
return ret;
return config_input(ctx->inputs[0]);
}
 
return AVERROR(ENOSYS);
}
 
static int func_pict_type(AVFilterContext *ctx, AVBPrint *bp,
char *fct, unsigned argc, char **argv, int tag)
{
DrawTextContext *s = ctx->priv;
 
av_bprintf(bp, "%c", av_get_picture_type_char(s->var_values[VAR_PICT_TYPE]));
return 0;
}
 
static int func_pts(AVFilterContext *ctx, AVBPrint *bp,
char *fct, unsigned argc, char **argv, int tag)
{
DrawTextContext *s = ctx->priv;
 
av_bprintf(bp, "%.6f", s->var_values[VAR_T]);
return 0;
}
 
static int func_frame_num(AVFilterContext *ctx, AVBPrint *bp,
char *fct, unsigned argc, char **argv, int tag)
{
DrawTextContext *s = ctx->priv;
 
av_bprintf(bp, "%d", (int)s->var_values[VAR_N]);
return 0;
}
 
static int func_metadata(AVFilterContext *ctx, AVBPrint *bp,
char *fct, unsigned argc, char **argv, int tag)
{
DrawTextContext *s = ctx->priv;
AVDictionaryEntry *e = av_dict_get(s->metadata, argv[0], NULL, 0);
 
if (e && e->value)
av_bprintf(bp, "%s", e->value);
return 0;
}
 
#if !HAVE_LOCALTIME_R
static void localtime_r(const time_t *t, struct tm *tm)
{
*tm = *localtime(t);
}
#endif
 
static int func_strftime(AVFilterContext *ctx, AVBPrint *bp,
char *fct, unsigned argc, char **argv, int tag)
{
const char *fmt = argc ? argv[0] : "%Y-%m-%d %H:%M:%S";
time_t now;
struct tm tm;
 
time(&now);
if (tag == 'L')
localtime_r(&now, &tm);
else
tm = *gmtime(&now);
av_bprint_strftime(bp, fmt, &tm);
return 0;
}
 
static int func_eval_expr(AVFilterContext *ctx, AVBPrint *bp,
char *fct, unsigned argc, char **argv, int tag)
{
DrawTextContext *s = ctx->priv;
double res;
int ret;
 
ret = av_expr_parse_and_eval(&res, argv[0], var_names, s->var_values,
NULL, NULL, fun2_names, fun2,
&s->prng, 0, ctx);
if (ret < 0)
av_log(ctx, AV_LOG_ERROR,
"Expression '%s' for the expr text expansion function is not valid\n",
argv[0]);
else
av_bprintf(bp, "%f", res);
 
return ret;
}
 
static const struct drawtext_function {
const char *name;
unsigned argc_min, argc_max;
int tag; /**< opaque argument to func */
int (*func)(AVFilterContext *, AVBPrint *, char *, unsigned, char **, int);
} functions[] = {
{ "expr", 1, 1, 0, func_eval_expr },
{ "e", 1, 1, 0, func_eval_expr },
{ "pict_type", 0, 0, 0, func_pict_type },
{ "pts", 0, 0, 0, func_pts },
{ "gmtime", 0, 1, 'G', func_strftime },
{ "localtime", 0, 1, 'L', func_strftime },
{ "frame_num", 0, 0, 0, func_frame_num },
{ "n", 0, 0, 0, func_frame_num },
{ "metadata", 1, 1, 0, func_metadata },
};
 
static int eval_function(AVFilterContext *ctx, AVBPrint *bp, char *fct,
unsigned argc, char **argv)
{
unsigned i;
 
for (i = 0; i < FF_ARRAY_ELEMS(functions); i++) {
if (strcmp(fct, functions[i].name))
continue;
if (argc < functions[i].argc_min) {
av_log(ctx, AV_LOG_ERROR, "%%{%s} requires at least %d arguments\n",
fct, functions[i].argc_min);
return AVERROR(EINVAL);
}
if (argc > functions[i].argc_max) {
av_log(ctx, AV_LOG_ERROR, "%%{%s} requires at most %d arguments\n",
fct, functions[i].argc_max);
return AVERROR(EINVAL);
}
break;
}
if (i >= FF_ARRAY_ELEMS(functions)) {
av_log(ctx, AV_LOG_ERROR, "%%{%s} is not known\n", fct);
return AVERROR(EINVAL);
}
return functions[i].func(ctx, bp, fct, argc, argv, functions[i].tag);
}
 
static int expand_function(AVFilterContext *ctx, AVBPrint *bp, char **rtext)
{
const char *text = *rtext;
char *argv[16] = { NULL };
unsigned argc = 0, i;
int ret;
 
if (*text != '{') {
av_log(ctx, AV_LOG_ERROR, "Stray %% near '%s'\n", text);
return AVERROR(EINVAL);
}
text++;
while (1) {
if (!(argv[argc++] = av_get_token(&text, ":}"))) {
ret = AVERROR(ENOMEM);
goto end;
}
if (!*text) {
av_log(ctx, AV_LOG_ERROR, "Unterminated %%{} near '%s'\n", *rtext);
ret = AVERROR(EINVAL);
goto end;
}
if (argc == FF_ARRAY_ELEMS(argv))
av_freep(&argv[--argc]); /* error will be caught later */
if (*text == '}')
break;
text++;
}
 
if ((ret = eval_function(ctx, bp, argv[0], argc - 1, argv + 1)) < 0)
goto end;
ret = 0;
*rtext = (char *)text + 1;
 
end:
for (i = 0; i < argc; i++)
av_freep(&argv[i]);
return ret;
}
 
static int expand_text(AVFilterContext *ctx)
{
DrawTextContext *s = ctx->priv;
char *text = s->text;
AVBPrint *bp = &s->expanded_text;
int ret;
 
av_bprint_clear(bp);
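/* A backslash escapes the next character (so "\%" emits a literal '%');
 * an unescaped '%' introduces a %{function:arg} expansion. */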
while (*text) {
if (*text == '\\' && text[1]) {
av_bprint_chars(bp, text[1], 1);
text += 2;
} else if (*text == '%') {
text++;
if ((ret = expand_function(ctx, bp, &text)) < 0)
return ret;
} else {
av_bprint_chars(bp, *text, 1);
text++;
}
}
if (!av_bprint_is_complete(bp))
return AVERROR(ENOMEM);
return 0;
}
 
static int draw_glyphs(DrawTextContext *s, AVFrame *frame,
int width, int height, const uint8_t rgbcolor[4], FFDrawColor *color, int x, int y)
{
char *text = s->expanded_text.str;
uint32_t code = 0;
int i, x1, y1;
uint8_t *p;
Glyph *glyph = NULL;
 
for (i = 0, p = text; *p; i++) {
Glyph dummy = { 0 };
GET_UTF8(code, *p++, continue;);
 
/* skip new line chars, just go to new line */
if (code == '\n' || code == '\r' || code == '\t')
continue;
 
dummy.code = code;
glyph = av_tree_find(s->glyphs, &dummy, (void *)glyph_cmp, NULL);
 
if (glyph->bitmap.pixel_mode != FT_PIXEL_MODE_MONO &&
glyph->bitmap.pixel_mode != FT_PIXEL_MODE_GRAY)
return AVERROR(EINVAL);
 
x1 = s->positions[i].x+s->x+x;
y1 = s->positions[i].y+s->y+y;
 
ff_blend_mask(&s->dc, color,
frame->data, frame->linesize, width, height,
glyph->bitmap.buffer, glyph->bitmap.pitch,
glyph->bitmap.width, glyph->bitmap.rows,
glyph->bitmap.pixel_mode == FT_PIXEL_MODE_MONO ? 0 : 3,
0, x1, y1);
}
 
return 0;
}
 
static int draw_text(AVFilterContext *ctx, AVFrame *frame,
int width, int height)
{
DrawTextContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
 
uint32_t code = 0, prev_code = 0;
int x = 0, y = 0, i = 0, ret;
int max_text_line_w = 0, len;
int box_w, box_h;
char *text;
uint8_t *p;
int y_min = 32000, y_max = -32000;
int x_min = 32000, x_max = -32000;
FT_Vector delta;
Glyph *glyph = NULL, *prev_glyph = NULL;
Glyph dummy = { 0 };
 
time_t now = time(0);
struct tm ltime;
AVBPrint *bp = &s->expanded_text;
 
av_bprint_clear(bp);
 
if (s->basetime != AV_NOPTS_VALUE)
now = frame->pts * av_q2d(ctx->inputs[0]->time_base) + s->basetime / 1000000;
 
switch (s->exp_mode) {
case EXP_NONE:
av_bprintf(bp, "%s", s->text);
break;
case EXP_NORMAL:
if ((ret = expand_text(ctx)) < 0)
return ret;
break;
case EXP_STRFTIME:
localtime_r(&now, &ltime);
av_bprint_strftime(bp, s->text, &ltime);
break;
}
 
if (s->tc_opt_string) {
char tcbuf[AV_TIMECODE_STR_SIZE];
av_timecode_make_string(&s->tc, tcbuf, inlink->frame_count);
av_bprint_clear(bp);
av_bprintf(bp, "%s%s", s->text, tcbuf);
}
 
if (!av_bprint_is_complete(bp))
return AVERROR(ENOMEM);
text = s->expanded_text.str;
if ((len = s->expanded_text.len) > s->nb_positions) {
if (!(s->positions =
av_realloc(s->positions, len*sizeof(*s->positions))))
return AVERROR(ENOMEM);
s->nb_positions = len;
}
 
x = 0;
y = 0;
 
/* load and cache glyphs */
for (i = 0, p = text; *p; i++) {
GET_UTF8(code, *p++, continue;);
 
/* get glyph */
dummy.code = code;
glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
if (!glyph) {
load_glyph(ctx, &glyph, code);
}
 
y_min = FFMIN(glyph->bbox.yMin, y_min);
y_max = FFMAX(glyph->bbox.yMax, y_max);
x_min = FFMIN(glyph->bbox.xMin, x_min);
x_max = FFMAX(glyph->bbox.xMax, x_max);
}
s->max_glyph_h = y_max - y_min;
s->max_glyph_w = x_max - x_min;
 
/* compute and save position for each glyph */
glyph = NULL;
for (i = 0, p = text; *p; i++) {
GET_UTF8(code, *p++, continue;);
 
/* skip the \n in the sequence \r\n */
if (prev_code == '\r' && code == '\n')
continue;
 
prev_code = code;
if (is_newline(code)) {
 
max_text_line_w = FFMAX(max_text_line_w, x);
y += s->max_glyph_h;
x = 0;
continue;
}
 
/* get glyph */
prev_glyph = glyph;
dummy.code = code;
glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
 
/* kerning */
if (s->use_kerning && prev_glyph && glyph->code) {
FT_Get_Kerning(s->face, prev_glyph->code, glyph->code,
ft_kerning_default, &delta);
x += delta.x >> 6;
}
 
/* save position */
s->positions[i].x = x + glyph->bitmap_left;
s->positions[i].y = y - glyph->bitmap_top + y_max;
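/* A tab advances x to the next multiple of tabsize (the next tab stop);
 * any other glyph advances x by its own advance width. */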
if (code == '\t') x = (x / s->tabsize + 1)*s->tabsize;
else x += glyph->advance;
}
 
max_text_line_w = FFMAX(x, max_text_line_w);
 
s->var_values[VAR_TW] = s->var_values[VAR_TEXT_W] = max_text_line_w;
s->var_values[VAR_TH] = s->var_values[VAR_TEXT_H] = y + s->max_glyph_h;
 
s->var_values[VAR_MAX_GLYPH_W] = s->max_glyph_w;
s->var_values[VAR_MAX_GLYPH_H] = s->max_glyph_h;
s->var_values[VAR_MAX_GLYPH_A] = s->var_values[VAR_ASCENT ] = y_max;
s->var_values[VAR_MAX_GLYPH_D] = s->var_values[VAR_DESCENT] = y_min;
 
s->var_values[VAR_LINE_H] = s->var_values[VAR_LH] = s->max_glyph_h;
 
s->x = s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
s->y = s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, &s->prng);
s->x = s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
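/* x is evaluated a second time, after y, so that x expressions
 * referencing y see its final value (and vice versa via the first pass). */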
#if FF_API_DRAWTEXT_OLD_TIMELINE
if (s->draw_pexpr) {
s->draw = av_expr_eval(s->draw_pexpr, s->var_values, &s->prng);

if (!s->draw)
return 0;
}
if (ctx->is_disabled)
return 0;
#endif
 
box_w = FFMIN(width - 1 , max_text_line_w);
box_h = FFMIN(height - 1, y + s->max_glyph_h);
 
/* draw box */
if (s->draw_box)
ff_blend_rectangle(&s->dc, &s->boxcolor,
frame->data, frame->linesize, width, height,
s->x, s->y, box_w, box_h);
 
if (s->shadowx || s->shadowy) {
if ((ret = draw_glyphs(s, frame, width, height, s->shadowcolor.rgba,
&s->shadowcolor, s->shadowx, s->shadowy)) < 0)
return ret;
}
 
if ((ret = draw_glyphs(s, frame, width, height, s->fontcolor.rgba,
&s->fontcolor, 0, 0)) < 0)
return ret;
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
DrawTextContext *s = ctx->priv;
int ret;
 
if (s->reload)
if ((ret = load_textfile(ctx)) < 0)
return ret;
 
s->var_values[VAR_N] = inlink->frame_count+s->start_number;
s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(inlink->time_base);
 
s->var_values[VAR_PICT_TYPE] = frame->pict_type;
s->metadata = av_frame_get_metadata(frame);
 
draw_text(ctx, frame, frame->width, frame->height);
 
av_log(ctx, AV_LOG_DEBUG, "n:%d t:%f text_w:%d text_h:%d x:%d y:%d\n",
(int)s->var_values[VAR_N], s->var_values[VAR_T],
(int)s->var_values[VAR_TEXT_W], (int)s->var_values[VAR_TEXT_H],
s->x, s->y);
 
return ff_filter_frame(outlink, frame);
}
 
static const AVFilterPad avfilter_vf_drawtext_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
.needs_writable = 1,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_drawtext_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_drawtext = {
.name = "drawtext",
.description = NULL_IF_CONFIG_SMALL("Draw text on top of video frames using libfreetype library."),
.priv_size = sizeof(DrawTextContext),
.priv_class = &drawtext_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = avfilter_vf_drawtext_inputs,
.outputs = avfilter_vf_drawtext_outputs,
.process_command = command,
#if FF_API_DRAWTEXT_OLD_TIMELINE
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
#else
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
#endif
};
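
/* Example usage (illustrative; "/path/to/font.ttf" is a placeholder and
 * the option names come from drawtext_options above):
 *   ffmpeg -i input.avi -vf "drawtext=fontfile=/path/to/font.ttf:text=hello:x=10:y=10:fontcolor=white" output.avi
 */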
/contrib/sdk/sources/ffmpeg/libavfilter/vf_edgedetect.c
0,0 → 1,331
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Edge detection filter
*
* @see https://en.wikipedia.org/wiki/Canny_edge_detector
*/
 
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
uint8_t *tmpbuf;
uint16_t *gradients;
char *directions;
double low, high;
uint8_t low_u8, high_u8;
} EdgeDetectContext;
 
#define OFFSET(x) offsetof(EdgeDetectContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption edgedetect_options[] = {
{ "high", "set high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=50/255.}, 0, 1, FLAGS },
{ "low", "set low threshold", OFFSET(low), AV_OPT_TYPE_DOUBLE, {.dbl=20/255.}, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(edgedetect);
 
static av_cold int init(AVFilterContext *ctx)
{
EdgeDetectContext *edgedetect = ctx->priv;
 
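/* The thresholds are expressed as fractions of full scale; map them to 8-bit
* values with rounding, e.g. the default high of 50/255 maps back to 50 and
* low of 20/255 back to 20. */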
edgedetect->low_u8 = edgedetect->low * 255. + .5;
edgedetect->high_u8 = edgedetect->high * 255. + .5;
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_props(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
EdgeDetectContext *edgedetect = ctx->priv;
 
edgedetect->tmpbuf = av_malloc(inlink->w * inlink->h);
edgedetect->gradients = av_calloc(inlink->w * inlink->h, sizeof(*edgedetect->gradients));
edgedetect->directions = av_malloc(inlink->w * inlink->h);
if (!edgedetect->tmpbuf || !edgedetect->gradients || !edgedetect->directions)
return AVERROR(ENOMEM);
return 0;
}
 
static void gaussian_blur(AVFilterContext *ctx, int w, int h,
uint8_t *dst, int dst_linesize,
const uint8_t *src, int src_linesize)
{
int i, j;
 
memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
for (j = 2; j < h - 2; j++) {
dst[0] = src[0];
dst[1] = src[1];
for (i = 2; i < w - 2; i++) {
/* Gaussian mask of size 5x5 with sigma = 1.4 */
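/* The 25 integer coefficients below sum to 159, hence the final division:
* the result stays a properly normalized 8-bit value without any
* floating-point math. */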
dst[i] = ((src[-2*src_linesize + i-2] + src[2*src_linesize + i-2]) * 2
+ (src[-2*src_linesize + i-1] + src[2*src_linesize + i-1]) * 4
+ (src[-2*src_linesize + i ] + src[2*src_linesize + i ]) * 5
+ (src[-2*src_linesize + i+1] + src[2*src_linesize + i+1]) * 4
+ (src[-2*src_linesize + i+2] + src[2*src_linesize + i+2]) * 2
 
+ (src[ -src_linesize + i-2] + src[ src_linesize + i-2]) * 4
+ (src[ -src_linesize + i-1] + src[ src_linesize + i-1]) * 9
+ (src[ -src_linesize + i ] + src[ src_linesize + i ]) * 12
+ (src[ -src_linesize + i+1] + src[ src_linesize + i+1]) * 9
+ (src[ -src_linesize + i+2] + src[ src_linesize + i+2]) * 4
 
+ src[i-2] * 5
+ src[i-1] * 12
+ src[i ] * 15
+ src[i+1] * 12
+ src[i+2] * 5) / 159;
}
dst[i ] = src[i ];
dst[i + 1] = src[i + 1];
 
dst += dst_linesize;
src += src_linesize;
}
memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
memcpy(dst, src, w);
}
 
enum {
DIRECTION_45UP,
DIRECTION_45DOWN,
DIRECTION_HORIZONTAL,
DIRECTION_VERTICAL,
};
 
static int get_rounded_direction(int gx, int gy)
{
/* reference angles:
* tan( pi/8) = sqrt(2)-1
* tan(3pi/8) = sqrt(2)+1
* Gy/Gx is the tangent of the angle (theta), so Gy/Gx is compared against
* <ref-angle>, or more simply Gy against <ref-angle>*Gx
*
* Gx and Gy bounds = [-1020;1020], using 16-bit arithmetic:
* round((sqrt(2)-1) * (1<<16)) = 27146
* round((sqrt(2)+1) * (1<<16)) = 158218
*/
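/* Worked example: gx=10, gy=3 gives gy<<16 = 196608 and tanpi8gx = 271460,
* so -271460 < 196608 < 271460 and the gradient is classified as
* DIRECTION_HORIZONTAL. */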
if (gx) {
int tanpi8gx, tan3pi8gx;
 
if (gx < 0)
gx = -gx, gy = -gy;
gy <<= 16;
tanpi8gx = 27146 * gx;
tan3pi8gx = 158218 * gx;
if (gy > -tan3pi8gx && gy < -tanpi8gx) return DIRECTION_45UP;
if (gy > -tanpi8gx && gy < tanpi8gx) return DIRECTION_HORIZONTAL;
if (gy > tanpi8gx && gy < tan3pi8gx) return DIRECTION_45DOWN;
}
return DIRECTION_VERTICAL;
}
 
static void sobel(AVFilterContext *ctx, int w, int h,
uint16_t *dst, int dst_linesize,
const uint8_t *src, int src_linesize)
{
int i, j;
EdgeDetectContext *edgedetect = ctx->priv;
 
for (j = 1; j < h - 1; j++) {
dst += dst_linesize;
src += src_linesize;
for (i = 1; i < w - 1; i++) {
const int gx =
-1*src[-src_linesize + i-1] + 1*src[-src_linesize + i+1]
-2*src[ i-1] + 2*src[ i+1]
-1*src[ src_linesize + i-1] + 1*src[ src_linesize + i+1];
const int gy =
-1*src[-src_linesize + i-1] + 1*src[ src_linesize + i-1]
-2*src[-src_linesize + i ] + 2*src[ src_linesize + i ]
-1*src[-src_linesize + i+1] + 1*src[ src_linesize + i+1];
 
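/* Manhattan approximation |gx| + |gy| of the gradient magnitude: each
* component is bounded by 1020 for 8-bit input, so the sum always fits in
* the 16-bit gradient buffer. */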
dst[i] = FFABS(gx) + FFABS(gy);
edgedetect->directions[j*w + i] = get_rounded_direction(gx, gy);
}
}
}
 
static void non_maximum_suppression(AVFilterContext *ctx, int w, int h,
uint8_t *dst, int dst_linesize,
const uint16_t *src, int src_linesize)
{
int i, j;
EdgeDetectContext *edgedetect = ctx->priv;
 
#define COPY_MAXIMA(ay, ax, by, bx) do { \
if (src[i] > src[(ay)*src_linesize + i+(ax)] && \
src[i] > src[(by)*src_linesize + i+(bx)]) \
dst[i] = av_clip_uint8(src[i]); \
} while (0)
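/* Keep a pixel only if it is a strict local maximum along its quantized
* gradient direction; everything else stays 0 in the pre-cleared
* destination buffer. */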
 
for (j = 1; j < h - 1; j++) {
dst += dst_linesize;
src += src_linesize;
for (i = 1; i < w - 1; i++) {
switch (edgedetect->directions[j*w + i]) {
case DIRECTION_45UP: COPY_MAXIMA( 1, -1, -1, 1); break;
case DIRECTION_45DOWN: COPY_MAXIMA(-1, -1, 1, 1); break;
case DIRECTION_HORIZONTAL: COPY_MAXIMA( 0, -1, 0, 1); break;
case DIRECTION_VERTICAL: COPY_MAXIMA(-1, 0, 1, 0); break;
}
}
}
}
 
static void double_threshold(AVFilterContext *ctx, int w, int h,
uint8_t *dst, int dst_linesize,
const uint8_t *src, int src_linesize)
{
int i, j;
EdgeDetectContext *edgedetect = ctx->priv;
const int low = edgedetect->low_u8;
const int high = edgedetect->high_u8;
 
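/* Hysteresis thresholding: pixels above 'high' are strong edges and always
* kept, pixels above 'low' survive only when one of their 8 neighbours is
* strong. This single pass approximates the full edge tracking of the Canny
* algorithm. */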
for (j = 0; j < h; j++) {
for (i = 0; i < w; i++) {
if (src[i] > high) {
dst[i] = src[i];
continue;
}
 
if (!(!i || i == w - 1 || !j || j == h - 1) &&
src[i] > low &&
(src[-src_linesize + i-1] > high ||
src[-src_linesize + i ] > high ||
src[-src_linesize + i+1] > high ||
src[ i-1] > high ||
src[ i+1] > high ||
src[ src_linesize + i-1] > high ||
src[ src_linesize + i ] > high ||
src[ src_linesize + i+1] > high))
dst[i] = src[i];
else
dst[i] = 0;
}
dst += dst_linesize;
src += src_linesize;
}
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
EdgeDetectContext *edgedetect = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
uint8_t *tmpbuf = edgedetect->tmpbuf;
uint16_t *gradients = edgedetect->gradients;
int direct = 0;
AVFrame *out;
 
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
 
/* gaussian filter to reduce noise */
gaussian_blur(ctx, inlink->w, inlink->h,
tmpbuf, inlink->w,
in->data[0], in->linesize[0]);
 
/* compute the 16-bit gradients and directions for the next step */
sobel(ctx, inlink->w, inlink->h,
gradients, inlink->w,
tmpbuf, inlink->w);
 
/* non_maximum_suppression() will actually keep & clip what's necessary and
* ignore the rest, so we need a clean output buffer */
memset(tmpbuf, 0, inlink->w * inlink->h);
non_maximum_suppression(ctx, inlink->w, inlink->h,
tmpbuf, inlink->w,
gradients, inlink->w);
 
/* keep high values, or low values surrounded by high values */
double_threshold(ctx, inlink->w, inlink->h,
out->data[0], out->linesize[0],
tmpbuf, inlink->w);
 
if (!direct)
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
EdgeDetectContext *edgedetect = ctx->priv;
av_freep(&edgedetect->tmpbuf);
av_freep(&edgedetect->gradients);
av_freep(&edgedetect->directions);
}
 
static const AVFilterPad edgedetect_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad edgedetect_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_edgedetect = {
.name = "edgedetect",
.description = NULL_IF_CONFIG_SMALL("Detect and draw edges."),
.priv_size = sizeof(EdgeDetectContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = edgedetect_inputs,
.outputs = edgedetect_outputs,
.priv_class = &edgedetect_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_extractplanes.c
0,0 → 1,335
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "internal.h"
 
#define PLANE_R 0x01
#define PLANE_G 0x02
#define PLANE_B 0x04
#define PLANE_A 0x08
#define PLANE_Y 0x10
#define PLANE_U 0x20
#define PLANE_V 0x40
 
typedef struct {
const AVClass *class;
int requested_planes;
int map[4];
int linesize[4];
int is_packed_rgb;
int depth;
int step;
} ExtractPlanesContext;
 
#define OFFSET(x) offsetof(ExtractPlanesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption extractplanes_options[] = {
{ "planes", "set planes", OFFSET(requested_planes), AV_OPT_TYPE_FLAGS, {.i64=1}, 1, 0xff, FLAGS, "flags"},
{ "y", "set luma plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_Y}, 0, 0, FLAGS, "flags"},
{ "u", "set u plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_U}, 0, 0, FLAGS, "flags"},
{ "v", "set v plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_V}, 0, 0, FLAGS, "flags"},
{ "r", "set red plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_R}, 0, 0, FLAGS, "flags"},
{ "g", "set green plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_G}, 0, 0, FLAGS, "flags"},
{ "b", "set blue plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_B}, 0, 0, FLAGS, "flags"},
{ "a", "set alpha plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_A}, 0, 0, FLAGS, "flags"},
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(extractplanes);
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat in_pixfmts[] = {
AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA422P,
AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUVA420P16LE,
AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUVA420P16BE,
AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUVA422P16LE,
AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUVA422P16BE,
AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUVA444P16LE,
AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUVA444P16BE,
AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY8A,
AV_PIX_FMT_GRAY16LE, AV_PIX_FMT_GRAY16BE,
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
AV_PIX_FMT_RGB48LE, AV_PIX_FMT_BGR48LE,
AV_PIX_FMT_RGB48BE, AV_PIX_FMT_BGR48BE,
AV_PIX_FMT_RGBA64LE, AV_PIX_FMT_BGRA64LE,
AV_PIX_FMT_RGBA64BE, AV_PIX_FMT_BGRA64BE,
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
AV_PIX_FMT_GBRP16LE, AV_PIX_FMT_GBRP16BE,
AV_PIX_FMT_GBRAP16LE, AV_PIX_FMT_GBRAP16BE,
AV_PIX_FMT_NONE,
};
static const enum AVPixelFormat out8_pixfmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
static const enum AVPixelFormat out16le_pixfmts[] = { AV_PIX_FMT_GRAY16LE, AV_PIX_FMT_NONE };
static const enum AVPixelFormat out16be_pixfmts[] = { AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_NONE };
const enum AVPixelFormat *out_pixfmts;
const AVPixFmtDescriptor *desc;
AVFilterFormats *avff;
int i, depth = 0, be = 0;
 
if (!ctx->inputs[0]->in_formats ||
!ctx->inputs[0]->in_formats->nb_formats) {
return AVERROR(EAGAIN);
}
 
if (!ctx->inputs[0]->out_formats)
ff_formats_ref(ff_make_format_list(in_pixfmts), &ctx->inputs[0]->out_formats);
 
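/* All candidate input formats must agree on component depth and endianness
* before a single grayscale output format (GRAY8 / GRAY16LE / GRAY16BE) can
* be chosen. */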
avff = ctx->inputs[0]->in_formats;
desc = av_pix_fmt_desc_get(avff->formats[0]);
depth = desc->comp[0].depth_minus1;
be = desc->flags & AV_PIX_FMT_FLAG_BE;
for (i = 1; i < avff->nb_formats; i++) {
desc = av_pix_fmt_desc_get(avff->formats[i]);
if (depth != desc->comp[0].depth_minus1 ||
be != (desc->flags & AV_PIX_FMT_FLAG_BE)) {
return AVERROR(EAGAIN);
}
}
 
if (depth == 7)
out_pixfmts = out8_pixfmts;
else if (be)
out_pixfmts = out16be_pixfmts;
else
out_pixfmts = out16le_pixfmts;
 
for (i = 0; i < ctx->nb_outputs; i++)
ff_formats_ref(ff_make_format_list(out_pixfmts), &ctx->outputs[i]->in_formats);
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
ExtractPlanesContext *e = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int plane_avail, ret, i;
uint8_t rgba_map[4];
 
plane_avail = ((desc->flags & AV_PIX_FMT_FLAG_RGB) ? PLANE_R|PLANE_G|PLANE_B :
PLANE_Y |
((desc->nb_components > 2) ? PLANE_U|PLANE_V : 0)) |
((desc->flags & AV_PIX_FMT_FLAG_ALPHA) ? PLANE_A : 0);
if (e->requested_planes & ~plane_avail) {
av_log(ctx, AV_LOG_ERROR, "Requested planes not available.\n");
return AVERROR(EINVAL);
}
if ((ret = av_image_fill_linesizes(e->linesize, inlink->format, inlink->w)) < 0)
return ret;
 
e->depth = (desc->comp[0].depth_minus1 + 1) >> 3;
e->step = av_get_padded_bits_per_pixel(desc) >> 3;
e->is_packed_rgb = !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
if (desc->flags & AV_PIX_FMT_FLAG_RGB) {
ff_fill_rgba_map(rgba_map, inlink->format);
for (i = 0; i < 4; i++)
e->map[i] = rgba_map[e->map[i]];
}
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = ctx->inputs[0];
ExtractPlanesContext *e = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
const int output = outlink->srcpad - ctx->output_pads;
 
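/* For subsampled YUV inputs, map values 1 and 2 are the U and V planes, so
* those outputs get the chroma dimensions; FF_CEIL_RSHIFT rounds up, keeping
* the last row/column of odd-sized frames (for RGB formats the shift amounts
* are 0). */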
if (e->map[output] == 1 || e->map[output] == 2) {
outlink->h = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
outlink->w = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
}
 
return 0;
}
 
static void extract_from_packed(uint8_t *dst, int dst_linesize,
const uint8_t *src, int src_linesize,
int width, int height,
int depth, int step, int comp)
{
int x, y;
 
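/* Each packed pixel occupies 'step' bytes; component 'comp' sits at byte
* offset comp for single-byte components and at 2*comp for two-byte ones,
* where both bytes are copied so the endianness is preserved. */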
for (y = 0; y < height; y++) {
switch (depth) {
case 1:
for (x = 0; x < width; x++)
dst[x] = src[x * step + comp];
break;
case 2:
for (x = 0; x < width; x++) {
dst[x * 2 ] = src[x * step + comp * 2 ];
dst[x * 2 + 1] = src[x * step + comp * 2 + 1];
}
break;
}
dst += dst_linesize;
src += src_linesize;
}
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
ExtractPlanesContext *e = ctx->priv;
int i, eof = 0, ret = 0;
 
for (i = 0; i < ctx->nb_outputs; i++) {
AVFilterLink *outlink = ctx->outputs[i];
const int idx = e->map[i];
AVFrame *out;
 
if (outlink->closed)
continue;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
ret = AVERROR(ENOMEM);
break;
}
av_frame_copy_props(out, frame);
 
if (e->is_packed_rgb) {
extract_from_packed(out->data[0], out->linesize[0],
frame->data[0], frame->linesize[0],
outlink->w, outlink->h,
e->depth,
e->step, idx);
} else {
av_image_copy_plane(out->data[0], out->linesize[0],
frame->data[idx], frame->linesize[idx],
e->linesize[idx], outlink->h);
}
 
ret = ff_filter_frame(outlink, out);
if (ret == AVERROR_EOF)
eof++;
else if (ret < 0)
break;
}
av_frame_free(&frame);
 
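/* Report EOF only once every output has reached EOF; EOF on a subset of the
* outputs is not an error for the filter as a whole. */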
if (eof == ctx->nb_outputs)
ret = AVERROR_EOF;
else if (ret == AVERROR_EOF)
ret = 0;
return ret;
}
 
static av_cold int init(AVFilterContext *ctx)
{
ExtractPlanesContext *e = ctx->priv;
int planes = (e->requested_planes & 0xf) | (e->requested_planes >> 4);
int i;
 
for (i = 0; i < 4; i++) {
char *name;
AVFilterPad pad = { 0 };
 
if (!(planes & (1 << i)))
continue;
 
name = av_asprintf("out%d", ctx->nb_outputs);
if (!name)
return AVERROR(ENOMEM);
e->map[ctx->nb_outputs] = i;
pad.name = name;
pad.type = AVMEDIA_TYPE_VIDEO;
pad.config_props = config_output;
 
ff_insert_outpad(ctx, ctx->nb_outputs, &pad);
}
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
int i;
 
for (i = 0; i < ctx->nb_outputs; i++)
av_freep(&ctx->output_pads[i].name);
}
 
static const AVFilterPad extractplanes_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
AVFilter avfilter_vf_extractplanes = {
.name = "extractplanes",
.description = NULL_IF_CONFIG_SMALL("Extract planes as grayscale frames."),
.priv_size = sizeof(ExtractPlanesContext),
.priv_class = &extractplanes_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = extractplanes_inputs,
.outputs = NULL,
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
 
#if CONFIG_ALPHAEXTRACT_FILTER
 
static av_cold int init_alphaextract(AVFilterContext *ctx)
{
ExtractPlanesContext *e = ctx->priv;
 
e->requested_planes = PLANE_A;
 
return init(ctx);
}
 
AVFilter avfilter_vf_alphaextract = {
.name = "alphaextract",
.description = NULL_IF_CONFIG_SMALL("Extract an alpha channel as a "
"grayscale image component."),
.priv_size = sizeof(ExtractPlanesContext),
.init = init_alphaextract,
.uninit = uninit,
.query_formats = query_formats,
.inputs = extractplanes_inputs,
.outputs = NULL,
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_ALPHAEXTRACT_FILTER */
/contrib/sdk/sources/ffmpeg/libavfilter/vf_fade.c
0,0 → 1,352
/*
* Copyright (c) 2010 Brandon Mintern
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* video fade filter
* based heavily on vf_negate.c by Bobby Bingham
*/
 
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
#define R 0
#define G 1
#define B 2
#define A 3
 
#define Y 0
#define U 1
#define V 2
 
#define FADE_IN 0
#define FADE_OUT 1
 
typedef struct {
const AVClass *class;
int type;
int factor, fade_per_frame;
int start_frame, nb_frames;
unsigned int frame_index;
int hsub, vsub, bpp;
unsigned int black_level, black_level_scaled;
uint8_t is_packed_rgb;
uint8_t rgba_map[4];
int alpha;
uint64_t start_time, duration;
enum {VF_FADE_WAITING=0, VF_FADE_FADING, VF_FADE_DONE} fade_state;
} FadeContext;
 
static av_cold int init(AVFilterContext *ctx)
{
FadeContext *s = ctx->priv;
 
s->fade_per_frame = (1 << 16) / s->nb_frames;
s->fade_state = VF_FADE_WAITING;
 
if (s->duration != 0) {
// If duration (seconds) is non-zero, assume that we are not fading based on frames
s->nb_frames = 0; // Mostly to clean up logging
}
 
// Choose what to log. If both time-based and frame-based options are set, both lines appear in the log
if (s->start_frame || s->nb_frames) {
av_log(ctx, AV_LOG_VERBOSE,
"type:%s start_frame:%d nb_frames:%d alpha:%d\n",
s->type == FADE_IN ? "in" : "out", s->start_frame,
s->nb_frames,s->alpha);
}
if (s->start_time || s->duration) {
av_log(ctx, AV_LOG_VERBOSE,
"type:%s start_time:%f duration:%f alpha:%d\n",
s->type == FADE_IN ? "in" : "out", (s->start_time / (double)AV_TIME_BASE),
(s->duration / (double)AV_TIME_BASE),s->alpha);
}
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static const enum AVPixelFormat studio_level_pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_NONE
};
 
static int config_props(AVFilterLink *inlink)
{
FadeContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
 
s->hsub = pixdesc->log2_chroma_w;
s->vsub = pixdesc->log2_chroma_h;
 
s->bpp = av_get_bits_per_pixel(pixdesc) >> 3;
s->alpha &= !!(pixdesc->flags & AV_PIX_FMT_FLAG_ALPHA);
s->is_packed_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
 
/* use CCIR601/709 black level for studio-level pixel non-alpha components */
s->black_level =
ff_fmt_is_in(inlink->format, studio_level_pix_fmts) && !s->alpha ? 16 : 0;
/* 32768 = 1 << 15, it is an integer representation
* of 0.5 and is for rounding. */
s->black_level_scaled = (s->black_level << 16) + 32768;
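/* Worked example of the 16.16 fixed-point math in filter_slice_luma(): with
* *p=200, black_level=16 and factor=32768 (i.e. 0.5),
* ((200-16)*32768 + (16<<16) + 32768) >> 16 = 7110656 >> 16 = 108,
* which is 16 + (200-16)/2 with rounding. */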
return 0;
}
 
static int filter_slice_luma(AVFilterContext *ctx, void *arg, int jobnr,
int nb_jobs)
{
FadeContext *s = ctx->priv;
AVFrame *frame = arg;
int slice_start = (frame->height * jobnr ) / nb_jobs;
int slice_end = (frame->height * (jobnr+1)) / nb_jobs;
int i, j;
 
for (i = slice_start; i < slice_end; i++) {
uint8_t *p = frame->data[0] + i * frame->linesize[0];
for (j = 0; j < frame->width * s->bpp; j++) {
/* s->factor is using 16 lower-order bits for decimal
* places. 32768 = 1 << 15, it is an integer representation
* of 0.5 and is for rounding. */
*p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
p++;
}
}
 
return 0;
}
 
static int filter_slice_chroma(AVFilterContext *ctx, void *arg, int jobnr,
int nb_jobs)
{
FadeContext *s = ctx->priv;
AVFrame *frame = arg;
int i, j, plane;
const int width = FF_CEIL_RSHIFT(frame->width, s->hsub);
const int height= FF_CEIL_RSHIFT(frame->height, s->vsub);
int slice_start = (height * jobnr ) / nb_jobs;
int slice_end = (height * (jobnr+1)) / nb_jobs;
 
for (plane = 1; plane < 3; plane++) {
for (i = slice_start; i < slice_end; i++) {
uint8_t *p = frame->data[plane] + i * frame->linesize[plane];
for (j = 0; j < width; j++) {
/* 8421376 = ((128 << 1) + 1) << 15. It is an integer
* representation of 128.5. The .5 is for rounding
* purposes. */
*p = ((*p - 128) * s->factor + 8421376) >> 16;
p++;
}
}
}
 
return 0;
}
 
static int filter_slice_alpha(AVFilterContext *ctx, void *arg, int jobnr,
int nb_jobs)
{
FadeContext *s = ctx->priv;
AVFrame *frame = arg;
int plane = s->is_packed_rgb ? 0 : A;
int slice_start = (frame->height * jobnr ) / nb_jobs;
int slice_end = (frame->height * (jobnr+1)) / nb_jobs;
int i, j;
 
for (i = slice_start; i < slice_end; i++) {
uint8_t *p = frame->data[plane] + i * frame->linesize[plane] + s->is_packed_rgb*s->rgba_map[A];
int step = s->is_packed_rgb ? 4 : 1;
for (j = 0; j < frame->width; j++) {
/* s->factor is using 16 lower-order bits for decimal
* places. 32768 = 1 << 15, it is an integer representation
* of 0.5 and is for rounding. */
*p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
p += step;
}
}
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
FadeContext *s = ctx->priv;
double frame_timestamp = frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base);
 
// Calculate Fade assuming this is a Fade In
if (s->fade_state == VF_FADE_WAITING) {
s->factor=0;
if ((frame_timestamp >= (s->start_time/(double)AV_TIME_BASE))
&& (s->frame_index >= s->start_frame)) {
// Time to start fading
s->fade_state = VF_FADE_FADING;
 
// Save start time in case we are starting based on frames and fading based on time
if ((s->start_time == 0) && (s->start_frame != 0)) {
s->start_time = frame_timestamp*(double)AV_TIME_BASE;
}
 
// Save start frame in case we are starting based on time and fading based on frames
if ((s->start_time != 0) && (s->start_frame == 0)) {
s->start_frame = s->frame_index;
}
}
}
if (s->fade_state == VF_FADE_FADING) {
if (s->duration == 0) {
// Fading based on frame count
s->factor = (s->frame_index - s->start_frame) * s->fade_per_frame;
if (s->frame_index > (s->start_frame + s->nb_frames)) {
s->fade_state = VF_FADE_DONE;
}
 
} else {
// Fading based on duration
s->factor = (frame_timestamp - (s->start_time/(double)AV_TIME_BASE))
* (float) UINT16_MAX / (s->duration/(double)AV_TIME_BASE);
if (frame_timestamp > ((s->start_time/(double)AV_TIME_BASE)
+ (s->duration/(double)AV_TIME_BASE))) {
s->fade_state = VF_FADE_DONE;
}
}
}
if (s->fade_state == VF_FADE_DONE) {
s->factor=UINT16_MAX;
}
 
s->factor = av_clip_uint16(s->factor);
 
// Invert fade_factor if fading out
if (s->type == FADE_OUT) {
s->factor=UINT16_MAX-s->factor;
}
 
if (s->factor < UINT16_MAX) {
if (s->alpha) {
ctx->internal->execute(ctx, filter_slice_alpha, frame, NULL,
FFMIN(frame->height, ctx->graph->nb_threads));
} else {
/* luma or rgb plane */
ctx->internal->execute(ctx, filter_slice_luma, frame, NULL,
FFMIN(frame->height, ctx->graph->nb_threads));
 
if (frame->data[1] && frame->data[2]) {
/* chroma planes */
ctx->internal->execute(ctx, filter_slice_chroma, frame, NULL,
FFMIN(frame->height, ctx->graph->nb_threads));
}
}
}
 
s->frame_index++;
 
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
 
 
#define OFFSET(x) offsetof(FadeContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption fade_options[] = {
{ "type", "'in' or 'out' for fade-in/fade-out", OFFSET(type), AV_OPT_TYPE_INT, { .i64 = FADE_IN }, FADE_IN, FADE_OUT, FLAGS, "type" },
{ "t", "'in' or 'out' for fade-in/fade-out", OFFSET(type), AV_OPT_TYPE_INT, { .i64 = FADE_IN }, FADE_IN, FADE_OUT, FLAGS, "type" },
{ "in", "fade-in", 0, AV_OPT_TYPE_CONST, { .i64 = FADE_IN }, .unit = "type" },
{ "out", "fade-out", 0, AV_OPT_TYPE_CONST, { .i64 = FADE_OUT }, .unit = "type" },
{ "start_frame", "Number of the first frame to which to apply the effect.",
OFFSET(start_frame), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
{ "s", "Number of the first frame to which to apply the effect.",
OFFSET(start_frame), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
{ "nb_frames", "Number of frames to which the effect should be applied.",
OFFSET(nb_frames), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, FLAGS },
{ "n", "Number of frames to which the effect should be applied.",
OFFSET(nb_frames), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, FLAGS },
{ "alpha", "fade alpha if it is available on the input", OFFSET(alpha), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS },
{ "start_time", "Number of seconds into the input at which the effect starts.",
OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
{ "st", "Number of seconds into the input at which the effect starts.",
OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
{ "duration", "Duration of the effect in seconds.",
OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
{ "d", "Duration of the effect in seconds.",
OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(fade);
 
static const AVFilterPad avfilter_vf_fade_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
.filter_frame = filter_frame,
.needs_writable = 1,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_fade_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_fade = {
.name = "fade",
.description = NULL_IF_CONFIG_SMALL("Fade in/out input video."),
.init = init,
.priv_size = sizeof(FadeContext),
.priv_class = &fade_class,
.query_formats = query_formats,
.inputs = avfilter_vf_fade_inputs,
.outputs = avfilter_vf_fade_outputs,
.flags = AVFILTER_FLAG_SLICE_THREADS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_field.c
0,0 → 1,111
/*
* Copyright (c) 2003 Rich Felker
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* field filter, based on libmpcodecs/vf_field.c by Rich Felker
*/
 
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
 
enum FieldType { FIELD_TYPE_TOP = 0, FIELD_TYPE_BOTTOM };
 
typedef struct {
const AVClass *class;
enum FieldType type;
int nb_planes; ///< number of planes of the current format
} FieldContext;
 
#define OFFSET(x) offsetof(FieldContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption field_options[] = {
{"type", "set field type (top or bottom)", OFFSET(type), AV_OPT_TYPE_INT, {.i64=FIELD_TYPE_TOP}, 0, 1, FLAGS, "field_type" },
{"top", "select top field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_TOP}, INT_MIN, INT_MAX, FLAGS, "field_type"},
{"bottom", "select bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field_type"},
{NULL}
};
 
AVFILTER_DEFINE_CLASS(field);
 
static int config_props_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
FieldContext *field = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
 
field->nb_planes = av_pix_fmt_count_planes(outlink->format);
 
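/* Halve the height; when the top field of an odd-height frame is extracted
* the extra line belongs to that field, hence the +1 before the division. */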
outlink->w = inlink->w;
outlink->h = (inlink->h + (field->type == FIELD_TYPE_TOP)) / 2;
 
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d type:%s -> w:%d h:%d\n",
inlink->w, inlink->h, field->type == FIELD_TYPE_BOTTOM ? "bottom" : "top",
outlink->w, outlink->h);
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
FieldContext *field = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int i;
 
inpicref->height = outlink->h;
inpicref->interlaced_frame = 0;
 
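/* Select the field in place, without copying: point the bottom-field output
* at the second line and double every linesize so that only every other
* input line remains visible. */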
for (i = 0; i < field->nb_planes; i++) {
if (field->type == FIELD_TYPE_BOTTOM)
inpicref->data[i] = inpicref->data[i] + inpicref->linesize[i];
inpicref->linesize[i] = 2 * inpicref->linesize[i];
}
return ff_filter_frame(outlink, inpicref);
}
 
static const AVFilterPad field_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad field_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_field = {
.name = "field",
.description = NULL_IF_CONFIG_SMALL("Extract a field from the input video."),
.priv_size = sizeof(FieldContext),
.inputs = field_inputs,
.outputs = field_outputs,
.priv_class = &field_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_fieldmatch.c
0,0 → 1,984
/*
* Copyright (c) 2012 Fredrik Mellbin
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Fieldmatching filter, ported from VFM filter (VapourSynth) by Clément.
* Fredrik Mellbin is the author of the VIVTC/VFM filter, which is itself a
* light clone of the TIVTC/TFM (AviSynth) filter written by Kevin Stone
* (tritical), the original author.
*
* @see http://bengal.missouri.edu/~kes25c/
* @see http://www.vapoursynth.com/about/
*/
 
#include <inttypes.h>
 
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "internal.h"
 
#define INPUT_MAIN 0
#define INPUT_CLEANSRC 1
 
enum fieldmatch_parity {
FM_PARITY_AUTO = -1,
FM_PARITY_BOTTOM = 0,
FM_PARITY_TOP = 1,
};
 
enum matching_mode {
MODE_PC,
MODE_PC_N,
MODE_PC_U,
MODE_PC_N_UB,
MODE_PCN,
MODE_PCN_UB,
NB_MODE
};
 
enum comb_matching_mode {
COMBMATCH_NONE,
COMBMATCH_SC,
COMBMATCH_FULL,
NB_COMBMATCH
};
 
enum comb_dbg {
COMBDBG_NONE,
COMBDBG_PCN,
COMBDBG_PCNUB,
NB_COMBDBG
};
 
typedef struct {
const AVClass *class;
 
AVFrame *prv, *src, *nxt; ///< main sliding window of 3 frames
AVFrame *prv2, *src2, *nxt2; ///< sliding window of the optional second stream
int got_frame[2]; ///< frame request flag for each input stream
int hsub, vsub; ///< chroma subsampling values
uint32_t eof; ///< bitmask for end of stream
int64_t lastscdiff;
int64_t lastn;
 
/* options */
int order;
int ppsrc;
enum matching_mode mode;
int field;
int mchroma;
int y0, y1;
int64_t scthresh;
double scthresh_flt;
enum comb_matching_mode combmatch;
int combdbg;
int cthresh;
int chroma;
int blockx, blocky;
int combpel;
 
/* misc buffers */
uint8_t *map_data[4];
int map_linesize[4];
uint8_t *cmask_data[4];
int cmask_linesize[4];
int *c_array;
int tpitchy, tpitchuv;
uint8_t *tbuffer;
} FieldMatchContext;
 
#define OFFSET(x) offsetof(FieldMatchContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption fieldmatch_options[] = {
{ "order", "specify the assumed field order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "order" },
{ "auto", "auto detect parity", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "order" },
{ "bff", "assume bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "order" },
{ "tff", "assume top field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "order" },
{ "mode", "set the matching mode or strategy to use", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_PC_N}, MODE_PC, NB_MODE-1, FLAGS, "mode" },
{ "pc", "2-way match (p/c)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "pc_n", "2-way match + 3rd match on combed (p/c + u)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "pc_u", "2-way match + 3rd match (same order) on combed (p/c + u)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_U}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "pc_n_ub", "2-way match + 3rd match on combed + 4th/5th matches if still combed (p/c + u + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "pcn", "3-way match (p/c/n)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "pcn_ub", "3-way match + 4th/5th matches on combed (p/c/n + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
{ "field", "set the field to match from", OFFSET(field), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "field" },
{ "auto", "automatic (same value as 'order')", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "field" },
{ "bottom", "bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field" },
{ "top", "top field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "field" },
{ "mchroma", "set whether or not chroma is included during the match comparisons", OFFSET(mchroma), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
{ "y0", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y0), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
{ "y1", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y1), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
{ "scthresh", "set scene change detection threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl=12}, 0, 100, FLAGS },
{ "combmatch", "set combmatching mode", OFFSET(combmatch), AV_OPT_TYPE_INT, {.i64=COMBMATCH_SC}, COMBMATCH_NONE, NB_COMBMATCH-1, FLAGS, "combmatching" },
{ "none", "disable combmatching", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_NONE}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
{ "sc", "enable combmatching only on scene change", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_SC}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
{ "full", "enable combmatching all the time", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_FULL}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
{ "combdbg", "enable comb debug", OFFSET(combdbg), AV_OPT_TYPE_INT, {.i64=COMBDBG_NONE}, COMBDBG_NONE, NB_COMBDBG-1, FLAGS, "dbglvl" },
{ "none", "no forced calculation", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_NONE}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
{ "pcn", "calculate p/c/n", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCN}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
{ "pcnub", "calculate p/c/n/u/b", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCNUB}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
{ "cthresh", "set the area combing threshold used for combed frame detection", OFFSET(cthresh), AV_OPT_TYPE_INT, {.i64= 9}, -1, 0xff, FLAGS },
{ "chroma", "set whether or not chroma is considered in the combed frame decision", OFFSET(chroma), AV_OPT_TYPE_INT, {.i64= 0}, 0, 1, FLAGS },
{ "blockx", "set the x-axis size of the window used during combed frame detection", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
{ "blocky", "set the y-axis size of the window used during combed frame detection", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
{ "combpel", "set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as combed", OFFSET(combpel), AV_OPT_TYPE_INT, {.i64=80}, 0, INT_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(fieldmatch);
 
static int get_width(const FieldMatchContext *fm, const AVFrame *f, int plane)
{
return plane ? FF_CEIL_RSHIFT(f->width, fm->hsub) : f->width;
}
 
static int get_height(const FieldMatchContext *fm, const AVFrame *f, int plane)
{
return plane ? FF_CEIL_RSHIFT(f->height, fm->vsub) : f->height;
}
 
static int64_t luma_abs_diff(const AVFrame *f1, const AVFrame *f2)
{
int x, y;
const uint8_t *srcp1 = f1->data[0];
const uint8_t *srcp2 = f2->data[0];
const int src1_linesize = f1->linesize[0];
const int src2_linesize = f2->linesize[0];
const int width = f1->width;
const int height = f1->height;
int64_t acc = 0;
 
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++)
acc += abs(srcp1[x] - srcp2[x]);
srcp1 += src1_linesize;
srcp2 += src2_linesize;
}
return acc;
}
 
static void fill_buf(uint8_t *data, int w, int h, int linesize, uint8_t v)
{
int y;
 
for (y = 0; y < h; y++) {
memset(data, v, w);
data += linesize;
}
}
 
static int calc_combed_score(const FieldMatchContext *fm, const AVFrame *src)
{
int x, y, plane, max_v = 0;
const int cthresh = fm->cthresh;
const int cthresh6 = cthresh * 6;
 
for (plane = 0; plane < (fm->chroma ? 3 : 1); plane++) {
const uint8_t *srcp = src->data[plane];
const int src_linesize = src->linesize[plane];
const int width = get_width (fm, src, plane);
const int height = get_height(fm, src, plane);
uint8_t *cmkp = fm->cmask_data[plane];
const int cmk_linesize = fm->cmask_linesize[plane];
 
if (cthresh < 0) {
fill_buf(cmkp, width, height, cmk_linesize, 0xff);
continue;
}
fill_buf(cmkp, width, height, cmk_linesize, 0);
 
/* [1 -3 4 -3 1] vertical filter */
#define FILTER(xm2, xm1, xp1, xp2) \
abs( 4 * srcp[x] \
-3 * (srcp[x + (xm1)*src_linesize] + srcp[x + (xp1)*src_linesize]) \
+ (srcp[x + (xm2)*src_linesize] + srcp[x + (xp2)*src_linesize])) > cthresh6
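/* For an ideal comb of amplitude A this filter responds with 12*A while the
* line-to-line differences s1/s2 below are 2*A, so comparing against
* cthresh6 = 6*cthresh applies the same effective per-pixel threshold to
* both tests. */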
 
/* first line */
for (x = 0; x < width; x++) {
const int s1 = abs(srcp[x] - srcp[x + src_linesize]);
if (s1 > cthresh && FILTER(2, 1, 1, 2))
cmkp[x] = 0xff;
}
srcp += src_linesize;
cmkp += cmk_linesize;
 
/* second line */
for (x = 0; x < width; x++) {
const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
if (s1 > cthresh && s2 > cthresh && FILTER(2, -1, 1, 2))
cmkp[x] = 0xff;
}
srcp += src_linesize;
cmkp += cmk_linesize;
 
/* all lines minus first two and last two */
for (y = 2; y < height-2; y++) {
for (x = 0; x < width; x++) {
const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, 2))
cmkp[x] = 0xff;
}
srcp += src_linesize;
cmkp += cmk_linesize;
}
 
/* second-to-last line */
for (x = 0; x < width; x++) {
const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, -2))
cmkp[x] = 0xff;
}
srcp += src_linesize;
cmkp += cmk_linesize;
 
/* last line */
for (x = 0; x < width; x++) {
const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
if (s1 > cthresh && FILTER(-2, -1, -1, -2))
cmkp[x] = 0xff;
}
}
 
if (fm->chroma) {
uint8_t *cmkp = fm->cmask_data[0];
uint8_t *cmkpU = fm->cmask_data[1];
uint8_t *cmkpV = fm->cmask_data[2];
const int width = FF_CEIL_RSHIFT(src->width, fm->hsub);
const int height = FF_CEIL_RSHIFT(src->height, fm->vsub);
const int cmk_linesize = fm->cmask_linesize[0] << 1;
const int cmk_linesizeUV = fm->cmask_linesize[2];
uint8_t *cmkpp = cmkp - (cmk_linesize>>1);
uint8_t *cmkpn = cmkp + (cmk_linesize>>1);
uint8_t *cmkpnn = cmkp + cmk_linesize;
for (y = 1; y < height - 1; y++) {
cmkpp += cmk_linesize;
cmkp += cmk_linesize;
cmkpn += cmk_linesize;
cmkpnn += cmk_linesize;
cmkpV += cmk_linesizeUV;
cmkpU += cmk_linesizeUV;
for (x = 1; x < width - 1; x++) {
#define HAS_FF_AROUND(p, lz) (p[x-1 - lz] == 0xff || p[x - lz] == 0xff || p[x+1 - lz] == 0xff || \
p[x-1 ] == 0xff || p[x+1 ] == 0xff || \
p[x-1 + lz] == 0xff || p[x + lz] == 0xff || p[x+1 + lz] == 0xff)
if ((cmkpV[x] == 0xff && HAS_FF_AROUND(cmkpV, cmk_linesizeUV)) ||
(cmkpU[x] == 0xff && HAS_FF_AROUND(cmkpU, cmk_linesizeUV))) {
((uint16_t*)cmkp)[x] = 0xffff;
((uint16_t*)cmkpn)[x] = 0xffff;
if (y&1) ((uint16_t*)cmkpp)[x] = 0xffff;
else ((uint16_t*)cmkpnn)[x] = 0xffff;
}
}
}
}
 
{
const int blockx = fm->blockx;
const int blocky = fm->blocky;
const int xhalf = blockx/2;
const int yhalf = blocky/2;
const int cmk_linesize = fm->cmask_linesize[0];
const uint8_t *cmkp = fm->cmask_data[0] + cmk_linesize;
const int width = src->width;
const int height = src->height;
const int xblocks = ((width+xhalf)/blockx) + 1;
const int xblocks4 = xblocks<<2;
const int yblocks = ((height+yhalf)/blocky) + 1;
int *c_array = fm->c_array;
const int arraysize = (xblocks*yblocks)<<2;
int heighta = (height/(blocky/2))*(blocky/2);
const int widtha = (width /(blockx/2))*(blockx/2);
if (heighta == height)
heighta = height - yhalf;
memset(c_array, 0, arraysize * sizeof(*c_array));
 
#define C_ARRAY_ADD(v) do { \
const int box1 = (x / blockx) * 4; \
const int box2 = ((x + xhalf) / blockx) * 4; \
c_array[temp1 + box1 ] += v; \
c_array[temp1 + box2 + 1] += v; \
c_array[temp2 + box1 + 2] += v; \
c_array[temp2 + box2 + 3] += v; \
} while (0)
 
#define VERTICAL_HALF(y_start, y_end) do { \
for (y = y_start; y < y_end; y++) { \
const int temp1 = (y / blocky) * xblocks4; \
const int temp2 = ((y + yhalf) / blocky) * xblocks4; \
for (x = 0; x < width; x++) \
if (cmkp[x - cmk_linesize] == 0xff && \
cmkp[x ] == 0xff && \
cmkp[x + cmk_linesize] == 0xff) \
C_ARRAY_ADD(1); \
cmkp += cmk_linesize; \
} \
} while (0)
 
VERTICAL_HALF(1, yhalf);
 
for (y = yhalf; y < heighta; y += yhalf) {
const int temp1 = (y / blocky) * xblocks4;
const int temp2 = ((y + yhalf) / blocky) * xblocks4;
 
for (x = 0; x < widtha; x += xhalf) {
const uint8_t *cmkp_tmp = cmkp + x;
int u, v, sum = 0;
for (u = 0; u < yhalf; u++) {
for (v = 0; v < xhalf; v++)
if (cmkp_tmp[v - cmk_linesize] == 0xff &&
cmkp_tmp[v ] == 0xff &&
cmkp_tmp[v + cmk_linesize] == 0xff)
sum++;
cmkp_tmp += cmk_linesize;
}
if (sum)
C_ARRAY_ADD(sum);
}
 
for (x = widtha; x < width; x++) {
const uint8_t *cmkp_tmp = cmkp + x;
int u, sum = 0;
for (u = 0; u < yhalf; u++) {
if (cmkp_tmp[-cmk_linesize] == 0xff &&
cmkp_tmp[ 0] == 0xff &&
cmkp_tmp[ cmk_linesize] == 0xff)
sum++;
cmkp_tmp += cmk_linesize;
}
if (sum)
C_ARRAY_ADD(sum);
}
 
cmkp += cmk_linesize * yhalf;
}
 
VERTICAL_HALF(heighta, height - 1);
 
for (x = 0; x < arraysize; x++)
if (c_array[x] > max_v)
max_v = c_array[x];
}
return max_v;
}
 
// the secret is that tbuffer is an interlaced, offset subset of all the lines
static void build_abs_diff_mask(const uint8_t *prvp, int prv_linesize,
const uint8_t *nxtp, int nxt_linesize,
uint8_t *tbuffer, int tbuf_linesize,
int width, int height)
{
int y, x;
 
prvp -= prv_linesize;
nxtp -= nxt_linesize;
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++)
tbuffer[x] = FFABS(prvp[x] - nxtp[x]);
prvp += prv_linesize;
nxtp += nxt_linesize;
tbuffer += tbuf_linesize;
}
}
 
/**
* Build a per-pixel bitmask map of how strongly the two fields differ: bit 0
* flags a small local difference, bits 1 and 2 flag increasingly strong and
* spatially supported differences. The map is consumed by compare_fields().
*/
static void build_diff_map(FieldMatchContext *fm,
const uint8_t *prvp, int prv_linesize,
const uint8_t *nxtp, int nxt_linesize,
uint8_t *dstp, int dst_linesize, int height,
int width, int plane)
{
int x, y, u, diff, count;
int tpitch = plane ? fm->tpitchuv : fm->tpitchy;
const uint8_t *dp = fm->tbuffer + tpitch;
 
build_abs_diff_mask(prvp, prv_linesize, nxtp, nxt_linesize,
fm->tbuffer, tpitch, width, height>>1);
 
for (y = 2; y < height - 2; y += 2) {
for (x = 1; x < width - 1; x++) {
diff = dp[x];
if (diff > 3) {
for (count = 0, u = x-1; u < x+2 && count < 2; u++) {
count += dp[u-tpitch] > 3;
count += dp[u ] > 3;
count += dp[u+tpitch] > 3;
}
if (count > 1) {
dstp[x] = 1;
if (diff > 19) {
int upper = 0, lower = 0;
for (count = 0, u = x-1; u < x+2 && count < 6; u++) {
if (dp[u-tpitch] > 19) { count++; upper = 1; }
if (dp[u ] > 19) count++;
if (dp[u+tpitch] > 19) { count++; lower = 1; }
}
if (count > 3) {
if (upper && lower) {
dstp[x] |= 1<<1;
} else {
int upper2 = 0, lower2 = 0;
for (u = FFMAX(x-4,0); u < FFMIN(x+5,width); u++) {
if (y != 2 && dp[u-2*tpitch] > 19) upper2 = 1;
if ( dp[u- tpitch] > 19) upper = 1;
if ( dp[u+ tpitch] > 19) lower = 1;
if (y != height-4 && dp[u+2*tpitch] > 19) lower2 = 1;
}
if ((upper && (lower || upper2)) ||
(lower && (upper || lower2)))
dstp[x] |= 1<<1;
else if (count > 5)
dstp[x] |= 1<<2;
}
}
}
}
}
}
dp += tpitch;
dstp += dst_linesize;
}
}
 
enum { mP, mC, mN, mB, mU };
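/* Candidate field matches: p/c/n weave the current field with the previous,
* current or next frame; u and b are the opposite-parity alternates taken
* from the next and the previous frame respectively (see select_frame()). */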
 
static int get_field_base(int match, int field)
{
return match < 3 ? 2 - field : 1 + field;
}
 
static AVFrame *select_frame(FieldMatchContext *fm, int match)
{
if (match == mP || match == mB) return fm->prv;
else if (match == mN || match == mU) return fm->nxt;
else /* match == mC */ return fm->src;
}
 
static int compare_fields(FieldMatchContext *fm, int match1, int match2, int field)
{
int plane, ret;
uint64_t accumPc = 0, accumPm = 0, accumPml = 0;
uint64_t accumNc = 0, accumNm = 0, accumNml = 0;
int norm1, norm2, mtn1, mtn2;
float c1, c2, mr;
const AVFrame *src = fm->src;
 
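/* The accumulator pairs grade how badly each candidate meshes with the
* source field: accum?c collects moderate differences (temp2 > 23) on pixels
* with bit 0 of the diff map set, accum?m and accum?ml collect strong
* differences (temp2 > 42) on pixels with bits 1 and 2 set. The heuristics
* after the loop pick the match with the least combing evidence. */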
for (plane = 0; plane < (fm->mchroma ? 3 : 1); plane++) {
int x, y, temp1, temp2, fbase;
const AVFrame *prev, *next;
uint8_t *mapp = fm->map_data[plane];
int map_linesize = fm->map_linesize[plane];
const uint8_t *srcp = src->data[plane];
const int src_linesize = src->linesize[plane];
const int srcf_linesize = src_linesize << 1;
int prv_linesize, nxt_linesize;
int prvf_linesize, nxtf_linesize;
const int width = get_width (fm, src, plane);
const int height = get_height(fm, src, plane);
const int y0a = fm->y0 >> (plane != 0);
const int y1a = fm->y1 >> (plane != 0);
const int startx = (plane == 0 ? 8 : 4);
const int stopx = width - startx;
const uint8_t *srcpf, *srcf, *srcnf;
const uint8_t *prvpf, *prvnf, *nxtpf, *nxtnf;
 
fill_buf(mapp, width, height, map_linesize, 0);
 
/* match1 */
fbase = get_field_base(match1, field);
srcf = srcp + (fbase + 1) * src_linesize;
srcpf = srcf - srcf_linesize;
srcnf = srcf + srcf_linesize;
mapp = mapp + fbase * map_linesize;
prev = select_frame(fm, match1);
prv_linesize = prev->linesize[plane];
prvf_linesize = prv_linesize << 1;
prvpf = prev->data[plane] + fbase * prv_linesize; // previous frame, previous field
prvnf = prvpf + prvf_linesize; // previous frame, next field
 
/* match2 */
fbase = get_field_base(match2, field);
next = select_frame(fm, match2);
nxt_linesize = next->linesize[plane];
nxtf_linesize = nxt_linesize << 1;
nxtpf = next->data[plane] + fbase * nxt_linesize; // next frame, previous field
nxtnf = nxtpf + nxtf_linesize; // next frame, next field
 
map_linesize <<= 1;
if ((match1 >= 3 && field == 1) || (match1 < 3 && field != 1))
build_diff_map(fm, prvpf, prvf_linesize, nxtpf, nxtf_linesize,
mapp, map_linesize, height, width, plane);
else
build_diff_map(fm, prvnf, prvf_linesize, nxtnf, nxtf_linesize,
mapp + map_linesize, map_linesize, height, width, plane);
 
for (y = 2; y < height - 2; y += 2) {
if (y0a == y1a || y < y0a || y > y1a) {
for (x = startx; x < stopx; x++) {
if (mapp[x] > 0 || mapp[x + map_linesize] > 0) {
temp1 = srcpf[x] + (srcf[x] << 2) + srcnf[x]; // [1 4 1]
 
temp2 = abs(3 * (prvpf[x] + prvnf[x]) - temp1);
if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
accumPc += temp2;
if (temp2 > 42) {
if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
accumPm += temp2;
if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
accumPml += temp2;
}
 
temp2 = abs(3 * (nxtpf[x] + nxtnf[x]) - temp1);
if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
accumNc += temp2;
if (temp2 > 42) {
if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
accumNm += temp2;
if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
accumNml += temp2;
}
}
}
}
prvpf += prvf_linesize;
prvnf += prvf_linesize;
srcpf += srcf_linesize;
srcf += srcf_linesize;
srcnf += srcf_linesize;
nxtpf += nxtf_linesize;
nxtnf += nxtf_linesize;
mapp += map_linesize;
}
}
 
if (accumPm < 500 && accumNm < 500 && (accumPml >= 500 || accumNml >= 500) &&
FFMAX(accumPml,accumNml) > 3*FFMIN(accumPml,accumNml)) {
accumPm = accumPml;
accumNm = accumNml;
}
 
norm1 = (int)((accumPc / 6.0f) + 0.5f);
norm2 = (int)((accumNc / 6.0f) + 0.5f);
mtn1 = (int)((accumPm / 6.0f) + 0.5f);
mtn2 = (int)((accumNm / 6.0f) + 0.5f);
c1 = ((float)FFMAX(norm1,norm2)) / ((float)FFMAX(FFMIN(norm1,norm2),1));
c2 = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMIN(mtn1, mtn2), 1));
mr = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMAX(norm1,norm2),1));
if (((mtn1 >= 500 || mtn2 >= 500) && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) ||
((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) ||
((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) ||
((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1))
ret = mtn1 > mtn2 ? match2 : match1;
else if (mr > 0.005 && FFMAX(mtn1, mtn2) > 150 && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1))
ret = mtn1 > mtn2 ? match2 : match1;
else
ret = norm1 > norm2 ? match2 : match1;
return ret;
}
 
static void copy_fields(const FieldMatchContext *fm, AVFrame *dst,
const AVFrame *src, int field)
{
int plane;
for (plane = 0; plane < 4 && src->data[plane] && src->linesize[plane]; plane++)
av_image_copy_plane(dst->data[plane] + field*dst->linesize[plane], dst->linesize[plane] << 1,
src->data[plane] + field*src->linesize[plane], src->linesize[plane] << 1,
get_width(fm, src, plane), get_height(fm, src, plane) / 2);
}
 
static AVFrame *create_weave_frame(AVFilterContext *ctx, int match, int field,
const AVFrame *prv, AVFrame *src, const AVFrame *nxt)
{
AVFrame *dst;
FieldMatchContext *fm = ctx->priv;
 
if (match == mC) {
dst = av_frame_clone(src);
} else {
AVFilterLink *outlink = ctx->outputs[0];
 
dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!dst)
return NULL;
av_frame_copy_props(dst, src);
 
switch (match) {
case mP: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, prv, field); break;
case mN: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, nxt, field); break;
case mB: copy_fields(fm, dst, src, field); copy_fields(fm, dst, prv, 1-field); break;
case mU: copy_fields(fm, dst, src, field); copy_fields(fm, dst, nxt, 1-field); break;
default: av_assert0(0);
}
}
return dst;
}
 
static int checkmm(AVFilterContext *ctx, int *combs, int m1, int m2,
AVFrame **gen_frames, int field)
{
const FieldMatchContext *fm = ctx->priv;
 
#define LOAD_COMB(mid) do { \
if (combs[mid] < 0) { \
if (!gen_frames[mid]) \
gen_frames[mid] = create_weave_frame(ctx, mid, field, \
fm->prv, fm->src, fm->nxt); \
combs[mid] = calc_combed_score(fm, gen_frames[mid]); \
} \
} while (0)
 
LOAD_COMB(m1);
LOAD_COMB(m2);
 
if ((combs[m2] * 3 < combs[m1] || (combs[m2] * 2 < combs[m1] && combs[m1] > fm->combpel)) &&
abs(combs[m2] - combs[m1]) >= 30 && combs[m2] < fm->combpel)
return m2;
else
return m1;
}
 
static const int fxo0m[] = { mP, mC, mN, mB, mU };
static const int fxo1m[] = { mN, mC, mP, mU, mB };
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
FieldMatchContext *fm = ctx->priv;
int combs[] = { -1, -1, -1, -1, -1 };
int order, field, i, match, sc = 0;
const int *fxo;
AVFrame *gen_frames[] = { NULL, NULL, NULL, NULL, NULL };
AVFrame *dst;
 
/* update frames queue(s) */
#define SLIDING_FRAME_WINDOW(prv, src, nxt) do { \
if (prv != src) /* 2nd loop exception (1st has prv==src and we don't want to lose src) */ \
av_frame_free(&prv); \
prv = src; \
src = nxt; \
if (in) \
nxt = in; \
if (!prv) \
prv = src; \
if (!prv) /* received only one frame at that point */ \
return 0; \
av_assert0(prv && src && nxt); \
} while (0)
if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
SLIDING_FRAME_WINDOW(fm->prv, fm->src, fm->nxt);
fm->got_frame[INPUT_MAIN] = 1;
} else {
SLIDING_FRAME_WINDOW(fm->prv2, fm->src2, fm->nxt2);
fm->got_frame[INPUT_CLEANSRC] = 1;
}
if (!fm->got_frame[INPUT_MAIN] || (fm->ppsrc && !fm->got_frame[INPUT_CLEANSRC]))
return 0;
fm->got_frame[INPUT_MAIN] = fm->got_frame[INPUT_CLEANSRC] = 0;
in = fm->src;
 
/* parity */
order = fm->order != FM_PARITY_AUTO ? fm->order : (in->interlaced_frame ? in->top_field_first : 1);
field = fm->field != FM_PARITY_AUTO ? fm->field : order;
av_assert0(order == 0 || order == 1 || field == 0 || field == 1);
fxo = field ^ order ? fxo1m : fxo0m;
 
/* debug mode: we generate all the field combinations and their associated
* combed score. XXX: inject as frame metadata? */
if (fm->combdbg) {
for (i = 0; i < FF_ARRAY_ELEMS(combs); i++) {
if (i > mN && fm->combdbg == COMBDBG_PCN)
break;
gen_frames[i] = create_weave_frame(ctx, i, field, fm->prv, fm->src, fm->nxt);
if (!gen_frames[i])
return AVERROR(ENOMEM);
combs[i] = calc_combed_score(fm, gen_frames[i]);
}
av_log(ctx, AV_LOG_INFO, "COMBS: %3d %3d %3d %3d %3d\n",
combs[0], combs[1], combs[2], combs[3], combs[4]);
} else {
gen_frames[mC] = av_frame_clone(fm->src);
if (!gen_frames[mC])
return AVERROR(ENOMEM);
}
 
/* p/c selection and optional 3-way p/c/n matches */
match = compare_fields(fm, fxo[mC], fxo[mP], field);
if (fm->mode == MODE_PCN || fm->mode == MODE_PCN_UB)
match = compare_fields(fm, match, fxo[mN], field);
 
/* scene change check */
if (fm->combmatch == COMBMATCH_SC) {
if (fm->lastn == outlink->frame_count - 1) {
if (fm->lastscdiff > fm->scthresh)
sc = 1;
} else if (luma_abs_diff(fm->prv, fm->src) > fm->scthresh) {
sc = 1;
}
 
if (!sc) {
fm->lastn = outlink->frame_count;
fm->lastscdiff = luma_abs_diff(fm->src, fm->nxt);
sc = fm->lastscdiff > fm->scthresh;
}
}
 
if (fm->combmatch == COMBMATCH_FULL || (fm->combmatch == COMBMATCH_SC && sc)) {
switch (fm->mode) {
/* 2-way p/c matches */
case MODE_PC:
match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
break;
case MODE_PC_N:
match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
break;
case MODE_PC_U:
match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
break;
case MODE_PC_N_UB:
match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
break;
/* 3-way p/c/n matches */
case MODE_PCN:
match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
break;
case MODE_PCN_UB:
match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
break;
default:
av_assert0(0);
}
}
 
/* get output frame and drop the others */
if (fm->ppsrc) {
/* field matching was based on a filtered/post-processed input, we now
* pick the untouched fields from the clean source */
dst = create_weave_frame(ctx, match, field, fm->prv2, fm->src2, fm->nxt2);
} else {
if (!gen_frames[match]) { // XXX: is that possible?
dst = create_weave_frame(ctx, match, field, fm->prv, fm->src, fm->nxt);
} else {
dst = gen_frames[match];
gen_frames[match] = NULL;
}
}
if (!dst)
return AVERROR(ENOMEM);
for (i = 0; i < FF_ARRAY_ELEMS(gen_frames); i++)
av_frame_free(&gen_frames[i]);
 
/* mark the frame we are unable to match properly as interlaced so a proper
* de-interlacer can take over */
dst->interlaced_frame = combs[match] >= fm->combpel;
if (dst->interlaced_frame) {
av_log(ctx, AV_LOG_WARNING, "Frame #%"PRId64" at %s is still interlaced\n",
outlink->frame_count, av_ts2timestr(in->pts, &inlink->time_base));
dst->top_field_first = field;
}
 
av_log(ctx, AV_LOG_DEBUG, "SC:%d | COMBS: %3d %3d %3d %3d %3d (combpel=%d)"
" match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4],
fm->combpel, match, dst->interlaced_frame ? "YES" : "NO");
 
return ff_filter_frame(outlink, dst);
}
 
static int request_inlink(AVFilterContext *ctx, int lid)
{
int ret = 0;
FieldMatchContext *fm = ctx->priv;
 
if (!fm->got_frame[lid]) {
AVFilterLink *inlink = ctx->inputs[lid];
ret = ff_request_frame(inlink);
if (ret == AVERROR_EOF) { // flushing
fm->eof |= 1 << lid;
ret = filter_frame(inlink, NULL);
}
}
return ret;
}
 
static int request_frame(AVFilterLink *outlink)
{
int ret;
AVFilterContext *ctx = outlink->src;
FieldMatchContext *fm = ctx->priv;
const uint32_t eof_mask = 1<<INPUT_MAIN | fm->ppsrc<<INPUT_CLEANSRC;
 
if ((fm->eof & eof_mask) == eof_mask) // flush done?
return AVERROR_EOF;
if ((ret = request_inlink(ctx, INPUT_MAIN)) < 0)
return ret;
if (fm->ppsrc && (ret = request_inlink(ctx, INPUT_CLEANSRC)) < 0)
return ret;
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
// TODO: second input source can support >8bit depth
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
int ret;
AVFilterContext *ctx = inlink->dst;
FieldMatchContext *fm = ctx->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
const int w = inlink->w;
const int h = inlink->h;
 
fm->scthresh = (int64_t)((w * h * 255.0 * fm->scthresh_flt) / 100.0);
 
if ((ret = av_image_alloc(fm->map_data, fm->map_linesize, w, h, inlink->format, 32)) < 0 ||
(ret = av_image_alloc(fm->cmask_data, fm->cmask_linesize, w, h, inlink->format, 32)) < 0)
return ret;
 
fm->hsub = pix_desc->log2_chroma_w;
fm->vsub = pix_desc->log2_chroma_h;
 
fm->tpitchy = FFALIGN(w, 16);
fm->tpitchuv = FFALIGN(w >> 1, 16);
 
fm->tbuffer = av_malloc(h/2 * fm->tpitchy);
fm->c_array = av_malloc((((w + fm->blockx/2)/fm->blockx)+1) *
(((h + fm->blocky/2)/fm->blocky)+1) *
4 * sizeof(*fm->c_array));
if (!fm->tbuffer || !fm->c_array)
return AVERROR(ENOMEM);
 
return 0;
}
 
static av_cold int fieldmatch_init(AVFilterContext *ctx)
{
const FieldMatchContext *fm = ctx->priv;
AVFilterPad pad = {
.name = av_strdup("main"),
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
};
 
if (!pad.name)
return AVERROR(ENOMEM);
ff_insert_inpad(ctx, INPUT_MAIN, &pad);
 
if (fm->ppsrc) {
pad.name = av_strdup("clean_src");
pad.config_props = NULL;
if (!pad.name)
return AVERROR(ENOMEM);
ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad);
}
 
if ((fm->blockx & (fm->blockx - 1)) ||
(fm->blocky & (fm->blocky - 1))) {
av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
return AVERROR(EINVAL);
}
 
if (fm->combpel > fm->blockx * fm->blocky) {
av_log(ctx, AV_LOG_ERROR, "Combed pixel should not be larger than blockx x blocky\n");
return AVERROR(EINVAL);
}
 
return 0;
}
 
static av_cold void fieldmatch_uninit(AVFilterContext *ctx)
{
int i;
FieldMatchContext *fm = ctx->priv;
 
if (fm->prv != fm->src)
av_frame_free(&fm->prv);
if (fm->nxt != fm->src)
av_frame_free(&fm->nxt);
av_frame_free(&fm->src);
av_freep(&fm->map_data[0]);
av_freep(&fm->cmask_data[0]);
av_freep(&fm->tbuffer);
av_freep(&fm->c_array);
for (i = 0; i < ctx->nb_inputs; i++)
av_freep(&ctx->input_pads[i].name);
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
const FieldMatchContext *fm = ctx->priv;
const AVFilterLink *inlink =
ctx->inputs[fm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
 
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
outlink->time_base = inlink->time_base;
outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
outlink->frame_rate = inlink->frame_rate;
outlink->w = inlink->w;
outlink->h = inlink->h;
return 0;
}
 
static const AVFilterPad fieldmatch_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_fieldmatch = {
.name = "fieldmatch",
.description = NULL_IF_CONFIG_SMALL("Field matching for inverse telecine."),
.query_formats = query_formats,
.priv_size = sizeof(FieldMatchContext),
.init = fieldmatch_init,
.uninit = fieldmatch_uninit,
.inputs = NULL,
.outputs = fieldmatch_outputs,
.priv_class = &fieldmatch_class,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
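/* Hedged usage sketch (not part of this file): fieldmatch only recovers the
* original progressive frames; a full IVTC chain typically follows it with a
* deinterlacer for the leftover combed frames and a decimator, e.g. something
* like:
*   ffmpeg -i telecined.mkv -vf fieldmatch,yadif=deint=interlaced,decimate out.mkv */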
/contrib/sdk/sources/ffmpeg/libavfilter/vf_fieldorder.c
0,0 → 1,196
/*
* Copyright (c) 2011 Mark Himsley
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* video field order filter, heavily influenced by vf_pad.c
*/
 
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
int dst_tff; ///< output bff/tff
int line_size[4]; ///< bytes of pixel data per line for each plane
} FieldOrderContext;
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats;
enum AVPixelFormat pix_fmt;
int ret;
 
/** accept any input pixel format that is not hardware accelerated, not
* a bitstream format, and does not have vertically sub-sampled chroma */
if (ctx->inputs[0]) {
formats = NULL;
for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
desc->flags & AV_PIX_FMT_FLAG_PAL ||
desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) &&
desc->nb_components && !desc->log2_chroma_h &&
(ret = ff_add_format(&formats, pix_fmt)) < 0) {
ff_formats_unref(&formats);
return ret;
}
}
ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
}
 
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
FieldOrderContext *s = ctx->priv;
 
return av_image_fill_linesizes(s->line_size, inlink->format, inlink->w);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
FieldOrderContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int h, plane, src_line_step, dst_line_step, line_size, line;
uint8_t *dst, *src;
AVFrame *out;
 
if (!frame->interlaced_frame ||
frame->top_field_first == s->dst_tff) {
av_log(ctx, AV_LOG_VERBOSE,
"Skipping %s.\n",
frame->interlaced_frame ?
"frame with same field order" : "progressive frame");
return ff_filter_frame(outlink, frame);
}
 
if (av_frame_is_writable(frame)) {
out = frame;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&frame);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, frame);
}
 
av_dlog(ctx,
"picture will move %s one line\n",
s->dst_tff ? "up" : "down");
h = frame->height;
for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
dst_line_step = out->linesize[plane];
src_line_step = frame->linesize[plane];
line_size = s->line_size[plane];
dst = out->data[plane];
src = frame->data[plane];
if (s->dst_tff) {
/** Move every line up one line, working from
* the top to the bottom of the frame.
* The original top line is lost.
* The new last line is created as a copy of the
* penultimate line from that field. */
for (line = 0; line < h; line++) {
if (1 + line < frame->height) {
memcpy(dst, src + src_line_step, line_size);
} else {
memcpy(dst, src - 2 * src_line_step, line_size);
}
dst += dst_line_step;
src += src_line_step;
}
} else {
/** Move every line down one line, working from
* the bottom to the top of the frame.
* The original bottom line is lost.
* The new first line is created as a copy of the
* second line from that field. */
dst += (h - 1) * dst_line_step;
src += (h - 1) * src_line_step;
for (line = h - 1; line >= 0 ; line--) {
if (line > 0) {
memcpy(dst, src - src_line_step, line_size);
} else {
memcpy(dst, src + 2 * src_line_step, line_size);
}
dst -= dst_line_step;
src -= src_line_step;
}
}
}
out->top_field_first = s->dst_tff;
 
if (frame != out)
av_frame_free(&frame);
return ff_filter_frame(outlink, out);
}
 
#define OFFSET(x) offsetof(FieldOrderContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption fieldorder_options[] = {
{ "order", "output field order", OFFSET(dst_tff), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS, "order" },
{ "bff", "bottom field first", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .flags=FLAGS, .unit = "order" },
{ "tff", "top field first", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .flags=FLAGS, .unit = "order" },
{ NULL }
};
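/* A hypothetical invocation, assuming the usual libavfilter option syntax:
*   ffmpeg -i in.mkv -vf fieldorder=order=tff out.mkv
* shifts interlaced bff input so the output is top-field-first; progressive
* frames and frames already in the requested order pass through untouched. */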
 
AVFILTER_DEFINE_CLASS(fieldorder);
 
static const AVFilterPad avfilter_vf_fieldorder_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_fieldorder_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_fieldorder = {
.name = "fieldorder",
.description = NULL_IF_CONFIG_SMALL("Set the field order."),
.priv_size = sizeof(FieldOrderContext),
.priv_class = &fieldorder_class,
.query_formats = query_formats,
.inputs = avfilter_vf_fieldorder_inputs,
.outputs = avfilter_vf_fieldorder_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_format.c
0,0 → 1,180
/*
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* format and noformat video filters
*/
 
#include <string.h>
 
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
char *pix_fmts;
/**
* List of flags telling if a given image format has been listed
* as argument to the filter.
*/
int listed_pix_fmt_flags[AV_PIX_FMT_NB];
} FormatContext;
 
#define AV_PIX_FMT_NAME_MAXSIZE 32
 
static av_cold int init(AVFilterContext *ctx)
{
FormatContext *s = ctx->priv;
const char *cur, *sep;
char pix_fmt_name[AV_PIX_FMT_NAME_MAXSIZE];
int pix_fmt_name_len, ret;
enum AVPixelFormat pix_fmt;
 
/* parse the list of formats */
for (cur = s->pix_fmts; cur; cur = sep ? sep + 1 : NULL) {
if (!(sep = strchr(cur, '|')))
pix_fmt_name_len = strlen(cur);
else
pix_fmt_name_len = sep - cur;
if (pix_fmt_name_len >= AV_PIX_FMT_NAME_MAXSIZE) {
av_log(ctx, AV_LOG_ERROR, "Format name too long\n");
return AVERROR(EINVAL);
}
 
memcpy(pix_fmt_name, cur, pix_fmt_name_len);
pix_fmt_name[pix_fmt_name_len] = 0;
 
if ((ret = ff_parse_pixel_format(&pix_fmt, pix_fmt_name, ctx)) < 0)
return ret;
 
s->listed_pix_fmt_flags[pix_fmt] = 1;
}
 
return 0;
}
 
static AVFilterFormats *make_format_list(FormatContext *s, int flag)
{
AVFilterFormats *formats = NULL;
enum AVPixelFormat pix_fmt;
 
for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
if (s->listed_pix_fmt_flags[pix_fmt] == flag) {
int ret = ff_add_format(&formats, pix_fmt);
if (ret < 0) {
ff_formats_unref(&formats);
return NULL;
}
}
 
return formats;
}
 
#define OFFSET(x) offsetof(FormatContext, x)
static const AVOption options[] = {
{ "pix_fmts", "A '|'-separated list of pixel formats", OFFSET(pix_fmts), AV_OPT_TYPE_STRING, .flags = AV_OPT_FLAG_VIDEO_PARAM },
{ NULL }
};
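/* A hedged usage sketch, assuming the '|'-separated syntax parsed by init():
*   format=pix_fmts=yuv420p|yuv422p   (constrain the input to one of these)
*   noformat=pix_fmts=yuv420p         (allow any format except yuv420p) */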
 
#if CONFIG_FORMAT_FILTER
static int query_formats_format(AVFilterContext *ctx)
{
ff_set_common_formats(ctx, make_format_list(ctx->priv, 1));
return 0;
}
 
#define format_options options
AVFILTER_DEFINE_CLASS(format);
 
static const AVFilterPad avfilter_vf_format_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = ff_null_get_video_buffer,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_format_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO
},
{ NULL }
};
 
AVFilter avfilter_vf_format = {
.name = "format",
.description = NULL_IF_CONFIG_SMALL("Convert the input video to one of the specified pixel formats."),
.init = init,
.query_formats = query_formats_format,
.priv_size = sizeof(FormatContext),
.priv_class = &format_class,
.inputs = avfilter_vf_format_inputs,
.outputs = avfilter_vf_format_outputs,
};
#endif /* CONFIG_FORMAT_FILTER */
 
#if CONFIG_NOFORMAT_FILTER
static int query_formats_noformat(AVFilterContext *ctx)
{
ff_set_common_formats(ctx, make_format_list(ctx->priv, 0));
return 0;
}
 
#define noformat_options options
AVFILTER_DEFINE_CLASS(noformat);
 
static const AVFilterPad avfilter_vf_noformat_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = ff_null_get_video_buffer,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_noformat_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO
},
{ NULL }
};
 
AVFilter avfilter_vf_noformat = {
.name = "noformat",
.description = NULL_IF_CONFIG_SMALL("Force libavfilter not to use any of the specified pixel formats for the input to the next filter."),
.init = init,
.query_formats = query_formats_noformat,
.priv_size = sizeof(FormatContext),
.priv_class = &noformat_class,
.inputs = avfilter_vf_noformat_inputs,
.outputs = avfilter_vf_noformat_outputs,
};
#endif /* CONFIG_NOFORMAT_FILTER */
/contrib/sdk/sources/ffmpeg/libavfilter/vf_fps.c
0,0 → 1,302
/*
* Copyright 2007 Bobby Bingham
* Copyright 2012 Robert Nagy <ronag89 gmail com>
* Copyright 2012 Anton Khirnov <anton khirnov net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* a filter enforcing given constant framerate
*/
 
#include <float.h>
 
#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
 
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
typedef struct FPSContext {
const AVClass *class;
 
AVFifoBuffer *fifo; ///< store frames until we get two successive timestamps
 
/* timestamps in input timebase */
int64_t first_pts; ///< pts of the first frame that arrived on this filter
int64_t pts; ///< pts of the first frame currently in the fifo
 
double start_time; ///< pts, in seconds, of the expected first frame
 
AVRational framerate; ///< target framerate
int rounding; ///< AVRounding method for timestamps
 
/* statistics */
int frames_in; ///< number of frames on input
int frames_out; ///< number of frames on output
int dup; ///< number of frames duplicated
int drop; ///< number of frames dropped
} FPSContext;
 
#define OFFSET(x) offsetof(FPSContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption fps_options[] = {
{ "fps", "A string describing desired output framerate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, .flags = V|F },
{ "start_time", "Assume the first PTS should be this value.", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX}, -DBL_MAX, DBL_MAX, V },
{ "round", "set rounding method for timestamps", OFFSET(rounding), AV_OPT_TYPE_INT, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" },
{ "zero", "round towards 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_ZERO }, 0, 5, V|F, "round" },
{ "inf", "round away from 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_INF }, 0, 5, V|F, "round" },
{ "down", "round towards -infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_DOWN }, 0, 5, V|F, "round" },
{ "up", "round towards +infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_UP }, 0, 5, V|F, "round" },
{ "near", "round to nearest", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" },
{ NULL }
};
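/* A hypothetical command-line sketch using these options:
*   ffmpeg -i in.mkv -vf fps=fps=30000/1001:round=near out.mkv
* duplicates or drops frames as needed to produce NTSC-rate CFR output. */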
 
AVFILTER_DEFINE_CLASS(fps);
 
static av_cold int init(AVFilterContext *ctx)
{
FPSContext *s = ctx->priv;
 
if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFrame*))))
return AVERROR(ENOMEM);
 
s->pts = AV_NOPTS_VALUE;
s->first_pts = AV_NOPTS_VALUE;
 
av_log(ctx, AV_LOG_VERBOSE, "fps=%d/%d\n", s->framerate.num, s->framerate.den);
return 0;
}
 
static void flush_fifo(AVFifoBuffer *fifo)
{
while (av_fifo_size(fifo)) {
AVFrame *tmp;
av_fifo_generic_read(fifo, &tmp, sizeof(tmp), NULL);
av_frame_free(&tmp);
}
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
FPSContext *s = ctx->priv;
if (s->fifo) {
s->drop += av_fifo_size(s->fifo) / sizeof(AVFrame*);
flush_fifo(s->fifo);
av_fifo_free(s->fifo);
}
 
av_log(ctx, AV_LOG_VERBOSE, "%d frames in, %d frames out; %d frames dropped, "
"%d frames duplicated.\n", s->frames_in, s->frames_out, s->drop, s->dup);
}
 
static int config_props(AVFilterLink* link)
{
FPSContext *s = link->src->priv;
 
link->time_base = av_inv_q(s->framerate);
link->frame_rate= s->framerate;
link->w = link->src->inputs[0]->w;
link->h = link->src->inputs[0]->h;
 
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
FPSContext *s = ctx->priv;
int frames_out = s->frames_out;
int ret = 0;
 
while (ret >= 0 && s->frames_out == frames_out)
ret = ff_request_frame(ctx->inputs[0]);
 
/* flush the fifo */
if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) {
int i;
for (i = 0; av_fifo_size(s->fifo); i++) {
AVFrame *buf;
 
av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base,
outlink->time_base) + s->frames_out;
 
if ((ret = ff_filter_frame(outlink, buf)) < 0)
return ret;
 
s->frames_out++;
}
return 0;
}
 
return ret;
}
 
static int write_to_fifo(AVFifoBuffer *fifo, AVFrame *buf)
{
int ret;
 
if (!av_fifo_space(fifo) &&
(ret = av_fifo_realloc2(fifo, 2*av_fifo_size(fifo)))) {
av_frame_free(&buf);
return ret;
}
 
av_fifo_generic_write(fifo, &buf, sizeof(buf), NULL);
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
FPSContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int64_t delta;
int i, ret;
 
s->frames_in++;
/* discard frames until we get the first timestamp */
if (s->pts == AV_NOPTS_VALUE) {
if (buf->pts != AV_NOPTS_VALUE) {
ret = write_to_fifo(s->fifo, buf);
if (ret < 0)
return ret;
 
if (s->start_time != DBL_MAX && s->start_time != AV_NOPTS_VALUE) {
double first_pts = s->start_time * AV_TIME_BASE;
first_pts = FFMIN(FFMAX(first_pts, INT64_MIN), INT64_MAX);
s->first_pts = s->pts = av_rescale_q(first_pts, AV_TIME_BASE_Q,
inlink->time_base);
av_log(ctx, AV_LOG_VERBOSE, "Set first pts to (in:%"PRId64" out:%"PRId64")\n",
s->first_pts, av_rescale_q(first_pts, AV_TIME_BASE_Q,
outlink->time_base));
} else {
s->first_pts = s->pts = buf->pts;
}
} else {
av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no "
"timestamp.\n");
av_frame_free(&buf);
s->drop++;
}
return 0;
}
 
/* now wait for the next timestamp */
if (buf->pts == AV_NOPTS_VALUE || av_fifo_size(s->fifo) <= 0) {
return write_to_fifo(s->fifo, buf);
}
 
/* number of output frames */
delta = av_rescale_q_rnd(buf->pts - s->pts, inlink->time_base,
outlink->time_base, s->rounding);
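/* Worked example with assumed numbers: input time base 1/1000, output time
* base 1/25 (fps=25), s->pts=0. A frame arriving at buf->pts=120 yields
* delta = 120 * 25/1000 = 3, so the fifo head is emitted three times
* (cloning it when the fifo runs dry) before buf itself is queued. */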
 
if (delta < 1) {
/* drop the frame and everything buffered except the first */
AVFrame *tmp;
int drop = av_fifo_size(s->fifo)/sizeof(AVFrame*);
 
av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop);
s->drop += drop;
 
av_fifo_generic_read(s->fifo, &tmp, sizeof(tmp), NULL);
flush_fifo(s->fifo);
ret = write_to_fifo(s->fifo, tmp);
 
av_frame_free(&buf);
return ret;
}
 
/* can output >= 1 frames */
for (i = 0; i < delta; i++) {
AVFrame *buf_out;
av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL);
 
/* duplicate the frame if needed */
if (!av_fifo_size(s->fifo) && i < delta - 1) {
AVFrame *dup = av_frame_clone(buf_out);
 
av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n");
if (dup)
ret = write_to_fifo(s->fifo, dup);
else
ret = AVERROR(ENOMEM);
 
if (ret < 0) {
av_frame_free(&buf_out);
av_frame_free(&buf);
return ret;
}
 
s->dup++;
}
 
buf_out->pts = av_rescale_q(s->first_pts, inlink->time_base,
outlink->time_base) + s->frames_out;
 
if ((ret = ff_filter_frame(outlink, buf_out)) < 0) {
av_frame_free(&buf);
return ret;
}
 
s->frames_out++;
}
flush_fifo(s->fifo);
 
ret = write_to_fifo(s->fifo, buf);
s->pts = s->first_pts + av_rescale_q(s->frames_out, outlink->time_base, inlink->time_base);
 
return ret;
}
 
static const AVFilterPad avfilter_vf_fps_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_fps_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_props
},
{ NULL }
};
 
AVFilter avfilter_vf_fps = {
.name = "fps",
.description = NULL_IF_CONFIG_SMALL("Force constant framerate."),
.init = init,
.uninit = uninit,
.priv_size = sizeof(FPSContext),
.priv_class = &fps_class,
.inputs = avfilter_vf_fps_inputs,
.outputs = avfilter_vf_fps_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_framestep.c
0,0 → 1,101
/*
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file framestep filter, inspired by libmpcodecs/vf_framestep.c by
* Daniele Fornighieri <guru AT digitalfantasy it>.
*/
 
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
int frame_step;
} FrameStepContext;
 
#define OFFSET(x) offsetof(FrameStepContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption framestep_options[] = {
{ "step", "set frame step", OFFSET(frame_step), AV_OPT_TYPE_INT, {.i64=1}, 1, INT_MAX, FLAGS},
{ NULL },
};
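/* A hypothetical invocation: framestep=step=5 keeps frames 0, 5, 10, ...
* (inlink->frame_count % 5 == 0 in filter_frame below) and frees the rest. */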
 
AVFILTER_DEFINE_CLASS(framestep);
 
static int config_output_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
FrameStepContext *framestep = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
 
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
outlink->frame_rate =
av_div_q(inlink->frame_rate, (AVRational){framestep->frame_step, 1});
 
av_log(ctx, AV_LOG_VERBOSE, "step:%d frame_rate:%d/%d(%f) -> frame_rate:%d/%d(%f)\n",
framestep->frame_step,
inlink->frame_rate.num, inlink->frame_rate.den, av_q2d(inlink->frame_rate),
outlink->frame_rate.num, outlink->frame_rate.den, av_q2d(outlink->frame_rate));
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
{
FrameStepContext *framestep = inlink->dst->priv;
 
if (!(inlink->frame_count % framestep->frame_step)) {
return ff_filter_frame(inlink->dst->outputs[0], ref);
} else {
av_frame_free(&ref);
return 0;
}
}
 
static const AVFilterPad framestep_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad framestep_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output_props,
},
{ NULL }
};
 
AVFilter avfilter_vf_framestep = {
.name = "framestep",
.description = NULL_IF_CONFIG_SMALL("Select one frame every N frames."),
.priv_size = sizeof(FrameStepContext),
.priv_class = &framestep_class,
.inputs = framestep_inputs,
.outputs = framestep_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_frei0r.c
0,0 → 1,530
/*
* Copyright (c) 2010 Stefano Sabatini
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* frei0r wrapper
*/
 
#include <dlfcn.h>
#include <frei0r.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "config.h"
#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef f0r_instance_t (*f0r_construct_f)(unsigned int width, unsigned int height);
typedef void (*f0r_destruct_f)(f0r_instance_t instance);
typedef void (*f0r_deinit_f)(void);
typedef int (*f0r_init_f)(void);
typedef void (*f0r_get_plugin_info_f)(f0r_plugin_info_t *info);
typedef void (*f0r_get_param_info_f)(f0r_param_info_t *info, int param_index);
typedef void (*f0r_update_f)(f0r_instance_t instance, double time, const uint32_t *inframe, uint32_t *outframe);
typedef void (*f0r_update2_f)(f0r_instance_t instance, double time, const uint32_t *inframe1, const uint32_t *inframe2, const uint32_t *inframe3, uint32_t *outframe);
typedef void (*f0r_set_param_value_f)(f0r_instance_t instance, f0r_param_t param, int param_index);
typedef void (*f0r_get_param_value_f)(f0r_instance_t instance, f0r_param_t param, int param_index);
 
typedef struct Frei0rContext {
const AVClass *class;
f0r_update_f update;
void *dl_handle; /* dynamic library handle */
f0r_instance_t instance;
f0r_plugin_info_t plugin_info;
 
f0r_get_param_info_f get_param_info;
f0r_get_param_value_f get_param_value;
f0r_set_param_value_f set_param_value;
f0r_construct_f construct;
f0r_destruct_f destruct;
f0r_deinit_f deinit;
 
char *dl_name;
char *params;
AVRational framerate;
 
/* only used by the source */
int w, h;
AVRational time_base;
uint64_t pts;
} Frei0rContext;
 
static void *load_sym(AVFilterContext *ctx, const char *sym_name)
{
Frei0rContext *s = ctx->priv;
void *sym = dlsym(s->dl_handle, sym_name);
if (!sym)
av_log(ctx, AV_LOG_ERROR, "Could not find symbol '%s' in loaded module\n", sym_name);
return sym;
}
 
static int set_param(AVFilterContext *ctx, f0r_param_info_t info, int index, char *param)
{
Frei0rContext *s = ctx->priv;
union {
double d;
f0r_param_color_t col;
f0r_param_position_t pos;
} val;
char *tail;
uint8_t rgba[4];
 
switch (info.type) {
case F0R_PARAM_BOOL:
if (!strcmp(param, "y")) val.d = 1.0;
else if (!strcmp(param, "n")) val.d = 0.0;
else goto fail;
break;
 
case F0R_PARAM_DOUBLE:
val.d = strtod(param, &tail);
if (*tail || val.d == HUGE_VAL)
goto fail;
break;
 
case F0R_PARAM_COLOR:
if (sscanf(param, "%f/%f/%f", &val.col.r, &val.col.g, &val.col.b) != 3) {
if (av_parse_color(rgba, param, -1, ctx) < 0)
goto fail;
val.col.r = rgba[0] / 255.0;
val.col.g = rgba[1] / 255.0;
val.col.b = rgba[2] / 255.0;
}
break;
 
case F0R_PARAM_POSITION:
if (sscanf(param, "%lf/%lf", &val.pos.x, &val.pos.y) != 2)
goto fail;
break;
}
 
s->set_param_value(s->instance, &val, index);
return 0;
 
fail:
av_log(ctx, AV_LOG_ERROR, "Invalid value '%s' for parameter '%s'\n",
param, info.name);
return AVERROR(EINVAL);
}
 
static int set_params(AVFilterContext *ctx, const char *params)
{
Frei0rContext *s = ctx->priv;
int i;
 
if (!params)
return 0;
 
for (i = 0; i < s->plugin_info.num_params; i++) {
f0r_param_info_t info;
char *param;
int ret;
 
s->get_param_info(&info, i);
 
if (*params) {
if (!(param = av_get_token(&params, "|")))
return AVERROR(ENOMEM);
if (*params)
params++; /* skip ':' */
ret = set_param(ctx, info, i, param);
av_free(param);
if (ret < 0)
return ret;
}
 
av_log(ctx, AV_LOG_VERBOSE,
"idx:%d name:'%s' type:%s explanation:'%s' ",
i, info.name,
info.type == F0R_PARAM_BOOL ? "bool" :
info.type == F0R_PARAM_DOUBLE ? "double" :
info.type == F0R_PARAM_COLOR ? "color" :
info.type == F0R_PARAM_POSITION ? "position" :
info.type == F0R_PARAM_STRING ? "string" : "unknown",
info.explanation);
 
#ifdef DEBUG
av_log(ctx, AV_LOG_DEBUG, "value:");
switch (info.type) {
void *v;
double d;
char str[128]; /* 'str', not 's': must not shadow the Frei0rContext pointer */
f0r_param_color_t col;
f0r_param_position_t pos;
 
case F0R_PARAM_BOOL:
v = &d;
s->get_param_value(s->instance, v, i);
av_log(ctx, AV_LOG_DEBUG, "%s", d >= 0.5 && d <= 1.0 ? "y" : "n");
break;
case F0R_PARAM_DOUBLE:
v = &d;
s->get_param_value(s->instance, v, i);
av_log(ctx, AV_LOG_DEBUG, "%f", d);
break;
case F0R_PARAM_COLOR:
v = &col;
s->get_param_value(s->instance, v, i);
av_log(ctx, AV_LOG_DEBUG, "%f/%f/%f", col.r, col.g, col.b);
break;
case F0R_PARAM_POSITION:
v = &pos;
s->get_param_value(s->instance, v, i);
av_log(ctx, AV_LOG_DEBUG, "%f/%f", pos.x, pos.y);
break;
default: /* F0R_PARAM_STRING */
v = str;
s->get_param_value(s->instance, v, i);
av_log(ctx, AV_LOG_DEBUG, "'%s'", str);
break;
}
#endif
av_log(ctx, AV_LOG_VERBOSE, "\n");
}
 
return 0;
}
 
static int load_path(AVFilterContext *ctx, void **handle_ptr, const char *prefix, const char *name)
{
char *path = av_asprintf("%s%s%s", prefix, name, SLIBSUF);
if (!path)
return AVERROR(ENOMEM);
av_log(ctx, AV_LOG_DEBUG, "Looking for frei0r effect in '%s'\n", path);
*handle_ptr = dlopen(path, RTLD_NOW|RTLD_LOCAL);
av_free(path);
return 0;
}
 
static av_cold int frei0r_init(AVFilterContext *ctx,
const char *dl_name, int type)
{
Frei0rContext *s = ctx->priv;
f0r_init_f f0r_init;
f0r_get_plugin_info_f f0r_get_plugin_info;
f0r_plugin_info_t *pi;
char *path;
int ret = 0;
 
if (!dl_name) {
av_log(ctx, AV_LOG_ERROR, "No filter name provided.\n");
return AVERROR(EINVAL);
}
 
/* see: http://frei0r.dyne.org/codedoc/html/group__pluglocations.html */
if ((path = av_strdup(getenv("FREI0R_PATH")))) {
#ifdef _WIN32
const char *separator = ";";
#else
const char *separator = ":";
#endif
char *p, *ptr = NULL;
for (p = path; p = av_strtok(p, separator, &ptr); p = NULL) {
/* append a trailing slash in case it is missing (a doubled slash is harmless) */
char *p1 = av_asprintf("%s/", p);
if (!p1) {
ret = AVERROR(ENOMEM);
goto check_path_end;
}
ret = load_path(ctx, &s->dl_handle, p1, dl_name);
av_free(p1);
if (ret < 0)
goto check_path_end;
if (s->dl_handle)
break;
}
 
check_path_end:
av_free(path);
if (ret < 0)
return ret;
}
if (!s->dl_handle && (path = getenv("HOME"))) {
char *prefix = av_asprintf("%s/.frei0r-1/lib/", path);
if (!prefix)
return AVERROR(ENOMEM);
ret = load_path(ctx, &s->dl_handle, prefix, dl_name);
av_free(prefix);
if (ret < 0)
return ret;
}
if (!s->dl_handle) {
ret = load_path(ctx, &s->dl_handle, "/usr/local/lib/frei0r-1/", dl_name);
if (ret < 0)
return ret;
}
if (!s->dl_handle) {
ret = load_path(ctx, &s->dl_handle, "/usr/lib/frei0r-1/", dl_name);
if (ret < 0)
return ret;
}
if (!s->dl_handle) {
av_log(ctx, AV_LOG_ERROR, "Could not find module '%s'\n", dl_name);
return AVERROR(EINVAL);
}
 
if (!(f0r_init = load_sym(ctx, "f0r_init" )) ||
!(f0r_get_plugin_info = load_sym(ctx, "f0r_get_plugin_info")) ||
!(s->get_param_info = load_sym(ctx, "f0r_get_param_info" )) ||
!(s->get_param_value = load_sym(ctx, "f0r_get_param_value")) ||
!(s->set_param_value = load_sym(ctx, "f0r_set_param_value")) ||
!(s->update = load_sym(ctx, "f0r_update" )) ||
!(s->construct = load_sym(ctx, "f0r_construct" )) ||
!(s->destruct = load_sym(ctx, "f0r_destruct" )) ||
!(s->deinit = load_sym(ctx, "f0r_deinit" )))
return AVERROR(EINVAL);
 
if (f0r_init() < 0) {
av_log(ctx, AV_LOG_ERROR, "Could not init the frei0r module\n");
return AVERROR(EINVAL);
}
 
f0r_get_plugin_info(&s->plugin_info);
pi = &s->plugin_info;
if (pi->plugin_type != type) {
av_log(ctx, AV_LOG_ERROR,
"Invalid type '%s' for the plugin\n",
pi->plugin_type == F0R_PLUGIN_TYPE_FILTER ? "filter" :
pi->plugin_type == F0R_PLUGIN_TYPE_SOURCE ? "source" :
pi->plugin_type == F0R_PLUGIN_TYPE_MIXER2 ? "mixer2" :
pi->plugin_type == F0R_PLUGIN_TYPE_MIXER3 ? "mixer3" : "unknown");
return AVERROR(EINVAL);
}
 
av_log(ctx, AV_LOG_VERBOSE,
"name:%s author:'%s' explanation:'%s' color_model:%s "
"frei0r_version:%d version:%d.%d num_params:%d\n",
pi->name, pi->author, pi->explanation,
pi->color_model == F0R_COLOR_MODEL_BGRA8888 ? "bgra8888" :
pi->color_model == F0R_COLOR_MODEL_RGBA8888 ? "rgba8888" :
pi->color_model == F0R_COLOR_MODEL_PACKED32 ? "packed32" : "unknown",
pi->frei0r_version, pi->major_version, pi->minor_version, pi->num_params);
 
return 0;
}
 
static av_cold int filter_init(AVFilterContext *ctx)
{
Frei0rContext *s = ctx->priv;
 
return frei0r_init(ctx, s->dl_name, F0R_PLUGIN_TYPE_FILTER);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
Frei0rContext *s = ctx->priv;
 
if (s->destruct && s->instance)
s->destruct(s->instance);
if (s->deinit)
s->deinit();
if (s->dl_handle)
dlclose(s->dl_handle);
}
 
static int config_input_props(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
Frei0rContext *s = ctx->priv;
 
if (s->destruct && s->instance)
s->destruct(s->instance);
if (!(s->instance = s->construct(inlink->w, inlink->h))) {
av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance\n");
return AVERROR(EINVAL);
}
 
return set_params(ctx, s->params);
}
 
static int query_formats(AVFilterContext *ctx)
{
Frei0rContext *s = ctx->priv;
AVFilterFormats *formats = NULL;
 
if (s->plugin_info.color_model == F0R_COLOR_MODEL_BGRA8888) {
ff_add_format(&formats, AV_PIX_FMT_BGRA);
} else if (s->plugin_info.color_model == F0R_COLOR_MODEL_RGBA8888) {
ff_add_format(&formats, AV_PIX_FMT_RGBA);
} else { /* F0R_COLOR_MODEL_PACKED32 */
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE
};
formats = ff_make_format_list(pix_fmts);
}
 
if (!formats)
return AVERROR(ENOMEM);
 
ff_set_common_formats(ctx, formats);
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
Frei0rContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
 
s->update(s->instance, in->pts * av_q2d(inlink->time_base) * 1000,
(const uint32_t *)in->data[0],
(uint32_t *)out->data[0]);
 
av_frame_free(&in);
 
return ff_filter_frame(outlink, out);
}
 
#define OFFSET(x) offsetof(Frei0rContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption frei0r_options[] = {
{ "filter_name", NULL, OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ NULL }
};
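/* A hedged usage sketch; the plugin name and its '|'-separated parameters are
* plugin-specific (distort0r is assumed to be installed on the system):
*   ffmpeg -i in.mkv -vf frei0r=filter_name=distort0r:filter_params=0.5|0.01 out.mkv */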
 
AVFILTER_DEFINE_CLASS(frei0r);
 
static const AVFilterPad avfilter_vf_frei0r_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_props,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_frei0r_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_frei0r = {
.name = "frei0r",
.description = NULL_IF_CONFIG_SMALL("Apply a frei0r effect."),
.query_formats = query_formats,
.init = filter_init,
.uninit = uninit,
.priv_size = sizeof(Frei0rContext),
.priv_class = &frei0r_class,
.inputs = avfilter_vf_frei0r_inputs,
.outputs = avfilter_vf_frei0r_outputs,
};
 
static av_cold int source_init(AVFilterContext *ctx)
{
Frei0rContext *s = ctx->priv;
 
s->time_base.num = s->framerate.den;
s->time_base.den = s->framerate.num;
 
return frei0r_init(ctx, s->dl_name, F0R_PLUGIN_TYPE_SOURCE);
}
 
static int source_config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
Frei0rContext *s = ctx->priv;
 
if (av_image_check_size(s->w, s->h, 0, ctx) < 0)
return AVERROR(EINVAL);
outlink->w = s->w;
outlink->h = s->h;
outlink->time_base = s->time_base;
outlink->sample_aspect_ratio = (AVRational){1,1};
 
if (s->destruct && s->instance)
s->destruct(s->instance);
if (!(s->instance = s->construct(outlink->w, outlink->h))) {
av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance\n");
return AVERROR(EINVAL);
}
 
return set_params(ctx, s->params);
}
 
static int source_request_frame(AVFilterLink *outlink)
{
Frei0rContext *s = outlink->src->priv;
AVFrame *frame = ff_get_video_buffer(outlink, outlink->w, outlink->h);
 
if (!frame)
return AVERROR(ENOMEM);
 
frame->sample_aspect_ratio = (AVRational) {1, 1};
frame->pts = s->pts++;
 
s->update(s->instance, av_rescale_q(frame->pts, s->time_base, (AVRational){1,1000}),
NULL, (uint32_t *)frame->data[0]);
 
return ff_filter_frame(outlink, frame);
}
 
static const AVOption frei0r_src_options[] = {
{ "size", "Dimensions of the generated video.", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, { .str = "320x240" }, .flags = FLAGS },
{ "framerate", NULL, OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, .flags = FLAGS },
{ "filter_name", NULL, OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ NULL },
};
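/* A hedged source sketch (the partik0l generator name is an assumption):
*   ffplay -f lavfi frei0r_src=size=320x240:framerate=25:filter_name=partik0l */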
 
AVFILTER_DEFINE_CLASS(frei0r_src);
 
static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = source_request_frame,
.config_props = source_config_props
},
{ NULL }
};
 
AVFilter avfilter_vsrc_frei0r_src = {
.name = "frei0r_src",
.description = NULL_IF_CONFIG_SMALL("Generate a frei0r source."),
.priv_size = sizeof(Frei0rContext),
.priv_class = &frei0r_src_class,
.init = source_init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = NULL,
.outputs = avfilter_vsrc_frei0r_src_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_geq.c
0,0 → 1,280
/*
* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
* Copyright (C) 2012 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* Generic equation change filter
* Originally written by Michael Niedermayer for the MPlayer project, and
* ported by Clément Bœsch for FFmpeg.
*/
 
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
AVExpr *e[4]; ///< expressions for each plane
char *expr_str[4+3]; ///< expression strings for each plane
AVFrame *picref; ///< current input buffer
int hsub, vsub; ///< chroma subsampling
int planes; ///< number of planes
int is_rgb;
} GEQContext;
 
enum { Y = 0, U, V, A, G, B, R };
 
#define OFFSET(x) offsetof(GEQContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption geq_options[] = {
{ "lum_expr", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "lum", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "cb_expr", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "cb", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "cr_expr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "cr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "alpha_expr", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "a", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "red_expr", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "r", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "green_expr", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "g", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "blue_expr", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "b", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{NULL},
};
 
AVFILTER_DEFINE_CLASS(geq);
 
static inline double getpix(void *priv, double x, double y, int plane)
{
int xi, yi;
GEQContext *geq = priv;
AVFrame *picref = geq->picref;
const uint8_t *src = picref->data[plane];
const int linesize = picref->linesize[plane];
const int w = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(picref->width, geq->hsub) : picref->width;
const int h = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height;
 
if (!src)
return 0;
 
xi = x = av_clipf(x, 0, w - 2);
yi = y = av_clipf(y, 0, h - 2);
 
x -= xi;
y -= yi;
 
return (1-y)*((1-x)*src[xi + yi * linesize] + x*src[xi + 1 + yi * linesize])
+ y *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]);
}
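/* getpix() above is plain bilinear interpolation; a sketch of the weights:
* with x,y reduced to their fractional parts and p00..p11 the four
* neighbouring samples,
*   value = (1-y)*((1-x)*p00 + x*p10) + y*((1-x)*p01 + x*p11)
* i.e. a horizontal lerp on each row followed by a vertical lerp. */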
 
//TODO: cubic interpolate
//TODO: keep the last few frames
static double lum(void *priv, double x, double y) { return getpix(priv, x, y, 0); }
static double cb(void *priv, double x, double y) { return getpix(priv, x, y, 1); }
static double cr(void *priv, double x, double y) { return getpix(priv, x, y, 2); }
static double alpha(void *priv, double x, double y) { return getpix(priv, x, y, 3); }
 
static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", "T", NULL };
enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_N, VAR_SW, VAR_SH, VAR_T, VAR_VARS_NB };
 
static av_cold int geq_init(AVFilterContext *ctx)
{
GEQContext *geq = ctx->priv;
int plane, ret = 0;
 
if (!geq->expr_str[Y] && !geq->expr_str[G] && !geq->expr_str[B] && !geq->expr_str[R]) {
av_log(ctx, AV_LOG_ERROR, "A luminance or RGB expression is mandatory\n");
ret = AVERROR(EINVAL);
goto end;
}
geq->is_rgb = !geq->expr_str[Y];
 
if ((geq->expr_str[Y] || geq->expr_str[U] || geq->expr_str[V]) && (geq->expr_str[G] || geq->expr_str[B] || geq->expr_str[R])) {
av_log(ctx, AV_LOG_ERROR, "Either YCbCr or RGB but not both must be specified\n");
ret = AVERROR(EINVAL);
goto end;
}
 
if (!geq->expr_str[U] && !geq->expr_str[V]) {
/* No chroma at all: fall back on luma */
geq->expr_str[U] = av_strdup(geq->expr_str[Y]);
geq->expr_str[V] = av_strdup(geq->expr_str[Y]);
} else {
/* One chroma unspecified, fall back on the other */
if (!geq->expr_str[U]) geq->expr_str[U] = av_strdup(geq->expr_str[V]);
if (!geq->expr_str[V]) geq->expr_str[V] = av_strdup(geq->expr_str[U]);
}
 
if (!geq->expr_str[A])
geq->expr_str[A] = av_strdup("255");
if (!geq->expr_str[G])
geq->expr_str[G] = av_strdup("g(X,Y)");
if (!geq->expr_str[B])
geq->expr_str[B] = av_strdup("b(X,Y)");
if (!geq->expr_str[R])
geq->expr_str[R] = av_strdup("r(X,Y)");
 
if (geq->is_rgb ?
(!geq->expr_str[G] || !geq->expr_str[B] || !geq->expr_str[R])
:
(!geq->expr_str[U] || !geq->expr_str[V] || !geq->expr_str[A])) {
ret = AVERROR(ENOMEM);
goto end;
}
 
for (plane = 0; plane < 4; plane++) {
static double (*p[])(void *, double, double) = { lum, cb, cr, alpha };
static const char *const func2_yuv_names[] = { "lum", "cb", "cr", "alpha", "p", NULL };
static const char *const func2_rgb_names[] = { "g", "b", "r", "alpha", "p", NULL };
const char *const *func2_names = geq->is_rgb ? func2_rgb_names : func2_yuv_names;
double (*func2[])(void *, double, double) = { lum, cb, cr, alpha, p[plane], NULL };
 
ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane < 3 && geq->is_rgb ? plane+4 : plane], var_names,
NULL, NULL, func2_names, func2, 0, ctx);
if (ret < 0)
break;
}
 
end:
return ret;
}
 
static int geq_query_formats(AVFilterContext *ctx)
{
GEQContext *geq = ctx->priv;
static const enum AVPixelFormat yuv_pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE
};
static const enum AVPixelFormat rgb_pix_fmts[] = {
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
AV_PIX_FMT_NONE
};
if (geq->is_rgb) {
ff_set_common_formats(ctx, ff_make_format_list(rgb_pix_fmts));
} else
ff_set_common_formats(ctx, ff_make_format_list(yuv_pix_fmts));
return 0;
}
 
static int geq_config_props(AVFilterLink *inlink)
{
GEQContext *geq = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
geq->hsub = desc->log2_chroma_w;
geq->vsub = desc->log2_chroma_h;
geq->planes = desc->nb_components;
return 0;
}
 
static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
int plane;
GEQContext *geq = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
double values[VAR_VARS_NB] = {
[VAR_N] = inlink->frame_count,
[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),
};
 
geq->picref = in;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
 
for (plane = 0; plane < geq->planes && out->data[plane]; plane++) {
int x, y;
uint8_t *dst = out->data[plane];
const int linesize = out->linesize[plane];
const int w = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, geq->hsub) : inlink->w;
const int h = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, geq->vsub) : inlink->h;
 
values[VAR_W] = w;
values[VAR_H] = h;
values[VAR_SW] = w / (double)inlink->w;
values[VAR_SH] = h / (double)inlink->h;
 
for (y = 0; y < h; y++) {
values[VAR_Y] = y;
for (x = 0; x < w; x++) {
values[VAR_X] = x;
dst[x] = av_expr_eval(geq->e[plane], values, geq);
}
dst += linesize;
}
}
 
av_frame_free(&geq->picref);
return ff_filter_frame(outlink, out);
}
 
static av_cold void geq_uninit(AVFilterContext *ctx)
{
int i;
GEQContext *geq = ctx->priv;
 
for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++)
av_expr_free(geq->e[i]);
}
 
static const AVFilterPad geq_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = geq_config_props,
.filter_frame = geq_filter_frame,
},
{ NULL }
};
 
static const AVFilterPad geq_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_geq = {
.name = "geq",
.description = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."),
.priv_size = sizeof(GEQContext),
.init = geq_init,
.uninit = geq_uninit,
.query_formats = geq_query_formats,
.inputs = geq_inputs,
.outputs = geq_outputs,
.priv_class = &geq_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_gradfun.c
0,0 → 1,264
/*
* Copyright (c) 2010 Nolan Lum <nol888@gmail.com>
* Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* gradfun debanding filter, ported from MPlayer
* libmpcodecs/vf_gradfun.c
*
* Apply a boxblur debanding algorithm (based on the gradfun2db
* AviSynth filter by prunedtree).
* For each pixel, if it is within the threshold of the blurred value, move it closer.
* So now we have a smoothed and higher bitdepth version of all the shallow
* gradients, while leaving detailed areas untouched.
* Dither it back to 8bit.
*/
 
#include "libavutil/imgutils.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "gradfun.h"
#include "internal.h"
#include "video.h"
 
DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = {
{0x00,0x60,0x18,0x78,0x06,0x66,0x1E,0x7E},
{0x40,0x20,0x58,0x38,0x46,0x26,0x5E,0x3E},
{0x10,0x70,0x08,0x68,0x16,0x76,0x0E,0x6E},
{0x50,0x30,0x48,0x28,0x56,0x36,0x4E,0x2E},
{0x04,0x64,0x1C,0x7C,0x02,0x62,0x1A,0x7A},
{0x44,0x24,0x5C,0x3C,0x42,0x22,0x5A,0x3A},
{0x14,0x74,0x0C,0x6C,0x12,0x72,0x0A,0x6A},
{0x54,0x34,0x4C,0x2C,0x52,0x32,0x4A,0x2A},
};
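/* The table above is the classic 8x8 Bayer ordered-dither matrix (indices
* 0..63) scaled by 2 so it spans 0..0x7E, matching the 7 fractional bits
* used by filter_line (pixels are processed as src[x] << 7). */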
 
void ff_gradfun_filter_line_c(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers)
{
int x;
for (x = 0; x < width; dc += x & 1, x++) {
int pix = src[x] << 7;
int delta = dc[0] - pix;
int m = abs(delta) * thresh >> 16;
m = FFMAX(0, 127 - m);
m = m * m * delta >> 14;
pix += m + dithers[x & 7];
dst[x] = av_clip_uint8(pix >> 7);
}
}
 
void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width)
{
int x, v, old;
for (x = 0; x < width; x++) {
v = buf1[x] + src[2 * x] + src[2 * x + 1] + src[2 * x + src_linesize] + src[2 * x + 1 + src_linesize];
old = buf[x];
buf[x] = v;
dc[x] = v - old;
}
}
 
static void filter(GradFunContext *ctx, uint8_t *dst, const uint8_t *src, int width, int height, int dst_linesize, int src_linesize, int r)
{
int bstride = FFALIGN(width, 16) / 2;
int y;
uint32_t dc_factor = (1 << 21) / (r * r);
uint16_t *dc = ctx->buf + 16;
uint16_t *buf = ctx->buf + bstride + 32;
int thresh = ctx->thresh;
 
memset(dc, 0, (bstride + 16) * sizeof(*buf));
for (y = 0; y < r; y++)
ctx->blur_line(dc, buf + y * bstride, buf + (y - 1) * bstride, src + 2 * y * src_linesize, src_linesize, width / 2);
for (;;) {
if (y < height - r) {
int mod = ((y + r) / 2) % r;
uint16_t *buf0 = buf + mod * bstride;
uint16_t *buf1 = buf + (mod ? mod - 1 : r - 1) * bstride;
int x, v;
ctx->blur_line(dc, buf0, buf1, src + (y + r) * src_linesize, src_linesize, width / 2);
for (x = v = 0; x < r; x++)
v += dc[x];
for (; x < width / 2; x++) {
v += dc[x] - dc[x-r];
dc[x-r] = v * dc_factor >> 16;
}
for (; x < (width + r + 1) / 2; x++)
dc[x-r] = v * dc_factor >> 16;
for (x = -r / 2; x < 0; x++)
dc[x] = dc[0];
}
if (y == r) {
for (y = 0; y < r; y++)
ctx->filter_line(dst + y * dst_linesize, src + y * src_linesize, dc - r / 2, width, thresh, dither[y & 7]);
}
ctx->filter_line(dst + y * dst_linesize, src + y * src_linesize, dc - r / 2, width, thresh, dither[y & 7]);
if (++y >= height) break;
ctx->filter_line(dst + y * dst_linesize, src + y * src_linesize, dc - r / 2, width, thresh, dither[y & 7]);
if (++y >= height) break;
}
}
 
static av_cold int init(AVFilterContext *ctx)
{
GradFunContext *s = ctx->priv;
 
s->thresh = (1 << 15) / s->strength;
s->radius = av_clip((s->radius + 1) & ~1, 4, 32);
 
s->blur_line = ff_gradfun_blur_line_c;
s->filter_line = ff_gradfun_filter_line_c;
 
if (ARCH_X86)
ff_gradfun_init_x86(s);
 
av_log(ctx, AV_LOG_VERBOSE, "threshold:%.2f radius:%d\n", s->strength, s->radius);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
GradFunContext *s = ctx->priv;
av_freep(&s->buf);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_GRAY8, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_GBRP,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
GradFunContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int hsub = desc->log2_chroma_w;
int vsub = desc->log2_chroma_h;
 
av_freep(&s->buf);
s->buf = av_calloc((FFALIGN(inlink->w, 16) * (s->radius + 1) / 2 + 32), sizeof(*s->buf));
if (!s->buf)
return AVERROR(ENOMEM);
 
s->chroma_w = FF_CEIL_RSHIFT(inlink->w, hsub);
s->chroma_h = FF_CEIL_RSHIFT(inlink->h, vsub);
s->chroma_r = av_clip(((((s->radius >> hsub) + (s->radius >> vsub)) / 2 ) + 1) & ~1, 4, 32);
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
GradFunContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int p, direct;
 
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
direct = 0;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
 
for (p = 0; p < 4 && in->data[p] && in->linesize[p]; p++) {
int w = inlink->w;
int h = inlink->h;
int r = s->radius;
if (p) {
w = s->chroma_w;
h = s->chroma_h;
r = s->chroma_r;
}
 
if (FFMIN(w, h) > 2 * r)
filter(s, out->data[p], in->data[p], w, h, out->linesize[p], in->linesize[p], r);
else if (out->data[p] != in->data[p])
av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p], w, h);
}
 
if (!direct)
av_frame_free(&in);
 
return ff_filter_frame(outlink, out);
}
 
#define OFFSET(x) offsetof(GradFunContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption gradfun_options[] = {
{ "strength", "The maximum amount by which the filter will change any one pixel.", OFFSET(strength), AV_OPT_TYPE_FLOAT, { .dbl = 1.2 }, 0.51, 64, FLAGS },
{ "radius", "The neighborhood to fit the gradient to.", OFFSET(radius), AV_OPT_TYPE_INT, { .i64 = 16 }, 4, 32, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(gradfun);
 
static const AVFilterPad avfilter_vf_gradfun_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_gradfun_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_gradfun = {
.name = "gradfun",
.description = NULL_IF_CONFIG_SMALL("Debands video quickly using gradients."),
.priv_size = sizeof(GradFunContext),
.priv_class = &gradfun_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = avfilter_vf_gradfun_inputs,
.outputs = avfilter_vf_gradfun_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
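
/* Example invocation (a sketch using the options defined above):
 *   ffmpeg -i in.mp4 -vf gradfun=strength=1.2:radius=16 out.mp4
 * A plane is only filtered when FFMIN(w, h) > 2 * radius (see filter_frame);
 * smaller planes are copied through untouched. */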
/contrib/sdk/sources/ffmpeg/libavfilter/vf_hflip.c
0,0 → 1,201
/*
* Copyright (c) 2007 Benoit Fouet
* Copyright (c) 2010 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* horizontal flip filter
*/
 
#include <string.h>
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/pixdesc.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
 
typedef struct {
int max_step[4]; ///< max pixel step for each plane, expressed as a number of bytes
int planewidth[4]; ///< width of each plane
int planeheight[4]; ///< height of each plane
} FlipContext;
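
/* Formats are selected by exclusion: hwaccel and bitstream formats cannot be
 * mirrored in system memory, and packed formats whose chroma is subsampled
 * differently in width and height (log2_chroma_w != log2_chroma_h while the
 * first two components share a plane) have no per-plane horizontal mirror. */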
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *pix_fmts = NULL;
int fmt;
 
for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ||
(desc->log2_chroma_w != desc->log2_chroma_h &&
desc->comp[0].plane == desc->comp[1].plane)))
ff_add_format(&pix_fmts, fmt);
}
 
ff_set_common_formats(ctx, pix_fmts);
return 0;
}
 
static int config_props(AVFilterLink *inlink)
{
FlipContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
const int hsub = pix_desc->log2_chroma_w;
const int vsub = pix_desc->log2_chroma_h;
 
av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
s->planewidth[0] = s->planewidth[3] = inlink->w;
s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, hsub);
s->planeheight[0] = s->planeheight[3] = inlink->h;
s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, vsub);
 
return 0;
}
 
typedef struct ThreadData {
AVFrame *in, *out;
} ThreadData;
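
/* Each slice job mirrors its own band of rows, [H*job/nb_jobs,
 * H*(job+1)/nb_jobs), of every plane; the source row is walked right to left
 * with a pixel step of 1, 2, 3 or 4 bytes, falling back to memcpy() for any
 * larger step. */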
 
static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
{
FlipContext *s = ctx->priv;
ThreadData *td = arg;
AVFrame *in = td->in;
AVFrame *out = td->out;
uint8_t *inrow, *outrow;
int i, j, plane, step;
 
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
const int width = s->planewidth[plane];
const int height = s->planeheight[plane];
const int start = (height * job ) / nb_jobs;
const int end = (height * (job+1)) / nb_jobs;
 
step = s->max_step[plane];
 
outrow = out->data[plane] + start * out->linesize[plane];
inrow = in ->data[plane] + start * in->linesize[plane] + (width - 1) * step;
for (i = start; i < end; i++) {
switch (step) {
case 1:
for (j = 0; j < width; j++)
outrow[j] = inrow[-j];
break;
 
case 2:
{
uint16_t *outrow16 = (uint16_t *)outrow;
uint16_t * inrow16 = (uint16_t *) inrow;
for (j = 0; j < width; j++)
outrow16[j] = inrow16[-j];
}
break;
 
case 3:
{
uint8_t *in = inrow;
uint8_t *out = outrow;
for (j = 0; j < width; j++, out += 3, in -= 3) {
int32_t v = AV_RB24(in);
AV_WB24(out, v);
}
}
break;
 
case 4:
{
uint32_t *outrow32 = (uint32_t *)outrow;
uint32_t * inrow32 = (uint32_t *) inrow;
for (j = 0; j < width; j++)
outrow32[j] = inrow32[-j];
}
break;
 
default:
for (j = 0; j < width; j++)
memcpy(outrow + j*step, inrow - j*step, step);
}
 
inrow += in ->linesize[plane];
outrow += out->linesize[plane];
}
}
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
ThreadData td;
AVFrame *out;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
 
/* copy palette if required */
if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL)
memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
 
td.in = in, td.out = out;
ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));
 
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
 
static const AVFilterPad avfilter_vf_hflip_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_hflip_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_hflip = {
.name = "hflip",
.description = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."),
.priv_size = sizeof(FlipContext),
.query_formats = query_formats,
.inputs = avfilter_vf_hflip_inputs,
.outputs = avfilter_vf_hflip_outputs,
.flags = AVFILTER_FLAG_SLICE_THREADS,
};
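
/* Example invocation: ffmpeg -i in.mp4 -vf hflip out.mp4; the work is split
 * across slice threads (AVFILTER_FLAG_SLICE_THREADS above), one band of rows
 * per job. */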
/contrib/sdk/sources/ffmpeg/libavfilter/vf_histeq.c
0,0 → 1,281
/*
* Copyright (c) 2012 Jeremy Tran
* Copyright (c) 2001 Donald A. Graft
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* Histogram equalization filter, based on the VirtualDub filter by
* Donald A. Graft <neuron2 AT home DOT com>.
* Implements global automatic contrast adjustment by means of
* histogram equalization.
*/
 
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
 
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
// #define DEBUG
 
// Linear Congruential Generator, see "Numerical Recipes"
#define LCG_A 4096
#define LCG_C 150889
#define LCG_M 714025
#define LCG(x) (((x) * LCG_A + LCG_C) % LCG_M)
#define LCG_SEED 739187
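
/* LCG(x) computes (4096*x + 150889) mod 714025. Kept in an unsigned int, the
 * intermediate product cannot wrap: the largest residue times LCG_A plus
 * LCG_C is 714024 * 4096 + 150889 < 2^32, so the modulo sees the exact
 * value. */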
 
enum HisteqAntibanding {
HISTEQ_ANTIBANDING_NONE = 0,
HISTEQ_ANTIBANDING_WEAK = 1,
HISTEQ_ANTIBANDING_STRONG = 2,
HISTEQ_ANTIBANDING_NB,
};
 
typedef struct {
const AVClass *class;
float strength;
float intensity;
enum HisteqAntibanding antibanding;
int in_histogram [256]; ///< input histogram
int out_histogram[256]; ///< output histogram
int LUT[256]; ///< lookup table derived from histogram[]
uint8_t rgba_map[4]; ///< components position
int bpp; ///< bytes per pixel
} HisteqContext;
 
#define OFFSET(x) offsetof(HisteqContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
 
static const AVOption histeq_options[] = {
{ "strength", "set the strength", OFFSET(strength), AV_OPT_TYPE_FLOAT, {.dbl=0.2}, 0, 1, FLAGS },
{ "intensity", "set the intensity", OFFSET(intensity), AV_OPT_TYPE_FLOAT, {.dbl=0.21}, 0, 1, FLAGS },
{ "antibanding", "set the antibanding level", OFFSET(antibanding), AV_OPT_TYPE_INT, {.i64=HISTEQ_ANTIBANDING_NONE}, 0, HISTEQ_ANTIBANDING_NB-1, FLAGS, "antibanding" },
CONST("none", "apply no antibanding", HISTEQ_ANTIBANDING_NONE, "antibanding"),
CONST("weak", "apply weak antibanding", HISTEQ_ANTIBANDING_WEAK, "antibanding"),
CONST("strong", "apply strong antibanding", HISTEQ_ANTIBANDING_STRONG, "antibanding"),
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(histeq);
 
static av_cold int init(AVFilterContext *ctx)
{
HisteqContext *histeq = ctx->priv;
 
av_log(ctx, AV_LOG_VERBOSE,
"strength:%0.3f intensity:%0.3f antibanding:%d\n",
histeq->strength, histeq->intensity, histeq->antibanding);
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
HisteqContext *histeq = ctx->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
 
histeq->bpp = av_get_bits_per_pixel(pix_desc) / 8;
ff_fill_rgba_map(histeq->rgba_map, inlink->format);
 
return 0;
}
 
#define R 0
#define G 1
#define B 2
#define A 3
 
#define GET_RGB_VALUES(r, g, b, src, map) do { \
r = src[x + map[R]]; \
g = src[x + map[G]]; \
b = src[x + map[B]]; \
} while (0)
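
/* The luma weights used below, 55/182/19, sum to 256 and approximate the
 * BT.709 coefficients (0.2126, 0.7152, 0.0722 scaled by 256), so
 * (55*r + 182*g + 19*b) >> 8 maps r = g = b = 255 to exactly 255. */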
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
AVFilterContext *ctx = inlink->dst;
HisteqContext *histeq = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int strength = histeq->strength * 1000;
int intensity = histeq->intensity * 1000;
int x, y, i, luthi, lutlo, lut, luma, oluma, m;
AVFrame *outpic;
unsigned int r, g, b, jran;
uint8_t *src, *dst;
 
outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpic) {
av_frame_free(&inpic);
return AVERROR(ENOMEM);
}
av_frame_copy_props(outpic, inpic);
 
/* Seed random generator for antibanding. */
jran = LCG_SEED;
 
/* Calculate and store the luminance and calculate the global histogram
based on the luminance. */
memset(histeq->in_histogram, 0, sizeof(histeq->in_histogram));
src = inpic->data[0];
dst = outpic->data[0];
for (y = 0; y < inlink->h; y++) {
for (x = 0; x < inlink->w * histeq->bpp; x += histeq->bpp) {
GET_RGB_VALUES(r, g, b, src, histeq->rgba_map);
luma = (55 * r + 182 * g + 19 * b) >> 8;
dst[x + histeq->rgba_map[A]] = luma;
histeq->in_histogram[luma]++;
}
src += inpic->linesize[0];
dst += outpic->linesize[0];
}
 
#ifdef DEBUG
for (x = 0; x < 256; x++)
av_dlog(ctx, "in[%d]: %u\n", x, histeq->in_histogram[x]);
#endif
 
/* Calculate the lookup table. */
histeq->LUT[0] = histeq->in_histogram[0];
/* Accumulate */
for (x = 1; x < 256; x++)
histeq->LUT[x] = histeq->LUT[x-1] + histeq->in_histogram[x];
 
/* Normalize */
for (x = 0; x < 256; x++)
histeq->LUT[x] = (histeq->LUT[x] * intensity) / (inlink->h * inlink->w);
 
/* Adjust the LUT based on the selected strength. This is an alpha
mix of the calculated LUT and a linear LUT with gain 1. */
for (x = 0; x < 256; x++)
histeq->LUT[x] = (strength * histeq->LUT[x]) / 255 +
((255 - strength) * x) / 255;
 
/* Output the equalized frame. */
memset(histeq->out_histogram, 0, sizeof(histeq->out_histogram));
 
src = inpic->data[0];
dst = outpic->data[0];
for (y = 0; y < inlink->h; y++) {
for (x = 0; x < inlink->w * histeq->bpp; x += histeq->bpp) {
luma = dst[x + histeq->rgba_map[A]];
if (luma == 0) {
for (i = 0; i < histeq->bpp; ++i)
dst[x + i] = 0;
histeq->out_histogram[0]++;
} else {
lut = histeq->LUT[luma];
if (histeq->antibanding != HISTEQ_ANTIBANDING_NONE) {
if (luma > 0) {
lutlo = histeq->antibanding == HISTEQ_ANTIBANDING_WEAK ?
(histeq->LUT[luma] + histeq->LUT[luma - 1]) / 2 :
histeq->LUT[luma - 1];
} else
lutlo = lut;
 
if (luma < 255) {
luthi = (histeq->antibanding == HISTEQ_ANTIBANDING_WEAK) ?
(histeq->LUT[luma] + histeq->LUT[luma + 1]) / 2 :
histeq->LUT[luma + 1];
} else
luthi = lut;
 
if (lutlo != luthi) {
jran = LCG(jran);
lut = lutlo + ((luthi - lutlo + 1) * jran) / LCG_M;
}
}
 
GET_RGB_VALUES(r, g, b, src, histeq->rgba_map);
if (((m = FFMAX3(r, g, b)) * lut) / luma > 255) {
r = (r * 255) / m;
g = (g * 255) / m;
b = (b * 255) / m;
} else {
r = (r * lut) / luma;
g = (g * lut) / luma;
b = (b * lut) / luma;
}
dst[x + histeq->rgba_map[R]] = r;
dst[x + histeq->rgba_map[G]] = g;
dst[x + histeq->rgba_map[B]] = b;
oluma = av_clip_uint8((55 * r + 182 * g + 19 * b) >> 8);
histeq->out_histogram[oluma]++;
}
}
src += inpic->linesize[0];
dst += outpic->linesize[0];
}
#ifdef DEBUG
for (x = 0; x < 256; x++)
av_dlog(ctx, "out[%d]: %u\n", x, histeq->out_histogram[x]);
#endif
 
av_frame_free(&inpic);
return ff_filter_frame(outlink, outpic);
}
 
static const AVFilterPad histeq_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad histeq_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_histeq = {
.name = "histeq",
.description = NULL_IF_CONFIG_SMALL("Apply global color histogram equalization."),
.priv_size = sizeof(HisteqContext),
.init = init,
.query_formats = query_formats,
.inputs = histeq_inputs,
.outputs = histeq_outputs,
.priv_class = &histeq_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
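
/* Example invocation (a sketch using the options defined above):
 *   ffmpeg -i in.mp4 -vf histeq=strength=0.2:antibanding=weak out.mp4 */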
/contrib/sdk/sources/ffmpeg/libavfilter/vf_histogram.c
0,0 → 1,376
/*
* Copyright (c) 2012-2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
enum HistogramMode {
MODE_LEVELS,
MODE_WAVEFORM,
MODE_COLOR,
MODE_COLOR2,
MODE_NB
};
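
/* As implemented in filter_frame() below: MODE_COLOR draws a vectorscope as
 * a density plot, each input (U,V) pair brightening the matching output
 * position while untouched positions keep their own chroma as background;
 * MODE_COLOR2 instead marks every occurring (U,V) once, with a luma
 * proportional to its distance from neutral chroma (|128-U| + |128-V|). */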
 
typedef struct HistogramContext {
const AVClass *class; ///< AVClass context for log and options purpose
enum HistogramMode mode;
unsigned histogram[256];
int ncomp;
const uint8_t *bg_color;
const uint8_t *fg_color;
int level_height;
int scale_height;
int step;
int waveform_mode;
int waveform_mirror;
int display_mode;
int levels_mode;
const AVPixFmtDescriptor *desc;
} HistogramContext;
 
#define OFFSET(x) offsetof(HistogramContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption histogram_options[] = {
{ "mode", "set histogram mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_LEVELS}, 0, MODE_NB-1, FLAGS, "mode"},
{ "levels", "standard histogram", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LEVELS}, 0, 0, FLAGS, "mode" },
{ "waveform", "per row/column luminance graph", 0, AV_OPT_TYPE_CONST, {.i64=MODE_WAVEFORM}, 0, 0, FLAGS, "mode" },
{ "color", "chroma values in vectorscope", 0, AV_OPT_TYPE_CONST, {.i64=MODE_COLOR}, 0, 0, FLAGS, "mode" },
{ "color2", "chroma values in vectorscope", 0, AV_OPT_TYPE_CONST, {.i64=MODE_COLOR2}, 0, 0, FLAGS, "mode" },
{ "level_height", "set level height", OFFSET(level_height), AV_OPT_TYPE_INT, {.i64=200}, 50, 2048, FLAGS},
{ "scale_height", "set scale height", OFFSET(scale_height), AV_OPT_TYPE_INT, {.i64=12}, 0, 40, FLAGS},
{ "step", "set waveform step value", OFFSET(step), AV_OPT_TYPE_INT, {.i64=10}, 1, 255, FLAGS},
{ "waveform_mode", "set waveform mode", OFFSET(waveform_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "waveform_mode"},
{ "row", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "waveform_mode" },
{ "column", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "waveform_mode" },
{ "waveform_mirror", "set waveform mirroring", OFFSET(waveform_mirror), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "waveform_mirror"},
{ "display_mode", "set display mode", OFFSET(display_mode), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "display_mode"},
{ "parade", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "display_mode" },
{ "overlay", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "display_mode" },
{ "levels_mode", "set levels mode", OFFSET(levels_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "levels_mode"},
{ "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "levels_mode" },
{ "logarithmic", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "levels_mode" },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(histogram);
 
static const enum AVPixelFormat color_pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_NONE
};
 
static const enum AVPixelFormat levels_pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
};
 
static const enum AVPixelFormat waveform_pix_fmts[] = {
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE
};
 
static int query_formats(AVFilterContext *ctx)
{
HistogramContext *h = ctx->priv;
const enum AVPixelFormat *pix_fmts;
 
switch (h->mode) {
case MODE_WAVEFORM:
pix_fmts = waveform_pix_fmts;
break;
case MODE_LEVELS:
pix_fmts = levels_pix_fmts;
break;
case MODE_COLOR:
case MODE_COLOR2:
pix_fmts = color_pix_fmts;
break;
default:
av_assert0(0);
}
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static const uint8_t black_yuva_color[4] = { 0, 127, 127, 255 };
static const uint8_t black_gbrp_color[4] = { 0, 0, 0, 255 };
static const uint8_t white_yuva_color[4] = { 255, 127, 127, 255 };
static const uint8_t white_gbrp_color[4] = { 255, 255, 255, 255 };
 
static int config_input(AVFilterLink *inlink)
{
HistogramContext *h = inlink->dst->priv;
 
h->desc = av_pix_fmt_desc_get(inlink->format);
h->ncomp = h->desc->nb_components;
 
switch (inlink->format) {
case AV_PIX_FMT_GBRAP:
case AV_PIX_FMT_GBRP:
h->bg_color = black_gbrp_color;
h->fg_color = white_gbrp_color;
break;
default:
h->bg_color = black_yuva_color;
h->fg_color = white_yuva_color;
}
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
HistogramContext *h = ctx->priv;
 
switch (h->mode) {
case MODE_LEVELS:
outlink->w = 256;
outlink->h = (h->level_height + h->scale_height) * FFMAX(h->ncomp * h->display_mode, 1);
break;
case MODE_WAVEFORM:
if (h->waveform_mode)
outlink->h = 256 * FFMAX(h->ncomp * h->display_mode, 1);
else
outlink->w = 256 * FFMAX(h->ncomp * h->display_mode, 1);
break;
case MODE_COLOR:
case MODE_COLOR2:
outlink->h = outlink->w = 256;
break;
default:
av_assert0(0);
}
 
outlink->sample_aspect_ratio = (AVRational){1,1};
 
return 0;
}
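
/* gen_waveform() draws one component as a per-row (col_mode=0) or per-column
 * (col_mode=1) trace: every source pixel bumps one target pixel by
 * `intensity`, saturating at 255. `offset` shifts the trace so the planes can
 * be laid out as a parade, and `mirror` reverses the trace direction. */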
 
static void gen_waveform(HistogramContext *h, AVFrame *inpicref, AVFrame *outpicref,
int component, int intensity, int offset, int col_mode)
{
const int plane = h->desc->comp[component].plane;
const int mirror = h->waveform_mirror;
const int is_chroma = (component == 1 || component == 2);
const int shift_w = (is_chroma ? h->desc->log2_chroma_w : 0);
const int shift_h = (is_chroma ? h->desc->log2_chroma_h : 0);
const int src_linesize = inpicref->linesize[plane];
const int dst_linesize = outpicref->linesize[plane];
const int dst_signed_linesize = dst_linesize * (mirror == 1 ? -1 : 1);
uint8_t *src_data = inpicref->data[plane];
uint8_t *dst_data = outpicref->data[plane] + (col_mode ? (offset >> shift_h) * dst_linesize : offset >> shift_w);
uint8_t * const dst_bottom_line = dst_data + dst_linesize * ((256 >> shift_h) - 1);
uint8_t * const dst_line = (mirror ? dst_bottom_line : dst_data);
const uint8_t max = 255 - intensity;
const int src_h = FF_CEIL_RSHIFT(inpicref->height, shift_h);
const int src_w = FF_CEIL_RSHIFT(inpicref->width, shift_w);
uint8_t *dst, *p;
int y;
 
if (!col_mode && mirror)
dst_data += 256 >> shift_w;
for (y = 0; y < src_h; y++) {
const uint8_t *src_data_end = src_data + src_w;
dst = dst_line;
for (p = src_data; p < src_data_end; p++) {
uint8_t *target;
if (col_mode) {
target = dst++ + dst_signed_linesize * (*p >> shift_h);
} else {
if (mirror)
target = dst_data - (*p >> shift_w);
else
target = dst_data + (*p >> shift_w);
}
if (*target <= max)
*target += intensity;
else
*target = 255;
}
src_data += src_linesize;
dst_data += dst_linesize;
}
}
 
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
HistogramContext *h = inlink->dst->priv;
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
const uint8_t *src;
uint8_t *dst;
int i, j, k, l;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
 
out->pts = in->pts;
 
for (k = 0; k < h->ncomp; k++) {
const int is_chroma = (k == 1 || k == 2);
const int dst_h = FF_CEIL_RSHIFT(outlink->h, (is_chroma ? h->desc->log2_chroma_h : 0));
const int dst_w = FF_CEIL_RSHIFT(outlink->w, (is_chroma ? h->desc->log2_chroma_w : 0));
for (i = 0; i < dst_h ; i++)
memset(out->data[h->desc->comp[k].plane] +
i * out->linesize[h->desc->comp[k].plane],
h->bg_color[k], dst_w);
}
 
switch (h->mode) {
case MODE_LEVELS:
for (k = 0; k < h->ncomp; k++) {
const int p = h->desc->comp[k].plane;
const int start = k * (h->level_height + h->scale_height) * h->display_mode;
double max_hval_log;
unsigned max_hval = 0;
 
for (i = 0; i < in->height; i++) {
src = in->data[p] + i * in->linesize[p];
for (j = 0; j < in->width; j++)
h->histogram[src[j]]++;
}
 
for (i = 0; i < 256; i++)
max_hval = FFMAX(max_hval, h->histogram[i]);
max_hval_log = log2(max_hval + 1);
 
for (i = 0; i < outlink->w; i++) {
int col_height;
 
if (h->levels_mode)
col_height = round(h->level_height * (1. - (log2(h->histogram[i] + 1) / max_hval_log)));
else
col_height = h->level_height - (h->histogram[i] * (int64_t)h->level_height + max_hval - 1) / max_hval;
 
for (j = h->level_height - 1; j >= col_height; j--) {
if (h->display_mode) {
for (l = 0; l < h->ncomp; l++)
out->data[l][(j + start) * out->linesize[l] + i] = h->fg_color[l];
} else {
out->data[p][(j + start) * out->linesize[p] + i] = 255;
}
}
for (j = h->level_height + h->scale_height - 1; j >= h->level_height; j--)
out->data[p][(j + start) * out->linesize[p] + i] = i;
}
 
memset(h->histogram, 0, 256 * sizeof(unsigned));
}
break;
case MODE_WAVEFORM:
for (k = 0; k < h->ncomp; k++) {
const int offset = k * 256 * h->display_mode;
gen_waveform(h, in, out, k, h->step, offset, h->waveform_mode);
}
break;
case MODE_COLOR:
for (i = 0; i < inlink->h; i++) {
const int iw1 = i * in->linesize[1];
const int iw2 = i * in->linesize[2];
for (j = 0; j < inlink->w; j++) {
const int pos = in->data[1][iw1 + j] * out->linesize[0] + in->data[2][iw2 + j];
if (out->data[0][pos] < 255)
out->data[0][pos]++;
}
}
for (i = 0; i < 256; i++) {
dst = out->data[0] + i * out->linesize[0];
for (j = 0; j < 256; j++) {
if (!dst[j]) {
out->data[1][i * out->linesize[0] + j] = i;
out->data[2][i * out->linesize[0] + j] = j;
}
}
}
break;
case MODE_COLOR2:
for (i = 0; i < inlink->h; i++) {
const int iw1 = i * in->linesize[1];
const int iw2 = i * in->linesize[2];
for (j = 0; j < inlink->w; j++) {
const int u = in->data[1][iw1 + j];
const int v = in->data[2][iw2 + j];
const int pos = u * out->linesize[0] + v;
if (!out->data[0][pos])
out->data[0][pos] = FFABS(128 - u) + FFABS(128 - v);
out->data[1][pos] = u;
out->data[2][pos] = v;
}
}
break;
default:
av_assert0(0);
}
 
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
 
static const AVFilterPad inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_histogram = {
.name = "histogram",
.description = NULL_IF_CONFIG_SMALL("Compute and draw a histogram."),
.priv_size = sizeof(HistogramContext),
.query_formats = query_formats,
.inputs = inputs,
.outputs = outputs,
.priv_class = &histogram_class,
};
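
/* Example invocation (a sketch using the options defined above):
 *   ffmpeg -i in.mp4 -vf histogram=mode=waveform:waveform_mode=column out.mp4 */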
/contrib/sdk/sources/ffmpeg/libavfilter/vf_hqdn3d.c
0,0 → 1,363
/*
* Copyright (c) 2003 Daniel Moreno <comac AT comac DOT darktech DOT org>
* Copyright (c) 2010 Baptiste Coudurier
* Copyright (c) 2012 Loren Merritt
*
* This file is part of FFmpeg, ported from MPlayer.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* high quality 3d video denoiser, ported from MPlayer
* libmpcodecs/vf_hqdn3d.c.
*/
 
#include <float.h>
 
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "vf_hqdn3d.h"
 
#define LUT_BITS (depth==16 ? 8 : 4)
#define LOAD(x) (((depth == 8 ? src[x] : AV_RN16A(src + (x) * 2)) << (16 - depth))\
+ (((1 << (16 - depth)) - 1) >> 1))
#define STORE(x,val) (depth == 8 ? dst[x] = (val) >> (16 - depth) : \
AV_WN16A(dst + (x) * 2, (val) >> (16 - depth)))
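
/* lowpass() below is a LUT-driven IIR step: the signed difference
 * (prev - cur), quantized to LUT_BITS of fractional precision, indexes a
 * precomputed correction that pulls cur toward prev. Callers advance the
 * coef pointer by 256 << LUT_BITS first, so negative differences index below
 * the midpoint. */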
 
av_always_inline
static uint32_t lowpass(int prev, int cur, int16_t *coef, int depth)
{
int d = (prev - cur) >> (8 - LUT_BITS);
return cur + coef[d];
}
 
av_always_inline
static void denoise_temporal(uint8_t *src, uint8_t *dst,
uint16_t *frame_ant,
int w, int h, int sstride, int dstride,
int16_t *temporal, int depth)
{
long x, y;
uint32_t tmp;
 
temporal += 256 << LUT_BITS;
 
for (y = 0; y < h; y++) {
for (x = 0; x < w; x++) {
frame_ant[x] = tmp = lowpass(frame_ant[x], LOAD(x), temporal, depth);
STORE(x, tmp);
}
src += sstride;
dst += dstride;
frame_ant += w;
}
}
 
av_always_inline
static void denoise_spatial(HQDN3DContext *s,
uint8_t *src, uint8_t *dst,
uint16_t *line_ant, uint16_t *frame_ant,
int w, int h, int sstride, int dstride,
int16_t *spatial, int16_t *temporal, int depth)
{
long x, y;
uint32_t pixel_ant;
uint32_t tmp;
 
spatial += 256 << LUT_BITS;
temporal += 256 << LUT_BITS;
 
    /* The first line has no top neighbour: only the left one feeds both the
     * spatial pass (line_ant) and the temporal pass (frame_ant). */
pixel_ant = LOAD(0);
for (x = 0; x < w; x++) {
line_ant[x] = tmp = pixel_ant = lowpass(pixel_ant, LOAD(x), spatial, depth);
frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
STORE(x, tmp);
}
 
for (y = 1; y < h; y++) {
src += sstride;
dst += dstride;
frame_ant += w;
if (s->denoise_row[depth]) {
s->denoise_row[depth](src, dst, line_ant, frame_ant, w, spatial, temporal);
continue;
}
pixel_ant = LOAD(0);
for (x = 0; x < w-1; x++) {
line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial, depth);
pixel_ant = lowpass(pixel_ant, LOAD(x+1), spatial, depth);
frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
STORE(x, tmp);
}
line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial, depth);
frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
STORE(x, tmp);
}
}
 
av_always_inline
static void denoise_depth(HQDN3DContext *s,
uint8_t *src, uint8_t *dst,
uint16_t *line_ant, uint16_t **frame_ant_ptr,
int w, int h, int sstride, int dstride,
int16_t *spatial, int16_t *temporal, int depth)
{
// FIXME: For 16bit depth, frame_ant could be a pointer to the previous
// filtered frame rather than a separate buffer.
long x, y;
uint16_t *frame_ant = *frame_ant_ptr;
if (!frame_ant) {
uint8_t *frame_src = src;
*frame_ant_ptr = frame_ant = av_malloc(w*h*sizeof(uint16_t));
for (y = 0; y < h; y++, src += sstride, frame_ant += w)
for (x = 0; x < w; x++)
frame_ant[x] = LOAD(x);
src = frame_src;
frame_ant = *frame_ant_ptr;
}
 
if (spatial[0])
denoise_spatial(s, src, dst, line_ant, frame_ant,
w, h, sstride, dstride, spatial, temporal, depth);
else
denoise_temporal(src, dst, frame_ant,
w, h, sstride, dstride, temporal, depth);
}
 
#define denoise(...) \
switch (s->depth) {\
case 8: denoise_depth(__VA_ARGS__, 8); break;\
case 9: denoise_depth(__VA_ARGS__, 9); break;\
case 10: denoise_depth(__VA_ARGS__, 10); break;\
case 16: denoise_depth(__VA_ARGS__, 16); break;\
}
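
/* precalc_coefs() below bakes the strength (dist25) into the lowpass() LUT:
 * gamma = log(0.25) / log(1 - min(dist25, 252)/255 - 1e-5) is chosen so that
 * a pixel difference of dist25 (out of 255) is attenuated to roughly a
 * quarter of its value; each bin stores lrint(pow(simil, gamma) * 256 * f)
 * for the midpoint f of that bin. */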
 
static int16_t *precalc_coefs(double dist25, int depth)
{
int i;
double gamma, simil, C;
int16_t *ct = av_malloc((512<<LUT_BITS)*sizeof(int16_t));
if (!ct)
return NULL;
 
gamma = log(0.25) / log(1.0 - FFMIN(dist25,252.0)/255.0 - 0.00001);
 
for (i = -255<<LUT_BITS; i <= 255<<LUT_BITS; i++) {
double f = ((i<<(9-LUT_BITS)) + (1<<(8-LUT_BITS)) - 1) / 512.0; // midpoint of the bin
simil = 1.0 - FFABS(f) / 255.0;
C = pow(simil, gamma) * 256.0 * f;
ct[(256<<LUT_BITS)+i] = lrint(C);
}
 
ct[0] = !!dist25;
return ct;
}
 
#define PARAM1_DEFAULT 4.0
#define PARAM2_DEFAULT 3.0
#define PARAM3_DEFAULT 6.0
 
static av_cold int init(AVFilterContext *ctx)
{
HQDN3DContext *s = ctx->priv;
 
if (!s->strength[LUMA_SPATIAL])
s->strength[LUMA_SPATIAL] = PARAM1_DEFAULT;
if (!s->strength[CHROMA_SPATIAL])
s->strength[CHROMA_SPATIAL] = PARAM2_DEFAULT * s->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
if (!s->strength[LUMA_TMP])
s->strength[LUMA_TMP] = PARAM3_DEFAULT * s->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
if (!s->strength[CHROMA_TMP])
s->strength[CHROMA_TMP] = s->strength[LUMA_TMP] * s->strength[CHROMA_SPATIAL] / s->strength[LUMA_SPATIAL];
 
av_log(ctx, AV_LOG_VERBOSE, "ls:%f cs:%f lt:%f ct:%f\n",
s->strength[LUMA_SPATIAL], s->strength[CHROMA_SPATIAL],
s->strength[LUMA_TMP], s->strength[CHROMA_TMP]);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
HQDN3DContext *s = ctx->priv;
 
av_freep(&s->coefs[0]);
av_freep(&s->coefs[1]);
av_freep(&s->coefs[2]);
av_freep(&s->coefs[3]);
av_freep(&s->line);
av_freep(&s->frame_prev[0]);
av_freep(&s->frame_prev[1]);
av_freep(&s->frame_prev[2]);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_YUV420P9,
AV_PIX_FMT_YUV422P9,
AV_PIX_FMT_YUV444P9,
AV_PIX_FMT_YUV420P10,
AV_PIX_FMT_YUV422P10,
AV_PIX_FMT_YUV444P10,
AV_PIX_FMT_YUV420P16,
AV_PIX_FMT_YUV422P16,
AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
HQDN3DContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int i;
 
uninit(inlink->dst);
 
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
s->depth = desc->comp[0].depth_minus1+1;
 
s->line = av_malloc(inlink->w * sizeof(*s->line));
if (!s->line)
return AVERROR(ENOMEM);
 
for (i = 0; i < 4; i++) {
s->coefs[i] = precalc_coefs(s->strength[i], s->depth);
if (!s->coefs[i])
return AVERROR(ENOMEM);
}
 
if (ARCH_X86)
ff_hqdn3d_init_x86(s);
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
HQDN3DContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
 
AVFrame *out;
int direct, c;
 
if (av_frame_is_writable(in) && !ctx->is_disabled) {
direct = 1;
out = in;
} else {
direct = 0;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
 
av_frame_copy_props(out, in);
}
 
for (c = 0; c < 3; c++) {
denoise(s, in->data[c], out->data[c],
s->line, &s->frame_prev[c],
FF_CEIL_RSHIFT(in->width, (!!c * s->hsub)),
FF_CEIL_RSHIFT(in->height, (!!c * s->vsub)),
in->linesize[c], out->linesize[c],
s->coefs[c ? CHROMA_SPATIAL : LUMA_SPATIAL],
s->coefs[c ? CHROMA_TMP : LUMA_TMP]);
}
 
if (ctx->is_disabled) {
av_frame_free(&out);
return ff_filter_frame(outlink, in);
}
 
if (!direct)
av_frame_free(&in);
 
return ff_filter_frame(outlink, out);
}
 
#define OFFSET(x) offsetof(HQDN3DContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption hqdn3d_options[] = {
{ "luma_spatial", "spatial luma strength", OFFSET(strength[LUMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
{ "chroma_spatial", "spatial chroma strength", OFFSET(strength[CHROMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
{ "luma_tmp", "temporal luma strength", OFFSET(strength[LUMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
{ "chroma_tmp", "temporal chroma strength", OFFSET(strength[CHROMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(hqdn3d);
 
static const AVFilterPad avfilter_vf_hqdn3d_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
 
static const AVFilterPad avfilter_vf_hqdn3d_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO
},
{ NULL }
};
 
AVFilter avfilter_vf_hqdn3d = {
.name = "hqdn3d",
.description = NULL_IF_CONFIG_SMALL("Apply a High Quality 3D Denoiser."),
.priv_size = sizeof(HQDN3DContext),
.priv_class = &hqdn3d_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = avfilter_vf_hqdn3d_inputs,
.outputs = avfilter_vf_hqdn3d_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
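
/* Example invocation, positional options in the order defined above:
 *   ffmpeg -i in.mp4 -vf hqdn3d=4:3:6:4.5 out.mp4
 * Unset values are derived in init() from the ones given, mirroring the
 * PARAM*_DEFAULT ratios. */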
/contrib/sdk/sources/ffmpeg/libavfilter/vf_hqdn3d.h
0,0 → 1,49
/*
* Copyright (c) 2003 Daniel Moreno <comac AT comac DOT darktech DOT org>
* Copyright (c) 2010 Baptiste Coudurier
* Copyright (c) 2012 Loren Merritt
*
* This file is part of FFmpeg, ported from MPlayer.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef AVFILTER_VF_HQDN3D_H
#define AVFILTER_VF_HQDN3D_H
 
#include <stddef.h>
#include <stdint.h>
 
#include "libavutil/opt.h"
 
typedef struct {
const AVClass *class;
int16_t *coefs[4];
uint16_t *line;
uint16_t *frame_prev[3];
double strength[4];
int hsub, vsub;
int depth;
void (*denoise_row[17])(uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial, int16_t *temporal);
} HQDN3DContext;
 
#define LUMA_SPATIAL 0
#define LUMA_TMP 1
#define CHROMA_SPATIAL 2
#define CHROMA_TMP 3
 
void ff_hqdn3d_init_x86(HQDN3DContext *hqdn3d);
 
#endif /* AVFILTER_VF_HQDN3D_H */
/contrib/sdk/sources/ffmpeg/libavfilter/vf_hue.c
0,0 → 1,449
/*
* Copyright (c) 2003 Michael Niedermayer
* Copyright (c) 2012 Jeremy Tran
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Apply a hue/saturation filter to the input video
* Ported from MPlayer libmpcodecs/vf_hue.c.
*/
 
#include <float.h>
#include "libavutil/eval.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
#define SAT_MIN_VAL -10
#define SAT_MAX_VAL 10
 
static const char *const var_names[] = {
"n", // frame count
"pts", // presentation timestamp expressed in AV_TIME_BASE units
"r", // frame rate
"t", // timestamp expressed in seconds
"tb", // timebase
NULL
};
 
enum var_name {
VAR_N,
VAR_PTS,
VAR_R,
VAR_T,
VAR_TB,
VAR_NB
};
 
typedef struct {
const AVClass *class;
float hue_deg; /* hue expressed in degrees */
float hue; /* hue expressed in radians */
char *hue_deg_expr;
char *hue_expr;
AVExpr *hue_deg_pexpr;
AVExpr *hue_pexpr;
float saturation;
char *saturation_expr;
AVExpr *saturation_pexpr;
float brightness;
char *brightness_expr;
AVExpr *brightness_pexpr;
int hsub;
int vsub;
int32_t hue_sin;
int32_t hue_cos;
double var_values[VAR_NB];
uint8_t lut_l[256];
uint8_t lut_u[256][256];
uint8_t lut_v[256][256];
} HueContext;
 
#define OFFSET(x) offsetof(HueContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption hue_options[] = {
{ "h", "set the hue angle degrees expression", OFFSET(hue_deg_expr), AV_OPT_TYPE_STRING,
{ .str = NULL }, .flags = FLAGS },
{ "s", "set the saturation expression", OFFSET(saturation_expr), AV_OPT_TYPE_STRING,
{ .str = "1" }, .flags = FLAGS },
{ "H", "set the hue angle radians expression", OFFSET(hue_expr), AV_OPT_TYPE_STRING,
{ .str = NULL }, .flags = FLAGS },
{ "b", "set the brightness expression", OFFSET(brightness_expr), AV_OPT_TYPE_STRING,
{ .str = "0" }, .flags = FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(hue);
 
static inline void compute_sin_and_cos(HueContext *hue)
{
/*
* Scale the value to the norm of the resulting (U,V) vector, that is
* the saturation.
* This will be useful in the apply_lut function.
*/
hue->hue_sin = rint(sin(hue->hue) * (1 << 16) * hue->saturation);
hue->hue_cos = rint(cos(hue->hue) * (1 << 16) * hue->saturation);
}
 
static inline void create_luma_lut(HueContext *h)
{
const float b = h->brightness;
int i;
 
for (i = 0; i < 256; i++) {
h->lut_l[i] = av_clip_uint8(i + b * 25.5);
}
}
 
static inline void create_chrominance_lut(HueContext *h, const int32_t c,
const int32_t s)
{
int32_t i, j, u, v, new_u, new_v;
 
/*
* If we consider U and V as the components of a 2D vector then its angle
* is the hue and the norm is the saturation
*/
for (i = 0; i < 256; i++) {
for (j = 0; j < 256; j++) {
            /* Normalize the components from range [16;240] to [-112;112] */
u = i - 128;
v = j - 128;
/*
* Apply the rotation of the vector : (c * u) - (s * v)
* (s * u) + (c * v)
* De-normalize the components (without forgetting to scale 128
* by << 16)
* Finally scale back the result by >> 16
*/
new_u = ((c * u) - (s * v) + (1 << 15) + (128 << 16)) >> 16;
new_v = ((s * u) + (c * v) + (1 << 15) + (128 << 16)) >> 16;
 
/* Prevent a potential overflow */
h->lut_u[i][j] = av_clip_uint8_c(new_u);
h->lut_v[i][j] = av_clip_uint8_c(new_v);
}
}
}
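
/* Worked example (a sketch): with hue = 90 degrees and saturation = 1,
 * hue_cos = 0 and hue_sin = 65536, and the expressions above collapse to
 * lut_u[i][j] = av_clip_uint8(256 - j) and lut_v[i][j] = i, i.e. a
 * quarter-turn of the chroma plane that swaps the U and V axes. */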
 
static int set_expr(AVExpr **pexpr_ptr, char **expr_ptr,
const char *expr, const char *option, void *log_ctx)
{
int ret;
AVExpr *new_pexpr;
char *new_expr;
 
new_expr = av_strdup(expr);
if (!new_expr)
return AVERROR(ENOMEM);
ret = av_expr_parse(&new_pexpr, expr, var_names,
NULL, NULL, NULL, NULL, 0, log_ctx);
if (ret < 0) {
av_log(log_ctx, AV_LOG_ERROR,
"Error when evaluating the expression '%s' for %s\n",
expr, option);
av_free(new_expr);
return ret;
}
 
if (*pexpr_ptr)
av_expr_free(*pexpr_ptr);
*pexpr_ptr = new_pexpr;
av_freep(expr_ptr);
*expr_ptr = new_expr;
 
return 0;
}
 
static av_cold int init(AVFilterContext *ctx)
{
HueContext *hue = ctx->priv;
int ret;
 
if (hue->hue_expr && hue->hue_deg_expr) {
av_log(ctx, AV_LOG_ERROR,
"H and h options are incompatible and cannot be specified "
"at the same time\n");
return AVERROR(EINVAL);
}
 
#define SET_EXPR(expr, option) \
if (hue->expr##_expr) do { \
ret = set_expr(&hue->expr##_pexpr, &hue->expr##_expr, \
hue->expr##_expr, option, ctx); \
if (ret < 0) \
return ret; \
} while (0)
SET_EXPR(brightness, "b");
SET_EXPR(saturation, "s");
SET_EXPR(hue_deg, "h");
SET_EXPR(hue, "H");
#undef SET_EXPR
 
av_log(ctx, AV_LOG_VERBOSE,
"H_expr:%s h_deg_expr:%s s_expr:%s b_expr:%s\n",
hue->hue_expr, hue->hue_deg_expr, hue->saturation_expr, hue->brightness_expr);
compute_sin_and_cos(hue);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
HueContext *hue = ctx->priv;
 
av_expr_free(hue->brightness_pexpr);
av_expr_free(hue->hue_deg_pexpr);
av_expr_free(hue->hue_pexpr);
av_expr_free(hue->saturation_pexpr);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int config_props(AVFilterLink *inlink)
{
HueContext *hue = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
hue->hsub = desc->log2_chroma_w;
hue->vsub = desc->log2_chroma_h;
 
hue->var_values[VAR_N] = 0;
hue->var_values[VAR_TB] = av_q2d(inlink->time_base);
hue->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
NAN : av_q2d(inlink->frame_rate);
 
return 0;
}
 
static void apply_luma_lut(HueContext *s,
uint8_t *ldst, const int dst_linesize,
uint8_t *lsrc, const int src_linesize,
int w, int h)
{
int i;
 
while (h--) {
for (i = 0; i < w; i++)
ldst[i] = s->lut_l[lsrc[i]];
 
lsrc += src_linesize;
ldst += dst_linesize;
}
}
 
static void apply_lut(HueContext *s,
uint8_t *udst, uint8_t *vdst, const int dst_linesize,
uint8_t *usrc, uint8_t *vsrc, const int src_linesize,
int w, int h)
{
int i;
 
while (h--) {
for (i = 0; i < w; i++) {
const int u = usrc[i];
const int v = vsrc[i];
 
udst[i] = s->lut_u[u][v];
vdst[i] = s->lut_v[u][v];
}
 
usrc += src_linesize;
vsrc += src_linesize;
udst += dst_linesize;
vdst += dst_linesize;
}
}
 
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
HueContext *hue = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpic;
const int32_t old_hue_sin = hue->hue_sin, old_hue_cos = hue->hue_cos;
const float old_brightness = hue->brightness;
int direct = 0;
 
if (av_frame_is_writable(inpic)) {
direct = 1;
outpic = inpic;
} else {
outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpic) {
av_frame_free(&inpic);
return AVERROR(ENOMEM);
}
av_frame_copy_props(outpic, inpic);
}
 
hue->var_values[VAR_N] = inlink->frame_count;
hue->var_values[VAR_T] = TS2T(inpic->pts, inlink->time_base);
hue->var_values[VAR_PTS] = TS2D(inpic->pts);
 
if (hue->saturation_expr) {
hue->saturation = av_expr_eval(hue->saturation_pexpr, hue->var_values, NULL);
 
if (hue->saturation < SAT_MIN_VAL || hue->saturation > SAT_MAX_VAL) {
            hue->saturation = av_clipf(hue->saturation, SAT_MIN_VAL, SAT_MAX_VAL);
av_log(inlink->dst, AV_LOG_WARNING,
"Saturation value not in range [%d,%d]: clipping value to %0.1f\n",
SAT_MIN_VAL, SAT_MAX_VAL, hue->saturation);
}
}
 
if (hue->brightness_expr) {
hue->brightness = av_expr_eval(hue->brightness_pexpr, hue->var_values, NULL);
 
if (hue->brightness < -10 || hue->brightness > 10) {
hue->brightness = av_clipf(hue->brightness, -10, 10);
av_log(inlink->dst, AV_LOG_WARNING,
"Brightness value not in range [%d,%d]: clipping value to %0.1f\n",
-10, 10, hue->brightness);
}
}
 
if (hue->hue_deg_expr) {
hue->hue_deg = av_expr_eval(hue->hue_deg_pexpr, hue->var_values, NULL);
hue->hue = hue->hue_deg * M_PI / 180;
} else if (hue->hue_expr) {
hue->hue = av_expr_eval(hue->hue_pexpr, hue->var_values, NULL);
hue->hue_deg = hue->hue * 180 / M_PI;
}
 
av_log(inlink->dst, AV_LOG_DEBUG,
"H:%0.1f*PI h:%0.1f s:%0.f b:%0.f t:%0.1f n:%d\n",
hue->hue/M_PI, hue->hue_deg, hue->saturation, hue->brightness,
hue->var_values[VAR_T], (int)hue->var_values[VAR_N]);
 
compute_sin_and_cos(hue);
if (old_hue_sin != hue->hue_sin || old_hue_cos != hue->hue_cos)
create_chrominance_lut(hue, hue->hue_cos, hue->hue_sin);
 
if (old_brightness != hue->brightness && hue->brightness)
create_luma_lut(hue);
 
if (!direct) {
if (!hue->brightness)
av_image_copy_plane(outpic->data[0], outpic->linesize[0],
inpic->data[0], inpic->linesize[0],
inlink->w, inlink->h);
if (inpic->data[3])
av_image_copy_plane(outpic->data[3], outpic->linesize[3],
inpic->data[3], inpic->linesize[3],
inlink->w, inlink->h);
}
 
apply_lut(hue, outpic->data[1], outpic->data[2], outpic->linesize[1],
inpic->data[1], inpic->data[2], inpic->linesize[1],
FF_CEIL_RSHIFT(inlink->w, hue->hsub),
FF_CEIL_RSHIFT(inlink->h, hue->vsub));
if (hue->brightness)
apply_luma_lut(hue, outpic->data[0], outpic->linesize[0],
inpic->data[0], inpic->linesize[0], inlink->w, inlink->h);
 
if (!direct)
av_frame_free(&inpic);
return ff_filter_frame(outlink, outpic);
}
 
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
char *res, int res_len, int flags)
{
HueContext *hue = ctx->priv;
int ret;
 
#define SET_EXPR(expr, option) \
do { \
ret = set_expr(&hue->expr##_pexpr, &hue->expr##_expr, \
args, option, ctx); \
if (ret < 0) \
return ret; \
} while (0)
 
if (!strcmp(cmd, "h")) {
SET_EXPR(hue_deg, "h");
av_freep(&hue->hue_expr);
} else if (!strcmp(cmd, "H")) {
SET_EXPR(hue, "H");
av_freep(&hue->hue_deg_expr);
} else if (!strcmp(cmd, "s")) {
SET_EXPR(saturation, "s");
} else if (!strcmp(cmd, "b")) {
SET_EXPR(brightness, "b");
} else
return AVERROR(ENOSYS);
 
return 0;
}
 
static const AVFilterPad hue_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
 
static const AVFilterPad hue_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_hue = {
.name = "hue",
.description = NULL_IF_CONFIG_SMALL("Adjust the hue and saturation of the input video."),
.priv_size = sizeof(HueContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.process_command = process_command,
.inputs = hue_inputs,
.outputs = hue_outputs,
.priv_class = &hue_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
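
/* Example invocation: ffmpeg -i in.mp4 -vf hue=h=90:s=1 out.mp4; the h, H,
 * s and b expressions can also be replaced at runtime through
 * process_command() above. */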
/contrib/sdk/sources/ffmpeg/libavfilter/vf_idet.c
0,0 → 1,308
/*
* Copyright (C) 2012 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <float.h> /* FLT_MAX */
 
#include "libavutil/cpu.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
 
#define HIST_SIZE 4
 
typedef enum {
TFF,
BFF,
    PROGRESSIVE,
UNDETERMINED,
} Type;
 
typedef struct {
const AVClass *class;
float interlace_threshold;
float progressive_threshold;
 
Type last_type;
int prestat[4];
int poststat[4];
 
uint8_t history[HIST_SIZE];
 
AVFrame *cur;
AVFrame *next;
AVFrame *prev;
int (*filter_line)(const uint8_t *prev, const uint8_t *cur, const uint8_t *next, int w);
 
const AVPixFmtDescriptor *csp;
} IDETContext;
 
#define OFFSET(x) offsetof(IDETContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption idet_options[] = {
{ "intl_thres", "set interlacing threshold", OFFSET(interlace_threshold), AV_OPT_TYPE_FLOAT, {.dbl = 1.04}, -1, FLT_MAX, FLAGS },
{ "prog_thres", "set progressive threshold", OFFSET(progressive_threshold), AV_OPT_TYPE_FLOAT, {.dbl = 1.5}, -1, FLT_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(idet);
 
static const char *type2str(Type type)
{
switch(type) {
case TFF : return "Top Field First ";
case BFF : return "Bottom Field First";
    case PROGRESSIVE : return "Progressive ";
case UNDETERMINED: return "Undetermined ";
}
return NULL;
}
 
static int filter_line_c(const uint8_t *a, const uint8_t *b, const uint8_t *c, int w)
{
int x;
int ret=0;
 
for(x=0; x<w; x++){
int v = (*a++ + *c++) - 2 * *b++;
ret += FFABS(v);
}
 
return ret;
}
 
static int filter_line_c_16bit(const uint16_t *a, const uint16_t *b, const uint16_t *c, int w)
{
int x;
int ret=0;
 
for(x=0; x<w; x++){
int v = (*a++ + *c++) - 2 * *b++;
ret += FFABS(v);
}
 
return ret;
}
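
/* Detection heuristic used by filter() below: alpha[] accumulates, per field
 * parity, the mismatch between each line's vertical neighbours in the
 * current frame and the matching line of the previous/next frame, while
 * delta accumulates the same measure within the current frame alone. One
 * alpha dominating the other by interlace_threshold yields TFF/BFF, alpha[1]
 * exceeding progressive_threshold * delta yields progressive, anything else
 * is undetermined. */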
 
static void filter(AVFilterContext *ctx)
{
IDETContext *idet = ctx->priv;
int y, i;
int64_t alpha[2]={0};
int64_t delta=0;
Type type, best_type;
int match = 0;
 
for (i = 0; i < idet->csp->nb_components; i++) {
int w = idet->cur->width;
int h = idet->cur->height;
int refs = idet->cur->linesize[i];
 
if (i && i<3) {
w = FF_CEIL_RSHIFT(w, idet->csp->log2_chroma_w);
h = FF_CEIL_RSHIFT(h, idet->csp->log2_chroma_h);
}
 
for (y = 2; y < h - 2; y++) {
uint8_t *prev = &idet->prev->data[i][y*refs];
uint8_t *cur = &idet->cur ->data[i][y*refs];
uint8_t *next = &idet->next->data[i][y*refs];
alpha[ y &1] += idet->filter_line(cur-refs, prev, cur+refs, w);
alpha[(y^1)&1] += idet->filter_line(cur-refs, next, cur+refs, w);
delta += idet->filter_line(cur-refs, cur, cur+refs, w);
}
}
 
if (alpha[0] > idet->interlace_threshold * alpha[1]){
type = TFF;
}else if(alpha[1] > idet->interlace_threshold * alpha[0]){
type = BFF;
}else if(alpha[1] > idet->progressive_threshold * delta){
        type = PROGRESSIVE;
}else{
type = UNDETERMINED;
}
 
memmove(idet->history+1, idet->history, HIST_SIZE-1);
idet->history[0] = type;
best_type = UNDETERMINED;
for(i=0; i<HIST_SIZE; i++){
if(idet->history[i] != UNDETERMINED){
if(best_type == UNDETERMINED)
best_type = idet->history[i];
 
if(idet->history[i] == best_type) {
match++;
}else{
match=0;
break;
}
}
}
if(idet->last_type == UNDETERMINED){
if(match ) idet->last_type = best_type;
}else{
if(match>2) idet->last_type = best_type;
}
 
if (idet->last_type == TFF){
idet->cur->top_field_first = 1;
idet->cur->interlaced_frame = 1;
}else if(idet->last_type == BFF){
idet->cur->top_field_first = 0;
idet->cur->interlaced_frame = 1;
    }else if(idet->last_type == PROGRESSIVE){
idet->cur->interlaced_frame = 0;
}
 
idet->prestat [ type] ++;
idet->poststat[idet->last_type] ++;
av_log(ctx, AV_LOG_DEBUG, "Single frame:%s, Multi frame:%s\n", type2str(type), type2str(idet->last_type));
}
 
static int filter_frame(AVFilterLink *link, AVFrame *picref)
{
AVFilterContext *ctx = link->dst;
IDETContext *idet = ctx->priv;
 
if (idet->prev)
av_frame_free(&idet->prev);
idet->prev = idet->cur;
idet->cur = idet->next;
idet->next = picref;
 
if (!idet->cur)
return 0;
 
if (!idet->prev)
idet->prev = av_frame_clone(idet->cur);
 
if (!idet->csp)
idet->csp = av_pix_fmt_desc_get(link->format);
if (idet->csp->comp[0].depth_minus1 / 8 == 1)
idet->filter_line = (void*)filter_line_c_16bit;
 
filter(ctx);
 
return ff_filter_frame(ctx->outputs[0], av_frame_clone(idet->cur));
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
IDETContext *idet = ctx->priv;
 
av_log(ctx, AV_LOG_INFO, "Single frame detection: TFF:%d BFF:%d Progressive:%d Undetermined:%d\n",
idet->prestat[TFF],
idet->prestat[BFF],
           idet->prestat[PROGRESSIVE],
idet->prestat[UNDETERMINED]
);
av_log(ctx, AV_LOG_INFO, "Multi frame detection: TFF:%d BFF:%d Progressive:%d Undetermined:%d\n",
idet->poststat[TFF],
idet->poststat[BFF],
           idet->poststat[PROGRESSIVE],
idet->poststat[UNDETERMINED]
);
 
av_frame_free(&idet->prev);
av_frame_free(&idet->cur );
av_frame_free(&idet->next);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV411P,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_GRAY16,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_YUV420P10,
AV_PIX_FMT_YUV422P10,
AV_PIX_FMT_YUV444P10,
AV_PIX_FMT_YUV420P16,
AV_PIX_FMT_YUV422P16,
AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
return 0;
}
 
static av_cold int init(AVFilterContext *ctx)
{
IDETContext *idet = ctx->priv;
 
idet->last_type = UNDETERMINED;
memset(idet->history, UNDETERMINED, HIST_SIZE);
 
idet->filter_line = filter_line_c;
 
return 0;
}
 
 
static const AVFilterPad idet_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad idet_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_idet = {
.name = "idet",
.description = NULL_IF_CONFIG_SMALL("Interlace detect Filter."),
.priv_size = sizeof(IDETContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = idet_inputs,
.outputs = idet_outputs,
.priv_class = &idet_class,
};
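
/* Example invocation: ffmpeg -i in.mp4 -vf idet -f null - ; per-frame types
 * go to the debug log and the single/multi frame totals are printed from
 * uninit() when the filter is torn down. */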
/contrib/sdk/sources/ffmpeg/libavfilter/vf_il.c
0,0 → 1,212
/*
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* (de)interleave fields filter
*/
 
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
 
enum FilterMode {
MODE_NONE,
MODE_INTERLEAVE,
MODE_DEINTERLEAVE
};
 
typedef struct {
const AVClass *class;
enum FilterMode luma_mode, chroma_mode, alpha_mode;
int luma_swap, chroma_swap, alpha_swap;
int nb_planes;
int linesize[4], chroma_height;
int has_alpha;
} IlContext;
 
#define OFFSET(x) offsetof(IlContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption il_options[] = {
{"luma_mode", "select luma mode", OFFSET(luma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "luma_mode"},
{"l", "select luma mode", OFFSET(luma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "luma_mode"},
{"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE}, 0, 0, FLAGS, "luma_mode"},
{"interleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
{"i", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
{"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
{"d", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
{"chroma_mode", "select chroma mode", OFFSET(chroma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "chroma_mode"},
{"c", "select chroma mode", OFFSET(chroma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "chroma_mode"},
{"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE}, 0, 0, FLAGS, "chroma_mode"},
{"interleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
{"i", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
{"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
{"d", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
{"alpha_mode", "select alpha mode", OFFSET(alpha_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "alpha_mode"},
{"a", "select alpha mode", OFFSET(alpha_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "alpha_mode"},
{"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE}, 0, 0, FLAGS, "alpha_mode"},
{"interleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
{"i", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
{"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
{"d", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
{"luma_swap", "swap luma fields", OFFSET(luma_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
{"ls", "swap luma fields", OFFSET(luma_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
{"chroma_swap", "swap chroma fields", OFFSET(chroma_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
{"cs", "swap chroma fields", OFFSET(chroma_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
{"alpha_swap", "swap alpha fields", OFFSET(alpha_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
{"as", "swap alpha fields", OFFSET(alpha_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
{NULL}
};
 
AVFILTER_DEFINE_CLASS(il);
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
int fmt;
 
for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
if (!(desc->flags & AV_PIX_FMT_FLAG_PAL) && !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
ff_add_format(&formats, fmt);
}
 
ff_set_common_formats(ctx, formats);
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
IlContext *il = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int ret;
 
il->nb_planes = av_pix_fmt_count_planes(inlink->format);
 
il->has_alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
if ((ret = av_image_fill_linesizes(il->linesize, inlink->format, inlink->w)) < 0)
return ret;
 
il->chroma_height = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
 
return 0;
}
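 
/* Field layout example for h = 4 and swap = 0:
 * MODE_DEINTERLEAVE: dst lines {0,1,2,3} = src lines {0,2,1,3}
 * (even source lines fill the top half, odd lines the bottom half);
 * MODE_INTERLEAVE applies the inverse permutation, and MODE_NONE is a
 * straight copy with an optional swap of each line pair. */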
 
static void interleave(uint8_t *dst, uint8_t *src, int w, int h,
int dst_linesize, int src_linesize,
enum FilterMode mode, int swap)
{
const int a = swap;
const int b = 1 - a;
const int m = h >> 1;
int y;
 
switch (mode) {
case MODE_DEINTERLEAVE:
for (y = 0; y < m; y++) {
memcpy(dst + dst_linesize * y , src + src_linesize * (y * 2 + a), w);
memcpy(dst + dst_linesize * (y + m), src + src_linesize * (y * 2 + b), w);
}
break;
case MODE_NONE:
for (y = 0; y < m; y++) {
memcpy(dst + dst_linesize * y * 2 , src + src_linesize * (y * 2 + a), w);
memcpy(dst + dst_linesize * (y * 2 + 1), src + src_linesize * (y * 2 + b), w);
}
break;
case MODE_INTERLEAVE:
for (y = 0; y < m; y++) {
memcpy(dst + dst_linesize * (y * 2 + a), src + src_linesize * y , w);
memcpy(dst + dst_linesize * (y * 2 + b), src + src_linesize * (y + m), w);
}
break;
}
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
IlContext *il = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int comp;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&inpicref);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, inpicref);
 
interleave(out->data[0], inpicref->data[0],
il->linesize[0], inlink->h,
out->linesize[0], inpicref->linesize[0],
il->luma_mode, il->luma_swap);
 
for (comp = 1; comp < (il->nb_planes - il->has_alpha); comp++) {
interleave(out->data[comp], inpicref->data[comp],
il->linesize[comp], il->chroma_height,
out->linesize[comp], inpicref->linesize[comp],
il->chroma_mode, il->chroma_swap);
}
 
if (il->has_alpha) {
comp = il->nb_planes - 1;
interleave(out->data[comp], inpicref->data[comp],
il->linesize[comp], inlink->h,
out->linesize[comp], inpicref->linesize[comp],
il->alpha_mode, il->alpha_swap);
}
 
av_frame_free(&inpicref);
return ff_filter_frame(outlink, out);
}
 
static const AVFilterPad inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_il = {
.name = "il",
.description = NULL_IF_CONFIG_SMALL("Deinterleave or interleave fields."),
.priv_size = sizeof(IlContext),
.query_formats = query_formats,
.inputs = inputs,
.outputs = outputs,
.priv_class = &il_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_interlace.c
0,0 → 1,242
/*
* Copyright (c) 2003 Michael Zucchi <notzed@ximian.com>
* Copyright (c) 2010 Baptiste Coudurier
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2013 Vittorio Giovara <vittorio.giovara@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* Progressive to interlaced content filter, inspired by heavy debugging of the tinterlace filter.
*/
 
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
 
#include "formats.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
enum ScanMode {
MODE_TFF = 0,
MODE_BFF = 1,
};
 
enum FieldType {
FIELD_UPPER = 0,
FIELD_LOWER = 1,
};
 
typedef struct {
const AVClass *class;
enum ScanMode scan; // top or bottom field first scanning
int lowpass; // enable or disable low-pass filtering
AVFrame *cur, *next; // the two frames from which the new one is obtained
} InterlaceContext;
 
#define OFFSET(x) offsetof(InterlaceContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
static const AVOption interlace_options[] = {
{ "scan", "scanning mode", OFFSET(scan),
AV_OPT_TYPE_INT, {.i64 = MODE_TFF }, 0, 1, .flags = V, .unit = "scan" },
{ "tff", "top field first", 0,
AV_OPT_TYPE_CONST, {.i64 = MODE_TFF }, INT_MIN, INT_MAX, .flags = V, .unit = "scan" },
{ "bff", "bottom field first", 0,
AV_OPT_TYPE_CONST, {.i64 = MODE_BFF }, INT_MIN, INT_MAX, .flags = V, .unit = "scan" },
{ "lowpass", "enable vertical low-pass filter", OFFSET(lowpass),
AV_OPT_TYPE_INT, {.i64 = 1 }, 0, 1, .flags = V },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(interlace);
 
static const enum AVPixelFormat formats_supported[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_GRAY8, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_NONE
};
 
static int query_formats(AVFilterContext *ctx)
{
ff_set_common_formats(ctx, ff_make_format_list(formats_supported));
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
InterlaceContext *s = ctx->priv;
 
av_frame_free(&s->cur);
av_frame_free(&s->next);
}
 
static int config_out_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = outlink->src->inputs[0];
InterlaceContext *s = ctx->priv;
 
if (inlink->h < 2) {
av_log(ctx, AV_LOG_ERROR, "input video height is too small\n");
return AVERROR_INVALIDDATA;
}
// same input size
outlink->w = inlink->w;
outlink->h = inlink->h;
outlink->time_base = inlink->time_base;
outlink->frame_rate = inlink->frame_rate;
// half framerate
outlink->time_base.num *= 2;
outlink->frame_rate.den *= 2;
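// e.g. a 50 fps input (time base 1/50) becomes a 25 fps output (time base 2/50, frame rate 50/2)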
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
 
av_log(ctx, AV_LOG_VERBOSE, "%s interlacing %s lowpass filter\n",
s->scan == MODE_TFF ? "tff" : "bff", (s->lowpass) ? "with" : "without");
 
return 0;
}
 
static void copy_picture_field(AVFrame *src_frame, AVFrame *dst_frame,
AVFilterLink *inlink, enum FieldType field_type,
int lowpass)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int vsub = desc->log2_chroma_h;
int plane, i, j;
 
for (plane = 0; plane < desc->nb_components; plane++) {
int lines = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
int linesize = av_image_get_linesize(inlink->format, inlink->w, plane);
uint8_t *dstp = dst_frame->data[plane];
const uint8_t *srcp = src_frame->data[plane];
 
av_assert0(linesize >= 0);
 
lines = (lines + (field_type == FIELD_UPPER)) / 2;
if (field_type == FIELD_LOWER)
srcp += src_frame->linesize[plane];
if (field_type == FIELD_LOWER)
dstp += dst_frame->linesize[plane];
if (lowpass) {
int srcp_linesize = src_frame->linesize[plane] * 2;
int dstp_linesize = dst_frame->linesize[plane] * 2;
for (j = lines; j > 0; j--) {
const uint8_t *srcp_above = srcp - src_frame->linesize[plane];
const uint8_t *srcp_below = srcp + src_frame->linesize[plane];
if (j == lines)
srcp_above = srcp; // there is no line above
if (j == 1)
srcp_below = srcp; // there is no line below
for (i = 0; i < linesize; i++) {
// this calculation is an integer representation of
// '0.5 * current + 0.25 * above + 0.25 * below'
// '1 +' is for rounding.
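// e.g. current = 100, above = 80, below = 120: (1 + 100 + 100 + 80 + 120) >> 2 = 100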
dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
}
dstp += dstp_linesize;
srcp += srcp_linesize;
}
} else {
av_image_copy_plane(dstp, dst_frame->linesize[plane] * 2,
srcp, src_frame->linesize[plane] * 2,
linesize, lines);
}
}
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
InterlaceContext *s = ctx->priv;
AVFrame *out;
int tff, ret;
 
av_frame_free(&s->cur);
s->cur = s->next;
s->next = buf;
 
/* we need at least two frames */
if (!s->cur || !s->next)
return 0;
 
if (s->cur->interlaced_frame) {
av_log(ctx, AV_LOG_WARNING,
"video is already interlaced, adjusting framerate only\n");
out = av_frame_clone(s->cur);
out->pts /= 2; // adjust pts to new framerate
ret = ff_filter_frame(outlink, out);
return ret;
}
 
tff = (s->scan == MODE_TFF);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
return AVERROR(ENOMEM);
 
av_frame_copy_props(out, s->cur);
out->interlaced_frame = 1;
out->top_field_first = tff;
out->pts /= 2; // adjust pts to new framerate
 
/* copy upper/lower field from cur */
copy_picture_field(s->cur, out, inlink, tff ? FIELD_UPPER : FIELD_LOWER, s->lowpass);
av_frame_free(&s->cur);
 
/* copy lower/upper field from next */
copy_picture_field(s->next, out, inlink, tff ? FIELD_LOWER : FIELD_UPPER, s->lowpass);
av_frame_free(&s->next);
 
ret = ff_filter_frame(outlink, out);
 
return ret;
}
 
static const AVFilterPad inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_out_props,
},
{ NULL }
};
 
AVFilter avfilter_vf_interlace = {
.name = "interlace",
.description = NULL_IF_CONFIG_SMALL("Convert progressive video into interlaced."),
.uninit = uninit,
.priv_class = &interlace_class,
.priv_size = sizeof(InterlaceContext),
.query_formats = query_formats,
.inputs = inputs,
.outputs = outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_kerndeint.c
0,0 → 1,318
/*
* Copyright (c) 2012 Jeremy Tran
* Copyright (c) 2004 Tobias Diedrich
* Copyright (c) 2003 Donald A. Graft
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* Kernel Deinterlacer
* Ported from MPlayer libmpcodecs/vf_kerndeint.c.
*/
 
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
int frame; ///< frame count, starting from 0
int thresh, map, order, sharp, twoway;
int vsub;
int is_packed_rgb;
uint8_t *tmp_data [4]; ///< temporary plane data buffer
int tmp_linesize[4]; ///< temporary plane byte linesize
int tmp_bwidth [4]; ///< temporary plane byte width
} KerndeintContext;
 
#define OFFSET(x) offsetof(KerndeintContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption kerndeint_options[] = {
{ "thresh", "set the threshold", OFFSET(thresh), AV_OPT_TYPE_INT, {.i64=10}, 0, 255, FLAGS },
{ "map", "set the map", OFFSET(map), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
{ "order", "set the order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
{ "sharp", "enable sharpening", OFFSET(sharp), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
{ "twoway", "enable twoway", OFFSET(twoway), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(kerndeint);
 
static av_cold void uninit(AVFilterContext *ctx)
{
KerndeintContext *kerndeint = ctx->priv;
 
av_free(kerndeint->tmp_data[0]);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUYV422,
AV_PIX_FMT_ARGB, AV_PIX_FMT_0RGB,
AV_PIX_FMT_ABGR, AV_PIX_FMT_0BGR,
AV_PIX_FMT_RGBA, AV_PIX_FMT_RGB0,
AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int config_props(AVFilterLink *inlink)
{
KerndeintContext *kerndeint = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int ret;
 
kerndeint->is_packed_rgb = desc->flags & AV_PIX_FMT_FLAG_RGB;
kerndeint->vsub = desc->log2_chroma_h;
 
ret = av_image_alloc(kerndeint->tmp_data, kerndeint->tmp_linesize,
inlink->w, inlink->h, inlink->format, 16);
if (ret < 0)
return ret;
memset(kerndeint->tmp_data[0], 0, ret);
 
if ((ret = av_image_fill_linesizes(kerndeint->tmp_bwidth, inlink->format, inlink->w)) < 0)
return ret;
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
KerndeintContext *kerndeint = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpic;
const uint8_t *prvp; ///< Previous field's pixel line number n
const uint8_t *prvpp; ///< Previous field's pixel line number (n - 1)
const uint8_t *prvpn; ///< Previous field's pixel line number (n + 1)
const uint8_t *prvppp; ///< Previous field's pixel line number (n - 2)
const uint8_t *prvpnn; ///< Previous field's pixel line number (n + 2)
const uint8_t *prvp4p; ///< Previous field's pixel line number (n - 4)
const uint8_t *prvp4n; ///< Previous field's pixel line number (n + 4)
 
const uint8_t *srcp; ///< Current field's pixel line number n
const uint8_t *srcpp; ///< Current field's pixel line number (n - 1)
const uint8_t *srcpn; ///< Current field's pixel line number (n + 1)
const uint8_t *srcppp; ///< Current field's pixel line number (n - 2)
const uint8_t *srcpnn; ///< Current field's pixel line number (n + 2)
const uint8_t *srcp3p; ///< Current field's pixel line number (n - 3)
const uint8_t *srcp3n; ///< Current field's pixel line number (n + 3)
const uint8_t *srcp4p; ///< Current field's pixel line number (n - 4)
const uint8_t *srcp4n; ///< Current field's pixel line number (n + 4)
 
uint8_t *dstp, *dstp_saved;
const uint8_t *srcp_saved;
 
int src_linesize, psrc_linesize, dst_linesize, bwidth;
int x, y, plane, val, hi, lo, g, h, n = kerndeint->frame++;
double valf;
 
const int thresh = kerndeint->thresh;
const int order = kerndeint->order;
const int map = kerndeint->map;
const int sharp = kerndeint->sharp;
const int twoway = kerndeint->twoway;
 
const int is_packed_rgb = kerndeint->is_packed_rgb;
 
outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpic) {
av_frame_free(&inpic);
return AVERROR(ENOMEM);
}
av_frame_copy_props(outpic, inpic);
outpic->interlaced_frame = 0;
 
for (plane = 0; plane < 4 && inpic->data[plane] && inpic->linesize[plane]; plane++) {
h = plane == 0 ? inlink->h : FF_CEIL_RSHIFT(inlink->h, kerndeint->vsub);
bwidth = kerndeint->tmp_bwidth[plane];
 
srcp = srcp_saved = inpic->data[plane];
src_linesize = inpic->linesize[plane];
psrc_linesize = kerndeint->tmp_linesize[plane];
dstp = dstp_saved = outpic->data[plane];
dst_linesize = outpic->linesize[plane];
srcp = srcp_saved + (1 - order) * src_linesize;
dstp = dstp_saved + (1 - order) * dst_linesize;
 
for (y = 0; y < h; y += 2) {
memcpy(dstp, srcp, bwidth);
srcp += 2 * src_linesize;
dstp += 2 * dst_linesize;
}
 
// Copy through the lines that will be missed below.
memcpy(dstp_saved + order * dst_linesize, srcp_saved + (1 - order) * src_linesize, bwidth);
memcpy(dstp_saved + (2 + order ) * dst_linesize, srcp_saved + (3 - order) * src_linesize, bwidth);
memcpy(dstp_saved + (h - 2 + order) * dst_linesize, srcp_saved + (h - 1 - order) * src_linesize, bwidth);
memcpy(dstp_saved + (h - 4 + order) * dst_linesize, srcp_saved + (h - 3 - order) * src_linesize, bwidth);
 
/* For the other field, choose adaptively between using the previous field
or the interpolant from the current field. */
prvp = kerndeint->tmp_data[plane] + 5 * psrc_linesize - (1 - order) * psrc_linesize;
prvpp = prvp - psrc_linesize;
prvppp = prvp - 2 * psrc_linesize;
prvp4p = prvp - 4 * psrc_linesize;
prvpn = prvp + psrc_linesize;
prvpnn = prvp + 2 * psrc_linesize;
prvp4n = prvp + 4 * psrc_linesize;
 
srcp = srcp_saved + 5 * src_linesize - (1 - order) * src_linesize;
srcpp = srcp - src_linesize;
srcppp = srcp - 2 * src_linesize;
srcp3p = srcp - 3 * src_linesize;
srcp4p = srcp - 4 * src_linesize;
 
srcpn = srcp + src_linesize;
srcpnn = srcp + 2 * src_linesize;
srcp3n = srcp + 3 * src_linesize;
srcp4n = srcp + 4 * src_linesize;
 
dstp = dstp_saved + 5 * dst_linesize - (1 - order) * dst_linesize;
 
for (y = 5 - (1 - order); y <= h - 5 - (1 - order); y += 2) {
for (x = 0; x < bwidth; x++) {
if (thresh == 0 || n == 0 ||
(abs((int)prvp[x] - (int)srcp[x]) > thresh) ||
(abs((int)prvpp[x] - (int)srcpp[x]) > thresh) ||
(abs((int)prvpn[x] - (int)srcpn[x]) > thresh)) {
if (map) {
g = x & ~3;
 
if (is_packed_rgb) {
AV_WB32(dstp + g, 0xffffffff);
x = g + 3;
} else if (inlink->format == AV_PIX_FMT_YUYV422) {
// y <- 235, u <- 128, y <- 235, v <- 128
AV_WB32(dstp + g, 0xeb80eb80);
x = g + 3;
} else {
dstp[x] = plane == 0 ? 235 : 128;
}
} else {
if (is_packed_rgb) {
hi = 255;
lo = 0;
} else if (inlink->format == AV_PIX_FMT_YUYV422) {
hi = x & 1 ? 240 : 235;
lo = 16;
} else {
hi = plane == 0 ? 235 : 240;
lo = 16;
}
 
if (sharp) {
if (twoway) {
valf = + 0.526 * ((int)srcpp[x] + (int)srcpn[x])
+ 0.170 * ((int)srcp[x] + (int)prvp[x])
- 0.116 * ((int)srcppp[x] + (int)srcpnn[x] + (int)prvppp[x] + (int)prvpnn[x])
- 0.026 * ((int)srcp3p[x] + (int)srcp3n[x])
+ 0.031 * ((int)srcp4p[x] + (int)srcp4n[x] + (int)prvp4p[x] + (int)prvp4n[x]);
} else {
valf = + 0.526 * ((int)srcpp[x] + (int)srcpn[x])
+ 0.170 * ((int)prvp[x])
- 0.116 * ((int)prvppp[x] + (int)prvpnn[x])
- 0.026 * ((int)srcp3p[x] + (int)srcp3n[x])
+ 0.031 * ((int)prvp4p[x] + (int)prvp4n[x]);
}
dstp[x] = av_clip(valf, lo, hi);
} else {
if (twoway) {
val = (8 * ((int)srcpp[x] + (int)srcpn[x]) + 2 * ((int)srcp[x] + (int)prvp[x])
- (int)(srcppp[x]) - (int)(srcpnn[x])
- (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4;
} else {
val = (8 * ((int)srcpp[x] + (int)srcpn[x]) + 2 * ((int)prvp[x])
- (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4;
}
dstp[x] = av_clip(val, lo, hi);
}
}
} else {
dstp[x] = srcp[x];
}
}
prvp += 2 * psrc_linesize;
prvpp += 2 * psrc_linesize;
prvppp += 2 * psrc_linesize;
prvpn += 2 * psrc_linesize;
prvpnn += 2 * psrc_linesize;
prvp4p += 2 * psrc_linesize;
prvp4n += 2 * psrc_linesize;
srcp += 2 * src_linesize;
srcpp += 2 * src_linesize;
srcppp += 2 * src_linesize;
srcp3p += 2 * src_linesize;
srcp4p += 2 * src_linesize;
srcpn += 2 * src_linesize;
srcpnn += 2 * src_linesize;
srcp3n += 2 * src_linesize;
srcp4n += 2 * src_linesize;
dstp += 2 * dst_linesize;
}
 
srcp = inpic->data[plane];
dstp = kerndeint->tmp_data[plane];
av_image_copy_plane(dstp, psrc_linesize, srcp, src_linesize, bwidth, h);
}
 
av_frame_free(&inpic);
return ff_filter_frame(outlink, outpic);
}
 
static const AVFilterPad kerndeint_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
 
static const AVFilterPad kerndeint_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
 
AVFilter avfilter_vf_kerndeint = {
.name = "kerndeint",
.description = NULL_IF_CONFIG_SMALL("Apply kernel deinterlacing to the input."),
.priv_size = sizeof(KerndeintContext),
.priv_class = &kerndeint_class,
.uninit = uninit,
.query_formats = query_formats,
.inputs = kerndeint_inputs,
.outputs = kerndeint_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_libopencv.c
0,0 → 1,416
/*
* Copyright (c) 2010 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* libopencv wrapper functions
*/
 
#include <opencv/cv.h>
#include <opencv/cxcore.h>
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/file.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
static void fill_iplimage_from_frame(IplImage *img, const AVFrame *frame, enum AVPixelFormat pixfmt)
{
IplImage *tmpimg;
int depth, channels_nb;
 
if (pixfmt == AV_PIX_FMT_GRAY8) { depth = IPL_DEPTH_8U; channels_nb = 1; }
else if (pixfmt == AV_PIX_FMT_BGRA) { depth = IPL_DEPTH_8U; channels_nb = 4; }
else if (pixfmt == AV_PIX_FMT_BGR24) { depth = IPL_DEPTH_8U; channels_nb = 3; }
else return;
 
tmpimg = cvCreateImageHeader((CvSize){frame->width, frame->height}, depth, channels_nb);
*img = *tmpimg;
img->imageData = img->imageDataOrigin = frame->data[0];
img->dataOrder = IPL_DATA_ORDER_PIXEL;
img->origin = IPL_ORIGIN_TL;
img->widthStep = frame->linesize[0];
}
 
static void fill_frame_from_iplimage(AVFrame *frame, const IplImage *img, enum AVPixelFormat pixfmt)
{
frame->linesize[0] = img->widthStep;
frame->data[0] = img->imageData;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
typedef struct {
const AVClass *class;
char *name;
char *params;
int (*init)(AVFilterContext *ctx, const char *args);
void (*uninit)(AVFilterContext *ctx);
void (*end_frame_filter)(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg);
void *priv;
} OCVContext;
 
typedef struct {
int type;
int param1, param2;
double param3, param4;
} SmoothContext;
 
static av_cold int smooth_init(AVFilterContext *ctx, const char *args)
{
OCVContext *s = ctx->priv;
SmoothContext *smooth = s->priv;
char type_str[128] = "gaussian";
 
smooth->param1 = 3;
smooth->param2 = 0;
smooth->param3 = 0.0;
smooth->param4 = 0.0;
 
if (args)
sscanf(args, "%127[^|]|%d|%d|%lf|%lf", type_str, &smooth->param1, &smooth->param2, &smooth->param3, &smooth->param4);
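/* e.g. args = "median|5" or "gaussian|5|0|1.5"; the fields are type|param1|param2|param3|param4 */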
 
if (!strcmp(type_str, "blur" )) smooth->type = CV_BLUR;
else if (!strcmp(type_str, "blur_no_scale")) smooth->type = CV_BLUR_NO_SCALE;
else if (!strcmp(type_str, "median" )) smooth->type = CV_MEDIAN;
else if (!strcmp(type_str, "gaussian" )) smooth->type = CV_GAUSSIAN;
else if (!strcmp(type_str, "bilateral" )) smooth->type = CV_BILATERAL;
else {
av_log(ctx, AV_LOG_ERROR, "Smoothing type '%s' unknown.\n", type_str);
return AVERROR(EINVAL);
}
 
if (smooth->param1 < 0 || !(smooth->param1%2)) {
av_log(ctx, AV_LOG_ERROR,
"Invalid value '%d' for param1, it has to be a positive odd number\n",
smooth->param1);
return AVERROR(EINVAL);
}
if ((smooth->type == CV_BLUR || smooth->type == CV_BLUR_NO_SCALE || smooth->type == CV_GAUSSIAN) &&
(smooth->param2 < 0 || (smooth->param2 && !(smooth->param2%2)))) {
av_log(ctx, AV_LOG_ERROR,
"Invalid value '%d' for param2, it has to be zero or a positive odd number\n",
smooth->param2);
return AVERROR(EINVAL);
}
 
av_log(ctx, AV_LOG_VERBOSE, "type:%s param1:%d param2:%d param3:%f param4:%f\n",
type_str, smooth->param1, smooth->param2, smooth->param3, smooth->param4);
return 0;
}
 
static void smooth_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
OCVContext *s = ctx->priv;
SmoothContext *smooth = s->priv;
cvSmooth(inimg, outimg, smooth->type, smooth->param1, smooth->param2, smooth->param3, smooth->param4);
}
 
static int read_shape_from_file(int *cols, int *rows, int **values, const char *filename,
void *log_ctx)
{
uint8_t *buf, *p, *pend;
size_t size;
int ret, i, j, w;
 
if ((ret = av_file_map(filename, &buf, &size, 0, log_ctx)) < 0)
return ret;
 
/* prescan file to get the number of lines and the maximum width */
w = 0;
for (i = 0; i < size; i++) {
if (buf[i] == '\n') {
if (*rows == INT_MAX) {
av_log(log_ctx, AV_LOG_ERROR, "Overflow on the number of rows in the file\n");
return AVERROR_INVALIDDATA;
}
++(*rows);
*cols = FFMAX(*cols, w);
w = 0;
} else if (w == INT_MAX) {
av_log(log_ctx, AV_LOG_ERROR, "Overflow on the number of columns in the file\n");
return AVERROR_INVALIDDATA;
}
w++;
}
if (!*rows || !*cols || *rows > (SIZE_MAX / sizeof(int) / *cols)) {
av_log(log_ctx, AV_LOG_ERROR, "File with size %dx%d is too big\n",
*rows, *cols);
av_file_unmap(buf, size);
return AVERROR_INVALIDDATA;
}
if (!(*values = av_mallocz(sizeof(int) * *rows * *cols))) {
av_file_unmap(buf, size);
return AVERROR(ENOMEM);
}
 
/* fill *values */
p = buf;
pend = buf + size-1;
for (i = 0; i < *rows; i++) {
for (j = 0;; j++) {
if (p > pend || *p == '\n') {
p++;
break;
} else
(*values)[*cols*i + j] = !!av_isgraph(*(p++));
}
}
av_file_unmap(buf, size);
 
#ifdef DEBUG
{
char *line;
if (!(line = av_malloc(*cols + 1)))
return AVERROR(ENOMEM);
for (i = 0; i < *rows; i++) {
for (j = 0; j < *cols; j++)
line[j] = (*values)[i * *cols + j] ? '@' : ' ';
line[j] = 0;
av_log(log_ctx, AV_LOG_DEBUG, "%3d: %s\n", i, line);
}
av_free(line);
}
#endif
 
return 0;
}
 
static int parse_iplconvkernel(IplConvKernel **kernel, char *buf, void *log_ctx)
{
char shape_filename[128] = "", shape_str[32] = "rect";
int cols = 0, rows = 0, anchor_x = 0, anchor_y = 0, shape = CV_SHAPE_RECT;
int *values = NULL, ret;
 
sscanf(buf, "%dx%d+%dx%d/%32[^=]=%127s", &cols, &rows, &anchor_x, &anchor_y, shape_str, shape_filename);
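/* e.g. buf = "5x5+2x2/cross" or "0x0+0x0/custom=shape.txt" (cols x rows + anchor_x x anchor_y / shape[=filename]) */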
 
if (!strcmp(shape_str, "rect" )) shape = CV_SHAPE_RECT;
else if (!strcmp(shape_str, "cross" )) shape = CV_SHAPE_CROSS;
else if (!strcmp(shape_str, "ellipse")) shape = CV_SHAPE_ELLIPSE;
else if (!strcmp(shape_str, "custom" )) {
shape = CV_SHAPE_CUSTOM;
if ((ret = read_shape_from_file(&cols, &rows, &values, shape_filename, log_ctx)) < 0)
return ret;
} else {
av_log(log_ctx, AV_LOG_ERROR,
"Shape unspecified or type '%s' unknown.\n", shape_str);
return AVERROR(EINVAL);
}
 
if (rows <= 0 || cols <= 0) {
av_log(log_ctx, AV_LOG_ERROR,
"Invalid non-positive values for shape size %dx%d\n", cols, rows);
return AVERROR(EINVAL);
}
 
if (anchor_x < 0 || anchor_y < 0 || anchor_x >= cols || anchor_y >= rows) {
av_log(log_ctx, AV_LOG_ERROR,
"Shape anchor %dx%d is not inside the rectangle with size %dx%d.\n",
anchor_x, anchor_y, cols, rows);
return AVERROR(EINVAL);
}
 
*kernel = cvCreateStructuringElementEx(cols, rows, anchor_x, anchor_y, shape, values);
av_freep(&values);
if (!*kernel)
return AVERROR(ENOMEM);
 
av_log(log_ctx, AV_LOG_VERBOSE, "Structuring element: w:%d h:%d x:%d y:%d shape:%s\n",
cols, rows, anchor_x, anchor_y, shape_str);
return 0;
}
 
typedef struct {
int nb_iterations;
IplConvKernel *kernel;
} DilateContext;
 
static av_cold int dilate_init(AVFilterContext *ctx, const char *args)
{
OCVContext *s = ctx->priv;
DilateContext *dilate = s->priv;
char default_kernel_str[] = "3x3+0x0/rect";
char *kernel_str;
const char *buf = args;
int ret;
 
if (args)
kernel_str = av_get_token(&buf, "|");
else
kernel_str = av_strdup(default_kernel_str);
if (!kernel_str)
return AVERROR(ENOMEM);
ret = parse_iplconvkernel(&dilate->kernel, kernel_str, ctx);
av_free(kernel_str);
if (ret < 0)
return ret;
 
if (!buf || sscanf(buf, "|%d", &dilate->nb_iterations) != 1)
dilate->nb_iterations = 1;
av_log(ctx, AV_LOG_VERBOSE, "iterations_nb:%d\n", dilate->nb_iterations);
if (dilate->nb_iterations <= 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid non-positive value '%d' for nb_iterations\n",
dilate->nb_iterations);
return AVERROR(EINVAL);
}
return 0;
}
 
static av_cold void dilate_uninit(AVFilterContext *ctx)
{
OCVContext *s = ctx->priv;
DilateContext *dilate = s->priv;
 
cvReleaseStructuringElement(&dilate->kernel);
}
 
static void dilate_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
OCVContext *s = ctx->priv;
DilateContext *dilate = s->priv;
cvDilate(inimg, outimg, dilate->kernel, dilate->nb_iterations);
}
 
static void erode_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
OCVContext *s = ctx->priv;
DilateContext *dilate = s->priv;
cvErode(inimg, outimg, dilate->kernel, dilate->nb_iterations);
}
 
typedef struct {
const char *name;
size_t priv_size;
int (*init)(AVFilterContext *ctx, const char *args);
void (*uninit)(AVFilterContext *ctx);
void (*end_frame_filter)(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg);
} OCVFilterEntry;
 
static OCVFilterEntry ocv_filter_entries[] = {
{ "dilate", sizeof(DilateContext), dilate_init, dilate_uninit, dilate_end_frame_filter },
{ "erode", sizeof(DilateContext), dilate_init, dilate_uninit, erode_end_frame_filter },
{ "smooth", sizeof(SmoothContext), smooth_init, NULL, smooth_end_frame_filter },
};
 
static av_cold int init(AVFilterContext *ctx)
{
OCVContext *s = ctx->priv;
int i;
 
if (!s->name) {
av_log(ctx, AV_LOG_ERROR, "No libopencv filter name specified\n");
return AVERROR(EINVAL);
}
for (i = 0; i < FF_ARRAY_ELEMS(ocv_filter_entries); i++) {
OCVFilterEntry *entry = &ocv_filter_entries[i];
if (!strcmp(s->name, entry->name)) {
s->init = entry->init;
s->uninit = entry->uninit;
s->end_frame_filter = entry->end_frame_filter;
 
if (!(s->priv = av_mallocz(entry->priv_size)))
return AVERROR(ENOMEM);
return s->init(ctx, s->params);
}
}
 
av_log(ctx, AV_LOG_ERROR, "No libopencv filter named '%s'\n", s->name);
return AVERROR(EINVAL);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
OCVContext *s = ctx->priv;
 
if (s->uninit)
s->uninit(ctx);
av_free(s->priv);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
OCVContext *s = ctx->priv;
AVFilterLink *outlink= inlink->dst->outputs[0];
AVFrame *out;
IplImage inimg, outimg;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
 
fill_iplimage_from_frame(&inimg , in , inlink->format);
fill_iplimage_from_frame(&outimg, out, inlink->format);
s->end_frame_filter(ctx, &inimg, &outimg);
fill_frame_from_iplimage(out, &outimg, inlink->format);
 
av_frame_free(&in);
 
return ff_filter_frame(outlink, out);
}
 
#define OFFSET(x) offsetof(OCVContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption ocv_options[] = {
{ "filter_name", NULL, OFFSET(name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(ocv);
 
static const AVFilterPad avfilter_vf_ocv_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_ocv_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_ocv = {
.name = "ocv",
.description = NULL_IF_CONFIG_SMALL("Apply transform using libopencv."),
.priv_size = sizeof(OCVContext),
.priv_class = &ocv_class,
.query_formats = query_formats,
.init = init,
.uninit = uninit,
.inputs = avfilter_vf_ocv_inputs,
.outputs = avfilter_vf_ocv_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_lut.c
0,0 → 1,439
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Compute a look-up table that maps each input value to an output
* value, and apply it to the input video.
*/
 
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
static const char *const var_names[] = {
"w", ///< width of the input video
"h", ///< height of the input video
"val", ///< input value for the pixel
"maxval", ///< max value for the pixel
"minval", ///< min value for the pixel
"negval", ///< negated value
"clipval",
NULL
};
 
enum var_name {
VAR_W,
VAR_H,
VAR_VAL,
VAR_MAXVAL,
VAR_MINVAL,
VAR_NEGVAL,
VAR_CLIPVAL,
VAR_VARS_NB
};
 
typedef struct {
const AVClass *class;
uint8_t lut[4][256]; ///< lookup table for each component
char *comp_expr_str[4];
AVExpr *comp_expr[4];
int hsub, vsub;
double var_values[VAR_VARS_NB];
int is_rgb, is_yuv;
int step;
int negate_alpha; /* only used by negate */
} LutContext;
 
#define Y 0
#define U 1
#define V 2
#define R 0
#define G 1
#define B 2
#define A 3
 
#define OFFSET(x) offsetof(LutContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption options[] = {
{ "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "y", "set Y expression", OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "u", "set U expression", OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "v", "set V expression", OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "r", "set R expression", OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ NULL }
};
 
static av_cold void uninit(AVFilterContext *ctx)
{
LutContext *s = ctx->priv;
int i;
 
for (i = 0; i < 4; i++) {
av_expr_free(s->comp_expr[i]);
s->comp_expr[i] = NULL;
av_freep(&s->comp_expr_str[i]);
}
}
 
#define YUV_FORMATS \
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, \
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, \
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, \
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \
AV_PIX_FMT_YUVJ440P
 
#define RGB_FORMATS \
AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, \
AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, \
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24
 
static const enum AVPixelFormat yuv_pix_fmts[] = { YUV_FORMATS, AV_PIX_FMT_NONE };
static const enum AVPixelFormat rgb_pix_fmts[] = { RGB_FORMATS, AV_PIX_FMT_NONE };
static const enum AVPixelFormat all_pix_fmts[] = { RGB_FORMATS, YUV_FORMATS, AV_PIX_FMT_NONE };
 
static int query_formats(AVFilterContext *ctx)
{
LutContext *s = ctx->priv;
 
const enum AVPixelFormat *pix_fmts = s->is_rgb ? rgb_pix_fmts :
s->is_yuv ? yuv_pix_fmts :
all_pix_fmts;
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
/**
* Clip value val into the minval-maxval range.
*/
static double clip(void *opaque, double val)
{
LutContext *s = opaque;
double minval = s->var_values[VAR_MINVAL];
double maxval = s->var_values[VAR_MAXVAL];
 
return av_clip(val, minval, maxval);
}
 
/**
* Compute gamma correction for value val, assuming the minval-maxval
* range; val is clipped to a value contained in the same interval.
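* E.g. with minval=16, maxval=235 and gamma=0.5, val=126 gives pow(110/219, 0.5) * 219 + 16 ~= 171.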
*/
static double compute_gammaval(void *opaque, double gamma)
{
LutContext *s = opaque;
double val = s->var_values[VAR_CLIPVAL];
double minval = s->var_values[VAR_MINVAL];
double maxval = s->var_values[VAR_MAXVAL];
 
return pow((val-minval)/(maxval-minval), gamma) * (maxval-minval)+minval;
}
 
static double (* const funcs1[])(void *, double) = {
(void *)clip,
(void *)compute_gammaval,
NULL
};
 
static const char * const funcs1_names[] = {
"clip",
"gammaval",
NULL
};
 
static int config_props(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
LutContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
uint8_t rgba_map[4]; /* component index -> RGBA color index map */
int min[4], max[4];
int val, color, ret;
 
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
 
s->var_values[VAR_W] = inlink->w;
s->var_values[VAR_H] = inlink->h;
 
switch (inlink->format) {
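/* planar Y'CbCr formats use the limited (studio) range: 16..235 for luma,
16..240 for chroma; everything else is treated as full range 0..255 */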
case AV_PIX_FMT_YUV410P:
case AV_PIX_FMT_YUV411P:
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUV440P:
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUVA420P:
case AV_PIX_FMT_YUVA422P:
case AV_PIX_FMT_YUVA444P:
min[Y] = min[U] = min[V] = 16;
max[Y] = 235;
max[U] = max[V] = 240;
min[A] = 0; max[A] = 255;
break;
default:
min[0] = min[1] = min[2] = min[3] = 0;
max[0] = max[1] = max[2] = max[3] = 255;
}
 
s->is_yuv = s->is_rgb = 0;
if (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) s->is_yuv = 1;
else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) s->is_rgb = 1;
 
if (s->is_rgb) {
ff_fill_rgba_map(rgba_map, inlink->format);
s->step = av_get_bits_per_pixel(desc) >> 3;
}
 
for (color = 0; color < desc->nb_components; color++) {
double res;
int comp = s->is_rgb ? rgba_map[color] : color;
 
/* create the parsed expression */
av_expr_free(s->comp_expr[color]);
s->comp_expr[color] = NULL;
ret = av_expr_parse(&s->comp_expr[color], s->comp_expr_str[color],
var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR,
"Error when parsing the expression '%s' for the component %d and color %d.\n",
s->comp_expr_str[color], comp, color);
return AVERROR(EINVAL);
}
 
/* compute the lut */
s->var_values[VAR_MAXVAL] = max[color];
s->var_values[VAR_MINVAL] = min[color];
 
for (val = 0; val < 256; val++) {
s->var_values[VAR_VAL] = val;
s->var_values[VAR_CLIPVAL] = av_clip(val, min[color], max[color]);
s->var_values[VAR_NEGVAL] =
av_clip(min[color] + max[color] - s->var_values[VAR_VAL],
min[color], max[color]);
 
res = av_expr_eval(s->comp_expr[color], s->var_values, s);
if (isnan(res)) {
av_log(ctx, AV_LOG_ERROR,
"Error when evaluating the expression '%s' for the value %d for the component %d.\n",
s->comp_expr_str[color], val, comp);
return AVERROR(EINVAL);
}
s->lut[comp][val] = av_clip((int)res, min[color], max[color]);
av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, s->lut[comp][val]);
}
}
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
LutContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
uint8_t *inrow, *outrow, *inrow0, *outrow0;
int i, j, plane, direct = 0;
 
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
 
if (s->is_rgb) {
/* packed */
inrow0 = in ->data[0];
outrow0 = out->data[0];
 
for (i = 0; i < in->height; i ++) {
int w = inlink->w;
const uint8_t (*tab)[256] = (const uint8_t (*)[256])s->lut;
inrow = inrow0;
outrow = outrow0;
for (j = 0; j < w; j++) {
switch (s->step) {
case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
default: outrow[0] = tab[0][inrow[0]];
}
outrow += s->step;
inrow += s->step;
}
inrow0 += in ->linesize[0];
outrow0 += out->linesize[0];
}
} else {
/* planar */
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
int h = FF_CEIL_RSHIFT(inlink->h, vsub);
int w = FF_CEIL_RSHIFT(inlink->w, hsub);
 
inrow = in ->data[plane];
outrow = out->data[plane];
 
for (i = 0; i < h; i++) {
const uint8_t *tab = s->lut[plane];
for (j = 0; j < w; j++)
outrow[j] = tab[inrow[j]];
inrow += in ->linesize[plane];
outrow += out->linesize[plane];
}
}
}
 
if (!direct)
av_frame_free(&in);
 
return ff_filter_frame(outlink, out);
}
 
static const AVFilterPad inputs[] = {
{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
static const AVFilterPad outputs[] = {
{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
#define DEFINE_LUT_FILTER(name_, description_) \
AVFilter avfilter_vf_##name_ = { \
.name = #name_, \
.description = NULL_IF_CONFIG_SMALL(description_), \
.priv_size = sizeof(LutContext), \
.priv_class = &name_ ## _class, \
.init = name_##_init, \
.uninit = uninit, \
.query_formats = query_formats, \
.inputs = inputs, \
.outputs = outputs, \
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, \
}
 
#if CONFIG_LUT_FILTER
 
#define lut_options options
AVFILTER_DEFINE_CLASS(lut);
 
static int lut_init(AVFilterContext *ctx)
{
return 0;
}
 
DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video.");
#endif
 
#if CONFIG_LUTYUV_FILTER
 
#define lutyuv_options options
AVFILTER_DEFINE_CLASS(lutyuv);
 
static av_cold int lutyuv_init(AVFilterContext *ctx)
{
LutContext *s = ctx->priv;
 
s->is_yuv = 1;
 
return 0;
}
 
DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.");
#endif
 
#if CONFIG_LUTRGB_FILTER
 
#define lutrgb_options options
AVFILTER_DEFINE_CLASS(lutrgb);
 
static av_cold int lutrgb_init(AVFilterContext *ctx)
{
LutContext *s = ctx->priv;
 
s->is_rgb = 1;
 
return 0;
}
 
DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.");
#endif
 
#if CONFIG_NEGATE_FILTER
 
static const AVOption negate_options[] = {
{ "negate_alpha", NULL, OFFSET(negate_alpha), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(negate);
 
static av_cold int negate_init(AVFilterContext *ctx)
{
LutContext *s = ctx->priv;
int i;
 
av_log(ctx, AV_LOG_DEBUG, "negate_alpha:%d\n", s->negate_alpha);
 
for (i = 0; i < 4; i++) {
s->comp_expr_str[i] = av_strdup((i == 3 && !s->negate_alpha) ?
"val" : "negval");
if (!s->comp_expr_str[i]) {
uninit(ctx);
return AVERROR(ENOMEM);
}
}
 
return 0;
}
 
DEFINE_LUT_FILTER(negate, "Negate input video.");
 
#endif
/contrib/sdk/sources/ffmpeg/libavfilter/vf_lut3d.c
0,0 → 1,796
/*
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* 3D Lookup table filter
*/
 
#include "libavutil/opt.h"
#include "libavutil/file.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "avfilter.h"
#include "drawutils.h"
#include "dualinput.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
#define R 0
#define G 1
#define B 2
#define A 3
 
enum interp_mode {
INTERPOLATE_NEAREST,
INTERPOLATE_TRILINEAR,
INTERPOLATE_TETRAHEDRAL,
NB_INTERP_MODE
};
 
struct rgbvec {
float r, g, b;
};
 
/* 3D LUTs don't often go above a level of 32, but it is common to have a Hald CLUT
* of 512x512 (64x64x64) */
#define MAX_LEVEL 64
 
typedef struct LUT3DContext {
const AVClass *class;
enum interp_mode interpolation;
char *file;
uint8_t rgba_map[4];
int step;
int is16bit;
struct rgbvec (*interp_8) (const struct LUT3DContext*, uint8_t, uint8_t, uint8_t);
struct rgbvec (*interp_16)(const struct LUT3DContext*, uint16_t, uint16_t, uint16_t);
struct rgbvec lut[MAX_LEVEL][MAX_LEVEL][MAX_LEVEL];
int lutsize;
#if CONFIG_HALDCLUT_FILTER
uint8_t clut_rgba_map[4];
int clut_step;
int clut_is16bit;
int clut_width;
FFDualInputContext dinput;
#endif
} LUT3DContext;
 
#define OFFSET(x) offsetof(LUT3DContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define COMMON_OPTIONS \
{ "interp", "select interpolation mode", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=INTERPOLATE_TETRAHEDRAL}, 0, NB_INTERP_MODE-1, FLAGS, "interp_mode" }, \
{ "nearest", "use values from the nearest defined points", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_NEAREST}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
{ "trilinear", "interpolate values using the 8 points defining a cube", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TRILINEAR}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
{ "tetrahedral", "interpolate values using a tetrahedron", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TETRAHEDRAL}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
{ NULL }
 
static inline float lerpf(float v0, float v1, float f)
{
return v0 + (v1 - v0) * f;
}
 
static inline struct rgbvec lerp(const struct rgbvec *v0, const struct rgbvec *v1, float f)
{
struct rgbvec v = {
lerpf(v0->r, v1->r, f), lerpf(v0->g, v1->g, f), lerpf(v0->b, v1->b, f)
};
return v;
}
 
#define NEAR(x) ((int)((x) + .5))
#define PREV(x) ((int)(x))
#define NEXT(x) (FFMIN((int)(x) + 1, lut3d->lutsize - 1))
 
/**
* Get the nearest defined point
*/
static inline struct rgbvec interp_nearest(const LUT3DContext *lut3d,
const struct rgbvec *s)
{
return lut3d->lut[NEAR(s->r)][NEAR(s->g)][NEAR(s->b)];
}
 
/**
* Interpolate using the 8 vertices of a cube
* @see https://en.wikipedia.org/wiki/Trilinear_interpolation
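* The eight corner values c000..c111 are reduced with three successive
* lerp passes, along r, then g, then b.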
*/
static inline struct rgbvec interp_trilinear(const LUT3DContext *lut3d,
const struct rgbvec *s)
{
const int prev[] = {PREV(s->r), PREV(s->g), PREV(s->b)};
const int next[] = {NEXT(s->r), NEXT(s->g), NEXT(s->b)};
const struct rgbvec d = {s->r - prev[0], s->g - prev[1], s->b - prev[2]};
const struct rgbvec c000 = lut3d->lut[prev[0]][prev[1]][prev[2]];
const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
const struct rgbvec c111 = lut3d->lut[next[0]][next[1]][next[2]];
const struct rgbvec c00 = lerp(&c000, &c100, d.r);
const struct rgbvec c10 = lerp(&c010, &c110, d.r);
const struct rgbvec c01 = lerp(&c001, &c101, d.r);
const struct rgbvec c11 = lerp(&c011, &c111, d.r);
const struct rgbvec c0 = lerp(&c00, &c10, d.g);
const struct rgbvec c1 = lerp(&c01, &c11, d.g);
const struct rgbvec c = lerp(&c0, &c1, d.b);
return c;
}
 
/**
* Tetrahedral interpolation. Based on code found in the Truelight Software Library paper.
* @see http://www.filmlight.ltd.uk/pdf/whitepapers/FL-TL-TN-0057-SoftwareLib.pdf
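* The ordering of the fractional distances d.r, d.g and d.b selects one of
* six tetrahedra inside the cube; only its four vertices contribute to the result.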
*/
static inline struct rgbvec interp_tetrahedral(const LUT3DContext *lut3d,
const struct rgbvec *s)
{
const int prev[] = {PREV(s->r), PREV(s->g), PREV(s->b)};
const int next[] = {NEXT(s->r), NEXT(s->g), NEXT(s->b)};
const struct rgbvec d = {s->r - prev[0], s->g - prev[1], s->b - prev[2]};
const struct rgbvec c000 = lut3d->lut[prev[0]][prev[1]][prev[2]];
const struct rgbvec c111 = lut3d->lut[next[0]][next[1]][next[2]];
struct rgbvec c;
if (d.r > d.g) {
if (d.g > d.b) {
const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
c.r = (1-d.r) * c000.r + (d.r-d.g) * c100.r + (d.g-d.b) * c110.r + (d.b) * c111.r;
c.g = (1-d.r) * c000.g + (d.r-d.g) * c100.g + (d.g-d.b) * c110.g + (d.b) * c111.g;
c.b = (1-d.r) * c000.b + (d.r-d.g) * c100.b + (d.g-d.b) * c110.b + (d.b) * c111.b;
} else if (d.r > d.b) {
const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
c.r = (1-d.r) * c000.r + (d.r-d.b) * c100.r + (d.b-d.g) * c101.r + (d.g) * c111.r;
c.g = (1-d.r) * c000.g + (d.r-d.b) * c100.g + (d.b-d.g) * c101.g + (d.g) * c111.g;
c.b = (1-d.r) * c000.b + (d.r-d.b) * c100.b + (d.b-d.g) * c101.b + (d.g) * c111.b;
} else {
const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
c.r = (1-d.b) * c000.r + (d.b-d.r) * c001.r + (d.r-d.g) * c101.r + (d.g) * c111.r;
c.g = (1-d.b) * c000.g + (d.b-d.r) * c001.g + (d.r-d.g) * c101.g + (d.g) * c111.g;
c.b = (1-d.b) * c000.b + (d.b-d.r) * c001.b + (d.r-d.g) * c101.b + (d.g) * c111.b;
}
} else {
if (d.b > d.g) {
const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
c.r = (1-d.b) * c000.r + (d.b-d.g) * c001.r + (d.g-d.r) * c011.r + (d.r) * c111.r;
c.g = (1-d.b) * c000.g + (d.b-d.g) * c001.g + (d.g-d.r) * c011.g + (d.r) * c111.g;
c.b = (1-d.b) * c000.b + (d.b-d.g) * c001.b + (d.g-d.r) * c011.b + (d.r) * c111.b;
} else if (d.b > d.r) {
const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
c.r = (1-d.g) * c000.r + (d.g-d.b) * c010.r + (d.b-d.r) * c011.r + (d.r) * c111.r;
c.g = (1-d.g) * c000.g + (d.g-d.b) * c010.g + (d.b-d.r) * c011.g + (d.r) * c111.g;
c.b = (1-d.g) * c000.b + (d.g-d.b) * c010.b + (d.b-d.r) * c011.b + (d.r) * c111.b;
} else {
const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
c.r = (1-d.g) * c000.r + (d.g-d.r) * c010.r + (d.r-d.b) * c110.r + (d.b) * c111.r;
c.g = (1-d.g) * c000.g + (d.g-d.r) * c010.g + (d.r-d.b) * c110.g + (d.b) * c111.g;
c.b = (1-d.g) * c000.b + (d.g-d.r) * c010.b + (d.r-d.b) * c110.b + (d.b) * c111.b;
}
}
return c;
}
 
#define DEFINE_INTERP_FUNC(name, nbits) \
static struct rgbvec interp_##nbits##_##name(const LUT3DContext *lut3d, \
uint##nbits##_t r, \
uint##nbits##_t g, \
uint##nbits##_t b) \
{ \
const float scale = (1. / ((1<<nbits) - 1)) * (lut3d->lutsize - 1); \
const struct rgbvec scaled_rgb = {r * scale, g * scale, b * scale}; \
return interp_##name(lut3d, &scaled_rgb); \
}
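 
/* e.g. for 8-bit input and a 33-level LUT: scale = 32/255., so r = 255 maps to lut index 32.0 */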
 
DEFINE_INTERP_FUNC(nearest, 8)
DEFINE_INTERP_FUNC(trilinear, 8)
DEFINE_INTERP_FUNC(tetrahedral, 8)
 
DEFINE_INTERP_FUNC(nearest, 16)
DEFINE_INTERP_FUNC(trilinear, 16)
DEFINE_INTERP_FUNC(tetrahedral, 16)
 
#define MAX_LINE_SIZE 512
 
static int skip_line(const char *p)
{
while (*p && av_isspace(*p))
p++;
return !*p || *p == '#';
}
 
#define NEXT_LINE(loop_cond) do { \
if (!fgets(line, sizeof(line), f)) { \
av_log(ctx, AV_LOG_ERROR, "Unexpected EOF\n"); \
return AVERROR_INVALIDDATA; \
} \
} while (loop_cond)
 
/* Basically r, g and b float values on each line; this format seems to be
* generated by DaVinci */
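/* e.g. a single entry per line, such as "0.047059 0.047059 0.047059" */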
static int parse_dat(AVFilterContext *ctx, FILE *f)
{
LUT3DContext *lut3d = ctx->priv;
const int size = lut3d->lutsize;
int i, j, k;
 
for (k = 0; k < size; k++) {
for (j = 0; j < size; j++) {
for (i = 0; i < size; i++) {
char line[MAX_LINE_SIZE];
struct rgbvec *vec = &lut3d->lut[k][j][i];
NEXT_LINE(skip_line(line));
sscanf(line, "%f %f %f", &vec->r, &vec->g, &vec->b);
}
}
}
return 0;
}
 
/* Iridas format */
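/* A minimal sketch of the expected layout ('#' starts a comment line):
* LUT_3D_SIZE 2
* # optional DOMAIN_MIN / DOMAIN_MAX lines
* 0.0 0.0 0.0
* ...
* 1.0 1.0 1.0
*/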
static int parse_cube(AVFilterContext *ctx, FILE *f)
{
LUT3DContext *lut3d = ctx->priv;
char line[MAX_LINE_SIZE];
float min[3] = {0.0, 0.0, 0.0};
float max[3] = {1.0, 1.0, 1.0};
 
while (fgets(line, sizeof(line), f)) {
if (!strncmp(line, "LUT_3D_SIZE ", 12)) {
int i, j, k;
const int size = strtol(line + 12, NULL, 0);
 
if (size < 2 || size > MAX_LEVEL) {
av_log(ctx, AV_LOG_ERROR, "Too large or invalid 3D LUT size\n");
return AVERROR(EINVAL);
}
lut3d->lutsize = size;
for (k = 0; k < size; k++) {
for (j = 0; j < size; j++) {
for (i = 0; i < size; i++) {
struct rgbvec *vec = &lut3d->lut[k][j][i];
 
do {
NEXT_LINE(0);
if (!strncmp(line, "DOMAIN_", 7)) {
float *vals = NULL;
if (!strncmp(line + 7, "MIN ", 4)) vals = min;
else if (!strncmp(line + 7, "MAX ", 4)) vals = max;
if (!vals)
return AVERROR_INVALIDDATA;
sscanf(line + 11, "%f %f %f", vals, vals + 1, vals + 2);
av_log(ctx, AV_LOG_DEBUG, "min: %f %f %f | max: %f %f %f\n",
min[0], min[1], min[2], max[0], max[1], max[2]);
continue;
}
} while (skip_line(line));
if (sscanf(line, "%f %f %f", &vec->r, &vec->g, &vec->b) != 3)
return AVERROR_INVALIDDATA;
vec->r *= max[0] - min[0];
vec->g *= max[1] - min[1];
vec->b *= max[2] - min[2];
}
}
}
break;
}
}
return 0;
}
 
/* Assume 17x17x17 LUT with a 16-bit depth
* FIXME: it seems there are various 3dl formats */
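/* Each data line holds three integers, e.g. "0 64 128"; they are divided by 4096 (16*16*16) to reach the 0..1 range. */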
static int parse_3dl(AVFilterContext *ctx, FILE *f)
{
char line[MAX_LINE_SIZE];
LUT3DContext *lut3d = ctx->priv;
int i, j, k;
const int size = 17;
const float scale = 16*16*16;
 
lut3d->lutsize = size;
NEXT_LINE(skip_line(line));
for (k = 0; k < size; k++) {
for (j = 0; j < size; j++) {
for (i = 0; i < size; i++) {
int r, g, b;
struct rgbvec *vec = &lut3d->lut[k][j][i];
 
NEXT_LINE(skip_line(line));
if (sscanf(line, "%d %d %d", &r, &g, &b) != 3)
return AVERROR_INVALIDDATA;
vec->r = r / scale;
vec->g = g / scale;
vec->b = b / scale;
}
}
}
return 0;
}
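/* A minimal sketch of the assumed 3dl layout (hypothetical values): one
 * header line, consumed by the first NEXT_LINE() above, followed by
 * 17*17*17 integer "r g b" triplets which are normalized by 4096:
 *
 *   0 64 128 ... 1023
 *   0 0 0
 *   0 0 410
 *   ...
 */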
 
/* Pandora format */
static int parse_m3d(AVFilterContext *ctx, FILE *f)
{
LUT3DContext *lut3d = ctx->priv;
float scale;
int i, j, k, size, in = -1, out = -1;
char line[MAX_LINE_SIZE];
uint8_t rgb_map[3] = {0, 1, 2};
 
while (fgets(line, sizeof(line), f)) {
if (!strncmp(line, "in", 2)) in = strtol(line + 2, NULL, 0);
else if (!strncmp(line, "out", 3)) out = strtol(line + 3, NULL, 0);
else if (!strncmp(line, "values", 6)) {
const char *p = line + 6;
#define SET_COLOR(id) do { \
while (av_isspace(*p)) \
p++; \
switch (*p) { \
case 'r': rgb_map[id] = 0; break; \
case 'g': rgb_map[id] = 1; break; \
case 'b': rgb_map[id] = 2; break; \
} \
while (*p && !av_isspace(*p)) \
p++; \
} while (0)
SET_COLOR(0);
SET_COLOR(1);
SET_COLOR(2);
break;
}
}
 
if (in == -1 || out == -1) {
av_log(ctx, AV_LOG_ERROR, "in and out must be defined\n");
return AVERROR_INVALIDDATA;
}
if (in < 2 || out < 2 ||
in > MAX_LEVEL*MAX_LEVEL*MAX_LEVEL ||
out > MAX_LEVEL*MAX_LEVEL*MAX_LEVEL) {
av_log(ctx, AV_LOG_ERROR, "invalid in (%d) or out (%d)\n", in, out);
return AVERROR_INVALIDDATA;
}
for (size = 1; size*size*size < in; size++);
lut3d->lutsize = size;
scale = 1. / (out - 1);
 
for (k = 0; k < size; k++) {
for (j = 0; j < size; j++) {
for (i = 0; i < size; i++) {
struct rgbvec *vec = &lut3d->lut[k][j][i];
float val[3];
 
NEXT_LINE(0);
if (sscanf(line, "%f %f %f", val, val + 1, val + 2) != 3)
return AVERROR_INVALIDDATA;
vec->r = val[rgb_map[0]] * scale;
vec->g = val[rgb_map[1]] * scale;
vec->b = val[rgb_map[2]] * scale;
}
}
}
return 0;
}
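/* A minimal sketch of the Pandora m3d header handled above (hypothetical
 * values): "in" gives the number of entries (lutsize^3), "out" the
 * output scale, and "values" the channel order used to fill rgb_map:
 *
 *   in 4913
 *   out 65536
 *   values red green blue
 *   0 32768 65535
 *   ...
 */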
 
static void set_identity_matrix(LUT3DContext *lut3d, int size)
{
int i, j, k;
const float c = 1. / (size - 1);
 
lut3d->lutsize = size;
for (k = 0; k < size; k++) {
for (j = 0; j < size; j++) {
for (i = 0; i < size; i++) {
struct rgbvec *vec = &lut3d->lut[k][j][i];
vec->r = k * c;
vec->g = j * c;
vec->b = i * c;
}
}
}
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
LUT3DContext *lut3d = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
switch (inlink->format) {
case AV_PIX_FMT_RGB48:
case AV_PIX_FMT_BGR48:
case AV_PIX_FMT_RGBA64:
case AV_PIX_FMT_BGRA64:
lut3d->is16bit = 1;
}
 
ff_fill_rgba_map(lut3d->rgba_map, inlink->format);
lut3d->step = av_get_padded_bits_per_pixel(desc) >> (3 + lut3d->is16bit);
 
#define SET_FUNC(name) do { \
if (lut3d->is16bit) lut3d->interp_16 = interp_16_##name; \
else lut3d->interp_8 = interp_8_##name; \
} while (0)
 
switch (lut3d->interpolation) {
case INTERPOLATE_NEAREST: SET_FUNC(nearest); break;
case INTERPOLATE_TRILINEAR: SET_FUNC(trilinear); break;
case INTERPOLATE_TETRAHEDRAL: SET_FUNC(tetrahedral); break;
default:
av_assert0(0);
}
 
return 0;
}
 
#define FILTER(nbits) do { \
uint8_t *dstrow = out->data[0]; \
const uint8_t *srcrow = in ->data[0]; \
\
for (y = 0; y < inlink->h; y++) { \
uint##nbits##_t *dst = (uint##nbits##_t *)dstrow; \
const uint##nbits##_t *src = (const uint##nbits##_t *)srcrow; \
for (x = 0; x < inlink->w * step; x += step) { \
struct rgbvec vec = lut3d->interp_##nbits(lut3d, src[x + r], src[x + g], src[x + b]); \
dst[x + r] = av_clip_uint##nbits(vec.r * (float)((1<<nbits) - 1)); \
dst[x + g] = av_clip_uint##nbits(vec.g * (float)((1<<nbits) - 1)); \
dst[x + b] = av_clip_uint##nbits(vec.b * (float)((1<<nbits) - 1)); \
if (!direct && step == 4) \
dst[x + a] = src[x + a]; \
} \
dstrow += out->linesize[0]; \
srcrow += in ->linesize[0]; \
} \
} while (0)
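/* Worked example (for illustration): with AV_PIX_FMT_BGRA input,
 * av_get_padded_bits_per_pixel() is 32, so step = 32 >> 3 = 4 values
 * per pixel and rgba_map places r at offset 2, g at 1, b at 0, a at 3;
 * the loop above then advances x by 4 and pushes src[x+2], src[x+1],
 * src[x+0] through the LUT while copying the alpha value through. */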
 
static AVFrame *apply_lut(AVFilterLink *inlink, AVFrame *in)
{
int x, y, direct = 0;
AVFilterContext *ctx = inlink->dst;
LUT3DContext *lut3d = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
const int step = lut3d->step;
const uint8_t r = lut3d->rgba_map[R];
const uint8_t g = lut3d->rgba_map[G];
const uint8_t b = lut3d->rgba_map[B];
const uint8_t a = lut3d->rgba_map[A];
 
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return NULL;
}
av_frame_copy_props(out, in);
}
 
if (lut3d->is16bit) FILTER(16);
else FILTER(8);
 
if (!direct)
av_frame_free(&in);
 
return out;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out = apply_lut(inlink, in);
if (!out)
return AVERROR(ENOMEM);
return ff_filter_frame(outlink, out);
}
 
#if CONFIG_LUT3D_FILTER
static const AVOption lut3d_options[] = {
{ "file", "set 3D LUT file name", OFFSET(file), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
COMMON_OPTIONS
};
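/* A hypothetical usage sketch for the option above (exact CLI syntax
 * depends on the ffmpeg build):
 *
 *   ffmpeg -i in.mkv -vf lut3d=file=look.cube out.mkv
 */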
 
AVFILTER_DEFINE_CLASS(lut3d);
 
static av_cold int lut3d_init(AVFilterContext *ctx)
{
int ret;
FILE *f;
const char *ext;
LUT3DContext *lut3d = ctx->priv;
 
if (!lut3d->file) {
set_identity_matrix(lut3d, 32);
return 0;
}
 
f = fopen(lut3d->file, "r");
if (!f) {
ret = AVERROR(errno);
av_log(ctx, AV_LOG_ERROR, "%s: %s\n", lut3d->file, av_err2str(ret));
return ret;
}
 
ext = strrchr(lut3d->file, '.');
if (!ext) {
av_log(ctx, AV_LOG_ERROR, "Unable to guess the format from the extension\n");
ret = AVERROR_INVALIDDATA;
goto end;
}
ext++;
 
if (!av_strcasecmp(ext, "dat")) {
lut3d->lutsize = 33;
ret = parse_dat(ctx, f);
} else if (!av_strcasecmp(ext, "3dl")) {
ret = parse_3dl(ctx, f);
} else if (!av_strcasecmp(ext, "cube")) {
ret = parse_cube(ctx, f);
} else if (!av_strcasecmp(ext, "m3d")) {
ret = parse_m3d(ctx, f);
} else {
av_log(ctx, AV_LOG_ERROR, "Unrecognized '.%s' file type\n", ext);
ret = AVERROR(EINVAL);
}
 
if (!ret && !lut3d->lutsize) {
av_log(ctx, AV_LOG_ERROR, "3D LUT is empty\n");
ret = AVERROR_INVALIDDATA;
}
 
end:
fclose(f);
return ret;
}
 
static const AVFilterPad lut3d_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad lut3d_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_lut3d = {
.name = "lut3d",
.description = NULL_IF_CONFIG_SMALL("Adjust colors using a 3D LUT."),
.priv_size = sizeof(LUT3DContext),
.init = lut3d_init,
.query_formats = query_formats,
.inputs = lut3d_inputs,
.outputs = lut3d_outputs,
.priv_class = &lut3d_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
#endif
 
#if CONFIG_HALDCLUT_FILTER
 
static void update_clut(LUT3DContext *lut3d, const AVFrame *frame)
{
const uint8_t *data = frame->data[0];
const int linesize = frame->linesize[0];
const int w = lut3d->clut_width;
const int step = lut3d->clut_step;
const uint8_t *rgba_map = lut3d->clut_rgba_map;
const int level = lut3d->lutsize;
 
#define LOAD_CLUT(nbits) do { \
int i, j, k, x = 0, y = 0; \
\
for (k = 0; k < level; k++) { \
for (j = 0; j < level; j++) { \
for (i = 0; i < level; i++) { \
const uint##nbits##_t *src = (const uint##nbits##_t *) \
(data + y*linesize + x*step); \
struct rgbvec *vec = &lut3d->lut[k][j][i]; \
vec->r = src[rgba_map[0]] / (float)((1<<(nbits)) - 1); \
vec->g = src[rgba_map[1]] / (float)((1<<(nbits)) - 1); \
vec->b = src[rgba_map[2]] / (float)((1<<(nbits)) - 1); \
if (++x == w) { \
x = 0; \
y++; \
} \
} \
} \
} \
} while (0)
 
if (!lut3d->clut_is16bit) LOAD_CLUT(8);
else LOAD_CLUT(16);
}
 
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
LUT3DContext *lut3d = ctx->priv;
int ret;
 
outlink->w = ctx->inputs[0]->w;
outlink->h = ctx->inputs[0]->h;
outlink->time_base = ctx->inputs[0]->time_base;
if ((ret = ff_dualinput_init(ctx, &lut3d->dinput)) < 0)
return ret;
return 0;
}
 
static int filter_frame_hald(AVFilterLink *inlink, AVFrame *inpicref)
{
LUT3DContext *s = inlink->dst->priv;
return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
}
 
static int request_frame(AVFilterLink *outlink)
{
LUT3DContext *s = outlink->src->priv;
return ff_dualinput_request_frame(&s->dinput, outlink);
}
 
static int config_clut(AVFilterLink *inlink)
{
int size, level, w, h;
AVFilterContext *ctx = inlink->dst;
LUT3DContext *lut3d = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
lut3d->clut_is16bit = 0;
switch (inlink->format) {
case AV_PIX_FMT_RGB48:
case AV_PIX_FMT_BGR48:
case AV_PIX_FMT_RGBA64:
case AV_PIX_FMT_BGRA64:
lut3d->clut_is16bit = 1;
}
 
lut3d->clut_step = av_get_padded_bits_per_pixel(desc) >> 3;
ff_fill_rgba_map(lut3d->clut_rgba_map, inlink->format);
 
if (inlink->w > inlink->h)
av_log(ctx, AV_LOG_INFO, "Padding on the right (%dpx) of the "
"Hald CLUT will be ignored\n", inlink->w - inlink->h);
else if (inlink->w < inlink->h)
av_log(ctx, AV_LOG_INFO, "Padding at the bottom (%dpx) of the "
"Hald CLUT will be ignored\n", inlink->h - inlink->w);
lut3d->clut_width = w = h = FFMIN(inlink->w, inlink->h);
 
for (level = 1; level*level*level < w; level++);
size = level*level*level;
if (size != w) {
av_log(ctx, AV_LOG_WARNING, "The Hald CLUT width does not match the level\n");
return AVERROR_INVALIDDATA;
}
av_assert0(w == h && w == size);
level *= level;
if (level > MAX_LEVEL) {
const int max_clut_level = sqrt(MAX_LEVEL);
const int max_clut_size = max_clut_level*max_clut_level*max_clut_level;
av_log(ctx, AV_LOG_ERROR, "Too large Hald CLUT "
"(maximum level is %d, or %dx%d CLUT)\n",
max_clut_level, max_clut_size, max_clut_size);
return AVERROR(EINVAL);
}
lut3d->lutsize = level;
 
return 0;
}
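/* Worked example (for illustration): a 64x64 Hald CLUT picture gives
 * w = h = 64; the search above finds level = 4 (4*4*4 == 64), and
 * level *= level then yields lutsize = 16, i.e. a 16x16x16 LUT whose
 * 4096 entries exactly cover the 64*64 pixels read by update_clut(). */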
 
static AVFrame *update_apply_clut(AVFilterContext *ctx, AVFrame *main,
const AVFrame *second)
{
AVFilterLink *inlink = ctx->inputs[0];
update_clut(ctx->priv, second);
return apply_lut(inlink, main);
}
 
static av_cold int haldclut_init(AVFilterContext *ctx)
{
LUT3DContext *lut3d = ctx->priv;
lut3d->dinput.process = update_apply_clut;
return 0;
}
 
static av_cold void haldclut_uninit(AVFilterContext *ctx)
{
LUT3DContext *lut3d = ctx->priv;
ff_dualinput_uninit(&lut3d->dinput);
}
 
static const AVOption haldclut_options[] = {
{ "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
{ "repeatlast", "continue applying the last clut after eos", OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS },
COMMON_OPTIONS
};
 
AVFILTER_DEFINE_CLASS(haldclut);
 
static const AVFilterPad haldclut_inputs[] = {
{
.name = "main",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame_hald,
.config_props = config_input,
},{
.name = "clut",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame_hald,
.config_props = config_clut,
},
{ NULL }
};
 
static const AVFilterPad haldclut_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_haldclut = {
.name = "haldclut",
.description = NULL_IF_CONFIG_SMALL("Adjust colors using a Hald CLUT."),
.priv_size = sizeof(LUT3DContext),
.init = haldclut_init,
.uninit = haldclut_uninit,
.query_formats = query_formats,
.inputs = haldclut_inputs,
.outputs = haldclut_outputs,
.priv_class = &haldclut_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
#endif
/contrib/sdk/sources/ffmpeg/libavfilter/vf_mcdeint.c
0,0 → 1,315
/*
* Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* Motion Compensation Deinterlacer
* Ported from MPlayer libmpcodecs/vf_mcdeint.c.
*
* Known Issues:
*
* The motion estimation is somewhat at the mercy of the input: if the
* input frames are created purely by spatial interpolation, then for
* example a thin black line or another random, non-interpolatable
* pattern will cause problems.
* Note: completely ignoring the "unavailable" lines during motion
* estimation did not look any better, so the most obvious solution
* would be to improve tfields or penalize problematic motion vectors.
*
* If non-iterative ME is used, then snow currently ignores the OBMC
* window and as a result sometimes creates artifacts.
*
* Only past frames are used; we should ideally use future frames too.
* Something like filtering the whole movie in the forward and then the
* backward direction seems like an interesting idea, but the current
* filter framework is FAR from supporting such things.
*
* Combining the motion-compensated image with the input image is also
* not as trivial as it seems: simply taking even lines from one and odd
* ones from the other does not work at all, as ME/MC sometimes finds
* nothing in the previous frames which matches the current one. The
* current algorithm has been found by trial and error and can almost
* certainly be improved...
*/
 
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavcodec/avcodec.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
enum MCDeintMode {
MODE_FAST = 0,
MODE_MEDIUM,
MODE_SLOW,
MODE_EXTRA_SLOW,
MODE_NB,
};
 
enum MCDeintParity {
PARITY_TFF = 0, ///< top field first
PARITY_BFF = 1, ///< bottom field first
};
 
typedef struct {
const AVClass *class;
enum MCDeintMode mode;
enum MCDeintParity parity;
int qp;
AVCodecContext *enc_ctx;
} MCDeintContext;
 
#define OFFSET(x) offsetof(MCDeintContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
 
static const AVOption mcdeint_options[] = {
{ "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_FAST}, 0, MODE_NB-1, FLAGS, .unit="mode" },
CONST("fast", NULL, MODE_FAST, "mode"),
CONST("medium", NULL, MODE_MEDIUM, "mode"),
CONST("slow", NULL, MODE_SLOW, "mode"),
CONST("extra_slow", NULL, MODE_EXTRA_SLOW, "mode"),
 
{ "parity", "set the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=PARITY_BFF}, -1, 1, FLAGS, "parity" },
CONST("tff", "assume top field first", PARITY_TFF, "parity"),
CONST("bff", "assume bottom field first", PARITY_BFF, "parity"),
 
{ "qp", "set qp", OFFSET(qp), AV_OPT_TYPE_INT, {.i64=1}, INT_MIN, INT_MAX, FLAGS },
{ NULL }
};
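/* A hypothetical usage sketch for the options above (exact CLI syntax
 * depends on the ffmpeg build):
 *
 *   ffmpeg -i interlaced.ts -vf mcdeint=mode=medium:parity=tff:qp=10 out.mkv
 */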
 
AVFILTER_DEFINE_CLASS(mcdeint);
 
static int config_props(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
MCDeintContext *mcdeint = ctx->priv;
AVCodec *enc;
AVCodecContext *enc_ctx;
AVDictionary *opts = NULL;
int ret;
 
if (!(enc = avcodec_find_encoder(AV_CODEC_ID_SNOW))) {
av_log(ctx, AV_LOG_ERROR, "Snow encoder is not enabled in libavcodec\n");
return AVERROR(EINVAL);
}
 
mcdeint->enc_ctx = avcodec_alloc_context3(enc);
if (!mcdeint->enc_ctx)
return AVERROR(ENOMEM);
enc_ctx = mcdeint->enc_ctx;
enc_ctx->width = inlink->w;
enc_ctx->height = inlink->h;
enc_ctx->time_base = (AVRational){1,25}; // meaningless
enc_ctx->gop_size = 300;
enc_ctx->max_b_frames = 0;
enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
enc_ctx->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
enc_ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
enc_ctx->global_quality = 1;
enc_ctx->me_cmp = enc_ctx->me_sub_cmp = FF_CMP_SAD;
enc_ctx->mb_cmp = FF_CMP_SSE;
av_dict_set(&opts, "memc_only", "1", 0);
 
switch (mcdeint->mode) {
case MODE_EXTRA_SLOW:
enc_ctx->refs = 3;
case MODE_SLOW:
enc_ctx->me_method = ME_ITER;
case MODE_MEDIUM:
enc_ctx->flags |= CODEC_FLAG_4MV;
enc_ctx->dia_size = 2;
case MODE_FAST:
enc_ctx->flags |= CODEC_FLAG_QPEL;
}
 
ret = avcodec_open2(enc_ctx, enc, &opts);
av_dict_free(&opts);
if (ret < 0)
return ret;
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
MCDeintContext *mcdeint = ctx->priv;
 
if (mcdeint->enc_ctx) {
avcodec_close(mcdeint->enc_ctx);
av_freep(&mcdeint->enc_ctx);
}
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum PixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
MCDeintContext *mcdeint = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpic, *frame_dec;
AVPacket pkt;
int x, y, i, ret, got_frame = 0;
 
outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpic) {
av_frame_free(&inpic);
return AVERROR(ENOMEM);
}
av_frame_copy_props(outpic, inpic);
inpic->quality = mcdeint->qp * FF_QP2LAMBDA;
 
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
 
ret = avcodec_encode_video2(mcdeint->enc_ctx, &pkt, inpic, &got_frame);
if (ret < 0)
goto end;
 
frame_dec = mcdeint->enc_ctx->coded_frame;
 
for (i = 0; i < 3; i++) {
int is_chroma = !!i;
int w = FF_CEIL_RSHIFT(inlink->w, is_chroma);
int h = FF_CEIL_RSHIFT(inlink->h, is_chroma);
int fils = frame_dec->linesize[i];
int srcs = inpic ->linesize[i];
int dsts = outpic ->linesize[i];
 
for (y = 0; y < h; y++) {
if ((y ^ mcdeint->parity) & 1) {
for (x = 0; x < w; x++) {
uint8_t *filp = &frame_dec->data[i][x + y*fils];
uint8_t *srcp = &inpic ->data[i][x + y*srcs];
uint8_t *dstp = &outpic ->data[i][x + y*dsts];
 
if (y > 0 && y < h-1){
int is_edge = x < 3 || x > w-4;
int diff0 = filp[-fils] - srcp[-srcs];
int diff1 = filp[+fils] - srcp[+srcs];
int temp = filp[0];
 
#define DELTA(j) av_clip(j, -x, w-1-x)
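/* DELTA() clamps a horizontal offset j so that x + DELTA(j) stays inside
 * [0, w-1] when probing neighbouring pixels near the frame borders */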
 
#define GET_SCORE_EDGE(j)\
FFABS(srcp[-srcs+DELTA(-1+(j))] - srcp[+srcs+DELTA(-1-(j))])+\
FFABS(srcp[-srcs+DELTA(j) ] - srcp[+srcs+DELTA( -(j))])+\
FFABS(srcp[-srcs+DELTA(1+(j)) ] - srcp[+srcs+DELTA( 1-(j))])
 
#define GET_SCORE(j)\
FFABS(srcp[-srcs-1+(j)] - srcp[+srcs-1-(j)])+\
FFABS(srcp[-srcs +(j)] - srcp[+srcs -(j)])+\
FFABS(srcp[-srcs+1+(j)] - srcp[+srcs+1-(j)])
 
#define CHECK_EDGE(j)\
{ int score = GET_SCORE_EDGE(j);\
if (score < spatial_score){\
spatial_score = score;\
diff0 = filp[-fils+DELTA(j)] - srcp[-srcs+DELTA(j)];\
diff1 = filp[+fils+DELTA(-(j))] - srcp[+srcs+DELTA(-(j))];\
 
#define CHECK(j)\
{ int score = GET_SCORE(j);\
if (score < spatial_score){\
spatial_score= score;\
diff0 = filp[-fils+(j)] - srcp[-srcs+(j)];\
diff1 = filp[+fils-(j)] - srcp[+srcs-(j)];\
 
if (is_edge) {
int spatial_score = GET_SCORE_EDGE(0) - 1;
CHECK_EDGE(-1) CHECK_EDGE(-2) }} }}
CHECK_EDGE( 1) CHECK_EDGE( 2) }} }}
} else {
int spatial_score = GET_SCORE(0) - 1;
CHECK(-1) CHECK(-2) }} }}
CHECK( 1) CHECK( 2) }} }}
}
 
 
if (diff0 + diff1 > 0)
temp -= (diff0 + diff1 - FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
else
temp -= (diff0 + diff1 + FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
*filp = *dstp = temp > 255U ? ~(temp>>31) : temp;
} else {
*dstp = *filp;
}
}
}
}
 
for (y = 0; y < h; y++) {
if (!((y ^ mcdeint->parity) & 1)) {
for (x = 0; x < w; x++) {
frame_dec->data[i][x + y*fils] =
outpic ->data[i][x + y*dsts] = inpic->data[i][x + y*srcs];
}
}
}
}
mcdeint->parity ^= 1;
 
end:
av_free_packet(&pkt);
av_frame_free(&inpic);
if (ret < 0) {
av_frame_free(&outpic);
return ret;
}
return ff_filter_frame(outlink, outpic);
}
 
static const AVFilterPad mcdeint_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
 
static const AVFilterPad mcdeint_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_mcdeint = {
.name = "mcdeint",
.description = NULL_IF_CONFIG_SMALL("Apply motion compensating deinterlacing."),
.priv_size = sizeof(MCDeintContext),
.uninit = uninit,
.query_formats = query_formats,
.inputs = mcdeint_inputs,
.outputs = mcdeint_outputs,
.priv_class = &mcdeint_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_mergeplanes.c
0,0 → 1,313
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
#include "framesync.h"
 
typedef struct InputParam {
int depth[4];
int nb_planes;
int planewidth[4];
int planeheight[4];
} InputParam;
 
typedef struct MergePlanesContext {
const AVClass *class;
int64_t mapping;
const enum AVPixelFormat out_fmt;
int nb_inputs;
int nb_planes;
int planewidth[4];
int planeheight[4];
int map[4][2];
const AVPixFmtDescriptor *outdesc;
 
FFFrameSync fs;
FFFrameSyncIn fsin[3]; /* must be immediately after fs */
} MergePlanesContext;
 
#define OFFSET(x) offsetof(MergePlanesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption mergeplanes_options[] = {
{ "mapping", "set input to output plane mapping", OFFSET(mapping), AV_OPT_TYPE_INT, {.i64=0}, 0, 0x33333333, FLAGS },
{ "format", "set output pixel format", OFFSET(out_fmt), AV_OPT_TYPE_PIXEL_FMT, {.i64=AV_PIX_FMT_YUVA444P}, .flags=FLAGS },
{ NULL }
};
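/* Worked example (for illustration): the mapping is decoded from the
 * last output plane backwards, one byte per plane, with the input index
 * in the high nibble and the input plane in the low nibble. Merging
 * three grayscale inputs into yuv444p could thus use mapping=0x001020:
 * plane 0 of input 0 becomes Y, plane 0 of input 1 becomes U and
 * plane 0 of input 2 becomes V. */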
 
AVFILTER_DEFINE_CLASS(mergeplanes);
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
MergePlanesContext *s = inlink->dst->priv;
return ff_framesync_filter_frame(&s->fs, inlink, in);
}
 
static av_cold int init(AVFilterContext *ctx)
{
MergePlanesContext *s = ctx->priv;
int64_t m = s->mapping;
int i, ret;
 
s->outdesc = av_pix_fmt_desc_get(s->out_fmt);
if (!(s->outdesc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
s->outdesc->nb_components < 2) {
av_log(ctx, AV_LOG_ERROR, "Only planar formats with more than one component are supported.\n");
return AVERROR(EINVAL);
}
s->nb_planes = av_pix_fmt_count_planes(s->out_fmt);
 
for (i = s->nb_planes - 1; i >= 0; i--) {
s->map[i][0] = m & 0xf;
m >>= 4;
s->map[i][1] = m & 0xf;
m >>= 4;
 
if (s->map[i][0] > 3 || s->map[i][1] > 3) {
av_log(ctx, AV_LOG_ERROR, "Mapping with out of range input and/or plane number.\n");
return AVERROR(EINVAL);
}
 
s->nb_inputs = FFMAX(s->nb_inputs, s->map[i][1] + 1);
}
 
av_assert0(s->nb_inputs && s->nb_inputs <= 4);
 
for (i = 0; i < s->nb_inputs; i++) {
AVFilterPad pad = { 0 };
 
pad.type = AVMEDIA_TYPE_VIDEO;
pad.name = av_asprintf("in%d", i);
if (!pad.name)
return AVERROR(ENOMEM);
pad.filter_frame = filter_frame;
 
if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0){
av_freep(&pad.name);
return ret;
}
}
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
MergePlanesContext *s = ctx->priv;
AVFilterFormats *formats = NULL;
int i;
 
s->outdesc = av_pix_fmt_desc_get(s->out_fmt);
for (i = 0; i < AV_PIX_FMT_NB; i++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
if (desc->comp[0].depth_minus1 == s->outdesc->comp[0].depth_minus1 &&
av_pix_fmt_count_planes(i) == desc->nb_components)
ff_add_format(&formats, i);
}
 
for (i = 0; i < s->nb_inputs; i++)
ff_formats_ref(formats, &ctx->inputs[i]->out_formats);
 
formats = NULL;
ff_add_format(&formats, s->out_fmt);
ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
 
return 0;
}
 
static int process_frame(FFFrameSync *fs)
{
AVFilterContext *ctx = fs->parent;
AVFilterLink *outlink = ctx->outputs[0];
MergePlanesContext *s = fs->opaque;
AVFrame *in[4] = { NULL };
AVFrame *out;
int i, ret;
 
for (i = 0; i < s->nb_inputs; i++) {
if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
return ret;
}
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
return AVERROR(ENOMEM);
out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
 
for (i = 0; i < s->nb_planes; i++) {
const int input = s->map[i][1];
const int plane = s->map[i][0];
 
av_image_copy_plane(out->data[i], out->linesize[i],
in[input]->data[plane], in[input]->linesize[plane],
s->planewidth[i], s->planeheight[i]);
}
 
return ff_filter_frame(outlink, out);
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
MergePlanesContext *s = ctx->priv;
InputParam inputsp[4];
FFFrameSyncIn *in;
int i;
 
ff_framesync_init(&s->fs, ctx, s->nb_inputs);
in = s->fs.in;
s->fs.opaque = s;
s->fs.on_event = process_frame;
 
outlink->w = ctx->inputs[0]->w;
outlink->h = ctx->inputs[0]->h;
outlink->time_base = ctx->inputs[0]->time_base;
outlink->frame_rate = ctx->inputs[0]->frame_rate;
outlink->sample_aspect_ratio = ctx->inputs[0]->sample_aspect_ratio;
 
s->planewidth[1] =
s->planewidth[2] = FF_CEIL_RSHIFT(outlink->w, s->outdesc->log2_chroma_w);
s->planewidth[0] =
s->planewidth[3] = outlink->w;
s->planeheight[1] =
s->planeheight[2] = FF_CEIL_RSHIFT(outlink->h, s->outdesc->log2_chroma_h);
s->planeheight[0] =
s->planeheight[3] = outlink->h;
 
for (i = 0; i < s->nb_inputs; i++) {
InputParam *inputp = &inputsp[i];
AVFilterLink *inlink = ctx->inputs[i];
const AVPixFmtDescriptor *indesc = av_pix_fmt_desc_get(inlink->format);
int j;
 
if (outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
av_log(ctx, AV_LOG_ERROR, "input #%d link %s SAR %d:%d "
"does not match output link %s SAR %d:%d\n",
i, ctx->input_pads[i].name,
inlink->sample_aspect_ratio.num,
inlink->sample_aspect_ratio.den,
ctx->output_pads[0].name,
outlink->sample_aspect_ratio.num,
outlink->sample_aspect_ratio.den);
return AVERROR(EINVAL);
}
 
inputp->planewidth[1] =
inputp->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, indesc->log2_chroma_w);
inputp->planewidth[0] =
inputp->planewidth[3] = inlink->w;
inputp->planeheight[1] =
inputp->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, indesc->log2_chroma_h);
inputp->planeheight[0] =
inputp->planeheight[3] = inlink->h;
inputp->nb_planes = av_pix_fmt_count_planes(inlink->format);
 
for (j = 0; j < inputp->nb_planes; j++)
inputp->depth[j] = indesc->comp[j].depth_minus1 + 1;
 
in[i].time_base = inlink->time_base;
in[i].sync = 1;
in[i].before = EXT_STOP;
in[i].after = EXT_STOP;
}
 
for (i = 0; i < s->nb_planes; i++) {
const int input = s->map[i][1];
const int plane = s->map[i][0];
InputParam *inputp = &inputsp[input];
 
if (plane + 1 > inputp->nb_planes) {
av_log(ctx, AV_LOG_ERROR, "input %d does not have %d plane\n",
input, plane);
goto fail;
}
if (s->outdesc->comp[i].depth_minus1 + 1 != inputp->depth[plane]) {
av_log(ctx, AV_LOG_ERROR, "output plane %d depth %d does not "
"match input %d plane %d depth %d\n",
i, s->outdesc->comp[i].depth_minus1 + 1,
input, plane, inputp->depth[plane]);
goto fail;
}
if (s->planewidth[i] != inputp->planewidth[plane]) {
av_log(ctx, AV_LOG_ERROR, "output plane %d width %d does not "
"match input %d plane %d width %d\n",
i, s->planewidth[i],
input, plane, inputp->planewidth[plane]);
goto fail;
}
if (s->planeheight[i] != inputp->planeheight[plane]) {
av_log(ctx, AV_LOG_ERROR, "output plane %d height %d does not "
"match input %d plane %d height %d\n",
i, s->planeheight[i],
input, plane, inputp->planeheight[plane]);
goto fail;
}
}
 
return ff_framesync_configure(&s->fs);
fail:
return AVERROR(EINVAL);
}
 
static int request_frame(AVFilterLink *outlink)
{
MergePlanesContext *s = outlink->src->priv;
return ff_framesync_request_frame(&s->fs, outlink);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
MergePlanesContext *s = ctx->priv;
int i;
 
ff_framesync_uninit(&s->fs);
 
for (i = 0; i < ctx->nb_inputs; i++)
av_freep(&ctx->input_pads[i].name);
}
 
static const AVFilterPad mergeplanes_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_mergeplanes = {
.name = "mergeplanes",
.description = NULL_IF_CONFIG_SMALL("Merge planes."),
.priv_size = sizeof(MergePlanesContext),
.priv_class = &mergeplanes_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = NULL,
.outputs = mergeplanes_outputs,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_mp.c
0,0 → 1,791
/*
* Copyright (c) 2011 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Parts of this file have been stolen from mplayer
*/
 
/**
* @file
*/
 
#include "avfilter.h"
#include "video.h"
#include "formats.h"
#include "internal.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
 
#include "libmpcodecs/vf.h"
#include "libmpcodecs/img_format.h"
#include "libmpcodecs/cpudetect.h"
#include "libmpcodecs/av_helpers.h"
#include "libmpcodecs/libvo/fastmemcpy.h"
 
#include "libswscale/swscale.h"
 
 
//FIXME maybe link the orig in
//XXX: identical pix_fmts must follow each other
static const struct {
int fmt;
enum AVPixelFormat pix_fmt;
} conversion_map[] = {
{IMGFMT_ARGB, AV_PIX_FMT_ARGB},
{IMGFMT_BGRA, AV_PIX_FMT_BGRA},
{IMGFMT_BGR24, AV_PIX_FMT_BGR24},
{IMGFMT_BGR16BE, AV_PIX_FMT_RGB565BE},
{IMGFMT_BGR16LE, AV_PIX_FMT_RGB565LE},
{IMGFMT_BGR15BE, AV_PIX_FMT_RGB555BE},
{IMGFMT_BGR15LE, AV_PIX_FMT_RGB555LE},
{IMGFMT_BGR12BE, AV_PIX_FMT_RGB444BE},
{IMGFMT_BGR12LE, AV_PIX_FMT_RGB444LE},
{IMGFMT_BGR8, AV_PIX_FMT_RGB8},
{IMGFMT_BGR4, AV_PIX_FMT_RGB4},
{IMGFMT_BGR1, AV_PIX_FMT_MONOBLACK},
{IMGFMT_RGB1, AV_PIX_FMT_MONOBLACK},
{IMGFMT_RG4B, AV_PIX_FMT_BGR4_BYTE},
{IMGFMT_BG4B, AV_PIX_FMT_RGB4_BYTE},
{IMGFMT_RGB48LE, AV_PIX_FMT_RGB48LE},
{IMGFMT_RGB48BE, AV_PIX_FMT_RGB48BE},
{IMGFMT_ABGR, AV_PIX_FMT_ABGR},
{IMGFMT_RGBA, AV_PIX_FMT_RGBA},
{IMGFMT_RGB24, AV_PIX_FMT_RGB24},
{IMGFMT_RGB16BE, AV_PIX_FMT_BGR565BE},
{IMGFMT_RGB16LE, AV_PIX_FMT_BGR565LE},
{IMGFMT_RGB15BE, AV_PIX_FMT_BGR555BE},
{IMGFMT_RGB15LE, AV_PIX_FMT_BGR555LE},
{IMGFMT_RGB12BE, AV_PIX_FMT_BGR444BE},
{IMGFMT_RGB12LE, AV_PIX_FMT_BGR444LE},
{IMGFMT_RGB8, AV_PIX_FMT_BGR8},
{IMGFMT_RGB4, AV_PIX_FMT_BGR4},
{IMGFMT_BGR8, AV_PIX_FMT_PAL8},
{IMGFMT_YUY2, AV_PIX_FMT_YUYV422},
{IMGFMT_UYVY, AV_PIX_FMT_UYVY422},
{IMGFMT_NV12, AV_PIX_FMT_NV12},
{IMGFMT_NV21, AV_PIX_FMT_NV21},
{IMGFMT_Y800, AV_PIX_FMT_GRAY8},
{IMGFMT_Y8, AV_PIX_FMT_GRAY8},
{IMGFMT_YVU9, AV_PIX_FMT_YUV410P},
{IMGFMT_IF09, AV_PIX_FMT_YUV410P},
{IMGFMT_YV12, AV_PIX_FMT_YUV420P},
{IMGFMT_I420, AV_PIX_FMT_YUV420P},
{IMGFMT_IYUV, AV_PIX_FMT_YUV420P},
{IMGFMT_411P, AV_PIX_FMT_YUV411P},
{IMGFMT_422P, AV_PIX_FMT_YUV422P},
{IMGFMT_444P, AV_PIX_FMT_YUV444P},
{IMGFMT_440P, AV_PIX_FMT_YUV440P},
 
{IMGFMT_420A, AV_PIX_FMT_YUVA420P},
 
{IMGFMT_420P16_LE, AV_PIX_FMT_YUV420P16LE},
{IMGFMT_420P16_BE, AV_PIX_FMT_YUV420P16BE},
{IMGFMT_422P16_LE, AV_PIX_FMT_YUV422P16LE},
{IMGFMT_422P16_BE, AV_PIX_FMT_YUV422P16BE},
{IMGFMT_444P16_LE, AV_PIX_FMT_YUV444P16LE},
{IMGFMT_444P16_BE, AV_PIX_FMT_YUV444P16BE},
 
// YUVJ are YUV formats that use the full Y range and not just
// 16 - 235 (see colorspaces.txt).
// Currently they are all treated the same way.
{IMGFMT_YV12, AV_PIX_FMT_YUVJ420P},
{IMGFMT_422P, AV_PIX_FMT_YUVJ422P},
{IMGFMT_444P, AV_PIX_FMT_YUVJ444P},
{IMGFMT_440P, AV_PIX_FMT_YUVJ440P},
 
{IMGFMT_XVMC_MOCO_MPEG2, AV_PIX_FMT_XVMC_MPEG2_MC},
{IMGFMT_XVMC_IDCT_MPEG2, AV_PIX_FMT_XVMC_MPEG2_IDCT},
{IMGFMT_VDPAU_MPEG1, AV_PIX_FMT_VDPAU_MPEG1},
{IMGFMT_VDPAU_MPEG2, AV_PIX_FMT_VDPAU_MPEG2},
{IMGFMT_VDPAU_H264, AV_PIX_FMT_VDPAU_H264},
{IMGFMT_VDPAU_WMV3, AV_PIX_FMT_VDPAU_WMV3},
{IMGFMT_VDPAU_VC1, AV_PIX_FMT_VDPAU_VC1},
{IMGFMT_VDPAU_MPEG4, AV_PIX_FMT_VDPAU_MPEG4},
{0, AV_PIX_FMT_NONE}
};
 
extern const vf_info_t ff_vf_info_eq2;
extern const vf_info_t ff_vf_info_eq;
extern const vf_info_t ff_vf_info_fspp;
extern const vf_info_t ff_vf_info_ilpack;
extern const vf_info_t ff_vf_info_pp7;
extern const vf_info_t ff_vf_info_softpulldown;
extern const vf_info_t ff_vf_info_uspp;
 
 
static const vf_info_t* const filters[]={
&ff_vf_info_eq2,
&ff_vf_info_eq,
&ff_vf_info_fspp,
&ff_vf_info_ilpack,
&ff_vf_info_pp7,
&ff_vf_info_softpulldown,
&ff_vf_info_uspp,
 
NULL
};
 
/*
Unsupported filters
1bpp
ass
bmovl
crop
dvbscale
flip
expand
format
halfpack
lavc
lavcdeint
noformat
pp
scale
tfields
vo
yadif
zrmjpeg
*/
 
CpuCaps ff_gCpuCaps; //FIXME initialize this so optimizations work
 
enum AVPixelFormat ff_mp2ff_pix_fmt(int mp){
int i;
for(i=0; conversion_map[i].fmt && mp != conversion_map[i].fmt; i++)
;
return mp == conversion_map[i].fmt ? conversion_map[i].pix_fmt : AV_PIX_FMT_NONE;
}
 
typedef struct {
const AVClass *class;
vf_instance_t vf;
vf_instance_t next_vf;
AVFilterContext *avfctx;
int frame_returned;
char *filter;
enum AVPixelFormat in_pix_fmt;
} MPContext;
 
#define OFFSET(x) offsetof(MPContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption mp_options[] = {
{ "filter", "set MPlayer filter name and parameters", OFFSET(filter), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(mp);
 
void ff_mp_msg(int mod, int lev, const char *format, ... ){
va_list va;
va_start(va, format);
//FIXME convert lev/mod
av_vlog(NULL, AV_LOG_DEBUG, format, va);
va_end(va);
}
 
int ff_mp_msg_test(int mod, int lev){
return 123;
}
 
void ff_init_avcodec(void)
{
//we maybe should init, but it's kinda 1. unneeded 2. a bit impolite from here
}
 
//Exact copy of vf.c
void ff_vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src){
dst->pict_type= src->pict_type;
dst->fields = src->fields;
dst->qscale_type= src->qscale_type;
if(dst->width == src->width && dst->height == src->height){
dst->qstride= src->qstride;
dst->qscale= src->qscale;
}
}
 
//Exact copy of vf.c
void ff_vf_next_draw_slice(struct vf_instance *vf,unsigned char** src, int * stride,int w, int h, int x, int y){
if (vf->next->draw_slice) {
vf->next->draw_slice(vf->next,src,stride,w,h,x,y);
return;
}
if (!vf->dmpi) {
ff_mp_msg(MSGT_VFILTER,MSGL_ERR,"draw_slice: dmpi not stored by vf_%s\n", vf->info->name);
return;
}
if (!(vf->dmpi->flags & MP_IMGFLAG_PLANAR)) {
memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+vf->dmpi->bpp/8*x,
src[0], vf->dmpi->bpp/8*w, h, vf->dmpi->stride[0], stride[0]);
return;
}
memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+x, src[0],
w, h, vf->dmpi->stride[0], stride[0]);
memcpy_pic(vf->dmpi->planes[1]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[1]+(x>>vf->dmpi->chroma_x_shift),
src[1], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[1], stride[1]);
memcpy_pic(vf->dmpi->planes[2]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[2]+(x>>vf->dmpi->chroma_x_shift),
src[2], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[2], stride[2]);
}
 
//Exact copy of vf.c
void ff_vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h){
int y;
if(mpi->flags&MP_IMGFLAG_PLANAR){
y0&=~1;h+=h&1;
if(x0==0 && w==mpi->width){
// full width clear:
memset(mpi->planes[0]+mpi->stride[0]*y0,0,mpi->stride[0]*h);
memset(mpi->planes[1]+mpi->stride[1]*(y0>>mpi->chroma_y_shift),128,mpi->stride[1]*(h>>mpi->chroma_y_shift));
memset(mpi->planes[2]+mpi->stride[2]*(y0>>mpi->chroma_y_shift),128,mpi->stride[2]*(h>>mpi->chroma_y_shift));
} else
for(y=y0;y<y0+h;y+=2){
memset(mpi->planes[0]+x0+mpi->stride[0]*y,0,w);
memset(mpi->planes[0]+x0+mpi->stride[0]*(y+1),0,w);
memset(mpi->planes[1]+(x0>>mpi->chroma_x_shift)+mpi->stride[1]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
memset(mpi->planes[2]+(x0>>mpi->chroma_x_shift)+mpi->stride[2]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
}
return;
}
// packed:
for(y=y0;y<y0+h;y++){
unsigned char* dst=mpi->planes[0]+mpi->stride[0]*y+(mpi->bpp>>3)*x0;
if(mpi->flags&MP_IMGFLAG_YUV){
unsigned int* p=(unsigned int*) dst;
int size=(mpi->bpp>>3)*w/4;
int i;
#if HAVE_BIGENDIAN
#define CLEAR_PACKEDYUV_PATTERN 0x00800080
#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x80008000
#else
#define CLEAR_PACKEDYUV_PATTERN 0x80008000
#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x00800080
#endif
if(mpi->flags&MP_IMGFLAG_SWAPPED){
for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
} else {
for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN;
for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN;
}
} else
memset(dst,0,(mpi->bpp>>3)*w);
}
}
 
int ff_vf_next_query_format(struct vf_instance *vf, unsigned int fmt){
return 1;
}
 
//used by delogo
unsigned int ff_vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred){
return preferred;
}
 
mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h){
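/* the vf_instance_t passed in here is the next_vf member embedded in an
 * MPContext; recover the containing context with a container_of-style
 * offsetof() computation */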
MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, next_vf));
mp_image_t* mpi=NULL;
int w2;
int number = mp_imgtype >> 16;
 
av_assert0(vf->next == NULL); // all existing filters call this just on next
 
//vf_dint needs these as it calls ff_vf_get_image() before configuring the output
if(vf->w==0 && w>0) vf->w=w;
if(vf->h==0 && h>0) vf->h=h;
 
av_assert0(w == -1 || w >= vf->w);
av_assert0(h == -1 || h >= vf->h);
av_assert0(vf->w > 0);
av_assert0(vf->h > 0);
 
av_log(m->avfctx, AV_LOG_DEBUG, "get_image: %d:%d, vf: %d:%d\n", w,h,vf->w,vf->h);
 
if (w == -1) w = vf->w;
if (h == -1) h = vf->h;
 
w2=(mp_imgflag&MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE)?((w+15)&(~15)):w;
 
// Note: we should call libvo first to check if it supports direct rendering
// and if not, then fall back to software buffers:
switch(mp_imgtype & 0xff){
case MP_IMGTYPE_EXPORT:
if(!vf->imgctx.export_images[0]) vf->imgctx.export_images[0]=ff_new_mp_image(w2,h);
mpi=vf->imgctx.export_images[0];
break;
case MP_IMGTYPE_STATIC:
if(!vf->imgctx.static_images[0]) vf->imgctx.static_images[0]=ff_new_mp_image(w2,h);
mpi=vf->imgctx.static_images[0];
break;
case MP_IMGTYPE_TEMP:
if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
mpi=vf->imgctx.temp_images[0];
break;
case MP_IMGTYPE_IPB:
if(!(mp_imgflag&MP_IMGFLAG_READABLE)){ // B frame:
if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
mpi=vf->imgctx.temp_images[0];
break;
}
case MP_IMGTYPE_IP:
if(!vf->imgctx.static_images[vf->imgctx.static_idx]) vf->imgctx.static_images[vf->imgctx.static_idx]=ff_new_mp_image(w2,h);
mpi=vf->imgctx.static_images[vf->imgctx.static_idx];
vf->imgctx.static_idx^=1;
break;
case MP_IMGTYPE_NUMBERED:
if (number == -1) {
int i;
for (i = 0; i < NUM_NUMBERED_MPI; i++)
if (!vf->imgctx.numbered_images[i] || !vf->imgctx.numbered_images[i]->usage_count)
break;
number = i;
}
if (number < 0 || number >= NUM_NUMBERED_MPI) return NULL;
if (!vf->imgctx.numbered_images[number]) vf->imgctx.numbered_images[number] = ff_new_mp_image(w2,h);
mpi = vf->imgctx.numbered_images[number];
mpi->number = number;
break;
}
if(mpi){
mpi->type=mp_imgtype;
mpi->w=vf->w; mpi->h=vf->h;
// keep buffer allocation status & color flags only:
// mpi->flags&=~(MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE|MP_IMGFLAG_DIRECT);
mpi->flags&=MP_IMGFLAG_ALLOCATED|MP_IMGFLAG_TYPE_DISPLAYED|MP_IMGFLAGMASK_COLORS;
// accept restrictions, draw_slice and palette flags only:
mpi->flags|=mp_imgflag&(MP_IMGFLAGMASK_RESTRICTIONS|MP_IMGFLAG_DRAW_CALLBACK|MP_IMGFLAG_RGB_PALETTE);
if(!vf->draw_slice) mpi->flags&=~MP_IMGFLAG_DRAW_CALLBACK;
if(mpi->width!=w2 || mpi->height!=h){
// printf("vf.c: MPI parameters changed! %dx%d -> %dx%d \n", mpi->width,mpi->height,w2,h);
if(mpi->flags&MP_IMGFLAG_ALLOCATED){
if(mpi->width<w2 || mpi->height<h){
// need to re-allocate buffer memory:
av_free(mpi->planes[0]);
mpi->flags&=~MP_IMGFLAG_ALLOCATED;
ff_mp_msg(MSGT_VFILTER,MSGL_V,"vf.c: have to REALLOCATE buffer memory :(\n");
}
// } else {
} {
mpi->width=w2; mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
mpi->height=h; mpi->chroma_height=(h + (1<<mpi->chroma_y_shift) - 1)>>mpi->chroma_y_shift;
}
}
if(!mpi->bpp) ff_mp_image_setfmt(mpi,outfmt);
if(!(mpi->flags&MP_IMGFLAG_ALLOCATED) && mpi->type>MP_IMGTYPE_EXPORT){
 
av_assert0(!vf->get_image);
// check libvo first!
if(vf->get_image) vf->get_image(vf,mpi);
 
if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
// non-direct and not yet allocated image. allocate it!
if (!mpi->bpp) { // no way we can allocate this
ff_mp_msg(MSGT_DECVIDEO, MSGL_FATAL,
"ff_vf_get_image: Tried to allocate a format that can not be allocated!\n");
return NULL;
}
 
// check if the codec prefers an aligned stride:
if(mp_imgflag&MP_IMGFLAG_PREFER_ALIGNED_STRIDE){
int align=(mpi->flags&MP_IMGFLAG_PLANAR &&
mpi->flags&MP_IMGFLAG_YUV) ?
(8<<mpi->chroma_x_shift)-1 : 15; // -- maybe FIXME
w2=((w+align)&(~align));
if(mpi->width!=w2){
#if 0
// we have to change the width... check if we CAN do it:
int flags=vf->query_format(vf,outfmt); // should not fail
if(!(flags&3)) ff_mp_msg(MSGT_DECVIDEO,MSGL_WARN,"??? ff_vf_get_image{vf->query_format(outfmt)} failed!\n");
// printf("query -> 0x%X \n",flags);
if(flags&VFCAP_ACCEPT_STRIDE){
#endif
mpi->width=w2;
mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
// }
}
}
 
ff_mp_image_alloc_planes(mpi);
// printf("clearing img!\n");
ff_vf_mpi_clear(mpi,0,0,mpi->width,mpi->height);
}
}
av_assert0(!vf->start_slice);
if(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)
if(vf->start_slice) vf->start_slice(vf,mpi);
if(!(mpi->flags&MP_IMGFLAG_TYPE_DISPLAYED)){
ff_mp_msg(MSGT_DECVIDEO,MSGL_V,"*** [%s] %s%s mp_image_t, %dx%dx%dbpp %s %s, %d bytes\n",
"NULL"/*vf->info->name*/,
(mpi->type==MP_IMGTYPE_EXPORT)?"Exporting":
((mpi->flags&MP_IMGFLAG_DIRECT)?"Direct Rendering":"Allocating"),
(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)?" (slices)":"",
mpi->width,mpi->height,mpi->bpp,
(mpi->flags&MP_IMGFLAG_YUV)?"YUV":((mpi->flags&MP_IMGFLAG_SWAPPED)?"BGR":"RGB"),
(mpi->flags&MP_IMGFLAG_PLANAR)?"planar":"packed",
mpi->bpp*mpi->width*mpi->height/8);
ff_mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"(imgfmt: %x, planes: %p,%p,%p strides: %d,%d,%d, chroma: %dx%d, shift: h:%d,v:%d)\n",
mpi->imgfmt, mpi->planes[0], mpi->planes[1], mpi->planes[2],
mpi->stride[0], mpi->stride[1], mpi->stride[2],
mpi->chroma_width, mpi->chroma_height, mpi->chroma_x_shift, mpi->chroma_y_shift);
mpi->flags|=MP_IMGFLAG_TYPE_DISPLAYED;
}
 
mpi->qscale = NULL;
mpi->usage_count++;
}
// printf("\rVF_MPI: %p %p %p %d %d %d \n",
// mpi->planes[0],mpi->planes[1],mpi->planes[2],
// mpi->stride[0],mpi->stride[1],mpi->stride[2]);
return mpi;
}
 
int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){
MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
AVFilterLink *outlink = m->avfctx->outputs[0];
AVFrame *picref = av_frame_alloc();
int i;
 
av_assert0(vf->next);
 
av_log(m->avfctx, AV_LOG_DEBUG, "ff_vf_next_put_image\n");
 
if (!picref)
goto fail;
 
picref->width = mpi->w;
picref->height = mpi->h;
 
picref->type = AVMEDIA_TYPE_VIDEO;
 
for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++);
picref->format = conversion_map[i].pix_fmt;
 
for(i=0; conversion_map[i].fmt && m->in_pix_fmt != conversion_map[i].pix_fmt; i++);
if (mpi->imgfmt == conversion_map[i].fmt)
picref->format = conversion_map[i].pix_fmt;
 
memcpy(picref->linesize, mpi->stride, FFMIN(sizeof(picref->linesize), sizeof(mpi->stride)));
 
for(i=0; i<4 && mpi->stride[i]; i++){
picref->data[i] = mpi->planes[i];
}
 
if(pts != MP_NOPTS_VALUE)
picref->pts= pts * av_q2d(outlink->time_base);
 
if(1) { // mp buffers are currently unsupported in libavfilter, we thus must copy
AVFrame *tofree = picref;
picref = av_frame_clone(picref);
av_frame_free(&tofree);
}
 
ff_filter_frame(outlink, picref);
m->frame_returned++;
 
return 1;
fail:
av_frame_free(&picref);
return 0;
}
 
int ff_vf_next_config(struct vf_instance *vf,
int width, int height, int d_width, int d_height,
unsigned int voflags, unsigned int outfmt){
 
av_assert0(width>0 && height>0);
vf->next->w = width; vf->next->h = height;
 
return 1;
#if 0
int flags=vf->next->query_format(vf->next,outfmt);
if(!flags){
// hmm. colorspace mismatch!!!
//this is fatal for us ATM
return 0;
}
ff_mp_msg(MSGT_VFILTER,MSGL_V,"REQ: flags=0x%X req=0x%X \n",flags,vf->default_reqs);
miss=vf->default_reqs - (flags&vf->default_reqs);
if(miss&VFCAP_ACCEPT_STRIDE){
// vf requires stride support but vf->next doesn't support it!
// let's insert the 'expand' filter, it does the job for us:
vf_instance_t* vf2=vf_open_filter(vf->next,"expand",NULL);
if(!vf2) return 0; // shouldn't happen!
vf->next=vf2;
}
vf->next->w = width; vf->next->h = height;
return 1;
#endif
}
 
int ff_vf_next_control(struct vf_instance *vf, int request, void* data){
MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
av_log(m->avfctx, AV_LOG_DEBUG, "Received control %d\n", request);
return 0;
}
 
static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt){
MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
int i;
av_log(m->avfctx, AV_LOG_DEBUG, "query %X\n", fmt);
 
for(i=0; conversion_map[i].fmt; i++){
if(fmt==conversion_map[i].fmt)
return 1; // we support all
}
return 0;
}
 
 
static av_cold int init(AVFilterContext *ctx)
{
MPContext *m = ctx->priv;
int cpu_flags = av_get_cpu_flags();
char name[256];
const char *args;
int i;
 
ff_gCpuCaps.hasMMX = cpu_flags & AV_CPU_FLAG_MMX;
ff_gCpuCaps.hasMMX2 = cpu_flags & AV_CPU_FLAG_MMX2;
ff_gCpuCaps.hasSSE = cpu_flags & AV_CPU_FLAG_SSE;
ff_gCpuCaps.hasSSE2 = cpu_flags & AV_CPU_FLAG_SSE2;
ff_gCpuCaps.hasSSE3 = cpu_flags & AV_CPU_FLAG_SSE3;
ff_gCpuCaps.hasSSSE3 = cpu_flags & AV_CPU_FLAG_SSSE3;
ff_gCpuCaps.hasSSE4 = cpu_flags & AV_CPU_FLAG_SSE4;
ff_gCpuCaps.hasSSE42 = cpu_flags & AV_CPU_FLAG_SSE42;
ff_gCpuCaps.hasAVX = cpu_flags & AV_CPU_FLAG_AVX;
ff_gCpuCaps.has3DNow = cpu_flags & AV_CPU_FLAG_3DNOW;
ff_gCpuCaps.has3DNowExt = cpu_flags & AV_CPU_FLAG_3DNOWEXT;
 
m->avfctx= ctx;
 
args = m->filter;
if(!args || 1!=sscanf(args, "%255[^:=]", name)){
av_log(ctx, AV_LOG_ERROR, "Invalid parameter.\n");
return AVERROR(EINVAL);
}
args += strlen(name);
if (args[0] == '=')
args++;
 
for(i=0; ;i++){
if(!filters[i] || !strcmp(name, filters[i]->name))
break;
}
 
if(!filters[i]){
av_log(ctx, AV_LOG_ERROR, "Unknown filter %s\n", name);
return AVERROR(EINVAL);
}
 
av_log(ctx, AV_LOG_WARNING,
"'%s' is a wrapped MPlayer filter (libmpcodecs). This filter may be removed\n"
"once it has been ported to a native libavfilter.\n", name);
 
memset(&m->vf,0,sizeof(m->vf));
m->vf.info= filters[i];
 
m->vf.next = &m->next_vf;
m->vf.put_image = ff_vf_next_put_image;
m->vf.config = ff_vf_next_config;
m->vf.query_format= vf_default_query_format;
m->vf.control = ff_vf_next_control;
m->vf.default_caps=VFCAP_ACCEPT_STRIDE;
m->vf.default_reqs=0;
if(m->vf.info->opts)
av_log(ctx, AV_LOG_ERROR, "opts / m_struct_set is unsupported\n");
#if 0
if(vf->info->opts) { // vf_vo get some special argument
const m_struct_t* st = vf->info->opts;
void* vf_priv = m_struct_alloc(st);
int n;
for(n = 0 ; args && args[2*n] ; n++)
m_struct_set(st,vf_priv,args[2*n],args[2*n+1]);
vf->priv = vf_priv;
args = NULL;
} else // Otherwise we should have the '_oldargs_'
if(args && !strcmp(args[0],"_oldargs_"))
args = (char**)args[1];
else
args = NULL;
#endif
if(m->vf.info->vf_open(&m->vf, (char*)args)<=0){
av_log(ctx, AV_LOG_ERROR, "vf_open() of %s with arg=%s failed\n", name, args);
return -1;
}
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
MPContext *m = ctx->priv;
vf_instance_t *vf = &m->vf;
 
while(vf){
vf_instance_t *next = vf->next;
if(vf->uninit)
vf->uninit(vf);
ff_free_mp_image(vf->imgctx.static_images[0]);
ff_free_mp_image(vf->imgctx.static_images[1]);
ff_free_mp_image(vf->imgctx.temp_images[0]);
ff_free_mp_image(vf->imgctx.export_images[0]);
vf = next;
}
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *avfmts=NULL;
MPContext *m = ctx->priv;
enum AVPixelFormat lastpixfmt = AV_PIX_FMT_NONE;
int i;
 
for(i=0; conversion_map[i].fmt; i++){
av_log(ctx, AV_LOG_DEBUG, "query: %X\n", conversion_map[i].fmt);
if(m->vf.query_format(&m->vf, conversion_map[i].fmt)){
av_log(ctx, AV_LOG_DEBUG, "supported,adding\n");
if (conversion_map[i].pix_fmt != lastpixfmt) {
ff_add_format(&avfmts, conversion_map[i].pix_fmt);
lastpixfmt = conversion_map[i].pix_fmt;
}
}
}
 
if (!avfmts)
return -1;
 
//We assume all allowed input formats are also allowed output formats
ff_set_common_formats(ctx, avfmts);
return 0;
}
 
static int config_inprops(AVFilterLink *inlink)
{
MPContext *m = inlink->dst->priv;
int i;
for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
 
av_assert0(conversion_map[i].fmt && inlink->w && inlink->h);
 
m->vf.fmt.have_configured = 1;
m->vf.fmt.orig_height = inlink->h;
m->vf.fmt.orig_width = inlink->w;
m->vf.fmt.orig_fmt = conversion_map[i].fmt;
 
if(m->vf.config(&m->vf, inlink->w, inlink->h, inlink->w, inlink->h, 0, conversion_map[i].fmt)<=0)
return -1;
 
return 0;
}
 
static int config_outprops(AVFilterLink *outlink)
{
MPContext *m = outlink->src->priv;
 
outlink->w = m->next_vf.w;
outlink->h = m->next_vf.h;
 
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
MPContext *m = outlink->src->priv;
int ret;
 
av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame\n");
 
for(m->frame_returned=0; !m->frame_returned;){
ret=ff_request_frame(outlink->src->inputs[0]);
if(ret<0)
break;
}
 
av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame ret=%d\n", ret);
return ret;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
MPContext *m = inlink->dst->priv;
int i;
double pts= MP_NOPTS_VALUE;
mp_image_t* mpi = ff_new_mp_image(inpic->width, inpic->height);
 
if(inpic->pts != AV_NOPTS_VALUE)
pts= inpic->pts / av_q2d(inlink->time_base);
 
for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
ff_mp_image_setfmt(mpi,conversion_map[i].fmt);
m->in_pix_fmt = inlink->format;
 
memcpy(mpi->planes, inpic->data, FFMIN(sizeof(inpic->data) , sizeof(mpi->planes)));
memcpy(mpi->stride, inpic->linesize, FFMIN(sizeof(inpic->linesize), sizeof(mpi->stride)));
 
if (inpic->interlaced_frame)
mpi->fields |= MP_IMGFIELD_INTERLACED;
if (inpic->top_field_first)
mpi->fields |= MP_IMGFIELD_TOP_FIRST;
if (inpic->repeat_pict)
mpi->fields |= MP_IMGFIELD_REPEAT_FIRST;
 
// mpi->flags|=MP_IMGFLAG_ALLOCATED; ?
mpi->flags |= MP_IMGFLAG_READABLE;
if(!av_frame_is_writable(inpic))
mpi->flags |= MP_IMGFLAG_PRESERVE;
if(m->vf.put_image(&m->vf, mpi, pts) == 0){
av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n");
}else{
av_frame_free(&inpic);
}
ff_free_mp_image(mpi);
return 0;
}
 
static const AVFilterPad mp_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_inprops,
},
{ NULL }
};
 
static const AVFilterPad mp_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_outprops,
},
{ NULL }
};
 
AVFilter avfilter_vf_mp = {
.name = "mp",
.description = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
.init = init,
.uninit = uninit,
.priv_size = sizeof(MPContext),
.query_formats = query_formats,
.inputs = mp_inputs,
.outputs = mp_outputs,
.priv_class = &mp_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_mpdecimate.c
0,0 → 1,257
/*
* Copyright (c) 2003 Rich Felker
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file mpdecimate filter, ported from libmpcodecs/vf_decimate.c by
* Rich Felker.
*/
 
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "libavcodec/dsputil.h"
#include "avfilter.h"
#include "internal.h"
#include "formats.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
int lo, hi; ///< lower and upper thresholds on the difference
///< value computed for 8x8 blocks
 
float frac; ///< fraction threshold of changed blocks over the total
 
int max_drop_count; ///< if positive: maximum number of sequential frames to drop
///< if negative: minimum number of frames between two drops
 
int drop_count; ///< if positive: number of frames sequentially dropped
///< if negative: number of sequential frames which were not dropped
 
int hsub, vsub; ///< chroma subsampling values
AVFrame *ref; ///< reference picture
DSPContext dspctx; ///< context providing optimized diff routines
AVCodecContext *avctx; ///< codec context required for the DSPContext
} DecimateContext;
 
#define OFFSET(x) offsetof(DecimateContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption mpdecimate_options[] = {
{ "max", "set the maximum number of consecutive dropped frames (positive), or the minimum interval between dropped frames (negative)",
OFFSET(max_drop_count), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS },
{ "hi", "set high dropping threshold", OFFSET(hi), AV_OPT_TYPE_INT, {.i64=64*12}, INT_MIN, INT_MAX, FLAGS },
{ "lo", "set low dropping threshold", OFFSET(lo), AV_OPT_TYPE_INT, {.i64=64*5}, INT_MIN, INT_MAX, FLAGS },
{ "frac", "set fraction dropping threshold", OFFSET(frac), AV_OPT_TYPE_FLOAT, {.dbl=0.33}, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(mpdecimate);
 
/**
* Return 1 if the two planes are different, 0 otherwise.
*/
static int diff_planes(AVFilterContext *ctx,
uint8_t *cur, uint8_t *ref, int linesize,
int w, int h)
{
DecimateContext *decimate = ctx->priv;
DSPContext *dspctx = &decimate->dspctx;
 
int x, y;
int d, c = 0;
int t = (w/16)*(h/16)*decimate->frac;
int16_t block[8*8];
 
/* compute difference for blocks of 8x8 bytes */
for (y = 0; y < h-7; y += 4) {
for (x = 8; x < w-7; x += 4) {
dspctx->diff_pixels(block,
cur+x+y*linesize,
ref+x+y*linesize, linesize);
d = dspctx->sum_abs_dctelem(block);
if (d > decimate->hi)
return 1;
if (d > decimate->lo) {
c++;
if (c > t)
return 1;
}
}
}
return 0;
}
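/* Worked example (for illustration): for a 640x480 luma plane with the
 * default frac=0.33, t = (640/16)*(480/16)*0.33 = 396, so the plane is
 * reported as different as soon as one 8x8 block exceeds the "hi"
 * threshold or more than 396 blocks exceed the "lo" threshold. */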
 
/**
* Tell whether the frame should be decimated, i.e. whether it is not
* much different from the reference frame ref.
*/
static int decimate_frame(AVFilterContext *ctx,
AVFrame *cur, AVFrame *ref)
{
DecimateContext *decimate = ctx->priv;
int plane;
 
if (decimate->max_drop_count > 0 &&
decimate->drop_count >= decimate->max_drop_count)
return 0;
if (decimate->max_drop_count < 0 &&
(decimate->drop_count-1) > decimate->max_drop_count)
return 0;
 
for (plane = 0; ref->data[plane] && ref->linesize[plane]; plane++) {
int vsub = plane == 1 || plane == 2 ? decimate->vsub : 0;
int hsub = plane == 1 || plane == 2 ? decimate->hsub : 0;
if (diff_planes(ctx,
cur->data[plane], ref->data[plane], ref->linesize[plane],
FF_CEIL_RSHIFT(ref->width, hsub),
FF_CEIL_RSHIFT(ref->height, vsub)))
return 0;
}
 
return 1;
}
 
static av_cold int init(AVFilterContext *ctx)
{
DecimateContext *decimate = ctx->priv;
 
av_log(ctx, AV_LOG_VERBOSE, "max_drop_count:%d hi:%d lo:%d frac:%f\n",
decimate->max_drop_count, decimate->hi, decimate->lo, decimate->frac);
 
decimate->avctx = avcodec_alloc_context3(NULL);
if (!decimate->avctx)
return AVERROR(ENOMEM);
avpriv_dsputil_init(&decimate->dspctx, decimate->avctx);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
DecimateContext *decimate = ctx->priv;
av_frame_free(&decimate->ref);
if (decimate->avctx) {
avcodec_close(decimate->avctx);
av_freep(&decimate->avctx);
}
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
DecimateContext *decimate = ctx->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
decimate->hsub = pix_desc->log2_chroma_w;
decimate->vsub = pix_desc->log2_chroma_h;
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *cur)
{
DecimateContext *decimate = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int ret;
 
if (decimate->ref && decimate_frame(inlink->dst, cur, decimate->ref)) {
decimate->drop_count = FFMAX(1, decimate->drop_count+1);
} else {
av_frame_free(&decimate->ref);
decimate->ref = cur;
decimate->drop_count = FFMIN(-1, decimate->drop_count-1);
 
if ((ret = ff_filter_frame(outlink, av_frame_clone(cur))) < 0)
return ret;
}
 
av_log(inlink->dst, AV_LOG_DEBUG,
"%s pts:%s pts_time:%s drop_count:%d\n",
decimate->drop_count > 0 ? "drop" : "keep",
av_ts2str(cur->pts), av_ts2timestr(cur->pts, &inlink->time_base),
decimate->drop_count);
 
if (decimate->drop_count > 0)
av_frame_free(&cur);
 
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
DecimateContext *decimate = outlink->src->priv;
AVFilterLink *inlink = outlink->src->inputs[0];
int ret;
 
do {
ret = ff_request_frame(inlink);
} while (decimate->drop_count > 0 && ret >= 0);
 
return ret;
}
 
static const AVFilterPad mpdecimate_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad mpdecimate_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_mpdecimate = {
.name = "mpdecimate",
.description = NULL_IF_CONFIG_SMALL("Remove near-duplicate frames."),
.init = init,
.uninit = uninit,
.priv_size = sizeof(DecimateContext),
.priv_class = &mpdecimate_class,
.query_formats = query_formats,
.inputs = mpdecimate_inputs,
.outputs = mpdecimate_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_noise.c
0,0 → 1,488
/*
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* noise generator
*/
 
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/lfg.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/x86/asm.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
#define MAX_NOISE 5120
#define MAX_SHIFT 1024
#define MAX_RES (MAX_NOISE-MAX_SHIFT)
 
#define NOISE_UNIFORM 1
#define NOISE_TEMPORAL 2
#define NOISE_AVERAGED 8
#define NOISE_PATTERN 16
 
typedef struct {
int strength;
unsigned flags;
AVLFG lfg;
int seed;
int8_t *noise;
int8_t *prev_shift[MAX_RES][3];
} FilterParams;
 
typedef struct {
const AVClass *class;
int nb_planes;
int bytewidth[4];
int height[4];
FilterParams all;
FilterParams param[4];
int rand_shift[MAX_RES];
int rand_shift_init;
void (*line_noise)(uint8_t *dst, const uint8_t *src, int8_t *noise, int len, int shift);
void (*line_noise_avg)(uint8_t *dst, const uint8_t *src, int len, int8_t **shift);
} NoiseContext;
 
typedef struct ThreadData {
AVFrame *in, *out;
} ThreadData;
 
#define OFFSET(x) offsetof(NoiseContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
#define NOISE_PARAMS(name, x, param) \
{#name"_seed", "set component #"#x" noise seed", OFFSET(param.seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, FLAGS}, \
{#name"_strength", "set component #"#x" strength", OFFSET(param.strength), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, FLAGS}, \
{#name"s", "set component #"#x" strength", OFFSET(param.strength), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, FLAGS}, \
{#name"_flags", "set component #"#x" flags", OFFSET(param.flags), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, 31, FLAGS, #name"_flags"}, \
{#name"f", "set component #"#x" flags", OFFSET(param.flags), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, 31, FLAGS, #name"_flags"}, \
{"a", "averaged noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_AVERAGED}, 0, 0, FLAGS, #name"_flags"}, \
{"p", "(semi)regular pattern", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_PATTERN}, 0, 0, FLAGS, #name"_flags"}, \
{"t", "temporal noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_TEMPORAL}, 0, 0, FLAGS, #name"_flags"}, \
{"u", "uniform noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_UNIFORM}, 0, 0, FLAGS, #name"_flags"},
 
static const AVOption noise_options[] = {
NOISE_PARAMS(all, 0, all)
NOISE_PARAMS(c0, 0, param[0])
NOISE_PARAMS(c1, 1, param[1])
NOISE_PARAMS(c2, 2, param[2])
NOISE_PARAMS(c3, 3, param[3])
{NULL}
};
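 
/* Example usage (illustrative, using the short aliases defined above):
 * "noise=alls=20:allf=t+u" adds temporal uniform noise of strength 20
 * to all components. */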
 
AVFILTER_DEFINE_CLASS(noise);
 
static const int8_t patt[4] = { -1, 0, 1, 0 };
 
#define RAND_N(range) ((int) ((double) range * av_lfg_get(lfg) / (UINT_MAX + 1.0)))
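/* RAND_N maps a uniform 32-bit LFG sample to an integer in [0, range) */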
static av_cold int init_noise(NoiseContext *n, int comp)
{
int8_t *noise = av_malloc(MAX_NOISE * sizeof(int8_t));
FilterParams *fp = &n->param[comp];
AVLFG *lfg = &n->param[comp].lfg;
int strength = fp->strength;
int flags = fp->flags;
int i, j;
 
if (!noise)
return AVERROR(ENOMEM);
 
av_lfg_init(&fp->lfg, fp->seed);
 
for (i = 0, j = 0; i < MAX_NOISE; i++, j++) {
if (flags & NOISE_UNIFORM) {
if (flags & NOISE_AVERAGED) {
if (flags & NOISE_PATTERN) {
noise[i] = (RAND_N(strength) - strength / 2) / 6
+ patt[j % 4] * strength * 0.25 / 3;
} else {
noise[i] = (RAND_N(strength) - strength / 2) / 3;
}
} else {
if (flags & NOISE_PATTERN) {
noise[i] = (RAND_N(strength) - strength / 2) / 2
+ patt[j % 4] * strength * 0.25;
} else {
noise[i] = RAND_N(strength) - strength / 2;
}
}
} else {
double x1, x2, w, y1;
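/* polar (Marsaglia) Box-Muller transform: rejection-sample (x1, x2)
 * inside the unit disc, then map to a Gaussian deviate y1 */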
do {
x1 = 2.0 * av_lfg_get(lfg) / (float)UINT_MAX - 1.0;
x2 = 2.0 * av_lfg_get(lfg) / (float)UINT_MAX - 1.0;
w = x1 * x1 + x2 * x2;
} while (w >= 1.0);
 
w = sqrt((-2.0 * log(w)) / w);
y1 = x1 * w;
y1 *= strength / sqrt(3.0);
if (flags & NOISE_PATTERN) {
y1 /= 2;
y1 += patt[j % 4] * strength * 0.35;
}
y1 = av_clipf(y1, -128, 127);
if (flags & NOISE_AVERAGED)
y1 /= 3.0;
noise[i] = (int)y1;
}
if (RAND_N(6) == 0)
j--;
}
 
for (i = 0; i < MAX_RES; i++)
for (j = 0; j < 3; j++)
fp->prev_shift[i][j] = noise + (av_lfg_get(lfg) & (MAX_SHIFT - 1));
 
if (!n->rand_shift_init) {
for (i = 0; i < MAX_RES; i++)
n->rand_shift[i] = av_lfg_get(lfg) & (MAX_SHIFT - 1);
n->rand_shift_init = 1;
}
 
fp->noise = noise;
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
int fmt;
 
for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
if (desc->flags & AV_PIX_FMT_FLAG_PLANAR && !((desc->comp[0].depth_minus1 + 1) & 7))
ff_add_format(&formats, fmt);
}
 
ff_set_common_formats(ctx, formats);
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
NoiseContext *n = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int ret;
 
n->nb_planes = av_pix_fmt_count_planes(inlink->format);
 
if ((ret = av_image_fill_linesizes(n->bytewidth, inlink->format, inlink->w)) < 0)
return ret;
 
n->height[1] = n->height[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
n->height[0] = n->height[3] = inlink->h;
 
return 0;
}
 
static inline void line_noise_c(uint8_t *dst, const uint8_t *src, int8_t *noise,
int len, int shift)
{
int i;
 
noise += shift;
for (i = 0; i < len; i++) {
int v = src[i] + noise[i];
 
dst[i] = av_clip_uint8(v);
}
}
 
#define ASMALIGN(ZEROBITS) ".p2align " #ZEROBITS "\n\t"
 
static void line_noise_mmx(uint8_t *dst, const uint8_t *src,
int8_t *noise, int len, int shift)
{
#if HAVE_MMX_INLINE
x86_reg mmx_len= len&(~7);
noise+=shift;
 
__asm__ volatile(
"mov %3, %%"REG_a" \n\t"
"pcmpeqb %%mm7, %%mm7 \n\t"
"psllw $15, %%mm7 \n\t"
"packsswb %%mm7, %%mm7 \n\t"
ASMALIGN(4)
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq (%1, %%"REG_a"), %%mm1 \n\t"
"pxor %%mm7, %%mm0 \n\t"
"paddsb %%mm1, %%mm0 \n\t"
"pxor %%mm7, %%mm0 \n\t"
"movq %%mm0, (%2, %%"REG_a") \n\t"
"add $8, %%"REG_a" \n\t"
" js 1b \n\t"
:: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
: "%"REG_a
);
if (mmx_len!=len)
line_noise_c(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
#endif
}
 
static void line_noise_mmxext(uint8_t *dst, const uint8_t *src,
int8_t *noise, int len, int shift)
{
#if HAVE_MMXEXT_INLINE
x86_reg mmx_len= len&(~7);
noise+=shift;
 
__asm__ volatile(
"mov %3, %%"REG_a" \n\t"
"pcmpeqb %%mm7, %%mm7 \n\t"
"psllw $15, %%mm7 \n\t"
"packsswb %%mm7, %%mm7 \n\t"
ASMALIGN(4)
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq (%1, %%"REG_a"), %%mm1 \n\t"
"pxor %%mm7, %%mm0 \n\t"
"paddsb %%mm1, %%mm0 \n\t"
"pxor %%mm7, %%mm0 \n\t"
"movntq %%mm0, (%2, %%"REG_a") \n\t"
"add $8, %%"REG_a" \n\t"
" js 1b \n\t"
:: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
: "%"REG_a
);
if (mmx_len != len)
line_noise_c(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
#endif
}
 
static inline void line_noise_avg_c(uint8_t *dst, const uint8_t *src,
int len, int8_t **shift)
{
int i;
int8_t *src2 = (int8_t*)src;
 
for (i = 0; i < len; i++) {
const int n = shift[0][i] + shift[1][i] + shift[2][i];
dst[i] = src2[i] + ((n * src2[i]) >> 7);
}
}
 
static inline void line_noise_avg_mmx(uint8_t *dst, const uint8_t *src,
int len, int8_t **shift)
{
#if HAVE_MMX_INLINE
x86_reg mmx_len= len&(~7);
 
__asm__ volatile(
"mov %5, %%"REG_a" \n\t"
ASMALIGN(4)
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm1 \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"paddb (%2, %%"REG_a"), %%mm1 \n\t"
"paddb (%3, %%"REG_a"), %%mm1 \n\t"
"movq %%mm0, %%mm2 \n\t"
"movq %%mm1, %%mm3 \n\t"
"punpcklbw %%mm0, %%mm0 \n\t"
"punpckhbw %%mm2, %%mm2 \n\t"
"punpcklbw %%mm1, %%mm1 \n\t"
"punpckhbw %%mm3, %%mm3 \n\t"
"pmulhw %%mm0, %%mm1 \n\t"
"pmulhw %%mm2, %%mm3 \n\t"
"paddw %%mm1, %%mm1 \n\t"
"paddw %%mm3, %%mm3 \n\t"
"paddw %%mm0, %%mm1 \n\t"
"paddw %%mm2, %%mm3 \n\t"
"psrlw $8, %%mm1 \n\t"
"psrlw $8, %%mm3 \n\t"
"packuswb %%mm3, %%mm1 \n\t"
"movq %%mm1, (%4, %%"REG_a") \n\t"
"add $8, %%"REG_a" \n\t"
" js 1b \n\t"
:: "r" (src+mmx_len), "r" (shift[0]+mmx_len), "r" (shift[1]+mmx_len), "r" (shift[2]+mmx_len),
"r" (dst+mmx_len), "g" (-mmx_len)
: "%"REG_a
);
 
if (mmx_len != len){
int8_t *shift2[3]={shift[0]+mmx_len, shift[1]+mmx_len, shift[2]+mmx_len};
line_noise_avg_c(dst+mmx_len, src+mmx_len, len-mmx_len, shift2);
}
#endif
}
 
static void noise(uint8_t *dst, const uint8_t *src,
int dst_linesize, int src_linesize,
int width, int start, int end, NoiseContext *n, int comp)
{
FilterParams *p = &n->param[comp];
int8_t *noise = p->noise;
const int flags = p->flags;
AVLFG *lfg = &p->lfg;
int shift, y;
 
if (!noise) {
if (dst != src)
av_image_copy_plane(dst, dst_linesize, src, src_linesize, width, end - start);
return;
}
 
for (y = start; y < end; y++) {
const int ix = y & (MAX_RES - 1);
if (flags & NOISE_TEMPORAL)
shift = av_lfg_get(lfg) & (MAX_SHIFT - 1);
else
shift = n->rand_shift[ix];
 
if (flags & NOISE_AVERAGED) {
n->line_noise_avg(dst, src, width, p->prev_shift[ix]);
p->prev_shift[ix][shift & 3] = noise + shift;
} else {
n->line_noise(dst, src, noise, width, shift);
}
dst += dst_linesize;
src += src_linesize;
}
}
 
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
NoiseContext *s = ctx->priv;
ThreadData *td = arg;
int plane;
 
for (plane = 0; plane < s->nb_planes; plane++) {
const int height = s->height[plane];
const int start = (height * jobnr ) / nb_jobs;
const int end = (height * (jobnr+1)) / nb_jobs;
noise(td->out->data[plane] + start * td->out->linesize[plane],
td->in->data[plane] + start * td->in->linesize[plane],
td->out->linesize[plane], td->in->linesize[plane],
s->bytewidth[plane], start, end, s, plane);
}
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
NoiseContext *n = ctx->priv;
ThreadData td;
AVFrame *out;
 
if (av_frame_is_writable(inpicref)) {
out = inpicref;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&inpicref);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, inpicref);
}
 
td.in = inpicref; td.out = out;
ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(n->height[0], ctx->graph->nb_threads));
emms_c();
 
if (inpicref != out)
av_frame_free(&inpicref);
return ff_filter_frame(outlink, out);
}
 
static av_cold int init(AVFilterContext *ctx)
{
NoiseContext *n = ctx->priv;
int ret, i;
int cpu_flags = av_get_cpu_flags();
 
for (i = 0; i < 4; i++) {
if (n->all.seed >= 0)
n->param[i].seed = n->all.seed;
else
n->param[i].seed = 123457;
if (n->all.strength)
n->param[i].strength = n->all.strength;
if (n->all.flags)
n->param[i].flags = n->all.flags;
}
 
for (i = 0; i < 4; i++) {
if (n->param[i].strength && ((ret = init_noise(n, i)) < 0))
return ret;
}
 
n->line_noise = line_noise_c;
n->line_noise_avg = line_noise_avg_c;
 
if (HAVE_MMX_INLINE &&
cpu_flags & AV_CPU_FLAG_MMX) {
n->line_noise = line_noise_mmx;
n->line_noise_avg = line_noise_avg_mmx;
}
if (HAVE_MMXEXT_INLINE &&
cpu_flags & AV_CPU_FLAG_MMXEXT)
n->line_noise = line_noise_mmxext;
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
NoiseContext *n = ctx->priv;
int i;
 
for (i = 0; i < 4; i++)
av_freep(&n->param[i].noise);
}
 
static const AVFilterPad noise_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad noise_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_noise = {
.name = "noise",
.description = NULL_IF_CONFIG_SMALL("Add noise."),
.priv_size = sizeof(NoiseContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = noise_inputs,
.outputs = noise_outputs,
.priv_class = &noise_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_null.c
0,0 → 1,50
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* null video filter
*/
 
#include "libavutil/internal.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
static const AVFilterPad avfilter_vf_null_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_null_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_null = {
.name = "null",
.description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
.inputs = avfilter_vf_null_inputs,
.outputs = avfilter_vf_null_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_overlay.c
0,0 → 1,637
/*
* Copyright (c) 2010 Stefano Sabatini
* Copyright (c) 2010 Baptiste Coudurier
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* overlay one video on top of another
*/
 
#include "avfilter.h"
#include "formats.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/avstring.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "internal.h"
#include "dualinput.h"
#include "drawutils.h"
#include "video.h"
 
static const char *const var_names[] = {
"main_w", "W", ///< width of the main video
"main_h", "H", ///< height of the main video
"overlay_w", "w", ///< width of the overlay video
"overlay_h", "h", ///< height of the overlay video
"hsub",
"vsub",
"x",
"y",
"n", ///< number of frame
"pos", ///< position in the file
"t", ///< timestamp expressed in seconds
NULL
};
 
enum var_name {
VAR_MAIN_W, VAR_MW,
VAR_MAIN_H, VAR_MH,
VAR_OVERLAY_W, VAR_OW,
VAR_OVERLAY_H, VAR_OH,
VAR_HSUB,
VAR_VSUB,
VAR_X,
VAR_Y,
VAR_N,
VAR_POS,
VAR_T,
VAR_VARS_NB
};
 
#define MAIN 0
#define OVERLAY 1
 
#define R 0
#define G 1
#define B 2
#define A 3
 
#define Y 0
#define U 1
#define V 2
 
typedef struct {
const AVClass *class;
int x, y; ///< position of overlayed picture
 
int allow_packed_rgb;
uint8_t main_is_packed_rgb;
uint8_t main_rgba_map[4];
uint8_t main_has_alpha;
uint8_t overlay_is_packed_rgb;
uint8_t overlay_rgba_map[4];
uint8_t overlay_has_alpha;
enum OverlayFormat { OVERLAY_FORMAT_YUV420, OVERLAY_FORMAT_YUV444, OVERLAY_FORMAT_RGB, OVERLAY_FORMAT_NB} format;
enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, EVAL_MODE_NB } eval_mode;
 
FFDualInputContext dinput;
 
int main_pix_step[4]; ///< steps per pixel for each plane of the main output
int overlay_pix_step[4]; ///< steps per pixel for each plane of the overlay
int hsub, vsub; ///< chroma subsampling values
 
double var_values[VAR_VARS_NB];
char *x_expr, *y_expr;
AVExpr *x_pexpr, *y_pexpr;
} OverlayContext;
 
static av_cold void uninit(AVFilterContext *ctx)
{
OverlayContext *s = ctx->priv;
 
ff_dualinput_uninit(&s->dinput);
av_expr_free(s->x_pexpr); s->x_pexpr = NULL;
av_expr_free(s->y_pexpr); s->y_pexpr = NULL;
}
 
static inline int normalize_xy(double d, int chroma_sub)
{
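/* truncate the evaluated offset to the chroma subsampling grid so the
 * chroma planes stay aligned; NaN (not yet computable) maps to INT_MAX */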
if (isnan(d))
return INT_MAX;
return (int)d & ~((1 << chroma_sub) - 1);
}
 
static void eval_expr(AVFilterContext *ctx)
{
OverlayContext *s = ctx->priv;
 
s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
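/* x is evaluated a second time below so that an x expression which
 * refers to y picks up the value just computed for y */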
s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
s->x = normalize_xy(s->var_values[VAR_X], s->hsub);
s->y = normalize_xy(s->var_values[VAR_Y], s->vsub);
}
 
static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
{
int ret;
AVExpr *old = NULL;
 
if (*pexpr)
old = *pexpr;
ret = av_expr_parse(pexpr, expr, var_names,
NULL, NULL, NULL, NULL, 0, log_ctx);
if (ret < 0) {
av_log(log_ctx, AV_LOG_ERROR,
"Error when evaluating the expression '%s' for %s\n",
expr, option);
*pexpr = old;
return ret;
}
 
av_expr_free(old);
return 0;
}
 
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
char *res, int res_len, int flags)
{
OverlayContext *s = ctx->priv;
int ret;
 
if (!strcmp(cmd, "x"))
ret = set_expr(&s->x_pexpr, args, cmd, ctx);
else if (!strcmp(cmd, "y"))
ret = set_expr(&s->y_pexpr, args, cmd, ctx);
else
ret = AVERROR(ENOSYS);
 
if (ret < 0)
return ret;
 
if (s->eval_mode == EVAL_MODE_INIT) {
eval_expr(ctx);
av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
s->var_values[VAR_X], s->x,
s->var_values[VAR_Y], s->y);
}
return ret;
}
 
static int query_formats(AVFilterContext *ctx)
{
OverlayContext *s = ctx->priv;
 
/* the overlay format lists contain only alpha-capable formats, to avoid conversions that would lose alpha information */
static const enum AVPixelFormat main_pix_fmts_yuv420[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE
};
static const enum AVPixelFormat overlay_pix_fmts_yuv420[] = {
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE
};
 
static const enum AVPixelFormat main_pix_fmts_yuv444[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE
};
static const enum AVPixelFormat overlay_pix_fmts_yuv444[] = {
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE
};
 
static const enum AVPixelFormat main_pix_fmts_rgb[] = {
AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_NONE
};
static const enum AVPixelFormat overlay_pix_fmts_rgb[] = {
AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
AV_PIX_FMT_NONE
};
 
AVFilterFormats *main_formats;
AVFilterFormats *overlay_formats;
 
switch (s->format) {
case OVERLAY_FORMAT_YUV420:
main_formats = ff_make_format_list(main_pix_fmts_yuv420);
overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv420);
break;
case OVERLAY_FORMAT_YUV444:
main_formats = ff_make_format_list(main_pix_fmts_yuv444);
overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv444);
break;
case OVERLAY_FORMAT_RGB:
main_formats = ff_make_format_list(main_pix_fmts_rgb);
overlay_formats = ff_make_format_list(overlay_pix_fmts_rgb);
break;
default:
av_assert0(0);
}
 
ff_formats_ref(main_formats, &ctx->inputs [MAIN ]->out_formats);
ff_formats_ref(overlay_formats, &ctx->inputs [OVERLAY]->out_formats);
ff_formats_ref(main_formats, &ctx->outputs[MAIN ]->in_formats );
 
return 0;
}
 
static const enum AVPixelFormat alpha_pix_fmts[] = {
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA,
AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE
};
 
static int config_input_main(AVFilterLink *inlink)
{
OverlayContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
 
av_image_fill_max_pixsteps(s->main_pix_step, NULL, pix_desc);
 
s->hsub = pix_desc->log2_chroma_w;
s->vsub = pix_desc->log2_chroma_h;
 
s->main_is_packed_rgb =
ff_fill_rgba_map(s->main_rgba_map, inlink->format) >= 0;
s->main_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
return 0;
}
 
static int config_input_overlay(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
OverlayContext *s = inlink->dst->priv;
int ret;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
 
av_image_fill_max_pixsteps(s->overlay_pix_step, NULL, pix_desc);
 
/* Finish the configuration by evaluating the expressions
now that both inputs are configured. */
s->var_values[VAR_MAIN_W ] = s->var_values[VAR_MW] = ctx->inputs[MAIN ]->w;
s->var_values[VAR_MAIN_H ] = s->var_values[VAR_MH] = ctx->inputs[MAIN ]->h;
s->var_values[VAR_OVERLAY_W] = s->var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
s->var_values[VAR_OVERLAY_H] = s->var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;
s->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
s->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
s->var_values[VAR_X] = NAN;
s->var_values[VAR_Y] = NAN;
s->var_values[VAR_N] = 0;
s->var_values[VAR_T] = NAN;
s->var_values[VAR_POS] = NAN;
 
if ((ret = set_expr(&s->x_pexpr, s->x_expr, "x", ctx)) < 0 ||
(ret = set_expr(&s->y_pexpr, s->y_expr, "y", ctx)) < 0)
return ret;
 
s->overlay_is_packed_rgb =
ff_fill_rgba_map(s->overlay_rgba_map, inlink->format) >= 0;
s->overlay_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
 
if (s->eval_mode == EVAL_MODE_INIT) {
eval_expr(ctx);
av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
s->var_values[VAR_X], s->x,
s->var_values[VAR_Y], s->y);
}
 
av_log(ctx, AV_LOG_VERBOSE,
"main w:%d h:%d fmt:%s overlay w:%d h:%d fmt:%s\n",
ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
av_get_pix_fmt_name(ctx->inputs[MAIN]->format),
ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format));
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
OverlayContext *s = ctx->priv;
int ret;
 
if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
return ret;
 
outlink->w = ctx->inputs[MAIN]->w;
outlink->h = ctx->inputs[MAIN]->h;
outlink->time_base = ctx->inputs[MAIN]->time_base;
 
return 0;
}
 
// divide by 255 and round to nearest
// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
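// e.g. FAST_DIV255(383) = ((383 + 128) * 257) >> 16 = 131327 >> 16 = 2,
// matching the rounded result of 383 / 255.0 = 1.502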
 
// calculate the unpremultiplied alpha, applying the general equation:
// alpha = alpha_overlay / ( (alpha_main + alpha_overlay) - (alpha_main * alpha_overlay) )
// (((x) << 16) - ((x) << 9) + (x)) is a faster version of: 255 * 255 * x
// ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)) is a faster version of: 255 * (x + y) - (x) * (y)
#define UNPREMULTIPLY_ALPHA(x, y) ((((x) << 16) - ((x) << 9) + (x)) / ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)))
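// sanity check of the two identities above: (x << 16) - (x << 9) + x
// = 65536x - 512x + x = 65025x = 255 * 255 * x, and
// ((x + y) << 8) - (x + y) - y * x = 256(x + y) - (x + y) - xy = 255(x + y) - xy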
 
/**
* Blend image in src to destination buffer dst at position (x, y).
*/
static void blend_image(AVFilterContext *ctx,
AVFrame *dst, const AVFrame *src,
int x, int y)
{
OverlayContext *s = ctx->priv;
int i, imax, j, jmax, k, kmax;
const int src_w = src->width;
const int src_h = src->height;
const int dst_w = dst->width;
const int dst_h = dst->height;
 
if (x >= dst_w || x+src_w < 0 ||
y >= dst_h || y+src_h < 0)
return; /* no intersection */
 
if (s->main_is_packed_rgb) {
uint8_t alpha; ///< the amount of overlay to blend onto main
const int dr = s->main_rgba_map[R];
const int dg = s->main_rgba_map[G];
const int db = s->main_rgba_map[B];
const int da = s->main_rgba_map[A];
const int dstep = s->main_pix_step[0];
const int sr = s->overlay_rgba_map[R];
const int sg = s->overlay_rgba_map[G];
const int sb = s->overlay_rgba_map[B];
const int sa = s->overlay_rgba_map[A];
const int sstep = s->overlay_pix_step[0];
const int main_has_alpha = s->main_has_alpha;
uint8_t *s, *sp, *d, *dp;
 
i = FFMAX(-y, 0);
sp = src->data[0] + i * src->linesize[0];
dp = dst->data[0] + (y+i) * dst->linesize[0];
 
for (imax = FFMIN(-y + dst_h, src_h); i < imax; i++) {
j = FFMAX(-x, 0);
s = sp + j * sstep;
d = dp + (x+j) * dstep;
 
for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
alpha = s[sa];
 
// if the main channel has an alpha channel, alpha has to be calculated
// to create an un-premultiplied (straight) alpha value
if (main_has_alpha && alpha != 0 && alpha != 255) {
uint8_t alpha_d = d[da];
alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
}
 
switch (alpha) {
case 0:
break;
case 255:
d[dr] = s[sr];
d[dg] = s[sg];
d[db] = s[sb];
break;
default:
// main_value = main_value * (1 - alpha) + overlay_value * alpha
// since alpha is in the range 0-255, the result must be divided by 255
d[dr] = FAST_DIV255(d[dr] * (255 - alpha) + s[sr] * alpha);
d[dg] = FAST_DIV255(d[dg] * (255 - alpha) + s[sg] * alpha);
d[db] = FAST_DIV255(d[db] * (255 - alpha) + s[sb] * alpha);
}
if (main_has_alpha) {
switch (alpha) {
case 0:
break;
case 255:
d[da] = s[sa];
break;
default:
// apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha
d[da] += FAST_DIV255((255 - d[da]) * s[sa]);
}
}
d += dstep;
s += sstep;
}
dp += dst->linesize[0];
sp += src->linesize[0];
}
} else {
const int main_has_alpha = s->main_has_alpha;
if (main_has_alpha) {
uint8_t alpha; ///< the amount of overlay to blend onto main
uint8_t *s, *sa, *d, *da;
 
i = FFMAX(-y, 0);
sa = src->data[3] + i * src->linesize[3];
da = dst->data[3] + (y+i) * dst->linesize[3];
 
for (imax = FFMIN(-y + dst_h, src_h); i < imax; i++) {
j = FFMAX(-x, 0);
s = sa + j;
d = da + x+j;
 
for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
alpha = *s;
if (alpha != 0 && alpha != 255) {
uint8_t alpha_d = *d;
alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
}
switch (alpha) {
case 0:
break;
case 255:
*d = *s;
break;
default:
// apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha
*d += FAST_DIV255((255 - *d) * *s);
}
d += 1;
s += 1;
}
da += dst->linesize[3];
sa += src->linesize[3];
}
}
for (i = 0; i < 3; i++) {
int hsub = i ? s->hsub : 0;
int vsub = i ? s->vsub : 0;
int src_wp = FF_CEIL_RSHIFT(src_w, hsub);
int src_hp = FF_CEIL_RSHIFT(src_h, vsub);
int dst_wp = FF_CEIL_RSHIFT(dst_w, hsub);
int dst_hp = FF_CEIL_RSHIFT(dst_h, vsub);
int yp = y>>vsub;
int xp = x>>hsub;
uint8_t *s, *sp, *d, *dp, *a, *ap;
 
j = FFMAX(-yp, 0);
sp = src->data[i] + j * src->linesize[i];
dp = dst->data[i] + (yp+j) * dst->linesize[i];
ap = src->data[3] + (j<<vsub) * src->linesize[3];
 
for (jmax = FFMIN(-yp + dst_hp, src_hp); j < jmax; j++) {
k = FFMAX(-xp, 0);
d = dp + xp+k;
s = sp + k;
a = ap + (k<<hsub);
 
for (kmax = FFMIN(-xp + dst_wp, src_wp); k < kmax; k++) {
int alpha_v, alpha_h, alpha;
 
// average alpha for color components to improve quality
if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) {
alpha = (a[0] + a[src->linesize[3]] +
a[1] + a[src->linesize[3]+1]) >> 2;
} else if (hsub || vsub) {
alpha_h = hsub && k+1 < src_wp ?
(a[0] + a[1]) >> 1 : a[0];
alpha_v = vsub && j+1 < src_hp ?
(a[0] + a[src->linesize[3]]) >> 1 : a[0];
alpha = (alpha_v + alpha_h) >> 1;
} else
alpha = a[0];
// if the main channel has an alpha channel, alpha has to be calculated
// to create an un-premultiplied (straight) alpha value
if (main_has_alpha && alpha != 0 && alpha != 255) {
// average alpha for color components to improve quality
uint8_t alpha_d;
if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) {
alpha_d = (d[0] + d[src->linesize[3]] +
d[1] + d[src->linesize[3]+1]) >> 2;
} else if (hsub || vsub) {
alpha_h = hsub && k+1 < src_wp ?
(d[0] + d[1]) >> 1 : d[0];
alpha_v = vsub && j+1 < src_hp ?
(d[0] + d[src->linesize[3]]) >> 1 : d[0];
alpha_d = (alpha_v + alpha_h) >> 1;
} else
alpha_d = d[0];
alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
}
*d = FAST_DIV255(*d * (255 - alpha) + *s * alpha);
s++;
d++;
a += 1 << hsub;
}
dp += dst->linesize[i];
sp += src->linesize[i];
ap += (1 << vsub) * src->linesize[3];
}
}
}
}
 
static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic,
const AVFrame *second)
{
OverlayContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
 
/* TODO: reindent */
if (s->eval_mode == EVAL_MODE_FRAME) {
int64_t pos = av_frame_get_pkt_pos(mainpic);
 
s->var_values[VAR_N] = inlink->frame_count;
s->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
NAN : mainpic->pts * av_q2d(inlink->time_base);
s->var_values[VAR_POS] = pos == -1 ? NAN : pos;
 
eval_expr(ctx);
av_log(ctx, AV_LOG_DEBUG, "n:%f t:%f pos:%f x:%f xi:%d y:%f yi:%d\n",
s->var_values[VAR_N], s->var_values[VAR_T], s->var_values[VAR_POS],
s->var_values[VAR_X], s->x,
s->var_values[VAR_Y], s->y);
}
 
blend_image(ctx, mainpic, second, s->x, s->y);
return mainpic;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
OverlayContext *s = inlink->dst->priv;
return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
}
 
static int request_frame(AVFilterLink *outlink)
{
OverlayContext *s = outlink->src->priv;
return ff_dualinput_request_frame(&s->dinput, outlink);
}
 
static av_cold int init(AVFilterContext *ctx)
{
OverlayContext *s = ctx->priv;
 
if (s->allow_packed_rgb) {
av_log(ctx, AV_LOG_WARNING,
"The rgb option is deprecated and is overriding the format option, use format instead\n");
s->format = OVERLAY_FORMAT_RGB;
}
s->dinput.process = do_blend;
return 0;
}
 
#define OFFSET(x) offsetof(OverlayContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption overlay_options[] = {
{ "x", "set the x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "y", "set the y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_FRAME}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
{ "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
{ "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
{ "rgb", "force packed RGB in input and output (deprecated)", OFFSET(allow_packed_rgb), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
{ "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
{ "format", "set output format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=OVERLAY_FORMAT_YUV420}, 0, OVERLAY_FORMAT_NB-1, FLAGS, "format" },
{ "yuv420", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420}, .flags = FLAGS, .unit = "format" },
{ "yuv444", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV444}, .flags = FLAGS, .unit = "format" },
{ "rgb", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_RGB}, .flags = FLAGS, .unit = "format" },
{ "repeatlast", "repeat overlay of the last overlay frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
{ NULL }
};
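 
/* Example usage (illustrative, using the variables from var_names above):
 * "overlay=x=main_w-overlay_w-10:y=main_h-overlay_h-10" places the overlay
 * 10 pixels away from the bottom-right corner of the main video. */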
 
AVFILTER_DEFINE_CLASS(overlay);
 
static const AVFilterPad avfilter_vf_overlay_inputs[] = {
{
.name = "main",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_main,
.filter_frame = filter_frame,
.needs_writable = 1,
},
{
.name = "overlay",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_overlay,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_overlay_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_overlay = {
.name = "overlay",
.description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
.init = init,
.uninit = uninit,
.priv_size = sizeof(OverlayContext),
.priv_class = &overlay_class,
.query_formats = query_formats,
.process_command = process_command,
.inputs = avfilter_vf_overlay_inputs,
.outputs = avfilter_vf_overlay_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_owdenoise.c
0,0 → 1,342
/*
* Copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2013 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @todo try to change to int
* @todo try lifting based implementation
* @todo optimize optimize optimize
* @todo hard thresholding
* @todo use QP to decide filter strength
* @todo wavelet normalization / least squares optimal signal vs. noise thresholds
*/
 
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
double luma_strength;
double chroma_strength;
int depth;
float *plane[16+1][4];
int linesize;
int hsub, vsub;
} OWDenoiseContext;
 
#define OFFSET(x) offsetof(OWDenoiseContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption owdenoise_options[] = {
{ "depth", "set depth", OFFSET(depth), AV_OPT_TYPE_INT, {.i64 = 8}, 8, 16, FLAGS },
{ "luma_strength", "set luma strength", OFFSET(luma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
{ "ls", "set luma strength", OFFSET(luma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
{ "chroma_strength", "set chroma strength", OFFSET(chroma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
{ "cs", "set chroma strength", OFFSET(chroma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(owdenoise);
 
DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
{ 0, 48, 12, 60, 3, 51, 15, 63 },
{ 32, 16, 44, 28, 35, 19, 47, 31 },
{ 8, 56, 4, 52, 11, 59, 7, 55 },
{ 40, 24, 36, 20, 43, 27, 39, 23 },
{ 2, 50, 14, 62, 1, 49, 13, 61 },
{ 34, 18, 46, 30, 33, 17, 45, 29 },
{ 10, 58, 6, 54, 9, 57, 5, 53 },
{ 42, 26, 38, 22, 41, 25, 37, 21 },
};
 
static const double coeff[2][5] = {
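/* the analysis filter bank: row 0 is the low-pass, row 1 the high-pass;
 * these match the CDF 9/7 biorthogonal wavelet (as used by JPEG 2000) */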
{
0.6029490182363579 * M_SQRT2,
0.2668641184428723 * M_SQRT2,
-0.07822326652898785 * M_SQRT2,
-0.01686411844287495 * M_SQRT2,
0.02674875741080976 * M_SQRT2,
},{
1.115087052456994 / M_SQRT2,
-0.5912717631142470 / M_SQRT2,
-0.05754352622849957 / M_SQRT2,
0.09127176311424948 / M_SQRT2,
}
};
 
static const double icoeff[2][5] = {
{
1.115087052456994 / M_SQRT2,
0.5912717631142470 / M_SQRT2,
-0.05754352622849957 / M_SQRT2,
-0.09127176311424948 / M_SQRT2,
},{
0.6029490182363579 * M_SQRT2,
-0.2668641184428723 * M_SQRT2,
-0.07822326652898785 * M_SQRT2,
0.01686411844287495 * M_SQRT2,
0.02674875741080976 * M_SQRT2,
}
};
 
static inline int mirror(int x, int w)
{
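/* reflect an out-of-range coordinate back into [0, w]:
 * e.g. mirror(-2, 7) == 2 and mirror(9, 7) == 5 (symmetric extension) */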
while ((unsigned)x > (unsigned)w) {
x = -x;
if (x < 0)
x += 2 * w;
}
return x;
}
 
static inline void decompose(float *dst_l, float *dst_h, const float *src,
int linesize, int w)
{
int x, i;
for (x = 0; x < w; x++) {
double sum_l = src[x * linesize] * coeff[0][0];
double sum_h = src[x * linesize] * coeff[1][0];
for (i = 1; i <= 4; i++) {
const double s = src[mirror(x - i, w - 1) * linesize]
+ src[mirror(x + i, w - 1) * linesize];
 
sum_l += coeff[0][i] * s;
sum_h += coeff[1][i] * s;
}
dst_l[x * linesize] = sum_l;
dst_h[x * linesize] = sum_h;
}
}
 
static inline void compose(float *dst, const float *src_l, const float *src_h,
int linesize, int w)
{
int x, i;
for (x = 0; x < w; x++) {
double sum_l = src_l[x * linesize] * icoeff[0][0];
double sum_h = src_h[x * linesize] * icoeff[1][0];
for (i = 1; i <= 4; i++) {
const int x0 = mirror(x - i, w - 1) * linesize;
const int x1 = mirror(x + i, w - 1) * linesize;
 
sum_l += icoeff[0][i] * (src_l[x0] + src_l[x1]);
sum_h += icoeff[1][i] * (src_h[x0] + src_h[x1]);
}
dst[x * linesize] = (sum_l + sum_h) * 0.5;
}
}
 
static inline void decompose2D(float *dst_l, float *dst_h, const float *src,
int xlinesize, int ylinesize,
int step, int w, int h)
{
int y, x;
for (y = 0; y < h; y++)
for (x = 0; x < step; x++)
decompose(dst_l + ylinesize*y + xlinesize*x,
dst_h + ylinesize*y + xlinesize*x,
src + ylinesize*y + xlinesize*x,
step * xlinesize, (w - x + step - 1) / step);
}
 
static inline void compose2D(float *dst, const float *src_l, const float *src_h,
int xlinesize, int ylinesize,
int step, int w, int h)
{
int y, x;
for (y = 0; y < h; y++)
for (x = 0; x < step; x++)
compose(dst + ylinesize*y + xlinesize*x,
src_l + ylinesize*y + xlinesize*x,
src_h + ylinesize*y + xlinesize*x,
step * xlinesize, (w - x + step - 1) / step);
}
 
static void decompose2D2(float *dst[4], float *src, float *temp[2],
int linesize, int step, int w, int h)
{
decompose2D(temp[0], temp[1], src, 1, linesize, step, w, h);
decompose2D( dst[0], dst[1], temp[0], linesize, 1, step, h, w);
decompose2D( dst[2], dst[3], temp[1], linesize, 1, step, h, w);
}
 
static void compose2D2(float *dst, float *src[4], float *temp[2],
int linesize, int step, int w, int h)
{
compose2D(temp[0], src[0], src[1], linesize, 1, step, h, w);
compose2D(temp[1], src[2], src[3], linesize, 1, step, h, w);
compose2D(dst, temp[0], temp[1], 1, linesize, step, w, h);
}
 
static void filter(OWDenoiseContext *s,
uint8_t *dst, int dst_linesize,
const uint8_t *src, int src_linesize,
int width, int height, double strength)
{
int x, y, i, j, depth = s->depth;
 
while (1<<depth > width || 1<<depth > height)
depth--;
 
for (y = 0; y < height; y++)
for(x = 0; x < width; x++)
s->plane[0][0][y*s->linesize + x] = src[y*src_linesize + x];
 
for (i = 0; i < depth; i++)
decompose2D2(s->plane[i + 1], s->plane[i][0], s->plane[0] + 1, s->linesize, 1<<i, width, height);
 
for (i = 0; i < depth; i++) {
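/* soft-threshold (shrink towards zero) the three detail subbands of each
 * level; the approximation subband plane[i + 1][0] is left untouched */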
for (j = 1; j < 4; j++) {
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
double v = s->plane[i + 1][j][y*s->linesize + x];
if (v > strength) v -= strength;
else if (v < -strength) v += strength;
else v = 0;
s->plane[i + 1][j][x + y*s->linesize] = v;
}
}
}
}
for (i = depth-1; i >= 0; i--)
compose2D2(s->plane[i][0], s->plane[i + 1], s->plane[0] + 1, s->linesize, 1<<i, width, height);
 
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
i = s->plane[0][0][y*s->linesize + x] + dither[x&7][y&7]*(1.0/64) + 1.0/128; // yes the rounding is insane but optimal :)
if ((unsigned)i > 255U) i = ~(i >> 31);
dst[y*dst_linesize + x] = i;
}
}
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
int direct = 0;
AVFilterContext *ctx = inlink->dst;
OWDenoiseContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
const int cw = FF_CEIL_RSHIFT(inlink->w, s->hsub);
const int ch = FF_CEIL_RSHIFT(inlink->h, s->vsub);
 
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
 
filter(s, out->data[0], out->linesize[0], in->data[0], in->linesize[0], inlink->w, inlink->h, s->luma_strength);
filter(s, out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, s->chroma_strength);
filter(s, out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, s->chroma_strength);
 
if (!direct) {
if (in->data[3])
av_image_copy_plane(out->data[3], out->linesize[3],
in ->data[3], in ->linesize[3],
inlink->w, inlink->h);
av_frame_free(&in);
}
 
return ff_filter_frame(outlink, out);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
int i, j;
OWDenoiseContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
const int h = FFALIGN(inlink->h, 16);
 
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
 
s->linesize = FFALIGN(inlink->w, 16);
for (j = 0; j < 4; j++) {
for (i = 0; i <= s->depth; i++) {
s->plane[i][j] = av_malloc(s->linesize * h * sizeof(s->plane[0][0][0]));
if (!s->plane[i][j])
return AVERROR(ENOMEM);
}
}
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
int i, j;
OWDenoiseContext *s = ctx->priv;
 
for (j = 0; j < 4; j++)
for (i = 0; i <= s->depth; i++)
av_freep(&s->plane[i][j]);
}
 
static const AVFilterPad owdenoise_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad owdenoise_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_owdenoise = {
.name = "owdenoise",
.description = NULL_IF_CONFIG_SMALL("Denoise using wavelets."),
.priv_size = sizeof(OWDenoiseContext),
.uninit = uninit,
.query_formats = query_formats,
.inputs = owdenoise_inputs,
.outputs = owdenoise_outputs,
.priv_class = &owdenoise_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_pad.c
0,0 → 1,402
/*
* Copyright (c) 2008 vmrsss
* Copyright (c) 2009 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* video padding filter
*/
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
#include "libavutil/colorspace.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/parseutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
 
#include "drawutils.h"
 
static const char *const var_names[] = {
"in_w", "iw",
"in_h", "ih",
"out_w", "ow",
"out_h", "oh",
"x",
"y",
"a",
"sar",
"dar",
"hsub",
"vsub",
NULL
};
 
enum var_name {
VAR_IN_W, VAR_IW,
VAR_IN_H, VAR_IH,
VAR_OUT_W, VAR_OW,
VAR_OUT_H, VAR_OH,
VAR_X,
VAR_Y,
VAR_A,
VAR_SAR,
VAR_DAR,
VAR_HSUB,
VAR_VSUB,
VARS_NB
};
 
static int query_formats(AVFilterContext *ctx)
{
ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
return 0;
}
 
typedef struct {
const AVClass *class;
int w, h; ///< output dimensions, a value of 0 will result in the input size
int x, y; ///< offsets of the input area with respect to the padded area
int in_w, in_h; ///< width and height of the padded input video, rounded down to the chroma grid to avoid chroma artifacts
 
char *w_expr; ///< width expression string
char *h_expr; ///< height expression string
char *x_expr; ///< x offset expression string
char *y_expr; ///< y offset expression string
uint8_t rgba_color[4]; ///< color for the padding area
FFDrawContext draw;
FFDrawColor color;
} PadContext;
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
PadContext *s = ctx->priv;
int ret;
double var_values[VARS_NB], res;
char *expr;
 
ff_draw_init(&s->draw, inlink->format, 0);
ff_draw_color(&s->draw, &s->color, s->rgba_color);
 
var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
var_values[VAR_A] = (double) inlink->w / inlink->h;
var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
(double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
var_values[VAR_HSUB] = 1 << s->draw.hsub_max;
var_values[VAR_VSUB] = 1 << s->draw.vsub_max;
 
/* evaluate width and height */
av_expr_parse_and_eval(&res, (expr = s->w_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx);
s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
/* evaluate the width again, as it may depend on the evaluated output height */
if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
 
/* evaluate x and y */
av_expr_parse_and_eval(&res, (expr = s->x_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx);
s->x = var_values[VAR_X] = res;
if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
s->y = var_values[VAR_Y] = res;
/* evaluate x again, as it may depend on the evaluated y value */
if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
s->x = var_values[VAR_X] = res;
 
/* sanity check params */
if (s->w < 0 || s->h < 0 || s->x < 0 || s->y < 0) {
av_log(ctx, AV_LOG_ERROR, "Negative values are not acceptable.\n");
return AVERROR(EINVAL);
}
 
if (!s->w)
s->w = inlink->w;
if (!s->h)
s->h = inlink->h;
 
s->w = ff_draw_round_to_sub(&s->draw, 0, -1, s->w);
s->h = ff_draw_round_to_sub(&s->draw, 1, -1, s->h);
s->x = ff_draw_round_to_sub(&s->draw, 0, -1, s->x);
s->y = ff_draw_round_to_sub(&s->draw, 1, -1, s->y);
s->in_w = ff_draw_round_to_sub(&s->draw, 0, -1, inlink->w);
s->in_h = ff_draw_round_to_sub(&s->draw, 1, -1, inlink->h);
 
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d x:%d y:%d color:0x%02X%02X%02X%02X\n",
inlink->w, inlink->h, s->w, s->h, s->x, s->y,
s->rgba_color[0], s->rgba_color[1], s->rgba_color[2], s->rgba_color[3]);
 
if (s->x < 0 || s->y < 0 ||
s->w <= 0 || s->h <= 0 ||
(unsigned)s->x + (unsigned)inlink->w > s->w ||
(unsigned)s->y + (unsigned)inlink->h > s->h) {
av_log(ctx, AV_LOG_ERROR,
"Input area %d:%d:%d:%d not within the padded area 0:0:%d:%d or zero-sized\n",
s->x, s->y, s->x + inlink->w, s->y + inlink->h, s->w, s->h);
return AVERROR(EINVAL);
}
 
return 0;
 
eval_fail:
av_log(NULL, AV_LOG_ERROR,
"Error when evaluating the expression '%s'\n", expr);
return ret;
 
}
 
static int config_output(AVFilterLink *outlink)
{
PadContext *s = outlink->src->priv;
 
outlink->w = s->w;
outlink->h = s->h;
return 0;
}
 
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
PadContext *s = inlink->dst->priv;
 
AVFrame *frame = ff_get_video_buffer(inlink->dst->outputs[0],
w + (s->w - s->in_w),
h + (s->h - s->in_h));
int plane;
 
if (!frame)
return NULL;
 
frame->width = w;
frame->height = h;
 
for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
int hsub = s->draw.hsub[plane];
int vsub = s->draw.vsub[plane];
frame->data[plane] += (s->x >> hsub) * s->draw.pixelstep[plane] +
(s->y >> vsub) * frame->linesize[plane];
}
 
return frame;
}
 
/* check whether each plane in this buffer can be padded without copying */
static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf)
{
int planes[4] = { -1, -1, -1, -1}, *p = planes;
int i, j;
 
/* get all planes in this buffer */
for (i = 0; i < FF_ARRAY_ELEMS(planes) && frame->data[i]; i++) {
if (av_frame_get_plane_buffer(frame, i) == buf)
*p++ = i;
}
 
/* for each plane in this buffer, check that it can be padded without
* going over buffer bounds or other planes */
for (i = 0; i < FF_ARRAY_ELEMS(planes) && planes[i] >= 0; i++) {
int hsub = s->draw.hsub[planes[i]];
int vsub = s->draw.vsub[planes[i]];
 
uint8_t *start = frame->data[planes[i]];
uint8_t *end = start + (frame->height >> vsub) *
frame->linesize[planes[i]];
 
/* amount of free space needed before the start and after the end
* of the plane */
ptrdiff_t req_start = (s->x >> hsub) * s->draw.pixelstep[planes[i]] +
(s->y >> vsub) * frame->linesize[planes[i]];
ptrdiff_t req_end = ((s->w - s->x - frame->width) >> hsub) *
s->draw.pixelstep[planes[i]] +
(s->y >> vsub) * frame->linesize[planes[i]];
 
if (frame->linesize[planes[i]] < (s->w >> hsub) * s->draw.pixelstep[planes[i]])
return 1;
if (start - buf->data < req_start ||
(buf->data + buf->size) - end < req_end)
return 1;
 
for (j = 0; j < FF_ARRAY_ELEMS(planes) && planes[j] >= 0; j++) {
int vsub1 = s->draw.vsub[planes[j]];
uint8_t *start1 = frame->data[planes[j]];
uint8_t *end1 = start1 + (frame->height >> vsub1) *
frame->linesize[planes[j]];
if (i == j)
continue;
 
if (FFSIGN(start - end1) != FFSIGN(start - end1 - req_start) ||
FFSIGN(end - start1) != FFSIGN(end - start1 + req_end))
return 1;
}
}
 
return 0;
}
 
static int frame_needs_copy(PadContext *s, AVFrame *frame)
{
int i;
 
if (!av_frame_is_writable(frame))
return 1;
 
for (i = 0; i < 4 && frame->buf[i]; i++)
if (buffer_needs_copy(s, frame, frame->buf[i]))
return 1;
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
PadContext *s = inlink->dst->priv;
AVFrame *out;
int needs_copy = frame_needs_copy(s, in);
 
if (needs_copy) {
av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible, allocating new frame\n");
out = ff_get_video_buffer(inlink->dst->outputs[0],
FFMAX(inlink->w, s->w),
FFMAX(inlink->h, s->h));
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
 
av_frame_copy_props(out, in);
} else {
int i;
 
out = in;
for (i = 0; i < 4 && out->data[i] && out->linesize[i]; i++) {
int hsub = s->draw.hsub[i];
int vsub = s->draw.vsub[i];
out->data[i] -= (s->x >> hsub) * s->draw.pixelstep[i] +
(s->y >> vsub) * out->linesize[i];
}
}
 
/* top bar */
if (s->y) {
ff_fill_rectangle(&s->draw, &s->color,
out->data, out->linesize,
0, 0, s->w, s->y);
}
 
/* bottom bar */
if (s->h > s->y + s->in_h) {
ff_fill_rectangle(&s->draw, &s->color,
out->data, out->linesize,
0, s->y + s->in_h, s->w, s->h - s->y - s->in_h);
}
 
/* left border */
ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
0, s->y, s->x, in->height);
 
if (needs_copy) {
ff_copy_rectangle2(&s->draw,
out->data, out->linesize, in->data, in->linesize,
s->x, s->y, 0, 0, in->width, in->height);
}
 
/* right border */
ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
s->x + s->in_w, s->y, s->w - s->x - s->in_w,
in->height);
 
out->width = s->w;
out->height = s->h;
 
if (in != out)
av_frame_free(&in);
return ff_filter_frame(inlink->dst->outputs[0], out);
}
 
#define OFFSET(x) offsetof(PadContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption pad_options[] = {
{ "width", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "w", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "height", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "h", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "x", "set the x offset expression for the input image position", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "y", "set the y offset expression for the input image position", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "color", "set the color of the padded area border", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
{ NULL }
};
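 
/* Example usage (illustrative): "pad=w=iw+40:h=ih+40:x=20:y=20:color=violet"
 * draws a 20-pixel violet border around the input. */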
 
AVFILTER_DEFINE_CLASS(pad);
 
static const AVFilterPad avfilter_vf_pad_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.get_video_buffer = get_video_buffer,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_pad_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_pad = {
.name = "pad",
.description = NULL_IF_CONFIG_SMALL("Pad the input video."),
.priv_size = sizeof(PadContext),
.priv_class = &pad_class,
.query_formats = query_formats,
.inputs = avfilter_vf_pad_inputs,
.outputs = avfilter_vf_pad_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_perspective.c
0,0 → 1,403
/*
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/eval.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
#define SUB_PIXEL_BITS 8
#define SUB_PIXELS (1 << SUB_PIXEL_BITS)
#define COEFF_BITS 11
 
#define LINEAR 0
#define CUBIC 1
 
typedef struct PerspectiveContext {
const AVClass *class;
char *expr_str[4][2];
double ref[4][2];
int32_t (*pv)[2];
int32_t coeff[SUB_PIXELS][4];
int interpolation;
int linesize[4];
int height[4];
int hsub, vsub;
int nb_planes;
 
void (*perspective)(struct PerspectiveContext *s,
uint8_t *dst, int dst_linesize,
uint8_t *src, int src_linesize,
int w, int h, int hsub, int vsub);
} PerspectiveContext;
 
#define OFFSET(x) offsetof(PerspectiveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption perspective_options[] = {
{ "x0", "set top left x coordinate", OFFSET(expr_str[0][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
{ "y0", "set top left y coordinate", OFFSET(expr_str[0][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
{ "x1", "set top right x coordinate", OFFSET(expr_str[1][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
{ "y1", "set top right y coordinate", OFFSET(expr_str[1][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
{ "x2", "set bottom left x coordinate", OFFSET(expr_str[2][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
{ "y2", "set bottom left y coordinate", OFFSET(expr_str[2][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
{ "x3", "set bottom right x coordinate", OFFSET(expr_str[3][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
{ "y3", "set bottom right y coordinate", OFFSET(expr_str[3][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
{ "interpolation", "set interpolation", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=LINEAR}, 0, 1, FLAGS, "interpolation" },
{ "linear", "", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "interpolation" },
{ "cubic", "", 0, AV_OPT_TYPE_CONST, {.i64=CUBIC}, 0, 0, FLAGS, "interpolation" },
{ NULL }
};
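/* Illustrative usage (hypothetical coordinates and file names): sample the
 * source from a quadrilateral whose top corners are moved 100 pixels
 * inwards, stretching the top of the picture to the full output width:
 *   ffmpeg -i in.mp4 -vf "perspective=x0=100:y0=0:x1=W-100:y1=0" out.mp4
 */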
 
AVFILTER_DEFINE_CLASS(perspective);
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
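
/* Bicubic convolution kernel with sharpness parameter A = -0.60:
 *   k(d) = (A + 2)|d|^3 - (A + 3)|d|^2 + 1     for |d| < 1
 *   k(d) = A|d|^3 - 5A|d|^2 + 8A|d| - 4A       for 1 <= |d| < 2
 *   k(d) = 0                                   otherwise
 */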
 
static inline double get_coeff(double d)
{
double coeff, A = -0.60;
 
d = fabs(d);
 
if (d < 1.0)
coeff = (1.0 - (A + 3.0) * d * d + (A + 2.0) * d * d * d);
else if (d < 2.0)
coeff = (-4.0 * A + 8.0 * A * d - 5.0 * A * d * d + A * d * d * d);
else
coeff = 0.0;
 
return coeff;
}
 
static const char *const var_names[] = { "W", "H", NULL };
enum { VAR_W, VAR_H, VAR_VARS_NB };
 
static int config_input(AVFilterLink *inlink)
{
double x0, x1, x2, x3, x4, x5, x6, x7, q;
AVFilterContext *ctx = inlink->dst;
PerspectiveContext *s = ctx->priv;
double (*ref)[2] = s->ref;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h };
int h = inlink->h;
int w = inlink->w;
int x, y, i, j, ret;
 
for (i = 0; i < 4; i++) {
for (j = 0; j < 2; j++) {
if (!s->expr_str[i][j])
return AVERROR(EINVAL);
ret = av_expr_parse_and_eval(&s->ref[i][j], s->expr_str[i][j],
var_names, &values[0],
NULL, NULL, NULL, NULL,
0, 0, ctx);
if (ret < 0)
return ret;
}
}
 
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
s->nb_planes = av_pix_fmt_count_planes(inlink->format);
if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
return ret;
 
s->height[1] = s->height[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
s->height[0] = s->height[3] = inlink->h;
 
s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));
if (!s->pv)
return AVERROR(ENOMEM);
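
/* Derive the projective (homography) mapping from each destination pixel
 * (x, y) to a sub-pixel source position (u, v):
 *   u = SUB_PIXELS * (x0*x + x1*y + x2) / (x6*x + x7*y + q*w*h)
 *   v = SUB_PIXELS * (x3*x + x4*y + x5) / (x6*x + x7*y + q*w*h)
 * with the coefficients below solved from the four reference corners. */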
 
x6 = ((ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
(ref[2][1] - ref[3][1]) -
( ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
(ref[2][0] - ref[3][0])) * h;
x7 = ((ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
(ref[1][0] - ref[3][0]) -
( ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
(ref[1][1] - ref[3][1])) * w;
q = ( ref[1][0] - ref[3][0]) * (ref[2][1] - ref[3][1]) -
( ref[2][0] - ref[3][0]) * (ref[1][1] - ref[3][1]);
 
x0 = q * (ref[1][0] - ref[0][0]) * h + x6 * ref[1][0];
x1 = q * (ref[2][0] - ref[0][0]) * w + x7 * ref[2][0];
x2 = q * ref[0][0] * w * h;
x3 = q * (ref[1][1] - ref[0][1]) * h + x6 * ref[1][1];
x4 = q * (ref[2][1] - ref[0][1]) * w + x7 * ref[2][1];
x5 = q * ref[0][1] * w * h;
 
for (y = 0; y < h; y++){
for (x = 0; x < w; x++){
int u, v;
 
u = (int)floor(SUB_PIXELS * (x0 * x + x1 * y + x2) /
(x6 * x + x7 * y + q * w * h) + 0.5);
v = (int)floor(SUB_PIXELS * (x3 * x + x4 * y + x5) /
(x6 * x + x7 * y + q * w * h) + 0.5);
 
s->pv[x + y * w][0] = u;
s->pv[x + y * w][1] = v;
}
}
 
for (i = 0; i < SUB_PIXELS; i++){
double d = i / (double)SUB_PIXELS;
double temp[4];
double sum = 0;
 
for (j = 0; j < 4; j++)
temp[j] = get_coeff(j - d - 1);
 
for (j = 0; j < 4; j++)
sum += temp[j];
 
for (j = 0; j < 4; j++)
s->coeff[i][j] = (int)floor((1 << COEFF_BITS) * temp[j] / sum + 0.5);
}
 
return 0;
}
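
/* 4x4 bicubic resampling: the fast path applies the separable fixed-point
 * kernel directly; near the image borders the slow path instead clamps the
 * source coordinates to the edges. */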
 
static void resample_cubic(PerspectiveContext *s,
uint8_t *dst, int dst_linesize,
uint8_t *src, int src_linesize,
int w, int h, int hsub, int vsub)
{
const int linesize = s->linesize[0];
int x, y;
 
for (y = 0; y < h; y++) {
int sy = y << vsub;
for (x = 0; x < w; x++) {
int u, v, subU, subV, sum, sx;
 
sx = x << hsub;
u = s->pv[sx + sy * linesize][0] >> hsub;
v = s->pv[sx + sy * linesize][1] >> vsub;
subU = u & (SUB_PIXELS - 1);
subV = v & (SUB_PIXELS - 1);
u >>= SUB_PIXEL_BITS;
v >>= SUB_PIXEL_BITS;
 
if (u > 0 && v > 0 && u < w - 2 && v < h - 2){
const int index = u + v*src_linesize;
const int a = s->coeff[subU][0];
const int b = s->coeff[subU][1];
const int c = s->coeff[subU][2];
const int d = s->coeff[subU][3];
 
sum = s->coeff[subV][0] * (a * src[index - 1 - src_linesize] + b * src[index - 0 - src_linesize] +
c * src[index + 1 - src_linesize] + d * src[index + 2 - src_linesize]) +
s->coeff[subV][1] * (a * src[index - 1 ] + b * src[index - 0 ] +
c * src[index + 1 ] + d * src[index + 2 ]) +
s->coeff[subV][2] * (a * src[index - 1 + src_linesize] + b * src[index - 0 + src_linesize] +
c * src[index + 1 + src_linesize] + d * src[index + 2 + src_linesize]) +
s->coeff[subV][3] * (a * src[index - 1 + 2 * src_linesize] + b * src[index - 0 + 2 * src_linesize] +
c * src[index + 1 + 2 * src_linesize] + d * src[index + 2 + 2 * src_linesize]);
} else {
int dx, dy;
 
sum = 0;
 
for (dy = 0; dy < 4; dy++) {
int iy = v + dy - 1;
 
if (iy < 0)
iy = 0;
else if (iy >= h)
iy = h-1;
for (dx = 0; dx < 4; dx++) {
int ix = u + dx - 1;
 
if (ix < 0)
ix = 0;
else if (ix >= w)
ix = w - 1;
 
sum += s->coeff[subU][dx] * s->coeff[subV][dy] * src[ ix + iy * src_linesize];
}
}
}
 
sum = (sum + (1<<(COEFF_BITS * 2 - 1))) >> (COEFF_BITS * 2);
sum = av_clip(sum, 0, 255);
dst[x + y * dst_linesize] = sum;
}
}
}
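
/* Bilinear resampling with SUB_PIXEL_BITS of fixed-point precision;
 * samples that fall on or outside an edge degrade to 1-D interpolation
 * or a plain nearest-pixel lookup. */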
 
static void resample_linear(PerspectiveContext *s,
uint8_t *dst, int dst_linesize,
uint8_t *src, int src_linesize,
int w, int h, int hsub, int vsub)
{
const int linesize = s->linesize[0];
int x, y;
 
for (y = 0; y < h; y++){
int sy = y << vsub;
for (x = 0; x < w; x++){
int u, v, subU, subV, sum, sx, index, subUI, subVI;
 
sx = x << hsub;
u = s->pv[sx + sy * linesize][0] >> hsub;
v = s->pv[sx + sy * linesize][1] >> vsub;
subU = u & (SUB_PIXELS - 1);
subV = v & (SUB_PIXELS - 1);
u >>= SUB_PIXEL_BITS;
v >>= SUB_PIXEL_BITS;
 
index = u + v * src_linesize;
subUI = SUB_PIXELS - subU;
subVI = SUB_PIXELS - subV;
 
if ((unsigned)u < (unsigned)(w - 1)){
if((unsigned)v < (unsigned)(h - 1)){
sum = subVI * (subUI * src[index] + subU * src[index + 1]) +
subV * (subUI * src[index + src_linesize] + subU * src[index + src_linesize + 1]);
sum = (sum + (1 << (SUB_PIXEL_BITS * 2 - 1)))>> (SUB_PIXEL_BITS * 2);
} else {
if (v < 0)
v = 0;
else
v = h - 1;
index = u + v * src_linesize;
sum = subUI * src[index] + subU * src[index + 1];
sum = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
}
} else {
if (u < 0)
u = 0;
else
u = w - 1;
if ((unsigned)v < (unsigned)(h - 1)){
index = u + v * src_linesize;
sum = subVI * src[index] + subV * src[index + src_linesize];
sum = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
} else {
if (v < 0)
v = 0;
else
v = h - 1;
index = u + v * src_linesize;
sum = src[index];
}
}
 
sum = av_clip(sum, 0, 255);
dst[x + y * dst_linesize] = sum;
}
}
}
 
static av_cold int init(AVFilterContext *ctx)
{
PerspectiveContext *s = ctx->priv;
 
switch (s->interpolation) {
case LINEAR: s->perspective = resample_linear; break;
case CUBIC: s->perspective = resample_cubic; break;
}
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
PerspectiveContext *s = ctx->priv;
AVFrame *out;
int plane;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&frame);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, frame);
 
for (plane = 0; plane < s->nb_planes; plane++) {
int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
s->perspective(s, out->data[plane], out->linesize[plane],
frame->data[plane], frame->linesize[plane],
s->linesize[plane], s->height[plane], hsub, vsub);
}
 
av_frame_free(&frame);
return ff_filter_frame(outlink, out);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
PerspectiveContext *s = ctx->priv;
 
av_freep(&s->pv);
}
 
static const AVFilterPad perspective_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad perspective_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_perspective = {
.name = "perspective",
.description = NULL_IF_CONFIG_SMALL("Correct the perspective of video."),
.priv_size = sizeof(PerspectiveContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = perspective_inputs,
.outputs = perspective_outputs,
.priv_class = &perspective_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_phase.c
0,0 → 1,320
/*
* Copyright (c) 2004 Ville Saari
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
enum PhaseMode {
PROGRESSIVE,
TOP_FIRST,
BOTTOM_FIRST,
TOP_FIRST_ANALYZE,
BOTTOM_FIRST_ANALYZE,
ANALYZE,
FULL_ANALYZE,
AUTO,
AUTO_ANALYZE
};
 
typedef struct PhaseContext {
const AVClass *class;
enum PhaseMode mode;
AVFrame *frame;
int nb_planes;
int planeheight[4];
int linesize[4];
} PhaseContext;
 
#define OFFSET(x) offsetof(PhaseContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }
 
static const AVOption phase_options[] = {
{ "mode", "set phase mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=AUTO_ANALYZE}, PROGRESSIVE, AUTO_ANALYZE, FLAGS, "mode" },
CONST("p", "progressive", PROGRESSIVE, "mode"),
CONST("t", "top first", TOP_FIRST, "mode"),
CONST("b", "bottom first", BOTTOM_FIRST, "mode"),
CONST("T", "top first analyze", TOP_FIRST_ANALYZE, "mode"),
CONST("B", "bottom first analyze", BOTTOM_FIRST_ANALYZE, "mode"),
CONST("u", "analyze", ANALYZE, "mode"),
CONST("U", "full analyze", FULL_ANALYZE, "mode"),
CONST("a", "auto", AUTO, "mode"),
CONST("A", "auto analyze", AUTO_ANALYZE, "mode"),
{ NULL }
};
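/* Illustrative usage (hypothetical file names): auto-detect and fix the
 * field phase of an interlaced capture:
 *   ffmpeg -i in.mp4 -vf phase=mode=A out.mp4
 */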
 
AVFILTER_DEFINE_CLASS(phase);
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
PhaseContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int ret;
 
if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
return ret;
 
s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
s->planeheight[0] = s->planeheight[3] = inlink->h;
 
s->nb_planes = av_pix_fmt_count_planes(inlink->format);
 
return 0;
}
 
/*
* This macro interpolates the value of both fields at a point halfway
* between lines and takes the squared difference. In field resolution
* the point is a quarter pixel below a line in one field and a quarter
* pixel above a line in the other.
*
* (The result is actually multiplied by 25)
*/
#define DIFF(a, as, b, bs) (t = ((*a - b[bs]) << 2) + a[as << 1] - b[-bs], t * t)
 
/*
* Find which field combination has the smallest average squared difference
* between the fields.
*/
static enum PhaseMode analyze_plane(AVFilterContext *ctx, PhaseContext *s,
AVFrame *old, AVFrame *new)
{
double bdiff, tdiff, pdiff, scale;
const int ns = new->linesize[0];
const int os = old->linesize[0];
uint8_t *nptr = new->data[0];
uint8_t *optr = old->data[0];
const int h = new->height;
const int w = new->width;
int bdif, tdif, pdif;
enum PhaseMode mode = s->mode;
uint8_t *end, *rend;
int top, t;
 
if (mode == AUTO) {
mode = new->interlaced_frame ? new->top_field_first ?
TOP_FIRST : BOTTOM_FIRST : PROGRESSIVE;
} else if (mode == AUTO_ANALYZE) {
mode = new->interlaced_frame ? new->top_field_first ?
TOP_FIRST_ANALYZE : BOTTOM_FIRST_ANALYZE : FULL_ANALYZE;
}
 
if (mode <= BOTTOM_FIRST) {
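/* A fixed mode was requested (or resolved from the frame flags), so no
 * analysis is needed; the sentinel values only feed the debug log below. */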
bdiff = pdiff = tdiff = 65536.0;
} else {
bdiff = pdiff = tdiff = 0.0;
 
for (end = nptr + (h - 2) * ns, nptr += ns, optr += os, top = 0;
nptr < end; nptr += ns - w, optr += os - w, top ^= 1) {
pdif = tdif = bdif = 0;
 
switch (mode) {
case TOP_FIRST_ANALYZE:
if (top) {
for (rend = nptr + w; nptr < rend; nptr++, optr++) {
pdif += DIFF(nptr, ns, nptr, ns);
tdif += DIFF(nptr, ns, optr, os);
}
} else {
for (rend = nptr + w; nptr < rend; nptr++, optr++) {
pdif += DIFF(nptr, ns, nptr, ns);
tdif += DIFF(optr, os, nptr, ns);
}
}
break;
case BOTTOM_FIRST_ANALYZE:
if (top) {
for (rend = nptr + w; nptr < rend; nptr++, optr++) {
pdif += DIFF(nptr, ns, nptr, ns);
bdif += DIFF(optr, os, nptr, ns);
}
} else {
for (rend = nptr + w; nptr < rend; nptr++, optr++) {
pdif += DIFF(nptr, ns, nptr, ns);
bdif += DIFF(nptr, ns, optr, os);
}
}
break;
case ANALYZE:
if (top) {
for (rend = nptr + w; nptr < rend; nptr++, optr++) {
tdif += DIFF(nptr, ns, optr, os);
bdif += DIFF(optr, os, nptr, ns);
}
} else {
for (rend = nptr + w; nptr < rend; nptr++, optr++) {
bdif += DIFF(nptr, ns, optr, os);
tdif += DIFF(optr, os, nptr, ns);
}
}
break;
case FULL_ANALYZE:
if (top) {
for (rend = nptr + w; nptr < rend; nptr++, optr++) {
pdif += DIFF(nptr, ns, nptr, ns);
tdif += DIFF(nptr, ns, optr, os);
bdif += DIFF(optr, os, nptr, ns);
}
} else {
for (rend = nptr + w; nptr < rend; nptr++, optr++) {
pdif += DIFF(nptr, ns, nptr, ns);
bdif += DIFF(nptr, ns, optr, os);
tdif += DIFF(optr, os, nptr, ns);
}
}
break;
default:
av_assert0(0);
}
 
pdiff += (double)pdif;
tdiff += (double)tdif;
bdiff += (double)bdif;
}
 
scale = 1.0 / (w * (h - 3)) / 25.0;
pdiff *= scale;
tdiff *= scale;
bdiff *= scale;
 
if (mode == TOP_FIRST_ANALYZE) {
bdiff = 65536.0;
} else if (mode == BOTTOM_FIRST_ANALYZE) {
tdiff = 65536.0;
} else if (mode == ANALYZE) {
pdiff = 65536.0;
}
 
if (bdiff < pdiff && bdiff < tdiff) {
mode = BOTTOM_FIRST;
} else if (tdiff < pdiff && tdiff < bdiff) {
mode = TOP_FIRST;
} else {
mode = PROGRESSIVE;
}
}
 
av_log(ctx, AV_LOG_DEBUG, "mode=%c tdiff=%f bdiff=%f pdiff=%f\n",
mode == BOTTOM_FIRST ? 'b' : mode == TOP_FIRST ? 't' : 'p',
tdiff, bdiff, pdiff);
return mode;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
PhaseContext *s = ctx->priv;
enum PhaseMode mode;
int plane, top, y;
AVFrame *out;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
 
if (!s->frame) {
mode = PROGRESSIVE;
s->frame = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!s->frame) {
av_frame_free(&in);
av_frame_free(&out);
return AVERROR(ENOMEM);
}
} else {
mode = analyze_plane(ctx, s, s->frame, in);
}
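
/* Recombine fields: each output line takes either the current input line
 * or the line buffered from the previous frame, depending on which field
 * has to be delayed by one frame to restore the original phase. */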
 
for (plane = 0; plane < s->nb_planes; plane++) {
uint8_t *buf = s->frame->data[plane];
uint8_t *from = in->data[plane];
uint8_t *to = out->data[plane];
 
for (y = 0, top = 1; y < s->planeheight[plane]; y++, top ^= 1) {
memcpy(to, mode == (top ? BOTTOM_FIRST : TOP_FIRST) ? buf : from, s->linesize[plane]);
memcpy(buf, from, s->linesize[plane]);
 
buf += s->frame->linesize[plane];
from += in->linesize[plane];
to += out->linesize[plane];
}
}
 
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
PhaseContext *s = ctx->priv;
 
av_frame_free(&s->frame);
}
 
static const AVFilterPad phase_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad phase_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_phase = {
.name = "phase",
.description = NULL_IF_CONFIG_SMALL("Phase shift fields."),
.priv_size = sizeof(PhaseContext),
.priv_class = &phase_class,
.uninit = uninit,
.query_formats = query_formats,
.inputs = phase_inputs,
.outputs = phase_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_pixdesctest.c
0,0 → 1,135
/*
* Copyright (c) 2009 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* pixdesc test filter
*/
 
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
const AVPixFmtDescriptor *pix_desc;
uint16_t *line;
} PixdescTestContext;
 
static av_cold void uninit(AVFilterContext *ctx)
{
PixdescTestContext *priv = ctx->priv;
av_freep(&priv->line);
}
 
static int config_props(AVFilterLink *inlink)
{
PixdescTestContext *priv = inlink->dst->priv;
 
priv->pix_desc = av_pix_fmt_desc_get(inlink->format);
 
av_freep(&priv->line);
if (!(priv->line = av_malloc(sizeof(*priv->line) * inlink->w)))
return AVERROR(ENOMEM);
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
PixdescTestContext *priv = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int i, c, w = inlink->w, h = inlink->h;
const int cw = FF_CEIL_RSHIFT(w, priv->pix_desc->log2_chroma_w);
const int ch = FF_CEIL_RSHIFT(h, priv->pix_desc->log2_chroma_h);
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
 
av_frame_copy_props(out, in);
 
for (i = 0; i < 4; i++) {
const int h1 = i == 1 || i == 2 ? ch : h;
if (out->data[i]) {
uint8_t *data = out->data[i] +
(out->linesize[i] > 0 ? 0 : out->linesize[i] * (h1-1));
memset(data, 0, FFABS(out->linesize[i]) * h1);
}
}
 
/* copy palette */
if (priv->pix_desc->flags & AV_PIX_FMT_FLAG_PAL ||
priv->pix_desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
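
/* Round-trip every component through av_read_image_line() and
 * av_write_image_line() so that any error in the pixel format descriptor
 * shows up as visible corruption in the output. */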
 
for (c = 0; c < priv->pix_desc->nb_components; c++) {
const int w1 = c == 1 || c == 2 ? cw : w;
const int h1 = c == 1 || c == 2 ? ch : h;
 
for (i = 0; i < h1; i++) {
av_read_image_line(priv->line,
(void*)in->data,
in->linesize,
priv->pix_desc,
0, i, c, w1, 0);
 
av_write_image_line(priv->line,
out->data,
out->linesize,
priv->pix_desc,
0, i, c, w1);
}
}
 
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
 
static const AVFilterPad avfilter_vf_pixdesctest_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_pixdesctest_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_pixdesctest = {
.name = "pixdesctest",
.description = NULL_IF_CONFIG_SMALL("Test pixel format definitions."),
.priv_size = sizeof(PixdescTestContext),
.uninit = uninit,
.inputs = avfilter_vf_pixdesctest_inputs,
.outputs = avfilter_vf_pixdesctest_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_pp.c
0,0 → 1,186
/*
* Copyright (c) 2002 A'rpi
* Copyright (C) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* libpostproc filter, ported from MPlayer.
*/
 
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "internal.h"
 
#include "libpostproc/postprocess.h"
 
typedef struct {
const AVClass *class;
char *subfilters;
int mode_id;
pp_mode *modes[PP_QUALITY_MAX + 1];
void *pp_ctx;
} PPFilterContext;
 
#define OFFSET(x) offsetof(PPFilterContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption pp_options[] = {
{ "subfilters", "set postprocess subfilters", OFFSET(subfilters), AV_OPT_TYPE_STRING, {.str="de"}, .flags = FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(pp);
 
static av_cold int pp_init(AVFilterContext *ctx)
{
int i;
PPFilterContext *pp = ctx->priv;
 
for (i = 0; i <= PP_QUALITY_MAX; i++) {
pp->modes[i] = pp_get_mode_by_name_and_quality(pp->subfilters, i);
if (!pp->modes[i])
return AVERROR_EXTERNAL;
}
pp->mode_id = PP_QUALITY_MAX;
return 0;
}
 
static int pp_process_command(AVFilterContext *ctx, const char *cmd, const char *args,
char *res, int res_len, int flags)
{
PPFilterContext *pp = ctx->priv;
 
if (!strcmp(cmd, "quality")) {
pp->mode_id = av_clip(strtol(args, NULL, 10), 0, PP_QUALITY_MAX);
return 0;
}
return AVERROR(ENOSYS);
}
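/* Illustrative runtime control (hypothetical timing and value): the
 * "quality" command can be driven from the sendcmd filter, e.g.
 *   ffmpeg -i in.mp4 -vf "sendcmd=c='10.0 pp quality 3',pp=subfilters=de" out.mp4
 * drops the postprocessing quality to 3 after ten seconds. */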
 
static int pp_query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int pp_config_props(AVFilterLink *inlink)
{
int flags = PP_CPU_CAPS_AUTO;
PPFilterContext *pp = inlink->dst->priv;
 
switch (inlink->format) {
case AV_PIX_FMT_YUVJ420P:
case AV_PIX_FMT_YUV420P: flags |= PP_FORMAT_420; break;
case AV_PIX_FMT_YUVJ422P:
case AV_PIX_FMT_YUV422P: flags |= PP_FORMAT_422; break;
case AV_PIX_FMT_YUV411P: flags |= PP_FORMAT_411; break;
case AV_PIX_FMT_YUVJ444P:
case AV_PIX_FMT_YUV444P: flags |= PP_FORMAT_444; break;
default: av_assert0(0);
}
 
pp->pp_ctx = pp_get_context(inlink->w, inlink->h, flags);
if (!pp->pp_ctx)
return AVERROR(ENOMEM);
return 0;
}
 
static int pp_filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
{
AVFilterContext *ctx = inlink->dst;
PPFilterContext *pp = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
const int aligned_w = FFALIGN(outlink->w, 8);
const int aligned_h = FFALIGN(outlink->h, 8);
AVFrame *outbuf;
int qstride, qp_type;
int8_t *qp_table;
 
outbuf = ff_get_video_buffer(outlink, aligned_w, aligned_h);
if (!outbuf) {
av_frame_free(&inbuf);
return AVERROR(ENOMEM);
}
av_frame_copy_props(outbuf, inbuf);
outbuf->width = inbuf->width;
outbuf->height = inbuf->height;
qp_table = av_frame_get_qp_table(inbuf, &qstride, &qp_type);
 
pp_postprocess((const uint8_t **)inbuf->data, inbuf->linesize,
outbuf->data, outbuf->linesize,
aligned_w, outlink->h,
qp_table,
qstride,
pp->modes[pp->mode_id],
pp->pp_ctx,
outbuf->pict_type | (qp_type ? PP_PICT_TYPE_QP2 : 0));
 
av_frame_free(&inbuf);
return ff_filter_frame(outlink, outbuf);
}
 
static av_cold void pp_uninit(AVFilterContext *ctx)
{
int i;
PPFilterContext *pp = ctx->priv;
 
for (i = 0; i <= PP_QUALITY_MAX; i++)
pp_free_mode(pp->modes[i]);
if (pp->pp_ctx)
pp_free_context(pp->pp_ctx);
}
 
static const AVFilterPad pp_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = pp_config_props,
.filter_frame = pp_filter_frame,
},
{ NULL }
};
 
static const AVFilterPad pp_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_pp = {
.name = "pp",
.description = NULL_IF_CONFIG_SMALL("Filter video using libpostproc."),
.priv_size = sizeof(PPFilterContext),
.init = pp_init,
.uninit = pp_uninit,
.query_formats = pp_query_formats,
.inputs = pp_inputs,
.outputs = pp_outputs,
.process_command = pp_process_command,
.priv_class = &pp_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_psnr.c
0,0 → 1,386
/*
* Copyright (c) 2011 Roger Pau Monné <roger.pau@entel.upc.edu>
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Calculate the PSNR between two input videos.
*/
 
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "dualinput.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef struct PSNRContext {
const AVClass *class;
FFDualInputContext dinput;
double mse, min_mse, max_mse;
uint64_t nb_frames;
FILE *stats_file;
char *stats_file_str;
int max[4], average_max;
int is_rgb;
uint8_t rgba_map[4];
char comps[4];
int nb_components;
int planewidth[4];
int planeheight[4];
 
void (*compute_mse)(struct PSNRContext *s,
const uint8_t *m[4], const int ml[4],
const uint8_t *r[4], const int rl[4],
int w, int h, double mse[4]);
} PSNRContext;
 
#define OFFSET(x) offsetof(PSNRContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption psnr_options[] = {
{"stats_file", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
{"f", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
{ NULL }
};
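/* Illustrative usage (hypothetical file names): compare a distorted video
 * against its reference and log per-frame stats:
 *   ffmpeg -i distorted.mp4 -i reference.mp4 -lavfi "psnr=stats_file=psnr.log" -f null -
 */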
 
AVFILTER_DEFINE_CLASS(psnr);
 
static inline unsigned pow2(unsigned base)
{
return base*base;
}
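
/* PSNR in dB: 10 * log10(max^2 / mean_mse), with mean_mse = mse / nb_frames. */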
 
static inline double get_psnr(double mse, uint64_t nb_frames, int max)
{
return 10.0 * log(pow2(max) / (mse / nb_frames)) / log(10.0);
}
 
static inline
void compute_images_mse(PSNRContext *s,
const uint8_t *main_data[4], const int main_linesizes[4],
const uint8_t *ref_data[4], const int ref_linesizes[4],
int w, int h, double mse[4])
{
int i, c, j;
 
for (c = 0; c < s->nb_components; c++) {
const int outw = s->planewidth[c];
const int outh = s->planeheight[c];
const uint8_t *main_line = main_data[c];
const uint8_t *ref_line = ref_data[c];
const int ref_linesize = ref_linesizes[c];
const int main_linesize = main_linesizes[c];
uint64_t m = 0;
 
for (i = 0; i < outh; i++) {
int m2 = 0;
for (j = 0; j < outw; j++)
m2 += pow2(main_line[j] - ref_line[j]);
m += m2;
ref_line += ref_linesize;
main_line += main_linesize;
}
mse[c] = m / (double)(outw * outh);
}
}
 
static inline
void compute_images_mse_16bit(PSNRContext *s,
const uint8_t *main_data[4], const int main_linesizes[4],
const uint8_t *ref_data[4], const int ref_linesizes[4],
int w, int h, double mse[4])
{
int i, c, j;
 
for (c = 0; c < s->nb_components; c++) {
const int outw = s->planewidth[c];
const int outh = s->planeheight[c];
const uint16_t *main_line = (uint16_t *)main_data[c];
const uint16_t *ref_line = (uint16_t *)ref_data[c];
const int ref_linesize = ref_linesizes[c] / 2;
const int main_linesize = main_linesizes[c] / 2;
uint64_t m = 0;
 
for (i = 0; i < outh; i++) {
for (j = 0; j < outw; j++)
m += pow2(main_line[j] - ref_line[j]);
ref_line += ref_linesize;
main_line += main_linesize;
}
mse[c] = m / (double)(outw * outh);
}
}
 
static void set_meta(AVDictionary **metadata, const char *key, char comp, float d)
{
char value[128];
snprintf(value, sizeof(value), "%0.2f", d);
if (comp) {
char key2[128];
snprintf(key2, sizeof(key2), "%s%c", key, comp);
av_dict_set(metadata, key2, value, 0);
} else {
av_dict_set(metadata, key, value, 0);
}
}
 
static AVFrame *do_psnr(AVFilterContext *ctx, AVFrame *main,
const AVFrame *ref)
{
PSNRContext *s = ctx->priv;
double comp_mse[4], mse = 0;
int j, c;
AVDictionary **metadata = avpriv_frame_get_metadatap(main);
 
s->compute_mse(s, (const uint8_t **)main->data, main->linesize,
(const uint8_t **)ref->data, ref->linesize,
main->width, main->height, comp_mse);
 
for (j = 0; j < s->nb_components; j++)
mse += comp_mse[j];
mse /= s->nb_components;
 
s->min_mse = FFMIN(s->min_mse, mse);
s->max_mse = FFMAX(s->max_mse, mse);
 
s->mse += mse;
s->nb_frames++;
 
for (j = 0; j < s->nb_components; j++) {
c = s->is_rgb ? s->rgba_map[j] : j;
set_meta(metadata, "lavfi.psnr.mse.", s->comps[j], comp_mse[c]);
set_meta(metadata, "lavfi.psnr.mse_avg", 0, mse);
set_meta(metadata, "lavfi.psnr.psnr.", s->comps[j], get_psnr(comp_mse[c], 1, s->max[c]));
set_meta(metadata, "lavfi.psnr.psnr_avg", 0, get_psnr(mse, 1, s->average_max));
}
 
if (s->stats_file) {
fprintf(s->stats_file, "n:%"PRId64" mse_avg:%0.2f ", s->nb_frames, mse);
for (j = 0; j < s->nb_components; j++) {
c = s->is_rgb ? s->rgba_map[j] : j;
fprintf(s->stats_file, "mse_%c:%0.2f ", s->comps[j], comp_mse[c]);
}
for (j = 0; j < s->nb_components; j++) {
c = s->is_rgb ? s->rgba_map[j] : j;
fprintf(s->stats_file, "psnr_%c:%0.2f ", s->comps[j],
get_psnr(comp_mse[c], 1, s->max[c]));
}
fprintf(s->stats_file, "\n");
}
 
return main;
}
 
static av_cold int init(AVFilterContext *ctx)
{
PSNRContext *s = ctx->priv;
 
s->min_mse = +INFINITY;
s->max_mse = -INFINITY;
 
if (s->stats_file_str) {
s->stats_file = fopen(s->stats_file_str, "w");
if (!s->stats_file) {
int err = AVERROR(errno);
char buf[128];
av_strerror(err, buf, sizeof(buf));
av_log(ctx, AV_LOG_ERROR, "Could not open stats file %s: %s\n",
s->stats_file_str, buf);
return err;
}
}
 
s->dinput.process = do_psnr;
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
#define PF_ALPHA(suf) AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
#define PF(suf) PF_NOALPHA(suf), PF_ALPHA(suf)
PF(P), PF(P9), PF(P10), PF_NOALPHA(P12), PF_NOALPHA(P14), PF(P16),
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP16,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input_ref(AVFilterLink *inlink)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFilterContext *ctx = inlink->dst;
PSNRContext *s = ctx->priv;
int j;
 
s->nb_components = desc->nb_components;
if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
ctx->inputs[0]->h != ctx->inputs[1]->h) {
av_log(ctx, AV_LOG_ERROR, "Width and heigth of input videos must be same.\n");
return AVERROR(EINVAL);
}
if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
return AVERROR(EINVAL);
}
 
switch (inlink->format) {
case AV_PIX_FMT_GRAY8:
case AV_PIX_FMT_GRAY16:
case AV_PIX_FMT_GBRP:
case AV_PIX_FMT_GBRP9:
case AV_PIX_FMT_GBRP10:
case AV_PIX_FMT_GBRP12:
case AV_PIX_FMT_GBRP14:
case AV_PIX_FMT_GBRP16:
case AV_PIX_FMT_GBRAP:
case AV_PIX_FMT_GBRAP16:
case AV_PIX_FMT_YUVJ411P:
case AV_PIX_FMT_YUVJ420P:
case AV_PIX_FMT_YUVJ422P:
case AV_PIX_FMT_YUVJ440P:
case AV_PIX_FMT_YUVJ444P:
s->max[0] = (1 << (desc->comp[0].depth_minus1 + 1)) - 1;
s->max[1] = (1 << (desc->comp[1].depth_minus1 + 1)) - 1;
s->max[2] = (1 << (desc->comp[2].depth_minus1 + 1)) - 1;
s->max[3] = (1 << (desc->comp[3].depth_minus1 + 1)) - 1;
break;
default:
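/* Limited-range YUV: 8-bit luma peaks at 235 and chroma at 240, scaled up
 * for higher bit depths; alpha always uses the full range. */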
s->max[0] = 235 * (1 << (desc->comp[0].depth_minus1 - 7));
s->max[1] = 240 * (1 << (desc->comp[1].depth_minus1 - 7));
s->max[2] = 240 * (1 << (desc->comp[2].depth_minus1 - 7));
s->max[3] = (1 << (desc->comp[3].depth_minus1 + 1)) - 1;
}
 
s->is_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
s->comps[0] = s->is_rgb ? 'r' : 'y' ;
s->comps[1] = s->is_rgb ? 'g' : 'u' ;
s->comps[2] = s->is_rgb ? 'b' : 'v' ;
s->comps[3] = 'a';
 
for (j = 0; j < s->nb_components; j++)
s->average_max += s->max[j];
s->average_max /= s->nb_components;
 
s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
s->planeheight[0] = s->planeheight[3] = inlink->h;
s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
s->planewidth[0] = s->planewidth[3] = inlink->w;
 
s->compute_mse = desc->comp[0].depth_minus1 > 7 ? compute_images_mse_16bit : compute_images_mse;
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
PSNRContext *s = ctx->priv;
AVFilterLink *mainlink = ctx->inputs[0];
int ret;
 
outlink->w = mainlink->w;
outlink->h = mainlink->h;
outlink->time_base = mainlink->time_base;
outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
outlink->frame_rate = mainlink->frame_rate;
if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
return ret;
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
PSNRContext *s = inlink->dst->priv;
return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
}
 
static int request_frame(AVFilterLink *outlink)
{
PSNRContext *s = outlink->src->priv;
return ff_dualinput_request_frame(&s->dinput, outlink);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
PSNRContext *s = ctx->priv;
 
if (s->nb_frames > 0) {
av_log(ctx, AV_LOG_INFO, "PSNR average:%0.2f min:%0.2f max:%0.2f\n",
get_psnr(s->mse, s->nb_frames, s->average_max),
get_psnr(s->max_mse, 1, s->average_max),
get_psnr(s->min_mse, 1, s->average_max));
}
 
ff_dualinput_uninit(&s->dinput);
 
if (s->stats_file)
fclose(s->stats_file);
}
 
static const AVFilterPad psnr_inputs[] = {
{
.name = "main",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},{
.name = "reference",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input_ref,
},
{ NULL }
};
 
static const AVFilterPad psnr_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_psnr = {
.name = "psnr",
.description = NULL_IF_CONFIG_SMALL("Calculate the PSNR between two video streams."),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(PSNRContext),
.priv_class = &psnr_class,
.inputs = psnr_inputs,
.outputs = psnr_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_pullup.c
0,0 → 1,768
/*
* Copyright (c) 2003 Rich Felker
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "vf_pullup.h"
 
#define F_HAVE_BREAKS 1
#define F_HAVE_AFFINITY 2
 
#define BREAK_LEFT 1
#define BREAK_RIGHT 2
 
#define OFFSET(x) offsetof(PullupContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption pullup_options[] = {
{ "jl", "set left junk size", OFFSET(junk_left), AV_OPT_TYPE_INT, {.i64=1}, 0, INT_MAX, FLAGS },
{ "jr", "set right junk size", OFFSET(junk_right), AV_OPT_TYPE_INT, {.i64=1}, 0, INT_MAX, FLAGS },
{ "jt", "set top junk size", OFFSET(junk_top), AV_OPT_TYPE_INT, {.i64=4}, 1, INT_MAX, FLAGS },
{ "jb", "set bottom junk size", OFFSET(junk_bottom), AV_OPT_TYPE_INT, {.i64=4}, 1, INT_MAX, FLAGS },
{ "sb", "set strict breaks", OFFSET(strict_breaks), AV_OPT_TYPE_INT, {.i64=0},-1, 1, FLAGS },
{ "mp", "set metric plane", OFFSET(metric_plane), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "mp" },
{ "y", "luma", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mp" },
{ "u", "chroma blue", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mp" },
{ "v", "chroma red", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "mp" },
{ NULL }
};
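/* Illustrative usage (hypothetical file names): reverse telecine; one
 * common pattern is to follow pullup with fps to lock the film rate:
 *   ffmpeg -i telecined.mp4 -vf "pullup,fps=24000/1001" out.mp4
 */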
 
AVFILTER_DEFINE_CLASS(pullup);
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
#define ABS(a) (((a) ^ ((a) >> 31)) - ((a) >> 31))
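/* ABS() above is a branchless absolute value for 32-bit ints.
 *
 * The three metrics below each scan a block 8 pixels wide and 4 field
 * lines tall: diff_c() sums absolute differences between two fields,
 * comb_c() measures interline combing between opposite-parity fields, and
 * var_c() measures intra-field variation (scaled by 4 to match comb_c()). */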
 
static int diff_c(const uint8_t *a, const uint8_t *b, int s)
{
int i, j, diff = 0;
 
for (i = 0; i < 4; i++) {
for (j = 0; j < 8; j++)
diff += ABS(a[j] - b[j]);
a += s;
b += s;
}
 
return diff;
}
 
static int comb_c(const uint8_t *a, const uint8_t *b, int s)
{
int i, j, comb = 0;
 
for (i = 0; i < 4; i++) {
for (j = 0; j < 8; j++)
comb += ABS((a[j] << 1) - b[j - s] - b[j ]) +
ABS((b[j] << 1) - a[j ] - a[j + s]);
a += s;
b += s;
}
 
return comb;
}
 
static int var_c(const uint8_t *a, const uint8_t *b, int s)
{
int i, j, var = 0;
 
for (i = 0; i < 3; i++) {
for (j = 0; j < 8; j++)
var += ABS(a[j] - a[j + s]);
a += s;
}
 
return 4 * var; /* match comb scaling */
}
 
static int alloc_metrics(PullupContext *s, PullupField *f)
{
f->diffs = av_calloc(FFALIGN(s->metric_length, 16), sizeof(*f->diffs));
f->combs = av_calloc(FFALIGN(s->metric_length, 16), sizeof(*f->combs));
f->vars = av_calloc(FFALIGN(s->metric_length, 16), sizeof(*f->vars));
 
if (!f->diffs || !f->combs || !f->vars) {
av_freep(&f->diffs);
av_freep(&f->combs);
av_freep(&f->vars);
return AVERROR(ENOMEM);
}
return 0;
}
 
static PullupField *make_field_queue(PullupContext *s, int len)
{
PullupField *head, *f;
 
f = head = av_mallocz(sizeof(*head));
if (!f)
return NULL;
 
if (alloc_metrics(s, f) < 0) {
av_free(f);
return NULL;
}
 
for (; len > 0; len--) {
f->next = av_mallocz(sizeof(*f->next));
if (!f->next)
return NULL;
 
f->next->prev = f;
f = f->next;
if (alloc_metrics(s, f) < 0)
return NULL;
}
 
f->next = head;
head->prev = f;
 
return head;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
PullupContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int mp = s->metric_plane;
 
s->nb_planes = av_pix_fmt_count_planes(inlink->format);
 
if (mp + 1 > s->nb_planes) {
av_log(ctx, AV_LOG_ERROR, "input format does not have such plane\n");
return AVERROR(EINVAL);
}
 
s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
s->planeheight[0] = s->planeheight[3] = inlink->h;
s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
s->planewidth[0] = s->planewidth[3] = inlink->w;
 
s->metric_w = (s->planewidth[mp] - ((s->junk_left + s->junk_right) << 3)) >> 3;
s->metric_h = (s->planeheight[mp] - ((s->junk_top + s->junk_bottom) << 1)) >> 3;
s->metric_offset = (s->junk_left << 3) + (s->junk_top << 1) * s->planewidth[mp];
s->metric_length = s->metric_w * s->metric_h;
 
av_log(ctx, AV_LOG_DEBUG, "w: %d h: %d\n", s->metric_w, s->metric_h);
av_log(ctx, AV_LOG_DEBUG, "offset: %d length: %d\n", s->metric_offset, s->metric_length);
 
s->head = make_field_queue(s, 8);
if (!s->head)
return AVERROR(ENOMEM);
 
s->diff = diff_c;
s->comb = comb_c;
s->var = var_c;
 
if (ARCH_X86)
ff_pullup_init_x86(s);
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
return 0;
}
 
static PullupBuffer *pullup_lock_buffer(PullupBuffer *b, int parity)
{
if (!b)
return NULL;
 
if ((parity + 1) & 1)
b->lock[0]++;
if ((parity + 1) & 2)
b->lock[1]++;
 
return b;
}
 
static void pullup_release_buffer(PullupBuffer *b, int parity)
{
if (!b)
return;
 
if ((parity + 1) & 1)
b->lock[0]--;
if ((parity + 1) & 2)
b->lock[1]--;
}
 
static int alloc_buffer(PullupContext *s, PullupBuffer *b)
{
int i;
 
if (b->planes[0])
return 0;
for (i = 0; i < s->nb_planes; i++) {
b->planes[i] = av_malloc(s->planeheight[i] * s->planewidth[i]);
}
 
return 0;
}
 
static PullupBuffer *pullup_get_buffer(PullupContext *s, int parity)
{
int i;
 
/* Try first to get the sister buffer for the previous field */
if (parity < 2 && s->last && parity != s->last->parity
&& !s->last->buffer->lock[parity]) {
alloc_buffer(s, s->last->buffer);
return pullup_lock_buffer(s->last->buffer, parity);
}
 
/* Prefer a buffer with both fields open */
for (i = 0; i < FF_ARRAY_ELEMS(s->buffers); i++) {
if (s->buffers[i].lock[0])
continue;
if (s->buffers[i].lock[1])
continue;
alloc_buffer(s, &s->buffers[i]);
return pullup_lock_buffer(&s->buffers[i], parity);
}
 
if (parity == 2)
return NULL;
 
/* Search for any half-free buffer */
for (i = 0; i < FF_ARRAY_ELEMS(s->buffers); i++) {
if (((parity + 1) & 1) && s->buffers[i].lock[0])
continue;
if (((parity + 1) & 2) && s->buffers[i].lock[1])
continue;
alloc_buffer(s, &s->buffers[i]);
return pullup_lock_buffer(&s->buffers[i], parity);
}
 
return NULL;
}
 
static int queue_length(PullupField *begin, PullupField *end)
{
PullupField *f;
int count = 1;
 
if (!begin || !end)
return 0;
 
for (f = begin; f != end; f = f->next)
count++;
 
return count;
}
 
static int find_first_break(PullupField *f, int max)
{
int i;
 
for (i = 0; i < max; i++) {
if (f->breaks & BREAK_RIGHT || f->next->breaks & BREAK_LEFT)
return i + 1;
f = f->next;
}
 
return 0;
}
 
static void compute_breaks(PullupContext *s, PullupField *f0)
{
PullupField *f1 = f0->next;
PullupField *f2 = f1->next;
PullupField *f3 = f2->next;
int i, l, max_l = 0, max_r = 0;
 
if (f0->flags & F_HAVE_BREAKS)
return;
 
f0->flags |= F_HAVE_BREAKS;
 
/* Special case when fields are 100% identical */
if (f0->buffer == f2->buffer && f1->buffer != f3->buffer) {
f2->breaks |= BREAK_RIGHT;
return;
}
 
if (f0->buffer != f2->buffer && f1->buffer == f3->buffer) {
f1->breaks |= BREAK_LEFT;
return;
}
 
for (i = 0; i < s->metric_length; i++) {
l = f2->diffs[i] - f3->diffs[i];
 
if ( l > max_l)
max_l = l;
if (-l > max_r)
max_r = -l;
}
 
/* Don't get tripped up when differences are mostly quant error */
if (max_l + max_r < 128)
return;
if (max_l > 4 * max_r)
f1->breaks |= BREAK_LEFT;
if (max_r > 4 * max_l)
f2->breaks |= BREAK_RIGHT;
}
 
static void compute_affinity(PullupContext *s, PullupField *f)
{
int i, max_l = 0, max_r = 0, l;
 
if (f->flags & F_HAVE_AFFINITY)
return;
 
f->flags |= F_HAVE_AFFINITY;
 
if (f->buffer == f->next->next->buffer) {
f->affinity = 1;
f->next->affinity = 0;
f->next->next->affinity = -1;
f->next->flags |= F_HAVE_AFFINITY;
f->next->next->flags |= F_HAVE_AFFINITY;
return;
}
 
for (i = 0; i < s->metric_length; i++) {
int v = f->vars[i];
int lv = f->prev->vars[i];
int rv = f->next->vars[i];
int lc = f->combs[i] - (v + lv) + ABS(v - lv);
int rc = f->next->combs[i] - (v + rv) + ABS(v - rv);
 
lc = FFMAX(lc, 0);
rc = FFMAX(rc, 0);
l = lc - rc;
 
if ( l > max_l)
max_l = l;
if (-l > max_r)
max_r = -l;
}
 
if (max_l + max_r < 64)
return;
 
if (max_r > 6 * max_l)
f->affinity = -1;
else if (max_l > 6 * max_r)
f->affinity = 1;
}
 
static int decide_frame_length(PullupContext *s)
{
PullupField *f0 = s->first;
PullupField *f1 = f0->next;
PullupField *f2 = f1->next;
PullupField *f;
int i, l, n;
 
if (queue_length(s->first, s->last) < 4)
return 0;
 
f = s->first;
n = queue_length(f, s->last);
for (i = 0; i < n - 1; i++) {
if (i < n - 3)
compute_breaks(s, f);
 
compute_affinity(s, f);
 
f = f->next;
}
 
if (f0->affinity == -1)
return 1;
 
l = find_first_break(f0, 3);
 
if (l == 1 && s->strict_breaks < 0)
l = 0;
 
switch (l) {
case 1:
return 1 + (s->strict_breaks < 1 && f0->affinity == 1 && f1->affinity == -1);
case 2:
/* FIXME: strictly speaking, f0->prev is no longer valid... :) */
if (s->strict_pairs
&& (f0->prev->breaks & BREAK_RIGHT) && (f2->breaks & BREAK_LEFT)
&& (f0->affinity != 1 || f1->affinity != -1) )
return 1;
return 1 + (f1->affinity != 1);
case 3:
return 2 + (f2->affinity != 1);
default:
/* 9 possibilities covered before switch */
if (f1->affinity == 1)
return 1; /* covers 6 */
else if (f1->affinity == -1)
return 2; /* covers 6 */
else if (f2->affinity == -1) { /* covers 2 */
return (f0->affinity == 1) ? 3 : 1;
} else {
return 2; /* the remaining 6 */
}
}
}
 
static PullupFrame *pullup_get_frame(PullupContext *s)
{
PullupFrame *fr = &s->frame;
int i, n = decide_frame_length(s);
int aff = s->first->next->affinity;
 
av_assert1(n < FF_ARRAY_ELEMS(fr->ifields));
if (!n || fr->lock)
return NULL;
 
fr->lock++;
fr->length = n;
fr->parity = s->first->parity;
fr->buffer = 0;
 
for (i = 0; i < n; i++) {
/* We cheat and steal the buffer without release+relock */
fr->ifields[i] = s->first->buffer;
s->first->buffer = 0;
s->first = s->first->next;
}
 
if (n == 1) {
fr->ofields[fr->parity ] = fr->ifields[0];
fr->ofields[fr->parity ^ 1] = 0;
} else if (n == 2) {
fr->ofields[fr->parity ] = fr->ifields[0];
fr->ofields[fr->parity ^ 1] = fr->ifields[1];
} else if (n == 3) {
if (!aff)
aff = (fr->ifields[0] == fr->ifields[1]) ? -1 : 1;
fr->ofields[fr->parity ] = fr->ifields[1 + aff];
fr->ofields[fr->parity ^ 1] = fr->ifields[1 ];
}
 
pullup_lock_buffer(fr->ofields[0], 0);
pullup_lock_buffer(fr->ofields[1], 1);
 
if (fr->ofields[0] == fr->ofields[1]) {
fr->buffer = fr->ofields[0];
pullup_lock_buffer(fr->buffer, 2);
return fr;
}
 
return fr;
}
 
static void pullup_release_frame(PullupFrame *f)
{
int i;
 
for (i = 0; i < f->length; i++)
pullup_release_buffer(f->ifields[i], f->parity ^ (i & 1));
 
pullup_release_buffer(f->ofields[0], 0);
pullup_release_buffer(f->ofields[1], 1);
 
if (f->buffer)
pullup_release_buffer(f->buffer, 2);
f->lock--;
}
 
static void compute_metric(PullupContext *s, int *dest,
PullupField *fa, int pa, PullupField *fb, int pb,
int (*func)(const uint8_t *, const uint8_t *, int))
{
int mp = s->metric_plane;
int xstep = 8;
int ystep = s->planewidth[mp] << 3;
int stride = s->planewidth[mp] << 1; /* field stride */
int w = s->metric_w * xstep;
uint8_t *a, *b;
int x, y;
 
if (!fa->buffer || !fb->buffer)
return;
 
/* Shortcut for duplicate fields (e.g. from RFF flag) */
if (fa->buffer == fb->buffer && pa == pb) {
memset(dest, 0, s->metric_length * sizeof(*dest));
return;
}
 
a = fa->buffer->planes[mp] + pa * s->planewidth[mp] + s->metric_offset;
b = fb->buffer->planes[mp] + pb * s->planewidth[mp] + s->metric_offset;
 
for (y = 0; y < s->metric_h; y++) {
for (x = 0; x < w; x += xstep)
*dest++ = func(a + x, b + x, stride);
a += ystep; b += ystep;
}
}
 
static int check_field_queue(PullupContext *s)
{
int ret;
 
if (s->head->next == s->first) {
PullupField *f = av_mallocz(sizeof(*f));
 
if (!f)
return AVERROR(ENOMEM);
 
if ((ret = alloc_metrics(s, f)) < 0) {
av_free(f);
return ret;
}
 
f->prev = s->head;
f->next = s->first;
s->head->next = f;
s->first->prev = f;
}
 
return 0;
}
 
static void pullup_submit_field(PullupContext *s, PullupBuffer *b, int parity)
{
PullupField *f;
 
/* Grow the circular list if needed */
if (check_field_queue(s) < 0)
return;
 
/* Cannot have two fields of same parity in a row; drop the new one */
if (s->last && s->last->parity == parity)
return;
 
f = s->head;
f->parity = parity;
f->buffer = pullup_lock_buffer(b, parity);
f->flags = 0;
f->breaks = 0;
f->affinity = 0;
 
compute_metric(s, f->diffs, f, parity, f->prev->prev, parity, s->diff);
compute_metric(s, f->combs, parity ? f->prev : f, 0, parity ? f : f->prev, 1, s->comb);
compute_metric(s, f->vars, f, parity, f, -1, s->var);
emms_c();
 
/* Advance the circular list */
if (!s->first)
s->first = s->head;
 
s->last = s->head;
s->head = s->head->next;
}
 
static void copy_field(PullupContext *s,
PullupBuffer *dst, PullupBuffer *src, int parity)
{
uint8_t *dd, *ss;
int i;
 
for (i = 0; i < s->nb_planes; i++) {
ss = src->planes[i] + parity * s->planewidth[i];
dd = dst->planes[i] + parity * s->planewidth[i];
 
av_image_copy_plane(dd, s->planewidth[i] << 1,
ss, s->planewidth[i] << 1,
s->planewidth[i], s->planeheight[i] >> 1);
}
}
 
static void pullup_pack_frame(PullupContext *s, PullupFrame *fr)
{
int i;
 
if (fr->buffer)
return;
 
if (fr->length < 2)
return; /* FIXME: deal with this */
 
for (i = 0; i < 2; i++) {
if (fr->ofields[i]->lock[i^1])
continue;
 
fr->buffer = fr->ofields[i];
pullup_lock_buffer(fr->buffer, 2);
copy_field(s, fr->buffer, fr->ofields[i^1], i^1);
return;
}
 
fr->buffer = pullup_get_buffer(s, 2);
 
copy_field(s, fr->buffer, fr->ofields[0], 0);
copy_field(s, fr->buffer, fr->ofields[1], 1);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
PullupContext *s = ctx->priv;
PullupBuffer *b;
PullupFrame *f;
AVFrame *out;
int p, ret = 0;
 
b = pullup_get_buffer(s, 2);
if (!b) {
av_log(ctx, AV_LOG_WARNING, "Could not get buffer!\n");
f = pullup_get_frame(s);
pullup_release_frame(f);
goto end;
}
 
av_image_copy(b->planes, s->planewidth,
(const uint8_t**)in->data, in->linesize,
inlink->format, inlink->w, inlink->h);
 
p = in->interlaced_frame ? !in->top_field_first : 0;
pullup_submit_field(s, b, p );
pullup_submit_field(s, b, p^1);
 
if (in->repeat_pict)
pullup_submit_field(s, b, p);
 
pullup_release_buffer(b, 2);
 
f = pullup_get_frame(s);
if (!f)
goto end;
 
if (f->length < 2) {
pullup_release_frame(f);
f = pullup_get_frame(s);
if (!f)
goto end;
if (f->length < 2) {
pullup_release_frame(f);
if (!in->repeat_pict)
goto end;
f = pullup_get_frame(s);
if (!f)
goto end;
if (f->length < 2) {
pullup_release_frame(f);
goto end;
}
}
}
 
/* If the frame isn't already exportable... */
if (!f->buffer)
pullup_pack_frame(s, f);
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
ret = AVERROR(ENOMEM);
goto end;
}
av_frame_copy_props(out, in);
 
av_image_copy(out->data, out->linesize,
(const uint8_t**)f->buffer->planes, s->planewidth,
inlink->format, inlink->w, inlink->h);
 
ret = ff_filter_frame(outlink, out);
pullup_release_frame(f);
end:
av_frame_free(&in);
return ret;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
PullupContext *s = ctx->priv;
PullupField *f;
int i;
 
f = s->head;
while (f) {
av_free(f->diffs);
av_free(f->combs);
av_free(f->vars);
if (f == s->last) {
av_freep(&s->last);
break;
}
f = f->next;
av_freep(&f->prev);
}
 
for (i = 0; i < FF_ARRAY_ELEMS(s->buffers); i++) {
av_freep(&s->buffers[i].planes[0]);
av_freep(&s->buffers[i].planes[1]);
av_freep(&s->buffers[i].planes[2]);
}
}
 
static const AVFilterPad pullup_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad pullup_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_pullup = {
.name = "pullup",
.description = NULL_IF_CONFIG_SMALL("Pullup from field sequence to frames."),
.priv_size = sizeof(PullupContext),
.priv_class = &pullup_class,
.uninit = uninit,
.query_formats = query_formats,
.inputs = pullup_inputs,
.outputs = pullup_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_pullup.h
0,0 → 1,71
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_PULLUP_H
#define AVFILTER_PULLUP_H
 
#include "avfilter.h"
 
typedef struct PullupBuffer {
int lock[2];
uint8_t *planes[4];
} PullupBuffer;
 
typedef struct PullupField {
int parity;
PullupBuffer *buffer;
unsigned flags;
int breaks;
int affinity;
int *diffs;
int *combs;
int *vars;
struct PullupField *prev, *next;
} PullupField;
 
typedef struct PullupFrame {
int lock;
int length;
int parity;
PullupBuffer *ifields[4], *ofields[2];
PullupBuffer *buffer;
} PullupFrame;
 
typedef struct PullupContext {
const AVClass *class;
int junk_left, junk_right, junk_top, junk_bottom;
int metric_plane;
int strict_breaks;
int strict_pairs;
int metric_w, metric_h, metric_length;
int metric_offset;
int nb_planes;
int planewidth[4];
int planeheight[4];
PullupField *first, *last, *head;
PullupBuffer buffers[10];
PullupFrame frame;
 
int (*diff)(const uint8_t *a, const uint8_t *b, int s);
int (*comb)(const uint8_t *a, const uint8_t *b, int s);
int (*var )(const uint8_t *a, const uint8_t *b, int s);
} PullupContext;
 
void ff_pullup_init_x86(PullupContext *s);
 
#endif /* AVFILTER_PULLUP_H */
/contrib/sdk/sources/ffmpeg/libavfilter/vf_removelogo.c
0,0 → 1,581
/*
* Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net>
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Advanced blur-based logo removing filter
*
* This filter loads an image mask file showing where a logo is and
* uses a blur transform to remove the logo.
*
* Based on the libmpcodecs remove-logo filter by Robert Edele.
*/
 
/**
* This code implements a filter to remove annoying TV logos and other unwanted
* images placed onto a video stream. It works by filling in the pixels that
* comprise the logo with neighboring pixels. The transform is very loosely
* based on a gaussian blur, but it is different enough to merit its own
* paragraph later on. It is a major improvement on the old delogo filter: it
* uses a better blurring algorithm, and it takes a bitmap so it can handle an
* arbitrary and generally much tighter-fitting shape than a rectangle.
*
* The logo removal algorithm has two key points. The first is that it
* distinguishes between pixels in the logo and those not in the logo by using
* the passed-in bitmap. Pixels not in the logo are copied over directly without
* being modified and they also serve as source pixels for the logo
* fill-in. Pixels inside the logo have the mask applied.
*
* At init-time the bitmap is reprocessed internally, and the distance to the
* nearest edge of the logo (Manhattan distance), along with a little extra to
* remove rough edges, is stored in each pixel. This is done using an in-place
* erosion algorithm that increments each pixel surviving any given
* erosion. Once every pixel is eroded, the maximum value is recorded, and a
* set of masks from size 0 to this size is generated. The masks are circular
* binary masks, where each pixel within a radius N (where N is the size of the
* mask) is a 1, and all other pixels are a 0. Although a gaussian mask would be
* more mathematically accurate, a binary mask works better in practice because
* we generally do not use the central pixels in the mask (because they are in
* the logo region); a gaussian mask would therefore cause too little blur and
* produce a very unstable image.
*
* The mask is applied in a special way. Namely, only pixels in the mask that
* line up to pixels outside the logo are used. The dynamic mask size means that
* the mask is just big enough so that the edges touch pixels outside the logo,
* so the blurring is kept to a minimum and at least the first boundary
* condition is met (that the image function itself is continuous), even if the
* second boundary condition (that the derivative of the image function is
* continuous) is not met. A masking algorithm that does preserve the second
* boundary condition (perhaps something based on a highly-modified bi-cubic
* algorithm) should offer even better results on paper, but the noise in a
* typical TV signal should make anything based on derivatives hopelessly noisy.
*/
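 
/* A small illustration of the erosion described above: binarizing a
* 5x5 bitmap that contains a 3x3 block of logo pixels gives
*
*   0 0 0 0 0
*   0 1 1 1 0
*   0 1 1 1 0
*   0 1 1 1 0
*   0 0 0 0 0
*
* On the first pass only the centre pixel keeps all four neighbors
* >= 1, so it is incremented to 2; the second pass changes nothing and
* the loop exits. Each logo pixel then holds its Manhattan distance to
* the nearest non-logo pixel (1 on the ring, 2 in the centre), and
* max_mask_size follows from the pass count plus the fudge factor. */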
 
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "bbox.h"
#include "lavfutils.h"
#include "lswsutils.h"
 
typedef struct {
const AVClass *class;
char *filename;
/* Stores our collection of masks. The first index selects the mask
size, the second the y axis, and the third the x axis. */
int ***mask;
int max_mask_size;
int mask_w, mask_h;
 
uint8_t *full_mask_data;
FFBoundingBox full_mask_bbox;
uint8_t *half_mask_data;
FFBoundingBox half_mask_bbox;
} RemovelogoContext;
 
#define OFFSET(x) offsetof(RemovelogoContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption removelogo_options[] = {
{ "filename", "set bitmap filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "f", "set bitmap filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(removelogo);
 
/**
* Choose a slightly larger mask size to improve performance.
*
* This function maps the absolute minimum mask size needed to the
* mask size we'll actually use. f(x) = x (the smallest that will
* work) will produce the sharpest results, but will be quite
* jittery. f(x) = 1.25x (what I'm using) is a good tradeoff in my
* opinion. This is evaluated only at init-time, so you can put a
* long expression here without affecting performance.
*/
#define apply_mask_fudge_factor(x) (((x) >> 2) + (x))
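/* For example, apply_mask_fudge_factor(8) = 8 + 8/4 = 10, matching the
1.25x factor described above (with integer truncation). */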
 
/**
* Pre-process an image to give distance information.
*
* This function takes a bitmap image and converts it in place into a
* distance image. A distance image is zero for pixels outside of the
* logo and is the Manhattan distance (|dx| + |dy|) from the logo edge
* for pixels inside of the logo. This will overestimate the distance,
* but that is safe, and is far easier to implement than a proper
* pythagorean distance since I'm using a modified erosion algorithm
* to compute the distances.
*
* @param data image which will be converted in place from a greyscale
* image into a distance image.
*/
static void convert_mask_to_strength_mask(uint8_t *data, int linesize,
int w, int h, int min_val,
int *max_mask_size)
{
int x, y;
 
/* How many times we've gone through the loop. Used in the
in-place erosion algorithm and to get us max_mask_size later on. */
int current_pass = 0;
 
/* set all non-zero values to 1 */
for (y = 0; y < h; y++)
for (x = 0; x < w; x++)
data[y*linesize + x] = data[y*linesize + x] > min_val;
 
/* For each pass, if a pixel is itself the same value as the
current pass, and its four neighbors are too, then it is
incremented. If no pixels are incremented by the end of the
pass, then we go again. Edge pixels are counted as always
excluded (this should be true anyway for any sane mask, but if
it isn't this will ensure that we eventually exit). */
while (1) {
/* If this doesn't get set by the end of this pass, then we're done. */
int has_anything_changed = 0;
uint8_t *current_pixel0 = data + 1 + linesize, *current_pixel;
current_pass++;
 
for (y = 1; y < h-1; y++) {
current_pixel = current_pixel0;
for (x = 1; x < w-1; x++) {
/* Apply the in-place erosion transform. It is based
on the following two premises:
1 - Any pixel that fails 1 erosion will fail all
future erosions.
 
2 - Only pixels having survived all erosions up to
the present will be >= to current_pass.
It doesn't matter if it survived the current pass,
failed it, or hasn't been tested yet. By using >=
instead of ==, we allow the algorithm to work in
place. */
if ( *current_pixel >= current_pass &&
*(current_pixel + 1) >= current_pass &&
*(current_pixel - 1) >= current_pass &&
*(current_pixel + linesize) >= current_pass &&
*(current_pixel - linesize) >= current_pass) {
/* Increment the value since it still has not been
* eroded, as evidenced by the if statement that
* just evaluated to true. */
(*current_pixel)++;
has_anything_changed = 1;
}
current_pixel++;
}
current_pixel0 += linesize;
}
if (!has_anything_changed)
break;
}
 
/* Apply the fudge factor, which will increase the size of the
* mask a little to reduce jitter at the cost of more blur. */
for (y = 1; y < h - 1; y++)
for (x = 1; x < w - 1; x++)
data[(y * linesize) + x] = apply_mask_fudge_factor(data[(y * linesize) + x]);
 
/* As a side-effect, we now know the maximum mask size, which
* we'll use to generate our masks. */
/* Apply the fudge factor to this number too, since we must ensure
* that enough masks are generated. */
*max_mask_size = apply_mask_fudge_factor(current_pass + 1);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int load_mask(uint8_t **mask, int *w, int *h,
const char *filename, void *log_ctx)
{
int ret;
enum AVPixelFormat pix_fmt;
uint8_t *src_data[4], *gray_data[4];
int src_linesize[4], gray_linesize[4];
 
/* load image from file */
if ((ret = ff_load_image(src_data, src_linesize, w, h, &pix_fmt, filename, log_ctx)) < 0)
return ret;
 
/* convert the image to GRAY8 */
if ((ret = ff_scale_image(gray_data, gray_linesize, *w, *h, AV_PIX_FMT_GRAY8,
src_data, src_linesize, *w, *h, pix_fmt,
log_ctx)) < 0)
goto end;
 
/* copy mask to a newly allocated array */
*mask = av_malloc(*w * *h);
if (!*mask) {
ret = AVERROR(ENOMEM);
goto end;
}
av_image_copy_plane(*mask, *w, gray_data[0], gray_linesize[0], *w, *h);
 
end:
av_freep(&src_data[0]);
av_freep(&gray_data[0]);
return ret;
}
 
/**
* Generate a scaled down image with half width, height, and intensity.
*
* This function not only scales down an image, but halves the value
* in each pixel too. The purpose of this is to produce a chroma
* filter image out of a luma filter image. The pixel values store the
* distance to the edge of the logo and halving the dimensions halves
* the distance. This function rounds up, because a downwards rounding
* error could cause the filter to fail, but an upwards rounding error
* will only cause a minor amount of excess blur in the chroma planes.
*/
static void generate_half_size_image(const uint8_t *src_data, int src_linesize,
uint8_t *dst_data, int dst_linesize,
int src_w, int src_h,
int *max_mask_size)
{
int x, y;
 
/* Copy over the image data, computing each downsampled pixel
* from its 2x2 block of source pixels. */
for (y = 0; y < src_h/2; y++) {
for (x = 0; x < src_w/2; x++) {
/* Set the pixel if there exists a non-zero value in the
* source pixels, else clear it. */
dst_data[(y * dst_linesize) + x] =
src_data[((y << 1) * src_linesize) + (x << 1)] ||
src_data[((y << 1) * src_linesize) + (x << 1) + 1] ||
src_data[(((y << 1) + 1) * src_linesize) + (x << 1)] ||
src_data[(((y << 1) + 1) * src_linesize) + (x << 1) + 1];
dst_data[(y * dst_linesize) + x] = FFMIN(1, dst_data[(y * dst_linesize) + x]);
}
}
 
convert_mask_to_strength_mask(dst_data, dst_linesize,
src_w/2, src_h/2, 0, max_mask_size);
}
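 
/* Illustration: a pixel that is 4 erosion steps inside the logo at full
resolution lands about 2 steps inside it in the half-size mask, since
the distances are recomputed on the downsampled bitmap; the chroma
planes therefore get correspondingly smaller blur masks. */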
 
static av_cold int init(AVFilterContext *ctx)
{
RemovelogoContext *s = ctx->priv;
int ***mask;
int ret = 0;
int a, b, c, w, h;
int full_max_mask_size, half_max_mask_size;
 
if (!s->filename) {
av_log(ctx, AV_LOG_ERROR, "The bitmap file name is mandatory\n");
return AVERROR(EINVAL);
}
 
/* Load our mask image. */
if ((ret = load_mask(&s->full_mask_data, &w, &h, s->filename, ctx)) < 0)
return ret;
s->mask_w = w;
s->mask_h = h;
 
convert_mask_to_strength_mask(s->full_mask_data, w, w, h,
16, &full_max_mask_size);
 
/* Create the scaled down mask image for the chroma planes. */
if (!(s->half_mask_data = av_mallocz(w/2 * h/2)))
return AVERROR(ENOMEM);
generate_half_size_image(s->full_mask_data, w,
s->half_mask_data, w/2,
w, h, &half_max_mask_size);
 
s->max_mask_size = FFMAX(full_max_mask_size, half_max_mask_size);
 
/* Create a circular mask for each size up to max_mask_size. When
the filter is applied, the mask size is determined on a pixel
by pixel basis, with pixels nearer the edge of the logo getting
smaller mask sizes. */
mask = (int ***)av_malloc(sizeof(int **) * (s->max_mask_size + 1));
if (!mask)
return AVERROR(ENOMEM);
 
for (a = 0; a <= s->max_mask_size; a++) {
mask[a] = (int **)av_malloc(sizeof(int *) * ((a * 2) + 1));
if (!mask[a])
return AVERROR(ENOMEM);
for (b = -a; b <= a; b++) {
mask[a][b + a] = (int *)av_malloc(sizeof(int) * ((a * 2) + 1));
if (!mask[a][b + a])
return AVERROR(ENOMEM);
for (c = -a; c <= a; c++) {
if ((b * b) + (c * c) <= (a * a)) /* Circular 0/1 mask. */
mask[a][b + a][c + a] = 1;
else
mask[a][b + a][c + a] = 0;
}
}
}
s->mask = mask;
 
/* Calculate our bounding rectangles, which determine in what
* region the logo resides for faster processing. */
ff_calculate_bounding_box(&s->full_mask_bbox, s->full_mask_data, w, w, h, 0);
ff_calculate_bounding_box(&s->half_mask_bbox, s->half_mask_data, w/2, w/2, h/2, 0);
 
#define SHOW_LOGO_INFO(mask_type) \
av_log(ctx, AV_LOG_VERBOSE, #mask_type " x1:%d x2:%d y1:%d y2:%d max_mask_size:%d\n", \
s->mask_type##_mask_bbox.x1, s->mask_type##_mask_bbox.x2, \
s->mask_type##_mask_bbox.y1, s->mask_type##_mask_bbox.y2, \
mask_type##_max_mask_size);
SHOW_LOGO_INFO(full);
SHOW_LOGO_INFO(half);
 
return 0;
}
 
static int config_props_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
RemovelogoContext *s = ctx->priv;
 
if (inlink->w != s->mask_w || inlink->h != s->mask_h) {
av_log(ctx, AV_LOG_ERROR,
"Mask image size %dx%d does not match the input video size %dx%d\n",
s->mask_w, s->mask_h, inlink->w, inlink->h);
return AVERROR(EINVAL);
}
 
return 0;
}
 
/**
* Blur image.
*
* It takes a pixel that is inside the mask and blurs it. It does so
* by averaging all of the pixels that lie within the blur circle but
* outside of the logo.
*
* @param mask_data the mask plane to use for averaging
* @param image_data the image plane to blur
* @param w width of the image
* @param h height of the image
* @param x x-coordinate of the pixel to blur
* @param y y-coordinate of the pixel to blur
*/
static unsigned int blur_pixel(int ***mask,
const uint8_t *mask_data, int mask_linesize,
uint8_t *image_data, int image_linesize,
int w, int h, int x, int y)
{
/* Mask size tells how large a circle to use. The radius is about
* (slightly larger than) mask size. */
int mask_size;
int start_posx, start_posy, end_posx, end_posy;
int i, j;
unsigned int accumulator = 0, divisor = 0;
/* What pixel we are reading out of the source image. */
const uint8_t *image_read_position;
/* What pixel we are reading out of the filter (mask) image. */
const uint8_t *mask_read_position;
 
/* Prepare our bounding rectangle and clip it if need be. */
mask_size = mask_data[y * mask_linesize + x];
start_posx = FFMAX(0, x - mask_size);
start_posy = FFMAX(0, y - mask_size);
end_posx = FFMIN(w - 1, x + mask_size);
end_posy = FFMIN(h - 1, y + mask_size);
 
image_read_position = image_data + image_linesize * start_posy + start_posx;
mask_read_position = mask_data + mask_linesize * start_posy + start_posx;
 
for (j = start_posy; j <= end_posy; j++) {
for (i = start_posx; i <= end_posx; i++) {
/* Check if this pixel is in the mask or not. Only use the
* pixel if it is not. */
if (!(*mask_read_position) && mask[mask_size][i - start_posx][j - start_posy]) {
accumulator += *image_read_position;
divisor++;
}
 
image_read_position++;
mask_read_position++;
}
 
image_read_position += (image_linesize - ((end_posx + 1) - start_posx));
mask_read_position += (mask_linesize - ((end_posx + 1) - start_posx));
}
 
/* If divisor is 0, it means that not a single pixel is outside of
the logo, so we have no data. Else we need to normalise the
data using the divisor. */
return divisor == 0 ? 255:
(accumulator + (divisor / 2)) / divisor; /* divide, taking into account average rounding error */
}
 
/**
* Blur image plane using a mask.
*
* @param mask array of circular masks, indexed by mask size
* @param src_data the image to have its logo removed
* @param src_linesize how far apart (in memory) two consecutive source lines are
* @param dst_data where the output image will be stored
* @param dst_linesize same as src_linesize, but for the destination image
* @param mask_data the plane storing the distance to the edge of the logo
* for each pixel
* @param mask_linesize same as src_linesize, but for the mask plane
* @param w width of the image, the same for source and destination
* @param h height of the image, the same for source and destination
* @param direct if non-zero, source and destination are the same and we can
* save a lot of time by not copying pixels that haven't changed
* @param bbox bounding box of the logo: the smallest and largest coordinates
* that contain at least one logo pixel
*
* This function processes an entire plane. Pixels outside of the logo are copied
* to the output without change, and pixels inside the logo have the de-blurring
* function applied.
*/
static void blur_image(int ***mask,
const uint8_t *src_data, int src_linesize,
uint8_t *dst_data, int dst_linesize,
const uint8_t *mask_data, int mask_linesize,
int w, int h, int direct,
FFBoundingBox *bbox)
{
int x, y;
uint8_t *dst_line;
const uint8_t *src_line;
 
if (!direct)
av_image_copy_plane(dst_data, dst_linesize, src_data, src_linesize, w, h);
 
for (y = bbox->y1; y <= bbox->y2; y++) {
src_line = src_data + src_linesize * y;
dst_line = dst_data + dst_linesize * y;
 
for (x = bbox->x1; x <= bbox->x2; x++) {
if (mask_data[y * mask_linesize + x]) {
/* Only process if we are in the mask. */
dst_line[x] = blur_pixel(mask,
mask_data, mask_linesize,
dst_data, dst_linesize,
w, h, x, y);
} else {
/* Else just copy the data. */
if (!direct)
dst_line[x] = src_line[x];
}
}
}
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
RemovelogoContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpicref;
int direct = 0;
 
if (av_frame_is_writable(inpicref)) {
direct = 1;
outpicref = inpicref;
} else {
outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpicref) {
av_frame_free(&inpicref);
return AVERROR(ENOMEM);
}
av_frame_copy_props(outpicref, inpicref);
}
 
blur_image(s->mask,
inpicref ->data[0], inpicref ->linesize[0],
outpicref->data[0], outpicref->linesize[0],
s->full_mask_data, inlink->w,
inlink->w, inlink->h, direct, &s->full_mask_bbox);
blur_image(s->mask,
inpicref ->data[1], inpicref ->linesize[1],
outpicref->data[1], outpicref->linesize[1],
s->half_mask_data, inlink->w/2,
inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);
blur_image(s->mask,
inpicref ->data[2], inpicref ->linesize[2],
outpicref->data[2], outpicref->linesize[2],
s->half_mask_data, inlink->w/2,
inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);
 
if (!direct)
av_frame_free(&inpicref);
 
return ff_filter_frame(outlink, outpicref);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
RemovelogoContext *s = ctx->priv;
int a, b;
 
av_freep(&s->full_mask_data);
av_freep(&s->half_mask_data);
 
if (s->mask) {
/* Loop through each mask. */
for (a = 0; a <= s->max_mask_size; a++) {
/* Loop through each scanline in a mask. */
for (b = -a; b <= a; b++) {
av_freep(&s->mask[a][b + a]); /* Free a scanline. */
}
av_freep(&s->mask[a]);
}
/* Free the array of pointers pointing to the masks. */
av_freep(&s->mask);
}
}
 
static const AVFilterPad removelogo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad removelogo_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_removelogo = {
.name = "removelogo",
.description = NULL_IF_CONFIG_SMALL("Remove a TV logo based on a mask image."),
.priv_size = sizeof(RemovelogoContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = removelogo_inputs,
.outputs = removelogo_outputs,
.priv_class = &removelogo_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_rotate.c
0,0 → 1,489
/*
* Copyright (c) 2013 Stefano Sabatini
* Copyright (c) 2008 Vitor Sessak
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* rotation filter, partially based on the tests/rotozoom.c program
*/
 
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
 
#include "avfilter.h"
#include "drawutils.h"
#include "internal.h"
#include "video.h"
 
static const char *var_names[] = {
"in_w" , "iw", ///< width of the input video
"in_h" , "ih", ///< height of the input video
"out_w", "ow", ///< width of the output video
"out_h", "oh", ///< height of the output video
"hsub", "vsub",
"n", ///< number of frame
"t", ///< timestamp expressed in seconds
NULL
};
 
enum var_name {
VAR_IN_W , VAR_IW,
VAR_IN_H , VAR_IH,
VAR_OUT_W, VAR_OW,
VAR_OUT_H, VAR_OH,
VAR_HSUB, VAR_VSUB,
VAR_N,
VAR_T,
VAR_VARS_NB
};
 
typedef struct {
const AVClass *class;
double angle;
char *angle_expr_str; ///< expression for the angle
AVExpr *angle_expr; ///< parsed expression for the angle
char *outw_expr_str, *outh_expr_str;
int outh, outw;
uint8_t fillcolor[4]; ///< color expressed either in YUVA or RGBA colorspace for the padding area
char *fillcolor_str;
int fillcolor_enable;
int hsub, vsub;
int nb_planes;
int use_bilinear;
float sinx, cosx;
double var_values[VAR_VARS_NB];
FFDrawContext draw;
FFDrawColor color;
} RotContext;
 
typedef struct ThreadData {
AVFrame *in, *out;
int inw, inh;
int outw, outh;
int plane;
int xi, yi;
int xprime, yprime;
int c, s;
} ThreadData;
 
#define OFFSET(x) offsetof(RotContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption rotate_options[] = {
{ "angle", "set angle (in radians)", OFFSET(angle_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
{ "a", "set angle (in radians)", OFFSET(angle_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
{ "out_w", "set output width expression", OFFSET(outw_expr_str), AV_OPT_TYPE_STRING, {.str="iw"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
{ "ow", "set output width expression", OFFSET(outw_expr_str), AV_OPT_TYPE_STRING, {.str="iw"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
{ "out_h", "set output height expression", OFFSET(outh_expr_str), AV_OPT_TYPE_STRING, {.str="ih"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
{ "oh", "set output height expression", OFFSET(outh_expr_str), AV_OPT_TYPE_STRING, {.str="ih"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
{ "fillcolor", "set background fill color", OFFSET(fillcolor_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
{ "c", "set background fill color", OFFSET(fillcolor_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
{ "bilinear", "use bilinear interpolation", OFFSET(use_bilinear), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, .flags=FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(rotate);
 
static av_cold int init(AVFilterContext *ctx)
{
RotContext *rot = ctx->priv;
 
if (!strcmp(rot->fillcolor_str, "none"))
rot->fillcolor_enable = 0;
else if (av_parse_color(rot->fillcolor, rot->fillcolor_str, -1, ctx) >= 0)
rot->fillcolor_enable = 1;
else
return AVERROR(EINVAL);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
RotContext *rot = ctx->priv;
 
av_expr_free(rot->angle_expr);
rot->angle_expr = NULL;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
AV_PIX_FMT_0RGB, AV_PIX_FMT_RGB0,
AV_PIX_FMT_0BGR, AV_PIX_FMT_BGR0,
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static double get_rotated_w(void *opaque, double angle)
{
RotContext *rot = opaque;
double inw = rot->var_values[VAR_IN_W];
double inh = rot->var_values[VAR_IN_H];
float sinx = sin(angle);
float cosx = cos(angle);
 
return FFMAX(0, inh * sinx) + FFMAX(0, -inw * cosx) +
FFMAX(0, inw * cosx) + FFMAX(0, -inh * sinx);
}
 
static double get_rotated_h(void *opaque, double angle)
{
RotContext *rot = opaque;
double inw = rot->var_values[VAR_IN_W];
double inh = rot->var_values[VAR_IN_H];
float sinx = sin(angle);
float cosx = cos(angle);
 
return FFMAX(0, -inh * cosx) + FFMAX(0, -inw * sinx) +
FFMAX(0, inh * cosx) + FFMAX(0, inw * sinx);
}
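 
/* Both expressions reduce to inw*|cos(a)| + inh*|sin(a)| and
inh*|cos(a)| + inw*|sin(a)| respectively, since FFMAX(0,v)+FFMAX(0,-v)
is |v|. For example, at a = PI/2 rotw gives ih and roth gives iw, so
"rotate=PI/2:ow=rotw(PI/2):oh=roth(PI/2)" swaps the frame dimensions. */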
 
static double (* const func1[])(void *, double) = {
get_rotated_w,
get_rotated_h,
NULL
};
 
static const char * const func1_names[] = {
"rotw",
"roth",
NULL
};
 
static int config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
RotContext *rot = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
int ret;
double res;
char *expr;
 
ff_draw_init(&rot->draw, inlink->format, 0);
ff_draw_color(&rot->draw, &rot->color, rot->fillcolor);
 
rot->hsub = pixdesc->log2_chroma_w;
rot->vsub = pixdesc->log2_chroma_h;
 
rot->var_values[VAR_IN_W] = rot->var_values[VAR_IW] = inlink->w;
rot->var_values[VAR_IN_H] = rot->var_values[VAR_IH] = inlink->h;
rot->var_values[VAR_HSUB] = 1<<rot->hsub;
rot->var_values[VAR_VSUB] = 1<<rot->vsub;
rot->var_values[VAR_N] = NAN;
rot->var_values[VAR_T] = NAN;
rot->var_values[VAR_OUT_W] = rot->var_values[VAR_OW] = NAN;
rot->var_values[VAR_OUT_H] = rot->var_values[VAR_OH] = NAN;
 
av_expr_free(rot->angle_expr);
rot->angle_expr = NULL;
if ((ret = av_expr_parse(&rot->angle_expr, expr = rot->angle_expr_str, var_names,
func1_names, func1, NULL, NULL, 0, ctx)) < 0) {
av_log(ctx, AV_LOG_ERROR,
"Error occurred parsing angle expression '%s'\n", rot->angle_expr_str);
return ret;
}
 
#define SET_SIZE_EXPR(name, opt_name) do { \
ret = av_expr_parse_and_eval(&res, expr = rot->name##_expr_str, \
var_names, rot->var_values, \
func1_names, func1, NULL, NULL, rot, 0, ctx); \
if (ret < 0 || isnan(res) || isinf(res) || res <= 0) { \
av_log(ctx, AV_LOG_ERROR, \
"Error parsing or evaluating expression for option %s: " \
"invalid expression '%s' or non-positive or indefinite value %f\n", \
opt_name, expr, res); \
return ret < 0 ? ret : AVERROR(EINVAL); \
} \
} while (0)
 
/* evaluate width and height */
av_expr_parse_and_eval(&res, expr = rot->outw_expr_str, var_names, rot->var_values,
func1_names, func1, NULL, NULL, rot, 0, ctx);
rot->var_values[VAR_OUT_W] = rot->var_values[VAR_OW] = res;
rot->outw = res + 0.5;
SET_SIZE_EXPR(outh, "out_h");
rot->var_values[VAR_OUT_H] = rot->var_values[VAR_OH] = res;
rot->outh = res + 0.5;
 
/* evaluate the width again, as it may depend on the evaluated output height */
SET_SIZE_EXPR(outw, "out_w");
rot->var_values[VAR_OUT_W] = rot->var_values[VAR_OW] = res;
rot->outw = res + 0.5;
 
/* compute number of planes */
rot->nb_planes = av_pix_fmt_count_planes(inlink->format);
outlink->w = rot->outw;
outlink->h = rot->outh;
return 0;
}
 
#define FIXP (1<<16)
#define INT_PI 205887 //(M_PI * FIXP)
 
/**
* Compute the sin of a using integer values.
* Input and output values are scaled by FIXP.
*/
static int64_t int_sin(int64_t a)
{
int64_t a2, res = 0;
int i;
if (a < 0) a = INT_PI-a; // 0..inf
a %= 2 * INT_PI; // 0..2PI
 
if (a >= INT_PI*3/2) a -= 2*INT_PI; // -PI/2 .. 3PI/2
if (a >= INT_PI/2 ) a = INT_PI - a; // -PI/2 .. PI/2
 
/* compute sin using Taylor series approximated to the third term */
a2 = (a*a)/FIXP;
for (i = 2; i < 7; i += 2) {
res += a;
a = -a*a2 / (FIXP*i*(i+1));
}
return res;
}
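 
/* For example, int_sin(INT_PI/2) sums the series a - a^3/3! + a^5/5!
at a ~= PI/2 and returns roughly FIXP (65536), i.e. sin(PI/2) ~= 1.0
in 16.16 fixed point. filter_frame() below derives the cosine as
int_sin(angle + INT_PI/2). */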
 
/**
* Interpolate the color in src at position x and y using bilinear
* interpolation.
*/
static uint8_t *interpolate_bilinear(uint8_t *dst_color,
const uint8_t *src, int src_linesize, int src_linestep,
int x, int y, int max_x, int max_y)
{
int int_x = av_clip(x>>16, 0, max_x);
int int_y = av_clip(y>>16, 0, max_y);
int frac_x = x&0xFFFF;
int frac_y = y&0xFFFF;
int i;
int int_x1 = FFMIN(int_x+1, max_x);
int int_y1 = FFMIN(int_y+1, max_y);
 
for (i = 0; i < src_linestep; i++) {
int s00 = src[src_linestep * int_x + i + src_linesize * int_y ];
int s01 = src[src_linestep * int_x1 + i + src_linesize * int_y ];
int s10 = src[src_linestep * int_x + i + src_linesize * int_y1];
int s11 = src[src_linestep * int_x1 + i + src_linesize * int_y1];
int s0 = (((1<<16) - frac_x)*s00 + frac_x*s01);
int s1 = (((1<<16) - frac_x)*s10 + frac_x*s11);
 
dst_color[i] = ((int64_t)((1<<16) - frac_y)*s0 + (int64_t)frac_y*s1) >> 32;
}
 
return dst_color;
}
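 
/* x and y are in 16.16 fixed point: the integer part selects the
top-left sample, the fractional part the blend weights. For example,
x = (3 << 16) + 0x8000 mixes columns 3 and 4 with equal weight; the
final shift by 32 removes both 2^16 weight scales at once. */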
 
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
 
static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
{
ThreadData *td = arg;
AVFrame *in = td->in;
AVFrame *out = td->out;
RotContext *rot = ctx->priv;
const int outw = td->outw, outh = td->outh;
const int inw = td->inw, inh = td->inh;
const int plane = td->plane;
const int xi = td->xi, yi = td->yi;
const int c = td->c, s = td->s;
const int start = (outh * job ) / nb_jobs;
const int end = (outh * (job+1)) / nb_jobs;
int xprime = td->xprime + start * s;
int yprime = td->yprime + start * c;
int i, j, x, y;
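/* Walk the output plane while tracking the corresponding input
position in 16.16 fixed point: rather than evaluating the inverse
rotation per pixel, the position is advanced by (c, -s) per output
column and by (s, c) per output row. */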
 
for (j = start; j < end; j++) {
x = xprime + xi + FIXP*inw/2;
y = yprime + yi + FIXP*inh/2;
 
for (i = 0; i < outw; i++) {
int32_t v;
int x1, y1;
uint8_t *pin, *pout;
x += c;
y -= s;
x1 = x>>16;
y1 = y>>16;
 
/* the out-of-range values avoid border artifacts */
if (x1 >= -1 && x1 <= inw && y1 >= -1 && y1 <= inh) {
uint8_t inp_inv[4]; /* interpolated input value */
pout = out->data[plane] + j * out->linesize[plane] + i * rot->draw.pixelstep[plane];
if (rot->use_bilinear) {
pin = interpolate_bilinear(inp_inv,
in->data[plane], in->linesize[plane], rot->draw.pixelstep[plane],
x, y, inw-1, inh-1);
} else {
int x2 = av_clip(x1, 0, inw-1);
int y2 = av_clip(y1, 0, inh-1);
pin = in->data[plane] + y2 * in->linesize[plane] + x2 * rot->draw.pixelstep[plane];
}
switch (rot->draw.pixelstep[plane]) {
case 1:
*pout = *pin;
break;
case 2:
*((uint16_t *)pout) = *((uint16_t *)pin);
break;
case 3:
v = AV_RB24(pin);
AV_WB24(pout, v);
break;
case 4:
*((uint32_t *)pout) = *((uint32_t *)pin);
break;
default:
memcpy(pout, pin, rot->draw.pixelstep[plane]);
break;
}
}
}
xprime += s;
yprime += c;
}
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
RotContext *rot = ctx->priv;
int angle_int, s, c, plane;
double res;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
 
rot->var_values[VAR_N] = inlink->frame_count;
rot->var_values[VAR_T] = TS2T(in->pts, inlink->time_base);
rot->angle = res = av_expr_eval(rot->angle_expr, rot->var_values, rot);
 
av_log(ctx, AV_LOG_DEBUG, "n:%f time:%f angle:%f/PI\n",
rot->var_values[VAR_N], rot->var_values[VAR_T], rot->angle/M_PI);
 
angle_int = res * FIXP;
s = int_sin(angle_int);
c = int_sin(angle_int + INT_PI/2);
 
/* fill background */
if (rot->fillcolor_enable)
ff_fill_rectangle(&rot->draw, &rot->color, out->data, out->linesize,
0, 0, outlink->w, outlink->h);
 
for (plane = 0; plane < rot->nb_planes; plane++) {
int hsub = plane == 1 || plane == 2 ? rot->hsub : 0;
int vsub = plane == 1 || plane == 2 ? rot->vsub : 0;
const int outw = FF_CEIL_RSHIFT(outlink->w, hsub);
const int outh = FF_CEIL_RSHIFT(outlink->h, vsub);
ThreadData td = { .in = in, .out = out,
.inw = FF_CEIL_RSHIFT(inlink->w, hsub),
.inh = FF_CEIL_RSHIFT(inlink->h, vsub),
.outh = outh, .outw = outw,
.xi = -outw/2 * c, .yi = outw/2 * s,
.xprime = -outh/2 * s,
.yprime = -outh/2 * c,
.plane = plane, .c = c, .s = s };
 
 
ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ctx->graph->nb_threads));
}
 
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
 
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
char *res, int res_len, int flags)
{
RotContext *rot = ctx->priv;
int ret;
 
if (!strcmp(cmd, "angle") || !strcmp(cmd, "a")) {
AVExpr *old = rot->angle_expr;
ret = av_expr_parse(&rot->angle_expr, args, var_names,
NULL, NULL, NULL, NULL, 0, ctx);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR,
"Error when parsing the expression '%s' for angle command\n", args);
rot->angle_expr = old;
return ret;
}
av_expr_free(old);
} else
ret = AVERROR(ENOSYS);
 
return ret;
}
 
static const AVFilterPad rotate_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad rotate_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_vf_rotate = {
.name = "rotate",
.description = NULL_IF_CONFIG_SMALL("Rotate the input image."),
.priv_size = sizeof(RotContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.process_command = process_command,
.inputs = rotate_inputs,
.outputs = rotate_outputs,
.priv_class = &rotate_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_sab.c
0,0 → 1,339
/*
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* Shape Adaptive Blur filter, ported from MPlayer libmpcodecs/vf_sab.c
*/
 
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libswscale/swscale.h"
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
typedef struct {
float radius;
float pre_filter_radius;
float strength;
float quality;
struct SwsContext *pre_filter_context;
uint8_t *pre_filter_buf;
int pre_filter_linesize;
int dist_width;
int dist_linesize;
int *dist_coeff;
#define COLOR_DIFF_COEFF_SIZE 512
int color_diff_coeff[COLOR_DIFF_COEFF_SIZE];
} FilterParam;
 
typedef struct {
const AVClass *class;
FilterParam luma;
FilterParam chroma;
int hsub;
int vsub;
unsigned int sws_flags;
} SabContext;
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV411P,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
#define RADIUS_MIN 0.1
#define RADIUS_MAX 4.0
 
#define PRE_FILTER_RADIUS_MIN 0.1
#define PRE_FILTER_RADIUS_MAX 2.0
 
#define STRENGTH_MIN 0.1
#define STRENGTH_MAX 100.0
 
#define OFFSET(x) offsetof(SabContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption sab_options[] = {
{ "luma_radius", "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
{ "lr" , "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
{ "luma_pre_filter_radius", "set luma pre-filter radius", OFFSET(luma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, PRE_FILTER_RADIUS_MIN, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
{ "lpfr", "set luma pre-filter radius", OFFSET(luma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, PRE_FILTER_RADIUS_MIN, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
{ "luma_strength", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
{ "ls", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
 
{ "chroma_radius", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
{ "cr", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
{ "chroma_pre_filter_radius", "set chroma pre-filter radius", OFFSET(chroma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=PRE_FILTER_RADIUS_MIN-1},
PRE_FILTER_RADIUS_MIN-1, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
{ "cpfr", "set chroma pre-filter radius", OFFSET(chroma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=PRE_FILTER_RADIUS_MIN-1},
PRE_FILTER_RADIUS_MIN-1, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
{ "chroma_strength", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
{ "cs", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
 
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(sab);
 
static av_cold int init(AVFilterContext *ctx)
{
SabContext *sab = ctx->priv;
 
/* make chroma default to luma values, if not explicitly set */
if (sab->chroma.radius < RADIUS_MIN)
sab->chroma.radius = sab->luma.radius;
if (sab->chroma.pre_filter_radius < PRE_FILTER_RADIUS_MIN)
sab->chroma.pre_filter_radius = sab->luma.pre_filter_radius;
if (sab->chroma.strength < STRENGTH_MIN)
sab->chroma.strength = sab->luma.strength;
 
sab->luma.quality = sab->chroma.quality = 3.0;
sab->sws_flags = SWS_POINT;
 
av_log(ctx, AV_LOG_VERBOSE,
"luma_radius:%f luma_pre_filter_radius:%f luma_strength:%f "
"chroma_radius:%f chroma_pre_filter_radius:%f chroma_strength:%f\n",
sab->luma .radius, sab->luma .pre_filter_radius, sab->luma .strength,
sab->chroma.radius, sab->chroma.pre_filter_radius, sab->chroma.strength);
return 0;
}
 
static void close_filter_param(FilterParam *f)
{
if (f->pre_filter_context) {
sws_freeContext(f->pre_filter_context);
f->pre_filter_context = NULL;
}
av_freep(&f->pre_filter_buf);
av_freep(&f->dist_coeff);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
SabContext *sab = ctx->priv;
 
close_filter_param(&sab->luma);
close_filter_param(&sab->chroma);
}
 
static int open_filter_param(FilterParam *f, int width, int height, unsigned int sws_flags)
{
SwsVector *vec;
SwsFilter sws_f;
int i, x, y;
int linesize = FFALIGN(width, 8);
 
f->pre_filter_buf = av_malloc(linesize * height);
if (!f->pre_filter_buf)
return AVERROR(ENOMEM);
 
f->pre_filter_linesize = linesize;
vec = sws_getGaussianVec(f->pre_filter_radius, f->quality);
sws_f.lumH = sws_f.lumV = vec;
sws_f.chrH = sws_f.chrV = NULL;
f->pre_filter_context = sws_getContext(width, height, AV_PIX_FMT_GRAY8,
width, height, AV_PIX_FMT_GRAY8,
sws_flags, &sws_f, NULL, NULL);
sws_freeVec(vec);
 
vec = sws_getGaussianVec(f->strength, 5.0);
for (i = 0; i < COLOR_DIFF_COEFF_SIZE; i++) {
double d;
int index = i-COLOR_DIFF_COEFF_SIZE/2 + vec->length/2;
 
if (index < 0 || index >= vec->length) d = 0.0;
else d = vec->coeff[index];
 
f->color_diff_coeff[i] = (int)(d/vec->coeff[vec->length/2]*(1<<12) + 0.5);
}
sws_freeVec(vec);
 
vec = sws_getGaussianVec(f->radius, f->quality);
f->dist_width = vec->length;
f->dist_linesize = FFALIGN(vec->length, 8);
f->dist_coeff = av_malloc(f->dist_width * f->dist_linesize * sizeof(*f->dist_coeff));
if (!f->dist_coeff) {
sws_freeVec(vec);
return AVERROR(ENOMEM);
}
 
for (y = 0; y < vec->length; y++) {
for (x = 0; x < vec->length; x++) {
double d = vec->coeff[x] * vec->coeff[y];
f->dist_coeff[x + y*f->dist_linesize] = (int)(d*(1<<10) + 0.5);
}
}
sws_freeVec(vec);
 
return 0;
}
 
static int config_props(AVFilterLink *inlink)
{
SabContext *sab = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int ret;
 
sab->hsub = desc->log2_chroma_w;
sab->vsub = desc->log2_chroma_h;
 
close_filter_param(&sab->luma);
ret = open_filter_param(&sab->luma, inlink->w, inlink->h, sab->sws_flags);
if (ret < 0)
return ret;
 
close_filter_param(&sab->chroma);
ret = open_filter_param(&sab->chroma,
FF_CEIL_RSHIFT(inlink->w, sab->hsub),
FF_CEIL_RSHIFT(inlink->h, sab->vsub), sab->sws_flags);
return ret;
}
 
#define NB_PLANES 4
 
static void blur(uint8_t *dst, const int dst_linesize,
const uint8_t *src, const int src_linesize,
const int w, const int h, FilterParam *fp)
{
int x, y;
FilterParam f = *fp;
const int radius = f.dist_width/2;
 
const uint8_t * const src2[NB_PLANES] = { src };
int src2_linesize[NB_PLANES] = { src_linesize };
uint8_t *dst2[NB_PLANES] = { f.pre_filter_buf };
int dst2_linesize[NB_PLANES] = { f.pre_filter_linesize };
 
sws_scale(f.pre_filter_context, src2, src2_linesize, 0, h, dst2, dst2_linesize);
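/* Each neighbor's weight is the product of a photometric term (how
close its pre-filtered value is to the centre pixel's, looked up in
color_diff_coeff) and a spatial gaussian term (dist_coeff), in the
spirit of a bilateral filter. */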
 
#define UPDATE_FACTOR do { \
int factor; \
factor = f.color_diff_coeff[COLOR_DIFF_COEFF_SIZE/2 + pre_val - \
f.pre_filter_buf[ix + iy*f.pre_filter_linesize]] * f.dist_coeff[dx + dy*f.dist_linesize]; \
sum += src[ix + iy*src_linesize] * factor; \
div += factor; \
} while (0)
 
for (y = 0; y < h; y++) {
for (x = 0; x < w; x++) {
int sum = 0;
int div = 0;
int dy;
const int pre_val = f.pre_filter_buf[x + y*f.pre_filter_linesize];
if (x >= radius && x < w - radius) {
for (dy = 0; dy < radius*2 + 1; dy++) {
int dx;
int iy = y+dy - radius;
if (iy < 0) iy = -iy;
else if (iy >= h) iy = h+h-iy-1;
 
for (dx = 0; dx < radius*2 + 1; dx++) {
const int ix = x+dx - radius;
UPDATE_FACTOR;
}
}
} else {
for (dy = 0; dy < radius*2+1; dy++) {
int dx;
int iy = y+dy - radius;
if (iy < 0) iy = -iy;
else if (iy >= h) iy = h+h-iy-1;
 
for (dx = 0; dx < radius*2 + 1; dx++) {
int ix = x+dx - radius;
if (ix < 0) ix = -ix;
else if (ix >= w) ix = w+w-ix-1;
UPDATE_FACTOR;
}
}
}
dst[x + y*dst_linesize] = (sum + div/2) / div;
}
}
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
SabContext *sab = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpic;
 
outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpic) {
av_frame_free(&inpic);
return AVERROR(ENOMEM);
}
av_frame_copy_props(outpic, inpic);
 
blur(outpic->data[0], outpic->linesize[0], inpic->data[0], inpic->linesize[0],
inlink->w, inlink->h, &sab->luma);
if (inpic->data[2]) {
int cw = FF_CEIL_RSHIFT(inlink->w, sab->hsub);
int ch = FF_CEIL_RSHIFT(inlink->h, sab->vsub);
blur(outpic->data[1], outpic->linesize[1], inpic->data[1], inpic->linesize[1], cw, ch, &sab->chroma);
blur(outpic->data[2], outpic->linesize[2], inpic->data[2], inpic->linesize[2], cw, ch, &sab->chroma);
}
 
av_frame_free(&inpic);
return ff_filter_frame(outlink, outpic);
}
 
static const AVFilterPad sab_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
 
static const AVFilterPad sab_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_sab = {
.name = "sab",
.description = NULL_IF_CONFIG_SMALL("Apply shape adaptive blur."),
.priv_size = sizeof(SabContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = sab_inputs,
.outputs = sab_outputs,
.priv_class = &sab_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_scale.c
0,0 → 1,580
/*
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* scale video filter
*/
 
#include <stdio.h>
#include <string.h>
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
#include "libswscale/swscale.h"
 
static const char *const var_names[] = {
"in_w", "iw",
"in_h", "ih",
"out_w", "ow",
"out_h", "oh",
"a",
"sar",
"dar",
"hsub",
"vsub",
NULL
};
 
enum var_name {
VAR_IN_W, VAR_IW,
VAR_IN_H, VAR_IH,
VAR_OUT_W, VAR_OW,
VAR_OUT_H, VAR_OH,
VAR_A,
VAR_SAR,
VAR_DAR,
VAR_HSUB,
VAR_VSUB,
VARS_NB
};
 
typedef struct {
const AVClass *class;
struct SwsContext *sws; ///< software scaler context
struct SwsContext *isws[2]; ///< software scaler context for interlaced material
AVDictionary *opts;
 
/**
* New dimensions. Special values are:
* 0 = original width/height
* -1 = keep original aspect
*/
int w, h;
char *size_str;
unsigned int flags; ///< sws flags
 
int hsub, vsub; ///< chroma subsampling
int slice_y; ///< top of current output slice
int input_is_pal; ///< set to 1 if the input format is paletted
int output_is_pal; ///< set to 1 if the output format is paletted
int interlaced;
 
char *w_expr; ///< width expression string
char *h_expr; ///< height expression string
char *flags_str;
 
char *in_color_matrix;
char *out_color_matrix;
 
int in_range;
int out_range;
 
int out_h_chr_pos;
int out_v_chr_pos;
int in_h_chr_pos;
int in_v_chr_pos;
 
int force_original_aspect_ratio;
} ScaleContext;
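 
/* For example, given the rules above, "scale=w=1280:h=-1" on a
1920x1080 input keeps the aspect ratio and yields 1280x720, while
w=0:h=0 passes the frame size through unchanged. */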
 
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
ScaleContext *scale = ctx->priv;
int ret;
 
if (scale->size_str && (scale->w_expr || scale->h_expr)) {
av_log(ctx, AV_LOG_ERROR,
"Size and width/height expressions cannot be set at the same time.\n");
return AVERROR(EINVAL);
}
 
if (scale->w_expr && !scale->h_expr)
FFSWAP(char *, scale->w_expr, scale->size_str);
 
if (scale->size_str) {
char buf[32];
if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
av_log(ctx, AV_LOG_ERROR,
"Invalid size '%s'\n", scale->size_str);
return ret;
}
snprintf(buf, sizeof(buf)-1, "%d", scale->w);
av_opt_set(scale, "w", buf, 0);
snprintf(buf, sizeof(buf)-1, "%d", scale->h);
av_opt_set(scale, "h", buf, 0);
}
if (!scale->w_expr)
av_opt_set(scale, "w", "iw", 0);
if (!scale->h_expr)
av_opt_set(scale, "h", "ih", 0);
 
av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);
 
scale->flags = 0;
 
if (scale->flags_str) {
const AVClass *class = sws_get_class();
const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
AV_OPT_SEARCH_FAKE_OBJ);
int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
if (ret < 0)
return ret;
}
scale->opts = *opts;
*opts = NULL;
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
ScaleContext *scale = ctx->priv;
sws_freeContext(scale->sws);
sws_freeContext(scale->isws[0]);
sws_freeContext(scale->isws[1]);
scale->sws = NULL;
av_dict_free(&scale->opts);
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats;
enum AVPixelFormat pix_fmt;
int ret;
 
if (ctx->inputs[0]) {
formats = NULL;
for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
if ((sws_isSupportedInput(pix_fmt) ||
sws_isSupportedEndiannessConversion(pix_fmt))
&& (ret = ff_add_format(&formats, pix_fmt)) < 0) {
ff_formats_unref(&formats);
return ret;
}
ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
}
if (ctx->outputs[0]) {
formats = NULL;
for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
if ((sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8 ||
sws_isSupportedEndiannessConversion(pix_fmt))
&& (ret = ff_add_format(&formats, pix_fmt)) < 0) {
ff_formats_unref(&formats);
return ret;
}
ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
}
 
return 0;
}
 
static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
{
if (!s)
s = "bt601";
 
if (s && strstr(s, "bt709")) {
colorspace = AVCOL_SPC_BT709;
} else if (s && strstr(s, "fcc")) {
colorspace = AVCOL_SPC_FCC;
} else if (s && strstr(s, "smpte240m")) {
colorspace = AVCOL_SPC_SMPTE240M;
} else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
colorspace = AVCOL_SPC_BT470BG;
}
 
if (colorspace < 1 || colorspace > 7) {
colorspace = AVCOL_SPC_BT470BG;
}
 
return sws_getCoefficients(colorspace);
}
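 
/* Note: the AVCOL_SPC_* values produced above (1, 4, 5 and 7) coincide
numerically with libswscale's SWS_CS_* constants, which is why they
can be passed straight to sws_getCoefficients(). */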
 
static int config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = outlink->src->inputs[0];
enum AVPixelFormat outfmt = outlink->format;
ScaleContext *scale = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int64_t w, h;
double var_values[VARS_NB], res;
char *expr;
int ret;
 
var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
var_values[VAR_A] = (double) inlink->w / inlink->h;
var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
(double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
 
/* evaluate width and height */
av_expr_parse_and_eval(&res, (expr = scale->w_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx);
scale->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
if ((ret = av_expr_parse_and_eval(&res, (expr = scale->h_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto fail;
scale->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
/* evaluate again the width, as it may depend on the output height */
if ((ret = av_expr_parse_and_eval(&res, (expr = scale->w_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto fail;
scale->w = res;
 
w = scale->w;
h = scale->h;
 
/* sanity check params */
if (w < -1 || h < -1) {
av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
return AVERROR(EINVAL);
}
if (w == -1 && h == -1)
scale->w = scale->h = 0;
 
if (!(w = scale->w))
w = inlink->w;
if (!(h = scale->h))
h = inlink->h;
if (w == -1)
w = av_rescale(h, inlink->w, inlink->h);
if (h == -1)
h = av_rescale(w, inlink->h, inlink->w);
 
if (scale->force_original_aspect_ratio) {
int tmp_w = av_rescale(h, inlink->w, inlink->h);
int tmp_h = av_rescale(w, inlink->h, inlink->w);
 
if (scale->force_original_aspect_ratio == 1) {
w = FFMIN(tmp_w, w);
h = FFMIN(tmp_h, h);
} else {
w = FFMAX(tmp_w, w);
h = FFMAX(tmp_h, h);
}
}
 
if (w > INT_MAX || h > INT_MAX ||
(h * inlink->w) > INT_MAX ||
(w * inlink->h) > INT_MAX) {
av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
return AVERROR(EINVAL);
}
 
outlink->w = w;
outlink->h = h;
 
/* TODO: make algorithm configurable */
 
scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL ||
desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;
if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL ||
av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;
 
if (scale->sws)
sws_freeContext(scale->sws);
if (scale->isws[0])
sws_freeContext(scale->isws[0]);
if (scale->isws[1])
sws_freeContext(scale->isws[1]);
scale->isws[0] = scale->isws[1] = scale->sws = NULL;
if (inlink->w == outlink->w && inlink->h == outlink->h &&
inlink->format == outlink->format)
;
else {
struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
int i;
 
for (i = 0; i < 3; i++) {
struct SwsContext **s = swscs[i];
*s = sws_alloc_context();
if (!*s)
return AVERROR(ENOMEM);
 
if (scale->opts) {
AVDictionaryEntry *e = NULL;
 
while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
if ((ret = av_opt_set(*s, e->key, e->value, 0)) < 0)
return ret;
}
}
 
av_opt_set_int(*s, "srcw", inlink ->w, 0);
av_opt_set_int(*s, "srch", inlink ->h >> !!i, 0);
av_opt_set_int(*s, "src_format", inlink->format, 0);
av_opt_set_int(*s, "dstw", outlink->w, 0);
av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
av_opt_set_int(*s, "dst_format", outfmt, 0);
av_opt_set_int(*s, "sws_flags", scale->flags, 0);
 
av_opt_set_int(*s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
av_opt_set_int(*s, "src_v_chr_pos", scale->in_v_chr_pos, 0);
av_opt_set_int(*s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
av_opt_set_int(*s, "dst_v_chr_pos", scale->out_v_chr_pos, 0);
 
if ((ret = sws_init_context(*s, NULL, NULL)) < 0)
return ret;
if (!scale->interlaced)
break;
}
}
 
if (inlink->sample_aspect_ratio.num){
outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, outlink->w * inlink->h}, inlink->sample_aspect_ratio);
} else
outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
 
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
scale->flags);
return 0;
 
fail:
av_log(NULL, AV_LOG_ERROR,
"Error when evaluating the expression '%s'.\n"
"Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
expr, scale->w_expr, scale->h_expr);
return ret;
}
 
static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
{
ScaleContext *scale = link->dst->priv;
const uint8_t *in[4];
uint8_t *out[4];
int in_stride[4],out_stride[4];
int i;
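 
/* In the interlaced path, filter_frame() passes mul = 2 and field 0/1:
the doubled strides make each half-height sws context (see the
"srch"/"dsth" setup in config_props) treat one field as a contiguous
frame, while "field" offsets the pointers to the second line for the
bottom field. */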
 
for(i=0; i<4; i++){
int vsub= ((i+1)&2) ? scale->vsub : 0;
in_stride[i] = cur_pic->linesize[i] * mul;
out_stride[i] = out_buf->linesize[i] * mul;
in[i] = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i];
out[i] = out_buf->data[i] + field * out_buf->linesize[i];
}
if(scale->input_is_pal)
in[1] = cur_pic->data[1];
if(scale->output_is_pal)
out[1] = out_buf->data[1];
 
return sws_scale(sws, in, in_stride, y/mul, h,
out,out_stride);
}
 
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
ScaleContext *scale = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0];
AVFrame *out;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
char buf[32];
int in_range;
 
if( in->width != link->w
|| in->height != link->h
|| in->format != link->format) {
int ret;
snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
av_opt_set(scale, "w", buf, 0);
snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
av_opt_set(scale, "h", buf, 0);
 
link->dst->inputs[0]->format = in->format;
link->dst->inputs[0]->w = in->width;
link->dst->inputs[0]->h = in->height;
 
if ((ret = config_props(outlink)) < 0)
return ret;
}
 
if (!scale->sws)
return ff_filter_frame(outlink, in);
 
scale->hsub = desc->log2_chroma_w;
scale->vsub = desc->log2_chroma_h;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
 
av_frame_copy_props(out, in);
out->width = outlink->w;
out->height = outlink->h;
 
if(scale->output_is_pal)
avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
 
in_range = av_frame_get_color_range(in);
 
if ( scale->in_color_matrix
|| scale->out_color_matrix
|| scale-> in_range != AVCOL_RANGE_UNSPECIFIED
|| in_range != AVCOL_RANGE_UNSPECIFIED
|| scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
int in_full, out_full, brightness, contrast, saturation;
const int *inv_table, *table;
 
sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
(int **)&table, &out_full,
&brightness, &contrast, &saturation);
 
if (scale->in_color_matrix)
inv_table = parse_yuv_type(scale->in_color_matrix, av_frame_get_colorspace(in));
if (scale->out_color_matrix)
table = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
 
if (scale-> in_range != AVCOL_RANGE_UNSPECIFIED)
in_full = (scale-> in_range == AVCOL_RANGE_JPEG);
else if (in_range != AVCOL_RANGE_UNSPECIFIED)
in_full = (in_range == AVCOL_RANGE_JPEG);
if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
out_full = (scale->out_range == AVCOL_RANGE_JPEG);
 
sws_setColorspaceDetails(scale->sws, inv_table, in_full,
table, out_full,
brightness, contrast, saturation);
if (scale->isws[0])
sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
table, out_full,
brightness, contrast, saturation);
if (scale->isws[1])
sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
table, out_full,
brightness, contrast, saturation);
}
 
av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
(int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
(int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
INT_MAX);
 
if(scale->interlaced>0 || (scale->interlaced<0 && in->interlaced_frame)){
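/* Scale each field with its own context: mul=2 makes scale_slice() step
 * over every other line, and the last argument selects the top (0) or
 * bottom (1) field. */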
scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
scale_slice(link, out, in, scale->isws[1], 0, link->h /2, 2, 1);
}else{
scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
}
 
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
 
static const AVClass *child_class_next(const AVClass *prev)
{
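/* Report libswscale's AVClass as a child class, so AVOption lookups on the
 * scale filter can also resolve SwsContext options. */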
return prev ? NULL : sws_get_class();
}
 
#define OFFSET(x) offsetof(ScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption scale_options[] = {
{ "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
{ "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 1, FLAGS },
{ "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
{ "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
{ "in_color_matrix", "set input YCbCr type", OFFSET(in_color_matrix), AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS },
{ "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
{ "in_range", "set input color range", OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
{ "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
{ "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
{ "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
{ "jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
{ "mpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
{ "tv", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
{ "pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
{ "in_v_chr_pos", "input vertical chroma position in luma grid/256" , OFFSET(in_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
{ "in_h_chr_pos", "input horizontal chroma position in luma grid/256", OFFSET(in_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
{ "out_v_chr_pos", "output vertical chroma position in luma grid/256" , OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
{ "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
{ "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
{ "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
{ "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
{ "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
{ NULL }
};
 
static const AVClass scale_class = {
.class_name = "scale",
.item_name = av_default_item_name,
.option = scale_options,
.version = LIBAVUTIL_VERSION_INT,
.child_class_next = child_class_next,
};
 
static const AVFilterPad avfilter_vf_scale_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_scale_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_vf_scale = {
.name = "scale",
.description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
.init_dict = init_dict,
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(ScaleContext),
.priv_class = &scale_class,
.inputs = avfilter_vf_scale_inputs,
.outputs = avfilter_vf_scale_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_separatefields.c
0,0 → 1,146
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
 
typedef struct {
int nb_planes;
AVFrame *second;
} SeparateFieldsContext;
 
static int config_props_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
SeparateFieldsContext *sf = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
 
sf->nb_planes = av_pix_fmt_count_planes(inlink->format);
 
if (inlink->h & 1) {
av_log(ctx, AV_LOG_ERROR, "height must be even\n");
return AVERROR_INVALIDDATA;
}
 
outlink->time_base.num = inlink->time_base.num;
outlink->time_base.den = inlink->time_base.den * 2;
outlink->frame_rate.num = inlink->frame_rate.num * 2;
outlink->frame_rate.den = inlink->frame_rate.den;
outlink->w = inlink->w;
outlink->h = inlink->h / 2;
 
return 0;
}
 
static void extract_field(AVFrame *frame, int nb_planes, int type)
{
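/* Doubling the linesize makes the planes describe a single field; when
 * 'type' is set, offsetting the data pointers by one line first selects
 * the bottom field instead of the top one. */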
int i;
 
for (i = 0; i < nb_planes; i++) {
if (type)
frame->data[i] = frame->data[i] + frame->linesize[i];
frame->linesize[i] *= 2;
}
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
AVFilterContext *ctx = inlink->dst;
SeparateFieldsContext *sf = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int ret;
 
inpicref->height = outlink->h;
inpicref->interlaced_frame = 0;
 
if (!sf->second) {
goto clone;
} else {
AVFrame *second = sf->second;
 
extract_field(second, sf->nb_planes, second->top_field_first);
 
if (second->pts != AV_NOPTS_VALUE &&
inpicref->pts != AV_NOPTS_VALUE)
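/* prev_pts + cur_pts: in the doubled output time base this lands halfway
 * between this frame's first field and the next frame's first field. */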
second->pts += inpicref->pts;
else
second->pts = AV_NOPTS_VALUE;
 
ret = ff_filter_frame(outlink, second);
if (ret < 0)
return ret;
clone:
sf->second = av_frame_clone(inpicref);
if (!sf->second)
return AVERROR(ENOMEM);
}
 
extract_field(inpicref, sf->nb_planes, !inpicref->top_field_first);
 
if (inpicref->pts != AV_NOPTS_VALUE)
inpicref->pts *= 2;
 
return ff_filter_frame(outlink, inpicref);
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
SeparateFieldsContext *sf = ctx->priv;
int ret;
 
ret = ff_request_frame(ctx->inputs[0]);
if (ret == AVERROR_EOF && sf->second) {
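/* Input is done but the last frame's second field is still buffered;
 * emit it so the final field is not dropped. */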
sf->second->pts *= 2;
extract_field(sf->second, sf->nb_planes, sf->second->top_field_first);
ret = ff_filter_frame(outlink, sf->second);
sf->second = NULL;
}
 
return ret;
}
 
static const AVFilterPad separatefields_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad separatefields_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_separatefields = {
.name = "separatefields",
.description = NULL_IF_CONFIG_SMALL("Split input video frames into fields."),
.priv_size = sizeof(SeparateFieldsContext),
.inputs = separatefields_inputs,
.outputs = separatefields_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_setfield.c
0,0 → 1,94
/*
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* set field order
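*
* Example usage (illustrative): -vf setfield=tff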
*/
 
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
enum SetFieldMode {
MODE_AUTO = -1,
MODE_BFF,
MODE_TFF,
MODE_PROG,
};
 
typedef struct {
const AVClass *class;
enum SetFieldMode mode;
} SetFieldContext;
 
#define OFFSET(x) offsetof(SetFieldContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption setfield_options[] = {
{"mode", "select interlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_AUTO}, -1, MODE_PROG, FLAGS, "mode"},
{"auto", "keep the same input field", 0, AV_OPT_TYPE_CONST, {.i64=MODE_AUTO}, INT_MIN, INT_MAX, FLAGS, "mode"},
{"bff", "mark as bottom-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_BFF}, INT_MIN, INT_MAX, FLAGS, "mode"},
{"tff", "mark as top-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_TFF}, INT_MIN, INT_MAX, FLAGS, "mode"},
{"prog", "mark as progressive", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PROG}, INT_MIN, INT_MAX, FLAGS, "mode"},
{NULL}
};
 
AVFILTER_DEFINE_CLASS(setfield);
 
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
SetFieldContext *setfield = inlink->dst->priv;
 
if (setfield->mode == MODE_PROG) {
picref->interlaced_frame = 0;
} else if (setfield->mode != MODE_AUTO) {
picref->interlaced_frame = 1;
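/* MODE_BFF == 0 and MODE_TFF == 1, so the mode value maps directly
 * onto the top_field_first flag. */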
picref->top_field_first = setfield->mode;
}
return ff_filter_frame(inlink->dst->outputs[0], picref);
}
 
static const AVFilterPad setfield_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad setfield_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_setfield = {
.name = "setfield",
.description = NULL_IF_CONFIG_SMALL("Force field for the output video frame."),
.priv_size = sizeof(SetFieldContext),
.priv_class = &setfield_class,
.inputs = setfield_inputs,
.outputs = setfield_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_showinfo.c
0,0 → 1,100
/*
* Copyright (c) 2011 Stefano Sabatini
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* filter for showing textual video frame information
*/
 
#include "libavutil/adler32.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
uint32_t plane_checksum[4] = {0}, checksum = 0;
int i, plane, vsub = desc->log2_chroma_h;
 
for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
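/* hash only the visible width of each line, not the full stride */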
int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane);
uint8_t *data = frame->data[plane];
int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
 
if (linesize < 0)
return linesize;
 
for (i = 0; i < h; i++) {
plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
checksum = av_adler32_update(checksum, data, linesize);
data += frame->linesize[plane];
}
}
 
av_log(ctx, AV_LOG_INFO,
"n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
"fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
"checksum:%08X plane_checksum:[%08X",
inlink->frame_count,
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
desc->name,
frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
frame->width, frame->height,
!frame->interlaced_frame ? 'P' : /* Progressive */
frame->top_field_first ? 'T' : 'B', /* Top / Bottom */
frame->key_frame,
av_get_picture_type_char(frame->pict_type),
checksum, plane_checksum[0]);
 
for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
av_log(ctx, AV_LOG_INFO, "]\n");
 
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
 
static const AVFilterPad avfilter_vf_showinfo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_showinfo_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO
},
{ NULL }
};
 
AVFilter avfilter_vf_showinfo = {
.name = "showinfo",
.description = NULL_IF_CONFIG_SMALL("Show textual information for each video frame."),
.inputs = avfilter_vf_showinfo_inputs,
.outputs = avfilter_vf_showinfo_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_smartblur.c
0,0 → 1,304
/*
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2012 Jeremy Tran
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* Apply a smartblur filter to the input video
* Ported from MPlayer libmpcodecs/vf_smartblur.c by Michael Niedermayer.
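*
* Example usage (illustrative): -vf smartblur=lr=2:ls=0.5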
*/
 
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libswscale/swscale.h"
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
 
#define RADIUS_MIN 0.1
#define RADIUS_MAX 5.0
 
#define STRENGTH_MIN -1.0
#define STRENGTH_MAX 1.0
 
#define THRESHOLD_MIN -30
#define THRESHOLD_MAX 30
 
typedef struct {
float radius;
float strength;
int threshold;
float quality;
struct SwsContext *filter_context;
} FilterParam;
 
typedef struct {
const AVClass *class;
FilterParam luma;
FilterParam chroma;
int hsub;
int vsub;
unsigned int sws_flags;
} SmartblurContext;
 
#define OFFSET(x) offsetof(SmartblurContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption smartblur_options[] = {
{ "luma_radius", "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
{ "lr" , "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
{ "luma_strength", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
{ "ls", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
{ "luma_threshold", "set luma threshold", OFFSET(luma.threshold), AV_OPT_TYPE_INT, {.i64=0}, THRESHOLD_MIN, THRESHOLD_MAX, .flags=FLAGS },
{ "lt", "set luma threshold", OFFSET(luma.threshold), AV_OPT_TYPE_INT, {.i64=0}, THRESHOLD_MIN, THRESHOLD_MAX, .flags=FLAGS },
 
{ "chroma_radius", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
{ "cr", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
{ "chroma_strength", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
{ "cs", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
{ "chroma_threshold", "set chroma threshold", OFFSET(chroma.threshold), AV_OPT_TYPE_INT, {.i64=THRESHOLD_MIN-1}, THRESHOLD_MIN-1, THRESHOLD_MAX, .flags=FLAGS },
{ "ct", "set chroma threshold", OFFSET(chroma.threshold), AV_OPT_TYPE_INT, {.i64=THRESHOLD_MIN-1}, THRESHOLD_MIN-1, THRESHOLD_MAX, .flags=FLAGS },
 
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(smartblur);
 
static av_cold int init(AVFilterContext *ctx)
{
SmartblurContext *sblur = ctx->priv;
 
/* make chroma default to luma values, if not explicitly set */
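/* (the chroma option defaults sit one step below their minima as "unset" sentinels) */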
if (sblur->chroma.radius < RADIUS_MIN)
sblur->chroma.radius = sblur->luma.radius;
if (sblur->chroma.strength < STRENGTH_MIN)
sblur->chroma.strength = sblur->luma.strength;
if (sblur->chroma.threshold < THRESHOLD_MIN)
sblur->chroma.threshold = sblur->luma.threshold;
 
sblur->luma.quality = sblur->chroma.quality = 3.0;
sblur->sws_flags = SWS_BICUBIC;
 
av_log(ctx, AV_LOG_VERBOSE,
"luma_radius:%f luma_strength:%f luma_threshold:%d "
"chroma_radius:%f chroma_strength:%f chroma_threshold:%d\n",
sblur->luma.radius, sblur->luma.strength, sblur->luma.threshold,
sblur->chroma.radius, sblur->chroma.strength, sblur->chroma.threshold);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
SmartblurContext *sblur = ctx->priv;
 
sws_freeContext(sblur->luma.filter_context);
sws_freeContext(sblur->chroma.filter_context);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int alloc_sws_context(FilterParam *f, int width, int height, unsigned int flags)
{
SwsVector *vec;
SwsFilter sws_filter;
 
vec = sws_getGaussianVec(f->radius, f->quality);
 
if (!vec)
return AVERROR(EINVAL);
 
sws_scaleVec(vec, f->strength);
vec->coeff[vec->length / 2] += 1.0 - f->strength;
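/* The kernel now computes strength * blurred + (1 - strength) * original. */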
sws_filter.lumH = sws_filter.lumV = vec;
sws_filter.chrH = sws_filter.chrV = NULL;
f->filter_context = sws_getCachedContext(NULL,
width, height, AV_PIX_FMT_GRAY8,
width, height, AV_PIX_FMT_GRAY8,
flags, &sws_filter, NULL, NULL);
 
sws_freeVec(vec);
 
if (!f->filter_context)
return AVERROR(EINVAL);
 
return 0;
}
 
static int config_props(AVFilterLink *inlink)
{
SmartblurContext *sblur = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
sblur->hsub = desc->log2_chroma_w;
sblur->vsub = desc->log2_chroma_h;
 
alloc_sws_context(&sblur->luma, inlink->w, inlink->h, sblur->sws_flags);
alloc_sws_context(&sblur->chroma,
FF_CEIL_RSHIFT(inlink->w, sblur->hsub),
FF_CEIL_RSHIFT(inlink->h, sblur->vsub),
sblur->sws_flags);
 
return 0;
}
 
static void blur(uint8_t *dst, const int dst_linesize,
const uint8_t *src, const int src_linesize,
const int w, const int h, const int threshold,
struct SwsContext *filter_context)
{
int x, y;
int orig, filtered;
int diff;
/* Declare arrays of 4 to get aligned data */
const uint8_t* const src_array[4] = {src};
uint8_t *dst_array[4] = {dst};
int src_linesize_array[4] = {src_linesize};
int dst_linesize_array[4] = {dst_linesize};
 
sws_scale(filter_context, src_array, src_linesize_array,
0, h, dst_array, dst_linesize_array);
 
if (threshold > 0) {
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
orig = src[x + y * src_linesize];
filtered = dst[x + y * dst_linesize];
diff = orig - filtered;
 
if (diff > 0) {
if (diff > 2 * threshold)
dst[x + y * dst_linesize] = orig;
else if (diff > threshold)
/* add 'diff' and subtract 'threshold' from 'filtered' */
dst[x + y * dst_linesize] = orig - threshold;
} else {
if (-diff > 2 * threshold)
dst[x + y * dst_linesize] = orig;
else if (-diff > threshold)
/* add 'diff' and 'threshold' to 'filtered' */
dst[x + y * dst_linesize] = orig + threshold;
}
}
}
} else if (threshold < 0) {
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
orig = src[x + y * src_linesize];
filtered = dst[x + y * dst_linesize];
diff = orig - filtered;
 
if (diff > 0) {
if (diff <= -threshold)
dst[x + y * dst_linesize] = orig;
else if (diff <= -2 * threshold)
/* subtract 'diff' and 'threshold' from 'orig' */
dst[x + y * dst_linesize] = filtered - threshold;
} else {
if (diff >= threshold)
dst[x + y * dst_linesize] = orig;
else if (diff >= 2 * threshold)
/* add 'threshold' and subtract 'diff' from 'orig' */
dst[x + y * dst_linesize] = filtered + threshold;
}
}
}
}
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
SmartblurContext *sblur = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpic;
int cw = FF_CEIL_RSHIFT(inlink->w, sblur->hsub);
int ch = FF_CEIL_RSHIFT(inlink->h, sblur->vsub);
 
outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpic) {
av_frame_free(&inpic);
return AVERROR(ENOMEM);
}
av_frame_copy_props(outpic, inpic);
 
blur(outpic->data[0], outpic->linesize[0],
inpic->data[0], inpic->linesize[0],
inlink->w, inlink->h, sblur->luma.threshold,
sblur->luma.filter_context);
 
if (inpic->data[2]) {
blur(outpic->data[1], outpic->linesize[1],
inpic->data[1], inpic->linesize[1],
cw, ch, sblur->chroma.threshold,
sblur->chroma.filter_context);
blur(outpic->data[2], outpic->linesize[2],
inpic->data[2], inpic->linesize[2],
cw, ch, sblur->chroma.threshold,
sblur->chroma.filter_context);
}
 
av_frame_free(&inpic);
return ff_filter_frame(outlink, outpic);
}
 
static const AVFilterPad smartblur_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
 
static const AVFilterPad smartblur_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_smartblur = {
.name = "smartblur",
.description = NULL_IF_CONFIG_SMALL("Blur the input video without impacting the outlines."),
.priv_size = sizeof(SmartblurContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = smartblur_inputs,
.outputs = smartblur_outputs,
.priv_class = &smartblur_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_spp.c
0,0 → 1,437
/*
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2013 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* Simple post processing filter
*
* This implementation is based on an algorithm described in
* "Aria Nosratinia Embedded Post-Processing for
* Enhancement of Compressed Images (1999)"
*
* Originally written by Michael Niedermayer for the MPlayer project, and
* ported by Clément Bœsch for FFmpeg.
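*
* Example usage (illustrative): -vf spp=quality=4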
*/
 
#include "libavcodec/dsputil.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
#include "vf_spp.h"
 
enum mode {
MODE_HARD,
MODE_SOFT,
NB_MODES
};
 
#define OFFSET(x) offsetof(SPPContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption spp_options[] = {
{ "quality", "set quality", OFFSET(log2_count), AV_OPT_TYPE_INT, {.i64 = 3}, 0, MAX_LEVEL, FLAGS },
{ "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 63, FLAGS },
{ "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_HARD}, 0, NB_MODES - 1, FLAGS, "mode" },
{ "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "use_bframe_qp", "use B-frames' QP", OFFSET(use_bframe_qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(spp);
 
// XXX: share between filters?
DECLARE_ALIGNED(8, static const uint8_t, ldither)[8][8] = {
{ 0, 48, 12, 60, 3, 51, 15, 63 },
{ 32, 16, 44, 28, 35, 19, 47, 31 },
{ 8, 56, 4, 52, 11, 59, 7, 55 },
{ 40, 24, 36, 20, 43, 27, 39, 23 },
{ 2, 50, 14, 62, 1, 49, 13, 61 },
{ 34, 18, 46, 30, 33, 17, 45, 29 },
{ 10, 58, 6, 54, 9, 57, 5, 53 },
{ 42, 26, 38, 22, 41, 25, 37, 21 },
};
 
static const uint8_t offset[127][2] = {
{0,0},
{0,0}, {4,4}, // quality = 1
{0,0}, {2,2}, {6,4}, {4,6}, // quality = 2
{0,0}, {5,1}, {2,2}, {7,3}, {4,4}, {1,5}, {6,6}, {3,7}, // quality = 3
 
{0,0}, {4,0}, {1,1}, {5,1}, {3,2}, {7,2}, {2,3}, {6,3}, // quality = 4
{0,4}, {4,4}, {1,5}, {5,5}, {3,6}, {7,6}, {2,7}, {6,7},
 
{0,0}, {0,2}, {0,4}, {0,6}, {1,1}, {1,3}, {1,5}, {1,7}, // quality = 5
{2,0}, {2,2}, {2,4}, {2,6}, {3,1}, {3,3}, {3,5}, {3,7},
{4,0}, {4,2}, {4,4}, {4,6}, {5,1}, {5,3}, {5,5}, {5,7},
{6,0}, {6,2}, {6,4}, {6,6}, {7,1}, {7,3}, {7,5}, {7,7},
 
{0,0}, {4,4}, {0,4}, {4,0}, {2,2}, {6,6}, {2,6}, {6,2}, // quality = 6
{0,2}, {4,6}, {0,6}, {4,2}, {2,0}, {6,4}, {2,4}, {6,0},
{1,1}, {5,5}, {1,5}, {5,1}, {3,3}, {7,7}, {3,7}, {7,3},
{1,3}, {5,7}, {1,7}, {5,3}, {3,1}, {7,5}, {3,5}, {7,1},
{0,1}, {4,5}, {0,5}, {4,1}, {2,3}, {6,7}, {2,7}, {6,3},
{0,3}, {4,7}, {0,7}, {4,3}, {2,1}, {6,5}, {2,5}, {6,1},
{1,0}, {5,4}, {1,4}, {5,0}, {3,2}, {7,6}, {3,6}, {7,2},
{1,2}, {5,6}, {1,6}, {5,2}, {3,0}, {7,4}, {3,4}, {7,0},
};
 
static void hardthresh_c(int16_t dst[64], const int16_t src[64],
int qp, const uint8_t *permutation)
{
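/* Zero every AC coefficient with |level| <= threshold1; since threshold2 is
 * 2*threshold1, the unsigned compare below covers both signs in one test. */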
int i;
int bias = 0; // FIXME
 
unsigned threshold1 = qp * ((1<<4) - bias) - 1;
unsigned threshold2 = threshold1 << 1;
 
memset(dst, 0, 64 * sizeof(dst[0]));
dst[0] = (src[0] + 4) >> 3;
 
for (i = 1; i < 64; i++) {
int level = src[i];
if (((unsigned)(level + threshold1)) > threshold2) {
const int j = permutation[i];
dst[j] = (level + 4) >> 3;
}
}
}
 
static void softthresh_c(int16_t dst[64], const int16_t src[64],
int qp, const uint8_t *permutation)
{
int i;
int bias = 0; //FIXME
 
unsigned threshold1 = qp * ((1<<4) - bias) - 1;
unsigned threshold2 = threshold1 << 1;
 
memset(dst, 0, 64 * sizeof(dst[0]));
dst[0] = (src[0] + 4) >> 3;
 
for (i = 1; i < 64; i++) {
int level = src[i];
if (((unsigned)(level + threshold1)) > threshold2) {
const int j = permutation[i];
if (level > 0) dst[j] = (level - threshold1 + 4) >> 3;
else dst[j] = (level + threshold1 + 4) >> 3;
}
}
}
 
static void store_slice_c(uint8_t *dst, const int16_t *src,
int dst_linesize, int src_linesize,
int width, int height, int log2_scale,
const uint8_t dither[8][8])
{
int y, x;
 
#define STORE(pos) do { \
temp = ((src[x + y*src_linesize + pos] << log2_scale) + d[pos]) >> 6; \
if (temp & 0x100) \
temp = ~(temp >> 31); \
dst[x + y*dst_linesize + pos] = temp; \
} while (0)
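/* STORE dithers, scales down by 64 and clamps to 8 bits: when bit 8 is set
 * the value is out of range, and ~(temp >> 31) maps negative values to 0
 * and positive overflows to 255. */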
 
for (y = 0; y < height; y++) {
const uint8_t *d = dither[y];
for (x = 0; x < width; x += 8) {
int temp;
STORE(0);
STORE(1);
STORE(2);
STORE(3);
STORE(4);
STORE(5);
STORE(6);
STORE(7);
}
}
}
 
static inline void add_block(int16_t *dst, int linesize, const int16_t block[64])
{
int y;
 
for (y = 0; y < 8; y++) {
*(uint32_t *)&dst[0 + y*linesize] += *(uint32_t *)&block[0 + y*8];
*(uint32_t *)&dst[2 + y*linesize] += *(uint32_t *)&block[2 + y*8];
*(uint32_t *)&dst[4 + y*linesize] += *(uint32_t *)&block[4 + y*8];
*(uint32_t *)&dst[6 + y*linesize] += *(uint32_t *)&block[6 + y*8];
}
}
 
// XXX: export the function?
static inline int norm_qscale(int qscale, int type)
{
switch (type) {
case FF_QSCALE_TYPE_MPEG1: return qscale;
case FF_QSCALE_TYPE_MPEG2: return qscale >> 1;
case FF_QSCALE_TYPE_H264: return qscale >> 2;
case FF_QSCALE_TYPE_VP56: return (63 - qscale + 2) >> 2;
}
return qscale;
}
 
static void filter(SPPContext *p, uint8_t *dst, uint8_t *src,
int dst_linesize, int src_linesize, int width, int height,
const uint8_t *qp_table, int qp_stride, int is_luma)
{
int x, y, i;
const int count = 1 << p->log2_count;
const int linesize = is_luma ? p->temp_linesize : FFALIGN(width+16, 16);
DECLARE_ALIGNED(16, uint64_t, block_align)[32];
int16_t *block = (int16_t *)block_align;
int16_t *block2 = (int16_t *)(block_align + 16);
 
for (y = 0; y < height; y++) {
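/* Copy each source row into the padded buffer and mirror 8 pixels at the
 * left and right edges; the loop below mirrors the top and bottom rows, so
 * shifted 8x8 DCT blocks near the borders always read valid data. */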
int index = 8 + 8*linesize + y*linesize;
memcpy(p->src + index, src + y*src_linesize, width);
for (x = 0; x < 8; x++) {
p->src[index - x - 1] = p->src[index + x ];
p->src[index + width + x ] = p->src[index + width - x - 1];
}
}
for (y = 0; y < 8; y++) {
memcpy(p->src + ( 7-y)*linesize, p->src + ( y+8)*linesize, linesize);
memcpy(p->src + (height+8+y)*linesize, p->src + (height-y+7)*linesize, linesize);
}
 
for (y = 0; y < height + 8; y += 8) {
memset(p->temp + (8 + y) * linesize, 0, 8 * linesize * sizeof(*p->temp));
for (x = 0; x < width + 8; x += 8) {
int qp;
 
if (p->qp) {
qp = p->qp;
} else{
const int qps = 3 + is_luma;
qp = qp_table[(FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
qp = FFMAX(1, norm_qscale(qp, p->qscale_type));
}
for (i = 0; i < count; i++) {
const int x1 = x + offset[i + count - 1][0];
const int y1 = y + offset[i + count - 1][1];
const int index = x1 + y1*linesize;
p->dsp.get_pixels(block, p->src + index, linesize);
p->dsp.fdct(block);
p->requantize(block2, block, qp, p->dsp.idct_permutation);
p->dsp.idct(block2);
add_block(p->temp + index, linesize, block2);
}
}
if (y)
p->store_slice(dst + (y - 8) * dst_linesize, p->temp + 8 + y*linesize,
dst_linesize, linesize, width,
FFMIN(8, height + 8 - y), MAX_LEVEL - p->log2_count,
ldither);
}
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
SPPContext *spp = inlink->dst->priv;
const int h = FFALIGN(inlink->h + 16, 16);
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
spp->hsub = desc->log2_chroma_w;
spp->vsub = desc->log2_chroma_h;
spp->temp_linesize = FFALIGN(inlink->w + 16, 16);
spp->temp = av_malloc(spp->temp_linesize * h * sizeof(*spp->temp));
spp->src = av_malloc(spp->temp_linesize * h * sizeof(*spp->src));
if (!spp->use_bframe_qp) {
/* we are assuming here the qp blocks will not be smaller than 16x16 */
spp->non_b_qp_alloc_size = FF_CEIL_RSHIFT(inlink->w, 4) * FF_CEIL_RSHIFT(inlink->h, 4);
spp->non_b_qp_table = av_calloc(spp->non_b_qp_alloc_size, sizeof(*spp->non_b_qp_table));
if (!spp->non_b_qp_table)
return AVERROR(ENOMEM);
}
if (!spp->temp || !spp->src)
return AVERROR(ENOMEM);
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
SPPContext *spp = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out = in;
int qp_stride = 0;
const int8_t *qp_table = NULL;
 
/* if we are not in a constant user quantizer mode and we don't want to use
* the quantizers from the B-frames (B-frames often have a higher QP), we
* need to save the qp table from the last non B-frame; this is what the
* following code block does */
if (!spp->qp) {
qp_table = av_frame_get_qp_table(in, &qp_stride, &spp->qscale_type);
 
if (qp_table && !spp->use_bframe_qp && in->pict_type != AV_PICTURE_TYPE_B) {
int w, h;
 
/* if the qp stride is not set, it means the QP are only defined on
* a line basis */
if (!qp_stride) {
w = FF_CEIL_RSHIFT(inlink->w, 4);
h = 1;
} else {
w = FF_CEIL_RSHIFT(qp_stride, 4);
h = FF_CEIL_RSHIFT(inlink->h, 4);
}
av_assert0(w * h <= spp->non_b_qp_alloc_size);
memcpy(spp->non_b_qp_table, qp_table, w * h);
}
}
 
if (spp->log2_count && !ctx->is_disabled) {
if (!spp->use_bframe_qp && spp->non_b_qp_table)
qp_table = spp->non_b_qp_table;
 
if (qp_table || spp->qp) {
const int cw = FF_CEIL_RSHIFT(inlink->w, spp->hsub);
const int ch = FF_CEIL_RSHIFT(inlink->h, spp->vsub);
 
/* get a new frame if in-place is not possible or if the dimensions
* are not multiple of 8 */
if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
const int aligned_w = FFALIGN(inlink->w, 8);
const int aligned_h = FFALIGN(inlink->h, 8);
 
out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
out->width = in->width;
out->height = in->height;
}
 
filter(spp, out->data[0], in->data[0], out->linesize[0], in->linesize[0], inlink->w, inlink->h, qp_table, qp_stride, 1);
filter(spp, out->data[1], in->data[1], out->linesize[1], in->linesize[1], cw, ch, qp_table, qp_stride, 0);
filter(spp, out->data[2], in->data[2], out->linesize[2], in->linesize[2], cw, ch, qp_table, qp_stride, 0);
emms_c();
}
}
 
if (in != out) {
if (in->data[3])
av_image_copy_plane(out->data[3], out->linesize[3],
in ->data[3], in ->linesize[3],
inlink->w, inlink->h);
av_frame_free(&in);
}
return ff_filter_frame(outlink, out);
}
 
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
char *res, int res_len, int flags)
{
SPPContext *spp = ctx->priv;
 
if (!strcmp(cmd, "level")) {
if (!strcmp(args, "max"))
spp->log2_count = MAX_LEVEL;
else
spp->log2_count = av_clip(strtol(args, NULL, 10), 0, MAX_LEVEL);
return 0;
}
return AVERROR(ENOSYS);
}
 
static av_cold int init(AVFilterContext *ctx)
{
SPPContext *spp = ctx->priv;
 
spp->avctx = avcodec_alloc_context3(NULL);
if (!spp->avctx)
return AVERROR(ENOMEM);
avpriv_dsputil_init(&spp->dsp, spp->avctx);
spp->store_slice = store_slice_c;
switch (spp->mode) {
case MODE_HARD: spp->requantize = hardthresh_c; break;
case MODE_SOFT: spp->requantize = softthresh_c; break;
}
if (ARCH_X86)
ff_spp_init_x86(spp);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
SPPContext *spp = ctx->priv;
 
av_freep(&spp->temp);
av_freep(&spp->src);
if (spp->avctx) {
avcodec_close(spp->avctx);
av_freep(&spp->avctx);
}
av_freep(&spp->non_b_qp_table);
}
 
static const AVFilterPad spp_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad spp_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_spp = {
.name = "spp",
.description = NULL_IF_CONFIG_SMALL("Apply a simple post processing filter."),
.priv_size = sizeof(SPPContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = spp_inputs,
.outputs = spp_outputs,
.process_command = process_command,
.priv_class = &spp_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_spp.h
0,0 → 1,59
/*
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef AVFILTER_SPP_H
#define AVFILTER_SPP_H
 
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
#include "avfilter.h"
 
#define MAX_LEVEL 6 /* quality levels */
 
typedef struct {
const AVClass *av_class;
 
int log2_count;
int qp;
int mode;
int qscale_type;
int temp_linesize;
uint8_t *src;
int16_t *temp;
AVCodecContext *avctx;
DSPContext dsp;
int8_t *non_b_qp_table;
int non_b_qp_alloc_size;
int use_bframe_qp;
int hsub, vsub;
 
void (*store_slice)(uint8_t *dst, const int16_t *src,
int dst_stride, int src_stride,
int width, int height, int log2_scale,
const uint8_t dither[8][8]);
 
void (*requantize)(int16_t dst[64], const int16_t src[64],
int qp, const uint8_t *permutation);
} SPPContext;
 
void ff_spp_init_x86(SPPContext *s);
 
#endif /* AVFILTER_SPP_H */
/contrib/sdk/sources/ffmpeg/libavfilter/vf_stereo3d.c
0,0 → 1,664
/*
* Copyright (c) 2010 Gordon Schmidt <gordon.schmidt <at> s2000.tu-chemnitz.de>
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
enum StereoCode {
ANAGLYPH_RC_GRAY, // anaglyph red/cyan gray
ANAGLYPH_RC_HALF, // anaglyph red/cyan half colored
ANAGLYPH_RC_COLOR, // anaglyph red/cyan colored
ANAGLYPH_RC_DUBOIS, // anaglyph red/cyan dubois
ANAGLYPH_GM_GRAY, // anaglyph green/magenta gray
ANAGLYPH_GM_HALF, // anaglyph green/magenta half colored
ANAGLYPH_GM_COLOR, // anaglyph green/magenta colored
ANAGLYPH_GM_DUBOIS, // anaglyph green/magenta dubois
ANAGLYPH_YB_GRAY, // anaglyph yellow/blue gray
ANAGLYPH_YB_HALF, // anaglyph yellow/blue half colored
ANAGLYPH_YB_COLOR, // anaglyph yellow/blue colored
ANAGLYPH_YB_DUBOIS, // anaglyph yellow/blue dubois
ANAGLYPH_RB_GRAY, // anaglyph red/blue gray
ANAGLYPH_RG_GRAY, // anaglyph red/green gray
MONO_L, // mono output for debugging (left eye only)
MONO_R, // mono output for debugging (right eye only)
INTERLEAVE_ROWS_LR, // row-interleave (left eye has top row)
INTERLEAVE_ROWS_RL, // row-interleave (right eye has top row)
SIDE_BY_SIDE_LR, // side by side parallel (left eye left, right eye right)
SIDE_BY_SIDE_RL, // side by side crosseye (right eye left, left eye right)
SIDE_BY_SIDE_2_LR, // side by side parallel with half width resolution
SIDE_BY_SIDE_2_RL, // side by side crosseye with half width resolution
ABOVE_BELOW_LR, // above-below (left eye above, right eye below)
ABOVE_BELOW_RL, // above-below (right eye above, left eye below)
ABOVE_BELOW_2_LR, // above-below with half height resolution
ABOVE_BELOW_2_RL, // above-below with half height resolution
ALTERNATING_LR, // alternating frames (left eye first, right eye second)
ALTERNATING_RL, // alternating frames (right eye first, left eye second)
STEREO_CODE_COUNT // TODO: needs autodetection
};
 
typedef struct StereoComponent {
enum StereoCode format;
int width, height;
int off_left, off_right;
int off_lstep, off_rstep;
int row_left, row_right;
} StereoComponent;
 
static const int ana_coeff[][3][6] = {
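/* 16.16 fixed-point mixing matrices, one 6-coefficient row per output
 * channel: coefficients 0-2 weight the left view's RGB, 3-5 the right's. */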
[ANAGLYPH_RB_GRAY] =
{{19595, 38470, 7471, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0},
{ 0, 0, 0, 19595, 38470, 7471}},
[ANAGLYPH_RG_GRAY] =
{{19595, 38470, 7471, 0, 0, 0},
{ 0, 0, 0, 19595, 38470, 7471},
{ 0, 0, 0, 0, 0, 0}},
[ANAGLYPH_RC_GRAY] =
{{19595, 38470, 7471, 0, 0, 0},
{ 0, 0, 0, 19595, 38470, 7471},
{ 0, 0, 0, 19595, 38470, 7471}},
[ANAGLYPH_RC_HALF] =
{{19595, 38470, 7471, 0, 0, 0},
{ 0, 0, 0, 0, 65536, 0},
{ 0, 0, 0, 0, 0, 65536}},
[ANAGLYPH_RC_COLOR] =
{{65536, 0, 0, 0, 0, 0},
{ 0, 0, 0, 0, 65536, 0},
{ 0, 0, 0, 0, 0, 65536}},
[ANAGLYPH_RC_DUBOIS] =
{{29891, 32800, 11559, -2849, -5763, -102},
{-2627, -2479, -1033, 24804, 48080, -1209},
{ -997, -1350, -358, -4729, -7403, 80373}},
[ANAGLYPH_GM_GRAY] =
{{ 0, 0, 0, 19595, 38470, 7471},
{19595, 38470, 7471, 0, 0, 0},
{ 0, 0, 0, 19595, 38470, 7471}},
[ANAGLYPH_GM_HALF] =
{{ 0, 0, 0, 65536, 0, 0},
{19595, 38470, 7471, 0, 0, 0},
{ 0, 0, 0, 0, 0, 65536}},
[ANAGLYPH_GM_COLOR] =
{{ 0, 0, 0, 65536, 0, 0},
{ 0, 65536, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 65536}},
[ANAGLYPH_GM_DUBOIS] =
{{-4063,-10354, -2556, 34669, 46203, 1573},
{18612, 43778, 9372, -1049, -983, -4260},
{ -983, -1769, 1376, 590, 4915, 61407}},
[ANAGLYPH_YB_GRAY] =
{{ 0, 0, 0, 19595, 38470, 7471},
{ 0, 0, 0, 19595, 38470, 7471},
{19595, 38470, 7471, 0, 0, 0}},
[ANAGLYPH_YB_HALF] =
{{ 0, 0, 0, 65536, 0, 0},
{ 0, 0, 0, 0, 65536, 0},
{19595, 38470, 7471, 0, 0, 0}},
[ANAGLYPH_YB_COLOR] =
{{ 0, 0, 0, 65536, 0, 0},
{ 0, 0, 0, 0, 65536, 0},
{ 0, 0, 65536, 0, 0, 0}},
[ANAGLYPH_YB_DUBOIS] =
{{65535,-12650,18451, -987, -7590, -1049},
{-1604, 56032, 4196, 370, 3826, -1049},
{-2345,-10676, 1358, 5801, 11416, 56217}},
};
 
typedef struct Stereo3DContext {
const AVClass *class;
StereoComponent in, out;
int width, height;
int row_step;
const int *ana_matrix[3];
int nb_planes;
int linesize[4];
int pheight[4];
int hsub, vsub;
int pixstep[4];
AVFrame *prev;
double ts_unit;
} Stereo3DContext;
 
#define OFFSET(x) offsetof(Stereo3DContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption stereo3d_options[] = {
{ "in", "set input format", OFFSET(in.format), AV_OPT_TYPE_INT, {.i64=SIDE_BY_SIDE_LR}, SIDE_BY_SIDE_LR, STEREO_CODE_COUNT-1, FLAGS, "in"},
{ "ab2l", "above below half height left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_LR}, 0, 0, FLAGS, "in" },
{ "ab2r", "above below half height right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_RL}, 0, 0, FLAGS, "in" },
{ "abl", "above below left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_LR}, 0, 0, FLAGS, "in" },
{ "abr", "above below right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_RL}, 0, 0, FLAGS, "in" },
{ "al", "alternating frames left first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_LR}, 0, 0, FLAGS, "in" },
{ "ar", "alternating frames right first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_RL}, 0, 0, FLAGS, "in" },
{ "sbs2l", "side by side half width left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_LR}, 0, 0, FLAGS, "in" },
{ "sbs2r", "side by side half width right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_RL}, 0, 0, FLAGS, "in" },
{ "sbsl", "side by side left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_LR}, 0, 0, FLAGS, "in" },
{ "sbsr", "side by side right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_RL}, 0, 0, FLAGS, "in" },
{ "out", "set output format", OFFSET(out.format), AV_OPT_TYPE_INT, {.i64=ANAGLYPH_RC_DUBOIS}, 0, STEREO_CODE_COUNT-1, FLAGS, "out"},
{ "ab2l", "above below half height left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_LR}, 0, 0, FLAGS, "out" },
{ "ab2r", "above below half height right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_RL}, 0, 0, FLAGS, "out" },
{ "abl", "above below left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_LR}, 0, 0, FLAGS, "out" },
{ "abr", "above below right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_RL}, 0, 0, FLAGS, "out" },
{ "agmc", "anaglyph green magenta color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_COLOR}, 0, 0, FLAGS, "out" },
{ "agmd", "anaglyph green magenta dubois", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_DUBOIS}, 0, 0, FLAGS, "out" },
{ "agmg", "anaglyph green magenta gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_GRAY}, 0, 0, FLAGS, "out" },
{ "agmh", "anaglyph green magenta half color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_HALF}, 0, 0, FLAGS, "out" },
{ "al", "alternating frames left first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_LR}, 0, 0, FLAGS, "out" },
{ "ar", "alternating frames right first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_RL}, 0, 0, FLAGS, "out" },
{ "arbg", "anaglyph red blue gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RB_GRAY}, 0, 0, FLAGS, "out" },
{ "arcc", "anaglyph red cyan color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_COLOR}, 0, 0, FLAGS, "out" },
{ "arcd", "anaglyph red cyan dubois", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_DUBOIS}, 0, 0, FLAGS, "out" },
{ "arcg", "anaglyph red cyan gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_GRAY}, 0, 0, FLAGS, "out" },
{ "arch", "anaglyph red cyan half color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_HALF}, 0, 0, FLAGS, "out" },
{ "argg", "anaglyph red green gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RG_GRAY}, 0, 0, FLAGS, "out" },
{ "aybc", "anaglyph yellow blue color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_COLOR}, 0, 0, FLAGS, "out" },
{ "aybd", "anaglyph yellow blue dubois", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_DUBOIS}, 0, 0, FLAGS, "out" },
{ "aybg", "anaglyph yellow blue gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_GRAY}, 0, 0, FLAGS, "out" },
{ "aybh", "anaglyph yellow blue half color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_HALF}, 0, 0, FLAGS, "out" },
{ "irl", "interleave rows left first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_ROWS_LR}, 0, 0, FLAGS, "out" },
{ "irr", "interleave rows right first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_ROWS_RL}, 0, 0, FLAGS, "out" },
{ "ml", "mono left", 0, AV_OPT_TYPE_CONST, {.i64=MONO_L}, 0, 0, FLAGS, "out" },
{ "mr", "mono right", 0, AV_OPT_TYPE_CONST, {.i64=MONO_R}, 0, 0, FLAGS, "out" },
{ "sbs2l", "side by side half width left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_LR}, 0, 0, FLAGS, "out" },
{ "sbs2r", "side by side half width right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_RL}, 0, 0, FLAGS, "out" },
{ "sbsl", "side by side left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_LR}, 0, 0, FLAGS, "out" },
{ "sbsr", "side by side right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_RL}, 0, 0, FLAGS, "out" },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(stereo3d);
 
static const enum AVPixelFormat anaglyph_pix_fmts[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_NONE
};
 
static const enum AVPixelFormat other_pix_fmts[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_RGB48BE, AV_PIX_FMT_BGR48BE,
AV_PIX_FMT_RGB48LE, AV_PIX_FMT_BGR48LE,
AV_PIX_FMT_RGBA64BE, AV_PIX_FMT_BGRA64BE,
AV_PIX_FMT_RGBA64LE, AV_PIX_FMT_BGRA64LE,
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
AV_PIX_FMT_GBRP,
AV_PIX_FMT_GBRP9BE, AV_PIX_FMT_GBRP9LE,
AV_PIX_FMT_GBRP10BE, AV_PIX_FMT_GBRP10LE,
AV_PIX_FMT_GBRP12BE, AV_PIX_FMT_GBRP12LE,
AV_PIX_FMT_GBRP14BE, AV_PIX_FMT_GBRP14LE,
AV_PIX_FMT_GBRP16BE, AV_PIX_FMT_GBRP16LE,
AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA422P,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_YUVJ411P,
AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUV420P9LE, AV_PIX_FMT_YUVA420P9LE,
AV_PIX_FMT_YUV420P9BE, AV_PIX_FMT_YUVA420P9BE,
AV_PIX_FMT_YUV422P9LE, AV_PIX_FMT_YUVA422P9LE,
AV_PIX_FMT_YUV422P9BE, AV_PIX_FMT_YUVA422P9BE,
AV_PIX_FMT_YUV444P9LE, AV_PIX_FMT_YUVA444P9LE,
AV_PIX_FMT_YUV444P9BE, AV_PIX_FMT_YUVA444P9BE,
AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUVA420P10LE,
AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUVA420P10BE,
AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUVA422P10LE,
AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUVA422P10BE,
AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUVA444P10LE,
AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUVA444P10BE,
AV_PIX_FMT_YUV420P12BE, AV_PIX_FMT_YUV420P12LE,
AV_PIX_FMT_YUV422P12BE, AV_PIX_FMT_YUV422P12LE,
AV_PIX_FMT_YUV444P12BE, AV_PIX_FMT_YUV444P12LE,
AV_PIX_FMT_YUV420P14BE, AV_PIX_FMT_YUV420P14LE,
AV_PIX_FMT_YUV422P14BE, AV_PIX_FMT_YUV422P14LE,
AV_PIX_FMT_YUV444P14BE, AV_PIX_FMT_YUV444P14LE,
AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUVA420P16LE,
AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUVA420P16BE,
AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUVA422P16LE,
AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUVA422P16BE,
AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUVA444P16LE,
AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUVA444P16BE,
AV_PIX_FMT_NONE
};
 
static int query_formats(AVFilterContext *ctx)
{
Stereo3DContext *s = ctx->priv;
const enum AVPixelFormat *pix_fmts;
 
switch (s->out.format) {
case ANAGLYPH_GM_COLOR:
case ANAGLYPH_GM_DUBOIS:
case ANAGLYPH_GM_GRAY:
case ANAGLYPH_GM_HALF:
case ANAGLYPH_RB_GRAY:
case ANAGLYPH_RC_COLOR:
case ANAGLYPH_RC_DUBOIS:
case ANAGLYPH_RC_GRAY:
case ANAGLYPH_RC_HALF:
case ANAGLYPH_RG_GRAY:
case ANAGLYPH_YB_COLOR:
case ANAGLYPH_YB_DUBOIS:
case ANAGLYPH_YB_GRAY:
case ANAGLYPH_YB_HALF:
pix_fmts = anaglyph_pix_fmts;
break;
default:
pix_fmts = other_pix_fmts;
}
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = ctx->inputs[0];
Stereo3DContext *s = ctx->priv;
AVRational aspect = inlink->sample_aspect_ratio;
AVRational fps = inlink->frame_rate;
AVRational tb = inlink->time_base;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
int ret;
 
switch (s->in.format) {
case SIDE_BY_SIDE_2_LR:
case SIDE_BY_SIDE_LR:
case SIDE_BY_SIDE_2_RL:
case SIDE_BY_SIDE_RL:
if (inlink->w & 1) {
av_log(ctx, AV_LOG_ERROR, "width must be even\n");
return AVERROR_INVALIDDATA;
}
break;
case ABOVE_BELOW_2_LR:
case ABOVE_BELOW_LR:
case ABOVE_BELOW_2_RL:
case ABOVE_BELOW_RL:
if (s->out.format == INTERLEAVE_ROWS_LR ||
s->out.format == INTERLEAVE_ROWS_RL) {
if (inlink->h & 3) {
av_log(ctx, AV_LOG_ERROR, "height must be multiple of 4\n");
return AVERROR_INVALIDDATA;
}
}
if (inlink->h & 1) {
av_log(ctx, AV_LOG_ERROR, "height must be even\n");
return AVERROR_INVALIDDATA;
}
break;
}
 
s->in.width =
s->width = inlink->w;
s->in.height =
s->height = inlink->h;
s->row_step = 1;
s->in.off_lstep =
s->in.off_rstep =
s->in.off_left =
s->in.off_right =
s->in.row_left =
s->in.row_right = 0;
 
switch (s->in.format) {
case SIDE_BY_SIDE_2_LR:
aspect.num *= 2;
case SIDE_BY_SIDE_LR:
s->width = inlink->w / 2;
s->in.off_right = s->width;
break;
case SIDE_BY_SIDE_2_RL:
aspect.num *= 2;
case SIDE_BY_SIDE_RL:
s->width = inlink->w / 2;
s->in.off_left = s->width;
break;
case ABOVE_BELOW_2_LR:
aspect.den *= 2;
case ABOVE_BELOW_LR:
s->in.row_right =
s->height = inlink->h / 2;
break;
case ABOVE_BELOW_2_RL:
aspect.den *= 2;
case ABOVE_BELOW_RL:
s->in.row_left =
s->height = inlink->h / 2;
break;
case ALTERNATING_RL:
case ALTERNATING_LR:
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
fps.den *= 2;
tb.num *= 2;
break;
default:
av_log(ctx, AV_LOG_ERROR, "input format %d is not supported\n", s->in.format);
return AVERROR(EINVAL);
}
 
s->out.width = s->width;
s->out.height = s->height;
s->out.off_lstep =
s->out.off_rstep =
s->out.off_left =
s->out.off_right =
s->out.row_left =
s->out.row_right = 0;
 
switch (s->out.format) {
case ANAGLYPH_RB_GRAY:
case ANAGLYPH_RG_GRAY:
case ANAGLYPH_RC_GRAY:
case ANAGLYPH_RC_HALF:
case ANAGLYPH_RC_COLOR:
case ANAGLYPH_RC_DUBOIS:
case ANAGLYPH_GM_GRAY:
case ANAGLYPH_GM_HALF:
case ANAGLYPH_GM_COLOR:
case ANAGLYPH_GM_DUBOIS:
case ANAGLYPH_YB_GRAY:
case ANAGLYPH_YB_HALF:
case ANAGLYPH_YB_COLOR:
case ANAGLYPH_YB_DUBOIS: {
uint8_t rgba_map[4];
 
ff_fill_rgba_map(rgba_map, outlink->format);
s->ana_matrix[rgba_map[0]] = &ana_coeff[s->out.format][0][0];
s->ana_matrix[rgba_map[1]] = &ana_coeff[s->out.format][1][0];
s->ana_matrix[rgba_map[2]] = &ana_coeff[s->out.format][2][0];
break;
}
case SIDE_BY_SIDE_2_LR:
aspect.den *= 2;
case SIDE_BY_SIDE_LR:
s->out.width = s->width * 2;
s->out.off_right = s->width;
break;
case SIDE_BY_SIDE_2_RL:
aspect.den *= 2;
case SIDE_BY_SIDE_RL:
s->out.width = s->width * 2;
s->out.off_left = s->width;
break;
case ABOVE_BELOW_2_LR:
aspect.num *= 2;
case ABOVE_BELOW_LR:
s->out.height = s->height * 2;
s->out.row_right = s->height;
break;
case ABOVE_BELOW_2_RL:
aspect.num *= 2;
case ABOVE_BELOW_RL:
s->out.height = s->height * 2;
s->out.row_left = s->height;
break;
case INTERLEAVE_ROWS_LR:
s->row_step = 2;
s->height = s->height / 2;
s->out.off_rstep =
s->in.off_rstep = 1;
break;
case INTERLEAVE_ROWS_RL:
s->row_step = 2;
s->height = s->height / 2;
s->out.off_lstep =
s->in.off_lstep = 1;
break;
case MONO_R:
s->in.off_left = s->in.off_right;
s->in.row_left = s->in.row_right;
case MONO_L:
break;
case ALTERNATING_RL:
case ALTERNATING_LR:
fps.num *= 2;
tb.den *= 2;
break;
default:
av_log(ctx, AV_LOG_ERROR, "output format %d is not supported\n", s->out.format);
return AVERROR(EINVAL);
}
 
outlink->w = s->out.width;
outlink->h = s->out.height;
outlink->frame_rate = fps;
outlink->time_base = tb;
outlink->sample_aspect_ratio = aspect;
 
if ((ret = av_image_fill_linesizes(s->linesize, outlink->format, s->width)) < 0)
return ret;
s->nb_planes = av_pix_fmt_count_planes(outlink->format);
av_image_fill_max_pixsteps(s->pixstep, NULL, desc);
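/* duration of one output frame in time-base units, used to synthesize
 * timestamps for the extra frames emitted in alternating mode */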
s->ts_unit = av_q2d(av_inv_q(av_mul_q(outlink->frame_rate, outlink->time_base)));
s->pheight[1] = s->pheight[2] = FF_CEIL_RSHIFT(s->height, desc->log2_chroma_h);
s->pheight[0] = s->pheight[3] = s->height;
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
 
return 0;
}
 
static inline uint8_t ana_convert(const int *coeff, const uint8_t *left, const uint8_t *right)
{
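/* Weighted 16.16 fixed-point sum of the left and right RGB pixels. */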
int sum;
 
sum = coeff[0] * left[0] + coeff[3] * right[0]; //red in
sum += coeff[1] * left[1] + coeff[4] * right[1]; //green in
sum += coeff[2] * left[2] + coeff[5] * right[2]; //blue in
 
return av_clip_uint8(sum >> 16);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
AVFilterContext *ctx = inlink->dst;
Stereo3DContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out, *oleft, *oright, *ileft, *iright;
int out_off_left[4], out_off_right[4];
int in_off_left[4], in_off_right[4];
int i;
 
switch (s->in.format) {
case ALTERNATING_LR:
case ALTERNATING_RL:
if (!s->prev) {
s->prev = inpicref;
return 0;
}
ileft = s->prev;
iright = inpicref;
if (s->in.format == ALTERNATING_RL)
FFSWAP(AVFrame *, ileft, iright);
break;
default:
ileft = iright = inpicref;
}
 
out = oleft = oright = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&s->prev);
av_frame_free(&inpicref);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, inpicref);
 
if (s->out.format == ALTERNATING_LR ||
s->out.format == ALTERNATING_RL) {
oright = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!oright) {
av_frame_free(&oleft);
av_frame_free(&s->prev);
av_frame_free(&inpicref);
return AVERROR(ENOMEM);
}
av_frame_copy_props(oright, inpicref);
}
 
for (i = 0; i < 4; i++) {
int hsub = i == 1 || i == 2 ? s->hsub : 0;
int vsub = i == 1 || i == 2 ? s->vsub : 0;
in_off_left[i] = (FF_CEIL_RSHIFT(s->in.row_left, vsub) + s->in.off_lstep) * ileft->linesize[i] + FF_CEIL_RSHIFT(s->in.off_left * s->pixstep[i], hsub);
in_off_right[i] = (FF_CEIL_RSHIFT(s->in.row_right, vsub) + s->in.off_rstep) * iright->linesize[i] + FF_CEIL_RSHIFT(s->in.off_right * s->pixstep[i], hsub);
out_off_left[i] = (FF_CEIL_RSHIFT(s->out.row_left, vsub) + s->out.off_lstep) * oleft->linesize[i] + FF_CEIL_RSHIFT(s->out.off_left * s->pixstep[i], hsub);
out_off_right[i] = (FF_CEIL_RSHIFT(s->out.row_right, vsub) + s->out.off_rstep) * oright->linesize[i] + FF_CEIL_RSHIFT(s->out.off_right * s->pixstep[i], hsub);
}
 
switch (s->out.format) {
case ALTERNATING_LR:
case ALTERNATING_RL:
case SIDE_BY_SIDE_LR:
case SIDE_BY_SIDE_RL:
case SIDE_BY_SIDE_2_LR:
case SIDE_BY_SIDE_2_RL:
case ABOVE_BELOW_LR:
case ABOVE_BELOW_RL:
case ABOVE_BELOW_2_LR:
case ABOVE_BELOW_2_RL:
case INTERLEAVE_ROWS_LR:
case INTERLEAVE_ROWS_RL:
for (i = 0; i < s->nb_planes; i++) {
av_image_copy_plane(oleft->data[i] + out_off_left[i],
oleft->linesize[i] * s->row_step,
ileft->data[i] + in_off_left[i],
ileft->linesize[i] * s->row_step,
s->linesize[i], s->pheight[i]);
av_image_copy_plane(oright->data[i] + out_off_right[i],
oright->linesize[i] * s->row_step,
iright->data[i] + in_off_right[i],
iright->linesize[i] * s->row_step,
s->linesize[i], s->pheight[i]);
}
break;
case MONO_L:
iright = ileft;
case MONO_R:
for (i = 0; i < s->nb_planes; i++) {
av_image_copy_plane(out->data[i], out->linesize[i],
iright->data[i] + in_off_left[i],
iright->linesize[i],
s->linesize[i], s->pheight[i]);
}
break;
case ANAGLYPH_RB_GRAY:
case ANAGLYPH_RG_GRAY:
case ANAGLYPH_RC_GRAY:
case ANAGLYPH_RC_HALF:
case ANAGLYPH_RC_COLOR:
case ANAGLYPH_RC_DUBOIS:
case ANAGLYPH_GM_GRAY:
case ANAGLYPH_GM_HALF:
case ANAGLYPH_GM_COLOR:
case ANAGLYPH_GM_DUBOIS:
case ANAGLYPH_YB_GRAY:
case ANAGLYPH_YB_HALF:
case ANAGLYPH_YB_COLOR:
case ANAGLYPH_YB_DUBOIS: {
int x, y, il, ir, o;
const uint8_t *lsrc = ileft->data[0];
const uint8_t *rsrc = iright->data[0];
uint8_t *dst = out->data[0];
int out_width = s->out.width;
const int **ana_matrix = s->ana_matrix;
 
for (y = 0; y < s->out.height; y++) {
o = out->linesize[0] * y;
il = in_off_left[0] + y * ileft->linesize[0];
ir = in_off_right[0] + y * iright->linesize[0];
for (x = 0; x < out_width; x++, il += 3, ir += 3, o += 3) {
dst[o ] = ana_convert(ana_matrix[0], lsrc + il, rsrc + ir);
dst[o + 1] = ana_convert(ana_matrix[1], lsrc + il, rsrc + ir);
dst[o + 2] = ana_convert(ana_matrix[2], lsrc + il, rsrc + ir);
}
}
break;
}
default:
av_assert0(0);
}
 
av_frame_free(&inpicref);
av_frame_free(&s->prev);
if (oright != oleft) {
if (s->out.format == ALTERNATING_LR)
FFSWAP(AVFrame *, oleft, oright);
oright->pts = outlink->frame_count * s->ts_unit;
ff_filter_frame(outlink, oright);
out = oleft;
oleft->pts = outlink->frame_count * s->ts_unit;
} else if (s->in.format == ALTERNATING_LR ||
s->in.format == ALTERNATING_RL) {
out->pts = outlink->frame_count * s->ts_unit;
}
return ff_filter_frame(outlink, out);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
Stereo3DContext *s = ctx->priv;
 
av_frame_free(&s->prev);
}
 
static const AVFilterPad stereo3d_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad stereo3d_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_stereo3d = {
.name = "stereo3d",
.description = NULL_IF_CONFIG_SMALL("Convert between stereoscopic 3D video formats."),
.priv_size = sizeof(Stereo3DContext),
.uninit = uninit,
.query_formats = query_formats,
.inputs = stereo3d_inputs,
.outputs = stereo3d_outputs,
.priv_class = &stereo3d_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_subtitles.c
0,0 → 1,366
/*
* Copyright (c) 2011 Baptiste Coudurier
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Libass subtitles burning filter.
*
* @see{http://www.matroska.org/technical/specs/subtitles/ssa.html}
*/
 
#include <ass/ass.h>
 
#include "config.h"
#if CONFIG_SUBTITLES_FILTER
# include "libavcodec/avcodec.h"
# include "libavformat/avformat.h"
#endif
#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "drawutils.h"
#include "avfilter.h"
#include "internal.h"
#include "formats.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
ASS_Library *library;
ASS_Renderer *renderer;
ASS_Track *track;
char *filename;
char *charenc;
uint8_t rgba_map[4];
int pix_step[4]; ///< steps per pixel for each plane of the main output
int original_w, original_h;
FFDrawContext draw;
} AssContext;
 
#define OFFSET(x) offsetof(AssContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
#define COMMON_OPTIONS \
{"filename", "set the filename of file to read", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, \
{"f", "set the filename of file to read", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, \
{"original_size", "set the size of the original video (used to scale fonts)", OFFSET(original_w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, \
 
/* libass supports a log level ranging from 0 to 7 */
static const int ass_libavfilter_log_level_map[] = {
AV_LOG_QUIET, /* 0 */
AV_LOG_PANIC, /* 1 */
AV_LOG_FATAL, /* 2 */
AV_LOG_ERROR, /* 3 */
AV_LOG_WARNING, /* 4 */
AV_LOG_INFO, /* 5 */
AV_LOG_VERBOSE, /* 6 */
AV_LOG_DEBUG, /* 7 */
};
 
static void ass_log(int ass_level, const char *fmt, va_list args, void *ctx)
{
int level = ass_libavfilter_log_level_map[ass_level];
 
av_vlog(ctx, level, fmt, args);
av_log(ctx, level, "\n");
}
 
static av_cold int init(AVFilterContext *ctx)
{
AssContext *ass = ctx->priv;
 
if (!ass->filename) {
av_log(ctx, AV_LOG_ERROR, "No filename provided!\n");
return AVERROR(EINVAL);
}
 
ass->library = ass_library_init();
if (!ass->library) {
av_log(ctx, AV_LOG_ERROR, "Could not initialize libass.\n");
return AVERROR(EINVAL);
}
ass_set_message_cb(ass->library, ass_log, ctx);
 
ass->renderer = ass_renderer_init(ass->library);
if (!ass->renderer) {
av_log(ctx, AV_LOG_ERROR, "Could not initialize libass renderer.\n");
return AVERROR(EINVAL);
}
 
ass_set_fonts(ass->renderer, NULL, NULL, 1, NULL, 1);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
AssContext *ass = ctx->priv;
 
if (ass->track)
ass_free_track(ass->track);
if (ass->renderer)
ass_renderer_done(ass->renderer);
if (ass->library)
ass_library_done(ass->library);
}
 
static int query_formats(AVFilterContext *ctx)
{
ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AssContext *ass = inlink->dst->priv;
 
ff_draw_init(&ass->draw, inlink->format, 0);
 
ass_set_frame_size (ass->renderer, inlink->w, inlink->h);
if (ass->original_w && ass->original_h)
ass_set_aspect_ratio(ass->renderer, (double)inlink->w / inlink->h,
(double)ass->original_w / ass->original_h);
 
return 0;
}
 
/* libass stores an RGBA color in the format RRGGBBTT, where TT is the transparency level */
#define AR(c) ( (c)>>24)
#define AG(c) (((c)>>16)&0xFF)
#define AB(c) (((c)>>8) &0xFF)
#define AA(c) ((0xFF-(c)) &0xFF)
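 
/* Illustrative example: a color of 0xFF000000 yields AR=0xFF, AG=0, AB=0 and
 * AA=0xFF, i.e. fully opaque red; the low TT byte is 0 (no transparency) and
 * AA() converts it to a conventional alpha value as 0xFF - TT. */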
 
static void overlay_ass_image(AssContext *ass, AVFrame *picref,
const ASS_Image *image)
{
for (; image; image = image->next) {
uint8_t rgba_color[] = {AR(image->color), AG(image->color), AB(image->color), AA(image->color)};
FFDrawColor color;
ff_draw_color(&ass->draw, &color, rgba_color);
ff_blend_mask(&ass->draw, &color,
picref->data, picref->linesize,
picref->width, picref->height,
image->bitmap, image->stride, image->w, image->h,
3, 0, image->dst_x, image->dst_y);
}
}
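 
/* Sketch of the call above, assuming the drawutils semantics: each ASS_Image
 * carries an 8-bit glyph/alpha bitmap, so ff_blend_mask() is passed an
 * l2depth of 3 (mask depth of 2^3 bits) and blends the image's single RGBA
 * color through that mask at (dst_x, dst_y). */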
 
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
AssContext *ass = ctx->priv;
int detect_change = 0;
double time_ms = picref->pts * av_q2d(inlink->time_base) * 1000;
ASS_Image *image = ass_render_frame(ass->renderer, ass->track,
time_ms, &detect_change);
 
if (detect_change)
av_log(ctx, AV_LOG_DEBUG, "Change happened at time ms:%f\n", time_ms);
 
overlay_ass_image(ass, picref, image);
 
return ff_filter_frame(outlink, picref);
}
 
static const AVFilterPad ass_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
.needs_writable = 1,
},
{ NULL }
};
 
static const AVFilterPad ass_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
#if CONFIG_ASS_FILTER
 
static const AVOption ass_options[] = {
COMMON_OPTIONS
{NULL},
};
 
AVFILTER_DEFINE_CLASS(ass);
 
static av_cold int init_ass(AVFilterContext *ctx)
{
AssContext *ass = ctx->priv;
int ret = init(ctx);
 
if (ret < 0)
return ret;
 
ass->track = ass_read_file(ass->library, ass->filename, NULL);
if (!ass->track) {
av_log(ctx, AV_LOG_ERROR,
"Could not create a libass track when reading file '%s'\n",
ass->filename);
return AVERROR(EINVAL);
}
return 0;
}
 
AVFilter avfilter_vf_ass = {
.name = "ass",
.description = NULL_IF_CONFIG_SMALL("Render ASS subtitles onto input video using the libass library."),
.priv_size = sizeof(AssContext),
.init = init_ass,
.uninit = uninit,
.query_formats = query_formats,
.inputs = ass_inputs,
.outputs = ass_outputs,
.priv_class = &ass_class,
};
#endif
 
#if CONFIG_SUBTITLES_FILTER
 
static const AVOption subtitles_options[] = {
COMMON_OPTIONS
{"charenc", "set input character encoding", OFFSET(charenc), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
{NULL},
};
 
AVFILTER_DEFINE_CLASS(subtitles);
 
static av_cold int init_subtitles(AVFilterContext *ctx)
{
int ret, sid;
AVDictionary *codec_opts = NULL;
AVFormatContext *fmt = NULL;
AVCodecContext *dec_ctx = NULL;
AVCodec *dec = NULL;
const AVCodecDescriptor *dec_desc;
AVStream *st;
AVPacket pkt;
AssContext *ass = ctx->priv;
 
/* Init libass */
ret = init(ctx);
if (ret < 0)
return ret;
ass->track = ass_new_track(ass->library);
if (!ass->track) {
av_log(ctx, AV_LOG_ERROR, "Could not create a libass track\n");
return AVERROR(EINVAL);
}
 
/* Open subtitles file */
ret = avformat_open_input(&fmt, ass->filename, NULL, NULL);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR, "Unable to open %s\n", ass->filename);
goto end;
}
ret = avformat_find_stream_info(fmt, NULL);
if (ret < 0)
goto end;
 
/* Locate subtitles stream */
ret = av_find_best_stream(fmt, AVMEDIA_TYPE_SUBTITLE, -1, -1, NULL, 0);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR, "Unable to locate subtitle stream in %s\n",
ass->filename);
goto end;
}
sid = ret;
st = fmt->streams[sid];
 
/* Open decoder */
dec_ctx = st->codec;
dec = avcodec_find_decoder(dec_ctx->codec_id);
if (!dec) {
av_log(ctx, AV_LOG_ERROR, "Failed to find subtitle codec %s\n",
avcodec_get_name(dec_ctx->codec_id));
return AVERROR(EINVAL);
}
dec_desc = avcodec_descriptor_get(dec_ctx->codec_id);
if (dec_desc && !(dec_desc->props & AV_CODEC_PROP_TEXT_SUB)) {
av_log(ctx, AV_LOG_ERROR,
"Only text based subtitles are currently supported\n");
return AVERROR_PATCHWELCOME;
}
if (ass->charenc)
av_dict_set(&codec_opts, "sub_charenc", ass->charenc, 0);
ret = avcodec_open2(dec_ctx, dec, &codec_opts);
if (ret < 0)
goto end;
 
/* Decode subtitles and push them into the renderer (libass) */
if (dec_ctx->subtitle_header)
ass_process_codec_private(ass->track,
dec_ctx->subtitle_header,
dec_ctx->subtitle_header_size);
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
while (av_read_frame(fmt, &pkt) >= 0) {
int i, got_subtitle;
AVSubtitle sub = {0};
 
if (pkt.stream_index == sid) {
ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_subtitle, &pkt);
if (ret < 0) {
av_log(ctx, AV_LOG_WARNING, "Error decoding: %s (ignored)\n",
av_err2str(ret));
} else if (got_subtitle) {
for (i = 0; i < sub.num_rects; i++) {
char *ass_line = sub.rects[i]->ass;
if (!ass_line)
break;
ass_process_data(ass->track, ass_line, strlen(ass_line));
}
}
}
av_free_packet(&pkt);
avsubtitle_free(&sub);
}
 
end:
av_dict_free(&codec_opts);
if (dec_ctx)
avcodec_close(dec_ctx);
if (fmt)
avformat_close_input(&fmt);
return ret;
}
 
AVFilter avfilter_vf_subtitles = {
.name = "subtitles",
.description = NULL_IF_CONFIG_SMALL("Render text subtitles onto input video using the libass library."),
.priv_size = sizeof(AssContext),
.init = init_subtitles,
.uninit = uninit,
.query_formats = query_formats,
.inputs = ass_inputs,
.outputs = ass_outputs,
.priv_class = &subtitles_class,
};
#endif
/contrib/sdk/sources/ffmpeg/libavfilter/vf_super2xsai.c
0,0 → 1,352
/*
* Copyright (c) 2010 Niel van der Westhuizen <nielkie@gmail.com>
* Copyright (c) 2002 A'rpi
* Copyright (c) 1997-2001 ZSNES Team ( zsknight@zsnes.com / _demo_@zsnes.com )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* Super 2xSaI video filter
* Ported from MPlayer libmpcodecs/vf_2xsai.c.
*/
 
#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
/* masks used for two pixels interpolation */
uint32_t hi_pixel_mask;
uint32_t lo_pixel_mask;
 
/* masks used for four pixels interpolation */
uint32_t q_hi_pixel_mask;
uint32_t q_lo_pixel_mask;
 
int bpp; ///< bytes per pixel, pixel stride for each (packed) pixel
int is_be;
} Super2xSaIContext;
 
#define GET_RESULT(A, B, C, D) ((A != C || A != D) - (B != C || B != D))
 
#define INTERPOLATE(A, B) (((A & hi_pixel_mask) >> 1) + ((B & hi_pixel_mask) >> 1) + (A & B & lo_pixel_mask))
 
#define Q_INTERPOLATE(A, B, C, D) ((A & q_hi_pixel_mask) >> 2) + ((B & q_hi_pixel_mask) >> 2) + ((C & q_hi_pixel_mask) >> 2) + ((D & q_hi_pixel_mask) >> 2) \
+ ((((A & q_lo_pixel_mask) + (B & q_lo_pixel_mask) + (C & q_lo_pixel_mask) + (D & q_lo_pixel_mask)) >> 2) & q_lo_pixel_mask)
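 
/* The masks make packed per-channel averaging possible without unpacking:
 * INTERPOLATE() halves each component via hi_pixel_mask (the low bit of every
 * channel is cleared before the shift so nothing leaks into the neighbouring
 * channel) and restores the dropped carry with lo_pixel_mask; Q_INTERPOLATE()
 * applies the same idea to a four-pixel average using a shift by 2. */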
 
static void super2xsai(AVFilterContext *ctx,
uint8_t *src, int src_linesize,
uint8_t *dst, int dst_linesize,
int width, int height)
{
Super2xSaIContext *sai = ctx->priv;
unsigned int x, y;
uint32_t color[4][4];
unsigned char *src_line[4];
const int bpp = sai->bpp;
const uint32_t hi_pixel_mask = sai->hi_pixel_mask;
const uint32_t lo_pixel_mask = sai->lo_pixel_mask;
const uint32_t q_hi_pixel_mask = sai->q_hi_pixel_mask;
const uint32_t q_lo_pixel_mask = sai->q_lo_pixel_mask;
 
/* Point to the first 4 lines, first line is duplicated */
src_line[0] = src;
src_line[1] = src;
src_line[2] = src + src_linesize*FFMIN(1, height-1);
src_line[3] = src + src_linesize*FFMIN(2, height-1);
 
#define READ_COLOR4(dst, src_line, off) dst = *((const uint32_t *)src_line + off)
#define READ_COLOR3(dst, src_line, off) dst = AV_RL24 (src_line + 3*off)
#define READ_COLOR2(dst, src_line, off) dst = sai->is_be ? AV_RB16(src_line + 2 * off) : AV_RL16(src_line + 2 * off)
 
for (y = 0; y < height; y++) {
uint8_t *dst_line[2];
 
dst_line[0] = dst + dst_linesize*2*y;
dst_line[1] = dst + dst_linesize*(2*y+1);
 
switch (bpp) {
case 4:
READ_COLOR4(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR4(color[0][2], src_line[0], 1); READ_COLOR4(color[0][3], src_line[0], 2);
READ_COLOR4(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR4(color[1][2], src_line[1], 1); READ_COLOR4(color[1][3], src_line[1], 2);
READ_COLOR4(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR4(color[2][2], src_line[2], 1); READ_COLOR4(color[2][3], src_line[2], 2);
READ_COLOR4(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR4(color[3][2], src_line[3], 1); READ_COLOR4(color[3][3], src_line[3], 2);
break;
case 3:
READ_COLOR3(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR3(color[0][2], src_line[0], 1); READ_COLOR3(color[0][3], src_line[0], 2);
READ_COLOR3(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR3(color[1][2], src_line[1], 1); READ_COLOR3(color[1][3], src_line[1], 2);
READ_COLOR3(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR3(color[2][2], src_line[2], 1); READ_COLOR3(color[2][3], src_line[2], 2);
READ_COLOR3(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR3(color[3][2], src_line[3], 1); READ_COLOR3(color[3][3], src_line[3], 2);
break;
default:
READ_COLOR2(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR2(color[0][2], src_line[0], 1); READ_COLOR2(color[0][3], src_line[0], 2);
READ_COLOR2(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR2(color[1][2], src_line[1], 1); READ_COLOR2(color[1][3], src_line[1], 2);
READ_COLOR2(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR2(color[2][2], src_line[2], 1); READ_COLOR2(color[2][3], src_line[2], 2);
READ_COLOR2(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR2(color[3][2], src_line[3], 1); READ_COLOR2(color[3][3], src_line[3], 2);
}
 
for (x = 0; x < width; x++) {
uint32_t product1a, product1b, product2a, product2b;
 
//--------------------------------------- B0 B1 B2 B3 0 1 2 3
// 4 5* 6 S2 -> 4 5* 6 7
// 1 2 3 S1 8 9 10 11
// A0 A1 A2 A3 12 13 14 15
//--------------------------------------
if (color[2][1] == color[1][2] && color[1][1] != color[2][2]) {
product2b = color[2][1];
product1b = product2b;
} else if (color[1][1] == color[2][2] && color[2][1] != color[1][2]) {
product2b = color[1][1];
product1b = product2b;
} else if (color[1][1] == color[2][2] && color[2][1] == color[1][2]) {
int r = 0;
 
r += GET_RESULT(color[1][2], color[1][1], color[1][0], color[3][1]);
r += GET_RESULT(color[1][2], color[1][1], color[2][0], color[0][1]);
r += GET_RESULT(color[1][2], color[1][1], color[3][2], color[2][3]);
r += GET_RESULT(color[1][2], color[1][1], color[0][2], color[1][3]);
 
if (r > 0)
product1b = color[1][2];
else if (r < 0)
product1b = color[1][1];
else
product1b = INTERPOLATE(color[1][1], color[1][2]);
 
product2b = product1b;
} else {
if (color[1][2] == color[2][2] && color[2][2] == color[3][1] && color[2][1] != color[3][2] && color[2][2] != color[3][0])
product2b = Q_INTERPOLATE(color[2][2], color[2][2], color[2][2], color[2][1]);
else if (color[1][1] == color[2][1] && color[2][1] == color[3][2] && color[3][1] != color[2][2] && color[2][1] != color[3][3])
product2b = Q_INTERPOLATE(color[2][1], color[2][1], color[2][1], color[2][2]);
else
product2b = INTERPOLATE(color[2][1], color[2][2]);
 
if (color[1][2] == color[2][2] && color[1][2] == color[0][1] && color[1][1] != color[0][2] && color[1][2] != color[0][0])
product1b = Q_INTERPOLATE(color[1][2], color[1][2], color[1][2], color[1][1]);
else if (color[1][1] == color[2][1] && color[1][1] == color[0][2] && color[0][1] != color[1][2] && color[1][1] != color[0][3])
product1b = Q_INTERPOLATE(color[1][2], color[1][1], color[1][1], color[1][1]);
else
product1b = INTERPOLATE(color[1][1], color[1][2]);
}
 
if (color[1][1] == color[2][2] && color[2][1] != color[1][2] && color[1][0] == color[1][1] && color[1][1] != color[3][2])
product2a = INTERPOLATE(color[2][1], color[1][1]);
else if (color[1][1] == color[2][0] && color[1][2] == color[1][1] && color[1][0] != color[2][1] && color[1][1] != color[3][0])
product2a = INTERPOLATE(color[2][1], color[1][1]);
else
product2a = color[2][1];
 
if (color[2][1] == color[1][2] && color[1][1] != color[2][2] && color[2][0] == color[2][1] && color[2][1] != color[0][2])
product1a = INTERPOLATE(color[2][1], color[1][1]);
else if (color[1][0] == color[2][1] && color[2][2] == color[2][1] && color[2][0] != color[1][1] && color[2][1] != color[0][0])
product1a = INTERPOLATE(color[2][1], color[1][1]);
else
product1a = color[1][1];
 
/* Set the calculated pixels */
switch (bpp) {
case 4:
AV_WN32A(dst_line[0] + x * 8, product1a);
AV_WN32A(dst_line[0] + x * 8 + 4, product1b);
AV_WN32A(dst_line[1] + x * 8, product2a);
AV_WN32A(dst_line[1] + x * 8 + 4, product2b);
break;
case 3:
AV_WL24(dst_line[0] + x * 6, product1a);
AV_WL24(dst_line[0] + x * 6 + 3, product1b);
AV_WL24(dst_line[1] + x * 6, product2a);
AV_WL24(dst_line[1] + x * 6 + 3, product2b);
break;
default: // bpp = 2
if (sai->is_be) {
AV_WB32(dst_line[0] + x * 4, product1a | (product1b << 16));
AV_WB32(dst_line[1] + x * 4, product2a | (product2b << 16));
} else {
AV_WL32(dst_line[0] + x * 4, product1a | (product1b << 16));
AV_WL32(dst_line[1] + x * 4, product2a | (product2b << 16));
}
}
 
/* Move color matrix forward */
color[0][0] = color[0][1]; color[0][1] = color[0][2]; color[0][2] = color[0][3];
color[1][0] = color[1][1]; color[1][1] = color[1][2]; color[1][2] = color[1][3];
color[2][0] = color[2][1]; color[2][1] = color[2][2]; color[2][2] = color[2][3];
color[3][0] = color[3][1]; color[3][1] = color[3][2]; color[3][2] = color[3][3];
 
if (x < width - 3) {
x += 3;
switch (bpp) {
case 4:
READ_COLOR4(color[0][3], src_line[0], x);
READ_COLOR4(color[1][3], src_line[1], x);
READ_COLOR4(color[2][3], src_line[2], x);
READ_COLOR4(color[3][3], src_line[3], x);
break;
case 3:
READ_COLOR3(color[0][3], src_line[0], x);
READ_COLOR3(color[1][3], src_line[1], x);
READ_COLOR3(color[2][3], src_line[2], x);
READ_COLOR3(color[3][3], src_line[3], x);
break;
default: /* case 2 */
READ_COLOR2(color[0][3], src_line[0], x);
READ_COLOR2(color[1][3], src_line[1], x);
READ_COLOR2(color[2][3], src_line[2], x);
READ_COLOR2(color[3][3], src_line[3], x);
}
x -= 3;
}
}
 
/* We're done with one line, so we shift the source lines up */
src_line[0] = src_line[1];
src_line[1] = src_line[2];
src_line[2] = src_line[3];
 
/* Read next line */
src_line[3] = src_line[2];
if (y < height - 3)
src_line[3] += src_linesize;
} // y loop
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_RGB565BE, AV_PIX_FMT_BGR565BE, AV_PIX_FMT_RGB555BE, AV_PIX_FMT_BGR555BE,
AV_PIX_FMT_RGB565LE, AV_PIX_FMT_BGR565LE, AV_PIX_FMT_RGB555LE, AV_PIX_FMT_BGR555LE,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
Super2xSaIContext *sai = inlink->dst->priv;
 
sai->hi_pixel_mask = 0xFEFEFEFE;
sai->lo_pixel_mask = 0x01010101;
sai->q_hi_pixel_mask = 0xFCFCFCFC;
sai->q_lo_pixel_mask = 0x03030303;
sai->bpp = 4;
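 
/* The defaults above suit 4-byte formats, where every channel is a full byte;
 * the 16-bit cases below substitute masks that clear the low bit(s) of each
 * packed 5/6-bit field instead, e.g. 0xF7DE is RGB565 with the LSB of R, G
 * and B dropped, and 0x0821 keeps exactly those three LSBs. */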
 
switch (inlink->format) {
case AV_PIX_FMT_RGB24:
case AV_PIX_FMT_BGR24:
sai->bpp = 3;
break;
 
case AV_PIX_FMT_RGB565BE:
case AV_PIX_FMT_BGR565BE:
sai->is_be = 1;
case AV_PIX_FMT_RGB565LE:
case AV_PIX_FMT_BGR565LE:
sai->hi_pixel_mask = 0xF7DEF7DE;
sai->lo_pixel_mask = 0x08210821;
sai->q_hi_pixel_mask = 0xE79CE79C;
sai->q_lo_pixel_mask = 0x18631863;
sai->bpp = 2;
break;
 
case AV_PIX_FMT_BGR555BE:
case AV_PIX_FMT_RGB555BE:
sai->is_be = 1;
case AV_PIX_FMT_BGR555LE:
case AV_PIX_FMT_RGB555LE:
sai->hi_pixel_mask = 0x7BDE7BDE;
sai->lo_pixel_mask = 0x04210421;
sai->q_hi_pixel_mask = 0x739C739C;
sai->q_lo_pixel_mask = 0x0C630C63;
sai->bpp = 2;
break;
}
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterLink *inlink = outlink->src->inputs[0];
 
outlink->w = inlink->w*2;
outlink->h = inlink->h*2;
 
av_log(inlink->dst, AV_LOG_VERBOSE, "fmt:%s size:%dx%d -> size:%dx%d\n",
av_get_pix_fmt_name(inlink->format),
inlink->w, inlink->h, outlink->w, outlink->h);
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpicref) {
av_frame_free(&inpicref);
return AVERROR(ENOMEM);
}
av_frame_copy_props(outpicref, inpicref);
outpicref->width = outlink->w;
outpicref->height = outlink->h;
 
super2xsai(inlink->dst, inpicref->data[0], inpicref->linesize[0],
outpicref->data[0], outpicref->linesize[0],
inlink->w, inlink->h);
 
av_frame_free(&inpicref);
return ff_filter_frame(outlink, outpicref);
}
 
static const AVFilterPad super2xsai_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad super2xsai_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_super2xsai = {
.name = "super2xsai",
.description = NULL_IF_CONFIG_SMALL("Scale the input by 2x using the Super2xSaI pixel art algorithm."),
.priv_size = sizeof(Super2xSaIContext),
.query_formats = query_formats,
.inputs = super2xsai_inputs,
.outputs = super2xsai_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_swapuv.c
0,0 → 1,110
/*
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* swap UV filter
*/
 
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
static void do_swap(AVFrame *frame)
{
FFSWAP(uint8_t*, frame->data[1], frame->data[2]);
FFSWAP(int, frame->linesize[1], frame->linesize[2]);
FFSWAP(uint64_t, frame->error[1], frame->error[2]);
FFSWAP(AVBufferRef*, frame->buf[1], frame->buf[2]);
}
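 
/* Swapping buf[1]/buf[2] along with data[] keeps each plane pointer paired
 * with the AVBufferRef that owns it, so reference counting on the frame's
 * planes stays consistent after the swap. */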
 
static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
{
AVFrame *picref = ff_default_get_video_buffer(link, w, h);
do_swap(picref);
return picref;
}
 
static int filter_frame(AVFilterLink *link, AVFrame *inpicref)
{
do_swap(inpicref);
return ff_filter_frame(link->dst->outputs[0], inpicref);
}
 
static int is_planar_yuv(const AVPixFmtDescriptor *desc)
{
int i;
 
if (desc->flags & ~(AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA) ||
desc->nb_components < 3 ||
(desc->comp[1].depth_minus1 != desc->comp[2].depth_minus1))
return 0;
for (i = 0; i < desc->nb_components; i++) {
if (desc->comp[i].offset_plus1 != 1 ||
desc->comp[i].shift != 0 ||
desc->comp[i].plane != i)
return 0;
}
 
return 1;
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
int fmt;
 
for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
if (is_planar_yuv(desc))
ff_add_format(&formats, fmt);
}
 
ff_set_common_formats(ctx, formats);
return 0;
}
 
static const AVFilterPad swapuv_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = get_video_buffer,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad swapuv_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_swapuv = {
.name = "swapuv",
.description = NULL_IF_CONFIG_SMALL("Swap U and V components."),
.query_formats = query_formats,
.inputs = swapuv_inputs,
.outputs = swapuv_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_telecine.c
0,0 → 1,285
/*
* Copyright (c) 2012 Rudolf Polzer
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* telecine filter, heavily based on mpv-player:TOOLS/vf_dlopen/telecine.c by
* Rudolf Polzer.
*/
 
#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
int first_field;
char *pattern;
unsigned int pattern_pos;
 
AVRational pts;
double ts_unit;
int out_cnt;
int occupied;
 
int nb_planes;
int planeheight[4];
int stride[4];
 
AVFrame *frame[5];
AVFrame *temp;
} TelecineContext;
 
#define OFFSET(x) offsetof(TelecineContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption telecine_options[] = {
{"first_field", "select first field", OFFSET(first_field), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "field"},
{"top", "select top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
{"t", "select top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
{"bottom", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
{"b", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
{"pattern", "pattern that describes for how many fields a frame is to be displayed", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str="23"}, 0, 0, FLAGS},
{NULL}
};
 
AVFILTER_DEFINE_CLASS(telecine);
 
static av_cold int init(AVFilterContext *ctx)
{
TelecineContext *tc = ctx->priv;
const char *p;
int max = 0;
 
if (!strlen(tc->pattern)) {
av_log(ctx, AV_LOG_ERROR, "No pattern provided.\n");
return AVERROR_INVALIDDATA;
}
 
for (p = tc->pattern; *p; p++) {
if (!av_isdigit(*p)) {
av_log(ctx, AV_LOG_ERROR, "Provided pattern includes non-numeric characters.\n");
return AVERROR_INVALIDDATA;
}
 
max = FFMAX(*p - '0', max);
tc->pts.num += 2;
tc->pts.den += *p - '0';
}
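 
/* Illustrative example: the default pattern "23" accumulates pts = 4/5 (two
 * input fields advance per pattern entry, 2+3 output fields are consumed),
 * so config_output() turns a 24000/1001 fps input into
 * 24000/1001 * 5/4 = 30000/1001 fps, the classic 3:2 pulldown. */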
 
tc->out_cnt = (max + 1) / 2;
av_log(ctx, AV_LOG_INFO, "Telecine pattern %s yields up to %d frames per frame, pts advance factor: %d/%d\n",
tc->pattern, tc->out_cnt, tc->pts.num, tc->pts.den);
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *pix_fmts = NULL;
int fmt;
 
for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
desc->flags & AV_PIX_FMT_FLAG_PAL ||
desc->flags & AV_PIX_FMT_FLAG_BITSTREAM))
ff_add_format(&pix_fmts, fmt);
}
 
ff_set_common_formats(ctx, pix_fmts);
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
TelecineContext *tc = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int i, ret;
 
tc->temp = ff_get_video_buffer(inlink, inlink->w, inlink->h);
if (!tc->temp)
return AVERROR(ENOMEM);
for (i = 0; i < tc->out_cnt; i++) {
tc->frame[i] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
if (!tc->frame[i])
return AVERROR(ENOMEM);
}
 
if ((ret = av_image_fill_linesizes(tc->stride, inlink->format, inlink->w)) < 0)
return ret;
 
tc->planeheight[1] = tc->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
tc->planeheight[0] = tc->planeheight[3] = inlink->h;
 
tc->nb_planes = av_pix_fmt_count_planes(inlink->format);
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
TelecineContext *tc = ctx->priv;
const AVFilterLink *inlink = ctx->inputs[0];
AVRational fps = inlink->frame_rate;
 
if (!fps.num || !fps.den) {
av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
"current rate of %d/%d is invalid\n", fps.num, fps.den);
return AVERROR(EINVAL);
}
fps = av_mul_q(fps, av_inv_q(tc->pts));
av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
 
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
outlink->frame_rate = fps;
outlink->time_base = av_mul_q(inlink->time_base, tc->pts);
av_log(ctx, AV_LOG_VERBOSE, "TB: %d/%d -> %d/%d\n",
inlink->time_base.num, inlink->time_base.den, outlink->time_base.num, outlink->time_base.den);
 
tc->ts_unit = av_q2d(av_inv_q(av_mul_q(fps, outlink->time_base)));
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
TelecineContext *tc = ctx->priv;
int i, len, ret = 0, nout = 0;
 
len = tc->pattern[tc->pattern_pos] - '0';
 
tc->pattern_pos++;
if (!tc->pattern[tc->pattern_pos])
tc->pattern_pos = 0;
 
if (!len) { // do not output any field from this frame
av_frame_free(&inpicref);
return 0;
}
 
if (tc->occupied) {
for (i = 0; i < tc->nb_planes; i++) {
// fill in the EARLIER field from the buffered pic
av_image_copy_plane(tc->frame[nout]->data[i] + tc->frame[nout]->linesize[i] * tc->first_field,
tc->frame[nout]->linesize[i] * 2,
tc->temp->data[i] + tc->temp->linesize[i] * tc->first_field,
tc->temp->linesize[i] * 2,
tc->stride[i],
(tc->planeheight[i] - tc->first_field + 1) / 2);
// fill in the LATER field from the new pic
av_image_copy_plane(tc->frame[nout]->data[i] + tc->frame[nout]->linesize[i] * !tc->first_field,
tc->frame[nout]->linesize[i] * 2,
inpicref->data[i] + inpicref->linesize[i] * !tc->first_field,
inpicref->linesize[i] * 2,
tc->stride[i],
(tc->planeheight[i] - !tc->first_field + 1) / 2);
}
nout++;
len--;
tc->occupied = 0;
}
 
while (len >= 2) {
// output THIS image as-is
for (i = 0; i < tc->nb_planes; i++)
av_image_copy_plane(tc->frame[nout]->data[i], tc->frame[nout]->linesize[i],
inpicref->data[i], inpicref->linesize[i],
tc->stride[i],
tc->planeheight[i]);
nout++;
len -= 2;
}
 
if (len >= 1) {
// copy THIS image to the buffer, we need it later
for (i = 0; i < tc->nb_planes; i++)
av_image_copy_plane(tc->temp->data[i], tc->temp->linesize[i],
inpicref->data[i], inpicref->linesize[i],
tc->stride[i],
tc->planeheight[i]);
tc->occupied = 1;
}
 
for (i = 0; i < nout; i++) {
AVFrame *frame = av_frame_clone(tc->frame[i]);
 
if (!frame) {
av_frame_free(&inpicref);
return AVERROR(ENOMEM);
}
 
frame->pts = outlink->frame_count * tc->ts_unit;
ret = ff_filter_frame(outlink, frame);
}
av_frame_free(&inpicref);
 
return ret;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
TelecineContext *tc = ctx->priv;
int i;
 
av_frame_free(&tc->temp);
for (i = 0; i < tc->out_cnt; i++)
av_frame_free(&tc->frame[i]);
}
 
static const AVFilterPad telecine_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad telecine_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
},
{ NULL }
};
 
AVFilter avfilter_vf_telecine = {
.name = "telecine",
.description = NULL_IF_CONFIG_SMALL("Apply a telecine pattern."),
.priv_size = sizeof(TelecineContext),
.priv_class = &telecine_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = telecine_inputs,
.outputs = telecine_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_thumbnail.c
0,0 → 1,239
/*
* Copyright (c) 2011 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Potential thumbnail lookup filter, to reduce the risk of an inappropriate
* selection (such as a black frame) that we could get with an absolute seek.
*
* Simplified version of algorithm by Vadim Zaliva <lord@crocodile.org>.
* @see http://notbrainsurgery.livejournal.com/29773.html
*/
 
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
 
#define HIST_SIZE (3*256)
 
struct thumb_frame {
AVFrame *buf; ///< cached frame
int histogram[HIST_SIZE]; ///< RGB color distribution histogram of the frame
};
 
typedef struct {
const AVClass *class;
int n; ///< current frame
int n_frames; ///< number of frames for analysis
struct thumb_frame *frames; ///< the n_frames frames
AVRational tb; ///< copy of the input timebase to ease access
} ThumbContext;
 
#define OFFSET(x) offsetof(ThumbContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption thumbnail_options[] = {
{ "n", "set the frames batch size", OFFSET(n_frames), AV_OPT_TYPE_INT, {.i64=100}, 2, INT_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(thumbnail);
 
static av_cold int init(AVFilterContext *ctx)
{
ThumbContext *thumb = ctx->priv;
 
thumb->frames = av_calloc(thumb->n_frames, sizeof(*thumb->frames));
if (!thumb->frames) {
av_log(ctx, AV_LOG_ERROR,
"Allocation failure, try to lower the number of frames\n");
return AVERROR(ENOMEM);
}
av_log(ctx, AV_LOG_VERBOSE, "batch size: %d frames\n", thumb->n_frames);
return 0;
}
 
/**
* @brief Compute Sum-square deviation to estimate "closeness".
* @param hist color distribution histogram
* @param median average color distribution histogram
* @return sum of squared errors
*/
static double frame_sum_square_err(const int *hist, const double *median)
{
int i;
double err, sum_sq_err = 0;
 
for (i = 0; i < HIST_SIZE; i++) {
err = median[i] - (double)hist[i];
sum_sq_err += err*err;
}
return sum_sq_err;
}
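 
/* get_best_frame() below uses this as its distance measure: the buffered
 * frame whose histogram minimizes the sum of squared errors against the
 * batch-average histogram is picked as the most representative one. */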
 
static AVFrame *get_best_frame(AVFilterContext *ctx)
{
AVFrame *picref;
ThumbContext *thumb = ctx->priv;
int i, j, best_frame_idx = 0;
int nb_frames = thumb->n;
double avg_hist[HIST_SIZE] = {0}, sq_err, min_sq_err = -1;
 
// average histogram of the N frames
for (j = 0; j < FF_ARRAY_ELEMS(avg_hist); j++) {
for (i = 0; i < nb_frames; i++)
avg_hist[j] += (double)thumb->frames[i].histogram[j];
avg_hist[j] /= nb_frames;
}
 
// find the frame closer to the average using the sum of squared errors
for (i = 0; i < nb_frames; i++) {
sq_err = frame_sum_square_err(thumb->frames[i].histogram, avg_hist);
if (i == 0 || sq_err < min_sq_err)
best_frame_idx = i, min_sq_err = sq_err;
}
 
// free and reset everything (except the best frame buffer)
for (i = 0; i < nb_frames; i++) {
memset(thumb->frames[i].histogram, 0, sizeof(thumb->frames[i].histogram));
if (i != best_frame_idx)
av_frame_free(&thumb->frames[i].buf);
}
thumb->n = 0;
 
// raise the chosen one
picref = thumb->frames[best_frame_idx].buf;
av_log(ctx, AV_LOG_INFO, "frame id #%d (pts_time=%f) selected "
"from a set of %d images\n", best_frame_idx,
picref->pts * av_q2d(thumb->tb), nb_frames);
thumb->frames[best_frame_idx].buf = NULL;
 
return picref;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
int i, j;
AVFilterContext *ctx = inlink->dst;
ThumbContext *thumb = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int *hist = thumb->frames[thumb->n].histogram;
const uint8_t *p = frame->data[0];
 
// keep a reference of each frame
thumb->frames[thumb->n].buf = frame;
 
// update current frame RGB histogram
for (j = 0; j < inlink->h; j++) {
for (i = 0; i < inlink->w; i++) {
hist[0*256 + p[i*3 ]]++;
hist[1*256 + p[i*3 + 1]]++;
hist[2*256 + p[i*3 + 2]]++;
}
p += frame->linesize[0];
}
 
// no selection until the buffer of N frames is filled up
thumb->n++;
if (thumb->n < thumb->n_frames)
return 0;
 
return ff_filter_frame(outlink, get_best_frame(ctx));
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
int i;
ThumbContext *thumb = ctx->priv;
for (i = 0; i < thumb->n_frames && thumb->frames[i].buf; i++)
av_frame_free(&thumb->frames[i].buf);
av_freep(&thumb->frames);
}
 
static int request_frame(AVFilterLink *link)
{
AVFilterContext *ctx = link->src;
ThumbContext *thumb = ctx->priv;
 
/* loop until a frame thumbnail is available (when a frame is queued,
* thumb->n is reset to zero) */
do {
int ret = ff_request_frame(ctx->inputs[0]);
if (ret == AVERROR_EOF && thumb->n) {
ret = ff_filter_frame(link, get_best_frame(ctx));
if (ret < 0)
return ret;
ret = AVERROR_EOF;
}
if (ret < 0)
return ret;
} while (thumb->n);
return 0;
}
 
static int config_props(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
ThumbContext *thumb = ctx->priv;
 
thumb->tb = inlink->time_base;
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static const AVFilterPad thumbnail_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad thumbnail_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_thumbnail = {
.name = "thumbnail",
.description = NULL_IF_CONFIG_SMALL("Select the most representative frame in a given sequence of consecutive frames."),
.priv_size = sizeof(ThumbContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = thumbnail_inputs,
.outputs = thumbnail_outputs,
.priv_class = &thumbnail_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_tile.c
0,0 → 1,245
/*
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* tile video filter
*/
 
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "video.h"
#include "internal.h"
 
typedef struct {
const AVClass *class;
unsigned w, h;
unsigned margin;
unsigned padding;
unsigned current;
unsigned nb_frames;
FFDrawContext draw;
FFDrawColor blank;
AVFrame *out_ref;
uint8_t rgba_color[4];
} TileContext;
 
#define REASONABLE_SIZE 1024
 
#define OFFSET(x) offsetof(TileContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption tile_options[] = {
{ "layout", "set grid size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE,
{.str = "6x5"}, 0, 0, FLAGS },
{ "nb_frames", "set maximum number of frames to render", OFFSET(nb_frames),
AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
{ "margin", "set outer border margin in pixels", OFFSET(margin),
AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1024, FLAGS },
{ "padding", "set inner border thickness in pixels", OFFSET(padding),
AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1024, FLAGS },
{ "color", "set the color of the unused area", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(tile);
 
static av_cold int init(AVFilterContext *ctx)
{
TileContext *tile = ctx->priv;
 
if (tile->w > REASONABLE_SIZE || tile->h > REASONABLE_SIZE) {
av_log(ctx, AV_LOG_ERROR, "Tile size %ux%u is insane.\n",
tile->w, tile->h);
return AVERROR(EINVAL);
}
 
if (tile->nb_frames == 0) {
tile->nb_frames = tile->w * tile->h;
} else if (tile->nb_frames > tile->w * tile->h) {
av_log(ctx, AV_LOG_ERROR, "nb_frames must be less than or equal to %dx%d=%d\n",
tile->w, tile->h, tile->w * tile->h);
return AVERROR(EINVAL);
}
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
return 0;
}
 
static int config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
TileContext *tile = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
const unsigned total_margin_w = (tile->w - 1) * tile->padding + 2*tile->margin;
const unsigned total_margin_h = (tile->h - 1) * tile->padding + 2*tile->margin;
 
if (inlink->w > (INT_MAX - total_margin_w) / tile->w) {
av_log(ctx, AV_LOG_ERROR, "Total width %ux%u is too much.\n",
tile->w, inlink->w);
return AVERROR(EINVAL);
}
if (inlink->h > (INT_MAX - total_margin_h) / tile->h) {
av_log(ctx, AV_LOG_ERROR, "Total height %ux%u is too much.\n",
tile->h, inlink->h);
return AVERROR(EINVAL);
}
outlink->w = tile->w * inlink->w + total_margin_w;
outlink->h = tile->h * inlink->h + total_margin_h;
outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
outlink->frame_rate = av_mul_q(inlink->frame_rate,
(AVRational){ 1, tile->nb_frames });
ff_draw_init(&tile->draw, inlink->format, 0);
ff_draw_color(&tile->draw, &tile->blank, tile->rgba_color);
 
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
 
return 0;
}
 
static void get_current_tile_pos(AVFilterContext *ctx, unsigned *x, unsigned *y)
{
TileContext *tile = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
const unsigned tx = tile->current % tile->w;
const unsigned ty = tile->current / tile->w;
 
*x = tile->margin + (inlink->w + tile->padding) * tx;
*y = tile->margin + (inlink->h + tile->padding) * ty;
}
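 
/* Example: with the default "6x5" layout and current == 7, tx = 7 % 6 = 1
 * and ty = 7 / 6 = 1, so the tile lands one tile width and one tile height
 * (each plus padding) past the outer margin. */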
 
static void draw_blank_frame(AVFilterContext *ctx, AVFrame *out_buf)
{
TileContext *tile = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
unsigned x0, y0;
 
get_current_tile_pos(ctx, &x0, &y0);
ff_fill_rectangle(&tile->draw, &tile->blank,
out_buf->data, out_buf->linesize,
x0, y0, inlink->w, inlink->h);
tile->current++;
}
 
static int end_last_frame(AVFilterContext *ctx)
{
TileContext *tile = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out_buf = tile->out_ref;
int ret;
 
while (tile->current < tile->nb_frames)
draw_blank_frame(ctx, out_buf);
ret = ff_filter_frame(outlink, out_buf);
tile->current = 0;
return ret;
}
 
/* Note: direct rendering is not possible since there is no guarantee that
* buffers are fed to filter_frame in the order they were obtained from
* get_buffer (think B-frames). */
 
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
AVFilterContext *ctx = inlink->dst;
TileContext *tile = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
unsigned x0, y0;
 
if (!tile->current) {
tile->out_ref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!tile->out_ref) {
av_frame_free(&picref);
return AVERROR(ENOMEM);
}
av_frame_copy_props(tile->out_ref, picref);
tile->out_ref->width = outlink->w;
tile->out_ref->height = outlink->h;
 
/* fill surface once for margin/padding */
if (tile->margin || tile->padding)
ff_fill_rectangle(&tile->draw, &tile->blank,
tile->out_ref->data,
tile->out_ref->linesize,
0, 0, outlink->w, outlink->h);
}
 
get_current_tile_pos(ctx, &x0, &y0);
ff_copy_rectangle2(&tile->draw,
tile->out_ref->data, tile->out_ref->linesize,
picref->data, picref->linesize,
x0, y0, 0, 0, inlink->w, inlink->h);
 
av_frame_free(&picref);
if (++tile->current == tile->nb_frames)
return end_last_frame(ctx);
 
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
TileContext *tile = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
int r;
 
r = ff_request_frame(inlink);
if (r == AVERROR_EOF && tile->current)
r = end_last_frame(ctx);
return r;
}
 
static const AVFilterPad tile_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad tile_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_tile = {
.name = "tile",
.description = NULL_IF_CONFIG_SMALL("Tile several successive frames together."),
.init = init,
.query_formats = query_formats,
.priv_size = sizeof(TileContext),
.inputs = tile_inputs,
.outputs = tile_outputs,
.priv_class = &tile_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_tinterlace.c
0,0 → 1,384
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2010 Baptiste Coudurier
* Copyright (c) 2003 Michael Zucchi <notzed@ximian.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* temporal field interlace filter, ported from MPlayer/libmpcodecs
*/
 
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
#include "avfilter.h"
#include "internal.h"
 
enum TInterlaceMode {
MODE_MERGE = 0,
MODE_DROP_EVEN,
MODE_DROP_ODD,
MODE_PAD,
MODE_INTERLEAVE_TOP,
MODE_INTERLEAVE_BOTTOM,
MODE_INTERLACEX2,
MODE_NB,
};
 
typedef struct {
const AVClass *class;
enum TInterlaceMode mode; ///< interlace mode selected
int flags; ///< flags affecting interlacing algorithm
int frame; ///< number of the output frame
int vsub; ///< chroma vertical subsampling
AVFrame *cur;
AVFrame *next;
uint8_t *black_data[4]; ///< buffer used to fill padded lines
int black_linesize[4];
} TInterlaceContext;
 
#define OFFSET(x) offsetof(TInterlaceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define TINTERLACE_FLAG_VLPF 01
 
static const AVOption tinterlace_options[] = {
{"mode", "select interlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_MERGE}, 0, MODE_NB-1, FLAGS, "mode"},
{"merge", "merge fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_MERGE}, INT_MIN, INT_MAX, FLAGS, "mode"},
{"drop_even", "drop even fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_EVEN}, INT_MIN, INT_MAX, FLAGS, "mode"},
{"drop_odd", "drop odd fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_ODD}, INT_MIN, INT_MAX, FLAGS, "mode"},
{"pad", "pad alternate lines with black", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PAD}, INT_MIN, INT_MAX, FLAGS, "mode"},
{"interleave_top", "interleave top and bottom fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_TOP}, INT_MIN, INT_MAX, FLAGS, "mode"},
{"interleave_bottom", "interleave bottom and top fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "mode"},
{"interlacex2", "interlace fields from two consecutive frames", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLACEX2}, INT_MIN, INT_MAX, FLAGS, "mode"},
 
{"flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, INT_MAX, 0, "flags" },
{"low_pass_filter", "enable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
{"vlpf", "enable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
 
{NULL}
};
 
AVFILTER_DEFINE_CLASS(tinterlace);
 
#define FULL_SCALE_YUVJ_FORMATS \
AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P
 
static enum AVPixelFormat full_scale_yuvj_pix_fmts[] = {
FULL_SCALE_YUVJ_FORMATS, AV_PIX_FMT_NONE
};
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_GRAY8, FULL_SCALE_YUVJ_FORMATS,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
TInterlaceContext *tinterlace = ctx->priv;
 
av_frame_free(&tinterlace->cur );
av_frame_free(&tinterlace->next);
av_freep(&tinterlace->black_data[0]);
}
 
static int config_out_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = outlink->src->inputs[0];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
TInterlaceContext *tinterlace = ctx->priv;
 
tinterlace->vsub = desc->log2_chroma_h;
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
outlink->w = inlink->w;
outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?
inlink->h*2 : inlink->h;
 
if (tinterlace->mode == MODE_PAD) {
uint8_t black[4] = { 16, 128, 128, 16 };
int i, ret;
if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
black[0] = black[3] = 0;
ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
outlink->w, outlink->h, outlink->format, 1);
if (ret < 0)
return ret;
 
/* fill black picture with black */
for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
int h = i == 1 || i == 2 ? FF_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;
memset(tinterlace->black_data[i], black[i],
tinterlace->black_linesize[i] * h);
}
}
if ((tinterlace->flags & TINTERLACE_FLAG_VLPF)
&& !(tinterlace->mode == MODE_INTERLEAVE_TOP
|| tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
av_log(ctx, AV_LOG_WARNING, "low_pass_filter flag ignored with mode %d\n",
tinterlace->mode);
tinterlace->flags &= ~TINTERLACE_FLAG_VLPF;
}
av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n",
tinterlace->mode, (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "on" : "off",
inlink->h, outlink->h);
 
return 0;
}
 
#define FIELD_UPPER 0
#define FIELD_LOWER 1
#define FIELD_UPPER_AND_LOWER 2
 
/**
* Copy picture field from src to dst.
*
* @param src_field copy from upper, lower field or both
* @param interleave leave a padding line between each copied line
* @param dst_field copy to upper or lower field,
* only meaningful when interleave is selected
* @param flags context flags
*/
static inline
void copy_picture_field(uint8_t *dst[4], int dst_linesize[4],
const uint8_t *src[4], int src_linesize[4],
enum AVPixelFormat format, int w, int src_h,
int src_field, int interleave, int dst_field,
int flags)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
int plane, vsub = desc->log2_chroma_h;
int k = src_field == FIELD_UPPER_AND_LOWER ? 1 : 2;
int h, i;
 
for (plane = 0; plane < desc->nb_components; plane++) {
int lines = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(src_h, vsub) : src_h;
int linesize = av_image_get_linesize(format, w, plane);
uint8_t *dstp = dst[plane];
const uint8_t *srcp = src[plane];
 
if (linesize < 0)
return;
 
lines = (lines + (src_field == FIELD_UPPER)) / k;
if (src_field == FIELD_LOWER)
srcp += src_linesize[plane];
if (interleave && dst_field == FIELD_LOWER)
dstp += dst_linesize[plane];
if (flags & TINTERLACE_FLAG_VLPF) {
// Low-pass filtering is required when creating an interlaced destination from
// a progressive source which contains high-frequency vertical detail.
// Filtering will reduce interlace 'twitter' and Moire patterning.
int srcp_linesize = src_linesize[plane] * k;
int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
for (h = lines; h > 0; h--) {
const uint8_t *srcp_above = srcp - src_linesize[plane];
const uint8_t *srcp_below = srcp + src_linesize[plane];
if (h == lines) srcp_above = srcp; // there is no line above
if (h == 1) srcp_below = srcp; // there is no line below
for (i = 0; i < linesize; i++) {
// this calculation is an integer representation of
// '0.5 * current + 0.25 * above + 0.25 * below'
// '1 +' is for rounding.
dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
}
dstp += dstp_linesize;
srcp += srcp_linesize;
}
} else {
av_image_copy_plane(dstp, dst_linesize[plane] * (interleave ? 2 : 1),
srcp, src_linesize[plane]*k, linesize, lines);
}
}
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
TInterlaceContext *tinterlace = ctx->priv;
AVFrame *cur, *next, *out;
int field, tff, ret;
 
av_frame_free(&tinterlace->cur);
tinterlace->cur = tinterlace->next;
tinterlace->next = picref;
 
cur = tinterlace->cur;
next = tinterlace->next;
/* we need at least two frames */
if (!tinterlace->cur)
return 0;
 
switch (tinterlace->mode) {
case MODE_MERGE: /* move the odd frame into the upper field of the new image, even into
* the lower field, generating a double-height video at half framerate */
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
return AVERROR(ENOMEM);
av_frame_copy_props(out, cur);
out->height = outlink->h;
out->interlaced_frame = 1;
out->top_field_first = 1;
 
/* write odd frame lines into the upper field of the new frame */
copy_picture_field(out->data, out->linesize,
(const uint8_t **)cur->data, cur->linesize,
inlink->format, inlink->w, inlink->h,
FIELD_UPPER_AND_LOWER, 1, FIELD_UPPER, tinterlace->flags);
/* write even frame lines into the lower field of the new frame */
copy_picture_field(out->data, out->linesize,
(const uint8_t **)next->data, next->linesize,
inlink->format, inlink->w, inlink->h,
FIELD_UPPER_AND_LOWER, 1, FIELD_LOWER, tinterlace->flags);
av_frame_free(&tinterlace->next);
break;
 
case MODE_DROP_ODD: /* only output even frames, odd frames are dropped; height unchanged, half framerate */
case MODE_DROP_EVEN: /* only output odd frames, even frames are dropped; height unchanged, half framerate */
out = av_frame_clone(tinterlace->mode == MODE_DROP_EVEN ? cur : next);
av_frame_free(&tinterlace->next);
break;
 
case MODE_PAD: /* expand each frame to double height, but pad alternate
* lines with black; framerate unchanged */
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
return AVERROR(ENOMEM);
av_frame_copy_props(out, cur);
out->height = outlink->h;
 
field = (1 + tinterlace->frame) & 1 ? FIELD_UPPER : FIELD_LOWER;
/* copy upper and lower fields */
copy_picture_field(out->data, out->linesize,
(const uint8_t **)cur->data, cur->linesize,
inlink->format, inlink->w, inlink->h,
FIELD_UPPER_AND_LOWER, 1, field, tinterlace->flags);
/* pad with black the other field */
copy_picture_field(out->data, out->linesize,
(const uint8_t **)tinterlace->black_data, tinterlace->black_linesize,
inlink->format, inlink->w, inlink->h,
FIELD_UPPER_AND_LOWER, 1, !field, tinterlace->flags);
break;
 
/* interleave upper/lower lines from odd frames with lower/upper lines from even frames,
* halving the frame rate and preserving image height */
case MODE_INTERLEAVE_TOP: /* top field first */
case MODE_INTERLEAVE_BOTTOM: /* bottom field first */
tff = tinterlace->mode == MODE_INTERLEAVE_TOP;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
return AVERROR(ENOMEM);
av_frame_copy_props(out, cur);
out->interlaced_frame = 1;
out->top_field_first = tff;
 
/* copy upper/lower field from cur */
copy_picture_field(out->data, out->linesize,
(const uint8_t **)cur->data, cur->linesize,
inlink->format, inlink->w, inlink->h,
tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
tinterlace->flags);
/* copy lower/upper field from next */
copy_picture_field(out->data, out->linesize,
(const uint8_t **)next->data, next->linesize,
inlink->format, inlink->w, inlink->h,
tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
tinterlace->flags);
av_frame_free(&tinterlace->next);
break;
case MODE_INTERLACEX2: /* re-interlace preserving image height, double frame rate */
/* output current frame first */
out = av_frame_clone(cur);
if (!out)
return AVERROR(ENOMEM);
out->interlaced_frame = 1;
 
if ((ret = ff_filter_frame(outlink, out)) < 0)
return ret;
 
/* output mix of current and next frame */
tff = next->top_field_first;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
return AVERROR(ENOMEM);
av_frame_copy_props(out, next);
out->interlaced_frame = 1;
 
/* write current frame second field lines into the second field of the new frame */
copy_picture_field(out->data, out->linesize,
(const uint8_t **)cur->data, cur->linesize,
inlink->format, inlink->w, inlink->h,
tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
tinterlace->flags);
/* write next frame first field lines into the first field of the new frame */
copy_picture_field(out->data, out->linesize,
(const uint8_t **)next->data, next->linesize,
inlink->format, inlink->w, inlink->h,
tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
tinterlace->flags);
break;
default:
av_assert0(0);
}
 
ret = ff_filter_frame(outlink, out);
tinterlace->frame++;
 
return ret;
}
 
static const AVFilterPad tinterlace_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad tinterlace_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_out_props,
},
{ NULL }
};
 
AVFilter avfilter_vf_tinterlace = {
.name = "tinterlace",
.description = NULL_IF_CONFIG_SMALL("Perform temporal field interlacing."),
.priv_size = sizeof(TInterlaceContext),
.uninit = uninit,
.query_formats = query_formats,
.inputs = tinterlace_inputs,
.outputs = tinterlace_outputs,
.priv_class = &tinterlace_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_transpose.c
0,0 → 1,302
/*
* Copyright (c) 2010 Stefano Sabatini
* Copyright (c) 2008 Vitor Sessak
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* transposition filter
* Based on MPlayer libmpcodecs/vf_rotate.c.
*/
 
#include <stdio.h>
 
#include "libavutil/intreadwrite.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef enum {
TRANSPOSE_PT_TYPE_NONE,
TRANSPOSE_PT_TYPE_LANDSCAPE,
TRANSPOSE_PT_TYPE_PORTRAIT,
} PassthroughType;
 
enum TransposeDir {
TRANSPOSE_CCLOCK_FLIP,
TRANSPOSE_CLOCK,
TRANSPOSE_CCLOCK,
TRANSPOSE_CLOCK_FLIP,
};
 
typedef struct {
const AVClass *class;
int hsub, vsub;
int pixsteps[4];
 
PassthroughType passthrough; ///< landscape passthrough mode enabled
enum TransposeDir dir;
} TransContext;
 
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *pix_fmts = NULL;
int fmt;
 
for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
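/* skip palettized, hardware and bitstream formats, and formats whose
* chroma subsampling differs per axis, since transposing swaps the axes */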
if (!(desc->flags & AV_PIX_FMT_FLAG_PAL ||
desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ||
desc->log2_chroma_w != desc->log2_chroma_h))
ff_add_format(&pix_fmts, fmt);
}
 
 
ff_set_common_formats(ctx, pix_fmts);
return 0;
}
 
static int config_props_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
TransContext *trans = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
const AVPixFmtDescriptor *desc_out = av_pix_fmt_desc_get(outlink->format);
const AVPixFmtDescriptor *desc_in = av_pix_fmt_desc_get(inlink->format);
 
if (trans->dir&4) {
av_log(ctx, AV_LOG_WARNING,
"dir values greater than 3 are deprecated, use the passthrough option instead\n");
trans->dir &= 3;
trans->passthrough = TRANSPOSE_PT_TYPE_LANDSCAPE;
}
 
if ((inlink->w >= inlink->h && trans->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
(inlink->w <= inlink->h && trans->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) {
av_log(ctx, AV_LOG_VERBOSE,
"w:%d h:%d -> w:%d h:%d (passthrough mode)\n",
inlink->w, inlink->h, inlink->w, inlink->h);
return 0;
} else {
trans->passthrough = TRANSPOSE_PT_TYPE_NONE;
}
 
trans->hsub = desc_in->log2_chroma_w;
trans->vsub = desc_in->log2_chroma_h;
 
av_image_fill_max_pixsteps(trans->pixsteps, NULL, desc_out);
 
outlink->w = inlink->h;
outlink->h = inlink->w;
 
if (inlink->sample_aspect_ratio.num) {
outlink->sample_aspect_ratio = av_div_q((AVRational){1,1}, inlink->sample_aspect_ratio);
} else
outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
 
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d dir:%d -> w:%d h:%d rotation:%s vflip:%d\n",
inlink->w, inlink->h, trans->dir, outlink->w, outlink->h,
trans->dir == 1 || trans->dir == 3 ? "clockwise" : "counterclockwise",
trans->dir == 0 || trans->dir == 3);
return 0;
}
 
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
TransContext *trans = inlink->dst->priv;
 
return trans->passthrough ?
ff_null_get_video_buffer (inlink, w, h) :
ff_default_get_video_buffer(inlink, w, h);
}
 
typedef struct ThreadData {
AVFrame *in, *out;
} ThreadData;
 
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr,
int nb_jobs)
{
TransContext *trans = ctx->priv;
ThreadData *td = arg;
AVFrame *out = td->out;
AVFrame *in = td->in;
int plane;
 
for (plane = 0; out->data[plane]; plane++) {
int hsub = plane == 1 || plane == 2 ? trans->hsub : 0;
int vsub = plane == 1 || plane == 2 ? trans->vsub : 0;
int pixstep = trans->pixsteps[plane];
int inh = in->height >> vsub;
int outw = FF_CEIL_RSHIFT(out->width, hsub);
int outh = FF_CEIL_RSHIFT(out->height, vsub);
int start = (outh * jobnr ) / nb_jobs;
int end = (outh * (jobnr+1)) / nb_jobs;
uint8_t *dst, *src;
int dstlinesize, srclinesize;
int x, y;
 
dstlinesize = out->linesize[plane];
dst = out->data[plane] + start * dstlinesize;
src = in->data[plane];
srclinesize = in->linesize[plane];
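/* bit 0 of dir mirrors the source (read bottom-up), bit 1 mirrors the
* destination; together they yield the four rotate/flip variants */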
 
if (trans->dir&1) {
src += in->linesize[plane] * (inh-1);
srclinesize *= -1;
}
 
if (trans->dir&2) {
dst = out->data[plane] + dstlinesize * (outh-start-1);
dstlinesize *= -1;
}
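/* each case transposes one pixel width: destination row y, column x
* reads source row x, column y */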
 
switch (pixstep) {
case 1:
for (y = start; y < end; y++, dst += dstlinesize)
for (x = 0; x < outw; x++)
dst[x] = src[x*srclinesize + y];
break;
case 2:
for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++)
*((uint16_t *)(dst + 2*x)) = *((uint16_t *)(src + x*srclinesize + y*2));
}
break;
case 3:
for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++) {
int32_t v = AV_RB24(src + x*srclinesize + y*3);
AV_WB24(dst + 3*x, v);
}
}
break;
case 4:
for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++)
*((uint32_t *)(dst + 4*x)) = *((uint32_t *)(src + x*srclinesize + y*4));
}
break;
case 6:
for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++) {
int64_t v = AV_RB48(src + x*srclinesize + y*6);
AV_WB48(dst + 6*x, v);
}
}
break;
case 8:
for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++)
*((uint64_t *)(dst + 8*x)) = *((uint64_t *)(src + x*srclinesize + y*8));
}
break;
}
}
 
return 0;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
TransContext *trans = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
ThreadData td;
AVFrame *out;
 
if (trans->passthrough)
return ff_filter_frame(outlink, in);
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
 
if (in->sample_aspect_ratio.num == 0) {
out->sample_aspect_ratio = in->sample_aspect_ratio;
} else {
out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
}
 
td.in = in; td.out = out;
ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
 
#define OFFSET(x) offsetof(TransContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption transpose_options[] = {
{ "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 7, FLAGS, "dir" },
{ "cclock_flip", "rotate counter-clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .unit = "dir" },
{ "clock", "rotate clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, .unit = "dir" },
{ "cclock", "rotate counter-clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, .unit = "dir" },
{ "clock_flip", "rotate clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, .unit = "dir" },
 
{ "passthrough", "do not apply transposition if the input matches the specified geometry",
OFFSET(passthrough), AV_OPT_TYPE_INT, {.i64=TRANSPOSE_PT_TYPE_NONE}, 0, INT_MAX, FLAGS, "passthrough" },
{ "none", "always apply transposition", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_NONE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
{ "portrait", "preserve portrait geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_PORTRAIT}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
{ "landscape", "preserve landscape geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_LANDSCAPE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
 
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(transpose);
 
static const AVFilterPad avfilter_vf_transpose_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = get_video_buffer,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_transpose_outputs[] = {
{
.name = "default",
.config_props = config_props_output,
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_transpose = {
.name = "transpose",
.description = NULL_IF_CONFIG_SMALL("Transpose input video."),
.priv_size = sizeof(TransContext),
.priv_class = &transpose_class,
.query_formats = query_formats,
.inputs = avfilter_vf_transpose_inputs,
.outputs = avfilter_vf_transpose_outputs,
.flags = AVFILTER_FLAG_SLICE_THREADS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_unsharp.c
0,0 → 1,312
/*
* Original copyright (c) 2002 Remi Guyomarch <rguyom@pobox.com>
* Port copyright (c) 2010 Daniel G. Taylor <dan@programmer-art.org>
* Relicensed to the LGPL with permission from Remi Guyomarch.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* blur / sharpen filter, ported to FFmpeg from MPlayer
* libmpcodecs/unsharp.c.
*
* This code is based on:
*
* An Efficient algorithm for Gaussian blur using finite-state machines
* Frederick M. Waltz and John W. V. Miller
*
* SPIE Conf. on Machine Vision Systems for Inspection and Metrology VII
* Originally published Boston, Nov 98
*
* http://www.engin.umd.umich.edu/~jwvm/ece581/21_GBlur.pdf
*/
 
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "unsharp.h"
#include "unsharp_opencl.h"
 
static void apply_unsharp( uint8_t *dst, int dst_stride,
const uint8_t *src, int src_stride,
int width, int height, UnsharpFilterParam *fp)
{
uint32_t **sc = fp->sc;
uint32_t sr[MAX_MATRIX_SIZE - 1], tmp1, tmp2;
 
int32_t res;
int x, y, z;
const uint8_t *src2 = NULL; //silence a warning
const int amount = fp->amount;
const int steps_x = fp->steps_x;
const int steps_y = fp->steps_y;
const int scalebits = fp->scalebits;
const int32_t halfscale = fp->halfscale;
 
if (!amount) {
av_image_copy_plane(dst, dst_stride, src, src_stride, width, height);
return;
}
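/* sr[] carries running sums along the current row and sc[][] down each
* column; the cascaded pair-sums realize the finite-state-machine
* Gaussian approximation cited in the header */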
 
for (y = 0; y < 2 * steps_y; y++)
memset(sc[y], 0, sizeof(sc[y][0]) * (width + 2 * steps_x));
 
for (y = -steps_y; y < height + steps_y; y++) {
if (y < height)
src2 = src;
 
memset(sr, 0, sizeof(sr[0]) * (2 * steps_x - 1));
for (x = -steps_x; x < width + steps_x; x++) {
tmp1 = x <= 0 ? src2[0] : x >= width ? src2[width-1] : src2[x];
for (z = 0; z < steps_x * 2; z += 2) {
tmp2 = sr[z + 0] + tmp1; sr[z + 0] = tmp1;
tmp1 = sr[z + 1] + tmp2; sr[z + 1] = tmp2;
}
for (z = 0; z < steps_y * 2; z += 2) {
tmp2 = sc[z + 0][x + steps_x] + tmp1; sc[z + 0][x + steps_x] = tmp1;
tmp1 = sc[z + 1][x + steps_x] + tmp2; sc[z + 1][x + steps_x] = tmp2;
}
if (x >= steps_x && y >= steps_y) {
const uint8_t *srx = src - steps_y * src_stride + x - steps_x;
uint8_t *dsx = dst - steps_y * dst_stride + x - steps_x;
 
res = (int32_t)*srx + ((((int32_t)*srx - (int32_t)((tmp1 + halfscale) >> scalebits)) * amount) >> 16);
*dsx = av_clip_uint8(res);
}
}
if (y >= 0) {
dst += dst_stride;
src += src_stride;
}
}
}
 
static int apply_unsharp_c(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
{
AVFilterLink *inlink = ctx->inputs[0];
UnsharpContext *unsharp = ctx->priv;
int i, plane_w[3], plane_h[3];
UnsharpFilterParam *fp[3];
plane_w[0] = inlink->w;
plane_w[1] = plane_w[2] = FF_CEIL_RSHIFT(inlink->w, unsharp->hsub);
plane_h[0] = inlink->h;
plane_h[1] = plane_h[2] = FF_CEIL_RSHIFT(inlink->h, unsharp->vsub);
fp[0] = &unsharp->luma;
fp[1] = fp[2] = &unsharp->chroma;
for (i = 0; i < 3; i++) {
apply_unsharp(out->data[i], out->linesize[i], in->data[i], in->linesize[i], plane_w[i], plane_h[i], fp[i]);
}
return 0;
}
 
static void set_filter_param(UnsharpFilterParam *fp, int msize_x, int msize_y, float amount)
{
fp->msize_x = msize_x;
fp->msize_y = msize_y;
fp->amount = amount * 65536.0;
 
fp->steps_x = msize_x / 2;
fp->steps_y = msize_y / 2;
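/* each accumulation stage doubles the sum's scale, so results carry a
* factor of 2^scalebits; halfscale rounds the final right-shift */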
fp->scalebits = (fp->steps_x + fp->steps_y) * 2;
fp->halfscale = 1 << (fp->scalebits - 1);
}
 
static av_cold int init(AVFilterContext *ctx)
{
int ret = 0;
UnsharpContext *unsharp = ctx->priv;
 
 
set_filter_param(&unsharp->luma, unsharp->lmsize_x, unsharp->lmsize_y, unsharp->lamount);
set_filter_param(&unsharp->chroma, unsharp->cmsize_x, unsharp->cmsize_y, unsharp->camount);
 
unsharp->apply_unsharp = apply_unsharp_c;
if (!CONFIG_OPENCL && unsharp->opencl) {
av_log(ctx, AV_LOG_ERROR, "OpenCL support was not enabled in this build, cannot be selected\n");
return AVERROR(EINVAL);
}
if (CONFIG_OPENCL && unsharp->opencl) {
unsharp->apply_unsharp = ff_opencl_apply_unsharp;
ret = ff_opencl_unsharp_init(ctx);
if (ret < 0)
return ret;
}
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int init_filter_param(AVFilterContext *ctx, UnsharpFilterParam *fp, const char *effect_type, int width)
{
int z;
const char *effect = fp->amount == 0 ? "none" : fp->amount < 0 ? "blur" : "sharpen";
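/* msize_x & msize_y & 1 is non-zero only when both sizes are odd */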
 
if (!(fp->msize_x & fp->msize_y & 1)) {
av_log(ctx, AV_LOG_ERROR,
"Invalid even size for %s matrix size %dx%d\n",
effect_type, fp->msize_x, fp->msize_y);
return AVERROR(EINVAL);
}
 
av_log(ctx, AV_LOG_VERBOSE, "effect:%s type:%s msize_x:%d msize_y:%d amount:%0.2f\n",
effect, effect_type, fp->msize_x, fp->msize_y, fp->amount / 65536.0);
 
for (z = 0; z < 2 * fp->steps_y; z++)
if (!(fp->sc[z] = av_malloc(sizeof(*(fp->sc[z])) * (width + 2 * fp->steps_x))))
return AVERROR(ENOMEM);
 
return 0;
}
 
static int config_props(AVFilterLink *link)
{
UnsharpContext *unsharp = link->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
int ret;
 
unsharp->hsub = desc->log2_chroma_w;
unsharp->vsub = desc->log2_chroma_h;
 
ret = init_filter_param(link->dst, &unsharp->luma, "luma", link->w);
if (ret < 0)
return ret;
ret = init_filter_param(link->dst, &unsharp->chroma, "chroma", FF_CEIL_RSHIFT(link->w, unsharp->hsub));
if (ret < 0)
return ret;
 
return 0;
}
 
static void free_filter_param(UnsharpFilterParam *fp)
{
int z;
 
for (z = 0; z < 2 * fp->steps_y; z++)
av_free(fp->sc[z]);
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
UnsharpContext *unsharp = ctx->priv;
 
if (CONFIG_OPENCL && unsharp->opencl) {
ff_opencl_unsharp_uninit(ctx);
}
 
free_filter_param(&unsharp->luma);
free_filter_param(&unsharp->chroma);
}
 
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
UnsharpContext *unsharp = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0];
AVFrame *out;
int ret = 0;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
if (CONFIG_OPENCL && unsharp->opencl) {
ret = ff_opencl_unsharp_process_inout_buf(link->dst, in, out);
if (ret < 0)
goto end;
}
 
ret = unsharp->apply_unsharp(link->dst, in, out);
end:
av_frame_free(&in);
 
if (ret < 0)
return ret;
return ff_filter_frame(outlink, out);
}
 
#define OFFSET(x) offsetof(UnsharpContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define MIN_SIZE 3
#define MAX_SIZE 63
static const AVOption unsharp_options[] = {
{ "luma_msize_x", "set luma matrix horizontal size", OFFSET(lmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
{ "lx", "set luma matrix horizontal size", OFFSET(lmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
{ "luma_msize_y", "set luma matrix vertical size", OFFSET(lmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
{ "ly", "set luma matrix vertical size", OFFSET(lmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
{ "luma_amount", "set luma effect strength", OFFSET(lamount), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, -2, 5, FLAGS },
{ "la", "set luma effect strength", OFFSET(lamount), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, -2, 5, FLAGS },
{ "chroma_msize_x", "set chroma matrix horizontal size", OFFSET(cmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
{ "cx", "set chroma matrix horizontal size", OFFSET(cmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
{ "chroma_msize_y", "set chroma matrix vertical size", OFFSET(cmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
{ "cy", "set chroma matrix vertical size", OFFSET(cmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
{ "chroma_amount", "set chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
{ "ca", "set chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
{ "opencl", "use OpenCL filtering capabilities", OFFSET(opencl), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(unsharp);
 
static const AVFilterPad avfilter_vf_unsharp_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_unsharp_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_unsharp = {
.name = "unsharp",
.description = NULL_IF_CONFIG_SMALL("Sharpen or blur the input video."),
.priv_size = sizeof(UnsharpContext),
.priv_class = &unsharp_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = avfilter_vf_unsharp_inputs,
.outputs = avfilter_vf_unsharp_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_vflip.c
0,0 → 1,111
/*
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* video vertical flip filter
*/
 
#include "libavutil/internal.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
int vsub; ///< vertical chroma subsampling
} FlipContext;
 
static int config_input(AVFilterLink *link)
{
FlipContext *flip = link->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
 
flip->vsub = desc->log2_chroma_h;
 
return 0;
}
 
static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
{
FlipContext *flip = link->dst->priv;
AVFrame *frame;
int i;
 
frame = ff_get_video_buffer(link->dst->outputs[0], w, h);
if (!frame)
return NULL;
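/* point each plane at its last line and negate the stride so the
* buffer is written bottom-up */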
 
for (i = 0; i < 4; i ++) {
int vsub = i == 1 || i == 2 ? flip->vsub : 0;
int height = FF_CEIL_RSHIFT(h, vsub);
 
if (frame->data[i]) {
frame->data[i] += (height - 1) * frame->linesize[i];
frame->linesize[i] = -frame->linesize[i];
}
}
 
return frame;
}
 
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
FlipContext *flip = link->dst->priv;
int i;
 
for (i = 0; i < 4; i ++) {
int vsub = i == 1 || i == 2 ? flip->vsub : 0;
int height = FF_CEIL_RSHIFT(link->h, vsub);
 
if (frame->data[i]) {
frame->data[i] += (height - 1) * frame->linesize[i];
frame->linesize[i] = -frame->linesize[i];
}
}
 
return ff_filter_frame(link->dst->outputs[0], frame);
}

static const AVFilterPad avfilter_vf_vflip_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = get_video_buffer,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_vflip_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_vflip = {
.name = "vflip",
.description = NULL_IF_CONFIG_SMALL("Flip the input video vertically."),
.priv_size = sizeof(FlipContext),
.inputs = avfilter_vf_vflip_inputs,
.outputs = avfilter_vf_vflip_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_vidstabdetect.c
0,0 → 1,217
/*
* Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#define DEFAULT_RESULT_NAME "transforms.trf"
 
#include <vid.stab/libvidstab.h>
 
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "internal.h"
 
#include "vidstabutils.h"
 
typedef struct {
const AVClass *class;
 
VSMotionDetect md;
VSMotionDetectConfig conf;
 
char *result;
FILE *f;
} StabData;
 
 
#define OFFSET(x) offsetof(StabData, x)
#define OFFSETC(x) (offsetof(StabData, conf)+offsetof(VSMotionDetectConfig, x))
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption vidstabdetect_options[] = {
{"result", "path to the file used to write the transforms", OFFSET(result), AV_OPT_TYPE_STRING, {.str = DEFAULT_RESULT_NAME}, .flags = FLAGS},
{"shakiness", "how shaky is the video and how quick is the camera?"
" 1: little (fast) 10: very strong/quick (slow)", OFFSETC(shakiness), AV_OPT_TYPE_INT, {.i64 = 5}, 1, 10, FLAGS},
{"accuracy", "(>=shakiness) 1: low 15: high (slow)", OFFSETC(accuracy), AV_OPT_TYPE_INT, {.i64 = 9}, 1, 15, FLAGS},
{"stepsize", "region around minimum is scanned with 1 pixel resolution", OFFSETC(stepSize), AV_OPT_TYPE_INT, {.i64 = 6}, 1, 32, FLAGS},
{"mincontrast", "below this contrast a field is discarded (0-1)", OFFSETC(contrastThreshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.25}, 0.0, 1.0, FLAGS},
{"show", "0: draw nothing; 1,2: show fields and transforms", OFFSETC(show), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 2, FLAGS},
{"tripod", "virtual tripod mode (if >0): motion is compared to a reference"
" reference frame (frame # is the value)", OFFSETC(virtualTripod), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS},
{NULL}
};
 
AVFILTER_DEFINE_CLASS(vidstabdetect);
 
static av_cold int init(AVFilterContext *ctx)
{
StabData *sd = ctx->priv;
vs_set_mem_and_log_functions();
sd->class = &vidstabdetect_class;
av_log(ctx, AV_LOG_VERBOSE, "vidstabdetect filter: init %s\n", LIBVIDSTAB_VERSION);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
StabData *sd = ctx->priv;
VSMotionDetect *md = &(sd->md);
 
if (sd->f) {
fclose(sd->f);
sd->f = NULL;
}
 
vsMotionDetectionCleanup(md);
}
 
static int query_formats(AVFilterContext *ctx)
{
// If you add something here also add it in vidstabutils.c
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8,
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
StabData *sd = ctx->priv;
 
VSMotionDetect* md = &(sd->md);
VSFrameInfo fi;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
vsFrameInfoInit(&fi, inlink->w, inlink->h, av_2_vs_pixel_format(ctx, inlink->format));
if (fi.bytesPerPixel != av_get_bits_per_pixel(desc)/8) {
av_log(ctx, AV_LOG_ERROR, "pixel-format error: wrong bits/per/pixel, please report a BUG");
return AVERROR(EINVAL);
}
if (fi.log2ChromaW != desc->log2_chroma_w) {
av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_w, please report a BUG");
return AVERROR(EINVAL);
}
 
if (fi.log2ChromaH != desc->log2_chroma_h) {
av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_h, please report a BUG");
return AVERROR(EINVAL);
}
 
// set values that are not initialized by the options
sd->conf.algo = 1;
sd->conf.modName = "vidstabdetect";
if (vsMotionDetectInit(md, &sd->conf, &fi) != VS_OK) {
av_log(ctx, AV_LOG_ERROR, "initialization of Motion Detection failed, please report a BUG");
return AVERROR(EINVAL);
}
 
vsMotionDetectGetConfig(&sd->conf, md);
av_log(ctx, AV_LOG_INFO, "Video stabilization settings (pass 1/2):\n");
av_log(ctx, AV_LOG_INFO, " shakiness = %d\n", sd->conf.shakiness);
av_log(ctx, AV_LOG_INFO, " accuracy = %d\n", sd->conf.accuracy);
av_log(ctx, AV_LOG_INFO, " stepsize = %d\n", sd->conf.stepSize);
av_log(ctx, AV_LOG_INFO, " mincontrast = %f\n", sd->conf.contrastThreshold);
av_log(ctx, AV_LOG_INFO, " show = %d\n", sd->conf.show);
av_log(ctx, AV_LOG_INFO, " result = %s\n", sd->result);
 
sd->f = fopen(sd->result, "w");
if (sd->f == NULL) {
av_log(ctx, AV_LOG_ERROR, "cannot open transform file %s\n", sd->result);
return AVERROR(EINVAL);
} else {
if (vsPrepareFile(md, sd->f) != VS_OK) {
av_log(ctx, AV_LOG_ERROR, "cannot write to transform file %s\n", sd->result);
return AVERROR(EINVAL);
}
}
return 0;
}
 
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
StabData *sd = ctx->priv;
VSMotionDetect *md = &(sd->md);
LocalMotions localmotions;
 
AVFilterLink *outlink = inlink->dst->outputs[0];
VSFrame frame;
int plane;
 
if (sd->conf.show > 0 && !av_frame_is_writable(in))
av_frame_make_writable(in);
 
for (plane = 0; plane < md->fi.planes; plane++) {
frame.data[plane] = in->data[plane];
frame.linesize[plane] = in->linesize[plane];
}
if (vsMotionDetection(md, &localmotions, &frame) != VS_OK) {
av_log(ctx, AV_LOG_ERROR, "motion detection failed");
return AVERROR(AVERROR_EXTERNAL);
} else {
if (vsWriteToFile(md, sd->f, &localmotions) != VS_OK) {
av_log(ctx, AV_LOG_ERROR, "cannot write to transform file");
return AVERROR(errno);
}
vs_vector_del(&localmotions);
}
 
return ff_filter_frame(outlink, in);
}
 
static const AVFilterPad avfilter_vf_vidstabdetect_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_vidstabdetect_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_vidstabdetect = {
.name = "vidstabdetect",
.description = NULL_IF_CONFIG_SMALL("Extract relative transformations, "
"pass 1 of 2 for stabilization "
"(see vidstabtransform for pass 2)."),
.priv_size = sizeof(StabData),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = avfilter_vf_vidstabdetect_inputs,
.outputs = avfilter_vf_vidstabdetect_outputs,
.priv_class = &vidstabdetect_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_vidstabtransform.c
0,0 → 1,291
/*
* Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#define DEFAULT_INPUT_NAME "transforms.trf"
 
#include <vid.stab/libvidstab.h>
 
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "internal.h"
 
#include "vidstabutils.h"
 
typedef struct {
const AVClass *class;
 
VSTransformData td;
VSTransformConfig conf;
 
VSTransformations trans; // transformations
char *input; // name of transform file
int tripod;
} TransformContext;
 
#define OFFSET(x) offsetof(TransformContext, x)
#define OFFSETC(x) (offsetof(TransformContext, conf)+offsetof(VSTransformConfig, x))
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption vidstabtransform_options[] = {
{"input", "path to the file storing the transforms", OFFSET(input),
AV_OPT_TYPE_STRING, {.str = DEFAULT_INPUT_NAME}, .flags = FLAGS },
{"smoothing", "number of frames*2 + 1 used for lowpass filtering", OFFSETC(smoothing),
AV_OPT_TYPE_INT, {.i64 = 10}, 1, 1000, FLAGS},
{"maxshift", "maximal number of pixels to translate image", OFFSETC(maxShift),
AV_OPT_TYPE_INT, {.i64 = -1}, -1, 500, FLAGS},
{"maxangle", "maximal angle in rad to rotate image", OFFSETC(maxAngle),
AV_OPT_TYPE_DOUBLE, {.dbl = -1.0}, -1.0, 3.14, FLAGS},
{"crop", "set cropping mode", OFFSETC(crop),
AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "crop"},
{ "keep", "keep border", 0,
AV_OPT_TYPE_CONST, {.i64 = VSKeepBorder }, 0, 0, FLAGS, "crop"},
{ "black", "black border", 0,
AV_OPT_TYPE_CONST, {.i64 = VSCropBorder }, 0, 0, FLAGS, "crop"},
{"invert", "1: invert transforms", OFFSETC(invert),
AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
{"relative", "consider transforms as 0: absolute, 1: relative", OFFSETC(relative),
AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS},
{"zoom", "percentage to zoom >0: zoom in, <0 zoom out", OFFSETC(zoom),
AV_OPT_TYPE_DOUBLE, {.dbl = 0}, -100, 100, FLAGS},
{"optzoom", "0: nothing, 1: determine optimal zoom (added to 'zoom')", OFFSETC(optZoom),
AV_OPT_TYPE_INT, {.i64 = 1}, 0, 2, FLAGS},
{"interpol", "type of interpolation", OFFSETC(interpolType),
AV_OPT_TYPE_INT, {.i64 = 2}, 0, 3, FLAGS, "interpol"},
{ "no", "no interpolation", 0,
AV_OPT_TYPE_CONST, {.i64 = VS_Zero }, 0, 0, FLAGS, "interpol"},
{ "linear", "linear (horizontal)", 0,
AV_OPT_TYPE_CONST, {.i64 = VS_Linear }, 0, 0, FLAGS, "interpol"},
{ "bilinear","bi-linear", 0,
AV_OPT_TYPE_CONST, {.i64 = VS_BiLinear},0, 0, FLAGS, "interpol"},
{ "bicubic", "bi-cubic", 0,
AV_OPT_TYPE_CONST, {.i64 = VS_BiCubic },0, 0, FLAGS, "interpol"},
{"tripod", "if 1: virtual tripod mode (equiv. to relative=0:smoothing=0)", OFFSET(tripod),
AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
{NULL}
};
 
AVFILTER_DEFINE_CLASS(vidstabtransform);
 
static av_cold int init(AVFilterContext *ctx)
{
TransformContext *tc = ctx->priv;
vs_set_mem_and_log_functions();
tc->class = &vidstabtransform_class;
av_log(ctx, AV_LOG_VERBOSE, "vidstabtransform filter: init %s\n", LIBVIDSTAB_VERSION);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
TransformContext *tc = ctx->priv;
 
vsTransformDataCleanup(&tc->td);
vsTransformationsCleanup(&tc->trans);
}
 
static int query_formats(AVFilterContext *ctx)
{
// If you add something here also add it in vidstabutils.c
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8,
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
 
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
TransformContext *tc = ctx->priv;
FILE *f;
 
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
VSTransformData *td = &(tc->td);
 
VSFrameInfo fi_src;
VSFrameInfo fi_dest;
 
if (!vsFrameInfoInit(&fi_src, inlink->w, inlink->h,
av_2_vs_pixel_format(ctx, inlink->format)) ||
!vsFrameInfoInit(&fi_dest, inlink->w, inlink->h,
av_2_vs_pixel_format(ctx, inlink->format))) {
av_log(ctx, AV_LOG_ERROR, "unknown pixel format: %i (%s)",
inlink->format, desc->name);
return AVERROR(EINVAL);
}
 
if (fi_src.bytesPerPixel != av_get_bits_per_pixel(desc)/8 ||
fi_src.log2ChromaW != desc->log2_chroma_w ||
fi_src.log2ChromaH != desc->log2_chroma_h) {
av_log(ctx, AV_LOG_ERROR, "pixel-format error: bpp %i<>%i ",
fi_src.bytesPerPixel, av_get_bits_per_pixel(desc)/8);
av_log(ctx, AV_LOG_ERROR, "chroma_subsampl: w: %i<>%i h: %i<>%i\n",
fi_src.log2ChromaW, desc->log2_chroma_w,
fi_src.log2ChromaH, desc->log2_chroma_h);
return AVERROR(EINVAL);
}
 
// set values that are not initialized by the options
tc->conf.modName = "vidstabtransform";
tc->conf.verbose = 1;
if (tc->tripod) {
av_log(ctx, AV_LOG_INFO, "Virtual tripod mode: relative=0, smoothing=0");
tc->conf.relative = 0;
tc->conf.smoothing = 0;
}
 
if (vsTransformDataInit(td, &tc->conf, &fi_src, &fi_dest) != VS_OK) {
av_log(ctx, AV_LOG_ERROR, "initialization of vid.stab transform failed, please report a BUG\n");
return AVERROR(EINVAL);
}
 
vsTransformGetConfig(&tc->conf, td);
av_log(ctx, AV_LOG_INFO, "Video transformation/stabilization settings (pass 2/2):\n");
av_log(ctx, AV_LOG_INFO, " input = %s\n", tc->input);
av_log(ctx, AV_LOG_INFO, " smoothing = %d\n", tc->conf.smoothing);
av_log(ctx, AV_LOG_INFO, " maxshift = %d\n", tc->conf.maxShift);
av_log(ctx, AV_LOG_INFO, " maxangle = %f\n", tc->conf.maxAngle);
av_log(ctx, AV_LOG_INFO, " crop = %s\n", tc->conf.crop ? "Black" : "Keep");
av_log(ctx, AV_LOG_INFO, " relative = %s\n", tc->conf.relative ? "True": "False");
av_log(ctx, AV_LOG_INFO, " invert = %s\n", tc->conf.invert ? "True" : "False");
av_log(ctx, AV_LOG_INFO, " zoom = %f\n", tc->conf.zoom);
av_log(ctx, AV_LOG_INFO, " optzoom = %s\n", tc->conf.optZoom ? "On" : "Off");
av_log(ctx, AV_LOG_INFO, " interpol = %s\n", getInterpolationTypeName(tc->conf.interpolType));
 
f = fopen(tc->input, "r");
if (f == NULL) {
av_log(ctx, AV_LOG_ERROR, "cannot open input file %s\n", tc->input);
return AVERROR(errno);
} else {
VSManyLocalMotions mlms;
if (vsReadLocalMotionsFile(f, &mlms) == VS_OK) {
// calculate the actual transforms from the local motions
if (vsLocalmotions2TransformsSimple(td, &mlms, &tc->trans) != VS_OK) {
av_log(ctx, AV_LOG_ERROR, "calculating transformations failed\n");
return AVERROR(EINVAL);
}
} else { // try to read old format
if (!vsReadOldTransforms(td, f, &tc->trans)) { /* read input file */
av_log(ctx, AV_LOG_ERROR, "error parsing input file %s\n", tc->input);
return AVERROR(EINVAL);
}
}
}
fclose(f);
 
if (vsPreprocessTransforms(td, &tc->trans) != VS_OK ) {
av_log(ctx, AV_LOG_ERROR, "error while preprocessing transforms\n");
return AVERROR(EINVAL);
}
 
// TODO: add sharpening, so far the user needs to call the unsharp filter manually
return 0;
}
 
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
TransformContext *tc = ctx->priv;
VSTransformData* td = &(tc->td);
 
AVFilterLink *outlink = inlink->dst->outputs[0];
int direct = 0;
AVFrame *out;
VSFrame inframe;
int plane;
 
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
 
for (plane = 0; plane < vsTransformGetSrcFrameInfo(td)->planes; plane++) {
inframe.data[plane] = in->data[plane];
inframe.linesize[plane] = in->linesize[plane];
}
if (direct) {
vsTransformPrepare(td, &inframe, &inframe);
} else { // separate frames
VSFrame outframe;
for (plane = 0; plane < vsTransformGetDestFrameInfo(td)->planes; plane++) {
outframe.data[plane] = out->data[plane];
outframe.linesize[plane] = out->linesize[plane];
}
vsTransformPrepare(td, &inframe, &outframe);
}
 
vsDoTransform(td, vsGetNextTransform(td, &tc->trans));
 
vsTransformFinish(td);
 
if (!direct)
av_frame_free(&in);
 
return ff_filter_frame(outlink, out);
}
 
static const AVFilterPad avfilter_vf_vidstabtransform_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_vidstabtransform_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_vidstabtransform = {
.name = "vidstabtransform",
.description = NULL_IF_CONFIG_SMALL("Transform the frames, "
"pass 2 of 2 for stabilization "
"(see vidstabdetect for pass 1)."),
.priv_size = sizeof(TransformContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = avfilter_vf_vidstabtransform_inputs,
.outputs = avfilter_vf_vidstabtransform_outputs,
.priv_class = &vidstabtransform_class,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_vignette.c
0,0 → 1,339
/*
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <float.h> /* DBL_MAX */
 
#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
static const char *const var_names[] = {
"w", // stream width
"h", // stream height
"n", // frame count
"pts", // presentation timestamp expressed in AV_TIME_BASE units
"r", // frame rate
"t", // timestamp expressed in seconds
"tb", // timebase
NULL
};
 
enum var_name {
VAR_W,
VAR_H,
VAR_N,
VAR_PTS,
VAR_R,
VAR_T,
VAR_TB,
VAR_NB
};
 
typedef struct {
const AVClass *class;
const AVPixFmtDescriptor *desc;
int backward;
enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, EVAL_MODE_NB } eval_mode;
#define DEF_EXPR_FIELDS(name) AVExpr *name##_pexpr; char *name##_expr; double name
DEF_EXPR_FIELDS(angle);
DEF_EXPR_FIELDS(x0);
DEF_EXPR_FIELDS(y0);
double var_values[VAR_NB];
float *fmap;
int fmap_linesize;
double dmax;
float xscale, yscale;
uint32_t dither;
int do_dither;
AVRational aspect;
AVRational scale;
} VignetteContext;
 
#define OFFSET(x) offsetof(VignetteContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption vignette_options[] = {
{ "angle", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
{ "a", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
{ "x0", "set circle center position on x-axis", OFFSET(x0_expr), AV_OPT_TYPE_STRING, {.str="w/2"}, .flags = FLAGS },
{ "y0", "set circle center position on y-axis", OFFSET(y0_expr), AV_OPT_TYPE_STRING, {.str="h/2"}, .flags = FLAGS },
{ "mode", "set forward/backward mode", OFFSET(backward), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "mode" },
{ "forward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, FLAGS, "mode"},
{ "backward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, INT_MIN, INT_MAX, FLAGS, "mode"},
{ "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
{ "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
{ "frame", "eval expressions for each frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
{ "dither", "set dithering", OFFSET(do_dither), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
{ "aspect", "set aspect ratio", OFFSET(aspect), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, DBL_MAX, .flags = FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(vignette);
 
static av_cold int init(AVFilterContext *ctx)
{
VignetteContext *s = ctx->priv;
 
#define PARSE_EXPR(name) do { \
int ret = av_expr_parse(&s->name##_pexpr, s->name##_expr, var_names, \
NULL, NULL, NULL, NULL, 0, ctx); \
if (ret < 0) { \
av_log(ctx, AV_LOG_ERROR, "Unable to parse expression for '" \
AV_STRINGIFY(name) "'\n"); \
return ret; \
} \
} while (0)
 
PARSE_EXPR(angle);
PARSE_EXPR(x0);
PARSE_EXPR(y0);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
VignetteContext *s = ctx->priv;
av_freep(&s->fmap);
av_expr_free(s->angle_pexpr);
av_expr_free(s->x0_pexpr);
av_expr_free(s->y0_pexpr);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static double get_natural_factor(const VignetteContext *s, int x, int y)
{
const int xx = (x - s->x0) * s->xscale;
const int yy = (y - s->y0) * s->yscale;
const double dnorm = hypot(xx, yy) / s->dmax;
if (dnorm > 1) {
return 0;
} else {
const double c = cos(s->angle * dnorm);
return (c*c)*(c*c); // do not remove braces, it helps compilers
}
}
 
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
 
static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
{
int x, y;
float *dst = s->fmap;
int dst_linesize = s->fmap_linesize;
 
if (frame) {
s->var_values[VAR_N] = inlink->frame_count;
s->var_values[VAR_T] = TS2T(frame->pts, inlink->time_base);
s->var_values[VAR_PTS] = TS2D(frame->pts);
} else {
s->var_values[VAR_N] = 0;
s->var_values[VAR_T] = NAN;
s->var_values[VAR_PTS] = NAN;
}
 
s->angle = av_clipf(av_expr_eval(s->angle_pexpr, s->var_values, NULL), 0, M_PI_2);
s->x0 = av_expr_eval(s->x0_pexpr, s->var_values, NULL);
s->y0 = av_expr_eval(s->y0_pexpr, s->var_values, NULL);
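/* backward mode stores the reciprocal factor, undoing an existing
* vignette instead of applying one */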
 
if (s->backward) {
for (y = 0; y < inlink->h; y++) {
for (x = 0; x < inlink->w; x++)
dst[x] = 1. / get_natural_factor(s, x, y);
dst += dst_linesize;
}
} else {
for (y = 0; y < inlink->h; y++) {
for (x = 0; x < inlink->w; x++)
dst[x] = get_natural_factor(s, x, y);
dst += dst_linesize;
}
}
}
 
static inline double get_dither_value(VignetteContext *s)
{
double dv = 0;
if (s->do_dither) {
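/* 32-bit linear congruential generator (Numerical Recipes constants),
* scaled to [0, 1) */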
dv = s->dither / (double)(1LL<<32);
s->dither = s->dither * 1664525 + 1013904223;
}
return dv;
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
unsigned x, y;
AVFilterContext *ctx = inlink->dst;
VignetteContext *s = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
 
if (s->eval_mode == EVAL_MODE_FRAME)
update_context(s, inlink, in);
 
if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
uint8_t *dst = out->data[0];
const uint8_t *src = in ->data[0];
const float *fmap = s->fmap;
const int dst_linesize = out->linesize[0];
const int src_linesize = in ->linesize[0];
const int fmap_linesize = s->fmap_linesize;
 
for (y = 0; y < inlink->h; y++) {
uint8_t *dstp = dst;
const uint8_t *srcp = src;
 
for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
const float f = fmap[x];
 
dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
}
dst += dst_linesize;
src += src_linesize;
fmap += fmap_linesize;
}
} else {
int plane;
 
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
uint8_t *dst = out->data[plane];
const uint8_t *src = in ->data[plane];
const float *fmap = s->fmap;
const int dst_linesize = out->linesize[plane];
const int src_linesize = in ->linesize[plane];
const int fmap_linesize = s->fmap_linesize;
const int chroma = plane == 1 || plane == 2;
const int hsub = chroma ? s->desc->log2_chroma_w : 0;
const int vsub = chroma ? s->desc->log2_chroma_h : 0;
const int w = FF_CEIL_RSHIFT(inlink->w, hsub);
const int h = FF_CEIL_RSHIFT(inlink->h, vsub);
 
for (y = 0; y < h; y++) {
uint8_t *dstp = dst;
const uint8_t *srcp = src;
 
for (x = 0; x < w; x++) {
const double dv = get_dither_value(s);
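/* chroma is attenuated around its neutral value (127 here) so darkening does not shift the hue */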
if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
else *dstp++ = av_clip_uint8(fmap[x ] * *srcp++ + dv);
}
dst += dst_linesize;
src += src_linesize;
fmap += fmap_linesize << vsub;
}
}
}
 
return ff_filter_frame(outlink, out);
}
 
static int config_props(AVFilterLink *inlink)
{
VignetteContext *s = inlink->dst->priv;
AVRational sar = inlink->sample_aspect_ratio;
 
s->desc = av_pix_fmt_desc_get(inlink->format);
s->var_values[VAR_W] = inlink->w;
s->var_values[VAR_H] = inlink->h;
s->var_values[VAR_TB] = av_q2d(inlink->time_base);
s->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
NAN : av_q2d(inlink->frame_rate);
 
if (!sar.num || !sar.den)
sar.num = sar.den = 1;
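/* stretch the distance metric along one axis so the vignette stays
* circular under non-square pixels and the requested aspect ratio */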
if (sar.num > sar.den) {
s->xscale = av_q2d(av_div_q(sar, s->aspect));
s->yscale = 1;
} else {
s->yscale = av_q2d(av_div_q(s->aspect, sar));
s->xscale = 1;
}
s->dmax = hypot(inlink->w / 2., inlink->h / 2.);
av_log(s, AV_LOG_DEBUG, "xscale=%f yscale=%f dmax=%f\n",
s->xscale, s->yscale, s->dmax);
 
s->fmap_linesize = FFALIGN(inlink->w, 32);
s->fmap = av_malloc(s->fmap_linesize * inlink->h * sizeof(*s->fmap));
if (!s->fmap)
return AVERROR(ENOMEM);
 
if (s->eval_mode == EVAL_MODE_INIT)
update_context(s, inlink, NULL);
 
return 0;
}
 
static const AVFilterPad vignette_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_props,
},
{ NULL }
};
 
static const AVFilterPad vignette_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
 
AVFilter avfilter_vf_vignette = {
.name = "vignette",
.description = NULL_IF_CONFIG_SMALL("Make or reverse a vignette effect."),
.priv_size = sizeof(VignetteContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = vignette_inputs,
.outputs = vignette_outputs,
.priv_class = &vignette_class,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_w3fdif.c
0,0 → 1,394
/*
* Copyright (C) 2012 British Broadcasting Corporation, All Rights Reserved
* Author of de-interlace algorithm: Jim Easterbrook for BBC R&D
* Based on the process described by Martin Weston for BBC R&D
* Author of FFmpeg filter: Mark Himsley for BBC Broadcast Systems Development
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef struct W3FDIFContext {
const AVClass *class;
int filter; ///< 0 is simple, 1 is more complex
int deint; ///< which frames to deinterlace
int linesize[4]; ///< bytes of pixel data per line for each plane
int planeheight[4]; ///< height of each plane
int field; ///< which field are we on, 0 or 1
int eof;
int nb_planes;
AVFrame *prev, *cur, *next; ///< previous, current, next frames
int32_t *work_line; ///< line we are calculating
} W3FDIFContext;
 
#define OFFSET(x) offsetof(W3FDIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }
 
static const AVOption w3fdif_options[] = {
{ "filter", "specify the filter", OFFSET(filter), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "filter" },
CONST("simple", NULL, 0, "filter"),
CONST("complex", NULL, 1, "filter"),
{ "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "deint" },
CONST("all", "deinterlace all frames", 0, "deint"),
CONST("interlaced", "only deinterlace frames marked as interlaced", 1, "deint"),
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(w3fdif);
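/* One possible invocation through the ffmpeg CLI (assuming a build with
 * this filter enabled), using the options defined above:
 *   ffmpeg -i interlaced.ts -vf w3fdif=filter=complex:deint=interlaced out.mkv
 * doubles the frame rate and deinterlaces only frames flagged as interlaced. */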
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ411P,
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int config_input(AVFilterLink *inlink)
{
W3FDIFContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int ret;
 
if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
return ret;
 
s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
s->planeheight[0] = s->planeheight[3] = inlink->h;
 
s->nb_planes = av_pix_fmt_count_planes(inlink->format);
s->work_line = av_calloc(s->linesize[0], sizeof(*s->work_line));
if (!s->work_line)
return AVERROR(ENOMEM);
 
return 0;
}
 
static int config_output(AVFilterLink *outlink)
{
AVFilterLink *inlink = outlink->src->inputs[0];
 
outlink->time_base.num = inlink->time_base.num;
outlink->time_base.den = inlink->time_base.den * 2;
outlink->frame_rate.num = inlink->frame_rate.num * 2;
outlink->frame_rate.den = inlink->frame_rate.den;
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
 
return 0;
}
 
/*
 * Filter coefficients from PH-2071, scaled by 256 * 256.
 * Each mode (simple and more-complex) has one set of coefficients for
 * low frequencies and one for high frequencies.
 * n_coef_lf[] and n_coef_hf[] hold the number of coefficients for the
 * simple and the more-complex mode respectively.
 * It is important for later that n_coef_lf[] is even and n_coef_hf[] is odd.
 * coef_lf[][] and coef_hf[][] are the low-frequency and high-frequency
 * coefficients for the simple and the more-complex mode.
 */
static const int8_t n_coef_lf[2] = { 2, 4 };
static const int32_t coef_lf[2][4] = {{ 32768, 32768, 0, 0},
{ -1704, 34472, 34472, -1704}};
static const int8_t n_coef_hf[2] = { 3, 5 };
static const int32_t coef_hf[2][5] = {{ -4096, 8192, -4096, 0, 0},
{ 2032, -7602, 11140, -7602, 2032}};
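/* Sanity check on the scaling: each low-frequency set sums to 65536
 * (256 * 256, i.e. unity gain at DC), e.g. -1704 + 34472 + 34472 - 1704 = 65536,
 * while each high-frequency set sums to zero, e.g.
 * 2032 - 7602 + 11140 - 7602 + 2032 = 0, so the adjacent-field taps add
 * detail without changing the overall brightness. */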
 
static void deinterlace_plane(AVFilterContext *ctx, AVFrame *out,
const AVFrame *cur, const AVFrame *adj,
const int filter, const int plane)
{
W3FDIFContext *s = ctx->priv;
uint8_t *in_line, *in_lines_cur[5], *in_lines_adj[5];
uint8_t *out_line, *out_pixel;
int32_t *work_line, *work_pixel;
uint8_t *cur_data = cur->data[plane];
uint8_t *adj_data = adj->data[plane];
uint8_t *dst_data = out->data[plane];
const int linesize = s->linesize[plane];
const int height = s->planeheight[plane];
const int cur_line_stride = cur->linesize[plane];
const int adj_line_stride = adj->linesize[plane];
const int dst_line_stride = out->linesize[plane];
int i, j, y_in, y_out;
 
/* copy the lines of the current field unchanged */
y_out = s->field == cur->top_field_first;
 
in_line = cur_data + (y_out * cur_line_stride);
out_line = dst_data + (y_out * dst_line_stride);
 
while (y_out < height) {
memcpy(out_line, in_line, linesize);
y_out += 2;
in_line += cur_line_stride * 2;
out_line += dst_line_stride * 2;
}
 
/* interpolate the other lines of the field */
y_out = s->field != cur->top_field_first;
 
out_line = dst_data + (y_out * dst_line_stride);
 
while (y_out < height) {
/* clear workspace */
memset(s->work_line, 0, sizeof(*s->work_line) * linesize);
 
/* get low vertical frequencies from current field */
for (j = 0; j < n_coef_lf[filter]; j++) {
y_in = (y_out + 1) + (j * 2) - n_coef_lf[filter];
 
while (y_in < 0)
y_in += 2;
while (y_in >= height)
y_in -= 2;
 
in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
}
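/* folding y_in back into the frame in steps of 2 keeps every tap
 * within the same field */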
 
work_line = s->work_line;
switch (n_coef_lf[filter]) {
case 2:
for (i = 0; i < linesize; i++) {
*work_line += *in_lines_cur[0]++ * coef_lf[filter][0];
*work_line++ += *in_lines_cur[1]++ * coef_lf[filter][1];
}
break;
case 4:
for (i = 0; i < linesize; i++) {
*work_line += *in_lines_cur[0]++ * coef_lf[filter][0];
*work_line += *in_lines_cur[1]++ * coef_lf[filter][1];
*work_line += *in_lines_cur[2]++ * coef_lf[filter][2];
*work_line++ += *in_lines_cur[3]++ * coef_lf[filter][3];
}
}
 
/* get high vertical frequencies from adjacent fields */
for (j = 0; j < n_coef_hf[filter]; j++) {
y_in = (y_out + 1) + (j * 2) - n_coef_hf[filter];
 
while (y_in < 0)
y_in += 2;
while (y_in >= height)
y_in -= 2;
 
in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
in_lines_adj[j] = adj_data + (y_in * adj_line_stride);
}
 
work_line = s->work_line;
switch (n_coef_hf[filter]) {
case 3:
for (i = 0; i < linesize; i++) {
*work_line += *in_lines_cur[0]++ * coef_hf[filter][0];
*work_line += *in_lines_adj[0]++ * coef_hf[filter][0];
*work_line += *in_lines_cur[1]++ * coef_hf[filter][1];
*work_line += *in_lines_adj[1]++ * coef_hf[filter][1];
*work_line += *in_lines_cur[2]++ * coef_hf[filter][2];
*work_line++ += *in_lines_adj[2]++ * coef_hf[filter][2];
}
break;
case 5:
for (i = 0; i < linesize; i++) {
*work_line += *in_lines_cur[0]++ * coef_hf[filter][0];
*work_line += *in_lines_adj[0]++ * coef_hf[filter][0];
*work_line += *in_lines_cur[1]++ * coef_hf[filter][1];
*work_line += *in_lines_adj[1]++ * coef_hf[filter][1];
*work_line += *in_lines_cur[2]++ * coef_hf[filter][2];
*work_line += *in_lines_adj[2]++ * coef_hf[filter][2];
*work_line += *in_lines_cur[3]++ * coef_hf[filter][3];
*work_line += *in_lines_adj[3]++ * coef_hf[filter][3];
*work_line += *in_lines_cur[4]++ * coef_hf[filter][4];
*work_line++ += *in_lines_adj[4]++ * coef_hf[filter][4];
}
}
 
/* save scaled result to the output frame, scaling down by 256 * 256 */
work_pixel = s->work_line;
out_pixel = out_line;
 
for (j = 0; j < linesize; j++, out_pixel++, work_pixel++)
*out_pixel = av_clip(*work_pixel, 0, 255 * 256 * 256) >> 16;
 
/* move on to next line */
y_out += 2;
out_line += dst_line_stride * 2;
}
}
 
static int filter(AVFilterContext *ctx, int is_second)
{
W3FDIFContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out, *adj;
int plane;
 
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
return AVERROR(ENOMEM);
av_frame_copy_props(out, s->cur);
out->interlaced_frame = 0;
 
if (!is_second) {
if (out->pts != AV_NOPTS_VALUE)
out->pts *= 2;
} else {
int64_t cur_pts = s->cur->pts;
int64_t next_pts = s->next->pts;
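/* the output time base is double the input's, so the first field of this
 * frame went out at 2 * cur_pts and the next frame starts at 2 * next_pts;
 * their midpoint, cur_pts + next_pts, is the natural timestamp for the
 * second field */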
 
if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
out->pts = cur_pts + next_pts;
} else {
out->pts = AV_NOPTS_VALUE;
}
}
 
adj = s->field ? s->next : s->prev;
for (plane = 0; plane < s->nb_planes; plane++)
deinterlace_plane(ctx, out, s->cur, adj, s->filter, plane);
 
s->field = !s->field;
 
return ff_filter_frame(outlink, out);
}
 
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
W3FDIFContext *s = ctx->priv;
int ret;
 
av_frame_free(&s->prev);
s->prev = s->cur;
s->cur = s->next;
s->next = frame;
 
if (!s->cur) {
s->cur = av_frame_clone(s->next);
if (!s->cur)
return AVERROR(ENOMEM);
}
 
if ((s->deint && !s->cur->interlaced_frame) || ctx->is_disabled) {
AVFrame *out = av_frame_clone(s->cur);
if (!out)
return AVERROR(ENOMEM);
 
av_frame_free(&s->prev);
if (out->pts != AV_NOPTS_VALUE)
out->pts *= 2;
return ff_filter_frame(ctx->outputs[0], out);
}
 
if (!s->prev)
return 0;
 
ret = filter(ctx, 0);
if (ret < 0)
return ret;
 
return filter(ctx, 1);
}
 
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
W3FDIFContext *s = ctx->priv;
 
do {
int ret;
 
if (s->eof)
return AVERROR_EOF;
 
ret = ff_request_frame(ctx->inputs[0]);
 
if (ret == AVERROR_EOF && s->cur) {
AVFrame *next = av_frame_clone(s->next);
if (!next)
return AVERROR(ENOMEM);
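/* extrapolate one frame duration past the last real frame,
 * i.e. next_pts + (next_pts - cur_pts), to keep the input cadence */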
next->pts = s->next->pts * 2 - s->cur->pts;
filter_frame(ctx->inputs[0], next);
s->eof = 1;
} else if (ret < 0) {
return ret;
}
} while (!s->cur);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
W3FDIFContext *s = ctx->priv;
 
av_frame_free(&s->prev);
av_frame_free(&s->cur );
av_frame_free(&s->next);
av_freep(&s->work_line);
}
 
static const AVFilterPad w3fdif_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
 
static const AVFilterPad w3fdif_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
 
AVFilter avfilter_vf_w3fdif = {
.name = "w3fdif",
.description = NULL_IF_CONFIG_SMALL("Apply Martin Weston three field deinterlace."),
.priv_size = sizeof(W3FDIFContext),
.priv_class = &w3fdif_class,
.uninit = uninit,
.query_formats = query_formats,
.inputs = w3fdif_inputs,
.outputs = w3fdif_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vf_yadif.c
0,0 → 1,549
/*
* Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
* 2010 James Darnley <james.darnley@gmail.com>
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "yadif.h"
 
typedef struct ThreadData {
AVFrame *frame;
int plane;
int w, h;
int parity;
int tff;
} ThreadData;
 
#define CHECK(j)\
{ int score = FFABS(cur[mrefs - 1 + (j)] - cur[prefs - 1 - (j)])\
+ FFABS(cur[mrefs +(j)] - cur[prefs -(j)])\
+ FFABS(cur[mrefs + 1 + (j)] - cur[prefs + 1 - (j)]);\
if (score < spatial_score) {\
spatial_score= score;\
spatial_pred= (cur[mrefs +(j)] + cur[prefs -(j)])>>1;\
 
/* The is_not_edge argument here controls when the code will enter a branch
* which reads up to and including x-3 and x+3. */
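/* Note that CHECK deliberately leaves two braces unclosed; the matching
 * "}} }}" pairs appear at its call sites inside FILTER below. */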
 
#define FILTER(start, end, is_not_edge) \
for (x = start; x < end; x++) { \
int c = cur[mrefs]; \
int d = (prev2[0] + next2[0])>>1; \
int e = cur[prefs]; \
int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
int spatial_pred = (c+e) >> 1; \
\
if (is_not_edge) {\
int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c-e) \
+ FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1; \
CHECK(-1) CHECK(-2) }} }} \
CHECK( 1) CHECK( 2) }} }} \
}\
\
if (!(mode&2)) { \
int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
\
diff = FFMAX3(diff, min, -max); \
} \
\
if (spatial_pred > d + diff) \
spatial_pred = d + diff; \
else if (spatial_pred < d - diff) \
spatial_pred = d - diff; \
\
dst[0] = spatial_pred; \
\
dst++; \
cur++; \
prev++; \
next++; \
prev2++; \
next2++; \
}
 
static void filter_line_c(void *dst1,
void *prev1, void *cur1, void *next1,
int w, int prefs, int mrefs, int parity, int mode)
{
uint8_t *dst = dst1;
uint8_t *prev = prev1;
uint8_t *cur = cur1;
uint8_t *next = next1;
int x;
uint8_t *prev2 = parity ? prev : cur ;
uint8_t *next2 = parity ? cur : next;
 
/* The function is called with the pointers already pointing to data[3] and
* with 6 subtracted from the width. This allows the FILTER macro to be
* called so that it processes all the pixels normally. A constant value of
* true for is_not_edge lets the compiler ignore the if statement. */
FILTER(0, w, 1)
}
 
#define MAX_ALIGN 8
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
int w, int prefs, int mrefs, int parity, int mode)
{
uint8_t *dst = dst1;
uint8_t *prev = prev1;
uint8_t *cur = cur1;
uint8_t *next = next1;
int x;
uint8_t *prev2 = parity ? prev : cur ;
uint8_t *next2 = parity ? cur : next;
 
/* Only edge pixels need to be processed here. A constant value of false
* for is_not_edge should let the compiler ignore the whole branch. */
FILTER(0, 3, 0)
 
dst = (uint8_t*)dst1 + w - (MAX_ALIGN-1);
prev = (uint8_t*)prev1 + w - (MAX_ALIGN-1);
cur = (uint8_t*)cur1 + w - (MAX_ALIGN-1);
next = (uint8_t*)next1 + w - (MAX_ALIGN-1);
prev2 = (uint8_t*)(parity ? prev : cur);
next2 = (uint8_t*)(parity ? cur : next);
 
FILTER(w - (MAX_ALIGN-1), w - 3, 1)
FILTER(w - 3, w, 0)
}
 
 
static void filter_line_c_16bit(void *dst1,
void *prev1, void *cur1, void *next1,
int w, int prefs, int mrefs, int parity,
int mode)
{
uint16_t *dst = dst1;
uint16_t *prev = prev1;
uint16_t *cur = cur1;
uint16_t *next = next1;
int x;
uint16_t *prev2 = parity ? prev : cur ;
uint16_t *next2 = parity ? cur : next;
mrefs /= 2;
prefs /= 2;
 
FILTER(0, w, 1)
}
 
static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
int w, int prefs, int mrefs, int parity, int mode)
{
uint16_t *dst = dst1;
uint16_t *prev = prev1;
uint16_t *cur = cur1;
uint16_t *next = next1;
int x;
uint16_t *prev2 = parity ? prev : cur ;
uint16_t *next2 = parity ? cur : next;
mrefs /= 2;
prefs /= 2;
 
FILTER(0, 3, 0)
 
dst = (uint16_t*)dst1 + w - (MAX_ALIGN/2-1);
prev = (uint16_t*)prev1 + w - (MAX_ALIGN/2-1);
cur = (uint16_t*)cur1 + w - (MAX_ALIGN/2-1);
next = (uint16_t*)next1 + w - (MAX_ALIGN/2-1);
prev2 = (uint16_t*)(parity ? prev : cur);
next2 = (uint16_t*)(parity ? cur : next);
 
FILTER(w - (MAX_ALIGN/2-1), w - 3, 1)
FILTER(w - 3, w, 0)
}
 
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
YADIFContext *s = ctx->priv;
ThreadData *td = arg;
int refs = s->cur->linesize[td->plane];
int df = (s->csp->comp[td->plane].depth_minus1 + 8) / 8;
int pix_3 = 3 * df;
int slice_start = (td->h * jobnr ) / nb_jobs;
int slice_end = (td->h * (jobnr+1)) / nb_jobs;
int y;
 
/* filtering reads 3 pixels to the left/right; to avoid invalid reads,
* we need to call the c variant which avoids this for border pixels
*/
for (y = slice_start; y < slice_end; y++) {
if ((y ^ td->parity) & 1) {
uint8_t *prev = &s->prev->data[td->plane][y * refs];
uint8_t *cur = &s->cur ->data[td->plane][y * refs];
uint8_t *next = &s->next->data[td->plane][y * refs];
uint8_t *dst = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
int mode = y == 1 || y + 2 == td->h ? 2 : s->mode;
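/* lines 1 and h-2 force mode 2: the skipped spatial check would read two
 * lines above/below (prev2[2 * mrefs], next2[2 * prefs]), which falls
 * outside the frame there */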
s->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
next + pix_3, td->w - (3 + MAX_ALIGN/df-1),
y + 1 < td->h ? refs : -refs,
y ? -refs : refs,
td->parity ^ td->tff, mode);
s->filter_edges(dst, prev, cur, next, td->w,
y + 1 < td->h ? refs : -refs,
y ? -refs : refs,
td->parity ^ td->tff, mode);
} else {
memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]],
&s->cur->data[td->plane][y * refs], td->w * df);
}
}
return 0;
}
 
static void filter(AVFilterContext *ctx, AVFrame *dstpic,
int parity, int tff)
{
YADIFContext *yadif = ctx->priv;
ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff };
int i;
 
for (i = 0; i < yadif->csp->nb_components; i++) {
int w = dstpic->width;
int h = dstpic->height;
 
if (i == 1 || i == 2) {
w = FF_CEIL_RSHIFT(w, yadif->csp->log2_chroma_w);
h = FF_CEIL_RSHIFT(h, yadif->csp->log2_chroma_h);
}
 
 
td.w = w;
td.h = h;
td.plane = i;
 
ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ctx->graph->nb_threads));
}
 
emms_c();
}
 
static int return_frame(AVFilterContext *ctx, int is_second)
{
YADIFContext *yadif = ctx->priv;
AVFilterLink *link = ctx->outputs[0];
int tff, ret;
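/* parity -1 means auto-detect: trust the frame's top_field_first flag
 * when it is flagged interlaced, otherwise assume top field first */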
 
if (yadif->parity == -1) {
tff = yadif->cur->interlaced_frame ?
yadif->cur->top_field_first : 1;
} else {
tff = yadif->parity ^ 1;
}
 
if (is_second) {
yadif->out = ff_get_video_buffer(link, link->w, link->h);
if (!yadif->out)
return AVERROR(ENOMEM);
 
av_frame_copy_props(yadif->out, yadif->cur);
yadif->out->interlaced_frame = 0;
}
 
filter(ctx, yadif->out, tff ^ !is_second, tff);
 
if (is_second) {
int64_t cur_pts = yadif->cur->pts;
int64_t next_pts = yadif->next->pts;
 
if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
yadif->out->pts = cur_pts + next_pts;
} else {
yadif->out->pts = AV_NOPTS_VALUE;
}
}
ret = ff_filter_frame(ctx->outputs[0], yadif->out);
 
yadif->frame_pending = (yadif->mode&1) && !is_second;
return ret;
}
 
static int checkstride(YADIFContext *yadif, const AVFrame *a, const AVFrame *b)
{
int i;
for (i = 0; i < yadif->csp->nb_components; i++)
if (a->linesize[i] != b->linesize[i])
return 1;
return 0;
}
 
static void fixstride(AVFilterLink *link, AVFrame *f)
{
AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);
if(!dst)
return;
av_frame_copy_props(dst, f);
av_image_copy(dst->data, dst->linesize,
(const uint8_t **)f->data, f->linesize,
dst->format, dst->width, dst->height);
av_frame_unref(f);
av_frame_move_ref(f, dst);
av_frame_free(&dst);
}
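/* fixstride() re-homes a frame into a freshly allocated default buffer so
 * that prev/cur/next end up with identical linesizes, which filter_slice
 * relies on when it indexes all three frames with cur's stride */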
 
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
YADIFContext *yadif = ctx->priv;
 
av_assert0(frame);
 
if (yadif->frame_pending)
return_frame(ctx, 1);
 
if (yadif->prev)
av_frame_free(&yadif->prev);
yadif->prev = yadif->cur;
yadif->cur = yadif->next;
yadif->next = frame;
 
if (!yadif->cur)
return 0;
 
if (checkstride(yadif, yadif->next, yadif->cur)) {
av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
fixstride(link, yadif->next);
}
if (checkstride(yadif, yadif->next, yadif->cur))
fixstride(link, yadif->cur);
if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))
fixstride(link, yadif->prev);
if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) {
av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");
return -1;
}
 
if ((yadif->deint && !yadif->cur->interlaced_frame) || ctx->is_disabled) {
yadif->out = av_frame_clone(yadif->cur);
if (!yadif->out)
return AVERROR(ENOMEM);
 
av_frame_free(&yadif->prev);
if (yadif->out->pts != AV_NOPTS_VALUE)
yadif->out->pts *= 2;
return ff_filter_frame(ctx->outputs[0], yadif->out);
}
 
if (!yadif->prev &&
!(yadif->prev = av_frame_clone(yadif->cur)))
return AVERROR(ENOMEM);
 
yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
if (!yadif->out)
return AVERROR(ENOMEM);
 
av_frame_copy_props(yadif->out, yadif->cur);
yadif->out->interlaced_frame = 0;
 
if (yadif->out->pts != AV_NOPTS_VALUE)
yadif->out->pts *= 2;
 
return return_frame(ctx, 0);
}
 
static int request_frame(AVFilterLink *link)
{
AVFilterContext *ctx = link->src;
YADIFContext *yadif = ctx->priv;
 
if (yadif->frame_pending) {
return_frame(ctx, 1);
return 0;
}
 
do {
int ret;
 
if (yadif->eof)
return AVERROR_EOF;
 
ret = ff_request_frame(link->src->inputs[0]);
 
if (ret == AVERROR_EOF && yadif->cur) {
AVFrame *next = av_frame_clone(yadif->next);
 
if (!next)
return AVERROR(ENOMEM);
 
next->pts = yadif->next->pts * 2 - yadif->cur->pts;
 
filter_frame(link->src->inputs[0], next);
yadif->eof = 1;
} else if (ret < 0) {
return ret;
}
} while (!yadif->cur);
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
YADIFContext *yadif = ctx->priv;
 
av_frame_free(&yadif->prev);
av_frame_free(&yadif->cur );
av_frame_free(&yadif->next);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV411P,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_GRAY16,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_YUV420P9,
AV_PIX_FMT_YUV422P9,
AV_PIX_FMT_YUV444P9,
AV_PIX_FMT_YUV420P10,
AV_PIX_FMT_YUV422P10,
AV_PIX_FMT_YUV444P10,
AV_PIX_FMT_YUV420P12,
AV_PIX_FMT_YUV422P12,
AV_PIX_FMT_YUV444P12,
AV_PIX_FMT_YUV420P14,
AV_PIX_FMT_YUV422P14,
AV_PIX_FMT_YUV444P14,
AV_PIX_FMT_YUV420P16,
AV_PIX_FMT_YUV422P16,
AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUVA422P,
AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_GBRP,
AV_PIX_FMT_GBRAP,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 
return 0;
}
 
static int config_props(AVFilterLink *link)
{
AVFilterContext *ctx = link->src;
YADIFContext *s = link->src->priv;
 
link->time_base.num = link->src->inputs[0]->time_base.num;
link->time_base.den = link->src->inputs[0]->time_base.den * 2;
link->w = link->src->inputs[0]->w;
link->h = link->src->inputs[0]->h;
 
if(s->mode&1)
link->frame_rate = av_mul_q(link->src->inputs[0]->frame_rate, (AVRational){2,1});
 
if (link->w < 3 || link->h < 3) {
av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
return AVERROR(EINVAL);
}
 
s->csp = av_pix_fmt_desc_get(link->format);
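/* depth_minus1 / 8 == 1 matches any 9- to 16-bit format (depth_minus1 in
 * the range 8..15), which needs the 16-bit-per-sample code paths */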
if (s->csp->comp[0].depth_minus1 / 8 == 1) {
s->filter_line = filter_line_c_16bit;
s->filter_edges = filter_edges_16bit;
} else {
s->filter_line = filter_line_c;
s->filter_edges = filter_edges;
}
 
if (ARCH_X86)
ff_yadif_init_x86(s);
 
return 0;
}
 
 
#define OFFSET(x) offsetof(YADIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
 
static const AVOption yadif_options[] = {
{ "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"},
CONST("send_frame", "send one frame for each frame", YADIF_MODE_SEND_FRAME, "mode"),
CONST("send_field", "send one frame for each field", YADIF_MODE_SEND_FIELD, "mode"),
CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"),
CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"),
 
{ "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
CONST("tff", "assume top field first", YADIF_PARITY_TFF, "parity"),
CONST("bff", "assume bottom field first", YADIF_PARITY_BFF, "parity"),
CONST("auto", "auto detect parity", YADIF_PARITY_AUTO, "parity"),
 
{ "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
CONST("all", "deinterlace all frames", YADIF_DEINT_ALL, "deint"),
CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"),
 
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(yadif);
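/* One possible invocation through the ffmpeg CLI (assuming a build with
 * this filter enabled), using the options defined above:
 *   ffmpeg -i in.ts -vf yadif=mode=send_field:parity=auto:deint=interlaced out.mkv
 * emits one frame per field (doubling the rate) and deinterlaces only
 * frames flagged as interlaced. */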
 
static const AVFilterPad avfilter_vf_yadif_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
 
static const AVFilterPad avfilter_vf_yadif_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_vf_yadif = {
.name = "yadif",
.description = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
.priv_size = sizeof(YADIFContext),
.priv_class = &yadif_class,
.uninit = uninit,
.query_formats = query_formats,
.inputs = avfilter_vf_yadif_inputs,
.outputs = avfilter_vf_yadif_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
/contrib/sdk/sources/ffmpeg/libavfilter/video.c
0,0 → 1,123
/*
* Copyright 2007 Bobby Bingham
* Copyright Stefano Sabatini <stefasab gmail com>
* Copyright Vitor Sessak <vitor1001 gmail com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <string.h>
#include <stdio.h>
 
#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
 
#include "avfilter.h"
#include "internal.h"
#include "video.h"
 
AVFrame *ff_null_get_video_buffer(AVFilterLink *link, int w, int h)
{
return ff_get_video_buffer(link->dst->outputs[0], w, h);
}
 
/* TODO: set the buffer's priv member to a context structure for the whole
* filter chain. This will allow for a buffer pool instead of the constant
* alloc & free cycle currently implemented. */
AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h)
{
AVFrame *frame = av_frame_alloc();
int ret;
 
if (!frame)
return NULL;
 
frame->width = w;
frame->height = h;
frame->format = link->format;
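/* request 32-byte-aligned data planes so that SIMD code paths
 * (e.g. AVX) can use aligned accesses */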
 
ret = av_frame_get_buffer(frame, 32);
if (ret < 0)
av_frame_free(&frame);
 
return frame;
}
 
#if FF_API_AVFILTERBUFFER
AVFilterBufferRef *
avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
int w, int h, enum AVPixelFormat format)
{
AVFilterBuffer *pic = av_mallocz(sizeof(AVFilterBuffer));
AVFilterBufferRef *picref = av_mallocz(sizeof(AVFilterBufferRef));
 
if (!pic || !picref)
goto fail;
 
picref->buf = pic;
picref->buf->free = ff_avfilter_default_free_buffer;
if (!(picref->video = av_mallocz(sizeof(AVFilterBufferRefVideoProps))))
goto fail;
 
pic->w = picref->video->w = w;
pic->h = picref->video->h = h;
 
/* make sure the buffer gets read permission or it's useless for output */
picref->perms = perms | AV_PERM_READ;
 
pic->refcount = 1;
picref->type = AVMEDIA_TYPE_VIDEO;
pic->format = picref->format = format;
 
memcpy(pic->data, data, 4*sizeof(data[0]));
memcpy(pic->linesize, linesize, 4*sizeof(linesize[0]));
memcpy(picref->data, pic->data, sizeof(picref->data));
memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize));
 
pic-> extended_data = pic->data;
picref->extended_data = picref->data;
 
picref->pts = AV_NOPTS_VALUE;
 
return picref;
 
fail:
if (picref && picref->video)
av_free(picref->video);
av_free(picref);
av_free(pic);
return NULL;
}
#endif
 
AVFrame *ff_get_video_buffer(AVFilterLink *link, int w, int h)
{
AVFrame *ret = NULL;
 
av_unused char buf[16];
FF_TPRINTF_START(NULL, get_video_buffer); ff_tlog_link(NULL, link, 0);
 
if (link->dstpad->get_video_buffer)
ret = link->dstpad->get_video_buffer(link, w, h);
 
if (!ret)
ret = ff_default_get_video_buffer(link, w, h);
 
return ret;
}
/contrib/sdk/sources/ffmpeg/libavfilter/video.h
0,0 → 1,41
/*
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_VIDEO_H
#define AVFILTER_VIDEO_H
 
#include "avfilter.h"
 
AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h);
AVFrame *ff_null_get_video_buffer(AVFilterLink *link, int w, int h);
 
/**
* Request a picture buffer for the given output link.
*
* @param link the output link to the filter from which the buffer will
* be requested
* @param w the minimum width of the buffer to allocate
* @param h the minimum height of the buffer to allocate
* @return A reference to the buffer. This must be freed with
* av_frame_free() when you are finished with it.
*/
AVFrame *ff_get_video_buffer(AVFilterLink *link, int w, int h);
 
#endif /* AVFILTER_VIDEO_H */
/contrib/sdk/sources/ffmpeg/libavfilter/vidstabutils.c
0,0 → 1,85
/*
* Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "vidstabutils.h"
 
/** convert AV's pixelformat to vid.stab pixelformat */
VSPixelFormat av_2_vs_pixel_format(AVFilterContext *ctx, enum AVPixelFormat pf)
{
switch (pf) {
case AV_PIX_FMT_YUV420P: return PF_YUV420P;
case AV_PIX_FMT_YUV422P: return PF_YUV422P;
case AV_PIX_FMT_YUV444P: return PF_YUV444P;
case AV_PIX_FMT_YUV410P: return PF_YUV410P;
case AV_PIX_FMT_YUV411P: return PF_YUV411P;
case AV_PIX_FMT_YUV440P: return PF_YUV440P;
case AV_PIX_FMT_YUVA420P: return PF_YUVA420P;
case AV_PIX_FMT_GRAY8: return PF_GRAY8;
case AV_PIX_FMT_RGB24: return PF_RGB24;
case AV_PIX_FMT_BGR24: return PF_BGR24;
case AV_PIX_FMT_RGBA: return PF_RGBA;
default:
av_log(ctx, AV_LOG_ERROR, "cannot deal with pixel format %i\n", pf);
return PF_NONE;
}
}
 
/** struct to hold a valid context for logging from within vid.stab lib */
typedef struct {
const AVClass *class;
} VS2AVLogCtx;
 
/** wrapper to log vs_log into av_log */
static int vs_2_av_log_wrapper(int type, const char *tag, const char *format, ...)
{
va_list ap;
VS2AVLogCtx ctx;
AVClass class = {
.class_name = tag,
.item_name = av_default_item_name,
.option = 0,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_FILTER,
};
ctx.class = &class;
va_start(ap, format);
av_vlog(&ctx, type, format, ap);
va_end(ap);
return VS_OK;
}
 
/** sets the memory allocation functions and logging constants to av versions */
void vs_set_mem_and_log_functions(void)
{
vs_malloc = av_malloc;
vs_zalloc = av_mallocz;
vs_realloc = av_realloc;
vs_free = av_free;
 
VS_ERROR_TYPE = AV_LOG_ERROR;
VS_WARN_TYPE = AV_LOG_WARNING;
VS_INFO_TYPE = AV_LOG_INFO;
VS_MSG_TYPE = AV_LOG_VERBOSE;
 
vs_log = vs_2_av_log_wrapper;
 
VS_ERROR = 0;
VS_OK = 1;
}
/contrib/sdk/sources/ffmpeg/libavfilter/vidstabutils.h
0,0 → 1,36
/*
* Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFILTER_VIDSTABUTILS_H
#define AVFILTER_VIDSTABUTILS_H
 
#include <vid.stab/libvidstab.h>
 
#include "avfilter.h"
 
/* some conversions from avlib to vid.stab constants and functions */
 
/** converts the pixelformat of avlib into the one of the vid.stab library */
VSPixelFormat av_2_vs_pixel_format(AVFilterContext *ctx, enum AVPixelFormat pf);
 
/** sets the memory allocation functions and logging constants to av versions */
void vs_set_mem_and_log_functions(void);
 
#endif
/contrib/sdk/sources/ffmpeg/libavfilter/vsink_nullsink.c
0,0 → 1,46
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avfilter.h"
#include "internal.h"
#include "libavutil/internal.h"
 
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
av_frame_free(&frame);
return 0;
}
 
static const AVFilterPad avfilter_vsink_nullsink_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL },
};
 
AVFilter avfilter_vsink_nullsink = {
.name = "nullsink",
.description = NULL_IF_CONFIG_SMALL("Do absolutely nothing with the input video."),
 
.priv_size = 0,
 
.inputs = avfilter_vsink_nullsink_inputs,
.outputs = NULL,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vsrc_cellauto.c
0,0 → 1,337
/*
* Copyright (c) Stefano Sabatini 2011
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* cellular automaton video source, based on Stephen Wolfram's "experimentum crucis"
*/
 
/* #define DEBUG */
 
#include "libavutil/file.h"
#include "libavutil/lfg.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/random_seed.h"
#include "libavutil/avstring.h"
#include "avfilter.h"
#include "internal.h"
#include "formats.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
int w, h;
char *filename;
char *rule_str;
uint8_t *file_buf;
size_t file_bufsize;
uint8_t *buf;
int buf_prev_row_idx, buf_row_idx;
uint8_t rule;
uint64_t pts;
AVRational frame_rate;
double random_fill_ratio;
uint32_t random_seed;
int stitch, scroll, start_full;
int64_t generation; ///< the generation number, starting from 0
AVLFG lfg;
char *pattern;
} CellAutoContext;
 
#define OFFSET(x) offsetof(CellAutoContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
static const AVOption cellauto_options[] = {
{ "filename", "read initial pattern from file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "f", "read initial pattern from file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "pattern", "set initial pattern", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "p", "set initial pattern", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
{ "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
{ "rule", "set rule", OFFSET(rule), AV_OPT_TYPE_INT, {.i64 = 110}, 0, 255, FLAGS },
{ "random_fill_ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl = 1/M_PHI}, 0, 1, FLAGS },
{ "ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl = 1/M_PHI}, 0, 1, FLAGS },
{ "random_seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
{ "seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
{ "scroll", "scroll pattern downward", OFFSET(scroll), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
{ "start_full", "start filling the whole video", OFFSET(start_full), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
{ "full", "start filling the whole video", OFFSET(start_full), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
{ "stitch", "stitch boundaries", OFFSET(stitch), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(cellauto);
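/* Example invocation through the ffmpeg CLI (assuming a build with this
 * source enabled), using the options defined above:
 *   ffmpeg -f lavfi -i cellauto=rule=30:size=320x518:rate=25 -t 10 out.mkv
 * renders ten seconds of rule 30 starting from a randomly filled first row. */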
 
#ifdef DEBUG
static void show_cellauto_row(AVFilterContext *ctx)
{
CellAutoContext *cellauto = ctx->priv;
int i;
uint8_t *row = cellauto->buf + cellauto->w * cellauto->buf_row_idx;
char *line = av_malloc(cellauto->w + 1);
if (!line)
return;
 
for (i = 0; i < cellauto->w; i++)
line[i] = row[i] ? '@' : ' ';
line[i] = 0;
av_log(ctx, AV_LOG_DEBUG, "generation:%"PRId64" row:%s|\n", cellauto->generation, line);
av_free(line);
}
#endif
 
static int init_pattern_from_string(AVFilterContext *ctx)
{
CellAutoContext *cellauto = ctx->priv;
char *p;
int i, w = 0;
 
w = strlen(cellauto->pattern);
av_log(ctx, AV_LOG_DEBUG, "w:%d\n", w);
 
if (cellauto->w) {
if (w > cellauto->w) {
av_log(ctx, AV_LOG_ERROR,
"The specified width is %d which cannot contain the provided string width of %d\n",
cellauto->w, w);
return AVERROR(EINVAL);
}
} else {
/* width was not specified, set it to the width of the provided row */
cellauto->w = w;
cellauto->h = (double)cellauto->w * M_PHI;
}
 
cellauto->buf = av_mallocz(sizeof(uint8_t) * cellauto->w * cellauto->h);
if (!cellauto->buf)
return AVERROR(ENOMEM);
 
/* fill buf */
p = cellauto->pattern;
for (i = (cellauto->w - w)/2;; i++) {
av_log(ctx, AV_LOG_DEBUG, "%d %c\n", i, *p == '\n' ? 'N' : *p);
if (*p == '\n' || !*p)
break;
else
cellauto->buf[i] = !!av_isgraph(*(p++));
}
 
return 0;
}
 
static int init_pattern_from_file(AVFilterContext *ctx)
{
CellAutoContext *cellauto = ctx->priv;
int ret;
 
ret = av_file_map(cellauto->filename,
&cellauto->file_buf, &cellauto->file_bufsize, 0, ctx);
if (ret < 0)
return ret;
 
/* create a string based on the read file */
cellauto->pattern = av_malloc(cellauto->file_bufsize + 1);
if (!cellauto->pattern)
return AVERROR(ENOMEM);
memcpy(cellauto->pattern, cellauto->file_buf, cellauto->file_bufsize);
cellauto->pattern[cellauto->file_bufsize] = 0;
 
return init_pattern_from_string(ctx);
}
 
static av_cold int init(AVFilterContext *ctx)
{
CellAutoContext *cellauto = ctx->priv;
int ret;
 
if (!cellauto->w && !cellauto->filename && !cellauto->pattern)
av_opt_set(cellauto, "size", "320x518", 0);
 
if (cellauto->filename && cellauto->pattern) {
av_log(ctx, AV_LOG_ERROR, "Only one of the filename or pattern options can be used\n");
return AVERROR(EINVAL);
}
 
if (cellauto->filename) {
if ((ret = init_pattern_from_file(ctx)) < 0)
return ret;
} else if (cellauto->pattern) {
if ((ret = init_pattern_from_string(ctx)) < 0)
return ret;
} else {
/* fill the first row randomly */
int i;
 
cellauto->buf = av_mallocz(sizeof(uint8_t) * cellauto->w * cellauto->h);
if (!cellauto->buf)
return AVERROR(ENOMEM);
if (cellauto->random_seed == -1)
cellauto->random_seed = av_get_random_seed();
 
av_lfg_init(&cellauto->lfg, cellauto->random_seed);
 
for (i = 0; i < cellauto->w; i++) {
double r = (double)av_lfg_get(&cellauto->lfg) / UINT32_MAX;
if (r <= cellauto->random_fill_ratio)
cellauto->buf[i] = 1;
}
}
 
av_log(ctx, AV_LOG_VERBOSE,
"s:%dx%d r:%d/%d rule:%d stitch:%d scroll:%d full:%d seed:%u\n",
cellauto->w, cellauto->h, cellauto->frame_rate.num, cellauto->frame_rate.den,
cellauto->rule, cellauto->stitch, cellauto->scroll, cellauto->start_full,
cellauto->random_seed);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
CellAutoContext *cellauto = ctx->priv;
 
av_file_unmap(cellauto->file_buf, cellauto->file_bufsize);
av_freep(&cellauto->buf);
av_freep(&cellauto->pattern);
}
 
static int config_props(AVFilterLink *outlink)
{
CellAutoContext *cellauto = outlink->src->priv;
 
outlink->w = cellauto->w;
outlink->h = cellauto->h;
outlink->time_base = av_inv_q(cellauto->frame_rate);
 
return 0;
}
 
static void evolve(AVFilterContext *ctx)
{
CellAutoContext *cellauto = ctx->priv;
int i, v, pos[3];
uint8_t *row, *prev_row = cellauto->buf + cellauto->buf_row_idx * cellauto->w;
enum { NW, N, NE };
 
cellauto->buf_prev_row_idx = cellauto->buf_row_idx;
cellauto->buf_row_idx = cellauto->buf_row_idx == cellauto->h-1 ? 0 : cellauto->buf_row_idx+1;
row = cellauto->buf + cellauto->w * cellauto->buf_row_idx;
 
for (i = 0; i < cellauto->w; i++) {
if (cellauto->stitch) {
pos[NW] = i-1 < 0 ? cellauto->w-1 : i-1;
pos[N] = i;
pos[NE] = i+1 == cellauto->w ? 0 : i+1;
v = prev_row[pos[NW]]<<2 | prev_row[pos[N]]<<1 | prev_row[pos[NE]];
} else {
v = 0;
v|= i-1 >= 0 ? prev_row[i-1]<<2 : 0;
v|= prev_row[i ]<<1 ;
v|= i+1 < cellauto->w ? prev_row[i+1] : 0;
}
row[i] = !!(cellauto->rule & (1<<v));
av_dlog(ctx, "i:%d context:%c%c%c -> cell:%d\n", i,
v&4?'@':' ', v&2?'@':' ', v&1?'@':' ', row[i]);
}
 
cellauto->generation++;
}
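/* Worked example for the default rule 110 (binary 01101110): a neighborhood
 * of NW=1, N=1, NE=0 gives v = 1<<2 | 1<<1 = 6, and bit 6 of 110 is 1, so
 * the new cell is alive; the neighborhood 111 gives v = 7, and bit 7 of 110
 * is 0, so the cell dies. */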
 
static void fill_picture(AVFilterContext *ctx, AVFrame *picref)
{
CellAutoContext *cellauto = ctx->priv;
int i, j, k, row_idx = 0;
uint8_t *p0 = picref->data[0];
 
if (cellauto->scroll && cellauto->generation >= cellauto->h)
/* show the oldest row on top */
row_idx = (cellauto->buf_row_idx + 1) % cellauto->h;
 
/* fill the output picture with the whole buffer */
for (i = 0; i < cellauto->h; i++) {
uint8_t byte = 0;
uint8_t *row = cellauto->buf + row_idx*cellauto->w;
uint8_t *p = p0;
for (k = 0, j = 0; j < cellauto->w; j++) {
byte |= row[j]<<(7-k++);
if (k==8 || j == cellauto->w-1) {
k = 0;
*p++ = byte;
byte = 0;
}
}
row_idx = (row_idx + 1) % cellauto->h;
p0 += picref->linesize[0];
}
}
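/* each output byte above packs eight cells MSB-first, matching the
 * 1-bit-per-pixel AV_PIX_FMT_MONOBLACK layout (bit set = white, so live
 * cells render as white) */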
 
static int request_frame(AVFilterLink *outlink)
{
CellAutoContext *cellauto = outlink->src->priv;
AVFrame *picref = ff_get_video_buffer(outlink, cellauto->w, cellauto->h);
if (!picref)
return AVERROR(ENOMEM);
picref->sample_aspect_ratio = (AVRational) {1, 1};
if (cellauto->generation == 0 && cellauto->start_full) {
int i;
for (i = 0; i < cellauto->h-1; i++)
evolve(outlink->src);
}
fill_picture(outlink->src, picref);
evolve(outlink->src);
 
picref->pts = cellauto->pts++;
 
#ifdef DEBUG
show_cellauto_row(outlink->src);
#endif
return ff_filter_frame(outlink, picref);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_MONOBLACK, AV_PIX_FMT_NONE };
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static const AVFilterPad cellauto_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_vsrc_cellauto = {
.name = "cellauto",
.description = NULL_IF_CONFIG_SMALL("Create pattern generated by an elementary cellular automaton."),
.priv_size = sizeof(CellAutoContext),
.priv_class = &cellauto_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = NULL,
.outputs = cellauto_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vsrc_life.c
0,0 → 1,450
/*
* Copyright (c) Stefano Sabatini 2010
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* life video source, based on John Conway's Game of Life
*/
 
/* #define DEBUG */
 
#include "libavutil/file.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/lfg.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/random_seed.h"
#include "libavutil/avstring.h"
#include "avfilter.h"
#include "internal.h"
#include "formats.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
int w, h;
char *filename;
char *rule_str;
uint8_t *file_buf;
size_t file_bufsize;
 
/**
* The two grid state buffers.
*
* A 0xFF (ALIVE_CELL) value means the cell is alive (or newborn), while
* the decreasing values from 0xFE to 0 mean the cell is dead; the range
* of values is used for the slow death effect, or mold (0xFE means dead,
* 0xFD means very dead, 0xFC means very very dead... and 0x00 means
* definitely dead/mold).
*/
uint8_t *buf[2];
 
uint8_t buf_idx;
uint16_t stay_rule; ///< encode the behavior for filled cells
uint16_t born_rule; ///< encode the behavior for empty cells
uint64_t pts;
AVRational frame_rate;
double random_fill_ratio;
uint32_t random_seed;
int stitch;
int mold;
uint8_t life_color[4];
uint8_t death_color[4];
uint8_t mold_color[4];
AVLFG lfg;
void (*draw)(AVFilterContext*, AVFrame*);
} LifeContext;
 
#define ALIVE_CELL 0xFF
#define OFFSET(x) offsetof(LifeContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption life_options[] = {
{ "filename", "set source file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "f", "set source file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
{ "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
{ "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
{ "rule", "set rule", OFFSET(rule_str), AV_OPT_TYPE_STRING, {.str = "B3/S23"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "random_fill_ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1/M_PHI}, 0, 1, FLAGS },
{ "ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1/M_PHI}, 0, 1, FLAGS },
{ "random_seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, UINT32_MAX, FLAGS },
{ "seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, UINT32_MAX, FLAGS },
{ "stitch", "stitch boundaries", OFFSET(stitch), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
{ "mold", "set mold speed for dead cells", OFFSET(mold), AV_OPT_TYPE_INT, {.i64=0}, 0, 0xFF, FLAGS },
{ "life_color", "set life color", OFFSET( life_color), AV_OPT_TYPE_COLOR, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "death_color", "set death color", OFFSET(death_color), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "mold_color", "set mold color", OFFSET( mold_color), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(life);
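/* Example invocation through the ffmpeg CLI (assuming a build with this
 * source enabled), using the options defined above:
 *   ffmpeg -f lavfi -i life=size=320x240:mold=10:life_color=red -t 10 out.mkv
 * runs a randomly seeded 320x240 grid with red live cells and a slow
 * mold effect on dead ones. */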
 
static int parse_rule(uint16_t *born_rule, uint16_t *stay_rule,
const char *rule_str, void *log_ctx)
{
char *tail;
const char *p = rule_str;
*born_rule = 0;
*stay_rule = 0;
 
if (strchr("bBsS", *p)) {
/* parse rule as a Born / Stay Alive code, see
* http://en.wikipedia.org/wiki/Conway%27s_Game_of_Life */
do {
uint16_t *rule = (*p == 'b' || *p == 'B') ? born_rule : stay_rule;
p++;
while (*p >= '0' && *p <= '8') {
*rule += 1<<(*p - '0');
p++;
}
if (*p != '/')
break;
p++;
} while (strchr("bBsS", *p));
 
if (*p)
goto error;
} else {
/* parse rule as a number, expressed in the form BORN|(STAY<<9),
* where BORN and STAY encode the corresponding 9-bit rules */
long int rule = strtol(rule_str, &tail, 10);
if (*tail)
goto error;
*born_rule = ((1<<9)-1) & rule;
*stay_rule = rule >> 9;
}
 
return 0;
 
error:
av_log(log_ctx, AV_LOG_ERROR, "Invalid rule code '%s' provided\n", rule_str);
return AVERROR(EINVAL);
}
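/* For example, the default rule "B3/S23" yields born_rule = 1<<3 = 8 and
 * stay_rule = (1<<2)|(1<<3) = 12; the equivalent numeric form is
 * 8 | (12 << 9) = 6152. */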
 
#ifdef DEBUG
static void show_life_grid(AVFilterContext *ctx)
{
LifeContext *life = ctx->priv;
int i, j;
 
char *line = av_malloc(life->w + 1);
if (!line)
return;
for (i = 0; i < life->h; i++) {
for (j = 0; j < life->w; j++)
line[j] = life->buf[life->buf_idx][i*life->w + j] == ALIVE_CELL ? '@' : ' ';
line[j] = 0;
av_log(ctx, AV_LOG_DEBUG, "%3d: %s\n", i, line);
}
av_free(line);
}
#endif
 
static int init_pattern_from_file(AVFilterContext *ctx)
{
LifeContext *life = ctx->priv;
char *p;
int ret, i, i0, j, h = 0, w, max_w = 0;
 
if ((ret = av_file_map(life->filename, &life->file_buf, &life->file_bufsize,
0, ctx)) < 0)
return ret;
av_freep(&life->filename);
 
/* prescan file to get the number of lines and the maximum width */
w = 0;
for (i = 0; i < life->file_bufsize; i++) {
if (life->file_buf[i] == '\n') {
h++; max_w = FFMAX(w, max_w); w = 0;
} else {
w++;
}
}
av_log(ctx, AV_LOG_DEBUG, "h:%d max_w:%d\n", h, max_w);
 
if (life->w) {
if (max_w > life->w || h > life->h) {
av_log(ctx, AV_LOG_ERROR,
"The specified size is %dx%d which cannot contain the provided file size of %dx%d\n",
life->w, life->h, max_w, h);
return AVERROR(EINVAL);
}
} else {
/* size was not specified, set it to size of the grid */
life->w = max_w;
life->h = h;
}
 
if (!(life->buf[0] = av_calloc(life->h * life->w, sizeof(*life->buf[0]))) ||
!(life->buf[1] = av_calloc(life->h * life->w, sizeof(*life->buf[1])))) {
av_free(life->buf[0]);
av_free(life->buf[1]);
return AVERROR(ENOMEM);
}
 
/* fill buf[0] */
p = life->file_buf;
for (i0 = 0, i = (life->h - h)/2; i0 < h; i0++, i++) {
for (j = (life->w - max_w)/2;; j++) {
av_log(ctx, AV_LOG_DEBUG, "%d:%d %c\n", i, j, *p == '\n' ? 'N' : *p);
if (*p == '\n') {
p++; break;
} else
life->buf[0][i*life->w + j] = av_isgraph(*(p++)) ? ALIVE_CELL : 0;
}
}
life->buf_idx = 0;
 
return 0;
}
 
static av_cold int init(AVFilterContext *ctx)
{
LifeContext *life = ctx->priv;
int ret;
 
if (!life->w && !life->filename)
av_opt_set(life, "size", "320x240", 0);
 
if ((ret = parse_rule(&life->born_rule, &life->stay_rule, life->rule_str, ctx)) < 0)
return ret;
 
if (!life->mold && memcmp(life->mold_color, "\x00\x00\x00", 3))
av_log(ctx, AV_LOG_WARNING,
"Mold color is set while mold isn't, ignoring the color.\n");
 
if (!life->filename) {
/* fill the grid randomly */
int i;
 
if (!(life->buf[0] = av_calloc(life->h * life->w, sizeof(*life->buf[0]))) ||
!(life->buf[1] = av_calloc(life->h * life->w, sizeof(*life->buf[1])))) {
av_free(life->buf[0]);
av_free(life->buf[1]);
return AVERROR(ENOMEM);
}
if (life->random_seed == -1)
life->random_seed = av_get_random_seed();
 
av_lfg_init(&life->lfg, life->random_seed);
 
for (i = 0; i < life->w * life->h; i++) {
double r = (double)av_lfg_get(&life->lfg) / UINT32_MAX;
if (r <= life->random_fill_ratio)
life->buf[0][i] = ALIVE_CELL;
}
life->buf_idx = 0;
} else {
if ((ret = init_pattern_from_file(ctx)) < 0)
return ret;
}
 
av_log(ctx, AV_LOG_VERBOSE,
"s:%dx%d r:%d/%d rule:%s stay_rule:%d born_rule:%d stitch:%d seed:%u\n",
life->w, life->h, life->frame_rate.num, life->frame_rate.den,
life->rule_str, life->stay_rule, life->born_rule, life->stitch,
life->random_seed);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
LifeContext *life = ctx->priv;
 
av_file_unmap(life->file_buf, life->file_bufsize);
av_freep(&life->rule_str);
av_freep(&life->buf[0]);
av_freep(&life->buf[1]);
}
 
static int config_props(AVFilterLink *outlink)
{
LifeContext *life = outlink->src->priv;
 
outlink->w = life->w;
outlink->h = life->h;
outlink->time_base = av_inv_q(life->frame_rate);
 
return 0;
}
 
static void evolve(AVFilterContext *ctx)
{
LifeContext *life = ctx->priv;
int i, j;
uint8_t *oldbuf = life->buf[ life->buf_idx];
uint8_t *newbuf = life->buf[!life->buf_idx];
 
enum { NW, N, NE, W, E, SW, S, SE };
 
/* evolve the grid */
for (i = 0; i < life->h; i++) {
for (j = 0; j < life->w; j++) {
int pos[8][2], n, alive, cell;
if (life->stitch) {
pos[NW][0] = (i-1) < 0 ? life->h-1 : i-1; pos[NW][1] = (j-1) < 0 ? life->w-1 : j-1;
pos[N ][0] = (i-1) < 0 ? life->h-1 : i-1; pos[N ][1] = j ;
pos[NE][0] = (i-1) < 0 ? life->h-1 : i-1; pos[NE][1] = (j+1) == life->w ? 0 : j+1;
pos[W ][0] = i ; pos[W ][1] = (j-1) < 0 ? life->w-1 : j-1;
pos[E ][0] = i ; pos[E ][1] = (j+1) == life->w ? 0 : j+1;
pos[SW][0] = (i+1) == life->h ? 0 : i+1; pos[SW][1] = (j-1) < 0 ? life->w-1 : j-1;
pos[S ][0] = (i+1) == life->h ? 0 : i+1; pos[S ][1] = j ;
pos[SE][0] = (i+1) == life->h ? 0 : i+1; pos[SE][1] = (j+1) == life->w ? 0 : j+1;
} else {
pos[NW][0] = (i-1) < 0 ? -1 : i-1; pos[NW][1] = (j-1) < 0 ? -1 : j-1;
pos[N ][0] = (i-1) < 0 ? -1 : i-1; pos[N ][1] = j ;
pos[NE][0] = (i-1) < 0 ? -1 : i-1; pos[NE][1] = (j+1) == life->w ? -1 : j+1;
pos[W ][0] = i ; pos[W ][1] = (j-1) < 0 ? -1 : j-1;
pos[E ][0] = i ; pos[E ][1] = (j+1) == life->w ? -1 : j+1;
pos[SW][0] = (i+1) == life->h ? -1 : i+1; pos[SW][1] = (j-1) < 0 ? -1 : j-1;
pos[S ][0] = (i+1) == life->h ? -1 : i+1; pos[S ][1] = j ;
pos[SE][0] = (i+1) == life->h ? -1 : i+1; pos[SE][1] = (j+1) == life->w ? -1 : j+1;
}
 
/* compute the number of live neighbor cells */
n = (pos[NW][0] == -1 || pos[NW][1] == -1 ? 0 : oldbuf[pos[NW][0]*life->w + pos[NW][1]] == ALIVE_CELL) +
(pos[N ][0] == -1 || pos[N ][1] == -1 ? 0 : oldbuf[pos[N ][0]*life->w + pos[N ][1]] == ALIVE_CELL) +
(pos[NE][0] == -1 || pos[NE][1] == -1 ? 0 : oldbuf[pos[NE][0]*life->w + pos[NE][1]] == ALIVE_CELL) +
(pos[W ][0] == -1 || pos[W ][1] == -1 ? 0 : oldbuf[pos[W ][0]*life->w + pos[W ][1]] == ALIVE_CELL) +
(pos[E ][0] == -1 || pos[E ][1] == -1 ? 0 : oldbuf[pos[E ][0]*life->w + pos[E ][1]] == ALIVE_CELL) +
(pos[SW][0] == -1 || pos[SW][1] == -1 ? 0 : oldbuf[pos[SW][0]*life->w + pos[SW][1]] == ALIVE_CELL) +
(pos[S ][0] == -1 || pos[S ][1] == -1 ? 0 : oldbuf[pos[S ][0]*life->w + pos[S ][1]] == ALIVE_CELL) +
(pos[SE][0] == -1 || pos[SE][1] == -1 ? 0 : oldbuf[pos[SE][0]*life->w + pos[SE][1]] == ALIVE_CELL);
cell = oldbuf[i*life->w + j];
alive = 1<<n & (cell == ALIVE_CELL ? life->stay_rule : life->born_rule);
if (alive) *newbuf = ALIVE_CELL; // new cell is alive
else if (cell) *newbuf = cell - 1; // new cell is dead and in the process of mold
else *newbuf = 0; // new cell is definitely dead
av_dlog(ctx, "i:%d j:%d live_neighbors:%d cell:%d -> cell:%d\n", i, j, n, cell, *newbuf);
newbuf++;
}
}
 
life->buf_idx = !life->buf_idx;
}
 
static void fill_picture_monoblack(AVFilterContext *ctx, AVFrame *picref)
{
LifeContext *life = ctx->priv;
uint8_t *buf = life->buf[life->buf_idx];
int i, j, k;
 
/* fill the output picture with the old grid buffer */
for (i = 0; i < life->h; i++) {
uint8_t byte = 0;
uint8_t *p = picref->data[0] + i * picref->linesize[0];
for (k = 0, j = 0; j < life->w; j++) {
byte |= (buf[i*life->w+j] == ALIVE_CELL)<<(7-k++);
if (k==8 || j == life->w-1) {
k = 0;
*p++ = byte;
byte = 0;
}
}
}
}
 
// divide by 255 and round to nearest
// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
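/* e.g. FAST_DIV255(510) = ((510 + 128) * 257) >> 16 = 163966 >> 16 = 2,
 * matching round(510 / 255) = 2 */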
 
static void fill_picture_rgb(AVFilterContext *ctx, AVFrame *picref)
{
LifeContext *life = ctx->priv;
uint8_t *buf = life->buf[life->buf_idx];
int i, j;
 
/* fill the output picture with the old grid buffer */
for (i = 0; i < life->h; i++) {
uint8_t *p = picref->data[0] + i * picref->linesize[0];
for (j = 0; j < life->w; j++) {
uint8_t v = buf[i*life->w + j];
if (life->mold && v != ALIVE_CELL) {
const uint8_t *c1 = life-> mold_color;
const uint8_t *c2 = life->death_color;
int death_age = FFMIN((0xff - v) * life->mold, 0xff);
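/* fixed-point blend between death_color and mold_color, approximately
p = c2 + (c1 - c2) * death_age / 255 per channel */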
*p++ = FAST_DIV255((c2[0] << 8) + ((int)c1[0] - (int)c2[0]) * death_age);
*p++ = FAST_DIV255((c2[1] << 8) + ((int)c1[1] - (int)c2[1]) * death_age);
*p++ = FAST_DIV255((c2[2] << 8) + ((int)c1[2] - (int)c2[2]) * death_age);
} else {
const uint8_t *c = v == ALIVE_CELL ? life->life_color : life->death_color;
AV_WB24(p, c[0]<<16 | c[1]<<8 | c[2]);
p += 3;
}
}
}
}
 
static int request_frame(AVFilterLink *outlink)
{
LifeContext *life = outlink->src->priv;
AVFrame *picref = ff_get_video_buffer(outlink, life->w, life->h);
if (!picref)
return AVERROR(ENOMEM);
picref->sample_aspect_ratio = (AVRational) {1, 1};
picref->pts = life->pts++;
 
life->draw(outlink->src, picref);
evolve(outlink->src);
#ifdef DEBUG
show_life_grid(outlink->src);
#endif
return ff_filter_frame(outlink, picref);
}
 
static int query_formats(AVFilterContext *ctx)
{
LifeContext *life = ctx->priv;
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_NONE, AV_PIX_FMT_NONE };
if (life->mold || memcmp(life-> life_color, "\xff\xff\xff", 3)
|| memcmp(life->death_color, "\x00\x00\x00", 3)) {
pix_fmts[0] = AV_PIX_FMT_RGB24;
life->draw = fill_picture_rgb;
} else {
pix_fmts[0] = AV_PIX_FMT_MONOBLACK;
life->draw = fill_picture_monoblack;
}
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static const AVFilterPad life_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_vsrc_life = {
.name = "life",
.description = NULL_IF_CONFIG_SMALL("Create life."),
.priv_size = sizeof(LifeContext),
.priv_class = &life_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = NULL,
.outputs = life_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vsrc_mandelbrot.c
0,0 → 1,430
/*
* Copyright (c) 2011 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* The vsrc_color filter by Stefano Sabatini was used as a template to create
* this filter
*/
 
/**
* @file
* Mandelbrot fractal renderer
*/
 
#include "avfilter.h"
#include "formats.h"
#include "video.h"
#include "internal.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include <float.h>
#include <math.h>
 
#define SQR(a) ((a)*(a))
 
enum Outer{
ITERATION_COUNT,
NORMALIZED_ITERATION_COUNT,
WHITE,
OUTZ,
};
 
enum Inner{
BLACK,
PERIOD,
CONVTIME,
MINCOL,
};
 
typedef struct Point {
double p[2];
uint32_t val;
} Point;
 
typedef struct {
const AVClass *class;
int w, h;
AVRational frame_rate;
uint64_t pts;
int maxiter;
double start_x;
double start_y;
double start_scale;
double end_scale;
double end_pts;
double bailout;
enum Outer outer;
enum Inner inner;
int cache_allocated;
int cache_used;
Point *point_cache;
Point *next_cache;
double (*zyklus)[2];
uint32_t dither;
 
double morphxf;
double morphyf;
double morphamp;
} MBContext;
 
#define OFFSET(x) offsetof(MBContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
static const AVOption mandelbrot_options[] = {
{"size", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, CHAR_MIN, CHAR_MAX, FLAGS },
{"s", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, CHAR_MIN, CHAR_MAX, FLAGS },
{"rate", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
{"r", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
{"maxiter", "set max iterations number", OFFSET(maxiter), AV_OPT_TYPE_INT, {.i64=7189}, 1, INT_MAX, FLAGS },
{"start_x", "set the initial x position", OFFSET(start_x), AV_OPT_TYPE_DOUBLE, {.dbl=-0.743643887037158704752191506114774}, -100, 100, FLAGS },
{"start_y", "set the initial y position", OFFSET(start_y), AV_OPT_TYPE_DOUBLE, {.dbl=-0.131825904205311970493132056385139}, -100, 100, FLAGS },
{"start_scale", "set the initial scale value", OFFSET(start_scale), AV_OPT_TYPE_DOUBLE, {.dbl=3.0}, 0, FLT_MAX, FLAGS },
{"end_scale", "set the terminal scale value", OFFSET(end_scale), AV_OPT_TYPE_DOUBLE, {.dbl=0.3}, 0, FLT_MAX, FLAGS },
{"end_pts", "set the terminal pts value", OFFSET(end_pts), AV_OPT_TYPE_DOUBLE, {.dbl=400}, 0, INT64_MAX, FLAGS },
{"bailout", "set the bailout value", OFFSET(bailout), AV_OPT_TYPE_DOUBLE, {.dbl=10}, 0, FLT_MAX, FLAGS },
{"morphxf", "set morph x frequency", OFFSET(morphxf), AV_OPT_TYPE_DOUBLE, {.dbl=0.01}, -FLT_MAX, FLT_MAX, FLAGS },
{"morphyf", "set morph y frequency", OFFSET(morphyf), AV_OPT_TYPE_DOUBLE, {.dbl=0.0123}, -FLT_MAX, FLT_MAX, FLAGS },
{"morphamp", "set morph amplitude", OFFSET(morphamp), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -FLT_MAX, FLT_MAX, FLAGS },
 
{"outer", "set outer coloring mode", OFFSET(outer), AV_OPT_TYPE_INT, {.i64=NORMALIZED_ITERATION_COUNT}, 0, INT_MAX, FLAGS, "outer" },
{"iteration_count", "set iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" },
{"normalized_iteration_count", "set normalized iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=NORMALIZED_ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" },
{"white", "set white mode", 0, AV_OPT_TYPE_CONST, {.i64=WHITE}, INT_MIN, INT_MAX, FLAGS, "outer" },
{"outz", "set outz mode", 0, AV_OPT_TYPE_CONST, {.i64=OUTZ}, INT_MIN, INT_MAX, FLAGS, "outer" },
 
{"inner", "set inner coloring mode", OFFSET(inner), AV_OPT_TYPE_INT, {.i64=MINCOL}, 0, INT_MAX, FLAGS, "inner" },
{"black", "set black mode", 0, AV_OPT_TYPE_CONST, {.i64=BLACK}, INT_MIN, INT_MAX, FLAGS, "inner"},
{"period", "set period mode", 0, AV_OPT_TYPE_CONST, {.i64=PERIOD}, INT_MIN, INT_MAX, FLAGS, "inner"},
{"convergence", "show time until convergence", 0, AV_OPT_TYPE_CONST, {.i64=CONVTIME}, INT_MIN, INT_MAX, FLAGS, "inner"},
{"mincol", "color based on point closest to the origin of the iterations", 0, AV_OPT_TYPE_CONST, {.i64=MINCOL}, INT_MIN, INT_MAX, FLAGS, "inner"},
 
{NULL},
};
 
AVFILTER_DEFINE_CLASS(mandelbrot);
 
static av_cold int init(AVFilterContext *ctx)
{
MBContext *mb = ctx->priv;
 
mb->bailout *= mb->bailout;
 
mb->start_scale /=mb->h;
mb->end_scale /=mb->h;
 
mb->cache_allocated = mb->w * mb->h * 3;
mb->cache_used = 0;
mb->point_cache= av_malloc(sizeof(*mb->point_cache)*mb->cache_allocated);
mb-> next_cache= av_malloc(sizeof(*mb-> next_cache)*mb->cache_allocated);
mb-> zyklus = av_malloc(sizeof(*mb->zyklus) * (mb->maxiter+16));
 
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
MBContext *mb = ctx->priv;
 
av_freep(&mb->point_cache);
av_freep(&mb-> next_cache);
av_freep(&mb->zyklus);
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_BGR32,
AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int config_props(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->src;
MBContext *mb = ctx->priv;
 
if (av_image_check_size(mb->w, mb->h, 0, ctx) < 0)
return AVERROR(EINVAL);
 
inlink->w = mb->w;
inlink->h = mb->h;
inlink->time_base = av_inv_q(mb->frame_rate);
 
return 0;
}
 
static void fill_from_cache(AVFilterContext *ctx, uint32_t *color, int *in_cidx, int *out_cidx, double py, double scale){
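/* reuse colours computed for earlier frames while zooming: the point
cache is ordered by imaginary coordinate, so walk it and splat every
cached value that falls on the current scanline; skipped while morphing,
since cached orbits are then invalid */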
MBContext *mb = ctx->priv;
if(mb->morphamp)
return;
for(; *in_cidx < mb->cache_used; (*in_cidx)++){
Point *p= &mb->point_cache[*in_cidx];
int x;
if(p->p[1] > py)
break;
x= round((p->p[0] - mb->start_x) / scale + mb->w/2);
if(x<0 || x >= mb->w)
continue;
if(color) color[x] = p->val;
if(out_cidx && *out_cidx < mb->cache_allocated)
mb->next_cache[(*out_cidx)++]= *p;
}
}
 
static int interpol(MBContext *mb, uint32_t *color, int x, int y, int linesize)
{
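/* guess this pixel from already computed neighbours: away from the image
centre, if four surrounding colours agree closely per channel, store their
average and report success so the caller can skip iterating the point (a
cheap solid-guessing shortcut) */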
uint32_t a,b,c,d, i;
uint32_t ipol=0xFF000000;
int dist;
 
if(!x || !y || x+1==mb->w || y+1==mb->h)
return 0;
 
dist= FFMAX(FFABS(x-(mb->w>>1))*mb->h, FFABS(y-(mb->h>>1))*mb->w);
 
if(dist<(mb->w*mb->h>>3))
return 0;
 
a=color[(x+1) + (y+0)*linesize];
b=color[(x-1) + (y+1)*linesize];
c=color[(x+0) + (y+1)*linesize];
d=color[(x+1) + (y+1)*linesize];
 
if(a&&c){
b= color[(x-1) + (y+0)*linesize];
d= color[(x+0) + (y-1)*linesize];
}else if(b&&d){
a= color[(x+1) + (y-1)*linesize];
c= color[(x-1) + (y-1)*linesize];
}else if(c){
d= color[(x+0) + (y-1)*linesize];
a= color[(x-1) + (y+0)*linesize];
b= color[(x+1) + (y-1)*linesize];
}else if(d){
c= color[(x-1) + (y-1)*linesize];
a= color[(x-1) + (y+0)*linesize];
b= color[(x+1) + (y-1)*linesize];
}else
return 0;
 
for(i=0; i<3; i++){
int s= 8*i;
uint8_t ac= a>>s;
uint8_t bc= b>>s;
uint8_t cc= c>>s;
uint8_t dc= d>>s;
int ipolab= (ac + bc);
int ipolcd= (cc + dc);
if(FFABS(ipolab - ipolcd) > 5)
return 0;
if(FFABS(ac-bc)+FFABS(cc-dc) > 20)
return 0;
ipol |= ((ipolab + ipolcd + 2)/4)<<s;
}
color[x + y*linesize]= ipol;
return 1;
}
 
static void draw_mandelbrot(AVFilterContext *ctx, uint32_t *color, int linesize, int64_t pts)
{
MBContext *mb = ctx->priv;
int x,y,i, in_cidx=0, next_cidx=0, tmp_cidx;
double scale= mb->start_scale*pow(mb->end_scale/mb->start_scale, pts/mb->end_pts);
int use_zyklus=0;
fill_from_cache(ctx, NULL, &in_cidx, NULL, mb->start_y+scale*(-mb->h/2-0.5), scale);
tmp_cidx= in_cidx;
memset(color, 0, sizeof(*color)*mb->w);
for(y=0; y<mb->h; y++){
int y1= y+1;
const double ci=mb->start_y+scale*(y-mb->h/2);
fill_from_cache(ctx, NULL, &in_cidx, &next_cidx, ci, scale);
if(y1<mb->h){
memset(color+linesize*y1, 0, sizeof(*color)*mb->w);
fill_from_cache(ctx, color+linesize*y1, &tmp_cidx, NULL, ci + 3*scale/2, scale);
}
 
for(x=0; x<mb->w; x++){
float av_uninit(epsilon);
const double cr=mb->start_x+scale*(x-mb->w/2);
double zr=cr;
double zi=ci;
uint32_t c=0;
double dv= mb->dither / (double)(1LL<<32);
mb->dither= mb->dither*1664525+1013904223;
 
if(color[x + y*linesize] & 0xFF000000)
continue;
if(!mb->morphamp){
if(interpol(mb, color, x, y, linesize)){
if(next_cidx < mb->cache_allocated){
mb->next_cache[next_cidx ].p[0]= cr;
mb->next_cache[next_cidx ].p[1]= ci;
mb->next_cache[next_cidx++].val = color[x + y*linesize];
}
continue;
}
}else{
zr += cos(pts * mb->morphxf) * mb->morphamp;
zi += sin(pts * mb->morphyf) * mb->morphamp;
}
 
use_zyklus= (x==0 || mb->inner!=BLACK ||color[x-1 + y*linesize] == 0xFF000000);
if(use_zyklus)
epsilon= scale*1*sqrt(SQR(x-mb->w/2) + SQR(y-mb->h/2))/mb->w;
 
#define Z_Z2_C(outr,outi,inr,ini)\
outr= inr*inr - ini*ini + cr;\
outi= 2*inr*ini + ci;
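/* Z_Z2_C above is one step z -> z^2 + c in real arithmetic:
(zr + i*zi)^2 + (cr + i*ci) = (zr^2 - zi^2 + cr) + i*(2*zr*zi + ci) */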
 
#define Z_Z2_C_ZYKLUS(outr,outi,inr,ini, Z)\
Z_Z2_C(outr,outi,inr,ini)\
if(use_zyklus){\
if(Z && fabs(mb->zyklus[i>>1][0]-outr)+fabs(mb->zyklus[i>>1][1]-outi) <= epsilon)\
break;\
}\
mb->zyklus[i][0]= outr;\
mb->zyklus[i][1]= outi;\
 
 
 
for(i=0; i<mb->maxiter-8; i++){
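/* 8 unrolled steps per bailout test; each stored orbit point doubles as
a periodicity probe: if the orbit returns within epsilon of the point
from half as many iterations ago (akin to Brent's cycle detection) it
can never escape, so stop early; on bailout, rewind up to 7 steps and
replay from zyklus[] to find the exact escape iteration */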
double t;
Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
i++;
Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
i++;
Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
i++;
Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
i++;
Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
i++;
Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
i++;
Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
i++;
Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
if(zr*zr + zi*zi > mb->bailout){
i-= FFMIN(7, i);
for(; i<mb->maxiter; i++){
zr= mb->zyklus[i][0];
zi= mb->zyklus[i][1];
if(zr*zr + zi*zi > mb->bailout){
switch(mb->outer){
case ITERATION_COUNT:
zr = i;
c = lrintf((sin(zr)+1)*127) + lrintf((sin(zr/1.234)+1)*127)*256*256 + lrintf((sin(zr/100)+1)*127)*256;
break;
case NORMALIZED_ITERATION_COUNT:
zr = i + log2(log(mb->bailout) / log(zr*zr + zi*zi));
c = lrintf((sin(zr)+1)*127) + lrintf((sin(zr/1.234)+1)*127)*256*256 + lrintf((sin(zr/100)+1)*127)*256;
break;
case WHITE:
c = 0xFFFFFF;
break;
case OUTZ:
zr /= mb->bailout;
zi /= mb->bailout;
c = (((int)(zr*128+128))&0xFF)*256 + (((int)(zi*128+128))&0xFF);
}
break;
}
}
break;
}
}
if(!c){
if(mb->inner==PERIOD){
int j;
for(j=i-1; j; j--)
if(SQR(mb->zyklus[j][0]-zr) + SQR(mb->zyklus[j][1]-zi) < epsilon*epsilon*10)
break;
if(j){
c= i-j;
c= ((c<<5)&0xE0) + ((c<<10)&0xE000) + ((c<<15)&0xE00000);
}
}else if(mb->inner==CONVTIME){
c= floor(i*255.0/mb->maxiter+dv)*0x010101;
} else if(mb->inner==MINCOL){
int j;
double closest=9999;
int closest_index=0;
for(j=i-1; j>=0; j--)
if(SQR(mb->zyklus[j][0]) + SQR(mb->zyklus[j][1]) < closest){
closest= SQR(mb->zyklus[j][0]) + SQR(mb->zyklus[j][1]);
closest_index= j;
}
closest = sqrt(closest);
c= lrintf((mb->zyklus[closest_index][0]/closest+1)*127+dv) + lrintf((mb->zyklus[closest_index][1]/closest+1)*127+dv)*256;
}
}
c |= 0xFF000000;
color[x + y*linesize]= c;
if(next_cidx < mb->cache_allocated){
mb->next_cache[next_cidx ].p[0]= cr;
mb->next_cache[next_cidx ].p[1]= ci;
mb->next_cache[next_cidx++].val = c;
}
}
fill_from_cache(ctx, NULL, &in_cidx, &next_cidx, ci + scale/2, scale);
}
FFSWAP(void*, mb->next_cache, mb->point_cache);
mb->cache_used = next_cidx;
if(mb->cache_used == mb->cache_allocated)
av_log(ctx, AV_LOG_INFO, "Mandelbrot cache is too small!\n");
}
 
static int request_frame(AVFilterLink *link)
{
MBContext *mb = link->src->priv;
AVFrame *picref = ff_get_video_buffer(link, mb->w, mb->h);
if (!picref)
return AVERROR(ENOMEM);
 
picref->sample_aspect_ratio = (AVRational) {1, 1};
picref->pts = mb->pts++;
 
draw_mandelbrot(link->src, (uint32_t*)picref->data[0], picref->linesize[0]/4, picref->pts);
return ff_filter_frame(link, picref);
}
 
static const AVFilterPad mandelbrot_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_vsrc_mandelbrot = {
.name = "mandelbrot",
.description = NULL_IF_CONFIG_SMALL("Render a Mandelbrot fractal."),
.priv_size = sizeof(MBContext),
.priv_class = &mandelbrot_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = NULL,
.outputs = mandelbrot_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vsrc_mptestsrc.c
0,0 → 1,361
/*
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
/**
* @file
* MP test source, ported from MPlayer libmpcodecs/vf_test.c
*/
 
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
#include "formats.h"
#include "video.h"
 
#define WIDTH 512
#define HEIGHT 512
 
enum test_type {
TEST_DC_LUMA,
TEST_DC_CHROMA,
TEST_FREQ_LUMA,
TEST_FREQ_CHROMA,
TEST_AMP_LUMA,
TEST_AMP_CHROMA,
TEST_CBP,
TEST_MV,
TEST_RING1,
TEST_RING2,
TEST_ALL,
TEST_NB
};
 
typedef struct MPTestContext {
const AVClass *class;
AVRational frame_rate;
int64_t pts, max_pts, duration;
int hsub, vsub;
enum test_type test;
} MPTestContext;
 
#define OFFSET(x) offsetof(MPTestContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption mptestsrc_options[]= {
{ "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
{ "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
{ "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
{ "d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
 
{ "test", "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" },
{ "t", "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" },
{ "dc_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
{ "dc_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
{ "freq_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
{ "freq_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
{ "amp_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
{ "amp_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
{ "cbp", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_CBP}, INT_MIN, INT_MAX, FLAGS, "test" },
{ "mv", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_MV}, INT_MIN, INT_MAX, FLAGS, "test" },
{ "ring1", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING1}, INT_MIN, INT_MAX, FLAGS, "test" },
{ "ring2", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING2}, INT_MIN, INT_MAX, FLAGS, "test" },
{ "all", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_ALL}, INT_MIN, INT_MAX, FLAGS, "test" },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(mptestsrc);
 
static double c[64];
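/* c[] caches the 8x8 DCT basis, c[i*8+j] = s * cos((M_PI/8) * i * (j+0.5))
with s = sqrt(0.125) for i == 0 and 0.5 otherwise; idct() applies it
separably, first along rows and then along columns */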
 
static void init_idct(void)
{
int i, j;
 
for (i = 0; i < 8; i++) {
double s = i == 0 ? sqrt(0.125) : 0.5;
 
for (j = 0; j < 8; j++)
c[i*8+j] = s*cos((M_PI/8.0)*i*(j+0.5));
}
}
 
static void idct(uint8_t *dst, int dst_linesize, int src[64])
{
int i, j, k;
double tmp[64];
 
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++) {
double sum = 0.0;
 
for (k = 0; k < 8; k++)
sum += c[k*8+j] * src[8*i+k];
 
tmp[8*i+j] = sum;
}
}
 
for (j = 0; j < 8; j++) {
for (i = 0; i < 8; i++) {
double sum = 0.0;
 
for (k = 0; k < 8; k++)
sum += c[k*8+i]*tmp[8*k+j];
 
dst[dst_linesize*i + j] = av_clip((int)floor(sum+0.5), 0, 255);
}
}
}
 
static void draw_dc(uint8_t *dst, int dst_linesize, int color, int w, int h)
{
int x, y;
 
for (y = 0; y < h; y++)
for (x = 0; x < w; x++)
dst[x + y*dst_linesize] = color;
}
 
static void draw_basis(uint8_t *dst, int dst_linesize, int amp, int freq, int dc)
{
int src[64];
 
memset(src, 0, 64*sizeof(int));
src[0] = dc;
if (amp)
src[freq] = amp;
idct(dst, dst_linesize, src);
}
 
static void draw_cbp(uint8_t *dst[3], int dst_linesize[3], int cbp, int amp, int dc)
{
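/* cbp bits 0-3 select the four 8x8 luma blocks (top-left, top-right,
bottom-left, bottom-right), bits 4 and 5 the two chroma planes */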
if (cbp&1) draw_basis(dst[0] , dst_linesize[0], amp, 1, dc);
if (cbp&2) draw_basis(dst[0]+8 , dst_linesize[0], amp, 1, dc);
if (cbp&4) draw_basis(dst[0]+ 8*dst_linesize[0], dst_linesize[0], amp, 1, dc);
if (cbp&8) draw_basis(dst[0]+8+8*dst_linesize[0], dst_linesize[0], amp, 1, dc);
if (cbp&16) draw_basis(dst[1] , dst_linesize[1], amp, 1, dc);
if (cbp&32) draw_basis(dst[2] , dst_linesize[2], amp, 1, dc);
}
 
static void dc_test(uint8_t *dst, int dst_linesize, int w, int h, int off)
{
const int step = FFMAX(256/(w*h/256), 1);
int x, y, color = off;
 
for (y = 0; y < h; y += 16) {
for (x = 0; x < w; x += 16) {
draw_dc(dst + x + y*dst_linesize, dst_linesize, color, 8, 8);
color += step;
}
}
}
 
static void freq_test(uint8_t *dst, int dst_linesize, int off)
{
int x, y, freq = 0;
 
for (y = 0; y < 8*16; y += 16) {
for (x = 0; x < 8*16; x += 16) {
draw_basis(dst + x + y*dst_linesize, dst_linesize, 4*(96+off), freq, 128*8);
freq++;
}
}
}
 
static void amp_test(uint8_t *dst, int dst_linesize, int off)
{
int x, y, amp = off;
 
for (y = 0; y < 16*16; y += 16) {
for (x = 0; x < 16*16; x += 16) {
draw_basis(dst + x + y*dst_linesize, dst_linesize, 4*amp, 1, 128*8);
amp++;
}
}
}
 
static void cbp_test(uint8_t *dst[3], int dst_linesize[3], int off)
{
int x, y, cbp = 0;
 
for (y = 0; y < 16*8; y += 16) {
for (x = 0; x < 16*8; x += 16) {
uint8_t *dst1[3];
dst1[0] = dst[0] + x*2 + y*2*dst_linesize[0];
dst1[1] = dst[1] + x + y* dst_linesize[1];
dst1[2] = dst[2] + x + y* dst_linesize[2];
 
draw_cbp(dst1, dst_linesize, cbp, (64+off)*4, 128*8);
cbp++;
}
}
}
 
static void mv_test(uint8_t *dst, int dst_linesize, int off)
{
int x, y;
 
for (y = 0; y < 16*16; y++) {
if (y&16)
continue;
for (x = 0; x < 16*16; x++)
dst[x + y*dst_linesize] = x + off*8/(y/32+1);
}
}
 
static void ring1_test(uint8_t *dst, int dst_linesize, int off)
{
int x, y, color = 0;
 
for (y = off; y < 16*16; y += 16) {
for (x = off; x < 16*16; x += 16) {
draw_dc(dst + x + y*dst_linesize, dst_linesize, ((x+y)&16) ? color : -color, 16, 16);
color++;
}
}
}
 
static void ring2_test(uint8_t *dst, int dst_linesize, int off)
{
int x, y;
 
for (y = 0; y < 16*16; y++) {
for (x = 0; x < 16*16; x++) {
double d = sqrt((x-8*16)*(x-8*16) + (y-8*16)*(y-8*16));
double r = d/20 - (int)(d/20);
if (r < off/30.0) {
dst[x + y*dst_linesize] = 255;
dst[x + y*dst_linesize+256] = 0;
} else {
dst[x + y*dst_linesize] = x;
dst[x + y*dst_linesize+256] = x;
}
}
}
}
 
static av_cold int init(AVFilterContext *ctx)
{
MPTestContext *test = ctx->priv;
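/* duration is in AV_TIME_BASE units (microseconds); rescale it to frame
ticks, i.e. units of 1/frame_rate seconds */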
 
test->max_pts = test->duration >= 0 ?
av_rescale_q(test->duration, AV_TIME_BASE_Q, av_inv_q(test->frame_rate)) : -1;
test->pts = 0;
 
av_log(ctx, AV_LOG_VERBOSE, "rate:%d/%d duration:%f\n",
test->frame_rate.num, test->frame_rate.den,
test->duration < 0 ? -1 : test->max_pts * av_q2d(av_inv_q(test->frame_rate)));
init_idct();
 
return 0;
}
 
static int config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
MPTestContext *test = ctx->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(outlink->format);
 
test->hsub = pix_desc->log2_chroma_w;
test->vsub = pix_desc->log2_chroma_h;
 
outlink->w = WIDTH;
outlink->h = HEIGHT;
outlink->time_base = av_inv_q(test->frame_rate);
 
return 0;
}
 
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
};
 
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
MPTestContext *test = outlink->src->priv;
AVFrame *picref;
int w = WIDTH, h = HEIGHT,
cw = FF_CEIL_RSHIFT(w, test->hsub), ch = FF_CEIL_RSHIFT(h, test->vsub);
unsigned int frame = outlink->frame_count;
enum test_type tt = test->test;
int i;
 
if (test->max_pts >= 0 && test->pts > test->max_pts)
return AVERROR_EOF;
picref = ff_get_video_buffer(outlink, w, h);
if (!picref)
return AVERROR(ENOMEM);
picref->pts = test->pts++;
 
// clean image
for (i = 0; i < h; i++)
memset(picref->data[0] + i*picref->linesize[0], 0, w);
for (i = 0; i < ch; i++) {
memset(picref->data[1] + i*picref->linesize[1], 128, cw);
memset(picref->data[2] + i*picref->linesize[2], 128, cw);
}
 
if (tt == TEST_ALL && frame%30) /* draw a black frame at the beginning of each test */
tt = (frame/30)%(TEST_NB-1);
 
switch (tt) {
case TEST_DC_LUMA: dc_test(picref->data[0], picref->linesize[0], 256, 256, frame%30); break;
case TEST_DC_CHROMA: dc_test(picref->data[1], picref->linesize[1], 256, 256, frame%30); break;
case TEST_FREQ_LUMA: freq_test(picref->data[0], picref->linesize[0], frame%30); break;
case TEST_FREQ_CHROMA: freq_test(picref->data[1], picref->linesize[1], frame%30); break;
case TEST_AMP_LUMA: amp_test(picref->data[0], picref->linesize[0], frame%30); break;
case TEST_AMP_CHROMA: amp_test(picref->data[1], picref->linesize[1], frame%30); break;
case TEST_CBP: cbp_test(picref->data , picref->linesize , frame%30); break;
case TEST_MV: mv_test(picref->data[0], picref->linesize[0], frame%30); break;
case TEST_RING1: ring1_test(picref->data[0], picref->linesize[0], frame%30); break;
case TEST_RING2: ring2_test(picref->data[0], picref->linesize[0], frame%30); break;
}
 
return ff_filter_frame(outlink, picref);
}
 
static const AVFilterPad mptestsrc_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_vsrc_mptestsrc = {
.name = "mptestsrc",
.description = NULL_IF_CONFIG_SMALL("Generate various test pattern."),
.priv_size = sizeof(MPTestContext),
.priv_class = &mptestsrc_class,
.init = init,
.query_formats = query_formats,
.inputs = NULL,
.outputs = mptestsrc_outputs,
};
/contrib/sdk/sources/ffmpeg/libavfilter/vsrc_testsrc.c
0,0 → 1,1071
/*
* Copyright (c) 2007 Nicolas George <nicolas.george@normalesup.org>
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Misc test sources.
*
* testsrc is based on the test pattern generator demuxer by Nicolas George:
* http://lists.ffmpeg.org/pipermail/ffmpeg-devel/2007-October/037845.html
*
* rgbtestsrc is ported from MPlayer libmpcodecs/vf_rgbtest.c by
* Michael Niedermayer.
*
* smptebars and smptehdbars are by Paul B Mahol.
*/
 
#include <float.h>
 
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
 
typedef struct {
const AVClass *class;
int w, h;
unsigned int nb_frame;
AVRational time_base, frame_rate;
int64_t pts;
int64_t duration; ///< duration expressed in microseconds
AVRational sar; ///< sample aspect ratio
int draw_once; ///< draw only the first frame, always put out the same picture
int draw_once_reset; ///< draw only the first frame or in case of reset
AVFrame *picref; ///< cached reference containing the painted picture
 
void (* fill_picture_fn)(AVFilterContext *ctx, AVFrame *frame);
 
/* only used by testsrc */
int nb_decimals;
 
/* only used by color */
FFDrawContext draw;
FFDrawColor color;
uint8_t color_rgba[4];
 
/* only used by rgbtest */
uint8_t rgba_map[4];
 
/* only used by haldclut */
int level;
} TestSourceContext;
 
#define OFFSET(x) offsetof(TestSourceContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
 
#define SIZE_OPTIONS \
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },\
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },\
 
#define COMMON_OPTIONS_NOSIZE \
{ "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },\
{ "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },\
{ "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },\
{ "d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },\
{ "sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl= 1}, 0, INT_MAX, FLAGS },
 
#define COMMON_OPTIONS SIZE_OPTIONS COMMON_OPTIONS_NOSIZE
 
static const AVOption options[] = {
COMMON_OPTIONS
{ NULL }
};
 
static av_cold int init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
 
test->time_base = av_inv_q(test->frame_rate);
test->nb_frame = 0;
test->pts = 0;
 
av_log(ctx, AV_LOG_VERBOSE, "size:%dx%d rate:%d/%d duration:%f sar:%d/%d\n",
test->w, test->h, test->frame_rate.num, test->frame_rate.den,
test->duration < 0 ? -1 : (double)test->duration/1000000,
test->sar.num, test->sar.den);
return 0;
}
 
static av_cold void uninit(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
 
av_frame_free(&test->picref);
}
 
static int config_props(AVFilterLink *outlink)
{
TestSourceContext *test = outlink->src->priv;
 
outlink->w = test->w;
outlink->h = test->h;
outlink->sample_aspect_ratio = test->sar;
outlink->frame_rate = test->frame_rate;
outlink->time_base = test->time_base;
 
return 0;
}
 
static int request_frame(AVFilterLink *outlink)
{
TestSourceContext *test = outlink->src->priv;
AVFrame *frame;
 
if (test->duration >= 0 &&
av_rescale_q(test->pts, test->time_base, AV_TIME_BASE_Q) >= test->duration)
return AVERROR_EOF;
 
if (test->draw_once) {
if (test->draw_once_reset) {
av_frame_free(&test->picref);
test->draw_once_reset = 0;
}
if (!test->picref) {
test->picref =
ff_get_video_buffer(outlink, test->w, test->h);
if (!test->picref)
return AVERROR(ENOMEM);
test->fill_picture_fn(outlink->src, test->picref);
}
frame = av_frame_clone(test->picref);
} else
frame = ff_get_video_buffer(outlink, test->w, test->h);
 
if (!frame)
return AVERROR(ENOMEM);
frame->pts = test->pts;
frame->key_frame = 1;
frame->interlaced_frame = 0;
frame->pict_type = AV_PICTURE_TYPE_I;
frame->sample_aspect_ratio = test->sar;
if (!test->draw_once)
test->fill_picture_fn(outlink->src, frame);
 
test->pts++;
test->nb_frame++;
 
return ff_filter_frame(outlink, frame);
}
 
#if CONFIG_COLOR_FILTER
 
static const AVOption color_options[] = {
{ "color", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "c", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
COMMON_OPTIONS
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(color);
 
static void color_fill_picture(AVFilterContext *ctx, AVFrame *picref)
{
TestSourceContext *test = ctx->priv;
ff_fill_rectangle(&test->draw, &test->color,
picref->data, picref->linesize,
0, 0, test->w, test->h);
}
 
static av_cold int color_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
test->fill_picture_fn = color_fill_picture;
test->draw_once = 1;
return init(ctx);
}
 
static int color_query_formats(AVFilterContext *ctx)
{
ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
return 0;
}
 
static int color_config_props(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->src;
TestSourceContext *test = ctx->priv;
int ret;
 
ff_draw_init(&test->draw, inlink->format, 0);
ff_draw_color(&test->draw, &test->color, test->color_rgba);
 
test->w = ff_draw_round_to_sub(&test->draw, 0, -1, test->w);
test->h = ff_draw_round_to_sub(&test->draw, 1, -1, test->h);
if (av_image_check_size(test->w, test->h, 0, ctx) < 0)
return AVERROR(EINVAL);
 
if ((ret = config_props(inlink)) < 0)
return ret;
 
return 0;
}
 
static int color_process_command(AVFilterContext *ctx, const char *cmd, const char *args,
char *res, int res_len, int flags)
{
TestSourceContext *test = ctx->priv;
int ret;
 
if (!strcmp(cmd, "color") || !strcmp(cmd, "c")) {
uint8_t color_rgba[4];
 
ret = av_parse_color(color_rgba, args, -1, ctx);
if (ret < 0)
return ret;
 
memcpy(test->color_rgba, color_rgba, sizeof(color_rgba));
ff_draw_color(&test->draw, &test->color, test->color_rgba);
test->draw_once_reset = 1;
return 0;
}
 
return AVERROR(ENOSYS);
}
 
static const AVFilterPad color_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = color_config_props,
},
{ NULL }
};
 
AVFilter avfilter_vsrc_color = {
.name = "color",
.description = NULL_IF_CONFIG_SMALL("Provide an uniformly colored input."),
.priv_class = &color_class,
.priv_size = sizeof(TestSourceContext),
.init = color_init,
.uninit = uninit,
.query_formats = color_query_formats,
.inputs = NULL,
.outputs = color_outputs,
.process_command = color_process_command,
};
 
#endif /* CONFIG_COLOR_FILTER */
 
#if CONFIG_HALDCLUTSRC_FILTER
 
static const AVOption haldclutsrc_options[] = {
{ "level", "set level", OFFSET(level), AV_OPT_TYPE_INT, {.i64 = 6}, 2, 8, FLAGS },
COMMON_OPTIONS_NOSIZE
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(haldclutsrc);
 
static void haldclutsrc_fill_picture(AVFilterContext *ctx, AVFrame *frame)
{
int i, j, k, x = 0, y = 0, is16bit = 0, step;
uint32_t alpha = 0;
const TestSourceContext *hc = ctx->priv;
int level = hc->level;
float scale;
const int w = frame->width;
const int h = frame->height;
const uint8_t *data = frame->data[0];
const int linesize = frame->linesize[0];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
uint8_t rgba_map[4];
 
av_assert0(w == h && w == level*level*level);
 
ff_fill_rgba_map(rgba_map, frame->format);
 
switch (frame->format) {
case AV_PIX_FMT_RGB48:
case AV_PIX_FMT_BGR48:
case AV_PIX_FMT_RGBA64:
case AV_PIX_FMT_BGRA64:
is16bit = 1;
alpha = 0xffff;
break;
case AV_PIX_FMT_RGBA:
case AV_PIX_FMT_BGRA:
case AV_PIX_FMT_ARGB:
case AV_PIX_FMT_ABGR:
alpha = 0xff;
break;
}
 
step = av_get_padded_bits_per_pixel(desc) >> (3 + is16bit);
scale = ((float)(1 << (8*(is16bit+1))) - 1) / (level*level - 1);
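/* an identity Hald CLUT of level N is an N^3 x N^3 image whose pixels
encode their own (r,g,b) lattice index: red varies fastest, then green,
then blue, each channel stepping through level^2 values scaled to the
full sample range */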
 
#define LOAD_CLUT(nbits) do { \
uint##nbits##_t *dst = ((uint##nbits##_t *)(data + y*linesize)) + x*step; \
dst[rgba_map[0]] = av_clip_uint##nbits(i * scale); \
dst[rgba_map[1]] = av_clip_uint##nbits(j * scale); \
dst[rgba_map[2]] = av_clip_uint##nbits(k * scale); \
if (step == 4) \
dst[rgba_map[3]] = alpha; \
} while (0)
 
level *= level;
for (k = 0; k < level; k++) {
for (j = 0; j < level; j++) {
for (i = 0; i < level; i++) {
if (!is16bit)
LOAD_CLUT(8);
else
LOAD_CLUT(16);
if (++x == w) {
x = 0;
y++;
}
}
}
}
}
 
static av_cold int haldclutsrc_init(AVFilterContext *ctx)
{
TestSourceContext *hc = ctx->priv;
hc->fill_picture_fn = haldclutsrc_fill_picture;
hc->draw_once = 1;
return init(ctx);
}
 
static int haldclutsrc_query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
AV_PIX_FMT_NONE,
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int haldclutsrc_config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
TestSourceContext *hc = ctx->priv;
 
hc->w = hc->h = hc->level * hc->level * hc->level;
return config_props(outlink);
}
 
static const AVFilterPad haldclutsrc_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = haldclutsrc_config_props,
},
{ NULL }
};
 
AVFilter avfilter_vsrc_haldclutsrc = {
.name = "haldclutsrc",
.description = NULL_IF_CONFIG_SMALL("Provide an identity Hald CLUT."),
.priv_class = &haldclutsrc_class,
.priv_size = sizeof(TestSourceContext),
.init = haldclutsrc_init,
.uninit = uninit,
.query_formats = haldclutsrc_query_formats,
.inputs = NULL,
.outputs = haldclutsrc_outputs,
};
#endif /* CONFIG_HALDCLUTSRC_FILTER */
 
#if CONFIG_NULLSRC_FILTER
 
#define nullsrc_options options
AVFILTER_DEFINE_CLASS(nullsrc);
 
static void nullsrc_fill_picture(AVFilterContext *ctx, AVFrame *picref) { }
 
static av_cold int nullsrc_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
 
test->fill_picture_fn = nullsrc_fill_picture;
return init(ctx);
}
 
static const AVFilterPad nullsrc_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_props,
},
{ NULL },
};
 
AVFilter avfilter_vsrc_nullsrc = {
.name = "nullsrc",
.description = NULL_IF_CONFIG_SMALL("Null video source, return unprocessed video frames."),
.init = nullsrc_init,
.uninit = uninit,
.priv_size = sizeof(TestSourceContext),
.priv_class = &nullsrc_class,
.inputs = NULL,
.outputs = nullsrc_outputs,
};
 
#endif /* CONFIG_NULLSRC_FILTER */
 
#if CONFIG_TESTSRC_FILTER
 
static const AVOption testsrc_options[] = {
COMMON_OPTIONS
{ "decimals", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, 0, 17, FLAGS },
{ "n", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, 0, 17, FLAGS },
{ NULL }
};
 
AVFILTER_DEFINE_CLASS(testsrc);
 
/**
* Fill a rectangle with value val.
*
* @param val the RGB value to set
* @param dst pointer to the destination buffer to fill
* @param dst_linesize linesize of destination
* @param segment_width width of the segment
* @param x horizontal coordinate where to draw the rectangle in the destination buffer
* @param y vertical coordinate where to draw the rectangle in the destination buffer
* @param w width of the rectangle to draw, expressed as a number of segment_width units
* @param h height of the rectangle to draw, expressed as a number of segment_width units
*/
static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, int segment_width,
int x, int y, int w, int h)
{
int i;
int step = 3;
 
dst += segment_width * (step * x + y * dst_linesize);
w *= segment_width * step;
h *= segment_width;
for (i = 0; i < h; i++) {
memset(dst, val, w);
dst += dst_linesize;
}
}
 
static void draw_digit(int digit, uint8_t *dst, int dst_linesize,
int segment_width)
{
#define TOP_HBAR 1
#define MID_HBAR 2
#define BOT_HBAR 4
#define LEFT_TOP_VBAR 8
#define LEFT_BOT_VBAR 16
#define RIGHT_TOP_VBAR 32
#define RIGHT_BOT_VBAR 64
struct {
int x, y, w, h;
} segments[] = {
{ 1, 0, 5, 1 }, /* TOP_HBAR */
{ 1, 6, 5, 1 }, /* MID_HBAR */
{ 1, 12, 5, 1 }, /* BOT_HBAR */
{ 0, 1, 1, 5 }, /* LEFT_TOP_VBAR */
{ 0, 7, 1, 5 }, /* LEFT_BOT_VBAR */
{ 6, 1, 1, 5 }, /* RIGHT_TOP_VBAR */
{ 6, 7, 1, 5 } /* RIGHT_BOT_VBAR */
};
static const unsigned char masks[10] = {
/* 0 */ TOP_HBAR |BOT_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR|RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 1 */ RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 2 */ TOP_HBAR|MID_HBAR|BOT_HBAR|LEFT_BOT_VBAR |RIGHT_TOP_VBAR,
/* 3 */ TOP_HBAR|MID_HBAR|BOT_HBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 4 */ MID_HBAR |LEFT_TOP_VBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 5 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR |RIGHT_BOT_VBAR,
/* 6 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR |RIGHT_BOT_VBAR,
/* 7 */ TOP_HBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 8 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR|RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 9 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
};
unsigned mask = masks[digit];
int i;
 
draw_rectangle(0, dst, dst_linesize, segment_width, 0, 0, 8, 13);
for (i = 0; i < FF_ARRAY_ELEMS(segments); i++)
if (mask & (1<<i))
draw_rectangle(255, dst, dst_linesize, segment_width,
segments[i].x, segments[i].y, segments[i].w, segments[i].h);
}
 
#define GRADIENT_SIZE (6 * 256)
 
static void test_fill_picture(AVFilterContext *ctx, AVFrame *frame)
{
TestSourceContext *test = ctx->priv;
uint8_t *p, *p0;
int x, y;
int color, color_rest;
int icolor;
int radius;
int quad0, quad;
int dquad_x, dquad_y;
int grad, dgrad, rgrad, drgrad;
int seg_size;
int second;
int i;
uint8_t *data = frame->data[0];
int width = frame->width;
int height = frame->height;
 
/* draw colored bars and circle */
radius = (width + height) / 4;
quad0 = width * width / 4 + height * height / 4 - radius * radius;
dquad_y = 1 - height;
p0 = data;
for (y = 0; y < height; y++) {
p = p0;
color = 0;
color_rest = 0;
quad = quad0;
dquad_x = 1 - width;
for (x = 0; x < width; x++) {
icolor = color;
if (quad < 0)
icolor ^= 7;
quad += dquad_x;
dquad_x += 2;
*(p++) = icolor & 1 ? 255 : 0;
*(p++) = icolor & 2 ? 255 : 0;
*(p++) = icolor & 4 ? 255 : 0;
color_rest += 8;
if (color_rest >= width) {
color_rest -= width;
color++;
}
}
quad0 += dquad_y;
dquad_y += 2;
p0 += frame->linesize[0];
}
 
/* draw sliding color line */
p0 = p = data + frame->linesize[0] * (height * 3/4);
grad = (256 * test->nb_frame * test->time_base.num / test->time_base.den) %
GRADIENT_SIZE;
rgrad = 0;
dgrad = GRADIENT_SIZE / width;
drgrad = GRADIENT_SIZE % width;
for (x = 0; x < width; x++) {
*(p++) =
grad < 256 || grad >= 5 * 256 ? 255 :
grad >= 2 * 256 && grad < 4 * 256 ? 0 :
grad < 2 * 256 ? 2 * 256 - 1 - grad : grad - 4 * 256;
*(p++) =
grad >= 4 * 256 ? 0 :
grad >= 1 * 256 && grad < 3 * 256 ? 255 :
grad < 1 * 256 ? grad : 4 * 256 - 1 - grad;
*(p++) =
grad < 2 * 256 ? 0 :
grad >= 3 * 256 && grad < 5 * 256 ? 255 :
grad < 3 * 256 ? grad - 2 * 256 : 6 * 256 - 1 - grad;
grad += dgrad;
rgrad += drgrad;
if (rgrad >= GRADIENT_SIZE) {
grad++;
rgrad -= GRADIENT_SIZE;
}
if (grad >= GRADIENT_SIZE)
grad -= GRADIENT_SIZE;
}
p = p0;
for (y = height / 8; y > 0; y--) {
memcpy(p+frame->linesize[0], p, 3 * width);
p += frame->linesize[0];
}
 
/* draw digits */
seg_size = width / 80;
if (seg_size >= 1 && height >= 13 * seg_size) {
int64_t p10decimals = 1;
double time = av_q2d(test->time_base) * test->nb_frame *
pow(10, test->nb_decimals);
if (time >= INT_MAX)
return;
 
for (x = 0; x < test->nb_decimals; x++)
p10decimals *= 10;
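/* current time expressed in units of 10^-nb_decimals seconds, truncated
toward zero */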
 
second = av_rescale_rnd(test->nb_frame * test->time_base.num, p10decimals, test->time_base.den, AV_ROUND_ZERO);
x = width - (width - seg_size * 64) / 2;
y = (height - seg_size * 13) / 2;
p = data + (x*3 + y * frame->linesize[0]);
for (i = 0; i < 8; i++) {
p -= 3 * 8 * seg_size;
draw_digit(second % 10, p, frame->linesize[0], seg_size);
second /= 10;
if (second == 0)
break;
}
}
}
 
static av_cold int test_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
 
test->fill_picture_fn = test_fill_picture;
return init(ctx);
}
 
static int test_query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static const AVFilterPad avfilter_vsrc_testsrc_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_props,
},
{ NULL }
};
 
AVFilter avfilter_vsrc_testsrc = {
.name = "testsrc",
.description = NULL_IF_CONFIG_SMALL("Generate test pattern."),
.priv_size = sizeof(TestSourceContext),
.priv_class = &testsrc_class,
.init = test_init,
.uninit = uninit,
.query_formats = test_query_formats,
.inputs = NULL,
.outputs = avfilter_vsrc_testsrc_outputs,
};
 
#endif /* CONFIG_TESTSRC_FILTER */
 
#if CONFIG_RGBTESTSRC_FILTER
 
#define rgbtestsrc_options options
AVFILTER_DEFINE_CLASS(rgbtestsrc);
 
#define R 0
#define G 1
#define B 2
#define A 3
 
static void rgbtest_put_pixel(uint8_t *dst, int dst_linesize,
int x, int y, int r, int g, int b, enum AVPixelFormat fmt,
uint8_t rgba_map[4])
{
int32_t v;
uint8_t *p;
 
switch (fmt) {
case AV_PIX_FMT_BGR444: ((uint16_t*)(dst + y*dst_linesize))[x] = ((r >> 4) << 8) | ((g >> 4) << 4) | (b >> 4); break;
case AV_PIX_FMT_RGB444: ((uint16_t*)(dst + y*dst_linesize))[x] = ((b >> 4) << 8) | ((g >> 4) << 4) | (r >> 4); break;
case AV_PIX_FMT_BGR555: ((uint16_t*)(dst + y*dst_linesize))[x] = ((r>>3)<<10) | ((g>>3)<<5) | (b>>3); break;
case AV_PIX_FMT_RGB555: ((uint16_t*)(dst + y*dst_linesize))[x] = ((b>>3)<<10) | ((g>>3)<<5) | (r>>3); break;
case AV_PIX_FMT_BGR565: ((uint16_t*)(dst + y*dst_linesize))[x] = ((r>>3)<<11) | ((g>>2)<<5) | (b>>3); break;
case AV_PIX_FMT_RGB565: ((uint16_t*)(dst + y*dst_linesize))[x] = ((b>>3)<<11) | ((g>>2)<<5) | (r>>3); break;
case AV_PIX_FMT_RGB24:
case AV_PIX_FMT_BGR24:
v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8));
p = dst + 3*x + y*dst_linesize;
AV_WL24(p, v);
break;
case AV_PIX_FMT_RGBA:
case AV_PIX_FMT_BGRA:
case AV_PIX_FMT_ARGB:
case AV_PIX_FMT_ABGR:
v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8)) + (255 << (rgba_map[A]*8));
p = dst + 4*x + y*dst_linesize;
AV_WL32(p, v);
break;
}
}
 
static void rgbtest_fill_picture(AVFilterContext *ctx, AVFrame *frame)
{
TestSourceContext *test = ctx->priv;
int x, y, w = frame->width, h = frame->height;
 
for (y = 0; y < h; y++) {
for (x = 0; x < w; x++) {
int c = 256*x/w;
int r = 0, g = 0, b = 0;
 
if (3*y < h ) r = c;
else if (3*y < 2*h) g = c;
else b = c;
 
rgbtest_put_pixel(frame->data[0], frame->linesize[0], x, y, r, g, b,
ctx->outputs[0]->format, test->rgba_map);
}
}
}
 
static av_cold int rgbtest_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
 
test->draw_once = 1;
test->fill_picture_fn = rgbtest_fill_picture;
return init(ctx);
}
 
static int rgbtest_query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_RGBA, AV_PIX_FMT_ARGB, AV_PIX_FMT_BGRA, AV_PIX_FMT_ABGR,
AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB24,
AV_PIX_FMT_RGB444, AV_PIX_FMT_BGR444,
AV_PIX_FMT_RGB565, AV_PIX_FMT_BGR565,
AV_PIX_FMT_RGB555, AV_PIX_FMT_BGR555,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static int rgbtest_config_props(AVFilterLink *outlink)
{
TestSourceContext *test = outlink->src->priv;
 
ff_fill_rgba_map(test->rgba_map, outlink->format);
return config_props(outlink);
}
 
static const AVFilterPad avfilter_vsrc_rgbtestsrc_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = rgbtest_config_props,
},
{ NULL }
};
 
AVFilter avfilter_vsrc_rgbtestsrc = {
.name = "rgbtestsrc",
.description = NULL_IF_CONFIG_SMALL("Generate RGB test pattern."),
.priv_size = sizeof(TestSourceContext),
.priv_class = &rgbtestsrc_class,
.init = rgbtest_init,
.uninit = uninit,
.query_formats = rgbtest_query_formats,
.inputs = NULL,
.outputs = avfilter_vsrc_rgbtestsrc_outputs,
};
 
#endif /* CONFIG_RGBTESTSRC_FILTER */
 
#if CONFIG_SMPTEBARS_FILTER || CONFIG_SMPTEHDBARS_FILTER
 
static const uint8_t rainbow[7][4] = {
{ 180, 128, 128, 255 }, /* gray */
{ 168, 44, 136, 255 }, /* yellow */
{ 145, 147, 44, 255 }, /* cyan */
{ 133, 63, 52, 255 }, /* green */
{ 63, 193, 204, 255 }, /* magenta */
{ 51, 109, 212, 255 }, /* red */
{ 28, 212, 120, 255 }, /* blue */
};
 
static const uint8_t wobnair[7][4] = {
{ 32, 240, 118, 255 }, /* blue */
{ 19, 128, 128, 255 }, /* 7.5% intensity black */
{ 54, 184, 198, 255 }, /* magenta */
{ 19, 128, 128, 255 }, /* 7.5% intensity black */
{ 188, 154, 16, 255 }, /* cyan */
{ 19, 128, 128, 255 }, /* 7.5% intensity black */
{ 191, 128, 128, 255 }, /* gray */
};
 
static const uint8_t white[4] = { 235, 128, 128, 255 };
static const uint8_t black[4] = { 19, 128, 128, 255 }; /* 7.5% intensity black */
 
/* pluge pulses */
static const uint8_t neg4ire[4] = { 9, 128, 128, 255 }; /* 3.5% intensity black */
static const uint8_t pos4ire[4] = { 29, 128, 128, 255 }; /* 11.5% intensity black */
 
/* fudged Q/-I */
static const uint8_t i_pixel[4] = { 61, 153, 99, 255 };
static const uint8_t q_pixel[4] = { 35, 174, 152, 255 };
 
static const uint8_t gray40[4] = { 104, 128, 128, 255 };
static const uint8_t gray15[4] = { 49, 128, 128, 255 };
static const uint8_t cyan[4] = { 188, 154, 16, 255 };
static const uint8_t yellow[4] = { 219, 16, 138, 255 };
static const uint8_t blue[4] = { 32, 240, 118, 255 };
static const uint8_t red[4] = { 63, 102, 240, 255 };
static const uint8_t black0[4] = { 16, 128, 128, 255 };
static const uint8_t black2[4] = { 20, 128, 128, 255 };
static const uint8_t black4[4] = { 25, 128, 128, 255 };
static const uint8_t neg2[4] = { 12, 128, 128, 255 };
 
static void draw_bar(TestSourceContext *test, const uint8_t color[4],
unsigned x, unsigned y, unsigned w, unsigned h,
AVFrame *frame)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
uint8_t *p, *p0;
int plane;
 
x = FFMIN(x, test->w - 1);
y = FFMIN(y, test->h - 1);
w = FFMIN(w, test->w - x);
h = FFMIN(h, test->h - y);
 
av_assert0(x + w <= test->w);
av_assert0(y + h <= test->h);
 
for (plane = 0; frame->data[plane]; plane++) {
const int c = color[plane];
const int linesize = frame->linesize[plane];
int i, px, py, pw, ph;
 
if (plane == 1 || plane == 2) {
px = x >> desc->log2_chroma_w;
pw = w >> desc->log2_chroma_w;
py = y >> desc->log2_chroma_h;
ph = h >> desc->log2_chroma_h;
} else {
px = x;
pw = w;
py = y;
ph = h;
}
 
p0 = p = frame->data[plane] + py * linesize + px;
memset(p, c, pw);
p += linesize;
for (i = 1; i < ph; i++, p += linesize)
memcpy(p, p0, pw);
}
}
 
static int smptebars_query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_NONE,
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
 
static const AVFilterPad smptebars_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_props,
},
{ NULL }
};
 
#if CONFIG_SMPTEBARS_FILTER
 
#define smptebars_options options
AVFILTER_DEFINE_CLASS(smptebars);
 
static void smptebars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
{
TestSourceContext *test = ctx->priv;
int r_w, r_h, w_h, p_w, p_h, i, tmp, x = 0;
const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
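/* classic SMPTE colour-bar layout (cf. SMPTE EG 1): seven 75% bars over
the top two thirds, a reversed "wobnair" strip below them, and a bottom
row with the -I/white/+Q patches and the PLUGE pulses */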
 
av_frame_set_colorspace(picref, AVCOL_SPC_BT470BG);
 
r_w = FFALIGN((test->w + 6) / 7, 1 << pixdesc->log2_chroma_w);
r_h = FFALIGN(test->h * 2 / 3, 1 << pixdesc->log2_chroma_h);
w_h = FFALIGN(test->h * 3 / 4 - r_h, 1 << pixdesc->log2_chroma_h);
p_w = FFALIGN(r_w * 5 / 4, 1 << pixdesc->log2_chroma_w);
p_h = test->h - w_h - r_h;
 
for (i = 0; i < 7; i++) {
draw_bar(test, rainbow[i], x, 0, r_w, r_h, picref);
draw_bar(test, wobnair[i], x, r_h, r_w, w_h, picref);
x += r_w;
}
x = 0;
draw_bar(test, i_pixel, x, r_h + w_h, p_w, p_h, picref);
x += p_w;
draw_bar(test, white, x, r_h + w_h, p_w, p_h, picref);
x += p_w;
draw_bar(test, q_pixel, x, r_h + w_h, p_w, p_h, picref);
x += p_w;
tmp = FFALIGN(5 * r_w - x, 1 << pixdesc->log2_chroma_w);
draw_bar(test, black, x, r_h + w_h, tmp, p_h, picref);
x += tmp;
tmp = FFALIGN(r_w / 3, 1 << pixdesc->log2_chroma_w);
draw_bar(test, neg4ire, x, r_h + w_h, tmp, p_h, picref);
x += tmp;
draw_bar(test, black, x, r_h + w_h, tmp, p_h, picref);
x += tmp;
draw_bar(test, pos4ire, x, r_h + w_h, tmp, p_h, picref);
x += tmp;
draw_bar(test, black, x, r_h + w_h, test->w - x, p_h, picref);
}
 
static av_cold int smptebars_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
 
test->fill_picture_fn = smptebars_fill_picture;
test->draw_once = 1;
return init(ctx);
}
 
AVFilter avfilter_vsrc_smptebars = {
.name = "smptebars",
.description = NULL_IF_CONFIG_SMALL("Generate SMPTE color bars."),
.priv_size = sizeof(TestSourceContext),
.priv_class = &smptebars_class,
.init = smptebars_init,
.uninit = uninit,
.query_formats = smptebars_query_formats,
.inputs = NULL,
.outputs = smptebars_outputs,
};
 
#endif /* CONFIG_SMPTEBARS_FILTER */
 
#if CONFIG_SMPTEHDBARS_FILTER
 
#define smptehdbars_options options
AVFILTER_DEFINE_CLASS(smptehdbars);
 
static void smptehdbars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
{
TestSourceContext *test = ctx->priv;
int d_w, r_w, r_h, l_w, i, tmp, x = 0, y = 0;
const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
 
av_frame_set_colorspace(picref, AVCOL_SPC_BT709);
 
d_w = FFALIGN(test->w / 8, 1 << pixdesc->log2_chroma_w);
r_h = FFALIGN(test->h * 7 / 12, 1 << pixdesc->log2_chroma_h);
draw_bar(test, gray40, x, 0, d_w, r_h, picref);
x += d_w;
 
r_w = FFALIGN((((test->w + 3) / 4) * 3) / 7, 1 << pixdesc->log2_chroma_w);
for (i = 0; i < 7; i++) {
draw_bar(test, rainbow[i], x, 0, r_w, r_h, picref);
x += r_w;
}
draw_bar(test, gray40, x, 0, test->w - x, r_h, picref);
y = r_h;
r_h = FFALIGN(test->h / 12, 1 << pixdesc->log2_chroma_h);
draw_bar(test, cyan, 0, y, d_w, r_h, picref);
x = d_w;
draw_bar(test, i_pixel, x, y, r_w, r_h, picref);
x += r_w;
tmp = r_w * 6;
draw_bar(test, rainbow[0], x, y, tmp, r_h, picref);
x += tmp;
l_w = x;
draw_bar(test, blue, x, y, test->w - x, r_h, picref);
y += r_h;
draw_bar(test, yellow, 0, y, d_w, r_h, picref);
x = d_w;
draw_bar(test, q_pixel, x, y, r_w, r_h, picref);
x += r_w;
 
for (i = 0; i < tmp; i += 1 << pixdesc->log2_chroma_w) {
uint8_t yramp[4] = {0};
 
yramp[0] = i * 255 / tmp;
yramp[1] = 128;
yramp[2] = 128;
yramp[3] = 255;
 
draw_bar(test, yramp, x, y, 1 << pixdesc->log2_chroma_w, r_h, picref);
x += 1 << pixdesc->log2_chroma_w;
}
draw_bar(test, red, x, y, test->w - x, r_h, picref);
y += r_h;
draw_bar(test, gray15, 0, y, d_w, test->h - y, picref);
x = d_w;
tmp = FFALIGN(r_w * 3 / 2, 1 << pixdesc->log2_chroma_w);
draw_bar(test, black0, x, y, tmp, test->h - y, picref);
x += tmp;
tmp = FFALIGN(r_w * 2, 1 << pixdesc->log2_chroma_w);
draw_bar(test, white, x, y, tmp, test->h - y, picref);
x += tmp;
tmp = FFALIGN(r_w * 5 / 6, 1 << pixdesc->log2_chroma_w);
draw_bar(test, black0, x, y, tmp, test->h - y, picref);
x += tmp;
tmp = FFALIGN(r_w / 3, 1 << pixdesc->log2_chroma_w);
draw_bar(test, neg2, x, y, tmp, test->h - y, picref);
x += tmp;
draw_bar(test, black0, x, y, tmp, test->h - y, picref);
x += tmp;
draw_bar(test, black2, x, y, tmp, test->h - y, picref);
x += tmp;
draw_bar(test, black0, x, y, tmp, test->h - y, picref);
x += tmp;
draw_bar(test, black4, x, y, tmp, test->h - y, picref);
x += tmp;
r_w = l_w - x;
draw_bar(test, black0, x, y, r_w, test->h - y, picref);
x += r_w;
draw_bar(test, gray15, x, y, test->w - x, test->h - y, picref);
}
 
static av_cold int smptehdbars_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
 
test->fill_picture_fn = smptehdbars_fill_picture;
test->draw_once = 1;
return init(ctx);
}
 
AVFilter avfilter_vsrc_smptehdbars = {
.name = "smptehdbars",
.description = NULL_IF_CONFIG_SMALL("Generate SMPTE HD color bars."),
.priv_size = sizeof(TestSourceContext),
.priv_class = &smptehdbars_class,
.init = smptehdbars_init,
.uninit = uninit,
.query_formats = smptebars_query_formats,
.inputs = NULL,
.outputs = smptebars_outputs,
};
 
#endif /* CONFIG_SMPTEHDBARS_FILTER */
#endif /* CONFIG_SMPTEBARS_FILTER || CONFIG_SMPTEHDBARS_FILTER */
/contrib/sdk/sources/ffmpeg/libavfilter/x86/Makefile
0,0 → 1,12
OBJS-$(CONFIG_GRADFUN_FILTER) += x86/vf_gradfun_init.o
OBJS-$(CONFIG_HQDN3D_FILTER) += x86/vf_hqdn3d_init.o
OBJS-$(CONFIG_PULLUP_FILTER) += x86/vf_pullup_init.o
OBJS-$(CONFIG_SPP_FILTER) += x86/vf_spp.o
OBJS-$(CONFIG_VOLUME_FILTER) += x86/af_volume_init.o
OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif_init.o
 
YASM-OBJS-$(CONFIG_GRADFUN_FILTER) += x86/vf_gradfun.o
YASM-OBJS-$(CONFIG_HQDN3D_FILTER) += x86/vf_hqdn3d.o
YASM-OBJS-$(CONFIG_PULLUP_FILTER) += x86/vf_pullup.o
YASM-OBJS-$(CONFIG_VOLUME_FILTER) += x86/af_volume.o
YASM-OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif.o x86/yadif-16.o x86/yadif-10.o
/contrib/sdk/sources/ffmpeg/libavfilter/x86/af_volume.asm
0,0 → 1,140
;*****************************************************************************
;* x86-optimized functions for volume filter
;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
 
%include "libavutil/x86/x86util.asm"
 
SECTION_RODATA 32
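; pd_1_256 is the IEEE-754 double 1/256 (2^-8); pd_int32_max is
; 2147483647.0, i.e. INT32_MAX as a double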
 
pd_1_256: times 4 dq 0x3F70000000000000
pd_int32_max: times 4 dq 0x41DFFFFFFFC00000
pw_1: times 8 dw 1
pw_128: times 8 dw 128
pq_128: times 2 dq 128
 
SECTION_TEXT
 
;------------------------------------------------------------------------------
; void ff_scale_samples_s16(uint8_t *dst, const uint8_t *src, int len,
; int volume)
;------------------------------------------------------------------------------
 
INIT_XMM sse2
cglobal scale_samples_s16, 4,4,4, dst, src, len, volume
movd m0, volumem
pshuflw m0, m0, 0
punpcklwd m0, [pw_1]
mova m1, [pw_128]
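; m0 = {volume, 1, volume, 1, ...}: interleaving each sample with a 128
; word from m1 lets pmaddwd compute src*volume + 128*1 in one instruction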
lea lenq, [lend*2-mmsize]
.loop:
; dst[i] = av_clip_int16((src[i] * volume + 128) >> 8);
mova m2, [srcq+lenq]
punpcklwd m3, m2, m1
punpckhwd m2, m1
pmaddwd m3, m0
pmaddwd m2, m0
psrad m3, 8
psrad m2, 8
packssdw m3, m2
mova [dstq+lenq], m3
sub lenq, mmsize
jge .loop
REP_RET
 
;------------------------------------------------------------------------------
; void ff_scale_samples_s32(uint8_t *dst, const uint8_t *src, int len,
; int volume)
;------------------------------------------------------------------------------
 
%macro SCALE_SAMPLES_S32 0
cglobal scale_samples_s32, 4,4,4, dst, src, len, volume
%if ARCH_X86_32 && cpuflag(avx)
vbroadcastss xmm2, volumem
%else
movd xmm2, volumed
pshufd xmm2, xmm2, 0
%endif
CVTDQ2PD m2, xmm2
mulpd m2, m2, [pd_1_256]
mova m3, [pd_int32_max]
lea lenq, [lend*4-mmsize]
.loop:
CVTDQ2PD m0, [srcq+lenq ]
CVTDQ2PD m1, [srcq+lenq+mmsize/2]
mulpd m0, m0, m2
mulpd m1, m1, m2
minpd m0, m0, m3
minpd m1, m1, m3
cvtpd2dq xmm0, m0
cvtpd2dq xmm1, m1
%if cpuflag(avx)
vmovdqa [dstq+lenq ], xmm0
vmovdqa [dstq+lenq+mmsize/2], xmm1
%else
movq [dstq+lenq ], xmm0
movq [dstq+lenq+mmsize/2], xmm1
%endif
sub lenq, mmsize
jge .loop
REP_RET
%endmacro
 
INIT_XMM sse2
%define CVTDQ2PD cvtdq2pd
SCALE_SAMPLES_S32
%if HAVE_AVX_EXTERNAL
%define CVTDQ2PD vcvtdq2pd
INIT_YMM avx
SCALE_SAMPLES_S32
%endif
%undef CVTDQ2PD
 
; NOTE: This is not bit-identical with the C version because it clips to
; [-INT_MAX, INT_MAX] instead of [INT_MIN, INT_MAX]
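; A rough scalar sketch of what the SSSE3/Atom loop below computes per
; sample (illustrative only, not the exact C reference):
;
; dst[i] = sign(src[i]) * min((|src[i]| * volume + 128) >> 8, INT_MAX)
;
; pabsd/psignd supply the |.| and sign(.) steps, which is why the result is
; clipped to [-INT_MAX, INT_MAX] rather than [INT_MIN, INT_MAX].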
 
INIT_XMM ssse3, atom
cglobal scale_samples_s32, 4,4,8, dst, src, len, volume
movd m4, volumem
pshufd m4, m4, 0
mova m5, [pq_128]
pxor m6, m6
lea lenq, [lend*4-mmsize]
.loop:
; src[i] = av_clipl_int32((src[i] * volume + 128) >> 8);
mova m7, [srcq+lenq]
pabsd m3, m7
pshufd m0, m3, q0100
pshufd m1, m3, q0302
pmuludq m0, m4
pmuludq m1, m4
paddq m0, m5
paddq m1, m5
psrlq m0, 7
psrlq m1, 7
shufps m2, m0, m1, q3131
shufps m0, m0, m1, q2020
pcmpgtd m2, m6
por m0, m2
psrld m0, 1
psignd m0, m7
mova [dstq+lenq], m0
sub lenq, mmsize
jge .loop
REP_RET
/contrib/sdk/sources/ffmpeg/libavfilter/x86/af_volume_init.c
0,0 → 1,60
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/samplefmt.h"
#include "libavutil/x86/cpu.h"
#include "libavfilter/af_volume.h"
 
void ff_scale_samples_s16_sse2(uint8_t *dst, const uint8_t *src, int len,
int volume);
 
void ff_scale_samples_s32_sse2(uint8_t *dst, const uint8_t *src, int len,
int volume);
void ff_scale_samples_s32_ssse3_atom(uint8_t *dst, const uint8_t *src, int len,
int volume);
void ff_scale_samples_s32_avx(uint8_t *dst, const uint8_t *src, int len,
int volume);
 
av_cold void ff_volume_init_x86(VolumeContext *vol)
{
int cpu_flags = av_get_cpu_flags();
enum AVSampleFormat sample_fmt = av_get_packed_sample_fmt(vol->sample_fmt);
 
if (sample_fmt == AV_SAMPLE_FMT_S16) {
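/* The SSE2 kernel folds the +128 rounding into pmaddwd on (sample, 128) x
* (volume, 1) word pairs, so the volume must fit in a signed 16-bit word,
* hence the volume_i < 32768 guard below. */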
if (EXTERNAL_SSE2(cpu_flags) && vol->volume_i < 32768) {
vol->scale_samples = ff_scale_samples_s16_sse2;
vol->samples_align = 8;
}
} else if (sample_fmt == AV_SAMPLE_FMT_S32) {
if (EXTERNAL_SSE2(cpu_flags)) {
vol->scale_samples = ff_scale_samples_s32_sse2;
vol->samples_align = 4;
}
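/* The later checks deliberately override earlier assignments so that the
* most specific implementation available (Atom-tuned SSSE3, then AVX)
* wins. */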
if (EXTERNAL_SSSE3(cpu_flags) && cpu_flags & AV_CPU_FLAG_ATOM) {
vol->scale_samples = ff_scale_samples_s32_ssse3_atom;
vol->samples_align = 4;
}
if (EXTERNAL_AVX(cpu_flags)) {
vol->scale_samples = ff_scale_samples_s32_avx;
vol->samples_align = 8;
}
}
}
/contrib/sdk/sources/ffmpeg/libavfilter/x86/vf_gradfun.asm
0,0 → 1,110
;******************************************************************************
;* x86-optimized functions for gradfun filter
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
 
%include "libavutil/x86/x86util.asm"
 
SECTION_RODATA
 
pw_7f: times 8 dw 0x7F
pw_ff: times 8 dw 0xFF
 
SECTION .text
 
%macro FILTER_LINE 1
movh m0, [r2+r0]
movh m1, [r3+r0]
punpcklbw m0, m7
punpcklwd m1, m1
psllw m0, 7
psubw m1, m0
PABSW m2, m1
pmulhuw m2, m5
psubw m2, m6
pminsw m2, m7
pmullw m2, m2
psllw m1, 2
paddw m0, %1
pmulhw m1, m2
paddw m0, m1
psraw m0, 7
packuswb m0, m0
movh [r1+r0], m0
%endmacro
 
INIT_MMX mmxext
cglobal gradfun_filter_line, 6, 6
movh m5, r4d
pxor m7, m7
pshufw m5, m5,0
mova m6, [pw_7f]
mova m3, [r5]
mova m4, [r5+8]
.loop:
FILTER_LINE m3
add r0, 4
jge .end
FILTER_LINE m4
add r0, 4
jl .loop
.end:
REP_RET
 
INIT_XMM ssse3
cglobal gradfun_filter_line, 6, 6, 8
movd m5, r4d
pxor m7, m7
pshuflw m5, m5, 0
mova m6, [pw_7f]
punpcklqdq m5, m5
mova m4, [r5]
.loop:
FILTER_LINE m4
add r0, 8
jl .loop
REP_RET
 
%macro BLUR_LINE 1
cglobal gradfun_blur_line_%1, 6, 6, 8
mova m7, [pw_ff]
.loop:
%1 m0, [r4+r0]
%1 m1, [r5+r0]
mova m2, m0
mova m3, m1
psrlw m0, 8
psrlw m1, 8
pand m2, m7
pand m3, m7
paddw m0, m1
paddw m2, m3
paddw m0, m2
paddw m0, [r2+r0]
mova m1, [r1+r0]
mova [r1+r0], m0
psubw m0, m1
mova [r3+r0], m0
add r0, 16
jl .loop
REP_RET
%endmacro
 
INIT_XMM sse2
BLUR_LINE movdqa
BLUR_LINE movdqu
/contrib/sdk/sources/ffmpeg/libavfilter/x86/vf_gradfun_init.c
0,0 → 1,96
/*
* Copyright (C) 2009 Loren Merritt <lorenm@u.washington.edu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavfilter/gradfun.h"
 
#if HAVE_YASM
void ff_gradfun_filter_line_mmxext(intptr_t x, uint8_t *dst, const uint8_t *src,
const uint16_t *dc, int thresh,
const uint16_t *dithers);
static void gradfun_filter_line_mmxext(uint8_t *dst, const uint8_t *src,
const uint16_t *dc,
int width, int thresh,
const uint16_t *dithers)
{
intptr_t x;
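/* Hand the trailing (width & 3) pixels to the C version first, then run
* the MMX loop over the remaining multiple-of-4 width. */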
if (width & 3) {
x = width & ~3;
ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2, width - x, thresh, dithers);
width = x;
}
x = -width;
ff_gradfun_filter_line_mmxext(x, dst + width, src + width, dc + width/2,
thresh, dithers);
}
 
void ff_gradfun_filter_line_ssse3(intptr_t x, uint8_t *dst, const uint8_t *src,
const uint16_t *dc, int thresh,
const uint16_t *dithers);
static void gradfun_filter_line_ssse3(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers)
{
intptr_t x;
if (width & 7) {
// could be ~10% faster if this C fallback for the tail could somehow be eliminated
x = width & ~7;
ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2, width - x, thresh, dithers);
width = x;
}
x = -width;
ff_gradfun_filter_line_ssse3(x, dst + width, src + width, dc + width/2,
thresh, dithers);
}
 
void ff_gradfun_blur_line_movdqa_sse2(intptr_t x, uint16_t *buf, const uint16_t *buf1, uint16_t *dc, const uint8_t *src1, const uint8_t *src2);
void ff_gradfun_blur_line_movdqu_sse2(intptr_t x, uint16_t *buf, const uint16_t *buf1, uint16_t *dc, const uint8_t *src1, const uint8_t *src2);
static void gradfun_blur_line_sse2(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width)
{
intptr_t x = -2*width;
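/* Use the movdqu variant when the source pointer or its stride is not
* 16-byte aligned; otherwise the aligned movdqa loads are safe. */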
if (((intptr_t) src | src_linesize) & 15) {
ff_gradfun_blur_line_movdqu_sse2(x, buf + width, buf1 + width,
dc + width, src + width * 2,
src + width * 2 + src_linesize);
} else {
ff_gradfun_blur_line_movdqa_sse2(x, buf + width, buf1 + width,
dc + width, src + width * 2,
src + width * 2 + src_linesize);
}
}
#endif /* HAVE_YASM */
 
av_cold void ff_gradfun_init_x86(GradFunContext *gf)
{
#if HAVE_YASM
int cpu_flags = av_get_cpu_flags();
 
if (EXTERNAL_MMXEXT(cpu_flags))
gf->filter_line = gradfun_filter_line_mmxext;
if (EXTERNAL_SSSE3(cpu_flags))
gf->filter_line = gradfun_filter_line_ssse3;
 
if (EXTERNAL_SSE2(cpu_flags))
gf->blur_line = gradfun_blur_line_sse2;
#endif /* HAVE_YASM */
}
/contrib/sdk/sources/ffmpeg/libavfilter/x86/vf_hqdn3d.asm
0,0 → 1,106
;******************************************************************************
;* Copyright (c) 2012 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
 
%include "libavutil/x86/x86util.asm"
 
SECTION .text
 
%macro LOWPASS 3 ; prevsample, cursample, lut
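; computes: prev = cur + lut[(prev - cur) >> (8 - lut_bits)], i.e. the
; hqdn3d lowpass with the filter strength folded into the lookup table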
sub %1q, %2q
%if lut_bits != 8
sar %1q, 8-lut_bits
%endif
movsx %1d, word [%3q+%1q*2]
add %1d, %2d
%endmacro
 
%macro LOAD 3 ; dstreg, x, bitdepth
%if %3 == 8
movzx %1, byte [srcq+%2]
%else
movzx %1, word [srcq+(%2)*2]
%endif
%if %3 != 16
shl %1, 16-%3
add %1, (1<<(15-%3))-1
%endif
%endmacro
 
%macro HQDN3D_ROW 1 ; bitdepth
%if ARCH_X86_64
cglobal hqdn3d_row_%1_x86, 7,10,0, src, dst, lineant, frameant, width, spatial, temporal, pixelant, t0, t1
%else
cglobal hqdn3d_row_%1_x86, 7,7,0, src, dst, lineant, frameant, width, spatial, temporal
%endif
%assign bytedepth (%1+7)>>3
%assign lut_bits 4+4*(%1/16)
dec widthq
lea srcq, [srcq+widthq*bytedepth]
lea dstq, [dstq+widthq*bytedepth]
lea frameantq, [frameantq+widthq*2]
lea lineantq, [lineantq+widthq*2]
neg widthq
%define xq widthq
%if ARCH_X86_32
mov dstmp, dstq
mov srcmp, srcq
mov frameantmp, frameantq
mov lineantmp, lineantq
%define dstq r0
%define frameantq r0
%define lineantq r0
%define pixelantq r1
%define pixelantd r1d
DECLARE_REG_TMP 2,3
%endif
LOAD pixelantd, xq, %1
ALIGN 16
.loop:
movifnidn srcq, srcmp
LOAD t0d, xq+1, %1 ; skip on the last iteration to avoid overread
.loop2:
movifnidn lineantq, lineantmp
movzx t1d, word [lineantq+xq*2]
LOWPASS t1, pixelant, spatial
mov [lineantq+xq*2], t1w
LOWPASS pixelant, t0, spatial
movifnidn frameantq, frameantmp
movzx t0d, word [frameantq+xq*2]
LOWPASS t0, t1, temporal
mov [frameantq+xq*2], t0w
movifnidn dstq, dstmp
%if %1 != 16
shr t0d, 16-%1 ; could eliminate this by storing from t0h, but only with some constraints on register allocation
%endif
%if %1 == 8
mov [dstq+xq], t0b
%else
mov [dstq+xq*2], t0w
%endif
inc xq
jl .loop
je .loop2
REP_RET
%endmacro ; HQDN3D_ROW
 
HQDN3D_ROW 8
HQDN3D_ROW 9
HQDN3D_ROW 10
HQDN3D_ROW 16
/contrib/sdk/sources/ffmpeg/libavfilter/x86/vf_hqdn3d_init.c
0,0 → 1,41
/*
* Copyright (c) 2012 Loren Merritt
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#include <stddef.h>
#include <stdint.h>
 
#include "libavutil/attributes.h"
#include "libavfilter/vf_hqdn3d.h"
#include "config.h"
 
void ff_hqdn3d_row_8_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial, int16_t *temporal);
void ff_hqdn3d_row_9_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial, int16_t *temporal);
void ff_hqdn3d_row_10_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial, int16_t *temporal);
void ff_hqdn3d_row_16_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial, int16_t *temporal);
 
av_cold void ff_hqdn3d_init_x86(HQDN3DContext *hqdn3d)
{
#if HAVE_YASM
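/* Overrides the default (C) row functions for the bit depths that have
* asm implementations. */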
hqdn3d->denoise_row[ 8] = ff_hqdn3d_row_8_x86;
hqdn3d->denoise_row[ 9] = ff_hqdn3d_row_9_x86;
hqdn3d->denoise_row[10] = ff_hqdn3d_row_10_x86;
hqdn3d->denoise_row[16] = ff_hqdn3d_row_16_x86;
#endif
}
/contrib/sdk/sources/ffmpeg/libavfilter/x86/vf_pullup.asm
0,0 → 1,178
;*****************************************************************************
;* x86-optimized functions for pullup filter
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License along
;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
;******************************************************************************
 
%include "libavutil/x86/x86util.asm"
 
SECTION_TEXT
 
INIT_MMX mmx
cglobal pullup_filter_diff, 3, 5, 8, first, second, size
mov r3, 4
pxor m4, m4
pxor m7, m7
 
.loop:
movq m0, [firstq]
movq m2, [firstq]
add firstq, sizeq
movq m1, [secondq]
add secondq, sizeq
psubusb m2, m1
psubusb m1, m0
movq m0, m2
movq m3, m1
punpcklbw m0, m7
punpcklbw m1, m7
punpckhbw m2, m7
punpckhbw m3, m7
paddw m4, m0
paddw m4, m1
paddw m4, m2
paddw m4, m3
 
dec r3
jnz .loop
 
movq m3, m4
punpcklwd m4, m7
punpckhwd m3, m7
paddd m3, m4
movd eax, m3
psrlq m3, 32
movd r4, m3
add eax, r4d
RET
 
INIT_MMX mmx
cglobal pullup_filter_comb, 3, 5, 8, first, second, size
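; Accumulates, over a 4-line block, the comb metric (cf. the scalar version
; in vf_pullup.c):
; comb += abs(2*a[j] - b[j-s] - b[j]) + abs(2*b[j] - a[j] - a[j+s])
; with a = first, b = second and s = size (the line stride).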
mov r3, 4
pxor m6, m6
pxor m7, m7
sub secondq, sizeq
 
.loop:
movq m0, [firstq]
movq m1, [secondq]
punpcklbw m0, m7
movq m2, [secondq+sizeq]
punpcklbw m1, m7
punpcklbw m2, m7
paddw m0, m0
paddw m1, m2
movq m2, m0
psubusw m0, m1
psubusw m1, m2
paddw m6, m0
paddw m6, m1
 
movq m0, [firstq]
movq m1, [secondq]
punpckhbw m0, m7
movq m2, [secondq+sizeq]
punpckhbw m1, m7
punpckhbw m2, m7
paddw m0, m0
paddw m1, m2
movq m2, m0
psubusw m0, m1
psubusw m1, m2
paddw m6, m0
paddw m6, m1
 
movq m0, [secondq+sizeq]
movq m1, [firstq]
punpcklbw m0, m7
movq m2, [firstq+sizeq]
punpcklbw m1, m7
punpcklbw m2, m7
paddw m0, m0
paddw m1, m2
movq m2, m0
psubusw m0, m1
psubusw m1, m2
paddw m6, m0
paddw m6, m1
 
movq m0, [secondq+sizeq]
movq m1, [firstq]
punpckhbw m0, m7
movq m2, [firstq+sizeq]
punpckhbw m1, m7
punpckhbw m2, m7
paddw m0, m0
paddw m1, m2
movq m2, m0
psubusw m0, m1
psubusw m1, m2
paddw m6, m0
paddw m6, m1
 
add firstq, sizeq
add secondq, sizeq
dec r3
jnz .loop
 
movq m5, m6
punpcklwd m6, m7
punpckhwd m5, m7
paddd m5, m6
movd eax, m5
psrlq m5, 32
movd r4, m5
add eax, r4d
RET
 
INIT_MMX mmx
cglobal pullup_filter_var, 3, 5, 8, first, second, size
mov r3, 3
pxor m4, m4
pxor m7, m7
 
.loop:
movq m0, [firstq]
movq m2, [firstq]
movq m1, [firstq+sizeq]
add firstq, sizeq
psubusb m2, m1
psubusb m1, m0
movq m0, m2
movq m3, m1
punpcklbw m0, m7
punpcklbw m1, m7
punpckhbw m2, m7
punpckhbw m3, m7
paddw m4, m0
paddw m4, m1
paddw m4, m2
paddw m4, m3
 
dec r3
jnz .loop
 
movq m3, m4
punpcklwd m4, m7
punpckhwd m3, m7
paddd m3, m4
movd eax, m3
psrlq m3, 32
movd r4, m3
add eax, r4d
shl eax, 2
RET
/contrib/sdk/sources/ffmpeg/libavfilter/x86/vf_pullup_init.c
0,0 → 1,41
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavfilter/vf_pullup.h"
 
int ff_pullup_filter_diff_mmx(const uint8_t *a, const uint8_t *b, int s);
int ff_pullup_filter_comb_mmx(const uint8_t *a, const uint8_t *b, int s);
int ff_pullup_filter_var_mmx (const uint8_t *a, const uint8_t *b, int s);
 
av_cold void ff_pullup_init_x86(PullupContext *s)
{
#if HAVE_YASM
int cpu_flags = av_get_cpu_flags();
 
if (EXTERNAL_MMX(cpu_flags)) {
s->diff = ff_pullup_filter_diff_mmx;
s->comb = ff_pullup_filter_comb_mmx;
s->var = ff_pullup_filter_var_mmx;
}
#endif
}
/contrib/sdk/sources/ffmpeg/libavfilter/x86/vf_spp.c
0,0 → 1,233
/*
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
 
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavfilter/vf_spp.h"
 
#if HAVE_MMX_INLINE
static void hardthresh_mmx(int16_t dst[64], const int16_t src[64],
int qp, const uint8_t *permutation)
{
int bias = 0; //FIXME
unsigned int threshold1;
 
threshold1 = qp * ((1<<4) - bias) - 1;
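/* A rough scalar sketch of the hard thresholding the asm below performs on
* each AC coefficient (cf. the C requantize in vf_spp.c):
*
* if ((unsigned)(level + threshold1) > 2 * threshold1)
* dst[i] = (level + 4) >> 3; // coefficient survives
* else
* dst[i] = 0; // coefficient is zeroed
*/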
 
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
"movq " #src0 ", %%mm0 \n" \
"movq " #src1 ", %%mm1 \n" \
"movq " #src2 ", %%mm2 \n" \
"movq " #src3 ", %%mm3 \n" \
"psubw %%mm4, %%mm0 \n" \
"psubw %%mm4, %%mm1 \n" \
"psubw %%mm4, %%mm2 \n" \
"psubw %%mm4, %%mm3 \n" \
"paddusw %%mm5, %%mm0 \n" \
"paddusw %%mm5, %%mm1 \n" \
"paddusw %%mm5, %%mm2 \n" \
"paddusw %%mm5, %%mm3 \n" \
"paddw %%mm6, %%mm0 \n" \
"paddw %%mm6, %%mm1 \n" \
"paddw %%mm6, %%mm2 \n" \
"paddw %%mm6, %%mm3 \n" \
"psubusw %%mm6, %%mm0 \n" \
"psubusw %%mm6, %%mm1 \n" \
"psubusw %%mm6, %%mm2 \n" \
"psubusw %%mm6, %%mm3 \n" \
"psraw $3, %%mm0 \n" \
"psraw $3, %%mm1 \n" \
"psraw $3, %%mm2 \n" \
"psraw $3, %%mm3 \n" \
\
"movq %%mm0, %%mm7 \n" \
"punpcklwd %%mm2, %%mm0 \n" /*A*/ \
"punpckhwd %%mm2, %%mm7 \n" /*C*/ \
"movq %%mm1, %%mm2 \n" \
"punpcklwd %%mm3, %%mm1 \n" /*B*/ \
"punpckhwd %%mm3, %%mm2 \n" /*D*/ \
"movq %%mm0, %%mm3 \n" \
"punpcklwd %%mm1, %%mm0 \n" /*A*/ \
"punpckhwd %%mm7, %%mm3 \n" /*C*/ \
"punpcklwd %%mm2, %%mm7 \n" /*B*/ \
"punpckhwd %%mm2, %%mm1 \n" /*D*/ \
\
"movq %%mm0, " #dst0 " \n" \
"movq %%mm7, " #dst1 " \n" \
"movq %%mm3, " #dst2 " \n" \
"movq %%mm1, " #dst3 " \n"
 
__asm__ volatile(
"movd %2, %%mm4 \n"
"movd %3, %%mm5 \n"
"movd %4, %%mm6 \n"
"packssdw %%mm4, %%mm4 \n"
"packssdw %%mm5, %%mm5 \n"
"packssdw %%mm6, %%mm6 \n"
"packssdw %%mm4, %%mm4 \n"
"packssdw %%mm5, %%mm5 \n"
"packssdw %%mm6, %%mm6 \n"
REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
: : "r" (src), "r" (dst), "g" (threshold1+1), "g" (threshold1+5), "g" (threshold1-4) //FIXME maybe more accurate then needed?
);
dst[0] = (src[0] + 4) >> 3;
}
 
static void softthresh_mmx(int16_t dst[64], const int16_t src[64],
int qp, const uint8_t *permutation)
{
int bias = 0; //FIXME
unsigned int threshold1;
 
threshold1 = qp*((1<<4) - bias) - 1;
 
#undef REQUANT_CORE
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
"movq " #src0 ", %%mm0 \n" \
"movq " #src1 ", %%mm1 \n" \
"pxor %%mm6, %%mm6 \n" \
"pxor %%mm7, %%mm7 \n" \
"pcmpgtw %%mm0, %%mm6 \n" \
"pcmpgtw %%mm1, %%mm7 \n" \
"pxor %%mm6, %%mm0 \n" \
"pxor %%mm7, %%mm1 \n" \
"psubusw %%mm4, %%mm0 \n" \
"psubusw %%mm4, %%mm1 \n" \
"pxor %%mm6, %%mm0 \n" \
"pxor %%mm7, %%mm1 \n" \
"movq " #src2 ", %%mm2 \n" \
"movq " #src3 ", %%mm3 \n" \
"pxor %%mm6, %%mm6 \n" \
"pxor %%mm7, %%mm7 \n" \
"pcmpgtw %%mm2, %%mm6 \n" \
"pcmpgtw %%mm3, %%mm7 \n" \
"pxor %%mm6, %%mm2 \n" \
"pxor %%mm7, %%mm3 \n" \
"psubusw %%mm4, %%mm2 \n" \
"psubusw %%mm4, %%mm3 \n" \
"pxor %%mm6, %%mm2 \n" \
"pxor %%mm7, %%mm3 \n" \
\
"paddsw %%mm5, %%mm0 \n" \
"paddsw %%mm5, %%mm1 \n" \
"paddsw %%mm5, %%mm2 \n" \
"paddsw %%mm5, %%mm3 \n" \
"psraw $3, %%mm0 \n" \
"psraw $3, %%mm1 \n" \
"psraw $3, %%mm2 \n" \
"psraw $3, %%mm3 \n" \
\
"movq %%mm0, %%mm7 \n" \
"punpcklwd %%mm2, %%mm0 \n" /*A*/ \
"punpckhwd %%mm2, %%mm7 \n" /*C*/ \
"movq %%mm1, %%mm2 \n" \
"punpcklwd %%mm3, %%mm1 \n" /*B*/ \
"punpckhwd %%mm3, %%mm2 \n" /*D*/ \
"movq %%mm0, %%mm3 \n" \
"punpcklwd %%mm1, %%mm0 \n" /*A*/ \
"punpckhwd %%mm7, %%mm3 \n" /*C*/ \
"punpcklwd %%mm2, %%mm7 \n" /*B*/ \
"punpckhwd %%mm2, %%mm1 \n" /*D*/ \
\
"movq %%mm0, " #dst0 " \n" \
"movq %%mm7, " #dst1 " \n" \
"movq %%mm3, " #dst2 " \n" \
"movq %%mm1, " #dst3 " \n"
 
__asm__ volatile(
"movd %2, %%mm4 \n"
"movd %3, %%mm5 \n"
"packssdw %%mm4, %%mm4 \n"
"packssdw %%mm5, %%mm5 \n"
"packssdw %%mm4, %%mm4 \n"
"packssdw %%mm5, %%mm5 \n"
REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
: : "r" (src), "r" (dst), "g" (threshold1), "rm" (4) //FIXME maybe more accurate then needed?
);
 
dst[0] = (src[0] + 4) >> 3;
}
 
static void store_slice_mmx(uint8_t *dst, const int16_t *src,
int dst_stride, int src_stride,
int width, int height, int log2_scale,
const uint8_t dither[8][8])
{
int y;
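/* Per line: add the dither row (pre-shifted right by log2_scale), shift
* back down by (MAX_LEVEL - log2_scale) and clamp to 8 bits with packuswb
* before storing. */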
 
for (y = 0; y < height; y++) {
uint8_t *dst1 = dst;
const int16_t *src1 = src;
__asm__ volatile(
"movq (%3), %%mm3 \n"
"movq (%3), %%mm4 \n"
"movd %4, %%mm2 \n"
"pxor %%mm0, %%mm0 \n"
"punpcklbw %%mm0, %%mm3 \n"
"punpckhbw %%mm0, %%mm4 \n"
"psraw %%mm2, %%mm3 \n"
"psraw %%mm2, %%mm4 \n"
"movd %5, %%mm2 \n"
"1: \n"
"movq (%0), %%mm0 \n"
"movq 8(%0), %%mm1 \n"
"paddw %%mm3, %%mm0 \n"
"paddw %%mm4, %%mm1 \n"
"psraw %%mm2, %%mm0 \n"
"psraw %%mm2, %%mm1 \n"
"packuswb %%mm1, %%mm0 \n"
"movq %%mm0, (%1) \n"
"add $16, %0 \n"
"add $8, %1 \n"
"cmp %2, %1 \n"
" jb 1b \n"
: "+r" (src1), "+r"(dst1)
: "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(MAX_LEVEL - log2_scale)
);
src += src_stride;
dst += dst_stride;
}
}
 
#endif /* HAVE_MMX_INLINE */
 
av_cold void ff_spp_init_x86(SPPContext *s)
{
#if HAVE_MMX_INLINE
int cpu_flags = av_get_cpu_flags();
 
if (cpu_flags & AV_CPU_FLAG_MMX) {
s->store_slice = store_slice_mmx;
switch (s->mode) {
case 0: s->requantize = hardthresh_mmx; break;
case 1: s->requantize = softthresh_mmx; break;
}
}
#endif
}
/contrib/sdk/sources/ffmpeg/libavfilter/x86/vf_yadif.asm
0,0 → 1,252
;*****************************************************************************
;* x86-optimized functions for yadif filter
;*
;* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
;* Copyright (c) 2013 Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License along
;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
;******************************************************************************
 
%include "libavutil/x86/x86util.asm"
 
SECTION_RODATA
 
pb_1: times 16 db 1
pw_1: times 8 dw 1
 
SECTION .text
 
%macro CHECK 2
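; Evaluate one spatial prediction direction: m5 receives the average of the
; two pixels along the (%1, %2) diagonal (the interpolation candidate) and
; m2 the matching score, a sum of absolute differences over three pixel
; pairs.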
movu m2, [curq+t1+%1]
movu m3, [curq+t0+%2]
mova m4, m2
mova m5, m2
pxor m4, m3
pavgb m5, m3
pand m4, [pb_1]
psubusb m5, m4
%if mmsize == 16
psrldq m5, 1
%else
psrlq m5, 8
%endif
punpcklbw m5, m7
mova m4, m2
psubusb m2, m3
psubusb m3, m4
pmaxub m2, m3
mova m3, m2
mova m4, m2
%if mmsize == 16
psrldq m3, 1
psrldq m4, 2
%else
psrlq m3, 8
psrlq m4, 16
%endif
punpcklbw m2, m7
punpcklbw m3, m7
punpcklbw m4, m7
paddw m2, m3
paddw m2, m4
%endmacro
 
%macro CHECK1 0
mova m3, m0
pcmpgtw m3, m2
pminsw m0, m2
mova m6, m3
pand m5, m3
pandn m3, m1
por m3, m5
mova m1, m3
%endmacro
 
%macro CHECK2 0
paddw m6, [pw_1]
psllw m6, 14
paddsw m2, m6
mova m3, m0
pcmpgtw m3, m2
pminsw m0, m2
pand m5, m3
pandn m3, m1
por m3, m5
mova m1, m3
%endmacro
 
%macro LOAD 2
movh %1, %2
punpcklbw %1, m7
%endmacro
 
%macro FILTER 3
.loop%1:
pxor m7, m7
LOAD m0, [curq+t1]
LOAD m1, [curq+t0]
LOAD m2, [%2]
LOAD m3, [%3]
mova m4, m3
paddw m3, m2
psraw m3, 1
mova [rsp+ 0], m0
mova [rsp+16], m3
mova [rsp+32], m1
psubw m2, m4
ABS1 m2, m4
LOAD m3, [prevq+t1]
LOAD m4, [prevq+t0]
psubw m3, m0
psubw m4, m1
ABS1 m3, m5
ABS1 m4, m5
paddw m3, m4
psrlw m2, 1
psrlw m3, 1
pmaxsw m2, m3
LOAD m3, [nextq+t1]
LOAD m4, [nextq+t0]
psubw m3, m0
psubw m4, m1
ABS1 m3, m5
ABS1 m4, m5
paddw m3, m4
psrlw m3, 1
pmaxsw m2, m3
mova [rsp+48], m2
 
paddw m1, m0
paddw m0, m0
psubw m0, m1
psrlw m1, 1
ABS1 m0, m2
 
movu m2, [curq+t1-1]
movu m3, [curq+t0-1]
mova m4, m2
psubusb m2, m3
psubusb m3, m4
pmaxub m2, m3
%if mmsize == 16
mova m3, m2
psrldq m3, 2
%else
pshufw m3, m2, q0021
%endif
punpcklbw m2, m7
punpcklbw m3, m7
paddw m0, m2
paddw m0, m3
psubw m0, [pw_1]
 
CHECK -2, 0
CHECK1
CHECK -3, 1
CHECK2
CHECK 0, -2
CHECK1
CHECK 1, -3
CHECK2
 
mova m6, [rsp+48]
cmp DWORD r8m, 2
jge .end%1
LOAD m2, [%2+t1*2]
LOAD m4, [%3+t1*2]
LOAD m3, [%2+t0*2]
LOAD m5, [%3+t0*2]
paddw m2, m4
paddw m3, m5
psrlw m2, 1
psrlw m3, 1
mova m4, [rsp+ 0]
mova m5, [rsp+16]
mova m7, [rsp+32]
psubw m2, m4
psubw m3, m7
mova m0, m5
psubw m5, m4
psubw m0, m7
mova m4, m2
pminsw m2, m3
pmaxsw m3, m4
pmaxsw m2, m5
pminsw m3, m5
pmaxsw m2, m0
pminsw m3, m0
pxor m4, m4
pmaxsw m6, m3
psubw m4, m2
pmaxsw m6, m4
 
.end%1:
mova m2, [rsp+16]
mova m3, m2
psubw m2, m6
paddw m3, m6
pmaxsw m1, m2
pminsw m1, m3
packuswb m1, m1
 
movh [dstq], m1
add dstq, mmsize/2
add prevq, mmsize/2
add curq, mmsize/2
add nextq, mmsize/2
sub DWORD r4m, mmsize/2
jg .loop%1
%endmacro
 
%macro YADIF 0
%if ARCH_X86_32
cglobal yadif_filter_line, 4, 6, 8, 80, dst, prev, cur, next, w, prefs, \
mrefs, parity, mode
%else
cglobal yadif_filter_line, 4, 7, 8, 80, dst, prev, cur, next, w, prefs, \
mrefs, parity, mode
%endif
%if ARCH_X86_32
mov r4, r5mp
mov r5, r6mp
DECLARE_REG_TMP 4,5
%else
movsxd r5, DWORD r5m
movsxd r6, DWORD r6m
DECLARE_REG_TMP 5,6
%endif
 
cmp DWORD paritym, 0
je .parity0
FILTER 1, prevq, curq
jmp .ret
 
.parity0:
FILTER 0, curq, nextq
 
.ret:
RET
%endmacro
 
INIT_XMM ssse3
YADIF
INIT_XMM sse2
YADIF
%if ARCH_X86_32
INIT_MMX mmxext
YADIF
%endif
/contrib/sdk/sources/ffmpeg/libavfilter/x86/vf_yadif_init.c
0,0 → 1,99
/*
* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavfilter/yadif.h"
 
void ff_yadif_filter_line_mmxext(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
void ff_yadif_filter_line_sse2(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
void ff_yadif_filter_line_ssse3(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
 
void ff_yadif_filter_line_16bit_mmxext(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
void ff_yadif_filter_line_16bit_sse2(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
void ff_yadif_filter_line_16bit_ssse3(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
void ff_yadif_filter_line_16bit_sse4(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
 
void ff_yadif_filter_line_10bit_mmxext(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
void ff_yadif_filter_line_10bit_sse2(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
void ff_yadif_filter_line_10bit_ssse3(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
 
av_cold void ff_yadif_init_x86(YADIFContext *yadif)
{
#if HAVE_YASM
int cpu_flags = av_get_cpu_flags();
int bit_depth = (!yadif->csp) ? 8
: yadif->csp->comp[0].depth_minus1 + 1;
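/* Naming note: the "16bit" kernels cover depths 15-16, the "10bit" kernels
* cover 9-14, and the unsuffixed kernels handle 8-bit input. */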
 
if (bit_depth >= 15) {
#if ARCH_X86_32
if (EXTERNAL_MMXEXT(cpu_flags))
yadif->filter_line = ff_yadif_filter_line_16bit_mmxext;
#endif /* ARCH_X86_32 */
if (EXTERNAL_SSE2(cpu_flags))
yadif->filter_line = ff_yadif_filter_line_16bit_sse2;
if (EXTERNAL_SSSE3(cpu_flags))
yadif->filter_line = ff_yadif_filter_line_16bit_ssse3;
if (EXTERNAL_SSE4(cpu_flags))
yadif->filter_line = ff_yadif_filter_line_16bit_sse4;
} else if ( bit_depth >= 9 && bit_depth <= 14) {
#if ARCH_X86_32
if (EXTERNAL_MMXEXT(cpu_flags))
yadif->filter_line = ff_yadif_filter_line_10bit_mmxext;
#endif /* ARCH_X86_32 */
if (EXTERNAL_SSE2(cpu_flags))
yadif->filter_line = ff_yadif_filter_line_10bit_sse2;
if (EXTERNAL_SSSE3(cpu_flags))
yadif->filter_line = ff_yadif_filter_line_10bit_ssse3;
} else {
#if ARCH_X86_32
if (EXTERNAL_MMXEXT(cpu_flags))
yadif->filter_line = ff_yadif_filter_line_mmxext;
#endif /* ARCH_X86_32 */
if (EXTERNAL_SSE2(cpu_flags))
yadif->filter_line = ff_yadif_filter_line_sse2;
if (EXTERNAL_SSSE3(cpu_flags))
yadif->filter_line = ff_yadif_filter_line_ssse3;
}
#endif /* HAVE_YASM */
}
/contrib/sdk/sources/ffmpeg/libavfilter/x86/yadif-10.asm
0,0 → 1,282
;*****************************************************************************
;* x86-optimized functions for yadif filter
;*
;* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
;* Copyright (c) 2013 Daniel Kang <daniel.d.kang@gmail.com>
;* Copyright (c) 2011-2013 James Darnley <james.darnley@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License along
;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
;******************************************************************************
 
%include "libavutil/x86/x86util.asm"
 
SECTION_RODATA
 
pw_1: times 8 dw 1
 
SECTION .text
 
%macro PABS 2
%if cpuflag(ssse3)
pabsw %1, %1
%else
pxor %2, %2
pcmpgtw %2, %1
pxor %1, %2
psubw %1, %2
%endif
%endmacro
 
%macro PMAXUW 2
%if cpuflag(sse4)
pmaxuw %1, %2
%else
psubusw %1, %2
paddusw %1, %2
%endif
%endmacro
 
%macro CHECK 2
movu m2, [curq+t1+%1*2]
movu m3, [curq+t0+%2*2]
mova m4, m2
mova m5, m2
pxor m4, m3
pavgw m5, m3
pand m4, [pw_1]
psubusw m5, m4
%if mmsize == 16
psrldq m5, 2
%else
psrlq m5, 16
%endif
mova m4, m2
psubusw m2, m3
psubusw m3, m4
PMAXUW m2, m3
mova m3, m2
mova m4, m2
%if mmsize == 16
psrldq m3, 2
psrldq m4, 4
%else
psrlq m3, 16
psrlq m4, 32
%endif
paddw m2, m3
paddw m2, m4
%endmacro
 
%macro CHECK1 0
mova m3, m0
pcmpgtw m3, m2
pminsw m0, m2
mova m6, m3
pand m5, m3
pandn m3, m1
por m3, m5
mova m1, m3
%endmacro
 
; %macro CHECK2 0
; paddw m6, [pw_1]
; psllw m6, 14
; paddsw m2, m6
; mova m3, m0
; pcmpgtw m3, m2
; pminsw m0, m2
; pand m5, m3
; pandn m3, m1
; por m3, m5
; mova m1, m3
; %endmacro
 
; This version of CHECK2 is required for 14-bit samples. The left-shift trick
; in the old code is not large enough to correctly select pixels or scores.
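; With up to 14-bit samples the score words can themselves exceed 2^14, so
; adding (mask + 1) << 14 no longer pushes already-rejected candidates above
; every legitimate score; this variant selects with explicit masks instead.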
 
%macro CHECK2 0
mova m3, m0
pcmpgtw m0, m2
pand m0, m6
mova m6, m0
pand m5, m6
pand m2, m0
pandn m6, m1
pandn m0, m3
por m6, m5
por m0, m2
mova m1, m6
%endmacro
 
%macro LOAD 2
movu %1, %2
%endmacro
 
%macro FILTER 3
.loop%1:
pxor m7, m7
LOAD m0, [curq+t1]
LOAD m1, [curq+t0]
LOAD m2, [%2]
LOAD m3, [%3]
mova m4, m3
paddw m3, m2
psraw m3, 1
mova [rsp+ 0], m0
mova [rsp+16], m3
mova [rsp+32], m1
psubw m2, m4
PABS m2, m4
LOAD m3, [prevq+t1]
LOAD m4, [prevq+t0]
psubw m3, m0
psubw m4, m1
PABS m3, m5
PABS m4, m5
paddw m3, m4
psrlw m2, 1
psrlw m3, 1
pmaxsw m2, m3
LOAD m3, [nextq+t1]
LOAD m4, [nextq+t0]
psubw m3, m0
psubw m4, m1
PABS m3, m5
PABS m4, m5
paddw m3, m4
psrlw m3, 1
pmaxsw m2, m3
mova [rsp+48], m2
 
paddw m1, m0
paddw m0, m0
psubw m0, m1
psrlw m1, 1
PABS m0, m2
 
movu m2, [curq+t1-1*2]
movu m3, [curq+t0-1*2]
mova m4, m2
psubusw m2, m3
psubusw m3, m4
PMAXUW m2, m3
%if mmsize == 16
mova m3, m2
psrldq m3, 4
%else
mova m3, m2
psrlq m3, 32
%endif
paddw m0, m2
paddw m0, m3
psubw m0, [pw_1]
 
CHECK -2, 0
CHECK1
CHECK -3, 1
CHECK2
CHECK 0, -2
CHECK1
CHECK 1, -3
CHECK2
 
mova m6, [rsp+48]
cmp DWORD r8m, 2
jge .end%1
LOAD m2, [%2+t1*2]
LOAD m4, [%3+t1*2]
LOAD m3, [%2+t0*2]
LOAD m5, [%3+t0*2]
paddw m2, m4
paddw m3, m5
psrlw m2, 1
psrlw m3, 1
mova m4, [rsp+ 0]
mova m5, [rsp+16]
mova m7, [rsp+32]
psubw m2, m4
psubw m3, m7
mova m0, m5
psubw m5, m4
psubw m0, m7
mova m4, m2
pminsw m2, m3
pmaxsw m3, m4
pmaxsw m2, m5
pminsw m3, m5
pmaxsw m2, m0
pminsw m3, m0
pxor m4, m4
pmaxsw m6, m3
psubw m4, m2
pmaxsw m6, m4
 
.end%1:
mova m2, [rsp+16]
mova m3, m2
psubw m2, m6
paddw m3, m6
pmaxsw m1, m2
pminsw m1, m3
 
movu [dstq], m1
add dstq, mmsize-4
add prevq, mmsize-4
add curq, mmsize-4
add nextq, mmsize-4
sub DWORD r4m, mmsize/2-2
jg .loop%1
%endmacro
 
%macro YADIF 0
%if ARCH_X86_32
cglobal yadif_filter_line_10bit, 4, 6, 8, 80, dst, prev, cur, next, w, \
prefs, mrefs, parity, mode
%else
cglobal yadif_filter_line_10bit, 4, 7, 8, 80, dst, prev, cur, next, w, \
prefs, mrefs, parity, mode
%endif
%if ARCH_X86_32
mov r4, r5mp
mov r5, r6mp
DECLARE_REG_TMP 4,5
%else
movsxd r5, DWORD r5m
movsxd r6, DWORD r6m
DECLARE_REG_TMP 5,6
%endif
 
cmp DWORD paritym, 0
je .parity0
FILTER 1, prevq, curq
jmp .ret
 
.parity0:
FILTER 0, curq, nextq
 
.ret:
RET
%endmacro
 
INIT_XMM ssse3
YADIF
INIT_XMM sse2
YADIF
%if ARCH_X86_32
INIT_MMX mmxext
YADIF
%endif
/contrib/sdk/sources/ffmpeg/libavfilter/x86/yadif-16.asm
0,0 → 1,347
;*****************************************************************************
;* x86-optimized functions for yadif filter
;*
;* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
;* Copyright (c) 2013 Daniel Kang <daniel.d.kang@gmail.com>
;* Copyright (c) 2011-2013 James Darnley <james.darnley@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License along
;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
;******************************************************************************
 
%include "libavutil/x86/x86util.asm"
 
SECTION_RODATA
 
pw_1: times 8 dw 1
pw_8000: times 8 dw 0x8000
pd_1: times 4 dd 1
pd_8000: times 4 dd 0x8000
 
SECTION .text
 
%macro PIXSHIFT1 1
%if cpuflag(sse2)
psrldq %1, 2
%else
psrlq %1, 16
%endif
%endmacro
 
%macro PIXSHIFT2 1
%if cpuflag(sse2)
psrldq %1, 4
%else
psrlq %1, 32
%endif
%endmacro
 
%macro PABS 2
%if cpuflag(ssse3)
pabsd %1, %1
%else
pxor %2, %2
pcmpgtd %2, %1
pxor %1, %2
psubd %1, %2
%endif
%endmacro
 
%macro PACK 1
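; unsigned 16-bit pack: native packusdw on SSE4, otherwise biased into the
; signed range for packssdw and biased back afterwards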
%if cpuflag(sse4)
packusdw %1, %1
%else
psubd %1, [pd_8000]
packssdw %1, %1
paddw %1, [pw_8000]
%endif
%endmacro
 
%macro PMINSD 3
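; signed 32-bit minimum: native pminsd on SSE4, otherwise emulated with a
; compare mask and blend (%3 is a scratch register)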
%if cpuflag(sse4)
pminsd %1, %2
%else
mova %3, %2
pcmpgtd %3, %1
pand %1, %3
pandn %3, %2
por %1, %3
%endif
%endmacro
 
%macro PMAXSD 3
%if cpuflag(sse4)
pmaxsd %1, %2
%else
mova %3, %1
pcmpgtd %3, %2
pand %1, %3
pandn %3, %2
por %1, %3
%endif
%endmacro
 
%macro PMAXUW 2
%if cpuflag(sse4)
pmaxuw %1, %2
%else
psubusw %1, %2
paddusw %1, %2
%endif
%endmacro
 
%macro CHECK 2
movu m2, [curq+t1+%1*2]
movu m3, [curq+t0+%2*2]
mova m4, m2
mova m5, m2
pxor m4, m3
pavgw m5, m3
pand m4, [pw_1]
psubusw m5, m4
%if mmsize == 16
psrldq m5, 2
%else
psrlq m5, 16
%endif
punpcklwd m5, m7
mova m4, m2
psubusw m2, m3
psubusw m3, m4
PMAXUW m2, m3
mova m3, m2
mova m4, m2
%if mmsize == 16
psrldq m3, 2
psrldq m4, 4
%else
psrlq m3, 16
psrlq m4, 32
%endif
punpcklwd m2, m7
punpcklwd m3, m7
punpcklwd m4, m7
paddd m2, m3
paddd m2, m4
%endmacro
 
%macro CHECK1 0
mova m3, m0
pcmpgtd m3, m2
PMINSD m0, m2, m6
mova m6, m3
pand m5, m3
pandn m3, m1
por m3, m5
mova m1, m3
%endmacro
 
%macro CHECK2 0
paddd m6, [pd_1]
pslld m6, 30
paddd m2, m6
mova m3, m0
pcmpgtd m3, m2
PMINSD m0, m2, m4
pand m5, m3
pandn m3, m1
por m3, m5
mova m1, m3
%endmacro
 
; This version of CHECK2 has 3 fewer instructions on sets older than SSE4 but I
; am not sure whether it is any faster. A rewrite or refactor of the filter
; code should make it possible to eliminate the move instruction at the end. It
; exists to satisfy the expectation that the "score" values are in m1.
 
; %macro CHECK2 0
; mova m3, m0
; pcmpgtd m0, m2
; pand m0, m6
; mova m6, m0
; pand m5, m6
; pand m2, m0
; pandn m6, m1
; pandn m0, m3
; por m6, m5
; por m0, m2
; mova m1, m6
; %endmacro
 
%macro LOAD 2
movh %1, %2
punpcklwd %1, m7
%endmacro
 
%macro FILTER 3
.loop%1:
pxor m7, m7
LOAD m0, [curq+t1]
LOAD m1, [curq+t0]
LOAD m2, [%2]
LOAD m3, [%3]
mova m4, m3
paddd m3, m2
psrad m3, 1
mova [rsp+ 0], m0
mova [rsp+16], m3
mova [rsp+32], m1
psubd m2, m4
PABS m2, m4
LOAD m3, [prevq+t1]
LOAD m4, [prevq+t0]
psubd m3, m0
psubd m4, m1
PABS m3, m5
PABS m4, m5
paddd m3, m4
psrld m2, 1
psrld m3, 1
PMAXSD m2, m3, m6
LOAD m3, [nextq+t1]
LOAD m4, [nextq+t0]
psubd m3, m0
psubd m4, m1
PABS m3, m5
PABS m4, m5
paddd m3, m4
psrld m3, 1
PMAXSD m2, m3, m6
mova [rsp+48], m2
 
paddd m1, m0
paddd m0, m0
psubd m0, m1
psrld m1, 1
PABS m0, m2
 
movu m2, [curq+t1-1*2]
movu m3, [curq+t0-1*2]
mova m4, m2
psubusw m2, m3
psubusw m3, m4
PMAXUW m2, m3
%if mmsize == 16
mova m3, m2
psrldq m3, 4
%else
mova m3, m2
psrlq m3, 32
%endif
punpcklwd m2, m7
punpcklwd m3, m7
paddd m0, m2
paddd m0, m3
psubd m0, [pd_1]
 
CHECK -2, 0
CHECK1
CHECK -3, 1
CHECK2
CHECK 0, -2
CHECK1
CHECK 1, -3
CHECK2
 
mova m6, [rsp+48]
cmp DWORD r8m, 2
jge .end%1
LOAD m2, [%2+t1*2]
LOAD m4, [%3+t1*2]
LOAD m3, [%2+t0*2]
LOAD m5, [%3+t0*2]
paddd m2, m4
paddd m3, m5
psrld m2, 1
psrld m3, 1
mova m4, [rsp+ 0]
mova m5, [rsp+16]
mova m7, [rsp+32]
psubd m2, m4
psubd m3, m7
mova m0, m5
psubd m5, m4
psubd m0, m7
mova m4, m2
PMINSD m2, m3, m7
PMAXSD m3, m4, m7
PMAXSD m2, m5, m7
PMINSD m3, m5, m7
PMAXSD m2, m0, m7
PMINSD m3, m0, m7
pxor m4, m4
PMAXSD m6, m3, m7
psubd m4, m2
PMAXSD m6, m4, m7
 
.end%1:
mova m2, [rsp+16]
mova m3, m2
psubd m2, m6
paddd m3, m6
PMAXSD m1, m2, m7
PMINSD m1, m3, m7
PACK m1
 
movh [dstq], m1
add dstq, mmsize/2
add prevq, mmsize/2
add curq, mmsize/2
add nextq, mmsize/2
sub DWORD r4m, mmsize/4
jg .loop%1
%endmacro
 
%macro YADIF 0
%if ARCH_X86_32
cglobal yadif_filter_line_16bit, 4, 6, 8, 80, dst, prev, cur, next, w, \
prefs, mrefs, parity, mode
%else
cglobal yadif_filter_line_16bit, 4, 7, 8, 80, dst, prev, cur, next, w, \
prefs, mrefs, parity, mode
%endif
%if ARCH_X86_32
mov r4, r5mp
mov r5, r6mp
DECLARE_REG_TMP 4,5
%else
movsxd r5, DWORD r5m
movsxd r6, DWORD r6m
DECLARE_REG_TMP 5,6
%endif
 
cmp DWORD paritym, 0
je .parity0
FILTER 1, prevq, curq
jmp .ret
 
.parity0:
FILTER 0, curq, nextq
 
.ret:
RET
%endmacro
 
INIT_XMM sse4
YADIF
INIT_XMM ssse3
YADIF
INIT_XMM sse2
YADIF
%if ARCH_X86_32
INIT_MMX mmxext
YADIF
%endif
/contrib/sdk/sources/ffmpeg/libavfilter/yadif.h
0,0 → 1,74
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
 
#ifndef AVFILTER_YADIF_H
#define AVFILTER_YADIF_H
 
#include "libavutil/pixdesc.h"
#include "avfilter.h"
 
enum YADIFMode {
YADIF_MODE_SEND_FRAME = 0, ///< send 1 frame for each frame
YADIF_MODE_SEND_FIELD = 1, ///< send 1 frame for each field
YADIF_MODE_SEND_FRAME_NOSPATIAL = 2, ///< send 1 frame for each frame, skipping the spatial interlacing check
YADIF_MODE_SEND_FIELD_NOSPATIAL = 3, ///< send 1 frame for each field, skipping the spatial interlacing check
};
 
enum YADIFParity {
YADIF_PARITY_TFF = 0, ///< top field first
YADIF_PARITY_BFF = 1, ///< bottom field first
YADIF_PARITY_AUTO = -1, ///< auto detection
};
 
enum YADIFDeint {
YADIF_DEINT_ALL = 0, ///< deinterlace all frames
YADIF_DEINT_INTERLACED = 1, ///< only deinterlace frames marked as interlaced
};
 
typedef struct YADIFContext {
const AVClass *class;
 
enum YADIFMode mode;
enum YADIFParity parity;
enum YADIFDeint deint;
 
int frame_pending;
 
AVFrame *cur;
AVFrame *next;
AVFrame *prev;
AVFrame *out;
 
/**
* Filter one line; the x86 implementations selected by ff_yadif_init_x86()
* may impose alignment requirements on their inputs.
*/
void (*filter_line)(void *dst,
void *prev, void *cur, void *next,
int w, int prefs, int mrefs, int parity, int mode);
void (*filter_edges)(void *dst, void *prev, void *cur, void *next,
int w, int prefs, int mrefs, int parity, int mode);
 
const AVPixFmtDescriptor *csp;
int eof;
uint8_t *temp_line;
int temp_line_size;
} YADIFContext;
 
void ff_yadif_init_x86(YADIFContext *yadif);
 
#endif /* AVFILTER_YADIF_H */