/*
 * Copyright (c) 2010 Stefano Sabatini
 * Copyright (c) 2008 Victor Paesa
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * video presentation timestamp (PTS) modification filter
 */
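
/*
 * Illustrative expression examples (mirroring the public FFmpeg filter
 * documentation, shown here only for reference):
 *
 *   setpts=PTS-STARTPTS   shift timestamps so the output starts at zero
 *   setpts=0.5*PTS        halve every timestamp (fast-motion effect)
 *   setpts='N/(25*TB)'    derive 25 fps constant-frame-rate timestamps
 *                         from the frame counter N and the time base TB
 *   asetpts=N/SR/TB       derive audio timestamps from the running sample
 *                         count N and the sample rate SR
 */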

#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "FRAME_RATE",  ///< defined only for constant frame-rate video
    "INTERLACED",  ///< tell if the current frame is interlaced
    "N",           ///< frame / sample number (starting at zero)
    "NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio)
    "NB_SAMPLES",  ///< number of samples in the current frame (only audio)
    "POS",         ///< original position in the file of the frame
    "PREV_INPTS",  ///< previous  input PTS
    "PREV_INT",    ///< previous  input time in seconds
    "PREV_OUTPTS", ///< previous output PTS
    "PREV_OUTT",   ///< previous output time in seconds
    "PTS",         ///< original pts in the file of the frame
    "SAMPLE_RATE", ///< sample rate (only audio)
    "STARTPTS",    ///< PTS at start of movie
    "STARTT",      ///< time at start of movie
    "T",           ///< original time in the file of the frame
    "TB",          ///< timebase
    "RTCTIME",     ///< wallclock (RTC) time in microseconds
    "RTCSTART",    ///< wallclock (RTC) time at the start of the movie in microseconds
    "S",           //   Number of samples in the current frame
    "SR",          //   Audio sample rate
    NULL
};

enum var_name {
    VAR_FRAME_RATE,
    VAR_INTERLACED,
    VAR_N,
    VAR_NB_CONSUMED_SAMPLES,
    VAR_NB_SAMPLES,
    VAR_POS,
    VAR_PREV_INPTS,
    VAR_PREV_INT,
    VAR_PREV_OUTPTS,
    VAR_PREV_OUTT,
    VAR_PTS,
    VAR_SAMPLE_RATE,
    VAR_STARTPTS,
    VAR_STARTT,
    VAR_T,
    VAR_TB,
    VAR_RTCTIME,
    VAR_RTCSTART,
    VAR_S,
    VAR_SR,
    VAR_VARS_NB
};

typedef struct {
    const AVClass *class;
    char *expr_str;
    AVExpr *expr;
    double var_values[VAR_VARS_NB];
    enum AVMediaType type;
} SetPTSContext;

static av_cold int init(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    int ret;

    if ((ret = av_expr_parse(&setpts->expr, setpts->expr_str,
                             var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", setpts->expr_str);
        return ret;
    }

    setpts->var_values[VAR_N]           = 0.0;
    setpts->var_values[VAR_S]           = 0.0;
    setpts->var_values[VAR_PREV_INPTS]  = NAN;
    setpts->var_values[VAR_PREV_INT]    = NAN;
    setpts->var_values[VAR_PREV_OUTPTS] = NAN;
    setpts->var_values[VAR_PREV_OUTT]   = NAN;
    setpts->var_values[VAR_STARTPTS]    = NAN;
    setpts->var_values[VAR_STARTT]      = NAN;
    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SetPTSContext *setpts = ctx->priv;

    setpts->type = inlink->type;
    setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
    setpts->var_values[VAR_RTCSTART] = av_gettime();

    setpts->var_values[VAR_SR] =
    setpts->var_values[VAR_SAMPLE_RATE] =
        setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;

    setpts->var_values[VAR_FRAME_RATE] = inlink->frame_rate.num && inlink->frame_rate.den ?
        av_q2d(inlink->frame_rate) : NAN;

    av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
           setpts->var_values[VAR_TB],
           setpts->var_values[VAR_FRAME_RATE],
           setpts->var_values[VAR_SAMPLE_RATE]);
    return 0;
}

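/*
 * Expression evaluation works on doubles, while libavfilter timestamps are
 * int64_t: D2TS and TS2D convert between the two, mapping NAN to
 * AV_NOPTS_VALUE and back, and TS2T additionally rescales a timestamp to
 * seconds using the given time base.
 */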
#define D2TS(d)  (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))

#define BUF_SIZE 64

static inline char *double2int64str(char *buf, double v)
{
    if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
    else          snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
    return buf;
}

#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    SetPTSContext *setpts = inlink->dst->priv;
    int64_t in_pts = frame->pts;
    double d;

    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
        setpts->var_values[VAR_STARTT  ] = TS2T(frame->pts, inlink->time_base);
    }
    setpts->var_values[VAR_PTS       ] = TS2D(frame->pts);
    setpts->var_values[VAR_T         ] = TS2T(frame->pts, inlink->time_base);
    setpts->var_values[VAR_POS       ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
    setpts->var_values[VAR_RTCTIME   ] = av_gettime();

    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
        setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
    } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_S] = frame->nb_samples;
        setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
    }

    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
    frame->pts = D2TS(d);

    av_log(inlink->dst, AV_LOG_DEBUG,
           "N:%"PRId64" PTS:%s T:%f POS:%s",
           (int64_t)setpts->var_values[VAR_N],
           d2istr(setpts->var_values[VAR_PTS]),
           setpts->var_values[VAR_T],
           d2istr(setpts->var_values[VAR_POS]));
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_DEBUG, " INTERLACED:%"PRId64,
               (int64_t)setpts->var_values[VAR_INTERLACED]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_DEBUG, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
               (int64_t)setpts->var_values[VAR_NB_SAMPLES],
               (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
        break;
    }
    av_log(inlink->dst, AV_LOG_DEBUG, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));

    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
        setpts->var_values[VAR_N] += 1.0;
    } else {
        setpts->var_values[VAR_N] += frame->nb_samples;
    }

    setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(in_pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
    setpts->var_values[VAR_PREV_OUTT]   = TS2T(frame->pts, inlink->time_base);
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
    }
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    av_expr_free(setpts->expr);
    setpts->expr = NULL;
}

#define OFFSET(x) offsetof(SetPTSContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = FLAGS },
    { NULL }
};
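
/*
 * Illustrative sketch (not part of this filter; assumes
 * <libavfilter/avfilter.h>): one way the "expr" option above could be set
 * when the filter is instantiated programmatically. The instance name
 * "setpts0" and the expression are arbitrary.
 *
 *     AVFilterGraph   *graph = avfilter_graph_alloc();
 *     AVFilterContext *filt  = NULL;
 *     int ret = avfilter_graph_create_filter(&filt,
 *                                            avfilter_get_by_name("setpts"),
 *                                            "setpts0", "expr=PTS-STARTPTS",
 *                                            NULL, graph);
 *     if (ret < 0) {
 *         // creating the filter or applying the option failed
 *     }
 */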

#if CONFIG_SETPTS_FILTER
#define setpts_options options
AVFILTER_DEFINE_CLASS(setpts);

static const AVFilterPad avfilter_vf_setpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_setpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_setpts = {
    .name      = "setpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
    .init      = init,
    .uninit    = uninit,

    .priv_size = sizeof(SetPTSContext),
    .priv_class = &setpts_class,

    .inputs    = avfilter_vf_setpts_inputs,
    .outputs   = avfilter_vf_setpts_outputs,
};
#endif /* CONFIG_SETPTS_FILTER */

#if CONFIG_ASETPTS_FILTER

#define asetpts_options options
AVFILTER_DEFINE_CLASS(asetpts);

static const AVFilterPad asetpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad asetpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter avfilter_af_asetpts = {
    .name        = "asetpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
    .init        = init,
    .uninit      = uninit,
    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &asetpts_class,
    .inputs      = asetpts_inputs,
    .outputs     = asetpts_outputs,
};
#endif /* CONFIG_ASETPTS_FILTER */