Subversion Repositories Kolibri OS

Rev

Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
6147 serge 1
/*
2
 * Copyright (c) 2000-2003 Fabrice Bellard
3
 *
4
 * This file is part of FFmpeg.
5
 *
6
 * FFmpeg is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2.1 of the License, or (at your option) any later version.
10
 *
11
 * FFmpeg is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with FFmpeg; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
 */
20
 
21
/**
22
 * @file
23
 * multimedia converter based on the FFmpeg libraries
24
 */
25
 
26
#include "config.h"
27
#include 
28
#include 
29
#include 
30
#include 
31
#include 
32
#include 
33
#include 
34
 
35
#if HAVE_ISATTY
36
#if HAVE_IO_H
37
#include 
38
#endif
39
#if HAVE_UNISTD_H
40
#include 
41
#endif
42
#endif
43
 
44
#include "libavformat/avformat.h"
45
#include "libavdevice/avdevice.h"
46
#include "libswresample/swresample.h"
47
#include "libavutil/opt.h"
48
#include "libavutil/channel_layout.h"
49
#include "libavutil/parseutils.h"
50
#include "libavutil/samplefmt.h"
51
#include "libavutil/fifo.h"
52
#include "libavutil/internal.h"
53
#include "libavutil/intreadwrite.h"
54
#include "libavutil/dict.h"
55
#include "libavutil/mathematics.h"
56
#include "libavutil/pixdesc.h"
57
#include "libavutil/avstring.h"
58
#include "libavutil/libm.h"
59
#include "libavutil/imgutils.h"
60
#include "libavutil/timestamp.h"
61
#include "libavutil/bprint.h"
62
#include "libavutil/time.h"
63
#include "libavutil/threadmessage.h"
64
#include "libavcodec/mathops.h"
65
#include "libavformat/os_support.h"
66
 
67
# include "libavfilter/avcodec.h"
68
# include "libavfilter/avfilter.h"
69
# include "libavfilter/buffersrc.h"
70
# include "libavfilter/buffersink.h"
71
 
72
#if HAVE_SYS_RESOURCE_H
73
#include 
74
#include 
75
#include 
76
#elif HAVE_GETPROCESSTIMES
77
#include 
78
#endif
79
#if HAVE_GETPROCESSMEMORYINFO
80
#include 
81
#include 
82
#endif
83
#if HAVE_SETCONSOLECTRLHANDLER
84
#include 
85
#endif
86
 
87
 
88
#if HAVE_SYS_SELECT_H
89
#include 
90
#endif
91
 
92
#if HAVE_TERMIOS_H
93
#include 
94
#include 
95
#include 
96
#include 
97
#elif HAVE_KBHIT
98
#include 
99
#endif
100
 
101
#if HAVE_PTHREADS
102
#include 
103
#endif
104
 
105
#include 
106
 
107
#include "ffmpeg.h"
108
#include "cmdutils.h"
109
 
110
#include "libavutil/avassert.h"
111
 
112
const char program_name[] = "ffmpeg";
113
const int program_birth_year = 2000;
114
 
115
static FILE *vstats_file;
116
 
117
const char *const forced_keyframes_const_names[] = {
118
    "n",
119
    "n_forced",
120
    "prev_forced_n",
121
    "prev_forced_t",
122
    "t",
123
    NULL
124
};
125
 
126
static void do_video_stats(OutputStream *ost, int frame_size);
127
static int64_t getutime(void);
128
static int64_t getmaxrss(void);
129
 
130
static int run_as_daemon  = 0;
131
static int nb_frames_dup = 0;
132
static int nb_frames_drop = 0;
133
static int64_t decode_error_stat[2];
134
 
135
static int current_time;
136
AVIOContext *progress_avio = NULL;
137
 
138
static uint8_t *subtitle_out;
139
 
140
InputStream **input_streams = NULL;
141
int        nb_input_streams = 0;
142
InputFile   **input_files   = NULL;
143
int        nb_input_files   = 0;
144
 
145
OutputStream **output_streams = NULL;
146
int         nb_output_streams = 0;
147
OutputFile   **output_files   = NULL;
148
int         nb_output_files   = 0;
149
 
150
FilterGraph **filtergraphs;
151
int        nb_filtergraphs;
152
 
153
#if HAVE_TERMIOS_H
154
 
155
/* init terminal so that we can grab keys */
156
static struct termios oldtty;
157
static int restore_tty;
158
#endif
159
 
160
#if HAVE_PTHREADS
161
static void free_input_threads(void);
162
#endif
163
 
164
/* sub2video hack:
165
   Convert subtitles to video with alpha to insert them in filter graphs.
166
   This is a temporary solution until libavfilter gets real subtitles support.
167
 */
168
 
169
static int sub2video_get_blank_frame(InputStream *ist)
170
{
171
    int ret;
172
    AVFrame *frame = ist->sub2video.frame;
173
 
174
    av_frame_unref(frame);
175
    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
176
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
178
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
179
        return ret;
180
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181
    return 0;
182
}
183
 
184
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
185
                                AVSubtitleRect *r)
186
{
187
    uint32_t *pal, *dst2;
188
    uint8_t *src, *src2;
189
    int x, y;
190
 
191
    if (r->type != SUBTITLE_BITMAP) {
192
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
193
        return;
194
    }
195
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197
            r->x, r->y, r->w, r->h, w, h
198
        );
199
        return;
200
    }
201
 
202
    dst += r->y * dst_linesize + r->x * 4;
203
    src = r->pict.data[0];
204
    pal = (uint32_t *)r->pict.data[1];
205
    for (y = 0; y < r->h; y++) {
206
        dst2 = (uint32_t *)dst;
207
        src2 = src;
208
        for (x = 0; x < r->w; x++)
209
            *(dst2++) = pal[*(src2++)];
210
        dst += dst_linesize;
211
        src += r->pict.linesize[0];
212
    }
213
}
214
 
215
static void sub2video_push_ref(InputStream *ist, int64_t pts)
216
{
217
    AVFrame *frame = ist->sub2video.frame;
218
    int i;
219
 
220
    av_assert1(frame->data[0]);
221
    ist->sub2video.last_pts = frame->pts = pts;
222
    for (i = 0; i < ist->nb_filters; i++)
223
        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
225
                                     AV_BUFFERSRC_FLAG_PUSH);
226
}
227
 
228
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229
{
230
    AVFrame *frame = ist->sub2video.frame;
231
    int8_t *dst;
232
    int     dst_linesize;
233
    int num_rects, i;
234
    int64_t pts, end_pts;
235
 
236
    if (!frame)
237
        return;
238
    if (sub) {
239
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240
                                 AV_TIME_BASE_Q, ist->st->time_base);
241
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time   * 1000LL,
242
                                 AV_TIME_BASE_Q, ist->st->time_base);
243
        num_rects = sub->num_rects;
244
    } else {
245
        pts       = ist->sub2video.end_pts;
246
        end_pts   = INT64_MAX;
247
        num_rects = 0;
248
    }
249
    if (sub2video_get_blank_frame(ist) < 0) {
250
        av_log(ist->dec_ctx, AV_LOG_ERROR,
251
               "Impossible to get a blank canvas.\n");
252
        return;
253
    }
254
    dst          = frame->data    [0];
255
    dst_linesize = frame->linesize[0];
256
    for (i = 0; i < num_rects; i++)
257
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258
    sub2video_push_ref(ist, pts);
259
    ist->sub2video.end_pts = end_pts;
260
}
261
 
262
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
263
{
264
    InputFile *infile = input_files[ist->file_index];
265
    int i, j, nb_reqs;
266
    int64_t pts2;
267
 
268
    /* When a frame is read from a file, examine all sub2video streams in
269
       the same file and send the sub2video frame again. Otherwise, decoded
270
       video frames could be accumulating in the filter graph while a filter
271
       (possibly overlay) is desperately waiting for a subtitle frame. */
272
    for (i = 0; i < infile->nb_streams; i++) {
273
        InputStream *ist2 = input_streams[infile->ist_index + i];
274
        if (!ist2->sub2video.frame)
275
            continue;
276
        /* subtitles seem to be usually muxed ahead of other streams;
277
           if not, subtracting a larger time here is necessary */
278
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279
        /* do not send the heartbeat frame if the subtitle is already ahead */
280
        if (pts2 <= ist2->sub2video.last_pts)
281
            continue;
282
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283
            sub2video_update(ist2, NULL);
284
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
286
        if (nb_reqs)
287
            sub2video_push_ref(ist2, pts2);
288
    }
289
}
290
 
291
static void sub2video_flush(InputStream *ist)
292
{
293
    int i;
294
 
295
    if (ist->sub2video.end_pts < INT64_MAX)
296
        sub2video_update(ist, NULL);
297
    for (i = 0; i < ist->nb_filters; i++)
298
        av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
299
}
300
 
301
/* end of sub2video hack */
302
 
303
static void term_exit_sigsafe(void)
304
{
305
#if HAVE_TERMIOS_H
306
    if(restore_tty)
307
        tcsetattr (0, TCSANOW, &oldtty);
308
#endif
309
}
310
 
311
void term_exit(void)
312
{
313
    av_log(NULL, AV_LOG_QUIET, "%s", "");
314
    term_exit_sigsafe();
315
}
316
 
317
static volatile int received_sigterm = 0;
318
static volatile int received_nb_signals = 0;
319
static volatile int transcode_init_done = 0;
320
static volatile int ffmpeg_exited = 0;
321
static int main_return_code = 0;
322
 
323
static void
324
sigterm_handler(int sig)
325
{
326
    received_sigterm = sig;
327
    received_nb_signals++;
328
    term_exit_sigsafe();
329
    if(received_nb_signals > 3) {
330
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331
                           strlen("Received > 3 system signals, hard exiting\n"));
332
 
333
        exit(123);
334
    }
335
}
336
 
337
#if HAVE_SETCONSOLECTRLHANDLER
338
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
339
{
340
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
341
 
342
    switch (fdwCtrlType)
343
    {
344
    case CTRL_C_EVENT:
345
    case CTRL_BREAK_EVENT:
346
        sigterm_handler(SIGINT);
347
        return TRUE;
348
 
349
    case CTRL_CLOSE_EVENT:
350
    case CTRL_LOGOFF_EVENT:
351
    case CTRL_SHUTDOWN_EVENT:
352
        sigterm_handler(SIGTERM);
353
        /* Basically, with these 3 events, when we return from this method the
354
           process is hard terminated, so stall as long as we need to
355
           to try and let the main thread(s) clean up and gracefully terminate
356
           (we have at most 5 seconds, but should be done far before that). */
357
        while (!ffmpeg_exited) {
358
            Sleep(0);
359
        }
360
        return TRUE;
361
 
362
    default:
363
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
364
        return FALSE;
365
    }
366
}
367
#endif
368
 
369
void term_init(void)
370
{
371
#if HAVE_TERMIOS_H
372
    if(!run_as_daemon){
373
        struct termios tty;
374
        int istty = 1;
375
#if HAVE_ISATTY
376
        istty = isatty(0) && isatty(2);
377
#endif
378
        if (istty && tcgetattr (0, &tty) == 0) {
379
            oldtty = tty;
380
            restore_tty = 1;
381
 
382
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383
                             |INLCR|IGNCR|ICRNL|IXON);
384
            tty.c_oflag |= OPOST;
385
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386
            tty.c_cflag &= ~(CSIZE|PARENB);
387
            tty.c_cflag |= CS8;
388
            tty.c_cc[VMIN] = 1;
389
            tty.c_cc[VTIME] = 0;
390
 
391
            tcsetattr (0, TCSANOW, &tty);
392
        }
393
        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
394
    }
395
#endif
396
 
397
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
398
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
399
#ifdef SIGXCPU
400
    signal(SIGXCPU, sigterm_handler);
401
#endif
402
#if HAVE_SETCONSOLECTRLHANDLER
403
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
404
#endif
405
}
406
 
407
/* read a key without blocking */
408
static int read_key(void)
409
{
410
    unsigned char ch;
411
#if HAVE_TERMIOS_H
412
    int n = 1;
413
    struct timeval tv;
414
    fd_set rfds;
415
 
416
    FD_ZERO(&rfds);
417
    FD_SET(0, &rfds);
418
    tv.tv_sec = 0;
419
    tv.tv_usec = 0;
420
    n = select(1, &rfds, NULL, NULL, &tv);
421
    if (n > 0) {
422
        n = read(0, &ch, 1);
423
        if (n == 1)
424
            return ch;
425
 
426
        return n;
427
    }
428
#elif HAVE_KBHIT
429
#    if HAVE_PEEKNAMEDPIPE
430
    static int is_pipe;
431
    static HANDLE input_handle;
432
    DWORD dw, nchars;
433
    if(!input_handle){
434
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
435
        is_pipe = !GetConsoleMode(input_handle, &dw);
436
    }
437
 
438
    if (is_pipe) {
439
        /* When running under a GUI, you will end here. */
440
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441
            // input pipe may have been closed by the program that ran ffmpeg
442
            return -1;
443
        }
444
        //Read it
445
        if(nchars != 0) {
446
            read(0, &ch, 1);
447
            return ch;
448
        }else{
449
            return -1;
450
        }
451
    }
452
#    endif
453
    if(kbhit())
454
        return(getch());
455
#endif
456
    return -1;
457
}
458
 
459
static int decode_interrupt_cb(void *ctx)
460
{
461
    return received_nb_signals > transcode_init_done;
462
}
463
 
464
const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
465
 
466
static void ffmpeg_cleanup(int ret)
467
{
468
    int i, j;
469
 
470
    if (do_benchmark) {
471
        int maxrss = getmaxrss() / 1024;
472
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
473
    }
474
 
475
    for (i = 0; i < nb_filtergraphs; i++) {
476
        FilterGraph *fg = filtergraphs[i];
477
        avfilter_graph_free(&fg->graph);
478
        for (j = 0; j < fg->nb_inputs; j++) {
479
            av_freep(&fg->inputs[j]->name);
480
            av_freep(&fg->inputs[j]);
481
        }
482
        av_freep(&fg->inputs);
483
        for (j = 0; j < fg->nb_outputs; j++) {
484
            av_freep(&fg->outputs[j]->name);
485
            av_freep(&fg->outputs[j]);
486
        }
487
        av_freep(&fg->outputs);
488
        av_freep(&fg->graph_desc);
489
 
490
        av_freep(&filtergraphs[i]);
491
    }
492
    av_freep(&filtergraphs);
493
 
494
    av_freep(&subtitle_out);
495
 
496
    /* close files */
497
    for (i = 0; i < nb_output_files; i++) {
498
        OutputFile *of = output_files[i];
499
        AVFormatContext *s;
500
        if (!of)
501
            continue;
502
        s = of->ctx;
503
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
504
            avio_closep(&s->pb);
505
        avformat_free_context(s);
506
        av_dict_free(&of->opts);
507
 
508
        av_freep(&output_files[i]);
509
    }
510
    for (i = 0; i < nb_output_streams; i++) {
511
        OutputStream *ost = output_streams[i];
512
        AVBitStreamFilterContext *bsfc;
513
 
514
        if (!ost)
515
            continue;
516
 
517
        bsfc = ost->bitstream_filters;
518
        while (bsfc) {
519
            AVBitStreamFilterContext *next = bsfc->next;
520
            av_bitstream_filter_close(bsfc);
521
            bsfc = next;
522
        }
523
        ost->bitstream_filters = NULL;
524
        av_frame_free(&ost->filtered_frame);
525
        av_frame_free(&ost->last_frame);
526
 
527
        av_parser_close(ost->parser);
528
 
529
        av_freep(&ost->forced_keyframes);
530
        av_expr_free(ost->forced_keyframes_pexpr);
531
        av_freep(&ost->avfilter);
532
        av_freep(&ost->logfile_prefix);
533
 
534
        av_freep(&ost->audio_channels_map);
535
        ost->audio_channels_mapped = 0;
536
 
537
        avcodec_free_context(&ost->enc_ctx);
538
 
539
        av_freep(&output_streams[i]);
540
    }
541
#if HAVE_PTHREADS
542
    free_input_threads();
543
#endif
544
    for (i = 0; i < nb_input_files; i++) {
545
        avformat_close_input(&input_files[i]->ctx);
546
        av_freep(&input_files[i]);
547
    }
548
    for (i = 0; i < nb_input_streams; i++) {
549
        InputStream *ist = input_streams[i];
550
 
551
        av_frame_free(&ist->decoded_frame);
552
        av_frame_free(&ist->filter_frame);
553
        av_dict_free(&ist->decoder_opts);
554
        avsubtitle_free(&ist->prev_sub.subtitle);
555
        av_frame_free(&ist->sub2video.frame);
556
        av_freep(&ist->filters);
557
        av_freep(&ist->hwaccel_device);
558
 
559
        avcodec_free_context(&ist->dec_ctx);
560
 
561
        av_freep(&input_streams[i]);
562
    }
563
 
564
    if (vstats_file)
565
        fclose(vstats_file);
566
    av_freep(&vstats_filename);
567
 
568
    av_freep(&input_streams);
569
    av_freep(&input_files);
570
    av_freep(&output_streams);
571
    av_freep(&output_files);
572
 
573
    uninit_opts();
574
 
575
    avformat_network_deinit();
576
 
577
    if (received_sigterm) {
578
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
579
               (int) received_sigterm);
580
    } else if (ret && transcode_init_done) {
581
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
582
    }
583
    term_exit();
584
    ffmpeg_exited = 1;
585
}
586
 
587
void remove_avoptions(AVDictionary **a, AVDictionary *b)
588
{
589
    AVDictionaryEntry *t = NULL;
590
 
591
    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
592
        av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
593
    }
594
}
595
 
596
void assert_avoptions(AVDictionary *m)
597
{
598
    AVDictionaryEntry *t;
599
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
600
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
601
        exit_program(1);
602
    }
603
}
604
 
605
static void abort_codec_experimental(AVCodec *c, int encoder)
606
{
607
    exit_program(1);
608
}
609
 
610
static void update_benchmark(const char *fmt, ...)
611
{
612
    if (do_benchmark_all) {
613
        int64_t t = getutime();
614
        va_list va;
615
        char buf[1024];
616
 
617
        if (fmt) {
618
            va_start(va, fmt);
619
            vsnprintf(buf, sizeof(buf), fmt, va);
620
            va_end(va);
621
            av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
622
        }
623
        current_time = t;
624
    }
625
}
626
 
627
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
628
{
629
    int i;
630
    for (i = 0; i < nb_output_streams; i++) {
631
        OutputStream *ost2 = output_streams[i];
632
        ost2->finished |= ost == ost2 ? this_stream : others;
633
    }
634
}
635
 
636
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
637
{
638
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
639
    AVCodecContext          *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
640
    int ret;
641
 
642
    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
643
        ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
644
        if (ost->st->codec->extradata) {
645
            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
646
            ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
647
        }
648
    }
649
 
650
    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
651
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
652
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
653
 
654
    /*
655
     * Audio encoders may split the packets --  #frames in != #packets out.
656
     * But there is no reordering, so we can limit the number of output packets
657
     * by simply dropping them here.
658
     * Counting encoded video frames needs to be done separately because of
659
     * reordering, see do_video_out()
660
     */
661
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
662
        if (ost->frame_number >= ost->max_frames) {
663
            av_free_packet(pkt);
664
            return;
665
        }
666
        ost->frame_number++;
667
    }
668
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
669
        int i;
670
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
671
                                              NULL);
672
        ost->quality = sd ? AV_RL32(sd) : -1;
673
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
674
 
675
        for (i = 0; ierror); i++) {
676
            if (sd && i < sd[5])
677
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
678
            else
679
                ost->error[i] = -1;
680
        }
681
    }
682
 
683
    if (bsfc)
684
        av_packet_split_side_data(pkt);
685
 
686
    while (bsfc) {
687
        AVPacket new_pkt = *pkt;
688
        AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
689
                                                 bsfc->filter->name,
690
                                                 NULL, 0);
691
        int a = av_bitstream_filter_filter(bsfc, avctx,
692
                                           bsf_arg ? bsf_arg->value : NULL,
693
                                           &new_pkt.data, &new_pkt.size,
694
                                           pkt->data, pkt->size,
695
                                           pkt->flags & AV_PKT_FLAG_KEY);
696
FF_DISABLE_DEPRECATION_WARNINGS
697
        if(a == 0 && new_pkt.data != pkt->data
698
#if FF_API_DESTRUCT_PACKET
699
           && new_pkt.destruct
700
#endif
701
           ) {
702
FF_ENABLE_DEPRECATION_WARNINGS
703
            uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
704
            if(t) {
705
                memcpy(t, new_pkt.data, new_pkt.size);
706
                memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
707
                new_pkt.data = t;
708
                new_pkt.buf = NULL;
709
                a = 1;
710
            } else
711
                a = AVERROR(ENOMEM);
712
        }
713
        if (a > 0) {
714
            pkt->side_data = NULL;
715
            pkt->side_data_elems = 0;
716
            av_free_packet(pkt);
717
            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
718
                                           av_buffer_default_free, NULL, 0);
719
            if (!new_pkt.buf)
720
                exit_program(1);
721
        } else if (a < 0) {
722
            new_pkt = *pkt;
723
            av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
724
                   bsfc->filter->name, pkt->stream_index,
725
                   avctx->codec ? avctx->codec->name : "copy");
726
            print_error("", a);
727
            if (exit_on_error)
728
                exit_program(1);
729
        }
730
        *pkt = new_pkt;
731
 
732
        bsfc = bsfc->next;
733
    }
734
 
735
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
736
        if (pkt->dts != AV_NOPTS_VALUE &&
737
            pkt->pts != AV_NOPTS_VALUE &&
738
            pkt->dts > pkt->pts) {
739
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
740
                   pkt->dts, pkt->pts,
741
                   ost->file_index, ost->st->index);
742
            pkt->pts =
743
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
744
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
745
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
746
        }
747
     if(
748
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
749
        pkt->dts != AV_NOPTS_VALUE &&
750
        ost->last_mux_dts != AV_NOPTS_VALUE) {
751
      int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
752
      if (pkt->dts < max) {
753
        int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
754
        av_log(s, loglevel, "Non-monotonous DTS in output stream "
755
               "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
756
               ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
757
        if (exit_on_error) {
758
            av_log(NULL, AV_LOG_FATAL, "aborting.\n");
759
            exit_program(1);
760
        }
761
        av_log(s, loglevel, "changing to %"PRId64". This may result "
762
               "in incorrect timestamps in the output file.\n",
763
               max);
764
        if(pkt->pts >= pkt->dts)
765
            pkt->pts = FFMAX(pkt->pts, max);
766
        pkt->dts = max;
767
      }
768
     }
769
    }
770
    ost->last_mux_dts = pkt->dts;
771
 
772
    ost->data_size += pkt->size;
773
    ost->packets_written++;
774
 
775
    pkt->stream_index = ost->index;
776
 
777
    if (debug_ts) {
778
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
779
                "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
780
                av_get_media_type_string(ost->enc_ctx->codec_type),
781
                av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
782
                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
783
                pkt->size
784
              );
785
    }
786
 
787
    ret = av_interleaved_write_frame(s, pkt);
788
    if (ret < 0) {
789
        print_error("av_interleaved_write_frame()", ret);
790
        main_return_code = 1;
791
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
792
    }
793
    av_free_packet(pkt);
794
}
795
 
796
static void close_output_stream(OutputStream *ost)
797
{
798
    OutputFile *of = output_files[ost->file_index];
799
 
800
    ost->finished |= ENCODER_FINISHED;
801
    if (of->shortest) {
802
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
803
        of->recording_time = FFMIN(of->recording_time, end);
804
    }
805
}
806
 
807
static int check_recording_time(OutputStream *ost)
808
{
809
    OutputFile *of = output_files[ost->file_index];
810
 
811
    if (of->recording_time != INT64_MAX &&
812
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
813
                      AV_TIME_BASE_Q) >= 0) {
814
        close_output_stream(ost);
815
        return 0;
816
    }
817
    return 1;
818
}
819
 
820
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
821
                         AVFrame *frame)
822
{
823
    AVCodecContext *enc = ost->enc_ctx;
824
    AVPacket pkt;
825
    int got_packet = 0;
826
 
827
    av_init_packet(&pkt);
828
    pkt.data = NULL;
829
    pkt.size = 0;
830
 
831
    if (!check_recording_time(ost))
832
        return;
833
 
834
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
835
        frame->pts = ost->sync_opts;
836
    ost->sync_opts = frame->pts + frame->nb_samples;
837
    ost->samples_encoded += frame->nb_samples;
838
    ost->frames_encoded++;
839
 
840
    av_assert0(pkt.size || !pkt.data);
841
    update_benchmark(NULL);
842
    if (debug_ts) {
843
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
844
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
845
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
846
               enc->time_base.num, enc->time_base.den);
847
    }
848
 
849
    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
850
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
851
        exit_program(1);
852
    }
853
    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
854
 
855
    if (got_packet) {
856
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
857
 
858
        if (debug_ts) {
859
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
860
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
861
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
862
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
863
        }
864
 
865
        write_frame(s, &pkt, ost);
866
    }
867
}
868
 
869
static void do_subtitle_out(AVFormatContext *s,
870
                            OutputStream *ost,
871
                            InputStream *ist,
872
                            AVSubtitle *sub)
873
{
874
    int subtitle_out_max_size = 1024 * 1024;
875
    int subtitle_out_size, nb, i;
876
    AVCodecContext *enc;
877
    AVPacket pkt;
878
    int64_t pts;
879
 
880
    if (sub->pts == AV_NOPTS_VALUE) {
881
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
882
        if (exit_on_error)
883
            exit_program(1);
884
        return;
885
    }
886
 
887
    enc = ost->enc_ctx;
888
 
889
    if (!subtitle_out) {
890
        subtitle_out = av_malloc(subtitle_out_max_size);
891
        if (!subtitle_out) {
892
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
893
            exit_program(1);
894
        }
895
    }
896
 
897
    /* Note: DVB subtitle need one packet to draw them and one other
898
       packet to clear them */
899
    /* XXX: signal it in the codec context ? */
900
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
901
        nb = 2;
902
    else
903
        nb = 1;
904
 
905
    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
906
    pts = sub->pts;
907
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
908
        pts -= output_files[ost->file_index]->start_time;
909
    for (i = 0; i < nb; i++) {
910
        unsigned save_num_rects = sub->num_rects;
911
 
912
        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
913
        if (!check_recording_time(ost))
914
            return;
915
 
916
        sub->pts = pts;
917
        // start_display_time is required to be 0
918
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
919
        sub->end_display_time  -= sub->start_display_time;
920
        sub->start_display_time = 0;
921
        if (i == 1)
922
            sub->num_rects = 0;
923
 
924
        ost->frames_encoded++;
925
 
926
        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
927
                                                    subtitle_out_max_size, sub);
928
        if (i == 1)
929
            sub->num_rects = save_num_rects;
930
        if (subtitle_out_size < 0) {
931
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
932
            exit_program(1);
933
        }
934
 
935
        av_init_packet(&pkt);
936
        pkt.data = subtitle_out;
937
        pkt.size = subtitle_out_size;
938
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
939
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
940
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
941
            /* XXX: the pts correction is handled here. Maybe handling
942
               it in the codec would be better */
943
            if (i == 0)
944
                pkt.pts += 90 * sub->start_display_time;
945
            else
946
                pkt.pts += 90 * sub->end_display_time;
947
        }
948
        pkt.dts = pkt.pts;
949
        write_frame(s, &pkt, ost);
950
    }
951
}
952
 
953
static void do_video_out(AVFormatContext *s,
954
                         OutputStream *ost,
955
                         AVFrame *next_picture,
956
                         double sync_ipts)
957
{
958
    int ret, format_video_sync;
959
    AVPacket pkt;
960
    AVCodecContext *enc = ost->enc_ctx;
961
    AVCodecContext *mux_enc = ost->st->codec;
962
    int nb_frames, nb0_frames, i;
963
    double delta, delta0;
964
    double duration = 0;
965
    int frame_size = 0;
966
    InputStream *ist = NULL;
967
    AVFilterContext *filter = ost->filter->filter;
968
 
969
    if (ost->source_index >= 0)
970
        ist = input_streams[ost->source_index];
971
 
972
    if (filter->inputs[0]->frame_rate.num > 0 &&
973
        filter->inputs[0]->frame_rate.den > 0)
974
        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
975
 
976
    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
977
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
978
 
979
    if (!ost->filters_script &&
980
        !ost->filters &&
981
        next_picture &&
982
        ist &&
983
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
984
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
985
    }
986
 
987
    if (!next_picture) {
988
        //end, flushing
989
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
990
                                          ost->last_nb0_frames[1],
991
                                          ost->last_nb0_frames[2]);
992
    } else {
993
        delta0 = sync_ipts - ost->sync_opts;
994
        delta  = delta0 + duration;
995
 
996
        /* by default, we output a single frame */
997
        nb0_frames = 0;
998
        nb_frames = 1;
999
 
1000
        format_video_sync = video_sync_method;
1001
        if (format_video_sync == VSYNC_AUTO) {
1002
            if(!strcmp(s->oformat->name, "avi")) {
1003
                format_video_sync = VSYNC_VFR;
1004
            } else
1005
                format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1006
            if (   ist
1007
                && format_video_sync == VSYNC_CFR
1008
                && input_files[ist->file_index]->ctx->nb_streams == 1
1009
                && input_files[ist->file_index]->input_ts_offset == 0) {
1010
                format_video_sync = VSYNC_VSCFR;
1011
            }
1012
            if (format_video_sync == VSYNC_CFR && copy_ts) {
1013
                format_video_sync = VSYNC_VSCFR;
1014
            }
1015
        }
1016
 
1017
        if (delta0 < 0 &&
1018
            delta > 0 &&
1019
            format_video_sync != VSYNC_PASSTHROUGH &&
1020
            format_video_sync != VSYNC_DROP) {
1021
            double cor = FFMIN(-delta0, duration);
1022
            if (delta0 < -0.6) {
1023
                av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1024
            } else
1025
                av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
1026
            sync_ipts += cor;
1027
            duration -= cor;
1028
            delta0 += cor;
1029
        }
1030
 
1031
        switch (format_video_sync) {
1032
        case VSYNC_VSCFR:
1033
            if (ost->frame_number == 0 && delta - duration >= 0.5) {
1034
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1035
                delta = duration;
1036
                delta0 = 0;
1037
                ost->sync_opts = lrint(sync_ipts);
1038
            }
1039
        case VSYNC_CFR:
1040
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1041
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1042
                nb_frames = 0;
1043
            } else if (delta < -1.1)
1044
                nb_frames = 0;
1045
            else if (delta > 1.1) {
1046
                nb_frames = lrintf(delta);
1047
                if (delta0 > 1.1)
1048
                    nb0_frames = lrintf(delta0 - 0.6);
1049
            }
1050
            break;
1051
        case VSYNC_VFR:
1052
            if (delta <= -0.6)
1053
                nb_frames = 0;
1054
            else if (delta > 0.6)
1055
                ost->sync_opts = lrint(sync_ipts);
1056
            break;
1057
        case VSYNC_DROP:
1058
        case VSYNC_PASSTHROUGH:
1059
            ost->sync_opts = lrint(sync_ipts);
1060
            break;
1061
        default:
1062
            av_assert0(0);
1063
        }
1064
    }
1065
 
1066
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1067
    nb0_frames = FFMIN(nb0_frames, nb_frames);
1068
 
1069
    memmove(ost->last_nb0_frames + 1,
1070
            ost->last_nb0_frames,
1071
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1072
    ost->last_nb0_frames[0] = nb0_frames;
1073
 
1074
    if (nb0_frames == 0 && ost->last_droped) {
1075
        nb_frames_drop++;
1076
        av_log(NULL, AV_LOG_VERBOSE,
1077
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1078
               ost->frame_number, ost->st->index, ost->last_frame->pts);
1079
    }
1080
    if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1081
        if (nb_frames > dts_error_threshold * 30) {
1082
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1083
            nb_frames_drop++;
1084
            return;
1085
        }
1086
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1087
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1088
    }
1089
    ost->last_droped = nb_frames == nb0_frames && next_picture;
1090
 
1091
  /* duplicates frame if needed */
1092
  for (i = 0; i < nb_frames; i++) {
1093
    AVFrame *in_picture;
1094
    av_init_packet(&pkt);
1095
    pkt.data = NULL;
1096
    pkt.size = 0;
1097
 
1098
    if (i < nb0_frames && ost->last_frame) {
1099
        in_picture = ost->last_frame;
1100
    } else
1101
        in_picture = next_picture;
1102
 
1103
    if (!in_picture)
1104
        return;
1105
 
1106
    in_picture->pts = ost->sync_opts;
1107
 
1108
#if 1
1109
    if (!check_recording_time(ost))
1110
#else
1111
    if (ost->frame_number >= ost->max_frames)
1112
#endif
1113
        return;
1114
 
1115
    if (s->oformat->flags & AVFMT_RAWPICTURE &&
1116
        enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1117
        /* raw pictures are written as AVPicture structure to
1118
           avoid any copies. We support temporarily the older
1119
           method. */
1120
        if (in_picture->interlaced_frame)
1121
            mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1122
        else
1123
            mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1124
        pkt.data   = (uint8_t *)in_picture;
1125
        pkt.size   =  sizeof(AVPicture);
1126
        pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1127
        pkt.flags |= AV_PKT_FLAG_KEY;
1128
 
1129
        write_frame(s, &pkt, ost);
1130
    } else {
1131
        int got_packet, forced_keyframe = 0;
1132
        double pts_time;
1133
 
1134
        if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1135
            ost->top_field_first >= 0)
1136
            in_picture->top_field_first = !!ost->top_field_first;
1137
 
1138
        if (in_picture->interlaced_frame) {
1139
            if (enc->codec->id == AV_CODEC_ID_MJPEG)
1140
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1141
            else
1142
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1143
        } else
1144
            mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1145
 
1146
        in_picture->quality = enc->global_quality;
1147
        in_picture->pict_type = 0;
1148
 
1149
        pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1150
            in_picture->pts * av_q2d(enc->time_base) : NAN;
1151
        if (ost->forced_kf_index < ost->forced_kf_count &&
1152
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1153
            ost->forced_kf_index++;
1154
            forced_keyframe = 1;
1155
        } else if (ost->forced_keyframes_pexpr) {
1156
            double res;
1157
            ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1158
            res = av_expr_eval(ost->forced_keyframes_pexpr,
1159
                               ost->forced_keyframes_expr_const_values, NULL);
1160
            ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1161
                    ost->forced_keyframes_expr_const_values[FKF_N],
1162
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1163
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1164
                    ost->forced_keyframes_expr_const_values[FKF_T],
1165
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1166
                    res);
1167
            if (res) {
1168
                forced_keyframe = 1;
1169
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1170
                    ost->forced_keyframes_expr_const_values[FKF_N];
1171
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1172
                    ost->forced_keyframes_expr_const_values[FKF_T];
1173
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1174
            }
1175
 
1176
            ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1177
        } else if (   ost->forced_keyframes
1178
                   && !strncmp(ost->forced_keyframes, "source", 6)
1179
                   && in_picture->key_frame==1) {
1180
            forced_keyframe = 1;
1181
        }
1182
 
1183
        if (forced_keyframe) {
1184
            in_picture->pict_type = AV_PICTURE_TYPE_I;
1185
            av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1186
        }
1187
 
1188
        update_benchmark(NULL);
1189
        if (debug_ts) {
1190
            av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1191
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1192
                   av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1193
                   enc->time_base.num, enc->time_base.den);
1194
        }
1195
 
1196
        ost->frames_encoded++;
1197
 
1198
        ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1199
        update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1200
        if (ret < 0) {
1201
            av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1202
            exit_program(1);
1203
        }
1204
 
1205
        if (got_packet) {
1206
            if (debug_ts) {
1207
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1208
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1209
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1210
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1211
            }
1212
 
1213
            if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1214
                pkt.pts = ost->sync_opts;
1215
 
1216
            av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1217
 
1218
            if (debug_ts) {
1219
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1220
                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1221
                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1222
                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1223
            }
1224
 
1225
            frame_size = pkt.size;
1226
            write_frame(s, &pkt, ost);
1227
 
1228
            /* if two pass, output log */
1229
            if (ost->logfile && enc->stats_out) {
1230
                fprintf(ost->logfile, "%s", enc->stats_out);
1231
            }
1232
        }
1233
    }
1234
    ost->sync_opts++;
1235
    /*
1236
     * For video, number of frames in == number of packets out.
1237
     * But there may be reordering, so we can't throw away frames on encoder
1238
     * flush, we need to limit them here, before they go into encoder.
1239
     */
1240
    ost->frame_number++;
1241
 
1242
    if (vstats_filename && frame_size)
1243
        do_video_stats(ost, frame_size);
1244
  }
1245
 
1246
    if (!ost->last_frame)
1247
        ost->last_frame = av_frame_alloc();
1248
    av_frame_unref(ost->last_frame);
1249
    if (next_picture && ost->last_frame)
1250
        av_frame_ref(ost->last_frame, next_picture);
1251
    else
1252
        av_frame_free(&ost->last_frame);
1253
}
1254
 
1255
static double psnr(double d)
1256
{
1257
    return -10.0 * log(d) / log(10.0);
1258
}
1259
 
1260
static void do_video_stats(OutputStream *ost, int frame_size)
1261
{
1262
    AVCodecContext *enc;
1263
    int frame_number;
1264
    double ti1, bitrate, avg_bitrate;
1265
 
1266
    /* this is executed just the first time do_video_stats is called */
1267
    if (!vstats_file) {
1268
        vstats_file = fopen(vstats_filename, "w");
1269
        if (!vstats_file) {
1270
            perror("fopen");
1271
            exit_program(1);
1272
        }
1273
    }
1274
 
1275
    enc = ost->enc_ctx;
1276
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1277
        frame_number = ost->st->nb_frames;
1278
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1279
                ost->quality / (float)FF_QP2LAMBDA);
1280
 
1281
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1282
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1283
 
1284
        fprintf(vstats_file,"f_size= %6d ", frame_size);
1285
        /* compute pts value */
1286
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1287
        if (ti1 < 0.01)
1288
            ti1 = 0.01;
1289
 
1290
        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1291
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1292
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1293
               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1294
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1295
    }
1296
}
1297
 
1298
static void finish_output_stream(OutputStream *ost)
1299
{
1300
    OutputFile *of = output_files[ost->file_index];
1301
    int i;
1302
 
1303
    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1304
 
1305
    if (of->shortest) {
1306
        for (i = 0; i < of->ctx->nb_streams; i++)
1307
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1308
    }
1309
}
1310
 
1311
/**
1312
 * Get and encode new output from any of the filtergraphs, without causing
1313
 * activity.
1314
 *
1315
 * @return  0 for success, <0 for severe errors
1316
 */
1317
static int reap_filters(int flush)
1318
{
1319
    AVFrame *filtered_frame = NULL;
1320
    int i;
1321
 
1322
    /* Reap all buffers present in the buffer sinks */
1323
    for (i = 0; i < nb_output_streams; i++) {
1324
        OutputStream *ost = output_streams[i];
1325
        OutputFile    *of = output_files[ost->file_index];
1326
        AVFilterContext *filter;
1327
        AVCodecContext *enc = ost->enc_ctx;
1328
        int ret = 0;
1329
 
1330
        if (!ost->filter)
1331
            continue;
1332
        filter = ost->filter->filter;
1333
 
1334
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1335
            return AVERROR(ENOMEM);
1336
        }
1337
        filtered_frame = ost->filtered_frame;
1338
 
1339
        while (1) {
1340
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1341
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1342
                                               AV_BUFFERSINK_FLAG_NO_REQUEST);
1343
            if (ret < 0) {
1344
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1345
                    av_log(NULL, AV_LOG_WARNING,
1346
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1347
                } else if (flush && ret == AVERROR_EOF) {
1348
                    if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1349
                        do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1350
                }
1351
                break;
1352
            }
1353
            if (ost->finished) {
1354
                av_frame_unref(filtered_frame);
1355
                continue;
1356
            }
1357
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
1358
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1359
                AVRational tb = enc->time_base;
1360
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1361
 
1362
                tb.den <<= extra_bits;
1363
                float_pts =
1364
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1365
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1366
                float_pts /= 1 << extra_bits;
1367
                // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1368
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1369
 
1370
                filtered_frame->pts =
1371
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1372
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1373
            }
1374
            //if (ost->source_index >= 0)
1375
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1376
 
1377
            switch (filter->inputs[0]->type) {
1378
            case AVMEDIA_TYPE_VIDEO:
1379
                if (!ost->frame_aspect_ratio.num)
1380
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1381
 
1382
                if (debug_ts) {
1383
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1384
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1385
                            float_pts,
1386
                            enc->time_base.num, enc->time_base.den);
1387
                }
1388
 
1389
                do_video_out(of->ctx, ost, filtered_frame, float_pts);
1390
                break;
1391
            case AVMEDIA_TYPE_AUDIO:
1392
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1393
                    enc->channels != av_frame_get_channels(filtered_frame)) {
1394
                    av_log(NULL, AV_LOG_ERROR,
1395
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1396
                    break;
1397
                }
1398
                do_audio_out(of->ctx, ost, filtered_frame);
1399
                break;
1400
            default:
1401
                // TODO support subtitle filters
1402
                av_assert0(0);
1403
            }
1404
 
1405
            av_frame_unref(filtered_frame);
1406
        }
1407
    }
1408
 
1409
    return 0;
1410
}
1411
 
1412
static void print_final_stats(int64_t total_size)
1413
{
1414
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1415
    uint64_t subtitle_size = 0;
1416
    uint64_t data_size = 0;
1417
    float percent = -1.0;
1418
    int i, j;
1419
    int pass1_used = 1;
1420
 
1421
    for (i = 0; i < nb_output_streams; i++) {
1422
        OutputStream *ost = output_streams[i];
1423
        switch (ost->enc_ctx->codec_type) {
1424
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1425
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1426
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1427
            default:                 other_size += ost->data_size; break;
1428
        }
1429
        extra_size += ost->enc_ctx->extradata_size;
1430
        data_size  += ost->data_size;
1431
        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1432
            != AV_CODEC_FLAG_PASS1)
1433
            pass1_used = 0;
1434
    }
1435
 
1436
    if (data_size && total_size>0 && total_size >= data_size)
1437
        percent = 100.0 * (total_size - data_size) / data_size;
1438
 
1439
    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1440
           video_size / 1024.0,
1441
           audio_size / 1024.0,
1442
           subtitle_size / 1024.0,
1443
           other_size / 1024.0,
1444
           extra_size / 1024.0);
1445
    if (percent >= 0.0)
1446
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1447
    else
1448
        av_log(NULL, AV_LOG_INFO, "unknown");
1449
    av_log(NULL, AV_LOG_INFO, "\n");
1450
 
1451
    /* print verbose per-stream stats */
1452
    for (i = 0; i < nb_input_files; i++) {
1453
        InputFile *f = input_files[i];
1454
        uint64_t total_packets = 0, total_size = 0;
1455
 
1456
        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1457
               i, f->ctx->filename);
1458
 
1459
        for (j = 0; j < f->nb_streams; j++) {
1460
            InputStream *ist = input_streams[f->ist_index + j];
1461
            enum AVMediaType type = ist->dec_ctx->codec_type;
1462
 
1463
            total_size    += ist->data_size;
1464
            total_packets += ist->nb_packets;
1465
 
1466
            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
1467
                   i, j, media_type_string(type));
1468
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1469
                   ist->nb_packets, ist->data_size);
1470
 
1471
            if (ist->decoding_needed) {
1472
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1473
                       ist->frames_decoded);
1474
                if (type == AVMEDIA_TYPE_AUDIO)
1475
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1476
                av_log(NULL, AV_LOG_VERBOSE, "; ");
1477
            }
1478
 
1479
            av_log(NULL, AV_LOG_VERBOSE, "\n");
1480
        }
1481
 
1482
        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1483
               total_packets, total_size);
1484
    }
1485
 
1486
    for (i = 0; i < nb_output_files; i++) {
1487
        OutputFile *of = output_files[i];
1488
        uint64_t total_packets = 0, total_size = 0;
1489
 
1490
        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1491
               i, of->ctx->filename);
1492
 
1493
        for (j = 0; j < of->ctx->nb_streams; j++) {
1494
            OutputStream *ost = output_streams[of->ost_index + j];
1495
            enum AVMediaType type = ost->enc_ctx->codec_type;
1496
 
1497
            total_size    += ost->data_size;
1498
            total_packets += ost->packets_written;
1499
 
1500
            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
1501
                   i, j, media_type_string(type));
1502
            if (ost->encoding_needed) {
1503
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1504
                       ost->frames_encoded);
1505
                if (type == AVMEDIA_TYPE_AUDIO)
1506
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1507
                av_log(NULL, AV_LOG_VERBOSE, "; ");
1508
            }
1509
 
1510
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1511
                   ost->packets_written, ost->data_size);
1512
 
1513
            av_log(NULL, AV_LOG_VERBOSE, "\n");
1514
        }
1515
 
1516
        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1517
               total_packets, total_size);
1518
    }
1519
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1520
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1521
        if (pass1_used) {
1522
            av_log(NULL, AV_LOG_WARNING, "\n");
1523
        } else {
1524
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1525
        }
1526
    }
1527
}
1528
 
1529
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1530
{
1531
    char buf[1024];
1532
    AVBPrint buf_script;
1533
    OutputStream *ost;
1534
    AVFormatContext *oc;
1535
    int64_t total_size;
1536
    AVCodecContext *enc;
1537
    int frame_number, vid, i;
1538
    double bitrate;
1539
    int64_t pts = INT64_MIN;
1540
    static int64_t last_time = -1;
1541
    static int qp_histogram[52];
1542
    int hours, mins, secs, us;
1543
 
1544
    if (!print_stats && !is_last_report && !progress_avio)
1545
        return;
1546
 
1547
    if (!is_last_report) {
1548
        if (last_time == -1) {
1549
            last_time = cur_time;
1550
            return;
1551
        }
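        /* cur_time is expected in microseconds (av_gettime_relative() units),
           so the check below throttles periodic updates to one every 0.5 s;
           the final report is always printed. */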
1552
        if ((cur_time - last_time) < 500000)
1553
            return;
1554
        last_time = cur_time;
1555
    }
1556
 
1557
 
1558
    oc = output_files[0]->ctx;
1559
 
1560
    total_size = avio_size(oc->pb);
1561
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1562
        total_size = avio_tell(oc->pb);
1563
 
1564
    buf[0] = '\0';
1565
    vid = 0;
1566
    av_bprint_init(&buf_script, 0, 1);
1567
    for (i = 0; i < nb_output_streams; i++) {
1568
        float q = -1;
1569
        ost = output_streams[i];
1570
        enc = ost->enc_ctx;
1571
        if (!ost->stream_copy)
1572
            q = ost->quality / (float) FF_QP2LAMBDA;
1573
 
1574
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1575
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1576
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1577
                       ost->file_index, ost->index, q);
1578
        }
1579
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1580
            float fps, t = (cur_time-timer_start) / 1000000.0;
1581
 
1582
            frame_number = ost->frame_number;
1583
            fps = t > 1 ? frame_number / t : 0;
1584
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1585
                     frame_number, fps < 9.95, fps, q);
1586
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
1587
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
1588
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1589
                       ost->file_index, ost->index, q);
1590
            if (is_last_report)
1591
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1592
            if (qp_hist) {
1593
                int j;
1594
                int qp = lrintf(q);
1595
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1596
                    qp_histogram[qp]++;
1597
                for (j = 0; j < 32; j++)
1598
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1599
            }
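            /* Assuming psnr() is the -10*log10(x) helper defined earlier in this
               file, p below works out to 10*log10(255^2 * samples / SSE), i.e. the
               usual PSNR in dB for 8-bit video; the scale/4 for chroma assumes
               planes subsampled by 2 in each direction (4:2:0). */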
1600
 
1601
            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1602
                int j;
1603
                double error, error_sum = 0;
1604
                double scale, scale_sum = 0;
1605
                double p;
1606
                char type[3] = { 'Y','U','V' };
1607
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1608
                for (j = 0; j < 3; j++) {
1609
                    if (is_last_report) {
1610
                        error = enc->error[j];
1611
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1612
                    } else {
1613
                        error = ost->error[j];
1614
                        scale = enc->width * enc->height * 255.0 * 255.0;
1615
                    }
1616
                    if (j)
1617
                        scale /= 4;
1618
                    error_sum += error;
1619
                    scale_sum += scale;
1620
                    p = psnr(error / scale);
1621
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1622
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1623
                               ost->file_index, ost->index, type[j] | 32, p);
1624
                }
1625
                p = psnr(error_sum / scale_sum);
1626
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1627
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1628
                           ost->file_index, ost->index, p);
1629
            }
1630
            vid = 1;
1631
        }
1632
        /* compute min output value */
1633
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1634
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1635
                                          ost->st->time_base, AV_TIME_BASE_Q));
1636
        if (is_last_report)
1637
            nb_frames_drop += ost->last_droped;
1638
    }
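    /* pts now holds the largest end timestamp over all output streams in
       AV_TIME_BASE (microsecond) units; it is split into h:m:s below, and
       total_size * 8 / (pts / 1000.0) gives bits per millisecond, which is
       numerically the same as kbit/s. */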
1639
 
1640
    secs = FFABS(pts) / AV_TIME_BASE;
1641
    us = FFABS(pts) % AV_TIME_BASE;
1642
    mins = secs / 60;
1643
    secs %= 60;
1644
    hours = mins / 60;
1645
    mins %= 60;
1646
 
1647
    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1648
 
1649
    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1650
                                 "size=N/A time=");
1651
    else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1652
                                 "size=%8.0fkB time=", total_size / 1024.0);
1653
    if (pts < 0)
1654
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1655
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1656
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
1657
             (100 * us) / AV_TIME_BASE);
1658
 
1659
    if (bitrate < 0) {
1660
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1661
        av_bprintf(&buf_script, "bitrate=N/A\n");
1662
    }else{
1663
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1664
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1665
    }
1666
 
1667
    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1668
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1669
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1670
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1671
               hours, mins, secs, us);
1672
 
1673
    if (nb_frames_dup || nb_frames_drop)
1674
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1675
                nb_frames_dup, nb_frames_drop);
1676
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1677
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1678
 
1679
    if (print_stats || is_last_report) {
1680
        const char end = is_last_report ? '\n' : '\r';
1681
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1682
            fprintf(stderr, "%s    %c", buf, end);
1683
        } else
1684
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);
1685
 
1686
    fflush(stderr);
1687
    }
1688
 
1689
    if (progress_avio) {
1690
        av_bprintf(&buf_script, "progress=%s\n",
1691
                   is_last_report ? "end" : "continue");
1692
        avio_write(progress_avio, buf_script.str,
1693
                   FFMIN(buf_script.len, buf_script.size - 1));
1694
        avio_flush(progress_avio);
1695
        av_bprint_finalize(&buf_script, NULL);
1696
        if (is_last_report) {
1697
            avio_closep(&progress_avio);
1698
        }
1699
    }
1700
 
1701
    if (is_last_report)
1702
        print_final_stats(total_size);
1703
}
1704
 
1705
static void flush_encoders(void)
1706
{
1707
    int i, ret;
1708
 
1709
    for (i = 0; i < nb_output_streams; i++) {
1710
        OutputStream   *ost = output_streams[i];
1711
        AVCodecContext *enc = ost->enc_ctx;
1712
        AVFormatContext *os = output_files[ost->file_index]->ctx;
1713
        int stop_encoding = 0;
1714
 
1715
        if (!ost->encoding_needed)
1716
            continue;
1717
 
1718
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1719
            continue;
1720
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1721
            continue;
1722
 
1723
        for (;;) {
1724
            int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1725
            const char *desc;
1726
 
1727
            switch (enc->codec_type) {
1728
            case AVMEDIA_TYPE_AUDIO:
1729
                encode = avcodec_encode_audio2;
1730
                desc   = "Audio";
1731
                break;
1732
            case AVMEDIA_TYPE_VIDEO:
1733
                encode = avcodec_encode_video2;
1734
                desc   = "Video";
1735
                break;
1736
            default:
1737
                stop_encoding = 1;
1738
            }
1739
 
1740
            if (encode) {
1741
                AVPacket pkt;
1742
                int pkt_size;
1743
                int got_packet;
1744
                av_init_packet(&pkt);
1745
                pkt.data = NULL;
1746
                pkt.size = 0;
1747
 
1748
                update_benchmark(NULL);
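                /* With the encode_audio2/encode_video2 API, passing a NULL frame
                   drains the encoder's delayed packets; got_packet == 0 below
                   means the encoder is fully flushed. */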
1749
                ret = encode(enc, &pkt, NULL, &got_packet);
1750
                update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1751
                if (ret < 0) {
1752
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1753
                           desc,
1754
                           av_err2str(ret));
1755
                    exit_program(1);
1756
                }
1757
                if (ost->logfile && enc->stats_out) {
1758
                    fprintf(ost->logfile, "%s", enc->stats_out);
1759
                }
1760
                if (!got_packet) {
1761
                    stop_encoding = 1;
1762
                    break;
1763
                }
1764
                if (ost->finished & MUXER_FINISHED) {
1765
                    av_free_packet(&pkt);
1766
                    continue;
1767
                }
1768
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1769
                pkt_size = pkt.size;
1770
                write_frame(os, &pkt, ost);
1771
                if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1772
                    do_video_stats(ost, pkt_size);
1773
                }
1774
            }
1775
 
1776
            if (stop_encoding)
1777
                break;
1778
        }
1779
    }
1780
}
1781
 
1782
/*
1783
 * Check whether a packet from ist should be written into ost at this time
1784
 */
1785
static int check_output_constraints(InputStream *ist, OutputStream *ost)
1786
{
1787
    OutputFile *of = output_files[ost->file_index];
1788
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
1789
 
1790
    if (ost->source_index != ist_index)
1791
        return 0;
1792
 
1793
    if (ost->finished)
1794
        return 0;
1795
 
1796
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1797
        return 0;
1798
 
1799
    return 1;
1800
}
1801
 
1802
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1803
{
1804
    OutputFile *of = output_files[ost->file_index];
1805
    InputFile   *f = input_files [ist->file_index];
1806
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1807
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1808
    int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1809
    AVPicture pict;
1810
    AVPacket opkt;
1811
 
1812
    av_init_packet(&opkt);
1813
 
1814
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1815
        !ost->copy_initial_nonkeyframes)
1816
        return;
1817
 
1818
    if (pkt->pts == AV_NOPTS_VALUE) {
1819
        if (!ost->frame_number && ist->pts < start_time &&
1820
            !ost->copy_prior_start)
1821
            return;
1822
    } else {
1823
        if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1824
            !ost->copy_prior_start)
1825
            return;
1826
    }
1827
 
1828
    if (of->recording_time != INT64_MAX &&
1829
        ist->pts >= of->recording_time + start_time) {
1830
        close_output_stream(ost);
1831
        return;
1832
    }
1833
 
1834
    if (f->recording_time != INT64_MAX) {
1835
        start_time = f->ctx->start_time;
1836
        if (f->start_time != AV_NOPTS_VALUE)
1837
            start_time += f->start_time;
1838
        if (ist->pts >= f->recording_time + start_time) {
1839
            close_output_stream(ost);
1840
            return;
1841
        }
1842
    }
1843
 
1844
    /* force the input stream PTS */
1845
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1846
        ost->sync_opts++;
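    /* Rebase the copied packet's timestamps from the input stream's time base
       to the output stream's, shifting by ost_tb_start_time so the output
       starts near zero when an output start time (of->start_time) is set. */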
1847
 
1848
    if (pkt->pts != AV_NOPTS_VALUE)
1849
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1850
    else
1851
        opkt.pts = AV_NOPTS_VALUE;
1852
 
1853
    if (pkt->dts == AV_NOPTS_VALUE)
1854
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1855
    else
1856
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1857
    opkt.dts -= ost_tb_start_time;
1858
 
1859
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1860
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1861
        if(!duration)
1862
            duration = ist->dec_ctx->frame_size;
1863
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1864
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1865
                                               ost->st->time_base) - ost_tb_start_time;
1866
    }
1867
 
1868
    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1869
    opkt.flags    = pkt->flags;
1870
    // FIXME remove the following 2 lines; they shall be replaced by the bitstream filters
1871
    if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
1872
       && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1873
       && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1874
       && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1875
       ) {
1876
        int ret = av_parser_change(ost->parser, ost->st->codec,
1877
                             &opkt.data, &opkt.size,
1878
                             pkt->data, pkt->size,
1879
                             pkt->flags & AV_PKT_FLAG_KEY);
1880
        if (ret < 0) {
1881
            av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1882
                   av_err2str(ret));
1883
            exit_program(1);
1884
        }
1885
        if (ret) {
1886
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1887
            if (!opkt.buf)
1888
                exit_program(1);
1889
        }
1890
    } else {
1891
        opkt.data = pkt->data;
1892
        opkt.size = pkt->size;
1893
    }
1894
    av_copy_packet_side_data(&opkt, pkt);
1895
 
1896
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1897
        ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1898
        (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1899
        /* store AVPicture in AVPacket, as expected by the output format */
1900
        int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1901
        if (ret < 0) {
1902
            av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1903
                   av_err2str(ret));
1904
            exit_program(1);
1905
        }
1906
        opkt.data = (uint8_t *)&pict;
1907
        opkt.size = sizeof(AVPicture);
1908
        opkt.flags |= AV_PKT_FLAG_KEY;
1909
    }
1910
 
1911
    write_frame(of->ctx, &opkt, ost);
1912
}
1913
 
1914
int guess_input_channel_layout(InputStream *ist)
1915
{
1916
    AVCodecContext *dec = ist->dec_ctx;
1917
 
1918
    if (!dec->channel_layout) {
1919
        char layout_name[256];
1920
 
1921
        if (dec->channels > ist->guess_layout_max)
1922
            return 0;
1923
        dec->channel_layout = av_get_default_channel_layout(dec->channels);
1924
        if (!dec->channel_layout)
1925
            return 0;
1926
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
1927
                                     dec->channels, dec->channel_layout);
1928
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for  Input Stream "
1929
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1930
    }
1931
    return 1;
1932
}
1933
 
1934
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1935
{
1936
    AVFrame *decoded_frame, *f;
1937
    AVCodecContext *avctx = ist->dec_ctx;
1938
    int i, ret, err = 0, resample_changed;
1939
    AVRational decoded_frame_tb;
1940
 
1941
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1942
        return AVERROR(ENOMEM);
1943
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1944
        return AVERROR(ENOMEM);
1945
    decoded_frame = ist->decoded_frame;
1946
 
1947
    update_benchmark(NULL);
1948
    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1949
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1950
 
1951
    if (ret >= 0 && avctx->sample_rate <= 0) {
1952
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1953
        ret = AVERROR_INVALIDDATA;
1954
    }
1955
 
1956
    if (*got_output || ret<0)
1957
        decode_error_stat[ret<0] ++;
1958
 
1959
    if (ret < 0 && exit_on_error)
1960
        exit_program(1);
1961
 
1962
    if (!*got_output || ret < 0)
1963
        return ret;
1964
 
1965
    ist->samples_decoded += decoded_frame->nb_samples;
1966
    ist->frames_decoded++;
1967
 
1968
#if 1
1969
    /* increment next_dts to use for the case where the input stream does not
1970
       have timestamps or there are multiple frames in the packet */
1971
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1972
                     avctx->sample_rate;
1973
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1974
                     avctx->sample_rate;
1975
#endif
1976
 
1977
    resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
1978
                       ist->resample_channels       != avctx->channels               ||
1979
                       ist->resample_channel_layout != decoded_frame->channel_layout ||
1980
                       ist->resample_sample_rate    != decoded_frame->sample_rate;
1981
    if (resample_changed) {
1982
        char layout1[64], layout2[64];
1983
 
1984
        if (!guess_input_channel_layout(ist)) {
1985
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1986
                   "layout for Input Stream #%d.%d\n", ist->file_index,
1987
                   ist->st->index);
1988
            exit_program(1);
1989
        }
1990
        decoded_frame->channel_layout = avctx->channel_layout;
1991
 
1992
        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1993
                                     ist->resample_channel_layout);
1994
        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1995
                                     decoded_frame->channel_layout);
1996
 
1997
        av_log(NULL, AV_LOG_INFO,
1998
               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1999
               ist->file_index, ist->st->index,
2000
               ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
2001
               ist->resample_channels, layout1,
2002
               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2003
               avctx->channels, layout2);
2004
 
2005
        ist->resample_sample_fmt     = decoded_frame->format;
2006
        ist->resample_sample_rate    = decoded_frame->sample_rate;
2007
        ist->resample_channel_layout = decoded_frame->channel_layout;
2008
        ist->resample_channels       = avctx->channels;
2009
 
2010
        for (i = 0; i < nb_filtergraphs; i++)
2011
            if (ist_in_filtergraph(filtergraphs[i], ist)) {
2012
                FilterGraph *fg = filtergraphs[i];
2013
                if (configure_filtergraph(fg) < 0) {
2014
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2015
                    exit_program(1);
2016
                }
2017
            }
2018
    }
2019
 
2020
    /* if the decoder provides a pts, use it instead of the last packet pts.
2021
       the decoder could be delaying output by a packet or more. */
2022
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
2023
        ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2024
        decoded_frame_tb   = avctx->time_base;
2025
    } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2026
        decoded_frame->pts = decoded_frame->pkt_pts;
2027
        decoded_frame_tb   = ist->st->time_base;
2028
    } else if (pkt->pts != AV_NOPTS_VALUE) {
2029
        decoded_frame->pts = pkt->pts;
2030
        decoded_frame_tb   = ist->st->time_base;
2031
    }else {
2032
        decoded_frame->pts = ist->dts;
2033
        decoded_frame_tb   = AV_TIME_BASE_Q;
2034
    }
2035
    pkt->pts           = AV_NOPTS_VALUE;
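    /* av_rescale_delta() converts the pts to a 1/sample_rate time base while
       carrying the rounding remainder in filter_in_rescale_delta_last, so
       consecutive audio frames do not accumulate rounding drift. */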
2036
    if (decoded_frame->pts != AV_NOPTS_VALUE)
2037
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2038
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2039
                                              (AVRational){1, avctx->sample_rate});
2040
    for (i = 0; i < ist->nb_filters; i++) {
2041
        if (i < ist->nb_filters - 1) {
2042
            f = ist->filter_frame;
2043
            err = av_frame_ref(f, decoded_frame);
2044
            if (err < 0)
2045
                break;
2046
        } else
2047
            f = decoded_frame;
2048
        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2049
                                     AV_BUFFERSRC_FLAG_PUSH);
2050
        if (err == AVERROR_EOF)
2051
            err = 0; /* ignore */
2052
        if (err < 0)
2053
            break;
2054
    }
2055
    decoded_frame->pts = AV_NOPTS_VALUE;
2056
 
2057
    av_frame_unref(ist->filter_frame);
2058
    av_frame_unref(decoded_frame);
2059
    return err < 0 ? err : ret;
2060
}
2061
 
2062
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2063
{
2064
    AVFrame *decoded_frame, *f;
2065
    int i, ret = 0, err = 0, resample_changed;
2066
    int64_t best_effort_timestamp;
2067
    AVRational *frame_sample_aspect;
2068
 
2069
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2070
        return AVERROR(ENOMEM);
2071
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2072
        return AVERROR(ENOMEM);
2073
    decoded_frame = ist->decoded_frame;
2074
    pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2075
 
2076
    update_benchmark(NULL);
2077
    ret = avcodec_decode_video2(ist->dec_ctx,
2078
                                decoded_frame, got_output, pkt);
2079
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2080
 
2081
    // The following line may be required in some cases where there is no parser
2082
    // or the parser does not set has_b_frames correctly
2083
    if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2084
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2085
            ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2086
        } else
2087
            av_log(ist->dec_ctx, AV_LOG_WARNING,
2088
                   "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2089
                   "If you want to help, upload a sample "
2090
                   "of this file to ftp://upload.ffmpeg.org/incoming/ "
2091
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2092
                   ist->dec_ctx->has_b_frames,
2093
                   ist->st->codec->has_b_frames);
2094
    }
2095
 
2096
    if (*got_output || ret<0)
2097
        decode_error_stat[ret<0] ++;
2098
 
2099
    if (ret < 0 && exit_on_error)
2100
        exit_program(1);
2101
 
2102
    if (*got_output && ret >= 0) {
2103
        if (ist->dec_ctx->width  != decoded_frame->width ||
2104
            ist->dec_ctx->height != decoded_frame->height ||
2105
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
2106
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2107
                decoded_frame->width,
2108
                decoded_frame->height,
2109
                decoded_frame->format,
2110
                ist->dec_ctx->width,
2111
                ist->dec_ctx->height,
2112
                ist->dec_ctx->pix_fmt);
2113
        }
2114
    }
2115
 
2116
    if (!*got_output || ret < 0)
2117
        return ret;
2118
 
2119
    if(ist->top_field_first>=0)
2120
        decoded_frame->top_field_first = ist->top_field_first;
2121
 
2122
    ist->frames_decoded++;
2123
 
2124
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2125
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2126
        if (err < 0)
2127
            goto fail;
2128
    }
2129
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2130
 
2131
    best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2132
    if(best_effort_timestamp != AV_NOPTS_VALUE)
2133
        ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2134
 
2135
    if (debug_ts) {
2136
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2137
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2138
               ist->st->index, av_ts2str(decoded_frame->pts),
2139
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2140
               best_effort_timestamp,
2141
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2142
               decoded_frame->key_frame, decoded_frame->pict_type,
2143
               ist->st->time_base.num, ist->st->time_base.den);
2144
    }
2145
 
2146
    pkt->size = 0;
2147
 
2148
    if (ist->st->sample_aspect_ratio.num)
2149
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2150
 
2151
    resample_changed = ist->resample_width   != decoded_frame->width  ||
2152
                       ist->resample_height  != decoded_frame->height ||
2153
                       ist->resample_pix_fmt != decoded_frame->format;
2154
    if (resample_changed) {
2155
        av_log(NULL, AV_LOG_INFO,
2156
               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2157
               ist->file_index, ist->st->index,
2158
               ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
2159
               decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2160
 
2161
        ist->resample_width   = decoded_frame->width;
2162
        ist->resample_height  = decoded_frame->height;
2163
        ist->resample_pix_fmt = decoded_frame->format;
2164
 
2165
        for (i = 0; i < nb_filtergraphs; i++) {
2166
            if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2167
                configure_filtergraph(filtergraphs[i]) < 0) {
2168
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2169
                exit_program(1);
2170
            }
2171
        }
2172
    }
2173
 
2174
    frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2175
    for (i = 0; i < ist->nb_filters; i++) {
2176
        if (!frame_sample_aspect->num)
2177
            *frame_sample_aspect = ist->st->sample_aspect_ratio;
2178
 
2179
        if (i < ist->nb_filters - 1) {
2180
            f = ist->filter_frame;
2181
            err = av_frame_ref(f, decoded_frame);
2182
            if (err < 0)
2183
                break;
2184
        } else
2185
            f = decoded_frame;
2186
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2187
        if (ret == AVERROR_EOF) {
2188
            ret = 0; /* ignore */
2189
        } else if (ret < 0) {
2190
            av_log(NULL, AV_LOG_FATAL,
2191
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2192
            exit_program(1);
2193
        }
2194
    }
2195
 
2196
fail:
2197
    av_frame_unref(ist->filter_frame);
2198
    av_frame_unref(decoded_frame);
2199
    return err < 0 ? err : ret;
2200
}
2201
 
2202
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2203
{
2204
    AVSubtitle subtitle;
2205
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2206
                                          &subtitle, got_output, pkt);
2207
 
2208
    if (*got_output || ret<0)
2209
        decode_error_stat[ret<0] ++;
2210
 
2211
    if (ret < 0 && exit_on_error)
2212
        exit_program(1);
2213
 
2214
    if (ret < 0 || !*got_output) {
2215
        if (!pkt->size)
2216
            sub2video_flush(ist);
2217
        return ret;
2218
    }
2219
 
2220
    if (ist->fix_sub_duration) {
2221
        int end = 1;
2222
        if (ist->prev_sub.got_output) {
2223
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2224
                             1000, AV_TIME_BASE);
2225
            if (end < ist->prev_sub.subtitle.end_display_time) {
2226
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
2227
                       "Subtitle duration reduced from %d to %d%s\n",
2228
                       ist->prev_sub.subtitle.end_display_time, end,
2229
                       end <= 0 ? ", dropping it" : "");
2230
                ist->prev_sub.subtitle.end_display_time = end;
2231
            }
2232
        }
2233
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2234
        FFSWAP(int,        ret,         ist->prev_sub.ret);
2235
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
2236
        if (end <= 0)
2237
            goto out;
2238
    }
2239
 
2240
    if (!*got_output)
2241
        return ret;
2242
 
2243
    sub2video_update(ist, &subtitle);
2244
 
2245
    if (!subtitle.num_rects)
2246
        goto out;
2247
 
2248
    ist->frames_decoded++;
2249
 
2250
    for (i = 0; i < nb_output_streams; i++) {
2251
        OutputStream *ost = output_streams[i];
2252
 
2253
        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2254
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2255
            continue;
2256
 
2257
        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2258
    }
2259
 
2260
out:
2261
    avsubtitle_free(&subtitle);
2262
    return ret;
2263
}
2264
 
2265
static int send_filter_eof(InputStream *ist)
2266
{
2267
    int i, ret;
2268
    for (i = 0; i < ist->nb_filters; i++) {
2269
        ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2270
        if (ret < 0)
2271
            return ret;
2272
    }
2273
    return 0;
2274
}
2275
 
2276
/* pkt = NULL means EOF (needed to flush decoder buffers) */
2277
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
2278
{
2279
    int ret = 0, i;
2280
    int got_output = 0;
2281
 
2282
    AVPacket avpkt;
2283
    if (!ist->saw_first_ts) {
2284
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2285
        ist->pts = 0;
2286
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2287
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2288
            ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong
2289
        }
2290
        ist->saw_first_ts = 1;
2291
    }
2292
 
2293
    if (ist->next_dts == AV_NOPTS_VALUE)
2294
        ist->next_dts = ist->dts;
2295
    if (ist->next_pts == AV_NOPTS_VALUE)
2296
        ist->next_pts = ist->pts;
2297
 
2298
    if (!pkt) {
2299
        /* EOF handling */
2300
        av_init_packet(&avpkt);
2301
        avpkt.data = NULL;
2302
        avpkt.size = 0;
2303
        goto handle_eof;
2304
    } else {
2305
        avpkt = *pkt;
2306
    }
2307
 
2308
    if (pkt->dts != AV_NOPTS_VALUE) {
2309
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2310
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2311
            ist->next_pts = ist->pts = ist->dts;
2312
    }
2313
 
2314
    // while we have more to decode or while the decoder did output something on EOF
2315
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2316
        int duration;
2317
    handle_eof:
2318
 
2319
        ist->pts = ist->next_pts;
2320
        ist->dts = ist->next_dts;
2321
 
2322
        if (avpkt.size && avpkt.size != pkt->size &&
2323
            !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2324
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2325
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2326
            ist->showed_multi_packet_warning = 1;
2327
        }
2328
 
2329
        switch (ist->dec_ctx->codec_type) {
2330
        case AVMEDIA_TYPE_AUDIO:
2331
            ret = decode_audio    (ist, &avpkt, &got_output);
2332
            break;
2333
        case AVMEDIA_TYPE_VIDEO:
2334
            ret = decode_video    (ist, &avpkt, &got_output);
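            /* Estimate the decoded frame duration for timestamp bookkeeping:
               prefer the packet duration, otherwise derive it from the decoder
               frame rate as AV_TIME_BASE * framerate.den * ticks /
               (framerate.num * ticks_per_frame), where ticks = repeat_pict + 1
               accounts for repeated fields (e.g. soft telecine). */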
2335
            if (avpkt.duration) {
2336
                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2337
            } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2338
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2339
                duration = ((int64_t)AV_TIME_BASE *
2340
                                ist->dec_ctx->framerate.den * ticks) /
2341
                                ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2342
            } else
2343
                duration = 0;
2344
 
2345
            if(ist->dts != AV_NOPTS_VALUE && duration) {
2346
                ist->next_dts += duration;
2347
            }else
2348
                ist->next_dts = AV_NOPTS_VALUE;
2349
 
2350
            if (got_output)
2351
                ist->next_pts += duration; //FIXME the duration is not correct in some cases
2352
            break;
2353
        case AVMEDIA_TYPE_SUBTITLE:
2354
            ret = transcode_subtitles(ist, &avpkt, &got_output);
2355
            break;
2356
        default:
2357
            return -1;
2358
        }
2359
 
2360
        if (ret < 0) {
2361
            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2362
                   ist->file_index, ist->st->index, av_err2str(ret));
2363
            if (exit_on_error)
2364
                exit_program(1);
2365
            break;
2366
        }
2367
 
2368
        avpkt.dts=
2369
        avpkt.pts= AV_NOPTS_VALUE;
2370
 
2371
        // touch data and size only if not EOF
2372
        if (pkt) {
2373
            if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2374
                ret = avpkt.size;
2375
            avpkt.data += ret;
2376
            avpkt.size -= ret;
2377
        }
2378
        if (!got_output) {
2379
            continue;
2380
        }
2381
        if (got_output && !pkt)
2382
            break;
2383
    }
2384
 
2385
    /* after flushing, send an EOF on all the filter inputs attached to the stream */
2386
    if (!pkt && ist->decoding_needed && !got_output) {
2387
        int ret = send_filter_eof(ist);
2388
        if (ret < 0) {
2389
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2390
            exit_program(1);
2391
        }
2392
    }
2393
 
2394
    /* handle stream copy */
2395
    if (!ist->decoding_needed) {
2396
        ist->dts = ist->next_dts;
2397
        switch (ist->dec_ctx->codec_type) {
2398
        case AVMEDIA_TYPE_AUDIO:
2399
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2400
                             ist->dec_ctx->sample_rate;
2401
            break;
2402
        case AVMEDIA_TYPE_VIDEO:
2403
            if (ist->framerate.num) {
2404
                // TODO: Remove work-around for c99-to-c89 issue 7
2405
                AVRational time_base_q = AV_TIME_BASE_Q;
2406
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2407
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2408
            } else if (pkt->duration) {
2409
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2410
            } else if(ist->dec_ctx->framerate.num != 0) {
2411
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2412
                ist->next_dts += ((int64_t)AV_TIME_BASE *
2413
                                  ist->dec_ctx->framerate.den * ticks) /
2414
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2415
            }
2416
            break;
2417
        }
2418
        ist->pts = ist->dts;
2419
        ist->next_pts = ist->next_dts;
2420
    }
2421
    for (i = 0; pkt && i < nb_output_streams; i++) {
2422
        OutputStream *ost = output_streams[i];
2423
 
2424
        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2425
            continue;
2426
 
2427
        do_streamcopy(ist, ost, pkt);
2428
    }
2429
 
2430
    return got_output;
2431
}
2432
 
2433
static void print_sdp(void)
2434
{
2435
    char sdp[16384];
2436
    int i;
2437
    int j;
2438
    AVIOContext *sdp_pb;
2439
    AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2440
 
2441
    if (!avc)
2442
        exit_program(1);
2443
    for (i = 0, j = 0; i < nb_output_files; i++) {
2444
        if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2445
            avc[j] = output_files[i]->ctx;
2446
            j++;
2447
        }
2448
    }
2449
 
2450
    if (!j)
2451
        goto fail;
2452
 
2453
    av_sdp_create(avc, j, sdp, sizeof(sdp));
2454
 
2455
    if (!sdp_filename) {
2456
        printf("SDP:\n%s\n", sdp);
2457
        fflush(stdout);
2458
    } else {
2459
        if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2460
            av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2461
        } else {
2462
            avio_printf(sdp_pb, "SDP:\n%s", sdp);
2463
            avio_closep(&sdp_pb);
2464
            av_freep(&sdp_filename);
2465
        }
2466
    }
2467
 
2468
fail:
2469
    av_freep(&avc);
2470
}
2471
 
2472
static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2473
{
2474
    int i;
2475
    for (i = 0; hwaccels[i].name; i++)
2476
        if (hwaccels[i].pix_fmt == pix_fmt)
2477
            return &hwaccels[i];
2478
    return NULL;
2479
}
2480
 
2481
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2482
{
2483
    InputStream *ist = s->opaque;
2484
    const enum AVPixelFormat *p;
2485
    int ret;
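    /* libavcodec calls this with a list of candidate pixel formats terminated
       by AV_PIX_FMT_NONE (-1), normally with hardware formats first. If no
       matching hwaccel can be initialized, the loop breaks at the first
       software format, which is then returned for software decoding. */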
2486
 
2487
    for (p = pix_fmts; *p != -1; p++) {
2488
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2489
        const HWAccel *hwaccel;
2490
 
2491
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2492
            break;
2493
 
2494
        hwaccel = get_hwaccel(*p);
2495
        if (!hwaccel ||
2496
            (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2497
            (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2498
            continue;
2499
 
2500
        ret = hwaccel->init(s);
2501
        if (ret < 0) {
2502
            if (ist->hwaccel_id == hwaccel->id) {
2503
                av_log(NULL, AV_LOG_FATAL,
2504
                       "%s hwaccel requested for input stream #%d:%d, "
2505
                       "but cannot be initialized.\n", hwaccel->name,
2506
                       ist->file_index, ist->st->index);
2507
                return AV_PIX_FMT_NONE;
2508
            }
2509
            continue;
2510
        }
2511
        ist->active_hwaccel_id = hwaccel->id;
2512
        ist->hwaccel_pix_fmt   = *p;
2513
        break;
2514
    }
2515
 
2516
    return *p;
2517
}
2518
 
2519
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2520
{
2521
    InputStream *ist = s->opaque;
2522
 
2523
    if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2524
        return ist->hwaccel_get_buffer(s, frame, flags);
2525
 
2526
    return avcodec_default_get_buffer2(s, frame, flags);
2527
}
2528
 
2529
static int init_input_stream(int ist_index, char *error, int error_len)
2530
{
2531
    int ret;
2532
    InputStream *ist = input_streams[ist_index];
2533
 
2534
    if (ist->decoding_needed) {
2535
        AVCodec *codec = ist->dec;
2536
        if (!codec) {
2537
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2538
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2539
            return AVERROR(EINVAL);
2540
        }
2541
 
2542
        ist->dec_ctx->opaque                = ist;
2543
        ist->dec_ctx->get_format            = get_format;
2544
        ist->dec_ctx->get_buffer2           = get_buffer;
2545
        ist->dec_ctx->thread_safe_callbacks = 1;
2546
 
2547
        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2548
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2549
           (ist->decoding_needed & DECODING_FOR_OST)) {
2550
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2551
            if (ist->decoding_needed & DECODING_FOR_FILTER)
2552
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2553
        }
2554
 
2555
        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2556
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2557
        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2558
            if (ret == AVERROR_EXPERIMENTAL)
2559
                abort_codec_experimental(codec, 0);
2560
 
2561
            snprintf(error, error_len,
2562
                     "Error while opening decoder for input stream "
2563
                     "#%d:%d : %s",
2564
                     ist->file_index, ist->st->index, av_err2str(ret));
2565
            return ret;
2566
        }
2567
        assert_avoptions(ist->decoder_opts);
2568
    }
2569
 
2570
    ist->next_pts = AV_NOPTS_VALUE;
2571
    ist->next_dts = AV_NOPTS_VALUE;
2572
 
2573
    return 0;
2574
}
2575
 
2576
static InputStream *get_input_stream(OutputStream *ost)
2577
{
2578
    if (ost->source_index >= 0)
2579
        return input_streams[ost->source_index];
2580
    return NULL;
2581
}
2582
 
2583
static int compare_int64(const void *a, const void *b)
2584
{
2585
    int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
2586
    return va < vb ? -1 : va > vb ? +1 : 0;
2587
}
2588
 
2589
static int init_output_stream(OutputStream *ost, char *error, int error_len)
2590
{
2591
    int ret = 0;
2592
 
2593
    if (ost->encoding_needed) {
2594
        AVCodec      *codec = ost->enc;
2595
        AVCodecContext *dec = NULL;
2596
        InputStream *ist;
2597
 
2598
        if ((ist = get_input_stream(ost)))
2599
            dec = ist->dec_ctx;
2600
        if (dec && dec->subtitle_header) {
2601
            /* ASS code assumes this buffer is null terminated so add extra byte. */
2602
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2603
            if (!ost->enc_ctx->subtitle_header)
2604
                return AVERROR(ENOMEM);
2605
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2606
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2607
        }
2608
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2609
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2610
        av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
2611
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2612
            !codec->defaults &&
2613
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2614
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2615
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2616
 
2617
        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2618
            if (ret == AVERROR_EXPERIMENTAL)
2619
                abort_codec_experimental(codec, 1);
2620
            snprintf(error, error_len,
2621
                     "Error while opening encoder for output stream #%d:%d - "
2622
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
2623
                    ost->file_index, ost->index);
2624
            return ret;
2625
        }
2626
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2627
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2628
            av_buffersink_set_frame_size(ost->filter->filter,
2629
                                            ost->enc_ctx->frame_size);
2630
        assert_avoptions(ost->encoder_opts);
2631
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2632
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2633
                                         " It takes bits/s as argument, not kbits/s\n");
2634
 
2635
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2636
        if (ret < 0) {
2637
            av_log(NULL, AV_LOG_FATAL,
2638
                   "Error initializing the output stream codec context.\n");
2639
            exit_program(1);
2640
        }
2641
 
2642
        // copy timebase while removing common factors
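        /* av_add_q() reduces its result to lowest terms, so adding 0/1
           normalizes the rational without changing its value. */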
2643
        ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2644
        ost->st->codec->codec= ost->enc_ctx->codec;
2645
    } else {
2646
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2647
        if (ret < 0) {
2648
           av_log(NULL, AV_LOG_FATAL,
2649
                  "Error setting up codec context options.\n");
2650
           return ret;
2651
        }
2652
        // copy timebase while removing common factors
2653
        ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2654
    }
2655
 
2656
    return ret;
2657
}
2658
 
2659
static void parse_forced_key_frames(char *kf, OutputStream *ost,
2660
                                    AVCodecContext *avctx)
2661
{
2662
    char *p;
2663
    int n = 1, i, size, index = 0;
2664
    int64_t t, *pts;
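    /* kf is a comma-separated list of timestamps; the special entry "chapters"
       (optionally followed by a time delta, e.g. "chapters-0.1") expands to one
       forced key frame per chapter start. All entries end up in the encoder
       time base, sorted in ascending order. */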
2665
 
2666
    for (p = kf; *p; p++)
2667
        if (*p == ',')
2668
            n++;
2669
    size = n;
2670
    pts = av_malloc_array(size, sizeof(*pts));
2671
    if (!pts) {
2672
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2673
        exit_program(1);
2674
    }
2675
 
2676
    p = kf;
2677
    for (i = 0; i < n; i++) {
2678
        char *next = strchr(p, ',');
2679
 
2680
        if (next)
2681
            *next++ = 0;
2682
 
2683
        if (!memcmp(p, "chapters", 8)) {
2684
 
2685
            AVFormatContext *avf = output_files[ost->file_index]->ctx;
2686
            int j;
2687
 
2688
            if (avf->nb_chapters > INT_MAX - size ||
2689
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2690
                                     sizeof(*pts)))) {
2691
                av_log(NULL, AV_LOG_FATAL,
2692
                       "Could not allocate forced key frames array.\n");
2693
                exit_program(1);
2694
            }
2695
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2696
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2697
 
2698
            for (j = 0; j < avf->nb_chapters; j++) {
2699
                AVChapter *c = avf->chapters[j];
2700
                av_assert1(index < size);
2701
                pts[index++] = av_rescale_q(c->start, c->time_base,
2702
                                            avctx->time_base) + t;
2703
            }
2704
 
2705
        } else {
2706
 
2707
            t = parse_time_or_die("force_key_frames", p, 1);
2708
            av_assert1(index < size);
2709
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2710
 
2711
        }
2712
 
2713
        p = next;
2714
    }
2715
 
2716
    av_assert0(index == size);
2717
    qsort(pts, size, sizeof(*pts), compare_int64);
2718
    ost->forced_kf_count = size;
2719
    ost->forced_kf_pts   = pts;
2720
}
2721
 
2722
static void report_new_stream(int input_index, AVPacket *pkt)
2723
{
2724
    InputFile *file = input_files[input_index];
2725
    AVStream *st = file->ctx->streams[pkt->stream_index];
2726
 
2727
    if (pkt->stream_index < file->nb_streams_warn)
2728
        return;
2729
    av_log(file->ctx, AV_LOG_WARNING,
2730
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2731
           av_get_media_type_string(st->codec->codec_type),
2732
           input_index, pkt->stream_index,
2733
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2734
    file->nb_streams_warn = pkt->stream_index + 1;
2735
}
2736
 
2737
static void set_encoder_id(OutputFile *of, OutputStream *ost)
2738
{
2739
    AVDictionaryEntry *e;
2740
 
2741
    uint8_t *encoder_string;
2742
    int encoder_string_len;
2743
    int format_flags = 0;
2744
    int codec_flags = 0;
2745
 
2746
    if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
2747
        return;
2748
 
2749
    e = av_dict_get(of->opts, "fflags", NULL, 0);
2750
    if (e) {
2751
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2752
        if (!o)
2753
            return;
2754
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2755
    }
2756
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2757
    if (e) {
2758
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2759
        if (!o)
2760
            return;
2761
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2762
    }
2763
 
2764
    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2765
    encoder_string     = av_mallocz(encoder_string_len);
2766
    if (!encoder_string)
2767
        exit_program(1);
2768
 
2769
    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2770
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2771
    else
2772
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2773
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2774
    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
2775
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2776
}
2777
 
2778
static int transcode_init(void)
2779
{
2780
    int ret = 0, i, j, k;
2781
    AVFormatContext *oc;
2782
    OutputStream *ost;
2783
    InputStream *ist;
2784
    char error[1024] = {0};
2785
    int want_sdp = 1;
2786
 
2787
    for (i = 0; i < nb_filtergraphs; i++) {
2788
        FilterGraph *fg = filtergraphs[i];
2789
        for (j = 0; j < fg->nb_outputs; j++) {
2790
            OutputFilter *ofilter = fg->outputs[j];
2791
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
2792
                continue;
2793
            if (fg->nb_inputs != 1)
2794
                continue;
2795
            for (k = nb_input_streams-1; k >= 0 ; k--)
2796
                if (fg->inputs[0]->ist == input_streams[k])
2797
                    break;
2798
            ofilter->ost->source_index = k;
2799
        }
2800
    }
2801
 
2802
    /* init framerate emulation */
2803
    for (i = 0; i < nb_input_files; i++) {
2804
        InputFile *ifile = input_files[i];
2805
        if (ifile->rate_emu)
2806
            for (j = 0; j < ifile->nb_streams; j++)
2807
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2808
    }
2809
 
2810
    /* for each output stream, we compute the right encoding parameters */
2811
    for (i = 0; i < nb_output_streams; i++) {
2812
        AVCodecContext *enc_ctx;
2813
        AVCodecContext *dec_ctx = NULL;
2814
        ost = output_streams[i];
2815
        oc  = output_files[ost->file_index]->ctx;
2816
        ist = get_input_stream(ost);
2817
 
2818
        if (ost->attachment_filename)
2819
            continue;
2820
 
2821
        enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2822
 
2823
        if (ist) {
2824
            dec_ctx = ist->dec_ctx;
2825
 
2826
            ost->st->disposition          = ist->st->disposition;
2827
            enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
2828
            enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2829
        } else {
2830
            for (j=0; j<oc->nb_streams; j++) {
2831
                AVStream *st = oc->streams[j];
2832
                if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2833
                    break;
2834
            }
2835
            if (j == oc->nb_streams)
2836
                if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2837
                    ost->st->disposition = AV_DISPOSITION_DEFAULT;
2838
        }
2839
 
2840
        if (ost->stream_copy) {
2841
            AVRational sar;
2842
            uint64_t extra_size;
2843
 
2844
            av_assert0(ist && !ost->filter);
2845
 
2846
            extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2847
 
2848
            if (extra_size > INT_MAX) {
2849
                return AVERROR(EINVAL);
2850
            }
2851
 
2852
            /* if stream_copy is selected, no need to decode or encode */
2853
            enc_ctx->codec_id   = dec_ctx->codec_id;
2854
            enc_ctx->codec_type = dec_ctx->codec_type;
2855
 
2856
            if (!enc_ctx->codec_tag) {
2857
                unsigned int codec_tag;
2858
                if (!oc->oformat->codec_tag ||
2859
                     av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2860
                     !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2861
                    enc_ctx->codec_tag = dec_ctx->codec_tag;
2862
            }
2863
 
2864
            enc_ctx->bit_rate       = dec_ctx->bit_rate;
2865
            enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
2866
            enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2867
            enc_ctx->field_order    = dec_ctx->field_order;
2868
            if (dec_ctx->extradata_size) {
2869
                enc_ctx->extradata      = av_mallocz(extra_size);
2870
                if (!enc_ctx->extradata) {
2871
                    return AVERROR(ENOMEM);
2872
                }
2873
                memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2874
            }
2875
            enc_ctx->extradata_size= dec_ctx->extradata_size;
2876
            enc_ctx->bits_per_coded_sample  = dec_ctx->bits_per_coded_sample;
2877
 
2878
            enc_ctx->time_base = ist->st->time_base;
2879
            /*
2880
             * Avi is a special case here because it supports variable fps but
2881
             * having the fps and timebase differ significantly adds quite some
2882
             * overhead
2883
             */
2884
            if(!strcmp(oc->oformat->name, "avi")) {
2885
                if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2886
                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2887
                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2888
                               && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2889
                     || copy_tb==2){
2890
                    enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2891
                    enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2892
                    enc_ctx->ticks_per_frame = 2;
2893
                } else if (   copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2894
                                 && av_q2d(ist->st->time_base) < 1.0/500
2895
                    || copy_tb==0){
2896
                    enc_ctx->time_base = dec_ctx->time_base;
2897
                    enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2898
                    enc_ctx->time_base.den *= 2;
2899
                    enc_ctx->ticks_per_frame = 2;
2900
                }
2901
            } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2902
                      && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2903
                      && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2904
                      && strcmp(oc->oformat->name, "f4v")
2905
            ) {
2906
                if(   copy_tb<0 && dec_ctx->time_base.den
2907
                                && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2908
                                && av_q2d(ist->st->time_base) < 1.0/500
2909
                   || copy_tb==0){
2910
                    enc_ctx->time_base = dec_ctx->time_base;
2911
                    enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2912
                }
2913
            }
2914
            if (   enc_ctx->codec_tag == AV_RL32("tmcd")
2915
                && dec_ctx->time_base.num < dec_ctx->time_base.den
2916
                && dec_ctx->time_base.num > 0
2917
                && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2918
                enc_ctx->time_base = dec_ctx->time_base;
2919
            }
2920
 
2921
            if (!ost->frame_rate.num)
2922
                ost->frame_rate = ist->framerate;
2923
            if(ost->frame_rate.num)
2924
                enc_ctx->time_base = av_inv_q(ost->frame_rate);
2925
 
2926
            av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2927
                        enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2928
 
2929
            if (ist->st->nb_side_data) {
2930
                ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2931
                                                      sizeof(*ist->st->side_data));
2932
                if (!ost->st->side_data)
2933
                    return AVERROR(ENOMEM);
2934
 
2935
                ost->st->nb_side_data = 0;
2936
                for (j = 0; j < ist->st->nb_side_data; j++) {
2937
                    const AVPacketSideData *sd_src = &ist->st->side_data[j];
2938
                    AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2939
 
2940
                    if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2941
                        continue;
2942
 
2943
                    sd_dst->data = av_malloc(sd_src->size);
2944
                    if (!sd_dst->data)
2945
                        return AVERROR(ENOMEM);
2946
                    memcpy(sd_dst->data, sd_src->data, sd_src->size);
2947
                    sd_dst->size = sd_src->size;
2948
                    sd_dst->type = sd_src->type;
2949
                    ost->st->nb_side_data++;
2950
                }
2951
            }
2952
 
2953
            ost->parser = av_parser_init(enc_ctx->codec_id);
2954
 
2955
            switch (enc_ctx->codec_type) {
2956
            case AVMEDIA_TYPE_AUDIO:
2957
                if (audio_volume != 256) {
2958
                    av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2959
                    exit_program(1);
2960
                }
2961
                enc_ctx->channel_layout     = dec_ctx->channel_layout;
2962
                enc_ctx->sample_rate        = dec_ctx->sample_rate;
2963
                enc_ctx->channels           = dec_ctx->channels;
2964
                enc_ctx->frame_size         = dec_ctx->frame_size;
2965
                enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2966
                enc_ctx->block_align        = dec_ctx->block_align;
2967
                enc_ctx->initial_padding    = dec_ctx->delay;
2968
#if FF_API_AUDIOENC_DELAY
2969
                enc_ctx->delay              = dec_ctx->delay;
2970
#endif
2971
                if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2972
                    enc_ctx->block_align= 0;
2973
                if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2974
                    enc_ctx->block_align= 0;
2975
                break;
2976
            case AVMEDIA_TYPE_VIDEO:
2977
                enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
2978
                enc_ctx->width              = dec_ctx->width;
2979
                enc_ctx->height             = dec_ctx->height;
2980
                enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
2981
                if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2982
                    sar =
2983
                        av_mul_q(ost->frame_aspect_ratio,
2984
                                 (AVRational){ enc_ctx->height, enc_ctx->width });
2985
                    av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2986
                           "with stream copy may produce invalid files\n");
2987
                }
2988
                else if (ist->st->sample_aspect_ratio.num)
2989
                    sar = ist->st->sample_aspect_ratio;
2990
                else
2991
                    sar = dec_ctx->sample_aspect_ratio;
2992
                ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2993
                ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2994
                ost->st->r_frame_rate = ist->st->r_frame_rate;
2995
                break;
2996
            case AVMEDIA_TYPE_SUBTITLE:
2997
                enc_ctx->width  = dec_ctx->width;
2998
                enc_ctx->height = dec_ctx->height;
2999
                break;
3000
            case AVMEDIA_TYPE_UNKNOWN:
3001
            case AVMEDIA_TYPE_DATA:
3002
            case AVMEDIA_TYPE_ATTACHMENT:
3003
                break;
3004
            default:
3005
                abort();
3006
            }
3007
        } else {
3008
            if (!ost->enc)
3009
                ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3010
            if (!ost->enc) {
3011
                /* should only happen when a default codec is not present. */
3012
                snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3013
                         avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3014
                ret = AVERROR(EINVAL);
3015
                goto dump_format;
3016
            }
3017
 
3018
            set_encoder_id(output_files[ost->file_index], ost);
3019
 
3020
            if (!ost->filter &&
3021
                (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3022
                 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3023
                    FilterGraph *fg;
3024
                    fg = init_simple_filtergraph(ist, ost);
3025
                    if (configure_filtergraph(fg)) {
3026
                        av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3027
                        exit_program(1);
3028
                    }
3029
            }
3030
 
3031
            if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3032
                if (!ost->frame_rate.num)
3033
                    ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3034
                if (ist && !ost->frame_rate.num)
3035
                    ost->frame_rate = ist->framerate;
3036
                if (ist && !ost->frame_rate.num)
3037
                    ost->frame_rate = ist->st->r_frame_rate;
3038
                if (ist && !ost->frame_rate.num) {
3039
                    ost->frame_rate = (AVRational){25, 1};
3040
                    av_log(NULL, AV_LOG_WARNING,
3041
                           "No information "
3042
                           "about the input framerate is available. Falling "
3043
                           "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3044
                           "if you want a different framerate.\n",
3045
                           ost->file_index, ost->index);
3046
                }
3047
//                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3048
                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3049
                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3050
                    ost->frame_rate = ost->enc->supported_framerates[idx];
3051
                }
3052
                // reduce frame rate for mpeg4 to be within the spec limits
3053
                if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3054
                    av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3055
                              ost->frame_rate.num, ost->frame_rate.den, 65535);
3056
                }
3057
            }
3058
 
3059
            switch (enc_ctx->codec_type) {
3060
            case AVMEDIA_TYPE_AUDIO:
3061
                enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
3062
                enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
3063
                enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3064
                enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3065
                enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
3066
                break;
3067
            case AVMEDIA_TYPE_VIDEO:
3068
                enc_ctx->time_base = av_inv_q(ost->frame_rate);
3069
                if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3070
                    enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3071
                if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3072
                   && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3073
                    av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3074
                                               "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3075
                }
3076
                for (j = 0; j < ost->forced_kf_count; j++)
3077
                    ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3078
                                                         AV_TIME_BASE_Q,
3079
                                                         enc_ctx->time_base);
3080
 
3081
                enc_ctx->width  = ost->filter->filter->inputs[0]->w;
3082
                enc_ctx->height = ost->filter->filter->inputs[0]->h;
3083
                enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3084
                    ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3085
                    av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3086
                    ost->filter->filter->inputs[0]->sample_aspect_ratio;
3087
                if (!strncmp(ost->enc->name, "libx264", 7) &&
3088
                    enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3089
                    ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3090
                    av_log(NULL, AV_LOG_WARNING,
3091
                           "No pixel format specified, %s for H.264 encoding chosen.\n"
3092
                           "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3093
                           av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3094
                if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3095
                    enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3096
                    ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3097
                    av_log(NULL, AV_LOG_WARNING,
3098
                           "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3099
                           "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3100
                           av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3101
                enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3102
 
3103
                ost->st->avg_frame_rate = ost->frame_rate;
3104
 
3105
                if (!dec_ctx ||
3106
                    enc_ctx->width   != dec_ctx->width  ||
3107
                    enc_ctx->height  != dec_ctx->height ||
3108
                    enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3109
                    enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3110
                }
3111
 
3112
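                /* -force_key_frames accepts a list of timestamps, an
                 * expression prefixed with "expr:" (e.g. "expr:gte(t,n_forced*5)"
                 * forces a keyframe roughly every 5 seconds), or "source" to
                 * reuse the input keyframes, which is handled elsewhere. */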
                if (ost->forced_keyframes) {
3113
                    if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3114
                        ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3115
                                            forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3116
                        if (ret < 0) {
3117
                            av_log(NULL, AV_LOG_ERROR,
3118
                                   "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3119
                            return ret;
3120
                        }
3121
                        ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3122
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3123
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3124
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3125
 
3126
                        // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3127
                        // parse it only for static kf timings
3128
                    } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3129
                        parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3130
                    }
3131
                }
3132
                break;
3133
            case AVMEDIA_TYPE_SUBTITLE:
3134
                enc_ctx->time_base = (AVRational){1, 1000};
3135
                if (!enc_ctx->width) {
3136
                    enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
3137
                    enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
3138
                }
3139
                break;
3140
            case AVMEDIA_TYPE_DATA:
3141
                break;
3142
            default:
3143
                abort();
3144
                break;
3145
            }
3146
        }
3147
 
3148
        if (ost->disposition) {
3149
            static const AVOption opts[] = {
3150
                { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3151
                { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
3152
                { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
3153
                { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
3154
                { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
3155
                { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
3156
                { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
3157
                { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
3158
                { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
3159
                { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
3160
                { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
3161
                { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
3162
                { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
3163
                { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
3164
                { NULL },
3165
            };
3166
            static const AVClass class = {
3167
                .class_name = "",
3168
                .item_name  = av_default_item_name,
3169
                .option     = opts,
3170
                .version    = LIBAVUTIL_VERSION_INT,
3171
            };
3172
            const AVClass *pclass = &class;
3173
 
3174
            ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3175
            if (ret < 0)
3176
                goto dump_format;
3177
        }
3178
    }
3179
 
3180
    /* open each encoder */
3181
    for (i = 0; i < nb_output_streams; i++) {
3182
        ret = init_output_stream(output_streams[i], error, sizeof(error));
3183
        if (ret < 0)
3184
            goto dump_format;
3185
    }
3186
 
3187
    /* init input streams */
3188
    for (i = 0; i < nb_input_streams; i++)
3189
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3190
            for (i = 0; i < nb_output_streams; i++) {
3191
                ost = output_streams[i];
3192
                avcodec_close(ost->enc_ctx);
3193
            }
3194
            goto dump_format;
3195
        }
3196
 
3197
    /* discard unused programs */
3198
    for (i = 0; i < nb_input_files; i++) {
3199
        InputFile *ifile = input_files[i];
3200
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
3201
            AVProgram *p = ifile->ctx->programs[j];
3202
            int discard  = AVDISCARD_ALL;
3203
 
3204
            for (k = 0; k < p->nb_stream_indexes; k++)
3205
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3206
                    discard = AVDISCARD_DEFAULT;
3207
                    break;
3208
                }
3209
            p->discard = discard;
3210
        }
3211
    }
3212
 
3213
    /* open files and write file headers */
3214
    for (i = 0; i < nb_output_files; i++) {
3215
        oc = output_files[i]->ctx;
3216
        oc->interrupt_callback = int_cb;
3217
        if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3218
            snprintf(error, sizeof(error),
3219
                     "Could not write header for output file #%d "
3220
                     "(incorrect codec parameters ?): %s",
3221
                     i, av_err2str(ret));
3222
            ret = AVERROR(EINVAL);
3223
            goto dump_format;
3224
        }
3225
//         assert_avoptions(output_files[i]->opts);
3226
        if (strcmp(oc->oformat->name, "rtp")) {
3227
            want_sdp = 0;
3228
        }
3229
    }
3230
 
3231
 dump_format:
3232
    /* dump the file output parameters - cannot be done before in case
3233
       of stream copy */
3234
    for (i = 0; i < nb_output_files; i++) {
3235
        av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3236
    }
3237
 
3238
    /* dump the stream mapping */
3239
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3240
    for (i = 0; i < nb_input_streams; i++) {
3241
        ist = input_streams[i];
3242
 
3243
        for (j = 0; j < ist->nb_filters; j++) {
3244
            if (ist->filters[j]->graph->graph_desc) {
3245
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3246
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3247
                       ist->filters[j]->name);
3248
                if (nb_filtergraphs > 1)
3249
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3250
                av_log(NULL, AV_LOG_INFO, "\n");
3251
            }
3252
        }
3253
    }
3254
 
3255
    for (i = 0; i < nb_output_streams; i++) {
3256
        ost = output_streams[i];
3257
 
3258
        if (ost->attachment_filename) {
3259
            /* an attached file */
3260
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3261
                   ost->attachment_filename, ost->file_index, ost->index);
3262
            continue;
3263
        }
3264
 
3265
        if (ost->filter && ost->filter->graph->graph_desc) {
3266
            /* output from a complex graph */
3267
            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3268
            if (nb_filtergraphs > 1)
3269
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3270
 
3271
            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3272
                   ost->index, ost->enc ? ost->enc->name : "?");
3273
            continue;
3274
        }
3275
 
3276
        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3277
               input_streams[ost->source_index]->file_index,
3278
               input_streams[ost->source_index]->st->index,
3279
               ost->file_index,
3280
               ost->index);
3281
        if (ost->sync_ist != input_streams[ost->source_index])
3282
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3283
                   ost->sync_ist->file_index,
3284
                   ost->sync_ist->st->index);
3285
        if (ost->stream_copy)
3286
            av_log(NULL, AV_LOG_INFO, " (copy)");
3287
        else {
3288
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3289
            const AVCodec *out_codec   = ost->enc;
3290
            const char *decoder_name   = "?";
3291
            const char *in_codec_name  = "?";
3292
            const char *encoder_name   = "?";
3293
            const char *out_codec_name = "?";
3294
            const AVCodecDescriptor *desc;
3295
 
3296
            if (in_codec) {
3297
                decoder_name  = in_codec->name;
3298
                desc = avcodec_descriptor_get(in_codec->id);
3299
                if (desc)
3300
                    in_codec_name = desc->name;
3301
                if (!strcmp(decoder_name, in_codec_name))
3302
                    decoder_name = "native";
3303
            }
3304
 
3305
            if (out_codec) {
3306
                encoder_name   = out_codec->name;
3307
                desc = avcodec_descriptor_get(out_codec->id);
3308
                if (desc)
3309
                    out_codec_name = desc->name;
3310
                if (!strcmp(encoder_name, out_codec_name))
3311
                    encoder_name = "native";
3312
            }
3313
 
3314
            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3315
                   in_codec_name, decoder_name,
3316
                   out_codec_name, encoder_name);
3317
        }
3318
        av_log(NULL, AV_LOG_INFO, "\n");
3319
    }
3320
 
3321
    if (ret) {
3322
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3323
        return ret;
3324
    }
3325
 
3326
    if (sdp_filename || want_sdp) {
3327
        print_sdp();
3328
    }
3329
 
3330
    transcode_init_done = 1;
3331
 
3332
    return 0;
3333
}
3334
 
3335
/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3336
static int need_output(void)
3337
{
3338
    int i;
3339
 
3340
    for (i = 0; i < nb_output_streams; i++) {
3341
        OutputStream *ost    = output_streams[i];
3342
        OutputFile *of       = output_files[ost->file_index];
3343
        AVFormatContext *os  = output_files[ost->file_index]->ctx;
3344
 
3345
        if (ost->finished ||
3346
            (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3347
            continue;
3348
        if (ost->frame_number >= ost->max_frames) {
3349
            int j;
3350
            for (j = 0; j < of->ctx->nb_streams; j++)
3351
                close_output_stream(output_streams[of->ost_index + j]);
3352
            continue;
3353
        }
3354
 
3355
        return 1;
3356
    }
3357
 
3358
    return 0;
3359
}
3360
 
3361
/**
3362
 * Select the output stream to process.
3363
 *
3364
 * @return  selected output stream, or NULL if none available
3365
 */
3366
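/* The stream with the smallest current DTS (rescaled to AV_TIME_BASE) is
 * picked, so that all outputs advance roughly in lockstep. */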
static OutputStream *choose_output(void)
3367
{
3368
    int i;
3369
    int64_t opts_min = INT64_MAX;
3370
    OutputStream *ost_min = NULL;
3371
 
3372
    for (i = 0; i < nb_output_streams; i++) {
3373
        OutputStream *ost = output_streams[i];
3374
        int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3375
                                    AV_TIME_BASE_Q);
3376
        if (!ost->finished && opts < opts_min) {
3377
            opts_min = opts;
3378
            ost_min  = ost->unavailable ? NULL : ost;
3379
        }
3380
    }
3381
    return ost_min;
3382
}
3383
 
3384
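/* Poll the terminal (at most once every 100ms) for interactive commands:
 * 'q' quits, '+'/'-' change the log level, 'c'/'C' send commands to
 * filters, 'd'/'D' set codec debug flags, 'h' cycles packet/hex dumping,
 * 's' toggles the QP histogram and '?' prints the key map. */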
static int check_keyboard_interaction(int64_t cur_time)
3385
{
3386
    int i, ret, key;
3387
    static int64_t last_time;
3388
    if (received_nb_signals)
3389
        return AVERROR_EXIT;
3390
    /* read_key() returns 0 on EOF */
3391
    if(cur_time - last_time >= 100000 && !run_as_daemon){
3392
        key =  read_key();
3393
        last_time = cur_time;
3394
    }else
3395
        key = -1;
3396
    if (key == 'q')
3397
        return AVERROR_EXIT;
3398
    if (key == '+') av_log_set_level(av_log_get_level()+10);
3399
    if (key == '-') av_log_set_level(av_log_get_level()-10);
3400
    if (key == 's') qp_hist     ^= 1;
3401
    if (key == 'h'){
3402
        if (do_hex_dump){
3403
            do_hex_dump = do_pkt_dump = 0;
3404
        } else if(do_pkt_dump){
3405
            do_hex_dump = 1;
3406
        } else
3407
            do_pkt_dump = 1;
3408
        av_log_set_level(AV_LOG_DEBUG);
3409
    }
3410
    if (key == 'c' || key == 'C'){
3411
        char buf[4096], target[64], command[256], arg[256] = {0};
3412
        double time;
3413
        int k, n = 0;
3414
        fprintf(stderr, "\nEnter command: |all 
3415
        i = 0;
3416
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3417
            if (k > 0)
3418
                buf[i++] = k;
3419
        buf[i] = 0;
3420
        if (k > 0 &&
3421
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3422
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3423
                   target, time, command, arg);
3424
            for (i = 0; i < nb_filtergraphs; i++) {
3425
                FilterGraph *fg = filtergraphs[i];
3426
                if (fg->graph) {
3427
                    if (time < 0) {
3428
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3429
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3430
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3431
                    } else if (key == 'c') {
3432
                        fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3433
                        ret = AVERROR_PATCHWELCOME;
3434
                    } else {
3435
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3436
                        if (ret < 0)
3437
                            fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3438
                    }
3439
                }
3440
            }
3441
        } else {
3442
            av_log(NULL, AV_LOG_ERROR,
3443
                   "Parse error, at least 3 arguments were expected, "
3444
                   "only %d given in string '%s'\n", n, buf);
3445
        }
3446
    }
3447
    if (key == 'd' || key == 'D'){
3448
        int debug=0;
3449
        if(key == 'D') {
3450
            debug = input_streams[0]->st->codec->debug<<1;
3451
            if(!debug) debug = 1;
3452
            while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3453
                debug += debug;
3454
        }else{
3455
            char buf[32];
3456
            int k = 0;
3457
            i = 0;
3458
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3459
                if (k > 0)
3460
                    buf[i++] = k;
3461
            buf[i] = 0;
3462
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3463
                fprintf(stderr,"error parsing debug value\n");
3464
        }
3465
        for (i = 0; i < nb_input_streams; i++) {
3466
            input_streams[i]->st->codec->debug = debug;
3467
        }
3468
        for (i = 0; i < nb_output_streams; i++) {
3469
            OutputStream *ost = output_streams[i];
3470
            ost->enc_ctx->debug = debug;
3471
        }
3472
        if(debug) av_log_set_level(AV_LOG_DEBUG);
3473
        fprintf(stderr,"debug=%d\n", debug);
3474
    }
3475
    if (key == '?'){
3476
        fprintf(stderr, "key    function\n"
3477
                        "?      show this help\n"
3478
                        "+      increase verbosity\n"
3479
                        "-      decrease verbosity\n"
3480
                        "c      Send command to first matching filter supporting it\n"
3481
                        "C      Send/Que command to all matching filters\n"
3482
                        "D      cycle through available debug modes\n"
3483
                        "h      dump packets/hex press to cycle through the 3 states\n"
3484
                        "q      quit\n"
3485
                        "s      Show QP histogram\n"
3486
        );
3487
    }
3488
    return 0;
3489
}
3490
 
3491
#if HAVE_PTHREADS
3492
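/* Demuxing thread: reads packets from one input file and pushes them into
 * that file's thread message queue until EOF or a read error occurs. */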
static void *input_thread(void *arg)
3493
{
3494
    InputFile *f = arg;
3495
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3496
    int ret = 0;
3497
 
3498
    while (1) {
3499
        AVPacket pkt;
3500
        ret = av_read_frame(f->ctx, &pkt);
3501
 
3502
        if (ret == AVERROR(EAGAIN)) {
3503
            av_usleep(10000);
3504
            continue;
3505
        }
3506
        if (ret < 0) {
3507
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3508
            break;
3509
        }
3510
        av_dup_packet(&pkt);
3511
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3512
        if (flags && ret == AVERROR(EAGAIN)) {
3513
            flags = 0;
3514
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3515
            av_log(f->ctx, AV_LOG_WARNING,
3516
                   "Thread message queue blocking; consider raising the "
3517
                   "thread_queue_size option (current value: %d)\n",
3518
                   f->thread_queue_size);
3519
        }
3520
        if (ret < 0) {
3521
            if (ret != AVERROR_EOF)
3522
                av_log(f->ctx, AV_LOG_ERROR,
3523
                       "Unable to send packet to main thread: %s\n",
3524
                       av_err2str(ret));
3525
            av_free_packet(&pkt);
3526
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3527
            break;
3528
        }
3529
    }
3530
 
3531
    return NULL;
3532
}
3533
 
3534
static void free_input_threads(void)
3535
{
3536
    int i;
3537
 
3538
    for (i = 0; i < nb_input_files; i++) {
3539
        InputFile *f = input_files[i];
3540
        AVPacket pkt;
3541
 
3542
        if (!f || !f->in_thread_queue)
3543
            continue;
3544
        av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3545
        while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3546
            av_free_packet(&pkt);
3547
 
3548
        pthread_join(f->thread, NULL);
3549
        f->joined = 1;
3550
        av_thread_message_queue_free(&f->in_thread_queue);
3551
    }
3552
}
3553
 
3554
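/* Start one demuxing thread per input file; with a single input, packets
 * are read directly and no threads are created. */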
static int init_input_threads(void)
3555
{
3556
    int i, ret;
3557
 
3558
    if (nb_input_files == 1)
3559
        return 0;
3560
 
3561
    for (i = 0; i < nb_input_files; i++) {
3562
        InputFile *f = input_files[i];
3563
 
3564
        if (f->ctx->pb ? !f->ctx->pb->seekable :
3565
            strcmp(f->ctx->iformat->name, "lavfi"))
3566
            f->non_blocking = 1;
3567
        ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3568
                                            f->thread_queue_size, sizeof(AVPacket));
3569
        if (ret < 0)
3570
            return ret;
3571
 
3572
        if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3573
            av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3574
            av_thread_message_queue_free(&f->in_thread_queue);
3575
            return AVERROR(ret);
3576
        }
3577
    }
3578
    return 0;
3579
}
3580
 
3581
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3582
{
3583
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3584
                                        f->non_blocking ?
3585
                                        AV_THREAD_MESSAGE_NONBLOCK : 0);
3586
}
3587
#endif
3588
 
3589
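/* Fetch the next packet from an input file, honouring -re rate emulation;
 * with multiple inputs the packet comes from the per-file demuxing thread,
 * otherwise av_read_frame() is called directly. */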
static int get_input_packet(InputFile *f, AVPacket *pkt)
3590
{
3591
    if (f->rate_emu) {
3592
        int i;
3593
        for (i = 0; i < f->nb_streams; i++) {
3594
            InputStream *ist = input_streams[f->ist_index + i];
3595
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3596
            int64_t now = av_gettime_relative() - ist->start;
3597
            if (pts > now)
3598
                return AVERROR(EAGAIN);
3599
        }
3600
    }
3601
 
3602
#if HAVE_PTHREADS
3603
    if (nb_input_files > 1)
3604
        return get_input_packet_mt(f, pkt);
3605
#endif
3606
    return av_read_frame(f->ctx, pkt);
3607
}
3608
 
3609
static int got_eagain(void)
3610
{
3611
    int i;
3612
    for (i = 0; i < nb_output_streams; i++)
3613
        if (output_streams[i]->unavailable)
3614
            return 1;
3615
    return 0;
3616
}
3617
 
3618
static void reset_eagain(void)
3619
{
3620
    int i;
3621
    for (i = 0; i < nb_input_files; i++)
3622
        input_files[i]->eagain = 0;
3623
    for (i = 0; i < nb_output_streams; i++)
3624
        output_streams[i]->unavailable = 0;
3625
}
3626
 
3627
/*
3628
 * Return
3629
 * - 0 -- one packet was read and processed
3630
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3631
 *   this function should be called again
3632
 * - AVERROR_EOF -- this function should not be called again
3633
 */
3634
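/* Reads one packet from the given input file, applies timestamp offset,
 * wrap and discontinuity corrections, and hands it to process_input_packet(). */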
static int process_input(int file_index)
3635
{
3636
    InputFile *ifile = input_files[file_index];
3637
    AVFormatContext *is;
3638
    InputStream *ist;
3639
    AVPacket pkt;
3640
    int ret, i, j;
3641
 
3642
    is  = ifile->ctx;
3643
    ret = get_input_packet(ifile, &pkt);
3644
 
3645
    if (ret == AVERROR(EAGAIN)) {
3646
        ifile->eagain = 1;
3647
        return ret;
3648
    }
3649
    if (ret < 0) {
3650
        if (ret != AVERROR_EOF) {
3651
            print_error(is->filename, ret);
3652
            if (exit_on_error)
3653
                exit_program(1);
3654
        }
3655
 
3656
        for (i = 0; i < ifile->nb_streams; i++) {
3657
            ist = input_streams[ifile->ist_index + i];
3658
            if (ist->decoding_needed) {
3659
                ret = process_input_packet(ist, NULL);
3660
                if (ret>0)
3661
                    return 0;
3662
            }
3663
 
3664
            /* mark all outputs that don't go through lavfi as finished */
3665
            for (j = 0; j < nb_output_streams; j++) {
3666
                OutputStream *ost = output_streams[j];
3667
 
3668
                if (ost->source_index == ifile->ist_index + i &&
3669
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3670
                    finish_output_stream(ost);
3671
            }
3672
        }
3673
 
3674
        ifile->eof_reached = 1;
3675
        return AVERROR(EAGAIN);
3676
    }
3677
 
3678
    reset_eagain();
3679
 
3680
    if (do_pkt_dump) {
3681
        av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3682
                         is->streams[pkt.stream_index]);
3683
    }
3684
    /* the following test is needed in case new streams appear
3685
       dynamically in the stream: we ignore them */
3686
    if (pkt.stream_index >= ifile->nb_streams) {
3687
        report_new_stream(file_index, &pkt);
3688
        goto discard_packet;
3689
    }
3690
 
3691
    ist = input_streams[ifile->ist_index + pkt.stream_index];
3692
 
3693
    ist->data_size += pkt.size;
3694
    ist->nb_packets++;
3695
 
3696
    if (ist->discard)
3697
        goto discard_packet;
3698
 
3699
    if (debug_ts) {
3700
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3701
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3702
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3703
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3704
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3705
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3706
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3707
               av_ts2str(input_files[ist->file_index]->ts_offset),
3708
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3709
    }
3710
 
3711
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3712
        int64_t stime, stime2;
3713
        // Correcting starttime based on the enabled streams
3714
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3715
        //       so we instead do it here as part of discontinuity handling
3716
        if (   ist->next_dts == AV_NOPTS_VALUE
3717
            && ifile->ts_offset == -is->start_time
3718
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3719
            int64_t new_start_time = INT64_MAX;
3720
            for (i = 0; i < is->nb_streams; i++) {
3721
                AVStream *st = is->streams[i];
3722
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3723
                    continue;
3724
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3725
            }
3726
            if (new_start_time > is->start_time) {
3727
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3728
                ifile->ts_offset = -new_start_time;
3729
            }
3730
        }
3731
 
3732
        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3733
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3734
        ist->wrap_correction_done = 1;
3735
 
3736
        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3737
            pkt.dts -= 1ULL<st->pts_wrap_bits;
3738
            ist->wrap_correction_done = 0;
3739
        }
3740
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3741
            pkt.pts -= 1ULL<st->pts_wrap_bits;
3742
            ist->wrap_correction_done = 0;
3743
        }
3744
    }
3745
 
3746
    /* add the stream-global side data to the first packet */
3747
    if (ist->nb_packets == 1) {
3748
        if (ist->st->nb_side_data)
3749
            av_packet_split_side_data(&pkt);
3750
        for (i = 0; i < ist->st->nb_side_data; i++) {
3751
            AVPacketSideData *src_sd = &ist->st->side_data[i];
3752
            uint8_t *dst_data;
3753
 
3754
            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3755
                continue;
3756
            if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3757
                continue;
3758
 
3759
            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3760
            if (!dst_data)
3761
                exit_program(1);
3762
 
3763
            memcpy(dst_data, src_sd->data, src_sd->size);
3764
        }
3765
    }
3766
 
3767
    if (pkt.dts != AV_NOPTS_VALUE)
3768
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3769
    if (pkt.pts != AV_NOPTS_VALUE)
3770
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3771
 
3772
    if (pkt.pts != AV_NOPTS_VALUE)
3773
        pkt.pts *= ist->ts_scale;
3774
    if (pkt.dts != AV_NOPTS_VALUE)
3775
        pkt.dts *= ist->ts_scale;
3776
 
3777
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3778
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3779
        pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3780
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3781
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3782
        int64_t delta   = pkt_dts - ifile->last_ts;
3783
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3784
            delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
3785
            ifile->ts_offset -= delta;
3786
            av_log(NULL, AV_LOG_DEBUG,
3787
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3788
                   delta, ifile->ts_offset);
3789
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3790
            if (pkt.pts != AV_NOPTS_VALUE)
3791
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3792
        }
3793
    }
3794
 
3795
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3796
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3797
         pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3798
        !copy_ts) {
3799
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3800
        int64_t delta   = pkt_dts - ist->next_dts;
3801
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
3802
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3803
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
3804
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3805
                ifile->ts_offset -= delta;
3806
                av_log(NULL, AV_LOG_DEBUG,
3807
                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3808
                       delta, ifile->ts_offset);
3809
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3810
                if (pkt.pts != AV_NOPTS_VALUE)
3811
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3812
            }
3813
        } else {
3814
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3815
                 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
3816
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3817
                pkt.dts = AV_NOPTS_VALUE;
3818
            }
3819
            if (pkt.pts != AV_NOPTS_VALUE){
3820
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3821
                delta   = pkt_pts - ist->next_dts;
3822
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3823
                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
3824
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3825
                    pkt.pts = AV_NOPTS_VALUE;
3826
                }
3827
            }
3828
        }
3829
    }
3830
 
3831
    if (pkt.dts != AV_NOPTS_VALUE)
3832
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3833
 
3834
    if (debug_ts) {
3835
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3836
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3837
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3838
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3839
               av_ts2str(input_files[ist->file_index]->ts_offset),
3840
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3841
    }
3842
 
3843
    sub2video_heartbeat(ist, pkt.pts);
3844
 
3845
    process_input_packet(ist, &pkt);
3846
 
3847
discard_packet:
3848
    av_free_packet(&pkt);
3849
 
3850
    return 0;
3851
}
3852
 
3853
/**
3854
 * Perform a step of transcoding for the specified filter graph.
3855
 *
3856
 * @param[in]  graph     filter graph to consider
3857
 * @param[out] best_ist  input stream where a frame would allow to continue
3858
 * @return  0 for success, <0 for error
3859
 */
3860
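/* When the graph cannot produce output yet (EAGAIN), the input whose buffer
 * source has accumulated the most failed requests is reported back so the
 * caller feeds it next. */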
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3861
{
3862
    int i, ret;
3863
    int nb_requests, nb_requests_max = 0;
3864
    InputFilter *ifilter;
3865
    InputStream *ist;
3866
 
3867
    *best_ist = NULL;
3868
    ret = avfilter_graph_request_oldest(graph->graph);
3869
    if (ret >= 0)
3870
        return reap_filters(0);
3871
 
3872
    if (ret == AVERROR_EOF) {
3873
        ret = reap_filters(1);
3874
        for (i = 0; i < graph->nb_outputs; i++)
3875
            close_output_stream(graph->outputs[i]->ost);
3876
        return ret;
3877
    }
3878
    if (ret != AVERROR(EAGAIN))
3879
        return ret;
3880
 
3881
    for (i = 0; i < graph->nb_inputs; i++) {
3882
        ifilter = graph->inputs[i];
3883
        ist = ifilter->ist;
3884
        if (input_files[ist->file_index]->eagain ||
3885
            input_files[ist->file_index]->eof_reached)
3886
            continue;
3887
        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3888
        if (nb_requests > nb_requests_max) {
3889
            nb_requests_max = nb_requests;
3890
            *best_ist = ist;
3891
        }
3892
    }
3893
 
3894
    if (!*best_ist)
3895
        for (i = 0; i < graph->nb_outputs; i++)
3896
            graph->outputs[i]->ost->unavailable = 1;
3897
 
3898
    return 0;
3899
}
3900
 
3901
/**
3902
 * Run a single step of transcoding.
3903
 *
3904
 * @return  0 for success, <0 for error
3905
 */
3906
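/* Picks the neediest output stream, then either pulls frames from its
 * filtergraph or demuxes one packet from its source input and reaps the
 * resulting filtered frames. */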
static int transcode_step(void)
3907
{
3908
    OutputStream *ost;
3909
    InputStream  *ist;
3910
    int ret;
3911
 
3912
    ost = choose_output();
3913
    if (!ost) {
3914
        if (got_eagain()) {
3915
            reset_eagain();
3916
            av_usleep(10000);
3917
            return 0;
3918
        }
3919
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3920
        return AVERROR_EOF;
3921
    }
3922
 
3923
    if (ost->filter) {
3924
        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3925
            return ret;
3926
        if (!ist)
3927
            return 0;
3928
    } else {
3929
        av_assert0(ost->source_index >= 0);
3930
        ist = input_streams[ost->source_index];
3931
    }
3932
 
3933
    ret = process_input(ist->file_index);
3934
    if (ret == AVERROR(EAGAIN)) {
3935
        if (input_files[ist->file_index]->eagain)
3936
            ost->unavailable = 1;
3937
        return 0;
3938
    }
3939
 
3940
    if (ret < 0)
3941
        return ret == AVERROR_EOF ? 0 : ret;
3942
 
3943
    return reap_filters(0);
3944
}
3945
 
3946
/*
3947
 * The following code is the main loop of the file converter
3948
 */
3949
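/* Initializes all streams, loops over transcode_step() until no more output
 * is wanted or an error/interrupt occurs, then flushes decoders and encoders,
 * writes the trailers and releases per-stream resources. */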
static int transcode(void)
3950
{
3951
    int ret, i;
3952
    AVFormatContext *os;
3953
    OutputStream *ost;
3954
    InputStream *ist;
3955
    int64_t timer_start;
3956
 
3957
    ret = transcode_init();
3958
    if (ret < 0)
3959
        goto fail;
3960
 
3961
    if (stdin_interaction) {
3962
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3963
    }
3964
 
3965
    timer_start = av_gettime_relative();
3966
 
3967
#if HAVE_PTHREADS
3968
    if ((ret = init_input_threads()) < 0)
3969
        goto fail;
3970
#endif
3971
 
3972
    while (!received_sigterm) {
3973
        int64_t cur_time= av_gettime_relative();
3974
 
3975
        /* if 'q' pressed, exits */
3976
        if (stdin_interaction)
3977
            if (check_keyboard_interaction(cur_time) < 0)
3978
                break;
3979
 
3980
        /* check if there's any stream where output is still needed */
3981
        if (!need_output()) {
3982
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3983
            break;
3984
        }
3985
 
3986
        ret = transcode_step();
3987
        if (ret < 0) {
3988
            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3989
                continue;
3990
            } else {
3991
                char errbuf[128];
3992
                av_strerror(ret, errbuf, sizeof(errbuf));
3993
 
3994
                av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3995
                break;
3996
            }
3997
        }
3998
 
3999
        /* dump report by using the output first video and audio streams */
4000
        print_report(0, timer_start, cur_time);
4001
    }
4002
#if HAVE_PTHREADS
4003
    free_input_threads();
4004
#endif
4005
 
4006
    /* at the end of stream, we must flush the decoder buffers */
4007
    for (i = 0; i < nb_input_streams; i++) {
4008
        ist = input_streams[i];
4009
        if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4010
            process_input_packet(ist, NULL);
4011
        }
4012
    }
4013
    flush_encoders();
4014
 
4015
    term_exit();
4016
 
4017
    /* write the trailer if needed and close file */
4018
    for (i = 0; i < nb_output_files; i++) {
4019
        os = output_files[i]->ctx;
4020
        av_write_trailer(os);
4021
    }
4022
 
4023
    /* dump report by using the first video and audio streams */
4024
    print_report(1, timer_start, av_gettime_relative());
4025
 
4026
    /* close each encoder */
4027
    for (i = 0; i < nb_output_streams; i++) {
4028
        ost = output_streams[i];
4029
        if (ost->encoding_needed) {
4030
            av_freep(&ost->enc_ctx->stats_in);
4031
        }
4032
    }
4033
 
4034
    /* close each decoder */
4035
    for (i = 0; i < nb_input_streams; i++) {
4036
        ist = input_streams[i];
4037
        if (ist->decoding_needed) {
4038
            avcodec_close(ist->dec_ctx);
4039
            if (ist->hwaccel_uninit)
4040
                ist->hwaccel_uninit(ist->dec_ctx);
4041
        }
4042
    }
4043
 
4044
    /* finished ! */
4045
    ret = 0;
4046
 
4047
 fail:
4048
#if HAVE_PTHREADS
4049
    free_input_threads();
4050
#endif
4051
 
4052
    if (output_streams) {
4053
        for (i = 0; i < nb_output_streams; i++) {
4054
            ost = output_streams[i];
4055
            if (ost) {
4056
                if (ost->logfile) {
4057
                    fclose(ost->logfile);
4058
                    ost->logfile = NULL;
4059
                }
4060
                av_freep(&ost->forced_kf_pts);
4061
                av_freep(&ost->apad);
4062
                av_freep(&ost->disposition);
4063
                av_dict_free(&ost->encoder_opts);
4064
                av_dict_free(&ost->sws_dict);
4065
                av_dict_free(&ost->swr_opts);
4066
                av_dict_free(&ost->resample_opts);
4067
                av_dict_free(&ost->bsf_args);
4068
            }
4069
        }
4070
    }
4071
    return ret;
4072
}
4073
 
4074
 
4075
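/* User CPU time consumed by the process, in microseconds; falls back to
 * wall-clock time when no OS facility is available. */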
static int64_t getutime(void)
4076
{
4077
#if HAVE_GETRUSAGE
4078
    struct rusage rusage;
4079
 
4080
    getrusage(RUSAGE_SELF, &rusage);
4081
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4082
#elif HAVE_GETPROCESSTIMES
4083
    HANDLE proc;
4084
    FILETIME c, e, k, u;
4085
    proc = GetCurrentProcess();
4086
    GetProcessTimes(proc, &c, &e, &k, &u);
4087
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4088
#else
4089
    return av_gettime_relative();
4090
#endif
4091
}
4092
 
4093
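/* Peak memory use of the process in bytes: maximum RSS where getrusage()
 * reports it, peak pagefile usage on Windows, 0 otherwise. */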
static int64_t getmaxrss(void)
4094
{
4095
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4096
    struct rusage rusage;
4097
    getrusage(RUSAGE_SELF, &rusage);
4098
    return (int64_t)rusage.ru_maxrss * 1024;
4099
#elif HAVE_GETPROCESSMEMORYINFO
4100
    HANDLE proc;
4101
    PROCESS_MEMORY_COUNTERS memcounters;
4102
    proc = GetCurrentProcess();
4103
    memcounters.cb = sizeof(memcounters);
4104
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4105
    return memcounters.PeakPagefileUsage;
4106
#else
4107
    return 0;
4108
#endif
4109
}
4110
 
4111
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4112
{
4113
}
4114
 
4115
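/* Entry point: register codecs, formats, devices and filters, parse the
 * command line, run transcode() and report benchmark and decode-error
 * statistics before exiting. */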
int main(int argc, char **argv)
4116
{
4117
    int ret;
4118
    int64_t ti;
4119
 
4120
    register_exit(ffmpeg_cleanup);
4121
 
4122
    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4123
 
4124
    av_log_set_flags(AV_LOG_SKIP_REPEATED);
4125
    parse_loglevel(argc, argv, options);
4126
 
4127
    if(argc>1 && !strcmp(argv[1], "-d")){
4128
        run_as_daemon=1;
4129
        av_log_set_callback(log_callback_null);
4130
        argc--;
4131
        argv++;
4132
    }
4133
 
4134
    avcodec_register_all();
4135
#if CONFIG_AVDEVICE
4136
    avdevice_register_all();
4137
#endif
4138
    avfilter_register_all();
4139
    av_register_all();
4140
    avformat_network_init();
4141
 
4142
    show_banner(argc, argv, options);
4143
 
4144
    term_init();
4145
 
4146
    /* parse options and open all input/output files */
4147
    ret = ffmpeg_parse_options(argc, argv);
4148
    if (ret < 0)
4149
        exit_program(1);
4150
 
4151
    if (nb_output_files <= 0 && nb_input_files == 0) {
4152
        show_usage();
4153
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4154
        exit_program(1);
4155
    }
4156
 
4157
    /* file converter / grab */
4158
    if (nb_output_files <= 0) {
4159
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4160
        exit_program(1);
4161
    }
4162
 
4163
//     if (nb_input_files == 0) {
4164
//         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4165
//         exit_program(1);
4166
//     }
4167
 
4168
    current_time = ti = getutime();
4169
    if (transcode() < 0)
4170
        exit_program(1);
4171
    ti = getutime() - ti;
4172
    if (do_benchmark) {
4173
        av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4174
    }
4175
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4176
           decode_error_stat[0], decode_error_stat[1]);
4177
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4178
        exit_program(69);
4179
 
4180
    exit_program(received_nb_signals ? 255 : main_return_code);
4181
    return main_return_code;
4182
}