Subversion Repositories Kolibri OS

Compare Revisions

No changes between revisions

Regard whitespace Rev 4348 → Rev 4349

/contrib/sdk/sources/ffmpeg/libavformat/4xm.c
0,0 → 1,382
/*
* 4X Technologies .4xm File Demuxer (no muxer)
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* 4X Technologies file demuxer
* by Mike Melanson (melanson@pcisys.net)
* for more information on the .4xm file format, visit:
* http://www.pcisys.net/~melanson/codecs/
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "avformat.h"
#include "internal.h"
 
/* FOURCC tags used by the RIFF container and the 4xm chunk hierarchy.
 * Uppercase tags are container-level; lowercase tags are 4xm sub-chunks. */
#define RIFF_TAG MKTAG('R', 'I', 'F', 'F')
#define FOURXMV_TAG MKTAG('4', 'X', 'M', 'V')
#define LIST_TAG MKTAG('L', 'I', 'S', 'T')
#define HEAD_TAG MKTAG('H', 'E', 'A', 'D')
#define TRK__TAG MKTAG('T', 'R', 'K', '_')
#define MOVI_TAG MKTAG('M', 'O', 'V', 'I')
#define VTRK_TAG MKTAG('V', 'T', 'R', 'K')
#define STRK_TAG MKTAG('S', 'T', 'R', 'K')
#define std__TAG MKTAG('s', 't', 'd', '_')
#define name_TAG MKTAG('n', 'a', 'm', 'e')
#define vtrk_TAG MKTAG('v', 't', 'r', 'k')
#define strk_TAG MKTAG('s', 't', 'r', 'k')
#define ifrm_TAG MKTAG('i', 'f', 'r', 'm')
#define pfrm_TAG MKTAG('p', 'f', 'r', 'm')
#define cfrm_TAG MKTAG('c', 'f', 'r', 'm')
#define ifr2_TAG MKTAG('i', 'f', 'r', '2')
#define pfr2_TAG MKTAG('p', 'f', 'r', '2')
#define cfr2_TAG MKTAG('c', 'f', 'r', '2')
#define snd__TAG MKTAG('s', 'n', 'd', '_')
 
/* Expected payload sizes of the vtrk/strk sub-chunks (parse_* reject others). */
#define vtrk_SIZE 0x44
#define strk_SIZE 0x28
 
/* Read one LIST chunk header from 'pb': consumes the 'LIST' fourcc, the
 * 32-bit chunk size, and the list-type fourcc (left in 'fourcc_tag').
 * NOTE: the macro contains a bare 'return', so it returns
 * AVERROR_INVALIDDATA from the *calling* function on a tag mismatch.
 * It may only be used where 'fourcc_tag', 'size' and 'pb' are in scope
 * and the enclosing function returns int. */
#define GET_LIST_HEADER() \
fourcc_tag = avio_rl32(pb); \
size = avio_rl32(pb); \
if (fourcc_tag != LIST_TAG) \
return AVERROR_INVALIDDATA; \
fourcc_tag = avio_rl32(pb);
 
/* Per-audio-track state parsed from an strk chunk. */
typedef struct AudioTrack {
int sample_rate;    /* samples per second, must be > 0 (validated in parse_strk) */
int bits;           /* bits per coded sample; >= 8 required for non-ADPCM */
int channels;       /* channel count, must be > 0 */
int stream_index;   /* index of the AVStream created for this track */
int adpcm;          /* nonzero: 4xm ADPCM; zero: raw PCM (u8 or s16le) */
int64_t audio_pts;  /* running pts in sample units for this track */
} AudioTrack;
 
/* Demuxer private context (AVFormatContext.priv_data). */
typedef struct FourxmDemuxContext {
int video_stream_index; /* index of the single video AVStream */
int track_count;        /* number of entries in 'tracks' */
AudioTrack *tracks;     /* av_realloc'd array, freed in read_close/fail paths */
 
int64_t video_pts;      /* current video pts; bumped on every LIST chunk */
float fps;              /* frame rate from the std_ chunk; used as pts timebase */
} FourxmDemuxContext;
 
/* Probe: a .4xm file is a RIFF container whose form type is '4XMV'. */
static int fourxm_probe(AVProbeData *p)
{
    if (AV_RL32(&p->buf[0]) == RIFF_TAG &&
        AV_RL32(&p->buf[8]) == FOURXMV_TAG)
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/**
 * Parse a vtrk (video track) sub-chunk and create the video AVStream.
 *
 * @param s      format context owning the new stream
 * @param fourxm demuxer context (receives the video stream index)
 * @param buf    chunk payload, starting at the vtrk fourcc
 * @param size   payload size reported by the chunk header
 * @param left   bytes remaining in the header buffer from 'buf'
 * @return 0 on success, negative AVERROR on malformed data or OOM
 */
static int parse_vtrk(AVFormatContext *s,
                      FourxmDemuxContext *fourxm, uint8_t *buf, int size,
                      int left)
{
    AVStream *st;

    /* check that there is enough data */
    if (size != vtrk_SIZE || left < size + 8) {
        return AVERROR_INVALIDDATA;
    }

    /* allocate a new AVStream */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    /* pts timebase: 1/fps, as read from the std_ chunk */
    avpriv_set_pts_info(st, 60, 1, fourxm->fps);

    fourxm->video_stream_index = st->index;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_4XM;

    /* BUG FIX: the av_malloc() result was previously used unchecked, so an
     * allocation failure crashed inside AV_WL32(). Allocate and verify
     * before touching extradata. */
    st->codec->extradata = av_malloc(4);
    if (!st->codec->extradata)
        return AVERROR(ENOMEM);
    st->codec->extradata_size = 4;
    AV_WL32(st->codec->extradata, AV_RL32(buf + 16));
    st->codec->width  = AV_RL32(buf + 36);
    st->codec->height = AV_RL32(buf + 40);

    return 0;
}
 
 
/* Parse an strk (audio track) sub-chunk: grow the track table to cover the
 * chunk's track number, validate the audio parameters, and create a matching
 * AVStream (4xm ADPCM, PCM u8 or PCM s16le).
 * Returns 0 on success, a negative AVERROR on bad data or OOM. */
static int parse_strk(AVFormatContext *s,
FourxmDemuxContext *fourxm, uint8_t *buf, int size,
int left)
{
AVStream *st;
int track;
/* check that there is enough data */
if (size != strk_SIZE || left < size + 8)
return AVERROR_INVALIDDATA;
 
track = AV_RL32(buf + 8);
/* unsigned cast also rejects negative track numbers; the bound keeps
 * (track + 1) * sizeof(AudioTrack) from overflowing below */
if ((unsigned)track >= UINT_MAX / sizeof(AudioTrack) - 1) {
av_log(s, AV_LOG_ERROR, "current_track too large\n");
return AVERROR_INVALIDDATA;
}
 
/* tracks may arrive out of order: grow the array and zero the gap */
if (track + 1 > fourxm->track_count) {
if (av_reallocp_array(&fourxm->tracks, track + 1, sizeof(AudioTrack)))
return AVERROR(ENOMEM);
memset(&fourxm->tracks[fourxm->track_count], 0,
sizeof(AudioTrack) * (track + 1 - fourxm->track_count));
fourxm->track_count = track + 1;
}
fourxm->tracks[track].adpcm = AV_RL32(buf + 12);
fourxm->tracks[track].channels = AV_RL32(buf + 36);
fourxm->tracks[track].sample_rate = AV_RL32(buf + 40);
fourxm->tracks[track].bits = AV_RL32(buf + 44);
fourxm->tracks[track].audio_pts = 0;
 
if (fourxm->tracks[track].channels <= 0 ||
fourxm->tracks[track].sample_rate <= 0 ||
fourxm->tracks[track].bits <= 0) {
av_log(s, AV_LOG_ERROR, "audio header invalid\n");
return AVERROR_INVALIDDATA;
}
/* read_packet divides by bits/8 for PCM, so bits >= 8 is required there */
if (!fourxm->tracks[track].adpcm && fourxm->tracks[track].bits<8) {
av_log(s, AV_LOG_ERROR, "bits unspecified for non ADPCM\n");
return AVERROR_INVALIDDATA;
}
 
/* allocate a new AVStream */
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
 
st->id = track;
/* audio pts timebase: 1/sample_rate */
avpriv_set_pts_info(st, 60, 1, fourxm->tracks[track].sample_rate);
 
fourxm->tracks[track].stream_index = st->index;
 
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_tag = 0;
st->codec->channels = fourxm->tracks[track].channels;
st->codec->sample_rate = fourxm->tracks[track].sample_rate;
st->codec->bits_per_coded_sample = fourxm->tracks[track].bits;
st->codec->bit_rate = st->codec->channels *
st->codec->sample_rate *
st->codec->bits_per_coded_sample;
st->codec->block_align = st->codec->channels *
st->codec->bits_per_coded_sample;
 
/* pick the decoder matching the track's coding */
if (fourxm->tracks[track].adpcm){
st->codec->codec_id = AV_CODEC_ID_ADPCM_4XM;
} else if (st->codec->bits_per_coded_sample == 8) {
st->codec->codec_id = AV_CODEC_ID_PCM_U8;
} else
st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
 
return 0;
}
 
/**
 * Read the file header: load the LIST-HEAD chunk into memory, scan it for
 * std_ (frame rate), vtrk and strk sub-chunks (creating streams as they are
 * found), then leave the stream positioned at the LIST-MOVI data.
 *
 * @return 0 on success, negative AVERROR on malformed data, short read or OOM
 */
static int fourxm_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int header_size;
    FourxmDemuxContext *fourxm = s->priv_data;
    unsigned char *header;
    int i, ret;

    fourxm->track_count = 0;
    fourxm->tracks      = NULL;
    fourxm->fps         = 1.0;

    /* skip the first 3 32-bit numbers */
    avio_skip(pb, 12);

    /* check for LIST-HEAD (GET_LIST_HEADER returns on a non-LIST tag) */
    GET_LIST_HEADER();
    header_size = size - 4;
    if (fourcc_tag != HEAD_TAG || header_size < 0)
        return AVERROR_INVALIDDATA;

    /* allocate space for the header and load the whole thing */
    header = av_malloc(header_size);
    if (!header)
        return AVERROR(ENOMEM);
    if (avio_read(pb, header, header_size) != header_size) {
        av_free(header);
        return AVERROR(EIO);
    }

    /* take the lazy approach and search for any and all vtrk and strk chunks */
    for (i = 0; i < header_size - 8; i++) {
        fourcc_tag = AV_RL32(&header[i]);
        size       = AV_RL32(&header[i + 4]);
        if (size > header_size - i - 8 &&
            (fourcc_tag == vtrk_TAG || fourcc_tag == strk_TAG)) {
            av_log(s, AV_LOG_ERROR, "chunk larger than array %d>%d\n",
                   size, header_size - i - 8);
            /* BUG FIX: this branch previously returned directly, leaking
             * 'header' and any already-allocated track table; route it
             * through the common cleanup instead. */
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        if (fourcc_tag == std__TAG) {
            if (header_size - i < 16) {
                av_log(s, AV_LOG_ERROR, "std TAG truncated\n");
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            /* frame rate is stored as a 32-bit float, little-endian */
            fourxm->fps = av_int2float(AV_RL32(&header[i + 12]));
        } else if (fourcc_tag == vtrk_TAG) {
            if ((ret = parse_vtrk(s, fourxm, header + i, size,
                                  header_size - i)) < 0)
                goto fail;

            i += 8 + size;
        } else if (fourcc_tag == strk_TAG) {
            if ((ret = parse_strk(s, fourxm, header + i, size,
                                  header_size - i)) < 0)
                goto fail;

            i += 8 + size;
        }
    }

    /* skip over the LIST-MOVI chunk (which is where the stream should be */
    GET_LIST_HEADER();
    if (fourcc_tag != MOVI_TAG) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    av_free(header);
    /* initialize context members */
    fourxm->video_pts = -1; /* first frame will push to 0 */

    return 0;
fail:
    av_freep(&fourxm->tracks);
    av_free(header);
    return ret;
}
 
/**
 * Read the next packet: walk chunks until a video frame (ifrm/pfrm/cfrm and
 * the *2 variants) or an audio snd_ chunk for a known track is found.
 * LIST chunks delimit video frames and bump the video pts; audio pts is
 * advanced by the number of samples in each snd_ payload.
 *
 * @return 0 or bytes read on success, negative AVERROR on EOF/error
 */
static int fourxm_read_packet(AVFormatContext *s,
                              AVPacket *pkt)
{
    FourxmDemuxContext *fourxm = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int ret = 0;
    unsigned int track_number;
    int packet_read = 0;
    unsigned char header[8];
    int audio_frame_count;

    while (!packet_read) {
        if ((ret = avio_read(s->pb, header, 8)) < 0)
            return ret;
        fourcc_tag = AV_RL32(&header[0]);
        size       = AV_RL32(&header[4]);
        if (url_feof(pb))
            return AVERROR(EIO);
        switch (fourcc_tag) {
        case LIST_TAG:
            /* this is a good time to bump the video pts */
            fourxm->video_pts++;

            /* skip the LIST-* tag and move on to the next fourcc */
            avio_rl32(pb);
            break;

        case ifrm_TAG:
        case pfrm_TAG:
        case cfrm_TAG:
        case ifr2_TAG:
        case pfr2_TAG:
        case cfr2_TAG:
            /* allocate 8 more bytes than 'size' to account for fourcc
             * and size; the size + 8 < size test rejects unsigned overflow */
            if (size + 8 < size || av_new_packet(pkt, size + 8))
                return AVERROR(EIO);
            pkt->stream_index = fourxm->video_stream_index;
            pkt->pts          = fourxm->video_pts;
            pkt->pos          = avio_tell(s->pb);
            memcpy(pkt->data, header, 8);
            ret = avio_read(s->pb, &pkt->data[8], size);

            if (ret < 0) {
                av_free_packet(pkt);
            } else
                packet_read = 1;
            break;

        case snd__TAG:
            /* BUG FIX: 'size' is unsigned, so 'size -= 8' below wrapped
             * around for chunks smaller than 8 bytes, producing a huge
             * skip/packet size; reject such chunks up front. */
            if (size < 8)
                return AVERROR_INVALIDDATA;
            track_number = avio_rl32(pb);
            avio_skip(pb, 4);
            size -= 8;

            if (track_number < fourxm->track_count &&
                fourxm->tracks[track_number].channels > 0) {
                ret = av_get_packet(s->pb, pkt, size);
                if (ret < 0)
                    return AVERROR(EIO);
                pkt->stream_index =
                    fourxm->tracks[track_number].stream_index;
                pkt->pts = fourxm->tracks[track_number].audio_pts;
                packet_read = 1;

                /* pts accounting: convert payload bytes to sample count.
                 * ADPCM: 2 header bytes/channel, then 2 samples per byte;
                 * PCM: bits/8 bytes per sample (bits >= 8 is enforced by
                 * parse_strk, channels > 0 checked above). */
                audio_frame_count = size;
                if (fourxm->tracks[track_number].adpcm)
                    audio_frame_count -= 2 * (fourxm->tracks[track_number].channels);
                audio_frame_count /= fourxm->tracks[track_number].channels;
                if (fourxm->tracks[track_number].adpcm) {
                    audio_frame_count *= 2;
                } else
                    audio_frame_count /=
                        (fourxm->tracks[track_number].bits / 8);
                fourxm->tracks[track_number].audio_pts += audio_frame_count;
            } else {
                /* audio for an unknown/empty track: discard */
                avio_skip(pb, size);
            }
            break;

        default:
            /* unrecognized chunk: skip its payload */
            avio_skip(pb, size);
            break;
        }
    }
    return ret;
}
 
/* Release demuxer-owned state; the AVStreams themselves belong to lavf. */
static int fourxm_read_close(AVFormatContext *s)
{
    FourxmDemuxContext *ctx = s->priv_data;

    av_freep(&ctx->tracks);
    return 0;
}
 
/* Demuxer registration entry: probe/header/packet/close callbacks for the
 * 4X Technologies .4xm format (demux only; no muxer exists). */
AVInputFormat ff_fourxm_demuxer = {
.name = "4xm",
.long_name = NULL_IF_CONFIG_SMALL("4X Technologies"),
.priv_data_size = sizeof(FourxmDemuxContext),
.read_probe = fourxm_probe,
.read_header = fourxm_read_header,
.read_packet = fourxm_read_packet,
.read_close = fourxm_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/Makefile
0,0 → 1,476
include $(SUBDIR)../config.mak
 
NAME = avformat
FFLIBS = avcodec avutil
 
HEADERS = avformat.h \
avio.h \
version.h \
 
OBJS = allformats.o \
avio.o \
aviobuf.o \
cutils.o \
format.o \
id3v1.o \
id3v2.o \
metadata.o \
mux.o \
options.o \
os_support.o \
riff.o \
sdp.o \
seek.o \
url.o \
utils.o \
 
OBJS-$(HAVE_MSVCRT) += file_open.o
 
OBJS-$(CONFIG_NETWORK) += network.o
OBJS-$(CONFIG_RIFFDEC) += riffdec.o
OBJS-$(CONFIG_RIFFENC) += riffenc.o
OBJS-$(CONFIG_RTPDEC) += rdt.o \
rtp.o \
rtpdec.o \
rtpdec_amr.o \
rtpdec_asf.o \
rtpdec_g726.o \
rtpdec_h263.o \
rtpdec_h263_rfc2190.o \
rtpdec_h264.o \
rtpdec_ilbc.o \
rtpdec_jpeg.o \
rtpdec_latm.o \
rtpdec_mpeg12.o \
rtpdec_mpeg4.o \
rtpdec_mpegts.o \
rtpdec_qcelp.o \
rtpdec_qdm2.o \
rtpdec_qt.o \
rtpdec_svq3.o \
rtpdec_vp8.o \
rtpdec_xiph.o \
srtp.o
OBJS-$(CONFIG_RTPENC_CHAIN) += rtpenc_chain.o rtp.o
OBJS-$(CONFIG_SHARED) += log2_tab.o
 
# muxers/demuxers
OBJS-$(CONFIG_A64_MUXER) += a64.o rawenc.o
OBJS-$(CONFIG_AAC_DEMUXER) += aacdec.o apetag.o img2.o rawdec.o
OBJS-$(CONFIG_AC3_DEMUXER) += ac3dec.o rawdec.o
OBJS-$(CONFIG_AC3_MUXER) += rawenc.o
OBJS-$(CONFIG_ACT_DEMUXER) += act.o
OBJS-$(CONFIG_ADF_DEMUXER) += bintext.o sauce.o
OBJS-$(CONFIG_ADP_DEMUXER) += adp.o
OBJS-$(CONFIG_ADX_DEMUXER) += adxdec.o
OBJS-$(CONFIG_ADX_MUXER) += rawenc.o
OBJS-$(CONFIG_ADTS_MUXER) += adtsenc.o apetag.o
OBJS-$(CONFIG_AEA_DEMUXER) += aea.o pcm.o
OBJS-$(CONFIG_AFC_DEMUXER) += afc.o
OBJS-$(CONFIG_AIFF_DEMUXER) += aiffdec.o pcm.o isom.o \
mov_chan.o
OBJS-$(CONFIG_AIFF_MUXER) += aiffenc.o isom.o id3v2enc.o
OBJS-$(CONFIG_AMR_DEMUXER) += amr.o
OBJS-$(CONFIG_AMR_MUXER) += amr.o
OBJS-$(CONFIG_ANM_DEMUXER) += anm.o
OBJS-$(CONFIG_APC_DEMUXER) += apc.o
OBJS-$(CONFIG_APE_DEMUXER) += ape.o apetag.o img2.o
OBJS-$(CONFIG_AQTITLE_DEMUXER) += aqtitledec.o subtitles.o
OBJS-$(CONFIG_ASF_DEMUXER) += asfdec.o asf.o asfcrypt.o \
avlanguage.o
OBJS-$(CONFIG_ASF_MUXER) += asfenc.o asf.o
OBJS-$(CONFIG_ASS_DEMUXER) += assdec.o subtitles.o
OBJS-$(CONFIG_ASS_MUXER) += assenc.o
OBJS-$(CONFIG_AST_DEMUXER) += ast.o astdec.o
OBJS-$(CONFIG_AST_MUXER) += ast.o astenc.o
OBJS-$(CONFIG_AU_DEMUXER) += au.o pcm.o
OBJS-$(CONFIG_AU_MUXER) += au.o rawenc.o
OBJS-$(CONFIG_AVI_DEMUXER) += avidec.o
OBJS-$(CONFIG_AVI_MUXER) += avienc.o
OBJS-$(CONFIG_AVISYNTH) += avisynth.o
OBJS-$(CONFIG_AVM2_MUXER) += swfenc.o swf.o
OBJS-$(CONFIG_AVR_DEMUXER) += avr.o pcm.o
OBJS-$(CONFIG_AVS_DEMUXER) += avs.o vocdec.o voc.o
OBJS-$(CONFIG_BETHSOFTVID_DEMUXER) += bethsoftvid.o
OBJS-$(CONFIG_BFI_DEMUXER) += bfi.o
OBJS-$(CONFIG_BINK_DEMUXER) += bink.o
OBJS-$(CONFIG_BINTEXT_DEMUXER) += bintext.o sauce.o
OBJS-$(CONFIG_BIT_DEMUXER) += bit.o
OBJS-$(CONFIG_BIT_MUXER) += bit.o
OBJS-$(CONFIG_BMV_DEMUXER) += bmv.o
OBJS-$(CONFIG_BOA_DEMUXER) += boadec.o
OBJS-$(CONFIG_BRSTM_DEMUXER) += brstm.o
OBJS-$(CONFIG_C93_DEMUXER) += c93.o vocdec.o voc.o
OBJS-$(CONFIG_CAF_DEMUXER) += cafdec.o caf.o mov.o mov_chan.o \
isom.o
OBJS-$(CONFIG_CAF_MUXER) += cafenc.o caf.o riff.o isom.o
OBJS-$(CONFIG_CAVSVIDEO_DEMUXER) += cavsvideodec.o rawdec.o
OBJS-$(CONFIG_CAVSVIDEO_MUXER) += rawenc.o
OBJS-$(CONFIG_CDG_DEMUXER) += cdg.o
OBJS-$(CONFIG_CDXL_DEMUXER) += cdxl.o
OBJS-$(CONFIG_CONCAT_DEMUXER) += concatdec.o
OBJS-$(CONFIG_CRC_MUXER) += crcenc.o
OBJS-$(CONFIG_DATA_DEMUXER) += rawdec.o
OBJS-$(CONFIG_DATA_MUXER) += rawdec.o
OBJS-$(CONFIG_DAUD_DEMUXER) += daud.o
OBJS-$(CONFIG_DAUD_MUXER) += daud.o
OBJS-$(CONFIG_DFA_DEMUXER) += dfa.o
OBJS-$(CONFIG_DIRAC_DEMUXER) += diracdec.o rawdec.o
OBJS-$(CONFIG_DIRAC_MUXER) += rawenc.o
OBJS-$(CONFIG_DNXHD_DEMUXER) += dnxhddec.o rawdec.o
OBJS-$(CONFIG_DNXHD_MUXER) += rawenc.o
OBJS-$(CONFIG_DSICIN_DEMUXER) += dsicin.o
OBJS-$(CONFIG_DTSHD_DEMUXER) += dtshddec.o
OBJS-$(CONFIG_DTS_DEMUXER) += dtsdec.o rawdec.o
OBJS-$(CONFIG_DTS_MUXER) += rawenc.o
OBJS-$(CONFIG_DV_DEMUXER) += dv.o
OBJS-$(CONFIG_DV_MUXER) += dvenc.o
OBJS-$(CONFIG_DXA_DEMUXER) += dxa.o
OBJS-$(CONFIG_EA_CDATA_DEMUXER) += eacdata.o
OBJS-$(CONFIG_EA_DEMUXER) += electronicarts.o
OBJS-$(CONFIG_EAC3_DEMUXER) += ac3dec.o rawdec.o
OBJS-$(CONFIG_EAC3_MUXER) += rawenc.o
OBJS-$(CONFIG_EPAF_DEMUXER) += epafdec.o pcm.o
OBJS-$(CONFIG_FFM_DEMUXER) += ffmdec.o
OBJS-$(CONFIG_FFM_MUXER) += ffmenc.o
OBJS-$(CONFIG_FFMETADATA_DEMUXER) += ffmetadec.o
OBJS-$(CONFIG_FFMETADATA_MUXER) += ffmetaenc.o
OBJS-$(CONFIG_FILMSTRIP_DEMUXER) += filmstripdec.o
OBJS-$(CONFIG_FILMSTRIP_MUXER) += filmstripenc.o
OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o rawdec.o \
flac_picture.o \
oggparsevorbis.o \
vorbiscomment.o
OBJS-$(CONFIG_FLAC_MUXER) += flacenc.o flacenc_header.o \
vorbiscomment.o
OBJS-$(CONFIG_FLIC_DEMUXER) += flic.o
OBJS-$(CONFIG_FLV_DEMUXER) += flvdec.o
OBJS-$(CONFIG_FLV_MUXER) += flvenc.o avc.o
OBJS-$(CONFIG_FOURXM_DEMUXER) += 4xm.o
OBJS-$(CONFIG_FRAMECRC_MUXER) += framecrcenc.o framehash.o
OBJS-$(CONFIG_FRAMEMD5_MUXER) += md5enc.o framehash.o
OBJS-$(CONFIG_FRM_DEMUXER) += frmdec.o
OBJS-$(CONFIG_GIF_MUXER) += gif.o
OBJS-$(CONFIG_GIF_DEMUXER) += gifdec.o
OBJS-$(CONFIG_GSM_DEMUXER) += gsmdec.o
OBJS-$(CONFIG_GXF_DEMUXER) += gxf.o
OBJS-$(CONFIG_GXF_MUXER) += gxfenc.o audiointerleave.o
OBJS-$(CONFIG_G722_DEMUXER) += g722.o rawdec.o
OBJS-$(CONFIG_G722_MUXER) += rawenc.o
OBJS-$(CONFIG_G723_1_DEMUXER) += g723_1.o
OBJS-$(CONFIG_G723_1_MUXER) += rawenc.o
OBJS-$(CONFIG_G729_DEMUXER) += g729dec.o
OBJS-$(CONFIG_H261_DEMUXER) += h261dec.o rawdec.o
OBJS-$(CONFIG_H261_MUXER) += rawenc.o
OBJS-$(CONFIG_H263_DEMUXER) += h263dec.o rawdec.o
OBJS-$(CONFIG_H263_MUXER) += rawenc.o
OBJS-$(CONFIG_H264_DEMUXER) += h264dec.o rawdec.o
OBJS-$(CONFIG_H264_MUXER) += rawenc.o
OBJS-$(CONFIG_HEVC_DEMUXER) += hevcdec.o rawdec.o
OBJS-$(CONFIG_HLS_DEMUXER) += hls.o
OBJS-$(CONFIG_HLS_MUXER) += hlsenc.o
OBJS-$(CONFIG_ICO_DEMUXER) += icodec.o
OBJS-$(CONFIG_ICO_MUXER) += icoenc.o
OBJS-$(CONFIG_IDCIN_DEMUXER) += idcin.o
OBJS-$(CONFIG_IDF_DEMUXER) += bintext.o sauce.o
OBJS-$(CONFIG_IFF_DEMUXER) += iff.o
OBJS-$(CONFIG_ILBC_DEMUXER) += ilbc.o
OBJS-$(CONFIG_ILBC_MUXER) += ilbc.o
OBJS-$(CONFIG_IMAGE2_DEMUXER) += img2dec.o img2.o
OBJS-$(CONFIG_IMAGE2_MUXER) += img2enc.o img2.o
OBJS-$(CONFIG_IMAGE2PIPE_DEMUXER) += img2dec.o img2.o
OBJS-$(CONFIG_IMAGE2PIPE_MUXER) += img2enc.o img2.o
OBJS-$(CONFIG_INGENIENT_DEMUXER) += ingenientdec.o rawdec.o
OBJS-$(CONFIG_IPMOVIE_DEMUXER) += ipmovie.o
OBJS-$(CONFIG_IRCAM_DEMUXER) += ircamdec.o ircam.o pcm.o
OBJS-$(CONFIG_IRCAM_MUXER) += ircamenc.o ircam.o rawenc.o
OBJS-$(CONFIG_ISS_DEMUXER) += iss.o
OBJS-$(CONFIG_IV8_DEMUXER) += iv8.o
OBJS-$(CONFIG_IVF_DEMUXER) += ivfdec.o
OBJS-$(CONFIG_IVF_MUXER) += ivfenc.o
OBJS-$(CONFIG_JACOSUB_DEMUXER) += jacosubdec.o subtitles.o
OBJS-$(CONFIG_JACOSUB_MUXER) += jacosubenc.o rawenc.o
OBJS-$(CONFIG_JV_DEMUXER) += jvdec.o
OBJS-$(CONFIG_LATM_DEMUXER) += rawdec.o
OBJS-$(CONFIG_LATM_MUXER) += latmenc.o rawenc.o
OBJS-$(CONFIG_LMLM4_DEMUXER) += lmlm4.o
OBJS-$(CONFIG_LOAS_DEMUXER) += loasdec.o rawdec.o
OBJS-$(CONFIG_LVF_DEMUXER) += lvfdec.o
OBJS-$(CONFIG_LXF_DEMUXER) += lxfdec.o
OBJS-$(CONFIG_M4V_DEMUXER) += m4vdec.o rawdec.o
OBJS-$(CONFIG_M4V_MUXER) += rawenc.o
OBJS-$(CONFIG_MATROSKA_DEMUXER) += matroskadec.o matroska.o \
isom.o rmsipr.o
OBJS-$(CONFIG_MATROSKA_MUXER) += matroskaenc.o matroska.o \
isom.o avc.o \
flacenc_header.o avlanguage.o wv.o
OBJS-$(CONFIG_MD5_MUXER) += md5enc.o
OBJS-$(CONFIG_MGSTS_DEMUXER) += mgsts.o
OBJS-$(CONFIG_MICRODVD_DEMUXER) += microdvddec.o subtitles.o
OBJS-$(CONFIG_MICRODVD_MUXER) += microdvdenc.o
OBJS-$(CONFIG_MJPEG_DEMUXER) += rawdec.o
OBJS-$(CONFIG_MJPEG_MUXER) += rawenc.o
OBJS-$(CONFIG_MLP_DEMUXER) += rawdec.o
OBJS-$(CONFIG_MLP_MUXER) += rawenc.o
OBJS-$(CONFIG_MM_DEMUXER) += mm.o
OBJS-$(CONFIG_MMF_DEMUXER) += mmf.o
OBJS-$(CONFIG_MMF_MUXER) += mmf.o rawenc.o
OBJS-$(CONFIG_MOV_DEMUXER) += mov.o isom.o mov_chan.o
OBJS-$(CONFIG_MOV_MUXER) += movenc.o isom.o avc.o \
movenchint.o mov_chan.o rtp.o
OBJS-$(CONFIG_MP2_MUXER) += mp3enc.o rawenc.o id3v2enc.o
OBJS-$(CONFIG_MP3_DEMUXER) += mp3dec.o
OBJS-$(CONFIG_MP3_MUXER) += mp3enc.o rawenc.o id3v2enc.o
OBJS-$(CONFIG_MPC_DEMUXER) += mpc.o apetag.o img2.o
OBJS-$(CONFIG_MPC8_DEMUXER) += mpc8.o apetag.o img2.o
OBJS-$(CONFIG_MPEG1SYSTEM_MUXER) += mpegenc.o
OBJS-$(CONFIG_MPEG1VCD_MUXER) += mpegenc.o
OBJS-$(CONFIG_MPEG2DVD_MUXER) += mpegenc.o
OBJS-$(CONFIG_MPEG2VOB_MUXER) += mpegenc.o
OBJS-$(CONFIG_MPEG2SVCD_MUXER) += mpegenc.o
OBJS-$(CONFIG_MPEG1VIDEO_MUXER) += rawenc.o
OBJS-$(CONFIG_MPEG2VIDEO_MUXER) += rawenc.o
OBJS-$(CONFIG_MPEGPS_DEMUXER) += mpeg.o
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpegts.o isom.o
OBJS-$(CONFIG_MPEGTS_MUXER) += mpegtsenc.o
OBJS-$(CONFIG_MPEGVIDEO_DEMUXER) += mpegvideodec.o rawdec.o
OBJS-$(CONFIG_MPJPEG_MUXER) += mpjpeg.o
OBJS-$(CONFIG_MPL2_DEMUXER) += mpl2dec.o subtitles.o
OBJS-$(CONFIG_MPSUB_DEMUXER) += mpsubdec.o subtitles.o
OBJS-$(CONFIG_MSNWC_TCP_DEMUXER) += msnwc_tcp.o
OBJS-$(CONFIG_MTV_DEMUXER) += mtv.o
OBJS-$(CONFIG_MVI_DEMUXER) += mvi.o
OBJS-$(CONFIG_MV_DEMUXER) += mvdec.o
OBJS-$(CONFIG_MXF_DEMUXER) += mxfdec.o mxf.o
OBJS-$(CONFIG_MXF_MUXER) += mxfenc.o mxf.o audiointerleave.o
OBJS-$(CONFIG_MXG_DEMUXER) += mxg.o
OBJS-$(CONFIG_NC_DEMUXER) += ncdec.o
OBJS-$(CONFIG_NISTSPHERE_DEMUXER) += nistspheredec.o pcm.o
OBJS-$(CONFIG_NSV_DEMUXER) += nsvdec.o
OBJS-$(CONFIG_NULL_MUXER) += nullenc.o
OBJS-$(CONFIG_NUT_DEMUXER) += nutdec.o nut.o
OBJS-$(CONFIG_NUT_MUXER) += nutenc.o nut.o
OBJS-$(CONFIG_NUV_DEMUXER) += nuv.o
OBJS-$(CONFIG_OGG_DEMUXER) += oggdec.o \
oggparsecelt.o \
oggparsedirac.o \
oggparseflac.o \
oggparseogm.o \
oggparseopus.o \
oggparseskeleton.o \
oggparsespeex.o \
oggparsetheora.o \
oggparsevorbis.o \
vorbiscomment.o \
flac_picture.o
OBJS-$(CONFIG_OGG_MUXER) += oggenc.o \
vorbiscomment.o
OBJS-$(CONFIG_OMA_DEMUXER) += omadec.o pcm.o oma.o
OBJS-$(CONFIG_OMA_MUXER) += omaenc.o rawenc.o oma.o id3v2enc.o
OBJS-$(CONFIG_PAF_DEMUXER) += paf.o
OBJS-$(CONFIG_PCM_ALAW_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_ALAW_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_F32BE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_F32BE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_F32LE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_F32LE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_F64BE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_F64BE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_F64LE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_F64LE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_MULAW_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_MULAW_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_S16BE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_S16BE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_S16LE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_S16LE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_S24BE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_S24BE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_S24LE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_S24LE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_S32BE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_S32BE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_S32LE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_S32LE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_S8_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_S8_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_U16BE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_U16BE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_U16LE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_U16LE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_U24BE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_U24BE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_U24LE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_U24LE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_U32BE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_U32BE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_U32LE_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_U32LE_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PCM_U8_DEMUXER) += pcmdec.o pcm.o
OBJS-$(CONFIG_PCM_U8_MUXER) += pcmenc.o rawenc.o
OBJS-$(CONFIG_PJS_DEMUXER) += pjsdec.o subtitles.o
OBJS-$(CONFIG_PMP_DEMUXER) += pmpdec.o
OBJS-$(CONFIG_PVA_DEMUXER) += pva.o
OBJS-$(CONFIG_PVF_DEMUXER) += pvfdec.o pcm.o
OBJS-$(CONFIG_QCP_DEMUXER) += qcp.o
OBJS-$(CONFIG_R3D_DEMUXER) += r3d.o
OBJS-$(CONFIG_RAWVIDEO_DEMUXER) += rawvideodec.o
OBJS-$(CONFIG_RAWVIDEO_MUXER) += rawenc.o
OBJS-$(CONFIG_REALTEXT_DEMUXER) += realtextdec.o subtitles.o
OBJS-$(CONFIG_REDSPARK_DEMUXER) += redspark.o
OBJS-$(CONFIG_RL2_DEMUXER) += rl2.o
OBJS-$(CONFIG_RM_DEMUXER) += rmdec.o rm.o rmsipr.o
OBJS-$(CONFIG_RM_MUXER) += rmenc.o rm.o
OBJS-$(CONFIG_ROQ_DEMUXER) += idroqdec.o
OBJS-$(CONFIG_ROQ_MUXER) += idroqenc.o rawenc.o
OBJS-$(CONFIG_RSD_DEMUXER) += rsd.o
OBJS-$(CONFIG_RSO_DEMUXER) += rsodec.o rso.o pcm.o
OBJS-$(CONFIG_RSO_MUXER) += rsoenc.o rso.o
OBJS-$(CONFIG_RPL_DEMUXER) += rpl.o
OBJS-$(CONFIG_RTP_MUXER) += rtp.o \
rtpenc_aac.o \
rtpenc_latm.o \
rtpenc_amr.o \
rtpenc_h263.o \
rtpenc_h263_rfc2190.o \
rtpenc_jpeg.o \
rtpenc_mpv.o \
rtpenc.o \
rtpenc_h264.o \
rtpenc_vp8.o \
rtpenc_xiph.o \
avc.o
OBJS-$(CONFIG_RTSP_DEMUXER) += rtsp.o rtspdec.o httpauth.o \
urldecode.o
OBJS-$(CONFIG_RTSP_MUXER) += rtsp.o rtspenc.o httpauth.o \
urldecode.o
OBJS-$(CONFIG_SAMI_DEMUXER) += samidec.o subtitles.o
OBJS-$(CONFIG_SAP_DEMUXER) += sapdec.o
OBJS-$(CONFIG_SAP_MUXER) += sapenc.o
OBJS-$(CONFIG_SBG_DEMUXER) += sbgdec.o
OBJS-$(CONFIG_SDP_DEMUXER) += rtsp.o
OBJS-$(CONFIG_SEGAFILM_DEMUXER) += segafilm.o
OBJS-$(CONFIG_SEGMENT_MUXER) += segment.o
OBJS-$(CONFIG_SHORTEN_DEMUXER) += rawdec.o
OBJS-$(CONFIG_SIFF_DEMUXER) += siff.o
OBJS-$(CONFIG_SMACKER_DEMUXER) += smacker.o
OBJS-$(CONFIG_SMJPEG_DEMUXER) += smjpegdec.o smjpeg.o
OBJS-$(CONFIG_SMJPEG_MUXER) += smjpegenc.o smjpeg.o
OBJS-$(CONFIG_SMOOTHSTREAMING_MUXER) += smoothstreamingenc.o isom.o
OBJS-$(CONFIG_SMUSH_DEMUXER) += smush.o
OBJS-$(CONFIG_SOL_DEMUXER) += sol.o pcm.o
OBJS-$(CONFIG_SOX_DEMUXER) += soxdec.o pcm.o
OBJS-$(CONFIG_SOX_MUXER) += soxenc.o rawenc.o
OBJS-$(CONFIG_SPDIF_DEMUXER) += spdif.o spdifdec.o
OBJS-$(CONFIG_SPDIF_MUXER) += spdif.o spdifenc.o
OBJS-$(CONFIG_SRT_DEMUXER) += srtdec.o subtitles.o
OBJS-$(CONFIG_SRT_MUXER) += srtenc.o
OBJS-$(CONFIG_STR_DEMUXER) += psxstr.o
OBJS-$(CONFIG_SUBVIEWER1_DEMUXER) += subviewer1dec.o subtitles.o
OBJS-$(CONFIG_SUBVIEWER_DEMUXER) += subviewerdec.o subtitles.o
OBJS-$(CONFIG_SWF_DEMUXER) += swfdec.o swf.o
OBJS-$(CONFIG_SWF_MUXER) += swfenc.o swf.o
OBJS-$(CONFIG_TAK_DEMUXER) += takdec.o apetag.o img2.o rawdec.o
OBJS-$(CONFIG_TEDCAPTIONS_DEMUXER) += tedcaptionsdec.o subtitles.o
OBJS-$(CONFIG_TEE_MUXER) += tee.o
OBJS-$(CONFIG_THP_DEMUXER) += thp.o
OBJS-$(CONFIG_TIERTEXSEQ_DEMUXER) += tiertexseq.o
OBJS-$(CONFIG_MKVTIMESTAMP_V2_MUXER) += mkvtimestamp_v2.o
OBJS-$(CONFIG_TMV_DEMUXER) += tmv.o
OBJS-$(CONFIG_TRUEHD_DEMUXER) += rawdec.o
OBJS-$(CONFIG_TRUEHD_MUXER) += rawenc.o
OBJS-$(CONFIG_TTA_DEMUXER) += tta.o apetag.o img2.o
OBJS-$(CONFIG_TTY_DEMUXER) += tty.o sauce.o
OBJS-$(CONFIG_TXD_DEMUXER) += txd.o
OBJS-$(CONFIG_VC1_DEMUXER) += rawdec.o
OBJS-$(CONFIG_VC1_MUXER) += rawenc.o
OBJS-$(CONFIG_VC1T_DEMUXER) += vc1test.o
OBJS-$(CONFIG_VC1T_MUXER) += vc1testenc.o
OBJS-$(CONFIG_VIVO_DEMUXER) += vivo.o
OBJS-$(CONFIG_VMD_DEMUXER) += sierravmd.o
OBJS-$(CONFIG_VOBSUB_DEMUXER) += subtitles.o # mpeg demuxer is in the dependencies
OBJS-$(CONFIG_VOC_DEMUXER) += vocdec.o voc.o
OBJS-$(CONFIG_VOC_MUXER) += vocenc.o voc.o
OBJS-$(CONFIG_VPLAYER_DEMUXER) += vplayerdec.o subtitles.o
OBJS-$(CONFIG_VQF_DEMUXER) += vqf.o
OBJS-$(CONFIG_W64_DEMUXER) += wavdec.o w64.o pcm.o
OBJS-$(CONFIG_W64_MUXER) += wavenc.o w64.o
OBJS-$(CONFIG_WAV_DEMUXER) += wavdec.o pcm.o
OBJS-$(CONFIG_WAV_MUXER) += wavenc.o
OBJS-$(CONFIG_WC3_DEMUXER) += wc3movie.o
OBJS-$(CONFIG_WEBM_MUXER) += matroskaenc.o matroska.o \
isom.o avc.o \
flacenc_header.o avlanguage.o wv.o
OBJS-$(CONFIG_WEBVTT_DEMUXER) += webvttdec.o subtitles.o
OBJS-$(CONFIG_WEBVTT_MUXER) += webvttenc.o
OBJS-$(CONFIG_WSAUD_DEMUXER) += westwood_aud.o
OBJS-$(CONFIG_WSVQA_DEMUXER) += westwood_vqa.o
OBJS-$(CONFIG_WTV_DEMUXER) += wtvdec.o wtv_common.o asfdec.o asf.o asfcrypt.o \
avlanguage.o mpegts.o isom.o
OBJS-$(CONFIG_WTV_MUXER) += wtvenc.o wtv_common.o asf.o asfenc.o
OBJS-$(CONFIG_WV_DEMUXER) += wvdec.o wv.o apetag.o img2.o
OBJS-$(CONFIG_WV_MUXER) += wvenc.o wv.o apetag.o img2.o
OBJS-$(CONFIG_XA_DEMUXER) += xa.o
OBJS-$(CONFIG_XBIN_DEMUXER) += bintext.o sauce.o
OBJS-$(CONFIG_XMV_DEMUXER) += xmv.o
OBJS-$(CONFIG_XWMA_DEMUXER) += xwma.o
OBJS-$(CONFIG_YOP_DEMUXER) += yop.o
OBJS-$(CONFIG_YUV4MPEGPIPE_MUXER) += yuv4mpeg.o
OBJS-$(CONFIG_YUV4MPEGPIPE_DEMUXER) += yuv4mpeg.o
 
# external libraries
OBJS-$(CONFIG_LIBGME_DEMUXER) += libgme.o
OBJS-$(CONFIG_LIBMODPLUG_DEMUXER) += libmodplug.o
OBJS-$(CONFIG_LIBNUT_DEMUXER) += libnut.o
OBJS-$(CONFIG_LIBNUT_MUXER) += libnut.o
OBJS-$(CONFIG_LIBQUVI_DEMUXER) += libquvi.o
OBJS-$(CONFIG_LIBRTMP) += librtmp.o
OBJS-$(CONFIG_LIBSSH_PROTOCOL) += libssh.o
 
# protocols I/O
OBJS-$(CONFIG_APPLEHTTP_PROTOCOL) += hlsproto.o
OBJS-$(CONFIG_BLURAY_PROTOCOL) += bluray.o
OBJS-$(CONFIG_CACHE_PROTOCOL) += cache.o
OBJS-$(CONFIG_CONCAT_PROTOCOL) += concat.o
OBJS-$(CONFIG_CRYPTO_PROTOCOL) += crypto.o
OBJS-$(CONFIG_DATA_PROTOCOL) += data_uri.o
OBJS-$(CONFIG_FFRTMPCRYPT_PROTOCOL) += rtmpcrypt.o rtmpdh.o
OBJS-$(CONFIG_FFRTMPHTTP_PROTOCOL) += rtmphttp.o
OBJS-$(CONFIG_FILE_PROTOCOL) += file.o
OBJS-$(CONFIG_FTP_PROTOCOL) += ftp.o
OBJS-$(CONFIG_GOPHER_PROTOCOL) += gopher.o
OBJS-$(CONFIG_HLS_PROTOCOL) += hlsproto.o
OBJS-$(CONFIG_HTTP_PROTOCOL) += http.o httpauth.o urldecode.o
OBJS-$(CONFIG_HTTPPROXY_PROTOCOL) += http.o httpauth.o urldecode.o
OBJS-$(CONFIG_HTTPS_PROTOCOL) += http.o httpauth.o urldecode.o
OBJS-$(CONFIG_MMSH_PROTOCOL) += mmsh.o mms.o asf.o
OBJS-$(CONFIG_MMST_PROTOCOL) += mmst.o mms.o asf.o
OBJS-$(CONFIG_MD5_PROTOCOL) += md5proto.o
OBJS-$(CONFIG_PIPE_PROTOCOL) += file.o
OBJS-$(CONFIG_RTMP_PROTOCOL) += rtmpproto.o rtmppkt.o
OBJS-$(CONFIG_RTMPE_PROTOCOL) += rtmpproto.o rtmppkt.o
OBJS-$(CONFIG_RTMPS_PROTOCOL) += rtmpproto.o rtmppkt.o
OBJS-$(CONFIG_RTMPT_PROTOCOL) += rtmpproto.o rtmppkt.o
OBJS-$(CONFIG_RTMPTE_PROTOCOL) += rtmpproto.o rtmppkt.o
OBJS-$(CONFIG_RTMPTS_PROTOCOL) += rtmpproto.o rtmppkt.o
OBJS-$(CONFIG_RTP_PROTOCOL) += rtpproto.o
OBJS-$(CONFIG_SCTP_PROTOCOL) += sctp.o
OBJS-$(CONFIG_SRTP_PROTOCOL) += srtpproto.o srtp.o
OBJS-$(CONFIG_TCP_PROTOCOL) += tcp.o
OBJS-$(CONFIG_TLS_PROTOCOL) += tls.o
OBJS-$(CONFIG_UDP_PROTOCOL) += udp.o
OBJS-$(CONFIG_UNIX_PROTOCOL) += unix.o
 
SKIPHEADERS-$(CONFIG_FFRTMPCRYPT_PROTOCOL) += rtmpdh.h
SKIPHEADERS-$(CONFIG_NETWORK) += network.h rtsp.h
TESTPROGS = seek \
srtp \
url \
 
TESTPROGS-$(CONFIG_NETWORK) += noproxy
 
TOOLS = aviocat \
ismindex \
pktdumper \
probetest \
seek_print \
/contrib/sdk/sources/ffmpeg/libavformat/a64.c
0,0 → 1,62
/*
* a64 muxer
* Copyright (c) 2009 Tobias Bindhammer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/avcodec.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "rawenc.h"
 
static int a64_write_header(struct AVFormatContext *s)
{
AVCodecContext *avctx = s->streams[0]->codec;
uint8_t header[5] = {
0x00, //load
0x40, //address
0x00, //mode
0x00, //charset_lifetime (multi only)
0x00 //fps in 50/fps;
};
switch (avctx->codec->id) {
case AV_CODEC_ID_A64_MULTI:
header[2] = 0x00;
header[3] = AV_RB32(avctx->extradata+0);
header[4] = 2;
break;
case AV_CODEC_ID_A64_MULTI5:
header[2] = 0x01;
header[3] = AV_RB32(avctx->extradata+0);
header[4] = 3;
break;
default:
return AVERROR(EINVAL);
}
avio_write(s->pb, header, 2);
return 0;
}
 
/* Muxer registration entry for the a64 Commodore 64 video format;
 * packets are written raw via ff_raw_write_packet. */
AVOutputFormat ff_a64_muxer = {
.name = "a64",
.long_name = NULL_IF_CONFIG_SMALL("a64 - video for Commodore 64"),
.extensions = "a64, A64",
.video_codec = AV_CODEC_ID_A64_MULTI,
.write_header = a64_write_header,
.write_packet = ff_raw_write_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/aacdec.c
0,0 → 1,100
/*
* raw ADTS AAC demuxer
* Copyright (c) 2008 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2009 Robert Swain ( rob opendot cl )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "rawdec.h"
#include "id3v1.h"
#include "apetag.h"
 
/* Probe for raw ADTS AAC: try every byte offset as a potential syncword and
 * count how many consecutive, size-consistent ADTS frames follow it.  Score
 * by the longest run found and by whether the run starts at offset 0. */
static int adts_aac_probe(AVProbeData *p)
{
    int best_run = 0, run_at_start = 0;
    const uint8_t *start = p->buf;
    const uint8_t *end   = start + p->buf_size - 7;
    const uint8_t *probe = start;

    while (probe < end) {
        const uint8_t *pos = probe;
        int run = 0;

        /* follow the chain of ADTS frame headers from this offset */
        while (pos < end) {
            int frame_len;

            if ((AV_RB16(pos) & 0xFFF6) != 0xFFF0)
                break;
            frame_len = (AV_RB32(pos + 3) >> 13) & 0x1FFF;
            if (frame_len < 7)
                break;
            frame_len = FFMIN(frame_len, end - pos);
            pos += frame_len;
            run++;
        }

        best_run = FFMAX(best_run, run);
        if (probe == start)
            run_at_start = run;

        probe = pos + 1;
    }

    if (run_at_start >= 3)
        return AVPROBE_SCORE_EXTENSION + 1;
    if (best_run > 500)
        return AVPROBE_SCORE_EXTENSION;
    if (best_run >= 3)
        return AVPROBE_SCORE_EXTENSION / 2;
    if (best_run >= 1)
        return 1;
    return 0;
}
 
/* Set up the single audio stream and pick up any ID3v1/APE tags that
 * trail the raw ADTS data. */
static int adts_aac_read_header(AVFormatContext *s)
{
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    stream->codec->codec_id   = s->iformat->raw_codec_id;
    stream->need_parsing      = AVSTREAM_PARSE_FULL_RAW;

    ff_id3v1_read(s);
    if (s->pb->seekable &&
        !av_dict_get(s->metadata, "", NULL, AV_DICT_IGNORE_SUFFIX)) {
        int64_t pos = avio_tell(s->pb);
        ff_ape_parse_tag(s);
        avio_seek(s->pb, pos, SEEK_SET);
    }

    /* 28224000 is the least common multiple of all possible ADTS sample rates */
    avpriv_set_pts_info(stream, 64, 1, 28224000);

    return 0;
}
 
/* Demuxer for raw ADTS AAC elementary streams (*.aac). */
AVInputFormat ff_aac_demuxer = {
    .name         = "aac",
    .long_name    = NULL_IF_CONFIG_SMALL("raw ADTS AAC (Advanced Audio Coding)"),
    .read_probe   = adts_aac_probe,
    .read_header  = adts_aac_read_header,
    .read_packet  = ff_raw_read_partial_packet,
    .flags        = AVFMT_GENERIC_INDEX,
    .extensions   = "aac",
    .raw_codec_id = AV_CODEC_ID_AAC,
};
/contrib/sdk/sources/ffmpeg/libavformat/ac3dec.c
0,0 → 1,123
/*
* RAW AC-3 and E-AC-3 demuxer
* Copyright (c) 2007 Justin Ruggles <justin.ruggles@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/crc.h"
#include "libavcodec/ac3_parser.h"
#include "avformat.h"
#include "rawdec.h"
 
/* Shared probe for AC-3 / E-AC-3: walk runs of consecutive frames (handling
 * both native and byte-swapped streams), validate each frame's CRC, and
 * score on the longest run. Returns 0 when the detected codec does not
 * match expected_codec_id.
 *
 * Bug fix: the byte-swap loops previously read from buf (the start of the
 * scan) instead of buf2 (the current frame), so every frame after the
 * first in a byte-swapped stream was un-swapped from the wrong bytes. */
static int ac3_eac3_probe(AVProbeData *p, enum AVCodecID expected_codec_id)
{
    int max_frames, first_frames = 0, frames;
    const uint8_t *buf, *buf2, *end;
    AC3HeaderInfo hdr;
    GetBitContext gbc;
    enum AVCodecID codec_id = AV_CODEC_ID_AC3;

    max_frames = 0;
    buf = p->buf;
    end = buf + p->buf_size;

    for(; buf < end; buf++) {
        /* only start a scan on a (possibly byte-swapped) syncword */
        if(buf > p->buf && !(buf[0] == 0x0B && buf[1] == 0x77)
                        && !(buf[0] == 0x77 && buf[1] == 0x0B) )
            continue;
        buf2 = buf;

        for(frames = 0; buf2 < end; frames++) {
            uint8_t buf3[4096];
            int i;
            if(!memcmp(buf2, "\x1\x10\0\0\0\0\0\0", 8))
                buf2+=16;
            if (buf[0] == 0x77 && buf[1] == 0x0B) {
                /* byte-swapped stream: un-swap the current frame's header,
                 * reading from buf2 (this frame), not buf (scan start) */
                for(i=0; i<8; i+=2) {
                    buf3[i  ] = buf2[i+1];
                    buf3[i+1] = buf2[i  ];
                }
                init_get_bits(&gbc, buf3, 54);
            }else
                init_get_bits(&gbc, buf2, 54);
            if(avpriv_ac3_parse_header(&gbc, &hdr) < 0)
                break;
            if(buf2 + hdr.frame_size > end)
                break;
            if (buf[0] == 0x77 && buf[1] == 0x0B) {
                av_assert0(hdr.frame_size <= sizeof(buf3));
                /* un-swap the rest of the frame for the CRC check below */
                for(i=8; i<hdr.frame_size; i+=2) {
                    buf3[i  ] = buf2[i+1];
                    buf3[i+1] = buf2[i  ];
                }
            }
            if(av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, gbc.buffer + 2, hdr.frame_size - 2))
                break;
            if (hdr.bitstream_id > 10)
                codec_id = AV_CODEC_ID_EAC3;
            buf2 += hdr.frame_size;
        }
        max_frames = FFMAX(max_frames, frames);
        if(buf == p->buf)
            first_frames = frames;
    }
    if(codec_id != expected_codec_id) return 0;
    // keep this in sync with mp3 probe, both need to avoid
    // issues with MPEG-files!
    if (first_frames>=4) return AVPROBE_SCORE_EXTENSION + 1;
    else if(max_frames>200)return AVPROBE_SCORE_EXTENSION;
    else if(max_frames>=4) return AVPROBE_SCORE_EXTENSION/2;
    else if(max_frames>=1) return 1;
    else return 0;
}
 
#if CONFIG_AC3_DEMUXER
/* Probe entry point restricted to plain AC-3 streams. */
static int ac3_probe(AVProbeData *p)
{
    return ac3_eac3_probe(p, AV_CODEC_ID_AC3);
}
 
/* Demuxer for raw AC-3 elementary streams. */
AVInputFormat ff_ac3_demuxer = {
    .name         = "ac3",
    .long_name    = NULL_IF_CONFIG_SMALL("raw AC-3"),
    .read_probe   = ac3_probe,
    .read_header  = ff_raw_audio_read_header,
    .read_packet  = ff_raw_read_partial_packet,
    .flags        = AVFMT_GENERIC_INDEX,
    .extensions   = "ac3",
    .raw_codec_id = AV_CODEC_ID_AC3,
};
#endif
 
#if CONFIG_EAC3_DEMUXER
/* Probe entry point restricted to E-AC-3 streams. */
static int eac3_probe(AVProbeData *p)
{
    return ac3_eac3_probe(p, AV_CODEC_ID_EAC3);
}
 
/* Demuxer for raw E-AC-3 elementary streams. */
AVInputFormat ff_eac3_demuxer = {
    .name           = "eac3",
    .long_name      = NULL_IF_CONFIG_SMALL("raw E-AC-3"),
    .read_probe     = eac3_probe,
    .read_header    = ff_raw_audio_read_header,
    .read_packet    = ff_raw_read_partial_packet,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "eac3",
    .raw_codec_id   = AV_CODEC_ID_EAC3,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/act.c
0,0 → 1,207
/*
* ACT file format demuxer
* Copyright (c) 2007-2008 Vladimir Voroshilov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "riff.h"
#include "internal.h"
#include "libavcodec/get_bits.h"
 
#define CHUNK_SIZE 512
#define RIFF_TAG MKTAG('R','I','F','F')
#define WAVE_TAG MKTAG('W','A','V','E')
 
/* Demuxer state for ACT voice recordings (G.729 data in a WAV-like shell). */
typedef struct{
    int bytes_left_in_chunk; ///< bytes remaining in the current 512-byte chunk
    uint8_t audio_buffer[22];///< temporary buffer for ACT frame
    char second_packet;      ///< 1 - if temporary buffer contains valid (second) G.729 packet
} ACTContext;
 
/* ACT files masquerade as RIFF/WAVE; tell them apart by the zero padding
 * and the 0x84 marker inside the fixed 512-byte header. */
static int probe(AVProbeData *p)
{
    int i;

    if (AV_RL32(&p->buf[0])  != RIFF_TAG ||
        AV_RL32(&p->buf[8])  != WAVE_TAG ||
        AV_RL32(&p->buf[16]) != 16)
        return 0;

    /* need the whole first chunk to rule out a regular WAV file */
    if (p->buf_size < 512)
        return 0;

    for (i = 44; i < 256; i++) {
        if (p->buf[i])
            return 0;
    }

    if (p->buf[256] != 0x84)
        return 0;

    for (i = 264; i < 512; i++) {
        if (p->buf[i])
            return 0;
    }

    return AVPROBE_SCORE_MAX;
}
 
/* Parse the fixed ACT header: a fake RIFF/WAVE preamble, the recording
 * duration stored at offset 257 (msec/sec/min), then sound data at 512.
 * Bug fix: the return value of ff_get_wav_header() was ignored, so a
 * malformed fmt chunk went undetected. */
static int read_header(AVFormatContext *s)
{
    ACTContext* ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    int size, ret;
    AVStream* st;

    int min,sec,msec;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    avio_skip(pb, 16);
    size = avio_rl32(pb);
    ret = ff_get_wav_header(pb, st->codec, size);
    if (ret < 0)
        return ret;

    /*
      8000Hz (Fine-rec) file format has 10 bytes long
      packets with 10ms of sound data in them
    */
    if (st->codec->sample_rate != 8000) {
        av_log(s, AV_LOG_ERROR, "Sample rate %d is not supported.\n", st->codec->sample_rate);
        return AVERROR_INVALIDDATA;
    }

    st->codec->frame_size = 80;
    st->codec->channels   = 1;
    avpriv_set_pts_info(st, 64, 1, 100);

    st->codec->codec_id = AV_CODEC_ID_G729;

    /* duration fields live at a fixed offset in the header */
    avio_seek(pb, 257, SEEK_SET);
    msec = avio_rl16(pb);
    sec  = avio_r8(pb);
    min  = avio_rl32(pb);

    st->duration = av_rescale(1000*(min*60+sec)+msec, st->codec->sample_rate, 1000 * st->codec->frame_size);

    ctx->bytes_left_in_chunk = CHUNK_SIZE;

    avio_seek(pb, 512, SEEK_SET);

    return 0;
}
 
 
/* Read one G.729 packet. 8000 Hz (Fine-rec) files store one scrambled
 * 10-byte frame per record; the 4400 Hz path de-interleaves two 11-byte
 * frames from each 22-byte record (note: read_header currently rejects
 * any rate other than 8000, so the 4400 branches look unreachable here
 * — TODO confirm). */
static int read_packet(AVFormatContext *s,
                          AVPacket *pkt)
{
    ACTContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret;
    int frame_size=s->streams[0]->codec->sample_rate==8000?10:22;


    if(s->streams[0]->codec->sample_rate==8000)
        ret=av_new_packet(pkt, 10);
    else
        ret=av_new_packet(pkt, 11);

    if(ret)
        return ret;

    if(s->streams[0]->codec->sample_rate==4400 && !ctx->second_packet)
    {
        /* read a full 22-byte record and emit its first frame; the
         * second frame stays buffered for the next call */
        ret = avio_read(pb, ctx->audio_buffer, frame_size);

        if(ret<0)
            return ret;
        if(ret!=frame_size)
            return AVERROR(EIO);

        /* un-interleave bytes of the first frame */
        pkt->data[0]=ctx->audio_buffer[11];
        pkt->data[1]=ctx->audio_buffer[0];
        pkt->data[2]=ctx->audio_buffer[12];
        pkt->data[3]=ctx->audio_buffer[1];
        pkt->data[4]=ctx->audio_buffer[13];
        pkt->data[5]=ctx->audio_buffer[2];
        pkt->data[6]=ctx->audio_buffer[14];
        pkt->data[7]=ctx->audio_buffer[3];
        pkt->data[8]=ctx->audio_buffer[15];
        pkt->data[9]=ctx->audio_buffer[4];
        pkt->data[10]=ctx->audio_buffer[16];

        ctx->second_packet=1;
    }
    else if(s->streams[0]->codec->sample_rate==4400 && ctx->second_packet)
    {
        /* emit the second frame from the previously buffered record */
        pkt->data[0]=ctx->audio_buffer[5];
        pkt->data[1]=ctx->audio_buffer[17];
        pkt->data[2]=ctx->audio_buffer[6];
        pkt->data[3]=ctx->audio_buffer[18];
        pkt->data[4]=ctx->audio_buffer[7];
        pkt->data[5]=ctx->audio_buffer[19];
        pkt->data[6]=ctx->audio_buffer[8];
        pkt->data[7]=ctx->audio_buffer[20];
        pkt->data[8]=ctx->audio_buffer[9];
        pkt->data[9]=ctx->audio_buffer[21];
        pkt->data[10]=ctx->audio_buffer[10];

        ctx->second_packet=0;
    }
    else // 8000 Hz
    {
        ret = avio_read(pb, ctx->audio_buffer, frame_size);

        if(ret<0)
            return ret;
        if(ret!=frame_size)
            return AVERROR(EIO);

        /* de-scramble the single 10-byte frame */
        pkt->data[0]=ctx->audio_buffer[5];
        pkt->data[1]=ctx->audio_buffer[0];
        pkt->data[2]=ctx->audio_buffer[6];
        pkt->data[3]=ctx->audio_buffer[1];
        pkt->data[4]=ctx->audio_buffer[7];
        pkt->data[5]=ctx->audio_buffer[2];
        pkt->data[6]=ctx->audio_buffer[8];
        pkt->data[7]=ctx->audio_buffer[3];
        pkt->data[8]=ctx->audio_buffer[9];
        pkt->data[9]=ctx->audio_buffer[4];
    }

    ctx->bytes_left_in_chunk -= frame_size;

    /* skip the per-chunk padding when too little is left for a frame */
    if(ctx->bytes_left_in_chunk < frame_size)
    {
        avio_skip(pb, ctx->bytes_left_in_chunk);
        ctx->bytes_left_in_chunk=CHUNK_SIZE;
    }

    pkt->duration=1;

    return ret;
}
 
/* Demuxer for ACT voice recorder files.
 * Consistency fix: wrap long_name in NULL_IF_CONFIG_SMALL like every
 * other (de)muxer in libavformat, so it is dropped in small builds. */
AVInputFormat ff_act_demuxer = {
    .name           = "act",
    .long_name      = NULL_IF_CONFIG_SMALL("ACT Voice file format"),
    .priv_data_size = sizeof(ACTContext),
    .read_probe     = probe,
    .read_header    = read_header,
    .read_packet    = read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/adp.c
0,0 → 1,91
/*
* ADP demuxer
* Copyright (c) 2013 James Almer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
/* DTK ADPCM blocks repeat their first two header bytes; check that the
 * duplication holds at the start of every 32-byte block. */
static int adp_probe(AVProbeData *p)
{
    int off;

    if (p->buf_size < 32)
        return 0;

    for (off = 0; off < p->buf_size - 3; off += 32) {
        if (p->buf[off]     != p->buf[off + 2] ||
            p->buf[off + 1] != p->buf[off + 3])
            return 0;
    }

    if (p->buf_size < 260)
        return 1;
    return AVPROBE_SCORE_MAX / 4;
}
 
/* ADP carries a fixed stream layout: 48 kHz stereo Nintendo DTK ADPCM. */
static int adp_read_header(AVFormatContext *s)
{
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    stream->codec->codec_id       = AV_CODEC_ID_ADPCM_DTK;
    stream->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    stream->codec->channels       = 2;
    stream->codec->sample_rate    = 48000;
    stream->start_time            = 0;
    if (s->pb->seekable)
        stream->duration = av_get_audio_frame_duration(stream->codec,
                                                       avio_size(s->pb));

    avpriv_set_pts_info(stream, 64, 1, stream->codec->sample_rate);

    return 0;
}
 
/* Deliver fixed 1024-byte slices of the stream, shrinking the last one. */
static int adp_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    const int size = 1024;
    int ret;

    if (url_feof(s->pb))
        return AVERROR_EOF;

    ret = av_get_packet(s->pb, pkt, size);
    if (ret < 0) {
        av_free_packet(pkt);
        return ret;
    }
    if (ret < size)
        av_shrink_packet(pkt, ret);

    pkt->stream_index = 0;

    return ret;
}
 
/* Demuxer for Nintendo GameCube/Wii ADP (DTK) streaming audio. */
AVInputFormat ff_adp_demuxer = {
    .name           = "adp",
    .long_name      = NULL_IF_CONFIG_SMALL("ADP"),
    .read_probe     = adp_probe,
    .read_header    = adp_read_header,
    .read_packet    = adp_read_packet,
    .extensions     = "adp,dtk",
};
/contrib/sdk/sources/ffmpeg/libavformat/adtsenc.c
0,0 → 1,205
/*
* ADTS muxer.
* Copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@smartjog.com>
* Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/get_bits.h"
#include "libavcodec/put_bits.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/mpeg4audio.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "apetag.h"
 
#define ADTS_HEADER_SIZE 7
 
/* Muxer state derived from the stream's AudioSpecificConfig. */
typedef struct {
    AVClass *class;
    int write_adts;        /* nonzero once a valid config has been parsed */
    int objecttype;        /* profile_objecttype header field (AOT - 1) */
    int sample_rate_index; /* 4-bit sampling_frequency_index */
    int channel_conf;      /* 3-bit channel_configuration (0 = PCE present) */
    int pce_size;          /* bytes of PCE data pending for the next frame */
    int apetag;            /* user option: write an APE tag in the trailer */
    uint8_t pce_data[MAX_PCE_SIZE];
} ADTSContext;
 
#define ADTS_MAX_FRAME_BYTES ((1 << 13) - 1)
 
/* Parse the MPEG-4 AudioSpecificConfig in extradata, reject configurations
 * that cannot be represented in an ADTS header, and cache the fields
 * (object type, sample rate index, channel configuration, optional PCE)
 * used to build per-frame headers. Returns 0 on success, negative on
 * failure. */
static int adts_decode_extradata(AVFormatContext *s, ADTSContext *adts, uint8_t *buf, int size)
{
    GetBitContext gb;
    PutBitContext pb;
    MPEG4AudioConfig m4ac;
    int off;

    init_get_bits(&gb, buf, size * 8);
    off = avpriv_mpeg4audio_get_config(&m4ac, buf, size * 8, 1);
    if (off < 0)
        return off;
    /* position the reader just past the config parsed above */
    skip_bits_long(&gb, off);
    adts->objecttype        = m4ac.object_type - 1;
    adts->sample_rate_index = m4ac.sampling_index;
    adts->channel_conf      = m4ac.chan_config;

    /* ADTS has only 2 bits for profile_objecttype */
    if (adts->objecttype > 3U) {
        av_log(s, AV_LOG_ERROR, "MPEG-4 AOT %d is not allowed in ADTS\n", adts->objecttype+1);
        return -1;
    }
    if (adts->sample_rate_index == 15) {
        av_log(s, AV_LOG_ERROR, "Escape sample rate index illegal in ADTS\n");
        return -1;
    }
    /* the next three config bits must all be zero for ADTS */
    if (get_bits(&gb, 1)) {
        av_log(s, AV_LOG_ERROR, "960/120 MDCT window is not allowed in ADTS\n");
        return -1;
    }
    if (get_bits(&gb, 1)) {
        av_log(s, AV_LOG_ERROR, "Scalable configurations are not allowed in ADTS\n");
        return -1;
    }
    if (get_bits(&gb, 1)) {
        av_log(s, AV_LOG_ERROR, "Extension flag is not allowed in ADTS\n");
        return -1;
    }
    /* no channel_configuration: copy the PCE so it can be emitted in-band */
    if (!adts->channel_conf) {
        init_put_bits(&pb, adts->pce_data, MAX_PCE_SIZE);

        put_bits(&pb, 3, 5); //ID_PCE
        adts->pce_size = (avpriv_copy_pce_data(&pb, &gb) + 3) / 8;
        flush_put_bits(&pb);
    }

    adts->write_adts = 1;

    return 0;
}
 
/* Muxer init: parse the AudioSpecificConfig (if present) so per-frame
 * ADTS headers can be generated. */
static int adts_write_header(AVFormatContext *s)
{
    ADTSContext *ctx = s->priv_data;
    AVCodecContext *codec = s->streams[0]->codec;

    if (codec->extradata_size > 0) {
        if (adts_decode_extradata(s, ctx, codec->extradata,
                                  codec->extradata_size) < 0)
            return -1;
    }

    return 0;
}
 
/* Build the 7-byte ADTS header for one raw AAC frame of the given payload
 * size (plus an optional in-band PCE). Fails when the total frame length
 * exceeds the 13-bit aac_frame_length field. */
static int adts_write_frame_header(ADTSContext *ctx,
                                   uint8_t *buf, int size, int pce_size)
{
    PutBitContext pb;

    unsigned full_frame_size = (unsigned)ADTS_HEADER_SIZE + size + pce_size;
    if (full_frame_size > ADTS_MAX_FRAME_BYTES) {
        av_log(NULL, AV_LOG_ERROR, "ADTS frame size too large: %u (max %d)\n",
               full_frame_size, ADTS_MAX_FRAME_BYTES);
        return AVERROR_INVALIDDATA;
    }

    init_put_bits(&pb, buf, ADTS_HEADER_SIZE);

    /* adts_fixed_header */
    put_bits(&pb, 12, 0xfff);   /* syncword */
    put_bits(&pb, 1, 0);        /* ID */
    put_bits(&pb, 2, 0);        /* layer */
    put_bits(&pb, 1, 1);        /* protection_absent */
    put_bits(&pb, 2, ctx->objecttype); /* profile_objecttype */
    put_bits(&pb, 4, ctx->sample_rate_index);
    put_bits(&pb, 1, 0);        /* private_bit */
    put_bits(&pb, 3, ctx->channel_conf); /* channel_configuration */
    put_bits(&pb, 1, 0);        /* original_copy */
    put_bits(&pb, 1, 0);        /* home */

    /* adts_variable_header */
    put_bits(&pb, 1, 0);        /* copyright_identification_bit */
    put_bits(&pb, 1, 0);        /* copyright_identification_start */
    put_bits(&pb, 13, full_frame_size); /* aac_frame_length */
    put_bits(&pb, 11, 0x7ff);   /* adts_buffer_fullness; 0x7ff = VBR */
    put_bits(&pb, 2, 0);        /* number_of_raw_data_blocks_in_frame */

    flush_put_bits(&pb);

    return 0;
}
 
/* Write one frame: ADTS header (when configured), a one-shot PCE after
 * the first header, then the raw AAC payload. */
static int adts_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ADTSContext *ctx = s->priv_data;
    AVIOContext *out = s->pb;
    uint8_t hdr[ADTS_HEADER_SIZE];

    if (!pkt->size)
        return 0;

    if (ctx->write_adts) {
        int err = adts_write_frame_header(ctx, hdr, pkt->size, ctx->pce_size);
        if (err < 0)
            return err;
        avio_write(out, hdr, ADTS_HEADER_SIZE);
        if (ctx->pce_size) {
            avio_write(out, ctx->pce_data, ctx->pce_size);
            ctx->pce_size = 0; /* the PCE is only emitted once */
        }
    }
    avio_write(out, pkt->data, pkt->size);

    return 0;
}
 
/* Optionally append an APE tag after the last frame. */
static int adts_write_trailer(AVFormatContext *s)
{
    ADTSContext *ctx = s->priv_data;

    if (ctx->apetag)
        ff_ape_write_tag(s);

    return 0;
}
 
#define ENC AV_OPT_FLAG_ENCODING_PARAM
#define OFFSET(obj) offsetof(ADTSContext, obj)
/* Private muxer options (exposed as -write_apetag). */
static const AVOption options[] = {
    { "write_apetag", "Enable APE tag writing", OFFSET(apetag), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, ENC},
    { NULL },
};
 
/* AVClass so the option table above is reachable via the AVOptions API. */
static const AVClass adts_muxer_class = {
    .class_name     = "ADTS muxer",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
};
 
/* Muxer wrapping raw AAC frames in ADTS headers. */
AVOutputFormat ff_adts_muxer = {
    .name              = "adts",
    .long_name         = NULL_IF_CONFIG_SMALL("ADTS AAC (Advanced Audio Coding)"),
    .mime_type         = "audio/aac",
    .extensions        = "aac,adts",
    .priv_data_size    = sizeof(ADTSContext),
    .audio_codec       = AV_CODEC_ID_AAC,
    .video_codec       = AV_CODEC_ID_NONE,
    .write_header      = adts_write_header,
    .write_packet      = adts_write_packet,
    .write_trailer     = adts_write_trailer,
    .priv_class        = &adts_muxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/adxdec.c
0,0 → 1,112
/*
* Copyright (c) 2011 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* CRI ADX demuxer
*/
 
#include "libavutil/intreadwrite.h"
#include "libavcodec/adx.h"
#include "avformat.h"
#include "internal.h"
 
#define BLOCK_SIZE 18
#define BLOCK_SAMPLES 32
 
/* Demuxer state: size of the ADX header preceding the sound data. */
typedef struct ADXDemuxerContext {
    int header_size;
} ADXDemuxerContext;
 
/* Read one packet of BLOCK_SIZE bytes per channel; a set high bit in the
 * first 16 bits marks the end-of-stream block.
 * Bug fix: a zero (or negative) channel count made size 0, which divided
 * by zero when deriving the pts below — reject it up front. */
static int adx_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    ADXDemuxerContext *c = s->priv_data;
    AVCodecContext *avctx = s->streams[0]->codec;
    int ret, size;

    if (avctx->channels <= 0) {
        av_log(s, AV_LOG_ERROR, "invalid number of channels %d\n",
               avctx->channels);
        return AVERROR_INVALIDDATA;
    }

    size = BLOCK_SIZE * avctx->channels;

    pkt->pos = avio_tell(s->pb);
    pkt->stream_index = 0;

    ret = av_get_packet(s->pb, pkt, size);
    if (ret != size) {
        av_free_packet(pkt);
        return ret < 0 ? ret : AVERROR(EIO);
    }
    if (AV_RB16(pkt->data) & 0x8000) {
        av_free_packet(pkt);
        return AVERROR_EOF;
    }
    pkt->size     = size;
    pkt->duration = 1;
    pkt->pts      = (pkt->pos - c->header_size) / size;

    return 0;
}
 
/* Parse the ADX file header: validate the 0x8000 magic, copy the whole
 * header into extradata, and let the codec-level parser fill in the
 * stream parameters. */
static int adx_read_header(AVFormatContext *s)
{
    ADXDemuxerContext *c = s->priv_data;
    AVCodecContext *avctx;
    int ret;

    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avctx = s->streams[0]->codec;

    if (avio_rb16(s->pb) != 0x8000)
        return AVERROR_INVALIDDATA;
    /* 16-bit copyright offset + 4 gives the total header size */
    c->header_size = avio_rb16(s->pb) + 4;
    /* rewind so extradata contains the full header including the magic */
    avio_seek(s->pb, -4, SEEK_CUR);

    if (ff_alloc_extradata(avctx, c->header_size))
        return AVERROR(ENOMEM);
    if (avio_read(s->pb, avctx->extradata, c->header_size) < c->header_size) {
        av_freep(&avctx->extradata);
        return AVERROR(EIO);
    }
    avctx->extradata_size = c->header_size;

    ret = avpriv_adx_decode_header(avctx, avctx->extradata,
                                   avctx->extradata_size, &c->header_size,
                                   NULL);
    if (ret)
        return ret;

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = s->iformat->raw_codec_id;

    /* one pts tick per BLOCK_SAMPLES-sample block */
    avpriv_set_pts_info(st, 64, BLOCK_SAMPLES, avctx->sample_rate);

    return 0;
}
 
/* Demuxer for CRI ADX game audio files. */
AVInputFormat ff_adx_demuxer = {
    .name           = "adx",
    .long_name      = NULL_IF_CONFIG_SMALL("CRI ADX"),
    .priv_data_size = sizeof(ADXDemuxerContext),
    .read_header    = adx_read_header,
    .read_packet    = adx_read_packet,
    .extensions     = "adx",
    .raw_codec_id   = AV_CODEC_ID_ADPCM_ADX,
    .flags          = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/aea.c
0,0 → 1,105
/*
* MD STUDIO audio demuxer
*
* Copyright (c) 2009 Benjamin Larsson
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "pcm.h"
 
#define AT1_SU_SIZE 212
 
/* Probe MD STUDIO (.aea) files: magic at offset 0, a sane channel count
 * at offset 264, and matching redundant header bytes at offset 2048. */
static int aea_read_probe(AVProbeData *p)
{
    int bsm_s, bsm_e, inb_s, inb_e, ch;

    if (p->buf_size <= 2048 + 212)
        return 0;

    /* Magic is '00 08 00 00' in little endian */
    if (AV_RL32(p->buf) != 0x800)
        return 0;

    ch    = p->buf[264];
    bsm_s = p->buf[2048];
    inb_s = p->buf[2048 + 1];
    inb_e = p->buf[2048 + 210];
    bsm_e = p->buf[2048 + 211];

    if (ch != 1 && ch != 2)
        return 0;

    /* Check so that the redundant bsm bytes and info bytes are valid
     * the block size mode bytes have to be the same
     * the info bytes have to be the same
     */
    if (bsm_s == bsm_e && inb_s == inb_e)
        return AVPROBE_SCORE_MAX / 4 + 1;

    return 0;
}
 
/* Set up the single ATRAC1 stream from the fixed-layout AEA header.
 * Consistency fix: return AVERROR_INVALIDDATA instead of a bare -1 on a
 * bad channel count, matching the error style used across libavformat. */
static int aea_read_header(AVFormatContext *s)
{
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    /* Parse the amount of channels and skip to pos 2048(0x800) */
    avio_skip(s->pb, 264);
    st->codec->channels = avio_r8(s->pb);
    avio_skip(s->pb, 1783);

    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id    = AV_CODEC_ID_ATRAC1;
    st->codec->sample_rate = 44100;
    st->codec->bit_rate    = 292000;

    if (st->codec->channels != 1 && st->codec->channels != 2) {
        av_log(s,AV_LOG_ERROR,"Channels %d not supported!\n",st->codec->channels);
        return AVERROR_INVALIDDATA;
    }

    st->codec->channel_layout = (st->codec->channels == 1) ? AV_CH_LAYOUT_MONO
                                                           : AV_CH_LAYOUT_STEREO;

    /* one ATRAC1 sound unit per channel per packet */
    st->codec->block_align = AT1_SU_SIZE * st->codec->channels;
    return 0;
}
 
/* Read one block_align-sized packet.
 * Bug fix: real error codes from av_get_packet (including AVERROR_EOF)
 * were masked as a generic AVERROR(EIO); propagate them instead. */
static int aea_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret = av_get_packet(s->pb, pkt, s->streams[0]->codec->block_align);

    pkt->stream_index = 0;
    if (ret <= 0)
        return ret < 0 ? ret : AVERROR(EIO);

    return ret;
}
 
/* Demuxer for Sony MD STUDIO ATRAC1 audio. */
AVInputFormat ff_aea_demuxer = {
    .name           = "aea",
    .long_name      = NULL_IF_CONFIG_SMALL("MD STUDIO audio"),
    .read_probe     = aea_read_probe,
    .read_header    = aea_read_header,
    .read_packet    = aea_read_packet,
    .read_seek      = ff_pcm_read_seek,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "aea",
};
/contrib/sdk/sources/ffmpeg/libavformat/afc.c
0,0 → 1,79
/*
* AFC demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "avformat.h"
#include "internal.h"
 
/* Demuxer state: absolute file offset where the sound data ends. */
typedef struct AFCDemuxContext {
    int64_t data_end;
} AFCDemuxContext;
 
/* Parse the fixed AFC header: data size, duration and sample rate,
 * always stereo ADPCM_AFC. */
static int afc_read_header(AVFormatContext *s)
{
    AFCDemuxContext *c = s->priv_data;
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);
    stream->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    stream->codec->codec_id       = AV_CODEC_ID_ADPCM_AFC;
    stream->codec->channels       = 2;
    stream->codec->channel_layout = AV_CH_LAYOUT_STEREO;

    /* single extradata byte: 8 bits per sample times the channel count */
    if (ff_alloc_extradata(stream->codec, 1))
        return AVERROR(ENOMEM);
    stream->codec->extradata[0] = 8 * stream->codec->channels;

    c->data_end = avio_rb32(s->pb) + 32LL;
    stream->duration = avio_rb32(s->pb);
    stream->codec->sample_rate = avio_rb16(s->pb);
    avio_skip(s->pb, 22);
    avpriv_set_pts_info(stream, 64, 1, stream->codec->sample_rate);

    return 0;
}
 
/* Deliver up to 128 blocks of 18 bytes per packet, stopping at data_end. */
static int afc_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AFCDemuxContext *c = s->priv_data;
    int64_t remaining = c->data_end - avio_tell(s->pb);
    int ret;

    if (remaining > 18 * 128)
        remaining = 18 * 128;
    if (remaining <= 0)
        return AVERROR_EOF;

    ret = av_get_packet(s->pb, pkt, remaining);
    pkt->stream_index = 0;
    return ret;
}
 
/* Demuxer for Nintendo AFC ADPCM audio. */
AVInputFormat ff_afc_demuxer = {
    .name           = "afc",
    .long_name      = NULL_IF_CONFIG_SMALL("AFC"),
    .priv_data_size = sizeof(AFCDemuxContext),
    .read_header    = afc_read_header,
    .read_packet    = afc_read_packet,
    .extensions     = "afc",
    .flags          = AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK,
};
/contrib/sdk/sources/ffmpeg/libavformat/aiff.h
0,0 → 1,58
/*
* AIFF/AIFF-C muxer/demuxer common header
* Copyright (c) 2006 Patrick Guimond
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* common header for AIFF muxer and demuxer
*/
 
#ifndef AVFORMAT_AIFF_H
#define AVFORMAT_AIFF_H
 
#include "avformat.h"
#include "internal.h"
 
/* Mapping between AIFF-C compression-type fourccs and codec IDs; shared
 * by the AIFF muxer and demuxer. Several PCM entries share the 'NONE'
 * tag — the demuxer disambiguates them by bit depth. */
static const AVCodecTag ff_codec_aiff_tags[] = {
    { AV_CODEC_ID_PCM_S16BE,    MKTAG('N','O','N','E') },
    { AV_CODEC_ID_PCM_S8,       MKTAG('N','O','N','E') },
    { AV_CODEC_ID_PCM_U8,       MKTAG('r','a','w',' ') },
    { AV_CODEC_ID_PCM_S24BE,    MKTAG('N','O','N','E') },
    { AV_CODEC_ID_PCM_S32BE,    MKTAG('N','O','N','E') },
    { AV_CODEC_ID_PCM_F32BE,    MKTAG('f','l','3','2') },
    { AV_CODEC_ID_PCM_F64BE,    MKTAG('f','l','6','4') },
    { AV_CODEC_ID_PCM_ALAW,     MKTAG('a','l','a','w') },
    { AV_CODEC_ID_PCM_MULAW,    MKTAG('u','l','a','w') },
    { AV_CODEC_ID_PCM_S24BE,    MKTAG('i','n','2','4') },
    { AV_CODEC_ID_PCM_S32BE,    MKTAG('i','n','3','2') },
    { AV_CODEC_ID_MACE3,        MKTAG('M','A','C','3') },
    { AV_CODEC_ID_MACE6,        MKTAG('M','A','C','6') },
    { AV_CODEC_ID_GSM,          MKTAG('G','S','M',' ') },
    { AV_CODEC_ID_ADPCM_G722,   MKTAG('G','7','2','2') },
    { AV_CODEC_ID_ADPCM_G726LE, MKTAG('G','7','2','6') },
    { AV_CODEC_ID_PCM_S16BE,    MKTAG('t','w','o','s') },
    { AV_CODEC_ID_PCM_S16LE,    MKTAG('s','o','w','t') },
    { AV_CODEC_ID_ADPCM_IMA_QT, MKTAG('i','m','a','4') },
    { AV_CODEC_ID_QDM2,         MKTAG('Q','D','M','2') },
    { AV_CODEC_ID_QCELP,        MKTAG('Q','c','l','p') },
    { AV_CODEC_ID_NONE,         0 },
};
 
#endif /* AVFORMAT_AIFF_H */
/contrib/sdk/sources/ffmpeg/libavformat/aiffdec.c
0,0 → 1,374
/*
* AIFF/AIFF-C demuxer
* Copyright (c) 2006 Patrick Guimond
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/dict.h"
#include "avformat.h"
#include "internal.h"
#include "pcm.h"
#include "aiff.h"
#include "isom.h"
#include "id3v2.h"
#include "mov_chan.h"
 
#define AIFF 0
#define AIFF_C_VERSION1 0xA2805140
 
/* Demuxer state shared between header parsing and packet reading. */
typedef struct {
    int64_t data_end;   /* presumably end offset of the SSND payload —
                         * set while parsing chunks (not visible here) */
    int block_duration; /* decoded frames per block_align bytes, from
                         * av_get_audio_frame_duration() */
} AIFFInputContext;
 
/* Map a bits-per-sample count onto the matching big-endian PCM codec;
 * anything above 32 bits is not allowed and yields AV_CODEC_ID_NONE. */
static enum AVCodecID aiff_codec_get_id(int bps)
{
    if (bps > 32)
        return AV_CODEC_ID_NONE;
    if (bps > 24)
        return AV_CODEC_ID_PCM_S32BE;
    if (bps > 16)
        return AV_CODEC_ID_PCM_S24BE;
    if (bps > 8)
        return AV_CODEC_ID_PCM_S16BE;
    return AV_CODEC_ID_PCM_S8;
}
 
/* Read one IFF chunk header into *tag; returns the chunk size (clamped
 * to INT_MAX when the stored 32-bit size reads as negative) or
 * AVERROR(EIO) at end of file. */
static int get_tag(AVIOContext *pb, uint32_t * tag)
{
    int size;

    if (url_feof(pb))
        return AVERROR(EIO);

    *tag = avio_rl32(pb);
    size = avio_rb32(pb);

    return size < 0 ? 0x7fffffff : size;
}
 
/* Read a size-byte metadata string chunk into s->metadata under key,
 * then skip any remainder (including the pad byte of odd-sized chunks). */
static void get_meta(AVFormatContext *s, const char *key, int size)
{
    uint8_t *str = av_malloc(size+1);

    if (str) {
        int res = avio_read(s->pb, str, size);
        if (res < 0){
            av_free(str);
            return;
        }
        /* leftover = (padded chunk size) - (bytes actually read) */
        size += (size&1)-res;
        str[res] = 0;
        /* dict takes ownership of str (DONT_STRDUP_VAL) */
        av_dict_set(&s->metadata, key, str, AV_DICT_DONT_STRDUP_VAL);
    }else
        size+= size&1; /* allocation failed: still honor chunk padding */

    avio_skip(s->pb, size);
}
 
/* Returns the number of sound data frames or negative on error */
static unsigned int get_aiff_header(AVFormatContext *s, int size,
                                    unsigned version)
{
    AVIOContext *pb = s->pb;
    AVCodecContext *codec = s->streams[0]->codec;
    AIFFInputContext *aiff = s->priv_data;
    int exp;
    uint64_t val;
    double sample_rate;
    unsigned int num_frames;

    /* chunks are padded to an even size */
    if (size & 1)
        size++;
    codec->codec_type = AVMEDIA_TYPE_AUDIO;
    codec->channels = avio_rb16(pb);
    num_frames = avio_rb32(pb);
    codec->bits_per_coded_sample = avio_rb16(pb);

    /* the sample rate is an 80-bit IEEE extended float:
     * 16-bit exponent followed by a 64-bit mantissa */
    exp = avio_rb16(pb);
    val = avio_rb64(pb);
    sample_rate = ldexp(val, exp - 16383 - 63);
    codec->sample_rate = sample_rate;
    size -= 18;

    /* get codec id for AIFF-C */
    if (version == AIFF_C_VERSION1) {
        codec->codec_tag = avio_rl32(pb);
        codec->codec_id  = ff_codec_get_id(ff_codec_aiff_tags, codec->codec_tag);
        size -= 4;
    }

    if (version != AIFF_C_VERSION1 || codec->codec_id == AV_CODEC_ID_PCM_S16BE) {
        /* plain AIFF (or 'NONE' compression): pick PCM codec by bit depth */
        codec->codec_id = aiff_codec_get_id(codec->bits_per_coded_sample);
        codec->bits_per_coded_sample = av_get_bits_per_sample(codec->codec_id);
        aiff->block_duration = 1;
    } else {
        switch (codec->codec_id) {
        case AV_CODEC_ID_PCM_F32BE:
        case AV_CODEC_ID_PCM_F64BE:
        case AV_CODEC_ID_PCM_S16LE:
        case AV_CODEC_ID_PCM_ALAW:
        case AV_CODEC_ID_PCM_MULAW:
            aiff->block_duration = 1;
            break;
        case AV_CODEC_ID_ADPCM_IMA_QT:
            codec->block_align = 34*codec->channels;
            break;
        case AV_CODEC_ID_MACE3:
            codec->block_align = 2*codec->channels;
            break;
        case AV_CODEC_ID_ADPCM_G726LE:
            codec->bits_per_coded_sample = 5;
            /* fall through */
        case AV_CODEC_ID_ADPCM_G722:
        case AV_CODEC_ID_MACE6:
            codec->block_align = 1*codec->channels;
            break;
        case AV_CODEC_ID_GSM:
            codec->block_align = 33;
            break;
        default:
            aiff->block_duration = 1;
            break;
        }
        if (codec->block_align > 0)
            aiff->block_duration = av_get_audio_frame_duration(codec,
                                                               codec->block_align);
    }

    /* Block align needs to be computed in all cases, as the definition
     * is specific to applications -> here we use the WAVE format definition */
    if (!codec->block_align)
        codec->block_align = (av_get_bits_per_sample(codec->codec_id) * codec->channels) >> 3;

    if (aiff->block_duration) {
        codec->bit_rate = codec->sample_rate * (codec->block_align << 3) /
                          aiff->block_duration;
    }

    /* Chunk is over */
    if (size)
        avio_skip(pb, size);

    /* NOTE(review): despite the "negative on error" comment above this
     * function, the unsigned return type means no negative value can
     * actually be produced here — the caller's < 0 check never fires */
    return num_frames;
}
 
/* Recognize a "FORM" container whose form type is "AIFF" or "AIFC". */
static int aiff_probe(AVProbeData *p)
{
    if (p->buf[0] != 'F' || p->buf[1] != 'O' ||
        p->buf[2] != 'R' || p->buf[3] != 'M' ||
        p->buf[8] != 'A' || p->buf[9] != 'I' ||
        p->buf[10] != 'F')
        return 0;
    if (p->buf[11] == 'F' || p->buf[11] == 'C')
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
/* aiff input */
/**
 * Parse the FORM container of an AIFF/AIFF-C file and set up the single
 * audio stream.
 *
 * Walks the chunk list: COMM (format info, via get_aiff_header), FVER,
 * ID3 tags, textual metadata (NAME/AUTH/(c) /ANNO), the QuickTime-style
 * 'wave' extradata chunk, CHAN channel layouts and SSND (sound data).
 * On success the stream is positioned at the first audio sample.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int aiff_read_header(AVFormatContext *s)
{
    int ret, size, filesize;
    int64_t offset = 0, position;
    uint32_t tag;
    unsigned version = AIFF_C_VERSION1;
    AVIOContext *pb = s->pb;
    AVStream * st;
    AIFFInputContext *aiff = s->priv_data;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;

    /* check FORM header */
    filesize = get_tag(pb, &tag);
    if (filesize < 0 || tag != MKTAG('F', 'O', 'R', 'M'))
        return AVERROR_INVALIDDATA;

    /* AIFF data type */
    tag = avio_rl32(pb);
    if (tag == MKTAG('A', 'I', 'F', 'F')) /* Got an AIFF file */
        version = AIFF;
    else if (tag != MKTAG('A', 'I', 'F', 'C')) /* An AIFF-C file then */
        return AVERROR_INVALIDDATA;

    filesize -= 4;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    while (filesize > 0) {
        /* parse different chunks */
        size = get_tag(pb, &tag);
        if (size < 0)
            return size;

        filesize -= size + 8; /* chunk payload plus 8-byte chunk header */

        switch (tag) {
        case MKTAG('C', 'O', 'M', 'M'): /* Common chunk */
            /* Then for the complete header info */
            st->nb_frames = get_aiff_header(s, size, version);
            if (st->nb_frames < 0)
                return st->nb_frames;
            if (offset > 0) // COMM is after SSND
                goto got_sound;
            break;
        case MKTAG('I', 'D', '3', ' '):
            position = avio_tell(pb);
            ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
            if (id3v2_extra_meta)
                if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0) {
                    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
                    return ret;
                }
            ff_id3v2_free_extra_meta(&id3v2_extra_meta);
            /* re-sync in case the ID3 reader consumed less than the chunk */
            if (position + size > avio_tell(pb))
                avio_skip(pb, position + size - avio_tell(pb));
            break;
        case MKTAG('F', 'V', 'E', 'R'): /* Version chunk */
            version = avio_rb32(pb);
            break;
        case MKTAG('N', 'A', 'M', 'E'): /* Sample name chunk */
            get_meta(s, "title" , size);
            break;
        case MKTAG('A', 'U', 'T', 'H'): /* Author chunk */
            get_meta(s, "author" , size);
            break;
        case MKTAG('(', 'c', ')', ' '): /* Copyright chunk */
            get_meta(s, "copyright", size);
            break;
        case MKTAG('A', 'N', 'N', 'O'): /* Annotation chunk */
            get_meta(s, "comment" , size);
            break;
        case MKTAG('S', 'S', 'N', 'D'): /* Sampled sound chunk */
            aiff->data_end = avio_tell(pb) + size;
            offset = avio_rb32(pb); /* Offset of sound data */
            avio_rb32(pb); /* BlockSize... don't care */
            offset += avio_tell(pb); /* Compute absolute data offset */
            if (st->codec->block_align && !pb->seekable) /* Assume COMM already parsed */
                goto got_sound;
            if (!pb->seekable) {
                av_log(s, AV_LOG_ERROR, "file is not seekable\n");
                return -1;
            }
            /* skip the sound data; COMM may still follow */
            avio_skip(pb, size - 8);
            break;
        case MKTAG('w', 'a', 'v', 'e'):
            if ((uint64_t)size > (1<<30))
                return -1;
            if (ff_alloc_extradata(st->codec, size))
                return AVERROR(ENOMEM);
            avio_read(pb, st->codec->extradata, size);
            if (st->codec->codec_id == AV_CODEC_ID_QDM2 && size>=12*4 && !st->codec->block_align) {
                /* QDM2 stores its framing parameters inside the extradata */
                st->codec->block_align = AV_RB32(st->codec->extradata+11*4);
                aiff->block_duration = AV_RB32(st->codec->extradata+9*4);
            } else if (st->codec->codec_id == AV_CODEC_ID_QCELP) {
                char rate = 0;
                if (size >= 25)
                    rate = st->codec->extradata[24];
                switch (rate) {
                case 'H': // RATE_HALF
                    st->codec->block_align = 17;
                    break;
                case 'F': // RATE_FULL
                default:
                    st->codec->block_align = 35;
                }
                aiff->block_duration = 160;
                st->codec->bit_rate = st->codec->sample_rate * (st->codec->block_align << 3) /
                                      aiff->block_duration;
            }
            break;
        case MKTAG('C','H','A','N'):
            if(ff_mov_read_chan(s, pb, st, size) < 0)
                return AVERROR_INVALIDDATA;
            break;
        default: /* Jump */
            if (size & 1) /* Always even aligned */
                size++;
            avio_skip(pb, size);
        }
    }

    /* NOTE(review): when the loop ends without an SSND chunk, control
     * falls through here with offset == 0; the block_align check below
     * is then the only thing rejecting files without a COMM chunk. */
got_sound:
    if (!st->codec->block_align) {
        av_log(s, AV_LOG_ERROR, "could not find COMM tag or invalid block_align value\n");
        return -1;
    }

    /* Now positioned, get the sound data start and end */
    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    st->start_time = 0;
    st->duration = st->nb_frames * aiff->block_duration;

    /* Position the stream at the first block */
    avio_seek(pb, offset, SEEK_SET);

    return 0;
}
 
/* Upper bound on the payload of a single demuxed packet (bytes). */
#define MAX_SIZE 4096

static int aiff_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    AIFFInputContext *aiff = s->priv_data;
    AVStream *st = s->streams[0];
    int64_t remaining;
    int bytes, ret;

    /* Bytes of sound data left before the end of the SSND chunk. */
    remaining = aiff->data_end - avio_tell(s->pb);
    if (remaining <= 0)
        return AVERROR_EOF;

    /* Codecs with large frames (GSM, QCLP, IMA4) get exactly one block
     * per packet; small-block streams get as many whole blocks as fit
     * into MAX_SIZE. */
    if (st->codec->block_align >= 17)
        bytes = st->codec->block_align;
    else
        bytes = (MAX_SIZE / st->codec->block_align) * st->codec->block_align;
    bytes = FFMIN(remaining, bytes);

    ret = av_get_packet(s->pb, pkt, bytes);
    if (ret < 0)
        return ret;

    if (bytes >= st->codec->block_align)
        pkt->flags &= ~AV_PKT_FLAG_CORRUPT;
    /* Only one stream in an AIFF file */
    pkt->stream_index = 0;
    pkt->duration = (ret / st->codec->block_align) * aiff->block_duration;
    return 0;
}
 
/* Demuxer descriptor for Audio IFF. Seeking reuses the generic PCM seek
 * helper: AIFF audio is stored as fixed-size blocks of block_align bytes. */
AVInputFormat ff_aiff_demuxer = {
    .name = "aiff",
    .long_name = NULL_IF_CONFIG_SMALL("Audio IFF"),
    .priv_data_size = sizeof(AIFFInputContext),
    .read_probe = aiff_probe,
    .read_header = aiff_read_header,
    .read_packet = aiff_read_packet,
    .read_seek = ff_pcm_read_seek,
    .codec_tag = (const AVCodecTag* const []){ ff_codec_aiff_tags, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/aiffenc.c
0,0 → 1,332
/*
* AIFF/AIFF-C muxer
* Copyright (c) 2006 Patrick Guimond
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intfloat.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "internal.h"
#include "aiff.h"
#include "avio_internal.h"
#include "isom.h"
#include "id3v2.h"
 
/* Private state of the AIFF muxer. The three int64_t members record file
 * offsets of length placeholders written in aiff_write_header() and
 * patched in aiff_write_trailer(). */
typedef struct {
    const AVClass *class;     /* AVOptions class (first member by convention) */
    int64_t form;             /* offset of the FORM chunk length field */
    int64_t frames;           /* offset of the COMM frame-count field */
    int64_t ssnd;             /* offset of the SSND chunk length field */
    int audio_stream_idx;     /* index of the single audio stream */
    AVPacketList *pict_list;  /* attached pictures, written as ID3v2 APIC frames */
    int write_id3v2;          /* option: write ID3 tags in the trailer */
    int id3v2_version;        /* option: ID3v2 version to write (3 or 4) */
} AIFFOutputContext;
 
/**
 * Append an "ID3 " chunk containing ID3v2 metadata and any attached
 * pictures (APIC frames) at the current write position.
 *
 * Requires a seekable output: the chunk length is patched in after the
 * tag has been serialized. Writes nothing (and returns 0) when there is
 * neither metadata nor a picture queue.
 *
 * @return 0 on success, a negative AVERROR code on APIC write failure
 */
static int put_id3v2_tags(AVFormatContext *s, AIFFOutputContext *aiff)
{
    int ret;
    uint64_t pos, end, size;
    ID3v2EncContext id3v2 = { 0 };
    AVIOContext *pb = s->pb;
    AVPacketList *pict_list = aiff->pict_list;

    if (!pb->seekable)
        return 0;

    if (!s->metadata && !aiff->pict_list)
        return 0;

    avio_wl32(pb, MKTAG('I', 'D', '3', ' '));
    avio_wb32(pb, 0); /* chunk length placeholder, patched below */
    pos = avio_tell(pb);

    ff_id3v2_start(&id3v2, pb, aiff->id3v2_version, ID3v2_DEFAULT_MAGIC);
    ff_id3v2_write_metadata(s, &id3v2);
    while (pict_list) {
        if ((ret = ff_id3v2_write_apic(s, &id3v2, &pict_list->pkt)) < 0)
            return ret;
        pict_list = pict_list->next;
    }
    ff_id3v2_finish(&id3v2, pb);

    end = avio_tell(pb);
    size = end - pos;

    /* Update chunk size */
    avio_seek(pb, pos - 4, SEEK_SET);
    avio_wb32(pb, size);
    avio_seek(pb, end, SEEK_SET);

    /* IFF chunks must start on even offsets; pad odd-sized ones */
    if (size & 1)
        avio_w8(pb, 0);

    return 0;
}
 
/* Write one textual metadata chunk (NAME/AUTH/(c) /ANNO) if the given
 * key exists in the metadata dictionary; otherwise write nothing. */
static void put_meta(AVFormatContext *s, const char *key, uint32_t id)
{
    AVIOContext *pb = s->pb;
    AVDictionaryEntry *t = av_dict_get(s->metadata, key, NULL, 0);
    int len;

    if (!t)
        return;

    len = strlen(t->value);
    avio_wl32(pb, id);              /* chunk ID (FourCC) */
    avio_wb32(pb, FFALIGN(len, 2)); /* chunk length, rounded up to even */
    avio_write(pb, t->value, len);
    if (len & 1)                    /* pad byte keeps chunks word aligned */
        avio_w8(pb, 0);
}
 
/**
 * Write the FORM/AIFF(-C) file header: FVER (AIFF-C only), CHAN, textual
 * metadata, the COMM chunk, optional QDM2 'wave' extradata and the SSND
 * chunk header. Length fields that depend on the amount of audio written
 * are left as zero placeholders and patched in aiff_write_trailer().
 *
 * @return 0 on success, a negative value on invalid input
 */
static int aiff_write_header(AVFormatContext *s)
{
    AIFFOutputContext *aiff = s->priv_data;
    AVIOContext *pb = s->pb;
    AVCodecContext *enc;
    uint64_t sample_rate;
    int i, aifc = 0;

    /* Pick the single audio stream; extra video streams are tolerated
     * and only used as attached pictures. */
    aiff->audio_stream_idx = -1;
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        if (aiff->audio_stream_idx < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            aiff->audio_stream_idx = i;
        } else if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO) {
            av_log(s, AV_LOG_ERROR, "Only audio streams and pictures are allowed in AIFF.\n");
            return AVERROR(EINVAL);
        }
    }
    if (aiff->audio_stream_idx < 0) {
        av_log(s, AV_LOG_ERROR, "No audio stream present.\n");
        return AVERROR(EINVAL);
    }

    enc = s->streams[aiff->audio_stream_idx]->codec;

    /* First verify if format is ok */
    if (!enc->codec_tag)
        return -1;
    if (enc->codec_tag != MKTAG('N','O','N','E'))
        aifc = 1; /* anything but raw big-endian PCM needs AIFF-C */

    /* FORM AIFF header */
    ffio_wfourcc(pb, "FORM");
    aiff->form = avio_tell(pb);
    avio_wb32(pb, 0); /* file length, patched in the trailer */
    ffio_wfourcc(pb, aifc ? "AIFC" : "AIFF");

    if (aifc) { // compressed audio
        if (!enc->block_align) {
            av_log(s, AV_LOG_ERROR, "block align not set\n");
            return -1;
        }
        /* Version chunk */
        ffio_wfourcc(pb, "FVER");
        avio_wb32(pb, 4);
        avio_wb32(pb, 0xA2805140); /* AIFC version 1 magic */
    }

    if (enc->channels > 2 && enc->channel_layout) {
        ffio_wfourcc(pb, "CHAN");
        avio_wb32(pb, 12);
        ff_mov_write_chan(pb, enc->channel_layout);
    }

    put_meta(s, "title", MKTAG('N', 'A', 'M', 'E'));
    put_meta(s, "author", MKTAG('A', 'U', 'T', 'H'));
    put_meta(s, "copyright", MKTAG('(', 'c', ')', ' '));
    put_meta(s, "comment", MKTAG('A', 'N', 'N', 'O'));

    /* Common chunk */
    ffio_wfourcc(pb, "COMM");
    avio_wb32(pb, aifc ? 24 : 18); /* size */
    avio_wb16(pb, enc->channels); /* Number of channels */

    aiff->frames = avio_tell(pb);
    avio_wb32(pb, 0); /* Number of frames, patched in the trailer */

    if (!enc->bits_per_coded_sample)
        enc->bits_per_coded_sample = av_get_bits_per_sample(enc->codec_id);
    if (!enc->bits_per_coded_sample) {
        av_log(s, AV_LOG_ERROR, "could not compute bits per sample\n");
        return -1;
    }
    if (!enc->block_align)
        enc->block_align = (enc->bits_per_coded_sample * enc->channels) >> 3;

    avio_wb16(pb, enc->bits_per_coded_sample); /* Sample size */

    /* Sample rate as an 80-bit extended float, assembled from the
     * exponent and mantissa bits of the IEEE 754 double. */
    sample_rate = av_double2int(enc->sample_rate);
    avio_wb16(pb, (sample_rate >> 52) + (16383 - 1023));
    avio_wb64(pb, UINT64_C(1) << 63 | sample_rate << 11);

    if (aifc) {
        avio_wl32(pb, enc->codec_tag);
        avio_wb16(pb, 0); /* empty compression-name pstring */
    }

    if (enc->codec_tag == MKTAG('Q','D','M','2') && enc->extradata_size) {
        ffio_wfourcc(pb, "wave");
        avio_wb32(pb, enc->extradata_size);
        avio_write(pb, enc->extradata, enc->extradata_size);
    }

    /* Sound data chunk */
    ffio_wfourcc(pb, "SSND");
    aiff->ssnd = avio_tell(pb); /* Sound chunk size */
    avio_wb32(pb, 0); /* Sound samples data size */
    avio_wb32(pb, 0); /* Data offset */
    avio_wb32(pb, 0); /* Block-size (block align) */

    avpriv_set_pts_info(s->streams[aiff->audio_stream_idx], 64, 1,
                        s->streams[aiff->audio_stream_idx]->codec->sample_rate);

    /* Data is starting here */
    avio_flush(pb);

    return 0;
}
 
/* Write one packet: audio payloads go straight into the SSND chunk;
 * the first picture packet of each video stream is queued and written
 * as an ID3v2 APIC frame in the trailer. */
static int aiff_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AIFFOutputContext *aiff = s->priv_data;
    AVPacketList *node, **tail;
    int ret;

    if (pkt->stream_index == aiff->audio_stream_idx) {
        avio_write(s->pb, pkt->data, pkt->size);
        return 0;
    }

    if (s->streams[pkt->stream_index]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
        return 0;

    /* Keep only the first picture per stream; warn once per stream. */
    if (s->streams[pkt->stream_index]->nb_frames == 1)
        av_log(s, AV_LOG_WARNING, "Got more than one picture in stream %d,"
               " ignoring.\n", pkt->stream_index);
    if (s->streams[pkt->stream_index]->nb_frames >= 1)
        return 0;

    node = av_mallocz(sizeof(AVPacketList));
    if (!node)
        return AVERROR(ENOMEM);

    if ((ret = av_copy_packet(&node->pkt, pkt)) < 0) {
        av_freep(&node);
        return ret;
    }

    /* Append at the end of the picture queue. */
    tail = &aiff->pict_list;
    while (*tail)
        tail = &(*tail)->next;
    *tail = node;

    return 0;
}
 
/**
 * Finalize the file: pad it to an even length, patch the COMM frame
 * count, the SSND chunk size and the FORM length, and optionally append
 * ID3v2 tags.
 *
 * Fix vs. the original: the attached-picture queue built by
 * aiff_write_packet() is now freed on every path — the early
 * `return ret` on put_id3v2_tags() failure used to leak it.
 *
 * @return 0 on success, a negative AVERROR code on ID3 write failure
 */
static int aiff_write_trailer(AVFormatContext *s)
{
    int ret = 0;
    AVIOContext *pb = s->pb;
    AIFFOutputContext *aiff = s->priv_data;
    AVPacketList *pict_list = aiff->pict_list;
    AVCodecContext *enc = s->streams[aiff->audio_stream_idx]->codec;

    /* Chunks sizes must be even */
    int64_t file_size, end_size;
    end_size = file_size = avio_tell(pb);
    if (file_size & 1) {
        avio_w8(pb, 0);
        end_size++;
    }

    if (s->pb->seekable) {
        /* Number of sample frames */
        avio_seek(pb, aiff->frames, SEEK_SET);
        avio_wb32(pb, (file_size-aiff->ssnd-12)/enc->block_align);

        /* Sound Data chunk size */
        avio_seek(pb, aiff->ssnd, SEEK_SET);
        avio_wb32(pb, file_size - aiff->ssnd - 4);

        /* return to the end */
        avio_seek(pb, end_size, SEEK_SET);

        /* Write ID3 tags */
        if (aiff->write_id3v2)
            ret = put_id3v2_tags(s, aiff);

        if (ret >= 0) {
            /* File length */
            file_size = avio_tell(pb);
            avio_seek(pb, aiff->form, SEEK_SET);
            avio_wb32(pb, file_size - aiff->form - 4);

            avio_flush(pb);
        }
    }

    /* Free the picture queue on success and failure alike. */
    while (pict_list) {
        AVPacketList *next = pict_list->next;
        av_free_packet(&pict_list->pkt);
        av_freep(&pict_list);
        pict_list = next;
    }

    return ret;
}
 
#define OFFSET(x) offsetof(AIFFOutputContext, x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
/* Muxer private options, exposed through the AVOptions API. */
static const AVOption options[] = {
    { "write_id3v2", "Enable ID3 tags writing.",
      OFFSET(write_id3v2), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, ENC },
    { "id3v2_version", "Select ID3v2 version to write. Currently 3 and 4 are supported.",
      OFFSET(id3v2_version), AV_OPT_TYPE_INT, {.i64 = 4}, 3, 4, ENC },
    { NULL },
};

static const AVClass aiff_muxer_class = {
    .class_name = "AIFF muxer",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};

/* Muxer descriptor for Audio IFF; defaults to big-endian 16-bit PCM,
 * with PNG for attached pictures. */
AVOutputFormat ff_aiff_muxer = {
    .name = "aiff",
    .long_name = NULL_IF_CONFIG_SMALL("Audio IFF"),
    .mime_type = "audio/aiff",
    .extensions = "aif,aiff,afc,aifc",
    .priv_data_size = sizeof(AIFFOutputContext),
    .audio_codec = AV_CODEC_ID_PCM_S16BE,
    .video_codec = AV_CODEC_ID_PNG,
    .write_header = aiff_write_header,
    .write_packet = aiff_write_packet,
    .write_trailer = aiff_write_trailer,
    .codec_tag = (const AVCodecTag* const []){ ff_codec_aiff_tags, 0 },
    .priv_class = &aiff_muxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/allformats.c
0,0 → 1,352
/*
* Register all the formats and protocols
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rtp.h"
#include "rdt.h"
#include "url.h"
#include "version.h"
 
/* Registration helpers: each expands to a block that declares the
 * component's descriptor and registers it only when the matching
 * configure-time switch (CONFIG_*_MUXER / _DEMUXER / _PROTOCOL) is
 * enabled; disabled components are compiled out by dead-code removal. */
#define REGISTER_MUXER(X, x)                                            \
    {                                                                   \
        extern AVOutputFormat ff_##x##_muxer;                           \
        if (CONFIG_##X##_MUXER)                                         \
            av_register_output_format(&ff_##x##_muxer);                 \
    }

#define REGISTER_DEMUXER(X, x)                                          \
    {                                                                   \
        extern AVInputFormat ff_##x##_demuxer;                          \
        if (CONFIG_##X##_DEMUXER)                                       \
            av_register_input_format(&ff_##x##_demuxer);                \
    }

#define REGISTER_MUXDEMUX(X, x) REGISTER_MUXER(X, x); REGISTER_DEMUXER(X, x)

#define REGISTER_PROTOCOL(X, x)                                         \
    {                                                                   \
        extern URLProtocol ff_##x##_protocol;                           \
        if (CONFIG_##X##_PROTOCOL)                                      \
            ffurl_register_protocol(&ff_##x##_protocol,                 \
                                    sizeof(ff_##x##_protocol));         \
    }
 
/**
 * Register every enabled (de)muxer and protocol, plus all codecs.
 * Idempotent via the `initialized` guard.
 *
 * NOTE(review): the guard is a plain static int, not atomic — assumed
 * to be called before any threads are spawned; confirm with callers.
 * The registration order below is meaningful and must not be changed.
 */
void av_register_all(void)
{
    static int initialized;

    if (initialized)
        return;
    initialized = 1;

    avcodec_register_all();

    /* (de)muxers */
    REGISTER_MUXER (A64, a64);
    REGISTER_DEMUXER (AAC, aac);
    REGISTER_MUXDEMUX(AC3, ac3);
    REGISTER_DEMUXER (ACT, act);
    REGISTER_DEMUXER (ADF, adf);
    REGISTER_DEMUXER (ADP, adp);
    REGISTER_MUXER (ADTS, adts);
    REGISTER_MUXDEMUX(ADX, adx);
    REGISTER_DEMUXER (AEA, aea);
    REGISTER_DEMUXER (AFC, afc);
    REGISTER_MUXDEMUX(AIFF, aiff);
    REGISTER_MUXDEMUX(AMR, amr);
    REGISTER_DEMUXER (ANM, anm);
    REGISTER_DEMUXER (APC, apc);
    REGISTER_DEMUXER (APE, ape);
    REGISTER_DEMUXER (AQTITLE, aqtitle);
    REGISTER_MUXDEMUX(ASF, asf);
    REGISTER_MUXDEMUX(ASS, ass);
    REGISTER_MUXDEMUX(AST, ast);
    REGISTER_MUXER (ASF_STREAM, asf_stream);
    REGISTER_MUXDEMUX(AU, au);
    REGISTER_MUXDEMUX(AVI, avi);
    REGISTER_DEMUXER (AVISYNTH, avisynth);
    REGISTER_MUXER (AVM2, avm2);
    REGISTER_DEMUXER (AVR, avr);
    REGISTER_DEMUXER (AVS, avs);
    REGISTER_DEMUXER (BETHSOFTVID, bethsoftvid);
    REGISTER_DEMUXER (BFI, bfi);
    REGISTER_DEMUXER (BINTEXT, bintext);
    REGISTER_DEMUXER (BINK, bink);
    REGISTER_MUXDEMUX(BIT, bit);
    REGISTER_DEMUXER (BMV, bmv);
    REGISTER_DEMUXER (BRSTM, brstm);
    REGISTER_DEMUXER (BOA, boa);
    REGISTER_DEMUXER (C93, c93);
    REGISTER_MUXDEMUX(CAF, caf);
    REGISTER_MUXDEMUX(CAVSVIDEO, cavsvideo);
    REGISTER_DEMUXER (CDG, cdg);
    REGISTER_DEMUXER (CDXL, cdxl);
    REGISTER_DEMUXER (CONCAT, concat);
    REGISTER_MUXER (CRC, crc);
    REGISTER_MUXDEMUX(DATA, data);
    REGISTER_MUXDEMUX(DAUD, daud);
    REGISTER_DEMUXER (DFA, dfa);
    REGISTER_MUXDEMUX(DIRAC, dirac);
    REGISTER_MUXDEMUX(DNXHD, dnxhd);
    REGISTER_DEMUXER (DSICIN, dsicin);
    REGISTER_MUXDEMUX(DTS, dts);
    REGISTER_DEMUXER (DTSHD, dtshd);
    REGISTER_MUXDEMUX(DV, dv);
    REGISTER_DEMUXER (DXA, dxa);
    REGISTER_DEMUXER (EA, ea);
    REGISTER_DEMUXER (EA_CDATA, ea_cdata);
    REGISTER_MUXDEMUX(EAC3, eac3);
    REGISTER_DEMUXER (EPAF, epaf);
    REGISTER_MUXER (F4V, f4v);
    REGISTER_MUXDEMUX(FFM, ffm);
    REGISTER_MUXDEMUX(FFMETADATA, ffmetadata);
    REGISTER_MUXDEMUX(FILMSTRIP, filmstrip);
    REGISTER_MUXDEMUX(FLAC, flac);
    REGISTER_DEMUXER (FLIC, flic);
    REGISTER_MUXDEMUX(FLV, flv);
    REGISTER_DEMUXER (FOURXM, fourxm);
    REGISTER_MUXER (FRAMECRC, framecrc);
    REGISTER_MUXER (FRAMEMD5, framemd5);
    REGISTER_DEMUXER (FRM, frm);
    REGISTER_MUXDEMUX(G722, g722);
    REGISTER_MUXDEMUX(G723_1, g723_1);
    REGISTER_DEMUXER (G729, g729);
    REGISTER_MUXDEMUX(GIF, gif);
    REGISTER_DEMUXER (GSM, gsm);
    REGISTER_MUXDEMUX(GXF, gxf);
    REGISTER_MUXDEMUX(H261, h261);
    REGISTER_MUXDEMUX(H263, h263);
    REGISTER_MUXDEMUX(H264, h264);
    REGISTER_DEMUXER (HEVC, hevc);
    REGISTER_MUXDEMUX(HLS, hls);
    REGISTER_MUXDEMUX(ICO, ico);
    REGISTER_DEMUXER (IDCIN, idcin);
    REGISTER_DEMUXER (IDF, idf);
    REGISTER_DEMUXER (IFF, iff);
    REGISTER_MUXDEMUX(ILBC, ilbc);
    REGISTER_MUXDEMUX(IMAGE2, image2);
    REGISTER_MUXDEMUX(IMAGE2PIPE, image2pipe);
    REGISTER_DEMUXER (INGENIENT, ingenient);
    REGISTER_DEMUXER (IPMOVIE, ipmovie);
    REGISTER_MUXER (IPOD, ipod);
    REGISTER_MUXDEMUX(IRCAM, ircam);
    REGISTER_MUXER (ISMV, ismv);
    REGISTER_DEMUXER (ISS, iss);
    REGISTER_DEMUXER (IV8, iv8);
    REGISTER_MUXDEMUX(IVF, ivf);
    REGISTER_MUXDEMUX(JACOSUB, jacosub);
    REGISTER_DEMUXER (JV, jv);
    REGISTER_MUXDEMUX(LATM, latm);
    REGISTER_DEMUXER (LMLM4, lmlm4);
    REGISTER_DEMUXER (LOAS, loas);
    REGISTER_DEMUXER (LVF, lvf);
    REGISTER_DEMUXER (LXF, lxf);
    REGISTER_MUXDEMUX(M4V, m4v);
    REGISTER_MUXER (MD5, md5);
    REGISTER_MUXDEMUX(MATROSKA, matroska);
    REGISTER_MUXER (MATROSKA_AUDIO, matroska_audio);
    REGISTER_DEMUXER (MGSTS, mgsts);
    REGISTER_MUXDEMUX(MICRODVD, microdvd);
    REGISTER_MUXDEMUX(MJPEG, mjpeg);
    REGISTER_MUXDEMUX(MLP, mlp);
    REGISTER_DEMUXER (MM, mm);
    REGISTER_MUXDEMUX(MMF, mmf);
    REGISTER_MUXDEMUX(MOV, mov);
    REGISTER_MUXER (MP2, mp2);
    REGISTER_MUXDEMUX(MP3, mp3);
    REGISTER_MUXER (MP4, mp4);
    REGISTER_DEMUXER (MPC, mpc);
    REGISTER_DEMUXER (MPC8, mpc8);
    REGISTER_MUXER (MPEG1SYSTEM, mpeg1system);
    REGISTER_MUXER (MPEG1VCD, mpeg1vcd);
    REGISTER_MUXER (MPEG1VIDEO, mpeg1video);
    REGISTER_MUXER (MPEG2DVD, mpeg2dvd);
    REGISTER_MUXER (MPEG2SVCD, mpeg2svcd);
    REGISTER_MUXER (MPEG2VIDEO, mpeg2video);
    REGISTER_MUXER (MPEG2VOB, mpeg2vob);
    REGISTER_DEMUXER (MPEGPS, mpegps);
    REGISTER_MUXDEMUX(MPEGTS, mpegts);
    REGISTER_DEMUXER (MPEGTSRAW, mpegtsraw);
    REGISTER_DEMUXER (MPEGVIDEO, mpegvideo);
    REGISTER_MUXER (MPJPEG, mpjpeg);
    REGISTER_DEMUXER (MPL2, mpl2);
    REGISTER_DEMUXER (MPSUB, mpsub);
    REGISTER_DEMUXER (MSNWC_TCP, msnwc_tcp);
    REGISTER_DEMUXER (MTV, mtv);
    REGISTER_DEMUXER (MV, mv);
    REGISTER_DEMUXER (MVI, mvi);
    REGISTER_MUXDEMUX(MXF, mxf);
    REGISTER_MUXER (MXF_D10, mxf_d10);
    REGISTER_DEMUXER (MXG, mxg);
    REGISTER_DEMUXER (NC, nc);
    REGISTER_DEMUXER (NISTSPHERE, nistsphere);
    REGISTER_DEMUXER (NSV, nsv);
    REGISTER_MUXER (NULL, null);
    REGISTER_MUXDEMUX(NUT, nut);
    REGISTER_DEMUXER (NUV, nuv);
    REGISTER_MUXDEMUX(OGG, ogg);
    REGISTER_MUXDEMUX(OMA, oma);
    REGISTER_DEMUXER (PAF, paf);
    REGISTER_MUXDEMUX(PCM_ALAW, pcm_alaw);
    REGISTER_MUXDEMUX(PCM_MULAW, pcm_mulaw);
    REGISTER_MUXDEMUX(PCM_F64BE, pcm_f64be);
    REGISTER_MUXDEMUX(PCM_F64LE, pcm_f64le);
    REGISTER_MUXDEMUX(PCM_F32BE, pcm_f32be);
    REGISTER_MUXDEMUX(PCM_F32LE, pcm_f32le);
    REGISTER_MUXDEMUX(PCM_S32BE, pcm_s32be);
    REGISTER_MUXDEMUX(PCM_S32LE, pcm_s32le);
    REGISTER_MUXDEMUX(PCM_S24BE, pcm_s24be);
    REGISTER_MUXDEMUX(PCM_S24LE, pcm_s24le);
    REGISTER_MUXDEMUX(PCM_S16BE, pcm_s16be);
    REGISTER_MUXDEMUX(PCM_S16LE, pcm_s16le);
    REGISTER_MUXDEMUX(PCM_S8, pcm_s8);
    REGISTER_MUXDEMUX(PCM_U32BE, pcm_u32be);
    REGISTER_MUXDEMUX(PCM_U32LE, pcm_u32le);
    REGISTER_MUXDEMUX(PCM_U24BE, pcm_u24be);
    REGISTER_MUXDEMUX(PCM_U24LE, pcm_u24le);
    REGISTER_MUXDEMUX(PCM_U16BE, pcm_u16be);
    REGISTER_MUXDEMUX(PCM_U16LE, pcm_u16le);
    REGISTER_MUXDEMUX(PCM_U8, pcm_u8);
    REGISTER_DEMUXER (PJS, pjs);
    REGISTER_DEMUXER (PMP, pmp);
    REGISTER_MUXER (PSP, psp);
    REGISTER_DEMUXER (PVA, pva);
    REGISTER_DEMUXER (PVF, pvf);
    REGISTER_DEMUXER (QCP, qcp);
    REGISTER_DEMUXER (R3D, r3d);
    REGISTER_MUXDEMUX(RAWVIDEO, rawvideo);
    REGISTER_DEMUXER (REALTEXT, realtext);
    REGISTER_DEMUXER (REDSPARK, redspark);
    REGISTER_DEMUXER (RL2, rl2);
    REGISTER_MUXDEMUX(RM, rm);
    REGISTER_MUXDEMUX(ROQ, roq);
    REGISTER_DEMUXER (RPL, rpl);
    REGISTER_DEMUXER (RSD, rsd);
    REGISTER_MUXDEMUX(RSO, rso);
    REGISTER_MUXDEMUX(RTP, rtp);
    REGISTER_MUXDEMUX(RTSP, rtsp);
    REGISTER_DEMUXER (SAMI, sami);
    REGISTER_MUXDEMUX(SAP, sap);
    REGISTER_DEMUXER (SBG, sbg);
    REGISTER_DEMUXER (SDP, sdp);
#if CONFIG_RTPDEC
    /* dynamic RTP/RDT payload handlers need explicit registration */
    av_register_rtp_dynamic_payload_handlers();
    av_register_rdt_dynamic_payload_handlers();
#endif
    REGISTER_DEMUXER (SEGAFILM, segafilm);
    REGISTER_MUXER (SEGMENT, segment);
    REGISTER_MUXER (SEGMENT, stream_segment);
    REGISTER_DEMUXER (SHORTEN, shorten);
    REGISTER_DEMUXER (SIFF, siff);
    REGISTER_DEMUXER (SMACKER, smacker);
    REGISTER_MUXDEMUX(SMJPEG, smjpeg);
    REGISTER_MUXER (SMOOTHSTREAMING, smoothstreaming);
    REGISTER_DEMUXER (SMUSH, smush);
    REGISTER_DEMUXER (SOL, sol);
    REGISTER_MUXDEMUX(SOX, sox);
    REGISTER_MUXDEMUX(SPDIF, spdif);
    REGISTER_MUXDEMUX(SRT, srt);
    REGISTER_DEMUXER (STR, str);
    REGISTER_DEMUXER (SUBVIEWER1, subviewer1);
    REGISTER_DEMUXER (SUBVIEWER, subviewer);
    REGISTER_MUXDEMUX(SWF, swf);
    REGISTER_DEMUXER (TAK, tak);
    REGISTER_MUXER (TEE, tee);
    REGISTER_DEMUXER (TEDCAPTIONS, tedcaptions);
    REGISTER_MUXER (TG2, tg2);
    REGISTER_MUXER (TGP, tgp);
    REGISTER_DEMUXER (THP, thp);
    REGISTER_DEMUXER (TIERTEXSEQ, tiertexseq);
    REGISTER_MUXER (MKVTIMESTAMP_V2, mkvtimestamp_v2);
    REGISTER_DEMUXER (TMV, tmv);
    REGISTER_MUXDEMUX(TRUEHD, truehd);
    REGISTER_DEMUXER (TTA, tta);
    REGISTER_DEMUXER (TXD, txd);
    REGISTER_DEMUXER (TTY, tty);
    REGISTER_MUXDEMUX(VC1, vc1);
    REGISTER_MUXDEMUX(VC1T, vc1t);
    REGISTER_DEMUXER (VIVO, vivo);
    REGISTER_DEMUXER (VMD, vmd);
    REGISTER_DEMUXER (VOBSUB, vobsub);
    REGISTER_MUXDEMUX(VOC, voc);
    REGISTER_DEMUXER (VPLAYER, vplayer);
    REGISTER_DEMUXER (VQF, vqf);
    REGISTER_MUXDEMUX(W64, w64);
    REGISTER_MUXDEMUX(WAV, wav);
    REGISTER_DEMUXER (WC3, wc3);
    REGISTER_MUXER (WEBM, webm);
    REGISTER_MUXDEMUX(WEBVTT, webvtt);
    REGISTER_DEMUXER (WSAUD, wsaud);
    REGISTER_DEMUXER (WSVQA, wsvqa);
    REGISTER_MUXDEMUX(WTV, wtv);
    REGISTER_MUXDEMUX(WV, wv);
    REGISTER_DEMUXER (XA, xa);
    REGISTER_DEMUXER (XBIN, xbin);
    REGISTER_DEMUXER (XMV, xmv);
    REGISTER_DEMUXER (XWMA, xwma);
    REGISTER_DEMUXER (YOP, yop);
    REGISTER_MUXDEMUX(YUV4MPEGPIPE, yuv4mpegpipe);

    /* protocols */
    REGISTER_PROTOCOL(BLURAY, bluray);
    REGISTER_PROTOCOL(CACHE, cache);
    REGISTER_PROTOCOL(CONCAT, concat);
    REGISTER_PROTOCOL(CRYPTO, crypto);
    REGISTER_PROTOCOL(DATA, data);
    REGISTER_PROTOCOL(FFRTMPCRYPT, ffrtmpcrypt);
    REGISTER_PROTOCOL(FFRTMPHTTP, ffrtmphttp);
    REGISTER_PROTOCOL(FILE, file);
    REGISTER_PROTOCOL(FTP, ftp);
    REGISTER_PROTOCOL(GOPHER, gopher);
    REGISTER_PROTOCOL(HLS, hls);
    REGISTER_PROTOCOL(HTTP, http);
    REGISTER_PROTOCOL(HTTPPROXY, httpproxy);
    REGISTER_PROTOCOL(HTTPS, https);
    REGISTER_PROTOCOL(MMSH, mmsh);
    REGISTER_PROTOCOL(MMST, mmst);
    REGISTER_PROTOCOL(MD5, md5);
    REGISTER_PROTOCOL(PIPE, pipe);
    REGISTER_PROTOCOL(RTMP, rtmp);
    REGISTER_PROTOCOL(RTMPE, rtmpe);
    REGISTER_PROTOCOL(RTMPS, rtmps);
    REGISTER_PROTOCOL(RTMPT, rtmpt);
    REGISTER_PROTOCOL(RTMPTE, rtmpte);
    REGISTER_PROTOCOL(RTMPTS, rtmpts);
    REGISTER_PROTOCOL(RTP, rtp);
    REGISTER_PROTOCOL(SCTP, sctp);
    REGISTER_PROTOCOL(SRTP, srtp);
    REGISTER_PROTOCOL(TCP, tcp);
    REGISTER_PROTOCOL(TLS, tls);
    REGISTER_PROTOCOL(UDP, udp);
    REGISTER_PROTOCOL(UNIX, unix);

    /* external libraries */
    REGISTER_DEMUXER (LIBGME, libgme);
    REGISTER_DEMUXER (LIBMODPLUG, libmodplug);
    REGISTER_MUXDEMUX(LIBNUT, libnut);
    REGISTER_DEMUXER (LIBQUVI, libquvi);
    REGISTER_PROTOCOL(LIBRTMP, librtmp);
    REGISTER_PROTOCOL(LIBRTMPE, librtmpe);
    REGISTER_PROTOCOL(LIBRTMPS, librtmps);
    REGISTER_PROTOCOL(LIBRTMPT, librtmpt);
    REGISTER_PROTOCOL(LIBRTMPTE, librtmpte);
    REGISTER_PROTOCOL(LIBSSH, libssh);
}
/contrib/sdk/sources/ffmpeg/libavformat/amr.c
0,0 → 1,179
/*
* amr file format
* Copyright (c) 2001 ffmpeg project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/*
Write and read amr data according to RFC3267, http://www.ietf.org/rfc/rfc3267.txt?number=3267
 
Only mono files are supported.
 
*/
 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "avformat.h"
#include "internal.h"
 
/* File magics defined by RFC 3267; the string literal's trailing NUL is
 * not part of the on-disk signature (writers use sizeof - 1). */
static const char AMR_header[] = "#!AMR\n";
static const char AMRWB_header[] = "#!AMR-WB\n";
 
#if CONFIG_AMR_MUXER
/* Write the RFC 3267 magic matching the stream's codec; rejects anything
 * that is not AMR-NB or AMR-WB. */
static int amr_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;
    const char *magic;
    int magic_len;

    s->priv_data = NULL;

    switch (enc->codec_id) {
    case AV_CODEC_ID_AMR_NB:
        magic     = AMR_header;
        magic_len = sizeof(AMR_header) - 1;
        break;
    case AV_CODEC_ID_AMR_WB:
        magic     = AMRWB_header;
        magic_len = sizeof(AMRWB_header) - 1;
        break;
    default:
        return -1;
    }

    avio_write(pb, magic, magic_len); /* magic number, without the NUL */
    avio_flush(pb);
    return 0;
}
 
/* Frames are stored back to back; the per-frame TOC byte is already part
 * of the packet payload, so the packet is written verbatim. */
static int amr_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;

    avio_write(pb, pkt->data, pkt->size);
    return 0;
}
#endif /* CONFIG_AMR_MUXER */
 
static int amr_probe(AVProbeData *p)
{
    /* Match only the leading "#!AMR": this covers both AMR-NB ("#!AMR\n")
     * and AMR-WB ("#!AMR-WB\n"), but also triggers on the unsupported
     * multichannel signatures "#!AMR_MC1.0\n" and "#!AMR-WB_MC1.0\n". */
    return memcmp(p->buf, AMR_header, 5) ? 0 : AVPROBE_SCORE_MAX;
}
 
/* amr input */
/* amr input */
static int amr_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint8_t header[9];

    /* Read the short magic first; extend it only when it is not AMR-NB. */
    avio_read(pb, header, 6);

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    if (!memcmp(header, AMR_header, 6)) {
        /* "#!AMR\n": narrowband, 8 kHz */
        st->codec->codec_tag   = MKTAG('s', 'a', 'm', 'r');
        st->codec->codec_id    = AV_CODEC_ID_AMR_NB;
        st->codec->sample_rate = 8000;
    } else {
        /* Otherwise the file must carry the 9-byte "#!AMR-WB\n" magic. */
        avio_read(pb, header + 6, 3);
        if (memcmp(header, AMRWB_header, 9))
            return -1;

        st->codec->codec_tag   = MKTAG('s', 'a', 'w', 'b');
        st->codec->codec_id    = AV_CODEC_ID_AMR_WB;
        st->codec->sample_rate = 16000;
    }

    st->codec->channels       = 1;
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    st->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

    return 0;
}
 
/* Read one AMR frame: the TOC byte selects the frame size via a
 * per-codec lookup table (sizes include the TOC byte itself). */
static int amr_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *enc = s->streams[0]->codec;
    int64_t pos = avio_tell(s->pb);
    int toc, mode, got, size = 0;

    if (url_feof(s->pb)) {
        return AVERROR(EIO);
    }

    // FIXME: frame sizing like this really belongs in an AVParser
    toc  = avio_r8(s->pb);
    mode = (toc >> 3) & 0x0F;

    if (enc->codec_id == AV_CODEC_ID_AMR_NB) {
        static const uint8_t packed_size[16] = {
            12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0
        };

        size = packed_size[mode] + 1;
    } else if (enc->codec_id == AV_CODEC_ID_AMR_WB) {
        static const uint8_t packed_size[16] = {
            18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1
        };

        size = packed_size[mode];
    }

    if (!size || av_new_packet(pkt, size))
        return AVERROR(EIO);

    /* Both AMR formats have 50 frames per second */
    s->streams[0]->codec->bit_rate = size*8*50;

    pkt->stream_index = 0;
    pkt->pos          = pos;
    pkt->data[0]      = toc;
    pkt->duration     = enc->codec_id == AV_CODEC_ID_AMR_NB ? 160 : 320;

    got = avio_read(s->pb, pkt->data + 1, size - 1);
    if (got != size - 1) {
        av_free_packet(pkt);
        return AVERROR(EIO);
    }

    return 0;
}
 
#if CONFIG_AMR_DEMUXER
/* Demuxer descriptor for RFC 3267 AMR files (NB and WB, mono only). */
AVInputFormat ff_amr_demuxer = {
    .name = "amr",
    .long_name = NULL_IF_CONFIG_SMALL("3GPP AMR"),
    .read_probe = amr_probe,
    .read_header = amr_read_header,
    .read_packet = amr_read_packet,
    .flags = AVFMT_GENERIC_INDEX,
};
#endif
 
#if CONFIG_AMR_MUXER
/* Muxer descriptor for RFC 3267 AMR files; AMR-WB is also accepted at
 * runtime by amr_write_header(). */
AVOutputFormat ff_amr_muxer = {
    .name = "amr",
    .long_name = NULL_IF_CONFIG_SMALL("3GPP AMR"),
    .mime_type = "audio/amr",
    .extensions = "amr",
    .audio_codec = AV_CODEC_ID_AMR_NB,
    .video_codec = AV_CODEC_ID_NONE,
    .write_header = amr_write_header,
    .write_packet = amr_write_packet,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/anm.c
0,0 → 1,229
/*
* Deluxe Paint Animation demuxer
* Copyright (c) 2009 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Deluxe Paint Animation demuxer
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
/* Directory entry for one page of records in the file. */
typedef struct {
    int base_record;         /* index of the first record stored in this page */
    unsigned int nb_records; /* number of records stored in this page */
    int size;                /* page payload size */
} Page;

typedef struct {
    unsigned int nb_pages;    /**< total pages in file */
    unsigned int nb_records;  /**< total records in file */
    int page_table_offset;    /* absolute file offset of the page table */
#define MAX_PAGES 256 /**< Deluxe Paint hardcoded value */
    Page pt[MAX_PAGES]; /**< page table */
    int page;   /**< current page (or AVERROR_xxx code) */
    int record; /**< current record (within page) */
} AnmDemuxContext;
 
#define LPF_TAG MKTAG('L','P','F',' ')
#define ANIM_TAG MKTAG('A','N','I','M')
 
static int probe(AVProbeData *p)
{
    /* Require the LPF container tag, the ANIM sub-tag and nonzero
     * video dimensions at offsets 20/22. */
    if (AV_RL32(&p->buf[0]) != LPF_TAG)
        return 0;
    if (AV_RL32(&p->buf[16]) != ANIM_TAG)
        return 0;
    if (!AV_RL16(&p->buf[20]) || !AV_RL16(&p->buf[22]))
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/**
* @return page containing the requested record or AVERROR_XXX
*/
/**
 * Locate the page containing a given record.
 *
 * @param anm    demuxer context with a populated page table
 * @param record absolute record index
 * @return page index holding the record, AVERROR_EOF when the record
 *         lies past the end of the file, or AVERROR_INVALIDDATA when
 *         the index is negative or absent from the page table
 */
static int find_record(const AnmDemuxContext *anm, int record)
{
    int i;

    /* Guard first: in the comparison below a negative `record` would be
     * promoted to a huge unsigned value (nb_records is unsigned) and be
     * misreported as end-of-file instead of invalid data. */
    if (record < 0)
        return AVERROR_INVALIDDATA;

    if (record >= anm->nb_records)
        return AVERROR_EOF;

    for (i = 0; i < MAX_PAGES; i++) {
        const Page *p = &anm->pt[i];
        if (p->nb_records > 0 && record >= p->base_record && record < p->base_record + p->nb_records)
            return i;
    }

    return AVERROR_INVALIDDATA;
}
 
/**
 * Parse the .anm file header, create the single video stream,
 * read the page table and position on the page of the first frame.
 *
 * Fix: the previous code did
 *     anm->nb_records = FFMAX(anm->nb_records - 1, 0);
 * where nb_records is unsigned, so with nb_records == 0 the subtraction
 * wrapped to UINT_MAX and FFMAX selected the wrapped value. The
 * decrement is now guarded explicitly.
 */
static int read_header(AVFormatContext *s)
{
    AnmDemuxContext *anm = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    int i, ret;

    avio_skip(pb, 4); /* magic number */
    if (avio_rl16(pb) != MAX_PAGES) {
        avpriv_request_sample(s, "max_pages != " AV_STRINGIFY(MAX_PAGES));
        return AVERROR_PATCHWELCOME;
    }

    anm->nb_pages   = avio_rl16(pb);
    anm->nb_records = avio_rl32(pb);
    avio_skip(pb, 2); /* max records per page */
    anm->page_table_offset = avio_rl16(pb);
    if (avio_rl32(pb) != ANIM_TAG)
        return AVERROR_INVALIDDATA;

    /* video stream */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_ANM;
    st->codec->codec_tag  = 0; /* no fourcc */
    st->codec->width      = avio_rl16(pb);
    st->codec->height     = avio_rl16(pb);
    if (avio_r8(pb) != 0)
        goto invalid;
    avio_skip(pb, 1); /* frame rate multiplier info */

    /* ignore last delta record (used for looping); the byte must be
     * consumed even when nb_records is 0, hence the && ordering */
    if (avio_r8(pb) && anm->nb_records > 0) /* has_last_delta */
        anm->nb_records--;

    avio_skip(pb, 1); /* last_delta_valid */

    if (avio_r8(pb) != 0)
        goto invalid;

    if (avio_r8(pb) != 1)
        goto invalid;

    avio_skip(pb, 1); /* other recs per frame */

    if (avio_r8(pb) != 1)
        goto invalid;

    avio_skip(pb, 32); /* record_types */
    st->nb_frames = avio_rl32(pb);
    avpriv_set_pts_info(st, 64, 1, avio_rl16(pb));
    avio_skip(pb, 58);

    /* color cycling and palette data */
    st->codec->extradata_size = 16*8 + 4*256;
    st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codec->extradata) {
        return AVERROR(ENOMEM);
    }
    ret = avio_read(pb, st->codec->extradata, st->codec->extradata_size);
    if (ret < 0)
        return ret;

    /* read page table */
    ret = avio_seek(pb, anm->page_table_offset, SEEK_SET);
    if (ret < 0)
        return ret;

    for (i = 0; i < MAX_PAGES; i++) {
        Page *p = &anm->pt[i];
        p->base_record = avio_rl16(pb);
        p->nb_records  = avio_rl16(pb);
        p->size        = avio_rl16(pb);
    }

    /* find page of first frame */
    anm->page = find_record(anm, 0);
    if (anm->page < 0) {
        return anm->page;
    }

    anm->record = -1; /* force page-header parse on the first read_packet */
    return 0;

invalid:
    avpriv_request_sample(s, "Invalid header element");
    return AVERROR_PATCHWELCOME;
}
 
/**
 * Read the next record from the current page, advancing to the next
 * page when the current one is exhausted.
 *
 * Fix: the saved stream position returned by avio_tell() is int64_t;
 * it was previously stored in an int, truncating offsets beyond 2 GiB.
 */
static int read_packet(AVFormatContext *s,
                       AVPacket *pkt)
{
    AnmDemuxContext *anm = s->priv_data;
    AVIOContext *pb = s->pb;
    Page *p;
    int64_t tmp;
    int record_size;

    if (url_feof(s->pb))
        return AVERROR(EIO);

    if (anm->page < 0)
        return anm->page;

repeat:
    p = &anm->pt[anm->page];

    /* parse page header; pages are 64 KiB apart (anm->page << 16) */
    if (anm->record < 0) {
        avio_seek(pb, anm->page_table_offset + MAX_PAGES*6 + (anm->page<<16), SEEK_SET);
        avio_skip(pb, 8 + 2*p->nb_records);
        anm->record = 0;
    }

    /* if we have fetched all records in this page, then find the
       next page and repeat */
    if (anm->record >= p->nb_records) {
        anm->page = find_record(anm, p->base_record + p->nb_records);
        if (anm->page < 0)
            return anm->page;
        anm->record = -1;
        goto repeat;
    }

    /* fetch record size from the page's size table, then restore pos */
    tmp = avio_tell(pb);
    avio_seek(pb, anm->page_table_offset + MAX_PAGES*6 + (anm->page<<16) +
              8 + anm->record * 2, SEEK_SET);
    record_size = avio_rl16(pb);
    avio_seek(pb, tmp, SEEK_SET);

    /* fetch record */
    pkt->size = av_get_packet(s->pb, pkt, record_size);
    if (pkt->size < 0)
        return pkt->size;
    if (p->base_record + anm->record == 0)
        pkt->flags |= AV_PKT_FLAG_KEY; /* very first record is a keyframe */

    anm->record++;
    return 0;
}
 
/** Deluxe Paint Animation demuxer registration (no seek support). */
AVInputFormat ff_anm_demuxer = {
    .name           = "anm",
    .long_name      = NULL_IF_CONFIG_SMALL("Deluxe Paint Animation"),
    .priv_data_size = sizeof(AnmDemuxContext),
    .read_probe     = probe,
    .read_header    = read_header,
    .read_packet    = read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/apc.c
0,0 → 1,94
/*
* CRYO APC audio format demuxer
* Copyright (c) 2007 Anssi Hannula <anssi.hannula@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <string.h>
 
#include "libavutil/channel_layout.h"
#include "avformat.h"
#include "internal.h"
 
/** Probe for the CRYO APC magic string at the start of the buffer. */
static int apc_probe(AVProbeData *p)
{
    static const char apc_magic[8] = { 'C', 'R', 'Y', 'O', '_', 'A', 'P', 'C' };

    if (strncmp(p->buf, apc_magic, 8) == 0)
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/**
 * Parse the CRYO APC header and set up the single ADPCM audio stream.
 *
 * Fix: the avio_read() of the 8 extradata bytes (initial ADPCM
 * predictors) was unchecked; a truncated file could leave part of the
 * extradata buffer unfilled. A short read now fails with AVERROR(EIO).
 */
static int apc_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVStream *st;

    avio_rl32(pb); /* CRYO */
    avio_rl32(pb); /* _APC */
    avio_rl32(pb); /* 1.20 */

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = AV_CODEC_ID_ADPCM_IMA_APC;

    avio_rl32(pb); /* number of samples */
    st->codec->sample_rate = avio_rl32(pb);

    if (ff_alloc_extradata(st->codec, 2 * 4))
        return AVERROR(ENOMEM);

    /* initial predictor values for adpcm decoder */
    if (avio_read(pb, st->codec->extradata, 2 * 4) != 2 * 4)
        return AVERROR(EIO);

    /* non-zero flag word selects stereo */
    if (avio_rl32(pb)) {
        st->codec->channels       = 2;
        st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    } else {
        st->codec->channels       = 1;
        st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    }

    st->codec->bits_per_coded_sample = 4;
    st->codec->bit_rate = st->codec->bits_per_coded_sample * st->codec->channels
                          * st->codec->sample_rate;
    st->codec->block_align = 1;

    return 0;
}
 
#define MAX_READ_SIZE 4096

/** Read up to MAX_READ_SIZE raw bytes into one packet for stream 0. */
static int apc_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret = av_get_packet(s->pb, pkt, MAX_READ_SIZE);

    if (ret <= 0)
        return AVERROR(EIO);

    pkt->stream_index = 0;
    /* raw fixed-size reads cannot be meaningfully flagged corrupt */
    pkt->flags &= ~AV_PKT_FLAG_CORRUPT;
    return 0;
}
 
/** CRYO APC demuxer registration (no private context, no seek support). */
AVInputFormat ff_apc_demuxer = {
    .name        = "apc",
    .long_name   = NULL_IF_CONFIG_SMALL("CRYO APC"),
    .read_probe  = apc_probe,
    .read_header = apc_read_header,
    .read_packet = apc_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/ape.c
0,0 → 1,464
/*
* Monkey's Audio APE demuxer
* Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
* based upon libdemac from Dave Chapman.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdio.h>
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "apetag.h"
 
/* The earliest and latest file formats supported by this library */
#define APE_MIN_VERSION 3800
#define APE_MAX_VERSION 3990

#define MAC_FORMAT_FLAG_8_BIT                 1 // is 8-bit [OBSOLETE]
#define MAC_FORMAT_FLAG_CRC                   2 // uses the new CRC32 error detection [OBSOLETE]
#define MAC_FORMAT_FLAG_HAS_PEAK_LEVEL        4 // uint32 nPeakLevel after the header [OBSOLETE]
#define MAC_FORMAT_FLAG_24_BIT                8 // is 24-bit [OBSOLETE]
#define MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS    16 // has the number of seek elements after the peak level
#define MAC_FORMAT_FLAG_CREATE_WAV_HEADER    32 // create the wave header on decompression (not stored)

/* bytes of side data prepended to each packet: fileversion,
 * compressiontype, formatflags (3 x 16-bit LE) */
#define APE_EXTRADATA_SIZE 6

/** Per-frame index entry built from the seektable. */
typedef struct {
    int64_t pos;    /**< byte offset of the frame in the file */
    int nblocks;    /**< number of audio blocks in the frame */
    int size;       /**< frame size in bytes */
    int skip;       /**< leading bits/bytes to skip (alignment) */
    int64_t pts;    /**< presentation timestamp of the frame */
} APEFrame;

/** Demuxer state for a Monkey's Audio (.ape) file. */
typedef struct {
    /* Derived fields */
    uint32_t junklength;       /**< bytes of leading junk (e.g. id3v2) before the MAC header */
    uint32_t firstframe;       /**< byte offset of the first audio frame */
    uint32_t totalsamples;     /**< total number of audio blocks in the file */
    int currentframe;          /**< index of the next frame to demux */
    APEFrame *frames;          /**< per-frame index, totalframes entries */

    /* Info from Descriptor Block */
    char magic[4];
    int16_t fileversion;
    int16_t padding1;
    uint32_t descriptorlength;
    uint32_t headerlength;
    uint32_t seektablelength;
    uint32_t wavheaderlength;
    uint32_t audiodatalength;
    uint32_t audiodatalength_high;
    uint32_t wavtaillength;
    uint8_t md5[16];

    /* Info from Header Block */
    uint16_t compressiontype;
    uint16_t formatflags;
    uint32_t blocksperframe;
    uint32_t finalframeblocks;
    uint32_t totalframes;
    uint16_t bps;
    uint16_t channels;
    uint32_t samplerate;

    /* Seektable */
    uint32_t *seektable;       /**< frame byte offsets relative to start of audio */
    uint8_t *bittable;         /**< per-frame bit offsets (fileversion < 3810 only) */
} APEContext;
 
/** Probe for the 'MAC ' magic at the start of the buffer. */
static int ape_probe(AVProbeData * p)
{
    const uint8_t *b = p->buf;

    if (b[0] != 'M' || b[1] != 'A' || b[2] != 'C' || b[3] != ' ')
        return 0;

    return AVPROBE_SCORE_MAX;
}
 
/**
 * Dump all parsed APE header fields, the seektable and the frame index
 * to the debug log. Compiled to a no-op unless DEBUG is defined.
 */
static void ape_dumpinfo(AVFormatContext * s, APEContext * ape_ctx)
{
#ifdef DEBUG
    int i;

    av_log(s, AV_LOG_DEBUG, "Descriptor Block:\n\n");
    av_log(s, AV_LOG_DEBUG, "magic                = \"%c%c%c%c\"\n", ape_ctx->magic[0], ape_ctx->magic[1], ape_ctx->magic[2], ape_ctx->magic[3]);
    av_log(s, AV_LOG_DEBUG, "fileversion          = %"PRId16"\n", ape_ctx->fileversion);
    av_log(s, AV_LOG_DEBUG, "descriptorlength     = %"PRIu32"\n", ape_ctx->descriptorlength);
    av_log(s, AV_LOG_DEBUG, "headerlength         = %"PRIu32"\n", ape_ctx->headerlength);
    av_log(s, AV_LOG_DEBUG, "seektablelength      = %"PRIu32"\n", ape_ctx->seektablelength);
    av_log(s, AV_LOG_DEBUG, "wavheaderlength      = %"PRIu32"\n", ape_ctx->wavheaderlength);
    av_log(s, AV_LOG_DEBUG, "audiodatalength      = %"PRIu32"\n", ape_ctx->audiodatalength);
    av_log(s, AV_LOG_DEBUG, "audiodatalength_high = %"PRIu32"\n", ape_ctx->audiodatalength_high);
    av_log(s, AV_LOG_DEBUG, "wavtaillength        = %"PRIu32"\n", ape_ctx->wavtaillength);
    av_log(s, AV_LOG_DEBUG, "md5                  = ");
    for (i = 0; i < 16; i++)
        av_log(s, AV_LOG_DEBUG, "%02x", ape_ctx->md5[i]);
    av_log(s, AV_LOG_DEBUG, "\n");

    av_log(s, AV_LOG_DEBUG, "\nHeader Block:\n\n");

    av_log(s, AV_LOG_DEBUG, "compressiontype      = %"PRIu16"\n", ape_ctx->compressiontype);
    av_log(s, AV_LOG_DEBUG, "formatflags          = %"PRIu16"\n", ape_ctx->formatflags);
    av_log(s, AV_LOG_DEBUG, "blocksperframe       = %"PRIu32"\n", ape_ctx->blocksperframe);
    av_log(s, AV_LOG_DEBUG, "finalframeblocks     = %"PRIu32"\n", ape_ctx->finalframeblocks);
    av_log(s, AV_LOG_DEBUG, "totalframes          = %"PRIu32"\n", ape_ctx->totalframes);
    av_log(s, AV_LOG_DEBUG, "bps                  = %"PRIu16"\n", ape_ctx->bps);
    av_log(s, AV_LOG_DEBUG, "channels             = %"PRIu16"\n", ape_ctx->channels);
    av_log(s, AV_LOG_DEBUG, "samplerate           = %"PRIu32"\n", ape_ctx->samplerate);

    av_log(s, AV_LOG_DEBUG, "\nSeektable\n\n");
    /* a seektable is considered present only if it has exactly one
     * 32-bit entry per frame */
    if ((ape_ctx->seektablelength / sizeof(uint32_t)) != ape_ctx->totalframes) {
        av_log(s, AV_LOG_DEBUG, "No seektable\n");
    } else {
        for (i = 0; i < ape_ctx->seektablelength / sizeof(uint32_t); i++) {
            if (i < ape_ctx->totalframes - 1) {
                av_log(s, AV_LOG_DEBUG, "%8d   %"PRIu32" (%"PRIu32" bytes)",
                       i, ape_ctx->seektable[i],
                       ape_ctx->seektable[i + 1] - ape_ctx->seektable[i]);
                if (ape_ctx->bittable)
                    av_log(s, AV_LOG_DEBUG, " + %2d bits\n",
                           ape_ctx->bittable[i]);
                av_log(s, AV_LOG_DEBUG, "\n");
            } else {
                av_log(s, AV_LOG_DEBUG, "%8d   %"PRIu32"\n", i, ape_ctx->seektable[i]);
            }
        }
    }

    av_log(s, AV_LOG_DEBUG, "\nFrames\n\n");
    for (i = 0; i < ape_ctx->totalframes; i++)
        av_log(s, AV_LOG_DEBUG, "%8d   %8"PRId64" %8d (%d samples)\n", i,
               ape_ctx->frames[i].pos, ape_ctx->frames[i].size,
               ape_ctx->frames[i].nblocks);

    av_log(s, AV_LOG_DEBUG, "\nCalculated information:\n\n");
    av_log(s, AV_LOG_DEBUG, "junklength           = %"PRIu32"\n", ape_ctx->junklength);
    av_log(s, AV_LOG_DEBUG, "firstframe           = %"PRIu32"\n", ape_ctx->firstframe);
    av_log(s, AV_LOG_DEBUG, "totalsamples         = %"PRIu32"\n", ape_ctx->totalsamples);
#endif
}
 
/**
 * Parse the APE descriptor/header blocks, build the per-frame index
 * from the seektable and create the single audio stream.
 *
 * Two header layouts exist: fileversion >= 3980 has a separate
 * descriptor block; older versions pack everything into one header and
 * several fields (bps, blocksperframe) are derived from flags/version.
 *
 * NOTE(review): the error paths below return without freeing
 * ape->frames / ape->seektable / ape->bittable; whether that leaks
 * depends on the caller invoking read_close on failure — verify.
 */
static int ape_read_header(AVFormatContext * s)
{
    AVIOContext *pb = s->pb;
    APEContext *ape = s->priv_data;
    AVStream *st;
    uint32_t tag;
    int i;
    int total_blocks, final_size = 0;
    int64_t pts, file_size;

    /* Skip any leading junk such as id3v2 tags */
    ape->junklength = avio_tell(pb);

    tag = avio_rl32(pb);
    if (tag != MKTAG('M', 'A', 'C', ' '))
        return AVERROR_INVALIDDATA;

    ape->fileversion = avio_rl16(pb);

    if (ape->fileversion < APE_MIN_VERSION || ape->fileversion > APE_MAX_VERSION) {
        av_log(s, AV_LOG_ERROR, "Unsupported file version - %d.%02d\n",
               ape->fileversion / 1000, (ape->fileversion % 1000) / 10);
        return AVERROR_PATCHWELCOME;
    }

    if (ape->fileversion >= 3980) {
        /* new layout: descriptor block followed by header block */
        ape->padding1             = avio_rl16(pb);
        ape->descriptorlength     = avio_rl32(pb);
        ape->headerlength         = avio_rl32(pb);
        ape->seektablelength      = avio_rl32(pb);
        ape->wavheaderlength      = avio_rl32(pb);
        ape->audiodatalength      = avio_rl32(pb);
        ape->audiodatalength_high = avio_rl32(pb);
        ape->wavtaillength        = avio_rl32(pb);
        avio_read(pb, ape->md5, 16);

        /* Skip any unknown bytes at the end of the descriptor.
           This is for future compatibility */
        if (ape->descriptorlength > 52)
            avio_skip(pb, ape->descriptorlength - 52);

        /* Read header data */
        ape->compressiontype      = avio_rl16(pb);
        ape->formatflags          = avio_rl16(pb);
        ape->blocksperframe       = avio_rl32(pb);
        ape->finalframeblocks     = avio_rl32(pb);
        ape->totalframes          = avio_rl32(pb);
        ape->bps                  = avio_rl16(pb);
        ape->channels             = avio_rl16(pb);
        ape->samplerate           = avio_rl32(pb);
    } else {
        /* old layout: single header, derived fields */
        ape->descriptorlength = 0;
        ape->headerlength = 32;

        ape->compressiontype      = avio_rl16(pb);
        ape->formatflags          = avio_rl16(pb);
        ape->channels             = avio_rl16(pb);
        ape->samplerate           = avio_rl32(pb);
        ape->wavheaderlength      = avio_rl32(pb);
        ape->wavtaillength        = avio_rl32(pb);
        ape->totalframes          = avio_rl32(pb);
        ape->finalframeblocks     = avio_rl32(pb);

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_PEAK_LEVEL) {
            avio_skip(pb, 4); /* Skip the peak level */
            ape->headerlength += 4;
        }

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS) {
            ape->seektablelength = avio_rl32(pb);
            ape->headerlength += 4;
            ape->seektablelength *= sizeof(int32_t);
        } else
            ape->seektablelength = ape->totalframes * sizeof(int32_t);

        if (ape->formatflags & MAC_FORMAT_FLAG_8_BIT)
            ape->bps = 8;
        else if (ape->formatflags & MAC_FORMAT_FLAG_24_BIT)
            ape->bps = 24;
        else
            ape->bps = 16;

        /* blocks per frame depends on version and compression level */
        if (ape->fileversion >= 3950)
            ape->blocksperframe = 73728 * 4;
        else if (ape->fileversion >= 3900 || (ape->fileversion >= 3800  && ape->compressiontype >= 4000))
            ape->blocksperframe = 73728;
        else
            ape->blocksperframe = 9216;

        /* Skip any stored wav header */
        if (!(ape->formatflags & MAC_FORMAT_FLAG_CREATE_WAV_HEADER))
            avio_skip(pb, ape->wavheaderlength);
    }

    if(!ape->totalframes){
        av_log(s, AV_LOG_ERROR, "No frames in the file!\n");
        return AVERROR(EINVAL);
    }
    if(ape->totalframes > UINT_MAX / sizeof(APEFrame)){
        av_log(s, AV_LOG_ERROR, "Too many frames: %"PRIu32"\n",
               ape->totalframes);
        return AVERROR_INVALIDDATA;
    }
    if (ape->seektablelength / sizeof(*ape->seektable) < ape->totalframes) {
        av_log(s, AV_LOG_ERROR,
               "Number of seek entries is less than number of frames: %zu vs. %"PRIu32"\n",
               ape->seektablelength / sizeof(*ape->seektable), ape->totalframes);
        return AVERROR_INVALIDDATA;
    }
    ape->frames       = av_malloc(ape->totalframes * sizeof(APEFrame));
    if(!ape->frames)
        return AVERROR(ENOMEM);
    ape->firstframe   = ape->junklength + ape->descriptorlength + ape->headerlength + ape->seektablelength + ape->wavheaderlength;
    if (ape->fileversion < 3810)
        ape->firstframe += ape->totalframes; /* one bittable byte per frame follows the seektable */
    ape->currentframe = 0;


    ape->totalsamples = ape->finalframeblocks;
    if (ape->totalframes > 1)
        ape->totalsamples += ape->blocksperframe * (ape->totalframes - 1);

    /* always > 0 here: checked above that the table covers every frame */
    if (ape->seektablelength > 0) {
        ape->seektable = av_malloc(ape->seektablelength);
        if (!ape->seektable)
            return AVERROR(ENOMEM);
        for (i = 0; i < ape->seektablelength / sizeof(uint32_t) && !pb->eof_reached; i++)
            ape->seektable[i] = avio_rl32(pb);
        if (ape->fileversion < 3810) {
            ape->bittable = av_malloc(ape->totalframes);
            if (!ape->bittable)
                return AVERROR(ENOMEM);
            for (i = 0; i < ape->totalframes && !pb->eof_reached; i++)
                ape->bittable[i] = avio_r8(pb);
        }
    }

    /* build the frame index: positions from the seektable, sizes from
     * the distance between consecutive positions */
    ape->frames[0].pos     = ape->firstframe;
    ape->frames[0].nblocks = ape->blocksperframe;
    ape->frames[0].skip    = 0;
    for (i = 1; i < ape->totalframes; i++) {
        ape->frames[i].pos      = ape->seektable[i] + ape->junklength;
        ape->frames[i].nblocks  = ape->blocksperframe;
        ape->frames[i - 1].size = ape->frames[i].pos - ape->frames[i - 1].pos;
        ape->frames[i].skip     = (ape->frames[i].pos - ape->frames[0].pos) & 3;
    }
    ape->frames[ape->totalframes - 1].nblocks = ape->finalframeblocks;
    /* calculate final packet size from total file size, if available */
    file_size = avio_size(pb);
    if (file_size > 0) {
        final_size = file_size - ape->frames[ape->totalframes - 1].pos -
                     ape->wavtaillength;
        final_size -= final_size & 3;
    }
    if (file_size <= 0 || final_size <= 0)
        final_size = ape->finalframeblocks * 8;
    ape->frames[ape->totalframes - 1].size = final_size;

    /* round every frame out to 32-bit alignment, folding the alignment
     * slack into the skip field */
    for (i = 0; i < ape->totalframes; i++) {
        if(ape->frames[i].skip){
            ape->frames[i].pos  -= ape->frames[i].skip;
            ape->frames[i].size += ape->frames[i].skip;
        }
        ape->frames[i].size = (ape->frames[i].size + 3) & ~3;
    }
    if (ape->fileversion < 3810) {
        /* old files store an extra per-frame bit offset (bittable) */
        for (i = 0; i < ape->totalframes; i++) {
            if (i < ape->totalframes - 1 && ape->bittable[i + 1])
                ape->frames[i].size += 4;
            ape->frames[i].skip <<= 3;
            ape->frames[i].skip  += ape->bittable[i];
        }
    }

    ape_dumpinfo(s, ape);

    av_log(s, AV_LOG_DEBUG, "Decoding file - v%d.%02d, compression level %"PRIu16"\n",
           ape->fileversion / 1000, (ape->fileversion % 1000) / 10,
           ape->compressiontype);

    /* now we are ready: build format streams */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    total_blocks = (ape->totalframes == 0) ? 0 : ((ape->totalframes - 1) * ape->blocksperframe) + ape->finalframeblocks;

    st->codec->codec_type      = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id        = AV_CODEC_ID_APE;
    st->codec->codec_tag       = MKTAG('A', 'P', 'E', ' ');
    st->codec->channels        = ape->channels;
    st->codec->sample_rate     = ape->samplerate;
    st->codec->bits_per_coded_sample = ape->bps;

    st->nb_frames = ape->totalframes;
    st->start_time = 0;
    st->duration  = total_blocks;
    avpriv_set_pts_info(st, 64, 1, ape->samplerate);

    /* side data the decoder needs: version, compression type, flags */
    if (ff_alloc_extradata(st->codec, APE_EXTRADATA_SIZE))
        return AVERROR(ENOMEM);
    AV_WL16(st->codec->extradata + 0, ape->fileversion);
    AV_WL16(st->codec->extradata + 2, ape->compressiontype);
    AV_WL16(st->codec->extradata + 4, ape->formatflags);

    pts = 0;
    for (i = 0; i < ape->totalframes; i++) {
        ape->frames[i].pts = pts;
        av_add_index_entry(st, ape->frames[i].pos, ape->frames[i].pts, 0, 0, AVINDEX_KEYFRAME);
        pts += ape->blocksperframe;
    }

    /* try to read APE tags */
    if (pb->seekable) {
        ff_ape_parse_tag(s);
        avio_seek(pb, 0, SEEK_SET);
    }

    return 0;
}
 
/**
 * Demux one APE frame: seek to its position and emit a packet whose
 * first 8 bytes carry the block count and bit-skip for the decoder.
 *
 * Fix: when avio_read() failed after av_new_packet(), the allocated
 * packet leaked; it is now released before returning the error.
 */
static int ape_read_packet(AVFormatContext * s, AVPacket * pkt)
{
    int ret;
    int nblocks;
    APEContext *ape = s->priv_data;
    uint32_t extra_size = 8;

    if (url_feof(s->pb))
        return AVERROR_EOF;
    if (ape->currentframe >= ape->totalframes)
        return AVERROR_EOF;

    if (avio_seek(s->pb, ape->frames[ape->currentframe].pos, SEEK_SET) < 0)
        return AVERROR(EIO);

    /* Calculate how many blocks there are in this frame */
    if (ape->currentframe == (ape->totalframes - 1))
        nblocks = ape->finalframeblocks;
    else
        nblocks = ape->blocksperframe;

    if (ape->frames[ape->currentframe].size <= 0 ||
        ape->frames[ape->currentframe].size > INT_MAX - extra_size) {
        av_log(s, AV_LOG_ERROR, "invalid packet size: %d\n",
               ape->frames[ape->currentframe].size);
        ape->currentframe++;
        return AVERROR(EIO);
    }

    if (av_new_packet(pkt,  ape->frames[ape->currentframe].size + extra_size) < 0)
        return AVERROR(ENOMEM);

    AV_WL32(pkt->data    , nblocks);
    AV_WL32(pkt->data + 4, ape->frames[ape->currentframe].skip);
    ret = avio_read(s->pb, pkt->data + extra_size, ape->frames[ape->currentframe].size);
    if (ret < 0) {
        av_free_packet(pkt);
        return ret;
    }

    pkt->pts = ape->frames[ape->currentframe].pts;
    pkt->stream_index = 0;

    /* note: we need to modify the packet size here to handle the last
       packet */
    pkt->size = ret + extra_size;

    ape->currentframe++;

    return 0;
}
 
/** Free all index tables owned by the demuxer context. */
static int ape_read_close(AVFormatContext * s)
{
    APEContext *ape = s->priv_data;

    av_freep(&ape->bittable);
    av_freep(&ape->seektable);
    av_freep(&ape->frames);

    return 0;
}
 
/**
 * Seek to the frame whose timestamp matches via the stream index,
 * repositioning both the byte stream and the current-frame counter.
 */
static int ape_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    APEContext *ape = s->priv_data;
    AVStream *st = s->streams[stream_index];
    int idx = av_index_search_timestamp(st, timestamp, flags);

    if (idx < 0)
        return -1;
    if (avio_seek(s->pb, st->index_entries[idx].pos, SEEK_SET) < 0)
        return -1;

    ape->currentframe = idx;
    return 0;
}
 
/** Monkey's Audio demuxer registration, with seeking via the frame index. */
AVInputFormat ff_ape_demuxer = {
    .name           = "ape",
    .long_name      = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
    .priv_data_size = sizeof(APEContext),
    .read_probe     = ape_probe,
    .read_header    = ape_read_header,
    .read_packet    = ape_read_packet,
    .read_close     = ape_read_close,
    .read_seek      = ape_read_seek,
    .extensions     = "ape,apl,mac",
};
/contrib/sdk/sources/ffmpeg/libavformat/apetag.c
0,0 → 1,240
/*
* APE tag handling
* Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
* based upon libdemac from Dave Chapman.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "avformat.h"
#include "avio_internal.h"
#include "apetag.h"
#include "internal.h"
 
/* APE tag flag bits (stored/compared as 32-bit LE words).
 * Fix: (1 << 31) left-shifts into the sign bit of a signed int, which
 * is undefined behavior in C; shift an unsigned operand instead. */
#define APE_TAG_FLAG_CONTAINS_HEADER  (1U << 31)
#define APE_TAG_FLAG_CONTAINS_FOOTER  (1U << 30)
#define APE_TAG_FLAG_IS_HEADER        (1U << 29)
#define APE_TAG_FLAG_IS_BINARY        (1U << 1)
 
/**
 * Read a single APE tag item at the current stream position.
 *
 * Text items become entries in s->metadata. Binary items become a new
 * stream: recognized image filenames turn into an attached picture,
 * anything else into an attachment stream with the payload in
 * extradata.
 *
 * @return 0 on success (including a skipped item), negative on error
 */
static int ape_tag_read_field(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    uint8_t key[1024], *value;
    uint32_t size, flags;
    int i, c;

    size = avio_rl32(pb);  /* field size */
    flags = avio_rl32(pb); /* field flags */
    /* the key is NUL-terminated printable ASCII; stop at the first
     * byte outside 0x20..0x7E (the terminator, if well-formed) */
    for (i = 0; i < sizeof(key) - 1; i++) {
        c = avio_r8(pb);
        if (c < 0x20 || c > 0x7E)
            break;
        else
            key[i] = c;
    }
    key[i] = 0;
    if (c != 0) {
        av_log(s, AV_LOG_WARNING, "Invalid APE tag key '%s'.\n", key);
        return -1;
    }
    if (size >= UINT_MAX)
        return -1;
    if (flags & APE_TAG_FLAG_IS_BINARY) {
        uint8_t filename[1024];
        enum AVCodecID id;
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);

        /* binary payload starts with a NUL-terminated filename */
        size -= avio_get_str(pb, size, filename, sizeof(filename));
        if (size <= 0) {
            av_log(s, AV_LOG_WARNING, "Skipping binary tag '%s'.\n", key);
            return 0;
        }

        av_dict_set(&st->metadata, key, filename, 0);

        if ((id = ff_guess_image2_codec(filename)) != AV_CODEC_ID_NONE) {
            AVPacket pkt;
            int ret;

            ret = av_get_packet(s->pb, &pkt, size);
            if (ret < 0) {
                av_log(s, AV_LOG_ERROR, "Error reading cover art.\n");
                return ret;
            }

            st->disposition      |= AV_DISPOSITION_ATTACHED_PIC;
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id   = id;

            st->attached_pic              = pkt;
            st->attached_pic.stream_index = st->index;
            st->attached_pic.flags       |= AV_PKT_FLAG_KEY;
        } else {
            if (ff_alloc_extradata(st->codec, size))
                return AVERROR(ENOMEM);
            if (avio_read(pb, st->codec->extradata, size) != size) {
                av_freep(&st->codec->extradata);
                st->codec->extradata_size = 0;
                return AVERROR(EIO);
            }
            st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT;
        }
    } else {
        value = av_malloc(size+1);
        if (!value)
            return AVERROR(ENOMEM);
        c = avio_read(pb, value, size);
        if (c < 0) {
            av_free(value);
            return c;
        }
        value[c] = 0;
        /* dictionary takes ownership of value (DONT_STRDUP_VAL) */
        av_dict_set(&s->metadata, key, value, AV_DICT_DONT_STRDUP_VAL);
    }
    return 0;
}
 
/**
 * Parse an APEv2 tag located at the end of the file.
 *
 * Validates the footer (preamble, version, size, field count, flags),
 * then iterates over the tag items. Any malformed footer is treated as
 * "no tag" and 0 is returned; the stream position is NOT restored.
 *
 * @return byte offset where the tag starts, or 0 if no valid tag
 */
int64_t ff_ape_parse_tag(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    int64_t file_size = avio_size(pb);
    uint32_t val, fields, tag_bytes;
    uint8_t buf[8];
    int64_t tag_start;
    int i;

    if (file_size < APE_TAG_FOOTER_BYTES)
        return 0;

    /* the 32-byte footer sits at the very end of the file */
    avio_seek(pb, file_size - APE_TAG_FOOTER_BYTES, SEEK_SET);

    avio_read(pb, buf, 8);    /* APETAGEX */
    if (strncmp(buf, APE_TAG_PREAMBLE, 8)) {
        return 0;
    }

    val = avio_rl32(pb);      /* APE tag version */
    if (val > APE_TAG_VERSION) {
        av_log(s, AV_LOG_ERROR, "Unsupported tag version. (>=%d)\n", APE_TAG_VERSION);
        return 0;
    }

    tag_bytes = avio_rl32(pb); /* tag size */
    /* sanity cap: 16 MiB of tag data */
    if (tag_bytes - APE_TAG_FOOTER_BYTES > (1024 * 1024 * 16)) {
        av_log(s, AV_LOG_ERROR, "Tag size is way too big\n");
        return 0;
    }

    if (tag_bytes > file_size - APE_TAG_FOOTER_BYTES) {
        av_log(s, AV_LOG_ERROR, "Invalid tag size %u.\n", tag_bytes);
        return 0;
    }
    /* tag_bytes counts items + footer but not an optional header */
    tag_start = file_size - tag_bytes - APE_TAG_FOOTER_BYTES;

    fields = avio_rl32(pb);    /* number of fields */
    if (fields > 65536) {
        av_log(s, AV_LOG_ERROR, "Too many tag fields (%d)\n", fields);
        return 0;
    }

    val = avio_rl32(pb);       /* flags */
    if (val & APE_TAG_FLAG_IS_HEADER) {
        av_log(s, AV_LOG_ERROR, "APE Tag is a header\n");
        return 0;
    }

    /* item data starts tag_bytes before EOF (footer included in size) */
    avio_seek(pb, file_size - tag_bytes, SEEK_SET);

    for (i=0; i<fields; i++)
        if (ape_tag_read_field(s) < 0) break;

    return tag_start;
}
 
/**
 * Check whether a NUL-terminated string consists only of printable
 * ASCII characters (0x20..0x7E).
 * @return 1 if every byte is printable ASCII (or the string is empty), else 0
 */
static int string_is_ascii(const uint8_t *str)
{
    const uint8_t *p;

    for (p = str; *p; p++) {
        if (*p < 0x20 || *p > 0x7e)
            return 0;
    }
    return 1;
}
 
/**
 * Write an APEv2 tag (header + items + footer) from s->metadata to
 * the output at the current position.
 *
 * Items are buffered in a dynamic buffer first so the total size and
 * field count (needed in both header and footer) are known before
 * anything hits s->pb. Non-ASCII keys are skipped with a warning; if
 * no item survives, nothing is written.
 *
 * @return 0 on success, negative AVERROR on failure to open the buffer
 */
int ff_ape_write_tag(AVFormatContext *s)
{
    AVDictionaryEntry *e = NULL;
    int size, ret, count = 0;
    AVIOContext *dyn_bc = NULL;
    uint8_t *dyn_buf = NULL;

    if ((ret = avio_open_dyn_buf(&dyn_bc)) < 0)
        goto end;

    /* flags word of the leading tag header (written to s->pb later;
     * the dynamic buffer holds header-tail + items) */
    avio_wl32(dyn_bc, APE_TAG_FLAG_CONTAINS_HEADER | APE_TAG_FLAG_CONTAINS_FOOTER |
                      APE_TAG_FLAG_IS_HEADER);
    ffio_fill(dyn_bc, 0, 8);             // reserved

    while ((e = av_dict_get(s->metadata, "", e, AV_DICT_IGNORE_SUFFIX))) {
        int val_len;

        if (!string_is_ascii(e->key)) {
            av_log(s, AV_LOG_WARNING, "Non ASCII keys are not allowed\n");
            continue;
        }

        val_len = strlen(e->value);
        avio_wl32(dyn_bc, val_len);            // value length
        avio_wl32(dyn_bc, 0);                  // item flags
        avio_put_str(dyn_bc, e->key);          // key (with trailing NUL)
        avio_write(dyn_bc, e->value, val_len); // value
        count++;
    }
    if (!count)
        goto end;

    size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    if (size <= 0)
        goto end;
    /* size field counts items + footer: +32 footer, -12 for the
     * flags/reserved already in the buffer = +20 */
    size += 20;

    // header
    avio_write(s->pb, "APETAGEX", 8);   // id
    avio_wl32(s->pb, APE_TAG_VERSION);  // version
    avio_wl32(s->pb, size);
    avio_wl32(s->pb, count);

    avio_write(s->pb, dyn_buf, size - 20);

    // footer
    avio_write(s->pb, "APETAGEX", 8);   // id
    avio_wl32(s->pb, APE_TAG_VERSION);  // version
    avio_wl32(s->pb, size);             // size
    avio_wl32(s->pb, count);            // tag count

    // flags
    avio_wl32(s->pb, APE_TAG_FLAG_CONTAINS_HEADER | APE_TAG_FLAG_CONTAINS_FOOTER);
    ffio_fill(s->pb, 0, 8);             // reserved

end:
    /* flush/free the dynamic buffer if we bailed before closing it */
    if (dyn_bc && !dyn_buf)
        avio_close_dyn_buf(dyn_bc, &dyn_buf);
    av_freep(&dyn_buf);

    return ret;
}
/contrib/sdk/sources/ffmpeg/libavformat/apetag.h
0,0 → 1,44
/*
* APE tag handling
* Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
* based upon libdemac from Dave Chapman.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_APETAG_H
#define AVFORMAT_APETAG_H

#include "avformat.h"

#define APE_TAG_PREAMBLE        "APETAGEX"  /**< magic at start of tag header/footer */
#define APE_TAG_VERSION         2000        /**< APEv2 */
#define APE_TAG_FOOTER_BYTES    32          /**< fixed header/footer size */

/**
 * Read and parse an APE tag
 *
 * @return offset of the tag start in the file
 */
int64_t ff_ape_parse_tag(AVFormatContext *s);

/**
 * Write an APE tag into a file.
 */
int ff_ape_write_tag(AVFormatContext *s);

#endif /* AVFORMAT_APETAG_H */
/contrib/sdk/sources/ffmpeg/libavformat/aqtitledec.c
0,0 → 1,148
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* AQTitle subtitles format demuxer
*
* @see http://web.archive.org/web/20070210095721/http://www.volny.cz/aberka/czech/aqt.html
* @see https://trac.annodex.net/wiki/AQTitle
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
#include "libavutil/opt.h"
 
/** Demuxer state for AQTitle subtitle files. */
typedef struct {
    const AVClass *class;       /**< required first member for AVOptions */
    FFDemuxSubtitlesQueue q;    /**< queue of parsed subtitle events */
    AVRational frame_rate;      /**< movie frame rate ("subfps" option); timestamps are frame numbers */
} AQTitleContext;
 
/** Probe: an AQTitle file starts with a "-->> <frame>" marker. */
static int aqt_probe(AVProbeData *p)
{
    const char *ptr = p->buf;
    int frame;

    if (sscanf(ptr, "-->> %d", &frame) != 1)
        return 0;
    return AVPROBE_SCORE_EXTENSION;
}
 
/**
 * Parse the whole AQTitle file into the subtitle queue.
 *
 * Events look like:
 *   -->> <start-frame>
 *   text line(s)
 * The duration of an event is filled in when the next "-->>" marker is
 * found; the last event keeps duration -1. Multiple text lines of one
 * event are merged by inserting "\n" packets (merge flag set).
 *
 * NOTE(review): text appearing before the first "-->>" marker would
 * get pts from the initial frame = AV_NOPTS_VALUE — presumably such
 * files do not occur; verify if hardening is needed.
 */
static int aqt_read_header(AVFormatContext *s)
{
    AQTitleContext *aqt = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    int new_event = 1;
    int64_t pos = 0, frame = AV_NOPTS_VALUE;
    AVPacket *sub = NULL;

    if (!st)
        return AVERROR(ENOMEM);
    /* one tick per frame: time base is the inverse of the frame rate */
    avpriv_set_pts_info(st, 64, aqt->frame_rate.den, aqt->frame_rate.num);
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_TEXT;

    while (!url_feof(s->pb)) {
        char line[4096];
        int len = ff_get_line(s->pb, line, sizeof(line));

        if (!len)
            break;

        line[strcspn(line, "\r\n")] = 0;

        if (sscanf(line, "-->> %"SCNd64, &frame) == 1) {
            new_event = 1;
            pos = avio_tell(s->pb);
            if (sub) {
                /* previous event ends where this one starts */
                sub->duration = frame - sub->pts;
                sub = NULL;
            }
        } else if (*line) {
            if (!new_event) {
                /* continuation line: join with the previous line */
                sub = ff_subtitles_queue_insert(&aqt->q, "\n", 1, 1);
                if (!sub)
                    return AVERROR(ENOMEM);
            }
            sub = ff_subtitles_queue_insert(&aqt->q, line, strlen(line), !new_event);
            if (!sub)
                return AVERROR(ENOMEM);
            if (new_event) {
                sub->pts = frame;
                sub->duration = -1;
                sub->pos = pos;
            }
            new_event = 0;
        }
    }

    ff_subtitles_queue_finalize(&aqt->q);
    return 0;
}
 
/** Pop the next queued subtitle event as a packet. */
static int aqt_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AQTitleContext *ctx = s->priv_data;

    return ff_subtitles_queue_read_packet(&ctx->q, pkt);
}
 
/** Seek within the pre-parsed subtitle queue. */
static int aqt_read_seek(AVFormatContext *s, int stream_index,
                         int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    AQTitleContext *ctx = s->priv_data;

    return ff_subtitles_queue_seek(&ctx->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/** Release all events held in the subtitle queue. */
static int aqt_read_close(AVFormatContext *s)
{
    AQTitleContext *ctx = s->priv_data;

    ff_subtitles_queue_clean(&ctx->q);

    return 0;
}
 
#define OFFSET(x) offsetof(AQTitleContext, x)
#define SD AV_OPT_FLAG_SUBTITLE_PARAM|AV_OPT_FLAG_DECODING_PARAM
/* private options: AQTitle timestamps are frame numbers, so the movie
 * frame rate must be supplied (default 25 fps) */
static const AVOption aqt_options[] = {
    { "subfps", "set the movie frame rate", OFFSET(frame_rate), AV_OPT_TYPE_RATIONAL, {.dbl=25}, 0, INT_MAX, SD },
    { NULL }
};

static const AVClass aqt_class = {
    .class_name = "aqtdec",
    .item_name  = av_default_item_name,
    .option     = aqt_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/** AQTitle subtitle demuxer registration. */
AVInputFormat ff_aqtitle_demuxer = {
    .name           = "aqtitle",
    .long_name      = NULL_IF_CONFIG_SMALL("AQTitle subtitles"),
    .priv_data_size = sizeof(AQTitleContext),
    .read_probe     = aqt_probe,
    .read_header    = aqt_read_header,
    .read_packet    = aqt_read_packet,
    .read_seek2     = aqt_read_seek,
    .read_close     = aqt_read_close,
    .extensions     = "aqt",
    .priv_class     = &aqt_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/asf.c
0,0 → 1,166
/*
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "asf.h"
 
/* Well-known ASF object GUIDs. Each is stored as the 16 raw bytes in the
 * order they appear on disk, so they can be compared directly (via
 * ff_guidcmp) against bytes read from the file. */
const ff_asf_guid ff_asf_header = {
    0x30, 0x26, 0xB2, 0x75, 0x8E, 0x66, 0xCF, 0x11, 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C
};

const ff_asf_guid ff_asf_file_header = {
    0xA1, 0xDC, 0xAB, 0x8C, 0x47, 0xA9, 0xCF, 0x11, 0x8E, 0xE4, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65
};

const ff_asf_guid ff_asf_stream_header = {
    0x91, 0x07, 0xDC, 0xB7, 0xB7, 0xA9, 0xCF, 0x11, 0x8E, 0xE6, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65
};

const ff_asf_guid ff_asf_ext_stream_header = {
    0xCB, 0xA5, 0xE6, 0x14, 0x72, 0xC6, 0x32, 0x43, 0x83, 0x99, 0xA9, 0x69, 0x52, 0x06, 0x5B, 0x5A
};

const ff_asf_guid ff_asf_audio_stream = {
    0x40, 0x9E, 0x69, 0xF8, 0x4D, 0x5B, 0xCF, 0x11, 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B
};

const ff_asf_guid ff_asf_audio_conceal_none = {
    // 0x40, 0xa4, 0xf1, 0x49, 0x4ece, 0x11d0, 0xa3, 0xac, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6
    // New value lifted from avifile
    0x00, 0x57, 0xfb, 0x20, 0x55, 0x5B, 0xCF, 0x11, 0xa8, 0xfd, 0x00, 0x80, 0x5f, 0x5c, 0x44, 0x2b
};

const ff_asf_guid ff_asf_audio_conceal_spread = {
    0x50, 0xCD, 0xC3, 0xBF, 0x8F, 0x61, 0xCF, 0x11, 0x8B, 0xB2, 0x00, 0xAA, 0x00, 0xB4, 0xE2, 0x20
};

const ff_asf_guid ff_asf_video_stream = {
    0xC0, 0xEF, 0x19, 0xBC, 0x4D, 0x5B, 0xCF, 0x11, 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B
};

const ff_asf_guid ff_asf_jfif_media = {
    0x00, 0xE1, 0x1B, 0xB6, 0x4E, 0x5B, 0xCF, 0x11, 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B
};

const ff_asf_guid ff_asf_video_conceal_none = {
    0x00, 0x57, 0xFB, 0x20, 0x55, 0x5B, 0xCF, 0x11, 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B
};

const ff_asf_guid ff_asf_command_stream = {
    0xC0, 0xCF, 0xDA, 0x59, 0xE6, 0x59, 0xD0, 0x11, 0xA3, 0xAC, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6
};

const ff_asf_guid ff_asf_comment_header = {
    0x33, 0x26, 0xb2, 0x75, 0x8E, 0x66, 0xCF, 0x11, 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c
};

const ff_asf_guid ff_asf_codec_comment_header = {
    0x40, 0x52, 0xD1, 0x86, 0x1D, 0x31, 0xD0, 0x11, 0xA3, 0xA4, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6
};
const ff_asf_guid ff_asf_codec_comment1_header = {
    0x41, 0x52, 0xd1, 0x86, 0x1D, 0x31, 0xD0, 0x11, 0xa3, 0xa4, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6
};

const ff_asf_guid ff_asf_data_header = {
    0x36, 0x26, 0xb2, 0x75, 0x8E, 0x66, 0xCF, 0x11, 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c
};

const ff_asf_guid ff_asf_head1_guid = {
    0xb5, 0x03, 0xbf, 0x5f, 0x2E, 0xA9, 0xCF, 0x11, 0x8e, 0xe3, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65
};

const ff_asf_guid ff_asf_head2_guid = {
    0x11, 0xd2, 0xd3, 0xab, 0xBA, 0xA9, 0xCF, 0x11, 0x8e, 0xe6, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65
};

const ff_asf_guid ff_asf_extended_content_header = {
    0x40, 0xA4, 0xD0, 0xD2, 0x07, 0xE3, 0xD2, 0x11, 0x97, 0xF0, 0x00, 0xA0, 0xC9, 0x5E, 0xA8, 0x50
};

const ff_asf_guid ff_asf_simple_index_header = {
    0x90, 0x08, 0x00, 0x33, 0xB1, 0xE5, 0xCF, 0x11, 0x89, 0xF4, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xCB
};

const ff_asf_guid ff_asf_ext_stream_embed_stream_header = {
    0xe2, 0x65, 0xfb, 0x3a, 0xEF, 0x47, 0xF2, 0x40, 0xac, 0x2c, 0x70, 0xa9, 0x0d, 0x71, 0xd3, 0x43
};

const ff_asf_guid ff_asf_ext_stream_audio_stream = {
    0x9d, 0x8c, 0x17, 0x31, 0xE1, 0x03, 0x28, 0x45, 0xb5, 0x82, 0x3d, 0xf9, 0xdb, 0x22, 0xf5, 0x03
};

const ff_asf_guid ff_asf_metadata_header = {
    0xea, 0xcb, 0xf8, 0xc5, 0xaf, 0x5b, 0x77, 0x48, 0x84, 0x67, 0xaa, 0x8c, 0x44, 0xfa, 0x4c, 0xca
};

const ff_asf_guid ff_asf_metadata_library_header = {
    0x94, 0x1c, 0x23, 0x44, 0x98, 0x94, 0xd1, 0x49, 0xa1, 0x41, 0x1d, 0x13, 0x4e, 0x45, 0x70, 0x54
};

const ff_asf_guid ff_asf_marker_header = {
    0x01, 0xCD, 0x87, 0xF4, 0x51, 0xA9, 0xCF, 0x11, 0x8E, 0xE6, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65
};

const ff_asf_guid ff_asf_reserved_4 = {
    0x20, 0xdb, 0xfe, 0x4c, 0xf6, 0x75, 0xCF, 0x11, 0x9c, 0x0f, 0x00, 0xa0, 0xc9, 0x03, 0x49, 0xcb
};

/* I am not a number !!! This GUID is the one found on the PC used to
 * generate the stream */
/* NOTE: only 11 initializers are given; C zero-fills the remaining
 * 5 bytes, so the full 16-byte GUID is all zeros. */
const ff_asf_guid ff_asf_my_guid = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

const ff_asf_guid ff_asf_language_guid = {
    0xa9, 0x46, 0x43, 0x7c, 0xe0, 0xef, 0xfc, 0x4b, 0xb2, 0x29, 0x39, 0x3e, 0xde, 0x41, 0x5c, 0x85
};

const ff_asf_guid ff_asf_content_encryption = {
    0xfb, 0xb3, 0x11, 0x22, 0x23, 0xbd, 0xd2, 0x11, 0xb4, 0xb7, 0x00, 0xa0, 0xc9, 0x55, 0xfc, 0x6e
};

const ff_asf_guid ff_asf_ext_content_encryption = {
    0x14, 0xe6, 0x8a, 0x29, 0x22, 0x26, 0x17, 0x4c, 0xb9, 0x35, 0xda, 0xe0, 0x7e, 0xe9, 0x28, 0x9c
};

const ff_asf_guid ff_asf_digital_signature = {
    0xfc, 0xb3, 0x11, 0x22, 0x23, 0xbd, 0xd2, 0x11, 0xb4, 0xb7, 0x00, 0xa0, 0xc9, 0x55, 0xfc, 0x6e
};
 
/* List of official tags at http://msdn.microsoft.com/en-us/library/dd743066(VS.85).aspx */
/* Mapping from ASF/WM metadata tag names to FFmpeg's generic metadata
 * keys; terminated by the all-zero entry. */
const AVMetadataConv ff_asf_metadata_conv[] = {
    { "WM/AlbumArtist",          "album_artist"     },
    { "WM/AlbumTitle",           "album"            },
    { "Author",                  "artist"           },
    { "Description",             "comment"          },
    { "WM/Composer",             "composer"         },
    { "WM/EncodedBy",            "encoded_by"       },
    { "WM/EncodingSettings",     "encoder"          },
    { "WM/Genre",                "genre"            },
    { "WM/Language",             "language"         },
    { "WM/OriginalFilename",     "filename"         },
    { "WM/PartOfSet",            "disc"             },
    { "WM/Publisher",            "publisher"        },
    { "WM/Tool",                 "encoder"          },
    { "WM/TrackNumber",          "track"            },
    { "WM/MediaStationCallSign", "service_provider" },
    { "WM/MediaStationName",     "service_name"     },
    // { "Year"               , "date"        }, TODO: conversion year<->date
    { 0 }
};
/contrib/sdk/sources/ffmpeg/libavformat/asf.h
0,0 → 1,193
/*
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_ASF_H
#define AVFORMAT_ASF_H
 
#include <stdint.h>
#include "avformat.h"
#include "metadata.h"
#include "riff.h"
 
#define PACKET_SIZE 3200
 
/* One payload-extension-system entry attached to a stream
 * (type byte plus extension data size). */
typedef struct ASFPayload {
    uint8_t type;
    uint16_t size;
} ASFPayload;
 
/* Per-stream demuxing state kept by the ASF (de)muxer. */
typedef struct ASFStream {
    int num;                 ///< stream number
    unsigned char seq;       ///< sequence/fragment counter
    /* use for reading */
    AVPacket pkt;            ///< packet currently being assembled
    int frag_offset;         ///< byte offset of the next fragment in pkt
    int packet_obj_size;     ///< total size of the media object being read
    int timestamp;
    int64_t duration;
    int skip_to_key;         ///< drop data until the next key frame

    int ds_span;             /* descrambling */
    int ds_packet_size;
    int ds_chunk_size;

    int64_t packet_pos;      ///< file position of the current data packet

    uint16_t stream_language_index;

    int palette_changed;     ///< nonzero if palette[] must be sent with the next packet
    uint32_t palette[256];

    int payload_ext_ct;      ///< number of valid entries in payload[]
    ASFPayload payload[8];
} ASFStream;
 
/* Fields of the ASF File Properties object (the "main header"). */
typedef struct ASFMainHeader {
    ff_asf_guid guid;       ///< generated by client computer
    uint64_t file_size;     /**< in bytes
                             *   invalid if broadcasting */
    uint64_t create_time;   /**< time of creation, in 100-nanosecond units since 1.1.1601
                             *   invalid if broadcasting */
    uint64_t play_time;     /**< play time, in 100-nanosecond units
                             *   invalid if broadcasting */
    uint64_t send_time;     /**< time to send file, in 100-nanosecond units
                             *   invalid if broadcasting (could be ignored) */
    uint32_t preroll;       /**< timestamp of the first packet, in milliseconds
                             *   if nonzero - subtract from time */
    uint32_t ignore;        ///< preroll is 64bit - but let's just ignore it
    uint32_t flags;         /**< 0x01 - broadcast
                             *   0x02 - seekable
                             *   rest is reserved should be 0 */
    uint32_t min_pktsize;   /**< size of a data packet
                             *   invalid if broadcasting */
    uint32_t max_pktsize;   /**< shall be the same as for min_pktsize
                             *   invalid if broadcasting */
    uint32_t max_bitrate;   /**< bandwidth of stream in bps
                             *   should be the sum of bitrates of the
                             *   individual media streams */
} ASFMainHeader;
 
 
/* One entry of the ASF simple index: maps a send time to a packet
 * number/count and a byte offset. */
typedef struct ASFIndex {
    uint32_t packet_number;
    uint16_t packet_count;
    uint64_t send_time;
    uint64_t offset;
} ASFIndex;
 
extern const ff_asf_guid ff_asf_header;
extern const ff_asf_guid ff_asf_file_header;
extern const ff_asf_guid ff_asf_stream_header;
extern const ff_asf_guid ff_asf_ext_stream_header;
extern const ff_asf_guid ff_asf_audio_stream;
extern const ff_asf_guid ff_asf_audio_conceal_none;
extern const ff_asf_guid ff_asf_audio_conceal_spread;
extern const ff_asf_guid ff_asf_video_stream;
extern const ff_asf_guid ff_asf_jfif_media;
extern const ff_asf_guid ff_asf_video_conceal_none;
extern const ff_asf_guid ff_asf_command_stream;
extern const ff_asf_guid ff_asf_comment_header;
extern const ff_asf_guid ff_asf_codec_comment_header;
extern const ff_asf_guid ff_asf_codec_comment1_header;
extern const ff_asf_guid ff_asf_data_header;
extern const ff_asf_guid ff_asf_head1_guid;
extern const ff_asf_guid ff_asf_head2_guid;
extern const ff_asf_guid ff_asf_extended_content_header;
extern const ff_asf_guid ff_asf_simple_index_header;
extern const ff_asf_guid ff_asf_ext_stream_embed_stream_header;
extern const ff_asf_guid ff_asf_ext_stream_audio_stream;
extern const ff_asf_guid ff_asf_metadata_header;
extern const ff_asf_guid ff_asf_metadata_library_header;
extern const ff_asf_guid ff_asf_marker_header;
extern const ff_asf_guid ff_asf_reserved_4;
extern const ff_asf_guid ff_asf_my_guid;
extern const ff_asf_guid ff_asf_language_guid;
extern const ff_asf_guid ff_asf_content_encryption;
extern const ff_asf_guid ff_asf_ext_content_encryption;
extern const ff_asf_guid ff_asf_digital_signature;
 
extern const AVMetadataConv ff_asf_metadata_conv[];
 
#define ASF_PACKET_FLAG_ERROR_CORRECTION_PRESENT 0x80 //1000 0000
 
 
// ASF data packet structure
// =========================
//
//
// -----------------------------------
// | Error Correction Data | Optional
// -----------------------------------
// | Payload Parsing Information (PPI) |
// -----------------------------------
// | Payload Data |
// -----------------------------------
// | Padding Data |
// -----------------------------------
 
 
// PPI_FLAG - Payload parsing information flags
#define ASF_PPI_FLAG_MULTIPLE_PAYLOADS_PRESENT 1
 
#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_BYTE 0x02 //0000 0010
#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_WORD 0x04 //0000 0100
#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_DWORD 0x06 //0000 0110
#define ASF_PPI_MASK_SEQUENCE_FIELD_SIZE 0x06 //0000 0110
 
#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE 0x08 //0000 1000
#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD 0x10 //0001 0000
#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_DWORD 0x18 //0001 1000
#define ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE 0x18 //0001 1000
 
#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_BYTE 0x20 //0010 0000
#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_WORD 0x40 //0100 0000
#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_DWORD 0x60 //0110 0000
#define ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE 0x60 //0110 0000
 
// PL_FLAG - Payload flags
#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_BYTE 0x01 //0000 0001
#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_WORD 0x02 //0000 0010
#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_DWORD 0x03 //0000 0011
#define ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE 0x03 //0000 0011
 
#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_BYTE 0x04 //0000 0100
#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_WORD 0x08 //0000 1000
#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_DWORD 0x0c //0000 1100
#define ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE 0x0c //0000 1100
 
#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_BYTE 0x10 //0001 0000
#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_WORD 0x20 //0010 0000
#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_DWORD 0x30 //0011 0000
#define ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE 0x30 //0011 0000
 
#define ASF_PL_FLAG_STREAM_NUMBER_LENGTH_FIELD_IS_BYTE 0x40 //0100 0000
#define ASF_PL_MASK_STREAM_NUMBER_LENGTH_FIELD_SIZE 0xc0 //1100 0000
 
#define ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_BYTE 0x40 //0100 0000
#define ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_WORD 0x80 //1000 0000
#define ASF_PL_MASK_PAYLOAD_LENGTH_FIELD_SIZE 0xc0 //1100 0000
 
#define ASF_PL_FLAG_KEY_FRAME 0x80 //1000 0000
 
extern AVInputFormat ff_asf_demuxer;
 
void ff_put_guid(AVIOContext *s, const ff_asf_guid *g);
 
#endif /* AVFORMAT_ASF_H */
/contrib/sdk/sources/ffmpeg/libavformat/asfcrypt.c
0,0 → 1,185
/*
* ASF decryption
* Copyright (c) 2007 Reimar Doeffinger
* This is a rewrite of code contained in freeme/freeme2
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/bswap.h"
#include "libavutil/common.h"
#include "libavutil/des.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/rc4.h"
#include "asfcrypt.h"
 
/**
* @brief find multiplicative inverse modulo 2 ^ 32
* @param v number to invert, must be odd!
* @return number so that result * v = 1 (mod 2^32)
*/
/**
 * @brief find multiplicative inverse modulo 2 ^ 32
 * @param v number to invert, must be odd!
 * @return number so that result * v = 1 (mod 2^32)
 */
static uint32_t inverse(uint32_t v)
{
    // cubing gives the inverse mod 16 (only the lowest 4 bits of v matter)
    uint32_t inv = v * v * v;
    int i;

    // Newton/fixpoint iteration: each pass doubles the number of correct
    // low-order bits, so three passes take us from 4 to 32 bits.
    for (i = 0; i < 3; i++)
        inv *= 2 - v * inv;
    return inv;
}
 
/**
* @brief read keys from keybuf into keys
* @param keybuf buffer containing the keys
* @param keys output key array containing the keys for encryption in
* native endianness
*/
/**
 * @brief read keys from keybuf into keys
 * @param keybuf buffer containing the keys
 * @param keys   output key array containing the keys for encryption in
 *               native endianness
 */
static void multiswap_init(const uint8_t keybuf[48], uint32_t keys[12])
{
    int i;

    /* each key is a little-endian 32-bit word forced odd (|1) so it is
     * invertible mod 2^32 */
    for (i = 0; i < 12; i++)
        keys[i] = AV_RL32(keybuf + 4 * i) | 1;
}
 
/**
* @brief invert the keys so that encryption become decryption keys and
* the other way round.
* @param keys key array of ints to invert
*/
/**
 * @brief invert the keys so that encryption become decryption keys and
 *        the other way round.
 * @param keys key array of ints to invert
 *
 * Keys 5 and 11 are additive constants and are left untouched.
 */
static void multiswap_invert_keys(uint32_t keys[12])
{
    int i;

    for (i = 0; i < 11; i++) {
        if (i == 5)
            continue;
        keys[i] = inverse(keys[i]);
    }
}
 
static uint32_t multiswap_step(const uint32_t keys[12], uint32_t v)
{
int i;
v *= keys[0];
for (i = 1; i < 5; i++) {
v = (v >> 16) | (v << 16);
v *= keys[i];
}
v += keys[5];
return v;
}
 
/* Inverse of multiswap_step: subtract keys[5], then undo the
 * multiply/rotate rounds in reverse order (keys must be the inverted
 * multipliers). */
static uint32_t multiswap_inv_step(const uint32_t keys[12], uint32_t v)
{
    int i;

    v -= keys[5];
    for (i = 4; i >= 1; i--) {
        v *= keys[i];
        v  = (v << 16) | (v >> 16); /* rotate halves */
    }
    return v * keys[0];
}
 
/**
* @brief "MultiSwap" encryption
* @param keys 32 bit numbers in machine endianness,
* 0-4 and 6-10 must be inverted from decryption
* @param key another key, this one must be the same for the decryption
* @param data data to encrypt
* @return encrypted data
*/
/**
 * @brief "MultiSwap" encryption
 * @param keys 32 bit numbers in machine endianness,
 *             0-4 and 6-10 must be inverted from decryption
 * @param key  another key, this one must be the same for the decryption
 * @param data data to encrypt
 * @return encrypted data
 */
static uint64_t multiswap_enc(const uint32_t keys[12],
                              uint64_t key, uint64_t data)
{
    uint32_t lo  = (uint32_t)data + (uint32_t)key;
    uint32_t t1  = multiswap_step(keys, lo);
    uint32_t mid = (uint32_t)(data >> 32) + t1;
    uint32_t t2  = multiswap_step(keys + 6, mid);
    uint32_t hi  = (uint32_t)(key >> 32) + t1 + t2;

    return ((uint64_t)hi << 32) | t2;
}
 
/**
* @brief "MultiSwap" decryption
* @param keys 32 bit numbers in machine endianness,
* 0-4 and 6-10 must be inverted from encryption
* @param key another key, this one must be the same as for the encryption
* @param data data to decrypt
* @return decrypted data
*/
/**
 * @brief "MultiSwap" decryption
 * @param keys 32 bit numbers in machine endianness,
 *             0-4 and 6-10 must be inverted from encryption
 * @param key  another key, this one must be the same as for the encryption
 * @param data data to decrypt
 * @return decrypted data
 */
static uint64_t multiswap_dec(const uint32_t keys[12],
                              uint64_t key, uint64_t data)
{
    uint32_t lo32 = (uint32_t)data;
    uint32_t diff = (uint32_t)(data >> 32) - lo32;
    uint32_t mid  = multiswap_inv_step(keys + 6, lo32);
    uint32_t t    = diff - (uint32_t)(key >> 32);
    uint32_t b    = mid - t;
    uint32_t a    = multiswap_inv_step(keys, t) - (uint32_t)key;

    return ((uint64_t)b << 32) | a;
}
 
/**
 * Decrypt one ASF data block in place.
 *
 * @param key  20-byte key: the first 12 bytes seed the RC4 keystream used
 *             to derive the MultiSwap keys, the last 8 are the DES key.
 * @param data block to decrypt (modified in place)
 * @param len  block length in bytes
 */
void ff_asfcrypt_dec(const uint8_t key[20], uint8_t *data, int len)
{
    struct AVDES des;
    struct AVRC4 rc4;
    int num_qwords      = len >> 3;   /* number of whole 8-byte words */
    uint8_t *qwords     = data;
    uint64_t rc4buff[8] = { 0 };
    uint64_t packetkey;
    uint32_t ms_keys[12];
    uint64_t ms_state;
    int i;

    /* blocks shorter than 16 bytes are merely XORed with the key */
    if (len < 16) {
        for (i = 0; i < len; i++)
            data[i] ^= key[i];
        return;
    }

    /* derive the MultiSwap keys from an RC4 keystream seeded with the
     * first 12 key bytes */
    av_rc4_init(&rc4, key, 12 * 8, 1);
    av_rc4_crypt(&rc4, (uint8_t *)rc4buff, NULL, sizeof(rc4buff), NULL, 1);
    multiswap_init((uint8_t *)rc4buff, ms_keys);

    /* the last qword of the block carries the per-packet RC4 key,
     * wrapped with DES and XORed with keystream words 6 and 7 */
    packetkey  = AV_RN64(&qwords[num_qwords * 8 - 8]);
    packetkey ^= rc4buff[7];
    av_des_init(&des, key + 12, 64, 1);
    av_des_crypt(&des, (uint8_t *)&packetkey, (uint8_t *)&packetkey, 1, NULL, 1);
    packetkey ^= rc4buff[6];

    /* RC4-decrypt the whole block with the recovered per-packet key */
    av_rc4_init(&rc4, (uint8_t *)&packetkey, 64, 1);
    av_rc4_crypt(&rc4, data, data, len, NULL, 1);

    /* run the MultiSwap MAC over all but the last qword, then decrypt the
     * final qword (word-swapped, little-endian) with the inverted keys and
     * store the result back in place of the key qword */
    ms_state = 0;
    for (i = 0; i < num_qwords - 1; i++, qwords += 8)
        ms_state = multiswap_enc(ms_keys, ms_state, AV_RL64(qwords));
    multiswap_invert_keys(ms_keys);
    packetkey = (packetkey << 32) | (packetkey >> 32);
    packetkey = av_le2ne64(packetkey);
    packetkey = multiswap_dec(ms_keys, ms_state, packetkey);
    AV_WL64(qwords, packetkey);
}
/contrib/sdk/sources/ffmpeg/libavformat/asfcrypt.h
0,0 → 1,29
/*
* ASF decryption
* Copyright (c) 2007 Reimar Doeffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_ASFCRYPT_H
#define AVFORMAT_ASFCRYPT_H
 
#include <inttypes.h>
 
void ff_asfcrypt_dec(const uint8_t key[20], uint8_t *data, int len);
 
#endif /* AVFORMAT_ASFCRYPT_H */
/contrib/sdk/sources/ffmpeg/libavformat/asfdec.c
0,0 → 1,1568
/*
* ASF compatible demuxer
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bswap.h"
#include "libavutil/common.h"
#include "libavutil/dict.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "avio_internal.h"
#include "avlanguage.h"
#include "id3v2.h"
#include "internal.h"
#include "riff.h"
#include "asf.h"
#include "asfcrypt.h"
 
/* Demuxer-private state for the ASF reader. */
typedef struct {
    const AVClass *class;
    int asfid2avid[128];                 ///< conversion table from asf ID 2 AVStream ID
    ASFStream streams[128];              ///< it's max number and it's not that big
    uint32_t stream_bitrates[128];       ///< max number of streams, bitrate for each (for streaming)
    AVRational dar[128];
    char stream_languages[128][6];       ///< max number of streams, language for each (RFC1766, e.g. en-US)
    /* non streamed additional info */
    /* packet filling */
    int packet_size_left;
    /* only for reading */
    uint64_t data_offset;                ///< beginning of the first data packet
    uint64_t data_object_offset;         ///< data object offset (excl. GUID & size)
    uint64_t data_object_size;           ///< size of the data object
    int index_read;                      ///< nonzero once the simple index has been read

    ASFMainHeader hdr;                   ///< parsed file-properties header

    /* state of the data packet currently being parsed */
    int packet_flags;
    int packet_property;
    int packet_timestamp;
    int packet_segsizetype;
    int packet_segments;
    int packet_seq;
    int packet_replic_size;
    int packet_key_frame;
    int packet_padsize;
    unsigned int packet_frag_offset;
    unsigned int packet_frag_size;
    int64_t packet_frag_timestamp;
    int packet_multi_size;
    int packet_time_delta;
    int packet_time_start;
    int64_t packet_pos;

    int stream_index;

    ASFStream *asf_st;                   ///< currently decoded stream

    int no_resync_search;                ///< user option: disable resync scanning
} ASFContext;
 
/* Private demuxer options (NULL-terminated) and the AVClass that
 * exposes them through the AVOption API. */
static const AVOption options[] = {
    { "no_resync_search", "Don't try to resynchronize by looking for a certain optional start code", offsetof(ASFContext, no_resync_search), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass asf_class = {
    .class_name = "asf demuxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
#undef NDEBUG
#include <assert.h>
 
#define ASF_MAX_STREAMS 127
#define FRAME_HEADER_SIZE 16
// Fix Me! FRAME_HEADER_SIZE may be different. (17 is known to be too large)
 
#ifdef DEBUG
static const ff_asf_guid stream_bitrate_guid = { /* (http://get.to/sdp) */
    0xce, 0x75, 0xf8, 0x7b, 0x8d, 0x46, 0xd1, 0x11, 0x8d, 0x82, 0x00, 0x60, 0x97, 0xc9, 0xa2, 0xb2
};

/* Emit the symbolic name of a known GUID via debug logging; expands to an
 * if-statement so it can chain with `else`. */
#define PRINT_IF_GUID(g, cmp) \
    if (!ff_guidcmp(g, &cmp)) \
        av_dlog(NULL, "(GUID: %s) ", # cmp)

/* Debug helper: log the symbolic name (if known) and the raw bytes of a
 * GUID. Compiled only with DEBUG; otherwise a no-op macro below. */
static void print_guid(ff_asf_guid *g)
{
    int i;
    PRINT_IF_GUID(g, ff_asf_header);
    else PRINT_IF_GUID(g, ff_asf_file_header);
    else PRINT_IF_GUID(g, ff_asf_stream_header);
    else PRINT_IF_GUID(g, ff_asf_audio_stream);
    else PRINT_IF_GUID(g, ff_asf_audio_conceal_none);
    else PRINT_IF_GUID(g, ff_asf_video_stream);
    else PRINT_IF_GUID(g, ff_asf_video_conceal_none);
    else PRINT_IF_GUID(g, ff_asf_command_stream);
    else PRINT_IF_GUID(g, ff_asf_comment_header);
    else PRINT_IF_GUID(g, ff_asf_codec_comment_header);
    else PRINT_IF_GUID(g, ff_asf_codec_comment1_header);
    else PRINT_IF_GUID(g, ff_asf_data_header);
    else PRINT_IF_GUID(g, ff_asf_simple_index_header);
    else PRINT_IF_GUID(g, ff_asf_head1_guid);
    else PRINT_IF_GUID(g, ff_asf_head2_guid);
    else PRINT_IF_GUID(g, ff_asf_my_guid);
    else PRINT_IF_GUID(g, ff_asf_ext_stream_header);
    else PRINT_IF_GUID(g, ff_asf_extended_content_header);
    else PRINT_IF_GUID(g, ff_asf_ext_stream_embed_stream_header);
    else PRINT_IF_GUID(g, ff_asf_ext_stream_audio_stream);
    else PRINT_IF_GUID(g, ff_asf_metadata_header);
    else PRINT_IF_GUID(g, ff_asf_metadata_library_header);
    else PRINT_IF_GUID(g, ff_asf_marker_header);
    else PRINT_IF_GUID(g, stream_bitrate_guid);
    else PRINT_IF_GUID(g, ff_asf_language_guid);
    else
        av_dlog(NULL, "(GUID: unknown) ");
    for (i = 0; i < 16; i++)
        av_dlog(NULL, " 0x%02x,", (*g)[i]);
    av_dlog(NULL, "}\n");
}
#undef PRINT_IF_GUID
#else
#define print_guid(g)
#endif
 
/* Probe callback: a buffer beginning with the ASF header GUID is
 * certainly an ASF file, anything else is rejected. */
static int asf_probe(AVProbeData *pd)
{
    return ff_guidcmp(pd->buf, &ff_asf_header) ? 0 : AVPROBE_SCORE_MAX;
}
 
/* size of type 2 (BOOL) is 32bit for "Extended Content Description Object"
* but 16 bit for "Metadata Object" and "Metadata Library Object" */
/* Read an integer attribute value of the given ASF type from pb.
 * size of type 2 (BOOL) is 32bit for "Extended Content Description Object"
 * but 16 bit for "Metadata Object" and "Metadata Library Object";
 * type2_size selects between the two. Unknown types yield INT_MIN. */
static int get_value(AVIOContext *pb, int type, int type2_size)
{
    switch (type) {
    case 2:
        if (type2_size == 32)
            return avio_rl32(pb);
        return avio_rl16(pb);
    case 3:
        return avio_rl32(pb);
    case 4:
        return avio_rl64(pb);
    case 5:
        return avio_rl16(pb);
    }
    return INT_MIN;
}
 
/* MSDN claims that this should be "compatible with the ID3 frame, APIC",
* but in reality this is only loosely similar */
static int asf_read_picture(AVFormatContext *s, int len)
{
AVPacket pkt = { 0 };
const CodecMime *mime = ff_id3v2_mime_tags;
enum AVCodecID id = AV_CODEC_ID_NONE;
char mimetype[64];
uint8_t *desc = NULL;
AVStream *st = NULL;
int ret, type, picsize, desc_len;
 
/* type + picsize + mime + desc */
if (len < 1 + 4 + 2 + 2) {
av_log(s, AV_LOG_ERROR, "Invalid attached picture size: %d.\n", len);
return AVERROR_INVALIDDATA;
}
 
/* picture type */
type = avio_r8(s->pb);
len--;
if (type >= FF_ARRAY_ELEMS(ff_id3v2_picture_types) || type < 0) {
av_log(s, AV_LOG_WARNING, "Unknown attached picture type: %d.\n", type);
type = 0;
}
 
/* picture data size */
picsize = avio_rl32(s->pb);
len -= 4;
 
/* picture MIME type */
len -= avio_get_str16le(s->pb, len, mimetype, sizeof(mimetype));
while (mime->id != AV_CODEC_ID_NONE) {
if (!strncmp(mime->str, mimetype, sizeof(mimetype))) {
id = mime->id;
break;
}
mime++;
}
if (id == AV_CODEC_ID_NONE) {
av_log(s, AV_LOG_ERROR, "Unknown attached picture mimetype: %s.\n",
mimetype);
return 0;
}
 
if (picsize >= len) {
av_log(s, AV_LOG_ERROR, "Invalid attached picture data size: %d >= %d.\n",
picsize, len);
return AVERROR_INVALIDDATA;
}
 
/* picture description */
desc_len = (len - picsize) * 2 + 1;
desc = av_malloc(desc_len);
if (!desc)
return AVERROR(ENOMEM);
len -= avio_get_str16le(s->pb, len - picsize, desc, desc_len);
 
ret = av_get_packet(s->pb, &pkt, picsize);
if (ret < 0)
goto fail;
 
st = avformat_new_stream(s, NULL);
if (!st) {
ret = AVERROR(ENOMEM);
goto fail;
}
st->disposition |= AV_DISPOSITION_ATTACHED_PIC;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = id;
st->attached_pic = pkt;
st->attached_pic.stream_index = st->index;
st->attached_pic.flags |= AV_PKT_FLAG_KEY;
 
if (*desc)
av_dict_set(&st->metadata, "title", desc, AV_DICT_DONT_STRDUP_VAL);
else
av_freep(&desc);
 
av_dict_set(&st->metadata, "comment", ff_id3v2_picture_types[type], 0);
 
return 0;
 
fail:
av_freep(&desc);
av_free_packet(&pkt);
return ret;
}
 
/* Parse an embedded ID3v2 tag (including any attached pictures) from the
 * current position of s->pb. Note: len is not used here. */
static void get_id3_tag(AVFormatContext *s, int len)
{
    ID3v2ExtraMeta *extra = NULL;

    ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &extra);
    if (extra)
        ff_id3v2_parse_apic(s, &extra);
    ff_id3v2_free_extra_meta(&extra);
}
 
/* Read one metadata attribute of `len` bytes and store it under `key` in
 * s->metadata. `type` is the ASF value type (0 = UTF-16LE string,
 * -1 = ASCII, 1 = byte array, 2..5 = integer types, 6 = GUID); type2_size
 * disambiguates the BOOL width (see get_value). Always leaves the IO
 * position exactly `len` bytes past where it started. */
static void get_tag(AVFormatContext *s, const char *key, int type, int len, int type2_size)
{
    char *value;
    int64_t off = avio_tell(s->pb);
#define LEN 22

    /* guard the 2*len+LEN allocation below against overflow */
    if ((unsigned)len >= (UINT_MAX - LEN) / 2)
        return;

    value = av_malloc(2 * len + LEN);
    if (!value)
        goto finish;

    if (type == 0) {         // UTF16-LE
        avio_get_str16le(s->pb, len, value, 2 * len + 1);
    } else if (type == -1) { // ASCII
        avio_read(s->pb, value, len);
        value[len]=0;
    } else if (type == 1) {  // byte array
        if (!strcmp(key, "WM/Picture")) {      // handle cover art
            asf_read_picture(s, len);
        } else if (!strcmp(key, "ID3")) {      // handle ID3 tag
            get_id3_tag(s, len);
        } else {
            av_log(s, AV_LOG_VERBOSE, "Unsupported byte array in tag %s.\n", key);
        }
        goto finish;
    } else if (type > 1 && type <= 5) {  // boolean or DWORD or QWORD or WORD
        uint64_t num = get_value(s->pb, type, type2_size);
        snprintf(value, LEN, "%"PRIu64, num);
    } else if (type == 6) { // (don't) handle GUID
        av_log(s, AV_LOG_DEBUG, "Unsupported GUID value in tag %s.\n", key);
        goto finish;
    } else {
        av_log(s, AV_LOG_DEBUG,
               "Unsupported value type %d in tag %s.\n", type, key);
        goto finish;
    }
    if (*value)
        av_dict_set(&s->metadata, key, value, 0);

finish:
    av_freep(&value);
    /* reposition past the attribute regardless of how much was consumed */
    avio_seek(s->pb, off + len, SEEK_SET);
}
 
/* Parse the ASF File Properties object into asf->hdr. The fields are
 * read in their fixed on-disk order; a min_pktsize of 2^29 or larger is
 * rejected as invalid. */
static int asf_read_file_properties(AVFormatContext *s, int64_t size)
{
    ASFContext *asf = s->priv_data;
    AVIOContext *pb = s->pb;

    ff_get_guid(pb, &asf->hdr.guid);
    asf->hdr.file_size   = avio_rl64(pb);
    asf->hdr.create_time = avio_rl64(pb);
    avio_rl64(pb);                      /* number of packets */
    asf->hdr.play_time   = avio_rl64(pb);
    asf->hdr.send_time   = avio_rl64(pb);
    asf->hdr.preroll     = avio_rl32(pb);
    asf->hdr.ignore      = avio_rl32(pb);
    asf->hdr.flags       = avio_rl32(pb);
    asf->hdr.min_pktsize = avio_rl32(pb);
    asf->hdr.max_pktsize = avio_rl32(pb);
    if (asf->hdr.min_pktsize >= (1U << 29))
        return AVERROR_INVALIDDATA;
    asf->hdr.max_bitrate = avio_rl32(pb);
    s->packet_size       = asf->hdr.max_pktsize;

    return 0;
}
 
/* Parse one ASF Stream Properties object of `size` bytes: create the
 * corresponding AVStream, detect its media type from the stream-type
 * GUID, and fill codec parameters (WAVEFORMATEX for audio, BITMAPINFO-
 * style header for video, including any palette/extradata). Returns 0 on
 * success or a negative error. */
static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
{
    ASFContext *asf = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    ASFStream *asf_st;
    ff_asf_guid g;
    enum AVMediaType type;
    int type_specific_size, sizeX;
    unsigned int tag1;
    int64_t pos1, pos2, start_time;
    int test_for_ext_stream_audio, is_dvr_ms_audio = 0;

    if (s->nb_streams == ASF_MAX_STREAMS) {
        av_log(s, AV_LOG_ERROR, "too many streams\n");
        return AVERROR(EINVAL);
    }

    pos1 = avio_tell(pb);

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
    start_time = asf->hdr.preroll;

    if (!(asf->hdr.flags & 0x01)) { // if we aren't streaming...
        /* only trust the header's play time when the real file size
         * roughly matches the size recorded in the header */
        int64_t fsize = avio_size(pb);
        if (fsize <= 0 || (int64_t)asf->hdr.file_size <= 0 || FFABS(fsize - (int64_t)asf->hdr.file_size) < 10000)
            st->duration = asf->hdr.play_time /
                           (10000000 / 1000) - start_time;
    }
    ff_get_guid(pb, &g);

    /* media type is identified by the stream-type GUID */
    test_for_ext_stream_audio = 0;
    if (!ff_guidcmp(&g, &ff_asf_audio_stream)) {
        type = AVMEDIA_TYPE_AUDIO;
    } else if (!ff_guidcmp(&g, &ff_asf_video_stream)) {
        type = AVMEDIA_TYPE_VIDEO;
    } else if (!ff_guidcmp(&g, &ff_asf_jfif_media)) {
        type                = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = AV_CODEC_ID_MJPEG;
    } else if (!ff_guidcmp(&g, &ff_asf_command_stream)) {
        type = AVMEDIA_TYPE_DATA;
    } else if (!ff_guidcmp(&g, &ff_asf_ext_stream_embed_stream_header)) {
        test_for_ext_stream_audio = 1;
        type                      = AVMEDIA_TYPE_UNKNOWN;
    } else {
        return -1;
    }
    ff_get_guid(pb, &g);            /* error-correction-type GUID */
    avio_skip(pb, 8);               /* total_size */
    type_specific_size = avio_rl32(pb);
    avio_rl32(pb);
    st->id = avio_rl16(pb) & 0x7f;  /* stream id */
    // mapping of asf ID to AV stream ID;
    asf->asfid2avid[st->id] = s->nb_streams - 1;
    asf_st                  = &asf->streams[st->id];

    avio_rl32(pb);

    /* embedded (dvr-ms) streams carry the real type one GUID deeper */
    if (test_for_ext_stream_audio) {
        ff_get_guid(pb, &g);
        if (!ff_guidcmp(&g, &ff_asf_ext_stream_audio_stream)) {
            type            = AVMEDIA_TYPE_AUDIO;
            is_dvr_ms_audio = 1;
            ff_get_guid(pb, &g);
            avio_rl32(pb);
            avio_rl32(pb);
            avio_rl32(pb);
            ff_get_guid(pb, &g);
            avio_rl32(pb);
        }
    }

    st->codec->codec_type = type;
    if (type == AVMEDIA_TYPE_AUDIO) {
        int ret = ff_get_wav_header(pb, st->codec, type_specific_size);
        if (ret < 0)
            return ret;
        if (is_dvr_ms_audio) {
            // codec_id and codec_tag are unreliable in dvr_ms
            // files. Set them later by probing stream.
            st->request_probe    = 1;
            st->codec->codec_tag = 0;
        }
        if (st->codec->codec_id == AV_CODEC_ID_AAC)
            st->need_parsing = AVSTREAM_PARSE_NONE;
        else
            st->need_parsing = AVSTREAM_PARSE_FULL;
        /* We have to init the frame size at some point .... */
        pos2 = avio_tell(pb);
        if (size >= (pos2 + 8 - pos1 + 24)) {
            asf_st->ds_span        = avio_r8(pb);
            asf_st->ds_packet_size = avio_rl16(pb);
            asf_st->ds_chunk_size  = avio_rl16(pb);
            avio_rl16(pb);  // ds_data_size
            avio_r8(pb);    // ds_silence_data
        }
        if (asf_st->ds_span > 1) {
            /* sanity-check the descrambling parameters */
            if (!asf_st->ds_chunk_size ||
                (asf_st->ds_packet_size / asf_st->ds_chunk_size <= 1) ||
                asf_st->ds_packet_size % asf_st->ds_chunk_size)
                asf_st->ds_span = 0;  // disable descrambling
        }
    } else if (type == AVMEDIA_TYPE_VIDEO &&
               size - (avio_tell(pb) - pos1 + 24) >= 51) {
        avio_rl32(pb);
        avio_rl32(pb);
        avio_r8(pb);
        avio_rl16(pb);                      /* size */
        sizeX             = avio_rl32(pb);  /* size */
        st->codec->width  = avio_rl32(pb);
        st->codec->height = avio_rl32(pb);
        /* not available for asf */
        avio_rl16(pb);  /* panes */
        st->codec->bits_per_coded_sample = avio_rl16(pb);  /* depth */
        tag1 = avio_rl32(pb);
        avio_skip(pb, 20);
        if (sizeX > 40) {
            st->codec->extradata_size = ffio_limit(pb, sizeX - 40);
            st->codec->extradata      = av_mallocz(st->codec->extradata_size +
                                                   FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            avio_read(pb, st->codec->extradata, st->codec->extradata_size);
        }

        /* Extract palette from extradata if bpp <= 8 */
        /* This code assumes that extradata contains only palette */
        /* This is true for all paletted codecs implemented in libavcodec */
        if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) {
#if HAVE_BIGENDIAN
            int i;
            for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE) / 4; i++)
                asf_st->palette[i] = av_bswap32(((uint32_t *)st->codec->extradata)[i]);
#else
            memcpy(asf_st->palette, st->codec->extradata,
                   FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
#endif
            asf_st->palette_changed = 1;
        }

        st->codec->codec_tag = tag1;
        st->codec->codec_id  = ff_codec_get_id(ff_codec_bmp_tags, tag1);
        if (tag1 == MKTAG('D', 'V', 'R', ' ')) {
            st->need_parsing = AVSTREAM_PARSE_FULL;
            /* issue658 contains wrong w/h and MS even puts a fake seq header
             * with wrong w/h in extradata while a correct one is in the stream.
             * maximum lameness */
            st->codec->width  =
            st->codec->height = 0;
            av_freep(&st->codec->extradata);
            st->codec->extradata_size = 0;
        }
        if (st->codec->codec_id == AV_CODEC_ID_H264)
            st->need_parsing = AVSTREAM_PARSE_FULL_ONCE;
    }
    pos2 = avio_tell(pb);
    /* skip any remainder of the object so the caller continues at its end */
    avio_skip(pb, size - (pos2 - pos1 + 24));

    return 0;
}
 
/**
 * Parse an ASF Extended Stream Properties object.
 *
 * Records the stream's average leak rate (used later as a bit-rate
 * fallback), its language-list index, and the table of per-payload
 * extension systems (type GUID first byte + extension size) that
 * asf_read_frame_header() consults when decoding replicated data.
 *
 * @param s    demux context
 * @param size object size (unused; the caller skips to the object end)
 * @return 0 (never fails; malformed counts simply read garbage fields)
 */
static int asf_read_ext_stream_properties(AVFormatContext *s, int64_t size)
{
    ASFContext *asf = s->priv_data;
    AVIOContext *pb = s->pb;
    ff_asf_guid g;
    int ext_len, payload_ext_ct, stream_ct, i;
    uint32_t leak_rate, stream_num;
    unsigned int stream_languageid_index;

    avio_rl64(pb); // starttime
    avio_rl64(pb); // endtime
    leak_rate = avio_rl32(pb); // leak-datarate
    avio_rl32(pb); // bucket-datasize
    avio_rl32(pb); // init-bucket-fullness
    avio_rl32(pb); // alt-leak-datarate
    avio_rl32(pb); // alt-bucket-datasize
    avio_rl32(pb); // alt-init-bucket-fullness
    avio_rl32(pb); // max-object-size
    avio_rl32(pb); // flags (reliable,seekable,no_cleanpoints?,resend-live-cleanpoints, rest of bits reserved)
    stream_num = avio_rl16(pb); // stream-num

    stream_languageid_index = avio_rl16(pb); // stream-language-id-index
    /* ASF allows at most 128 streams; ignore out-of-range stream numbers */
    if (stream_num < 128)
        asf->streams[stream_num].stream_language_index = stream_languageid_index;

    avio_rl64(pb); // avg frametime in 100ns units
    stream_ct      = avio_rl16(pb); // stream-name-count
    payload_ext_ct = avio_rl16(pb); // payload-extension-system-count

    if (stream_num < 128) {
        asf->stream_bitrates[stream_num] = leak_rate;
        asf->streams[stream_num].payload_ext_ct = 0;
    }

    /* skip the stream-name records; only their lengths are needed */
    for (i = 0; i < stream_ct; i++) {
        avio_rl16(pb);
        ext_len = avio_rl16(pb);
        avio_skip(pb, ext_len);
    }

    /* collect payload extension systems: first GUID byte identifies the
     * extension type, `size` is the per-payload data size (0xFFFF means
     * a 16-bit length prefix precedes each instance) */
    for (i = 0; i < payload_ext_ct; i++) {
        int size;
        ff_get_guid(pb, &g);
        size    = avio_rl16(pb);
        ext_len = avio_rl32(pb);
        avio_skip(pb, ext_len); // extension-system info data, unused
        if (stream_num < 128 && i < FF_ARRAY_ELEMS(asf->streams[stream_num].payload)) {
            ASFPayload *p = &asf->streams[stream_num].payload[i];
            p->type = g[0];
            p->size = size;
            av_log(s, AV_LOG_DEBUG, "Payload extension %x %d\n", g[0], p->size );
            asf->streams[stream_num].payload_ext_ct ++;
        }
    }

    return 0;
}
 
/**
 * Parse an ASF Content Description object: five UTF-16LE string lengths
 * followed by the strings themselves (title, author, copyright,
 * description, rating). The first four are stored as metadata tags;
 * the rating is skipped.
 */
static int asf_read_content_desc(AVFormatContext *s, int64_t size)
{
    AVIOContext *pb = s->pb;
    static const char *const tag_names[4] = {
        "title", "author", "copyright", "comment"
    };
    int lengths[5];
    int i;

    /* all five lengths precede the string data */
    for (i = 0; i < 5; i++)
        lengths[i] = avio_rl16(pb);

    for (i = 0; i < 4; i++)
        get_tag(s, tag_names[i], 0, lengths[i], 32);

    avio_skip(pb, lengths[4]); /* rating string, unused */

    return 0;
}
 
/**
 * Parse an ASF Extended Content Description object: a list of
 * (name, type, value) attributes. AspectRatioX/Y are captured into the
 * container-level DAR slot (index 0); everything else becomes a metadata
 * tag via get_tag().
 */
static int asf_read_ext_content_desc(AVFormatContext *s, int64_t size)
{
    AVIOContext *pb = s->pb;
    ASFContext *asf = s->priv_data;
    int desc_count, i, ret;

    desc_count = avio_rl16(pb);
    for (i = 0; i < desc_count; i++) {
        int name_len, value_type, value_len;
        char name[1024];

        name_len = avio_rl16(pb);
        if (name_len % 2) // must be even, broken lavf versions wrote len-1
            name_len += 1;
        /* if the name did not fit into `name`, skip its unread remainder */
        if ((ret = avio_get_str16le(pb, name_len, name, sizeof(name))) < name_len)
            avio_skip(pb, name_len - ret);
        value_type = avio_rl16(pb);
        value_len  = avio_rl16(pb);
        if (!value_type && value_len % 2) // type 0 = UTF-16 string: even length
            value_len += 1;
        /* My sample has that stream set to 0 maybe that mean the container.
         * ASF stream count starts at 1. I am using 0 to the container value
         * since it's unused. */
        if (!strcmp(name, "AspectRatioX"))
            asf->dar[0].num = get_value(s->pb, value_type, 32);
        else if (!strcmp(name, "AspectRatioY"))
            asf->dar[0].den = get_value(s->pb, value_type, 32);
        else
            get_tag(s, name, value_type, value_len, 32);
    }

    return 0;
}
 
/**
 * Parse an ASF Language List object: a count followed by length-prefixed
 * UTF-16LE RFC 1766 language tags. At most 128 entries are stored; the
 * tags are mapped to streams later in asf_read_header().
 */
static int asf_read_language_list(AVFormatContext *s, int64_t size)
{
    AVIOContext *pb = s->pb;
    ASFContext *asf = s->priv_data;
    int i, consumed;
    int count = avio_rl16(pb);

    for (i = 0; i < count; i++) {
        char lang[6];
        unsigned int lang_len = avio_r8(pb);

        /* read as much of the tag as fits; skip whatever is left over */
        consumed = avio_get_str16le(pb, lang_len, lang, sizeof(lang));
        if (consumed < lang_len)
            avio_skip(pb, lang_len - consumed);

        if (i < 128)
            av_strlcpy(asf->stream_languages[i], lang,
                       sizeof(*asf->stream_languages));
    }

    return 0;
}
 
/**
 * Parse an ASF Metadata (or Metadata Library) object. Unlike the extended
 * content description, each record here carries a stream number, so
 * AspectRatioX/Y are stored per stream in asf->dar[stream_num]; other
 * attributes become metadata tags.
 */
static int asf_read_metadata(AVFormatContext *s, int64_t size)
{
    AVIOContext *pb = s->pb;
    ASFContext *asf = s->priv_data;
    int n, stream_num, name_len, value_len;
    int ret, i;
    n = avio_rl16(pb); // record count

    for (i = 0; i < n; i++) {
        char name[1024];
        int value_type;

        avio_rl16(pb); // lang_list_index
        stream_num = avio_rl16(pb);
        name_len   = avio_rl16(pb);
        value_type = avio_rl16(pb); /* value_type */
        value_len  = avio_rl32(pb);

        /* skip any part of the name that did not fit into the buffer */
        if ((ret = avio_get_str16le(pb, name_len, name, sizeof(name))) < name_len)
            avio_skip(pb, name_len - ret);
        av_dlog(s, "%d stream %d name_len %2d type %d len %4d <%s>\n",
                i, stream_num, name_len, value_type, value_len, name);

        if (!strcmp(name, "AspectRatioX")){
            int aspect_x = get_value(s->pb, value_type, 16);
            if(stream_num < 128) // ASF stream numbers are 7 bit
                asf->dar[stream_num].num = aspect_x;
        } else if(!strcmp(name, "AspectRatioY")){
            int aspect_y = get_value(s->pb, value_type, 16);
            if(stream_num < 128)
                asf->dar[stream_num].den = aspect_y;
        } else {
            get_tag(s, name, value_type, value_len, 16);
        }
    }

    return 0;
}
 
/**
 * Parse an ASF Marker object and expose each marker as an AVChapter.
 * Presentation times are in 100 ns units; the preroll delay is subtracted
 * so chapter times line up with packet timestamps.
 */
static int asf_read_marker(AVFormatContext *s, int64_t size)
{
    AVIOContext *pb = s->pb;
    ASFContext *asf = s->priv_data;
    int i, count, name_len, ret;
    char name[1024];

    avio_rl64(pb); // reserved 16 bytes
    avio_rl64(pb); // ...
    count = avio_rl32(pb);    // markers count
    avio_rl16(pb);            // reserved 2 bytes
    name_len = avio_rl16(pb); // name length
    for (i = 0; i < name_len; i++)
        avio_r8(pb); // skip the name

    for (i = 0; i < count; i++) {
        int64_t pres_time;
        int name_len;

        avio_rl64(pb);             // offset, 8 bytes
        pres_time = avio_rl64(pb); // presentation time
        pres_time -= asf->hdr.preroll * 10000; // preroll is in ms; times in 100ns
        avio_rl16(pb); // entry length
        avio_rl32(pb); // send time
        avio_rl32(pb); // flags
        name_len = avio_rl32(pb); // name length, in UTF-16 code units
        /* NOTE(review): name_len*2 bytes are requested but the shortfall
         * check/skip uses name_len (code units) against ret (bytes) —
         * looks like it may under-skip truncated names; confirm against
         * the ASF spec / upstream FFmpeg before changing. */
        if ((ret = avio_get_str16le(pb, name_len * 2, name,
                                    sizeof(name))) < name_len)
            avio_skip(pb, name_len - ret);
        avpriv_new_chapter(s, i, (AVRational) { 1, 10000000 }, pres_time,
                           AV_NOPTS_VALUE, name);
    }

    return 0;
}
 
/**
 * Read the ASF header section: validate the top-level header GUID, then
 * walk the contained objects one by one (file/stream properties, content
 * descriptions, metadata, markers, ...) until the Data object is reached.
 * Afterwards, per-stream bit rates, aspect ratios and language tags
 * collected into the ASFContext are pushed onto the AVStreams.
 *
 * @return 0 on success, a negative AVERROR code on malformed input or EOF
 */
static int asf_read_header(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    ff_asf_guid g;
    AVIOContext *pb = s->pb;
    int i;
    int64_t gsize;

    ff_get_guid(pb, &g);
    if (ff_guidcmp(&g, &ff_asf_header))
        return AVERROR_INVALIDDATA;
    avio_rl64(pb); // header object size
    avio_rl32(pb); // number of sub-objects
    avio_r8(pb);   // reserved1
    avio_r8(pb);   // reserved2
    memset(&asf->asfid2avid, -1, sizeof(asf->asfid2avid));

    for (i = 0; i<128; i++)
        asf->streams[i].stream_language_index = 128; // invalid stream index means no language info

    /* object loop: each iteration reads one GUID+size header at gpos and
     * seeks to gpos+gsize at the bottom unless it `continue`s early */
    for (;;) {
        uint64_t gpos = avio_tell(pb);
        ff_get_guid(pb, &g);
        gsize = avio_rl64(pb);
        print_guid(&g);
        if (!ff_guidcmp(&g, &ff_asf_data_header)) {
            asf->data_object_offset = avio_tell(pb);
            /* If not streaming, gsize is not unlimited (how?),
             * and there is enough space in the file.. */
            if (!(asf->hdr.flags & 0x01) && gsize >= 100)
                asf->data_object_size = gsize - 24;
            else
                asf->data_object_size = (uint64_t)-1;
            break; // packets follow; header parsing is done
        }
        if (gsize < 24) // 16-byte GUID + 8-byte size is the minimum object
            return AVERROR_INVALIDDATA;
        if (!ff_guidcmp(&g, &ff_asf_file_header)) {
            int ret = asf_read_file_properties(s, gsize);
            if (ret < 0)
                return ret;
        } else if (!ff_guidcmp(&g, &ff_asf_stream_header)) {
            int ret = asf_read_stream_properties(s, gsize);
            if (ret < 0)
                return ret;
        } else if (!ff_guidcmp(&g, &ff_asf_comment_header)) {
            asf_read_content_desc(s, gsize);
        } else if (!ff_guidcmp(&g, &ff_asf_language_guid)) {
            asf_read_language_list(s, gsize);
        } else if (!ff_guidcmp(&g, &ff_asf_extended_content_header)) {
            asf_read_ext_content_desc(s, gsize);
        } else if (!ff_guidcmp(&g, &ff_asf_metadata_header)) {
            asf_read_metadata(s, gsize);
        } else if (!ff_guidcmp(&g, &ff_asf_metadata_library_header)) {
            asf_read_metadata(s, gsize);
        } else if (!ff_guidcmp(&g, &ff_asf_ext_stream_header)) {
            asf_read_ext_stream_properties(s, gsize);

            // there could be a optional stream properties object to follow
            // if so the next iteration will pick it up
            continue;
        } else if (!ff_guidcmp(&g, &ff_asf_head1_guid)) {
            // header extension object: descend into it rather than skip it
            ff_get_guid(pb, &g);
            avio_skip(pb, 6);
            continue;
        } else if (!ff_guidcmp(&g, &ff_asf_marker_header)) {
            asf_read_marker(s, gsize);
        } else if (url_feof(pb)) {
            return AVERROR_EOF;
        } else {
            if (!s->keylen) {
                if (!ff_guidcmp(&g, &ff_asf_content_encryption)) {
                    unsigned int len;
                    AVPacket pkt;
                    av_log(s, AV_LOG_WARNING,
                           "DRM protected stream detected, decoding will likely fail!\n");
                    len= avio_rl32(pb);
                    av_log(s, AV_LOG_DEBUG, "Secret data:\n");
                    av_get_packet(pb, &pkt, len); av_hex_dump_log(s, AV_LOG_DEBUG, pkt.data, pkt.size); av_free_packet(&pkt);
                    len= avio_rl32(pb);
                    get_tag(s, "ASF_Protection_Type", -1, len, 32);
                    len= avio_rl32(pb);
                    get_tag(s, "ASF_Key_ID", -1, len, 32);
                    len= avio_rl32(pb);
                    get_tag(s, "ASF_License_URL", -1, len, 32);
                } else if (!ff_guidcmp(&g, &ff_asf_ext_content_encryption)) {
                    av_log(s, AV_LOG_WARNING,
                           "Ext DRM protected stream detected, decoding will likely fail!\n");
                    av_dict_set(&s->metadata, "encryption", "ASF Extended Content Encryption", 0);
                } else if (!ff_guidcmp(&g, &ff_asf_digital_signature)) {
                    av_log(s, AV_LOG_INFO, "Digital signature detected!\n");
                }
            }
        }
        if (avio_tell(pb) != gpos + gsize)
            av_log(s, AV_LOG_DEBUG,
                   "gpos mismatch our pos=%"PRIu64", end=%"PRId64"\n",
                   avio_tell(pb) - gpos, gsize);
        // resynchronize to the declared end of the object
        avio_seek(pb, gpos + gsize, SEEK_SET);
    }
    // skip the Data object preamble (GUID+size already read in the loop)
    ff_get_guid(pb, &g);
    avio_rl64(pb);
    avio_r8(pb);
    avio_r8(pb);
    if (url_feof(pb))
        return AVERROR_EOF;
    asf->data_offset      = avio_tell(pb);
    asf->packet_size_left = 0;

    /* propagate header-collected values onto the exported streams */
    for (i = 0; i < 128; i++) {
        int stream_num = asf->asfid2avid[i];
        if (stream_num >= 0) {
            AVStream *st = s->streams[stream_num];
            if (!st->codec->bit_rate)
                st->codec->bit_rate = asf->stream_bitrates[i];
            if (asf->dar[i].num > 0 && asf->dar[i].den > 0) {
                av_reduce(&st->sample_aspect_ratio.num,
                          &st->sample_aspect_ratio.den,
                          asf->dar[i].num, asf->dar[i].den, INT_MAX);
            } else if ((asf->dar[0].num > 0) && (asf->dar[0].den > 0) &&
                       // Use ASF container value if the stream doesn't set AR.
                       (st->codec->codec_type == AVMEDIA_TYPE_VIDEO))
                av_reduce(&st->sample_aspect_ratio.num,
                          &st->sample_aspect_ratio.den,
                          asf->dar[0].num, asf->dar[0].den, INT_MAX);

            av_dlog(s, "i=%d, st->codec->codec_type:%d, asf->dar %d:%d sar=%d:%d\n",
                    i, st->codec->codec_type, asf->dar[i].num, asf->dar[i].den,
                    st->sample_aspect_ratio.num, st->sample_aspect_ratio.den);

            // copy and convert language codes to the frontend
            if (asf->streams[i].stream_language_index < 128) {
                const char *rfc1766 = asf->stream_languages[asf->streams[i].stream_language_index];
                if (rfc1766 && strlen(rfc1766) > 1) {
                    const char primary_tag[3] = { rfc1766[0], rfc1766[1], '\0' }; // ignore country code if any
                    const char *iso6392 = av_convert_lang_to(primary_tag,
                                                             AV_LANG_ISO639_2_BIBL);
                    if (iso6392)
                        av_dict_set(&st->metadata, "language", iso6392, 0);
                }
            }
        }
    }

    ff_metadata_conv(&s->metadata, NULL, ff_asf_metadata_conv);

    return 0;
}
 
/**
 * Read one ASF variable-width field. The low two bits of `bits` select
 * the field width (3 = 32 bit, 2 = 16 bit, 1 = 8 bit, 0 = absent, in
 * which case `var` gets `defval`). Side effects: reads from the local
 * `pb` and adds the number of bytes consumed to the local `rsize`.
 */
#define DO_2BITS(bits, var, defval)             \
    switch (bits & 3) {                         \
    case 3:                                     \
        var    = avio_rl32(pb);                 \
        rsize += 4;                             \
        break;                                  \
    case 2:                                     \
        var    = avio_rl16(pb);                 \
        rsize += 2;                             \
        break;                                  \
    case 1:                                     \
        var = avio_r8(pb);                      \
        rsize++;                                \
        break;                                  \
    default:                                    \
        var = defval;                           \
        break;                                  \
    }
 
/**
 * Load a single ASF packet into the demuxer.
 *
 * Resynchronizes on the 0x82 00 00 packet-header signature (allowing a
 * bounded amount of garbage), then parses the packet header: flags,
 * property byte, packet/padding lengths, timestamp and segment count.
 * On success asf->packet_size_left/padsize/segments describe the payload
 * area that asf_parse_packet() will consume.
 *
 * @param s demux context
 * @param pb context to read data from
 * @return 0 on success, <0 on error
 */
static int asf_get_packet(AVFormatContext *s, AVIOContext *pb)
{
    ASFContext *asf = s->priv_data;
    uint32_t packet_length, padsize;
    int rsize = 8;
    int c, d, e, off;

    // if we do not know packet size, allow skipping up to 32 kB
    off = 32768;
    if (asf->no_resync_search)
        off = 3;
    else if (s->packet_size > 0)
        // at a known packet size, only scan up to the next boundary
        off = (avio_tell(pb) - s->data_offset) % s->packet_size + 3;

    // 3-byte sliding window looking for 0x82 0x00 0x00
    c = d = e = -1;
    while (off-- > 0) {
        c = d;
        d = e;
        e = avio_r8(pb);
        if (c == 0x82 && !d && !e)
            break;
    }

    if (c != 0x82) {
        /* This code allows handling of -EAGAIN at packet boundaries (i.e.
         * if the packet sync code above triggers -EAGAIN). This does not
         * imply complete -EAGAIN handling support at random positions in
         * the stream. */
        if (pb->error == AVERROR(EAGAIN))
            return AVERROR(EAGAIN);
        if (!url_feof(pb))
            av_log(s, AV_LOG_ERROR,
                   "ff asf bad header %x at:%"PRId64"\n", c, avio_tell(pb));
    }
    if ((c & 0x8f) == 0x82) {
        if (d || e) {
            if (!url_feof(pb))
                av_log(s, AV_LOG_ERROR, "ff asf bad non zero\n");
            return AVERROR_INVALIDDATA;
        }
        c      = avio_r8(pb); // packet flags
        d      = avio_r8(pb); // packet property byte
        rsize += 3;
    } else if(!url_feof(pb)) {
        avio_seek(pb, -1, SEEK_CUR); // FIXME
    }

    asf->packet_flags    = c;
    asf->packet_property = d;

    DO_2BITS(asf->packet_flags >> 5, packet_length, s->packet_size);
    DO_2BITS(asf->packet_flags >> 1, padsize, 0); // sequence ignored
    DO_2BITS(asf->packet_flags >> 3, padsize, 0); // padding length

    // the following checks prevent overflows and infinite loops
    if (!packet_length || packet_length >= (1U << 29)) {
        av_log(s, AV_LOG_ERROR,
               "invalid packet_length %d at:%"PRId64"\n",
               packet_length, avio_tell(pb));
        return AVERROR_INVALIDDATA;
    }
    if (padsize >= packet_length) {
        av_log(s, AV_LOG_ERROR,
               "invalid padsize %d at:%"PRId64"\n", padsize, avio_tell(pb));
        return AVERROR_INVALIDDATA;
    }

    asf->packet_timestamp = avio_rl32(pb);
    avio_rl16(pb); /* duration */
    // rsize has at least 11 bytes which have to be present

    if (asf->packet_flags & 0x01) {
        // multiple-payload packet: low 6 bits = segment count
        asf->packet_segsizetype = avio_r8(pb);
        rsize++;
        asf->packet_segments = asf->packet_segsizetype & 0x3f;
    } else {
        asf->packet_segments    = 1;
        asf->packet_segsizetype = 0x80;
    }
    if (rsize > packet_length - padsize) {
        asf->packet_size_left = 0;
        av_log(s, AV_LOG_ERROR,
               "invalid packet header length %d for pktlen %d-%d at %"PRId64"\n",
               rsize, packet_length, padsize, avio_tell(pb));
        return AVERROR_INVALIDDATA;
    }
    asf->packet_size_left = packet_length - padsize - rsize;
    // short packets are padded up to the declared minimum packet size
    if (packet_length < asf->hdr.min_pktsize)
        padsize += asf->hdr.min_pktsize - packet_length;
    asf->packet_padsize = padsize;
    av_dlog(s, "packet: size=%d padsize=%d left=%d\n",
            s->packet_size, asf->packet_padsize, asf->packet_size_left);
    return 0;
}
 
/**
 * Parse one payload (frame fragment) header inside the current ASF packet:
 * stream number/key flag, object number, fragment offset, replicated data
 * (object size, timestamp and optional payload extensions), and the
 * fragment size. A replicated-data size of 1 switches the packet into
 * "multi payload compressed" mode where the fragment offset carries a
 * start timestamp instead.
 *
 * Updates asf->packet_* and the per-stream ASFStream state; `rsize`
 * counts the header bytes consumed and is deducted from
 * asf->packet_size_left at the end.
 *
 * @return <0 if error
 */
static int asf_read_frame_header(AVFormatContext *s, AVIOContext *pb)
{
    ASFContext *asf = s->priv_data;
    ASFStream *asfst;
    int rsize = 1;
    int num   = avio_r8(pb); // bit 7 = key frame, bits 0-6 = stream number
    int i;
    int64_t ts0, ts1 av_unused;

    asf->packet_segments--;
    asf->packet_key_frame = num >> 7;
    asf->stream_index     = asf->asfid2avid[num & 0x7f];
    asfst                 = &asf->streams[num & 0x7f];
    // sequence should be ignored!
    DO_2BITS(asf->packet_property >> 4, asf->packet_seq, 0);
    DO_2BITS(asf->packet_property >> 2, asf->packet_frag_offset, 0);
    DO_2BITS(asf->packet_property, asf->packet_replic_size, 0);
    av_dlog(asf, "key:%d stream:%d seq:%d offset:%d replic_size:%d\n",
            asf->packet_key_frame, asf->stream_index, asf->packet_seq,
            asf->packet_frag_offset, asf->packet_replic_size);
    if (rsize+(int64_t)asf->packet_replic_size > asf->packet_size_left) {
        av_log(s, AV_LOG_ERROR, "packet_replic_size %d is invalid\n", asf->packet_replic_size);
        return AVERROR_INVALIDDATA;
    }
    if (asf->packet_replic_size >= 8) {
        // normal replicated data: media object size + fragment timestamp,
        // optionally followed by payload extension data
        int64_t end = avio_tell(pb) + asf->packet_replic_size;
        AVRational aspect;
        asfst->packet_obj_size = avio_rl32(pb);
        if (asfst->packet_obj_size >= (1 << 24) || asfst->packet_obj_size <= 0) {
            av_log(s, AV_LOG_ERROR, "packet_obj_size invalid\n");
            asfst->packet_obj_size = 0;
            return AVERROR_INVALIDDATA;
        }
        asf->packet_frag_timestamp = avio_rl32(pb); // timestamp

        /* walk the payload extensions declared for this stream
         * (see asf_read_ext_stream_properties) */
        for (i = 0; i < asfst->payload_ext_ct; i++) {
            ASFPayload *p = &asfst->payload[i];
            int size = p->size;
            int64_t payend;
            if (size == 0xFFFF) // variable-size extension: 16-bit prefix
                size = avio_rl16(pb);
            payend = avio_tell(pb) + size;
            if (payend > end) {
                av_log(s, AV_LOG_ERROR, "too long payload\n");
                break;
            }
            switch (p->type) {
            case 0x50:
                // duration = avio_rl16(pb);
                break;
            case 0x54:
                // pixel aspect ratio, one byte per component
                aspect.num = avio_r8(pb);
                aspect.den = avio_r8(pb);
                if (aspect.num > 0 && aspect.den > 0 && asf->stream_index >= 0) {
                    s->streams[asf->stream_index]->sample_aspect_ratio = aspect;
                }
                break;
            case 0x2A:
                // extended timestamps in 100ns units; -1 means unknown
                avio_skip(pb, 8);
                ts0 = avio_rl64(pb);
                ts1 = avio_rl64(pb);
                if (ts0!= -1) asf->packet_frag_timestamp = ts0/10000;
                else asf->packet_frag_timestamp = AV_NOPTS_VALUE;
                break;
            case 0x5B:
            case 0xB7:
            case 0xCC:
            case 0xC0:
            case 0xA0:
                //unknown
                break;
            }
            avio_seek(pb, payend, SEEK_SET); // skip unread extension bytes
        }

        avio_seek(pb, end, SEEK_SET);
        rsize += asf->packet_replic_size; // FIXME - check validity
    } else if (asf->packet_replic_size == 1) {
        // multipacket - frag_offset is beginning timestamp
        asf->packet_time_start     = asf->packet_frag_offset;
        asf->packet_frag_offset    = 0;
        asf->packet_frag_timestamp = asf->packet_timestamp;

        asf->packet_time_delta = avio_r8(pb);
        rsize++;
    } else if (asf->packet_replic_size != 0) {
        av_log(s, AV_LOG_ERROR, "unexpected packet_replic_size of %d\n",
               asf->packet_replic_size);
        return AVERROR_INVALIDDATA;
    }
    if (asf->packet_flags & 0x01) {
        // multiple payloads: explicit fragment size follows
        DO_2BITS(asf->packet_segsizetype >> 6, asf->packet_frag_size, 0); // 0 is illegal
        if (rsize > asf->packet_size_left) {
            av_log(s, AV_LOG_ERROR, "packet_replic_size is invalid\n");
            return AVERROR_INVALIDDATA;
        } else if (asf->packet_frag_size > asf->packet_size_left - rsize) {
            // allow the fragment to spill into the padding area
            if (asf->packet_frag_size > asf->packet_size_left - rsize + asf->packet_padsize) {
                av_log(s, AV_LOG_ERROR, "packet_frag_size is invalid (%d-%d)\n",
                       asf->packet_size_left, rsize);
                return AVERROR_INVALIDDATA;
            } else {
                int diff = asf->packet_frag_size - (asf->packet_size_left - rsize);
                asf->packet_size_left += diff;
                asf->packet_padsize   -= diff;
            }
        }
    } else {
        // single payload: it fills the remainder of the packet
        asf->packet_frag_size = asf->packet_size_left - rsize;
    }
    if (asf->packet_replic_size == 1) {
        asf->packet_multi_size = asf->packet_frag_size;
        if (asf->packet_multi_size > asf->packet_size_left)
            return AVERROR_INVALIDDATA;
    }
    asf->packet_size_left -= rsize;

    return 0;
}
 
/**
 * Parse data from individual ASF packets (which were previously loaded
 * with asf_get_packet()).
 *
 * State machine: while payload bytes remain, read a payload header
 * (asf_read_frame_header), accumulate fragments into the per-stream
 * ASFStream.pkt, and hand the packet out once all fragments of the
 * media object have arrived. Handles compressed multi-payload mode
 * (packet_replic_size == 1), palette side data, DVR-MS descrambling
 * and optional ASF content decryption.
 *
 * @param s demux context
 * @param pb context to read data from
 * @param pkt pointer to store packet data into
 * @return 0 if data was stored in pkt, <0 on error or 1 if more ASF
 *          packets need to be loaded (through asf_get_packet())
 */
static int asf_parse_packet(AVFormatContext *s, AVIOContext *pb, AVPacket *pkt)
{
    ASFContext *asf   = s->priv_data;
    ASFStream *asf_st = 0;
    for (;;) {
        int ret;
        if (url_feof(pb))
            return AVERROR_EOF;

        // current ASF packet exhausted: skip its padding and ask the
        // caller for the next one (return 1)
        if (asf->packet_size_left < FRAME_HEADER_SIZE ||
            asf->packet_segments < 1) {
            int ret = asf->packet_size_left + asf->packet_padsize;

            assert(ret >= 0);
            /* fail safe */
            avio_skip(pb, ret);

            asf->packet_pos = avio_tell(pb);
            if (asf->data_object_size != (uint64_t)-1 &&
                (asf->packet_pos - asf->data_object_offset >= asf->data_object_size))
                return AVERROR_EOF;  /* Do not exceed the size of the data object */
            return 1;
        }
        if (asf->packet_time_start == 0) {
            if (asf_read_frame_header(s, pb) < 0) {
                asf->packet_segments = 0; // corrupt header: drop rest of packet
                continue;
            }
            // discard payloads for unknown/discarded streams, and non-key
            // payloads while waiting for a keyframe after a seek
            if (asf->stream_index < 0 ||
                s->streams[asf->stream_index]->discard >= AVDISCARD_ALL ||
                (!asf->packet_key_frame &&
                 (s->streams[asf->stream_index]->discard >= AVDISCARD_NONKEY || asf->streams[s->streams[asf->stream_index]->id].skip_to_key))) {
                asf->packet_time_start = 0;
                /* unhandled packet (should not happen) */
                avio_skip(pb, asf->packet_frag_size);
                asf->packet_size_left -= asf->packet_frag_size;
                if (asf->stream_index < 0)
                    av_log(s, AV_LOG_ERROR, "ff asf skip %d (unknown stream)\n",
                           asf->packet_frag_size);
                continue;
            }
            asf->asf_st = &asf->streams[s->streams[asf->stream_index]->id];
            asf->asf_st->skip_to_key = 0; // got a keyframe; stop skipping
        }
        asf_st = asf->asf_st;
        av_assert0(asf_st);

        if (asf->packet_replic_size == 1) {
            // frag_offset is here used as the beginning timestamp
            asf->packet_frag_timestamp = asf->packet_time_start;
            asf->packet_time_start    += asf->packet_time_delta;
            // compressed payloads: each sub-object is prefixed by its size
            asf_st->packet_obj_size    = asf->packet_frag_size = avio_r8(pb);
            asf->packet_size_left--;
            asf->packet_multi_size--;
            if (asf->packet_multi_size < asf_st->packet_obj_size) {
                asf->packet_time_start = 0;
                avio_skip(pb, asf->packet_multi_size);
                asf->packet_size_left -= asf->packet_multi_size;
                continue;
            }
            asf->packet_multi_size -= asf_st->packet_obj_size;
        }

        // start a new output packet when the object size changed or the
        // fragment would not fit in the buffer being assembled
        if (asf_st->pkt.size != asf_st->packet_obj_size ||
            // FIXME is this condition sufficient?
            asf_st->frag_offset + asf->packet_frag_size > asf_st->pkt.size) {
            if (asf_st->pkt.data) {
                av_log(s, AV_LOG_INFO,
                       "freeing incomplete packet size %d, new %d\n",
                       asf_st->pkt.size, asf_st->packet_obj_size);
                asf_st->frag_offset = 0;
                av_free_packet(&asf_st->pkt);
            }
            /* new packet */
            av_new_packet(&asf_st->pkt, asf_st->packet_obj_size);
            asf_st->seq              = asf->packet_seq;
            asf_st->pkt.dts          = asf->packet_frag_timestamp - asf->hdr.preroll;
            asf_st->pkt.stream_index = asf->stream_index;
            asf_st->pkt.pos          = asf_st->packet_pos = asf->packet_pos;

            // attach a pending palette update as packet side data
            if (asf_st->pkt.data && asf_st->palette_changed) {
                uint8_t *pal;
                pal = av_packet_new_side_data(&asf_st->pkt, AV_PKT_DATA_PALETTE,
                                              AVPALETTE_SIZE);
                if (!pal) {
                    av_log(s, AV_LOG_ERROR, "Cannot append palette to packet\n");
                } else {
                    memcpy(pal, asf_st->palette, AVPALETTE_SIZE);
                    asf_st->palette_changed = 0;
                }
            }
            av_dlog(asf, "new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\n",
                    asf->stream_index, asf->packet_key_frame,
                    asf_st->pkt.flags & AV_PKT_FLAG_KEY,
                    s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO,
                    asf_st->packet_obj_size);
            // audio payloads are all treated as keyframes
            if (s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
                asf->packet_key_frame = 1;
            if (asf->packet_key_frame)
                asf_st->pkt.flags |= AV_PKT_FLAG_KEY;
        }

        /* read data */
        av_dlog(asf, "READ PACKET s:%d os:%d o:%d,%d l:%d DATA:%p\n",
                s->packet_size, asf_st->pkt.size, asf->packet_frag_offset,
                asf_st->frag_offset, asf->packet_frag_size, asf_st->pkt.data);
        asf->packet_size_left -= asf->packet_frag_size;
        if (asf->packet_size_left < 0)
            continue;

        if (asf->packet_frag_offset >= asf_st->pkt.size ||
            asf->packet_frag_size > asf_st->pkt.size - asf->packet_frag_offset) {
            av_log(s, AV_LOG_ERROR,
                   "packet fragment position invalid %u,%u not in %u\n",
                   asf->packet_frag_offset, asf->packet_frag_size,
                   asf_st->pkt.size);
            continue;
        }

        ret = avio_read(pb, asf_st->pkt.data + asf->packet_frag_offset,
                        asf->packet_frag_size);
        if (ret != asf->packet_frag_size) {
            if (ret < 0 || asf->packet_frag_offset + ret == 0)
                return ret < 0 ? ret : AVERROR_EOF;

            if (asf_st->ds_span > 1) {
                // scrambling, we can either drop it completely or fill the remainder
                // TODO: should we fill the whole packet instead of just the current
                // fragment?
                memset(asf_st->pkt.data + asf->packet_frag_offset + ret, 0,
                       asf->packet_frag_size - ret);
                ret = asf->packet_frag_size;
            } else {
                // no scrambling, so we can return partial packets
                av_shrink_packet(&asf_st->pkt, asf->packet_frag_offset + ret);
            }
        }
        // decrypt in place when a 20-byte content key is set
        if (s->key && s->keylen == 20)
            ff_asfcrypt_dec(s->key, asf_st->pkt.data + asf->packet_frag_offset,
                            ret);
        asf_st->frag_offset += ret;
        /* test if whole packet is read */
        if (asf_st->frag_offset == asf_st->pkt.size) {
            // workaround for macroshit radio DVR-MS files
            if (s->streams[asf->stream_index]->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
                asf_st->pkt.size > 100) {
                int i;
                // an all-zero payload carries no video data; drop it
                for (i = 0; i < asf_st->pkt.size && !asf_st->pkt.data[i]; i++)
                    ;
                if (i == asf_st->pkt.size) {
                    av_log(s, AV_LOG_DEBUG, "discarding ms fart\n");
                    asf_st->frag_offset = 0;
                    av_free_packet(&asf_st->pkt);
                    continue;
                }
            }

            /* return packet */
            if (asf_st->ds_span > 1) {
                if (asf_st->pkt.size != asf_st->ds_packet_size * asf_st->ds_span) {
                    av_log(s, AV_LOG_ERROR,
                           "pkt.size != ds_packet_size * ds_span (%d %d %d)\n",
                           asf_st->pkt.size, asf_st->ds_packet_size,
                           asf_st->ds_span);
                } else {
                    /* packet descrambling: chunks were interleaved
                     * column-major across ds_span packets; copy them back
                     * into linear order */
                    AVBufferRef *buf = av_buffer_alloc(asf_st->pkt.size +
                                                       FF_INPUT_BUFFER_PADDING_SIZE);
                    if (buf) {
                        uint8_t *newdata = buf->data;
                        int offset = 0;
                        memset(newdata + asf_st->pkt.size, 0,
                               FF_INPUT_BUFFER_PADDING_SIZE);
                        while (offset < asf_st->pkt.size) {
                            int off = offset / asf_st->ds_chunk_size;
                            int row = off / asf_st->ds_span;
                            int col = off % asf_st->ds_span;
                            int idx = row + col * asf_st->ds_packet_size / asf_st->ds_chunk_size;
                            assert(offset + asf_st->ds_chunk_size <= asf_st->pkt.size);
                            assert(idx + 1 <= asf_st->pkt.size / asf_st->ds_chunk_size);
                            memcpy(newdata + offset,
                                   asf_st->pkt.data + idx * asf_st->ds_chunk_size,
                                   asf_st->ds_chunk_size);
                            offset += asf_st->ds_chunk_size;
                        }
                        av_buffer_unref(&asf_st->pkt.buf);
                        asf_st->pkt.buf  = buf;
                        asf_st->pkt.data = buf->data;
                    }
                }
            }
            asf_st->frag_offset = 0;
            // transfer ownership of the assembled packet to the caller and
            // clear our copy so it is not freed/reused
            *pkt = asf_st->pkt;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
            asf_st->pkt.destruct = NULL;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
            asf_st->pkt.buf             = 0;
            asf_st->pkt.size            = 0;
            asf_st->pkt.data            = 0;
            asf_st->pkt.side_data_elems = 0;
            asf_st->pkt.side_data       = NULL;
            break; // packet completed
        }
    }
    return 0;
}
 
/**
 * AVInputFormat.read_packet: alternate between draining payloads from
 * the ASF packet already in flight (asf_parse_packet) and loading the
 * next ASF packet from the byte stream (asf_get_packet) until a full
 * media packet is produced or an error/EOF occurs.
 */
static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    ASFContext *asf = s->priv_data;
    int ret;

    while (1) {
        /* first drain any payload data from the current ASF packet;
         * <= 0 means a packet was produced (0) or an error occurred */
        ret = asf_parse_packet(s, s->pb, pkt);
        if (ret <= 0)
            return ret;

        /* more data needed: load the next ASF packet */
        ret = asf_get_packet(s, s->pb);
        if (ret < 0)
            assert(asf->packet_size_left < FRAME_HEADER_SIZE ||
                   asf->packet_segments < 1);
        asf->packet_time_start = 0;
    }
}
 
/**
 * Reset all per-packet demuxer state.
 *
 * Added to support seeking after packets have been read: if this
 * information is not reset, read_packet fails due to leftover state
 * from previous reads. Also frees any partially assembled per-stream
 * packets. (The original code assigned packet_segments twice; the
 * redundant duplicate has been removed.)
 */
static void asf_reset_header(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    ASFStream *asf_st;
    int i;

    asf->packet_size_left      = 0;
    asf->packet_segments       = 0;
    asf->packet_flags          = 0;
    asf->packet_property       = 0;
    asf->packet_timestamp      = 0;
    asf->packet_segsizetype    = 0;
    asf->packet_seq            = 0;
    asf->packet_replic_size    = 0;
    asf->packet_key_frame      = 0;
    asf->packet_padsize        = 0;
    asf->packet_frag_offset    = 0;
    asf->packet_frag_size      = 0;
    asf->packet_frag_timestamp = 0;
    asf->packet_multi_size     = 0;
    asf->packet_time_delta     = 0;
    asf->packet_time_start     = 0;

    /* drop any half-assembled packets so the next read starts clean */
    for (i = 0; i < 128; i++) {
        asf_st = &asf->streams[i];
        av_free_packet(&asf_st->pkt);
        asf_st->packet_obj_size = 0;
        asf_st->frag_offset     = 0;
        asf_st->seq             = 0;
    }
    asf->asf_st = NULL;
}
 
/**
 * After a seek, mark every mapped video stream so that non-key payloads
 * are discarded until the next keyframe arrives (the flag is cleared in
 * asf_parse_packet once a keyframe payload is seen).
 */
static void skip_to_key(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    int n;

    for (n = 0; n < 128; n++) {
        int av_idx = asf->asfid2avid[n];

        /* only mapped video streams need keyframe resynchronization */
        if (av_idx < 0)
            continue;
        if (s->streams[av_idx]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
            continue;

        asf->streams[n].skip_to_key = 1;
    }
}
 
/**
 * AVInputFormat.read_close: the only demuxer-owned heap state is the
 * per-stream packet buffers, which asf_reset_header() frees.
 */
static int asf_read_close(AVFormatContext *s)
{
    asf_reset_header(s);

    return 0;
}
 
/**
 * AVInputFormat.read_timestamp: find the dts of the next keyframe of
 * `stream_index` at or after *ppos, by aligning to a packet boundary and
 * decoding packets forward. Index entries are added for every keyframe
 * encountered along the way.
 *
 * @param[in,out] ppos on input the search start; on output the packet
 *                     position of the found keyframe
 * @return the keyframe's dts, or AV_NOPTS_VALUE on failure
 */
static int64_t asf_read_pts(AVFormatContext *s, int stream_index,
                            int64_t *ppos, int64_t pos_limit)
{
    ASFContext *asf = s->priv_data;
    AVPacket pkt1, *pkt = &pkt1;
    ASFStream *asf_st;
    int64_t pts;
    int64_t pos = *ppos;
    int i;
    int64_t start_pos[ASF_MAX_STREAMS];

    for (i = 0; i < s->nb_streams; i++)
        start_pos[i] = pos;

    /* round up to the next packet boundary */
    if (s->packet_size > 0)
        pos = (pos + s->packet_size - 1 - s->data_offset) /
              s->packet_size * s->packet_size +
              s->data_offset;
    *ppos = pos;
    if (avio_seek(s->pb, pos, SEEK_SET) < 0)
        return AV_NOPTS_VALUE;

    ff_read_frame_flush(s);
    asf_reset_header(s); // discard stale per-packet state after the seek
    for (;;) {
        if (av_read_frame(s, pkt) < 0) {
            av_log(s, AV_LOG_INFO, "asf_read_pts failed\n");
            return AV_NOPTS_VALUE;
        }

        pts = pkt->dts;

        /* NOTE(review): pkt fields (flags, stream_index, size) are read
         * after av_free_packet(); this matches the historical upstream
         * code, but whether av_free_packet of this era preserves those
         * fields should be confirmed before restructuring. */
        av_free_packet(pkt);
        if (pkt->flags & AV_PKT_FLAG_KEY) {
            i = pkt->stream_index;

            asf_st = &asf->streams[s->streams[i]->id];

            // assert((asf_st->packet_pos - s->data_offset) % s->packet_size == 0);
            pos = asf_st->packet_pos;

            av_add_index_entry(s->streams[i], pos, pts, pkt->size,
                               pos - start_pos[i] + 1, AVINDEX_KEYFRAME);
            start_pos[i] = asf_st->packet_pos + 1;

            if (pkt->stream_index == stream_index)
                break;
        }
    }

    *ppos = pos;
    return pts;
}
 
/**
 * Locate the ASF Simple Index object (which follows the Data object) and
 * convert its entries into index entries on `stream_index`. The stream
 * position is restored afterwards. On any failure asf->index_read is set
 * to -1 so the lookup is not retried.
 */
static void asf_build_simple_index(AVFormatContext *s, int stream_index)
{
    ff_asf_guid g;
    ASFContext *asf     = s->priv_data;
    int64_t current_pos = avio_tell(s->pb);

    // jump to the first byte after the Data object
    if(avio_seek(s->pb, asf->data_object_offset + asf->data_object_size, SEEK_SET) < 0) {
        asf->index_read= -1;
        return;
    }

    ff_get_guid(s->pb, &g);

    /* the data object can be followed by other top-level objects,
     * skip them until the simple index object is reached */
    while (ff_guidcmp(&g, &ff_asf_simple_index_header)) {
        int64_t gsize = avio_rl64(s->pb);
        if (gsize < 24 || url_feof(s->pb)) {
            avio_seek(s->pb, current_pos, SEEK_SET);
            asf->index_read= -1;
            return;
        }
        avio_skip(s->pb, gsize - 24);
        ff_get_guid(s->pb, &g);
    }

    {
        int64_t itime, last_pos = -1;
        int pct, ict;
        int i;
        int64_t av_unused gsize = avio_rl64(s->pb);
        ff_get_guid(s->pb, &g);   // file-id GUID, unused
        itime = avio_rl64(s->pb); // index entry time interval (100 ns)
        pct   = avio_rl32(s->pb); // packets per index entry
        ict   = avio_rl32(s->pb); // index entry count
        av_log(s, AV_LOG_DEBUG,
               "itime:0x%"PRIx64", pct:%d, ict:%d\n", itime, pct, ict);

        for (i = 0; i < ict; i++) {
            int pktnum        = avio_rl32(s->pb);
            int pktct         = avio_rl16(s->pb);
            int64_t pos       = s->data_offset + s->packet_size * (int64_t)pktnum;
            // entry i covers time i*itime; clamp below preroll to 0
            int64_t index_pts = FFMAX(av_rescale(itime, i, 10000) - asf->hdr.preroll, 0);

            // consecutive entries often point at the same packet; dedupe
            if (pos != last_pos) {
                av_log(s, AV_LOG_DEBUG, "pktnum:%d, pktct:%d pts: %"PRId64"\n",
                       pktnum, pktct, index_pts);
                av_add_index_entry(s->streams[stream_index], pos, index_pts,
                                   s->packet_size, 0, AVINDEX_KEYFRAME);
                last_pos = pos;
            }
        }
        // a 0/1-entry index is useless; mark it unread so seeks fall back
        asf->index_read = ict > 1;
    }
    avio_seek(s->pb, current_pos, SEEK_SET);
}
 
/**
 * AVInputFormat.read_seek. Strategy, in order: the protocol's native
 * seek (avio_seek_time), the ASF simple index (built lazily), and finally
 * a binary search via ff_seek_frame_binary/asf_read_pts. After any
 * successful seek the demuxer state is reset and video streams wait for
 * the next keyframe.
 *
 * @return 0 on success, negative on failure
 */
static int asf_read_seek(AVFormatContext *s, int stream_index,
                         int64_t pts, int flags)
{
    ASFContext *asf = s->priv_data;
    AVStream *st    = s->streams[stream_index];

    if (s->packet_size <= 0)
        return -1;

    /* Try using the protocol's read_seek if available */
    if (s->pb) {
        int ret = avio_seek_time(s->pb, stream_index, pts, flags);
        if (ret >= 0)
            asf_reset_header(s);
        if (ret != AVERROR(ENOSYS)) // ENOSYS = protocol can't seek; fall through
            return ret;
    }

    /* index_read: 0 = not tried yet, -1 = failed, 1 = usable */
    if (!asf->index_read)
        asf_build_simple_index(s, stream_index);

    if ((asf->index_read > 0 && st->index_entries)) {
        int index = av_index_search_timestamp(st, pts, flags);
        if (index >= 0) {
            /* find the position */
            uint64_t pos = st->index_entries[index].pos;

            /* do the seek */
            av_log(s, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos);
            if(avio_seek(s->pb, pos, SEEK_SET) < 0)
                return -1;
            asf_reset_header(s);
            skip_to_key(s);
            return 0;
        }
    }
    /* no index or seeking by index failed */
    if (ff_seek_frame_binary(s, stream_index, pts, flags) < 0)
        return -1;
    asf_reset_header(s);
    skip_to_key(s);
    return 0;
}
 
/* Demuxer registration. AVFMT_NOBINSEARCH/NOGENSEARCH disable the generic
 * seek fallbacks: seeking is handled entirely by asf_read_seek above. */
AVInputFormat ff_asf_demuxer = {
    .name           = "asf",
    .long_name      = NULL_IF_CONFIG_SMALL("ASF (Advanced / Active Streaming Format)"),
    .priv_data_size = sizeof(ASFContext),
    .read_probe     = asf_probe,
    .read_header    = asf_read_header,
    .read_packet    = asf_read_packet,
    .read_close     = asf_read_close,
    .read_seek      = asf_read_seek,
    .read_timestamp = asf_read_pts,
    .flags          = AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH,
    .priv_class     = &asf_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/asfenc.c
0,0 → 1,1003
/*
* ASF muxer
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "riff.h"
#include "asf.h"
 
#undef NDEBUG
#include <assert.h>
 
 
/* Muxer index parameters: one index entry per second (100 ns units) and
 * the allocation growth step for the index array. */
#define ASF_INDEXED_INTERVAL 10000000
#define ASF_INDEX_BLOCK (1<<9)

/* Error-correction header written before each packet: flag byte declares
 * a 2-byte EC data field being present. */
#define ASF_PACKET_ERROR_CORRECTION_DATA_SIZE 0x2
#define ASF_PACKET_ERROR_CORRECTION_FLAGS \
    (ASF_PACKET_FLAG_ERROR_CORRECTION_PRESENT | \
     ASF_PACKET_ERROR_CORRECTION_DATA_SIZE)

/* The EC flags byte itself is only written when any flag is set. */
#if (ASF_PACKET_ERROR_CORRECTION_FLAGS != 0)
#   define ASF_PACKET_ERROR_CORRECTION_FLAGS_FIELD_SIZE 1
#else
#   define ASF_PACKET_ERROR_CORRECTION_FLAGS_FIELD_SIZE 0
#endif

/* Payload-parsing-information property flags: field widths chosen for the
 * per-payload header fields this muxer emits. */
#define ASF_PPI_PROPERTY_FLAGS \
    (ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_BYTE | \
     ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_DWORD | \
     ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_BYTE | \
     ASF_PL_FLAG_STREAM_NUMBER_LENGTH_FIELD_IS_BYTE)

/* No sequence/packet-length/padding-length fields in the packet header. */
#define ASF_PPI_LENGTH_TYPE_FLAGS 0

#define ASF_PAYLOAD_FLAGS ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_WORD

/* The following #if chains translate the two-bit field-width codes in
 * ASF_PPI_LENGTH_TYPE_FLAGS into byte counts at compile time; the
 * #ifndef fallback covers the "field absent" (0) case. */
#if (ASF_PPI_FLAG_SEQUENCE_FIELD_IS_BYTE == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_SEQUENCE_FIELD_SIZE))
#   define ASF_PPI_SEQUENCE_FIELD_SIZE 1
#endif
#if (ASF_PPI_FLAG_SEQUENCE_FIELD_IS_WORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_SEQUENCE_FIELD_SIZE))
#   define ASF_PPI_SEQUENCE_FIELD_SIZE 2
#endif
#if (ASF_PPI_FLAG_SEQUENCE_FIELD_IS_DWORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_SEQUENCE_FIELD_SIZE))
#   define ASF_PPI_SEQUENCE_FIELD_SIZE 4
#endif
#ifndef ASF_PPI_SEQUENCE_FIELD_SIZE
#   define ASF_PPI_SEQUENCE_FIELD_SIZE 0
#endif

#if (ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_BYTE == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE))
#   define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 1
#endif
#if (ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_WORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE))
#   define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 2
#endif
#if (ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_DWORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE))
#   define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 4
#endif
#ifndef ASF_PPI_PACKET_LENGTH_FIELD_SIZE
#   define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 0
#endif

#if (ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE))
#   define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 1
#endif
#if (ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE))
#   define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 2
#endif
#if (ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_DWORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE))
#   define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 4
#endif
#ifndef ASF_PPI_PADDING_LENGTH_FIELD_SIZE
#   define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 0
#endif
 
#if (ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_BYTE == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE))
# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 1
#endif
#if (ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_WORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE))
# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 2
#endif
#if (ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_DWORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE))
# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 4
#endif
#ifndef ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE
# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 0
#endif
 
#if (ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_BYTE == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE))
# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 1
#endif
#if (ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_WORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE))
# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 2
#endif
#if (ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_DWORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE))
# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 4
#endif
#ifndef ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE
# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 0
#endif
 
#if (ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_BYTE == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE))
# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 1
#endif
#if (ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_WORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE))
# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 2
#endif
#if (ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_DWORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE))
# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 4
#endif
#ifndef ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE
# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 0
#endif
 
#if (ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_BYTE == (ASF_PAYLOAD_FLAGS & ASF_PL_MASK_PAYLOAD_LENGTH_FIELD_SIZE))
# define ASF_PAYLOAD_LENGTH_FIELD_SIZE 1
#endif
#if (ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_WORD == (ASF_PAYLOAD_FLAGS & ASF_PL_MASK_PAYLOAD_LENGTH_FIELD_SIZE))
# define ASF_PAYLOAD_LENGTH_FIELD_SIZE 2
#endif
#ifndef ASF_PAYLOAD_LENGTH_FIELD_SIZE
# define ASF_PAYLOAD_LENGTH_FIELD_SIZE 0
#endif
 
#define PACKET_HEADER_MIN_SIZE \
(ASF_PACKET_ERROR_CORRECTION_FLAGS_FIELD_SIZE + \
ASF_PACKET_ERROR_CORRECTION_DATA_SIZE + \
1 + /* Length Type Flags */ \
1 + /* Property Flags */ \
ASF_PPI_PACKET_LENGTH_FIELD_SIZE + \
ASF_PPI_SEQUENCE_FIELD_SIZE + \
ASF_PPI_PADDING_LENGTH_FIELD_SIZE + \
4 + /* Send Time Field */ \
2) /* Duration Field */
 
// Replicated Data shall be at least 8 bytes long.
#define ASF_PAYLOAD_REPLICATED_DATA_LENGTH 0x08
 
#define PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD \
(1 + /* Stream Number */ \
ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE + \
ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE + \
ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE + \
ASF_PAYLOAD_REPLICATED_DATA_LENGTH)
 
#define PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS \
(1 + /* Stream Number */ \
ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE + \
ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE + \
ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE + \
ASF_PAYLOAD_REPLICATED_DATA_LENGTH + \
ASF_PAYLOAD_LENGTH_FIELD_SIZE)
 
#define SINGLE_PAYLOAD_DATA_LENGTH \
(PACKET_SIZE - \
PACKET_HEADER_MIN_SIZE - \
PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD)
 
#define MULTI_PAYLOAD_CONSTANT \
(PACKET_SIZE - \
PACKET_HEADER_MIN_SIZE - \
1 - /* Payload Flags */ \
2 * PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS)
 
#define DATA_HEADER_SIZE 50
 
/* Muxer private state: header bookkeeping, the packet currently being
 * filled, and the seek index accumulated while writing. */
typedef struct {
    uint32_t seqno;                 ///< chunk sequence number (streamed mode only)
    int is_streamed;                ///< nonzero for the asf_stream variant (chunked, no index)
    ASFStream streams[128];         ///< it's max number and it's not that big
    /* non streamed additional info */
    uint64_t nb_packets;            ///< how many packets are there in the file, invalid if broadcasting
    int64_t duration;               ///< in 100ns units
    /* packet filling */
    unsigned char multi_payloads_present; ///< layout of the packet being built
    int packet_size_left;           ///< free bytes remaining in the current packet
    int64_t packet_timestamp_start; ///< -1 while no packet is being built
    int64_t packet_timestamp_end;
    unsigned int packet_nb_payloads;
    uint8_t packet_buf[PACKET_SIZE];
    AVIOContext pb;                 ///< in-memory writer over packet_buf
    /* only for reading */
    uint64_t data_offset;           ///< beginning of the first data packet

    ASFIndex *index_ptr;            ///< one entry per indexed second, for the simple index object
    uint32_t nb_index_memory_alloc; ///< allocated capacity of index_ptr (entries)
    uint16_t maximum_packet;        ///< largest packet_count seen, written into the index header
    uint32_t next_packet_number;
    uint16_t next_packet_count;
    uint64_t next_packet_offset;
    int next_start_sec;             ///< number of index seconds filled so far
    int end_sec;                    ///< indexed second of the most recent packet
} ASFContext;
 
/* ASF-specific video FourCC overrides, consulted before the generic
 * riff BMP tag table in the muxer's codec_tag list. */
static const AVCodecTag codec_asf_bmp_tags[] = {
    { AV_CODEC_ID_MPEG4,     MKTAG('M', '4', 'S', '2') },
    { AV_CODEC_ID_MPEG4,     MKTAG('M', 'P', '4', 'S') },
    { AV_CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') },
    { AV_CODEC_ID_NONE,      0 },
};
 
#define PREROLL_TIME 3100
 
/* Emit one 16-byte ASF GUID verbatim. */
void ff_put_guid(AVIOContext *s, const ff_asf_guid *g)
{
    const size_t guid_len = sizeof(*g);

    av_assert0(guid_len == 16);
    avio_write(s, *g, guid_len);
}
 
/* Write a length-prefixed UTF-16LE string: the string is first serialized
 * into a dynamic buffer so its byte length can be emitted in front of it.
 * On allocation failure nothing is written (best effort, as before). */
static void put_str16(AVIOContext *s, const char *tag)
{
    AVIOContext *tmp;
    uint8_t *data;
    int size;

    if (avio_open_dyn_buf(&tmp) < 0)
        return;

    avio_put_str16le(tmp, tag);
    size = avio_close_dyn_buf(tmp, &data);
    avio_wl16(s, size);
    avio_write(s, data, size);
    av_freep(&data);
}
 
/* Start an ASF header object: write its GUID plus a placeholder size
 * (24 = GUID + size field), and return the object's start position so
 * end_header() can patch the real size in later. */
static int64_t put_header(AVIOContext *pb, const ff_asf_guid *g)
{
    int64_t start = avio_tell(pb);

    ff_put_guid(pb, g);
    avio_wl64(pb, 24); /* placeholder, patched by end_header() */
    return start;
}
 
/* Finish an object started by put_header(): seek back to its size field
 * (16 bytes past the object start, right after the GUID), store the real
 * size, and restore the write position. */
static void end_header(AVIOContext *pb, int64_t pos)
{
    int64_t end = avio_tell(pb);

    avio_seek(pb, pos + 16, SEEK_SET);
    avio_wl64(pb, end - pos);
    avio_seek(pb, end, SEEK_SET);
}
 
/* Write one streaming-mode chunk header (only used when is_streamed):
 * type, size, running sequence number, flags, and a size confirmation. */
static void put_chunk(AVFormatContext *s, int type,
                      int payload_length, int flags)
{
    ASFContext *asf  = s->priv_data;
    AVIOContext *pb  = s->pb;
    const int length = payload_length + 8;

    avio_wl16(pb, type);
    avio_wl16(pb, length);        // size
    avio_wl32(pb, asf->seqno++);  // sequence number
    avio_wl16(pb, flags);         // unknown bytes
    avio_wl16(pb, length);        // size_confirm
}
 
/* Convert a Unix timestamp (seconds since 1970) to Windows FILETIME
 * (100ns ticks since 1601-01-01). */
static int64_t unix_to_file_time(int ti)
{
    return ti * INT64_C(10000000) + INT64_C(116444736000000000);
}
 
/* Find the send time (in ms) and packet offset of the last index entry at
 * or before pres_time (100ns units); falls back to the start of the data
 * section when the index has no earlier entry. */
static int32_t get_send_time(ASFContext *asf, int64_t pres_time, uint64_t *offset)
{
    int32_t send_time = 0;
    int i;

    *offset = asf->data_offset + DATA_HEADER_SIZE;
    for (i = 0; i < asf->next_start_sec; i++) {
        if (pres_time <= asf->index_ptr[i].send_time)
            break;
        send_time = asf->index_ptr[i].send_time;
        *offset   = asf->index_ptr[i].offset;
    }
    return send_time / 10000; /* 100ns -> ms */
}
 
/* Write an ASF Marker object, mapping each AVChapter to one marker entry
 * (packet offset, send/presentation time, optional UTF-16LE title).
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
static int asf_write_markers(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    AVIOContext *pb = s->pb;
    int i;
    AVRational scale = {1, 10000000};   /* chapter time base -> 100ns units */
    int64_t hpos = put_header(pb, &ff_asf_marker_header);

    ff_put_guid(pb, &ff_asf_reserved_4);// ASF spec mandates this reserved value
    avio_wl32(pb, s->nb_chapters);      // markers count
    avio_wl16(pb, 0);                   // ASF spec mandates 0 for this
    avio_wl16(pb, 0);                   // name length 0, no name given

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *c = s->chapters[i];
        AVDictionaryEntry *t = av_dict_get(c->metadata, "title", NULL, 0);
        int64_t pres_time = av_rescale_q(c->start, c->time_base, scale);
        uint64_t offset;
        int32_t send_time = get_send_time(asf, pres_time, &offset);
        int len = 0;
        uint8_t *buf;
        AVIOContext *dyn_buf;
        if (t) {
            /* serialize the title to UTF-16LE first to learn its byte length */
            if (avio_open_dyn_buf(&dyn_buf) < 0)
                return AVERROR(ENOMEM);
            avio_put_str16le(dyn_buf, t->value);
            len = avio_close_dyn_buf(dyn_buf, &buf);
        }
        avio_wl64(pb, offset);            // offset of the packet with send_time
        avio_wl64(pb, pres_time + PREROLL_TIME * 10000); // presentation time
        avio_wl16(pb, 12 + len);          // entry length
        avio_wl32(pb, send_time);         // send time
        avio_wl32(pb, 0);                 // flags, should be 0
        avio_wl32(pb, len / 2);           // marker desc length in WCHARS!
        if (t) {
            avio_write(pb, buf, len);     // marker desc
            av_freep(&buf);
        }
    }
    end_header(pb, hpos);
    return 0;
}
 
/* write the header (used two times if non streamed: first with placeholder
 * sizes from asf_write_header(), then again from asf_write_trailer() once
 * the real file_size and data_chunk_size are known) */
static int asf_write_header1(AVFormatContext *s, int64_t file_size,
                             int64_t data_chunk_size)
{
    ASFContext *asf = s->priv_data;
    AVIOContext *pb = s->pb;
    AVDictionaryEntry *tags[5];
    int header_size, n, extra_size, extra_size2, wav_extra_size, file_time;
    int has_title;
    int metadata_count;
    AVCodecContext *enc;
    int64_t header_offset, cur_pos, hpos;
    int bit_rate;
    int64_t duration;

    ff_metadata_conv(&s->metadata, ff_asf_metadata_conv, NULL);

    tags[0] = av_dict_get(s->metadata, "title", NULL, 0);
    tags[1] = av_dict_get(s->metadata, "author", NULL, 0);
    tags[2] = av_dict_get(s->metadata, "copyright", NULL, 0);
    tags[3] = av_dict_get(s->metadata, "comment", NULL, 0);
    tags[4] = av_dict_get(s->metadata, "rating", NULL, 0);

    duration       = asf->duration + PREROLL_TIME * 10000; /* 100ns units */
    has_title      = tags[0] || tags[1] || tags[2] || tags[3] || tags[4];
    metadata_count = av_dict_count(s->metadata);

    /* overall nominal bit rate is the sum over all streams */
    bit_rate = 0;
    for (n = 0; n < s->nb_streams; n++) {
        enc = s->streams[n]->codec;

        avpriv_set_pts_info(s->streams[n], 32, 1, 1000); /* 32 bit pts in ms */

        bit_rate += enc->bit_rate;
    }

    if (asf->is_streamed) {
        put_chunk(s, 0x4824, 0, 0xc00); /* start of stream (length will be patched later) */
    }

    ff_put_guid(pb, &ff_asf_header);
    avio_wl64(pb, -1); /* header length, will be patched after */
    avio_wl32(pb, 3 + has_title + !!metadata_count + s->nb_streams); /* number of chunks in header */
    avio_w8(pb, 1); /* ??? */
    avio_w8(pb, 2); /* ??? */

    /* file header */
    header_offset = avio_tell(pb);
    hpos = put_header(pb, &ff_asf_file_header);
    ff_put_guid(pb, &ff_asf_my_guid);
    avio_wl64(pb, file_size);
    file_time = 0;
    avio_wl64(pb, unix_to_file_time(file_time));
    avio_wl64(pb, asf->nb_packets); /* number of packets */
    avio_wl64(pb, duration); /* end time stamp (in 100ns units) */
    avio_wl64(pb, asf->duration); /* duration (in 100ns units) */
    avio_wl64(pb, PREROLL_TIME); /* start time stamp */
    avio_wl32(pb, (asf->is_streamed || !pb->seekable) ? 3 : 2); /* ??? */
    avio_wl32(pb, s->packet_size); /* packet size */
    avio_wl32(pb, s->packet_size); /* packet size */
    avio_wl32(pb, bit_rate); /* Nominal data rate in bps */
    end_header(pb, hpos);

    /* unknown headers */
    hpos = put_header(pb, &ff_asf_head1_guid);
    ff_put_guid(pb, &ff_asf_head2_guid);
    avio_wl32(pb, 6);
    avio_wl16(pb, 0);
    end_header(pb, hpos);

    /* title and other infos */
    if (has_title) {
        int len;
        uint8_t *buf;
        AVIOContext *dyn_buf;

        if (avio_open_dyn_buf(&dyn_buf) < 0)
            return AVERROR(ENOMEM);

        hpos = put_header(pb, &ff_asf_comment_header);

        /* the five length fields come first, then all UTF-16LE strings
         * back to back from the dynamic buffer */
        for (n = 0; n < FF_ARRAY_ELEMS(tags); n++) {
            len = tags[n] ? avio_put_str16le(dyn_buf, tags[n]->value) : 0;
            avio_wl16(pb, len);
        }
        len = avio_close_dyn_buf(dyn_buf, &buf);
        avio_write(pb, buf, len);
        av_freep(&buf);
        end_header(pb, hpos);
    }
    if (metadata_count) {
        AVDictionaryEntry *tag = NULL;
        hpos = put_header(pb, &ff_asf_extended_content_header);
        avio_wl16(pb, metadata_count);
        while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
            put_str16(pb, tag->key);
            avio_wl16(pb, 0); /* value data type 0: UTF-16 string */
            put_str16(pb, tag->value);
        }
        end_header(pb, hpos);
    }
    /* chapters using ASF markers */
    if (!asf->is_streamed && s->nb_chapters) {
        int ret;
        if (ret = asf_write_markers(s))
            return ret;
    }
    /* stream headers */
    for (n = 0; n < s->nb_streams; n++) {
        int64_t es_pos;
        //  ASFStream *stream = &asf->streams[n];

        enc = s->streams[n]->codec;
        asf->streams[n].num = n + 1;
        asf->streams[n].seq = 1;

        switch (enc->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            wav_extra_size = 0;
            extra_size = 18 + wav_extra_size;     /* WAVEFORMATEX */
            extra_size2 = 8;
            break;
        default:
        case AVMEDIA_TYPE_VIDEO:
            wav_extra_size = enc->extradata_size;
            extra_size = 0x33 + wav_extra_size;   /* video props + BITMAPINFOHEADER */
            extra_size2 = 0;
            break;
        }

        hpos = put_header(pb, &ff_asf_stream_header);
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            ff_put_guid(pb, &ff_asf_audio_stream);
            ff_put_guid(pb, &ff_asf_audio_conceal_spread);
        } else {
            ff_put_guid(pb, &ff_asf_video_stream);
            ff_put_guid(pb, &ff_asf_video_conceal_none);
        }
        avio_wl64(pb, 0); /* ??? */
        es_pos = avio_tell(pb);
        avio_wl32(pb, extra_size); /* wav header len */
        avio_wl32(pb, extra_size2); /* additional data len */
        avio_wl16(pb, n + 1); /* stream number */
        avio_wl32(pb, 0); /* ??? */

        if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* WAVEFORMATEX header */
            int wavsize = ff_put_wav_header(pb, enc);

            if (wavsize < 0)
                return -1;
            if (wavsize != extra_size) {
                /* patch the wav header length written above */
                cur_pos = avio_tell(pb);
                avio_seek(pb, es_pos, SEEK_SET);
                avio_wl32(pb, wavsize); /* wav header len */
                avio_seek(pb, cur_pos, SEEK_SET);
            }
            /* ERROR Correction */
            avio_w8(pb, 0x01);
            if (enc->codec_id == AV_CODEC_ID_ADPCM_G726 || !enc->block_align) {
                avio_wl16(pb, 0x0190);
                avio_wl16(pb, 0x0190);
            } else {
                avio_wl16(pb, enc->block_align);
                avio_wl16(pb, enc->block_align);
            }
            avio_wl16(pb, 0x01);
            avio_w8(pb, 0x00);
        } else {
            avio_wl32(pb, enc->width);
            avio_wl32(pb, enc->height);
            avio_w8(pb, 2); /* ??? */
            avio_wl16(pb, 40 + enc->extradata_size); /* size */

            /* BITMAPINFOHEADER header */
            ff_put_bmp_header(pb, enc, ff_codec_bmp_tags, 1);
        }
        end_header(pb, hpos);
    }

    /* media comments */

    hpos = put_header(pb, &ff_asf_codec_comment_header);
    ff_put_guid(pb, &ff_asf_codec_comment1_header);
    avio_wl32(pb, s->nb_streams);
    for (n = 0; n < s->nb_streams; n++) {
        AVCodec *p;
        const char *desc;
        int len;
        uint8_t *buf;
        AVIOContext *dyn_buf;

        enc = s->streams[n]->codec;
        p = avcodec_find_encoder(enc->codec_id);

        if (enc->codec_type == AVMEDIA_TYPE_AUDIO)
            avio_wl16(pb, 2);
        else if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
            avio_wl16(pb, 1);
        else
            avio_wl16(pb, -1);

        if (enc->codec_id == AV_CODEC_ID_WMAV2)
            desc = "Windows Media Audio V8";
        else
            desc = p ? p->name : enc->codec_name;

        if (avio_open_dyn_buf(&dyn_buf) < 0)
            return AVERROR(ENOMEM);

        avio_put_str16le(dyn_buf, desc);
        len = avio_close_dyn_buf(dyn_buf, &buf);
        avio_wl16(pb, len / 2); // "number of characters" = length in bytes / 2

        avio_write(pb, buf, len);
        av_freep(&buf);

        avio_wl16(pb, 0); /* no parameters */

        /* id */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            avio_wl16(pb, 2);
            avio_wl16(pb, enc->codec_tag);
        } else {
            avio_wl16(pb, 4);
            avio_wl32(pb, enc->codec_tag);
        }
        if (!enc->codec_tag)
            return -1;
    }
    end_header(pb, hpos);

    /* patch the header size fields */

    cur_pos = avio_tell(pb);
    header_size = cur_pos - header_offset;
    if (asf->is_streamed) {
        /* also patch the size fields inside the 0x4824 stream chunk */
        header_size += 8 + 30 + DATA_HEADER_SIZE;

        avio_seek(pb, header_offset - 10 - 30, SEEK_SET);
        avio_wl16(pb, header_size);
        avio_seek(pb, header_offset - 2 - 30, SEEK_SET);
        avio_wl16(pb, header_size);

        header_size -= 8 + 30 + DATA_HEADER_SIZE;
    }
    header_size += 24 + 6;
    avio_seek(pb, header_offset - 14, SEEK_SET);
    avio_wl64(pb, header_size);
    avio_seek(pb, cur_pos, SEEK_SET);

    /* movie chunk, followed by packets of packet_size */
    asf->data_offset = cur_pos;
    ff_put_guid(pb, &ff_asf_data_header);
    avio_wl64(pb, data_chunk_size);
    ff_put_guid(pb, &ff_asf_my_guid);
    avio_wl64(pb, asf->nb_packets); /* nb packets */
    avio_w8(pb, 1); /* ??? */
    avio_w8(pb, 1); /* ??? */
    return 0;
}
 
/* Initialize the muxer: allocate the seek index, write the (placeholder)
 * header, and set up the in-memory packet writer.
 * Returns 0 on success, a negative AVERROR code on failure.
 * Fix: the av_malloc() result was previously used unchecked; on OOM the
 * later index accesses in update_index()/get_send_time() would crash. */
static int asf_write_header(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;

    s->packet_size  = PACKET_SIZE;
    asf->nb_packets = 0;

    asf->index_ptr = av_malloc(sizeof(ASFIndex) * ASF_INDEX_BLOCK);
    if (!asf->index_ptr)
        return AVERROR(ENOMEM);
    asf->nb_index_memory_alloc = ASF_INDEX_BLOCK;
    asf->maximum_packet        = 0;

    /* the data-chunk-size has to be 50 (DATA_HEADER_SIZE), which is
     * data_size - asf->data_offset at the moment this function is done.
     * It is needed to use asf as a streamable format. */
    if (asf_write_header1(s, 0, DATA_HEADER_SIZE) < 0)
        return -1;

    avio_flush(s->pb);

    /* no packet under construction yet */
    asf->packet_nb_payloads     = 0;
    asf->packet_timestamp_start = -1;
    asf->packet_timestamp_end   = -1;
    ffio_init_context(&asf->pb, asf->packet_buf, s->packet_size, 1,
                      NULL, NULL, NULL, NULL);

    if (s->avoid_negative_ts < 0)
        s->avoid_negative_ts = 1;

    return 0;
}
 
/* Entry point for the asf_stream muxer: same as asf_write_header() but with
 * chunked (streamed) output enabled, which disables index writing. */
static int asf_write_stream_header(AVFormatContext *s)
{
    ASFContext *ctx = s->priv_data;

    ctx->is_streamed = 1;
    return asf_write_header(s);
}
 
/* Write the per-packet "payload parsing information" header directly to the
 * output and return its size in bytes.  padsize is the free space left in
 * the packet, including the room this header itself occupies. */
static int put_payload_parsing_info(AVFormatContext *s,
                                    unsigned sendtime, unsigned duration,
                                    int nb_payloads, int padsize)
{
    ASFContext *asf = s->priv_data;
    AVIOContext *pb = s->pb;
    int ppi_size, i;
    int64_t start = avio_tell(pb);

    int iLengthTypeFlags = ASF_PPI_LENGTH_TYPE_FLAGS;

    padsize -= PACKET_HEADER_MIN_SIZE;
    if (asf->multi_payloads_present)
        padsize--;  /* one extra byte is used by the payload-count field */
    av_assert0(padsize >= 0);

    avio_w8(pb, ASF_PACKET_ERROR_CORRECTION_FLAGS);
    for (i = 0; i < ASF_PACKET_ERROR_CORRECTION_DATA_SIZE; i++)
        avio_w8(pb, 0x0);

    if (asf->multi_payloads_present)
        iLengthTypeFlags |= ASF_PPI_FLAG_MULTIPLE_PAYLOADS_PRESENT;

    if (padsize > 0) {
        /* use the smallest field width that can express the padding size */
        if (padsize < 256)
            iLengthTypeFlags |= ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE;
        else
            iLengthTypeFlags |= ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD;
    }
    avio_w8(pb, iLengthTypeFlags);

    avio_w8(pb, ASF_PPI_PROPERTY_FLAGS);

    /* the stored padding value excludes the length field's own bytes */
    if (iLengthTypeFlags & ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD)
        avio_wl16(pb, padsize - 2);
    if (iLengthTypeFlags & ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE)
        avio_w8(pb, padsize - 1);

    avio_wl32(pb, sendtime);
    avio_wl16(pb, duration);
    if (asf->multi_payloads_present)
        avio_w8(pb, nb_payloads | ASF_PAYLOAD_FLAGS);

    ppi_size = avio_tell(pb) - start;

    return ppi_size;
}
 
/* Finish the packet under construction: emit its parsing-info header to the
 * real output, zero-pad the in-memory payload buffer, write it out, and
 * reset the packet accumulation state for the next packet. */
static void flush_packet(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    int packet_hdr_size, packet_filled_size;

    av_assert0(asf->packet_timestamp_end >= asf->packet_timestamp_start);

    if (asf->is_streamed)
        put_chunk(s, 0x4424, s->packet_size, 0);

    packet_hdr_size = put_payload_parsing_info(s,
                                               asf->packet_timestamp_start,
                                               asf->packet_timestamp_end - asf->packet_timestamp_start,
                                               asf->packet_nb_payloads,
                                               asf->packet_size_left);

    packet_filled_size = PACKET_SIZE - asf->packet_size_left;
    av_assert0(packet_hdr_size <= asf->packet_size_left);
    /* zero the unused tail so the padding bytes are deterministic */
    memset(asf->packet_buf + packet_filled_size, 0, asf->packet_size_left);

    /* header was already written to s->pb, so only PACKET_SIZE -
     * packet_hdr_size bytes of the buffer complete the packet */
    avio_write(s->pb, asf->packet_buf, s->packet_size - packet_hdr_size);

    avio_flush(s->pb);
    asf->nb_packets++;
    asf->packet_nb_payloads     = 0;
    asf->packet_timestamp_start = -1;
    asf->packet_timestamp_end   = -1;
    ffio_init_context(&asf->pb, asf->packet_buf, s->packet_size, 1,
                      NULL, NULL, NULL, NULL);
}
 
/* Write one payload header into the in-memory packet buffer.  The payload
 * carries the fragment of a media object starting at m_obj_offset. */
static void put_payload_header(AVFormatContext *s, ASFStream *stream,
                               int64_t presentation_time, int m_obj_size,
                               int m_obj_offset, int payload_len, int flags)
{
    ASFContext *asf = s->priv_data;
    AVIOContext *pb = &asf->pb; /* note: the packet buffer, not the output */
    int val;

    val = stream->num;
    if (flags & AV_PKT_FLAG_KEY)
        val |= ASF_PL_FLAG_KEY_FRAME;
    avio_w8(pb, val);

    avio_w8(pb, stream->seq);    // Media object number
    avio_wl32(pb, m_obj_offset); // Offset Into Media Object

    // Replicated Data shall be at least 8 bytes long.
    // The first 4 bytes of data shall contain the
    // Size of the Media Object that the payload belongs to.
    // The next 4 bytes of data shall contain the
    // Presentation Time for the media object that the payload belongs to.
    avio_w8(pb, ASF_PAYLOAD_REPLICATED_DATA_LENGTH);

    avio_wl32(pb, m_obj_size);                   // Replicated Data - Media Object Size
    avio_wl32(pb, (uint32_t) presentation_time); // Replicated Data - Presentation Time

    if (asf->multi_payloads_present) {
        avio_wl16(pb, payload_len); // payload length
    }
}
 
/* Split one frame (one ASF media object) into payloads, appending them to
 * the packet under construction and flushing packets as they fill up. */
static void put_frame(AVFormatContext *s, ASFStream *stream, AVStream *avst,
                      int64_t timestamp, const uint8_t *buf,
                      int m_obj_size, int flags)
{
    ASFContext *asf = s->priv_data;
    int m_obj_offset, payload_len, frag_len1;

    m_obj_offset = 0;
    while (m_obj_offset < m_obj_size) {
        payload_len = m_obj_size - m_obj_offset;
        if (asf->packet_timestamp_start == -1) {
            /* fresh packet: choose single- vs multi-payload layout based on
             * whether the remaining data fits alongside other payloads */
            asf->multi_payloads_present = (payload_len < MULTI_PAYLOAD_CONSTANT);

            asf->packet_size_left = PACKET_SIZE;
            if (asf->multi_payloads_present) {
                frag_len1 = MULTI_PAYLOAD_CONSTANT - 1;
            } else {
                frag_len1 = SINGLE_PAYLOAD_DATA_LENGTH;
            }
            asf->packet_timestamp_start = timestamp;
        } else {
            // multi payloads
            frag_len1 = asf->packet_size_left -
                        PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS -
                        PACKET_HEADER_MIN_SIZE - 1;

            /* never split an audio frame across packets: flush and retry */
            if (frag_len1 < payload_len &&
                avst->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                flush_packet(s);
                continue;
            }
        }
        if (frag_len1 > 0) {
            if (payload_len > frag_len1)
                payload_len = frag_len1;
            else if (payload_len == (frag_len1 - 1))
                payload_len = frag_len1 - 2; // additional byte need to put padding length

            put_payload_header(s, stream, timestamp + PREROLL_TIME,
                               m_obj_size, m_obj_offset, payload_len, flags);
            avio_write(&asf->pb, buf, payload_len);

            if (asf->multi_payloads_present)
                asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS);
            else
                asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD);
            asf->packet_timestamp_end = timestamp;

            asf->packet_nb_payloads++;
        } else {
            payload_len = 0;
        }
        m_obj_offset += payload_len;
        buf += payload_len;

        /* flush immediately for single-payload packets, or when no room is
         * left for another payload header */
        if (!asf->multi_payloads_present)
            flush_packet(s);
        else if (asf->packet_size_left <= (PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS + PACKET_HEADER_MIN_SIZE + 1))
            flush_packet(s);
    }
    stream->seq++;
}
 
/* Record seek-index entries for every whole second between the previously
 * indexed second and start_sec, growing the index array as needed.
 * Returns 0 on success, a negative AVERROR code on allocation failure. */
static int update_index(AVFormatContext *s, int start_sec,
                        uint32_t packet_number, uint16_t packet_count,
                        uint64_t packet_offset)
{
    ASFContext *asf = s->priv_data;

    if (start_sec > asf->next_start_sec) {
        int i;

        if (!asf->next_start_sec) {
            /* first indexed keyframe also covers all leading seconds */
            asf->next_packet_number = packet_number;
            asf->next_packet_count = packet_count;
            asf->next_packet_offset = packet_offset;
        }

        if (start_sec > asf->nb_index_memory_alloc) {
            int err;
            /* round the new capacity up to a multiple of ASF_INDEX_BLOCK */
            asf->nb_index_memory_alloc = (start_sec + ASF_INDEX_BLOCK) & ~(ASF_INDEX_BLOCK - 1);
            if ((err = av_reallocp_array(&asf->index_ptr,
                                         asf->nb_index_memory_alloc,
                                         sizeof(*asf->index_ptr))) < 0) {
                asf->nb_index_memory_alloc = 0;
                return err;
            }
        }
        /* fill the gap with the data of the previous keyframe */
        for (i = asf->next_start_sec; i < start_sec; i++) {
            asf->index_ptr[i].packet_number = asf->next_packet_number;
            asf->index_ptr[i].packet_count = asf->next_packet_count;
            asf->index_ptr[i].send_time = asf->next_start_sec * INT64_C(10000000);
            asf->index_ptr[i].offset = asf->next_packet_offset;

        }
    }
    asf->maximum_packet = FFMAX(asf->maximum_packet, packet_count);
    asf->next_packet_number = packet_number;
    asf->next_packet_count = packet_count;
    asf->next_packet_offset = packet_offset;
    asf->next_start_sec = start_sec;

    return 0;
}
 
/* Mux one packet: update the total duration, emit the frame as ASF
 * payload(s) and, for seekable output, index each keyframe's position. */
static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ASFContext *asf = s->priv_data;
    AVIOContext *pb = s->pb;
    ASFStream *stream;
    AVCodecContext *codec;
    uint32_t packet_number;
    int64_t pts;
    int start_sec;
    int flags = pkt->flags;
    int ret;
    uint64_t offset = avio_tell(pb);

    codec = s->streams[pkt->stream_index]->codec;
    stream = &asf->streams[pkt->stream_index];

    /* audio frames are all "key"; clear the flag so they are not indexed */
    if (codec->codec_type == AVMEDIA_TYPE_AUDIO)
        flags &= ~AV_PKT_FLAG_KEY;

    pts = (pkt->pts != AV_NOPTS_VALUE) ? pkt->pts : pkt->dts;
    av_assert0(pts != AV_NOPTS_VALUE);
    pts *= 10000; /* ms -> 100ns units */
    asf->duration = FFMAX(asf->duration, pts + pkt->duration * 10000);

    packet_number = asf->nb_packets;
    put_frame(s, stream, s->streams[pkt->stream_index],
              pkt->dts, pkt->data, pkt->size, flags);

    /* index position in whole seconds, rounded up */
    start_sec = (int)((PREROLL_TIME * 10000 + pts + ASF_INDEXED_INTERVAL - 1)
              / ASF_INDEXED_INTERVAL);

    /* check index */
    if ((!asf->is_streamed) && (flags & AV_PKT_FLAG_KEY)) {
        /* packets emitted for this frame by put_frame() */
        uint16_t packet_count = asf->nb_packets - packet_number;
        ret = update_index(s, start_sec, packet_number, packet_count, offset);
        if (ret < 0)
            return ret;
    }
    asf->end_sec = start_sec;

    return 0;
}
 
/* Append the ASF Simple Index object: one (packet number, packet count)
 * pair per indexed second.  Always returns 0. */
static int asf_write_index(AVFormatContext *s, ASFIndex *index,
                           uint16_t max, uint32_t count)
{
    AVIOContext *pb = s->pb;
    uint32_t i;

    ff_put_guid(pb, &ff_asf_simple_index_header);
    /* object size = 24-byte object header + GUID + interval + max + count
     * + the fixed-size entries */
    avio_wl64(pb, 24 + 16 + 8 + 4 + 4 + (4 + 2) * count);
    ff_put_guid(pb, &ff_asf_my_guid);
    avio_wl64(pb, ASF_INDEXED_INTERVAL);
    avio_wl32(pb, max);
    avio_wl32(pb, count);

    for (i = 0; i < count; i++) {
        avio_wl32(pb, index[i].packet_number);
        avio_wl16(pb, index[i].packet_count);
    }

    return 0;
}
 
/* Finish the file: flush any pending packet, append the simple index (for
 * seekable output) and rewrite the header with the final sizes; for
 * streamed output emit an end-of-stream chunk instead. */
static int asf_write_trailer(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    int64_t file_size, data_size;
    int ret;

    /* flush the current packet */
    if (asf->pb.buf_ptr > asf->pb.buffer)
        flush_packet(s);

    /* write index */
    data_size = avio_tell(s->pb);
    if (!asf->is_streamed && asf->next_start_sec) {
        /* pad the index up to the last written packet's second */
        if ((ret = update_index(s, asf->end_sec + 1, 0, 0, 0)) < 0)
            return ret;
        asf_write_index(s, asf->index_ptr, asf->maximum_packet, asf->next_start_sec);
    }
    avio_flush(s->pb);

    if (asf->is_streamed || !s->pb->seekable) {
        put_chunk(s, 0x4524, 0, 0); /* end of stream */
    } else {
        /* rewrite an updated header */
        file_size = avio_tell(s->pb);
        avio_seek(s->pb, 0, SEEK_SET);
        asf_write_header1(s, file_size, data_size - asf->data_offset);
    }

    av_freep(&asf->index_ptr);
    return 0;
}
 
#if CONFIG_ASF_MUXER
/* Seekable ASF file muxer (writes a simple index and rewrites the header
 * with final sizes in the trailer). */
AVOutputFormat ff_asf_muxer = {
    .name           = "asf",
    .long_name      = NULL_IF_CONFIG_SMALL("ASF (Advanced / Active Streaming Format)"),
    .mime_type      = "video/x-ms-asf",
    .extensions     = "asf,wmv,wma",
    .priv_data_size = sizeof(ASFContext),
    .audio_codec    = AV_CODEC_ID_WMAV2,
    .video_codec    = AV_CODEC_ID_MSMPEG4V3,
    .write_header   = asf_write_header,
    .write_packet   = asf_write_packet,
    .write_trailer  = asf_write_trailer,
    .flags          = AVFMT_GLOBALHEADER,
    .codec_tag      = (const AVCodecTag * const []) {
        codec_asf_bmp_tags, ff_codec_bmp_tags, ff_codec_wav_tags, 0
    },
};
#endif /* CONFIG_ASF_MUXER */
 
#if CONFIG_ASF_STREAM_MUXER
/* Streaming ASF muxer: identical to ff_asf_muxer except the header hook
 * sets is_streamed, wrapping output in chunks and skipping the index. */
AVOutputFormat ff_asf_stream_muxer = {
    .name           = "asf_stream",
    .long_name      = NULL_IF_CONFIG_SMALL("ASF (Advanced / Active Streaming Format)"),
    .mime_type      = "video/x-ms-asf",
    .extensions     = "asf,wmv,wma",
    .priv_data_size = sizeof(ASFContext),
    .audio_codec    = AV_CODEC_ID_WMAV2,
    .video_codec    = AV_CODEC_ID_MSMPEG4V3,
    .write_header   = asf_write_stream_header,
    .write_packet   = asf_write_packet,
    .write_trailer  = asf_write_trailer,
    .flags          = AVFMT_GLOBALHEADER,
    .codec_tag      = (const AVCodecTag * const []) {
        codec_asf_bmp_tags, ff_codec_bmp_tags, ff_codec_wav_tags, 0
    },
};
#endif /* CONFIG_ASF_STREAM_MUXER */
/contrib/sdk/sources/ffmpeg/libavformat/assdec.c
0,0 → 1,169
/*
* SSA/ASS demuxer
* Copyright (c) 2008 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
#include "libavcodec/internal.h"
#include "libavutil/bprint.h"
 
/* Demuxer private state: the queue of subtitle events parsed up front. */
typedef struct ASSContext{
    FFDemuxSubtitlesQueue q; ///< all events, filled during read_header
}ASSContext;
 
/* Probe: accept the "[Script Info]" signature either at the very start of
 * the buffer or right after a 3-byte UTF-8 BOM. */
static int ass_probe(AVProbeData *p)
{
    static const char header[] = "[Script Info]";
    const size_t hlen = sizeof(header) - 1;

    if (!memcmp(p->buf, header, hlen) || !memcmp(p->buf + 3, header, hlen))
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/* Release all queued subtitle events. */
static int ass_read_close(AVFormatContext *s)
{
    ASSContext *ctx = s->priv_data;

    ff_subtitles_queue_clean(&ctx->q);
    return 0;
}
 
/* Parse the start/end timestamps of a dialogue line.  The first comma-free
 * field (e.g. "Dialogue: 0") is skipped; both timestamps are H:MM:SS.CC
 * with centisecond precision.  Returns 0 and fills *start (centiseconds)
 * and *duration on success, -1 when the line does not match. */
static int read_ts(const uint8_t *p, int64_t *start, int *duration)
{
    int hh1, mm1, ss1, ms1;
    int hh2, mm2, ss2, ms2;

    if (sscanf((const char *)p, "%*[^,],%d:%d:%d%*c%d,%d:%d:%d%*c%d",
               &hh1, &mm1, &ss1, &ms1,
               &hh2, &mm2, &ss2, &ms2) != 8)
        return -1;

    *start    = (hh1 * 3600LL + mm1 * 60LL + ss1) * 100LL + ms1;
    *duration = (hh2 * 3600LL + mm2 * 60LL + ss2) * 100LL + ms2 - *start;
    return 0;
}
 
/* Read one line (newline included) from pb into buf, stopping on '\n' or a
 * NUL byte (end of input).  Returns the stream position where the line
 * started. */
static int64_t get_line(AVBPrint *buf, AVIOContext *pb)
{
    int64_t line_start = avio_tell(pb);
    char c;

    av_bprint_clear(buf);
    while ((c = avio_r8(pb))) {
        av_bprint_chars(buf, c, 1);
        if (c == '\n')
            break;
    }
    return line_start;
}
 
/* Parse the whole file up front: everything before the first timed event
 * (plus the "[Events]" section header and its format line) becomes codec
 * extradata, and each dialogue line is queued as one subtitle packet. */
static int ass_read_header(AVFormatContext *s)
{
    ASSContext *ass = s->priv_data;
    AVBPrint header, line;
    int header_remaining, res = 0;
    AVStream *st;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 100); /* centisecond time base */
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id= AV_CODEC_ID_SSA;

    header_remaining= INT_MAX;

    av_bprint_init(&header, 0, AV_BPRINT_SIZE_UNLIMITED);
    av_bprint_init(&line, 0, AV_BPRINT_SIZE_UNLIMITED);

    for (;;) {
        int64_t pos = get_line(&line, s->pb);

        if (!line.str[0]) // EOF
            break;

        /* after "[Events]" only one more line (the Format: line) still
         * belongs to the header; any other section restarts header mode */
        if (!memcmp(line.str, "[Events]", 8))
            header_remaining= 2;
        else if (line.str[0]=='[')
            header_remaining= INT_MAX;

        if (header_remaining) {
            av_bprintf(&header, "%s", line.str);
            header_remaining--;
        } else {
            int64_t ts_start = AV_NOPTS_VALUE;
            int duration = -1;
            AVPacket *sub;

            if (read_ts(line.str, &ts_start, &duration) < 0)
                continue; /* skip malformed event lines */
            sub = ff_subtitles_queue_insert(&ass->q, line.str, line.len, 0);
            if (!sub) {
                res = AVERROR(ENOMEM);
                goto end;
            }
            sub->pos = pos;
            sub->pts = ts_start;
            sub->duration = duration;
        }
    }

    av_bprint_finalize(&line, NULL);

    res = avpriv_bprint_to_extradata(st->codec, &header);
    if (res < 0)
        goto end;

    ff_subtitles_queue_finalize(&ass->q);

end:
    return res;
}
 
/* Hand out the next queued subtitle event. */
static int ass_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    ASSContext *ctx = s->priv_data;

    return ff_subtitles_queue_read_packet(&ctx->q, pkt);
}
 
/* Seek within the pre-parsed event queue. */
static int ass_read_seek(AVFormatContext *s, int stream_index,
                         int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    ASSContext *ctx = s->priv_data;

    return ff_subtitles_queue_seek(&ctx->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* SSA/ASS subtitle demuxer registration. */
AVInputFormat ff_ass_demuxer = {
    .name           = "ass",
    .long_name      = NULL_IF_CONFIG_SMALL("SSA (SubStation Alpha) subtitle"),
    .priv_data_size = sizeof(ASSContext),
    .read_probe     = ass_probe,
    .read_header    = ass_read_header,
    .read_packet    = ass_read_packet,
    .read_close     = ass_read_close,
    .read_seek2     = ass_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/assenc.c
0,0 → 1,116
/*
* SSA/ASS muxer
* Copyright (c) 2008 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
 
/* Muxer state. */
typedef struct ASSContext{
    unsigned int extra_index; // bytes of extradata already emitted by write_header()
    int write_ts; // 0: ssa (timing in payload), 1: ass (matroska like)
}ASSContext;
 
/**
 * Write the ASS/SSA script header.
 *
 * Copies the stream's extradata to the output line by line, stopping after
 * the line that follows "[Events]" so that dialogue lines produced by
 * write_packet() land inside the [Events] section.  The number of bytes
 * consumed is kept in ASSContext.extra_index; write_trailer() emits the
 * remainder.
 *
 * @return 0 on success, -1 unless there is exactly one ASS/SSA stream
 */
static int write_header(AVFormatContext *s)
{
    ASSContext *ass = s->priv_data;
    AVCodecContext *avctx= s->streams[0]->codec;
    uint8_t *last= NULL;

    if (s->nb_streams != 1 || (avctx->codec_id != AV_CODEC_ID_SSA &&
                               avctx->codec_id != AV_CODEC_ID_ASS)) {
        av_log(s, AV_LOG_ERROR, "Exactly one ASS/SSA stream is needed.\n");
        return -1;
    }
    /* ASS packets carry Matroska-style timing that write_packet() must
     * rewrite; SSA packets already contain timing in their payload. */
    ass->write_ts = avctx->codec_id == AV_CODEC_ID_ASS;
    avpriv_set_pts_info(s->streams[0], 64, 1, 100); /* timestamps in 1/100 s */

    while(ass->extra_index < avctx->extradata_size){
        uint8_t *p = avctx->extradata + ass->extra_index;
        uint8_t *end= strchr(p, '\n');
        if(!end) end= avctx->extradata + avctx->extradata_size;
        else end++;

        avio_write(s->pb, p, end-p);
        ass->extra_index += end-p;

        /* 'last' trails by one line, so this breaks after writing the line
         * that immediately follows "[Events]" (the Format: line). */
        if(last && !memcmp(last, "[Events]", 8))
            break;
        last=p;
    }

    avio_flush(s->pb);

    return 0;
}
 
/**
 * Write one dialogue event.
 *
 * For ASS input (write_ts set) the payload is "ReadOrder,Layer,...": the
 * ReadOrder field is stripped, the layer re-printed, and the start/end
 * times synthesized from pkt->pts/pkt->duration (in 1/100 s, see
 * write_header) in H:MM:SS.CC form.  SSA payloads are copied verbatim.
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ASSContext *ass = s->priv_data;

    if (ass->write_ts) {
        long int layer;
        char *p;
        int64_t start = pkt->pts;
        int64_t end = start + pkt->duration;
        int hh1, mm1, ss1, ms1;
        int hh2, mm2, ss2, ms2;

        p = pkt->data + strcspn(pkt->data, ",") + 1; // skip ReadOrder
        layer = strtol(p, &p, 10);
        if (*p == ',')
            p++;
        /* split the 1/100 s timestamps into hours/minutes/seconds/centisec */
        hh1 = (int)(start / 360000); mm1 = (int)(start / 6000) % 60;
        hh2 = (int)(end / 360000); mm2 = (int)(end / 6000) % 60;
        ss1 = (int)(start / 100) % 60; ms1 = (int)(start % 100);
        ss2 = (int)(end / 100) % 60; ms2 = (int)(end % 100);
        /* the printed time field has a single hour digit: clamp at 9:59:59.99 */
        if (hh1 > 9) hh1 = 9, mm1 = 59, ss1 = 59, ms1 = 99;
        if (hh2 > 9) hh2 = 9, mm2 = 59, ss2 = 59, ms2 = 99;
        avio_printf(s->pb, "Dialogue: %ld,%d:%02d:%02d.%02d,%d:%02d:%02d.%02d,%s\r\n",
                    layer, hh1, mm1, ss1, ms1, hh2, mm2, ss2, ms2, p);
    } else {
        avio_write(s->pb, pkt->data, pkt->size);
    }

    return 0;
}
 
/* Emit whatever extradata write_header() did not consume (the script
 * sections that follow [Events]). */
static int write_trailer(AVFormatContext *s)
{
    ASSContext     *ass = s->priv_data;
    AVCodecContext *c   = s->streams[0]->codec;
    int             remaining = c->extradata_size - ass->extra_index;

    avio_write(s->pb, c->extradata + ass->extra_index, remaining);

    return 0;
}
 
/* libavformat registration entry for the ASS/SSA subtitle muxer. */
AVOutputFormat ff_ass_muxer = {
    .name           = "ass",
    .long_name      = NULL_IF_CONFIG_SMALL("SSA (SubStation Alpha) subtitle"),
    .mime_type      = "text/x-ssa",
    .extensions     = "ass,ssa",
    .priv_data_size = sizeof(ASSContext),
    .subtitle_codec = AV_CODEC_ID_SSA,
    .write_header   = write_header,
    .write_packet   = write_packet,
    .write_trailer  = write_trailer,
    .flags          = AVFMT_GLOBALHEADER | AVFMT_NOTIMESTAMPS | AVFMT_TS_NONSTRICT,
};
/contrib/sdk/sources/ffmpeg/libavformat/ast.c
0,0 → 1,29
/*
* AST common code
* Copyright (c) 2012 James Almer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
 
/* AST codec tags: 0 = ADPCM AFC, 1 = planar big-endian 16-bit PCM. */
const AVCodecTag ff_codec_ast_tags[] = {
    { AV_CODEC_ID_ADPCM_AFC,        0 },
    { AV_CODEC_ID_PCM_S16BE_PLANAR, 1 },
    { AV_CODEC_ID_NONE,             0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/ast.h
0,0 → 1,30
/*
* AST common code
* Copyright (c) 2012 James Almer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_AST_H
#define AVFORMAT_AST_H
 
#include "avformat.h"
#include "internal.h"
 
extern const AVCodecTag ff_codec_ast_tags[];
 
#endif /* AVFORMAT_AST_H */
/contrib/sdk/sources/ffmpeg/libavformat/astdec.c
0,0 → 1,119
/*
* AST demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "ast.h"
 
/* Probe: 'STRM' magic plus nonzero bit depth (offset 10), channel count
 * (offset 12) and sample rate (offset 16) — see ast_read_header(). */
static int ast_probe(AVProbeData *p)
{
    int looks_like_ast = AV_RL32(p->buf)      == MKTAG('S','T','R','M') &&
                         AV_RB16(p->buf + 10) != 0 &&
                         AV_RB16(p->buf + 12) != 0 &&
                         AV_RB32(p->buf + 16) != 0;

    return looks_like_ast ? AVPROBE_SCORE_MAX / 3 * 2 : 0;
}
 
/**
 * Parse the 64-byte STRM header and set up the single audio stream.
 *
 * Big-endian fields as read here: codec tag at offset 8, bit depth at 10,
 * channel count at 12, sample rate at 16, total sample count at 20.
 * Only 16-bit depth is supported.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on bad depth/channels/rate,
 *         AVERROR(ENOMEM) if the stream cannot be allocated
 */
static int ast_read_header(AVFormatContext *s)
{
    int depth;
    AVStream *st;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    avio_skip(s->pb, 8); /* 'STRM' tag + file size */
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = ff_codec_get_id(ff_codec_ast_tags, avio_rb16(s->pb));

    depth = avio_rb16(s->pb);
    if (depth != 16) {
        avpriv_request_sample(s, "depth %d", depth);
        return AVERROR_INVALIDDATA;
    }

    st->codec->channels = avio_rb16(s->pb);
    if (!st->codec->channels)
        return AVERROR_INVALIDDATA;

    if (st->codec->channels == 2)
        st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    else if (st->codec->channels == 4)
        st->codec->channel_layout = AV_CH_LAYOUT_4POINT0;

    avio_skip(s->pb, 2);
    st->codec->sample_rate = avio_rb32(s->pb);
    if (st->codec->sample_rate <= 0)
        return AVERROR_INVALIDDATA;
    st->start_time = 0;
    st->duration   = avio_rb32(s->pb); /* total number of samples */
    avio_skip(s->pb, 40);              /* remainder of the 64-byte header */
    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

    return 0;
}
 
/**
 * Read one chunk: 4-byte type tag, 4-byte big-endian per-channel payload
 * size, 24 bytes of padding, then the payload.  Only 'BLCK' chunks become
 * packets; anything else is skipped and flagged as invalid data.
 */
static int ast_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    uint32_t type, size;
    int64_t pos;
    int ret;

    if (url_feof(s->pb))
        return AVERROR_EOF;

    pos  = avio_tell(s->pb);
    type = avio_rl32(s->pb);
    size = avio_rb32(s->pb);
    /* stored size is per channel: guard the multiplication below */
    if (size > INT_MAX / s->streams[0]->codec->channels)
        return AVERROR_INVALIDDATA;

    size *= s->streams[0]->codec->channels;
    if ((ret = avio_skip(s->pb, 24)) < 0) // padding
        return ret;

    if (type == MKTAG('B','L','C','K')) {
        ret = av_get_packet(s->pb, pkt, size);
        pkt->stream_index = 0;
        pkt->pos = pos;
    } else {
        av_log(s, AV_LOG_ERROR, "unknown chunk %x\n", type);
        avio_skip(s->pb, size);
        ret = AVERROR_INVALIDDATA;
    }

    return ret;
}
 
/* libavformat registration entry for the AST demuxer. */
AVInputFormat ff_ast_demuxer = {
    .name           = "ast",
    .long_name      = NULL_IF_CONFIG_SMALL("AST (Audio Stream)"),
    .read_probe     = ast_probe,
    .read_header    = ast_read_header,
    .read_packet    = ast_read_packet,
    .extensions     = "ast",
    .flags          = AVFMT_GENERIC_INDEX,
    .codec_tag      = (const AVCodecTag* const []){ff_codec_ast_tags, 0},
};
/contrib/sdk/sources/ffmpeg/libavformat/astenc.c
0,0 → 1,214
/*
* AST muxer
* Copyright (c) 2012 James Almer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "ast.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
 
/* Muxer state: file offsets of header fields that ast_write_trailer()
 * patches, plus the user-supplied loop options. */
typedef struct ASTMuxContext {
    AVClass *class;
    int64_t  size;      // offset of the "file size minus header" field
    int64_t  samples;   // offset of the "number of samples" field
    int64_t  loopstart; // AVOption, milliseconds until CHECK_LOOP converts it
    int64_t  loopend;   // AVOption, milliseconds until CHECK_LOOP converts it
    int      fbs;       // per-channel size of the first written block
} ASTMuxContext;
 
/* Convert the loop##type option from milliseconds to samples (rounding
 * down) and reject results that do not fit the 32-bit header fields. */
#define CHECK_LOOP(type) \
    if (ast->loop ## type > 0) { \
        ast->loop ## type = av_rescale_rnd(ast->loop ## type, enc->sample_rate, 1000, AV_ROUND_DOWN); \
        if (ast->loop ## type < 0 || ast->loop ## type > UINT_MAX) { \
            av_log(s, AV_LOG_ERROR, "Invalid loop" #type " value\n"); \
            return AVERROR(EINVAL);  \
        } \
    }
 
/**
 * Write the 64-byte STRM header.
 *
 * Size-dependent fields (file size, sample count, loop points, first block
 * size) are written as zero placeholders and patched by ast_write_trailer()
 * once the values are known; their offsets are remembered in ASTMuxContext.
 *
 * @return 0 on success, a negative AVERROR code on invalid configuration
 */
static int ast_write_header(AVFormatContext *s)
{
    ASTMuxContext *ast = s->priv_data;
    AVIOContext *pb = s->pb;
    AVCodecContext *enc;
    unsigned int codec_tag;

    if (s->nb_streams == 1) {
        enc = s->streams[0]->codec;
    } else {
        av_log(s, AV_LOG_ERROR, "only one stream is supported\n");
        return AVERROR(EINVAL);
    }

    if (enc->codec_id == AV_CODEC_ID_ADPCM_AFC) {
        av_log(s, AV_LOG_ERROR, "muxing ADPCM AFC is not implemented\n");
        return AVERROR_PATCHWELCOME;
    }

    codec_tag = ff_codec_get_tag(ff_codec_ast_tags, enc->codec_id);
    if (!codec_tag) {
        av_log(s, AV_LOG_ERROR, "unsupported codec\n");
        return AVERROR(EINVAL);
    }

    if (ast->loopend > 0 && ast->loopstart >= ast->loopend) {
        av_log(s, AV_LOG_ERROR, "loopend can't be less or equal to loopstart\n");
        return AVERROR(EINVAL);
    }

    /* Convert milliseconds to samples */
    CHECK_LOOP(start)
    CHECK_LOOP(end)

    ffio_wfourcc(pb, "STRM");

    ast->size = avio_tell(pb); /* remember where the size field lives */
    avio_wb32(pb, 0); /* File size minus header */
    avio_wb16(pb, codec_tag);
    avio_wb16(pb, 16); /* Bit depth */
    avio_wb16(pb, enc->channels);
    avio_wb16(pb, 0); /* Loop flag */
    avio_wb32(pb, enc->sample_rate);

    ast->samples = avio_tell(pb); /* remember where the sample count lives */
    avio_wb32(pb, 0); /* Number of samples */
    avio_wb32(pb, 0); /* Loopstart */
    avio_wb32(pb, 0); /* Loopend */
    avio_wb32(pb, 0); /* Size of first block */

    /* Unknown */
    avio_wb32(pb, 0);
    avio_wl32(pb, 0x7F);
    avio_wb64(pb, 0);
    avio_wb64(pb, 0);
    avio_wb32(pb, 0);

    avio_flush(pb);

    return 0;
}
 
/* Emit one BLCK chunk: fourcc, per-channel block size, 24 zero bytes of
 * padding, then the raw sample data. */
static int ast_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ASTMuxContext  *ast = s->priv_data;
    AVCodecContext *enc = s->streams[0]->codec;
    AVIOContext    *pb  = s->pb;
    int block_size      = pkt->size / enc->channels;
    int i;

    /* remember the size of the very first block for the trailer */
    if (enc->frame_number == 1)
        ast->fbs = block_size;

    ffio_wfourcc(pb, "BLCK");
    avio_wb32(pb, block_size); /* Block size */

    /* padding: 24 zero bytes */
    for (i = 0; i < 3; i++)
        avio_wb64(pb, 0);

    avio_write(pb, pkt->data, pkt->size);

    return 0;
}
 
/**
 * Patch the header placeholders (sample count, loop points, first block
 * size, total size, loop flag) now that the full file has been written.
 * Requires a seekable output; otherwise the placeholders remain zero.
 */
static int ast_write_trailer(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    ASTMuxContext *ast = s->priv_data;
    AVCodecContext *enc = s->streams[0]->codec;
    int64_t file_size = avio_tell(pb);
    /* 64-byte header plus 32 bytes of chunk overhead per written block */
    int64_t samples = (file_size - 64 - (32 * enc->frame_number)) / enc->block_align; /* PCM_S16BE_PLANAR */

    av_log(s, AV_LOG_DEBUG, "total samples: %"PRId64"\n", samples);

    if (s->pb->seekable) {
        /* Number of samples */
        avio_seek(pb, ast->samples, SEEK_SET);
        avio_wb32(pb, samples);

        /* Loopstart if provided */
        if (ast->loopstart > 0) {
            if (ast->loopstart >= samples) {
                av_log(s, AV_LOG_WARNING, "Loopstart value is out of range and will be ignored\n");
                ast->loopstart = -1;
                avio_skip(pb, 4);
            } else
            avio_wb32(pb, ast->loopstart);
        } else
            avio_skip(pb, 4);

        /* Loopend if provided. Otherwise number of samples again */
        if (ast->loopend && ast->loopstart >= 0) {
            if (ast->loopend > samples) {
                av_log(s, AV_LOG_WARNING, "Loopend value is out of range and will be ignored\n");
                ast->loopend = samples;
            }
            avio_wb32(pb, ast->loopend);
        } else {
            avio_wb32(pb, samples);
        }

        /* Size of first block */
        avio_wb32(pb, ast->fbs);

        /* File size minus header */
        avio_seek(pb, ast->size, SEEK_SET);
        avio_wb32(pb, file_size - 64);

        /* Loop flag */
        if (ast->loopstart >= 0) {
            avio_skip(pb, 6); /* jump from the size field to the loop flag */
            avio_wb16(pb, 0xFFFF);
        }

        avio_seek(pb, file_size, SEEK_SET);
        avio_flush(pb);
    }
    return 0;
}
 
#define OFFSET(obj) offsetof(ASTMuxContext, obj)
/* User-settable muxer options (converted from ms to samples by CHECK_LOOP). */
static const AVOption options[] = {
    { "loopstart", "Loopstart position in milliseconds.", OFFSET(loopstart), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "loopend",   "Loopend position in milliseconds.",   OFFSET(loopend),   AV_OPT_TYPE_INT64, { .i64 = 0 },   0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};
 
/* AVClass so the options above are reachable through the AVOption API. */
static const AVClass ast_muxer_class = {
    .class_name = "AST muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* libavformat registration entry for the AST muxer. */
AVOutputFormat ff_ast_muxer = {
    .name              = "ast",
    .long_name         = NULL_IF_CONFIG_SMALL("AST (Audio Stream)"),
    .extensions        = "ast",
    .priv_data_size    = sizeof(ASTMuxContext),
    .audio_codec       = AV_CODEC_ID_PCM_S16BE_PLANAR,
    .video_codec       = AV_CODEC_ID_NONE,
    .write_header      = ast_write_header,
    .write_packet      = ast_write_packet,
    .write_trailer     = ast_write_trailer,
    .priv_class        = &ast_muxer_class,
    .codec_tag         = (const AVCodecTag* const []){ff_codec_ast_tags, 0},
};
/contrib/sdk/sources/ffmpeg/libavformat/au.c
0,0 → 1,225
/*
* AU muxer and demuxer
* Copyright (c) 2001 Fabrice Bellard
*
* first version by Francois Revol <revol@free.fr>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/*
* Reference documents:
* http://www.opengroup.org/public/pubs/external/auformat.html
* http://www.goice.co.jp/member/mo/formats/au.html
*/
 
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
#include "pcm.h"
#include "libavutil/avassert.h"
 
/* if we don't know the size in advance */
#define AU_UNKNOWN_SIZE ((uint32_t)(~0))
/* the specification requires an annotation field of at least eight bytes */
#define AU_HEADER_SIZE (24+8)
 
/* Mapping of Sun AU encoding IDs to FFmpeg codec IDs.  IDs 23, 25 and 26
 * are all little-endian G.726 at different bit depths; au_read_header()
 * derives the actual bits per sample from the ID. */
static const AVCodecTag codec_au_tags[] = {
    { AV_CODEC_ID_PCM_MULAW,    1 },
    { AV_CODEC_ID_PCM_S8,       2 },
    { AV_CODEC_ID_PCM_S16BE,    3 },
    { AV_CODEC_ID_PCM_S24BE,    4 },
    { AV_CODEC_ID_PCM_S32BE,    5 },
    { AV_CODEC_ID_PCM_F32BE,    6 },
    { AV_CODEC_ID_PCM_F64BE,    7 },
    { AV_CODEC_ID_ADPCM_G726LE, 23 },
    { AV_CODEC_ID_ADPCM_G722,24 },
    { AV_CODEC_ID_ADPCM_G726LE, 25 },
    { AV_CODEC_ID_ADPCM_G726LE, 26 },
    { AV_CODEC_ID_PCM_ALAW,     27 },
    { AV_CODEC_ID_ADPCM_G726LE, MKBETAG('7','2','6','2') },
    { AV_CODEC_ID_NONE, 0 },
};
 
#if CONFIG_AU_DEMUXER
 
/* Probe: a Sun AU file starts with the ".snd" magic. */
static int au_probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;

    return (b[0] == '.' && b[1] == 's' && b[2] == 'n' && b[3] == 'd')
           ? AVPROBE_SCORE_MAX : 0;
}
 
#define BLOCK_SIZE 1024
 
/**
 * Parse the Sun AU (.snd) header and create the single audio stream.
 *
 * Layout after the magic (all big endian): header size, data size,
 * encoding ID, sample rate, channel count; any header bytes beyond the
 * fixed 24 are annotation and are skipped.
 *
 * @return 0 on success, a negative AVERROR code on malformed or
 *         unsupported headers
 */
static int au_read_header(AVFormatContext *s)
{
    int size, data_size = 0;
    unsigned int tag;
    AVIOContext *pb = s->pb;
    unsigned int id, channels, rate;
    int bps;
    enum AVCodecID codec;
    AVStream *st;

    tag = avio_rl32(pb);
    if (tag != MKTAG('.', 's', 'n', 'd'))
        return AVERROR_INVALIDDATA;
    size = avio_rb32(pb); /* header size */
    data_size = avio_rb32(pb); /* data size in bytes */

    if (data_size < 0 && data_size != AU_UNKNOWN_SIZE) {
        av_log(s, AV_LOG_ERROR, "Invalid negative data size '%d' found\n", data_size);
        return AVERROR_INVALIDDATA;
    }

    id = avio_rb32(pb);
    rate = avio_rb32(pb);
    channels = avio_rb32(pb);

    if (size > 24) {
        /* skip unused data */
        avio_skip(pb, size - 24);
    }

    codec = ff_codec_get_id(codec_au_tags, id);

    if (codec == AV_CODEC_ID_NONE) {
        avpriv_request_sample(s, "unknown or unsupported codec tag: %u", id);
        return AVERROR_PATCHWELCOME;
    }

    bps = av_get_bits_per_sample(codec);
    if (codec == AV_CODEC_ID_ADPCM_G726LE) {
        /* G.726 bit depth is encoded in the tag itself */
        if (id == MKBETAG('7','2','6','2')) {
            bps = 2;
        } else {
            const uint8_t bpcss[] = {4, 0, 3, 5}; /* ids 23..26 (24 = G.722, unused here) */
            av_assert0(id >= 23 && id < 23 + 4);
            bps = bpcss[id - 23];
        }
    } else if (!bps) {
        avpriv_request_sample(s, "Unknown bits per sample");
        return AVERROR_PATCHWELCOME;
    }

    if (channels == 0 || channels >= INT_MAX / (BLOCK_SIZE * bps >> 3)) {
        av_log(s, AV_LOG_ERROR, "Invalid number of channels %u\n", channels);
        return AVERROR_INVALIDDATA;
    }

    if (rate == 0 || rate > INT_MAX) {
        av_log(s, AV_LOG_ERROR, "Invalid sample rate: %u\n", rate);
        return AVERROR_INVALIDDATA;
    }

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_tag   = id;
    st->codec->codec_id    = codec;
    st->codec->channels    = channels;
    st->codec->sample_rate = rate;
    st->codec->bits_per_coded_sample = bps;
    st->codec->bit_rate    = channels * rate * bps;
    st->codec->block_align = FFMAX(bps * st->codec->channels / 8, 1);
    /* duration in samples = data bits / (channels * bits per sample) */
    if (data_size != AU_UNKNOWN_SIZE)
        st->duration = (((int64_t)data_size)<<3) / (st->codec->channels * (int64_t)bps);

    st->start_time = 0;
    avpriv_set_pts_info(st, 64, 1, rate);

    return 0;
}
 
/* libavformat registration entry for the Sun AU demuxer. */
AVInputFormat ff_au_demuxer = {
    .name        = "au",
    .long_name   = NULL_IF_CONFIG_SMALL("Sun AU"),
    .read_probe  = au_probe,
    .read_header = au_read_header,
    .read_packet = ff_pcm_read_packet,
    .read_seek   = ff_pcm_read_seek,
    .codec_tag   = (const AVCodecTag* const []) { codec_au_tags, 0 },
};
 
#endif /* CONFIG_AU_DEMUXER */
 
#if CONFIG_AU_MUXER
 
#include "rawenc.h"
 
/**
 * Write the fixed 32-byte Sun AU header (24-byte header plus the 8-byte
 * annotation field the spec requires).  The data size is written as
 * AU_UNKNOWN_SIZE and patched by au_write_trailer() when possible.
 *
 * @return 0 on success, AVERROR(EINVAL) on a bad stream layout or an
 *         encoding that has no AU tag
 */
static int au_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVCodecContext *enc;

    /* Bug fix: validate the stream count before touching s->streams[0].
     * The original dereferenced s->streams[0]->codec first, which reads
     * past an empty streams array when nb_streams == 0. */
    if (s->nb_streams != 1) {
        av_log(s, AV_LOG_ERROR, "only one stream is supported\n");
        return AVERROR(EINVAL);
    }
    enc = s->streams[0]->codec;

    enc->codec_tag = ff_codec_get_tag(codec_au_tags, enc->codec_id);
    if (!enc->codec_tag) {
        av_log(s, AV_LOG_ERROR, "unsupported codec\n");
        return AVERROR(EINVAL);
    }

    ffio_wfourcc(pb, ".snd");                   /* magic number */
    avio_wb32(pb, AU_HEADER_SIZE);              /* header size */
    avio_wb32(pb, AU_UNKNOWN_SIZE);             /* data size */
    avio_wb32(pb, enc->codec_tag);              /* codec ID */
    avio_wb32(pb, enc->sample_rate);
    avio_wb32(pb, enc->channels);
    avio_wb64(pb, 0); /* annotation field */
    avio_flush(pb);

    return 0;
}
 
/* Patch the data-size field (file offset 8) once the final size is known,
 * provided the output is seekable and the size fits in 32 bits. */
static int au_write_trailer(AVFormatContext *s)
{
    AVIOContext *pb   = s->pb;
    int64_t     total = avio_tell(pb);

    if (pb->seekable && total < INT32_MAX) {
        avio_seek(pb, 8, SEEK_SET);
        avio_wb32(pb, (uint32_t)(total - AU_HEADER_SIZE));
        avio_seek(pb, total, SEEK_SET);
        avio_flush(pb);
    }

    return 0;
}
 
/* libavformat registration entry for the Sun AU muxer. */
AVOutputFormat ff_au_muxer = {
    .name          = "au",
    .long_name     = NULL_IF_CONFIG_SMALL("Sun AU"),
    .mime_type     = "audio/basic",
    .extensions    = "au",
    .audio_codec   = AV_CODEC_ID_PCM_S16BE,
    .video_codec   = AV_CODEC_ID_NONE,
    .write_header  = au_write_header,
    .write_packet  = ff_raw_write_packet,
    .write_trailer = au_write_trailer,
    .codec_tag     = (const AVCodecTag* const []) { codec_au_tags, 0 },
};
 
#endif /* CONFIG_AU_MUXER */
/contrib/sdk/sources/ffmpeg/libavformat/audiointerleave.c
0,0 → 1,148
/*
* Audio Interleaving functions
*
* Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/fifo.h"
#include "libavutil/mathematics.h"
#include "avformat.h"
#include "audiointerleave.h"
#include "internal.h"
 
/* Release the per-stream FIFOs allocated by ff_audio_interleave_init(). */
void ff_audio_interleave_close(AVFormatContext *s)
{
    int n;

    for (n = 0; n < s->nb_streams; n++) {
        AVStream *stream            = s->streams[n];
        AudioInterleaveContext *aic = stream->priv_data;

        if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            av_fifo_free(aic->fifo);
    }
}
 
/**
 * Initialize one AudioInterleaveContext per audio stream.
 *
 * Each AVStream.priv_data must already point to storage whose first member
 * is an AudioInterleaveContext (see audiointerleave.h).
 *
 * @param samples_per_frame 0-terminated list of frame sizes (in samples)
 *                          to cycle through; must not be NULL
 * @param time_base         time base of the output audio packets
 * @return 0 on success, a negative value on error
 */
int ff_audio_interleave_init(AVFormatContext *s,
                             const int *samples_per_frame,
                             AVRational time_base)
{
    int i;

    if (!samples_per_frame)
        return -1;

    if (!time_base.num) {
        av_log(s, AV_LOG_ERROR, "timebase not set for audio interleave\n");
        return -1;
    }
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        AudioInterleaveContext *aic = st->priv_data;

        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            aic->sample_size = (st->codec->channels *
                                av_get_bits_per_sample(st->codec->codec_id)) / 8;
            if (!aic->sample_size) {
                av_log(s, AV_LOG_ERROR, "could not compute sample size\n");
                return -1;
            }
            aic->samples_per_frame = samples_per_frame;
            aic->samples           = aic->samples_per_frame;
            aic->time_base         = time_base;

            aic->fifo_size = 100 * *aic->samples;
            aic->fifo      = av_fifo_alloc(aic->fifo_size);
            /* Bug fix: the allocation result was previously unchecked,
             * leading to a NULL FIFO dereference later on OOM. */
            if (!aic->fifo)
                return AVERROR(ENOMEM);
        }
    }

    return 0;
}
 
/**
 * Cut one fixed-size audio packet out of the stream's FIFO.
 *
 * @param flush if set, a final short packet may be produced from whatever
 *              remains buffered
 * @return the packet size in bytes, 0 when not enough data is buffered yet,
 *         or AVERROR(ENOMEM)
 */
static int interleave_new_audio_packet(AVFormatContext *s, AVPacket *pkt,
                                       int stream_index, int flush)
{
    AVStream *st = s->streams[stream_index];
    AudioInterleaveContext *aic = st->priv_data;

    int size = FFMIN(av_fifo_size(aic->fifo), *aic->samples * aic->sample_size);
    /* without flush, only emit once the FIFO holds more than one frame */
    if (!size || (!flush && size == av_fifo_size(aic->fifo)))
        return 0;

    if (av_new_packet(pkt, size) < 0)
        return AVERROR(ENOMEM);
    av_fifo_generic_read(aic->fifo, pkt->data, size, NULL);

    pkt->dts = pkt->pts = aic->dts;
    pkt->duration = av_rescale_q(*aic->samples, st->time_base, aic->time_base);
    pkt->stream_index = stream_index;
    aic->dts += pkt->duration;

    /* advance through the 0-terminated samples_per_frame pattern, cycling */
    aic->samples++;
    if (!*aic->samples)
        aic->samples = aic->samples_per_frame;

    return size;
}
 
/**
 * Interleave callback for muxers that need fixed-size audio chunks.
 *
 * Audio payloads are appended to their stream's FIFO; other packets have
 * their pts/dts rewritten onto the decoded timeline and are queued
 * directly.  Buffered audio is then re-cut into frames via
 * interleave_new_audio_packet() and queued, and get_packet() picks the
 * next packet to output.
 */
int ff_audio_rechunk_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush,
                        int (*get_packet)(AVFormatContext *, AVPacket *, AVPacket *, int),
                        int (*compare_ts)(AVFormatContext *, AVPacket *, AVPacket *))
{
    int i;

    if (pkt) {
        AVStream *st = s->streams[pkt->stream_index];
        AudioInterleaveContext *aic = st->priv_data;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* grow the FIFO if this packet does not fit */
            unsigned new_size = av_fifo_size(aic->fifo) + pkt->size;
            if (new_size > aic->fifo_size) {
                if (av_fifo_realloc2(aic->fifo, new_size) < 0)
                    return -1;
                aic->fifo_size = new_size;
            }
            av_fifo_generic_write(aic->fifo, pkt->data, pkt->size, NULL);
        } else {
            int ret;
            // rewrite pts and dts to be decoded time line position
            pkt->pts = pkt->dts = aic->dts;
            aic->dts += pkt->duration;
            ret = ff_interleave_add_packet(s, pkt, compare_ts);
            if (ret < 0)
                return ret;
        }
        pkt = NULL;
    }

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            AVPacket new_pkt;
            int ret;
            /* drain as many full frames as the FIFO currently holds */
            while ((ret = interleave_new_audio_packet(s, &new_pkt, i, flush)) > 0) {
                ret = ff_interleave_add_packet(s, &new_pkt, compare_ts);
                if (ret < 0)
                    return ret;
            }
            if (ret < 0)
                return ret;
        }
    }

    return get_packet(s, out, NULL, flush);
}
/contrib/sdk/sources/ffmpeg/libavformat/audiointerleave.h
0,0 → 1,55
/*
* audio interleaving prototypes and declarations
*
* Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_AUDIOINTERLEAVE_H
#define AVFORMAT_AUDIOINTERLEAVE_H
 
#include "libavutil/fifo.h"
#include "avformat.h"
 
/* Per-stream state for ff_audio_rechunk_interleave(); must be the first
 * member of AVStream.priv_data (see the note on ff_audio_rechunk_interleave
 * below). */
typedef struct AudioInterleaveContext {
    AVFifoBuffer *fifo;             ///< queued audio data
    unsigned fifo_size;             ///< size of currently allocated FIFO
    uint64_t dts;                   ///< current dts
    int sample_size;                ///< size of one sample all channels included
    const int *samples_per_frame;   ///< must be 0-terminated
    const int *samples;             ///< current samples per frame, pointer to samples_per_frame
    AVRational time_base;           ///< time base of output audio packets
} AudioInterleaveContext;
 
int ff_audio_interleave_init(AVFormatContext *s, const int *samples_per_frame, AVRational time_base);
void ff_audio_interleave_close(AVFormatContext *s);
 
/**
* Rechunk audio PCM packets per AudioInterleaveContext->samples_per_frame
* and interleave them correctly.
* The first element of AVStream->priv_data must be AudioInterleaveContext
* when using this function.
*
* @param get_packet function will output a packet when streams are correctly interleaved.
* @param compare_ts function will compare AVPackets and decide interleaving order.
*/
int ff_audio_rechunk_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush,
int (*get_packet)(AVFormatContext *, AVPacket *, AVPacket *, int),
int (*compare_ts)(AVFormatContext *, AVPacket *, AVPacket *));
 
#endif /* AVFORMAT_AUDIOINTERLEAVE_H */
/contrib/sdk/sources/ffmpeg/libavformat/avc.c
0,0 → 1,193
/*
* AVC helper functions for muxers
* Copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@smartjog.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "avio.h"
#include "avc.h"
 
/**
 * Scan [p, end) for an Annex B start code (00 00 01), four bytes at a time.
 *
 * Returns a pointer to the first byte of the 00 00 01 sequence, or the
 * original 'end' when none is found (the end±3 adjustments below cancel
 * out on the no-match path).
 */
static const uint8_t *ff_avc_find_startcode_internal(const uint8_t *p, const uint8_t *end)
{
    /* byte-wise until p is 4-byte aligned */
    const uint8_t *a = p + 4 - ((intptr_t)p & 3);

    for (end -= 3; p < a && p < end; p++) {
        if (p[0] == 0 && p[1] == 0 && p[2] == 1)
            return p;
    }

    /* word-at-a-time: the mask expression is nonzero iff some byte of x
     * is zero, which is a prerequisite for a start code */
    for (end -= 3; p < end; p += 4) {
        uint32_t x = *(const uint32_t*)p;
//      if ((x - 0x01000100) & (~x) & 0x80008000) // little endian
//      if ((x - 0x00010001) & (~x) & 0x00800080) // big endian
        if ((x - 0x01010101) & (~x) & 0x80808080) { // generic
            if (p[1] == 0) {
                if (p[0] == 0 && p[2] == 1)
                    return p;
                if (p[2] == 0 && p[3] == 1)
                    return p+1;
            }
            if (p[3] == 0) {
                if (p[2] == 0 && p[4] == 1)
                    return p+2;
                if (p[4] == 0 && p[5] == 1)
                    return p+3;
            }
        }
    }

    /* byte-wise tail */
    for (end += 3; p < end; p++) {
        if (p[0] == 0 && p[1] == 0 && p[2] == 1)
            return p;
    }

    return end + 3;
}
 
/* Public wrapper: if the byte before the found 00 00 01 is also zero,
 * report the match one byte earlier (4-byte start code 00 00 00 01). */
const uint8_t *ff_avc_find_startcode(const uint8_t *p, const uint8_t *end){
    const uint8_t *sc = ff_avc_find_startcode_internal(p, end);

    if (p < sc && sc < end && sc[-1] == 0)
        sc--;
    return sc;
}
 
/**
 * Convert an Annex B (start-code delimited) H.264 stream to MP4/AVCC form:
 * each NAL unit is written to pb as a 32-bit big-endian length followed by
 * the NAL payload.
 *
 * @return total number of bytes written
 */
int ff_avc_parse_nal_units(AVIOContext *pb, const uint8_t *buf_in, int size)
{
    const uint8_t *p = buf_in;
    const uint8_t *end = p + size;
    const uint8_t *nal_start, *nal_end;

    size = 0;
    nal_start = ff_avc_find_startcode(p, end);
    for (;;) {
        /* skip the zero bytes and the 0x01 of the start code itself */
        while (nal_start < end && !*(nal_start++));
        if (nal_start == end)
            break;

        nal_end = ff_avc_find_startcode(nal_start, end);
        avio_wb32(pb, nal_end - nal_start);
        avio_write(pb, nal_start, nal_end - nal_start);
        size += 4 + nal_end - nal_start;
        nal_start = nal_end;
    }
    return size;
}
 
/**
 * Buffer variant of ff_avc_parse_nal_units(): converts buf_in from Annex B
 * to length-prefixed form.  The previous *buf is freed and replaced by a
 * freshly allocated buffer; *size is updated.  buf_in is fully consumed
 * before *buf is freed, so callers may pass buf_in == *buf.
 */
int ff_avc_parse_nal_units_buf(const uint8_t *buf_in, uint8_t **buf, int *size)
{
    AVIOContext *pb;
    int ret = avio_open_dyn_buf(&pb);
    if(ret < 0)
        return ret;

    ff_avc_parse_nal_units(pb, buf_in, *size);

    av_freep(buf);
    *size = avio_close_dyn_buf(pb, buf);
    return 0;
}
 
/**
 * Write an AVCDecoderConfigurationRecord (avcC, ISO/IEC 14496-15) for the
 * given H.264 extradata.
 *
 * If the data starts with an Annex B start code, the SPS and PPS are
 * extracted and an avcC payload is generated; otherwise the data is
 * assumed to already be an avcC record and is copied verbatim.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_isom_write_avcc(AVIOContext *pb, const uint8_t *data, int len)
{
    if (len > 6) {
        /* check for h264 start code */
        if (AV_RB32(data) == 0x00000001 ||
            AV_RB24(data) == 0x000001) {
            uint8_t *buf=NULL, *end, *start;
            uint32_t sps_size=0, pps_size=0;
            uint8_t *sps=0, *pps=0;

            int ret = ff_avc_parse_nal_units_buf(data, &buf, &len);
            if (ret < 0)
                return ret;
            start = buf;
            end = buf + len;

            /* look for sps and pps (last of each wins) */
            while (end - buf > 4) {
                uint32_t size;
                uint8_t nal_type;
                size = FFMIN(AV_RB32(buf), end - buf - 4);
                buf += 4;
                nal_type = buf[0] & 0x1f;

                if (nal_type == 7) { /* SPS */
                    sps = buf;
                    sps_size = size;
                } else if (nal_type == 8) { /* PPS */
                    pps = buf;
                    pps_size = size;
                }

                buf += size;
            }

            if (!sps || !pps || sps_size < 4 || sps_size > UINT16_MAX ||
                pps_size > UINT16_MAX) {
                /* Bug fix: this error path used to leak the buffer
                 * allocated by ff_avc_parse_nal_units_buf(). */
                av_free(start);
                return AVERROR_INVALIDDATA;
            }

            avio_w8(pb, 1); /* version */
            avio_w8(pb, sps[1]); /* profile */
            avio_w8(pb, sps[2]); /* profile compat */
            avio_w8(pb, sps[3]); /* level */
            avio_w8(pb, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 1 (11) */
            avio_w8(pb, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */

            avio_wb16(pb, sps_size);
            avio_write(pb, sps, sps_size);
            avio_w8(pb, 1); /* number of pps */
            avio_wb16(pb, pps_size);
            avio_write(pb, pps, pps_size);
            av_free(start);
        } else {
            avio_write(pb, data, len);
        }
    }
    return 0;
}
 
/**
 * Convert avcC extradata to Annex B form: one SPS and one PPS, each
 * prefixed with a 00 00 00 01 start code, into a newly allocated *buf.
 *
 * If the input already starts with an Annex B start code, *buf is left
 * NULL and 0 is returned (no conversion needed — caller keeps 'in').
 *
 * @return 0 on success, AVERROR_INVALIDDATA on malformed avcC data,
 *         AVERROR(ENOMEM) on allocation failure
 */
int ff_avc_write_annexb_extradata(const uint8_t *in, uint8_t **buf, int *size)
{
    uint16_t sps_size, pps_size;
    uint8_t *out;
    int out_size;

    *buf = NULL;
    if (*size >= 4 && (AV_RB32(in) == 0x00000001 || AV_RB24(in) == 0x000001))
        return 0;
    /* avcC records start with configurationVersion == 1 */
    if (*size < 11 || in[0] != 1)
        return AVERROR_INVALIDDATA;

    /* SPS length at offset 6, SPS at 8; PPS length follows the SPS */
    sps_size = AV_RB16(&in[6]);
    if (11 + sps_size > *size)
        return AVERROR_INVALIDDATA;
    pps_size = AV_RB16(&in[9 + sps_size]);
    if (11 + sps_size + pps_size > *size)
        return AVERROR_INVALIDDATA;
    out_size = 8 + sps_size + pps_size;
    out = av_mallocz(out_size);
    if (!out)
        return AVERROR(ENOMEM);
    AV_WB32(&out[0], 0x00000001);
    memcpy(out + 4, &in[8], sps_size);
    AV_WB32(&out[4 + sps_size], 0x00000001);
    memcpy(out + 8 + sps_size, &in[11 + sps_size], pps_size);
    *buf = out;
    *size = out_size;
    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/avc.h
0,0 → 1,34
/*
* AVC helper functions for muxers
* Copyright (c) 2008 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_AVC_H
#define AVFORMAT_AVC_H

#include <stdint.h>
#include "avio.h"

/* H.264 (AVC) helper routines for muxers; implementations live in avc.c.
 * NOTE(review): exact semantics of the parse/find helpers below are not
 * visible here — confirm against avc.c before relying on details. */
int ff_avc_parse_nal_units(AVIOContext *s, const uint8_t *buf, int size);
int ff_avc_parse_nal_units_buf(const uint8_t *buf_in, uint8_t **buf, int *size);
int ff_isom_write_avcc(AVIOContext *pb, const uint8_t *data, int len);
const uint8_t *ff_avc_find_startcode(const uint8_t *p, const uint8_t *end);

/**
 * Convert avcC-style extradata to Annex B (start-code prefixed) form.
 * On success *buf holds a newly allocated buffer (caller frees) and
 * *size its length; *buf stays NULL when the input already is Annex B.
 * Returns 0 on success, a negative AVERROR code on failure.
 */
int ff_avc_write_annexb_extradata(const uint8_t *in, uint8_t **buf, int *size);

#endif /* AVFORMAT_AVC_H */
/contrib/sdk/sources/ffmpeg/libavformat/avformat-55.def
0,0 → 1,144
EXPORTS
DllStartup
av_add_index_entry
av_append_packet
av_close_input_file
av_codec_get_id
av_codec_get_tag
av_codec_get_tag2
av_convert_lang_to
av_demuxer_open
av_dump_format
av_filename_number_test
av_find_best_stream
av_find_default_stream_index
av_find_input_format
av_find_program_from_stream
av_find_stream_info
av_fmt_ctx_get_duration_estimation_method
av_format_get_audio_codec
av_format_get_probe_score
av_format_get_subtitle_codec
av_format_get_video_codec
av_format_set_audio_codec
av_format_set_subtitle_codec
av_format_set_video_codec
av_get_frame_filename
av_get_output_timestamp
av_get_packet
av_guess_codec
av_guess_format
av_guess_frame_rate
av_guess_sample_aspect_ratio
av_hex_dump
av_hex_dump_log
av_iformat_next
av_index_search_timestamp
av_interleaved_write_frame
av_match_ext
av_new_program
av_new_stream
av_oformat_next
av_pkt_dump2
av_pkt_dump_log2
av_probe_input_buffer
av_probe_input_buffer2
av_probe_input_format
av_probe_input_format2
av_probe_input_format3
av_read_frame
av_read_packet
av_read_pause
av_read_play
av_register_all
av_register_input_format
av_register_output_format
av_sdp_create
av_seek_frame
av_set_pts_info
av_stream_get_r_frame_rate
av_stream_set_r_frame_rate
av_url_split
av_write_frame
av_write_trailer
avformat_alloc_context
avformat_alloc_output_context
avformat_alloc_output_context2
avformat_close_input
avformat_configuration
avformat_find_stream_info
avformat_free_context
avformat_get_class
avformat_get_riff_audio_tags
avformat_get_riff_video_tags
avformat_license
avformat_match_stream_specifier
avformat_network_deinit
avformat_network_init
avformat_new_stream
avformat_open_input
avformat_query_codec
avformat_queue_attached_pictures
avformat_seek_file
avformat_version
avformat_write_header
avio_alloc_context
avio_check
avio_close
avio_close_dyn_buf
avio_closep
avio_enum_protocols
avio_flush
avio_get_str
avio_get_str16be
avio_get_str16le
avio_open
avio_open2
avio_open_dyn_buf
avio_pause
avio_printf
avio_put_str
avio_put_str16le
avio_r8
avio_rb16
avio_rb24
avio_rb32
avio_rb64
avio_read
avio_rl16
avio_rl24
avio_rl32
avio_rl64
avio_seek
avio_seek_time
avio_size
avio_skip
avio_w8
avio_wb16
avio_wb24
avio_wb32
avio_wb64
avio_wl16
avio_wl24
avio_wl32
avio_wl64
avio_write
avpriv_dv_get_packet
avpriv_dv_init_demux
avpriv_dv_produce_packet
avpriv_new_chapter
avpriv_set_pts_info
ff_codec_get_id
ff_mpegts_parse_close
ff_mpegts_parse_open
ff_mpegts_parse_packet
ffio_open_dyn_packet_buf
ffio_set_buf_size
ffurl_close
ffurl_open
ffurl_protocol_next
ffurl_read_complete
ffurl_seek
ffurl_size
ffurl_write
url_feof
/contrib/sdk/sources/ffmpeg/libavformat/avformat-55.orig.def
0,0 → 1,144
EXPORTS
DllStartup @1
av_add_index_entry @2
av_append_packet @3
av_close_input_file @4
av_codec_get_id @5
av_codec_get_tag @6
av_codec_get_tag2 @7
av_convert_lang_to @8
av_demuxer_open @9
av_dump_format @10
av_filename_number_test @11
av_find_best_stream @12
av_find_default_stream_index @13
av_find_input_format @14
av_find_program_from_stream @15
av_find_stream_info @16
av_fmt_ctx_get_duration_estimation_method @17
av_format_get_audio_codec @18
av_format_get_probe_score @19
av_format_get_subtitle_codec @20
av_format_get_video_codec @21
av_format_set_audio_codec @22
av_format_set_subtitle_codec @23
av_format_set_video_codec @24
av_get_frame_filename @25
av_get_output_timestamp @26
av_get_packet @27
av_guess_codec @28
av_guess_format @29
av_guess_frame_rate @30
av_guess_sample_aspect_ratio @31
av_hex_dump @32
av_hex_dump_log @33
av_iformat_next @34
av_index_search_timestamp @35
av_interleaved_write_frame @36
av_match_ext @37
av_new_program @38
av_new_stream @39
av_oformat_next @40
av_pkt_dump2 @41
av_pkt_dump_log2 @42
av_probe_input_buffer @43
av_probe_input_buffer2 @44
av_probe_input_format @45
av_probe_input_format2 @46
av_probe_input_format3 @47
av_read_frame @48
av_read_packet @49
av_read_pause @50
av_read_play @51
av_register_all @52
av_register_input_format @53
av_register_output_format @54
av_sdp_create @55
av_seek_frame @56
av_set_pts_info @57
av_stream_get_r_frame_rate @58
av_stream_set_r_frame_rate @59
av_url_split @60
av_write_frame @61
av_write_trailer @62
avformat_alloc_context @63
avformat_alloc_output_context @64
avformat_alloc_output_context2 @65
avformat_close_input @66
avformat_configuration @67
avformat_find_stream_info @68
avformat_free_context @69
avformat_get_class @70
avformat_get_riff_audio_tags @71
avformat_get_riff_video_tags @72
avformat_license @73
avformat_match_stream_specifier @74
avformat_network_deinit @75
avformat_network_init @76
avformat_new_stream @77
avformat_open_input @78
avformat_query_codec @79
avformat_queue_attached_pictures @80
avformat_seek_file @81
avformat_version @82
avformat_write_header @83
avio_alloc_context @84
avio_check @85
avio_close @86
avio_close_dyn_buf @87
avio_closep @88
avio_enum_protocols @89
avio_flush @90
avio_get_str @91
avio_get_str16be @92
avio_get_str16le @93
avio_open @94
avio_open2 @95
avio_open_dyn_buf @96
avio_pause @97
avio_printf @98
avio_put_str @99
avio_put_str16le @100
avio_r8 @101
avio_rb16 @102
avio_rb24 @103
avio_rb32 @104
avio_rb64 @105
avio_read @106
avio_rl16 @107
avio_rl24 @108
avio_rl32 @109
avio_rl64 @110
avio_seek @111
avio_seek_time @112
avio_size @113
avio_skip @114
avio_w8 @115
avio_wb16 @116
avio_wb24 @117
avio_wb32 @118
avio_wb64 @119
avio_wl16 @120
avio_wl24 @121
avio_wl32 @122
avio_wl64 @123
avio_write @124
avpriv_dv_get_packet @125
avpriv_dv_init_demux @126
avpriv_dv_produce_packet @127
avpriv_new_chapter @128
avpriv_set_pts_info @129
ff_codec_get_id @130
ff_mpegts_parse_close @131
ff_mpegts_parse_open @132
ff_mpegts_parse_packet @133
ffio_open_dyn_packet_buf @134
ffio_set_buf_size @135
ffurl_close @136
ffurl_open @137
ffurl_protocol_next @138
ffurl_read_complete @139
ffurl_seek @140
ffurl_size @141
ffurl_write @142
url_feof @143
/contrib/sdk/sources/ffmpeg/libavformat/avformat.h
0,0 → 1,2239
/*
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_AVFORMAT_H
#define AVFORMAT_AVFORMAT_H
 
/**
* @file
* @ingroup libavf
* Main libavformat public API header
*/
 
/**
* @defgroup libavf I/O and Muxing/Demuxing Library
* @{
*
* Libavformat (lavf) is a library for dealing with various media container
* formats. Its main two purposes are demuxing - i.e. splitting a media file
* into component streams, and the reverse process of muxing - writing supplied
* data in a specified container format. It also has an @ref lavf_io
* "I/O module" which supports a number of protocols for accessing the data (e.g.
* file, tcp, http and others). Before using lavf, you need to call
* av_register_all() to register all compiled muxers, demuxers and protocols.
* Unless you are absolutely sure you won't use libavformat's network
* capabilities, you should also call avformat_network_init().
*
* A supported input format is described by an AVInputFormat struct, conversely
* an output format is described by AVOutputFormat. You can iterate over all
* registered input/output formats using the av_iformat_next() /
* av_oformat_next() functions. The protocols layer is not part of the public
* API, so you can only get the names of supported protocols with the
* avio_enum_protocols() function.
*
* Main lavf structure used for both muxing and demuxing is AVFormatContext,
* which exports all information about the file being read or written. As with
* most Libavformat structures, its size is not part of public ABI, so it cannot be
* allocated on stack or directly with av_malloc(). To create an
* AVFormatContext, use avformat_alloc_context() (some functions, like
* avformat_open_input() might do that for you).
*
* Most importantly an AVFormatContext contains:
* @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat
* "output" format. It is either autodetected or set by user for input;
* always set by user for output.
* @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all
* elementary streams stored in the file. AVStreams are typically referred to
* using their index in this array.
* @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or
* set by user for input, always set by user for output (unless you are dealing
* with an AVFMT_NOFILE format).
*
* @section lavf_options Passing options to (de)muxers
 * Lavf allows you to configure muxers and demuxers using the @ref avoptions
* mechanism. Generic (format-independent) libavformat options are provided by
* AVFormatContext, they can be examined from a user program by calling
* av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass
* from avformat_get_class()). Private (format-specific) options are provided by
* AVFormatContext.priv_data if and only if AVInputFormat.priv_class /
* AVOutputFormat.priv_class of the corresponding format struct is non-NULL.
* Further options may be provided by the @ref AVFormatContext.pb "I/O context",
* if its AVClass is non-NULL, and the protocols layer. See the discussion on
* nesting in @ref avoptions documentation to learn how to access those.
*
* @defgroup lavf_decoding Demuxing
* @{
* Demuxers read a media file and split it into chunks of data (@em packets). A
* @ref AVPacket "packet" contains one or more encoded frames which belongs to a
* single elementary stream. In the lavf API this process is represented by the
* avformat_open_input() function for opening a file, av_read_frame() for
* reading a single packet and finally avformat_close_input(), which does the
* cleanup.
*
* @section lavf_decoding_open Opening a media file
* The minimum information required to open a file is its URL or filename, which
* is passed to avformat_open_input(), as in the following code:
* @code
* const char *url = "in.mp3";
* AVFormatContext *s = NULL;
* int ret = avformat_open_input(&s, url, NULL, NULL);
* if (ret < 0)
* abort();
* @endcode
* The above code attempts to allocate an AVFormatContext, open the
* specified file (autodetecting the format) and read the header, exporting the
* information stored there into s. Some formats do not have a header or do not
* store enough information there, so it is recommended that you call the
* avformat_find_stream_info() function which tries to read and decode a few
* frames to find missing information.
*
* In some cases you might want to preallocate an AVFormatContext yourself with
* avformat_alloc_context() and do some tweaking on it before passing it to
* avformat_open_input(). One such case is when you want to use custom functions
* for reading input data instead of lavf internal I/O layer.
* To do that, create your own AVIOContext with avio_alloc_context(), passing
* your reading callbacks to it. Then set the @em pb field of your
* AVFormatContext to newly created AVIOContext.
*
* Since the format of the opened file is in general not known until after
* avformat_open_input() has returned, it is not possible to set demuxer private
* options on a preallocated context. Instead, the options should be passed to
* avformat_open_input() wrapped in an AVDictionary:
* @code
* AVDictionary *options = NULL;
* av_dict_set(&options, "video_size", "640x480", 0);
* av_dict_set(&options, "pixel_format", "rgb24", 0);
*
* if (avformat_open_input(&s, url, NULL, &options) < 0)
* abort();
* av_dict_free(&options);
* @endcode
* This code passes the private options 'video_size' and 'pixel_format' to the
* demuxer. They would be necessary for e.g. the rawvideo demuxer, since it
* cannot know how to interpret raw video data otherwise. If the format turns
* out to be something different than raw video, those options will not be
* recognized by the demuxer and therefore will not be applied. Such unrecognized
* options are then returned in the options dictionary (recognized options are
* consumed). The calling program can handle such unrecognized options as it
* wishes, e.g.
* @code
* AVDictionaryEntry *e;
* if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) {
* fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key);
* abort();
* }
* @endcode
*
* After you have finished reading the file, you must close it with
* avformat_close_input(). It will free everything associated with the file.
*
* @section lavf_decoding_read Reading from an opened file
* Reading data from an opened AVFormatContext is done by repeatedly calling
* av_read_frame() on it. Each call, if successful, will return an AVPacket
* containing encoded data for one AVStream, identified by
* AVPacket.stream_index. This packet may be passed straight into the libavcodec
* decoding functions avcodec_decode_video2(), avcodec_decode_audio4() or
* avcodec_decode_subtitle2() if the caller wishes to decode the data.
*
* AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be
* set if known. They may also be unset (i.e. AV_NOPTS_VALUE for
* pts/dts, 0 for duration) if the stream does not provide them. The timing
* information will be in AVStream.time_base units, i.e. it has to be
* multiplied by the timebase to convert them to seconds.
*
* If AVPacket.buf is set on the returned packet, then the packet is
* allocated dynamically and the user may keep it indefinitely.
* Otherwise, if AVPacket.buf is NULL, the packet data is backed by a
* static storage somewhere inside the demuxer and the packet is only valid
* until the next av_read_frame() call or closing the file. If the caller
* requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy
* of it.
* In both cases, the packet must be freed with av_free_packet() when it is no
* longer needed.
*
* @section lavf_decoding_seek Seeking
* @}
*
* @defgroup lavf_encoding Muxing
* @{
* @}
*
* @defgroup lavf_io I/O Read/Write
* @{
* @}
*
* @defgroup lavf_codec Demuxers
* @{
* @defgroup lavf_codec_native Native Demuxers
* @{
* @}
* @defgroup lavf_codec_wrappers External library wrappers
* @{
* @}
* @}
* @defgroup lavf_protos I/O Protocols
* @{
* @}
* @defgroup lavf_internal Internal
* @{
* @}
* @}
*
*/
 
#include <time.h>
#include <stdio.h> /* FILE */
#include "libavcodec/avcodec.h"
#include "libavutil/dict.h"
#include "libavutil/log.h"
 
#include "avio.h"
#include "libavformat/version.h"
 
struct AVFormatContext;
 
 
/**
* @defgroup metadata_api Public Metadata API
* @{
* @ingroup libavf
* The metadata API allows libavformat to export metadata tags to a client
* application when demuxing. Conversely it allows a client application to
* set metadata when muxing.
*
* Metadata is exported or set as pairs of key/value strings in the 'metadata'
* fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs
* using the @ref lavu_dict "AVDictionary" API. Like all strings in FFmpeg,
* metadata is assumed to be UTF-8 encoded Unicode. Note that metadata
* exported by demuxers isn't checked to be valid UTF-8 in most cases.
*
* Important concepts to keep in mind:
* - Keys are unique; there can never be 2 tags with the same key. This is
* also meant semantically, i.e., a demuxer should not knowingly produce
* several keys that are literally different but semantically identical.
* E.g., key=Author5, key=Author6. In this example, all authors must be
* placed in the same tag.
* - Metadata is flat, not hierarchical; there are no subtags. If you
* want to store, e.g., the email address of the child of producer Alice
* and actor Bob, that could have key=alice_and_bobs_childs_email_address.
* - Several modifiers can be applied to the tag name. This is done by
* appending a dash character ('-') and the modifier name in the order
* they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng.
* - language -- a tag whose value is localized for a particular language
* is appended with the ISO 639-2/B 3-letter language code.
* For example: Author-ger=Michael, Author-eng=Mike
* The original/default language is in the unqualified "Author" tag.
* A demuxer should set a default if it sets any translated tag.
* - sorting -- a modified version of a tag that should be used for
* sorting will have '-sort' appended. E.g. artist="The Beatles",
* artist-sort="Beatles, The".
*
* - Demuxers attempt to export metadata in a generic format, however tags
* with no generic equivalents are left as they are stored in the container.
 * What follows is a list of generic tag names:
*
@verbatim
album -- name of the set this work belongs to
album_artist -- main creator of the set/album, if different from artist.
e.g. "Various Artists" for compilation albums.
artist -- main creator of the work
comment -- any additional description of the file.
composer -- who composed the work, if different from artist.
copyright -- name of copyright holder.
creation_time-- date when the file was created, preferably in ISO 8601.
date -- date when the work was created, preferably in ISO 8601.
disc -- number of a subset, e.g. disc in a multi-disc collection.
encoder -- name/settings of the software/hardware that produced the file.
encoded_by -- person/group who created the file.
filename -- original name of the file.
genre -- <self-evident>.
language -- main language in which the work is performed, preferably
in ISO 639-2 format. Multiple languages can be specified by
separating them with commas.
performer -- artist who performed the work, if different from artist.
E.g for "Also sprach Zarathustra", artist would be "Richard
Strauss" and performer "London Philharmonic Orchestra".
publisher -- name of the label/publisher.
service_name -- name of the service in broadcasting (channel name).
service_provider -- name of the service provider in broadcasting.
title -- name of the work.
track -- number of this work in the set, can be in form current/total.
variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of
@endverbatim
*
* Look in the examples section for an application example how to use the Metadata API.
*
* @}
*/
 
/* packet functions */
 
 
/**
* Allocate and read the payload of a packet and initialize its
* fields with default values.
*
* @param pkt packet
* @param size desired payload size
* @return >0 (read size) if OK, AVERROR_xxx otherwise
*/
int av_get_packet(AVIOContext *s, AVPacket *pkt, int size);
 
 
/**
* Read data and append it to the current content of the AVPacket.
* If pkt->size is 0 this is identical to av_get_packet.
* Note that this uses av_grow_packet and thus involves a realloc
* which is inefficient. Thus this function should only be used
* when there is no reasonable way to know (an upper bound of)
* the final size.
*
* @param pkt packet
* @param size amount of data to read
* @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data
* will not be lost even if an error occurs.
*/
int av_append_packet(AVIOContext *s, AVPacket *pkt, int size);
 
/*************************************************/
/* fractional numbers for exact pts handling */
 
/**
 * Fractional number used for exact pts handling.
 * The exact value of the fractional number is: 'val + num / den'.
 * num is assumed to be 0 <= num < den.
 */
typedef struct AVFrac {
    int64_t val, num, den; ///< integer part, numerator and denominator
} AVFrac;
 
/*************************************************/
/* input/output formats */
 
struct AVCodecTag;
 
/**
 * This structure contains the data a format has to probe a file.
 */
typedef struct AVProbeData {
    const char *filename;   ///< name of the file being probed (may be used for extension-based guessing)
    unsigned char *buf;     /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */
    int buf_size;           /**< Size of buf except extra allocated bytes */
} AVProbeData;
 
#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4)
#define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension
#define AVPROBE_SCORE_MAX 100 ///< maximum score
 
#define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer
 
/// Demuxer will use avio_open, no opened file should be provided by the caller.
#define AVFMT_NOFILE 0x0001
#define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */
#define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. */
#define AVFMT_RAWPICTURE 0x0020 /**< Format wants AVPicture structure for
raw picture data. */
#define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */
#define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */
#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */
#define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities. Note, muxers always require valid (monotone) timestamps */
#define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */
#define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */
#define AVFMT_NOSTREAMS 0x1000 /**< Format does not require any streams */
#define AVFMT_NOBINSEARCH 0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */
#define AVFMT_NOGENSEARCH 0x4000 /**< Format does not allow to fall back on generic search */
#define AVFMT_NO_BYTE_SEEK 0x8000 /**< Format does not allow seeking by bytes */
#define AVFMT_ALLOW_FLUSH 0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. */
#if LIBAVFORMAT_VERSION_MAJOR <= 54
#define AVFMT_TS_NONSTRICT 0x8020000 //we try to be compatible to the ABIs of ffmpeg and major forks
#else
#define AVFMT_TS_NONSTRICT 0x20000
#endif
/**< Format does not require strictly
increasing timestamps, but they must
still be monotonic */
#define AVFMT_TS_NEGATIVE 0x40000 /**< Format allows muxing negative
timestamps. If not set the timestamp
will be shifted in av_write_frame and
av_interleaved_write_frame so they
start from 0.
The user or muxer can override this through
AVFormatContext.avoid_negative_ts
*/
 
#define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */
 
/**
* @addtogroup lavf_encoding
* @{
*/
typedef struct AVOutputFormat {
    const char *name;       ///< short name for the muxer (e.g. used for format selection)
    /**
     * Descriptive name for the format, meant to be more human-readable
     * than name. You should use the NULL_IF_CONFIG_SMALL() macro
     * to define it.
     */
    const char *long_name;
    const char *mime_type;
    const char *extensions; /**< comma-separated filename extensions */
    /* output support */
    enum AVCodecID audio_codec;    /**< default audio codec */
    enum AVCodecID video_codec;    /**< default video codec */
    enum AVCodecID subtitle_codec; /**< default subtitle codec */
    /**
     * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE,
     * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS,
     * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH,
     * AVFMT_TS_NONSTRICT
     */
    int flags;

    /**
     * List of supported codec_id-codec_tag pairs, ordered by "better
     * choice first". The arrays are all terminated by AV_CODEC_ID_NONE.
     */
    const struct AVCodecTag * const *codec_tag;


    const AVClass *priv_class; ///< AVClass for the private context

    /*****************************************************************
     * No fields below this line are part of the public API. They
     * may not be used outside of libavformat and can be changed and
     * removed at will.
     * New public fields should be added right above.
     *****************************************************************
     */
    struct AVOutputFormat *next;
    /**
     * size of private data so that it can be allocated in the wrapper
     */
    int priv_data_size;

    int (*write_header)(struct AVFormatContext *);
    /**
     * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags,
     * pkt can be NULL in order to flush data buffered in the muxer.
     * When flushing, return 0 if there still is more data to flush,
     * or 1 if everything was flushed and there is no more buffered
     * data.
     */
    int (*write_packet)(struct AVFormatContext *, AVPacket *pkt);
    int (*write_trailer)(struct AVFormatContext *);
    /**
     * NOTE(review): this slot previously carried the comment "Currently
     * only used to set pixel format if not YUV420P.", which does not
     * match the callback's name or signature and looks stale — it
     * presumably performs custom packet interleaving; confirm against
     * the muxing code before relying on it.
     */
    int (*interleave_packet)(struct AVFormatContext *, AVPacket *out,
                             AVPacket *in, int flush);
    /**
     * Test if the given codec can be stored in this container.
     *
     * @return 1 if the codec is supported, 0 if it is not.
     *         A negative number if unknown.
     *         MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC
     */
    int (*query_codec)(enum AVCodecID id, int std_compliance);

    void (*get_output_timestamp)(struct AVFormatContext *s, int stream,
                                 int64_t *dts, int64_t *wall);
} AVOutputFormat;
/**
* @}
*/
 
/**
* @addtogroup lavf_decoding
* @{
*/
typedef struct AVInputFormat {
    /**
     * A comma separated list of short names for the format. New names
     * may be appended with a minor bump.
     */
    const char *name;

    /**
     * Descriptive name for the format, meant to be more human-readable
     * than name. You should use the NULL_IF_CONFIG_SMALL() macro
     * to define it.
     */
    const char *long_name;

    /**
     * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS,
     * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH,
     * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
     */
    int flags;

    /**
     * If extensions are defined, then no probe is done. You should
     * usually not use extension format guessing because it is not
     * reliable enough
     */
    const char *extensions;

    /// List of supported codec_id/codec_tag pairs (same layout as in AVOutputFormat)
    const struct AVCodecTag * const *codec_tag;

    const AVClass *priv_class; ///< AVClass for the private context

    /*****************************************************************
     * No fields below this line are part of the public API. They
     * may not be used outside of libavformat and can be changed and
     * removed at will.
     * New public fields should be added right above.
     *****************************************************************
     */
    struct AVInputFormat *next; ///< next registered input format (singly linked list)

    /**
     * Raw demuxers store their codec ID here.
     */
    int raw_codec_id;

    /**
     * Size of private data so that it can be allocated in the wrapper.
     */
    int priv_data_size;

    /**
     * Tell if a given file has a chance of being parsed as this format.
     * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes
     * big so you do not have to check for that unless you need more.
     * NOTE(review): presumably returns a probe score (see the
     * AVPROBE_SCORE_* constants above) — confirm against callers.
     */
    int (*read_probe)(AVProbeData *);

    /**
     * Read the format header and initialize the AVFormatContext
     * structure. Return 0 if OK. Only used in raw format right
     * now. 'avformat_new_stream' should be called to create new streams.
     * NOTE(review): the "Only used in raw format right now" remark looks
     * stale — confirm before relying on it.
     */
    int (*read_header)(struct AVFormatContext *);

    /**
     * Read one packet and put it in 'pkt'. pts and flags are also
     * set. 'avformat_new_stream' can be called only if the flag
     * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a
     * background thread).
     * @return 0 on success, < 0 on error.
     *         When returning an error, pkt must not have been allocated
     *         or must be freed before returning
     */
    int (*read_packet)(struct AVFormatContext *, AVPacket *pkt);

    /**
     * Close the stream. The AVFormatContext and AVStreams are not
     * freed by this function
     */
    int (*read_close)(struct AVFormatContext *);

    /**
     * Seek to a given timestamp relative to the frames in
     * stream component stream_index.
     * @param stream_index Must not be -1.
     * @param flags Selects which direction should be preferred if no exact
     *              match is available.
     * @return >= 0 on success (but not necessarily the new offset)
     */
    int (*read_seek)(struct AVFormatContext *,
                     int stream_index, int64_t timestamp, int flags);

    /**
     * Get the next timestamp in stream[stream_index].time_base units.
     * @return the timestamp or AV_NOPTS_VALUE if an error occurred
     */
    int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index,
                              int64_t *pos, int64_t pos_limit);

    /**
     * Start/resume playing - only meaningful if using a network-based format
     * (RTSP).
     */
    int (*read_play)(struct AVFormatContext *);

    /**
     * Pause playing - only meaningful if using a network-based format
     * (RTSP).
     */
    int (*read_pause)(struct AVFormatContext *);

    /**
     * Seek to timestamp ts.
     * Seeking will be done so that the point from which all active streams
     * can be presented successfully will be closest to ts and within min/max_ts.
     * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL.
     */
    int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);
} AVInputFormat;
/**
* @}
*/
 
/**
 * Degree of parsing/repacking applied to a stream's packets by the
 * generic demuxing layer.
 */
enum AVStreamParseType {
    AVSTREAM_PARSE_NONE,       /**< no parsing */
    AVSTREAM_PARSE_FULL,       /**< full parsing and repack */
    AVSTREAM_PARSE_HEADERS,    /**< Only parse headers, do not repack. */
    AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */
    AVSTREAM_PARSE_FULL_ONCE,  /**< full parsing and repack of the first frame only, only implemented for H.264 currently */
    AVSTREAM_PARSE_FULL_RAW=MKTAG(0,'R','A','W'), /**< full parsing and repack with timestamp and position generation by parser for raw
                                                       this assumes that each packet in the file contains no demuxer level headers and
                                                       just codec level data, otherwise position generation would fail */
};
 
/**
 * One entry of a stream's seek index, mapping a byte position in the
 * file to a timestamp (see av_add_index_entry / av_index_search_timestamp).
 */
typedef struct AVIndexEntry {
    int64_t pos;       ///< byte position of the indexed frame in the file
    int64_t timestamp; /**<
                        * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available
                        * when seeking to this entry. That means preferable PTS on keyframe based formats.
                        * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better
                        * is known
                        */
#define AVINDEX_KEYFRAME 0x0001
    int flags:2;
    int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment).
    int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */
} AVIndexEntry;
 
#define AV_DISPOSITION_DEFAULT 0x0001
#define AV_DISPOSITION_DUB 0x0002
#define AV_DISPOSITION_ORIGINAL 0x0004
#define AV_DISPOSITION_COMMENT 0x0008
#define AV_DISPOSITION_LYRICS 0x0010
#define AV_DISPOSITION_KARAOKE 0x0020
 
/**
* Track should be used during playback by default.
* Useful for subtitle track that should be displayed
* even when user did not explicitly ask for subtitles.
*/
#define AV_DISPOSITION_FORCED 0x0040
#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */
#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */
#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */
/**
* The stream is stored in the file as an attached picture/"cover art" (e.g.
* APIC frame in ID3v2). The single packet associated with it will be returned
* among the first few packets read from the file unless seeking takes place.
* It can also be accessed at any time in AVStream.attached_pic.
*/
#define AV_DISPOSITION_ATTACHED_PIC 0x0400
 
/**
* To specify text track kind (different from subtitles default).
*/
#define AV_DISPOSITION_CAPTIONS 0x10000
#define AV_DISPOSITION_DESCRIPTIONS 0x20000
#define AV_DISPOSITION_METADATA 0x40000
 
/**
* Options for behavior on timestamp wrap detection.
*/
#define AV_PTS_WRAP_IGNORE 0 ///< ignore the wrap
#define AV_PTS_WRAP_ADD_OFFSET 1 ///< add the format specific offset on wrap detection
#define AV_PTS_WRAP_SUB_OFFSET -1 ///< subtract the format specific offset on wrap detection
 
/**
* Stream structure.
* New fields can be added to the end with minor version bumps.
* Removal, reordering and changes to existing fields require a major
* version bump.
* sizeof(AVStream) must not be used outside libav*.
*/
typedef struct AVStream {
    int index;    /**< stream index in AVFormatContext */
    /**
     * Format-specific stream ID.
     * decoding: set by libavformat
     * encoding: set by the user, replaced by libavformat if left unset
     */
    int id;
    /**
     * Codec context associated with this stream. Allocated and freed by
     * libavformat.
     *
     * - decoding: The demuxer exports codec information stored in the headers
     *             here.
     * - encoding: The user sets codec information, the muxer writes it to the
     *             output. Mandatory fields as specified in AVCodecContext
     *             documentation must be set even if this AVCodecContext is
     *             not actually used for encoding.
     */
    AVCodecContext *codec;
    void *priv_data;

    /**
     * encoding: pts generation when outputting stream
     */
    struct AVFrac pts;

    /**
     * This is the fundamental unit of time (in seconds) in terms
     * of which frame timestamps are represented.
     *
     * decoding: set by libavformat
     * encoding: set by libavformat in avformat_write_header. The muxer may use the
     * user-provided value of @ref AVCodecContext.time_base "codec->time_base"
     * as a hint.
     */
    AVRational time_base;

    /**
     * Decoding: pts of the first frame of the stream in presentation order, in stream time base.
     * Only set this if you are absolutely 100% sure that the value you set
     * it to really is the pts of the first frame.
     * This may be undefined (AV_NOPTS_VALUE).
     * @note The ASF header does NOT contain a correct start_time; the ASF
     * demuxer must NOT set this.
     */
    int64_t start_time;

    /**
     * Decoding: duration of the stream, in stream time base.
     * If a source file does not specify a duration, but does specify
     * a bitrate, this value will be estimated from bitrate and file size.
     */
    int64_t duration;

    int64_t nb_frames;                 ///< number of frames in this stream if known or 0

    int disposition; /**< AV_DISPOSITION_* bit field */

    enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed.

    /**
     * sample aspect ratio (0 if unknown)
     * - encoding: Set by user.
     * - decoding: Set by libavformat.
     */
    AVRational sample_aspect_ratio;

    AVDictionary *metadata;

    /**
     * Average framerate
     */
    AVRational avg_frame_rate;

    /**
     * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet
     * will contain the attached picture.
     *
     * decoding: set by libavformat, must not be modified by the caller.
     * encoding: unused
     */
    AVPacket attached_pic;

    /*****************************************************************
     * All fields below this line are not part of the public API. They
     * may not be used outside of libavformat and can be changed and
     * removed at will.
     * New public fields should be added right above.
     *****************************************************************
     */

    /**
     * Stream information used internally by av_find_stream_info()
     */
#define MAX_STD_TIMEBASES (60*12+6)
    struct {
        int64_t last_dts;
        int64_t duration_gcd;
        int duration_count;
        double (*duration_error)[2][MAX_STD_TIMEBASES];
        int64_t codec_info_duration;
        int64_t codec_info_duration_fields;
        int found_decoder;

        int64_t last_duration;

        /**
         * Those are used for average framerate estimation.
         */
        int64_t fps_first_dts;
        int     fps_first_dts_idx;
        int64_t fps_last_dts;
        int     fps_last_dts_idx;

    } *info;

    int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */

    // Timestamp generation support:
    /**
     * Timestamp corresponding to the last dts sync point.
     *
     * Initialized when AVCodecParserContext.dts_sync_point >= 0 and
     * a DTS is received from the underlying container. Otherwise set to
     * AV_NOPTS_VALUE by default.
     */
    int64_t reference_dts;
    int64_t first_dts;
    int64_t cur_dts;
    int64_t last_IP_pts;
    int last_IP_duration;

    /**
     * Number of packets to buffer for codec probing
     */
#define MAX_PROBE_PACKETS 2500
    int probe_packets;

    /**
     * Number of frames that have been demuxed during av_find_stream_info()
     */
    int codec_info_nb_frames;

    /* av_read_frame() support */
    enum AVStreamParseType need_parsing;
    struct AVCodecParserContext *parser;

    /**
     * last packet in packet_buffer for this stream when muxing.
     */
    struct AVPacketList *last_in_packet_buffer;
    AVProbeData probe_data;
#define MAX_REORDER_DELAY 16
    int64_t pts_buffer[MAX_REORDER_DELAY+1];

    AVIndexEntry *index_entries; /**< Only used if the format does not
                                    support seeking natively. */
    int nb_index_entries;
    unsigned int index_entries_allocated_size;

    /**
     * Real base framerate of the stream.
     * This is the lowest framerate with which all timestamps can be
     * represented accurately (it is the least common multiple of all
     * framerates in the stream). Note, this value is just a guess!
     * For example, if the time base is 1/90000 and all frames have either
     * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1.
     *
     * Code outside avformat should access this field using:
     * av_stream_get/set_r_frame_rate(stream)
     */
    AVRational r_frame_rate;

    /**
     * Stream Identifier
     * This is the MPEG-TS stream identifier +1
     * 0 means unknown
     */
    int stream_identifier;

    int64_t interleaver_chunk_size;
    int64_t interleaver_chunk_duration;

    /**
     * stream probing state
     * -1   -> probing finished
     *  0   -> no probing requested
     * rest -> perform probing with request_probe being the minimum score to accept.
     * NOT PART OF PUBLIC API
     */
    int request_probe;
    /**
     * Indicates that everything up to the next keyframe
     * should be discarded.
     */
    int skip_to_keyframe;

    /**
     * Number of samples to skip at the start of the frame decoded from the next packet.
     */
    int skip_samples;

    /**
     * Number of internally decoded frames, used internally in libavformat, do not access
     * its lifetime differs from info which is why it is not in that structure.
     */
    int nb_decoded_frames;

    /**
     * Timestamp offset added to timestamps before muxing
     * NOT PART OF PUBLIC API
     */
    int64_t mux_ts_offset;

    /**
     * Internal data to check for wrapping of the time stamp
     */
    int64_t pts_wrap_reference;

    /**
     * Options for behavior, when a wrap is detected.
     *
     * Defined by AV_PTS_WRAP_ values.
     *
     * If correction is enabled, there are two possibilities:
     * If the first time stamp is near the wrap point, the wrap offset
     * will be subtracted, which will create negative time stamps.
     * Otherwise the offset will be added.
     */
    int pts_wrap_behavior;

} AVStream;
 
AVRational av_stream_get_r_frame_rate(const AVStream *s);
void av_stream_set_r_frame_rate(AVStream *s, AVRational r);
 
#define AV_PROGRAM_RUNNING 1
 
/**
* New fields can be added to the end with minor version bumps.
* Removal, reordering and changes to existing fields require a major
* version bump.
* sizeof(AVProgram) must not be used outside libav*.
*/
typedef struct AVProgram {
    int            id;
    int            flags;
    enum AVDiscard discard;        ///< selects which program to discard and which to feed to the caller
    unsigned int   *stream_index;
    unsigned int   nb_stream_indexes;
    AVDictionary *metadata;

    // NOTE(review): the three fields below look MPEG-TS specific
    // (program number, PMT PID, PCR PID) — confirm against the mpegts demuxer.
    int program_num;
    int pmt_pid;
    int pcr_pid;

    /*****************************************************************
     * All fields below this line are not part of the public API. They
     * may not be used outside of libavformat and can be changed and
     * removed at will.
     * New public fields should be added right above.
     *****************************************************************
     */
    int64_t start_time;
    int64_t end_time;

    int64_t pts_wrap_reference;    ///< reference dts for wrap detection
    int pts_wrap_behavior;         ///< behavior on wrap detection
} AVProgram;
 
#define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present
(streams are added dynamically) */
 
/**
 * A chapter within a media file; timestamps are expressed in the
 * chapter's own time_base.
 */
typedef struct AVChapter {
    int id;                 ///< unique ID to identify the chapter
    AVRational time_base;   ///< time base in which the start/end timestamps are specified
    int64_t start, end;     ///< chapter start/end time in time_base units
    AVDictionary *metadata;
} AVChapter;
 
 
/**
 * The duration of a video can be estimated in various ways, and this enum
 * can be used to know how the duration was estimated.
 * Read via av_fmt_ctx_get_duration_estimation_method().
 */
enum AVDurationEstimationMethod {
    AVFMT_DURATION_FROM_PTS,    ///< Duration accurately estimated from PTSes
    AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration
    AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate)
};
 
/**
* Format I/O context.
* New fields can be added to the end with minor version bumps.
* Removal, reordering and changes to existing fields require a major
* version bump.
* sizeof(AVFormatContext) must not be used outside libav*, use
* avformat_alloc_context() to create an AVFormatContext.
*/
typedef struct AVFormatContext {
    /**
     * A class for logging and AVOptions. Set by avformat_alloc_context().
     * Exports (de)muxer private options if they exist.
     */
    const AVClass *av_class;

    /**
     * Can only be iformat or oformat, not both at the same time.
     *
     * decoding: set by avformat_open_input().
     * encoding: set by the user.
     */
    struct AVInputFormat *iformat;
    struct AVOutputFormat *oformat;

    /**
     * Format private data. This is an AVOptions-enabled struct
     * if and only if iformat/oformat.priv_class is not NULL.
     */
    void *priv_data;

    /**
     * I/O context.
     *
     * decoding: either set by the user before avformat_open_input() (then
     * the user must close it manually) or set by avformat_open_input().
     * encoding: set by the user.
     *
     * Do NOT set this field if AVFMT_NOFILE flag is set in
     * iformat/oformat.flags. In such a case, the (de)muxer will handle
     * I/O in some other way and this field will be NULL.
     */
    AVIOContext *pb;

    /* stream info */
    int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */

    /**
     * A list of all streams in the file. New streams are created with
     * avformat_new_stream().
     *
     * decoding: streams are created by libavformat in avformat_open_input().
     * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also
     * appear in av_read_frame().
     * encoding: streams are created by the user before avformat_write_header().
     */
    unsigned int nb_streams;
    AVStream **streams;

    char filename[1024]; /**< input or output filename */

    /**
     * Decoding: position of the first frame of the component, in
     * AV_TIME_BASE fractional seconds. NEVER set this value directly:
     * It is deduced from the AVStream values.
     */
    int64_t start_time;

    /**
     * Decoding: duration of the stream, in AV_TIME_BASE fractional
     * seconds. Only set this value if you know none of the individual stream
     * durations and also do not set any of them. This is deduced from the
     * AVStream values if not set.
     */
    int64_t duration;

    /**
     * Decoding: total stream bitrate in bit/s, 0 if not
     * available. Never set it directly if the file_size and the
     * duration are known as FFmpeg can compute it automatically.
     */
    int bit_rate;

    unsigned int packet_size;
    int max_delay;

    int flags;
#define AVFMT_FLAG_GENPTS       0x0001 ///< Generate missing pts even if it requires parsing future frames.
#define AVFMT_FLAG_IGNIDX       0x0002 ///< Ignore index.
#define AVFMT_FLAG_NONBLOCK     0x0004 ///< Do not block when reading packets from input.
#define AVFMT_FLAG_IGNDTS       0x0008 ///< Ignore DTS on frames that contain both DTS & PTS
#define AVFMT_FLAG_NOFILLIN     0x0010 ///< Do not infer any values from other values, just return what is stored in the container
#define AVFMT_FLAG_NOPARSE      0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled
#define AVFMT_FLAG_NOBUFFER     0x0040 ///< Do not buffer frames when possible
#define AVFMT_FLAG_CUSTOM_IO    0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it.
#define AVFMT_FLAG_DISCARD_CORRUPT  0x0100 ///< Discard frames marked corrupted
#define AVFMT_FLAG_FLUSH_PACKETS    0x0200 ///< Flush the AVIOContext every packet.
#define AVFMT_FLAG_MP4A_LATM    0x8000 ///< Enable RTP MP4A-LATM payload
#define AVFMT_FLAG_SORT_DTS    0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down)
#define AVFMT_FLAG_PRIV_OPT    0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted)
#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate.

    /**
     * decoding: size of data to probe; encoding: unused.
     */
    unsigned int probesize;

    /**
     * decoding: maximum time (in AV_TIME_BASE units) during which the input should
     * be analyzed in avformat_find_stream_info().
     */
    int max_analyze_duration;

    const uint8_t *key;
    int keylen;

    unsigned int nb_programs;
    AVProgram **programs;

    /**
     * Forced video codec_id.
     * Demuxing: Set by user.
     */
    enum AVCodecID video_codec_id;

    /**
     * Forced audio codec_id.
     * Demuxing: Set by user.
     */
    enum AVCodecID audio_codec_id;

    /**
     * Forced subtitle codec_id.
     * Demuxing: Set by user.
     */
    enum AVCodecID subtitle_codec_id;

    /**
     * Maximum amount of memory in bytes to use for the index of each stream.
     * If the index exceeds this size, entries will be discarded as
     * needed to maintain a smaller size. This can lead to slower or less
     * accurate seeking (depends on demuxer).
     * Demuxers for which a full in-memory index is mandatory will ignore
     * this.
     * muxing  : unused
     * demuxing: set by user
     */
    unsigned int max_index_size;

    /**
     * Maximum amount of memory in bytes to use for buffering frames
     * obtained from realtime capture devices.
     */
    unsigned int max_picture_buffer;

    /**
     * Number of chapters in AVChapter array.
     * When muxing, chapters are normally written in the file header,
     * so nb_chapters should normally be initialized before write_header
     * is called. Some muxers (e.g. mov and mkv) can also write chapters
     * in the trailer.  To write chapters in the trailer, nb_chapters
     * must be zero when write_header is called and non-zero when
     * write_trailer is called.
     * muxing  : set by user
     * demuxing: set by libavformat
     */
    unsigned int nb_chapters;
    AVChapter **chapters;

    AVDictionary *metadata;

    /**
     * Start time of the stream in real world time, in microseconds
     * since the unix epoch (00:00 1st January 1970). That is, pts=0
     * in the stream was captured at this real world time.
     * - encoding: Set by user.
     * - decoding: Unused.
     */
    int64_t start_time_realtime;

    /**
     * decoding: number of frames used to probe fps
     */
    int fps_probe_size;

    /**
     * Error recognition; higher values will detect more errors but may
     * misdetect some more or less valid parts as errors.
     * - encoding: unused
     * - decoding: Set by user.
     */
    int error_recognition;

    /**
     * Custom interrupt callbacks for the I/O layer.
     *
     * decoding: set by the user before avformat_open_input().
     * encoding: set by the user before avformat_write_header()
     * (mainly useful for AVFMT_NOFILE formats). The callback
     * should also be passed to avio_open2() if it's used to
     * open the file.
     */
    AVIOInterruptCB interrupt_callback;

    /**
     * Flags to enable debugging.
     */
    int debug;
#define FF_FDEBUG_TS        0x0001

    /**
     * Transport stream id.
     * This will be moved into demuxer private options. Thus no API/ABI compatibility
     */
    int ts_id;

    /**
     * Audio preload in microseconds.
     * Note, not all formats support this and unpredictable things may happen if it is used when not supported.
     * - encoding: Set by user via AVOptions (NO direct access)
     * - decoding: unused
     */
    int audio_preload;

    /**
     * Max chunk time in microseconds.
     * Note, not all formats support this and unpredictable things may happen if it is used when not supported.
     * - encoding: Set by user via AVOptions (NO direct access)
     * - decoding: unused
     */
    int max_chunk_duration;

    /**
     * Max chunk size in bytes
     * Note, not all formats support this and unpredictable things may happen if it is used when not supported.
     * - encoding: Set by user via AVOptions (NO direct access)
     * - decoding: unused
     */
    int max_chunk_size;

    /**
     * forces the use of wallclock timestamps as pts/dts of packets
     * This has undefined results in the presence of B frames.
     * - encoding: unused
     * - decoding: Set by user via AVOptions (NO direct access)
     */
    int use_wallclock_as_timestamps;

    /**
     * Avoid negative timestamps during muxing.
     *  0 -> allow negative timestamps
     *  1 -> avoid negative timestamps
     * -1 -> choose automatically (default)
     * Note, this only works when interleave_packet_per_dts is in use.
     * - encoding: Set by user via AVOptions (NO direct access)
     * - decoding: unused
     */
    int avoid_negative_ts;

    /**
     * avio flags, used to force AVIO_FLAG_DIRECT.
     * - encoding: unused
     * - decoding: Set by user via AVOptions (NO direct access)
     */
    int avio_flags;

    /**
     * The duration field can be estimated through various ways, and this field can be used
     * to know how the duration was estimated.
     * - encoding: unused
     * - decoding: Read by user via AVOptions (NO direct access)
     */
    enum AVDurationEstimationMethod duration_estimation_method;

    /**
     * Skip initial bytes when opening stream
     * - encoding: unused
     * - decoding: Set by user via AVOptions (NO direct access)
     */
    unsigned int skip_initial_bytes;

    /**
     * Correct single timestamp overflows
     * - encoding: unused
     * - decoding: Set by user via AVOptions (NO direct access)
     */
    unsigned int correct_ts_overflow;

    /**
     * Force seeking to any (also non key) frames.
     * - encoding: unused
     * - decoding: Set by user via AVOptions (NO direct access)
     */
    int seek2any;

    /**
     * Flush the I/O context after each packet.
     * - encoding: Set by user via AVOptions (NO direct access)
     * - decoding: unused
     */
    int flush_packets;

    /**
     * format probing score.
     * The maximal score is AVPROBE_SCORE_MAX; it is set when the demuxer probes
     * the format.
     * - encoding: unused
     * - decoding: set by avformat, read by user via av_format_get_probe_score() (NO direct access)
     */
    int probe_score;

    /*****************************************************************
     * All fields below this line are not part of the public API. They
     * may not be used outside of libavformat and can be changed and
     * removed at will.
     * New public fields should be added right above.
     *****************************************************************
     */

    /**
     * This buffer is only needed when packets were already buffered but
     * not decoded, for example to get the codec parameters in MPEG
     * streams.
     */
    struct AVPacketList *packet_buffer;
    struct AVPacketList *packet_buffer_end;

    /* av_seek_frame() support */
    int64_t data_offset; /**< offset of the first packet */

    /**
     * Raw packets from the demuxer, prior to parsing and decoding.
     * This buffer is used for buffering packets until the codec can
     * be identified, as parsing cannot be done without knowing the
     * codec.
     */
    struct AVPacketList *raw_packet_buffer;
    struct AVPacketList *raw_packet_buffer_end;
    /**
     * Packets split by the parser get queued here.
     */
    struct AVPacketList *parse_queue;
    struct AVPacketList *parse_queue_end;
    /**
     * Remaining size available for raw_packet_buffer, in bytes.
     */
#define RAW_PACKET_BUFFER_SIZE 2500000
    int raw_packet_buffer_remaining_size;

    /**
     * Offset to remap timestamps to be non-negative.
     * Expressed in timebase units.
     * @see AVStream.mux_ts_offset
     */
    int64_t offset;

    /**
     * Timebase for the timestamp offset.
     */
    AVRational offset_timebase;

    /**
     * IO repositioned flag.
     * This is set by avformat when the underlying IO context read pointer
     * is repositioned, for example when doing byte based seeking.
     * Demuxers can use the flag to detect such changes.
     */
    int io_repositioned;

    /**
     * Forced video codec.
     * This allows forcing a specific decoder, even when there are multiple with
     * the same codec_id.
     * Demuxing: Set by user via av_format_set_video_codec (NO direct access).
     */
    AVCodec *video_codec;

    /**
     * Forced audio codec.
     * This allows forcing a specific decoder, even when there are multiple with
     * the same codec_id.
     * Demuxing: Set by user via av_format_set_audio_codec (NO direct access).
     */
    AVCodec *audio_codec;

    /**
     * Forced subtitle codec.
     * This allows forcing a specific decoder, even when there are multiple with
     * the same codec_id.
     * Demuxing: Set by user via av_format_set_subtitle_codec (NO direct access).
     */
    AVCodec *subtitle_codec;
} AVFormatContext;
 
int av_format_get_probe_score(const AVFormatContext *s);
AVCodec * av_format_get_video_codec(const AVFormatContext *s);
void av_format_set_video_codec(AVFormatContext *s, AVCodec *c);
AVCodec * av_format_get_audio_codec(const AVFormatContext *s);
void av_format_set_audio_codec(AVFormatContext *s, AVCodec *c);
AVCodec * av_format_get_subtitle_codec(const AVFormatContext *s);
void av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c);
 
/**
* Returns the method used to set ctx->duration.
*
* @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE.
*/
enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx);
 
/**
 * Singly-linked list node used internally to queue AVPackets
 * (see the packet_buffer / raw_packet_buffer / parse_queue fields
 * of AVFormatContext).
 */
typedef struct AVPacketList {
    AVPacket pkt;
    struct AVPacketList *next;
} AVPacketList;
 
 
/**
* @defgroup lavf_core Core functions
* @ingroup libavf
*
* Functions for querying libavformat capabilities, allocating core structures,
* etc.
* @{
*/
 
/**
* Return the LIBAVFORMAT_VERSION_INT constant.
*/
unsigned avformat_version(void);
 
/**
* Return the libavformat build-time configuration.
*/
const char *avformat_configuration(void);
 
/**
* Return the libavformat license.
*/
const char *avformat_license(void);
 
/**
* Initialize libavformat and register all the muxers, demuxers and
* protocols. If you do not call this function, then you can select
* exactly which formats you want to support.
*
* @see av_register_input_format()
* @see av_register_output_format()
*/
void av_register_all(void);
 
void av_register_input_format(AVInputFormat *format);
void av_register_output_format(AVOutputFormat *format);
 
/**
* Do global initialization of network components. This is optional,
* but recommended, since it avoids the overhead of implicitly
* doing the setup for each session.
*
* Calling this function will become mandatory if using network
* protocols at some major version bump.
*/
int avformat_network_init(void);
 
/**
* Undo the initialization done by avformat_network_init.
*/
int avformat_network_deinit(void);
 
/**
* If f is NULL, returns the first registered input format,
* if f is non-NULL, returns the next registered input format after f
* or NULL if f is the last one.
*/
AVInputFormat *av_iformat_next(AVInputFormat *f);
 
/**
* If f is NULL, returns the first registered output format,
* if f is non-NULL, returns the next registered output format after f
* or NULL if f is the last one.
*/
AVOutputFormat *av_oformat_next(AVOutputFormat *f);
 
/**
* Allocate an AVFormatContext.
* avformat_free_context() can be used to free the context and everything
* allocated by the framework within it.
*/
AVFormatContext *avformat_alloc_context(void);
 
/**
* Free an AVFormatContext and all its streams.
* @param s context to free
*/
void avformat_free_context(AVFormatContext *s);
 
/**
* Get the AVClass for AVFormatContext. It can be used in combination with
* AV_OPT_SEARCH_FAKE_OBJ for examining options.
*
* @see av_opt_find().
*/
const AVClass *avformat_get_class(void);
 
/**
* Add a new stream to a media file.
*
* When demuxing, it is called by the demuxer in read_header(). If the
* flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also
* be called in read_packet().
*
* When muxing, should be called by the user before avformat_write_header().
*
* User is required to call avcodec_close() and avformat_free_context() to
* clean up the allocation by avformat_new_stream().
*
* @param c If non-NULL, the AVCodecContext corresponding to the new stream
* will be initialized to use this codec. This is needed for e.g. codec-specific
* defaults to be set, so codec should be provided if it is known.
*
* @return newly created stream or NULL on error.
*/
AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c);
 
AVProgram *av_new_program(AVFormatContext *s, int id);
 
/**
* @}
*/
 
 
#if FF_API_ALLOC_OUTPUT_CONTEXT
/**
* @deprecated deprecated in favor of avformat_alloc_output_context2()
*/
attribute_deprecated
AVFormatContext *avformat_alloc_output_context(const char *format,
AVOutputFormat *oformat,
const char *filename);
#endif
 
/**
* Allocate an AVFormatContext for an output format.
* avformat_free_context() can be used to free the context and
* everything allocated by the framework within it.
*
* @param *ctx is set to the created format context, or to NULL in
* case of failure
* @param oformat format to use for allocating the context, if NULL
* format_name and filename are used instead
* @param format_name the name of output format to use for allocating the
* context, if NULL filename is used instead
* @param filename the name of the filename to use for allocating the
* context, may be NULL
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure
*/
int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat,
const char *format_name, const char *filename);
 
/**
* @addtogroup lavf_decoding
* @{
*/
 
/**
* Find AVInputFormat based on the short name of the input format.
*/
AVInputFormat *av_find_input_format(const char *short_name);
 
/**
* Guess the file format.
*
* @param is_opened Whether the file is already opened; determines whether
* demuxers with or without AVFMT_NOFILE are probed.
*/
AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened);
 
/**
* Guess the file format.
*
* @param is_opened Whether the file is already opened; determines whether
* demuxers with or without AVFMT_NOFILE are probed.
* @param score_max A probe score larger than this is required to accept a
*                  detection, the variable is set to the actual detection
*                  score afterwards.
* If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended
* to retry with a larger probe buffer.
*/
AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max);
 
/**
* Guess the file format.
*
* @param is_opened Whether the file is already opened; determines whether
* demuxers with or without AVFMT_NOFILE are probed.
* @param score_ret The score of the best detection.
*/
AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret);
 
/**
* Probe a bytestream to determine the input format. Each time a probe returns
* with a score that is too low, the probe buffer size is increased and another
* attempt is made. When the maximum probe size is reached, the input format
* with the highest score is returned.
*
* @param pb the bytestream to probe
* @param fmt the input format is put here
* @param filename the filename of the stream
* @param logctx the log context
* @param offset the offset within the bytestream to probe from
* @param max_probe_size the maximum probe buffer size (zero for default)
* @return the score in case of success (the maximal score is
*         AVPROBE_SCORE_MAX), a negative AVERROR code otherwise
*/
int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
const char *filename, void *logctx,
unsigned int offset, unsigned int max_probe_size);
 
/**
* Like av_probe_input_buffer2() but returns 0 on success
*/
int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
const char *filename, void *logctx,
unsigned int offset, unsigned int max_probe_size);
 
/**
* Open an input stream and read the header. The codecs are not opened.
* The stream must be closed with avformat_close_input().
*
* @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context).
* May be a pointer to NULL, in which case an AVFormatContext is allocated by this
* function and written into ps.
* Note that a user-supplied AVFormatContext will be freed on failure.
* @param filename Name of the stream to open.
* @param fmt If non-NULL, this parameter forces a specific input format.
* Otherwise the format is autodetected.
* @param options A dictionary filled with AVFormatContext and demuxer-private options.
* On return this parameter will be destroyed and replaced with a dict containing
* options that were not found. May be NULL.
*
* @return 0 on success, a negative AVERROR on failure.
*
* @note If you want to use custom IO, preallocate the format context and set its pb field.
*/
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);
 
attribute_deprecated
int av_demuxer_open(AVFormatContext *ic);
 
#if FF_API_FORMAT_PARAMETERS
/**
* Read packets of a media file to get stream information. This
* is useful for file formats with no headers such as MPEG. This
* function also computes the real framerate in case of MPEG-2 repeat
* frame mode.
* The logical file position is not changed by this function;
* examined packets may be buffered for later processing.
*
* @param ic media file handle
* @return >=0 if OK, AVERROR_xxx on error
* @todo Let the user decide somehow what information is needed so that
* we do not waste time getting stuff the user does not need.
*
* @deprecated use avformat_find_stream_info.
*/
attribute_deprecated
int av_find_stream_info(AVFormatContext *ic);
#endif
 
/**
* Read packets of a media file to get stream information. This
* is useful for file formats with no headers such as MPEG. This
* function also computes the real framerate in case of MPEG-2 repeat
* frame mode.
* The logical file position is not changed by this function;
* examined packets may be buffered for later processing.
*
* @param ic media file handle
* @param options If non-NULL, an ic.nb_streams long array of pointers to
* dictionaries, where i-th member contains options for
* codec corresponding to i-th stream.
* On return each dictionary will be filled with options that were not found.
* @return >=0 if OK, AVERROR_xxx on error
*
* @note this function isn't guaranteed to open all the codecs, so
* options being non-empty at return is a perfectly normal behavior.
*
* @todo Let the user decide somehow what information is needed so that
* we do not waste time getting stuff the user does not need.
*/
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options);
 
/**
* Find the programs which belong to a given stream.
*
* @param ic media file handle
* @param last the last found program, the search will start after this
* program, or from the beginning if it is NULL
* @param s stream index
* @return the next program which belongs to s, NULL if no program is found or
* the last program is not among the programs of ic.
*/
AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s);
 
/**
* Find the "best" stream in the file.
* The best stream is determined according to various heuristics as the most
* likely to be what the user expects.
* If the decoder parameter is non-NULL, av_find_best_stream will find the
* default decoder for the stream's codec; streams for which no decoder can
* be found are ignored.
*
* @param ic media file handle
* @param type stream type: video, audio, subtitles, etc.
* @param wanted_stream_nb user-requested stream number,
* or -1 for automatic selection
* @param related_stream try to find a stream related (eg. in the same
* program) to this one, or -1 if none
* @param decoder_ret if non-NULL, returns the decoder for the
* selected stream
* @param flags flags; none are currently defined
* @return the non-negative stream number in case of success,
* AVERROR_STREAM_NOT_FOUND if no stream with the requested type
* could be found,
* AVERROR_DECODER_NOT_FOUND if streams were found but no decoder
* @note If av_find_best_stream returns successfully and decoder_ret is not
* NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec.
*/
int av_find_best_stream(AVFormatContext *ic,
enum AVMediaType type,
int wanted_stream_nb,
int related_stream,
AVCodec **decoder_ret,
int flags);
 
#if FF_API_READ_PACKET
/**
* @deprecated use AVFMT_FLAG_NOFILLIN | AVFMT_FLAG_NOPARSE to read raw
* unprocessed packets
*
* Read a transport packet from a media file.
*
* This function is obsolete and should never be used.
* Use av_read_frame() instead.
*
* @param s media file handle
* @param pkt is filled
* @return 0 if OK, AVERROR_xxx on error
*/
attribute_deprecated
int av_read_packet(AVFormatContext *s, AVPacket *pkt);
#endif
 
/**
* Return the next frame of a stream.
* This function returns what is stored in the file, and does not validate
* that what is there are valid frames for the decoder. It will split what is
* stored in the file into frames and return one for each call. It will not
* omit invalid data between valid frames so as to give the decoder the maximum
* information possible for decoding.
*
* If pkt->buf is NULL, then the packet is valid until the next
* av_read_frame() or until avformat_close_input(). Otherwise the packet
* is valid indefinitely. In both cases the packet must be freed with
* av_free_packet when it is no longer needed. For video, the packet contains
* exactly one frame. For audio, it contains an integer number of frames if each
* frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames
* have a variable size (e.g. MPEG audio), then it contains one frame.
*
* pkt->pts, pkt->dts and pkt->duration are always set to correct
* values in AVStream.time_base units (and guessed if the format cannot
* provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
* has B-frames, so it is better to rely on pkt->dts if you do not
* decompress the payload.
*
* @return 0 if OK, < 0 on error or end of file
*/
int av_read_frame(AVFormatContext *s, AVPacket *pkt);
 
/**
* Seek to the keyframe at timestamp.
* 'timestamp' in 'stream_index'.
* @param stream_index If stream_index is (-1), a default
* stream is selected, and timestamp is automatically converted
* from AV_TIME_BASE units to the stream specific time_base.
* @param timestamp Timestamp in AVStream.time_base units
* or, if no stream is specified, in AV_TIME_BASE units.
* @param flags flags which select direction and seeking mode
* @return >= 0 on success
*/
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp,
int flags);
 
/**
* Seek to timestamp ts.
* Seeking will be done so that the point from which all active streams
* can be presented successfully will be closest to ts and within min/max_ts.
* Active streams are all streams that have AVStream.discard < AVDISCARD_ALL.
*
* If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and
* are the file position (this may not be supported by all demuxers).
* If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames
* in the stream with stream_index (this may not be supported by all demuxers).
* Otherwise all timestamps are in units of the stream selected by stream_index
* or if stream_index is -1, in AV_TIME_BASE units.
* If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as
* keyframes (this may not be supported by all demuxers).
* If flags contain AVSEEK_FLAG_BACKWARD, it is ignored.
*
* @param stream_index index of the stream which is used as time base reference
* @param min_ts smallest acceptable timestamp
* @param ts target timestamp
* @param max_ts largest acceptable timestamp
* @param flags flags
* @return >=0 on success, error code otherwise
*
* @note This is part of the new seek API which is still under construction.
* Thus do not use this yet. It may change at any time, do not expect
* ABI compatibility yet!
*/
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);
 
/**
* Start playing a network-based stream (e.g. RTSP stream) at the
* current position.
*/
int av_read_play(AVFormatContext *s);
 
/**
* Pause a network-based stream (e.g. RTSP stream).
*
* Use av_read_play() to resume it.
*/
int av_read_pause(AVFormatContext *s);
 
#if FF_API_CLOSE_INPUT_FILE
/**
* @deprecated use avformat_close_input()
* Close a media file (but not its codecs).
*
* @param s media file handle
*/
attribute_deprecated
void av_close_input_file(AVFormatContext *s);
#endif
 
/**
* Close an opened input AVFormatContext. Free it and all its contents
* and set *s to NULL.
*/
void avformat_close_input(AVFormatContext **s);
/**
* @}
*/
 
#if FF_API_NEW_STREAM
/**
* Add a new stream to a media file.
*
* Can only be called in the read_header() function. If the flag
* AVFMTCTX_NOHEADER is in the format context, then new streams
* can be added in read_packet too.
*
* @param s media file handle
* @param id file-format-dependent stream ID
*/
attribute_deprecated
AVStream *av_new_stream(AVFormatContext *s, int id);
#endif
 
#if FF_API_SET_PTS_INFO
/**
* @deprecated this function is not supposed to be called outside of lavf
*/
attribute_deprecated
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
unsigned int pts_num, unsigned int pts_den);
#endif
 
#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward
#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes
#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes
#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number
 
/**
* @addtogroup lavf_encoding
* @{
*/
/**
* Allocate the stream private data and write the stream header to
* an output media file.
*
* @param s Media file handle, must be allocated with avformat_alloc_context().
* Its oformat field must be set to the desired output format;
* Its pb field must be set to an already opened AVIOContext.
* @param options An AVDictionary filled with AVFormatContext and muxer-private options.
* On return this parameter will be destroyed and replaced with a dict containing
* options that were not found. May be NULL.
*
* @return 0 on success, negative AVERROR on failure.
*
* @see av_opt_find, av_dict_set, avio_open, av_oformat_next.
*/
int avformat_write_header(AVFormatContext *s, AVDictionary **options);
 
/**
* Write a packet to an output media file.
*
* The packet shall contain one audio or video frame.
* The packet must be correctly interleaved according to the container
* specification, if not then av_interleaved_write_frame must be used.
*
* @param s media file handle
* @param pkt The packet, which contains the stream_index, buf/buf_size,
* dts/pts, ...
* This can be NULL (at any time, not just at the end), in
* order to immediately flush data buffered within the muxer,
* for muxers that buffer up data internally before writing it
* to the output.
* @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush
*/
int av_write_frame(AVFormatContext *s, AVPacket *pkt);
 
/**
* Write a packet to an output media file ensuring correct interleaving.
*
* The packet must contain one audio or video frame.
* If the packets are already correctly interleaved, the application should
* call av_write_frame() instead as it is slightly faster. It is also important
* to keep in mind that completely non-interleaved input will need huge amounts
* of memory to interleave with this, so it is preferable to interleave at the
* demuxer level.
*
* @param s media file handle
* @param pkt The packet containing the data to be written. pkt->buf must be set
* to a valid AVBufferRef describing the packet data. Libavformat takes
* ownership of this reference and will unref it when it sees fit. The caller
* must not access the data through this reference after this function returns.
* This can be NULL (at any time, not just at the end), to flush the
* interleaving queues.
* Packet's @ref AVPacket.stream_index "stream_index" field must be set to the
* index of the corresponding stream in @ref AVFormatContext.streams
* "s.streams".
* It is very strongly recommended that timing information (@ref AVPacket.pts
* "pts", @ref AVPacket.dts "dts" @ref AVPacket.duration "duration") is set to
* correct values.
*
* @return 0 on success, a negative AVERROR on error.
*/
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);
 
/**
* Write the stream trailer to an output media file and free the
* file private data.
*
* May only be called after a successful call to avformat_write_header.
*
* @param s media file handle
* @return 0 if OK, AVERROR_xxx on error
*/
int av_write_trailer(AVFormatContext *s);
 
/**
* Return the output format in the list of registered output formats
* which best matches the provided parameters, or return NULL if
* there is no match.
*
* @param short_name if non-NULL checks if short_name matches with the
* names of the registered formats
* @param filename if non-NULL checks if filename terminates with the
* extensions of the registered formats
* @param mime_type if non-NULL checks if mime_type matches with the
* MIME type of the registered formats
*/
AVOutputFormat *av_guess_format(const char *short_name,
const char *filename,
const char *mime_type);
 
/**
* Guess the codec ID based upon muxer and filename.
*/
enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
const char *filename, const char *mime_type,
enum AVMediaType type);
 
/**
* Get timing information for the data currently output.
* The exact meaning of "currently output" depends on the format.
* It is mostly relevant for devices that have an internal buffer and/or
* work in real time.
* @param s media file handle
* @param stream stream in the media file
* @param[out] dts DTS of the last packet output for the stream, in stream
* time_base units
 * @param[out] wall absolute time when that packet was output,
 * in microseconds
* @return 0 if OK, AVERROR(ENOSYS) if the format does not support it
 * Note: some formats or devices may not allow measuring dts and wall
 * time atomically.
*/
int av_get_output_timestamp(struct AVFormatContext *s, int stream,
int64_t *dts, int64_t *wall);
 
 
/**
* @}
*/
 
 
/**
* @defgroup lavf_misc Utility functions
* @ingroup libavf
* @{
*
* Miscellaneous utility functions related to both muxing and demuxing
* (or neither).
*/
 
/**
* Send a nice hexadecimal dump of a buffer to the specified file stream.
*
* @param f The file stream pointer where the dump should be sent to.
* @param buf buffer
* @param size buffer size
*
* @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2
*/
void av_hex_dump(FILE *f, const uint8_t *buf, int size);
 
/**
* Send a nice hexadecimal dump of a buffer to the log.
*
* @param avcl A pointer to an arbitrary struct of which the first field is a
* pointer to an AVClass struct.
* @param level The importance level of the message, lower values signifying
* higher importance.
* @param buf buffer
* @param size buffer size
*
* @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2
*/
void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size);
 
/**
* Send a nice dump of a packet to the specified file stream.
*
* @param f The file stream pointer where the dump should be sent to.
* @param pkt packet to dump
* @param dump_payload True if the payload must be displayed, too.
* @param st AVStream that the packet belongs to
*/
void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st);
 
 
/**
* Send a nice dump of a packet to the log.
*
* @param avcl A pointer to an arbitrary struct of which the first field is a
* pointer to an AVClass struct.
* @param level The importance level of the message, lower values signifying
* higher importance.
* @param pkt packet to dump
* @param dump_payload True if the payload must be displayed, too.
* @param st AVStream that the packet belongs to
*/
void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
AVStream *st);
 
/**
* Get the AVCodecID for the given codec tag tag.
* If no codec id is found returns AV_CODEC_ID_NONE.
*
* @param tags list of supported codec_id-codec_tag pairs, as stored
* in AVInputFormat.codec_tag and AVOutputFormat.codec_tag
*/
enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag);
 
/**
* Get the codec tag for the given codec id id.
* If no codec tag is found returns 0.
*
* @param tags list of supported codec_id-codec_tag pairs, as stored
* in AVInputFormat.codec_tag and AVOutputFormat.codec_tag
*/
unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id);
 
/**
* Get the codec tag for the given codec id.
*
* @param tags list of supported codec_id - codec_tag pairs, as stored
* in AVInputFormat.codec_tag and AVOutputFormat.codec_tag
* @param id codec id that should be searched for in the list
* @param tag A pointer to the found tag
* @return 0 if id was not found in tags, > 0 if it was found
*/
int av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id,
unsigned int *tag);
 
int av_find_default_stream_index(AVFormatContext *s);
 
/**
* Get the index for a specific timestamp.
* @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond
* to the timestamp which is <= the requested one, if backward
* is 0, then it will be >=
* if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
* @return < 0 if no such timestamp could be found
*/
int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);
 
/**
* Add an index entry into a sorted list. Update the entry if the list
* already contains it.
*
* @param timestamp timestamp in the time base of the given stream
*/
int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
int size, int distance, int flags);
 
 
/**
* Split a URL string into components.
*
* The pointers to buffers for storing individual components may be null,
* in order to ignore that component. Buffers for components not found are
* set to empty strings. If the port is not found, it is set to a negative
* value.
*
* @param proto the buffer for the protocol
* @param proto_size the size of the proto buffer
* @param authorization the buffer for the authorization
* @param authorization_size the size of the authorization buffer
* @param hostname the buffer for the host name
* @param hostname_size the size of the hostname buffer
* @param port_ptr a pointer to store the port number in
* @param path the buffer for the path
* @param path_size the size of the path buffer
* @param url the URL to split
*/
void av_url_split(char *proto, int proto_size,
char *authorization, int authorization_size,
char *hostname, int hostname_size,
int *port_ptr,
char *path, int path_size,
const char *url);
 
 
void av_dump_format(AVFormatContext *ic,
int index,
const char *url,
int is_output);
 
/**
* Return in 'buf' the path with '%d' replaced by a number.
*
* Also handles the '%0nd' format where 'n' is the total number
* of digits and '%%'.
*
* @param buf destination buffer
* @param buf_size destination buffer size
* @param path numbered sequence string
* @param number frame number
* @return 0 if OK, -1 on format error
*/
int av_get_frame_filename(char *buf, int buf_size,
const char *path, int number);
 
/**
* Check whether filename actually is a numbered sequence generator.
*
* @param filename possible numbered sequence string
* @return 1 if a valid numbered sequence string, 0 otherwise
*/
int av_filename_number_test(const char *filename);
 
/**
* Generate an SDP for an RTP session.
*
* Note, this overwrites the id values of AVStreams in the muxer contexts
* for getting unique dynamic payload types.
*
* @param ac array of AVFormatContexts describing the RTP streams. If the
* array is composed by only one context, such context can contain
* multiple AVStreams (one AVStream per RTP stream). Otherwise,
* all the contexts in the array (an AVCodecContext per RTP stream)
* must contain only one AVStream.
* @param n_files number of AVCodecContexts contained in ac
* @param buf buffer where the SDP will be stored (must be allocated by
* the caller)
* @param size the size of the buffer
* @return 0 if OK, AVERROR_xxx on error
*/
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size);
 
/**
* Return a positive value if the given filename has one of the given
* extensions, 0 otherwise.
*
* @param extensions a comma-separated list of filename extensions
*/
int av_match_ext(const char *filename, const char *extensions);
 
/**
* Test if the given container can store a codec.
*
* @param std_compliance standards compliance level, one of FF_COMPLIANCE_*
*
* @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot.
* A negative number if this information is not available.
*/
int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance);
 
/**
* @defgroup riff_fourcc RIFF FourCCs
* @{
* Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. The tables are
* meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the
* following code:
* @code
* uint32_t tag = MKTAG('H', '2', '6', '4');
* const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 };
* enum AVCodecID id = av_codec_get_id(table, tag);
* @endcode
*/
/**
* @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID.
*/
const struct AVCodecTag *avformat_get_riff_video_tags(void);
/**
* @return the table mapping RIFF FourCCs for audio to AVCodecID.
*/
const struct AVCodecTag *avformat_get_riff_audio_tags(void);
 
/**
* @}
*/
 
/**
* Guess the sample aspect ratio of a frame, based on both the stream and the
* frame aspect ratio.
*
* Since the frame aspect ratio is set by the codec but the stream aspect ratio
* is set by the demuxer, these two may not be equal. This function tries to
* return the value that you should use if you would like to display the frame.
*
* Basic logic is to use the stream aspect ratio if it is set to something sane
* otherwise use the frame aspect ratio. This way a container setting, which is
* usually easy to modify can override the coded value in the frames.
*
* @param format the format context which the stream is part of
* @param stream the stream which the frame is part of
* @param frame the frame with the aspect ratio to be determined
* @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea
*/
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame);
 
/**
* Guess the frame rate, based on both the container and codec information.
*
* @param ctx the format context which the stream is part of
* @param stream the stream which the frame is part of
* @param frame the frame for which the frame rate should be determined, may be NULL
* @return the guessed (valid) frame rate, 0/1 if no idea
*/
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame);
 
/**
* Check if the stream st contained in s is matched by the stream specifier
* spec.
*
* See the "stream specifiers" chapter in the documentation for the syntax
* of spec.
*
* @return >0 if st is matched by spec;
* 0 if st is not matched by spec;
* AVERROR code if spec is invalid
*
* @note A stream specifier can match several streams in the format.
*/
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
const char *spec);
 
int avformat_queue_attached_pictures(AVFormatContext *s);
 
 
/**
* @}
*/
 
#endif /* AVFORMAT_AVFORMAT_H */
/contrib/sdk/sources/ffmpeg/libavformat/avformat.lib
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/contrib/sdk/sources/ffmpeg/libavformat/avi.h
0,0 → 1,38
/*
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_AVI_H
#define AVFORMAT_AVI_H

/* dwFlags bits of the main AVI header ('avih' chunk, AVIMAINHEADER). */
#define AVIF_HASINDEX 0x00000010 // Index at end of file?
#define AVIF_MUSTUSEINDEX 0x00000020 // index, not file order, gives presentation order
#define AVIF_ISINTERLEAVED 0x00000100 // file is properly interleaved
#define AVIF_TRUSTCKTYPE 0x00000800 // Use CKType to find key frames?
#define AVIF_WASCAPTUREFILE 0x00010000 // specially allocated capture file
#define AVIF_COPYRIGHTED 0x00020000 // contains copyrighted data

#define AVI_MAX_RIFF_SIZE 0x40000000LL // 1 GiB cap per RIFF chunk ('AVIX' chunks continue beyond)
#define AVI_MASTER_INDEX_SIZE 256 // entries reserved for the OpenDML master index
#define AVI_MAX_STREAM_COUNT 100 // sanity limit on the number of streams

/* index flags */
#define AVIIF_INDEX 0x10 // 'idx1' entry flag: chunk is a keyframe (AVIIF_KEYFRAME in MS headers)

#endif /* AVFORMAT_AVI_H */
/contrib/sdk/sources/ffmpeg/libavformat/avidec.c
0,0 → 1,1738
/*
* AVI demuxer
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bswap.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "avformat.h"
#include "avi.h"
#include "dv.h"
#include "internal.h"
#include "riff.h"
 
/* Per-stream demuxer state, stored in AVStream.priv_data. */
typedef struct AVIStream {
    int64_t frame_offset;   /* current frame (video) or byte (audio) counter
                             * (used to compute the pts) */
    int remaining;          /* NOTE(review): presumably bytes left of the current chunk — confirm in avi_read_packet */
    int packet_size;        /* NOTE(review): presumably size of the chunk being split — confirm in avi_read_packet */

    uint32_t scale;         /* stream time base is scale/rate (dwScale/dwRate of 'strh') */
    uint32_t rate;
    int sample_size;        /* size of one sample (or packet)
                             * (in the rate/scale sense) in bytes */

    int64_t cum_len;        /* temporary storage (used during seek) */
    int prefix;             /* normally 'd'<<8 + 'c' or 'w'<<8 + 'b' */
    int prefix_count;
    uint32_t pal[256];      /* palette; exported while has_pal is set */
    int has_pal;
    int dshow_block_align;  /* block align variable used to emulate bugs in
                             * the MS dshow demuxer */

    AVFormatContext *sub_ctx;  /* sub-demuxer for embedded subtitle streams */
    AVPacket sub_pkt;          /* pending packet from the sub-demuxer */
    uint8_t *sub_buffer;       /* buffer backing the sub-demuxer's I/O */

    int64_t seek_pos;          /* position resolved by the last seek */
} AVIStream;
 
/* Demuxer-global state, stored in AVFormatContext.priv_data. */
typedef struct {
    const AVClass *class;     /* must be first: enables AVOption handling */
    int64_t riff_end;         /* absolute end of the current RIFF chunk (set by get_riff) */
    int64_t movi_end;         /* absolute end of the 'movi' LIST */
    int64_t fsize;            /* file size, possibly corrected from riff_end */
    int64_t io_fsize;         /* raw avio_size() result */
    int64_t movi_list;        /* start offset of the 'movi' LIST */
    int64_t last_pkt_pos;
    int index_loaded;         /* 0: none, 2: ODML index parsed */
    int is_odml;              /* file has a 'dmlh' (OpenDML) header */
    int non_interleaved;      /* streams are stored non-interleaved */
    int stream_index;         /* stream of the packet currently being read, -1 if none */
    DVDemuxContext *dv_demux; /* set for type-1 DV-in-AVI files */
    int odml_depth;           /* current recursion depth in read_braindead_odml_indx */
    int use_odml;             /* option: parse the ODML index (default on) */
#define MAX_ODML_DEPTH 1000
    int64_t dts_max;
} AVIContext;
 
 
/* Private options: "use_odml" controls OpenDML ('indx') index parsing;
 * default 1 (enabled), range -1..1. */
static const AVOption options[] = {
    { "use_odml", "use odml index", offsetof(AVIContext, use_odml), AV_OPT_TYPE_INT, {.i64 = 1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM},
    { NULL },
};

/* AVClass exposing the options above through the AVOption API. */
static const AVClass demuxer_class = {
    .class_name = "avi",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEMUXER,
};
 
 
/* Accepted 8-byte signatures: bytes 0-3 open the file, bytes 4-7 are the
 * RIFF form type read after the 32-bit chunk size (see get_riff()). */
static const char avi_headers[][8] = {
    { 'R', 'I', 'F', 'F', 'A', 'V', 'I', ' ' },  /* standard AVI */
    { 'R', 'I', 'F', 'F', 'A', 'V', 'I', 'X' },  /* OpenDML extension chunk */
    { 'R', 'I', 'F', 'F', 'A', 'V', 'I', 0x19 }, /* broken muxer output (warned about in get_riff) */
    { 'O', 'N', '2', ' ', 'O', 'N', '2', 'f' },  /* On2 variant */
    { 'R', 'I', 'F', 'F', 'A', 'M', 'V', ' ' },  /* AMV — TODO confirm exact producer */
    { 0 }
};

/* Map the AVI 'strn' chunk to the generic "title" metadata key. */
static const AVMetadataConv avi_metadata_conv[] = {
    { "strn", "title" },
    { 0 },
};
 
static int avi_load_index(AVFormatContext *s);
static int guess_ni_flag(AVFormatContext *s);
 
/* Debug helper: log a little-endian fourcc and its chunk size.
 * Compiles to nothing unless av_dlog output is enabled. */
#define print_tag(str, tag, size)                 \
    av_dlog(NULL, "%s: tag=%c%c%c%c size=0x%x\n", \
            str, tag & 0xff,                      \
            (tag >> 8) & 0xff,                    \
            (tag >> 16) & 0xff,                   \
            (tag >> 24) & 0xff,                   \
            size)
 
/* Duration (in stream time-base units) covered by a chunk of 'len' bytes. */
static inline int get_duration(AVIStream *ast, int len)
{
    if (ast->sample_size)
        return len;                /* fixed sample size: bytes == duration */

    if (ast->dshow_block_align) {
        int align = ast->dshow_block_align;
        return (len + align - 1) / align;  /* whole blocks, rounded up */
    }

    return 1;                      /* one frame per chunk */
}
 
/**
 * Validate the 8-byte RIFF signature at the current position and record
 * where the RIFF chunk ends in avi->riff_end.
 *
 * @return 0 on success, AVERROR_INVALIDDATA if the signature is unknown
 */
static int get_riff(AVFormatContext *s, AVIOContext *pb)
{
    AVIContext *avi = s->priv_data;
    char header[8];
    int idx = 0;

    /* signature layout: fourcc, 32-bit chunk size, form-type fourcc */
    avio_read(pb, header, 4);
    avi->riff_end  = avio_rl32(pb);      /* RIFF chunk size */
    avi->riff_end += avio_tell(pb);      /* absolute RIFF chunk end */
    avio_read(pb, header + 4, 4);

    /* scan the known-signature table; it is terminated by a zero entry */
    while (avi_headers[idx][0] && memcmp(header, avi_headers[idx], 8))
        idx++;
    if (!avi_headers[idx][0])
        return AVERROR_INVALIDDATA;

    if (header[7] == 0x19)
        av_log(s, AV_LOG_INFO,
               "This file has been generated by a totally broken muxer.\n");

    return 0;
}
 
/**
 * Parse one OpenDML 'indx' chunk (already positioned past the fourcc/size).
 *
 * Handles both flavors: a standard index (index_type == 1, absolute-offset
 * entries added directly to the stream's index) and a super index
 * (index_type == 0, entries pointing at further 'indx' chunks which are
 * parsed recursively, bounded by MAX_ODML_DEPTH).
 *
 * @param s         demuxer context (reads from s->pb)
 * @param frame_num running frame counter for recursion (advanced per entry)
 * @return 0 on success, negative on malformed data or seek failure
 */
static int read_braindead_odml_indx(AVFormatContext *s, int frame_num)
{
    AVIContext *avi     = s->priv_data;
    AVIOContext *pb     = s->pb;
    int longs_pre_entry = avio_rl16(pb);
    int index_sub_type  = avio_r8(pb);
    int index_type      = avio_r8(pb);
    int entries_in_use  = avio_rl32(pb);
    int chunk_id        = avio_rl32(pb);
    int64_t base        = avio_rl64(pb);
    /* chunk id is e.g. "01wb": first two ASCII digits are the stream number */
    int stream_id       = ((chunk_id & 0xFF) - '0') * 10 +
                          ((chunk_id >> 8 & 0xFF) - '0');
    AVStream *st;
    AVIStream *ast;
    int i;
    int64_t last_pos = -1;
    int64_t filesize = avi->fsize;

    av_dlog(s,
            "longs_pre_entry:%d index_type:%d entries_in_use:%d "
            "chunk_id:%X base:%16"PRIX64"\n",
            longs_pre_entry,
            index_type,
            entries_in_use,
            chunk_id,
            base);

    if (stream_id >= s->nb_streams || stream_id < 0)
        return AVERROR_INVALIDDATA;
    st  = s->streams[stream_id];
    ast = st->priv_data;

    /* only sub-type 0 (frame index) is supported */
    if (index_sub_type)
        return AVERROR_INVALIDDATA;

    avio_rl32(pb);  /* reserved dword */

    /* standard index entries are exactly 2 longs (offset, size) each */
    if (index_type && longs_pre_entry != 2)
        return AVERROR_INVALIDDATA;
    if (index_type > 1)
        return AVERROR_INVALIDDATA;

    if (filesize > 0 && base >= filesize) {
        av_log(s, AV_LOG_ERROR, "ODML index invalid\n");
        /* workaround for muxers that store the 32-bit base duplicated in
         * both halves of the 64-bit field: fall back to the low half when
         * that yields an in-file offset */
        if (base >> 32 == (base & 0xFFFFFFFF) &&
            (base & 0xFFFFFFFF) < filesize &&
            filesize <= 0xFFFFFFFF)
            base &= 0xFFFFFFFF;
        else
            return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < entries_in_use; i++) {
        if (index_type) {
            /* standard index: offset is relative to base and points past
             * the 8-byte chunk header, hence the -8 */
            int64_t pos = avio_rl32(pb) + base - 8;
            int len     = avio_rl32(pb);
            int key     = len >= 0;  /* high bit set means non-keyframe */
            len &= 0x7FFFFFFF;

#ifdef DEBUG_SEEK
            av_log(s, AV_LOG_ERROR, "pos:%"PRId64", len:%X\n", pos, len);
#endif
            if (url_feof(pb))
                return AVERROR_INVALIDDATA;

            /* duplicate/degenerate positions imply non-interleaved storage */
            if (last_pos == pos || pos == base - 8)
                avi->non_interleaved = 1;
            if (last_pos != pos && (len || !ast->sample_size))
                av_add_index_entry(st, pos, ast->cum_len, len, 0,
                                   key ? AVINDEX_KEYFRAME : 0);

            ast->cum_len += get_duration(ast, len);
            last_pos      = pos;
        } else {
            /* super index: each entry points at a nested 'indx' chunk */
            int64_t offset, pos;
            int duration;
            offset = avio_rl64(pb);
            avio_rl32(pb); /* size */
            duration = avio_rl32(pb);

            if (url_feof(pb))
                return AVERROR_INVALIDDATA;

            pos = avio_tell(pb);  /* remember where to resume after recursing */

            if (avi->odml_depth > MAX_ODML_DEPTH) {
                av_log(s, AV_LOG_ERROR, "Too deeply nested ODML indexes\n");
                return AVERROR_INVALIDDATA;
            }

            /* +8 skips the nested chunk's fourcc and size fields */
            if (avio_seek(pb, offset + 8, SEEK_SET) < 0)
                return -1;
            avi->odml_depth++;
            read_braindead_odml_indx(s, frame_num);
            avi->odml_depth--;
            frame_num += duration;

            if (avio_seek(pb, pos, SEEK_SET) < 0) {
                av_log(s, AV_LOG_ERROR, "Failed to restore position after reading index\n");
                return -1;
            }

        }
    }
    avi->index_loaded = 2;  /* mark: full ODML index available */
    return 0;
}
 
/**
 * Expand degenerate indexes: a stream whose whole payload is a single
 * index entry (fixed sample size) gets that entry split into synthetic
 * keyframe entries of at least 1024 bytes each, so seeking can work.
 */
static void clean_index(AVFormatContext *s)
{
    int n;

    for (n = 0; n < s->nb_streams; n++) {
        AVStream  *st  = s->streams[n];
        AVIStream *ast = st->priv_data;
        int64_t pos, size, ts, off;
        int chunk;

        /* only the one-giant-entry + fixed-sample-size case is rewritten */
        if (st->nb_index_entries != 1 || ast->sample_size == 0)
            continue;

        /* double the sample size until each synthetic chunk is >= 1 KiB */
        chunk = ast->sample_size;
        while (chunk < 1024)
            chunk += chunk;

        pos  = st->index_entries[0].pos;
        size = st->index_entries[0].size;
        ts   = st->index_entries[0].timestamp;

        for (off = 0; off < size; off += chunk)
            av_add_index_entry(st, pos + off, ts + off,
                               FFMIN(chunk, size - off), 0, AVINDEX_KEYFRAME);
    }
}
 
/**
 * Read a 'size'-byte metadata chunk and store it as a dictionary entry
 * whose key is the chunk's fourcc.
 *
 * @param s    demuxer context (I/O context and global metadata)
 * @param st   stream the tag belongs to, or NULL for file-global metadata
 * @param tag  chunk fourcc, used verbatim as the metadata key
 * @param size payload size from the file, before even-padding
 * @return 0 on success, negative AVERROR code on failure
 */
static int avi_read_tag(AVFormatContext *s, AVStream *st, uint32_t tag,
                        uint32_t size)
{
    AVIOContext *pb = s->pb;
    char key[5] = { 0 };
    char *value;

    /* Reject sizes whose even-padded value + 1 (NUL terminator) would
     * overflow. The old check (size == UINT_MAX, tested after padding)
     * could never fire: an odd UINT_MAX wraps to 0 when padded, turning
     * a corrupt size into a silent empty tag. */
    if (size >= UINT_MAX - 1)
        return AVERROR(EINVAL);

    size += (size & 1);  /* RIFF chunks are padded to even length */

    value = av_malloc(size + 1);
    if (!value)
        return AVERROR(ENOMEM);
    /* A short read must not leak uninitialized heap bytes into metadata. */
    if (avio_read(pb, value, size) != size) {
        av_freep(&value);
        return AVERROR_INVALIDDATA;
    }
    value[size] = 0;

    AV_WL32(key, tag);  /* fourcc -> 4-character key string */

    return av_dict_set(st ? &st->metadata : &s->metadata, key, value,
                       AV_DICT_DONT_STRDUP_VAL);
}
 
/* English month abbreviations as they appear in AVI 'IDIT' date strings. */
static const char months[12][4] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun",
                                    "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };
 
/**
 * Convert an AVI 'IDIT' date string to a "creation_time" metadata entry.
 * Accepts the ctime-like form ("Mon Mar 10 15:04:43 2003") or a slash
 * form ("YYYY/MM/DD..."), which is rewritten to dashes in place.
 */
static void avi_metadata_creation_time(AVDictionary **metadata, char *date)
{
    char month[4], time[9], buffer[64];
    int i, day, year;

    /* parse standard AVI date format (ie. "Mon Mar 10 15:04:43 2003") */
    if (sscanf(date, "%*3s%*[ ]%3s%*[ ]%2d%*[ ]%8s%*[ ]%4d",
               month, &day, time, &year) == 4) {
        for (i = 0; i < 12; i++) {
            if (av_strcasecmp(month, months[i]))
                continue;
            /* found the month name: emit "YYYY-MM-DD HH:MM:SS" */
            snprintf(buffer, sizeof(buffer), "%.4d-%.2d-%.2d %s",
                     year, i + 1, day, time);
            av_dict_set(metadata, "creation_time", buffer, 0);
        }
    } else if (date[4] == '/' && date[7] == '/') {
        date[4] = date[7] = '-';  /* "YYYY/MM/DD" -> "YYYY-MM-DD" */
        av_dict_set(metadata, "creation_time", date, 0);
    }
}
 
/**
 * Parse the Nikon-specific 'ncdt' LIST written by Nikon cameras and export
 * maker, model and creation_time into the file's global metadata.
 *
 * @param s   demuxer context; reads from s->pb, writes s->metadata
 * @param end absolute file offset at which the enclosing list stops
 *
 * NOTE(review): loop termination relies on avio_tell() advancing past
 * 'end'; no explicit EOF/error check here — confirm pb handles EOF sanely.
 */
static void avi_read_nikon(AVFormatContext *s, uint64_t end)
{
    while (avio_tell(s->pb) < end) {
        uint32_t tag  = avio_rl32(s->pb);
        uint32_t size = avio_rl32(s->pb);
        switch (tag) {
        case MKTAG('n', 'c', 't', 'g'): /* Nikon Tags */
        {
            uint64_t tag_end = avio_tell(s->pb) + size;
            /* 'nctg' payload: sequence of 16-bit (tag, size) records */
            while (avio_tell(s->pb) < tag_end) {
                uint16_t tag  = avio_rl16(s->pb);  /* intentionally shadows outer tag */
                uint16_t size = avio_rl16(s->pb);  /* intentionally shadows outer size */
                const char *name = NULL;
                char buffer[64] = { 0 };
                /* read at most 63 bytes; the unread remainder (tracked by
                 * decrementing size) is skipped after the switch */
                size -= avio_read(s->pb, buffer,
                                  FFMIN(size, sizeof(buffer) - 1));
                switch (tag) {
                case 0x03:
                    name = "maker";
                    break;
                case 0x04:
                    name = "model";
                    break;
                case 0x13:
                    name = "creation_time";
                    /* "YYYY:MM:DD ..." -> "YYYY-MM-DD ..." */
                    if (buffer[4] == ':' && buffer[7] == ':')
                        buffer[4] = buffer[7] = '-';
                    break;
                }
                if (name)
                    av_dict_set(&s->metadata, name, buffer, 0);
                avio_skip(s->pb, size);  /* skip any bytes that did not fit in buffer */
            }
            break;
        }
        default:
            avio_skip(s->pb, size);  /* unknown sub-chunk: skip its payload */
            break;
        }
    }
}
 
/* Parse the full AVI header: the RIFF wrapper, the hdrl LIST with its
 * avih/strh/strf/... chunks and any OpenDML index, stopping at the start
 * of the movi LIST where the packet data begins.
 * Returns 0 on success or a negative AVERROR code. */
static int avi_read_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int tag, tag1, handler;
    int codec_type, stream_index, frame_period;
    unsigned int size;
    int i;
    AVStream *st;
    AVIStream *ast = NULL;
    int avih_width = 0, avih_height = 0;
    int amv_file_format = 0;
    uint64_t list_end = 0;
    int ret;
    AVDictionaryEntry *dict_entry;

    avi->stream_index = -1;

    /* Validate the RIFF/AVI signature and learn where the RIFF ends. */
    ret = get_riff(s, pb);
    if (ret < 0)
        return ret;

    av_log(avi, AV_LOG_DEBUG, "use odml:%d\n", avi->use_odml);

    avi->io_fsize = avi->fsize = avio_size(pb);
    /* If the I/O size is unknown or smaller than the declared RIFF end,
     * trust the RIFF header instead (riff_end == 8 means empty/unknown). */
    if (avi->fsize <= 0 || avi->fsize < avi->riff_end)
        avi->fsize = avi->riff_end == 8 ? INT64_MAX : avi->riff_end;

    /* first list tag */
    stream_index = -1;
    codec_type   = -1;
    frame_period = 0;
    /* Walk the chunk sequence until the movi LIST is reached. */
    for (;;) {
        if (url_feof(pb))
            goto fail;
        tag  = avio_rl32(pb);
        size = avio_rl32(pb);

        print_tag("tag", tag, size);

        switch (tag) {
        case MKTAG('L', 'I', 'S', 'T'):
            list_end = avio_tell(pb) + size;
            /* Ignored, except at start of video packets. */
            tag1 = avio_rl32(pb);

            print_tag("list", tag1, 0);

            if (tag1 == MKTAG('m', 'o', 'v', 'i')) {
                /* Data section found: remember its bounds and stop parsing. */
                avi->movi_list = avio_tell(pb) - 4;
                if (size)
                    avi->movi_end = avi->movi_list + size + (size & 1);
                else
                    avi->movi_end = avi->fsize;
                av_dlog(NULL, "movi end=%"PRIx64"\n", avi->movi_end);
                goto end_of_header;
            } else if (tag1 == MKTAG('I', 'N', 'F', 'O'))
                ff_read_riff_info(s, size - 4);
            else if (tag1 == MKTAG('n', 'c', 'd', 't'))
                avi_read_nikon(s, list_end);

            break;
        case MKTAG('I', 'D', 'I', 'T'):
        {
            /* Creation-time metadata chunk. */
            unsigned char date[64] = { 0 };
            size += (size & 1);
            size -= avio_read(pb, date, FFMIN(size, sizeof(date) - 1));
            avio_skip(pb, size);
            avi_metadata_creation_time(&s->metadata, date);
            break;
        }
        case MKTAG('d', 'm', 'l', 'h'):
            /* OpenDML extension header. */
            avi->is_odml = 1;
            avio_skip(pb, size + (size & 1));
            break;
        case MKTAG('a', 'm', 'v', 'h'):
            amv_file_format = 1;
            /* fallthrough: AMV headers share the avih field layout */
        case MKTAG('a', 'v', 'i', 'h'):
            /* AVI header */
            /* using frame_period is bad idea */
            frame_period = avio_rl32(pb);
            avio_rl32(pb); /* max. bytes per second */
            avio_rl32(pb);
            avi->non_interleaved |= avio_rl32(pb) & AVIF_MUSTUSEINDEX;

            avio_skip(pb, 2 * 4);
            avio_rl32(pb);
            avio_rl32(pb);
            avih_width  = avio_rl32(pb);
            avih_height = avio_rl32(pb);

            avio_skip(pb, size - 10 * 4);
            break;
        case MKTAG('s', 't', 'r', 'h'):
            /* stream header */

            tag1    = avio_rl32(pb);
            handler = avio_rl32(pb); /* codec tag */

            if (tag1 == MKTAG('p', 'a', 'd', 's')) {
                avio_skip(pb, size - 8);
                break;
            } else {
                stream_index++;
                st = avformat_new_stream(s, NULL);
                if (!st)
                    goto fail;

                st->id = stream_index;
                ast = av_mallocz(sizeof(AVIStream));
                if (!ast)
                    goto fail;
                st->priv_data = ast;
            }
            /* AMV files have fixed stream roles: 0 = video, 1 = audio. */
            if (amv_file_format)
                tag1 = stream_index ? MKTAG('a', 'u', 'd', 's')
                                    : MKTAG('v', 'i', 'd', 's');

            print_tag("strh", tag1, -1);

            if (tag1 == MKTAG('i', 'a', 'v', 's') ||
                tag1 == MKTAG('i', 'v', 'a', 's')) {
                int64_t dv_dur;

                /* After some consideration -- I don't think we
                 * have to support anything but DV in type1 AVIs. */
                if (s->nb_streams != 1)
                    goto fail;

                if (handler != MKTAG('d', 'v', 's', 'd') &&
                    handler != MKTAG('d', 'v', 'h', 'd') &&
                    handler != MKTAG('d', 'v', 's', 'l'))
                    goto fail;

                /* Replace the container stream with the one created by the
                 * DV demuxer, keeping the AVIStream private data. */
                ast = s->streams[0]->priv_data;
                av_freep(&s->streams[0]->codec->extradata);
                av_freep(&s->streams[0]->codec);
                if (s->streams[0]->info)
                    av_freep(&s->streams[0]->info->duration_error);
                av_freep(&s->streams[0]->info);
                av_freep(&s->streams[0]);
                s->nb_streams = 0;
                if (CONFIG_DV_DEMUXER) {
                    avi->dv_demux = avpriv_dv_init_demux(s);
                    if (!avi->dv_demux)
                        goto fail;
                } else
                    goto fail;
                s->streams[0]->priv_data = ast;
                avio_skip(pb, 3 * 4);
                ast->scale = avio_rl32(pb);
                ast->rate  = avio_rl32(pb);
                avio_skip(pb, 4); /* start time */

                dv_dur = avio_rl32(pb);
                if (ast->scale > 0 && ast->rate > 0 && dv_dur > 0) {
                    dv_dur *= AV_TIME_BASE;
                    s->duration = av_rescale(dv_dur, ast->scale, ast->rate);
                }
                /* else, leave duration alone; timing estimation in utils.c
                 * will make a guess based on bitrate. */

                stream_index = s->nb_streams - 1;
                avio_skip(pb, size - 9 * 4);
                break;
            }

            av_assert0(stream_index < s->nb_streams);
            st->codec->stream_codec_tag = handler;

            avio_rl32(pb); /* flags */
            avio_rl16(pb); /* priority */
            avio_rl16(pb); /* language */
            avio_rl32(pb); /* initial frame */
            ast->scale = avio_rl32(pb);
            ast->rate  = avio_rl32(pb);
            /* Replace an invalid scale/rate pair with the avih frame
             * period, or a 25 fps fallback. */
            if (!(ast->scale && ast->rate)) {
                av_log(s, AV_LOG_WARNING,
                       "scale/rate is %u/%u which is invalid. "
                       "(This file has been generated by broken software.)\n",
                       ast->scale,
                       ast->rate);
                if (frame_period) {
                    ast->rate  = 1000000;
                    ast->scale = frame_period;
                } else {
                    ast->rate  = 25;
                    ast->scale = 1;
                }
            }
            avpriv_set_pts_info(st, 64, ast->scale, ast->rate);

            ast->cum_len  = avio_rl32(pb); /* start */
            st->nb_frames = avio_rl32(pb);

            st->start_time = 0;
            avio_rl32(pb); /* buffer size */
            avio_rl32(pb); /* quality */
            /* Reject absurd start offsets (more than one hour). */
            if (ast->cum_len * ast->scale / ast->rate > 3600) {
                av_log(s, AV_LOG_ERROR, "crazy start time, iam scared, giving up\n");
                return AVERROR_INVALIDDATA;
            }
            ast->sample_size = avio_rl32(pb); /* sample ssize */
            ast->cum_len *= FFMAX(1, ast->sample_size);
            av_dlog(s, "%"PRIu32" %"PRIu32" %d\n",
                    ast->rate, ast->scale, ast->sample_size);

            switch (tag1) {
            case MKTAG('v', 'i', 'd', 's'):
                codec_type = AVMEDIA_TYPE_VIDEO;

                ast->sample_size = 0;
                break;
            case MKTAG('a', 'u', 'd', 's'):
                codec_type = AVMEDIA_TYPE_AUDIO;
                break;
            case MKTAG('t', 'x', 't', 's'):
                codec_type = AVMEDIA_TYPE_SUBTITLE;
                break;
            case MKTAG('d', 'a', 't', 's'):
                codec_type = AVMEDIA_TYPE_DATA;
                break;
            default:
                av_log(s, AV_LOG_INFO, "unknown stream type %X\n", tag1);
            }
            /* sample_size == 0 means one chunk == one frame, so nb_frames
             * is directly the stream duration in frames. */
            if (ast->sample_size == 0) {
                st->duration = st->nb_frames;
                if (st->duration > 0 && avi->io_fsize > 0 && avi->riff_end > avi->io_fsize) {
                    av_log(s, AV_LOG_DEBUG, "File is truncated adjusting duration\n");
                    st->duration = av_rescale(st->duration, avi->io_fsize, avi->riff_end);
                }
            }
            ast->frame_offset = ast->cum_len;
            avio_skip(pb, size - 12 * 4);
            break;
        case MKTAG('s', 't', 'r', 'f'):
            /* stream header */
            if (!size)
                break;
            if (stream_index >= (unsigned)s->nb_streams || avi->dv_demux) {
                avio_skip(pb, size);
            } else {
                uint64_t cur_pos = avio_tell(pb);
                unsigned esize;
                /* Never read past the end of the enclosing LIST. */
                if (cur_pos < list_end)
                    size = FFMIN(size, list_end - cur_pos);
                st = s->streams[stream_index];
                switch (codec_type) {
                case AVMEDIA_TYPE_VIDEO:
                    if (amv_file_format) {
                        st->codec->width      = avih_width;
                        st->codec->height     = avih_height;
                        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                        st->codec->codec_id   = AV_CODEC_ID_AMV;
                        avio_skip(pb, size);
                        break;
                    }
                    tag1 = ff_get_bmp_header(pb, st, &esize);

                    /* DXSB/DXSA are XSUB subtitles disguised as video. */
                    if (tag1 == MKTAG('D', 'X', 'S', 'B') ||
                        tag1 == MKTAG('D', 'X', 'S', 'A')) {
                        st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
                        st->codec->codec_tag  = tag1;
                        st->codec->codec_id   = AV_CODEC_ID_XSUB;
                        break;
                    }

                    if (size > 10 * 4 && size < (1 << 30) && size < avi->fsize) {
                        if (esize == size - 1 && (esize & 1)) {
                            st->codec->extradata_size = esize - 10 * 4;
                        } else
                            st->codec->extradata_size = size - 10 * 4;
                        if (ff_alloc_extradata(st->codec, st->codec->extradata_size))
                            return AVERROR(ENOMEM);
                        avio_read(pb,
                                  st->codec->extradata,
                                  st->codec->extradata_size);
                    }

                    // FIXME: check if the encoder really did this correctly
                    if (st->codec->extradata_size & 1)
                        avio_r8(pb);

                    /* Extract palette from extradata if bpp <= 8.
                     * This code assumes that extradata contains only palette.
                     * This is true for all paletted codecs implemented in
                     * FFmpeg. */
                    if (st->codec->extradata_size &&
                        (st->codec->bits_per_coded_sample <= 8)) {
                        int pal_size = (1 << st->codec->bits_per_coded_sample) << 2;
                        const uint8_t *pal_src;

                        pal_size = FFMIN(pal_size, st->codec->extradata_size);
                        pal_src  = st->codec->extradata +
                                   st->codec->extradata_size - pal_size;
                        for (i = 0; i < pal_size / 4; i++)
                            ast->pal[i] = 0xFFU << 24 | AV_RL32(pal_src + 4 * i);
                        ast->has_pal = 1;
                    }

                    print_tag("video", tag1, 0);

                    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                    st->codec->codec_tag  = tag1;
                    st->codec->codec_id   = ff_codec_get_id(ff_codec_bmp_tags,
                                                            tag1);
                    /* This is needed to get the pict type which is necessary
                     * for generating correct pts. */
                    st->need_parsing = AVSTREAM_PARSE_HEADERS;

                    /* Unknown fourcc with positive height: append a marker
                     * so later code knows the picture is stored bottom-up. */
                    if (st->codec->codec_tag == 0 && st->codec->height > 0 &&
                        st->codec->extradata_size < 1U << 30) {
                        st->codec->extradata_size += 9;
                        if ((ret = av_reallocp(&st->codec->extradata,
                                               st->codec->extradata_size +
                                               FF_INPUT_BUFFER_PADDING_SIZE)) < 0) {
                            st->codec->extradata_size = 0;
                            return ret;
                        } else
                            memcpy(st->codec->extradata + st->codec->extradata_size - 9,
                                   "BottomUp", 9);
                    }
                    st->codec->height = FFABS(st->codec->height);

                    // avio_skip(pb, size - 5 * 4);
                    break;
                case AVMEDIA_TYPE_AUDIO:
                    ret = ff_get_wav_header(pb, st->codec, size);
                    if (ret < 0)
                        return ret;
                    ast->dshow_block_align = st->codec->block_align;
                    if (ast->sample_size && st->codec->block_align &&
                        ast->sample_size != st->codec->block_align) {
                        av_log(s,
                               AV_LOG_WARNING,
                               "sample size (%d) != block align (%d)\n",
                               ast->sample_size,
                               st->codec->block_align);
                        ast->sample_size = st->codec->block_align;
                    }
                    /* 2-aligned
                     * (fix for Stargate SG-1 - 3x18 - Shades of Grey.avi) */
                    if (size & 1)
                        avio_skip(pb, 1);
                    /* Force parsing as several audio frames can be in
                     * one packet and timestamps refer to packet start. */
                    st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
                    /* ADTS header is in extradata, AAC without header must be
                     * stored as exact frames. Parser not needed and it will
                     * fail. */
                    if (st->codec->codec_id == AV_CODEC_ID_AAC &&
                        st->codec->extradata_size)
                        st->need_parsing = AVSTREAM_PARSE_NONE;
                    /* AVI files with Xan DPCM audio (wrongly) declare PCM
                     * audio in the header but have Axan as stream_code_tag. */
                    if (st->codec->stream_codec_tag == AV_RL32("Axan")) {
                        st->codec->codec_id  = AV_CODEC_ID_XAN_DPCM;
                        st->codec->codec_tag = 0;
                        ast->dshow_block_align = 0;
                    }
                    if (amv_file_format) {
                        st->codec->codec_id    = AV_CODEC_ID_ADPCM_IMA_AMV;
                        ast->dshow_block_align = 0;
                    }
                    /* Sanity overrides for broken DirectShow block_align /
                     * sample_size combinations. */
                    if (st->codec->codec_id == AV_CODEC_ID_AAC && ast->dshow_block_align <= 4 && ast->dshow_block_align) {
                        av_log(s, AV_LOG_DEBUG, "overriding invalid dshow_block_align of %d\n", ast->dshow_block_align);
                        ast->dshow_block_align = 0;
                    }
                    if (st->codec->codec_id == AV_CODEC_ID_AAC && ast->dshow_block_align == 1024 && ast->sample_size == 1024 ||
                        st->codec->codec_id == AV_CODEC_ID_AAC && ast->dshow_block_align == 4096 && ast->sample_size == 4096 ||
                        st->codec->codec_id == AV_CODEC_ID_MP3 && ast->dshow_block_align == 1152 && ast->sample_size == 1152) {
                        av_log(s, AV_LOG_DEBUG, "overriding sample_size\n");
                        ast->sample_size = 0;
                    }
                    break;
                case AVMEDIA_TYPE_SUBTITLE:
                    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
                    st->request_probe     = 1;
                    avio_skip(pb, size);
                    break;
                default:
                    st->codec->codec_type = AVMEDIA_TYPE_DATA;
                    st->codec->codec_id   = AV_CODEC_ID_NONE;
                    st->codec->codec_tag  = 0;
                    avio_skip(pb, size);
                    break;
                }
            }
            break;
        case MKTAG('s', 't', 'r', 'd'):
            /* Codec-specific data; only taken as extradata when none was
             * set by strf yet, and never for the H264 codec tag. */
            if (stream_index >= (unsigned)s->nb_streams
                || s->streams[stream_index]->codec->extradata_size
                || s->streams[stream_index]->codec->codec_tag == MKTAG('H','2','6','4')) {
                avio_skip(pb, size);
            } else {
                uint64_t cur_pos = avio_tell(pb);
                if (cur_pos < list_end)
                    size = FFMIN(size, list_end - cur_pos);
                st = s->streams[stream_index];

                if (size < (1 << 30)) {
                    if (ff_alloc_extradata(st->codec, size))
                        return AVERROR(ENOMEM);
                    avio_read(pb, st->codec->extradata, st->codec->extradata_size);
                }

                if (st->codec->extradata_size & 1) //FIXME check if the encoder really did this correctly
                    avio_r8(pb);
            }
            break;
        case MKTAG('i', 'n', 'd', 'x'):
            /* OpenDML master index. */
            i = avio_tell(pb);
            if (pb->seekable && !(s->flags & AVFMT_FLAG_IGNIDX) &&
                avi->use_odml &&
                read_braindead_odml_indx(s, 0) < 0 &&
                (s->error_recognition & AV_EF_EXPLODE))
                goto fail;
            avio_seek(pb, i + size, SEEK_SET);
            break;
        case MKTAG('v', 'p', 'r', 'p'):
            /* Video properties: derive the sample aspect ratio from the
             * active frame rectangle and its display aspect. */
            if (stream_index < (unsigned)s->nb_streams && size > 9 * 4) {
                AVRational active, active_aspect;

                st = s->streams[stream_index];
                avio_rl32(pb);
                avio_rl32(pb);
                avio_rl32(pb);
                avio_rl32(pb);
                avio_rl32(pb);

                active_aspect.den = avio_rl16(pb);
                active_aspect.num = avio_rl16(pb);
                active.num        = avio_rl32(pb);
                active.den        = avio_rl32(pb);
                avio_rl32(pb); // nbFieldsPerFrame

                if (active_aspect.num && active_aspect.den &&
                    active.num && active.den) {
                    st->sample_aspect_ratio = av_div_q(active_aspect, active);
                    av_dlog(s, "vprp %d/%d %d/%d\n",
                            active_aspect.num, active_aspect.den,
                            active.num, active.den);
                }
                size -= 9 * 4;
            }
            avio_skip(pb, size);
            break;
        case MKTAG('s', 't', 'r', 'n'):
            /* Stream name: attach to the most recently created stream. */
            if (s->nb_streams) {
                ret = avi_read_tag(s, s->streams[s->nb_streams - 1], tag, size);
                if (ret < 0)
                    return ret;
                break;
            }
            /* fallthrough when no stream exists yet */
        default:
            if (size > 1000000) {
                /* Implausibly large chunk: assume the header is broken and
                 * treat the rest of the file as packet data. */
                av_log(s, AV_LOG_ERROR,
                       "Something went wrong during header parsing, "
                       "I will ignore it and try to continue anyway.\n");
                if (s->error_recognition & AV_EF_EXPLODE)
                    goto fail;
                avi->movi_list = avio_tell(pb) - 4;
                avi->movi_end  = avi->fsize;
                goto end_of_header;
            }
            /* skip tag */
            size += (size & 1);
            avio_skip(pb, size);
            break;
        }
    }

end_of_header:
    /* check stream number */
    if (stream_index != s->nb_streams - 1) {

fail:
        return AVERROR_INVALIDDATA;
    }

    /* Load idx1/ODML indexes up front when the input is seekable. */
    if (!avi->index_loaded && pb->seekable)
        avi_load_index(s);
    avi->index_loaded    |= 1;
    avi->non_interleaved |= guess_ni_flag(s) | (s->flags & AVFMT_FLAG_SORT_DTS);

    /* PotEncoder-muxed MPEG-1/2 video needs full parsing. */
    dict_entry = av_dict_get(s->metadata, "ISFT", NULL, 0);
    if (dict_entry && !strcmp(dict_entry->value, "PotEncoder"))
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            if (   st->codec->codec_id == AV_CODEC_ID_MPEG1VIDEO
                || st->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO)
                st->need_parsing = AVSTREAM_PARSE_FULL;
        }

    /* Check whether any stream has index entries (i ends at nb_streams
     * when none does). */
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        if (st->nb_index_entries)
            break;
    }
    // DV-in-AVI cannot be non-interleaved, if set this must be
    // a mis-detection.
    if (avi->dv_demux)
        avi->non_interleaved = 0;
    /* Non-interleaved reading needs an index; without one, fall back. */
    if (i == s->nb_streams && avi->non_interleaved) {
        av_log(s, AV_LOG_WARNING,
               "Non-interleaved AVI without index, switching to interleaved\n");
        avi->non_interleaved = 0;
    }

    if (avi->non_interleaved) {
        av_log(s, AV_LOG_INFO, "non-interleaved AVI\n");
        clean_index(s);
    }

    ff_metadata_conv_ctx(s, NULL, avi_metadata_conv);
    ff_metadata_conv_ctx(s, NULL, ff_riff_info_conv);

    return 0;
}
 
/* Detect and open an embedded "GAB2" subtitle stream carried inside an
 * AVI packet.  On success the payload is handed to a nested demuxer
 * (kept in ast->sub_ctx), the packet is emptied (its buffer ownership
 * moves to ast->sub_buffer) and 1 is returned; otherwise the packet is
 * left untouched and 0 is returned. */
static int read_gab2_sub(AVStream *st, AVPacket *pkt)
{
    if (pkt->size >= 7 &&
        !strcmp(pkt->data, "GAB2") && AV_RL16(pkt->data + 5) == 2) {
        uint8_t desc[256];
        int score = AVPROBE_SCORE_EXTENSION, ret;
        AVIStream *ast = st->priv_data;
        AVInputFormat *sub_demuxer;
        AVRational time_base;
        /* Wrap the remaining payload in a read-only AVIOContext. */
        AVIOContext *pb = avio_alloc_context(pkt->data + 7,
                                             pkt->size - 7,
                                             0, NULL, NULL, NULL, NULL);
        AVProbeData pd;
        unsigned int desc_len = avio_rl32(pb);

        if (desc_len > pb->buf_end - pb->buf_ptr)
            goto error;

        /* UTF-16LE description string becomes the stream title. */
        ret = avio_get_str16le(pb, desc_len, desc, sizeof(desc));
        avio_skip(pb, desc_len - ret);
        if (*desc)
            av_dict_set(&st->metadata, "title", desc, 0);

        avio_rl16(pb); /* flags? */
        avio_rl32(pb); /* data size */

        /* Probe the payload to find the actual subtitle demuxer. */
        pd = (AVProbeData) { .buf      = pb->buf_ptr,
                             .buf_size = pb->buf_end - pb->buf_ptr };
        if (!(sub_demuxer = av_probe_input_format2(&pd, 1, &score)))
            goto error;

        if (!(ast->sub_ctx = avformat_alloc_context()))
            goto error;

        ast->sub_ctx->pb = pb;
        if (!avformat_open_input(&ast->sub_ctx, "", sub_demuxer, NULL)) {
            /* Queue the first subtitle packet and mirror the nested
             * stream's codec parameters onto the outer stream. */
            ff_read_packet(ast->sub_ctx, &ast->sub_pkt);
            *st->codec = *ast->sub_ctx->streams[0]->codec;
            /* extradata pointer was shallow-copied above; clear the inner
             * copy so it is not freed twice. */
            ast->sub_ctx->streams[0]->codec->extradata = NULL;
            time_base = ast->sub_ctx->streams[0]->time_base;
            avpriv_set_pts_info(st, 64, time_base.num, time_base.den);
        }
        /* Keep the packet buffer alive: the nested pb reads from it. */
        ast->sub_buffer = pkt->data;
        memset(pkt, 0, sizeof(*pkt));
        return 1;

error:
        av_freep(&pb);
    }
    return 0;
}
 
/* Return the subtitle stream whose queued GAB2 packet is due no later
 * than the next packet of next_st, copying that packet into *pkt and
 * refilling the queue slot; NULL when no subtitle packet is due. */
static AVStream *get_subtitle_pkt(AVFormatContext *s, AVStream *next_st,
                                  AVPacket *pkt)
{
    AVIStream *next_ast = next_st->priv_data;
    AVStream *best_st = NULL;
    int64_t best_ts = INT64_MAX;
    int64_t limit_ts;
    int n;

    limit_ts = av_rescale_q(next_ast->frame_offset, next_st->time_base,
                            AV_TIME_BASE_Q);

    /* Find the earliest queued subtitle packet not later than limit_ts. */
    for (n = 0; n < s->nb_streams; n++) {
        AVStream *cur = s->streams[n];
        AVIStream *cur_ast = cur->priv_data;
        int64_t cur_ts;

        if (cur->discard >= AVDISCARD_ALL || !cur_ast || !cur_ast->sub_pkt.data)
            continue;
        cur_ts = av_rescale_q(cur_ast->sub_pkt.dts, cur->time_base,
                              AV_TIME_BASE_Q);
        if (cur_ts <= limit_ts && cur_ts < best_ts) {
            best_ts = cur_ts;
            best_st = cur;
        }
    }

    if (best_st) {
        AVIStream *best_ast = best_st->priv_data;

        /* Hand the queued packet over and pre-load the next one. */
        *pkt = best_ast->sub_pkt;
        pkt->stream_index = best_st->index;

        if (ff_read_packet(best_ast->sub_ctx, &best_ast->sub_pkt) < 0)
            best_ast->sub_pkt.data = NULL;
    }
    return best_st;
}
 
/* Decode the two leading ASCII digits of an AVI chunk id into a stream
 * number; any non-digit maps to 100, which no real stream can match. */
static int get_stream_idx(unsigned *d)
{
    unsigned hi = d[0] - '0';
    unsigned lo = d[1] - '0';

    if (hi > 9 || lo > 9)
        return 100; // invalid stream ID
    return hi * 10 + lo;
}
 
/**
 * Resynchronize to the next packet chunk inside the movi list.
 *
 * Slides a byte window over the stream looking for a plausible
 * "##dc"/"##wb"-style chunk header, skipping index chunks, JUNK and
 * stray LISTs on the way.
 *
 * @param exit_early set to 1 to just gather packet position without making
 *                   the changes needed to actually read & return the packet
 * @return 0 once positioned on a packet, AVERROR_EOF or an I/O error code
 */
static int avi_sync(AVFormatContext *s, int exit_early)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int n;
    unsigned int d[8];
    unsigned int size;
    int64_t i, sync;

start_sync:
    memset(d, -1, sizeof(d));
    /* d[0..3] is the candidate chunk id, d[4..7] its little-endian size. */
    for (i = sync = avio_tell(pb); !url_feof(pb); i++) {
        int j;

        for (j = 0; j < 7; j++)
            d[j] = d[j + 1];
        d[7] = avio_r8(pb);

        size = d[4] + (d[5] << 8) + (d[6] << 16) + (d[7] << 24);

        n = get_stream_idx(d + 2);
        av_dlog(s, "%X %X %X %X %X %X %X %X %"PRId64" %u %d\n",
                d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n);
        /* Reject sizes that would run past EOF and non-ASCII id bytes. */
        if (i * (avi->io_fsize > 0) + (uint64_t)size > avi->fsize || d[0] > 127)
            continue;

        // parse ix##
        if ((d[0] == 'i' && d[1] == 'x' && n < s->nb_streams) ||
            // parse JUNK
            (d[0] == 'J' && d[1] == 'U' && d[2] == 'N' && d[3] == 'K') ||
            (d[0] == 'i' && d[1] == 'd' && d[2] == 'x' && d[3] == '1')) {
            avio_skip(pb, size);
            goto start_sync;
        }

        // parse stray LIST
        if (d[0] == 'L' && d[1] == 'I' && d[2] == 'S' && d[3] == 'T') {
            avio_skip(pb, 4);
            goto start_sync;
        }

        n = avi->dv_demux ? 0 : get_stream_idx(d);

        /* Misaligned id that also parses as a stream number one byte
         * later is likely a false sync. */
        if (!((i - avi->last_pkt_pos) & 1) &&
            get_stream_idx(d + 1) < s->nb_streams)
            continue;

        // detect ##ix chunk and skip
        if (d[2] == 'i' && d[3] == 'x' && n < s->nb_streams) {
            avio_skip(pb, size);
            goto start_sync;
        }

        // parse ##dc/##wb
        if (n < s->nb_streams) {
            AVStream *st;
            AVIStream *ast;
            st  = s->streams[n];
            ast = st->priv_data;

            if (!ast) {
                av_log(s, AV_LOG_WARNING, "Skiping foreign stream %d packet\n", n);
                continue;
            }

            if (s->nb_streams >= 2) {
                AVStream *st1   = s->streams[1];
                AVIStream *ast1 = st1->priv_data;
                // workaround for broken small-file-bug402.avi
                if (   d[2] == 'w' && d[3] == 'b'
                    && n == 0
                    && st ->codec->codec_type == AVMEDIA_TYPE_VIDEO
                    && st1->codec->codec_type == AVMEDIA_TYPE_AUDIO
                    && ast->prefix == 'd' * 256 + 'c'
                    && (d[2] * 256 + d[3] == ast1->prefix || !ast1->prefix_count)
                   ) {
                    n   = 1;
                    st  = st1;
                    ast = ast1;
                    av_log(s, AV_LOG_WARNING,
                           "Invalid stream + prefix combination, assuming audio.\n");
                }
            }

            /* Honor per-stream discard settings by skipping the payload. */
            if (!avi->dv_demux &&
                ((st->discard >= AVDISCARD_DEFAULT && size == 0) /* ||
                // FIXME: needs a little reordering
                (st->discard >= AVDISCARD_NONKEY &&
                !(pkt->flags & AV_PKT_FLAG_KEY)) */
                || st->discard >= AVDISCARD_ALL)) {
                if (!exit_early) {
                    ast->frame_offset += get_duration(ast, size);
                    avio_skip(pb, size);
                    goto start_sync;
                }
            }

            /* ##pc: palette change chunk; update the stream palette. */
            if (d[2] == 'p' && d[3] == 'c' && size <= 4 * 256 + 4) {
                int k    = avio_r8(pb);
                int last = (k + avio_r8(pb) - 1) & 0xFF;

                avio_rl16(pb); // flags

                // b + (g << 8) + (r << 16);
                for (; k <= last; k++)
                    ast->pal[k] = 0xFFU << 24 | avio_rb32(pb) >> 8;

                ast->has_pal = 1;
                goto start_sync;
            } else if (((ast->prefix_count < 5 || sync + 9 > i) &&
                        d[2] < 128 && d[3] < 128) ||
                       d[2] * 256 + d[3] == ast->prefix /* ||
                       (d[2] == 'd' && d[3] == 'c') ||
                       (d[2] == 'w' && d[3] == 'b') */) {
                if (exit_early)
                    return 0;
                /* Track the dominant two-character chunk suffix per stream
                 * so later unlikely ids can be rejected. */
                if (d[2] * 256 + d[3] == ast->prefix)
                    ast->prefix_count++;
                else {
                    ast->prefix       = d[2] * 256 + d[3];
                    ast->prefix_count = 0;
                }

                avi->stream_index = n;
                ast->packet_size  = size + 8;
                ast->remaining    = size;

                /* Grow the index on the fly while reading forward. */
                if (size || !ast->sample_size) {
                    uint64_t pos = avio_tell(pb) - 8;
                    if (!st->index_entries || !st->nb_index_entries ||
                        st->index_entries[st->nb_index_entries - 1].pos < pos) {
                        av_add_index_entry(st, pos, ast->frame_offset, size,
                                           0, AVINDEX_KEYFRAME);
                    }
                }
                return 0;
            }
        }
    }

    if (pb->error)
        return pb->error;
    return AVERROR_EOF;
}
 
/* Read the next packet.  Handles DV passthrough, index-driven reading of
 * non-interleaved files, GAB2 subtitle injection, palette side data and
 * runtime detection of poor interleaving. */
static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int err;
#if FF_API_DESTRUCT_PACKET
    void *dstr;
#endif

    if (CONFIG_DV_DEMUXER && avi->dv_demux) {
        /* Drain any frame already buffered inside the DV demuxer. */
        int size = avpriv_dv_get_packet(avi->dv_demux, pkt);
        if (size >= 0)
            return size;
        else
            goto resync;
    }

    if (avi->non_interleaved) {
        /* Pick the stream with the lowest next timestamp from the index
         * and seek directly to its chunk. */
        int best_stream_index = 0;
        AVStream *best_st     = NULL;
        AVIStream *best_ast;
        int64_t best_ts = INT64_MAX;
        int i;

        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st   = s->streams[i];
            AVIStream *ast = st->priv_data;
            int64_t ts     = ast->frame_offset;
            int64_t last_ts;

            if (!st->nb_index_entries)
                continue;

            last_ts = st->index_entries[st->nb_index_entries - 1].timestamp;
            if (!ast->remaining && ts > last_ts)
                continue;

            ts = av_rescale_q(ts, st->time_base,
                              (AVRational) { FFMAX(1, ast->sample_size),
                                             AV_TIME_BASE });

            av_dlog(s, "%"PRId64" %d/%d %"PRId64"\n", ts,
                    st->time_base.num, st->time_base.den, ast->frame_offset);
            if (ts < best_ts) {
                best_ts           = ts;
                best_st           = st;
                best_stream_index = i;
            }
        }
        if (!best_st)
            return AVERROR_EOF;

        best_ast = best_st->priv_data;
        best_ts  = best_ast->frame_offset;
        if (best_ast->remaining) {
            i = av_index_search_timestamp(best_st,
                                          best_ts,
                                          AVSEEK_FLAG_ANY |
                                          AVSEEK_FLAG_BACKWARD);
        } else {
            i = av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY);
            if (i >= 0)
                best_ast->frame_offset = best_st->index_entries[i].timestamp;
        }

        if (i >= 0) {
            int64_t pos = best_st->index_entries[i].pos;
            /* Resume inside a partially-consumed chunk if needed. */
            pos += best_ast->packet_size - best_ast->remaining;
            if (avio_seek(s->pb, pos + 8, SEEK_SET) < 0)
                return AVERROR_EOF;

            av_assert0(best_ast->remaining <= best_ast->packet_size);

            avi->stream_index = best_stream_index;
            if (!best_ast->remaining)
                best_ast->packet_size =
                best_ast->remaining   = best_st->index_entries[i].size;
        }
        else
            return AVERROR_EOF;
    }

resync:
    if (avi->stream_index >= 0) {
        AVStream *st   = s->streams[avi->stream_index];
        AVIStream *ast = st->priv_data;
        int size, err;

        /* A due subtitle packet preempts the regular stream. */
        if (get_subtitle_pkt(s, st, pkt))
            return 0;

        // minorityreport.AVI block_align=1024 sample_size=1 IMA-ADPCM
        if (ast->sample_size <= 1)
            size = INT_MAX;
        else if (ast->sample_size < 32)
            // arbitrary multiplier to avoid tiny packets for raw PCM data
            size = 1024 * ast->sample_size;
        else
            size = ast->sample_size;

        if (size > ast->remaining)
            size = ast->remaining;
        avi->last_pkt_pos = avio_tell(pb);
        err = av_get_packet(pb, pkt, size);
        if (err < 0)
            return err;
        size = err;

        /* Attach a pending palette change as packet side data. */
        if (ast->has_pal && pkt->size < (unsigned)INT_MAX / 2) {
            uint8_t *pal;
            pal = av_packet_new_side_data(pkt,
                                          AV_PKT_DATA_PALETTE,
                                          AVPALETTE_SIZE);
            if (!pal) {
                av_log(s, AV_LOG_ERROR,
                       "Failed to allocate data for palette\n");
            } else {
                memcpy(pal, ast->pal, AVPALETTE_SIZE);
                ast->has_pal = 0;
            }
        }

        if (CONFIG_DV_DEMUXER && avi->dv_demux) {
            /* Feed the raw chunk to the DV demuxer, preserving buffer
             * ownership fields across the call. */
            AVBufferRef *avbuf = pkt->buf;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
            dstr = pkt->destruct;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
            size = avpriv_dv_produce_packet(avi->dv_demux, pkt,
                                            pkt->data, pkt->size, pkt->pos);
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
            pkt->destruct = dstr;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
            pkt->buf    = avbuf;
            pkt->flags |= AV_PKT_FLAG_KEY;
            if (size < 0)
                av_free_packet(pkt);
        } else if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE &&
                   !st->codec->codec_tag && read_gab2_sub(st, pkt)) {
            /* Packet was consumed by the GAB2 sub-demuxer; resync. */
            ast->frame_offset++;
            avi->stream_index = -1;
            ast->remaining    = 0;
            goto resync;
        } else {
            /* XXX: How to handle B-frames in AVI? */
            pkt->dts = ast->frame_offset;
            // pkt->dts += ast->start;
            if (ast->sample_size)
                pkt->dts /= ast->sample_size;
            av_dlog(s,
                    "dts:%"PRId64" offset:%"PRId64" %d/%d smpl_siz:%d "
                    "base:%d st:%d size:%d\n",
                    pkt->dts,
                    ast->frame_offset,
                    ast->scale,
                    ast->rate,
                    ast->sample_size,
                    AV_TIME_BASE,
                    avi->stream_index,
                    size);
            pkt->stream_index = avi->stream_index;

            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                AVIndexEntry *e;
                int index;
                av_assert0(st->index_entries);

                index = av_index_search_timestamp(st, ast->frame_offset, 0);
                e     = &st->index_entries[index];

                if (index >= 0 && e->timestamp == ast->frame_offset) {
                    if (index == st->nb_index_entries - 1) {
                        /* Newly discovered last entry: for MPEG-4, scan for
                         * the VOP startcode and check the coding type to
                         * validate the keyframe flag. */
                        int key = 1;
                        int i;
                        uint32_t state = -1;
                        for (i = 0; i < FFMIN(size, 256); i++) {
                            if (st->codec->codec_id == AV_CODEC_ID_MPEG4) {
                                if (state == 0x1B6) {
                                    key = !(pkt->data[i] & 0xC0);
                                    break;
                                }
                            } else
                                break;
                            state = (state << 8) + pkt->data[i];
                        }
                        if (!key)
                            e->flags &= ~AVINDEX_KEYFRAME;
                    }
                    if (e->flags & AVINDEX_KEYFRAME)
                        pkt->flags |= AV_PKT_FLAG_KEY;
                }
            } else {
                pkt->flags |= AV_PKT_FLAG_KEY;
            }
            ast->frame_offset += get_duration(ast, pkt->size);
        }
        ast->remaining -= err;
        if (!ast->remaining) {
            avi->stream_index = -1;
            ast->packet_size  = 0;
        }

        /* Drop packets that lie before the position a seek targeted. */
        if (!avi->non_interleaved && pkt->pos >= 0 && ast->seek_pos > pkt->pos) {
            av_free_packet(pkt);
            goto resync;
        }
        ast->seek_pos = 0;

        /* Detect poor interleaving at runtime and switch to NI mode. */
        if (!avi->non_interleaved && st->nb_index_entries > 1 && avi->index_loaded > 1) {
            int64_t dts = av_rescale_q(pkt->dts, st->time_base, AV_TIME_BASE_Q);

            if (avi->dts_max - dts > 2 * AV_TIME_BASE) {
                avi->non_interleaved = 1;
                av_log(s, AV_LOG_INFO, "Switching to NI mode, due to poor interleaving\n");
            } else if (avi->dts_max < dts)
                avi->dts_max = dts;
        }

        return 0;
    }

    if ((err = avi_sync(s, 0)) < 0)
        return err;
    goto resync;
}
 
/* XXX: We make the implicit supposition that the positions are sorted
 * for each stream. */
/* Parse a legacy idx1 index chunk of the given byte size and populate the
 * per-stream index entries.  Derives the offset between index positions
 * and absolute file positions from the first real packet. */
static int avi_read_idx1(AVFormatContext *s, int size)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int nb_index_entries, i;
    AVStream *st;
    AVIStream *ast;
    unsigned int index, tag, flags, pos, len, first_packet = 1;
    unsigned last_pos = -1;
    unsigned last_idx = -1;
    int64_t idx1_pos, first_packet_pos = 0, data_offset = 0;
    int anykey = 0;

    nb_index_entries = size / 16; /* 16 bytes per idx1 entry */
    if (nb_index_entries <= 0)
        return AVERROR_INVALIDDATA;

    /* Locate the first packet so we can tell whether the index stores
     * movi-relative or absolute positions. */
    idx1_pos = avio_tell(pb);
    avio_seek(pb, avi->movi_list + 4, SEEK_SET);
    if (avi_sync(s, 1) == 0)
        first_packet_pos = avio_tell(pb) - 8;
    avi->stream_index = -1;
    avio_seek(pb, idx1_pos, SEEK_SET);

    if (s->nb_streams == 1 && s->streams[0]->codec->codec_tag == AV_RL32("MMES")) {
        first_packet_pos = 0;
        data_offset      = avi->movi_list;
    }

    /* Read the entries and sort them in each stream component. */
    for (i = 0; i < nb_index_entries; i++) {
        if (url_feof(pb))
            return -1;

        tag   = avio_rl32(pb);
        flags = avio_rl32(pb);
        pos   = avio_rl32(pb);
        len   = avio_rl32(pb);
        av_dlog(s, "%d: tag=0x%x flags=0x%x pos=0x%x len=%d/",
                i, tag, flags, pos, len);

        /* Stream number is encoded as two ASCII digits in the chunk id. */
        index  = ((tag & 0xff) - '0') * 10;
        index += (tag >> 8 & 0xff) - '0';
        if (index >= s->nb_streams)
            continue;
        st  = s->streams[index];
        ast = st->priv_data;

        if (first_packet && first_packet_pos) {
            data_offset  = first_packet_pos - pos;
            first_packet = 0;
        }
        pos += data_offset;

        av_dlog(s, "%d cum_len=%"PRId64"\n", len, ast->cum_len);

        // even if we have only a single stream, we should
        // switch to non-interleaved to get correct timestamps
        if (last_pos == pos)
            avi->non_interleaved = 1;
        if (last_idx != pos && len) {
            av_add_index_entry(st, pos, ast->cum_len, len, 0,
                               (flags & AVIIF_INDEX) ? AVINDEX_KEYFRAME : 0);
            last_idx = pos;
        }
        ast->cum_len += get_duration(ast, len);
        last_pos = pos;
        anykey |= flags & AVIIF_INDEX;
    }
    /* Index with no keyframe flag at all: mark each stream's first entry
     * as a keyframe so seeking remains possible. */
    if (!anykey) {
        for (index = 0; index < s->nb_streams; index++) {
            st = s->streams[index];
            if (st->nb_index_entries)
                st->index_entries[0].flags |= AVINDEX_KEYFRAME;
        }
    }
    return 0;
}
 
/* Heuristically decide whether the file is non-interleaved, i.e. each
 * stream's data is stored in its own contiguous region rather than being
 * multiplexed.  Returns 1 for non-interleaved, 0 otherwise. */
static int guess_ni_flag(AVFormatContext *s)
{
    int i;
    int64_t last_start = 0;
    int64_t first_end  = INT64_MAX;
    int64_t oldpos     = avio_tell(s->pb);
    int *idx;
    int64_t min_pos, pos;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        int n        = st->nb_index_entries;
        unsigned int size;

        if (n <= 0)
            continue;

        if (n >= 2) {
            /* If the first chunk overlaps the second index entry, the
             * positions are unreliable; neutralize the range test. */
            int64_t pos = st->index_entries[0].pos;
            avio_seek(s->pb, pos + 4, SEEK_SET);
            size = avio_rl32(s->pb);
            if (pos + size > st->index_entries[1].pos)
                last_start = INT64_MAX;
        }

        if (st->index_entries[0].pos > last_start)
            last_start = st->index_entries[0].pos;
        if (st->index_entries[n - 1].pos < first_end)
            first_end = st->index_entries[n - 1].pos;
    }
    avio_seek(s->pb, oldpos, SEEK_SET);
    /* Some stream starts only after another has fully ended: clearly
     * non-interleaved. */
    if (last_start > first_end)
        return 1;
    /* Otherwise sweep the indexes in file-position order and flag the
     * file when timestamps across streams drift apart by > 2 seconds. */
    idx = av_calloc(s->nb_streams, sizeof(*idx));
    if (!idx)
        return 0;
    for (min_pos = pos = 0; min_pos != INT64_MAX; pos = min_pos + 1LU) {
        int64_t max_dts = INT64_MIN / 2, min_dts = INT64_MAX / 2;
        min_pos = INT64_MAX;

        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st   = s->streams[i];
            AVIStream *ast = st->priv_data;
            int n          = st->nb_index_entries;
            while (idx[i] < n && st->index_entries[idx[i]].pos < pos)
                idx[i]++;
            if (idx[i] < n) {
                min_dts = FFMIN(min_dts, av_rescale_q(st->index_entries[idx[i]].timestamp / FFMAX(ast->sample_size, 1), st->time_base, AV_TIME_BASE_Q));
                min_pos = FFMIN(min_pos, st->index_entries[idx[i]].pos);
            }
            if (idx[i])
                max_dts = FFMAX(max_dts, av_rescale_q(st->index_entries[idx[i] - 1].timestamp / FFMAX(ast->sample_size, 1), st->time_base, AV_TIME_BASE_Q));
        }
        if (max_dts - min_dts > 2 * AV_TIME_BASE) {
            av_free(idx);
            return 1;
        }
    }
    av_free(idx);
    return 0;
}
 
/* Scan the chunks that follow the movi list for an idx1 index (and any
 * trailing INFO list) and load it.  Restores the previous file position
 * before returning; 0 on success, <0 if no usable index was found. */
static int avi_load_index(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t tag, size;
    int64_t pos = avio_tell(pb);
    int64_t next;
    int ret = -1;

    if (avio_seek(pb, avi->movi_end, SEEK_SET) < 0)
        goto the_end; // maybe truncated file
    av_dlog(s, "movi_end=0x%"PRIx64"\n", avi->movi_end);
    for (;;) {
        tag  = avio_rl32(pb);
        size = avio_rl32(pb);
        if (url_feof(pb))
            break;
        /* RIFF chunks are padded to even sizes. */
        next = avio_tell(pb) + size + (size & 1);

        av_dlog(s, "tag=%c%c%c%c size=0x%x\n",
                tag & 0xff,
                (tag >> 8) & 0xff,
                (tag >> 16) & 0xff,
                (tag >> 24) & 0xff,
                size);

        if (tag == MKTAG('i', 'd', 'x', '1') &&
            avi_read_idx1(s, size) >= 0) {
            /* 2 marks the index as fully loaded from the file. */
            avi->index_loaded = 2;
            ret = 0;
        } else if (tag == MKTAG('L', 'I', 'S', 'T')) {
            uint32_t tag1 = avio_rl32(pb);

            if (tag1 == MKTAG('I', 'N', 'F', 'O'))
                ff_read_riff_info(s, size - 4);
        } else if (!ret)
            break;

        if (avio_seek(pb, next, SEEK_SET) < 0)
            break; // something is wrong here
    }

the_end:
    avio_seek(pb, pos, SEEK_SET);
    return ret;
}
 
/* Re-position the embedded GAB2 subtitle demuxer of st2 to the target
 * timestamp (given in st's time base) and pre-load the next packet. */
static void seek_subtitle(AVStream *st, AVStream *st2, int64_t timestamp)
{
    AVIStream *sub = st2->priv_data;
    int64_t target = av_rescale_q(timestamp, st->time_base, st2->time_base);
    int ok;

    /* Drop any packet queued from before the seek. */
    av_free_packet(&sub->sub_pkt);
    /* Prefer seeking backward to the target; fall back to forward. */
    ok = avformat_seek_file(sub->sub_ctx, 0, INT64_MIN, target, target, 0) >= 0;
    if (!ok)
        ok = avformat_seek_file(sub->sub_ctx, 0, target, target, INT64_MAX, 0) >= 0;
    if (ok)
        ff_read_packet(sub->sub_ctx, &sub->sub_pkt);
}
 
/* Seek all streams to the given timestamp (expressed in stream_index's
 * time base) using the index; subtitle sub-demuxers are reseeked and all
 * per-stream packet state is reset. */
static int avi_read_seek(AVFormatContext *s, int stream_index,
                         int64_t timestamp, int flags)
{
    AVIContext *avi = s->priv_data;
    AVStream *st;
    int i, index;
    int64_t pos, pos_min;
    AVIStream *ast;

    /* Does not matter which stream is requested dv in avi has the
     * stream information in the first video stream.
     */
    if (avi->dv_demux)
        stream_index = 0;

    if (!avi->index_loaded) {
        /* we only load the index on demand */
        avi_load_index(s);
        avi->index_loaded |= 1;
    }
    av_assert0(stream_index >= 0);

    st  = s->streams[stream_index];
    ast = st->priv_data;
    /* Index timestamps are in sample units when sample_size is set. */
    index = av_index_search_timestamp(st,
                                      timestamp * FFMAX(ast->sample_size, 1),
                                      flags);
    if (index < 0) {
        if (st->nb_index_entries > 0)
            av_log(s, AV_LOG_DEBUG, "Failed to find timestamp %"PRId64 " in index %"PRId64 " .. %"PRId64 "\n",
                   timestamp * FFMAX(ast->sample_size, 1),
                   st->index_entries[0].timestamp,
                   st->index_entries[st->nb_index_entries - 1].timestamp);
        return AVERROR_INVALIDDATA;
    }

    /* find the position */
    pos       = st->index_entries[index].pos;
    timestamp = st->index_entries[index].timestamp / FFMAX(ast->sample_size, 1);

    av_dlog(s, "XX %"PRId64" %d %"PRId64"\n",
            timestamp, index, st->index_entries[index].timestamp);

    if (CONFIG_DV_DEMUXER && avi->dv_demux) {
        /* One and only one real stream for DV in AVI, and it has video */
        /* offsets. Calling with other stream indexes should have failed */
        /* the av_index_search_timestamp call above. */

        if (avio_seek(s->pb, pos, SEEK_SET) < 0)
            return -1;

        /* Feed the DV video stream version of the timestamp to the */
        /* DV demux so it can synthesize correct timestamps. */
        ff_dv_offset_reset(avi->dv_demux, timestamp);

        avi->stream_index = -1;
        return 0;
    }

    /* First pass: reset packet state, compute each stream's seek target
     * and the minimum file position to seek to. */
    pos_min = pos;
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st2   = s->streams[i];
        AVIStream *ast2 = st2->priv_data;

        ast2->packet_size =
        ast2->remaining   = 0;

        if (ast2->sub_ctx) {
            seek_subtitle(st, st2, timestamp);
            continue;
        }

        if (st2->nb_index_entries <= 0)
            continue;

        // av_assert1(st2->codec->block_align);
        av_assert0((int64_t)st2->time_base.num * ast2->rate ==
                   (int64_t)st2->time_base.den * ast2->scale);
        index = av_index_search_timestamp(st2,
                                          av_rescale_q(timestamp,
                                                       st->time_base,
                                                       st2->time_base) *
                                          FFMAX(ast2->sample_size, 1),
                                          flags |
                                          AVSEEK_FLAG_BACKWARD |
                                          (st2->codec->codec_type != AVMEDIA_TYPE_VIDEO ? AVSEEK_FLAG_ANY : 0));
        if (index < 0)
            index = 0;
        ast2->seek_pos = st2->index_entries[index].pos;
        pos_min = FFMIN(pos_min, ast2->seek_pos);
    }
    /* Second pass: set each stream's frame_offset, walking back so that
     * no interleaved stream starts before pos_min. */
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st2   = s->streams[i];
        AVIStream *ast2 = st2->priv_data;

        if (ast2->sub_ctx || st2->nb_index_entries <= 0)
            continue;

        index = av_index_search_timestamp(
                st2,
                av_rescale_q(timestamp, st->time_base, st2->time_base) * FFMAX(ast2->sample_size, 1),
                flags | AVSEEK_FLAG_BACKWARD | (st2->codec->codec_type != AVMEDIA_TYPE_VIDEO ? AVSEEK_FLAG_ANY : 0));
        if (index < 0)
            index = 0;
        while (!avi->non_interleaved && index > 0 && st2->index_entries[index - 1].pos >= pos_min)
            index--;
        ast2->frame_offset = st2->index_entries[index].timestamp;
    }

    /* do the seek */
    if (avio_seek(s->pb, pos_min, SEEK_SET) < 0) {
        av_log(s, AV_LOG_ERROR, "Seek failed\n");
        return -1;
    }
    avi->stream_index = -1;
    avi->dts_max = INT_MIN;
    return 0;
}
 
/* Release per-stream demuxer state (GAB2 subtitle contexts, buffers and
 * queued packets) plus the DV demuxer context.  Always returns 0. */
static int avi_read_close(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    int n;

    for (n = 0; n < s->nb_streams; n++) {
        AVIStream *ast = s->streams[n]->priv_data;

        if (!ast)
            continue;
        if (ast->sub_ctx) {
            av_freep(&ast->sub_ctx->pb);
            avformat_close_input(&ast->sub_ctx);
        }
        av_free(ast->sub_buffer);
        av_free_packet(&ast->sub_pkt);
    }

    av_free(avi->dv_demux);

    return 0;
}
 
/* Probe for the RIFF/AVI signature: a known 4-byte tag at offset 0
 * followed by its matching 4-byte form type at offset 8. */
static int avi_probe(AVProbeData *p)
{
    int n = 0;

    /* avi_headers is a NUL-terminated table of tag/form-type pairs. */
    while (avi_headers[n][0]) {
        if (!memcmp(p->buf, avi_headers[n], 4) &&
            !memcmp(p->buf + 8, avi_headers[n] + 4, 4))
            return AVPROBE_SCORE_MAX;
        n++;
    }

    return 0;
}
 
/* AVI demuxer definition; per-file state lives in AVIContext and
 * seeking is handled by the index-based avi_read_seek() above. */
AVInputFormat ff_avi_demuxer = {
    .name           = "avi",
    .long_name      = NULL_IF_CONFIG_SMALL("AVI (Audio Video Interleaved)"),
    .priv_data_size = sizeof(AVIContext),
    .read_probe     = avi_probe,
    .read_header    = avi_read_header,
    .read_packet    = avi_read_packet,
    .read_close     = avi_read_close,
    .read_seek      = avi_read_seek,
    .priv_class     = &demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/avienc.c
0,0 → 1,665
/*
* AVI muxer
* Copyright (c) 2000 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
//#define DEBUG
 
#include "avformat.h"
#include "internal.h"
#include "avi.h"
#include "avio_internal.h"
#include "riff.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/avassert.h"
#include "libavutil/timestamp.h"
 
/*
* TODO:
* - fill all fields if non streamed (nb_frames for example)
*/
 
/* One index entry: key-frame flags, chunk offset (relative to the movi
 * list) and chunk payload length. */
typedef struct AVIIentry {
    unsigned int flags, pos, len;
} AVIIentry;

/* Index entries are allocated in clusters of this many entries. */
#define AVI_INDEX_CLUSTER_SIZE 16384

typedef struct AVIIndex {
    int64_t indx_start;      /* file offset of this stream's OpenDML master index */
    int entry;               /* entries used in the current RIFF chunk */
    int ents_allocated;      /* total entries allocated across all clusters */
    AVIIentry** cluster;     /* array of entry clusters, grown on demand */
} AVIIndex;

/* Muxer-global state. */
typedef struct {
    int64_t riff_start, movi_list, odml_list;  /* offsets of open chunks */
    int64_t frames_hdr_all;  /* offset of the avih total-frames field */
    int riff_id;             /* 1-based index of the current RIFF chunk */
} AVIContext;

/* Per-stream muxer state. */
typedef struct {
    int64_t frames_hdr_strm;   /* offset of this stream's strh length field */
    int64_t audio_strm_length; /* total audio bytes written so far */
    int packet_count;
    int entry;                 /* cursor used while merging indexes into idx1 */

    AVIIndex indexes;
} AVIStream ;
 
/* Return a pointer to index entry number ent_id inside the clustered index. */
static inline AVIIentry* avi_get_ientry(AVIIndex* idx, int ent_id)
{
    int cluster = ent_id / AVI_INDEX_CLUSTER_SIZE;
    int slot    = ent_id % AVI_INDEX_CLUSTER_SIZE;

    return idx->cluster[cluster] + slot;
}
 
/**
 * Open a new RIFF chunk (and the LIST inside it), resetting the
 * per-stream index entry counts for the new chunk.
 *
 * @return offset of the LIST tag, to be passed to ff_end_tag() later
 */
static int64_t avi_start_new_riff(AVFormatContext *s, AVIOContext *pb,
                                  const char* riff_tag, const char* list_tag)
{
    AVIContext *avi = s->priv_data;
    int64_t list_off;
    int n;

    avi->riff_id++;
    /* each RIFF chunk gets its own leaf index, so restart the counters */
    for (n = 0; n < s->nb_streams; n++) {
        AVIStream *avist = s->streams[n]->priv_data;
        avist->indexes.entry = 0;
    }

    avi->riff_start = ff_start_tag(pb, "RIFF");
    ffio_wfourcc(pb, riff_tag);
    list_off = ff_start_tag(pb, "LIST");
    ffio_wfourcc(pb, list_tag);

    return list_off;
}
 
/**
 * Build the two-digit + two-letter AVI chunk fourcc for a stream,
 * e.g. "00dc" for video stream 0, "01wb" for audio stream 1.
 * tag must hold at least 5 bytes; it is NUL-terminated and returned.
 */
static char* avi_stream2fourcc(char* tag, int index, enum AVMediaType type)
{
    tag[0] = '0' + index / 10;
    tag[1] = '0' + index % 10;
    switch (type) {
    case AVMEDIA_TYPE_VIDEO:
        tag[2] = 'd';
        tag[3] = 'c';
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        // note: this is not an official code
        tag[2] = 's';
        tag[3] = 'b';
        break;
    default: /* audio and everything else */
        tag[2] = 'w';
        tag[3] = 'b';
        break;
    }
    tag[4] = '\0';
    return tag;
}
 
/**
 * Seek back and patch the per-stream length fields in the strh headers
 * and, for the first RIFF chunk, the global frame count in avih.
 * Restores the write position before returning.
 */
static int avi_write_counters(AVFormatContext* s, int riff_id)
{
    AVIOContext *pb = s->pb;
    AVIContext *avi = s->priv_data;
    int n, au_byterate, au_ssize, au_scale, nb_frames = 0;
    int64_t file_size;
    AVCodecContext* stream;

    file_size = avio_tell(pb);  /* remember current position to restore later */
    for(n = 0; n < s->nb_streams; n++) {
        AVIStream *avist= s->streams[n]->priv_data;

        av_assert0(avist->frames_hdr_strm);
        stream = s->streams[n]->codec;
        avio_seek(pb, avist->frames_hdr_strm, SEEK_SET);
        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);
        if(au_ssize == 0) {
            /* variable sample size: length is a packet count */
            avio_wl32(pb, avist->packet_count);
        } else {
            /* fixed sample size: length is expressed in samples */
            avio_wl32(pb, avist->audio_strm_length / au_ssize);
        }
        if(stream->codec_type == AVMEDIA_TYPE_VIDEO)
            nb_frames = FFMAX(nb_frames, avist->packet_count);
    }
    if(riff_id == 1) {
        /* only the first RIFF chunk carries the global avih frame count */
        av_assert0(avi->frames_hdr_all);
        avio_seek(pb, avi->frames_hdr_all, SEEK_SET);
        avio_wl32(pb, nb_frames);
    }
    avio_seek(pb, file_size, SEEK_SET);

    return 0;
}
 
/**
 * Write the AVI file header: the RIFF/hdrl list with the global 'avih'
 * chunk, one 'strl' list per stream ('strh' + 'strf' + an OpenDML
 * master-index placeholder + optional 'vprp'), the 'odml' placeholder,
 * INFO tags and padding, and finally open the 'movi' list that will
 * hold the packet data.  Several counter fields are written as
 * placeholders and patched later by avi_write_counters().
 */
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *stream, *video_enc;
    int64_t list1, list2, strh, strf;
    AVDictionaryEntry *t = NULL;

    if (s->nb_streams > AVI_MAX_STREAM_COUNT) {
        av_log(s, AV_LOG_ERROR, "AVI does not support >%d streams\n",
               AVI_MAX_STREAM_COUNT);
        return AVERROR(EINVAL);
    }

    /* allocate one AVIStream context per stream */
    for(n=0;n<s->nb_streams;n++) {
        s->streams[n]->priv_data= av_mallocz(sizeof(AVIStream));
        if(!s->streams[n]->priv_data)
            return AVERROR(ENOMEM);
    }

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(s, pb, "AVI ", "hdrl");

    /* avi header */
    ffio_wfourcc(pb, "avih");
    avio_wl32(pb, 14 * 4);   /* avih chunk size: 14 dwords */
    bitrate = 0;

    /* sum the bitrates and keep the (last) video stream for the header */
    video_enc = NULL;
    for(n=0;n<s->nb_streams;n++) {
        stream = s->streams[n]->codec;
        bitrate += stream->bit_rate;
        if (stream->codec_type == AVMEDIA_TYPE_VIDEO)
            video_enc = stream;
    }

    nb_frames = 0;

    if(video_enc){   /* dwMicroSecPerFrame from the video time base */
        avio_wl32(pb, (uint32_t)(INT64_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
    } else {
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, bitrate / 8); /* XXX: not quite exact */
    avio_wl32(pb, 0); /* padding */
    if (!pb->seekable)
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
    else
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = avio_tell(pb); /* remember this offset to fill later */
    avio_wl32(pb, nb_frames); /* nb frames, filled later */
    avio_wl32(pb, 0); /* initial frame */
    avio_wl32(pb, s->nb_streams); /* nb streams */
    avio_wl32(pb, 1024 * 1024); /* suggested buffer size */
    if(video_enc){
        avio_wl32(pb, video_enc->width);
        avio_wl32(pb, video_enc->height);
    } else {
        avio_wl32(pb, 0);
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */

    /* stream list; note n == s->nb_streams after the loops above */
    for(i=0;i<n;i++) {
        AVIStream *avist= s->streams[i]->priv_data;
        list2 = ff_start_tag(pb, "LIST");
        ffio_wfourcc(pb, "strl");

        stream = s->streams[i]->codec;

        /* stream generic header */
        strh = ff_start_tag(pb, "strh");
        switch(stream->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (stream->codec_id != AV_CODEC_ID_XSUB) {
                av_log(s, AV_LOG_ERROR, "Subtitle streams other than DivX XSUB are not supported by the AVI muxer.\n");
                return AVERROR_PATCHWELCOME;
            }
            /* fall through: XSUB is stored as a video track */
        case AVMEDIA_TYPE_VIDEO: ffio_wfourcc(pb, "vids"); break;
        case AVMEDIA_TYPE_AUDIO: ffio_wfourcc(pb, "auds"); break;
//      case AVMEDIA_TYPE_TEXT : ffio_wfourcc(pb, "txts"); break;
        case AVMEDIA_TYPE_DATA : ffio_wfourcc(pb, "dats"); break;
        }
        if(stream->codec_type == AVMEDIA_TYPE_VIDEO ||
           stream->codec_id == AV_CODEC_ID_XSUB)
            avio_wl32(pb, stream->codec_tag);
        else
            avio_wl32(pb, 1); /* dummy fccHandler for audio */
        avio_wl32(pb, 0); /* flags */
        avio_wl16(pb, 0); /* priority */
        avio_wl16(pb, 0); /* language */
        avio_wl32(pb, 0); /* initial frame */

        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);

        /* clamp absurdly fine video timebases so rate/scale stay sane */
        if (   stream->codec_type == AVMEDIA_TYPE_VIDEO
            && stream->codec_id != AV_CODEC_ID_XSUB
            && au_byterate > 1000LL*au_scale) {
            au_byterate = 600;
            au_scale    = 1;
        }
        avpriv_set_pts_info(s->streams[i], 64, au_scale, au_byterate);
        if(stream->codec_id == AV_CODEC_ID_XSUB)
            au_scale = au_byterate = 0;

        avio_wl32(pb, au_scale); /* scale */
        avio_wl32(pb, au_byterate); /* rate */

        avio_wl32(pb, 0); /* start */
        avist->frames_hdr_strm = avio_tell(pb); /* remember this offset to fill later */
        if (!pb->seekable)
            avio_wl32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
        else
            avio_wl32(pb, 0); /* length, XXX: filled later */

        /* suggested buffer size */ //FIXME set at the end to largest chunk
        if(stream->codec_type == AVMEDIA_TYPE_VIDEO)
            avio_wl32(pb, 1024 * 1024);
        else if(stream->codec_type == AVMEDIA_TYPE_AUDIO)
            avio_wl32(pb, 12 * 1024);
        else
            avio_wl32(pb, 0);
        avio_wl32(pb, -1); /* quality */
        avio_wl32(pb, au_ssize); /* sample size */
        avio_wl32(pb, 0); /* rcFrame left/top */
        avio_wl16(pb, stream->width);
        avio_wl16(pb, stream->height);
        ff_end_tag(pb, strh);

        if(stream->codec_type != AVMEDIA_TYPE_DATA){
            int ret;

            /* stream format header: BITMAPINFO or WAVEFORMATEX */
            strf = ff_start_tag(pb, "strf");
            switch(stream->codec_type) {
            case AVMEDIA_TYPE_SUBTITLE:
                // XSUB subtitles behave like video tracks, other subtitles
                // are not (yet) supported.
                if (stream->codec_id != AV_CODEC_ID_XSUB) break;
                /* fall through for XSUB */
            case AVMEDIA_TYPE_VIDEO:
                ff_put_bmp_header(pb, stream, ff_codec_bmp_tags, 0);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if ((ret = ff_put_wav_header(pb, stream)) < 0) {
                    return ret;
                }
                break;
            default:
                av_log(s, AV_LOG_ERROR,
                       "Invalid or not supported codec type '%s' found in the input\n",
                       (char *)av_x_if_null(av_get_media_type_string(stream->codec_type), "?"));
                return AVERROR(EINVAL);
            }
            ff_end_tag(pb, strf);
            if ((t = av_dict_get(s->streams[i]->metadata, "title", NULL, 0))) {
                ff_riff_write_info_tag(s->pb, "strn", t->value);
                t = NULL;
            }
        }

        if (pb->seekable) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons.
             */
            avist->indexes.entry = avist->indexes.ents_allocated = 0;
            avist->indexes.indx_start = ff_start_tag(pb, "JUNK");
            avio_wl16(pb, 4); /* wLongsPerEntry */
            avio_w8(pb, 0); /* bIndexSubType (0 == frame index) */
            avio_w8(pb, 0); /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            avio_wl32(pb, 0); /* nEntriesInUse (will fill out later on) */
            ffio_wfourcc(pb, avi_stream2fourcc(tag, i, stream->codec_type));
            /* dwChunkId */
            avio_wl64(pb, 0); /* dwReserved[3]
            avio_wl32(pb, 0); Must be 0. */
            /* reserve room for AVI_MASTER_INDEX_SIZE 16-byte entries,
             * filled in by avi_write_ix() */
            for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                avio_wl64(pb, 0);
            ff_end_tag(pb, avist->indexes.indx_start);
        }

        /* vprp: video properties header, only when a non-square SAR is set */
        if(   stream->codec_type == AVMEDIA_TYPE_VIDEO
           && s->streams[i]->sample_aspect_ratio.num>0
           && s->streams[i]->sample_aspect_ratio.den>0){
            int vprp= ff_start_tag(pb, "vprp");
            AVRational dar = av_mul_q(s->streams[i]->sample_aspect_ratio,
                                      (AVRational){stream->width, stream->height});
            int num, den;
            av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

            avio_wl32(pb, 0); //video format = unknown
            avio_wl32(pb, 0); //video standard= unknown
            avio_wl32(pb, lrintf(1.0/av_q2d(stream->time_base)));
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl16(pb, den);
            avio_wl16(pb, num);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl32(pb, 1); //progressive FIXME

            /* field description: same dimensions as the full frame */
            avio_wl32(pb, stream->height);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, 0);
            avio_wl32(pb, 0);

            avio_wl32(pb, 0);
            avio_wl32(pb, 0);
            ff_end_tag(pb, vprp);
        }

        ff_end_tag(pb, list2);
    }

    if (pb->seekable) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = ff_start_tag(pb, "JUNK");
        ffio_wfourcc(pb, "odml");
        ffio_wfourcc(pb, "dmlh");
        avio_wl32(pb, 248);
        for (i = 0; i < 248; i+= 4)
            avio_wl32(pb, 0);
        ff_end_tag(pb, avi->odml_list);
    }

    ff_end_tag(pb, list1);

    ff_riff_write_info(s);

    /* some padding for easier tag editing */
    list2 = ff_start_tag(pb, "JUNK");
    for (i = 0; i < 1016; i += 4)
        avio_wl32(pb, 0);
    ff_end_tag(pb, list2);

    avi->movi_list = ff_start_tag(pb, "LIST");
    ffio_wfourcc(pb, "movi");

    avio_flush(pb);

    return 0;
}
 
/**
 * Write one OpenDML leaf index chunk ('ix##') per stream for the
 * current RIFF chunk, then seek back and fill in the matching slot of
 * each stream's master index ('indx', turning the JUNK placeholder into
 * a real index).  Requires a seekable output.
 */
static int avi_write_ix(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVIContext *avi = s->priv_data;
    char tag[5];
    char ix_tag[] = "ix00";
    int i, j;

    av_assert0(pb->seekable);

    /* the master index written in the header has a fixed slot count */
    if (avi->riff_id > AVI_MASTER_INDEX_SIZE) {
        av_log(s, AV_LOG_ERROR, "Invalid riff index %d > %d\n",
               avi->riff_id, AVI_MASTER_INDEX_SIZE);
        return AVERROR(EINVAL);
    }

    for (i=0;i<s->nb_streams;i++) {
        AVIStream *avist= s->streams[i]->priv_data;
        int64_t ix, pos;

        avi_stream2fourcc(tag, i, s->streams[i]->codec->codec_type);
        ix_tag[3] = '0' + i;

        /* Writing AVI OpenDML leaf index chunk */
        ix = avio_tell(pb);
        ffio_wfourcc(pb, ix_tag); /* ix?? */
        avio_wl32(pb, avist->indexes.entry * 8 + 24);
        /* chunk size */
        avio_wl16(pb, 2); /* wLongsPerEntry */
        avio_w8(pb, 0); /* bIndexSubType (0 == frame index) */
        avio_w8(pb, 1); /* bIndexType (1 == AVI_INDEX_OF_CHUNKS) */
        avio_wl32(pb, avist->indexes.entry);
        /* nEntriesInUse */
        ffio_wfourcc(pb, tag); /* dwChunkId */
        avio_wl64(pb, avi->movi_list);/* qwBaseOffset */
        avio_wl32(pb, 0); /* dwReserved_3 (must be 0) */

        for (j=0; j<avist->indexes.entry; j++) {
            AVIIentry* ie = avi_get_ientry(&avist->indexes, j);
            avio_wl32(pb, ie->pos + 8);
            /* high bit set means "not a key frame" */
            avio_wl32(pb, ((uint32_t)ie->len & ~0x80000000) |
                          (ie->flags & 0x10 ? 0 : 0x80000000));
        }
        avio_flush(pb);
        pos = avio_tell(pb);

        /* Updating one entry in the AVI OpenDML master index */
        avio_seek(pb, avist->indexes.indx_start - 8, SEEK_SET);
        ffio_wfourcc(pb, "indx"); /* enabling this entry */
        avio_skip(pb, 8);
        avio_wl32(pb, avi->riff_id); /* nEntriesInUse */
        avio_skip(pb, 16*avi->riff_id);  /* skip to the slot for this RIFF */
        avio_wl64(pb, ix); /* qwOffset */
        avio_wl32(pb, pos - ix); /* dwSize */
        avio_wl32(pb, avist->indexes.entry); /* dwDuration */

        avio_seek(pb, pos, SEEK_SET);
    }
    return 0;
}
 
/**
 * Write the legacy 'idx1' index for the first RIFF chunk by merging the
 * per-stream index entries in ascending file-offset order, then patch
 * the frame counters in the already-written headers.
 */
static int avi_write_idx1(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVIContext *avi = s->priv_data;
    int64_t idx_chunk;
    int i;
    char tag[5];

    if (pb->seekable) {
        AVIStream *avist;
        AVIIentry* ie = 0, *tie;
        int empty, stream_id = -1;

        idx_chunk = ff_start_tag(pb, "idx1");
        /* reset each stream's merge cursor */
        for(i=0; i<s->nb_streams; i++){
            avist= s->streams[i]->priv_data;
            avist->entry=0;
        }

        do {
            /* k-way merge: pick the pending entry with the smallest
             * file offset among all streams */
            empty = 1;
            for (i=0; i<s->nb_streams; i++) {
                avist= s->streams[i]->priv_data;
                if (avist->indexes.entry <= avist->entry)
                    continue;

                tie = avi_get_ientry(&avist->indexes, avist->entry);
                if (empty || tie->pos < ie->pos) {
                    ie = tie;
                    stream_id = i;
                }
                empty = 0;
            }
            if (!empty) {
                avist= s->streams[stream_id]->priv_data;
                avi_stream2fourcc(tag, stream_id,
                                  s->streams[stream_id]->codec->codec_type);
                ffio_wfourcc(pb, tag);
                avio_wl32(pb, ie->flags);
                avio_wl32(pb, ie->pos);
                avio_wl32(pb, ie->len);
                avist->entry++;
            }
        } while (!empty);
        ff_end_tag(pb, idx_chunk);

        avi_write_counters(s, avi->riff_id);
    }
    return 0;
}
 
/**
 * Write one packet as a movi chunk.  For variable-rate video, empty
 * chunks are inserted (via recursion) for skipped frames so the fixed
 * AVI frame rate stays consistent.  Starts a new 'AVIX' RIFF chunk when
 * the current one would exceed the 2GB-safe limit, and records each
 * chunk in the in-memory index when the output is seekable.
 */
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char tag[5];
    unsigned int flags=0;
    const int stream_index= pkt->stream_index;
    AVIStream *avist= s->streams[stream_index]->priv_data;
    AVCodecContext *enc= s->streams[stream_index]->codec;
    int size= pkt->size;

    av_dlog(s, "dts:%s packet_count:%d stream_index:%d\n", av_ts2str(pkt->dts), avist->packet_count, stream_index);
    /* pad skipped frames with empty chunks (not for fixed-block audio
     * or XSUB, and never before the very first packet) */
    while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avist->packet_count && enc->codec_id != AV_CODEC_ID_XSUB && avist->packet_count){
        AVPacket empty_packet;

        if(pkt->dts - avist->packet_count > 60000){
            av_log(s, AV_LOG_ERROR, "Too large number of skipped frames %"PRId64" > 60000\n", pkt->dts - avist->packet_count);
            return AVERROR(EINVAL);
        }

        av_init_packet(&empty_packet);
        empty_packet.size= 0;
        empty_packet.data= NULL;
        empty_packet.stream_index= stream_index;
        avi_write_packet(s, &empty_packet);  /* recursion increments packet_count */
        av_dlog(s, "dup dts:%s packet_count:%d\n", av_ts2str(pkt->dts), avist->packet_count);
    }
    avist->packet_count++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (pb->seekable &&
        (avio_tell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {

        avi_write_ix(s);
        ff_end_tag(pb, avi->movi_list);

        if (avi->riff_id == 1)
            avi_write_idx1(s);

        ff_end_tag(pb, avi->riff_start);
        avi->movi_list = avi_start_new_riff(s, pb, "AVIX", "movi");
    }

    avi_stream2fourcc(tag, stream_index, enc->codec_type);
    if(pkt->flags&AV_PKT_FLAG_KEY)
        flags = 0x10;  /* AVIIF_KEYFRAME */
    if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
        avist->audio_strm_length += size;
    }

    if (s->pb->seekable) {
        /* record this chunk in the clustered index, growing on demand */
        AVIIndex* idx = &avist->indexes;
        int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
        int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
        if (idx->ents_allocated <= idx->entry) {
            idx->cluster = av_realloc_f(idx->cluster, sizeof(void*), cl+1);
            if (!idx->cluster) {
                idx->ents_allocated = 0;
                idx->entry = 0;
                return AVERROR(ENOMEM);
            }
            idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
            if (!idx->cluster[cl])
                return AVERROR(ENOMEM);
            idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
        }

        idx->cluster[cl][id].flags = flags;
        idx->cluster[cl][id].pos = avio_tell(pb) - avi->movi_list;
        idx->cluster[cl][id].len = size;
        idx->entry++;
    }

    avio_write(pb, tag, 4);
    avio_wl32(pb, size);
    avio_write(pb, pkt->data, size);
    if (size & 1)   /* RIFF chunks are 16-bit aligned */
        avio_w8(pb, 0);

    return 0;
}
 
/**
 * Finish the file: close the last RIFF chunk, write the appropriate
 * index (legacy idx1 for a single-RIFF file; OpenDML leaf indexes and a
 * patched dmlh for a multi-RIFF one), update the counters, and free the
 * in-memory per-stream indexes.
 */
static int avi_write_trailer(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int res = 0;
    int i, j, n, nb_frames;
    int64_t file_size;

    if (pb->seekable){
        if (avi->riff_id == 1) {
            /* classic single-RIFF AVI: only the idx1 index is needed */
            ff_end_tag(pb, avi->movi_list);
            res = avi_write_idx1(s);
            ff_end_tag(pb, avi->riff_start);
        } else {
            avi_write_ix(s);
            ff_end_tag(pb, avi->movi_list);
            ff_end_tag(pb, avi->riff_start);

            file_size = avio_tell(pb);
            avio_seek(pb, avi->odml_list - 8, SEEK_SET);
            ffio_wfourcc(pb, "LIST"); /* Making this AVI OpenDML one */
            avio_skip(pb, 16);

            /* dmlh dwTotalFrames: max video frame count, plus the packet
             * counts of MP2/MP3 audio streams */
            for (n=nb_frames=0;n<s->nb_streams;n++) {
                AVCodecContext *stream = s->streams[n]->codec;
                AVIStream *avist= s->streams[n]->priv_data;

                if (stream->codec_type == AVMEDIA_TYPE_VIDEO) {
                    if (nb_frames < avist->packet_count)
                        nb_frames = avist->packet_count;
                } else {
                    if (stream->codec_id == AV_CODEC_ID_MP2 || stream->codec_id == AV_CODEC_ID_MP3) {
                        nb_frames += avist->packet_count;
                    }
                }
            }
            avio_wl32(pb, nb_frames);
            avio_seek(pb, file_size, SEEK_SET);

            avi_write_counters(s, avi->riff_id);
        }
    }

    /* free the clustered per-stream indexes */
    for (i=0; i<s->nb_streams; i++) {
        AVIStream *avist= s->streams[i]->priv_data;
        for (j=0; j<avist->indexes.ents_allocated/AVI_INDEX_CLUSTER_SIZE; j++)
            av_freep(&avist->indexes.cluster[j]);
        av_freep(&avist->indexes.cluster);
        avist->indexes.ents_allocated = avist->indexes.entry = 0;
    }

    return res;
}
 
/* AVI muxer definition.  AVFMT_VARIABLE_FPS: AVI stores a fixed frame
 * rate, so avi_write_packet() inserts empty chunks for skipped frames. */
AVOutputFormat ff_avi_muxer = {
    .name           = "avi",
    .long_name      = NULL_IF_CONFIG_SMALL("AVI (Audio Video Interleaved)"),
    .mime_type      = "video/x-msvideo",
    .extensions     = "avi",
    .priv_data_size = sizeof(AVIContext),
    .audio_codec    = CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_AC3,
    .video_codec    = AV_CODEC_ID_MPEG4,
    .write_header   = avi_write_header,
    .write_packet   = avi_write_packet,
    .write_trailer  = avi_write_trailer,
    .codec_tag      = (const AVCodecTag* const []){
        ff_codec_bmp_tags, ff_codec_wav_tags, 0
    },
    .flags          = AVFMT_VARIABLE_FPS,
};
/contrib/sdk/sources/ffmpeg/libavformat/avio.c
0,0 → 1,443
/*
* unbuffered I/O
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "os_support.h"
#include "avformat.h"
#if CONFIG_NETWORK
#include "network.h"
#endif
#include "url.h"
 
/* Head of the singly linked list of all registered URL protocols. */
static URLProtocol *first_protocol = NULL;

/* Return the protocol registered after prev, or the first one when
 * prev is NULL.  Used to iterate over all registered protocols. */
URLProtocol *ffurl_protocol_next(URLProtocol *prev)
{
    return prev ? prev->next : first_protocol;
}
 
/** @name Logging context. */
/*@{*/
/* AVClass item_name callback: the protocol name, or "NULL" while no
 * protocol is attached to the context yet. */
static const char *urlcontext_to_name(void *ptr)
{
    URLContext *h = (URLContext *)ptr;

    return h->prot ? h->prot->name : "NULL";
}
 
/* AVClass child_next callback: a URLContext has at most one child, its
 * protocol-private data (and only when that data carries an AVClass). */
static void *urlcontext_child_next(void *obj, void *prev)
{
    URLContext *h = obj;

    if (prev || !h->priv_data || !h->prot->priv_data_class)
        return NULL;
    return h->priv_data;
}
 
/**
 * AVClass child_class_next callback: enumerate the priv_data classes of
 * all registered protocols, resuming after prev.
 *
 * Fix: the assignments used as loop conditions now carry the extra
 * parentheses that document the intent (and silence -Wparentheses).
 */
static const AVClass *urlcontext_child_class_next(const AVClass *prev)
{
    URLProtocol *p = NULL;

    /* find the protocol that corresponds to prev */
    while (prev && (p = ffurl_protocol_next(p)))
        if (p->priv_data_class == prev)
            break;

    /* find next protocol with priv options */
    while ((p = ffurl_protocol_next(p)))
        if (p->priv_data_class)
            return p->priv_data_class;
    return NULL;
}
 
/* URLContext itself exposes no generic options; protocol-private options
 * are reached through the child_next/child_class_next callbacks. */
static const AVOption options[] = {{NULL}};
const AVClass ffurl_context_class = {
    .class_name       = "URLContext",
    .item_name        = urlcontext_to_name,
    .option           = options,
    .version          = LIBAVUTIL_VERSION_INT,
    .child_next       = urlcontext_child_next,
    .child_class_next = urlcontext_child_class_next,
};
/*@}*/
 
 
/**
 * Public protocol enumerator.  *opaque is the iteration state (pass a
 * pointer to NULL to start); returns the name of the next protocol that
 * supports the requested direction, or NULL when exhausted.
 */
const char *avio_enum_protocols(void **opaque, int output)
{
    URLProtocol *p;

    /* iterate instead of the original tail recursion */
    while ((p = ffurl_protocol_next(*opaque))) {
        *opaque = p;
        if ((output && p->url_write) || (!output && p->url_read))
            return p->name;
    }
    *opaque = NULL;
    return NULL;
}
 
/**
 * Append a protocol to the global registration list.  Callers compiled
 * against an older, smaller URLProtocol are accommodated by copying
 * their struct into a zero-padded, full-sized allocation.
 *
 * Fix: the av_mallocz() result was passed to memcpy() unchecked; on
 * allocation failure this dereferenced NULL.  Now returns AVERROR(ENOMEM).
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ffurl_register_protocol(URLProtocol *protocol, int size)
{
    URLProtocol **p;
    if (size < sizeof(URLProtocol)) {
        URLProtocol* temp = av_mallocz(sizeof(URLProtocol));
        if (!temp)
            return AVERROR(ENOMEM);
        memcpy(temp, protocol, size);
        protocol = temp;
    }
    /* append at the tail so enumeration preserves registration order */
    p = &first_protocol;
    while (*p != NULL) p = &(*p)->next;
    *p = protocol;
    protocol->next = NULL;
    return 0;
}
 
/**
 * Allocate and initialize a URLContext for the given protocol and
 * filename.  Parses an option string of the form "proto,key=val,...:"
 * embedded after the protocol name and applies it to the protocol's
 * private options, then strips it from the stored filename.  Does not
 * open the resource; that is done later by ffurl_connect().
 *
 * @return 0 on success, a negative AVERROR code on failure (*puc = NULL)
 */
static int url_alloc_for_protocol (URLContext **puc, struct URLProtocol *up,
                                   const char *filename, int flags,
                                   const AVIOInterruptCB *int_cb)
{
    URLContext *uc;
    int err;

#if CONFIG_NETWORK
    if (up->flags & URL_PROTOCOL_FLAG_NETWORK && !ff_network_init())
        return AVERROR(EIO);
#endif
    if ((flags & AVIO_FLAG_READ) && !up->url_read) {
        av_log(NULL, AV_LOG_ERROR,
               "Impossible to open the '%s' protocol for reading\n", up->name);
        return AVERROR(EIO);
    }
    if ((flags & AVIO_FLAG_WRITE) && !up->url_write) {
        av_log(NULL, AV_LOG_ERROR,
               "Impossible to open the '%s' protocol for writing\n", up->name);
        return AVERROR(EIO);
    }
    /* context plus a copy of the filename in a single allocation */
    uc = av_mallocz(sizeof(URLContext) + strlen(filename) + 1);
    if (!uc) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    uc->av_class = &ffurl_context_class;
    uc->filename = (char *) &uc[1];  /* points just past the struct */
    strcpy(uc->filename, filename);
    uc->prot = up;
    uc->flags = flags;
    uc->is_streamed = 0; /* default = not streamed */
    uc->max_packet_size = 0; /* default: stream file */
    if (up->priv_data_size) {
        uc->priv_data = av_mallocz(up->priv_data_size);
        if (!uc->priv_data) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        if (up->priv_data_class) {
            int proto_len= strlen(up->name);
            char *start = strchr(uc->filename, ',');
            *(const AVClass**)uc->priv_data = up->priv_data_class;
            av_opt_set_defaults(uc->priv_data);
            /* "proto,<sep>key<sep>val<sep>...": the character right after
             * the comma is the option delimiter */
            if(!strncmp(up->name, uc->filename, proto_len) && uc->filename + proto_len == start){
                int ret= 0;
                char *p= start;
                char sep= *++p;
                char *key, *val;
                p++;
                while(ret >= 0 && (key= strchr(p, sep)) && p<key && (val = strchr(key+1, sep))){
                    *val= *key= 0;  /* temporarily terminate key and value */
                    ret= av_opt_set(uc->priv_data, p, key+1, 0);
                    if (ret == AVERROR_OPTION_NOT_FOUND)
                        av_log(uc, AV_LOG_ERROR, "Key '%s' not found.\n", p);
                    *val= *key= sep;  /* restore the separators */
                    p= val+1;
                }
                if(ret<0 || p!=key){
                    av_log(uc, AV_LOG_ERROR, "Error parsing options string %s\n", start);
                    av_freep(&uc->priv_data);
                    av_freep(&uc);
                    err = AVERROR(EINVAL);
                    goto fail;
                }
                /* strip the consumed option string from the filename */
                memmove(start, key+1, strlen(key));
            }
        }
    }
    if (int_cb)
        uc->interrupt_callback = *int_cb;

    *puc = uc;
    return 0;
fail:
    *puc = NULL;
    if (uc)
        av_freep(&uc->priv_data);
    av_freep(&uc);
#if CONFIG_NETWORK
    if (up->flags & URL_PROTOCOL_FLAG_NETWORK)
        ff_network_close();
#endif
    return err;
}
 
/**
 * Open the resource described by uc via the protocol's url_open2 (or
 * the legacy url_open) callback, then probe seekability for write
 * contexts and local files by attempting a seek to 0.
 *
 * @param options protocol-private options, may be consumed by url_open2
 * @return 0 on success, the protocol's negative error code otherwise
 */
int ffurl_connect(URLContext* uc, AVDictionary **options)
{
    int err =
        uc->prot->url_open2 ? uc->prot->url_open2(uc, uc->filename, uc->flags, options) :
        uc->prot->url_open(uc, uc->filename, uc->flags);
    if (err)
        return err;
    uc->is_connected = 1;
    //We must be careful here as ffurl_seek() could be slow, for example for http
    if(   (uc->flags & AVIO_FLAG_WRITE)
       || !strcmp(uc->prot->name, "file"))
        if(!uc->is_streamed && ffurl_seek(uc, 0, SEEK_SET) < 0)
            uc->is_streamed= 1;
    return 0;
}
 
#define URL_SCHEME_CHARS \
"abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"0123456789+-."
 
/**
 * Pick the protocol matching the scheme prefix of filename and allocate
 * a URLContext for it.  Falls back to the "file" protocol when there is
 * no scheme (or the name looks like a DOS path such as "c:\...").
 *
 * Fix: the scheme-detection condition mixed && and || relying on
 * operator precedence (-Wparentheses); the grouping is now explicit.
 * The assignment used as the loop condition is parenthesized likewise.
 * Behavior is unchanged.
 *
 * @return 0 on success, AVERROR_PROTOCOL_NOT_FOUND if no protocol matches
 */
int ffurl_alloc(URLContext **puc, const char *filename, int flags,
                const AVIOInterruptCB *int_cb)
{
    URLProtocol *up = NULL;
    char proto_str[128], proto_nested[128], *ptr;
    size_t proto_len = strspn(filename, URL_SCHEME_CHARS);

    if (!first_protocol) {
        av_log(NULL, AV_LOG_WARNING, "No URL Protocols are registered. "
                                     "Missing call to av_register_all()?\n");
    }

    /* no "scheme:" prefix, or a DOS drive letter: treat as a file path */
    if ((filename[proto_len] != ':' &&
         (filename[proto_len] != ',' || !strchr(filename + proto_len + 1, ':'))) ||
        is_dos_path(filename))
        strcpy(proto_str, "file");
    else
        av_strlcpy(proto_str, filename, FFMIN(proto_len+1, sizeof(proto_str)));

    /* drop a ",opt=val" suffix from the scheme name */
    if ((ptr = strchr(proto_str, ',')))
        *ptr = '\0';
    av_strlcpy(proto_nested, proto_str, sizeof(proto_nested));
    /* "outer+inner" nested schemes match on the part before '+' */
    if ((ptr = strchr(proto_nested, '+')))
        *ptr = '\0';

    while ((up = ffurl_protocol_next(up))) {
        if (!strcmp(proto_str, up->name))
            return url_alloc_for_protocol (puc, up, filename, flags, int_cb);
        if (up->flags & URL_PROTOCOL_FLAG_NESTED_SCHEME &&
            !strcmp(proto_nested, up->name))
            return url_alloc_for_protocol (puc, up, filename, flags, int_cb);
    }
    *puc = NULL;
    if (!strcmp("https", proto_str))
        av_log(NULL, AV_LOG_WARNING, "https protocol not found, recompile with openssl or gnutls enabled.\n");
    return AVERROR_PROTOCOL_NOT_FOUND;
}
 
/**
 * Convenience wrapper: allocate a URLContext, apply protocol-private
 * options from the dictionary and connect.  On any failure the context
 * is closed and *puc is set to NULL.
 */
int ffurl_open(URLContext **puc, const char *filename, int flags,
               const AVIOInterruptCB *int_cb, AVDictionary **options)
{
    int ret = ffurl_alloc(puc, filename, flags, int_cb);

    if (ret)
        return ret;
    if (options && (*puc)->prot->priv_data_class) {
        ret = av_opt_set_dict((*puc)->priv_data, options);
        if (ret < 0)
            goto fail;
    }
    ret = ffurl_connect(*puc, options);
    if (!ret)
        return 0;
fail:
    ffurl_close(*puc);
    *puc = NULL;
    return ret;
}
 
/**
 * Call transfer_func repeatedly until at least size_min bytes have been
 * transferred, EOF or a hard error occurs, or the interrupt callback
 * fires.  EAGAIN is retried: a few times immediately, then with 1 ms
 * sleeps, bounded by h->rw_timeout when that is set.
 *
 * @return bytes transferred, or a negative AVERROR code
 */
static inline int retry_transfer_wrapper(URLContext *h, unsigned char *buf, int size, int size_min,
                                         int (*transfer_func)(URLContext *h, unsigned char *buf, int size))
{
    int ret, len;
    int fast_retries = 5;
    int64_t wait_since = 0;

    len = 0;
    while (len < size_min) {
        if (ff_check_interrupt(&h->interrupt_callback))
            return AVERROR_EXIT;
        ret = transfer_func(h, buf+len, size-len);
        if (ret == AVERROR(EINTR))
            continue;
        if (h->flags & AVIO_FLAG_NONBLOCK)
            return ret;  /* non-blocking mode: report immediately */
        if (ret == AVERROR(EAGAIN)) {
            ret = 0;
            if (fast_retries) {
                fast_retries--;  /* spin a few times before sleeping */
            } else {
                if (h->rw_timeout) {
                    if (!wait_since)
                        wait_since = av_gettime();
                    else if (av_gettime() > wait_since + h->rw_timeout)
                        return AVERROR(EIO);
                }
                av_usleep(1000);
            }
        } else if (ret < 1)
            /* EOF or hard error: return data transferred so far, if any */
            return (ret < 0 && ret != AVERROR_EOF) ? ret : len;
        if (ret)
            fast_retries = FFMAX(fast_retries, 2);  /* progress: re-arm retries */
        len += ret;
    }
    return len;
}
 
/* Read up to size bytes; returns as soon as any data is available
 * (size_min == 1).  Fails with EIO on a write-only context. */
int ffurl_read(URLContext *h, unsigned char *buf, int size)
{
    if (h->flags & AVIO_FLAG_READ)
        return retry_transfer_wrapper(h, buf, size, 1, h->prot->url_read);
    return AVERROR(EIO);
}
 
/* Read exactly size bytes unless EOF/error intervenes (size_min == size).
 * Fails with EIO on a write-only context. */
int ffurl_read_complete(URLContext *h, unsigned char *buf, int size)
{
    if (h->flags & AVIO_FLAG_READ)
        return retry_transfer_wrapper(h, buf, size, size, h->prot->url_read);
    return AVERROR(EIO);
}
 
/* Write size bytes completely.  Fails with EIO on a read-only context
 * or when the packet exceeds the protocol's maximum packet size. */
int ffurl_write(URLContext *h, const unsigned char *buf, int size)
{
    int writable  = h->flags & AVIO_FLAG_WRITE;
    int too_large = h->max_packet_size && size > h->max_packet_size;

    if (!writable || too_large)
        return AVERROR(EIO);

    return retry_transfer_wrapper(h, (unsigned char *)buf, size, size,
                                  (void *)h->prot->url_write);
}
 
/* Delegate seeking to the protocol, stripping the AVSEEK_FORCE hint bit
 * from whence.  ENOSYS when the protocol cannot seek. */
int64_t ffurl_seek(URLContext *h, int64_t pos, int whence)
{
    if (!h->prot->url_seek)
        return AVERROR(ENOSYS);

    return h->prot->url_seek(h, pos, whence & ~AVSEEK_FORCE);
}
 
/**
 * Close *hh: invoke the protocol's url_close if connected, shut down
 * networking if the protocol used it, free the private data and the
 * context itself, and set *hh to NULL.  Safe when *hh is already NULL.
 *
 * @return the url_close result, or 0
 */
int ffurl_closep(URLContext **hh)
{
    URLContext *h= *hh;
    int ret = 0;
    if (!h) return 0; /* can happen when ffurl_open fails */

    if (h->is_connected && h->prot->url_close)
        ret = h->prot->url_close(h);
#if CONFIG_NETWORK
    if (h->prot->flags & URL_PROTOCOL_FLAG_NETWORK)
        ff_network_close();
#endif
    if (h->prot->priv_data_size) {
        if (h->prot->priv_data_class)
            av_opt_free(h->priv_data);
        av_freep(&h->priv_data);
    }
    av_freep(hh);
    return ret;
}
 
/* By-value variant of ffurl_closep(): the caller's pointer is not
 * cleared (h is a local copy), matching the original behavior. */
int ffurl_close(URLContext *h)
{
    URLContext *ctx = h;

    return ffurl_closep(&ctx);
}
 
 
/**
 * Check accessibility of url for the given flags.  Uses the protocol's
 * url_check when available; otherwise falls back to actually connecting
 * and reports the requested flags on success.
 */
int avio_check(const char *url, int flags)
{
    URLContext *h;
    int ret;

    ret = ffurl_alloc(&h, url, flags, NULL);
    if (ret)
        return ret;

    if (!h->prot->url_check) {
        /* no dedicated check callback: try a real connect */
        ret = ffurl_connect(h, NULL);
        if (ret >= 0)
            ret = flags;
    } else {
        ret = h->prot->url_check(h, flags);
    }

    ffurl_close(h);
    return ret;
}
 
/**
 * Return the resource size.  Prefers the AVSEEK_SIZE query; when the
 * protocol does not support it, seeks to the last byte to measure the
 * size and then restores the previous position.
 */
int64_t ffurl_size(URLContext *h)
{
    int64_t old_pos;
    int64_t size = ffurl_seek(h, 0, AVSEEK_SIZE);

    if (size >= 0)
        return size;

    old_pos = ffurl_seek(h, 0, SEEK_CUR);
    size    = ffurl_seek(h, -1, SEEK_END);
    if (size < 0)
        return size;
    size++;                       /* -1 from the end -> last byte offset */
    ffurl_seek(h, old_pos, SEEK_SET);
    return size;
}
 
/* Return the protocol's underlying file descriptor, or -1 when the
 * protocol does not expose one. */
int ffurl_get_file_handle(URLContext *h)
{
    if (h->prot->url_get_file_handle)
        return h->prot->url_get_file_handle(h);
    return -1;
}
 
/**
 * Return all underlying file descriptors.  Protocols without a native
 * multi-handle callback are emulated via the single-handle one; the
 * returned array is allocated and owned by the caller.
 */
int ffurl_get_multi_file_handle(URLContext *h, int **handles, int *numhandles)
{
    if (h->prot->url_get_multi_file_handle)
        return h->prot->url_get_multi_file_handle(h, handles, numhandles);

    /* emulate with the single-handle API */
    if (!h->prot->url_get_file_handle)
        return AVERROR(ENOSYS);
    *handles = av_malloc(sizeof(**handles));
    if (!*handles)
        return AVERROR(ENOMEM);
    *numhandles   = 1;
    (*handles)[0] = h->prot->url_get_file_handle(h);
    return 0;
}
 
/* Half-close the connection per flags; EINVAL when the protocol has no
 * shutdown callback. */
int ffurl_shutdown(URLContext *h, int flags)
{
    if (h->prot->url_shutdown)
        return h->prot->url_shutdown(h, flags);
    return AVERROR(EINVAL);
}
 
/* Invoke the interrupt callback, if any; nonzero means "abort". */
int ff_check_interrupt(AVIOInterruptCB *cb)
{
    if (!cb || !cb->callback)
        return 0;
    return cb->callback(cb->opaque);
}
/contrib/sdk/sources/ffmpeg/libavformat/avio.h
0,0 → 1,481
/*
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_AVIO_H
#define AVFORMAT_AVIO_H
 
/**
* @file
* @ingroup lavf_io
* Buffered I/O operations
*/
 
#include <stdint.h>
 
#include "libavutil/common.h"
#include "libavutil/dict.h"
#include "libavutil/log.h"
 
#include "libavformat/version.h"
 
 
#define AVIO_SEEKABLE_NORMAL 0x0001 /**< Seeking works like for a local file */
 
/**
* Callback for checking whether to abort blocking functions.
* AVERROR_EXIT is returned in this case by the interrupted
* function. During blocking operations, callback is called with
* opaque as parameter. If the callback returns 1, the
* blocking operation will be aborted.
*
* No members can be added to this struct without a major bump, if
* new elements have been added after this struct in AVFormatContext
* or AVIOContext.
*/
typedef struct AVIOInterruptCB {
    int (*callback)(void*);   /**< invoked during blocking operations; nonzero return aborts */
    void *opaque;             /**< user pointer passed back to callback */
} AVIOInterruptCB;
 
/**
* Bytestream IO Context.
* New fields can be added to the end with minor version bumps.
* Removal, reordering and changes to existing fields require a major
* version bump.
* sizeof(AVIOContext) must not be used outside libav*.
*
* @note None of the function pointers in AVIOContext should be called
* directly, they should only be set by the client application
* when implementing custom I/O. Normally these are set to the
* function pointers specified in avio_alloc_context()
*/
typedef struct AVIOContext {
/**
* A class for private options.
*
* If this AVIOContext is created by avio_open2(), av_class is set and
* passes the options down to protocols.
*
* If this AVIOContext is manually allocated, then av_class may be set by
* the caller.
*
* warning -- this field can be NULL, be sure to not pass this AVIOContext
* to any av_opt_* functions in that case.
*/
const AVClass *av_class;
unsigned char *buffer; /**< Start of the buffer. */
int buffer_size; /**< Maximum buffer size */
unsigned char *buf_ptr; /**< Current position in the buffer */
unsigned char *buf_end; /**< End of the data, may be less than
buffer+buffer_size if the read function returned
less data than requested, e.g. for streams where
no more data has been received yet. */
void *opaque; /**< A private pointer, passed to the read/write/seek/...
functions. */
/** Callback for refilling the buffer; may be NULL for write-only or memory streams. */
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size);
/** Callback for draining buffered output; may be NULL. */
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size);
/** Byte-position seek callback; NULL makes the stream non-seekable. */
int64_t (*seek)(void *opaque, int64_t offset, int whence);
int64_t pos; /**< position in the file of the current buffer */
int must_flush; /**< true if the next seek should flush */
int eof_reached; /**< true if eof reached */
int write_flag; /**< true if open for writing */
int max_packet_size; /**< if non-zero, I/O is done in packets of at most this size */
unsigned long checksum; /**< running checksum state (see ffio_init_checksum) */
unsigned char *checksum_ptr; /**< first buffered byte not yet folded into checksum */
unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size);
int error; /**< contains the error code or 0 if no error happened */
/**
* Pause or resume playback for network streaming protocols - e.g. MMS.
*/
int (*read_pause)(void *opaque, int pause);
/**
* Seek to a given timestamp in stream with the specified stream_index.
* Needed for some network streaming protocols which don't support seeking
* to byte position.
*/
int64_t (*read_seek)(void *opaque, int stream_index,
int64_t timestamp, int flags);
/**
* A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
*/
int seekable;

/**
* max filesize, used to limit allocations
* This field is internal to libavformat and access from outside is not allowed.
*/
int64_t maxsize;

/**
* avio_read and avio_write should if possible be satisfied directly
* instead of going through a buffer, and avio_seek will always
* call the underlying seek function directly.
*/
int direct;

/**
* Bytes read statistic
* This field is internal to libavformat and access from outside is not allowed.
*/
int64_t bytes_read;

/**
* seek statistic
* This field is internal to libavformat and access from outside is not allowed.
*/
int seek_count;

/**
* writeout statistic
* This field is internal to libavformat and access from outside is not allowed.
*/
int writeout_count;
} AVIOContext;
 
/* unbuffered I/O */
 
/**
* Return AVIO_FLAG_* access flags corresponding to the access permissions
* of the resource in url, or a negative value corresponding to an
* AVERROR code in case of failure. The returned access flags are
* masked by the value in flags.
*
* @note This function is intrinsically unsafe, in the sense that the
* checked resource may change its existence or permission status from
* one call to another. Thus you should not trust the returned value,
* unless you are sure that no other processes are accessing the
* checked resource.
*/
int avio_check(const char *url, int flags);
 
/**
* Allocate and initialize an AVIOContext for buffered I/O. It must be later
* freed with av_free().
*
* @param buffer Memory block for input/output operations via AVIOContext.
* The buffer must be allocated with av_malloc() and friends.
* @param buffer_size The buffer size is very important for performance.
* For protocols with fixed blocksize it should be set to this blocksize.
* For others a typical size is a cache page, e.g. 4kb.
* @param write_flag Set to 1 if the buffer should be writable, 0 otherwise.
* @param opaque An opaque pointer to user-specific data.
* @param read_packet A function for refilling the buffer, may be NULL.
* @param write_packet A function for writing the buffer contents, may be NULL.
* The function may not change the input buffers content.
* @param seek A function for seeking to specified byte position, may be NULL.
*
* @return Allocated AVIOContext or NULL on failure.
*/
AVIOContext *avio_alloc_context(
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence));
 
void avio_w8(AVIOContext *s, int b);
void avio_write(AVIOContext *s, const unsigned char *buf, int size);
void avio_wl64(AVIOContext *s, uint64_t val);
void avio_wb64(AVIOContext *s, uint64_t val);
void avio_wl32(AVIOContext *s, unsigned int val);
void avio_wb32(AVIOContext *s, unsigned int val);
void avio_wl24(AVIOContext *s, unsigned int val);
void avio_wb24(AVIOContext *s, unsigned int val);
void avio_wl16(AVIOContext *s, unsigned int val);
void avio_wb16(AVIOContext *s, unsigned int val);
 
/**
* Write a NULL-terminated string.
* @return number of bytes written.
*/
int avio_put_str(AVIOContext *s, const char *str);
 
/**
* Convert an UTF-8 string to UTF-16LE and write it.
* @return number of bytes written.
*/
int avio_put_str16le(AVIOContext *s, const char *str);
 
/**
* Passing this as the "whence" parameter to a seek function causes it to
* return the filesize without seeking anywhere. Supporting this is optional.
* If it is not supported then the seek function will return <0.
*/
#define AVSEEK_SIZE 0x10000
 
/**
* ORing this flag into the "whence" parameter to a seek function causes it to
* seek by any means (like reopening and linear reading) or other normally unreasonable
* means that can be extremely slow.
* This may be ignored by the seek code.
*/
#define AVSEEK_FORCE 0x20000
 
/**
* fseek() equivalent for AVIOContext.
* @return new position or AVERROR.
*/
int64_t avio_seek(AVIOContext *s, int64_t offset, int whence);
 
/**
* Skip given number of bytes forward
* @return new position or AVERROR.
*/
int64_t avio_skip(AVIOContext *s, int64_t offset);
 
/**
* ftell() equivalent for AVIOContext.
* @return position or AVERROR.
*/
static av_always_inline int64_t avio_tell(AVIOContext *s)
{
    /* The current position is simply a zero-length relative seek. */
    int64_t position = avio_seek(s, 0, SEEK_CUR);
    return position;
}
 
/**
* Get the filesize.
* @return filesize or AVERROR
*/
int64_t avio_size(AVIOContext *s);
 
/**
* feof() equivalent for AVIOContext.
* @return non zero if and only if end of file
*/
int url_feof(AVIOContext *s);
 
/** @warning currently size is limited */
int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3);
 
/**
* Force flushing of buffered data to the output s.
*
* Force the buffered data to be immediately written to the output,
* without waiting for the internal buffer to fill.
*/
void avio_flush(AVIOContext *s);
 
/**
* Read size bytes from AVIOContext into buf.
* @return number of bytes read or AVERROR
*/
int avio_read(AVIOContext *s, unsigned char *buf, int size);
 
/**
* @name Functions for reading from AVIOContext
* @{
*
* @note return 0 if EOF, so you cannot use it if EOF handling is
* necessary
*/
int avio_r8 (AVIOContext *s);
unsigned int avio_rl16(AVIOContext *s);
unsigned int avio_rl24(AVIOContext *s);
unsigned int avio_rl32(AVIOContext *s);
uint64_t avio_rl64(AVIOContext *s);
unsigned int avio_rb16(AVIOContext *s);
unsigned int avio_rb24(AVIOContext *s);
unsigned int avio_rb32(AVIOContext *s);
uint64_t avio_rb64(AVIOContext *s);
/**
* @}
*/
 
/**
* Read a string from pb into buf. The reading will terminate when either
* a NULL character was encountered, maxlen bytes have been read, or nothing
* more can be read from pb. The result is guaranteed to be NULL-terminated, it
* will be truncated if buf is too small.
* Note that the string is not interpreted or validated in any way, it
* might get truncated in the middle of a sequence for multi-byte encodings.
*
* @return number of bytes read (is always <= maxlen).
* If reading ends on EOF or error, the return value will be one more than
* bytes actually read.
*/
int avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen);
 
/**
* Read a UTF-16 string from pb and convert it to UTF-8.
* The reading will terminate when either a null or invalid character was
* encountered or maxlen bytes have been read.
* @return number of bytes read (is always <= maxlen)
*/
int avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen);
int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen);
 
 
/**
* @name URL open modes
* The flags argument to avio_open must be one of the following
* constants, optionally ORed with other flags.
* @{
*/
#define AVIO_FLAG_READ 1 /**< read-only */
#define AVIO_FLAG_WRITE 2 /**< write-only */
#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */
/**
* @}
*/
 
/**
* Use non-blocking mode.
* If this flag is set, operations on the context will return
* AVERROR(EAGAIN) if they can not be performed immediately.
* If this flag is not set, operations on the context will never return
* AVERROR(EAGAIN).
* Note that this flag does not affect the opening/connecting of the
* context. Connecting a protocol will always block if necessary (e.g. on
* network protocols) but never hang (e.g. on busy devices).
* Warning: non-blocking protocols is work-in-progress; this flag may be
* silently ignored.
*/
#define AVIO_FLAG_NONBLOCK 8
 
/**
* Use direct mode.
* avio_read and avio_write should if possible be satisfied directly
* instead of going through a buffer, and avio_seek will always
* call the underlying seek function directly.
*/
#define AVIO_FLAG_DIRECT 0x8000
 
/**
* Create and initialize a AVIOContext for accessing the
* resource indicated by url.
* @note When the resource indicated by url has been opened in
* read+write mode, the AVIOContext can be used only for writing.
*
* @param s Used to return the pointer to the created AVIOContext.
* In case of failure the pointed to value is set to NULL.
* @param flags flags which control how the resource indicated by url
* is to be opened
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int avio_open(AVIOContext **s, const char *url, int flags);
 
/**
* Create and initialize a AVIOContext for accessing the
* resource indicated by url.
* @note When the resource indicated by url has been opened in
* read+write mode, the AVIOContext can be used only for writing.
*
* @param s Used to return the pointer to the created AVIOContext.
* In case of failure the pointed to value is set to NULL.
* @param flags flags which control how the resource indicated by url
* is to be opened
* @param int_cb an interrupt callback to be used at the protocols level
* @param options A dictionary filled with protocol-private options. On return
* this parameter will be destroyed and replaced with a dict containing options
* that were not found. May be NULL.
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int avio_open2(AVIOContext **s, const char *url, int flags,
const AVIOInterruptCB *int_cb, AVDictionary **options);
 
/**
* Close the resource accessed by the AVIOContext s and free it.
* This function can only be used if s was opened by avio_open().
*
* The internal buffer is automatically flushed before closing the
* resource.
*
* @return 0 on success, an AVERROR < 0 on error.
* @see avio_closep
*/
int avio_close(AVIOContext *s);
 
/**
* Close the resource accessed by the AVIOContext *s, free it
* and set the pointer pointing to it to NULL.
* This function can only be used if s was opened by avio_open().
*
* The internal buffer is automatically flushed before closing the
* resource.
*
* @return 0 on success, an AVERROR < 0 on error.
* @see avio_close
*/
int avio_closep(AVIOContext **s);
 
 
/**
* Open a write only memory stream.
*
* @param s new IO context
* @return zero if no error.
*/
int avio_open_dyn_buf(AVIOContext **s);
 
/**
* Return the written size and a pointer to the buffer. The buffer
* must be freed with av_free().
* Padding of FF_INPUT_BUFFER_PADDING_SIZE is added to the buffer.
*
* @param s IO context
* @param pbuffer pointer to a byte buffer
* @return the length of the byte buffer
*/
int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer);
 
/**
* Iterate through names of available protocols.
*
* @param opaque A private pointer representing current protocol.
* It must be a pointer to NULL on first iteration and will
* be updated by successive calls to avio_enum_protocols.
* @param output If set to 1, iterate over output protocols,
* otherwise over input protocols.
*
* @return A static string containing the name of current protocol or NULL
*/
const char *avio_enum_protocols(void **opaque, int output);
 
/**
* Pause and resume playing - only meaningful if using a network streaming
* protocol (e.g. MMS).
* @param pause 1 for pause, 0 for resume
*/
int avio_pause(AVIOContext *h, int pause);
 
/**
* Seek to a given timestamp relative to some component stream.
* Only meaningful if using a network streaming protocol (e.g. MMS.).
* @param stream_index The stream index that the timestamp is relative to.
* If stream_index is (-1) the timestamp should be in AV_TIME_BASE
* units from the beginning of the presentation.
* If a stream_index >= 0 is used and the protocol does not support
* seeking based on component streams, the call will fail.
* @param timestamp timestamp in AVStream.time_base units
* or if there is no stream specified then in AV_TIME_BASE units.
* @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE
* and AVSEEK_FLAG_ANY. The protocol may silently ignore
* AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will
* fail if used and not supported.
* @return >= 0 on success
* @see AVInputFormat::read_seek
*/
int64_t avio_seek_time(AVIOContext *h, int stream_index,
int64_t timestamp, int flags);
 
#endif /* AVFORMAT_AVIO_H */
/contrib/sdk/sources/ffmpeg/libavformat/avio_internal.h
0,0 → 1,151
/*
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_AVIO_INTERNAL_H
#define AVFORMAT_AVIO_INTERNAL_H
 
#include "avio.h"
#include "url.h"
 
#include "libavutil/log.h"
 
extern const AVClass ffio_url_class;
 
int ffio_init_context(AVIOContext *s,
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence));
 
 
/**
* Read size bytes from AVIOContext, returning a pointer.
* Note that the data pointed at by the returned pointer is only
* valid until the next call that references the same IO context.
* @param s IO context
* @param buf pointer to buffer into which to assemble the requested
* data if it is not available in contiguous addresses in the
* underlying buffer
* @param size number of bytes requested
* @param data address at which to store pointer: this will be
* a direct pointer into the underlying buffer if the requested
* number of bytes are available at contiguous addresses, otherwise
* will be a copy of buf
* @return number of bytes read or AVERROR
*/
int ffio_read_indirect(AVIOContext *s, unsigned char *buf, int size, const unsigned char **data);
 
/**
* Read size bytes from AVIOContext into buf.
* This reads at most 1 packet. If that is not enough fewer bytes will be
* returned.
* @return number of bytes read or AVERROR
*/
int ffio_read_partial(AVIOContext *s, unsigned char *buf, int size);
 
void ffio_fill(AVIOContext *s, int b, int count);
 
static av_always_inline void ffio_wfourcc(AVIOContext *pb, const uint8_t *s)
{
    /* Pack the four tag characters into one little-endian 32-bit word. */
    unsigned int tag = MKTAG(s[0], s[1], s[2], s[3]);
    avio_wl32(pb, tag);
}
 
/**
* Rewind the AVIOContext using the specified buffer containing the first buf_size bytes of the file.
* Used after probing to avoid seeking.
* Joins buf and s->buffer, taking any overlap into consideration.
* @note s->buffer must overlap with buf or they can't be joined and the function fails
*
* @param s The read-only AVIOContext to rewind
* @param buf The probe buffer containing the first buf_size bytes of the file
* @param buf_size The size of buf
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int ffio_rewind_with_probe_data(AVIOContext *s, unsigned char **buf, int buf_size);
 
uint64_t ffio_read_varlen(AVIOContext *bc);
 
/** @warning must be called before any I/O */
int ffio_set_buf_size(AVIOContext *s, int buf_size);
 
/**
* Ensures that the requested seekback buffer size will be available
*
* Will ensure that when reading sequentially up to buf_size, seeking
* within the current pos and pos+buf_size is possible.
* Once the stream position moves outside this window this guarantee is lost.
*/
int ffio_ensure_seekback(AVIOContext *s, int buf_size);
 
int ffio_limit(AVIOContext *s, int size);
 
void ffio_init_checksum(AVIOContext *s,
unsigned long (*update_checksum)(unsigned long c, const uint8_t *p, unsigned int len),
unsigned long checksum);
unsigned long ffio_get_checksum(AVIOContext *s);
unsigned long ff_crc04C11DB7_update(unsigned long checksum, const uint8_t *buf,
unsigned int len);
 
/**
* Open a write only packetized memory stream with a maximum packet
* size of 'max_packet_size'. The stream is stored in a memory buffer
* with a big-endian 4 byte header giving the packet size in bytes.
*
* @param s new IO context
* @param max_packet_size maximum packet size (must be > 0)
* @return zero if no error.
*/
int ffio_open_dyn_packet_buf(AVIOContext **s, int max_packet_size);
 
/**
* Create and initialize a AVIOContext for accessing the
* resource referenced by the URLContext h.
* @note When the URLContext h has been opened in read+write mode, the
* AVIOContext can be used only for writing.
*
* @param s Used to return the pointer to the created AVIOContext.
* In case of failure the pointed to value is set to NULL.
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int ffio_fdopen(AVIOContext **s, URLContext *h);
 
/**
* Open a write-only fake memory stream. The written data is not stored
* anywhere - this is only used for measuring the amount of data
* written.
*
* @param s new IO context
* @return zero if no error.
*/
int ffio_open_null_buf(AVIOContext **s);
 
/**
* Close a null buffer.
*
* @param s an IO context opened by ffio_open_null_buf
* @return the number of bytes written to the null buffer
*/
int ffio_close_null_buf(AVIOContext *s);
 
#endif /* AVFORMAT_AVIO_INTERNAL_H */
/contrib/sdk/sources/ffmpeg/libavformat/aviobuf.c
0,0 → 1,1083
/*
* buffered I/O
* Copyright (c) 2000,2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/crc.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "avformat.h"
#include "avio.h"
#include "avio_internal.h"
#include "internal.h"
#include "url.h"
#include <stdarg.h>
 
#define IO_BUFFER_SIZE 32768
 
/**
* Do seeks within this distance ahead of the current buffer by skipping
* data instead of calling the protocol seek function, for seekable
* protocols.
*/
#define SHORT_SEEK_THRESHOLD 4096
 
static void *ffio_url_child_next(void *obj, void *prev)
{
    /* An AVIOContext has exactly one child: the protocol context in opaque. */
    AVIOContext *ctx = obj;
    if (prev)
        return NULL;
    return ctx->opaque;
}
 
static const AVClass *ffio_url_child_class_next(const AVClass *prev)
{
    /* Only a single child class exists (the URLContext class). */
    if (prev)
        return NULL;
    return &ffurl_context_class;
}
 
/* No private options are defined yet; the table must stay NULL-terminated. */
static const AVOption ffio_url_options[] = {
{ NULL },
};
 
/* AVClass attached to AVIOContext so av_log/av_opt_* can identify the
* context and traverse into its child URLContext (see the two callbacks). */
const AVClass ffio_url_class = {
.class_name = "AVIOContext",
.item_name = av_default_item_name,
.version = LIBAVUTIL_VERSION_INT,
.option = ffio_url_options,
.child_next = ffio_url_child_next,
.child_class_next = ffio_url_child_class_next,
};
 
static void fill_buffer(AVIOContext *s);
static int url_resetbuf(AVIOContext *s, int flags);
 
/* Initialize an already-allocated AVIOContext; see avio_alloc_context()
* for the meaning of the parameters. Always returns 0. */
int ffio_init_context(AVIOContext *s,
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence))
{
s->buffer = buffer;
s->buffer_size = buffer_size;
s->buf_ptr = buffer;
s->opaque = opaque;
s->direct = 0;

/* Sets buf_end and write_flag according to the requested direction. */
url_resetbuf(s, write_flag ? AVIO_FLAG_WRITE : AVIO_FLAG_READ);

s->write_packet = write_packet;
s->read_packet = read_packet;
s->seek = seek;
s->pos = 0;
s->must_flush = 0;
s->eof_reached = 0;
s->error = 0;
/* Without a seek callback the stream is treated as non-seekable. */
s->seekable = seek ? AVIO_SEEKABLE_NORMAL : 0;
s->max_packet_size = 0;
s->update_checksum = NULL;

/* No reader and not writing: treat the supplied buffer as the whole,
* already-present stream so reads are served straight out of it. */
if (!read_packet && !write_flag) {
s->pos = buffer_size;
s->buf_end = s->buffer + buffer_size;
}
s->read_pause = NULL;
s->read_seek = NULL;

return 0;
}
 
AVIOContext *avio_alloc_context(
    unsigned char *buffer,
    int buffer_size,
    int write_flag,
    void *opaque,
    int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
    int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
    int64_t (*seek)(void *opaque, int64_t offset, int whence))
{
    /* Allocate a zeroed context, then delegate all field setup. */
    AVIOContext *ctx = av_mallocz(sizeof(*ctx));

    if (!ctx)
        return NULL;
    ffio_init_context(ctx, buffer, buffer_size, write_flag, opaque,
                      read_packet, write_packet, seek);
    return ctx;
}
 
static void writeout(AVIOContext *s, const uint8_t *data, int len)
{
    /* Hand the bytes to the user sink unless an error is already pending;
     * position and statistics advance regardless. */
    if (s->write_packet && !s->error) {
        int err = s->write_packet(s->opaque, (uint8_t *)data, len);
        if (err < 0)
            s->error = err;
    }
    s->writeout_count++;
    s->pos += len;
}
 
static void flush_buffer(AVIOContext *s)
{
    int pending = s->buf_ptr - s->buffer;

    if (pending > 0) {
        writeout(s, s->buffer, pending);
        /* Fold the flushed bytes into the running checksum, if armed. */
        if (s->update_checksum) {
            s->checksum     = s->update_checksum(s->checksum, s->checksum_ptr,
                                                 s->buf_ptr - s->checksum_ptr);
            s->checksum_ptr = s->buffer;
        }
    }
    s->buf_ptr = s->buffer;
}
 
void avio_w8(AVIOContext *s, int b)
{
    av_assert2(b >= -128 && b <= 255);
    *s->buf_ptr = b;
    s->buf_ptr++;
    /* Drain the buffer as soon as it is full. */
    if (s->buf_ptr >= s->buf_end)
        flush_buffer(s);
}
 
void ffio_fill(AVIOContext *s, int b, int count)
{
    int remaining = count;

    /* Fill in buffer-sized chunks, flushing whenever the buffer fills. */
    while (remaining > 0) {
        int chunk = FFMIN(s->buf_end - s->buf_ptr, remaining);
        memset(s->buf_ptr, b, chunk);
        s->buf_ptr += chunk;

        if (s->buf_ptr >= s->buf_end)
            flush_buffer(s);

        remaining -= chunk;
    }
}
 
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
{
    /* Direct mode (with no checksum running) bypasses the buffer entirely. */
    if (s->direct && !s->update_checksum) {
        avio_flush(s);
        writeout(s, buf, size);
        return;
    }
    /* Otherwise copy through the internal buffer in chunks. */
    while (size > 0) {
        int chunk = FFMIN(s->buf_end - s->buf_ptr, size);
        memcpy(s->buf_ptr, buf, chunk);
        s->buf_ptr += chunk;

        if (s->buf_ptr >= s->buf_end)
            flush_buffer(s);

        buf  += chunk;
        size -= chunk;
    }
}
 
/* Push any buffered output to the sink and clear the pending-flush flag. */
void avio_flush(AVIOContext *s)
{
flush_buffer(s);
s->must_flush = 0;
}
 
int64_t avio_seek(AVIOContext *s, int64_t offset, int whence)
{
int64_t offset1;
int64_t pos;
int force = whence & AVSEEK_FORCE;
whence &= ~AVSEEK_FORCE;

if(!s)
return AVERROR(EINVAL);

/* File position of the first byte of the current buffer; when reading,
* s->pos points *past* the buffered data. */
pos = s->pos - (s->write_flag ? 0 : (s->buf_end - s->buffer));

if (whence != SEEK_CUR && whence != SEEK_SET)
return AVERROR(EINVAL);

if (whence == SEEK_CUR) {
offset1 = pos + (s->buf_ptr - s->buffer);
if (offset == 0)
/* avio_tell() fast path: no state is modified. */
return offset1;
offset += offset1;
}
offset1 = offset - pos;
if (!s->must_flush && (!s->direct || !s->seek) &&
offset1 >= 0 && offset1 <= (s->buf_end - s->buffer)) {
/* can do the seek inside the buffer */
s->buf_ptr = s->buffer + offset1;
} else if ((!s->seekable ||
offset1 <= s->buf_end + SHORT_SEEK_THRESHOLD - s->buffer) &&
!s->write_flag && offset1 >= 0 &&
(!s->direct || !s->seek) &&
(whence != SEEK_END || force)) {
/* Short forward seek while reading: read-and-discard is cheaper
* than (or the only alternative to) a protocol-level seek. */
while(s->pos < offset && !s->eof_reached)
fill_buffer(s);
if (s->eof_reached)
return AVERROR_EOF;
s->buf_ptr = s->buf_end + offset - s->pos;
} else {
int64_t res;

/* Real protocol seek: flush pending output first. */
if (s->write_flag) {
flush_buffer(s);
s->must_flush = 1;
}
if (!s->seek)
return AVERROR(EPIPE);
if ((res = s->seek(s->opaque, offset, SEEK_SET)) < 0)
return res;
s->seek_count ++;
/* Buffered read data is now stale. */
if (!s->write_flag)
s->buf_end = s->buffer;
s->buf_ptr = s->buffer;
s->pos = offset;
}
s->eof_reached = 0;
return offset;
}
 
/* Advance the stream position by offset bytes (a relative seek). */
int64_t avio_skip(AVIOContext *s, int64_t offset)
{
return avio_seek(s, offset, SEEK_CUR);
}
 
int64_t avio_size(AVIOContext *s)
{
    int64_t size;

    if (!s)
        return AVERROR(EINVAL);
    if (!s->seek)
        return AVERROR(ENOSYS);

    /* Preferred path: the protocol reports its size directly. */
    size = s->seek(s->opaque, 0, AVSEEK_SIZE);
    if (size >= 0)
        return size;

    /* Fallback: seek to the last byte, then restore the old position. */
    size = s->seek(s->opaque, -1, SEEK_END);
    if (size < 0)
        return size;
    size++;
    s->seek(s->opaque, s->pos, SEEK_SET);
    return size;
}
 
/* feof() equivalent: returns non-zero only if EOF is (still) reached.
* A previously latched EOF is cleared and re-probed with one fill_buffer()
* call, so EOF is only reported if it reoccurs. */
int url_feof(AVIOContext *s)
{
if(!s)
return 0;
if(s->eof_reached){
s->eof_reached=0;
fill_buffer(s);
}
return s->eof_reached;
}
 
void avio_wl32(AVIOContext *s, unsigned int val)
{
    int shift;

    /* Emit the four bytes least-significant first. */
    for (shift = 0; shift < 32; shift += 8)
        avio_w8(s, (uint8_t)(val >> shift));
}
 
void avio_wb32(AVIOContext *s, unsigned int val)
{
    int shift;

    /* Emit the four bytes most-significant first. */
    for (shift = 24; shift >= 0; shift -= 8)
        avio_w8(s, (uint8_t)(val >> shift));
}
 
int avio_put_str(AVIOContext *s, const char *str)
{
    int total = 1;                 /* the terminating NUL byte */

    if (!str) {
        /* NULL is encoded as just the terminator. */
        avio_w8(s, 0);
        return total;
    }
    total += strlen(str);
    avio_write(s, (const unsigned char *)str, total);
    return total;
}
 
/* Transcode the UTF-8 string str to UTF-16LE and write it, including a
* UTF-16 NUL terminator; returns the number of bytes written. */
int avio_put_str16le(AVIOContext *s, const char *str)
{
const uint8_t *q = str;
int ret = 0;

while (*q) {
uint32_t ch;
uint16_t tmp;

/* Decode one code point, then emit its UTF-16 code unit(s). */
GET_UTF8(ch, *q++, break;)
PUT_UTF16(ch, tmp, avio_wl16(s, tmp); ret += 2;)
}
/* Trailing 16-bit NUL terminator. */
avio_wl16(s, 0);
ret += 2;
return ret;
}
 
int ff_get_v_length(uint64_t val)
{
    /* Count 7-bit groups; zero still occupies one byte. */
    int count = 0;

    do {
        count++;
        val >>= 7;
    } while (val);

    return count;
}
 
void ff_put_v(AVIOContext *bc, uint64_t val)
{
    /* Variable-length encoding: 7 data bits per byte, most-significant
     * group first; bit 7 set on every byte except the last. */
    int shift;

    for (shift = 7 * (ff_get_v_length(val) - 1); shift > 0; shift -= 7)
        avio_w8(bc, 0x80 | (uint8_t)(val >> shift));

    avio_w8(bc, val & 0x7f);
}
 
void avio_wl64(AVIOContext *s, uint64_t val)
{
    /* Low 32-bit half first, then the high half. */
    avio_wl32(s, (uint32_t)val);
    avio_wl32(s, (uint32_t)(val >> 32));
}
 
void avio_wb64(AVIOContext *s, uint64_t val)
{
    uint32_t hi = (uint32_t)(val >> 32);
    uint32_t lo = (uint32_t)val;

    /* High 32-bit half first, then the low half. */
    avio_wb32(s, hi);
    avio_wb32(s, lo);
}
 
void avio_wl16(AVIOContext *s, unsigned int val)
{
    /* Low byte, then high byte. */
    avio_w8(s, val & 0xff);
    avio_w8(s, (int)val >> 8);
}
 
void avio_wb16(AVIOContext *s, unsigned int val)
{
    /* High byte, then low byte. */
    avio_w8(s, (int)val >> 8);
    avio_w8(s, val & 0xff);
}
 
void avio_wl24(AVIOContext *s, unsigned int val)
{
    /* Three bytes, least-significant first. */
    avio_w8(s, val & 0xff);
    avio_w8(s, (val >> 8) & 0xff);
    avio_w8(s, (int)val >> 16);
}
 
void avio_wb24(AVIOContext *s, unsigned int val)
{
    /* Three bytes, most-significant first. */
    avio_w8(s, (int)val >> 16);
    avio_w8(s, (val >> 8) & 0xff);
    avio_w8(s, val & 0xff);
}
 
/* Input stream */
 
/* Refill the read buffer from read_packet(), appending after the existing
* data when another packet still fits, otherwise restarting at the front. */
static void fill_buffer(AVIOContext *s)
{
int max_buffer_size = s->max_packet_size ?
s->max_packet_size : IO_BUFFER_SIZE;
uint8_t *dst = s->buf_end - s->buffer + max_buffer_size < s->buffer_size ?
s->buf_end : s->buffer;
int len = s->buffer_size - (dst - s->buffer);

/* can't fill the buffer without read_packet, just set EOF if appropriate */
if (!s->read_packet && s->buf_ptr >= s->buf_end)
s->eof_reached = 1;

/* no need to do anything if EOF already reached */
if (s->eof_reached)
return;

/* About to overwrite the buffer from the start: fold the not-yet-hashed
* bytes into the running checksum first. */
if (s->update_checksum && dst == s->buffer) {
if (s->buf_end > s->checksum_ptr)
s->checksum = s->update_checksum(s->checksum, s->checksum_ptr,
s->buf_end - s->checksum_ptr);
s->checksum_ptr = s->buffer;
}

/* make buffer smaller in case it ended up large after probing */
if (s->read_packet && s->buffer_size > max_buffer_size) {
if (dst == s->buffer) {
ffio_set_buf_size(s, max_buffer_size);

s->checksum_ptr = dst = s->buffer;
}
av_assert0(len >= max_buffer_size);
len = max_buffer_size;
}

if (s->read_packet)
len = s->read_packet(s->opaque, dst, len);
else
len = 0;
if (len <= 0) {
/* do not modify buffer if EOF reached so that a seek back can
be done without rereading data */
s->eof_reached = 1;
if (len < 0)
s->error = len;
} else {
s->pos += len;
s->buf_ptr = dst;
s->buf_end = dst + len;
s->bytes_read += len;
}
}
 
unsigned long ff_crc04C11DB7_update(unsigned long checksum, const uint8_t *buf,
unsigned int len)
{
return av_crc(av_crc_get_table(AV_CRC_32_IEEE), checksum, buf, len);
}
 
unsigned long ffio_get_checksum(AVIOContext *s)
{
    /* Fold in the bytes consumed since the last update, then disarm
     * checksumming and hand back the final value. */
    int pending = s->buf_ptr - s->checksum_ptr;

    s->checksum        = s->update_checksum(s->checksum, s->checksum_ptr, pending);
    s->update_checksum = NULL;
    return s->checksum;
}
 
void ffio_init_checksum(AVIOContext *s,
                        unsigned long (*update_checksum)(unsigned long c, const uint8_t *p, unsigned int len),
                        unsigned long checksum)
{
    /* Arm (or disarm, if the callback is NULL) running checksum updates,
     * starting from the current buffer position. */
    s->update_checksum = update_checksum;
    if (!update_checksum)
        return;
    s->checksum     = checksum;
    s->checksum_ptr = s->buf_ptr;
}
 
/* XXX: put an inline version */
int avio_r8(AVIOContext *s)
{
if (s->buf_ptr >= s->buf_end)
fill_buffer(s);
if (s->buf_ptr < s->buf_end)
return *s->buf_ptr++;
return 0;
}
 
int avio_read(AVIOContext *s, unsigned char *buf, int size)
{
int len, size1;

size1 = size;
while (size > 0) {
len = s->buf_end - s->buf_ptr;
if (len > size)
len = size;
if (len == 0 || s->write_flag) {
/* Direct mode or oversized request: bypass the internal buffer
* (not possible while a checksum is being accumulated). */
if((s->direct || size > s->buffer_size) && !s->update_checksum){
if(s->read_packet)
len = s->read_packet(s->opaque, buf, size);
if (len <= 0) {
/* do not modify buffer if EOF reached so that a seek back can
be done without rereading data */
s->eof_reached = 1;
if(len<0)
s->error= len;
break;
} else {
s->pos += len;
s->bytes_read += len;
size -= len;
buf += len;
/* Data went straight to the caller, so mark the buffer empty. */
s->buf_ptr = s->buffer;
s->buf_end = s->buffer/* + len*/;
}
} else {
fill_buffer(s);
len = s->buf_end - s->buf_ptr;
if (len == 0)
break;
}
} else {
/* Serve the request from already-buffered bytes. */
memcpy(buf, s->buf_ptr, len);
buf += len;
s->buf_ptr += len;
size -= len;
}
}
/* Nothing read at all: report the underlying cause instead of 0. */
if (size1 == size) {
if (s->error) return s->error;
if (url_feof(s)) return AVERROR_EOF;
}
return size1 - size;
}
 
int ffio_read_indirect(AVIOContext *s, unsigned char *buf, int size, const unsigned char **data)
{
    /* Not enough contiguous buffered bytes (or a write context):
     * fall back to a plain copying read into buf. */
    if (s->write_flag || s->buf_end - s->buf_ptr < size) {
        *data = buf;
        return avio_read(s, buf, size);
    }

    /* Zero-copy path: hand out a pointer straight into the IO buffer. */
    *data = s->buf_ptr;
    s->buf_ptr += size;
    return size;
}
 
/* Read at most one packet's worth of data (up to size bytes) into buf;
* may return fewer bytes than requested. */
int ffio_read_partial(AVIOContext *s, unsigned char *buf, int size)
{
int len;

if (size < 0)
return -1;

/* NOTE(review): read_packet together with write_flag looks like a
* read-write context special case — data is fetched directly, bypassing
* the buffer. */
if (s->read_packet && s->write_flag) {
len = s->read_packet(s->opaque, buf, size);
if (len > 0)
s->pos += len;
return len;
}

len = s->buf_end - s->buf_ptr;
if (len == 0) {
/* Reset the buf_end pointer to the start of the buffer, to make sure
* the fill_buffer call tries to read as much data as fits into the
* full buffer, instead of just what space is left after buf_end.
* This avoids returning partial packets at the end of the buffer,
* for packet based inputs.
*/
s->buf_end = s->buf_ptr = s->buffer;
fill_buffer(s);
len = s->buf_end - s->buf_ptr;
}
if (len > size)
len = size;
memcpy(buf, s->buf_ptr, len);
s->buf_ptr += len;
/* Zero bytes: surface the underlying error or EOF condition. */
if (!len) {
if (s->error) return s->error;
if (url_feof(s)) return AVERROR_EOF;
}
return len;
}
 
unsigned int avio_rl16(AVIOContext *s)
{
    /* Little endian: low byte arrives first. */
    unsigned int lo = avio_r8(s);
    unsigned int hi = avio_r8(s);

    return lo | (hi << 8);
}
 
unsigned int avio_rl24(AVIOContext *s)
{
    /* Little endian: low 16 bits first, then the top byte. */
    unsigned int lo = avio_rl16(s);
    unsigned int hi = avio_r8(s);

    return lo | (hi << 16);
}
 
unsigned int avio_rl32(AVIOContext *s)
{
    /* Little endian: low half first. */
    unsigned int lo = avio_rl16(s);
    unsigned int hi = avio_rl16(s);

    return lo | (hi << 16);
}
 
uint64_t avio_rl64(AVIOContext *s)
{
    /* Little endian: low 32-bit half first. */
    uint64_t lo = avio_rl32(s);
    uint64_t hi = avio_rl32(s);

    return lo | (hi << 32);
}
 
unsigned int avio_rb16(AVIOContext *s)
{
    /* Big endian: high byte arrives first. */
    unsigned int hi = avio_r8(s);
    unsigned int lo = avio_r8(s);

    return (hi << 8) | lo;
}
 
unsigned int avio_rb24(AVIOContext *s)
{
    /* Big endian: high 16 bits first, then the low byte. */
    unsigned int hi = avio_rb16(s);
    unsigned int lo = avio_r8(s);

    return (hi << 8) | lo;
}
/** Read a 32-bit big-endian value as two 16-bit halves. */
unsigned int avio_rb32(AVIOContext *s)
{
    unsigned int hi16 = avio_rb16(s);
    return (hi16 << 16) | avio_rb16(s);
}
 
/**
 * Read characters up to and including the next '\n' (or NUL/EOF).
 * At most maxlen-1 characters are stored; buf is always NUL-terminated.
 * The newline, if encountered within the limit, is stored too.
 * @return number of characters stored (excluding the terminator)
 */
int ff_get_line(AVIOContext *s, char *buf, int maxlen)
{
    int len = 0;

    for (;;) {
        char ch = avio_r8(s);
        /* Store while there is room; keep consuming past the limit. */
        if (ch && len < maxlen - 1)
            buf[len++] = ch;
        if (ch == '\n' || !ch)
            break;
    }

    buf[len] = 0;
    return len;
}
 
/**
 * Read a NUL-terminated string into buf.
 * At most buflen-1 bytes are stored (buf is always NUL-terminated);
 * the stream is advanced until a NUL or until maxlen bytes are consumed.
 * @return number of bytes consumed from the stream, or AVERROR(EINVAL)
 *         if buflen is non-positive
 */
int avio_get_str(AVIOContext *s, int maxlen, char *buf, int buflen)
{
    int i;

    if (buflen <= 0)
        return AVERROR(EINVAL);
    // reserve 1 byte for terminating 0
    buflen = FFMIN(buflen - 1, maxlen);
    for (i = 0; i < buflen; i++)
        if (!(buf[i] = avio_r8(s)))
            return i + 1;
    buf[i] = 0;
    /* Output buffer full: keep consuming input until the NUL or maxlen. */
    for (; i < maxlen; i++)
        if (!avio_r8(s))
            return i + 1;
    return maxlen;
}
 
/*
 * Generate avio_get_str16le()/avio_get_str16be(): read a NUL-terminated
 * UTF-16 string (of the given endianness) and store it as UTF-8 in buf.
 * At most buflen-1 bytes are stored and buf is always NUL-terminated;
 * reading stops after maxlen bytes of input or at the terminating NUL.
 * Returns the number of input bytes consumed.
 */
#define GET_STR16(type, read) \
    int avio_get_str16 ##type(AVIOContext *pb, int maxlen, char *buf, int buflen)\
{\
    char* q = buf;\
    int ret = 0;\
    if (buflen <= 0) \
        return AVERROR(EINVAL); \
    while (ret + 1 < maxlen) {\
        uint8_t tmp;\
        uint32_t ch;\
        GET_UTF16(ch, (ret += 2) <= maxlen ? read(pb) : 0, break;)\
        if (!ch)\
            break;\
        PUT_UTF8(ch, tmp, if (q - buf < buflen - 1) *q++ = tmp;)\
    }\
    *q = 0;\
    return ret;\
}\

GET_STR16(le, avio_rl16)
GET_STR16(be, avio_rb16)

#undef GET_STR16
 
/** Read a 64-bit big-endian value as two 32-bit halves. */
uint64_t avio_rb64(AVIOContext *s)
{
    uint64_t hi = avio_rb32(s);
    uint64_t lo = avio_rb32(s);
    return (hi << 32) | lo;
}
 
/**
 * Read a variable-length integer: big-endian groups of 7 data bits per
 * byte, with the MSB set on every continuation byte.
 */
uint64_t ffio_read_varlen(AVIOContext *bc)
{
    uint64_t result = 0;
    int byte;

    do {
        byte   = avio_r8(bc);
        result = (result << 7) | (byte & 0x7f);
    } while (byte & 0x80);

    return result;
}
 
/**
 * Create and initialize an AVIOContext on top of an already-opened
 * URLContext. The I/O buffer is sized to the protocol's max packet size
 * when one is set (no need to buffer more than one packet), otherwise
 * to IO_BUFFER_SIZE.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ffio_fdopen(AVIOContext **s, URLContext *h)
{
    uint8_t *buffer;
    int buffer_size, max_packet_size;

    max_packet_size = h->max_packet_size;
    if (max_packet_size) {
        buffer_size = max_packet_size; /* no need to bufferize more than one packet */
    } else {
        buffer_size = IO_BUFFER_SIZE;
    }
    buffer = av_malloc(buffer_size);
    if (!buffer)
        return AVERROR(ENOMEM);

    *s = avio_alloc_context(buffer, buffer_size, h->flags & AVIO_FLAG_WRITE, h,
                            (void*)ffurl_read, (void*)ffurl_write, (void*)ffurl_seek);
    if (!*s) {
        av_free(buffer);
        return AVERROR(ENOMEM);
    }
    /* Propagate protocol properties onto the new context. */
    (*s)->direct = h->flags & AVIO_FLAG_DIRECT;
    (*s)->seekable = h->is_streamed ? 0 : AVIO_SEEKABLE_NORMAL;
    (*s)->max_packet_size = max_packet_size;
    if(h->prot) {
        /* Optional protocol callbacks for pausing/seeking streamed media. */
        (*s)->read_pause = (int (*)(void *, int))h->prot->url_read_pause;
        (*s)->read_seek  = (int64_t (*)(void *, int, int64_t, int))h->prot->url_read_seek;
    }
    (*s)->av_class = &ffio_url_class;
    return 0;
}
 
/**
 * Ensure that at least buf_size bytes read from now on can be seeked
 * back, by enlarging the internal buffer of a non-seekable, read-only
 * context when necessary. No-op for seekable contexts or when the
 * current buffer is already large enough.
 * @return 0 on success, AVERROR(EINVAL) on invalid/overflowing size,
 *         AVERROR(ENOMEM) on allocation failure
 */
int ffio_ensure_seekback(AVIOContext *s, int buf_size)
{
    uint8_t *buffer;
    int max_buffer_size = s->max_packet_size ?
                          s->max_packet_size : IO_BUFFER_SIZE;
    int filled = s->buf_ptr - s->buffer;

    /* Fix: the unchecked "buf_size += ..." could overflow a signed int
     * (undefined behavior) for very large requests; reject them. */
    if (buf_size < 0 || buf_size > INT_MAX - filled - max_buffer_size)
        return AVERROR(EINVAL);

    /* Keep the already-consumed prefix plus room for one more refill. */
    buf_size += filled + max_buffer_size;

    if (buf_size < s->buffer_size || s->seekable)
        return 0;
    av_assert0(!s->write_flag);

    buffer = av_malloc(buf_size);
    if (!buffer)
        return AVERROR(ENOMEM);

    /* Preserve existing buffered data and re-base the cursor pointers. */
    memcpy(buffer, s->buffer, s->buffer_size);
    av_free(s->buffer);
    s->buf_ptr = buffer + (s->buf_ptr - s->buffer);
    s->buf_end = buffer + (s->buf_end - s->buffer);
    s->buffer = buffer;
    s->buffer_size = buf_size;
    return 0;
}
 
/**
 * Replace the context's internal buffer with a freshly allocated one of
 * buf_size bytes; any currently buffered data is discarded.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ffio_set_buf_size(AVIOContext *s, int buf_size)
{
    uint8_t *new_buf = av_malloc(buf_size);

    if (!new_buf)
        return AVERROR(ENOMEM);

    av_free(s->buffer);
    s->buffer      = new_buf;
    s->buffer_size = buf_size;
    s->buf_ptr     = new_buf;
    url_resetbuf(s, s->write_flag ? AVIO_FLAG_WRITE : AVIO_FLAG_READ);
    return 0;
}
 
/**
 * Reset buffer bookkeeping for the given direction: in write mode the
 * whole buffer is writable (buf_end at its end); in read mode the
 * buffer starts out empty (buf_end at its start).
 */
static int url_resetbuf(AVIOContext *s, int flags)
{
    av_assert1(flags == AVIO_FLAG_WRITE || flags == AVIO_FLAG_READ);

    s->write_flag = !!(flags & AVIO_FLAG_WRITE);
    s->buf_end    = s->write_flag ? s->buffer + s->buffer_size
                                  : s->buffer;
    return 0;
}
 
/**
 * Rewind the context so that the given probe data (the first buf_size
 * bytes of the stream, owned by *bufp) is replayed before whatever is
 * currently buffered. Takes ownership of *bufp: on failure it is freed
 * and *bufp set to NULL; on success it becomes the context's buffer.
 * Requires the probe data and the current buffer to touch or overlap.
 * @return 0 on success, negative AVERROR on failure
 */
int ffio_rewind_with_probe_data(AVIOContext *s, unsigned char **bufp, int buf_size)
{
    int64_t buffer_start;
    int buffer_size;
    int overlap, new_size, alloc_size;
    uint8_t *buf = *bufp;

    if (s->write_flag) {
        av_freep(bufp);
        return AVERROR(EINVAL);
    }

    buffer_size = s->buf_end - s->buffer;

    /* the buffers must touch or overlap */
    if ((buffer_start = s->pos - buffer_size) > buf_size) {
        av_freep(bufp);
        return AVERROR(EINVAL);
    }

    /* Portion of the current buffer already contained in the probe data. */
    overlap = buf_size - buffer_start;
    new_size = buf_size + buffer_size - overlap;

    /* Never shrink below the old buffer size. */
    alloc_size = FFMAX(s->buffer_size, new_size);
    if (alloc_size > buf_size)
        if (!(buf = (*bufp) = av_realloc_f(buf, 1, alloc_size)))
            return AVERROR(ENOMEM);

    /* Append the non-overlapping tail of the current buffer. */
    if (new_size > buf_size) {
        memcpy(buf + buf_size, s->buffer + overlap, buffer_size - overlap);
        buf_size = new_size;
    }

    /* Adopt the combined buffer and rewind the read cursor to its start. */
    av_free(s->buffer);
    s->buf_ptr = s->buffer = buf;
    s->buffer_size = alloc_size;
    s->pos = buf_size;
    s->buf_end = s->buf_ptr + buf_size;
    s->eof_reached = 0;
    s->must_flush = 0;

    return 0;
}
 
/**
 * Open filename for avio access.
 * Convenience wrapper around avio_open2() with no interrupt callback
 * and no options dictionary.
 */
int avio_open(AVIOContext **s, const char *filename, int flags)
{
    return avio_open2(s, filename, flags, NULL, NULL);
}
 
/**
 * Open filename via the URL protocol layer and wrap the result in a
 * buffered AVIOContext stored in *s.
 * @return 0 on success, a negative AVERROR code otherwise
 */
int avio_open2(AVIOContext **s, const char *filename, int flags,
               const AVIOInterruptCB *int_cb, AVDictionary **options)
{
    URLContext *h = NULL;
    int ret;

    ret = ffurl_open(&h, filename, flags, int_cb, options);
    if (ret < 0)
        return ret;

    ret = ffio_fdopen(s, h);
    if (ret < 0) {
        /* Undo the protocol open if the context could not be created. */
        ffurl_close(h);
        return ret;
    }
    return 0;
}
 
/**
 * Flush, free and close the AVIOContext and its underlying URLContext.
 * A NULL context is accepted and treated as success.
 * @return the result of closing the underlying protocol
 */
int avio_close(AVIOContext *s)
{
    URLContext *h;

    if (!s)
        return 0;

    avio_flush(s);
    h = s->opaque;
    av_freep(&s->buffer);

    if (s->write_flag)
        av_log(s, AV_LOG_DEBUG, "Statistics: %d seeks, %d writeouts\n",
               s->seek_count, s->writeout_count);
    else
        av_log(s, AV_LOG_DEBUG, "Statistics: %"PRId64" bytes read, %d seeks\n",
               s->bytes_read, s->seek_count);

    av_free(s);
    return ffurl_close(h);
}
 
/** Close *s via avio_close() and set the caller's pointer to NULL. */
int avio_closep(AVIOContext **s)
{
    AVIOContext *ctx = *s;

    *s = NULL;
    return avio_close(ctx);
}
 
/**
 * printf-style formatted write to the context, via a fixed 4 KiB
 * stack buffer (longer output is truncated on write).
 * @return the value vsnprintf() returned: the would-be length on
 *         success (possibly > bytes actually written), negative on
 *         formatting error
 */
int avio_printf(AVIOContext *s, const char *fmt, ...)
{
    va_list ap;
    char buf[4096];
    int ret;

    va_start(ap, fmt);
    ret = vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);

    /* Fix: on a vsnprintf error the buffer contents are indeterminate;
     * the old code called strlen() on it (reading uninitialized memory).
     * Bail out before writing anything. */
    if (ret < 0)
        return ret;

    /* vsnprintf never stores more than sizeof(buf)-1 chars + NUL. */
    avio_write(s, buf, FFMIN(ret, (int)sizeof(buf) - 1));
    return ret;
}
 
/**
 * Pause or resume playback of a network stream.
 * @return AVERROR(ENOSYS) if the protocol has no pause callback
 */
int avio_pause(AVIOContext *s, int pause)
{
    return s->read_pause ? s->read_pause(s->opaque, pause)
                         : AVERROR(ENOSYS);
}
 
/**
 * Seek to a given timestamp using the protocol's read_seek callback
 * (e.g. RTSP). On success the internal buffer is flushed and s->pos is
 * resynchronized with the protocol's new position.
 * @return the read_seek result, or AVERROR(ENOSYS) if unsupported
 */
int64_t avio_seek_time(AVIOContext *s, int stream_index,
                       int64_t timestamp, int flags)
{
    URLContext *h = s->opaque;
    int64_t ret;
    if (!s->read_seek)
        return AVERROR(ENOSYS);
    ret = s->read_seek(h, stream_index, timestamp, flags);
    if (ret >= 0) {
        int64_t pos;
        s->buf_ptr = s->buf_end; // Flush buffer
        /* Ask the protocol where it now is; ENOSYS is tolerated. */
        pos = s->seek(h, 0, SEEK_CUR);
        if (pos >= 0)
            s->pos = pos;
        else if (pos != AVERROR(ENOSYS))
            ret = pos;
    }
    return ret;
}
 
/* output in a dynamic buffer */
 
/* State for the in-memory dynamic output buffer (avio_open_dyn_buf). */
typedef struct DynBuffer {
    int pos, size, allocated_size;  /* write cursor, high-water mark, capacity */
    uint8_t *buffer;                /* heap-grown destination buffer */
    int io_buffer_size;             /* size of the trailing io_buffer */
    uint8_t io_buffer[1];           /* inline scratch buffer handed to avio_alloc_context */
} DynBuffer;
 
/**
 * Write callback for the dynamic buffer: append buf_size bytes at the
 * current position, growing the destination geometrically (x1.5) as
 * needed. Total size is capped at INT_MAX/2.
 * @return buf_size on success, -1 on overflow, negative AVERROR on OOM
 */
static int dyn_buf_write(void *opaque, uint8_t *buf, int buf_size)
{
    DynBuffer *d = opaque;
    unsigned new_size, new_allocated_size;

    /* reallocate buffer if needed */
    new_size = d->pos + buf_size;
    new_allocated_size = d->allocated_size;
    /* Reject unsigned wraparound and sizes beyond INT_MAX/2. */
    if (new_size < d->pos || new_size > INT_MAX/2)
        return -1;
    while (new_size > new_allocated_size) {
        if (!new_allocated_size)
            new_allocated_size = new_size;
        else
            new_allocated_size += new_allocated_size / 2 + 1;
    }

    if (new_allocated_size > d->allocated_size) {
        int err;
        if ((err = av_reallocp(&d->buffer, new_allocated_size)) < 0) {
            /* Buffer was freed by av_reallocp(); reset bookkeeping. */
            d->allocated_size = 0;
            d->size = 0;
            return err;
        }
        d->allocated_size = new_allocated_size;
    }
    memcpy(d->buffer + d->pos, buf, buf_size);
    d->pos = new_size;
    /* Track the high-water mark (pos may have been seeked backwards). */
    if (d->pos > d->size)
        d->size = d->pos;
    return buf_size;
}
 
/**
 * Write callback for the packetized dynamic buffer: each packet is
 * stored prefixed with its 32-bit big-endian length.
 */
static int dyn_packet_buf_write(void *opaque, uint8_t *buf, int buf_size)
{
    unsigned char header[4];
    int ret;

    /* Header first... */
    AV_WB32(header, buf_size);
    if ((ret = dyn_buf_write(opaque, header, 4)) < 0)
        return ret;

    /* ...then the payload. */
    return dyn_buf_write(opaque, buf, buf_size);
}
 
/**
 * Seek callback for the dynamic buffer: reposition the write cursor.
 * Only non-negative positions up to INT32_MAX are accepted.
 */
static int64_t dyn_buf_seek(void *opaque, int64_t offset, int whence)
{
    DynBuffer *d = opaque;
    int64_t target = offset;

    switch (whence) {
    case SEEK_CUR: target += d->pos;  break;
    case SEEK_END: target += d->size; break;
    }

    if (target < 0 || target > 0x7fffffffLL)
        return -1;
    d->pos = target;
    return 0;
}
 
/**
 * Allocate a DynBuffer and wrap it in a write-mode AVIOContext.
 * max_packet_size selects packetized mode (length-prefixed packets,
 * no seeking); 0 selects plain streaming mode with seek support.
 * @return 0 on success, -1 on size overflow, AVERROR(ENOMEM) on OOM
 */
static int url_open_dyn_buf_internal(AVIOContext **s, int max_packet_size)
{
    DynBuffer *d;
    unsigned io_buffer_size = max_packet_size ? max_packet_size : 1024;

    /* Guard against unsigned overflow in the allocation size. */
    if (sizeof(DynBuffer) + io_buffer_size < io_buffer_size)
        return -1;
    d = av_mallocz(sizeof(DynBuffer) + io_buffer_size);
    if (!d)
        return AVERROR(ENOMEM);
    d->io_buffer_size = io_buffer_size;
    *s = avio_alloc_context(d->io_buffer, d->io_buffer_size, 1, d, NULL,
                            max_packet_size ? dyn_packet_buf_write : dyn_buf_write,
                            max_packet_size ? NULL : dyn_buf_seek);
    if(!*s) {
        av_free(d);
        return AVERROR(ENOMEM);
    }
    (*s)->max_packet_size = max_packet_size;
    return 0;
}
 
/**
 * Open a non-packetized dynamic (in-memory, growable) output buffer.
 * Close and retrieve the data with avio_close_dyn_buf().
 */
int avio_open_dyn_buf(AVIOContext **s)
{
    return url_open_dyn_buf_internal(s, 0);
}
 
/**
 * Open a packetized dynamic buffer; each written packet is stored with
 * a 32-bit big-endian length prefix. max_packet_size must be positive.
 */
int ffio_open_dyn_packet_buf(AVIOContext **s, int max_packet_size)
{
    return max_packet_size > 0
         ? url_open_dyn_buf_internal(s, max_packet_size)
         : -1;
}
 
/**
 * Close a dynamic buffer, handing ownership of the collected data to
 * the caller via *pbuffer (free with av_free()). Non-packetized buffers
 * get FF_INPUT_BUFFER_PADDING_SIZE zero bytes appended (not counted in
 * the returned size) so the data can be fed to decoders directly.
 * @return size of the collected data, excluding padding
 */
int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer)
{
    DynBuffer *d = s->opaque;
    int size;
    static const char padbuf[FF_INPUT_BUFFER_PADDING_SIZE] = {0};
    int padding = 0;

    /* don't attempt to pad fixed-size packet buffers */
    if (!s->max_packet_size) {
        avio_write(s, padbuf, sizeof(padbuf));
        padding = FF_INPUT_BUFFER_PADDING_SIZE;
    }

    avio_flush(s);

    /* Transfer the data buffer to the caller, then free the wrappers. */
    *pbuffer = d->buffer;
    size = d->size;
    av_free(d);
    av_free(s);
    return size - padding;
}
 
/**
 * Write callback for the null buffer: discard the data, advancing only
 * the position and high-water-mark counters.
 */
static int null_buf_write(void *opaque, uint8_t *buf, int buf_size)
{
    DynBuffer *d = opaque;

    d->pos += buf_size;
    d->size  = FFMAX(d->size, d->pos);
    return buf_size;
}
 
/**
 * Open a "null" dynamic buffer that counts written bytes but stores
 * nothing; retrieve the count with ffio_close_null_buf().
 */
int ffio_open_null_buf(AVIOContext **s)
{
    int ret = url_open_dyn_buf_internal(s, 0);

    /* Swap in the discarding write callback. */
    if (ret >= 0)
        (*s)->write_packet = null_buf_write;

    return ret;
}
 
/**
 * Close a null buffer opened with ffio_open_null_buf().
 * @return total number of bytes that were "written"
 */
int ffio_close_null_buf(AVIOContext *s)
{
    DynBuffer *d = s->opaque;
    int total;

    avio_flush(s);

    total = d->size;
    av_free(d);
    av_free(s);
    return total;
}
/contrib/sdk/sources/ffmpeg/libavformat/avisynth.c
0,0 → 1,665
/*
* Avi/AvxSynth support
* Copyright (c) 2012 AvxSynth Team.
*
* This file is part of FFmpeg
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/internal.h"
#include "avformat.h"
#include "internal.h"
#include "libavcodec/internal.h"
 
// Enable function pointer definitions for runtime loading.
#define AVSC_NO_DECLSPEC
 
// Shut up ffmpeg error messages.
// avisynth_c.h contains inline functions that call these functions.
#undef malloc
#undef free
#undef printf
 
// Platform-specific directives for AviSynth vs AvxSynth.
#ifdef _WIN32
#include <windows.h>
#undef EXTERN_C
#include "compat/avisynth/avisynth_c.h"
#include "compat/avisynth/avisynth_c_25.h"
#define AVISYNTH_LIB "avisynth"
#else
#include <dlfcn.h>
#include "compat/avisynth/avxsynth_c.h"
#if defined (__APPLE__)
#define AVISYNTH_LIB "libavxsynth.dylib"
#else
#define AVISYNTH_LIB "libavxsynth.so"
#endif
 
#define LoadLibrary(x) dlopen(x, RTLD_NOW | RTLD_GLOBAL)
#define GetProcAddress dlsym
#define FreeLibrary dlclose
#endif
 
// AvxSynth doesn't have these colorspaces, so disable them
#ifndef _WIN32
#define avs_is_yv24(vi) 0
#define avs_is_yv16(vi) 0
#define avs_is_yv411(vi) 0
#define avs_is_y8(vi) 0
#endif
 
/* Function-pointer table for the dynamically loaded AviSynth/AvxSynth
 * library; each member is resolved with GetProcAddress/dlsym at runtime. */
typedef struct {
    void *library;  /* handle returned by LoadLibrary/dlopen */
#define AVSC_DECLARE_FUNC(name) name##_func name
    AVSC_DECLARE_FUNC(avs_bit_blt);
    AVSC_DECLARE_FUNC(avs_clip_get_error);
    AVSC_DECLARE_FUNC(avs_create_script_environment);
    AVSC_DECLARE_FUNC(avs_delete_script_environment);
    AVSC_DECLARE_FUNC(avs_get_audio);
    AVSC_DECLARE_FUNC(avs_get_error);
    AVSC_DECLARE_FUNC(avs_get_frame);
    AVSC_DECLARE_FUNC(avs_get_version);
    AVSC_DECLARE_FUNC(avs_get_video_info);
    AVSC_DECLARE_FUNC(avs_invoke);
    AVSC_DECLARE_FUNC(avs_release_clip);
    AVSC_DECLARE_FUNC(avs_release_value);
    AVSC_DECLARE_FUNC(avs_release_video_frame);
    AVSC_DECLARE_FUNC(avs_take_clip);
#undef AVSC_DECLARE_FUNC
} AviSynthLibrary;
 
/* Per-demuxer state: one script environment and clip, plus cursors for
 * interleaving video frames and audio samples. */
struct AviSynthContext {
    AVS_ScriptEnvironment *env;
    AVS_Clip *clip;
    const AVS_VideoInfo *vi;  /* clip properties (owned by the clip) */

    // avisynth_read_packet_video() iterates over this.
    int n_planes;
    const int *planes;

    int curr_stream;          /* round-robin stream cursor */
    int curr_frame;           /* next video frame to emit */
    int64_t curr_sample;      /* next audio sample to emit */

    int error;                /* sticky error flag: stop all further reads */

    // Linked list pointers.
    struct AviSynthContext *next;
};
typedef struct AviSynthContext AviSynthContext;
 
/* Plane iteration orders for packed, grey-only and planar YUV frames. */
static const int avs_planes_packed[1] = {0};
static const int avs_planes_grey[1]   = {AVS_PLANAR_Y};
static const int avs_planes_yuv[3]    = {AVS_PLANAR_Y, AVS_PLANAR_U, AVS_PLANAR_V};

// A conflict between C++ global objects, atexit, and dynamic loading requires
// us to register our own atexit handler to prevent double freeing.
static AviSynthLibrary *avs_library = NULL;
static int avs_atexit_called        = 0;

// Linked list of AviSynthContexts. An atexit handler destroys this list.
static AviSynthContext *avs_ctx_list = NULL;

static av_cold void avisynth_atexit_handler(void);
 
/* Load the AviSynth/AvxSynth shared library and resolve every entry
 * point into avs_library; registers the atexit cleanup handler.
 * Returns 0 on success, AVERROR_UNKNOWN on any load/resolve failure. */
static av_cold int avisynth_load_library(void) {
    avs_library = av_mallocz(sizeof(AviSynthLibrary));
    if (!avs_library)
        return AVERROR_UNKNOWN;

    avs_library->library = LoadLibrary(AVISYNTH_LIB);
    if (!avs_library->library)
        goto init_fail;

/* Resolve one symbol; optional symbols pass continue_on_fail=1. */
#define LOAD_AVS_FUNC(name, continue_on_fail) \
{ \
    avs_library->name = (void*)GetProcAddress(avs_library->library, #name); \
    if(!continue_on_fail && !avs_library->name) \
        goto fail; \
}
    LOAD_AVS_FUNC(avs_bit_blt, 0);
    LOAD_AVS_FUNC(avs_clip_get_error, 0);
    LOAD_AVS_FUNC(avs_create_script_environment, 0);
    LOAD_AVS_FUNC(avs_delete_script_environment, 0);
    LOAD_AVS_FUNC(avs_get_audio, 0);
    LOAD_AVS_FUNC(avs_get_error, 1); // New to AviSynth 2.6
    LOAD_AVS_FUNC(avs_get_frame, 0);
    LOAD_AVS_FUNC(avs_get_version, 0);
    LOAD_AVS_FUNC(avs_get_video_info, 0);
    LOAD_AVS_FUNC(avs_invoke, 0);
    LOAD_AVS_FUNC(avs_release_clip, 0);
    LOAD_AVS_FUNC(avs_release_value, 0);
    LOAD_AVS_FUNC(avs_release_video_frame, 0);
    LOAD_AVS_FUNC(avs_take_clip, 0);
#undef LOAD_AVS_FUNC

    atexit(avisynth_atexit_handler);
    return 0;

fail:
    FreeLibrary(avs_library->library);
init_fail:
    av_freep(&avs_library);
    return AVERROR_UNKNOWN;
}
 
// Note that avisynth_context_create and avisynth_context_destroy
// do not allocate or free the actual context! That is taken care of
// by libavformat.
// Note that avisynth_context_create and avisynth_context_destroy
// do not allocate or free the actual context! That is taken care of
// by libavformat.
/* Lazily load the library, create the script environment and prepend
 * this context to the global list. Returns 0 or a negative error. */
static av_cold int avisynth_context_create(AVFormatContext *s) {
    AviSynthContext *avs = (AviSynthContext *)s->priv_data;
    int ret;

    if (!avs_library) {
        if (ret = avisynth_load_library())
            return ret;
    }

    avs->env = avs_library->avs_create_script_environment(3);
    /* avs_get_error is optional (AviSynth 2.6+); check only if present. */
    if (avs_library->avs_get_error) {
        const char *error = avs_library->avs_get_error(avs->env);
        if (error) {
            av_log(s, AV_LOG_ERROR, "%s\n", error);
            return AVERROR_UNKNOWN;
        }
    }

    /* Prepend to the global context list (cleaned up by atexit). */
    if (!avs_ctx_list) {
        avs_ctx_list = avs;
    } else {
        avs->next = avs_ctx_list;
        avs_ctx_list = avs;
    }

    return 0;
}
 
/* Unlink the context from the global list and release its clip and
 * script environment. No-op after the atexit handler has run (the
 * library has already been unloaded then). */
static av_cold void avisynth_context_destroy(AviSynthContext *avs) {
    if (avs_atexit_called)
        return;

    /* Unlink from the singly-linked global list. */
    if (avs == avs_ctx_list) {
        avs_ctx_list = avs->next;
    } else {
        AviSynthContext *prev = avs_ctx_list;
        while (prev->next != avs)
            prev = prev->next;
        prev->next = avs->next;
    }

    if (avs->clip) {
        avs_library->avs_release_clip(avs->clip);
        avs->clip = NULL;
    }
    if (avs->env) {
        avs_library->avs_delete_script_environment(avs->env);
        avs->env = NULL;
    }
}
 
/* atexit hook: destroy every live context, then unload the library.
 * Sets avs_atexit_called so later destroys become no-ops. */
static av_cold void avisynth_atexit_handler(void) {
    AviSynthContext *avs = avs_ctx_list;

    while (avs) {
        /* Save next before destroy unlinks the node. */
        AviSynthContext *next = avs->next;
        avisynth_context_destroy(avs);
        avs = next;
    }
    FreeLibrary(avs_library->library);
    av_freep(&avs_library);

    avs_atexit_called = 1;
}
 
// Create AVStream from audio and video data.
/* Fill in the video AVStream from the clip's AVS_VideoInfo: codec
 * parameters, timing from the clip frame rate, and the pixel format /
 * plane layout used later by avisynth_read_packet_video().
 * Returns 0, or AVERROR_UNKNOWN for an unsupported colorspace. */
static int avisynth_create_stream_video(AVFormatContext *s, AVStream *st) {
    AviSynthContext *avs = s->priv_data;
    int planar = 0; // 0: packed, 1: YUV, 2: Y8

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_RAWVIDEO;
    st->codec->width = avs->vi->width;
    st->codec->height = avs->vi->height;

    /* One tick per frame; duration counted in frames. */
    st->time_base = (AVRational) {avs->vi->fps_denominator, avs->vi->fps_numerator};
    st->avg_frame_rate = (AVRational) {avs->vi->fps_numerator, avs->vi->fps_denominator};
    st->start_time = 0;
    st->duration = avs->vi->num_frames;
    st->nb_frames = avs->vi->num_frames;

    switch (avs->vi->pixel_type) {
#ifdef _WIN32
    /* AviSynth 2.6 colorspaces -- not available in AvxSynth. */
    case AVS_CS_YV24:
        st->codec->pix_fmt = AV_PIX_FMT_YUV444P;
        planar = 1;
        break;
    case AVS_CS_YV16:
        st->codec->pix_fmt = AV_PIX_FMT_YUV422P;
        planar = 1;
        break;
    case AVS_CS_YV411:
        st->codec->pix_fmt = AV_PIX_FMT_YUV411P;
        planar = 1;
        break;
    case AVS_CS_Y8:
        st->codec->pix_fmt = AV_PIX_FMT_GRAY8;
        planar = 2;
        break;
#endif
    case AVS_CS_BGR24:
        st->codec->pix_fmt = AV_PIX_FMT_BGR24;
        break;
    case AVS_CS_BGR32:
        st->codec->pix_fmt = AV_PIX_FMT_RGB32;
        break;
    case AVS_CS_YUY2:
        st->codec->pix_fmt = AV_PIX_FMT_YUYV422;
        break;
    case AVS_CS_YV12:
        st->codec->pix_fmt = AV_PIX_FMT_YUV420P;
        planar = 1;
        break;
    case AVS_CS_I420: // Is this even used anywhere?
        st->codec->pix_fmt = AV_PIX_FMT_YUV420P;
        planar = 1;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "unknown AviSynth colorspace %d\n", avs->vi->pixel_type);
        avs->error = 1;
        return AVERROR_UNKNOWN;
    }

    /* Select the plane list iterated by the video packet reader. */
    switch (planar) {
    case 2: // Y8
        avs->n_planes = 1;
        avs->planes = avs_planes_grey;
        break;
    case 1: // YUV
        avs->n_planes = 3;
        avs->planes = avs_planes_yuv;
        break;
    default:
        avs->n_planes = 1;
        avs->planes = avs_planes_packed;
    }
    return 0;
}
 
/* Fill in the audio AVStream from the clip's AVS_VideoInfo: sample
 * rate, channel count, 1/samplerate time base and a PCM codec ID
 * matching the AviSynth sample type.
 * Returns 0, or AVERROR_UNKNOWN for an unsupported sample type. */
static int avisynth_create_stream_audio(AVFormatContext *s, AVStream *st) {
    AviSynthContext *avs = s->priv_data;

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->sample_rate = avs->vi->audio_samples_per_second;
    st->codec->channels = avs->vi->nchannels;
    st->time_base = (AVRational) {1, avs->vi->audio_samples_per_second};

    switch (avs->vi->sample_type) {
    case AVS_SAMPLE_INT8:
        st->codec->codec_id = CODEC_ID_PCM_U8;
        break;
    case AVS_SAMPLE_INT16:
        st->codec->codec_id = CODEC_ID_PCM_S16LE;
        break;
    case AVS_SAMPLE_INT24:
        st->codec->codec_id = CODEC_ID_PCM_S24LE;
        break;
    case AVS_SAMPLE_INT32:
        st->codec->codec_id = CODEC_ID_PCM_S32LE;
        break;
    case AVS_SAMPLE_FLOAT:
        st->codec->codec_id = CODEC_ID_PCM_F32LE;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "unknown AviSynth sample type %d\n", avs->vi->sample_type);
        avs->error = 1;
        return AVERROR_UNKNOWN;
    }
    return 0;
}
 
/* Create one AVStream for video and/or one for audio, depending on
 * what the clip contains (video stream first when both are present). */
static int avisynth_create_stream(AVFormatContext *s) {
    AviSynthContext *avs = s->priv_data;
    AVStream *st;
    int ret;
    int id = 0;

    if (avs_has_video(avs->vi)) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR_UNKNOWN;
        st->id = id++;
        if (ret = avisynth_create_stream_video(s, st))
            return ret;
    }
    if (avs_has_audio(avs->vi)) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR_UNKNOWN;
        st->id = id++;
        if (ret = avisynth_create_stream_audio(s, st))
            return ret;
    }
    return 0;
}
 
/* Create the context, Import() the script file, take the resulting
 * clip and create the AVStreams. On any failure the context is torn
 * down again. Returns 0 or a negative error. */
static int avisynth_open_file(AVFormatContext *s) {
    AviSynthContext *avs = (AviSynthContext *)s->priv_data;
    AVS_Value arg, val;
    int ret;
#ifdef _WIN32
    char filename_ansi[MAX_PATH * 4];
    wchar_t filename_wc[MAX_PATH * 4];
#endif

    if (ret = avisynth_context_create(s))
        return ret;

#ifdef _WIN32
    // Convert UTF-8 to ANSI code page
    /* AviSynth's Import() expects an ANSI path on Windows. */
    MultiByteToWideChar(CP_UTF8, 0, s->filename, -1, filename_wc, MAX_PATH * 4);
    WideCharToMultiByte(CP_THREAD_ACP, 0, filename_wc, -1, filename_ansi, MAX_PATH * 4, NULL, NULL);
    arg = avs_new_value_string(filename_ansi);
#else
    arg = avs_new_value_string(s->filename);
#endif
    val = avs_library->avs_invoke(avs->env, "Import", arg, 0);
    if (avs_is_error(val)) {
        av_log(s, AV_LOG_ERROR, "%s\n", avs_as_error(val));
        ret = AVERROR_UNKNOWN;
        goto fail;
    }
    if (!avs_is_clip(val)) {
        av_log(s, AV_LOG_ERROR, "%s\n", "AviSynth script did not return a clip");
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    avs->clip = avs_library->avs_take_clip(val, avs->env);
    avs->vi = avs_library->avs_get_video_info(avs->clip);

    // Release the AVS_Value as it will go out of scope.
    avs_library->avs_release_value(val);

    if (ret = avisynth_create_stream(s))
        goto fail;

    return 0;

fail:
    avisynth_context_destroy(avs);
    return ret;
}
 
/* Pick the next stream in round-robin order, advancing the cursor,
 * and report whether that stream is set to be discarded. */
static void avisynth_next_stream(AVFormatContext *s, AVStream **st, AVPacket *pkt, int *discard) {
    AviSynthContext *avs = s->priv_data;
    int idx = avs->curr_stream;

    avs->curr_stream = (idx + 1) % s->nb_streams;

    pkt->stream_index = idx;
    *st               = s->streams[idx];
    *discard          = ((*st)->discard == AVDISCARD_ALL);
}
 
// Copy AviSynth clip data into an AVPacket.
/* Fetch the next video frame from the clip and copy its planes into a
 * newly allocated packet (pts/dts = frame number, duration = 1 tick).
 * The frame counter advances even when the stream is being discarded,
 * so video and audio stay in sync.
 * Returns 0, AVERROR_EOF past the last frame, AVERROR_UNKNOWN on a
 * clip error or allocation failure. */
static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int discard) {
    AviSynthContext *avs = s->priv_data;
    AVS_VideoFrame *frame;
    unsigned char *dst_p;
    const unsigned char *src_p;
    int n, i, plane, rowsize, planeheight, pitch, bits;
    const char *error;

    if (avs->curr_frame >= avs->vi->num_frames)
        return AVERROR_EOF;

    // This must happen even if the stream is discarded to prevent desync.
    n = avs->curr_frame++;
    if (discard)
        return 0;

    pkt->pts = n;
    pkt->dts = n;
    pkt->duration = 1;

    // Define the bpp values for the new AviSynth 2.6 colorspaces
    if (avs_is_yv24(avs->vi)) {
        bits = 24;
    } else if (avs_is_yv16(avs->vi)) {
        bits = 16;
    } else if (avs_is_yv411(avs->vi)) {
        bits = 12;
    } else if (avs_is_y8(avs->vi)) {
        bits = 8;
    } else {
        bits = avs_bits_per_pixel(avs->vi);
    }

    // Without cast to int64_t, calculation overflows at about 9k x 9k resolution.
    pkt->size = (((int64_t)avs->vi->width * (int64_t)avs->vi->height) * bits) / 8;
    if (!pkt->size)
        return AVERROR_UNKNOWN;
    pkt->data = av_malloc(pkt->size);
    if (!pkt->data)
        return AVERROR_UNKNOWN;

    frame = avs_library->avs_get_frame(avs->clip, n);
    error = avs_library->avs_clip_get_error(avs->clip);
    if (error) {
        av_log(s, AV_LOG_ERROR, "%s\n", error);
        avs->error = 1;
        av_freep(&pkt->data);
        return AVERROR_UNKNOWN;
    }

    /* Blit each plane contiguously into the packet. */
    dst_p = pkt->data;
    for (i = 0; i < avs->n_planes; i++) {
        plane = avs->planes[i];
        src_p = avs_get_read_ptr_p(frame, plane);
        pitch = avs_get_pitch_p(frame, plane);

#ifdef _WIN32
        /* AviSynth 2.5 (interface version 3) needs the _25 accessors. */
        if (avs_library->avs_get_version(avs->clip) == 3) {
            rowsize     = avs_get_row_size_p_25(frame, plane);
            planeheight = avs_get_height_p_25(frame, plane);
        } else {
            rowsize     = avs_get_row_size_p(frame, plane);
            planeheight = avs_get_height_p(frame, plane);
        }
#else
        rowsize     = avs_get_row_size_p(frame, plane);
        planeheight = avs_get_height_p(frame, plane);
#endif

        // Flip RGB video.
        if (avs_is_rgb24(avs->vi) || avs_is_rgb(avs->vi)) {
            src_p = src_p + (planeheight - 1) * pitch;
            pitch = -pitch;
        }

        avs_library->avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch, rowsize, planeheight);
        dst_p += rowsize * planeheight;
    }

    avs_library->avs_release_video_frame(frame);
    return 0;
}
 
/* Fetch the next chunk of audio samples into a newly allocated packet.
 * With video present, the chunk size tracks the video frame cadence so
 * the two streams interleave; without video, 1000 samples per packet.
 * The sample counter advances even for discarded streams.
 * Returns 0, AVERROR_EOF past the last sample, AVERROR_UNKNOWN on a
 * clip error or allocation failure. */
static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt, int discard) {
    AviSynthContext *avs = s->priv_data;
    AVRational fps, samplerate;
    int samples;
    int64_t n;
    const char *error;

    if (avs->curr_sample >= avs->vi->num_audio_samples)
        return AVERROR_EOF;

    fps.num = avs->vi->fps_numerator;
    fps.den = avs->vi->fps_denominator;
    samplerate.num = avs->vi->audio_samples_per_second;
    samplerate.den = 1;

    /* Emit audio up to the position of the current video frame. */
    if (avs_has_video(avs->vi)) {
        if (avs->curr_frame < avs->vi->num_frames)
            samples = av_rescale_q(avs->curr_frame, samplerate, fps) - avs->curr_sample;
        else
            samples = av_rescale_q(1, samplerate, fps);
    } else {
        samples = 1000;
    }

    // After seeking, audio may catch up with video.
    if (samples <= 0) {
        pkt->size = 0;
        pkt->data = NULL;
        return 0;
    }

    /* Clamp the last chunk to the number of remaining samples. */
    if (avs->curr_sample + samples > avs->vi->num_audio_samples)
        samples = avs->vi->num_audio_samples - avs->curr_sample;

    // This must happen even if the stream is discarded to prevent desync.
    n = avs->curr_sample;
    avs->curr_sample += samples;
    if (discard)
        return 0;

    pkt->pts = n;
    pkt->dts = n;
    pkt->duration = samples;

    pkt->size = avs_bytes_per_channel_sample(avs->vi) * samples * avs->vi->nchannels;
    if (!pkt->size)
        return AVERROR_UNKNOWN;
    pkt->data = av_malloc(pkt->size);
    if (!pkt->data)
        return AVERROR_UNKNOWN;

    avs_library->avs_get_audio(avs->clip, pkt->data, n, samples);
    error = avs_library->avs_clip_get_error(avs->clip);
    if (error) {
        av_log(s, AV_LOG_ERROR, "%s\n", error);
        avs->error = 1;
        av_freep(&pkt->data);
        return AVERROR_UNKNOWN;
    }
    return 0;
}
 
/* Demuxer read_header: open the script under the global avformat lock
 * (library loading and the context list are not thread-safe). */
static av_cold int avisynth_read_header(AVFormatContext *s) {
    int ret;

    // Calling library must implement a lock for thread-safe opens.
    if (ret = avpriv_lock_avformat())
        return ret;

    if (ret = avisynth_open_file(s)) {
        avpriv_unlock_avformat();
        return ret;
    }

    avpriv_unlock_avformat();
    return 0;
}
 
/* Demuxer read_packet: alternate between the video and audio streams;
 * when one reaches EOF, keep draining the other before reporting EOF. */
static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt) {
    AviSynthContext *avs = s->priv_data;
    AVStream *st;
    int discard = 0;
    int ret;

    /* A previous clip error is sticky: refuse further reads. */
    if (avs->error)
        return AVERROR_UNKNOWN;

    pkt->destruct = av_destruct_packet;

    // If either stream reaches EOF, try to read the other one before giving up.
    avisynth_next_stream(s, &st, pkt, &discard);
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = avisynth_read_packet_video(s, pkt, discard);
        if (ret == AVERROR_EOF && avs_has_audio(avs->vi)) {
            avisynth_next_stream(s, &st, pkt, &discard);
            return avisynth_read_packet_audio(s, pkt, discard);
        }
        return ret;
    } else {
        ret = avisynth_read_packet_audio(s, pkt, discard);
        if (ret == AVERROR_EOF && avs_has_video(avs->vi)) {
            avisynth_next_stream(s, &st, pkt, &discard);
            return avisynth_read_packet_video(s, pkt, discard);
        }
        return ret;
    }
}
 
/* Demuxer read_close: tear the context down under the global lock. */
static av_cold int avisynth_read_close(AVFormatContext *s) {
    if (avpriv_lock_avformat())
        return AVERROR_UNKNOWN;

    avisynth_context_destroy(s->priv_data);
    avpriv_unlock_avformat();
    return 0;
}
 
/* Demuxer read_seek: reposition both the frame and sample cursors.
 * Video seeks land on the exact frame; audio seeks are rounded to the
 * nearest video frame boundary when video is present. */
static int avisynth_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) {
    AviSynthContext *avs = s->priv_data;
    AVStream *st;
    AVRational fps, samplerate;

    if (avs->error)
        return AVERROR_UNKNOWN;

    fps        = (AVRational) {avs->vi->fps_numerator, avs->vi->fps_denominator};
    samplerate = (AVRational) {avs->vi->audio_samples_per_second, 1};

    st = s->streams[stream_index];
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        // AviSynth frame counts are signed int.
        if ((timestamp >= avs->vi->num_frames) || (timestamp > INT_MAX) || (timestamp < 0))
            return AVERROR_EOF;
        avs->curr_frame = timestamp;
        /* Keep the audio cursor aligned with the new video position. */
        if (avs_has_audio(avs->vi))
            avs->curr_sample = av_rescale_q(timestamp, samplerate, fps);
    } else {
        if ((timestamp >= avs->vi->num_audio_samples) || (timestamp < 0))
            return AVERROR_EOF;
        // Force frame granularity for seeking.
        if (avs_has_video(avs->vi)) {
            avs->curr_frame = av_rescale_q(timestamp, fps, samplerate);
            avs->curr_sample = av_rescale_q(avs->curr_frame, samplerate, fps);
        } else {
            avs->curr_sample = timestamp;
        }
    }

    return 0;
}
 
/* Demuxer registration: handles .avs scripts via the callbacks above. */
AVInputFormat ff_avisynth_demuxer = {
    .name           = "avisynth",
    .long_name      = NULL_IF_CONFIG_SMALL("AviSynth script"),
    .priv_data_size = sizeof(AviSynthContext),
    .read_header    = avisynth_read_header,
    .read_packet    = avisynth_read_packet,
    .read_close     = avisynth_read_close,
    .read_seek      = avisynth_read_seek,
    .extensions     = "avs",
};
/contrib/sdk/sources/ffmpeg/libavformat/avlanguage.c
0,0 → 1,765
/*
* Cyril Comparon, Larbi Joubala, Resonate-MP4 2009
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avlanguage.h"
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
 
/* One ISO 639 language code plus the index (into lang_table) of the
 * next equivalent code; the equivalents form a circular chain. */
typedef struct LangEntry {
    const char str[4];        /* 2- or 3-letter code, NUL-padded */
    uint16_t next_equivalent; /* index of the next equivalent entry */
} LangEntry;

/* Entry counts and start offsets of the three sub-tables inside
 * lang_table (bibliographic 639-2, terminological 639-2, 639-1). */
static const uint16_t lang_table_counts[] = { 484, 20, 184 };
static const uint16_t lang_table_offsets[] = { 0, 484, 504 };
 
/* The language-code table.  Each codespace's sub-table is sorted by
 * code string so that bsearch() can be used on it; next_equivalent
 * links each entry to the same language in another codespace (see
 * LangEntry).  The /*NNNN*​/ index comments make those links easy to
 * verify by hand.  A trailing empty entry terminates the table. */
static const LangEntry lang_table[] = {
    /*----- AV_LANG_ISO639_2_BIBL entries (484) -----*/
    /*0000*/ { "aar", 504 },
    /*0001*/ { "abk", 505 },
    /*0002*/ { "ace", 2 },
    /*0003*/ { "ach", 3 },
    /*0004*/ { "ada", 4 },
    /*0005*/ { "ady", 5 },
    /*0006*/ { "afa", 6 },
    /*0007*/ { "afh", 7 },
    /*0008*/ { "afr", 507 },
    /*0009*/ { "ain", 9 },
    /*0010*/ { "aka", 508 },
    /*0011*/ { "akk", 11 },
    /*0012*/ { "alb", 502 },
    /*0013*/ { "ale", 13 },
    /*0014*/ { "alg", 14 },
    /*0015*/ { "alt", 15 },
    /*0016*/ { "amh", 509 },
    /*0017*/ { "ang", 17 },
    /*0018*/ { "anp", 18 },
    /*0019*/ { "apa", 19 },
    /*0020*/ { "ara", 511 },
    /*0021*/ { "arc", 21 },
    /*0022*/ { "arg", 510 },
    /*0023*/ { "arm", 492 },
    /*0024*/ { "arn", 24 },
    /*0025*/ { "arp", 25 },
    /*0026*/ { "art", 26 },
    /*0027*/ { "arw", 27 },
    /*0028*/ { "asm", 512 },
    /*0029*/ { "ast", 29 },
    /*0030*/ { "ath", 30 },
    /*0031*/ { "aus", 31 },
    /*0032*/ { "ava", 513 },
    /*0033*/ { "ave", 506 },
    /*0034*/ { "awa", 34 },
    /*0035*/ { "aym", 514 },
    /*0036*/ { "aze", 515 },
    /*0037*/ { "bad", 37 },
    /*0038*/ { "bai", 38 },
    /*0039*/ { "bak", 516 },
    /*0040*/ { "bal", 40 },
    /*0041*/ { "bam", 521 },
    /*0042*/ { "ban", 42 },
    /*0043*/ { "baq", 489 },
    /*0044*/ { "bas", 44 },
    /*0045*/ { "bat", 45 },
    /*0046*/ { "bej", 46 },
    /*0047*/ { "bel", 517 },
    /*0048*/ { "bem", 48 },
    /*0049*/ { "ben", 522 },
    /*0050*/ { "ber", 50 },
    /*0051*/ { "bho", 51 },
    /*0052*/ { "bih", 519 },
    /*0053*/ { "bik", 53 },
    /*0054*/ { "bin", 54 },
    /*0055*/ { "bis", 520 },
    /*0056*/ { "bla", 56 },
    /*0057*/ { "bnt", 57 },
    /*0058*/ { "bos", 525 },
    /*0059*/ { "bra", 59 },
    /*0060*/ { "bre", 524 },
    /*0061*/ { "btk", 61 },
    /*0062*/ { "bua", 62 },
    /*0063*/ { "bug", 63 },
    /*0064*/ { "bul", 518 },
    /*0065*/ { "bur", 498 },
    /*0066*/ { "byn", 66 },
    /*0067*/ { "cad", 67 },
    /*0068*/ { "cai", 68 },
    /*0069*/ { "car", 69 },
    /*0070*/ { "cat", 526 },
    /*0071*/ { "cau", 71 },
    /*0072*/ { "ceb", 72 },
    /*0073*/ { "cel", 73 },
    /*0074*/ { "cha", 528 },
    /*0075*/ { "chb", 75 },
    /*0076*/ { "che", 527 },
    /*0077*/ { "chg", 77 },
    /*0078*/ { "chi", 503 },
    /*0079*/ { "chk", 79 },
    /*0080*/ { "chm", 80 },
    /*0081*/ { "chn", 81 },
    /*0082*/ { "cho", 82 },
    /*0083*/ { "chp", 83 },
    /*0084*/ { "chr", 84 },
    /*0085*/ { "chu", 532 },
    /*0086*/ { "chv", 533 },
    /*0087*/ { "chy", 87 },
    /*0088*/ { "cmc", 88 },
    /*0089*/ { "cop", 89 },
    /*0090*/ { "cor", 593 },
    /*0091*/ { "cos", 529 },
    /*0092*/ { "cpe", 92 },
    /*0093*/ { "cpf", 93 },
    /*0094*/ { "cpp", 94 },
    /*0095*/ { "cre", 530 },
    /*0096*/ { "crh", 96 },
    /*0097*/ { "crp", 97 },
    /*0098*/ { "csb", 98 },
    /*0099*/ { "cus", 99 },
    /*0100*/ { "cze", 485 },
    /*0101*/ { "dak", 101 },
    /*0102*/ { "dan", 535 },
    /*0103*/ { "dar", 103 },
    /*0104*/ { "day", 104 },
    /*0105*/ { "del", 105 },
    /*0106*/ { "den", 106 },
    /*0107*/ { "dgr", 107 },
    /*0108*/ { "din", 108 },
    /*0109*/ { "div", 537 },
    /*0110*/ { "doi", 110 },
    /*0111*/ { "dra", 111 },
    /*0112*/ { "dsb", 112 },
    /*0113*/ { "dua", 113 },
    /*0114*/ { "dum", 114 },
    /*0115*/ { "dut", 499 },
    /*0116*/ { "dyu", 116 },
    /*0117*/ { "dzo", 538 },
    /*0118*/ { "efi", 118 },
    /*0119*/ { "egy", 119 },
    /*0120*/ { "eka", 120 },
    /*0121*/ { "elx", 121 },
    /*0122*/ { "eng", 541 },
    /*0123*/ { "enm", 123 },
    /*0124*/ { "epo", 542 },
    /*0125*/ { "est", 544 },
    /*0126*/ { "ewe", 539 },
    /*0127*/ { "ewo", 127 },
    /*0128*/ { "fan", 128 },
    /*0129*/ { "fao", 550 },
    /*0130*/ { "fat", 130 },
    /*0131*/ { "fij", 549 },
    /*0132*/ { "fil", 132 },
    /*0133*/ { "fin", 548 },
    /*0134*/ { "fiu", 134 },
    /*0135*/ { "fon", 135 },
    /*0136*/ { "fre", 491 },
    /*0137*/ { "frm", 137 },
    /*0138*/ { "fro", 138 },
    /*0139*/ { "frr", 139 },
    /*0140*/ { "frs", 140 },
    /*0141*/ { "fry", 552 },
    /*0142*/ { "ful", 547 },
    /*0143*/ { "fur", 143 },
    /*0144*/ { "gaa", 144 },
    /*0145*/ { "gay", 145 },
    /*0146*/ { "gba", 146 },
    /*0147*/ { "gem", 147 },
    /*0148*/ { "geo", 494 },
    /*0149*/ { "ger", 487 },
    /*0150*/ { "gez", 150 },
    /*0151*/ { "gil", 151 },
    /*0152*/ { "gla", 554 },
    /*0153*/ { "gle", 553 },
    /*0154*/ { "glg", 555 },
    /*0155*/ { "glv", 558 },
    /*0156*/ { "gmh", 156 },
    /*0157*/ { "goh", 157 },
    /*0158*/ { "gon", 158 },
    /*0159*/ { "gor", 159 },
    /*0160*/ { "got", 160 },
    /*0161*/ { "grb", 161 },
    /*0162*/ { "grc", 162 },
    /*0163*/ { "gre", 488 },
    /*0164*/ { "grn", 556 },
    /*0165*/ { "gsw", 165 },
    /*0166*/ { "guj", 557 },
    /*0167*/ { "gwi", 167 },
    /*0168*/ { "hai", 168 },
    /*0169*/ { "hat", 564 },
    /*0170*/ { "hau", 559 },
    /*0171*/ { "haw", 171 },
    /*0172*/ { "heb", 560 },
    /*0173*/ { "her", 567 },
    /*0174*/ { "hil", 174 },
    /*0175*/ { "him", 175 },
    /*0176*/ { "hin", 561 },
    /*0177*/ { "hit", 177 },
    /*0178*/ { "hmn", 178 },
    /*0179*/ { "hmo", 562 },
    /*0180*/ { "hrv", 563 },
    /*0181*/ { "hsb", 181 },
    /*0182*/ { "hun", 565 },
    /*0183*/ { "hup", 183 },
    /*0184*/ { "iba", 184 },
    /*0185*/ { "ibo", 571 },
    /*0186*/ { "ice", 493 },
    /*0187*/ { "ido", 574 },
    /*0188*/ { "iii", 572 },
    /*0189*/ { "ijo", 189 },
    /*0190*/ { "iku", 577 },
    /*0191*/ { "ile", 570 },
    /*0192*/ { "ilo", 192 },
    /*0193*/ { "ina", 568 },
    /*0194*/ { "inc", 194 },
    /*0195*/ { "ind", 569 },
    /*0196*/ { "ine", 196 },
    /*0197*/ { "inh", 197 },
    /*0198*/ { "ipk", 573 },
    /*0199*/ { "ira", 199 },
    /*0200*/ { "iro", 200 },
    /*0201*/ { "ita", 576 },
    /*0202*/ { "jav", 579 },
    /*0203*/ { "jbo", 203 },
    /*0204*/ { "jpn", 578 },
    /*0205*/ { "jpr", 205 },
    /*0206*/ { "jrb", 206 },
    /*0207*/ { "kaa", 207 },
    /*0208*/ { "kab", 208 },
    /*0209*/ { "kac", 209 },
    /*0210*/ { "kal", 585 },
    /*0211*/ { "kam", 211 },
    /*0212*/ { "kan", 587 },
    /*0213*/ { "kar", 213 },
    /*0214*/ { "kas", 590 },
    /*0215*/ { "kau", 589 },
    /*0216*/ { "kaw", 216 },
    /*0217*/ { "kaz", 584 },
    /*0218*/ { "kbd", 218 },
    /*0219*/ { "kha", 219 },
    /*0220*/ { "khi", 220 },
    /*0221*/ { "khm", 586 },
    /*0222*/ { "kho", 222 },
    /*0223*/ { "kik", 582 },
    /*0224*/ { "kin", 640 },
    /*0225*/ { "kir", 594 },
    /*0226*/ { "kmb", 226 },
    /*0227*/ { "kok", 227 },
    /*0228*/ { "kom", 592 },
    /*0229*/ { "kon", 581 },
    /*0230*/ { "kor", 588 },
    /*0231*/ { "kos", 231 },
    /*0232*/ { "kpe", 232 },
    /*0233*/ { "krc", 233 },
    /*0234*/ { "krl", 234 },
    /*0235*/ { "kro", 235 },
    /*0236*/ { "kru", 236 },
    /*0237*/ { "kua", 583 },
    /*0238*/ { "kum", 238 },
    /*0239*/ { "kur", 591 },
    /*0240*/ { "kut", 240 },
    /*0241*/ { "lad", 241 },
    /*0242*/ { "lah", 242 },
    /*0243*/ { "lam", 243 },
    /*0244*/ { "lao", 600 },
    /*0245*/ { "lat", 595 },
    /*0246*/ { "lav", 603 },
    /*0247*/ { "lez", 247 },
    /*0248*/ { "lim", 598 },
    /*0249*/ { "lin", 599 },
    /*0250*/ { "lit", 601 },
    /*0251*/ { "lol", 251 },
    /*0252*/ { "loz", 252 },
    /*0253*/ { "ltz", 596 },
    /*0254*/ { "lua", 254 },
    /*0255*/ { "lub", 602 },
    /*0256*/ { "lug", 597 },
    /*0257*/ { "lui", 257 },
    /*0258*/ { "lun", 258 },
    /*0259*/ { "luo", 259 },
    /*0260*/ { "lus", 260 },
    /*0261*/ { "mac", 495 },
    /*0262*/ { "mad", 262 },
    /*0263*/ { "mag", 263 },
    /*0264*/ { "mah", 605 },
    /*0265*/ { "mai", 265 },
    /*0266*/ { "mak", 266 },
    /*0267*/ { "mal", 608 },
    /*0268*/ { "man", 268 },
    /*0269*/ { "mao", 496 },
    /*0270*/ { "map", 270 },
    /*0271*/ { "mar", 610 },
    /*0272*/ { "mas", 272 },
    /*0273*/ { "may", 497 },
    /*0274*/ { "mdf", 274 },
    /*0275*/ { "mdr", 275 },
    /*0276*/ { "men", 276 },
    /*0277*/ { "mga", 277 },
    /*0278*/ { "mic", 278 },
    /*0279*/ { "min", 279 },
    /*0280*/ { "mis", 280 },
    /*0281*/ { "mkh", 281 },
    /*0282*/ { "mlg", 604 },
    /*0283*/ { "mlt", 612 },
    /*0284*/ { "mnc", 284 },
    /*0285*/ { "mni", 285 },
    /*0286*/ { "mno", 286 },
    /*0287*/ { "moh", 287 },
    /*0288*/ { "mon", 609 },
    /*0289*/ { "mos", 289 },
    /*0290*/ { "mul", 290 },
    /*0291*/ { "mun", 291 },
    /*0292*/ { "mus", 292 },
    /*0293*/ { "mwl", 293 },
    /*0294*/ { "mwr", 294 },
    /*0295*/ { "myn", 295 },
    /*0296*/ { "myv", 296 },
    /*0297*/ { "nah", 297 },
    /*0298*/ { "nai", 298 },
    /*0299*/ { "nap", 299 },
    /*0300*/ { "nau", 614 },
    /*0301*/ { "nav", 623 },
    /*0302*/ { "nbl", 622 },
    /*0303*/ { "nde", 616 },
    /*0304*/ { "ndo", 618 },
    /*0305*/ { "nds", 305 },
    /*0306*/ { "nep", 617 },
    /*0307*/ { "new", 307 },
    /*0308*/ { "nia", 308 },
    /*0309*/ { "nic", 309 },
    /*0310*/ { "niu", 310 },
    /*0311*/ { "nno", 620 },
    /*0312*/ { "nob", 615 },
    /*0313*/ { "nog", 313 },
    /*0314*/ { "non", 314 },
    /*0315*/ { "nor", 621 },
    /*0316*/ { "nqo", 316 },
    /*0317*/ { "nso", 317 },
    /*0318*/ { "nub", 318 },
    /*0319*/ { "nwc", 319 },
    /*0320*/ { "nya", 624 },
    /*0321*/ { "nym", 321 },
    /*0322*/ { "nyn", 322 },
    /*0323*/ { "nyo", 323 },
    /*0324*/ { "nzi", 324 },
    /*0325*/ { "oci", 625 },
    /*0326*/ { "oji", 626 },
    /*0327*/ { "ori", 628 },
    /*0328*/ { "orm", 627 },
    /*0329*/ { "osa", 329 },
    /*0330*/ { "oss", 629 },
    /*0331*/ { "ota", 331 },
    /*0332*/ { "oto", 332 },
    /*0333*/ { "paa", 333 },
    /*0334*/ { "pag", 334 },
    /*0335*/ { "pal", 335 },
    /*0336*/ { "pam", 336 },
    /*0337*/ { "pan", 630 },
    /*0338*/ { "pap", 338 },
    /*0339*/ { "pau", 339 },
    /*0340*/ { "peo", 340 },
    /*0341*/ { "per", 490 },
    /*0342*/ { "phi", 342 },
    /*0343*/ { "phn", 343 },
    /*0344*/ { "pli", 631 },
    /*0345*/ { "pol", 632 },
    /*0346*/ { "pon", 346 },
    /*0347*/ { "por", 634 },
    /*0348*/ { "pra", 348 },
    /*0349*/ { "pro", 349 },
    /*0350*/ { "pus", 633 },
    /*0351*/ { "que", 635 },
    /*0352*/ { "raj", 352 },
    /*0353*/ { "rap", 353 },
    /*0354*/ { "rar", 354 },
    /*0355*/ { "roa", 355 },
    /*0356*/ { "roh", 636 },
    /*0357*/ { "rom", 357 },
    /*0358*/ { "rum", 500 },
    /*0359*/ { "run", 637 },
    /*0360*/ { "rup", 360 },
    /*0361*/ { "rus", 639 },
    /*0362*/ { "sad", 362 },
    /*0363*/ { "sag", 645 },
    /*0364*/ { "sah", 364 },
    /*0365*/ { "sai", 365 },
    /*0366*/ { "sal", 366 },
    /*0367*/ { "sam", 367 },
    /*0368*/ { "san", 641 },
    /*0369*/ { "sas", 369 },
    /*0370*/ { "sat", 370 },
    /*0371*/ { "scn", 371 },
    /*0372*/ { "sco", 372 },
    /*0373*/ { "sel", 373 },
    /*0374*/ { "sem", 374 },
    /*0375*/ { "sga", 375 },
    /*0376*/ { "sgn", 376 },
    /*0377*/ { "shn", 377 },
    /*0378*/ { "sid", 378 },
    /*0379*/ { "sin", 646 },
    /*0380*/ { "sio", 380 },
    /*0381*/ { "sit", 381 },
    /*0382*/ { "sla", 382 },
    /*0383*/ { "slo", 501 },
    /*0384*/ { "slv", 648 },
    /*0385*/ { "sma", 385 },
    /*0386*/ { "sme", 644 },
    /*0387*/ { "smi", 387 },
    /*0388*/ { "smj", 388 },
    /*0389*/ { "smn", 389 },
    /*0390*/ { "smo", 649 },
    /*0391*/ { "sms", 391 },
    /*0392*/ { "sna", 650 },
    /*0393*/ { "snd", 643 },
    /*0394*/ { "snk", 394 },
    /*0395*/ { "sog", 395 },
    /*0396*/ { "som", 651 },
    /*0397*/ { "son", 397 },
    /*0398*/ { "sot", 655 },
    /*0399*/ { "spa", 543 },
    /*0400*/ { "srd", 642 },
    /*0401*/ { "srn", 401 },
    /*0402*/ { "srp", 653 },
    /*0403*/ { "srr", 403 },
    /*0404*/ { "ssa", 404 },
    /*0405*/ { "ssw", 654 },
    /*0406*/ { "suk", 406 },
    /*0407*/ { "sun", 656 },
    /*0408*/ { "sus", 408 },
    /*0409*/ { "sux", 409 },
    /*0410*/ { "swa", 658 },
    /*0411*/ { "swe", 657 },
    /*0412*/ { "syc", 412 },
    /*0413*/ { "syr", 413 },
    /*0414*/ { "tah", 672 },
    /*0415*/ { "tai", 415 },
    /*0416*/ { "tam", 659 },
    /*0417*/ { "tat", 670 },
    /*0418*/ { "tel", 660 },
    /*0419*/ { "tem", 419 },
    /*0420*/ { "ter", 420 },
    /*0421*/ { "tet", 421 },
    /*0422*/ { "tgk", 661 },
    /*0423*/ { "tgl", 665 },
    /*0424*/ { "tha", 662 },
    /*0425*/ { "tib", 484 },
    /*0426*/ { "tig", 426 },
    /*0427*/ { "tir", 663 },
    /*0428*/ { "tiv", 428 },
    /*0429*/ { "tkl", 429 },
    /*0430*/ { "tlh", 430 },
    /*0431*/ { "tli", 431 },
    /*0432*/ { "tmh", 432 },
    /*0433*/ { "tog", 433 },
    /*0434*/ { "ton", 667 },
    /*0435*/ { "tpi", 435 },
    /*0436*/ { "tsi", 436 },
    /*0437*/ { "tsn", 666 },
    /*0438*/ { "tso", 669 },
    /*0439*/ { "tuk", 664 },
    /*0440*/ { "tum", 440 },
    /*0441*/ { "tup", 441 },
    /*0442*/ { "tur", 668 },
    /*0443*/ { "tut", 443 },
    /*0444*/ { "tvl", 444 },
    /*0445*/ { "twi", 671 },
    /*0446*/ { "tyv", 446 },
    /*0447*/ { "udm", 447 },
    /*0448*/ { "uga", 448 },
    /*0449*/ { "uig", 673 },
    /*0450*/ { "ukr", 674 },
    /*0451*/ { "umb", 451 },
    /*0452*/ { "und", 452 },
    /*0453*/ { "urd", 675 },
    /*0454*/ { "uzb", 676 },
    /*0455*/ { "vai", 455 },
    /*0456*/ { "ven", 677 },
    /*0457*/ { "vie", 678 },
    /*0458*/ { "vol", 679 },
    /*0459*/ { "vot", 459 },
    /*0460*/ { "wak", 460 },
    /*0461*/ { "wal", 461 },
    /*0462*/ { "war", 462 },
    /*0463*/ { "was", 463 },
    /*0464*/ { "wel", 486 },
    /*0465*/ { "wen", 465 },
    /*0466*/ { "wln", 680 },
    /*0467*/ { "wol", 681 },
    /*0468*/ { "xal", 468 },
    /*0469*/ { "xho", 682 },
    /*0470*/ { "yao", 470 },
    /*0471*/ { "yap", 471 },
    /*0472*/ { "yid", 683 },
    /*0473*/ { "yor", 684 },
    /*0474*/ { "ypk", 474 },
    /*0475*/ { "zap", 475 },
    /*0476*/ { "zbl", 476 },
    /*0477*/ { "zen", 477 },
    /*0478*/ { "zha", 685 },
    /*0479*/ { "znd", 479 },
    /*0480*/ { "zul", 687 },
    /*0481*/ { "zun", 481 },
    /*0482*/ { "zxx", 482 },
    /*0483*/ { "zza", 483 },
    /*----- AV_LANG_ISO639_2_TERM entries (20) -----*/
    /*0484*/ { "bod", 523 },
    /*0485*/ { "ces", 531 },
    /*0486*/ { "cym", 534 },
    /*0487*/ { "deu", 536 },
    /*0488*/ { "ell", 540 },
    /*0489*/ { "eus", 545 },
    /*0490*/ { "fas", 546 },
    /*0491*/ { "fra", 551 },
    /*0492*/ { "hye", 566 },
    /*0493*/ { "isl", 575 },
    /*0494*/ { "kat", 580 },
    /*0495*/ { "mkd", 607 },
    /*0496*/ { "mri", 606 },
    /*0497*/ { "msa", 611 },
    /*0498*/ { "mya", 613 },
    /*0499*/ { "nld", 619 },
    /*0500*/ { "ron", 638 },
    /*0501*/ { "slk", 647 },
    /*0502*/ { "sqi", 652 },
    /*0503*/ { "zho", 686 },
    /*----- AV_LANG_ISO639_1 entries (184) -----*/
    /*0504*/ { "aa" , 0 },
    /*0505*/ { "ab" , 1 },
    /*0506*/ { "ae" , 33 },
    /*0507*/ { "af" , 8 },
    /*0508*/ { "ak" , 10 },
    /*0509*/ { "am" , 16 },
    /*0510*/ { "an" , 22 },
    /*0511*/ { "ar" , 20 },
    /*0512*/ { "as" , 28 },
    /*0513*/ { "av" , 32 },
    /*0514*/ { "ay" , 35 },
    /*0515*/ { "az" , 36 },
    /*0516*/ { "ba" , 39 },
    /*0517*/ { "be" , 47 },
    /*0518*/ { "bg" , 64 },
    /*0519*/ { "bh" , 52 },
    /*0520*/ { "bi" , 55 },
    /*0521*/ { "bm" , 41 },
    /*0522*/ { "bn" , 49 },
    /*0523*/ { "bo" , 425 },
    /*0524*/ { "br" , 60 },
    /*0525*/ { "bs" , 58 },
    /*0526*/ { "ca" , 70 },
    /*0527*/ { "ce" , 76 },
    /*0528*/ { "ch" , 74 },
    /*0529*/ { "co" , 91 },
    /*0530*/ { "cr" , 95 },
    /*0531*/ { "cs" , 100 },
    /*0532*/ { "cu" , 85 },
    /*0533*/ { "cv" , 86 },
    /*0534*/ { "cy" , 464 },
    /*0535*/ { "da" , 102 },
    /*0536*/ { "de" , 149 },
    /*0537*/ { "dv" , 109 },
    /*0538*/ { "dz" , 117 },
    /*0539*/ { "ee" , 126 },
    /*0540*/ { "el" , 163 },
    /*0541*/ { "en" , 122 },
    /*0542*/ { "eo" , 124 },
    /*0543*/ { "es" , 399 },
    /*0544*/ { "et" , 125 },
    /*0545*/ { "eu" , 43 },
    /*0546*/ { "fa" , 341 },
    /*0547*/ { "ff" , 142 },
    /*0548*/ { "fi" , 133 },
    /*0549*/ { "fj" , 131 },
    /*0550*/ { "fo" , 129 },
    /*0551*/ { "fr" , 136 },
    /*0552*/ { "fy" , 141 },
    /*0553*/ { "ga" , 153 },
    /*0554*/ { "gd" , 152 },
    /*0555*/ { "gl" , 154 },
    /*0556*/ { "gn" , 164 },
    /*0557*/ { "gu" , 166 },
    /*0558*/ { "gv" , 155 },
    /*0559*/ { "ha" , 170 },
    /*0560*/ { "he" , 172 },
    /*0561*/ { "hi" , 176 },
    /*0562*/ { "ho" , 179 },
    /*0563*/ { "hr" , 180 },
    /*0564*/ { "ht" , 169 },
    /*0565*/ { "hu" , 182 },
    /*0566*/ { "hy" , 23 },
    /*0567*/ { "hz" , 173 },
    /*0568*/ { "ia" , 193 },
    /*0569*/ { "id" , 195 },
    /*0570*/ { "ie" , 191 },
    /*0571*/ { "ig" , 185 },
    /*0572*/ { "ii" , 188 },
    /*0573*/ { "ik" , 198 },
    /*0574*/ { "io" , 187 },
    /*0575*/ { "is" , 186 },
    /*0576*/ { "it" , 201 },
    /*0577*/ { "iu" , 190 },
    /*0578*/ { "ja" , 204 },
    /*0579*/ { "jv" , 202 },
    /*0580*/ { "ka" , 148 },
    /*0581*/ { "kg" , 229 },
    /*0582*/ { "ki" , 223 },
    /*0583*/ { "kj" , 237 },
    /*0584*/ { "kk" , 217 },
    /*0585*/ { "kl" , 210 },
    /*0586*/ { "km" , 221 },
    /*0587*/ { "kn" , 212 },
    /*0588*/ { "ko" , 230 },
    /*0589*/ { "kr" , 215 },
    /*0590*/ { "ks" , 214 },
    /*0591*/ { "ku" , 239 },
    /*0592*/ { "kv" , 228 },
    /*0593*/ { "kw" , 90 },
    /*0594*/ { "ky" , 225 },
    /*0595*/ { "la" , 245 },
    /*0596*/ { "lb" , 253 },
    /*0597*/ { "lg" , 256 },
    /*0598*/ { "li" , 248 },
    /*0599*/ { "ln" , 249 },
    /*0600*/ { "lo" , 244 },
    /*0601*/ { "lt" , 250 },
    /*0602*/ { "lu" , 255 },
    /*0603*/ { "lv" , 246 },
    /*0604*/ { "mg" , 282 },
    /*0605*/ { "mh" , 264 },
    /*0606*/ { "mi" , 269 },
    /*0607*/ { "mk" , 261 },
    /*0608*/ { "ml" , 267 },
    /*0609*/ { "mn" , 288 },
    /*0610*/ { "mr" , 271 },
    /*0611*/ { "ms" , 273 },
    /*0612*/ { "mt" , 283 },
    /*0613*/ { "my" , 65 },
    /*0614*/ { "na" , 300 },
    /*0615*/ { "nb" , 312 },
    /*0616*/ { "nd" , 303 },
    /*0617*/ { "ne" , 306 },
    /*0618*/ { "ng" , 304 },
    /*0619*/ { "nl" , 115 },
    /*0620*/ { "nn" , 311 },
    /*0621*/ { "no" , 315 },
    /*0622*/ { "nr" , 302 },
    /*0623*/ { "nv" , 301 },
    /*0624*/ { "ny" , 320 },
    /*0625*/ { "oc" , 325 },
    /*0626*/ { "oj" , 326 },
    /*0627*/ { "om" , 328 },
    /*0628*/ { "or" , 327 },
    /*0629*/ { "os" , 330 },
    /*0630*/ { "pa" , 337 },
    /*0631*/ { "pi" , 344 },
    /*0632*/ { "pl" , 345 },
    /*0633*/ { "ps" , 350 },
    /*0634*/ { "pt" , 347 },
    /*0635*/ { "qu" , 351 },
    /*0636*/ { "rm" , 356 },
    /*0637*/ { "rn" , 359 },
    /*0638*/ { "ro" , 358 },
    /*0639*/ { "ru" , 361 },
    /*0640*/ { "rw" , 224 },
    /*0641*/ { "sa" , 368 },
    /*0642*/ { "sc" , 400 },
    /*0643*/ { "sd" , 393 },
    /*0644*/ { "se" , 386 },
    /*0645*/ { "sg" , 363 },
    /*0646*/ { "si" , 379 },
    /*0647*/ { "sk" , 383 },
    /*0648*/ { "sl" , 384 },
    /*0649*/ { "sm" , 390 },
    /*0650*/ { "sn" , 392 },
    /*0651*/ { "so" , 396 },
    /*0652*/ { "sq" , 12 },
    /*0653*/ { "sr" , 402 },
    /*0654*/ { "ss" , 405 },
    /*0655*/ { "st" , 398 },
    /*0656*/ { "su" , 407 },
    /*0657*/ { "sv" , 411 },
    /*0658*/ { "sw" , 410 },
    /*0659*/ { "ta" , 416 },
    /*0660*/ { "te" , 418 },
    /*0661*/ { "tg" , 422 },
    /*0662*/ { "th" , 424 },
    /*0663*/ { "ti" , 427 },
    /*0664*/ { "tk" , 439 },
    /*0665*/ { "tl" , 423 },
    /*0666*/ { "tn" , 437 },
    /*0667*/ { "to" , 434 },
    /*0668*/ { "tr" , 442 },
    /*0669*/ { "ts" , 438 },
    /*0670*/ { "tt" , 417 },
    /*0671*/ { "tw" , 445 },
    /*0672*/ { "ty" , 414 },
    /*0673*/ { "ug" , 449 },
    /*0674*/ { "uk" , 450 },
    /*0675*/ { "ur" , 453 },
    /*0676*/ { "uz" , 454 },
    /*0677*/ { "ve" , 456 },
    /*0678*/ { "vi" , 457 },
    /*0679*/ { "vo" , 458 },
    /*0680*/ { "wa" , 466 },
    /*0681*/ { "wo" , 467 },
    /*0682*/ { "xh" , 469 },
    /*0683*/ { "yi" , 472 },
    /*0684*/ { "yo" , 473 },
    /*0685*/ { "za" , 478 },
    /*0686*/ { "zh" , 78 },
    /*0687*/ { "zu" , 480 },
    { "", 0 }  /* terminator */
};
 
static int lang_table_compare(const void *lhs, const void *rhs)
{
return strcmp(lhs, ((const LangEntry *)rhs)->str);
}
 
/**
 * Convert a language code to the target codespace.
 *
 * The source codespace is guessed by binary-searching each codespace's
 * sorted sub-table in turn; the equivalence links (next_equivalent) are
 * then followed until an entry inside the target sub-table is reached.
 *
 * @param lang             language code to convert (may be NULL)
 * @param target_codespace codespace to convert into
 * @return the equivalent code, or NULL if lang is NULL/unknown or the
 *         target codespace is invalid
 */
const char *av_convert_lang_to(const char *lang, enum AVLangCodespace target_codespace)
{
    int i;
    const LangEntry *entry = NULL;
    const int NB_CODESPACES = FF_ARRAY_ELEMS(lang_table_counts);

    /* Reject NULL input (strcmp on NULL inside bsearch would be UB) and
     * out-of-range codespaces; the unsigned cast also catches negative
     * values, which the original >= check let through. */
    if (!lang || (unsigned)target_codespace >= (unsigned)NB_CODESPACES)
        return NULL;

    /* guess the source codespace: try each sorted sub-table */
    for (i = 0; !entry && i < NB_CODESPACES; i++)
        entry = bsearch(lang,
                        lang_table + lang_table_offsets[i],
                        lang_table_counts[i],
                        sizeof(LangEntry),
                        lang_table_compare);
    if (!entry)
        return NULL;

    /* follow the equivalence links; one hop per codespace suffices */
    for (i = 0; i < NB_CODESPACES; i++)
        if (entry >= lang_table + lang_table_offsets[target_codespace] &&
            entry <  lang_table + lang_table_offsets[target_codespace] + lang_table_counts[target_codespace])
            return entry->str;
        else
            entry = lang_table + entry->next_equivalent;

    /* languages with no distinct terminologic code reuse the
     * bibliographic one */
    if (target_codespace == AV_LANG_ISO639_2_TERM)
        return av_convert_lang_to(lang, AV_LANG_ISO639_2_BIBL);

    return NULL;
}
/contrib/sdk/sources/ffmpeg/libavformat/avlanguage.h
0,0 → 1,39
/*
* Cyril Comparon, Larbi Joubala, Resonate-MP4 2009
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_AVLANGUAGE_H
#define AVFORMAT_AVLANGUAGE_H
 
/**
 * Known language codespaces
 */
enum AVLangCodespace {
    AV_LANG_ISO639_2_BIBL, /**< 3-char bibliographic language codes as per ISO-IEC 639-2 */
    AV_LANG_ISO639_2_TERM, /**< 3-char terminologic language codes as per ISO-IEC 639-2 */
    AV_LANG_ISO639_1       /**< 2-char code of language as per ISO/IEC 639-1 */
};
 
/**
 * Convert a language code to a target codespace. The source codespace is
 * guessed from the code itself.
 *
 * @param lang             language code to convert (2- or 3-char ISO 639 code)
 * @param target_codespace codespace to convert into
 * @return the equivalent code in the target codespace, or NULL if the
 *         provided lang is NULL or invalid
 */
const char *av_convert_lang_to(const char *lang, enum AVLangCodespace target_codespace);
 
#endif /* AVFORMAT_AVLANGUAGE_H */
/contrib/sdk/sources/ffmpeg/libavformat/avr.c
0,0 → 1,93
/*
* AVR demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "pcm.h"
 
/* A valid AVR file starts with the "2BIT" magic. */
static int avr_probe(AVProbeData *p)
{
    return AV_RL32(p->buf) == MKTAG('2', 'B', 'I', 'T')
           ? AVPROBE_SCORE_EXTENSION : 0;
}
 
/**
 * Parse the fixed-size AVR header and create a single PCM audio stream.
 * @return 0 on success, a negative AVERROR code on failure
 */
static int avr_read_header(AVFormatContext *s)
{
    uint16_t chan, sign, bps;
    AVStream *st;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

    avio_skip(s->pb, 4); // magic
    avio_skip(s->pb, 8); // sample_name

    /* channel field: 0 = mono, 0xFFFF = stereo; anything else is not
     * handled here */
    chan = avio_rb16(s->pb);
    if (!chan) {
        st->codec->channels = 1;
    } else if (chan == 0xFFFFu) {
        st->codec->channels = 2;
    } else {
        avpriv_request_sample(s, "chan %d", chan);
        return AVERROR_PATCHWELCOME;
    }

    st->codec->bits_per_coded_sample = bps = avio_rb16(s->pb);

    sign = avio_rb16(s->pb); /* sample signedness flag */

    avio_skip(s->pb, 2); // loop
    avio_skip(s->pb, 2); // midi
    avio_skip(s->pb, 1); // replay speed

    /* sample rate is stored as a 24-bit big-endian value */
    st->codec->sample_rate = avio_rb24(s->pb);
    avio_skip(s->pb, 4 * 3); /* remaining header fields, unused here */
    avio_skip(s->pb, 2 * 3);
    avio_skip(s->pb, 20);
    avio_skip(s->pb, 64);

    /* map (bps, big-endian, signedness) to a PCM codec id */
    st->codec->codec_id = ff_get_pcm_codec_id(bps, 0, 1, sign);
    if (st->codec->codec_id == AV_CODEC_ID_NONE) {
        avpriv_request_sample(s, "Bps %d and sign %d", bps, sign);
        return AVERROR_PATCHWELCOME;
    }

    st->codec->block_align = bps * st->codec->channels / 8;

    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    return 0;
}
 
/* Demuxer table entry; packet reading and seeking are delegated to the
 * generic raw-PCM helpers. */
AVInputFormat ff_avr_demuxer = {
    .name        = "avr",
    .long_name   = NULL_IF_CONFIG_SMALL("AVR (Audio Visual Research)"),
    .read_probe  = avr_probe,
    .read_header = avr_read_header,
    .read_packet = ff_pcm_read_packet,
    .read_seek   = ff_pcm_read_seek,
    .extensions  = "avr",
    .flags       = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/avs.c
0,0 → 1,234
/*
* AVS demuxer.
* Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "voc.h"
 
 
/* Demuxer state for the AVS container. */
typedef struct avs_format {
    VocDecContext voc;        /* audio blocks carry embedded VOC data */
    AVStream *st_video;       /* created lazily on the first video block */
    AVStream *st_audio;       /* created lazily on the first audio block */
    int width;                /* frame width from the file header */
    int height;               /* frame height from the file header */
    int bits_per_sample;
    int fps;
    int nb_frames;
    int remaining_frame_size; /* bytes left in the current frame's block list */
    int remaining_audio_size; /* bytes left in the current audio block */
} AvsFormat;
 
/* Type codes of the sub-blocks that make up each frame. */
typedef enum avs_block_type {
    AVS_NONE      = 0x00,
    AVS_VIDEO     = 0x01,
    AVS_AUDIO     = 0x02,
    AVS_PALETTE   = 0x03,
    AVS_GAME_DATA = 0x04,
} AvsBlockType;
 
/* Probe for the AVS signature bytes "wW\x10\x00". */
static int avs_probe(AVProbeData * p)
{
    const uint8_t *buf = p->buf;

    if (buf[0] != 'w' || buf[1] != 'W' || buf[2] != 0x10 || buf[3] != 0)
        return 0;

    /* Score above the plain extension score so AviSynth scripts (which
     * also use the .avs extension) are not misdetected as this format. */
    return AVPROBE_SCORE_EXTENSION + 5;
}
 
/**
 * Parse the fixed AVS file header.  Streams are created lazily in
 * avs_read_packet(), hence AVFMTCTX_NOHEADER.
 */
static int avs_read_header(AVFormatContext * s)
{
    AvsFormat *avs = s->priv_data;

    s->ctx_flags |= AVFMTCTX_NOHEADER;

    avio_skip(s->pb, 4); /* "wW\x10\x00" signature */
    avs->width = avio_rl16(s->pb);
    avs->height = avio_rl16(s->pb);
    avs->bits_per_sample = avio_rl16(s->pb);
    avs->fps = avio_rl16(s->pb);
    avs->nb_frames = avio_rl32(s->pb);
    avs->remaining_frame_size = 0;
    avs->remaining_audio_size = 0;

    avs->st_video = avs->st_audio = NULL;

    /* unexpected dimensions are only warned about, not rejected */
    if (avs->width != 318 || avs->height != 198)
        av_log(s, AV_LOG_ERROR, "This avs pretend to be %dx%d "
               "when the avs format is supposed to be 318x198 only.\n",
               avs->width, avs->height);

    return 0;
}
 
/**
 * Build a video packet.  The 4-byte sub-block header (sub_type, type,
 * size) already consumed from the stream is reconstructed at the front
 * of the payload so the decoder sees the complete block; a pending
 * palette block, if any, is prepended the same way.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int
avs_read_video_packet(AVFormatContext * s, AVPacket * pkt,
                      AvsBlockType type, int sub_type, int size,
                      uint8_t * palette, int palette_size)
{
    AvsFormat *avs = s->priv_data;
    int ret;

    ret = av_new_packet(pkt, size + palette_size);
    if (ret < 0)
        return ret;

    /* prepend the palette as a synthesized AVS_PALETTE block
     * (palette_size already includes its 4-byte header) */
    if (palette_size) {
        pkt->data[0] = 0x00;
        pkt->data[1] = 0x03;
        pkt->data[2] = palette_size & 0xFF;
        pkt->data[3] = (palette_size >> 8) & 0xFF;
        memcpy(pkt->data + 4, palette, palette_size - 4);
    }

    /* re-synthesize the video block header, then read its payload;
     * ret counts the header's 4 bytes plus the bytes actually read */
    pkt->data[palette_size + 0] = sub_type;
    pkt->data[palette_size + 1] = type;
    pkt->data[palette_size + 2] = size & 0xFF;
    pkt->data[palette_size + 3] = (size >> 8) & 0xFF;
    ret = avio_read(s->pb, pkt->data + palette_size + 4, size - 4) + 4;
    if (ret < size) {
        av_free_packet(pkt);
        return AVERROR(EIO);
    }

    pkt->size = ret + palette_size;
    pkt->stream_index = avs->st_video->index;
    /* sub_type 0 marks a keyframe */
    if (sub_type == 0)
        pkt->flags |= AV_PKT_FLAG_KEY;

    return 0;
}
 
/**
 * Pull the next chunk of embedded VOC audio into pkt.
 * @return number of bytes consumed, 0 on end of stream, or a negative
 *         AVERROR code
 */
static int avs_read_audio_packet(AVFormatContext * s, AVPacket * pkt)
{
    AvsFormat *avs = s->priv_data;
    int ret, size;

    /* measure how much of the remaining audio blob the VOC reader consumed */
    size = avio_tell(s->pb);
    ret = ff_voc_get_packet(s, pkt, avs->st_audio, avs->remaining_audio_size);
    size = avio_tell(s->pb) - size;
    avs->remaining_audio_size -= size;

    if (ret == AVERROR(EIO))
        return 0; /* this indicates EOS */
    if (ret < 0)
        return ret;

    pkt->stream_index = avs->st_audio->index;
    pkt->flags |= AV_PKT_FLAG_KEY;

    return size;
}
 
/**
 * Read the next packet.  Walks the per-frame list of sub-blocks
 * (palette / video / audio / other), creating streams on first use.
 * Palette blocks are buffered and attached to the next video packet.
 *
 * @return 0 on success, a negative AVERROR code on failure or EOF
 */
static int avs_read_packet(AVFormatContext * s, AVPacket * pkt)
{
    AvsFormat *avs = s->priv_data;
    int sub_type = 0, size = 0;
    AvsBlockType type = AVS_NONE;
    int palette_size = 0;
    uint8_t palette[4 + 3 * 256];
    int ret;

    /* flush any audio left over from a previous block first */
    if (avs->remaining_audio_size > 0)
        if (avs_read_audio_packet(s, pkt) > 0)
            return 0;

    while (1) {
        /* start the next frame: a zero word marks end of file */
        if (avs->remaining_frame_size <= 0) {
            if (!avio_rl16(s->pb)) /* found EOF */
                return AVERROR(EIO);
            avs->remaining_frame_size = avio_rl16(s->pb) - 4;
        }

        /* iterate over this frame's sub-blocks; each starts with a
         * 4-byte header (sub_type, type, 16-bit size incl. header) */
        while (avs->remaining_frame_size > 0) {
            sub_type = avio_r8(s->pb);
            type = avio_r8(s->pb);
            size = avio_rl16(s->pb);
            if (size < 4)
                return AVERROR_INVALIDDATA;
            avs->remaining_frame_size -= size;

            switch (type) {
            case AVS_PALETTE:
                /* buffer the palette; it is emitted with the next video
                 * packet (palette_size keeps the 4 header bytes) */
                if (size - 4 > sizeof(palette))
                    return AVERROR_INVALIDDATA;
                ret = avio_read(s->pb, palette, size - 4);
                if (ret < size - 4)
                    return AVERROR(EIO);
                palette_size = size;
                break;

            case AVS_VIDEO:
                if (!avs->st_video) {
                    avs->st_video = avformat_new_stream(s, NULL);
                    if (avs->st_video == NULL)
                        return AVERROR(ENOMEM);
                    avs->st_video->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                    avs->st_video->codec->codec_id = AV_CODEC_ID_AVS;
                    avs->st_video->codec->width = avs->width;
                    avs->st_video->codec->height = avs->height;
                    avs->st_video->codec->bits_per_coded_sample=avs->bits_per_sample;
                    avs->st_video->nb_frames = avs->nb_frames;
#if FF_API_R_FRAME_RATE
                    avs->st_video->r_frame_rate =
#endif
                    avs->st_video->avg_frame_rate = (AVRational){avs->fps, 1};
                }
                return avs_read_video_packet(s, pkt, type, sub_type, size,
                                             palette, palette_size);

            case AVS_AUDIO:
                if (!avs->st_audio) {
                    avs->st_audio = avformat_new_stream(s, NULL);
                    if (avs->st_audio == NULL)
                        return AVERROR(ENOMEM);
                    avs->st_audio->codec->codec_type = AVMEDIA_TYPE_AUDIO;
                }
                /* hand the block body to the VOC reader; a 0 return
                 * means no packet yet, so keep scanning */
                avs->remaining_audio_size = size - 4;
                size = avs_read_audio_packet(s, pkt);
                if (size != 0)
                    return size;
                break;

            default:
                /* unknown block type (e.g. game data): skip its body */
                avio_skip(s->pb, size - 4);
            }
        }
    }
}
 
/* Nothing beyond libavformat's own bookkeeping is allocated, so closing
 * is a no-op. */
static int avs_read_close(AVFormatContext * s)
{
    return 0;
}
 
/* Demuxer table entry for the AVS container. */
AVInputFormat ff_avs_demuxer = {
    .name           = "avs",
    .long_name      = NULL_IF_CONFIG_SMALL("AVS"),
    .priv_data_size = sizeof(AvsFormat),
    .read_probe     = avs_probe,
    .read_header    = avs_read_header,
    .read_packet    = avs_read_packet,
    .read_close     = avs_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/bethsoftvid.c
0,0 → 1,296
/*
* Bethsoft VID format Demuxer
* Copyright (c) 2007 Nicholas Tung
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief Bethesda Softworks VID (.vid) file demuxer
* @author Nicholas Tung [ntung (at. ntung com] (2007-03)
* @see http://wiki.multimedia.cx/index.php?title=Bethsoft_VID
* @see http://www.svatopluk.com/andux/docs/dfvid.html
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "libavcodec/bethsoftvideo.h"
 
#define BVID_PALETTE_SIZE 3 * 256
 
#define DEFAULT_SAMPLE_RATE 11111
 
/* Demuxer state for Bethesda Softworks VID files. */
typedef struct BVID_DemuxContext
{
    int nframes;              /**< number of frames still to be read */
    int sample_rate;          /**< audio sample rate */
    int width;                /**< video width */
    int height;               /**< video height */
    /** delay value between frames, added to individual frame delay.
     * custom units, which will be added to other custom units (~=16ms according
     * to free, unofficial documentation) */
    int bethsoft_global_delay;
    int video_index;          /**< video stream index (-1 until first video packet) */
    int audio_index;          /**< audio stream index (-1 until first audio packet) */
    uint8_t *palette;         /**< pending palette to attach as side data, or NULL */

    int is_finished;          /**< nonzero once demuxing is done; makes read_packet return EOF */

} BVID_DemuxContext;
 
static int vid_probe(AVProbeData *p)
{
    // little-endian VID tag, file starts with "VID\0"
    return AV_RL32(p->buf) == MKTAG('V', 'I', 'D', 0)
           ? AVPROBE_SCORE_MAX : 0;
}
 
/**
 * Parse the fixed VID main header.  Streams are created lazily when the
 * first packet of each kind is seen (AVFMTCTX_NOHEADER).
 */
static int vid_read_header(AVFormatContext *s)
{
    BVID_DemuxContext *vid = s->priv_data;
    AVIOContext *pb = s->pb;

    /* load main header. Contents:
     * bytes: 'V' 'I' 'D'
     * int16s: always_512, nframes, width, height, delay, always_14
     */
    avio_skip(pb, 5);
    vid->nframes = avio_rl16(pb);
    vid->width   = avio_rl16(pb);
    vid->height  = avio_rl16(pb);
    vid->bethsoft_global_delay = avio_rl16(pb);
    avio_rl16(pb); /* always_14 field, discarded */

    // wait until the first packet to create each stream
    vid->video_index = -1;
    vid->audio_index = -1;
    vid->sample_rate = DEFAULT_SAMPLE_RATE;
    s->ctx_flags |= AVFMTCTX_NOHEADER;

    return 0;
}
 
#define BUFFER_PADDING_SIZE 1000
/**
 * Read one video frame chunk into a packet.
 *
 * Creates the video stream on first use, then copies the RLE-coded
 * frame data (re-prefixed with its block-type byte) into pkt.  A
 * pending palette, if any, is attached as packet side data.
 *
 * Fix vs. the previous version: av_fast_realloc() was called as
 * `vidbuf_start = av_fast_realloc(vidbuf_start, ...)`, so on failure
 * (NULL return, old buffer left allocated) the only pointer to the
 * buffer was lost and the early return skipped the cleanup — a memory
 * leak on OOM.  A temporary pointer and `goto fail` are used instead.
 *
 * @param vid        demuxer context
 * @param pb         byte stream to read from
 * @param pkt        packet to fill
 * @param block_type frame-type byte already consumed from the stream
 * @param s          format context owning the streams
 * @return 0 on success, a negative AVERROR code on failure
 */
static int read_frame(BVID_DemuxContext *vid, AVIOContext *pb, AVPacket *pkt,
                      uint8_t block_type, AVFormatContext *s)
{
    uint8_t *vidbuf_start = NULL;
    int vidbuf_nbytes = 0;
    int code;
    int bytes_copied = 0;
    int position, duration, npixels;
    unsigned int vidbuf_capacity;
    int ret = 0;
    AVStream *st;

    if (vid->video_index < 0) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        vid->video_index = st->index;
        if (vid->audio_index < 0) {
            avpriv_request_sample(s, "Using default video time base since "
                                  "having no audio packet before the first "
                                  "video packet");
        }
        avpriv_set_pts_info(st, 64, 185, vid->sample_rate);
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = AV_CODEC_ID_BETHSOFTVID;
        st->codec->width      = vid->width;
        st->codec->height     = vid->height;
    }
    st = s->streams[vid->video_index];
    npixels = st->codec->width * st->codec->height;

    vidbuf_start = av_malloc(vidbuf_capacity = BUFFER_PADDING_SIZE);
    if (!vidbuf_start)
        return AVERROR(ENOMEM);

    // save the file position for the packet, include block type
    position = avio_tell(pb) - 1;

    vidbuf_start[vidbuf_nbytes++] = block_type;

    // get the current packet duration
    duration = vid->bethsoft_global_delay + avio_rl16(pb);

    // set the y offset if it exists (decoder header data should be in data section)
    if (block_type == VIDEO_YOFF_P_FRAME) {
        if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], 2) != 2) {
            ret = AVERROR(EIO);
            goto fail;
        }
        vidbuf_nbytes += 2;
    }

    do {
        uint8_t *tmp;

        /* grow via a temporary so the buffer is not leaked when
         * av_fast_realloc() fails (it returns NULL and leaves the old
         * allocation untouched) */
        tmp = av_fast_realloc(vidbuf_start, &vidbuf_capacity,
                              vidbuf_nbytes + BUFFER_PADDING_SIZE);
        if (!tmp) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        vidbuf_start = tmp;

        code = avio_r8(pb);
        vidbuf_start[vidbuf_nbytes++] = code;

        if (code >= 0x80) { // rle sequence
            if (block_type == VIDEO_I_FRAME)
                vidbuf_start[vidbuf_nbytes++] = avio_r8(pb);
        } else if (code) { // plain sequence
            if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], code) != code) {
                ret = AVERROR(EIO);
                goto fail;
            }
            vidbuf_nbytes += code;
        }
        bytes_copied += code & 0x7F;
        if (bytes_copied == npixels) { // sometimes no stop character is given, need to keep track of bytes copied
            // may contain a 0 byte even if read all pixels
            if (avio_r8(pb))
                avio_seek(pb, -1, SEEK_CUR);
            break;
        }
        if (bytes_copied > npixels) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
    } while (code);

    // copy data into packet
    if ((ret = av_new_packet(pkt, vidbuf_nbytes)) < 0)
        goto fail;
    memcpy(pkt->data, vidbuf_start, vidbuf_nbytes);
    av_free(vidbuf_start);

    pkt->pos = position;
    pkt->stream_index = vid->video_index;
    pkt->duration = duration;
    if (block_type == VIDEO_I_FRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;

    /* if there is a new palette available, add it to packet side data */
    if (vid->palette) {
        uint8_t *pdata = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                                 BVID_PALETTE_SIZE);
        if (pdata)
            memcpy(pdata, vid->palette, BVID_PALETTE_SIZE);
        av_freep(&vid->palette);
    }

    vid->nframes--; // used to check if all the frames were read
    return 0;
fail:
    av_free(vidbuf_start);
    return ret;
}
 
static int vid_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    BVID_DemuxContext *vid = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char block_type;
    int audio_length;
    int ret_value;

    if (vid->is_finished || url_feof(pb))
        return AVERROR_EOF;

    block_type = avio_r8(pb);
    switch (block_type) {
    case PALETTE_BLOCK:
        /* stash the palette; it is attached as side data to the next
         * video packet by read_frame() */
        if (vid->palette) {
            av_log(s, AV_LOG_WARNING, "discarding unused palette\n");
            av_freep(&vid->palette);
        }
        vid->palette = av_malloc(BVID_PALETTE_SIZE);
        if (!vid->palette)
            return AVERROR(ENOMEM);
        if (avio_read(pb, vid->palette, BVID_PALETTE_SIZE) != BVID_PALETTE_SIZE) {
            av_freep(&vid->palette);
            return AVERROR(EIO);
        }
        /* a palette block produces no packet of its own: recurse to
         * deliver the following block instead */
        return vid_read_packet(s, pkt);

    case FIRST_AUDIO_BLOCK:
        avio_rl16(pb);   /* skipped header field */
        // soundblaster DAC used for sample rate, as on specification page (link above)
        vid->sample_rate = 1000000 / (256 - avio_r8(pb));
        /* fall through: the first audio block carries audio data too */
    case AUDIO_BLOCK:
        if (vid->audio_index < 0) {
            /* lazily create the audio stream on the first audio block */
            AVStream *st = avformat_new_stream(s, NULL);
            if (!st)
                return AVERROR(ENOMEM);
            vid->audio_index = st->index;
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_id   = AV_CODEC_ID_PCM_U8;
            st->codec->channels   = 1;
            st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            st->codec->bits_per_coded_sample = 8;
            st->codec->sample_rate = vid->sample_rate;
            st->codec->bit_rate = 8 * st->codec->sample_rate;
            st->start_time = 0;
            avpriv_set_pts_info(st, 64, 1, vid->sample_rate);
        }
        audio_length = avio_rl16(pb);
        if ((ret_value = av_get_packet(pb, pkt, audio_length)) != audio_length) {
            if (ret_value < 0)
                return ret_value;
            av_log(s, AV_LOG_ERROR, "incomplete audio block\n");
            return AVERROR(EIO);
        }
        pkt->stream_index = vid->audio_index;
        pkt->duration     = audio_length;   /* 1 byte == 1 sample (PCM u8 mono) */
        pkt->flags |= AV_PKT_FLAG_KEY;
        return 0;

    case VIDEO_P_FRAME:
    case VIDEO_YOFF_P_FRAME:
    case VIDEO_I_FRAME:
        return read_frame(vid, pb, pkt, block_type, s);

    case EOF_BLOCK:
        if (vid->nframes != 0)
            av_log(s, AV_LOG_VERBOSE, "reached terminating character but not all frames read.\n");
        vid->is_finished = 1;   /* subsequent calls return AVERROR_EOF */
        return AVERROR(EIO);
    default:
        av_log(s, AV_LOG_ERROR, "unknown block (character = %c, decimal = %d, hex = %x)!!!\n",
               block_type, block_type, block_type);
        return AVERROR_INVALIDDATA;
    }
}
 
static int vid_read_close(AVFormatContext *s)
{
    BVID_DemuxContext *ctx = s->priv_data;

    /* release a palette that was read but never attached to a packet */
    av_freep(&ctx->palette);

    return 0;
}
 
/* Demuxer registration; streams are created lazily during demuxing
 * (AVFMTCTX_NOHEADER is set in vid_read_header()). */
AVInputFormat ff_bethsoftvid_demuxer = {
    .name           = "bethsoftvid",
    .long_name      = NULL_IF_CONFIG_SMALL("Bethesda Softworks VID"),
    .priv_data_size = sizeof(BVID_DemuxContext),
    .read_probe     = vid_probe,
    .read_header    = vid_read_header,
    .read_packet    = vid_read_packet,
    .read_close     = vid_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/bfi.c
0,0 → 1,180
/*
* Brute Force & Ignorance (BFI) demuxer
* Copyright (c) 2008 Sisir Koppaka
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief Brute Force & Ignorance (.bfi) file demuxer
* @author Sisir Koppaka ( sisir.koppaka at gmail dot com )
* @see http://wiki.multimedia.cx/index.php?title=BFI
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
/** Private demuxing state for the BFI format. */
typedef struct BFIContext {
    int nframes;     ///< frames left to demux (from the file header)
    int audio_frame; ///< running audio PTS (incremented by bytes read)
    int video_frame; ///< running video PTS
    int video_size;  ///< size of the video part of the current chunk
    int avflag;      ///< 0: read a new chunk + audio next, 1: read video next
} BFIContext;
 
static int bfi_probe(AVProbeData * p)
{
    /* A BFI file begins with the magic bytes "BF&I". */
    return AV_RL32(p->buf) == MKTAG('B', 'F', '&', 'I') ? AVPROBE_SCORE_MAX : 0;
}
 
/**
 * Parse the BFI file header: frame count, frame rate, video dimensions,
 * the 768-byte palette (stored as codec extradata) and the audio sample
 * rate. Creates one video and one audio stream.
 */
static int bfi_read_header(AVFormatContext * s)
{
    BFIContext *bfi = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *vstream;
    AVStream *astream;
    int fps, chunk_header;

    /* Initialize the video codec... */
    vstream = avformat_new_stream(s, NULL);
    if (!vstream)
        return AVERROR(ENOMEM);

    /* Initialize the audio codec... */
    astream = avformat_new_stream(s, NULL);
    if (!astream)
        return AVERROR(ENOMEM);

    /* Set the total number of frames. */
    avio_skip(pb, 8);
    chunk_header = avio_rl32(pb);    /* offset of the first chunk */
    bfi->nframes = avio_rl32(pb);
    avio_rl32(pb);
    avio_rl32(pb);
    avio_rl32(pb);
    fps = avio_rl32(pb);
    avio_skip(pb, 12);
    vstream->codec->width  = avio_rl32(pb);
    vstream->codec->height = avio_rl32(pb);

    /* Reject a non-positive frame rate before it reaches
     * avpriv_set_pts_info(), which requires a positive timebase. */
    if (fps <= 0) {
        av_log(s, AV_LOG_ERROR, "Invalid frame rate %d\n", fps);
        return AVERROR_INVALIDDATA;
    }

    /* Load the palette to extradata */
    avio_skip(pb, 8);
    vstream->codec->extradata = av_malloc(768);
    if (!vstream->codec->extradata)
        return AVERROR(ENOMEM);
    vstream->codec->extradata_size = 768;
    /* a truncated palette is a hard error instead of silent garbage */
    if (avio_read(pb, vstream->codec->extradata,
                  vstream->codec->extradata_size) !=
        vstream->codec->extradata_size)
        return AVERROR(EIO);

    astream->codec->sample_rate = avio_rl32(pb);

    /* Set up the video codec... */
    avpriv_set_pts_info(vstream, 32, 1, fps);
    vstream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vstream->codec->codec_id   = AV_CODEC_ID_BFI;
    vstream->codec->pix_fmt    = AV_PIX_FMT_PAL8;
    vstream->nb_frames =
    vstream->duration  = bfi->nframes;

    /* Set up the audio codec now... */
    astream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    astream->codec->codec_id   = AV_CODEC_ID_PCM_U8;
    astream->codec->channels   = 1;
    astream->codec->channel_layout = AV_CH_LAYOUT_MONO;
    astream->codec->bits_per_coded_sample = 8;
    astream->codec->bit_rate =
        astream->codec->sample_rate * astream->codec->bits_per_coded_sample;
    /* position just before the first "SAVI" chunk marker */
    avio_seek(pb, chunk_header - 3, SEEK_SET);
    avpriv_set_pts_info(astream, 64, 1, astream->codec->sample_rate);
    return 0;
}
 
 
static int bfi_read_packet(AVFormatContext * s, AVPacket * pkt)
{
    BFIContext *bfi = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret, audio_offset, video_offset, chunk_size, audio_size = 0;

    if (bfi->nframes == 0 || url_feof(pb)) {
        return AVERROR_EOF;
    }

    /* If all previous chunks were completely read, then find a new one... */
    if (!bfi->avflag) {
        uint32_t state = 0;
        /* scan forward byte-by-byte for the "SAVI" chunk marker */
        while (state != MKTAG('S','A','V','I')) {
            if (url_feof(pb))
                return AVERROR(EIO);
            state = 256 * state + avio_r8(pb);
        }
        /* Now that the chunk's location is confirmed, we proceed... */
        chunk_size   = avio_rl32(pb);
        avio_rl32(pb);
        audio_offset = avio_rl32(pb);
        avio_rl32(pb);
        video_offset = avio_rl32(pb);
        /* audio lies between the two offsets, video fills the rest */
        audio_size      = video_offset - audio_offset;
        bfi->video_size = chunk_size - video_offset;
        if (audio_size < 0 || bfi->video_size < 0) {
            av_log(s, AV_LOG_ERROR, "Invalid audio/video offsets or chunk size\n");
            return AVERROR_INVALIDDATA;
        }

        //Tossing an audio packet at the audio decoder.
        ret = av_get_packet(pb, pkt, audio_size);
        if (ret < 0)
            return ret;

        /* audio PTS counts samples: 1 byte per 8-bit mono sample */
        pkt->pts = bfi->audio_frame;
        bfi->audio_frame += ret;
    } else if (bfi->video_size > 0) {

        //Tossing a video packet at the video decoder.
        ret = av_get_packet(pb, pkt, bfi->video_size);
        if (ret < 0)
            return ret;

        pkt->pts = bfi->video_frame;
        pkt->pts = bfi->video_frame;
        bfi->video_frame += ret / bfi->video_size;

        /* One less frame to read. A cursory decrement. */
        bfi->nframes--;
    } else {
        /* Empty video packet */
        ret = AVERROR(EAGAIN);
    }

    /* avflag alternates: after audio -> stream 1, after video -> stream 0 */
    bfi->avflag = !bfi->avflag;
    pkt->stream_index = bfi->avflag;
    return ret;
}
 
/* Demuxer registration for Brute Force &amp; Ignorance files. */
AVInputFormat ff_bfi_demuxer = {
    .name           = "bfi",
    .long_name      = NULL_IF_CONFIG_SMALL("Brute Force & Ignorance"),
    .priv_data_size = sizeof(BFIContext),
    .read_probe     = bfi_probe,
    .read_header    = bfi_read_header,
    .read_packet    = bfi_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/bink.c
0,0 → 1,283
/*
* Bink demuxer
* Copyright (c) 2008-2010 Peter Ross (pross@xvid.org)
* Copyright (c) 2009 Daniel Verkamp (daniel@drv.nu)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Bink demuxer
*
* Technical details here:
* http://wiki.multimedia.cx/index.php?title=Bink_Container
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
enum BinkAudFlags {
    BINK_AUD_16BITS = 0x4000, ///< prefer 16-bit output
    BINK_AUD_STEREO = 0x2000, ///< two channels
    BINK_AUD_USEDCT = 0x1000, ///< DCT-based audio codec (otherwise RDFT)
};

#define BINK_EXTRADATA_SIZE 1
#define BINK_MAX_AUDIO_TRACKS 256   /* sanity limit for the header field */
#define BINK_MAX_WIDTH 7680         /* sanity limits used by probe() */
#define BINK_MAX_HEIGHT 4800

/** Private demuxing state for the Bink container. */
typedef struct {
    uint32_t file_size;             ///< total file size (header value + 8)

    uint32_t num_audio_tracks;
    int current_track;      ///< audio track to return in next packet
    int64_t video_pts;      ///< next video frame number
    int64_t audio_pts[BINK_MAX_AUDIO_TRACKS];   ///< per-track sample position

    uint32_t remain_packet_size;    ///< bytes left in the current frame
} BinkDemuxContext;
 
static int probe(AVProbeData *p)
{
    const uint8_t *b  = p->buf;
    uint32_t frames   = AV_RL32(b + 8);
    uint32_t width    = AV_RL32(b + 20);
    uint32_t height   = AV_RL32(b + 24);
    uint32_t fps_num  = AV_RL32(b + 28);
    uint32_t fps_den  = AV_RL32(b + 32);

    /* "BIK" signature followed by a known revision letter */
    if (b[0] != 'B' || b[1] != 'I' || b[2] != 'K')
        return 0;
    if (b[3] != 'b' && b[3] != 'f' && b[3] != 'g' && b[3] != 'h' && b[3] != 'i')
        return 0;
    /* sane frame count, dimensions and frame rate */
    if (!frames || !fps_num || !fps_den)
        return 0;
    if (!width  || width  > BINK_MAX_WIDTH ||
        !height || height > BINK_MAX_HEIGHT)
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
static int read_header(AVFormatContext *s)
{
    BinkDemuxContext *bink = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t fps_num, fps_den;
    AVStream *vst, *ast;
    unsigned int i;
    uint32_t pos, next_pos;
    uint16_t flags;
    int keyframe;

    vst = avformat_new_stream(s, NULL);
    if (!vst)
        return AVERROR(ENOMEM);

    /* the 'BIKx' signature doubles as the video codec tag */
    vst->codec->codec_tag = avio_rl32(pb);

    /* the stored size excludes the 8-byte signature+size header */
    bink->file_size = avio_rl32(pb) + 8;
    vst->duration   = avio_rl32(pb);

    if (vst->duration > 1000000) {
        av_log(s, AV_LOG_ERROR, "invalid header: more than 1000000 frames\n");
        return AVERROR(EIO);
    }

    if (avio_rl32(pb) > bink->file_size) {
        av_log(s, AV_LOG_ERROR,
               "invalid header: largest frame size greater than file size\n");
        return AVERROR(EIO);
    }

    avio_skip(pb, 4);   /* unused header field */

    vst->codec->width  = avio_rl32(pb);
    vst->codec->height = avio_rl32(pb);

    fps_num = avio_rl32(pb);
    fps_den = avio_rl32(pb);
    if (fps_num == 0 || fps_den == 0) {
        av_log(s, AV_LOG_ERROR, "invalid header: invalid fps (%d/%d)\n", fps_num, fps_den);
        return AVERROR(EIO);
    }
    avpriv_set_pts_info(vst, 64, fps_den, fps_num);
    vst->avg_frame_rate = av_inv_q(vst->time_base);

    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = AV_CODEC_ID_BINKVIDEO;
    /* 4 bytes of video flags are forwarded to the decoder as extradata */
    if (ff_alloc_extradata(vst->codec, 4))
        return AVERROR(ENOMEM);
    avio_read(pb, vst->codec->extradata, 4);

    bink->num_audio_tracks = avio_rl32(pb);

    if (bink->num_audio_tracks > BINK_MAX_AUDIO_TRACKS) {
        av_log(s, AV_LOG_ERROR,
               "invalid header: more than "AV_STRINGIFY(BINK_MAX_AUDIO_TRACKS)" audio tracks (%d)\n",
               bink->num_audio_tracks);
        return AVERROR(EIO);
    }

    if (bink->num_audio_tracks) {
        avio_skip(pb, 4 * bink->num_audio_tracks);  /* per-track values, unused here */

        for (i = 0; i < bink->num_audio_tracks; i++) {
            ast = avformat_new_stream(s, NULL);
            if (!ast)
                return AVERROR(ENOMEM);
            ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
            ast->codec->codec_tag   = 0;
            ast->codec->sample_rate = avio_rl16(pb);
            /* NOTE(review): a zero sample_rate from a corrupt file reaches
             * avpriv_set_pts_info() unchecked - confirm it rejects den == 0 */
            avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
            flags = avio_rl16(pb);
            ast->codec->codec_id = flags & BINK_AUD_USEDCT ?
                                   AV_CODEC_ID_BINKAUDIO_DCT : AV_CODEC_ID_BINKAUDIO_RDFT;
            if (flags & BINK_AUD_STEREO) {
                ast->codec->channels       = 2;
                ast->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            } else {
                ast->codec->channels       = 1;
                ast->codec->channel_layout = AV_CH_LAYOUT_MONO;
            }
            /* the audio decoder needs the container revision tag */
            if (ff_alloc_extradata(ast->codec, 4))
                return AVERROR(ENOMEM);
            AV_WL32(ast->codec->extradata, vst->codec->codec_tag);
        }

        for (i = 0; i < bink->num_audio_tracks; i++)
            s->streams[i + 1]->id = avio_rl32(pb);   /* track IDs */
    }

    /* frame index table: byte offsets with bit 0 flagging keyframes */
    next_pos = avio_rl32(pb);
    for (i = 0; i < vst->duration; i++) {
        pos = next_pos;
        if (i == vst->duration - 1) {
            next_pos = bink->file_size;
            keyframe = 0;
        } else {
            next_pos = avio_rl32(pb);
            keyframe = pos & 1;
        }
        pos      &= ~1;
        next_pos &= ~1;

        if (next_pos <= pos) {
            av_log(s, AV_LOG_ERROR, "invalid frame index table\n");
            return AVERROR(EIO);
        }
        /* the frame size is the distance to the next table entry */
        av_add_index_entry(vst, pos, i, next_pos - pos, 0,
                           keyframe ? AVINDEX_KEYFRAME : 0);
    }

    avio_skip(pb, 4);

    bink->current_track = -1;   /* first read_packet() starts a new frame */
    return 0;
}
 
/**
 * Return the next packet of the current container frame. Each frame
 * holds one length-prefixed sub-packet per audio track followed by the
 * video data; audio sub-packets are returned first, then the video.
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    BinkDemuxContext *bink = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret;

    if (bink->current_track < 0) {
        /* start of a new container frame: look up its size in the index */
        int index_entry;
        AVStream *st = s->streams[0]; // stream 0 is video stream with index

        if (bink->video_pts >= st->duration)
            return AVERROR_EOF;

        index_entry = av_index_search_timestamp(st, bink->video_pts,
                                                AVSEEK_FLAG_ANY);
        if (index_entry < 0) {
            av_log(s, AV_LOG_ERROR,
                   "could not find index entry for frame %"PRId64"\n",
                   bink->video_pts);
            return AVERROR(EIO);
        }

        bink->remain_packet_size = st->index_entries[index_entry].size;
        bink->current_track = 0;
    }

    while (bink->current_track < bink->num_audio_tracks) {
        uint32_t audio_size = avio_rl32(pb);
        /* remain_packet_size is unsigned: make sure it can hold the 4-byte
         * length field first, otherwise "remain - 4" wraps around and the
         * size check below is bypassed. */
        if (bink->remain_packet_size < 4 ||
            audio_size > bink->remain_packet_size - 4) {
            av_log(s, AV_LOG_ERROR,
                   "frame %"PRId64": audio size in header (%u) > size of packet left (%u)\n",
                   bink->video_pts, audio_size, bink->remain_packet_size);
            return AVERROR(EIO);
        }
        bink->remain_packet_size -= 4 + audio_size;
        bink->current_track++;
        if (audio_size >= 4) {
            /* get one audio packet per track */
            if ((ret = av_get_packet(pb, pkt, audio_size)) < 0)
                return ret;
            pkt->stream_index = bink->current_track;
            pkt->pts = bink->audio_pts[bink->current_track - 1];

            /* Each audio packet reports the number of decompressed samples
               (in bytes). We use this value to calculate the audio PTS */
            if (pkt->size >= 4)
                bink->audio_pts[bink->current_track -1] +=
                    AV_RL32(pkt->data) / (2 * s->streams[bink->current_track]->codec->channels);
            return 0;
        } else {
            avio_skip(pb, audio_size);
        }
    }

    /* get video packet */
    if ((ret = av_get_packet(pb, pkt, bink->remain_packet_size)) < 0)
        return ret;
    pkt->stream_index = 0;
    pkt->pts = bink->video_pts++;
    pkt->flags |= AV_PKT_FLAG_KEY;

    /* -1 instructs the next call to read_packet() to read the next frame */
    bink->current_track = -1;

    return 0;
}
 
static int read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    BinkDemuxContext *bink = s->priv_data;
    AVStream *vst = s->streams[0];

    if (!s->pb->seekable)
        return -1;

    /* only rewinding to the very first frame is supported */
    if (avio_seek(s->pb, vst->index_entries[0].pos, SEEK_SET) < 0)
        return -1;

    /* reset all demuxing state to the start of the stream */
    memset(bink->audio_pts, 0, sizeof(bink->audio_pts));
    bink->video_pts     = 0;
    bink->current_track = -1;
    return 0;
}
 
/* Demuxer registration for the Bink container. */
AVInputFormat ff_bink_demuxer = {
    .name           = "bink",
    .long_name      = NULL_IF_CONFIG_SMALL("Bink"),
    .priv_data_size = sizeof(BinkDemuxContext),
    .read_probe     = probe,
    .read_header    = read_header,
    .read_packet    = read_packet,
    .read_seek      = read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/bintext.c
0,0 → 1,388
/*
* Binary text demuxer
* eXtended BINary text (XBIN) demuxer
* Artworx Data Format demuxer
* iCEDraw File demuxer
* Copyright (c) 2010 Peter Ross <pross@xvid.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Binary text demuxer
* eXtended BINary text (XBIN) demuxer
* Artworx Data Format demuxer
* iCEDraw File demuxer
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avformat.h"
#include "internal.h"
#include "sauce.h"
#include "libavcodec/bintext.h"
 
/** Private context shared by the bin/xbin/adf/idf text demuxers. */
typedef struct {
    const AVClass *class;
    int chars_per_frame; /**< characters to send decoder per frame;
                              set by private options as characters per second, and then
                              converted to characters per frame at runtime */
    int width, height;    /**< video size (WxH pixels) (private option) */
    AVRational framerate; /**< frames per second (private option) */
    uint64_t fsize;       /**< file size less metadata buffer */
} BinDemuxContext;
 
/**
 * Create the single video stream used by all the text demuxers and apply
 * the private options. Returns NULL on allocation failure.
 */
static AVStream * init_stream(AVFormatContext *s)
{
    BinDemuxContext *bin = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return NULL;
    st->codec->codec_tag  = 0;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;

    if (!bin->width) {
        /* default: 80x25 characters at 8x16 pixels per glyph */
        st->codec->width  = (80<<3);
        st->codec->height = (25<<4);
    }

    avpriv_set_pts_info(st, 60, bin->framerate.den, bin->framerate.num);

    /* simulate tty display speed: convert the "characters per second"
     * option into characters per frame using the stream timebase */
    bin->chars_per_frame = av_clip(av_q2d(st->time_base) * bin->chars_per_frame, 1, INT_MAX);

    return st;
}
 
#if CONFIG_BINTEXT_DEMUXER | CONFIG_ADF_DEMUXER | CONFIG_IDF_DEMUXER
/**
* Given filesize and width, calculate height (assume font_height of 16)
*/
static void calculate_height(AVCodecContext *avctx, uint64_t fsize)
{
    /* each 8-pixel-wide column takes 2 bytes per text row; each text row
     * is 16 pixels tall (assumed font height) */
    uint64_t bytes_per_row = (avctx->width >> 3) * 2;

    avctx->height = (fsize / bytes_per_row) << 4;
}
#endif
 
#if CONFIG_BINTEXT_DEMUXER
/* Magic sequence (0x1A EOF byte, ANSI escape, "NEXT") that next_tag_read()
 * looks for 256 bytes before the end of the file. */
static const uint8_t next_magic[]={
    0x1A, 0x1B, '[', '0', ';', '3', '0', ';', '4', '0', 'm', 'N', 'E', 'X', 'T', 0x00
};
 
/**
 * Parse the 256-byte trailer introduced by next_magic and move its fields
 * into the metadata dictionary. On success the trailer size is subtracted
 * from *fsize so it is not demuxed as image data.
 * @return 0 on success, -1 if the tag is absent or malformed
 */
static int next_tag_read(AVFormatContext *avctx, uint64_t *fsize)
{
    AVIOContext *pb = avctx->pb;
    char buf[36];
    int len;
    uint64_t start_pos = avio_size(pb) - 256;   /* trailer is a fixed 256 bytes */

    avio_seek(pb, start_pos, SEEK_SET);
    if (avio_read(pb, buf, sizeof(next_magic)) != sizeof(next_magic))
        return -1;
    if (memcmp(buf, next_magic, sizeof(next_magic)))
        return -1;
    if (avio_r8(pb) != 0x01)    /* presumably a version byte - TODO confirm */
        return -1;

    *fsize -= 256;

/* Read one length-prefixed field of at most `size` chars into the metadata
 * dictionary under `name`; the full `size` bytes are always consumed from
 * the file so the following fields stay aligned. */
#define GET_EFI2_META(name,size) \
    len = avio_r8(pb); \
    if (len < 1 || len > size) \
        return -1; \
    if (avio_read(pb, buf, size) == size && *buf) { \
        buf[len] = 0; \
        av_dict_set(&avctx->metadata, name, buf, 0); \
    }

    GET_EFI2_META("filename",  12)
    GET_EFI2_META("author",    20)
    GET_EFI2_META("publisher", 20)
    GET_EFI2_META("title",     35)

    return 0;
}
 
static void predict_width(AVCodecContext *avctx, uint64_t fsize, int got_width)
{
    /* no explicit width available: guess 160 columns for larger files,
     * 80 columns otherwise (8 pixels per column) */
    if (got_width)
        return;
    avctx->width = (fsize > 4000 ? 160 : 80) << 3;
}
 
static int bintext_read_header(AVFormatContext *s)
{
    BinDemuxContext *bin = s->priv_data;
    AVIOContext *pb = s->pb;

    AVStream *st = init_stream(s);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_id = AV_CODEC_ID_BINTEXT;

    /* extradata layout: [font height = 16][flags = 0] */
    if (ff_alloc_extradata(st->codec, 2))
        return AVERROR(ENOMEM);
    st->codec->extradata[0] = 16;
    st->codec->extradata[1] = 0;

    if (pb->seekable) {
        int got_width = 0;
        bin->fsize = avio_size(pb);
        /* strip SAUCE metadata if present, otherwise try the EFI2 "NEXT" tag */
        if (ff_sauce_read(s, &bin->fsize, &got_width, 0) < 0)
            next_tag_read(s, &bin->fsize);
        if (!bin->width) {
            /* no explicit size: guess the width, derive height from file size */
            predict_width(st->codec, bin->fsize, got_width);
            calculate_height(st->codec, bin->fsize);
        }
        avio_seek(pb, 0, SEEK_SET);
    }
    return 0;
}
#endif /* CONFIG_BINTEXT_DEMUXER */
 
#if CONFIG_XBIN_DEMUXER
static int xbin_probe(AVProbeData *p)
{
    const uint8_t *d = p->buf;
    int width      = AV_RL16(d + 5);
    int fontheight = d[9];

    /* "XBIN" signature, 0x1A byte, plausible width (1..160 columns)
     * and font height (1..32 scan lines) */
    if (AV_RL32(d) != MKTAG('X','B','I','N') || d[4] != 0x1A)
        return 0;
    if (width < 1 || width > 160 || fontheight < 1 || fontheight > 32)
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/**
 * Parse an XBIN header: dimensions, font height, flags and the optional
 * palette/font data, which are packed into the codec extradata after a
 * 2-byte [font height][flags] prefix.
 */
static int xbin_read_header(AVFormatContext *s)
{
    BinDemuxContext *bin = s->priv_data;
    AVIOContext *pb = s->pb;
    /* avio_r8() returns 0..255; the original (possibly signed) char here
     * turned font heights >= 128 negative, corrupting the height and
     * extradata_size arithmetic below - keep these as int. */
    int fontheight, flags;

    AVStream *st = init_stream(s);
    if (!st)
        return AVERROR(ENOMEM);

    avio_skip(pb, 5);                         /* "XBIN" signature + 0x1A byte */
    st->codec->width   = avio_rl16(pb)<<3;    /* columns -> pixels (8 px/char) */
    st->codec->height  = avio_rl16(pb);
    fontheight         = avio_r8(pb);
    st->codec->height *= fontheight;          /* rows -> pixels */
    flags              = avio_r8(pb);

    st->codec->extradata_size = 2;
    if ((flags & BINTEXT_PALETTE))
        st->codec->extradata_size += 48;      /* palette data */
    if ((flags & BINTEXT_FONT))
        st->codec->extradata_size += fontheight * (flags & 0x10 ? 512 : 256);
    st->codec->codec_id = flags & 4 ? AV_CODEC_ID_XBIN : AV_CODEC_ID_BINTEXT;

    if (ff_alloc_extradata(st->codec, st->codec->extradata_size))
        return AVERROR(ENOMEM);
    st->codec->extradata[0] = fontheight;
    st->codec->extradata[1] = flags;
    if (avio_read(pb, st->codec->extradata + 2, st->codec->extradata_size - 2) < 0)
        return AVERROR(EIO);

    if (pb->seekable) {
        /* payload size excludes the 9-byte header, extradata and SAUCE */
        bin->fsize = avio_size(pb) - 9 - st->codec->extradata_size;
        ff_sauce_read(s, &bin->fsize, NULL, 0);
        avio_seek(pb, 9 + st->codec->extradata_size, SEEK_SET);
    }

    return 0;
}
#endif /* CONFIG_XBIN_DEMUXER */
 
#if CONFIG_ADF_DEMUXER
static int adf_read_header(AVFormatContext *s)
{
BinDemuxContext *bin = s->priv_data;
AVIOContext *pb = s->pb;
AVStream *st;
 
if (avio_r8(pb) != 1)
return AVERROR_INVALIDDATA;
 
st = init_stream(s);
if (!st)
return AVERROR(ENOMEM);
st->codec->codec_id = AV_CODEC_ID_BINTEXT;
 
if (ff_alloc_extradata(st->codec, 2 + 48 + 4096))
return AVERROR(ENOMEM);
st->codec->extradata[0] = 16;
st->codec->extradata[1] = BINTEXT_PALETTE|BINTEXT_FONT;
 
if (avio_read(pb, st->codec->extradata + 2, 24) < 0)
return AVERROR(EIO);
avio_skip(pb, 144);
if (avio_read(pb, st->codec->extradata + 2 + 24, 24) < 0)
return AVERROR(EIO);
if (avio_read(pb, st->codec->extradata + 2 + 48, 4096) < 0)
return AVERROR(EIO);
 
if (pb->seekable) {
int got_width = 0;
bin->fsize = avio_size(pb) - 1 - 192 - 4096;
st->codec->width = 80<<3;
ff_sauce_read(s, &bin->fsize, &got_width, 0);
if (!bin->width)
calculate_height(st->codec, bin->fsize);
avio_seek(pb, 1 + 192 + 4096, SEEK_SET);
}
return 0;
}
#endif /* CONFIG_ADF_DEMUXER */
 
#if CONFIG_IDF_DEMUXER
/* fixed 12-byte signature at the start of an iCEDraw file */
static const uint8_t idf_magic[] = {
    0x04, 0x31, 0x2e, 0x34, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x00, 0x15, 0x00
};

static int idf_probe(AVProbeData *p)
{
    /* require the complete signature to be present and matching */
    if (p->buf_size >= sizeof(idf_magic) &&
        memcmp(p->buf, idf_magic, sizeof(idf_magic)) == 0)
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
static int idf_read_header(AVFormatContext *s)
{
    BinDemuxContext *bin = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    int got_width = 0;

    /* the font and palette live at the end of the file, so seeking is required */
    if (!pb->seekable)
        return AVERROR(EIO);

    st = init_stream(s);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_id = AV_CODEC_ID_IDF;

    /* extradata layout: [font height][flags][48-byte palette][4096-byte font] */
    if (ff_alloc_extradata(st->codec, 2 + 48 + 4096))
        return AVERROR(ENOMEM);
    st->codec->extradata[0] = 16;
    st->codec->extradata[1] = BINTEXT_PALETTE|BINTEXT_FONT;

    /* trailer: 4096-byte font followed by 48-byte palette */
    avio_seek(pb, avio_size(pb) - 4096 - 48, SEEK_SET);

    if (avio_read(pb, st->codec->extradata + 2 + 48, 4096) < 0)
        return AVERROR(EIO);
    if (avio_read(pb, st->codec->extradata + 2, 48) < 0)
        return AVERROR(EIO);

    /* payload excludes the 12-byte header and the trailer */
    bin->fsize = avio_size(pb) - 12 - 4096 - 48;
    ff_sauce_read(s, &bin->fsize, &got_width, 0);
    if (!bin->width)
        calculate_height(st->codec, bin->fsize);
    avio_seek(pb, 12, SEEK_SET);
    return 0;
}
#endif /* CONFIG_IDF_DEMUXER */
 
static int read_packet(AVFormatContext *s,
                       AVPacket *pkt)
{
    BinDemuxContext *bin = s->priv_data;

    if (bin->fsize > 0) {
        /* seekable input: deliver the whole image as one packet */
        if (av_get_packet(s->pb, pkt, bin->fsize) < 0)
            return AVERROR(EIO);
        bin->fsize = -1; /* done */
        /* NOTE(review): fsize is uint64_t, so -1 wraps to UINT64_MAX and
         * later calls re-enter this branch; termination relies on
         * av_get_packet() rejecting the resulting size - confirm */
    } else if (!bin->fsize) {
        /* non-seekable input: stream chars_per_frame bytes per frame */
        if (url_feof(s->pb))
            return AVERROR(EIO);
        if (av_get_packet(s->pb, pkt, bin->chars_per_frame) < 0)
            return AVERROR(EIO);
    } else {
        return AVERROR(EIO);
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    return 0;
}
 
#define OFFSET(x) offsetof(BinDemuxContext, x)
/* private options shared by all four text demuxers */
static const AVOption options[] = {
    { "linespeed", "set simulated line speed (bytes per second)", OFFSET(chars_per_frame), AV_OPT_TYPE_INT, {.i64 = 6000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM},
    { "video_size", "set video size, such as 640x480 or hd720.", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
    { "framerate", "set framerate (frames per second)", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

/* helper to declare a per-demuxer AVClass sharing the options above */
#define CLASS(name) \
(const AVClass[1]){{ \
    .class_name = name, \
    .item_name  = av_default_item_name, \
    .option     = options, \
    .version    = LIBAVUTIL_VERSION_INT, \
}}
 
/* Demuxer registrations. Only bin text has no probe function (raw data,
 * selected by extension); xbin and idf are probed by signature. */
#if CONFIG_BINTEXT_DEMUXER
AVInputFormat ff_bintext_demuxer = {
    .name           = "bin",
    .long_name      = NULL_IF_CONFIG_SMALL("Binary text"),
    .priv_data_size = sizeof(BinDemuxContext),
    .read_header    = bintext_read_header,
    .read_packet    = read_packet,
    .extensions     = "bin",
    .priv_class     = CLASS("Binary text demuxer"),
};
#endif

#if CONFIG_XBIN_DEMUXER
AVInputFormat ff_xbin_demuxer = {
    .name           = "xbin",
    .long_name      = NULL_IF_CONFIG_SMALL("eXtended BINary text (XBIN)"),
    .priv_data_size = sizeof(BinDemuxContext),
    .read_probe     = xbin_probe,
    .read_header    = xbin_read_header,
    .read_packet    = read_packet,
    .priv_class     = CLASS("eXtended BINary text (XBIN) demuxer"),
};
#endif

#if CONFIG_ADF_DEMUXER
AVInputFormat ff_adf_demuxer = {
    .name           = "adf",
    .long_name      = NULL_IF_CONFIG_SMALL("Artworx Data Format"),
    .priv_data_size = sizeof(BinDemuxContext),
    .read_header    = adf_read_header,
    .read_packet    = read_packet,
    .extensions     = "adf",
    .priv_class     = CLASS("Artworx Data Format demuxer"),
};
#endif

#if CONFIG_IDF_DEMUXER
AVInputFormat ff_idf_demuxer = {
    .name           = "idf",
    .long_name      = NULL_IF_CONFIG_SMALL("iCE Draw File"),
    .priv_data_size = sizeof(BinDemuxContext),
    .read_probe     = idf_probe,
    .read_header    = idf_read_header,
    .read_packet    = read_packet,
    .extensions     = "idf",
    .priv_class     = CLASS("iCE Draw File demuxer"),
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/bit.c
0,0 → 1,156
/*
* G.729 bit format muxer and demuxer
* Copyright (c) 2007-2008 Vladimir Voroshilov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "internal.h"
#include "libavcodec/get_bits.h"
#include "libavcodec/put_bits.h"
 
#define MAX_FRAME_SIZE 10   /* max packed G.729 frame size in bytes */

#define SYNC_WORD  0x6b21   /* 16-bit sync marker starting each frame */
#define BIT_0      0x7f     /* 16-bit softbit value encoding a 0 bit */
#define BIT_1      0x81     /* 16-bit softbit value encoding a 1 bit */
 
/**
 * Probe a G.729 .bit stream. Each frame on disk is a 2-byte sync word,
 * a 2-byte payload size in bits (0x40 or 0x50), then one 16-bit softbit
 * per payload bit. The original code always re-tested offsets 0 and 2
 * and advanced by the bit count, so only the first header was ever
 * validated; walk the real frame layout instead.
 */
static int probe(AVProbeData *p)
{
    int i, j;

    if (p->buf_size < 0x40)
        return 0;

    for (i = 0; i + 3 < p->buf_size && i < 10 * 0x50; ) {
        if (AV_RL16(&p->buf[i]) != SYNC_WORD)
            return 0;
        j = AV_RL16(&p->buf[i + 2]);
        if (j != 0x40 && j != 0x50)
            return 0;
        /* 4 header bytes + 2 bytes per payload bit */
        i += 4 + 2 * j;
    }
    return AVPROBE_SCORE_EXTENSION;
}
 
static int read_header(AVFormatContext *s)
{
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);

    /* raw G.729 at 8 kHz, mono */
    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id    = AV_CODEC_ID_G729;
    st->codec->sample_rate = 8000;
    st->codec->block_align = 16;
    st->codec->channels    = 1;

    /* 100 timestamp ticks per second */
    avpriv_set_pts_info(st, 64, 1, 100);
    return 0;
}
 
static int read_packet(AVFormatContext *s,
                       AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    PutBitContext pbo;
    uint16_t buf[8 * MAX_FRAME_SIZE + 2];
    int packet_size;
    uint16_t* src=buf;
    int i, j, ret;
    int64_t pos= avio_tell(pb);

    if (url_feof(pb))
        return AVERROR_EOF;

    avio_rl16(pb); // sync word
    packet_size = avio_rl16(pb) / 8;    /* size field is in bits */
    if (packet_size > MAX_FRAME_SIZE)
        return AVERROR_INVALIDDATA;

    /* read one 16-bit softbit per payload bit */
    ret = avio_read(pb, (uint8_t*)buf, (8 * packet_size) * sizeof(uint16_t));
    if (ret < 0)
        return ret;
    if (ret != 8 * packet_size * sizeof(uint16_t))
        return AVERROR(EIO);

    if (av_new_packet(pkt, packet_size) < 0)
        return AVERROR(ENOMEM);

    /* pack the softbits into bytes: BIT_1 -> 1, anything else -> 0 */
    init_put_bits(&pbo, pkt->data, packet_size);
    for (j = 0; j < packet_size; j++)
        for (i = 0; i < 8; i++)
            put_bits(&pbo, 1, AV_RL16(src++) == BIT_1 ? 1 : 0);

    flush_put_bits(&pbo);

    pkt->duration = 1;   /* one 10 ms frame per packet (timebase 1/100) */
    pkt->pos = pos;
    return 0;
}
 
/* Demuxer registration for the G.729 .bit softbit format. */
AVInputFormat ff_bit_demuxer = {
    .name        = "bit",
    .long_name   = NULL_IF_CONFIG_SMALL("G.729 BIT file format"),
    .read_probe  = probe,
    .read_header = read_header,
    .read_packet = read_packet,
    .extensions  = "bit",
};
 
#if CONFIG_MUXERS
static int write_header(AVFormatContext *s)
{
    AVCodecContext *enc = s->streams[0]->codec;

    /* force the stream parameters the .bit container requires */
    enc->codec_id              = AV_CODEC_ID_G729;
    enc->channels              = 1;
    enc->bits_per_coded_sample = 16;
    enc->block_align = enc->bits_per_coded_sample * enc->channels / 8;

    return 0;
}
 
/**
 * Write one G.729 frame as sync word + size-in-bits + 80 softbits.
 * Exactly 80 bits (10 bytes) are read from pkt->data, so reject any
 * other packet size rather than reading past the buffer.
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    GetBitContext gb;
    int i;

    if (pkt->size != 10)
        return AVERROR(EINVAL);

    avio_wl16(pb, SYNC_WORD);
    avio_wl16(pb, 8 * 10);      /* payload size in bits */

    init_get_bits(&gb, pkt->data, 8 * 10);
    for (i = 0; i < 8 * 10; i++)
        avio_wl16(pb, get_bits1(&gb) ? BIT_1 : BIT_0);

    return 0;
}
 
/* Muxer registration for the G.729 .bit softbit format. */
AVOutputFormat ff_bit_muxer = {
    .name         = "bit",
    .long_name    = NULL_IF_CONFIG_SMALL("G.729 BIT file format"),
    .mime_type    = "audio/bit",
    .extensions   = "bit",
    .audio_codec  = AV_CODEC_ID_G729,
    .video_codec  = AV_CODEC_ID_NONE,
    .write_header = write_header,
    .write_packet = write_packet,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/bluray.c
0,0 → 1,235
/*
* BluRay (libbluray) protocol
*
* Copyright (c) 2012 Petri Hintukainen <phintuka <at> users.sourceforge.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <libbluray/bluray.h>
 
#include "libavutil/avstring.h"
#include "libavformat/avformat.h"
#include "libavformat/url.h"
#include "libavutil/opt.h"
 
#define BLURAY_PROTO_PREFIX "bluray:"
#define MIN_PLAYLIST_LENGTH 180 /* 3 min */
 
/* Per-URL private state for the bluray: protocol. */
typedef struct {
    const AVClass *class;   // must be first for AVOption handling

    BLURAY *bd;             // libbluray disc handle

    int playlist;           // .mpls playlist number; -1 = auto-select longest
    int angle;              // video angle to select
    int chapter;            // first chapter to seek to (1-based)
    /*int region;*/
} BlurayContext;
 
#define OFFSET(x) offsetof(BlurayContext, x)
/* User options: playlist (-1 = pick the longest), angle, and start chapter.
 * All are decoding-side parameters. */
static const AVOption options[] = {
    {"playlist", "", OFFSET(playlist), AV_OPT_TYPE_INT, { .i64=-1 }, -1,  99999, AV_OPT_FLAG_DECODING_PARAM },
    {"angle",    "", OFFSET(angle),    AV_OPT_TYPE_INT, { .i64=0 },   0,   0xfe, AV_OPT_FLAG_DECODING_PARAM },
    {"chapter",  "", OFFSET(chapter),  AV_OPT_TYPE_INT, { .i64=1 },   1, 0xfffe, AV_OPT_FLAG_DECODING_PARAM },
    /*{"region", "bluray player region code (1 = region A, 2 = region B, 4 = region C)", OFFSET(region), AV_OPT_TYPE_INT, { .i64=0 }, 0, 3, AV_OPT_FLAG_DECODING_PARAM },*/
    {NULL}
};
 
/* AVClass exposing the options above through the AVOption API. */
static const AVClass bluray_context_class = {
    .class_name = "bluray",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
 
/**
 * Verify the opened disc is a playable BluRay.
 * Returns 0 when playback can proceed, -1 when the disc info is
 * unavailable, the disc is not a BluRay, or it carries AACS/BD+
 * protection that libbluray could not handle.
 */
static int check_disc_info(URLContext *h)
{
    BlurayContext *bd = h->priv_data;
    const BLURAY_DISC_INFO *info = bd_get_disc_info(bd->bd);

    if (!info) {
        av_log(h, AV_LOG_ERROR, "bd_get_disc_info() failed\n");
        return -1;
    }

    if (!info->bluray_detected) {
        av_log(h, AV_LOG_ERROR, "BluRay disc not detected\n");
        return -1;
    }

    /* AACS encryption present but not decryptable */
    if (info->aacs_detected && !info->aacs_handled) {
        if (!info->libaacs_detected)
            av_log(h, AV_LOG_ERROR,
                   "Media stream encrypted with AACS, install and configure libaacs\n");
        else
            av_log(h, AV_LOG_ERROR, "Your libaacs can't decrypt this media\n");
        return -1;
    }

    /* BD+ encryption present but not decryptable; libbdplus detection is
     * not exposed here, hence the single generic message */
    if (info->bdplus_detected && !info->bdplus_handled) {
        av_log(h, AV_LOG_ERROR, "Unable to decrypt BD+ encrypted media\n");
        return -1;
    }

    return 0;
}
 
/* Release the libbluray handle, if one was successfully opened. */
static int bluray_close(URLContext *h)
{
    BlurayContext *bd = h->priv_data;

    if (bd->bd)
        bd_close(bd->bd);
    return 0;
}
 
/**
 * Open a "bluray:<path>" URL: open the disc, verify it is playable,
 * enumerate usable playlists, pick or honor the requested playlist,
 * then apply the angle and chapter options.
 *
 * On failure the partially opened handle is cleaned up by the URL
 * layer calling bluray_close().
 */
static int bluray_open(URLContext *h, const char *path, int flags)
{
    BlurayContext *bd = h->priv_data;
    int num_title_idx;
    const char *diskname = path;

    /* strip the "bluray:" prefix if present; otherwise use path as-is */
    av_strstart(path, BLURAY_PROTO_PREFIX, &diskname);

    bd->bd = bd_open(diskname, NULL);
    if (!bd->bd) {
        av_log(h, AV_LOG_ERROR, "bd_open() failed\n");
        return AVERROR(EIO);
    }

    /* check if disc can be played (detected, decryptable) */
    if (check_disc_info(h) < 0) {
        return AVERROR(EIO);
    }

    /* setup player registers */
    /* region code has no effect without menus
    if (bd->region > 0 && bd->region < 5) {
        av_log(h, AV_LOG_INFO, "setting region code to %d (%c)\n", bd->region, 'A' + (bd->region - 1));
        bd_set_player_setting(bd->bd, BLURAY_PLAYER_SETTING_REGION_CODE, bd->region);
    }
    */

    /* load title list; only playlists of at least MIN_PLAYLIST_LENGTH
     * seconds are considered relevant */
    num_title_idx = bd_get_titles(bd->bd, TITLES_RELEVANT, MIN_PLAYLIST_LENGTH);
    av_log(h, AV_LOG_INFO, "%d usable playlists:\n", num_title_idx);
    if (num_title_idx < 1) {
        return AVERROR(EIO);
    }

    /* if playlist was not given, select longest playlist */
    if (bd->playlist < 0) {
        uint64_t duration = 0;
        int i;
        for (i = 0; i < num_title_idx; i++) {
            BLURAY_TITLE_INFO *info = bd_get_title_info(bd->bd, i, 0);

            /* duration is in 90 kHz ticks; log as H:MM:SS */
            av_log(h, AV_LOG_INFO, "playlist %05d.mpls (%d:%02d:%02d)\n",
                   info->playlist,
                   ((int)(info->duration / 90000) / 3600),
                   ((int)(info->duration / 90000) % 3600) / 60,
                   ((int)(info->duration / 90000) % 60));

            if (info->duration > duration) {
                bd->playlist = info->playlist;
                duration = info->duration;
            }

            bd_free_title_info(info);
        }
        av_log(h, AV_LOG_INFO, "selected %05d.mpls\n", bd->playlist);
    }

    /* select playlist */
    if (bd_select_playlist(bd->bd, bd->playlist) <= 0) {
        av_log(h, AV_LOG_ERROR, "bd_select_playlist(%05d.mpls) failed\n", bd->playlist);
        return AVERROR(EIO);
    }

    /* select angle */
    if (bd->angle >= 0) {
        bd_select_angle(bd->bd, bd->angle);
    }

    /* select chapter (option is 1-based, bd_seek_chapter is 0-based) */
    if (bd->chapter > 1) {
        bd_seek_chapter(bd->bd, bd->chapter - 1);
    }

    return 0;
}
 
/* Read up to size bytes of the selected playlist's transport stream. */
static int bluray_read(URLContext *h, unsigned char *buf, int size)
{
    BlurayContext *bd = h->priv_data;

    if (!bd || !bd->bd)
        return AVERROR(EFAULT);

    return bd_read(bd->bd, buf, size);
}
 
/* Seek within the selected title; AVSEEK_SIZE reports the title size.
 * Note: bd_seek() only supports absolute positions, so SEEK_CUR and
 * SEEK_END are treated identically to SEEK_SET here. */
static int64_t bluray_seek(URLContext *h, int64_t pos, int whence)
{
    BlurayContext *bd = h->priv_data;

    if (!bd || !bd->bd)
        return AVERROR(EFAULT);

    if (whence == SEEK_SET || whence == SEEK_CUR || whence == SEEK_END)
        return bd_seek(bd->bd, pos);

    if (whence == AVSEEK_SIZE)
        return bd_get_title_size(bd->bd);

    av_log(h, AV_LOG_ERROR, "Unsupported whence operation %d\n", whence);
    return AVERROR(EINVAL);
}
 
 
/* Protocol registration for "bluray:" URLs. */
URLProtocol ff_bluray_protocol = {
    .name            = "bluray",
    .url_close       = bluray_close,
    .url_open        = bluray_open,
    .url_read        = bluray_read,
    .url_seek        = bluray_seek,
    .priv_data_size  = sizeof(BlurayContext),
    .priv_data_class = &bluray_context_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/bmv.c
0,0 → 1,136
/*
* Discworld II BMV demuxer
* Copyright (c) 2011 Konstantin Shishkov.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "avformat.h"
#include "internal.h"
 
/* Packet type byte at the start of each BMV chunk. Values 0-3 select the
 * video payload kind; the BMV_AUDIO bit marks chunks carrying audio. */
enum BMVFlags {
    BMV_NOP = 0,        // padding, skipped
    BMV_END,            // end of stream
    BMV_DELTA,          // inter-coded video frame
    BMV_INTRA,          // intra-coded video frame

    BMV_AUDIO = 0x20,   // chunk also contains audio data
};
 
/* Demuxer state: one file chunk is buffered in `packet` and split into an
 * audio packet followed by a video packet. */
typedef struct BMVContext {
    uint8_t *packet;    // buffered chunk (type byte + payload)
    int      size;      // payload size of the buffered chunk
    int      get_next;  // 1 = read a new chunk on the next read_packet call
    int64_t  audio_pos; // running audio PTS in samples
} BMVContext;
 
/**
 * Create the fixed video (640x429 PAL8 @ 12 fps) and audio
 * (stereo 22050 Hz) streams; BMV files carry no real header.
 */
static int bmv_read_header(AVFormatContext *s)
{
    BMVContext *c = s->priv_data;
    AVStream *video, *audio;

    video = avformat_new_stream(s, 0);
    if (!video)
        return AVERROR(ENOMEM);
    video->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    video->codec->codec_id   = AV_CODEC_ID_BMV_VIDEO;
    video->codec->width      = 640;
    video->codec->height     = 429;
    video->codec->pix_fmt    = AV_PIX_FMT_PAL8;
    avpriv_set_pts_info(video, 16, 1, 12);

    audio = avformat_new_stream(s, 0);
    if (!audio)
        return AVERROR(ENOMEM);
    audio->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    audio->codec->codec_id       = AV_CODEC_ID_BMV_AUDIO;
    audio->codec->channels       = 2;
    audio->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    audio->codec->sample_rate    = 22050;
    avpriv_set_pts_info(audio, 16, 1, 22050);

    c->get_next  = 1;
    c->audio_pos = 0;
    return 0;
}
 
/**
 * Return the next packet. Each file chunk holds audio and/or video; a
 * chunk with the BMV_AUDIO bit yields an audio packet first (get_next
 * cleared), then the buffered video payload on the following call.
 */
static int bmv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    BMVContext *c = s->priv_data;
    int type, err;

    while (c->get_next) {
        if (s->pb->eof_reached)
            return AVERROR_EOF;
        type = avio_r8(s->pb);
        if (type == BMV_NOP)
            continue;
        if (type == BMV_END)
            return AVERROR_EOF;
        c->size = avio_rl24(s->pb);
        if (!c->size)
            return AVERROR_INVALIDDATA;
        /* buffer the chunk with the type byte prepended at packet[0] */
        if ((err = av_reallocp(&c->packet, c->size + 1)) < 0)
            return err;
        c->packet[0] = type;
        if (avio_read(s->pb, c->packet + 1, c->size) != c->size)
            return AVERROR(EIO);
        if (type & BMV_AUDIO) {
            /* packet[1] is the audio block count; 65 bytes per block
             * plus the count byte itself */
            int audio_size = c->packet[1] * 65 + 1;
            if (audio_size >= c->size) {
                av_log(s, AV_LOG_ERROR, "Reported audio size %d is bigger than packet size (%d)\n",
                       audio_size, c->size);
                return AVERROR_INVALIDDATA;
            }
            if (av_new_packet(pkt, audio_size) < 0)
                return AVERROR(ENOMEM);
            memcpy(pkt->data, c->packet + 1, pkt->size);
            pkt->stream_index = 1;
            pkt->pts          = c->audio_pos;
            /* 32 samples per audio block */
            pkt->duration     = c->packet[1] * 32;
            c->audio_pos += pkt->duration;
            /* video part of this chunk still pending */
            c->get_next = 0;
            return pkt->size;
        } else
            break;
    }
    /* emit the buffered chunk (type byte included) as a video packet */
    if (av_new_packet(pkt, c->size + 1) < 0)
        return AVERROR(ENOMEM);
    pkt->stream_index = 0;
    c->get_next = 1;
    memcpy(pkt->data, c->packet, pkt->size);
    return pkt->size;
}
 
/* Free the buffered chunk. */
static int bmv_read_close(AVFormatContext *s)
{
    BMVContext *c = s->priv_data;

    av_freep(&c->packet);
    return 0;
}
 
/* Demuxer registration; matched by the .bmv extension (no probe). */
AVInputFormat ff_bmv_demuxer = {
    .name           = "bmv",
    .long_name      = NULL_IF_CONFIG_SMALL("Discworld II BMV"),
    .priv_data_size = sizeof(BMVContext),
    .read_header    = bmv_read_header,
    .read_packet    = bmv_read_packet,
    .read_close     = bmv_read_close,
    .extensions     = "bmv",
};
/contrib/sdk/sources/ffmpeg/libavformat/boadec.c
0,0 → 1,78
/*
* Black ops audio demuxer
* Copyright (c) 2013 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
 
/* Probe the fixed Black Ops Audio header: magic 1, plausible sample rate
 * and channel count, the constant 2096 at two known offsets, a nonzero
 * block size, and a data size that is a multiple of the block size.
 * Checks are performed in the same order as the original short-circuit
 * expression (the nonzero test guards the modulo). */
static int probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;

    if (p->buf_size < 2096)
        return 0;
    if (AV_RL32(b) != 1)
        return 0;
    if (AV_RL32(b + 8) > 100000)
        return 0;
    if (AV_RL32(b + 12) > 8)
        return 0;
    if (AV_RL32(b + 16) != 2096)
        return 0;
    if (!AV_RL32(b + 21))
        return 0;
    if (AV_RL16(b + 25) != 2096)
        return 0;
    if (AV_RL32(b + 48) % AV_RL32(b + 21))
        return 0;
    return AVPROBE_SCORE_EXTENSION;
}
 
 
/**
 * Parse the Black Ops Audio header and set up the single ADPCM stream.
 *
 * The header fields come straight from the file and read_header may run
 * without the probe (when the format is forced), so sample rate, channel
 * count, and block size are validated before use: previously a crafted
 * header could produce a zero, negative, or integer-overflowed
 * block_align, which read_packet passes to av_get_packet.
 */
static int read_header(AVFormatContext *s)
{
    uint32_t blocksize;
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = AV_CODEC_ID_ADPCM_MS;

    avio_rl32(s->pb);                       /* magic (checked by probe) */
    avio_rl32(s->pb);                       /* unknown */
    st->codec->sample_rate = avio_rl32(s->pb);
    st->codec->channels    = avio_rl32(s->pb);
    s->data_offset         = avio_rl32(s->pb);
    avio_r8(s->pb);
    blocksize = avio_rl32(s->pb);

    /* reject nonsense values (probe bounds: rate <= 100000, channels <= 8) */
    if (st->codec->channels < 1 || st->codec->channels > 8)
        return AVERROR_INVALIDDATA;
    if (st->codec->sample_rate < 1)
        return AVERROR_INVALIDDATA;
    if (!blocksize || blocksize > 0x7FFFFFFF / st->codec->channels)
        return AVERROR_INVALIDDATA;
    st->codec->block_align = st->codec->channels * blocksize;

    avio_seek(s->pb, s->data_offset, SEEK_SET);

    return 0;
}
 
/* Deliver one block_align-sized chunk of raw ADPCM data. */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[0]->codec;

    return av_get_packet(s->pb, pkt, codec->block_align);
}
 
/* Demuxer registration for Black Ops Audio files. */
AVInputFormat ff_boa_demuxer = {
    .name        = "boa",
    .long_name   = NULL_IF_CONFIG_SMALL("Black Ops Audio"),
    .read_probe  = probe,
    .read_header = read_header,
    .read_packet = read_packet,
    .flags       = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/brstm.c
0,0 → 1,297
/*
* BRSTM demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "internal.h"
 
/* Demuxer state parsed from the HEAD chunk; `table` and `adpc` hold
 * per-channel ADPCM coefficient/history data prepended to THP packets. */
typedef struct BRSTMDemuxContext {
    uint32_t block_size;            // bytes per full block, all channels
    uint32_t block_count;           // total number of blocks
    uint32_t current_block;         // 1-based counter of blocks emitted
    uint32_t samples_per_block;     // samples in a full block
    uint32_t last_block_used_bytes; // payload bytes in the final block
    uint8_t *table;                 // 32 coefficient bytes per channel (THP)
    uint8_t *adpc;                  // 4 history bytes per channel per block
} BRSTMDemuxContext;
 
/* Match the 'RSTM' magic followed by a byte-order mark. */
static int probe(AVProbeData *p)
{
    uint32_t magic = AV_RL32(p->buf);
    int bom = AV_RL16(p->buf + 4);

    if (magic == MKTAG('R','S','T','M') && (bom == 0xFFFE || bom == 0xFEFF))
        return AVPROBE_SCORE_MAX / 3 * 2;
    return 0;
}
 
/* Free the THP coefficient table and ADPC history buffers. */
static int read_close(AVFormatContext *s)
{
    BRSTMDemuxContext *b = s->priv_data;

    av_freep(&b->table);
    av_freep(&b->adpc);
    return 0;
}
 
/**
 * Parse the BRSTM container: the RSTM file header, the HEAD chunk (codec,
 * channel layout, sample rate, block geometry, and for ADPCM-THP the
 * per-channel coefficient tables), then scan chunks until DATA is found.
 * Only big-endian files and codecs 0-2 are supported.
 */
static int read_header(AVFormatContext *s)
{
    BRSTMDemuxContext *b = s->priv_data;
    int bom, major, minor, codec, chunk;
    int64_t pos, h1offset, toffset;
    uint32_t size, start, asize;
    AVStream *st;
    int ret = AVERROR_EOF;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

    avio_skip(s->pb, 4);

    /* byte-order mark; only big-endian (0xFEFF) files are handled */
    bom = avio_rb16(s->pb);
    if (bom != 0xFEFF && bom != 0xFFFE) {
        av_log(s, AV_LOG_ERROR, "invalid byte order: %X\n", bom);
        return AVERROR_INVALIDDATA;
    }
    if (bom == 0xFFFE) {
        avpriv_request_sample(s, "little endian byte order");
        return AVERROR_PATCHWELCOME;
    }

    major = avio_r8(s->pb);
    minor = avio_r8(s->pb);
    avio_skip(s->pb, 4); // size of file
    size = avio_rb16(s->pb);
    if (size < 14)
        return AVERROR_INVALIDDATA;

    /* skip to the end of the file header, then expect the HEAD chunk */
    avio_skip(s->pb, size - 14);
    pos = avio_tell(s->pb);
    if (avio_rl32(s->pb) != MKTAG('H','E','A','D'))
        return AVERROR_INVALIDDATA;
    size = avio_rb32(s->pb);
    if (size < 256)
        return AVERROR_INVALIDDATA;
    avio_skip(s->pb, 4); // unknown
    /* h1offset: offset of the stream-description sub-chunk within HEAD */
    h1offset = avio_rb32(s->pb);
    if (h1offset > size)
        return AVERROR_INVALIDDATA;
    avio_skip(s->pb, 12);
    /* toffset: offset of the channel-info sub-chunk (used for THP tables) */
    toffset = avio_rb32(s->pb) + 16LL;
    if (toffset > size)
        return AVERROR_INVALIDDATA;

    avio_skip(s->pb, pos + h1offset + 8 - avio_tell(s->pb));
    codec = avio_r8(s->pb);

    switch (codec) {
    case 0: codec = AV_CODEC_ID_PCM_S8_PLANAR;    break;
    case 1: codec = AV_CODEC_ID_PCM_S16BE_PLANAR; break;
    case 2: codec = AV_CODEC_ID_ADPCM_THP;        break;
    default:
        avpriv_request_sample(s, "codec %d", codec);
        return AVERROR_PATCHWELCOME;
    }

    avio_skip(s->pb, 1); // loop flag
    st->codec->codec_id = codec;
    st->codec->channels = avio_r8(s->pb);
    if (!st->codec->channels)
        return AVERROR_INVALIDDATA;

    avio_skip(s->pb, 1); // padding
    st->codec->sample_rate = avio_rb16(s->pb);
    if (!st->codec->sample_rate)
        return AVERROR_INVALIDDATA;

    avio_skip(s->pb, 2); // padding
    avio_skip(s->pb, 4); // loop start sample
    st->start_time = 0;
    st->duration = avio_rb32(s->pb);
    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

    /* block geometry; per-channel values are scaled by the channel count,
     * with bounds checks to avoid overflow on crafted files */
    start = avio_rb32(s->pb);
    b->current_block = 0;
    b->block_count = avio_rb32(s->pb);
    if (b->block_count > UINT16_MAX) {
        av_log(s, AV_LOG_WARNING, "too many blocks: %u\n", b->block_count);
        return AVERROR_INVALIDDATA;
    }

    b->block_size = avio_rb32(s->pb);
    if (b->block_size > UINT16_MAX / st->codec->channels)
        return AVERROR_INVALIDDATA;
    b->block_size *= st->codec->channels;

    b->samples_per_block = avio_rb32(s->pb);
    b->last_block_used_bytes = avio_rb32(s->pb);
    if (b->last_block_used_bytes > UINT16_MAX / st->codec->channels)
        return AVERROR_INVALIDDATA;
    b->last_block_used_bytes *= st->codec->channels;

    avio_skip(s->pb, 4); // last block samples
    avio_skip(s->pb, 4); // last block size

    /* for ADPCM-THP, read the 32-byte coefficient table for each channel */
    if (codec == AV_CODEC_ID_ADPCM_THP) {
        int ch;

        avio_skip(s->pb, pos + toffset - avio_tell(s->pb));
        toffset = avio_rb32(s->pb) + 16LL;
        if (toffset > size)
            return AVERROR_INVALIDDATA;

        avio_skip(s->pb, pos + toffset - avio_tell(s->pb));
        b->table = av_mallocz(32 * st->codec->channels);
        if (!b->table)
            return AVERROR(ENOMEM);

        for (ch = 0; ch < st->codec->channels; ch++) {
            if (avio_read(s->pb, b->table + ch * 32, 32) != 32) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            avio_skip(s->pb, 24);
        }
    }

    /* skip the remainder of the HEAD chunk */
    if (size < (avio_tell(s->pb) - pos)) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    avio_skip(s->pb, size - (avio_tell(s->pb) - pos));

    /* scan remaining chunks: ADPC (history data, THP only) and DATA */
    while (!url_feof(s->pb)) {
        chunk = avio_rl32(s->pb);
        size  = avio_rb32(s->pb);
        if (size < 8) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
        size -= 8;
        switch (chunk) {
        case MKTAG('A','D','P','C'):
            if (codec != AV_CODEC_ID_ADPCM_THP)
                goto skip;

            /* 4 history bytes per channel per block */
            asize = b->block_count * st->codec->channels * 4;
            if (size < asize) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            if (b->adpc) {
                av_log(s, AV_LOG_WARNING, "skipping additonal ADPC chunk\n");
                goto skip;
            } else {
                b->adpc = av_mallocz(asize);
                if (!b->adpc) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                avio_read(s->pb, b->adpc, asize);
                avio_skip(s->pb, size - asize);
            }
            break;
        case MKTAG('D','A','T','A'):
            /* THP files must have seen an ADPC chunk before DATA */
            if ((start < avio_tell(s->pb)) ||
                (!b->adpc && codec == AV_CODEC_ID_ADPCM_THP)) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            avio_skip(s->pb, start - avio_tell(s->pb));

            if (major != 1 || minor)
                avpriv_request_sample(s, "Version %d.%d", major, minor);

            return 0;
        default:
            av_log(s, AV_LOG_WARNING, "skipping unknown chunk: %X\n", chunk);
        skip:
            avio_skip(s->pb, size);
        }
    }

fail:
    read_close(s);

    return ret;
}
 
/**
 * Emit one block. For ADPCM-THP the packet is assembled as:
 * size (be32), sample count (be32), per-channel coefficient tables,
 * per-channel history bytes for this block, then the raw block data.
 * Other codecs pass the block through unchanged.
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[0]->codec;
    BRSTMDemuxContext *b = s->priv_data;
    uint32_t samples, size;
    int ret;

    if (url_feof(s->pb))
        return AVERROR_EOF;
    b->current_block++;
    if (b->current_block == b->block_count) {
        /* final (possibly short) block; THP packs 14 samples per
         * 8 bytes per channel */
        size = b->last_block_used_bytes;
        samples = size / (8 * codec->channels) * 14;
    } else if (b->current_block < b->block_count) {
        size = b->block_size;
        samples = b->samples_per_block;
    } else {
        return AVERROR_EOF;
    }

    if (codec->codec_id == AV_CODEC_ID_ADPCM_THP) {
        uint8_t *dst;

        if (av_new_packet(pkt, 8 + (32 + 4) * codec->channels + size) < 0)
            return AVERROR(ENOMEM);
        dst = pkt->data;
        bytestream_put_be32(&dst, size);
        bytestream_put_be32(&dst, samples);
        bytestream_put_buffer(&dst, b->table, 32 * codec->channels);
        /* history bytes for this block (current_block is already 1-based) */
        bytestream_put_buffer(&dst, b->adpc + 4 * codec->channels *
                                    (b->current_block - 1), 4 * codec->channels);

        ret = avio_read(s->pb, dst, size);
        if (ret != size)
            av_free_packet(pkt);
        pkt->duration = samples;
    } else {
        ret = av_get_packet(s->pb, pkt, size);
    }

    pkt->stream_index = 0;

    if (ret != size)
        ret = AVERROR(EIO);

    return ret;
}
 
/* Demuxer registration for BRSTM (Wii/GameCube audio) files. */
AVInputFormat ff_brstm_demuxer = {
    .name           = "brstm",
    .long_name      = NULL_IF_CONFIG_SMALL("BRSTM (Binary Revolution Stream)"),
    .priv_data_size = sizeof(BRSTMDemuxContext),
    .read_probe     = probe,
    .read_header    = read_header,
    .read_packet    = read_packet,
    .read_close     = read_close,
    .extensions     = "brstm",
};
/contrib/sdk/sources/ffmpeg/libavformat/c93.c
0,0 → 1,202
/*
* Interplay C93 demuxer
* Copyright (c) 2007 Anssi Hannula <anssi.hannula@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "voc.h"
#include "libavutil/intreadwrite.h"
 
/* One entry of the 512-record block table at the start of a C93 file. */
typedef struct {
    uint16_t index;   // block start position in 2048-byte sectors
    uint8_t  length;  // block length in sectors
    uint8_t  frames;  // number of video frames in the block (max 32)
} C93BlockRecord;
 
/* Demuxer state: current position inside the block table plus the
 * per-block frame offset table, and the lazily created audio stream. */
typedef struct {
    VocDecContext voc;              // state for VOC-wrapped audio payloads

    C93BlockRecord block_records[512];
    int current_block;              // index into block_records

    uint32_t frame_offsets[32];     // frame offsets within the current block
    int current_frame;              // frame index within the current block
    int next_pkt_is_audio;          // 1 after a video frame: audio follows

    AVStream *audio;                // created on first audio packet
} C93DemuxContext;
 
/* Check the first four block-table records: each record's 16-bit index
 * must continue the running sector index, and both the length and frame
 * count bytes must be nonzero. */
static int probe(AVProbeData *p)
{
    int i, expected = 1;

    if (p->buf_size < 16)
        return 0;
    for (i = 0; i < 16; i += 4) {
        if (AV_RL16(p->buf + i) != expected || !p->buf[i + 2] || !p->buf[i + 3])
            return 0;
        expected += p->buf[i + 2];
    }
    return AVPROBE_SCORE_MAX;
}
 
/**
 * Read the 512-entry block table, then create the fixed video stream
 * (320x192 C93 at 12.5 fps). Audio streams are added later, when the
 * first audio packet is encountered.
 */
static int read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    C93DemuxContext *c93 = s->priv_data;
    AVStream *video;
    int total_frames = 0;
    int i;

    for (i = 0; i < 512; i++) {
        C93BlockRecord *rec = &c93->block_records[i];

        rec->index  = avio_rl16(pb);
        rec->length = avio_r8(pb);
        rec->frames = avio_r8(pb);
        if (rec->frames > 32) {
            av_log(s, AV_LOG_ERROR, "too many frames in block\n");
            return AVERROR_INVALIDDATA;
        }
        total_frames += rec->frames;
    }

    /* Audio streams are added if audio packets are found */
    s->ctx_flags |= AVFMTCTX_NOHEADER;

    video = avformat_new_stream(s, NULL);
    if (!video)
        return AVERROR(ENOMEM);

    video->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    video->codec->codec_id   = AV_CODEC_ID_C93;
    video->codec->width      = 320;
    video->codec->height     = 192;
    /* 4:3 320x200 with 8 empty lines */
    video->sample_aspect_ratio = (AVRational) { 5, 6 };
    avpriv_set_pts_info(video, 64, 2, 25);
    video->nb_frames  = total_frames;
    video->duration   = total_frames;
    video->start_time = 0;

    c93->current_block     = 0;
    c93->current_frame     = 0;
    c93->next_pkt_is_audio = 0;
    return 0;
}
 
#define C93_HAS_PALETTE 0x01
#define C93_FIRST_FRAME 0x02
 
/**
 * Return the next packet. After each video frame an optional VOC audio
 * payload follows (next_pkt_is_audio); video packets are emitted with a
 * one-byte flag prefix (C93_HAS_PALETTE / C93_FIRST_FRAME) and an
 * optional 768-byte palette appended.
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    C93DemuxContext *c93 = s->priv_data;
    C93BlockRecord *br = &c93->block_records[c93->current_block];
    int datasize;
    int ret, i;

    if (c93->next_pkt_is_audio) {
        c93->current_frame++;
        c93->next_pkt_is_audio = 0;
        datasize = avio_rl16(pb);
        /* anything larger than a bare VOC header carries real audio */
        if (datasize > 42) {
            if (!c93->audio) {
                c93->audio = avformat_new_stream(s, NULL);
                if (!c93->audio)
                    return AVERROR(ENOMEM);
                c93->audio->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            }
            avio_skip(pb, 26); /* VOC header */
            ret = ff_voc_get_packet(s, pkt, c93->audio, datasize - 26);
            if (ret > 0) {
                pkt->stream_index = 1;
                pkt->flags |= AV_PKT_FLAG_KEY;
                return ret;
            }
        }
    }
    /* advance to the next block when the current one is exhausted */
    if (c93->current_frame >= br->frames) {
        if (c93->current_block >= 511 || !br[1].length)
            return AVERROR_EOF;
        br++;
        c93->current_block++;
        c93->current_frame = 0;
    }

    /* at the start of a block, load its 32-entry frame offset table */
    if (c93->current_frame == 0) {
        avio_seek(pb, br->index * 2048, SEEK_SET);
        for (i = 0; i < 32; i++) {
            c93->frame_offsets[i] = avio_rl32(pb);
        }
    }

    avio_seek(pb,br->index * 2048 +
            c93->frame_offsets[c93->current_frame], SEEK_SET);
    datasize = avio_rl16(pb); /* video frame size */

    /* 1 flag byte + frame data + room for an optional 768-byte palette */
    ret = av_new_packet(pkt, datasize + 768 + 1);
    if (ret < 0)
        return ret;
    pkt->data[0] = 0;
    pkt->size = datasize + 1;

    ret = avio_read(pb, pkt->data + 1, datasize);
    if (ret < datasize) {
        ret = AVERROR(EIO);
        goto fail;
    }

    datasize = avio_rl16(pb); /* palette size */
    if (datasize) {
        if (datasize != 768) {
            av_log(s, AV_LOG_ERROR, "invalid palette size %u\n", datasize);
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
        pkt->data[0] |= C93_HAS_PALETTE;
        ret = avio_read(pb, pkt->data + pkt->size, datasize);
        if (ret < datasize) {
            ret = AVERROR(EIO);
            goto fail;
        }
        pkt->size += 768;
    }
    pkt->stream_index = 0;
    c93->next_pkt_is_audio = 1;

    /* only the first frame is guaranteed to not reference previous frames */
    if (c93->current_block == 0 && c93->current_frame == 0) {
        pkt->flags |= AV_PKT_FLAG_KEY;
        pkt->data[0] |= C93_FIRST_FRAME;
    }
    return 0;

fail:
    av_free_packet(pkt);
    return ret;
}
 
/* Demuxer registration for Interplay C93 files. */
AVInputFormat ff_c93_demuxer = {
    .name           = "c93",
    .long_name      = NULL_IF_CONFIG_SMALL("Interplay C93"),
    .priv_data_size = sizeof(C93DemuxContext),
    .read_probe     = probe,
    .read_header    = read_header,
    .read_packet    = read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/cache.c
0,0 → 1,140
/*
* Input cache protocol.
* Copyright (c) 2011 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Based on file.c by Fabrice Bellard
*/
 
/**
* @TODO
* support non continuous caching
* support keeping files
* support filling with a background thread
*/
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/file.h"
#include "avformat.h"
#include <fcntl.h>
#if HAVE_IO_H
#include <io.h>
#endif
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <sys/stat.h>
#include <stdlib.h>
#include "os_support.h"
#include "url.h"
 
/* State for the cache: protocol. Bytes read from `inner` are written
 * through to the temp file `fd`; [0, end) is the cached region. */
typedef struct Context {
    int fd;             // temp file holding cached data
    int64_t end;        // number of bytes cached so far
    int64_t pos;        // current read position
    URLContext *inner;  // wrapped protocol being cached
} Context;
 
/* Create an anonymous temp file as the cache backing store, then open
 * the wrapped URL (everything after the "cache:" prefix). */
static int cache_open(URLContext *h, const char *arg, int flags)
{
    Context *c = h->priv_data;
    char *tmpname;

    av_strstart(arg, "cache:", &arg);

    c->fd = av_tempfile("ffcache", &tmpname, 0, h);
    if (c->fd < 0) {
        av_log(h, AV_LOG_ERROR, "Failed to create tempfile\n");
        return c->fd;
    }

    /* unlink immediately: the open fd keeps the file alive and it
     * disappears automatically on close */
    unlink(tmpname);
    av_freep(&tmpname);

    return ffurl_open(&c->inner, arg, flags, &h->interrupt_callback, NULL);
}
 
/* Serve reads from the cache file while pos is inside the cached region;
 * otherwise read from the inner protocol and append to the cache. */
static int cache_read(URLContext *h, unsigned char *buf, int size)
{
    Context *c = h->priv_data;
    int n;

    if (c->pos < c->end) {
        /* cached region: read from the temp file */
        n = read(c->fd, buf, FFMIN(size, c->end - c->pos));
        if (n > 0)
            c->pos += n;
        return n == -1 ? AVERROR(errno) : n;
    }

    /* past the cached region: pull from the inner protocol and
     * write the bytes through to the cache file */
    n = ffurl_read(c->inner, buf, size);
    if (n > 0) {
        int written = write(c->fd, buf, n);
        av_assert0(written == n); // FIXME handle cache failure
        c->pos += n;
        c->end += n;
    }
    return n;
}
 
/**
 * Seek within the cached stream. AVSEEK_SIZE is forwarded to the inner
 * protocol (falling back to probing SEEK_END, then to the cached length).
 * Only seeks into the already-cached region succeed; seeking beyond it
 * returns EPIPE since non-continuous caching is not supported (see @TODO
 * at the top of the file).
 */
static int64_t cache_seek(URLContext *h, int64_t pos, int whence)
{
    Context *c= h->priv_data;

    if (whence == AVSEEK_SIZE) {
        pos= ffurl_seek(c->inner, pos, whence);
        if(pos <= 0){
            /* inner protocol cannot report its size; probe via SEEK_END
             * and restore the inner position afterwards */
            pos= ffurl_seek(c->inner, -1, SEEK_END);
            ffurl_seek(c->inner, c->end, SEEK_SET);
            if(pos <= 0)
                return c->end;
        }
        return pos;
    }

    pos= lseek(c->fd, pos, whence);
    if(pos<0){
        return pos;
    }else if(pos <= c->end){
        /* target is inside the cached region */
        c->pos= pos;
        return pos;
    }else{
        /* target is beyond the cache: undo the lseek and refuse */
        if(lseek(c->fd, c->pos, SEEK_SET) < 0) {
            av_log(h, AV_LOG_ERROR, "Failure to seek in cache\n");
        }
        return AVERROR(EPIPE);
    }
}
 
/* Close the cache file (deleting it, since it was unlinked at open)
 * and the wrapped protocol. */
static int cache_close(URLContext *h)
{
    Context *c = h->priv_data;

    close(c->fd);
    ffurl_close(c->inner);
    return 0;
}
 
/* Protocol registration for "cache:" URLs. */
URLProtocol ff_cache_protocol = {
    .name           = "cache",
    .url_open       = cache_open,
    .url_read       = cache_read,
    .url_seek       = cache_seek,
    .url_close      = cache_close,
    .priv_data_size = sizeof(Context),
};
/contrib/sdk/sources/ffmpeg/libavformat/caf.c
0,0 → 1,66
/*
* CAF common code
* Copyright (c) 2007 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* CAF common code
*/
 
#include "avformat.h"
#include "internal.h"
#include "caf.h"
 
/**
 * Known codec tags for CAF, shared between the CAF muxer and demuxer.
 * Terminated by an AV_CODEC_ID_NONE entry.
 */
const AVCodecTag ff_codec_caf_tags[] = {
    { AV_CODEC_ID_AAC,             MKTAG('a','a','c',' ') },
    { AV_CODEC_ID_AC3,             MKTAG('a','c','-','3') },
    { AV_CODEC_ID_ADPCM_IMA_QT,    MKTAG('i','m','a','4') },
    { AV_CODEC_ID_ADPCM_IMA_WAV,   MKTAG('m','s', 0, 17 ) },
    { AV_CODEC_ID_ADPCM_MS,        MKTAG('m','s', 0, 2  ) },
    { AV_CODEC_ID_ALAC,            MKTAG('a','l','a','c') },
    { AV_CODEC_ID_AMR_NB,          MKTAG('s','a','m','r') },
    /* FIXME: use DV demuxer, as done in MOV */
    /*{ AV_CODEC_ID_DVAUDIO,       MKTAG('v','d','v','a') },*/
    /*{ AV_CODEC_ID_DVAUDIO,       MKTAG('d','v','c','a') },*/
    { AV_CODEC_ID_GSM,             MKTAG('a','g','s','m') },
    { AV_CODEC_ID_GSM_MS,          MKTAG('m','s', 0, '1') },
    { AV_CODEC_ID_ILBC,            MKTAG('i','l','b','c') },
    { AV_CODEC_ID_MACE3,           MKTAG('M','A','C','3') },
    { AV_CODEC_ID_MACE6,           MKTAG('M','A','C','6') },
    { AV_CODEC_ID_MP1,             MKTAG('.','m','p','1') },
    { AV_CODEC_ID_MP2,             MKTAG('.','m','p','2') },
    { AV_CODEC_ID_MP3,             MKTAG('.','m','p','3') },
    { AV_CODEC_ID_MP3,             MKTAG('m','s', 0 ,'U') },
    { AV_CODEC_ID_PCM_ALAW,        MKTAG('a','l','a','w') },
    { AV_CODEC_ID_PCM_MULAW,       MKTAG('u','l','a','w') },
    { AV_CODEC_ID_QCELP,           MKTAG('Q','c','l','p') },
    { AV_CODEC_ID_QDM2,            MKTAG('Q','D','M','2') },
    { AV_CODEC_ID_QDM2,            MKTAG('Q','D','M','C') },
    /* currently unsupported codecs */
    /*{ AC-3 over S/PDIF          MKTAG('c','a','c','3') },*/
    /*{ MPEG4CELP                 MKTAG('c','e','l','p') },*/
    /*{ MPEG4HVXC                 MKTAG('h','v','x','c') },*/
    /*{ MPEG4TwinVQ               MKTAG('t','w','v','q') },*/
    { AV_CODEC_ID_NONE,            0 },
};
 
/contrib/sdk/sources/ffmpeg/libavformat/caf.h
0,0 → 1,34
/*
* CAF common code
* Copyright (c) 2007 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* CAF common code
*/
 
#ifndef AVFORMAT_CAF_H
#define AVFORMAT_CAF_H
 
#include "internal.h"
 
extern const AVCodecTag ff_codec_caf_tags[];
 
#endif /* AVFORMAT_CAF_H */
/contrib/sdk/sources/ffmpeg/libavformat/cafdec.c
0,0 → 1,430
/*
* Core Audio Format demuxer
* Copyright (c) 2007 Justin Ruggles
* Copyright (c) 2009 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Core Audio Format demuxer
*/
 
#include "avformat.h"
#include "internal.h"
#include "isom.h"
#include "mov_chan.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "libavutil/dict.h"
#include "caf.h"
 
/* Demuxer state parsed from the CAF 'desc' chunk plus packetization
 * counters used while reading the 'data' chunk. */
typedef struct {
    int bytes_per_packet;   ///< bytes in a packet, or 0 if variable
    int frames_per_packet;  ///< frames in a packet, or 0 if variable
    int64_t num_bytes;      ///< total number of bytes in stream

    int64_t packet_cnt;     ///< packet counter
    int64_t frame_cnt;      ///< frame counter

    int64_t data_start;     ///< data start position, in bytes
    int64_t data_size;      ///< raw data size, in bytes
} CaffContext;
 
/* Match the big-endian 'caff' magic followed by file version 1. */
static int probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;

    if (AV_RB32(b) != MKBETAG('c','a','f','f'))
        return 0;
    if (AV_RB16(b + 4) != 1)
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/**
 * Read the CAF audio description ('desc') chunk and create the single
 * audio stream: sample rate (IEEE-754 big-endian double), codec tag,
 * LPCM format flags, packet geometry, channels and bit depth. Field
 * read order matches the on-disk layout and must not change.
 */
static int read_desc_chunk(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    CaffContext *caf = s->priv_data;
    AVStream *st;
    int lpcm_flags;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->sample_rate = av_int2double(avio_rb64(pb));
    st->codec->codec_tag   = avio_rl32(pb);
    lpcm_flags             = avio_rb32(pb);
    caf->bytes_per_packet  = avio_rb32(pb);
    st->codec->block_align = caf->bytes_per_packet;
    caf->frames_per_packet = avio_rb32(pb);
    st->codec->channels    = avio_rb32(pb);
    st->codec->bits_per_coded_sample = avio_rb32(pb);

    /* a constant packet size lets us derive the bit rate exactly;
     * otherwise leave it unknown */
    st->codec->bit_rate = 0;
    if (caf->frames_per_packet > 0 && caf->bytes_per_packet > 0)
        st->codec->bit_rate = (uint64_t)st->codec->sample_rate *
                              (uint64_t)caf->bytes_per_packet * 8 /
                              (uint64_t)caf->frames_per_packet;

    /* map the tag to a codec id; 'lpcm' needs the format flags too */
    if (st->codec->codec_tag == MKTAG('l','p','c','m'))
        st->codec->codec_id = ff_mov_get_lpcm_codec_id(st->codec->bits_per_coded_sample,
                                                       (lpcm_flags ^ 0x2) | 0x4);
    else
        st->codec->codec_id = ff_codec_get_id(ff_codec_caf_tags, st->codec->codec_tag);
    return 0;
}
 
/**
 * Read the magic cookie ('kuki') chunk, which carries codec extradata.
 * AAC cookies are parsed as an mp4 esds atom; ALAC cookies come in an old
 * (12-byte preamble + 36-byte header) and a new (24-byte) layout; any
 * other codec's cookie is copied verbatim into extradata.
 */
static int read_kuki_chunk(AVFormatContext *s, int64_t size)
{
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[0];

    if (size < 0 || size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE)
        return -1;

    if (st->codec->codec_id == AV_CODEC_ID_AAC) {
        /* The magic cookie format for AAC is an mp4 esds atom.
           The lavc AAC decoder requires the data from the codec specific
           description as extradata input. */
        int strt, skip;
        MOVAtom atom;

        strt = avio_tell(pb);
        ff_mov_read_esds(s, pb, atom);
        skip = size - (avio_tell(pb) - strt);
        /* the esds parser may change the codec id or leave no extradata;
         * treat either outcome as an invalid cookie */
        if (skip < 0 || !st->codec->extradata ||
            st->codec->codec_id != AV_CODEC_ID_AAC) {
            av_log(s, AV_LOG_ERROR, "invalid AAC magic cookie\n");
            return AVERROR_INVALIDDATA;
        }
        avio_skip(pb, skip);
    } else if (st->codec->codec_id == AV_CODEC_ID_ALAC) {
#define ALAC_PREAMBLE 12
#define ALAC_HEADER   36
#define ALAC_NEW_KUKI 24
        uint8_t preamble[12];
        if (size < ALAC_NEW_KUKI) {
            av_log(s, AV_LOG_ERROR, "invalid ALAC magic cookie\n");
            avio_skip(pb, size);
            return AVERROR_INVALIDDATA;
        }
        avio_read(pb, preamble, ALAC_PREAMBLE);

        if (ff_alloc_extradata(st->codec, ALAC_HEADER))
            return AVERROR(ENOMEM);

        /* For the old style cookie, we skip 12 bytes, then read 36 bytes.
         * The new style cookie only contains the last 24 bytes of what was
         * 36 bytes in the old style cookie, so we fabricate the first 12 bytes
         * in that case to maintain compatibility. */
        if (!memcmp(&preamble[4], "frmaalac", 8)) {
            if (size < ALAC_PREAMBLE + ALAC_HEADER) {
                av_log(s, AV_LOG_ERROR, "invalid ALAC magic cookie\n");
                av_freep(&st->codec->extradata);
                return AVERROR_INVALIDDATA;
            }
            avio_read(pb, st->codec->extradata, ALAC_HEADER);
            avio_skip(pb, size - ALAC_PREAMBLE - ALAC_HEADER);
        } else {
            /* fabricated 12-byte prefix: atom size 36, 'alac' tag, zero word */
            AV_WB32(st->codec->extradata, 36);
            memcpy(&st->codec->extradata[4], "alac", 4);
            AV_WB32(&st->codec->extradata[8], 0);
            memcpy(&st->codec->extradata[12], preamble, 12);
            avio_read(pb, &st->codec->extradata[24], ALAC_NEW_KUKI - 12);
            avio_skip(pb, size - ALAC_NEW_KUKI);
        }
    } else {
        /* any other codec: store the raw cookie as extradata */
        if (ff_alloc_extradata(st->codec, size))
            return AVERROR(ENOMEM);
        avio_read(pb, st->codec->extradata, size);
    }

    return 0;
}
 
/**
 * Read the packet table ('pakt') chunk and build the seek index.
 * For variable-size/duration packets the per-packet byte and frame counts
 * are stored as variable-length integers (read via ff_mp4_read_descr_len);
 * constant values come from the desc chunk instead.
 */
static int read_pakt_chunk(AVFormatContext *s, int64_t size)
{
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    CaffContext *caf = s->priv_data;
    int64_t pos = 0, ccount, num_packets;
    int i;

    ccount = avio_tell(pb);   /* chunk payload start, used for bounds checking */

    num_packets = avio_rb64(pb);
    if (num_packets < 0 || INT32_MAX / sizeof(AVIndexEntry) < num_packets)
        return AVERROR_INVALIDDATA;

    st->nb_frames  = avio_rb64(pb); /* valid frames */
    st->nb_frames += avio_rb32(pb); /* priming frames */
    st->nb_frames += avio_rb32(pb); /* remainder frames */

    st->duration = 0;
    for (i = 0; i < num_packets; i++) {
        av_add_index_entry(s->streams[0], pos, st->duration, 0, 0, AVINDEX_KEYFRAME);
        pos          += caf->bytes_per_packet  ? caf->bytes_per_packet  : ff_mp4_read_descr_len(pb);
        st->duration += caf->frames_per_packet ? caf->frames_per_packet : ff_mp4_read_descr_len(pb);
    }

    /* reject a table that claims to extend beyond its chunk */
    if (avio_tell(pb) - ccount > size) {
        av_log(s, AV_LOG_ERROR, "error reading packet table\n");
        return AVERROR_INVALIDDATA;
    }
    avio_skip(pb, ccount + size - avio_tell(pb));

    caf->num_bytes = pos;   /* total payload bytes covered by the table */
    return 0;
}
 
/** Read the 'info' chunk: an entry count followed by NUL-terminated
 *  key/value string pairs, stored as stream metadata. The chunk size
 *  parameter is not consulted. */
static void read_info_chunk(AVFormatContext *s, int64_t size)
{
    AVIOContext *pb = s->pb;
    unsigned int entry, count;

    count = avio_rb32(pb);
    for (entry = 0; entry < count; entry++) {
        char key[32];
        char value[1024];

        avio_get_str(pb, INT_MAX, key, sizeof(key));
        avio_get_str(pb, INT_MAX, value, sizeof(value));
        av_dict_set(&s->metadata, key, value, 0);
    }
}
 
/**
 * Parse the CAF file header: requires a leading 'desc' chunk, then walks
 * every top-level chunk up to the 'data' chunk (or EOF), and finally
 * positions the stream at the start of the audio data.
 */
static int read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    CaffContext *caf = s->priv_data;
    AVStream *st;
    uint32_t tag = 0;
    int found_data, ret;
    int64_t size, pos;

    avio_skip(pb, 8); /* magic, version, file flags */

    /* audio description chunk must come first and be exactly 32 bytes */
    if (avio_rb32(pb) != MKBETAG('d','e','s','c')) {
        av_log(s, AV_LOG_ERROR, "desc chunk not present\n");
        return AVERROR_INVALIDDATA;
    }
    size = avio_rb64(pb);
    if (size != 32)
        return AVERROR_INVALIDDATA;

    ret = read_desc_chunk(s);
    if (ret)
        return ret;
    st = s->streams[0];

    /* parse each chunk */
    found_data = 0;
    while (!url_feof(pb)) {

        /* stop at data chunk if seeking is not supported or
           data chunk size is unknown */
        if (found_data && (caf->data_size < 0 || !pb->seekable))
            break;

        tag  = avio_rb32(pb);
        size = avio_rb64(pb);
        pos  = avio_tell(pb);
        if (url_feof(pb))
            break;

        switch (tag) {
        case MKBETAG('d','a','t','a'):
            avio_skip(pb, 4); /* edit count */
            caf->data_start = avio_tell(pb);
            /* a negative chunk size means "until EOF"; data_size excludes
             * the 4-byte edit count */
            caf->data_size  = size < 0 ? -1 : size - 4;
            if (caf->data_size > 0 && pb->seekable)
                avio_skip(pb, caf->data_size);
            found_data = 1;
            break;

        /* channel layout chunk */
        case MKBETAG('c','h','a','n'):
            if ((ret = ff_mov_read_chan(s, s->pb, st, size)) < 0)
                return ret;
            break;

        /* magic cookie chunk */
        case MKBETAG('k','u','k','i'):
            if (read_kuki_chunk(s, size))
                return AVERROR_INVALIDDATA;
            break;

        /* packet table chunk */
        case MKBETAG('p','a','k','t'):
            if (read_pakt_chunk(s, size))
                return AVERROR_INVALIDDATA;
            break;

        case MKBETAG('i','n','f','o'):
            read_info_chunk(s, size);
            break;

        default:
            /* unknown chunk: warn, then fall through to skip it */
#define _(x) ((x) >= ' ' ? (x) : ' ')
            av_log(s, AV_LOG_WARNING, "skipping CAF chunk: %08X (%c%c%c%c), size %"PRId64"\n",
                   tag, _(tag>>24), _((tag>>16)&0xFF), _((tag>>8)&0xFF), _(tag&0xFF), size);
#undef _
        case MKBETAG('f','r','e','e'):
            if (size < 0)
                return AVERROR_INVALIDDATA;
            break;
        }

        /* advance to the next chunk, guarding against position overflow */
        if (size > 0) {
            if (pos > INT64_MAX - size)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, FFMAX(0, pos + size - avio_tell(pb)));
        }
    }

    if (!found_data)
        return AVERROR_INVALIDDATA;

    if (caf->bytes_per_packet > 0 && caf->frames_per_packet > 0) {
        /* constant packet layout: frame count follows from the data size */
        if (caf->data_size > 0)
            st->nb_frames = (caf->data_size / caf->bytes_per_packet) * caf->frames_per_packet;
    } else if (st->nb_index_entries && st->duration > 0) {
        /* variable packets: derive the bit rate from the packet table */
        st->codec->bit_rate = st->codec->sample_rate * caf->data_size * 8 /
                              st->duration;
    } else {
        av_log(s, AV_LOG_ERROR, "Missing packet table. It is required when "
                                "block size or frame size are variable.\n");
        return AVERROR_INVALIDDATA;
    }

    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    st->start_time = 0;

    /* position the stream at the start of data */
    if (caf->data_size >= 0)
        avio_seek(pb, caf->data_start, SEEK_SET);

    return 0;
}
 
#define CAF_MAX_PKT_SIZE 4096

/**
 * Read one packet: either a run of constant-size single-frame packets
 * bundled up to CAF_MAX_PKT_SIZE bytes, or one entry of the packet table.
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    CaffContext *caf = s->priv_data;
    int res, pkt_size = 0, pkt_frames = 0;
    int64_t left = CAF_MAX_PKT_SIZE;

    if (url_feof(pb))
        return AVERROR_EOF;

    /* don't read past end of data chunk */
    if (caf->data_size > 0) {
        left = (caf->data_start + caf->data_size) - avio_tell(pb);
        if (!left)
            return AVERROR_EOF;
        if (left < 0)
            return AVERROR(EIO);
    }

    pkt_frames = caf->frames_per_packet;
    pkt_size   = caf->bytes_per_packet;

    if (pkt_size > 0 && pkt_frames == 1) {
        /* fixed-size one-frame packets: bundle as many as fit in the cap */
        pkt_size   = (CAF_MAX_PKT_SIZE / pkt_size) * pkt_size;
        pkt_size   = FFMIN(pkt_size, left);
        pkt_frames = pkt_size / caf->bytes_per_packet;
    } else if (st->nb_index_entries) {
        /* variable packets: size and duration come from consecutive index
         * entries; the stream totals close off the final packet */
        if (caf->packet_cnt < st->nb_index_entries - 1) {
            pkt_size   = st->index_entries[caf->packet_cnt + 1].pos       - st->index_entries[caf->packet_cnt].pos;
            pkt_frames = st->index_entries[caf->packet_cnt + 1].timestamp - st->index_entries[caf->packet_cnt].timestamp;
        } else if (caf->packet_cnt == st->nb_index_entries - 1) {
            pkt_size   = caf->num_bytes - st->index_entries[caf->packet_cnt].pos;
            pkt_frames = st->duration   - st->index_entries[caf->packet_cnt].timestamp;
        } else {
            return AVERROR(EIO);
        }
    }

    if (pkt_size == 0 || pkt_frames == 0 || pkt_size > left)
        return AVERROR(EIO);

    res = av_get_packet(pb, pkt, pkt_size);
    if (res < 0)
        return res;

    pkt->size         = res;
    pkt->stream_index = 0;
    pkt->dts = pkt->pts = caf->frame_cnt;   /* timestamps count frames */

    caf->packet_cnt++;
    caf->frame_cnt += pkt_frames;

    return 0;
}
 
/**
 * Seek to the packet containing the target frame: arithmetically for
 * constant-size packets, otherwise through the packet-table index.
 *
 * Fix: av_index_search_timestamp() can return a negative value (e.g. when
 * seeking forward past the last index entry); the original code used that
 * value unchecked as an array index into st->index_entries, reading out of
 * bounds. Fail the seek instead.
 */
static int read_seek(AVFormatContext *s, int stream_index,
                     int64_t timestamp, int flags)
{
    AVStream *st = s->streams[0];
    CaffContext *caf = s->priv_data;
    int64_t pos, packet_cnt, frame_cnt;

    timestamp = FFMAX(timestamp, 0);

    if (caf->frames_per_packet > 0 && caf->bytes_per_packet > 0) {
        /* calculate new byte position based on target frame position */
        pos = caf->bytes_per_packet * (timestamp / caf->frames_per_packet);
        if (caf->data_size > 0)
            pos = FFMIN(pos, caf->data_size);
        packet_cnt = pos / caf->bytes_per_packet;
        frame_cnt  = caf->frames_per_packet * packet_cnt;
    } else if (st->nb_index_entries) {
        packet_cnt = av_index_search_timestamp(st, timestamp, flags);
        if (packet_cnt < 0)   /* no suitable index entry found */
            return -1;
        frame_cnt  = st->index_entries[packet_cnt].timestamp;
        pos        = st->index_entries[packet_cnt].pos;
    } else {
        return -1;
    }

    if (avio_seek(s->pb, pos + caf->data_start, SEEK_SET) < 0)
        return -1;

    caf->packet_cnt = packet_cnt;
    caf->frame_cnt  = frame_cnt;

    return 0;
}
 
/* Demuxer registration for Apple CAF. */
AVInputFormat ff_caf_demuxer = {
    .name           = "caf",
    .long_name      = NULL_IF_CONFIG_SMALL("Apple CAF (Core Audio Format)"),
    .priv_data_size = sizeof(CaffContext),
    .read_probe     = probe,
    .read_header    = read_header,
    .read_packet    = read_packet,
    .read_seek      = read_seek,
    .codec_tag      = (const AVCodecTag* const []){ ff_codec_caf_tags, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/cafenc.c
0,0 → 1,280
/*
* Core Audio Format muxer
* Copyright (c) 2011 Carl Eugen Hoyos
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "caf.h"
#include "isom.h"
#include "avio_internal.h"
#include "libavutil/intfloat.h"
#include "libavutil/dict.h"
 
/** Private muxer state for the Apple CAF writer. */
typedef struct {
    int64_t data;              ///< file offset of the 'data' chunk size field (patched in trailer)
    uint8_t *pkt_sizes;        ///< varint-encoded packet sizes for the 'pakt' chunk
    int size_buffer_size;      ///< allocated capacity of pkt_sizes
    int size_entries_used;     ///< bytes of pkt_sizes actually filled
    int packets;               ///< number of packets written
} CAFContext;
 
/**
 * Map a PCM codec id to the CAF mFormatFlags bits:
 * bit 0 = float samples, bit 1 = little-endian samples.
 */
static uint32_t codec_flags(enum AVCodecID codec_id) {
    uint32_t flags = 0;

    switch (codec_id) {
    case AV_CODEC_ID_PCM_F32BE:
    case AV_CODEC_ID_PCM_F64BE:
        flags = 1;   /* kCAFLinearPCMFormatFlagIsFloat */
        break;
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S24LE:
    case AV_CODEC_ID_PCM_S32LE:
        flags = 2;   /* kCAFLinearPCMFormatFlagIsLittleEndian */
        break;
    case AV_CODEC_ID_PCM_F32LE:
    case AV_CODEC_ID_PCM_F64LE:
        flags = 3;   /* float | little-endian */
        break;
    default:
        break;
    }
    return flags;
}
 
/**
 * Frames per packet (CAF mFramesPerPacket) for codecs whose packet
 * duration is fixed; returns 0 when the duration is variable or unknown.
 */
static uint32_t samples_per_packet(enum AVCodecID codec_id, int channels) {
    switch (codec_id) {
    /* raw and companded PCM: one frame per packet */
    case AV_CODEC_ID_PCM_S8:
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S16BE:
    case AV_CODEC_ID_PCM_S24LE:
    case AV_CODEC_ID_PCM_S24BE:
    case AV_CODEC_ID_PCM_S32LE:
    case AV_CODEC_ID_PCM_S32BE:
    case AV_CODEC_ID_PCM_F32LE:
    case AV_CODEC_ID_PCM_F32BE:
    case AV_CODEC_ID_PCM_F64LE:
    case AV_CODEC_ID_PCM_F64BE:
    case AV_CODEC_ID_PCM_ALAW:
    case AV_CODEC_ID_PCM_MULAW:
        return 1;
    case AV_CODEC_ID_MACE3:
    case AV_CODEC_ID_MACE6:
        return 6;
    case AV_CODEC_ID_ADPCM_IMA_QT:
        return 64;
    case AV_CODEC_ID_AMR_NB:
    case AV_CODEC_ID_GSM:
    case AV_CODEC_ID_ILBC:
    case AV_CODEC_ID_QCELP:
        return 160;
    case AV_CODEC_ID_GSM_MS:
        return 320;
    case AV_CODEC_ID_MP1:
        return 384;
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
        return 1152;
    case AV_CODEC_ID_AC3:
        return 1536;
    case AV_CODEC_ID_QDM2:
        return 2048 * channels;
    case AV_CODEC_ID_ALAC:
        return 4096;
    /* ADPCM: samples per block derived from the block layout */
    case AV_CODEC_ID_ADPCM_IMA_WAV:
        return (1024 - 4 * channels) * 8 / (4 * channels) + 1;
    case AV_CODEC_ID_ADPCM_MS:
        return (1024 - 7 * channels) * 2 / channels + 2;
    default:
        return 0;
    }
}
 
/**
 * Write the CAF file header: 'caff' preamble, 'desc' audio description,
 * optional 'chan'/'kuki'/'info' chunks, and the opening of the 'data'
 * chunk whose size is back-patched in the trailer when output is seekable.
 */
static int caf_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;
    CAFContext *caf = s->priv_data;
    AVDictionaryEntry *t = NULL;
    unsigned int codec_tag = ff_codec_get_tag(ff_codec_caf_tags, enc->codec_id);
    int64_t chunk_size = 0;

    switch (enc->codec_id) {
    case AV_CODEC_ID_AAC:
    case AV_CODEC_ID_AC3:
        av_log(s, AV_LOG_ERROR, "muxing codec currently unsupported\n");
        return AVERROR_PATCHWELCOME;
    }

    /* every raw PCM variant shares the generic 'lpcm' tag; the concrete
     * layout is conveyed through codec_flags() below */
    switch (enc->codec_id) {
    case AV_CODEC_ID_PCM_S8:
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S16BE:
    case AV_CODEC_ID_PCM_S24LE:
    case AV_CODEC_ID_PCM_S24BE:
    case AV_CODEC_ID_PCM_S32LE:
    case AV_CODEC_ID_PCM_S32BE:
    case AV_CODEC_ID_PCM_F32LE:
    case AV_CODEC_ID_PCM_F32BE:
    case AV_CODEC_ID_PCM_F64LE:
    case AV_CODEC_ID_PCM_F64BE:
    case AV_CODEC_ID_PCM_ALAW:
    case AV_CODEC_ID_PCM_MULAW:
        codec_tag = MKTAG('l','p','c','m');
    }

    if (!codec_tag) {
        av_log(s, AV_LOG_ERROR, "unsupported codec\n");
        return AVERROR_INVALIDDATA;
    }

    /* variable packet sizes require the trailer to seek back and append a
     * packet table */
    if (!enc->block_align && !pb->seekable) {
        av_log(s, AV_LOG_ERROR, "Muxing variable packet size not supported on non seekable output\n");
        return AVERROR_INVALIDDATA;
    }

    ffio_wfourcc(pb, "caff"); //< mFileType
    avio_wb16(pb, 1);         //< mFileVersion
    avio_wb16(pb, 0);         //< mFileFlags

    ffio_wfourcc(pb, "desc");                       //< Audio Description chunk
    avio_wb64(pb, 32);                              //< mChunkSize
    avio_wb64(pb, av_double2int(enc->sample_rate)); //< mSampleRate (big-endian double)
    avio_wl32(pb, codec_tag);                       //< mFormatID
    avio_wb32(pb, codec_flags(enc->codec_id));      //< mFormatFlags
    avio_wb32(pb, enc->block_align);                //< mBytesPerPacket
    avio_wb32(pb, samples_per_packet(enc->codec_id, enc->channels)); //< mFramesPerPacket
    avio_wb32(pb, enc->channels);                   //< mChannelsPerFrame
    avio_wb32(pb, av_get_bits_per_sample(enc->codec_id)); //< mBitsPerChannel

    /* channel layout chunk */
    if (enc->channel_layout) {
        ffio_wfourcc(pb, "chan");
        avio_wb64(pb, 12);
        ff_mov_write_chan(pb, enc->channel_layout);
    }

    /* codec-specific magic cookie ('kuki') chunks */
    if (enc->codec_id == AV_CODEC_ID_ALAC) {
        ffio_wfourcc(pb, "kuki");
        avio_wb64(pb, 12 + enc->extradata_size);
        avio_write(pb, "\0\0\0\14frmaalac", 12);
        avio_write(pb, enc->extradata, enc->extradata_size);
    } else if (enc->codec_id == AV_CODEC_ID_AMR_NB) {
        ffio_wfourcc(pb, "kuki");
        avio_wb64(pb, 29);
        avio_write(pb, "\0\0\0\14frmasamr", 12);
        avio_wb32(pb, 0x11); /* size */
        avio_write(pb, "samrFFMP", 8);
        avio_w8(pb, 0); /* decoder version */

        avio_wb16(pb, 0x81FF); /* Mode set (all modes for AMR_NB) */
        avio_w8(pb, 0x00); /* Mode change period (no restriction) */
        avio_w8(pb, 0x01); /* Frames per sample */
    } else if (enc->codec_id == AV_CODEC_ID_QDM2) {
        ffio_wfourcc(pb, "kuki");
        avio_wb64(pb, enc->extradata_size);
        avio_write(pb, enc->extradata, enc->extradata_size);
    }

    if (av_dict_count(s->metadata)) {
        ffio_wfourcc(pb, "info"); //< Information chunk
        /* first pass computes the chunk size: key + value + two NULs each */
        while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX))) {
            chunk_size += strlen(t->key) + strlen(t->value) + 2;
        }
        avio_wb64(pb, chunk_size + 4);
        avio_wb32(pb, av_dict_count(s->metadata));
        t = NULL;
        while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX))) {
            avio_put_str(pb, t->key);
            avio_put_str(pb, t->value);
        }
    }

    ffio_wfourcc(pb, "data"); //< Audio Data chunk
    caf->data = avio_tell(pb);  /* remember where to patch the chunk size */
    avio_wb64(pb, -1);        //< mChunkSize, -1 until finalized in the trailer
    avio_wb32(pb, 0);         //< mEditCount

    avio_flush(pb);
    return 0;
}
 
/**
 * Write one packet of audio data. For variable-block-size codecs, the
 * packet size is also appended to an in-memory table (encoded as a
 * big-endian base-128 varint) for the trailer's 'pakt' chunk.
 */
static int caf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    CAFContext *caf = s->priv_data;

    avio_write(s->pb, pkt->data, pkt->size);
    if (!s->streams[0]->codec->block_align) {
        void *pkt_sizes = caf->pkt_sizes;
        /* worst case: 5 varint bytes per packet size */
        int i, alloc_size = caf->size_entries_used + 5;
        if (alloc_size < 0) {
            /* size counter overflowed int */
            caf->pkt_sizes = NULL;
        } else {
            caf->pkt_sizes = av_fast_realloc(caf->pkt_sizes,
                                             &caf->size_buffer_size,
                                             alloc_size);
        }
        if (!caf->pkt_sizes) {
            av_free(pkt_sizes);   /* av_fast_realloc leaves the old buffer */
            return AVERROR(ENOMEM);
        }
        /* emit the non-zero high 7-bit groups with the continuation bit set */
        for (i = 4; i > 0; i--) {
            unsigned top = pkt->size >> i * 7;
            if (top)
                caf->pkt_sizes[caf->size_entries_used++] = 128 | top;
        }
        caf->pkt_sizes[caf->size_entries_used++] = pkt->size & 127;
        caf->packets++;
    }
    return 0;
}
 
/**
 * Finalize the file: back-patch the 'data' chunk size and, for variable
 * block sizes, append the 'pakt' packet table. Only possible when the
 * output is seekable.
 */
static int caf_write_trailer(AVFormatContext *s)
{
    CAFContext *caf = s->priv_data;
    AVIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;

    if (pb->seekable) {
        int64_t file_size = avio_tell(pb);

        /* rewrite the data chunk size (8 bytes of chunk header excluded) */
        avio_seek(pb, caf->data, SEEK_SET);
        avio_wb64(pb, file_size - caf->data - 8);
        avio_seek(pb, file_size, SEEK_SET);
        if (!enc->block_align) {
            ffio_wfourcc(pb, "pakt");
            avio_wb64(pb, caf->size_entries_used + 24); /* chunk size: table + 24-byte header */
            avio_wb64(pb, caf->packets); ///< mNumberPackets
            avio_wb64(pb, caf->packets * samples_per_packet(enc->codec_id, enc->channels)); ///< mNumberValidFrames
            avio_wb32(pb, 0); ///< mPrimingFrames
            avio_wb32(pb, 0); ///< mRemainderFrames
            avio_write(pb, caf->pkt_sizes, caf->size_entries_used);
            caf->size_buffer_size = 0;
        }
        avio_flush(pb);
    }
    av_freep(&caf->pkt_sizes);
    return 0;
}
 
/* Muxer registration for Apple CAF. */
AVOutputFormat ff_caf_muxer = {
    .name           = "caf",
    .long_name      = NULL_IF_CONFIG_SMALL("Apple CAF (Core Audio Format)"),
    .mime_type      = "audio/x-caf",
    .extensions     = "caf",
    .priv_data_size = sizeof(CAFContext),
    .audio_codec    = AV_CODEC_ID_PCM_S16BE,
    .video_codec    = AV_CODEC_ID_NONE,
    .write_header   = caf_write_header,
    .write_packet   = caf_write_packet,
    .write_trailer  = caf_write_trailer,
    .codec_tag      = (const AVCodecTag* const []){ff_codec_caf_tags, 0},
};
/contrib/sdk/sources/ffmpeg/libavformat/cavsvideodec.c
0,0 → 1,68
/*
* RAW Chinese AVS video demuxer
* Copyright (c) 2009 Stefan Gehrer <stefan.gehrer@gmx.de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rawdec.h"
 
#define CAVS_SEQ_START_CODE 0x000001b0
#define CAVS_PIC_I_START_CODE 0x000001b3
#define CAVS_UNDEF_START_CODE 0x000001b4
#define CAVS_PIC_PB_START_CODE 0x000001b6
#define CAVS_VIDEO_EDIT_CODE 0x000001b7
#define CAVS_PROFILE_JIZHUN 0x20
 
/**
 * Probe raw Chinese AVS video: scan for start codes, require ascending
 * slice codes, a supported profile, and a plausible picture/sequence ratio.
 */
static int cavsvideo_probe(AVProbeData *p)
{
    uint32_t state = 0xffffffff;
    int pic_cnt = 0, seq_cnt = 0, last_slice = 0;
    int i;

    for (i = 0; i < p->buf_size; i++) {
        state = (state << 8) + p->buf[i];
        if ((state & 0xffffff00) != 0x100)
            continue;   /* not at a start code */

        if (state < CAVS_SEQ_START_CODE) {
            /* slice start codes have to be consecutive (non-decreasing) */
            if (state < last_slice)
                return 0;
            last_slice = state;
        } else {
            last_slice = 0;
        }

        if (state == CAVS_SEQ_START_CODE) {
            seq_cnt++;
            /* check for the only currently supported profile */
            if (p->buf[i + 1] != CAVS_PROFILE_JIZHUN)
                return 0;
        } else if (state == CAVS_PIC_I_START_CODE ||
                   state == CAVS_PIC_PB_START_CODE) {
            pic_cnt++;
        } else if (state == CAVS_UNDEF_START_CODE ||
                   state > CAVS_VIDEO_EDIT_CODE) {
            return 0;
        }
    }
    /* require at least one sequence header and enough pictures per sequence */
    if (seq_cnt && seq_cnt * 9 <= pic_cnt * 10)
        return AVPROBE_SCORE_EXTENSION;
    return 0;
}
 
/* Register the raw CAVS demuxer through the common raw-video helper macro. */
FF_DEF_RAWVIDEO_DEMUXER(cavsvideo, "raw Chinese AVS (Audio Video Standard)", cavsvideo_probe, NULL, AV_CODEC_ID_CAVS)
/contrib/sdk/sources/ffmpeg/libavformat/cdg.c
0,0 → 1,79
/*
* CD Graphics Demuxer
* Copyright (c) 2009 Michael Tison
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
 
#define CDG_PACKET_SIZE 24
#define CDG_COMMAND 0x09
#define CDG_MASK 0x3F
 
/**
 * Create the single CD Graphics video stream and estimate its duration
 * from the file size.
 *
 * Fix: avio_size() returns int64_t but was stored in an int, truncating
 * the size of large inputs and risking overflow in the duration product;
 * use int64_t throughout.
 */
static int read_header(AVFormatContext *s)
{
    AVStream *vst;
    int64_t size;

    vst = avformat_new_stream(s, NULL);
    if (!vst)
        return AVERROR(ENOMEM);

    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = AV_CODEC_ID_CDGRAPHICS;

    /// 75 sectors/sec * 4 packets/sector = 300 packets/sec
    avpriv_set_pts_info(vst, 32, 1, 300);

    size = avio_size(s->pb);
    if (size > 0)
        vst->duration = (size * vst->time_base.den) / (CDG_PACKET_SIZE * 300);

    return 0;
}
 
/**
 * Read the next CDG command packet, skipping non-command sectors.
 *
 * Fix: when av_get_packet() fails (ret < 0) the original code still wrote
 * stream_index/pts into the failed packet and returned it to the caller;
 * return the error immediately instead (matches later upstream FFmpeg).
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    while (1) {
        ret = av_get_packet(s->pb, pkt, CDG_PACKET_SIZE);
        if (ret < 1 || (pkt->data[0] & CDG_MASK) == CDG_COMMAND)
            break;
        av_free_packet(pkt);
    }

    if (ret < 0)
        return ret;   /* don't hand back a half-initialized packet */

    pkt->stream_index = 0;
    pkt->dts =
    pkt->pts = pkt->pos / CDG_PACKET_SIZE;

    /* NOTE(review): appears to flag a Memory Preset command (instruction 1)
     * with repeat field 0 as a keyframe — confirm against the CD+G spec */
    if (ret > 5 && (pkt->data[0] & 0x3F) == 9 && (pkt->data[1] & 0x3F) == 1 &&
        !(pkt->data[2 + 2 + 1] & 0x0F)) {
        pkt->flags = AV_PKT_FLAG_KEY;
    }
    return ret;
}
 
/* Demuxer registration for CD Graphics streams. */
AVInputFormat ff_cdg_demuxer = {
    .name        = "cdg",
    .long_name   = NULL_IF_CONFIG_SMALL("CD Graphics"),
    .read_header = read_header,
    .read_packet = read_packet,
    .flags       = AVFMT_GENERIC_INDEX,
    .extensions  = "cdg",
};
/contrib/sdk/sources/ffmpeg/libavformat/cdxl.c
0,0 → 1,231
/*
* CDXL demuxer
* Copyright (c) 2011-2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/parseutils.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "internal.h"
 
#define CDXL_HEADER_SIZE 32
 
/** CDXL demuxer state; streams are created lazily while reading packets. */
typedef struct CDXLDemuxContext {
    AVClass *class;                    ///< class pointer for private options
    int sample_rate;                   ///< audio sample rate (user option)
    char *framerate;                   ///< fixed video frame rate (user option string)
    AVRational fps;                    ///< parsed value of framerate
    int read_chunk;                    ///< audio bytes still pending in the current chunk (0 = read a new header)
    uint8_t header[CDXL_HEADER_SIZE];  ///< raw header of the current chunk
    int video_stream_index;            ///< index of the video stream, -1 until created
    int audio_stream_index;            ///< index of the audio stream, -1 until created
} CDXLDemuxContext;
 
/** Probe a CDXL file by validating the first chunk header fields. */
static int cdxl_read_probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;
    int score = AVPROBE_SCORE_EXTENSION + 10;

    if (p->buf_size < CDXL_HEADER_SIZE)
        return 0;

    if (AV_RN64(&b[24]) || AV_RN16(&b[10]))    /* reserved bytes must be zero */
        return 0;
    if (b[0] != 1)                             /* file type */
        return 0;
    if (AV_RB16(&b[20]) > 512)                 /* palette size limit */
        return 0;
    if (b[18] || !b[19])                       /* plane count: high byte 0, low byte non-zero */
        return 0;
    if (!AV_RN16(&b[14]) || !AV_RN16(&b[16]))  /* width and height non-zero */
        return 0;
    /* chunk size must cover header + palette + audio */
    if (AV_RB32(&b[2]) < AV_RB16(&b[22]) + AV_RB16(&b[20]) + CDXL_HEADER_SIZE)
        return 0;

    if (AV_RN32(&b[6]))                        /* first chunk usually has no previous size */
        score /= 2;
    if (AV_RB16(&b[12]) != 1)                  /* frame numbering usually starts at 1 */
        score /= 2;

    return score;
}
 
/** Initialize the CDXL demuxer: parse the optional frame-rate option and
 *  defer stream creation to the packet reader. */
static int cdxl_read_header(AVFormatContext *s)
{
    CDXLDemuxContext *cdxl = s->priv_data;
    int err;

    if (cdxl->framerate) {
        err = av_parse_video_rate(&cdxl->fps, cdxl->framerate);
        if (err < 0) {
            av_log(s, AV_LOG_ERROR,
                   "Could not parse framerate: %s.\n", cdxl->framerate);
            return err;
        }
    }

    cdxl->read_chunk         = 0;
    cdxl->video_stream_index = -1;
    cdxl->audio_stream_index = -1;

    /* streams appear as packets are read */
    s->ctx_flags |= AVFMTCTX_NOHEADER;

    return 0;
}
 
/**
 * Read one CDXL chunk. Each chunk holds a header, palette+bitmap data and
 * optionally audio; the video part is emitted first and the audio part on
 * the following call (tracked through cdxl->read_chunk).
 */
static int cdxl_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    CDXLDemuxContext *cdxl = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t current_size, video_size, image_size;
    uint16_t audio_size, palette_size, width, height;
    int64_t pos;
    int ret;

    if (url_feof(pb))
        return AVERROR_EOF;

    pos = avio_tell(pb);
    /* read_chunk != 0 means the header was already consumed when the video
     * packet was emitted and only the audio remains */
    if (!cdxl->read_chunk &&
        avio_read(pb, cdxl->header, CDXL_HEADER_SIZE) != CDXL_HEADER_SIZE)
        return AVERROR_EOF;
    if (cdxl->header[0] != 1) {
        av_log(s, AV_LOG_ERROR, "non-standard cdxl file\n");
        return AVERROR_INVALIDDATA;
    }

    current_size = AV_RB32(&cdxl->header[2]);
    width        = AV_RB16(&cdxl->header[14]);
    height       = AV_RB16(&cdxl->header[16]);
    palette_size = AV_RB16(&cdxl->header[20]);
    audio_size   = AV_RB16(&cdxl->header[22]);
    /* bitmap rows are 16-pixel aligned; header[19] is the plane count */
    image_size   = FFALIGN(width, 16) * height * cdxl->header[19] / 8;
    video_size   = palette_size + image_size;

    if (palette_size > 512)
        return AVERROR_INVALIDDATA;
    if (current_size < (uint64_t)audio_size + video_size + CDXL_HEADER_SIZE)
        return AVERROR_INVALIDDATA;

    if (cdxl->read_chunk && audio_size) {
        /* emit the pending audio part of the current chunk */
        if (cdxl->audio_stream_index == -1) {
            AVStream *st = avformat_new_stream(s, NULL);
            if (!st)
                return AVERROR(ENOMEM);

            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_tag  = 0;
            st->codec->codec_id   = AV_CODEC_ID_PCM_S8;
            /* header bit 0x10 selects stereo */
            if (cdxl->header[1] & 0x10) {
                st->codec->channels       = 2;
                st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            } else {
                st->codec->channels       = 1;
                st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            }
            st->codec->sample_rate = cdxl->sample_rate;
            st->start_time = 0;
            cdxl->audio_stream_index = st->index;
            avpriv_set_pts_info(st, 64, 1, cdxl->sample_rate);
        }

        ret = av_get_packet(pb, pkt, audio_size);
        if (ret < 0)
            return ret;
        pkt->stream_index = cdxl->audio_stream_index;
        pkt->pos          = pos;
        pkt->duration     = audio_size;
        cdxl->read_chunk  = 0;
    } else {
        if (cdxl->video_stream_index == -1) {
            AVStream *st = avformat_new_stream(s, NULL);
            if (!st)
                return AVERROR(ENOMEM);

            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_tag  = 0;
            st->codec->codec_id   = AV_CODEC_ID_CDXL;
            st->codec->width      = width;
            st->codec->height     = height;
            st->start_time = 0;
            cdxl->video_stream_index = st->index;
            /* without an explicit frame rate, video timestamps are driven
             * by the audio byte count at the audio sample rate */
            if (cdxl->framerate)
                avpriv_set_pts_info(st, 64, cdxl->fps.den, cdxl->fps.num);
            else
                avpriv_set_pts_info(st, 64, 1, cdxl->sample_rate);
        }

        /* the decoder needs the raw chunk header prepended to the payload */
        if (av_new_packet(pkt, video_size + CDXL_HEADER_SIZE) < 0)
            return AVERROR(ENOMEM);
        memcpy(pkt->data, cdxl->header, CDXL_HEADER_SIZE);
        ret = avio_read(pb, pkt->data + CDXL_HEADER_SIZE, video_size);
        if (ret < 0) {
            av_free_packet(pkt);
            return ret;
        }
        av_shrink_packet(pkt, CDXL_HEADER_SIZE + ret);
        pkt->stream_index = cdxl->video_stream_index;
        pkt->flags       |= AV_PKT_FLAG_KEY;
        pkt->pos          = pos;
        pkt->duration     = cdxl->framerate ? 1 : audio_size ? audio_size : 220;
        cdxl->read_chunk  = audio_size;
    }

    /* once both parts are emitted, skip any trailing bytes of the chunk */
    if (!cdxl->read_chunk)
        avio_skip(pb, current_size - audio_size - video_size - CDXL_HEADER_SIZE);
    return ret;
}
 
#define OFFSET(x) offsetof(CDXLDemuxContext, x)
/* Private options: CDXL chunks carry no timing fields, so the user can
 * supply the audio sample rate and/or a fixed video frame rate. */
static const AVOption cdxl_options[] = {
    { "sample_rate", "", OFFSET(sample_rate), AV_OPT_TYPE_INT,    { .i64 = 11025 }, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "framerate",   "", OFFSET(framerate),   AV_OPT_TYPE_STRING, { .str = NULL },  0, 0,       AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};
 
/* AVClass wiring the private options into the demuxer context. */
static const AVClass cdxl_demuxer_class = {
    .class_name = "CDXL demuxer",
    .item_name  = av_default_item_name,
    .option     = cdxl_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Demuxer registration for Commodore CDXL video. */
AVInputFormat ff_cdxl_demuxer = {
    .name           = "cdxl",
    .long_name      = NULL_IF_CONFIG_SMALL("Commodore CDXL video"),
    .priv_data_size = sizeof(CDXLDemuxContext),
    .read_probe     = cdxl_read_probe,
    .read_header    = cdxl_read_header,
    .read_packet    = cdxl_read_packet,
    .extensions     = "cdxl,xl",
    .flags          = AVFMT_GENERIC_INDEX,
    .priv_class     = &cdxl_demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/concat.c
0,0 → 1,190
/*
* Concat URL protocol
* Copyright (c) 2006 Steve Lhomme
* Copyright (c) 2007 Wolfram Gloger
* Copyright (c) 2010 Michele Orrù
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "libavutil/avstring.h"
#include "libavutil/mem.h"
#include "url.h"
 
#define AV_CAT_SEPARATOR "|"
 
/** One concatenated input and its size (needed for cross-node seeking). */
struct concat_nodes {
    URLContext *uc;          ///< node's URLContext
    int64_t     size;        ///< url filesize
};

/** State of an open "concat:" URL. */
struct concat_data {
    struct concat_nodes *nodes;    ///< list of nodes to concat
    size_t               length;   ///< number of cat'ed nodes
    size_t               current;  ///< index of currently read node
};
 
/** Close every node of the concat URL and free the node array.
 *  Returns -1 if any individual close failed, 0 otherwise. */
static av_cold int concat_close(URLContext *h)
{
    struct concat_data *data = h->priv_data;
    int err = 0;
    size_t n;

    for (n = 0; n != data->length; n++)
        err |= ffurl_close(data->nodes[n].uc);   /* remember any failure */

    av_freep(&data->nodes);

    return err < 0 ? -1 : 0;
}
 
/**
 * Open a "concat:" URL: split the node list on '|', open each node and
 * record its size. On any failure, every already-opened node is closed.
 */
static av_cold int concat_open(URLContext *h, const char *uri, int flags)
{
    char *node_uri = NULL;
    int err = 0;
    int64_t size;
    size_t len, i;
    URLContext *uc;
    struct concat_data *data = h->priv_data;
    struct concat_nodes *nodes;

    av_strstart(uri, "concat:", &uri);

    /* count separators to bound the node array allocation */
    for (i = 0, len = 1; uri[i]; i++)
        if (uri[i] == *AV_CAT_SEPARATOR)
            /* integer overflow */
            if (++len == UINT_MAX / sizeof(*nodes)) {
                av_freep(&h->priv_data);
                return AVERROR(ENAMETOOLONG);
            }

    if (!(nodes = av_realloc(NULL, sizeof(*nodes) * len))) {
        return AVERROR(ENOMEM);
    } else
        data->nodes = nodes;

    /* handle input */
    if (!*uri)
        err = AVERROR(ENOENT);
    for (i = 0; *uri; i++) {
        /* parsing uri: copy out the next '|'-delimited component */
        len = strcspn(uri, AV_CAT_SEPARATOR);
        if ((err = av_reallocp(&node_uri, len + 1)) < 0)
            break;
        av_strlcpy(node_uri, uri, len+1);
        uri += len + strspn(uri+len, AV_CAT_SEPARATOR);

        /* creating URLContext */
        if ((err = ffurl_open(&uc, node_uri, flags,
                              &h->interrupt_callback, NULL)) < 0)
            break;

        /* creating size: every node must report a size for seeking to work */
        if ((size = ffurl_size(uc)) < 0) {
            ffurl_close(uc);
            err = AVERROR(ENOSYS);
            break;
        }

        /* assembling */
        nodes[i].uc   = uc;
        nodes[i].size = size;
    }
    av_free(node_uri);
    data->length = i;

    if (err < 0)
        concat_close(h);
    else if (!(nodes = av_realloc(nodes, data->length * sizeof(*nodes)))) {
        /* shrink-to-fit failed: release everything */
        concat_close(h);
        err = AVERROR(ENOMEM);
    } else
        data->nodes = nodes;
    return err;
}
 
/**
 * Read across node boundaries: when one node hits EOF, rewind the next
 * node to its start and keep filling the caller's buffer.
 */
static int concat_read(URLContext *h, unsigned char *buf, int size)
{
    int result, total = 0;
    struct concat_data *data = h->priv_data;
    struct concat_nodes *nodes = data->nodes;
    size_t i = data->current;

    while (size > 0) {
        result = ffurl_read(nodes[i].uc, buf, size);
        if (result < 0)
            return total ? total : result;   /* prefer delivered data over the error */
        if (!result)
            /* EOF on this node: advance, or stop at the last one */
            if (i + 1 == data->length ||
                ffurl_seek(nodes[++i].uc, 0, SEEK_SET) < 0)
                break;
        total += result;
        buf   += result;
        size  -= result;
    }
    data->current = i;
    return total;
}
 
/**
 * Seek within the virtual concatenated stream: translate the target into
 * a node index plus an in-node offset, seek there, then convert the
 * resulting in-node position back to a global one.
 */
static int64_t concat_seek(URLContext *h, int64_t pos, int whence)
{
    int64_t result;
    struct concat_data *data = h->priv_data;
    struct concat_nodes *nodes = data->nodes;
    size_t i;

    switch (whence) {
    case SEEK_END:
        /* walk backwards from the last node (pos is <= 0 here) */
        for (i = data->length - 1;
             i && pos < -nodes[i].size;
             i--)
            pos += nodes[i].size;
        break;
    case SEEK_CUR:
        /* get the absolute position */
        for (i = 0; i != data->current; i++)
            pos += nodes[i].size;
        pos += ffurl_seek(nodes[i].uc, 0, SEEK_CUR);
        whence = SEEK_SET;
        /* fall through with the absolute position */
    case SEEK_SET:
        for (i = 0; i != data->length - 1 && pos >= nodes[i].size; i++)
            pos -= nodes[i].size;
        break;
    default:
        return AVERROR(EINVAL);
    }

    result = ffurl_seek(nodes[i].uc, pos, whence);
    if (result >= 0) {
        data->current = i;
        /* add the sizes of all preceding nodes to globalize the position */
        while (i)
            result += nodes[--i].size;
    }
    return result;
}
 
/* Protocol registration for "concat:" URLs. */
URLProtocol ff_concat_protocol = {
    .name           = "concat",
    .url_open       = concat_open,
    .url_read       = concat_read,
    .url_seek       = concat_seek,
    .url_close      = concat_close,
    .priv_data_size = sizeof(struct concat_data),
};
/contrib/sdk/sources/ffmpeg/libavformat/concatdec.c
0,0 → 1,408
/*
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avformat.h"
#include "internal.h"
#include "url.h"
 
/* One entry ("file" directive) of the concat script. */
typedef struct {
    char *url;          /* absolute URL of the file; owned, freed in read_close */
    int64_t start_time; /* cumulative start time, or AV_NOPTS_VALUE until known */
    int64_t duration;   /* duration from the script or probed, or AV_NOPTS_VALUE */
} ConcatFile;
 
/* Private context of the concat demuxer. */
typedef struct {
    AVClass *class;       /* first member, required for AVOptions */
    ConcatFile *files;    /* dynamically grown array of script entries */
    ConcatFile *cur_file; /* entry currently being demuxed */
    unsigned nb_files;
    AVFormatContext *avf; /* sub-demuxer for the current file */
    int safe;             /* -1 auto, 0 off, 1 restrict file names */
    int seekable;         /* set once all durations are known */
} ConcatContext;
 
/* Detect a concat script by its mandatory signature line. */
static int concat_probe(AVProbeData *probe)
{
    static const char signature[] = "ffconcat version 1.0";

    if (!memcmp(probe->buf, signature, sizeof(signature) - 1))
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
/* Return the next whitespace-delimited keyword and advance *cursor past
 * it and any whitespace that follows; the keyword is NUL-terminated in
 * place. Returns an empty string at end of line. */
static char *get_keyword(uint8_t **cursor)
{
    uint8_t *p = *cursor;
    char *word;

    p   += strspn(p, SPACE_CHARS);
    word = p;
    p   += strcspn(p, SPACE_CHARS);
    if (*p) {
        *p++ = 0;
        p   += strspn(p, SPACE_CHARS);
    }
    *cursor = p;
    return word;
}
 
/* Check that a file name uses only a restricted character set:
 * [A-Za-z0-9_-] anywhere; '.' allowed except as the first character of
 * a path component; '/' separates components (components must not be
 * empty or start with '.'). Returns 1 if safe, 0 otherwise. */
static int safe_filename(const char *f)
{
    const char *component = f;

    for (; *f; f++) {
        int c = *f;
        int is_word = (unsigned)((c | 32) - 'a') < 26 ||
                      (unsigned)(c - '0') < 10 ||
                      c == '_' || c == '-';

        if (is_word)
            continue;
        if (f == component)
            return 0;              /* component starts with a special char */
        if (c == '/')
            component = f + 1;     /* next component begins after the slash */
        else if (c != '.')
            return 0;              /* any other character is unsafe */
    }
    return 1;
}
 
/* Set the local 'ret' variable and jump to the local cleanup label. */
#define FAIL(retcode) do { ret = (retcode); goto fail; } while(0)
 
/* Append one entry to cat->files, making filename absolute relative to
 * the script's own location.
 * Takes ownership of filename: it is freed on success and on failure.
 * On success, *rfile points to the new zero-initialized entry. */
static int add_file(AVFormatContext *avf, char *filename, ConcatFile **rfile,
                    unsigned *nb_files_alloc)
{
    ConcatContext *cat = avf->priv_data;
    ConcatFile *file;
    char *url = NULL;
    size_t url_len;
    int ret;

    /* in safe mode, only plain relative paths are accepted */
    if (cat->safe > 0 && !safe_filename(filename)) {
        av_log(avf, AV_LOG_ERROR, "Unsafe file name '%s'\n", filename);
        FAIL(AVERROR(EPERM));
    }
    url_len = strlen(avf->filename) + strlen(filename) + 16;
    if (!(url = av_malloc(url_len)))
        FAIL(AVERROR(ENOMEM));
    ff_make_absolute_url(url, url_len, avf->filename, filename);
    /* av_freep() NULLs filename, so the fail path below frees only url */
    av_freep(&filename);

    /* grow the files array geometrically, with overflow checks */
    if (cat->nb_files >= *nb_files_alloc) {
        size_t n = FFMAX(*nb_files_alloc * 2, 16);
        ConcatFile *new_files;
        if (n <= cat->nb_files || n > SIZE_MAX / sizeof(*cat->files) ||
            !(new_files = av_realloc(cat->files, n * sizeof(*cat->files))))
            FAIL(AVERROR(ENOMEM));
        cat->files = new_files;
        *nb_files_alloc = n;
    }

    file = &cat->files[cat->nb_files++];
    memset(file, 0, sizeof(*file));
    *rfile = file;

    file->url = url;
    file->start_time = AV_NOPTS_VALUE;
    file->duration = AV_NOPTS_VALUE;

    return 0;

fail:
    av_free(url);
    av_free(filename);
    return ret;
}
 
/* Close the current sub-demuxer (if any) and open file number fileno.
 * Also fills in the file's start_time if the script did not provide one,
 * as the end of the previous file. */
static int open_file(AVFormatContext *avf, unsigned fileno)
{
    ConcatContext *cat = avf->priv_data;
    ConcatFile *file = &cat->files[fileno];
    int ret;

    if (cat->avf)
        avformat_close_input(&cat->avf);

    ret = avformat_open_input(&cat->avf, file->url, NULL, NULL);
    if (ret >= 0)
        ret = avformat_find_stream_info(cat->avf, NULL);
    if (ret < 0) {
        av_log(avf, AV_LOG_ERROR, "Impossible to open '%s'\n", file->url);
        return ret;
    }

    cat->cur_file = file;
    if (file->start_time == AV_NOPTS_VALUE) {
        if (!fileno) {
            file->start_time = 0;
        } else {
            ConcatFile *prev = &cat->files[fileno - 1];
            file->start_time = prev->start_time + prev->duration;
        }
    }
    return 0;
}
 
/* Release everything: the open sub-demuxer, every file URL, and the
 * files array itself. Safe to call on a partially initialized context. */
static int concat_read_close(AVFormatContext *avf)
{
    ConcatContext *cat = avf->priv_data;
    unsigned n;

    if (cat->avf)
        avformat_close_input(&cat->avf);
    for (n = 0; n < cat->nb_files; n++)
        av_freep(&cat->files[n].url);
    av_freep(&cat->files);
    return 0;
}
 
/* Parse the whole concat script and set up the output streams.
 * The script is line based; recognized directives are "file <name>",
 * "duration <time>" and the "ffconcat version 1.0" signature. */
static int concat_read_header(AVFormatContext *avf)
{
    ConcatContext *cat = avf->priv_data;
    uint8_t buf[4096];
    uint8_t *cursor, *keyword;
    int ret, line = 0, i;
    unsigned nb_files_alloc = 0;
    ConcatFile *file = NULL;
    AVStream *st, *source_st;
    int64_t time = 0;

    while (1) {
        if ((ret = ff_get_line(avf->pb, buf, sizeof(buf))) <= 0)
            break;
        line++;
        cursor = buf;
        keyword = get_keyword(&cursor);
        /* skip empty lines and comments */
        if (!*keyword || *keyword == '#')
            continue;

        if (!strcmp(keyword, "file")) {
            char *filename = av_get_token((const char **)&cursor, SPACE_CHARS);
            if (!filename) {
                av_log(avf, AV_LOG_ERROR, "Line %d: filename required\n", line);
                FAIL(AVERROR_INVALIDDATA);
            }
            /* add_file() takes ownership of filename */
            if ((ret = add_file(avf, filename, &file, &nb_files_alloc)) < 0)
                FAIL(ret);
        } else if (!strcmp(keyword, "duration")) {
            char *dur_str = get_keyword(&cursor);
            int64_t dur;
            if (!file) {
                av_log(avf, AV_LOG_ERROR, "Line %d: duration without file\n",
                       line);
                FAIL(AVERROR_INVALIDDATA);
            }
            if ((ret = av_parse_time(&dur, dur_str, 1)) < 0) {
                av_log(avf, AV_LOG_ERROR, "Line %d: invalid duration '%s'\n",
                       line, dur_str);
                FAIL(ret);
            }
            file->duration = dur;
        } else if (!strcmp(keyword, "ffconcat")) {
            char *ver_kw = get_keyword(&cursor);
            char *ver_val = get_keyword(&cursor);
            if (strcmp(ver_kw, "version") || strcmp(ver_val, "1.0")) {
                av_log(avf, AV_LOG_ERROR, "Line %d: invalid version\n", line);
                FAIL(AVERROR_INVALIDDATA);
            }
            /* a signed script enables safe mode unless overridden */
            if (cat->safe < 0)
                cat->safe = 1;
        } else {
            av_log(avf, AV_LOG_ERROR, "Line %d: unknown keyword '%s'\n",
                   line, keyword);
            FAIL(AVERROR_INVALIDDATA);
        }
    }
    if (ret < 0)
        FAIL(ret);
    if (!cat->nb_files)
        FAIL(AVERROR_INVALIDDATA);

    /* compute cumulative start times; total duration (and therefore
     * seekability) is only known if every file has a duration */
    for (i = 0; i < cat->nb_files; i++) {
        if (cat->files[i].start_time == AV_NOPTS_VALUE)
            cat->files[i].start_time = time;
        else
            time = cat->files[i].start_time;
        if (cat->files[i].duration == AV_NOPTS_VALUE)
            break;
        time += cat->files[i].duration;
    }
    if (i == cat->nb_files) {
        avf->duration = time;
        cat->seekable = 1;
    }

    /* open the first file and mirror its streams on the outer demuxer */
    if ((ret = open_file(avf, 0)) < 0)
        FAIL(ret);
    for (i = 0; i < cat->avf->nb_streams; i++) {
        if (!(st = avformat_new_stream(avf, NULL)))
            FAIL(AVERROR(ENOMEM));
        source_st = cat->avf->streams[i];
        if ((ret = avcodec_copy_context(st->codec, source_st->codec)) < 0)
            FAIL(ret);
        st->r_frame_rate = source_st->r_frame_rate;
        st->avg_frame_rate = source_st->avg_frame_rate;
        st->time_base = source_st->time_base;
        st->sample_aspect_ratio = source_st->sample_aspect_ratio;
    }

    return 0;

fail:
    concat_read_close(avf);
    return ret;
}
 
/* Record the duration of the file that just ended (when the script did
 * not give one) and open the following file.
 * Returns AVERROR_EOF after the last file. */
static int open_next_file(AVFormatContext *avf)
{
    ConcatContext *cat = avf->priv_data;
    unsigned next = (cat->cur_file - cat->files) + 1;

    if (cat->cur_file->duration == AV_NOPTS_VALUE)
        cat->cur_file->duration = cat->avf->duration;

    return next < cat->nb_files ? open_file(avf, next) : AVERROR_EOF;
}
 
/* Read the next packet, switching to the next file on EOF, and shift
 * its timestamps from the sub-file's timeline to the global one. */
static int concat_read_packet(AVFormatContext *avf, AVPacket *pkt)
{
    ConcatContext *cat = avf->priv_data;
    int ret;
    int64_t delta;

    while (1) {
        /* on EOF of the current file, move to the next one and retry;
         * success or any other error leaves the loop */
        if ((ret = av_read_frame(cat->avf, pkt)) != AVERROR_EOF ||
            (ret = open_next_file(avf)) < 0)
            break;
    }
    /* Bug fix: on a failed read (error, or EOF after the last file) pkt
     * is not valid; the old code still indexed streams[] with it and
     * adjusted its timestamps. Bail out before touching the packet. */
    if (ret < 0)
        return ret;
    delta = av_rescale_q(cat->cur_file->start_time - cat->avf->start_time,
                         AV_TIME_BASE_Q,
                         cat->avf->streams[pkt->stream_index]->time_base);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += delta;
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += delta;
    return ret;
}
 
/* Rescale a seek target and its [min, max] bounds from tb_in to tb_out.
 * The bounds are rounded inward (min up, max down) so that the interval
 * never widens; AV_ROUND_PASS_MINMAX keeps INT64_MIN/MAX untouched. */
static void rescale_interval(AVRational tb_in, AVRational tb_out,
                             int64_t *min_ts, int64_t *ts, int64_t *max_ts)
{
    *ts     = av_rescale_q (* ts, tb_in, tb_out);
    *min_ts = av_rescale_q_rnd(*min_ts, tb_in, tb_out,
                               AV_ROUND_UP   | AV_ROUND_PASS_MINMAX);
    *max_ts = av_rescale_q_rnd(*max_ts, tb_in, tb_out,
                               AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
}
 
/* Attempt the seek inside the currently open file: shift the target and
 * its bounds into the sub-file's timeline, rescale them to the stream
 * time base if a specific stream was requested, then delegate. */
static int try_seek(AVFormatContext *avf, int stream,
                    int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    ConcatContext *cat = avf->priv_data;
    int64_t offset = cat->cur_file->start_time - cat->avf->start_time;

    ts -= offset;
    if (min_ts != INT64_MIN)
        min_ts -= offset;
    if (max_ts != INT64_MAX)
        max_ts -= offset;
    if (stream >= 0) {
        if (stream >= cat->avf->nb_streams)
            return AVERROR(EIO);
        rescale_interval(AV_TIME_BASE_Q, cat->avf->streams[stream]->time_base,
                         &min_ts, &ts, &max_ts);
    }
    return avformat_seek_file(cat->avf, stream, min_ts, ts, max_ts, flags);
}
 
/* Perform the actual seek: binary-search for the file whose start_time
 * interval contains ts, open it and try the seek there; if that fails
 * and [min_ts, max_ts] allows it, retry once with the next file. */
static int real_seek(AVFormatContext *avf, int stream,
                     int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    ConcatContext *cat = avf->priv_data;
    int ret, left, right;

    if (stream >= 0) {
        if (stream >= avf->nb_streams)
            return AVERROR(EINVAL);
        /* convert to AV_TIME_BASE_Q, which file start_times use */
        rescale_interval(avf->streams[stream]->time_base, AV_TIME_BASE_Q,
                         &min_ts, &ts, &max_ts);
    }

    /* invariant: files[left].start_time <= ts < files[right].start_time */
    left  = 0;
    right = cat->nb_files;
    while (right - left > 1) {
        int mid = (left + right) / 2;
        if (ts < cat->files[mid].start_time)
            right = mid;
        else
            left = mid;
    }

    if ((ret = open_file(avf, left)) < 0)
        return ret;

    ret = try_seek(avf, stream, min_ts, ts, max_ts, flags);
    if (ret < 0 &&
        left < cat->nb_files - 1 &&
        cat->files[left + 1].start_time < max_ts) {
        if ((ret = open_file(avf, left + 1)) < 0)
            return ret;
        ret = try_seek(avf, stream, min_ts, ts, max_ts, flags);
    }
    return ret;
}
 
/* Seek entry point. Saves the current file/sub-demuxer so the previous
 * state can be restored if the seek fails. Only possible when all
 * durations are known (cat->seekable). */
static int concat_seek(AVFormatContext *avf, int stream,
                       int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    ConcatContext *cat = avf->priv_data;
    ConcatFile *file_backup = cat->cur_file;
    AVFormatContext *avf_backup = cat->avf;
    int ret;

    if (!cat->seekable)
        return AVERROR(ESPIPE); /* XXX: can we use it? */
    if (flags & (AVSEEK_FLAG_BYTE | AVSEEK_FLAG_FRAME))
        return AVERROR(ENOSYS);

    /* seek on a fresh context so the old one survives a failure */
    cat->avf = NULL;
    ret = real_seek(avf, stream, min_ts, ts, max_ts, flags);
    if (ret < 0) {
        /* restore the previous state */
        if (cat->avf)
            avformat_close_input(&cat->avf);
        cat->avf = avf_backup;
        cat->cur_file = file_backup;
    } else {
        avformat_close_input(&avf_backup);
    }
    return ret;
}
 
#define OFFSET(x) offsetof(ConcatContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

/* "safe" defaults to -1 (auto): it is turned on by the script signature
 * in concat_read_header() unless the user set it explicitly. */
static const AVOption options[] = {
    { "safe", "enable safe mode",
      OFFSET(safe), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, DEC },
    { NULL }
};
 
/* AVClass wiring exposing the options above through AVOptions. */
static const AVClass concat_class = {
    .class_name = "concat demuxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
 
/* Demuxer that reads a text script listing media files and plays them
 * back to back as one virtual stream. */
AVInputFormat ff_concat_demuxer = {
    .name           = "concat",
    .long_name      = NULL_IF_CONFIG_SMALL("Virtual concatenation script"),
    .priv_data_size = sizeof(ConcatContext),
    .read_probe     = concat_probe,
    .read_header    = concat_read_header,
    .read_packet    = concat_read_packet,
    .read_close     = concat_read_close,
    .read_seek2     = concat_seek,
    .priv_class     = &concat_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/crcenc.c
0,0 → 1,67
/*
* CRC encoder (for codec/format testing)
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/adler32.h"
#include "avformat.h"
 
/* Muxer state: a running Adler-32 checksum over all packet payloads. */
typedef struct CRCState {
    uint32_t crcval;
} CRCState;
 
/* Reset the running checksum to the Adler-32 seed value. */
static int crc_write_header(struct AVFormatContext *s)
{
    CRCState *state = s->priv_data;

    state->crcval = 1; /* Adler-32 initial value */
    return 0;
}
 
/* Fold the packet's payload into the running checksum. */
static int crc_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    CRCState *state = s->priv_data;

    state->crcval = av_adler32_update(state->crcval, pkt->data, pkt->size);
    return 0;
}
 
/* Emit the final checksum as a single "CRC=0x%08x" text line. */
static int crc_write_trailer(struct AVFormatContext *s)
{
    CRCState *state = s->priv_data;
    char line[64];
    int len;

    /* cannot truncate: "CRC=0x" + 8 hex digits + "\n" is 15 bytes */
    len = snprintf(line, sizeof(line), "CRC=0x%08x\n", state->crcval);
    avio_write(s->pb, line, len);

    return 0;
}
 
/* Testing muxer: discards media, writes only the final checksum line. */
AVOutputFormat ff_crc_muxer = {
    .name              = "crc",
    .long_name         = NULL_IF_CONFIG_SMALL("CRC testing"),
    .priv_data_size    = sizeof(CRCState),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_RAWVIDEO,
    .write_header      = crc_write_header,
    .write_packet      = crc_write_packet,
    .write_trailer     = crc_write_trailer,
    .flags             = AVFMT_NOTIMESTAMPS,
};
/contrib/sdk/sources/ffmpeg/libavformat/crypto.c
0,0 → 1,170
/*
* Decryption protocol handler
* Copyright (c) 2011 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "libavutil/aes.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "internal.h"
#include "url.h"
 
/* Staging buffers are sized in whole AES blocks. */
#define MAX_BUFFER_BLOCKS 150
#define BLOCKSIZE 16

typedef struct {
    const AVClass *class;
    URLContext *hd;          /* nested input protocol */
    /* ciphertext in, decrypted plaintext out */
    uint8_t inbuffer [BLOCKSIZE*MAX_BUFFER_BLOCKS],
            outbuffer[BLOCKSIZE*MAX_BUFFER_BLOCKS];
    uint8_t *outptr;         /* next unread byte in outbuffer */
    int indata, indata_used, outdata;
    int eof;                 /* set once the nested read returns <= 0 */
    uint8_t *key;            /* from the "key" AVOption */
    int keylen;
    uint8_t *iv;             /* from the "iv" AVOption */
    int ivlen;
    struct AVAES *aes;
} CryptoContext;
 
#define OFFSET(x) offsetof(CryptoContext, x)
#define D AV_OPT_FLAG_DECODING_PARAM
/* the "key" and "iv" options fill CryptoContext.key/keylen and iv/ivlen */
static const AVOption options[] = {
    {"key", "AES decryption key", OFFSET(key), AV_OPT_TYPE_BINARY, .flags = D },
    {"iv", "AES decryption initialization vector", OFFSET(iv), AV_OPT_TYPE_BINARY, .flags = D },
    { NULL }
};
 
/* AVClass wiring exposing the key/iv options through AVOptions. */
static const AVClass crypto_class = {
    .class_name = "crypto",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Open a "crypto:" or "crypto+" URL: validate the key/IV options, open
 * the nested input and set up an AES context in decryption mode.
 * Writing is not supported. */
static int crypto_open2(URLContext *h, const char *uri, int flags, AVDictionary **options)
{
    const char *nested_url;
    int ret = 0;
    CryptoContext *c = h->priv_data;

    if (!av_strstart(uri, "crypto+", &nested_url) &&
        !av_strstart(uri, "crypto:", &nested_url)) {
        av_log(h, AV_LOG_ERROR, "Unsupported url %s\n", uri);
        ret = AVERROR(EINVAL);
        goto err;
    }

    /* both key and IV must hold at least one AES block (16 bytes) */
    if (c->keylen < BLOCKSIZE || c->ivlen < BLOCKSIZE) {
        av_log(h, AV_LOG_ERROR, "Key or IV not set\n");
        ret = AVERROR(EINVAL);
        goto err;
    }
    if (flags & AVIO_FLAG_WRITE) {
        av_log(h, AV_LOG_ERROR, "Only decryption is supported currently\n");
        ret = AVERROR(ENOSYS);
        goto err;
    }
    if ((ret = ffurl_open(&c->hd, nested_url, AVIO_FLAG_READ,
                          &h->interrupt_callback, options)) < 0) {
        av_log(h, AV_LOG_ERROR, "Unable to open input\n");
        goto err;
    }
    c->aes = av_aes_alloc();
    if (!c->aes) {
        ret = AVERROR(ENOMEM);
        /* NOTE(review): c->hd stays open on this path; presumably the
         * caller's close path releases it via crypto_close() — verify */
        goto err;
    }

    /* 128-bit key, last argument 1 selects decryption */
    av_aes_init(c->aes, c->key, 128, 1);

    h->is_streamed = 1; /* no seeking through encrypted data */

err:
    return ret;
}
 
/* Read decrypted data. Ciphertext is buffered in inbuffer, decrypted in
 * whole blocks into outbuffer and served from there; the final block is
 * withheld until EOF so its PKCS7 padding can be stripped. */
static int crypto_read(URLContext *h, uint8_t *buf, int size)
{
    CryptoContext *c = h->priv_data;
    int blocks;
retry:
    /* serve already-decrypted bytes first */
    if (c->outdata > 0) {
        size = FFMIN(size, c->outdata);
        memcpy(buf, c->outptr, size);
        c->outptr += size;
        c->outdata -= size;
        return size;
    }
    // We avoid using the last block until we've found EOF,
    // since we'll remove PKCS7 padding at the end. So make
    // sure we've got at least 2 blocks, so we can decrypt
    // at least one.
    while (c->indata - c->indata_used < 2*BLOCKSIZE) {
        int n = ffurl_read(c->hd, c->inbuffer + c->indata,
                           sizeof(c->inbuffer) - c->indata);
        if (n <= 0) {
            /* NOTE(review): a transient read error is treated like EOF */
            c->eof = 1;
            break;
        }
        c->indata += n;
    }
    blocks = (c->indata - c->indata_used) / BLOCKSIZE;
    if (!blocks)
        return AVERROR_EOF;
    if (!c->eof)
        blocks--; /* hold back the final block until EOF */
    av_aes_crypt(c->aes, c->outbuffer, c->inbuffer + c->indata_used, blocks,
                 c->iv, 1);
    c->outdata = BLOCKSIZE * blocks;
    c->outptr = c->outbuffer;
    c->indata_used += BLOCKSIZE * blocks;
    /* compact the input buffer once half of it has been consumed */
    if (c->indata_used >= sizeof(c->inbuffer)/2) {
        memmove(c->inbuffer, c->inbuffer + c->indata_used,
                c->indata - c->indata_used);
        c->indata -= c->indata_used;
        c->indata_used = 0;
    }
    if (c->eof) {
        // Remove PKCS7 padding at the end
        int padding = c->outbuffer[c->outdata - 1];
        /* NOTE(review): padding is not validated; corrupt input with
         * padding > outdata would drive outdata negative — confirm */
        c->outdata -= padding;
    }
    goto retry;
}
 
/* Close the nested protocol (if open) and release the AES context. */
static int crypto_close(URLContext *h)
{
    CryptoContext *ctx = h->priv_data;

    if (ctx->hd)
        ffurl_close(ctx->hd);
    av_freep(&ctx->aes);
    return 0;
}
 
/* "crypto:" protocol: transparent AES decryption of a nested URL. */
URLProtocol ff_crypto_protocol = {
    .name            = "crypto",
    .url_open2       = crypto_open2,
    .url_read        = crypto_read,
    .url_close       = crypto_close,
    .priv_data_size  = sizeof(CryptoContext),
    .priv_data_class = &crypto_class,
    .flags           = URL_PROTOCOL_FLAG_NESTED_SCHEME,
};
/contrib/sdk/sources/ffmpeg/libavformat/cutils.c
0,0 → 1,57
/*
* various simple utilities for libavformat
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "internal.h"
 
/* Gregorian leap-year test. */
#define ISLEAP(y) (((y) % 4 == 0) && (((y) % 100) != 0 || ((y) % 400) == 0))
/* Number of leap years in [1..y]. */
#define LEAPS_COUNT(y) ((y)/4 - (y)/100 + (y)/400)

/* This is our own gmtime_r. It differs from its POSIX counterpart in a
   couple of places: tm_year holds the full year (not year-1900) and
   tm_mon runs from 1 to 12 (not 0 to 11). */
struct tm *ff_brktimegm(time_t secs, struct tm *tm)
{
    int day_count, year, guess, month;
    int month_days[] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };

    /* split off the time of day */
    day_count = secs / 86400;
    secs     %= 86400;
    tm->tm_hour = secs / 3600;
    tm->tm_min  = (secs % 3600) / 60;
    tm->tm_sec  = secs % 60;

    /* iterate towards the correct year, starting at the epoch and
       jumping by a lower bound on the number of years remaining */
    year = 1970;
    while (day_count > 365) {
        guess      = year + day_count / 366;
        day_count -= (guess - year) * 365 +
                     LEAPS_COUNT(guess - 1) - LEAPS_COUNT(year - 1);
        year       = guess;
    }
    if (day_count == 365 && !ISLEAP(year)) {
        day_count = 0;
        year++;
    }
    month_days[1] = ISLEAP(year) ? 29 : 28;
    for (month = 0; day_count >= month_days[month]; month++)
        day_count -= month_days[month];

    tm->tm_year = year;      /* unlike gmtime_r we store complete year here */
    tm->tm_mon  = month + 1; /* unlike gmtime_r tm_mon is from 1 to 12 */
    tm->tm_mday = day_count + 1;

    return tm;
}
/contrib/sdk/sources/ffmpeg/libavformat/data_uri.c
0,0 → 1,118
/*
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <string.h>
#include "libavutil/avstring.h"
#include "libavutil/base64.h"
#include "url.h"
 
/* In-memory payload of a data: URI. */
typedef struct {
    const uint8_t *data; /* payload; may point into the URI string itself */
    void *tofree;        /* base64-decoded buffer to free on close, or NULL */
    size_t size;         /* payload length in bytes */
    size_t pos;          /* current read offset */
} DataContext;
 
/* Open a data: URI of the form data:[content/type][;option...],payload.
 * The ";base64" option selects base64 decoding of the payload; other
 * options are logged and ignored. The payload stays in memory for the
 * lifetime of the context. */
static av_cold int data_open(URLContext *h, const char *uri, int flags)
{
    DataContext *dc = h->priv_data;
    const char *data, *opt, *next;
    char *ddata;
    int ret, base64 = 0;
    size_t in_size;

    /* data:content/type[;base64],payload */

    av_strstart(uri, "data:", &uri);
    data = strchr(uri, ',');
    if (!data) {
        av_log(h, AV_LOG_ERROR, "No ',' delimiter in URI\n");
        return AVERROR(EINVAL);
    }
    /* walk the ';'-separated header fields before the comma */
    opt = uri;
    while (opt < data) {
        next = av_x_if_null(memchr(opt, ';', data - opt), data);
        if (opt == uri) {
            /* first field is the content type */
            if (!memchr(opt, '/', next - opt)) { /* basic validity check */
                av_log(h, AV_LOG_ERROR, "Invalid content-type '%.*s'\n",
                       (int)(next - opt), opt);
                return AVERROR(EINVAL);
            }
            av_log(h, AV_LOG_VERBOSE, "Content-type: %.*s\n",
                   (int)(next - opt), opt);
        } else {
            if (!av_strncasecmp(opt, "base64", next - opt)) {
                base64 = 1;
            } else {
                av_log(h, AV_LOG_VERBOSE, "Ignoring option '%.*s'\n",
                       (int)(next - opt), opt);
            }
        }
        opt = next + 1;
    }

    data++;
    in_size = strlen(data);
    if (base64) {
        /* upper bound on decoded size: 3 output bytes per 4 input bytes */
        size_t out_size = 3 * (in_size / 4) + 1;

        if (out_size > INT_MAX || !(ddata = av_malloc(out_size)))
            return AVERROR(ENOMEM);
        if ((ret = av_base64_decode(ddata, data, out_size)) < 0) {
            av_free(ddata);
            av_log(h, AV_LOG_ERROR, "Invalid base64 in URI\n");
            return ret;
        }
        dc->data = dc->tofree = ddata;
        dc->size = ret; /* actual decoded length */
    } else {
        /* plain payload: used in place; NOTE(review): assumes the URI
         * string outlives the context — verify against ffurl_open */
        dc->data = data;
        dc->size = in_size;
    }
    return 0;
}
 
/* Release the decoded payload buffer, if any was allocated. */
static av_cold int data_close(URLContext *h)
{
    DataContext *ctx = h->priv_data;

    av_freep(&ctx->tofree);
    return 0;
}
 
/* Copy up to size bytes of the in-memory payload into buf, advancing
 * the read offset; AVERROR_EOF once everything has been consumed. */
static int data_read(URLContext *h, unsigned char *buf, int size)
{
    DataContext *dc = h->priv_data;
    size_t avail;

    if (dc->pos >= dc->size)
        return AVERROR_EOF;
    avail = dc->size - dc->pos;
    if ((size_t)size > avail)
        size = avail;
    memcpy(buf, dc->data + dc->pos, size);
    dc->pos += size;
    return size;
}
 
/* "data:" protocol: serves the payload embedded in the URI itself. */
URLProtocol ff_data_protocol = {
    .name           = "data",
    .url_open       = data_open,
    .url_close      = data_close,
    .url_read       = data_read,
    .priv_data_size = sizeof(DataContext),
};
/contrib/sdk/sources/ffmpeg/libavformat/daud.c
0,0 → 1,95
/*
* D-Cinema audio demuxer
* Copyright (c) 2005 Reimar Döffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "avformat.h"
 
/* Create the single audio stream with the fixed D-Cinema parameters:
 * 6-channel (5.1) 24-bit PCM at 96 kHz. */
static int daud_header(AVFormatContext *s) {
    AVCodecContext *codec;
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);
    codec = st->codec;
    codec->codec_type            = AVMEDIA_TYPE_AUDIO;
    codec->codec_id              = AV_CODEC_ID_PCM_S24DAUD;
    codec->codec_tag             = MKTAG('d', 'a', 'u', 'd');
    codec->channels              = 6;
    codec->channel_layout        = AV_CH_LAYOUT_5POINT1;
    codec->sample_rate           = 96000;
    codec->bit_rate              = 3 * 6 * 96000 * 8; /* 3 bytes/sample */
    codec->block_align           = 3 * 6;
    codec->bits_per_coded_sample = 24;
    return 0;
}
 
/* Read one packet: 16-bit big-endian payload size, 16 unknown bits,
 * then the payload itself. */
static int daud_packet(AVFormatContext *s, AVPacket *pkt) {
    AVIOContext *pb = s->pb;
    int ret, size;
    if (url_feof(pb))
        return AVERROR_EOF; /* fix: was AVERROR(EIO); EOF is not an I/O error */
    size = avio_rb16(pb);
    avio_rb16(pb); // unknown
    ret = av_get_packet(pb, pkt, size);
    pkt->stream_index = 0;
    return ret;
}
 
static int daud_write_header(struct AVFormatContext *s)
{
AVCodecContext *codec = s->streams[0]->codec;
if (codec->channels!=6 || codec->sample_rate!=96000)
return -1;
return 0;
}
 
/* Write one packet: 16-bit size, 16-bit constant marker, payload.
 * The 16-bit size field limits payloads to 65535 bytes. */
static int daud_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    if (pkt->size > 65535) {
        av_log(s, AV_LOG_ERROR,
               "Packet size too large for s302m. (%d > 65535)\n", pkt->size);
        return -1;
    }
    avio_wb16(s->pb, pkt->size);
    avio_wb16(s->pb, 0x8010); // unknown
    avio_write(s->pb, pkt->data, pkt->size);
    return 0;
}
 
#if CONFIG_DAUD_DEMUXER
/* Demuxer for raw D-Cinema audio (.302 / .daud files). */
AVInputFormat ff_daud_demuxer = {
    .name        = "daud",
    .long_name   = NULL_IF_CONFIG_SMALL("D-Cinema audio"),
    .read_header = daud_header,
    .read_packet = daud_packet,
    .extensions  = "302,daud",
};
#endif
 
#if CONFIG_DAUD_MUXER
/* Muxer counterpart; accepts only the matching 24-bit PCM codec. */
AVOutputFormat ff_daud_muxer = {
    .name         = "daud",
    .long_name    = NULL_IF_CONFIG_SMALL("D-Cinema audio"),
    .extensions   = "302",
    .audio_codec  = AV_CODEC_ID_PCM_S24DAUD,
    .video_codec  = AV_CODEC_ID_NONE,
    .write_header = daud_write_header,
    .write_packet = daud_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/dfa.c
0,0 → 1,126
/*
* Chronomaster DFA Format Demuxer
* Copyright (c) 2011 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
/* A DFA file begins with the 'DFIA' magic. */
static int dfa_probe(AVProbeData *p)
{
    if (p->buf_size >= 4 && AV_RL32(p->buf) == MKTAG('D', 'F', 'I', 'A'))
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
/* Parse the fixed DFA header (magic, version, frame count, dimensions,
 * frame rate) and create the single video stream. */
static int dfa_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVStream *st;
    int frames;
    int version;
    uint32_t mspf;

    if (avio_rl32(pb) != MKTAG('D', 'F', 'I', 'A')) {
        av_log(s, AV_LOG_ERROR, "Invalid magic for DFA\n");
        return AVERROR_INVALIDDATA;
    }

    version = avio_rl16(pb);
    frames = avio_rl16(pb);

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_DFA;
    st->codec->width = avio_rl16(pb);
    st->codec->height = avio_rl16(pb);
    mspf = avio_rl32(pb); /* milliseconds per frame */
    if (!mspf) {
        av_log(s, AV_LOG_WARNING, "Zero FPS reported, defaulting to 10\n");
        mspf = 100;
    }
    avpriv_set_pts_info(st, 24, mspf, 1000);
    avio_skip(pb, 128 - 16); // padding
    st->duration = frames;

    /* the decoder reads the container version from extradata */
    if (ff_alloc_extradata(st->codec, 2))
        return AVERROR(ENOMEM);
    AV_WL16(st->codec->extradata, version);
    if (version == 0x100)
        st->sample_aspect_ratio = (AVRational){2, 1};

    return 0;
}
 
/* Accumulate one frame's worth of chunks into a single packet.
 * Each chunk is a 12-byte header followed by its payload; the frame
 * ends with an 'EOFR' chunk. */
static int dfa_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    uint32_t frame_size;
    int ret, first = 1;

    if (pb->eof_reached)
        return AVERROR_EOF;

    if (av_get_packet(pb, pkt, 12) != 12)
        return AVERROR(EIO);
    while (!pb->eof_reached) {
        if (!first) {
            /* append the next 12-byte chunk header */
            ret = av_append_packet(pb, pkt, 12);
            if (ret < 0) {
                av_free_packet(pkt);
                return ret;
            }
        } else
            first = 0;
        frame_size = AV_RL32(pkt->data + pkt->size - 8);
        if (frame_size > INT_MAX - 4) {
            av_log(s, AV_LOG_ERROR, "Too large chunk size: %d\n", frame_size);
            av_free_packet(pkt); /* fix: this path leaked the packet */
            return AVERROR(EIO);
        }
        if (AV_RL32(pkt->data + pkt->size - 12) == MKTAG('E', 'O', 'F', 'R')) {
            if (frame_size) {
                av_log(s, AV_LOG_WARNING, "skipping %d bytes of end-of-frame marker chunk\n",
                       frame_size);
                avio_skip(pb, frame_size);
            }
            return 0;
        }
        ret = av_append_packet(pb, pkt, frame_size);
        if (ret < 0) {
            av_free_packet(pkt);
            return ret;
        }
    }

    return 0;
}
 
/* Demuxer registration for Chronomaster DFA. */
AVInputFormat ff_dfa_demuxer = {
    .name           = "dfa",
    .long_name      = NULL_IF_CONFIG_SMALL("Chronomaster DFA"),
    .read_probe     = dfa_probe,
    .read_header    = dfa_read_header,
    .read_packet    = dfa_read_packet,
    .flags          = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/diracdec.c
0,0 → 1,34
/*
* RAW Dirac demuxer
* Copyright (c) 2007 Marco Gerards <marco@gnu.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "rawdec.h"
 
/* Raw Dirac streams begin with the 'BBCD' parse-info prefix. */
static int dirac_probe(AVProbeData *p)
{
    return AV_RL32(p->buf) == MKTAG('B', 'B', 'C', 'D') ?
           AVPROBE_SCORE_MAX : 0;
}
 
/* Register the raw Dirac demuxer through the common raw-video helper. */
FF_DEF_RAWVIDEO_DEMUXER(dirac, "raw Dirac", dirac_probe, NULL, AV_CODEC_ID_DIRAC)
/contrib/sdk/sources/ffmpeg/libavformat/dnxhddec.c
0,0 → 1,45
/*
* RAW DNxHD (SMPTE VC-3) demuxer
* Copyright (c) 2008 Baptiste Coudurier <baptiste.coudurier@gmail.com>
* Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "rawdec.h"
 
/* Probe raw DNxHD: fixed 5-byte header prefix, non-zero dimensions at
 * offsets 0x18/0x1a, and a compression id in the known 1235..1253 range
 * at offset 0x28. */
static int dnxhd_probe(AVProbeData *p)
{
    static const uint8_t prefix[] = { 0x00, 0x00, 0x02, 0x80, 0x01 };
    int width, height, cid;

    if (p->buf_size < 0x2c || memcmp(p->buf, prefix, sizeof(prefix)))
        return 0;
    height = AV_RB16(p->buf + 0x18);
    width  = AV_RB16(p->buf + 0x1a);
    if (!width || !height)
        return 0;
    cid = AV_RB32(p->buf + 0x28);
    if (cid < 1235 || cid > 1253)
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/* Register the raw DNxHD demuxer through the common raw-video helper. */
FF_DEF_RAWVIDEO_DEMUXER(dnxhd, "raw DNxHD (SMPTE VC-3)", dnxhd_probe, NULL, AV_CODEC_ID_DNXHD)
/contrib/sdk/sources/ffmpeg/libavformat/dsicin.c
0,0 → 1,234
/*
* Delphine Software International CIN File Demuxer
* Copyright (c) 2006 Gregory Montoir (cyx@users.sourceforge.net)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Delphine Software International CIN file demuxer
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
 
 
/* Values of the fixed CIN file header. */
typedef struct CinFileHeader {
    int video_frame_size;
    int video_frame_width;
    int video_frame_height;
    int audio_frequency;  /* must be 22050 (validated on read) */
    int audio_bits;       /* must be 16 */
    int audio_stereo;     /* must be 0 (mono) */
    int audio_frame_size;
} CinFileHeader;
 
/* Per-frame header, read before each frame's payload. */
typedef struct CinFrameHeader {
    int audio_frame_type;
    int video_frame_type;
    int pal_colors_count;
    int audio_frame_size; /* bytes of audio following the header */
    int video_frame_size; /* bytes of video following the audio */
} CinFrameHeader;
 
/* Demuxer private state. */
typedef struct CinDemuxContext {
    int audio_stream_index;
    int video_stream_index;
    CinFileHeader file_header;
    int64_t audio_stream_pts;
    int64_t video_stream_pts;
    CinFrameHeader frame_header;   /* header of the frame being emitted */
    int audio_buffer_size;         /* audio bytes still pending for this frame */
} CinDemuxContext;
 
 
/* Probe for a DSI CIN file: the fixed header marker followed by the
 * expected audio parameters (22050 Hz, 16-bit, mono). */
static int cin_probe(AVProbeData *p)
{
    int ok = AV_RL32(&p->buf[0])  == 0x55AA0000 &&
             AV_RL32(&p->buf[12]) == 22050      &&
             p->buf[16] == 16 && p->buf[17] == 0;

    return ok ? AVPROBE_SCORE_MAX : 0;
}
 
/* Read and validate the file header; field order matches the on-disk
 * layout, so the avio_* calls below must not be reordered. */
static int cin_read_file_header(CinDemuxContext *cin, AVIOContext *pb) {
    CinFileHeader *hdr = &cin->file_header;

    if (avio_rl32(pb) != 0x55AA0000)
        return AVERROR_INVALIDDATA;

    hdr->video_frame_size = avio_rl32(pb);
    hdr->video_frame_width = avio_rl16(pb);
    hdr->video_frame_height = avio_rl16(pb);
    hdr->audio_frequency = avio_rl32(pb);
    hdr->audio_bits = avio_r8(pb);
    hdr->audio_stereo = avio_r8(pb);
    hdr->audio_frame_size = avio_rl16(pb);

    /* only the fixed 22050 Hz / 16-bit / mono layout is supported */
    if (hdr->audio_frequency != 22050 || hdr->audio_bits != 16 || hdr->audio_stereo != 0)
        return AVERROR_INVALIDDATA;

    return 0;
}
 
/* Read the file header and create the fixed video and audio streams. */
static int cin_read_header(AVFormatContext *s)
{
    int rc;
    CinDemuxContext *cin = s->priv_data;
    CinFileHeader *hdr = &cin->file_header;
    AVIOContext *pb = s->pb;
    AVStream *st;

    rc = cin_read_file_header(cin, pb);
    if (rc)
        return rc;

    cin->video_stream_pts = 0;
    cin->audio_stream_pts = 0;
    cin->audio_buffer_size = 0;

    /* initialize the video decoder stream */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    /* video pts counts frames at 12 fps */
    avpriv_set_pts_info(st, 32, 1, 12);
    cin->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_DSICINVIDEO;
    st->codec->codec_tag = 0; /* no fourcc */
    st->codec->width = hdr->video_frame_width;
    st->codec->height = hdr->video_frame_height;

    /* initialize the audio decoder stream */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    /* audio pts counts samples at 22050 Hz */
    avpriv_set_pts_info(st, 32, 1, 22050);
    cin->audio_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_DSICINAUDIO;
    st->codec->codec_tag = 0; /* no tag */
    st->codec->channels = 1;
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    st->codec->sample_rate = 22050;
    st->codec->bits_per_coded_sample = 8;
    st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_coded_sample * st->codec->channels;

    return 0;
}
 
/* Parse one per-frame header into cin->frame_header and validate the
 * trailing 0xAA55AA55 sync word. Returns 0 on success, AVERROR(EIO)
 * on premature EOF/IO error, AVERROR_INVALIDDATA on corruption. */
static int cin_read_frame_header(CinDemuxContext *cin, AVIOContext *pb) {
    CinFrameHeader *hdr = &cin->frame_header;

    hdr->video_frame_type = avio_r8(pb);
    hdr->audio_frame_type = avio_r8(pb);
    hdr->pal_colors_count = avio_rl16(pb);
    hdr->video_frame_size = avio_rl32(pb);
    hdr->audio_frame_size = avio_rl32(pb);

    /* the avio reads above return 0 at EOF, so check explicitly */
    if (url_feof(pb) || pb->error)
        return AVERROR(EIO);

    /* each frame header ends with this sync word */
    if (avio_rl32(pb) != 0xAA55AA55)
        return AVERROR_INVALIDDATA;
    /* sizes are read as unsigned 32-bit but stored as int; reject wrap-around */
    if (hdr->video_frame_size < 0 || hdr->audio_frame_size < 0)
        return AVERROR_INVALIDDATA;

    return 0;
}
 
/* Emit one packet. Each CIN frame yields two packets: first a video
 * packet (with a 4-byte in-band header carrying palette info), then,
 * on the following call, the frame's audio as a separate packet. */
static int cin_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    CinDemuxContext *cin = s->priv_data;
    AVIOContext *pb = s->pb;
    CinFrameHeader *hdr = &cin->frame_header;
    int rc, palette_type, pkt_size;
    int ret;

    if (cin->audio_buffer_size == 0) {
        rc = cin_read_frame_header(cin, pb);
        if (rc)
            return rc;

        /* a negative 16-bit palette count selects the alternate
         * (4 bytes/entry) palette encoding; the magnitude is the count */
        if ((int16_t)hdr->pal_colors_count < 0) {
            hdr->pal_colors_count = -(int16_t)hdr->pal_colors_count;
            palette_type = 1;
        } else {
            palette_type = 0;
        }

        /* palette and video packet: 3 or 4 bytes per palette entry
         * followed by the compressed video data */
        pkt_size = (palette_type + 3) * hdr->pal_colors_count + hdr->video_frame_size;

        /* clamp to the remaining input so a corrupt size can't over-allocate */
        pkt_size = ffio_limit(pb, pkt_size);

        ret = av_new_packet(pkt, 4 + pkt_size);
        if (ret < 0)
            return ret;

        pkt->stream_index = cin->video_stream_index;
        pkt->pts = cin->video_stream_pts++;

        /* 4-byte in-band header consumed by the video decoder */
        pkt->data[0] = palette_type;
        pkt->data[1] = hdr->pal_colors_count & 0xFF;
        pkt->data[2] = hdr->pal_colors_count >> 8;
        pkt->data[3] = hdr->video_frame_type;

        ret = avio_read(pb, &pkt->data[4], pkt_size);
        if (ret < 0) {
            av_free_packet(pkt);
            return ret;
        }
        /* short read: keep what we got, shrink the packet to match */
        if (ret < pkt_size)
            av_shrink_packet(pkt, 4 + ret);

        /* sound buffer will be processed on next read_packet() call */
        cin->audio_buffer_size = hdr->audio_frame_size;
        return 0;
    }

    /* audio packet */
    ret = av_get_packet(pb, pkt, cin->audio_buffer_size);
    if (ret < 0)
        return ret;

    pkt->stream_index = cin->audio_stream_index;
    pkt->pts = cin->audio_stream_pts;
    /* the very first audio frame carries one sample less of output */
    pkt->duration = cin->audio_buffer_size - (pkt->pts == 0);
    cin->audio_stream_pts += pkt->duration;
    cin->audio_buffer_size = 0;
    return 0;
}
 
/* Demuxer registration for Delphine Software International CIN files. */
AVInputFormat ff_dsicin_demuxer = {
    .name           = "dsicin",
    .long_name      = NULL_IF_CONFIG_SMALL("Delphine Software International CIN"),
    .priv_data_size = sizeof(CinDemuxContext),
    .read_probe     = cin_probe,
    .read_header    = cin_read_header,
    .read_packet    = cin_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/dtsdec.c
0,0 → 1,82
/*
* RAW DTS demuxer
* Copyright (c) 2008 Benjamin Larsson
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "rawdec.h"
 
#define DCA_MARKER_14B_BE 0x1FFFE800
#define DCA_MARKER_14B_LE 0xFF1F00E8
#define DCA_MARKER_RAW_BE 0x7FFE8001
#define DCA_MARKER_RAW_LE 0xFE7F0180
 
/* Probe raw DTS by scanning for frame sync markers at even offsets.
 * Counts hits per marker flavor (raw BE/LE merged, 14-bit BE, 14-bit LE)
 * and accumulates a sample-difference metric to reject low-activity
 * (e.g. silent PCM) data that would otherwise false-positive. */
static int dts_probe(AVProbeData *p)
{
    const uint8_t *buf, *bufp;
    uint32_t state = -1;
    int markers[3] = {0};
    int sum, max;
    int64_t diff = 0;

    buf = p->buf;

    /* slide a 32-bit window over the buffer two bytes at a time
     * (DTS sync words are 16-bit aligned) */
    for(; buf < (p->buf+p->buf_size)-2; buf+=2) {
        bufp = buf;
        state = (state << 16) | bytestream_get_be16(&bufp);

        /* regular bitstream */
        if (state == DCA_MARKER_RAW_BE || state == DCA_MARKER_RAW_LE)
            markers[0]++;

        /* 14 bits big-endian bitstream */
        if (state == DCA_MARKER_14B_BE)
            if ((bytestream_get_be16(&bufp) & 0xFFF0) == 0x07F0)
                markers[1]++;

        /* 14 bits little-endian bitstream */
        if (state == DCA_MARKER_14B_LE)
            if ((bytestream_get_be16(&bufp) & 0xF0FF) == 0xF007)
                markers[2]++;

        /* measure signal activity to filter out near-constant data */
        if (buf - p->buf >= 4)
            diff += FFABS(AV_RL16(buf) - AV_RL16(buf-4));
    }
    sum = markers[0] + markers[1] + markers[2];
    /* pick the dominant marker flavor */
    max = markers[1] > markers[0];
    max = markers[2] > markers[max] ? 2 : max;
    /* require: enough markers, plausible frame spacing (< 32 KiB/frame),
     * dominance of one flavor (>75% of all hits), and real activity */
    if (markers[max] > 3 && p->buf_size / markers[max] < 32*1024 &&
        markers[max] * 4 > sum * 3 &&
        diff / p->buf_size > 200)
        return AVPROBE_SCORE_EXTENSION + 1;

    return 0;
}
 
/* Demuxer registration for raw DTS elementary streams. */
AVInputFormat ff_dts_demuxer = {
    .name           = "dts",
    .long_name      = NULL_IF_CONFIG_SMALL("raw DTS"),
    .read_probe     = dts_probe,
    .read_header    = ff_raw_audio_read_header,
    .read_packet    = ff_raw_read_partial_packet,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "dts",
    .raw_codec_id   = AV_CODEC_ID_DTS,
};
/contrib/sdk/sources/ffmpeg/libavformat/dtshddec.c
0,0 → 1,139
/*
* Raw DTS-HD demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "avformat.h"
 
#define AUPR_HDR 0x415550522D484452
#define AUPRINFO 0x41555052494E464F
#define BITSHVTB 0x4249545348565442
#define BLACKOUT 0x424C41434B4F5554
#define BRANCHPT 0x4252414E43485054
#define BUILDVER 0x4255494C44564552
#define CORESSMD 0x434F524553534D44
#define DTSHDHDR 0x4454534844484452
#define EXTSS_MD 0x45585453535f4d44
#define FILEINFO 0x46494C45494E464F
#define NAVI_TBL 0x4E4156492D54424C
#define STRMDATA 0x5354524D44415441
#define TIMECODE 0x54494D45434F4445
 
/* Demuxer state: absolute file offset where the STRMDATA payload ends. */
typedef struct DTSHDDemuxContext {
    uint64_t    data_end;
} DTSHDDemuxContext;
 
/* A DTS-HD file starts with the 8-byte "DTSHDHDR" chunk tag. */
static int dtshd_probe(AVProbeData *p)
{
    return AV_RB64(p->buf) == DTSHDHDR ? AVPROBE_SCORE_MAX : 0;
}
 
/**
 * Walk the DTS-HD chunk list until the stream data chunk (STRMDATA),
 * recording its end offset and any FILEINFO text as metadata.
 *
 * @return 0 on success, AVERROR_EOF if no STRMDATA chunk is present,
 *         or another negative AVERROR code on malformed input.
 */
static int dtshd_read_header(AVFormatContext *s)
{
    DTSHDDemuxContext *dtshd = s->priv_data;
    AVIOContext *pb = s->pb;
    uint64_t chunk_type, chunk_size;
    AVStream *st;
    int ret;
    char *value;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = AV_CODEC_ID_DTS;
    st->need_parsing      = AVSTREAM_PARSE_FULL_RAW;

    while (!url_feof(pb)) {
        chunk_type = avio_rb64(pb);
        chunk_size = avio_rb64(pb);

        if (chunk_size < 4) {
            av_log(s, AV_LOG_ERROR, "chunk size too small\n");
            return AVERROR_INVALIDDATA;
        }
        if (chunk_size > ((uint64_t)1 << 61)) {
            av_log(s, AV_LOG_ERROR, "chunk size too big\n");
            return AVERROR_INVALIDDATA;
        }

        switch (chunk_type) {
        case STRMDATA:
            dtshd->data_end = chunk_size + avio_tell(pb);
            if (dtshd->data_end <= chunk_size) /* offset overflow check */
                return AVERROR_INVALIDDATA;
            return 0;
        case FILEINFO:
            if (chunk_size > INT_MAX)
                goto skip;
            value = av_malloc(chunk_size);
            if (!value)
                goto skip;
            /* On a short read the file is truncated: don't expose an
             * uninitialized buffer as metadata; the loop then exits
             * via url_feof(). */
            if (avio_read(pb, value, chunk_size) != (int)chunk_size) {
                av_free(value);
                break;
            }
            value[chunk_size - 1] = 0;
            av_dict_set(&s->metadata, "fileinfo", value,
                        AV_DICT_DONT_STRDUP_VAL);
            break;
        default:
skip:
            ret = avio_skip(pb, chunk_size);
            if (ret < 0)
                return ret;
        };
    }

    return AVERROR_EOF;
}
 
/* Deliver raw DTS payload in chunks of up to 1 KiB, stopping at the
 * STRMDATA end offset recorded by dtshd_read_header(). */
static int raw_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    DTSHDDemuxContext *dtshd = s->priv_data;
    int64_t remaining = dtshd->data_end - avio_tell(s->pb);
    int ret;

    if (remaining <= 0)
        return AVERROR_EOF;

    ret = av_get_packet(s->pb, pkt, FFMIN(remaining, 1024));
    if (ret < 0)
        return ret;

    pkt->stream_index = 0;
    return ret;
}
 
/* Demuxer registration for raw DTS-HD files. */
AVInputFormat ff_dtshd_demuxer = {
    .name           = "dtshd",
    .long_name      = NULL_IF_CONFIG_SMALL("raw DTS-HD"),
    .priv_data_size = sizeof(DTSHDDemuxContext),
    .read_probe     = dtshd_probe,
    .read_header    = dtshd_read_header,
    .read_packet    = raw_read_packet,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "dtshd",
    .raw_codec_id   = AV_CODEC_ID_DTS,
};
/contrib/sdk/sources/ffmpeg/libavformat/dv.c
0,0 → 1,631
/*
* General DV muxer/demuxer
* Copyright (c) 2003 Roman Shaposhnik
*
* Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth
* of DV technical info.
*
* Raw DV format
* Copyright (c) 2002 Fabrice Bellard
*
* 50 Mbps (DVCPRO50) and 100 Mbps (DVCPRO HD) support
* Copyright (c) 2006 Daniel Maas <dmaas@maasdigital.com>
* Funded by BBC Research & Development
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <time.h>
#include "avformat.h"
#include "internal.h"
#include "libavcodec/dv_profile.h"
#include "libavcodec/dvdata.h"
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/timecode.h"
#include "dv.h"
#include "libavutil/avassert.h"
 
/* Shared raw-DV demuxing state: one video stream plus up to four
 * dynamically-created stereo audio streams with queued packets. */
struct DVDemuxContext {
    const DVprofile*  sys;    /* Current DV profile. E.g.: 525/60, 625/50 */
    AVFormatContext*  fctx;
    AVStream*         vst;
    AVStream*         ast[4];        /* audio streams (stereo pairs) */
    AVPacket          audio_pkt[4];  /* queued audio, emitted after the video packet */
    uint8_t           audio_buf[4][8192]; /* backing PCM buffers for audio_pkt */
    int               ach;           /* number of active audio streams */
    int               frames;        /* video frame counter, used as video pts */
    uint64_t          abytes;        /* audio bytes produced so far, for audio pts */
};
 
/* Expand a 12-bit nonlinear DV audio sample to 16-bit linear.
 * Values >= 0x800 are negative; the exponent nibble selects between
 * a pass-through range and two companded ranges. */
static inline uint16_t dv_audio_12to16(uint16_t sample)
{
    uint16_t exp, out;

    /* sign-extend the 12-bit value into the full 16 bits */
    if (sample >= 0x800)
        sample |= 0xf000;

    exp = (sample >> 8) & 0x0f;

    if (exp >= 0x2 && exp <= 0x7) {
        /* positive companded range */
        exp--;
        out = (sample - (exp << 8)) << exp;
    } else if (exp >= 0x8 && exp <= 0xd) {
        /* negative companded range */
        exp = 0xe - exp;
        out = ((sample + (exp << 8) + 1) << exp) - 1;
    } else {
        /* small-magnitude samples pass through unchanged */
        out = sample;
    }

    return out;
}
 
/*
* This is the dumbest implementation of all -- it simply looks at
* a fixed offset and if pack isn't there -- fails. We might want
* to have a fallback mechanism for complete search of missing packs.
*/
/*
 * Locate a pack of the given type at its fixed position in the frame.
 * This is the dumbest implementation of all -- it simply looks at
 * a fixed offset and if the pack isn't there -- fails. We might want
 * to have a fallback mechanism for complete search of missing packs.
 */
static const uint8_t *dv_extract_pack(uint8_t *frame, enum dv_pack_type t)
{
    int offs;

    /* fixed byte offset of each supported pack type */
    if (t == dv_audio_source)
        offs = 80 * 6 + 80 * 16 * 3 + 3;
    else if (t == dv_audio_control)
        offs = 80 * 6 + 80 * 16 * 4 + 3;
    else if (t == dv_video_control)
        offs = 80 * 5 + 48 + 5;
    else if (t == dv_timecode)
        offs = 80 * 1 + 3 + 3;
    else
        return NULL;

    /* the pack's first byte must echo its type id */
    return frame[offs] == t ? &frame[offs] : NULL;
}
 
/* Sample rates selected by the 3-valued frequency field of the
 * AAUX source pack (index 0 - 48kHz, 1 - 44.1kHz, 2 - 32kHz). */
static const int dv_audio_frequency[3] = {
    48000, 44100, 32000,
};
 
/*
* There's a couple of assumptions being made here:
* 1. By default we silence erroneous (0x8000/16bit 0x800/12bit) audio samples.
* We can pass them upwards when libavcodec will be ready to deal with them.
* 2. We don't do software emphasis.
* 3. Audio is always returned as 16bit linear samples: 12bit nonlinear samples
* are converted into 16bit linear ones.
*/
/*
 * Deshuffle the audio samples of one DV frame into linear 16-bit PCM
 * buffers (one per stereo pair in ppcm). Returns the PCM size in bytes,
 * 0 if the frame has no audio pack, or a negative error code.
 *
 * There's a couple of assumptions being made here:
 * 1. By default we silence erroneous (0x8000/16bit 0x800/12bit) audio samples.
 *    We can pass them upwards when libavcodec will be ready to deal with them.
 * 2. We don't do software emphasis.
 * 3. Audio is always returned as 16bit linear samples: 12bit nonlinear samples
 *    are converted into 16bit linear ones.
 */
static int dv_extract_audio(uint8_t *frame, uint8_t **ppcm,
                            const DVprofile *sys)
{
    int size, chan, i, j, d, of, smpls, freq, quant, half_ch;
    uint16_t lc, rc;
    const uint8_t *as_pack;
    uint8_t *pcm, ipcm;

    as_pack = dv_extract_pack(frame, dv_audio_source);
    if (!as_pack)    /* No audio ? */
        return 0;

    smpls = as_pack[1] & 0x3f;       /* samples in this frame - min. samples */
    freq  = as_pack[4] >> 3 & 0x07;  /* 0 - 48kHz, 1 - 44,1kHz, 2 - 32kHz */
    quant = as_pack[4] & 0x07;       /* 0 - 16bit linear, 1 - 12bit nonlinear */

    if (quant > 1)
        return -1;  /* unsupported quantization */

    if (freq >= FF_ARRAY_ELEMS(dv_audio_frequency))
        return AVERROR_INVALIDDATA;

    size    = (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */
    half_ch = sys->difseg_size / 2;

    /* We work with 720p frames split in half, thus even frames have
     * channels 0,1 and odd 2,3. */
    ipcm = (sys->height == 720 && !(frame[1] & 0x0C)) ? 2 : 0;

    if (ipcm + sys->n_difchan > (quant == 1 ? 2 : 4)) {
        av_log(NULL, AV_LOG_ERROR, "too many dv pcm frames\n");
        return AVERROR_INVALIDDATA;
    }

    /* for each DIF channel */
    for (chan = 0; chan < sys->n_difchan; chan++) {
        av_assert0(ipcm<4);
        pcm = ppcm[ipcm++];
        if (!pcm)
            break;

        /* for each DIF segment */
        for (i = 0; i < sys->difseg_size; i++) {
            frame += 6 * 80; /* skip DIF segment header */
            if (quant == 1 && i == half_ch) {
                /* next stereo channel (12bit mode only) */
                av_assert0(ipcm<4);
                pcm = ppcm[ipcm++];
                if (!pcm)
                    break;
            }

            /* for each AV sequence */
            for (j = 0; j < 9; j++) {
                /* audio bytes start at offset 8 of each audio DIF block */
                for (d = 8; d < 80; d += 2) {
                    if (quant == 0) {  /* 16bit quantization */
                        /* deshuffle via the profile's audio shuffle table */
                        of = sys->audio_shuffle[i][j] +
                             (d - 8) / 2 * sys->audio_stride;
                        if (of * 2 >= size)
                            continue;

                        /* FIXME: maybe we have to admit that DV is a
                         * big-endian PCM */
                        pcm[of * 2]     = frame[d + 1];
                        pcm[of * 2 + 1] = frame[d];

                        /* silence the 0x8000 error-sample marker */
                        if (pcm[of * 2 + 1] == 0x80 && pcm[of * 2] == 0x00)
                            pcm[of * 2 + 1] = 0;
                    } else {           /* 12bit quantization */
                        /* two 12-bit samples are packed into 3 bytes */
                        lc = ((uint16_t)frame[d]     << 4) |
                             ((uint16_t)frame[d + 2] >> 4);
                        rc = ((uint16_t)frame[d + 1] << 4) |
                             ((uint16_t)frame[d + 2] & 0x0f);
                        /* 0x800 marks an erroneous sample: silence it */
                        lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc));
                        rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc));

                        of = sys->audio_shuffle[i % half_ch][j] +
                             (d - 8) / 3 * sys->audio_stride;
                        if (of * 2 >= size)
                            continue;

                        /* FIXME: maybe we have to admit that DV is a
                         * big-endian PCM */
                        pcm[of * 2]     = lc & 0xff;
                        pcm[of * 2 + 1] = lc >> 8;
                        of = sys->audio_shuffle[i % half_ch + half_ch][j] +
                             (d - 8) / 3 * sys->audio_stride;
                        /* FIXME: maybe we have to admit that DV is a
                         * big-endian PCM */
                        pcm[of * 2]     = rc & 0xff;
                        pcm[of * 2 + 1] = rc >> 8;
                        ++d;  /* consumed 3 input bytes instead of 2 */
                    }
                }

                frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
            }
        }
    }

    return size;
}
 
/* Parse the AAUX source pack of a frame, create/update the audio
 * streams accordingly, and return the PCM byte size of one frame's
 * audio (0 if the frame has no usable audio). */
static int dv_extract_audio_info(DVDemuxContext *c, uint8_t *frame)
{
    const uint8_t *as_pack;
    int freq, stype, smpls, quant, i, ach;

    as_pack = dv_extract_pack(frame, dv_audio_source);
    if (!as_pack || !c->sys) {    /* No audio ? */
        c->ach = 0;
        return 0;
    }

    smpls = as_pack[1] & 0x3f;       /* samples in this frame - min. samples */
    freq  = as_pack[4] >> 3 & 0x07;  /* 0 - 48kHz, 1 - 44,1kHz, 2 - 32kHz */
    stype = as_pack[3] & 0x1f;       /* 0 - 2CH, 2 - 4CH, 3 - 8CH */
    quant = as_pack[4] & 0x07;       /* 0 - 16bit linear, 1 - 12bit nonlinear */

    if (freq >= FF_ARRAY_ELEMS(dv_audio_frequency)) {
        av_log(c->fctx, AV_LOG_ERROR,
               "Unrecognized audio sample rate index (%d)\n", freq);
        return 0;
    }

    if (stype > 3) {
        av_log(c->fctx, AV_LOG_ERROR, "stype %d is invalid\n", stype);
        c->ach = 0;
        return 0;
    }

    /* note: ach counts PAIRS of channels (i.e. stereo channels) */
    ach = ((int[4]) { 1, 0, 2, 4 })[stype];
    /* 12-bit 32kHz 2CH streams actually carry two stereo pairs */
    if (ach == 1 && quant && freq == 2)
        ach = 2;

    /* Dynamic handling of the audio streams in DV */
    for (i = 0; i < ach; i++) {
        if (!c->ast[i]) {
            c->ast[i] = avformat_new_stream(c->fctx, NULL);
            if (!c->ast[i])
                break;
            /* audio pts is kept on a 30000 Hz timebase (see produce_packet) */
            avpriv_set_pts_info(c->ast[i], 64, 1, 30000);
            c->ast[i]->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            c->ast[i]->codec->codec_id   = AV_CODEC_ID_PCM_S16LE;

            av_init_packet(&c->audio_pkt[i]);
            c->audio_pkt[i].size         = 0;
            c->audio_pkt[i].data         = c->audio_buf[i];
            c->audio_pkt[i].stream_index = c->ast[i]->index;
            c->audio_pkt[i].flags       |= AV_PKT_FLAG_KEY;
        }
        c->ast[i]->codec->sample_rate    = dv_audio_frequency[freq];
        c->ast[i]->codec->channels       = 2;
        c->ast[i]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
        c->ast[i]->codec->bit_rate       = 2 * dv_audio_frequency[freq] * 16;
        c->ast[i]->start_time            = 0;
    }
    c->ach = i;

    return (c->sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */
}
 
/* Update the video stream's timebase, aspect ratio and bitrate from
 * the frame's VAUX packs; returns the frame size in bytes (0 if the
 * DV profile is not yet known). */
static int dv_extract_video_info(DVDemuxContext *c, uint8_t *frame)
{
    const uint8_t *vsc_pack;
    AVCodecContext *avctx;
    int apt, is16_9;
    int size = 0;

    if (c->sys) {
        avctx = c->vst->codec;

        avpriv_set_pts_info(c->vst, 64, c->sys->time_base.num,
                            c->sys->time_base.den);
        avctx->time_base = c->sys->time_base;

        /* finding out SAR is a little bit messy */
        vsc_pack = dv_extract_pack(frame, dv_video_control);
        apt      = frame[4] & 0x07;
        is16_9   = (vsc_pack && ((vsc_pack[2] & 0x07) == 0x02 ||
                                 (!apt && (vsc_pack[2] & 0x07) == 0x07)));
        c->vst->sample_aspect_ratio = c->sys->sar[is16_9];
        /* bitrate = frame_size * 8 bits / frame duration */
        avctx->bit_rate = av_rescale_q(c->sys->frame_size,
                                       (AVRational) { 8, 1 },
                                       c->sys->time_base);
        size = c->sys->frame_size;
    }
    return size;
}
 
/* Extract the SMPTE timecode pack from a frame and format it into tc
 * (at least AV_TIMECODE_STR_SIZE bytes). Returns 1 on success, 0 if
 * the frame carries no timecode pack. */
static int dv_extract_timecode(DVDemuxContext* c, uint8_t* frame, char *tc)
{
    const uint8_t *tc_pack;

    // For PAL systems, drop frame bit is replaced by an arbitrary
    // bit so its value should not be considered. Drop frame timecode
    // is only relevant for NTSC systems.
    int prevent_df = c->sys->ltc_divisor == 25 || c->sys->ltc_divisor == 50;

    tc_pack = dv_extract_pack(frame, dv_timecode);
    if (!tc_pack)
        return 0;
    av_timecode_make_smpte_tc_string(tc, AV_RB32(tc_pack + 1), prevent_df);
    return 1;
}
 
/* The following 3 functions constitute our interface to the world */
 
/* Allocate a DV demux context and create its video stream on s.
 * Returns NULL on allocation failure. The caller owns the context
 * and releases it with av_free(). */
DVDemuxContext *avpriv_dv_init_demux(AVFormatContext *s)
{
    DVDemuxContext *c = av_mallocz(sizeof(*c));

    if (!c)
        return NULL;

    c->vst = avformat_new_stream(s, NULL);
    if (!c->vst) {
        av_free(c);
        return NULL;
    }

    c->fctx                  = s;
    c->vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    c->vst->codec->codec_id   = AV_CODEC_ID_DVVIDEO;
    c->vst->codec->bit_rate   = 25000000;
    c->vst->start_time        = 0;

    return c;
}
 
/* Hand out the first queued audio packet, if any.
 * Returns the packet size, or -1 when nothing is queued. */
int avpriv_dv_get_packet(DVDemuxContext *c, AVPacket *pkt)
{
    int i;

    for (i = 0; i < c->ach; i++) {
        AVPacket *queued = &c->audio_pkt[i];

        if (c->ast[i] && queued->size) {
            *pkt         = *queued;
            queued->size = 0;  /* mark the slot as consumed */
            return pkt->size;
        }
    }

    return -1;
}
 
/* Demux one raw DV frame: queue its audio into the per-stream packet
 * slots and fill pkt with the video frame. Returns the video packet
 * size, or -1 for a broken/short frame. The queued audio is retrieved
 * by subsequent avpriv_dv_get_packet() calls. */
int avpriv_dv_produce_packet(DVDemuxContext *c, AVPacket *pkt,
                             uint8_t *buf, int buf_size, int64_t pos)
{
    int size, i;
    uint8_t *ppcm[5] = { 0 };

    if (buf_size < DV_PROFILE_BYTES ||
        !(c->sys = avpriv_dv_frame_profile(c->sys, buf, buf_size)) ||
        buf_size < c->sys->frame_size) {
        return -1;  /* Broken frame, or not enough data */
    }

    /* Queueing audio packet */
    /* FIXME: in case of no audio/bad audio we have to do something */
    size = dv_extract_audio_info(c, buf);
    for (i = 0; i < c->ach; i++) {
        c->audio_pkt[i].pos  = pos;
        c->audio_pkt[i].size = size;
        /* pts in the 1/30000 timebase, derived from bytes produced so far */
        c->audio_pkt[i].pts  = c->abytes * 30000 * 8 /
                               c->ast[i]->codec->bit_rate;
        ppcm[i] = c->audio_buf[i];
    }
    if (c->ach)
        dv_extract_audio(buf, ppcm, c->sys);

    /* We work with 720p frames split in half, thus even frames have
     * channels 0,1 and odd 2,3. */
    if (c->sys->height == 720) {
        if (buf[1] & 0x0C) {
            c->audio_pkt[2].size = c->audio_pkt[3].size = 0;
        } else {
            c->audio_pkt[0].size = c->audio_pkt[1].size = 0;
            /* only count bytes once per full (two half) frame */
            c->abytes += size;
        }
    } else {
        c->abytes += size;
    }

    /* Now it's time to return video packet */
    size = dv_extract_video_info(c, buf);
    av_init_packet(pkt);
    pkt->data         = buf;
    pkt->pos          = pos;
    pkt->size         = size;
    pkt->flags       |= AV_PKT_FLAG_KEY;  /* every DV frame is intra-coded */
    pkt->stream_index = c->vst->index;
    pkt->pts          = c->frames;

    c->frames++;

    return size;
}
 
/* Map a frame-number timestamp to an absolute byte offset, clamped
 * to [data_offset, last whole frame].
 * NOTE(review): avpriv_dv_codec_profile() is assumed to return a
 * non-NULL profile here -- confirm against callers before seeking
 * on streams whose profile was never established. */
static int64_t dv_frame_offset(AVFormatContext *s, DVDemuxContext *c,
                               int64_t timestamp, int flags)
{
    // FIXME: sys may be wrong if last dv_read_packet() failed (buffer is junk)
    const DVprofile *sys = avpriv_dv_codec_profile(c->vst->codec);
    int64_t offset;
    int64_t size       = avio_size(s->pb) - s->data_offset;
    int64_t max_offset = ((size - 1) / sys->frame_size) * sys->frame_size;

    offset = sys->frame_size * timestamp;

    /* clamp into the valid range; avio_size() may be negative for
     * non-seekable input, in which case only the low clamp applies */
    if (size >= 0 && offset > max_offset)
        offset = max_offset;
    else if (offset < 0)
        offset = 0;

    return offset + s->data_offset;
}
 
/* Resynchronize the demuxer's counters after a seek: set the video
 * frame counter, recompute the audio byte counter to match, and drop
 * any queued audio packets. */
void ff_dv_offset_reset(DVDemuxContext *c, int64_t frame_offset)
{
    c->frames = frame_offset;
    if (c->ach) {
        if (c->sys) {
            /* abytes = frames * frame_duration * bit_rate / 8 */
            c->abytes = av_rescale_q(c->frames, c->sys->time_base,
                                     (AVRational) { 8, c->ast[0]->codec->bit_rate });
        } else
            av_log(c->fctx, AV_LOG_ERROR, "cannot adjust audio bytes\n");
    }
    c->audio_pkt[0].size = c->audio_pkt[1].size = 0;
    c->audio_pkt[2].size = c->audio_pkt[3].size = 0;
}
 
/************************************************************
* Implementation of the easiest DV storage of all -- raw DV.
************************************************************/
 
/* Raw-DV demuxer private data: the shared demux helper plus a
 * buffer large enough for one frame of any DV profile. */
typedef struct RawDVContext {
    DVDemuxContext* dv_demux;
    uint8_t         buf[DV_MAX_FRAME_SIZE];
} RawDVContext;
 
/* Peek at the first frame's header/subcode DIF blocks to extract the
 * start timecode into the "timecode" metadata entry. The stream
 * position is restored before returning. Returns >= 0 on success,
 * a negative AVERROR code on failure. */
static int dv_read_timecode(AVFormatContext *s) {
    int ret;
    char timecode[AV_TIMECODE_STR_SIZE];
    int64_t pos = avio_tell(s->pb);

    // Read 3 DIF blocks: Header block and 2 Subcode blocks.
    int partial_frame_size = 3 * 80;
    uint8_t *partial_frame = av_mallocz(sizeof(*partial_frame) *
                                        partial_frame_size);
    RawDVContext *c = s->priv_data;

    /* bail out before dereferencing a failed allocation */
    if (!partial_frame)
        return AVERROR(ENOMEM);

    ret = avio_read(s->pb, partial_frame, partial_frame_size);
    if (ret < 0)
        goto finish;

    if (ret < partial_frame_size) {
        ret = -1;
        goto finish;
    }

    ret = dv_extract_timecode(c->dv_demux, partial_frame, timecode);
    if (ret)
        av_dict_set(&s->metadata, "timecode", timecode, 0);
    else
        av_log(s, AV_LOG_ERROR, "Detected timecode is invalid\n");

finish:
    av_free(partial_frame);
    avio_seek(s->pb, pos, SEEK_SET);
    return ret;
}
 
/* Scan for the first DV section header, determine the DV profile from
 * the first frame's header bytes, and set the container bitrate.
 * Returns 0 on success or a negative value on failure. */
static int dv_read_header(AVFormatContext *s)
{
    unsigned state, marker_pos = 0;
    RawDVContext *c = s->priv_data;

    c->dv_demux = avpriv_dv_init_demux(s);
    if (!c->dv_demux)
        return -1;

    /* byte-wise scan for a section header (0x1f07003f with the
     * DSF bit masked out) */
    state = avio_rb32(s->pb);
    while ((state & 0xffffff7f) != 0x1f07003f) {
        if (url_feof(s->pb)) {
            av_log(s, AV_LOG_ERROR, "Cannot find DV header.\n");
            return -1;
        }
        /* secondary sync: two subcode markers exactly 80 bytes apart
         * also identify a frame start; rewind to its beginning */
        if (state == 0x003f0700 || state == 0xff3f0700)
            marker_pos = avio_tell(s->pb);
        if (state == 0xff3f0701 && avio_tell(s->pb) - marker_pos == 80) {
            avio_seek(s->pb, -163, SEEK_CUR);
            state = avio_rb32(s->pb);
            break;
        }
        state = (state << 8) | avio_r8(s->pb);
    }
    /* re-materialize the 4 header bytes we consumed while scanning */
    AV_WB32(c->buf, state);

    if (avio_read(s->pb, c->buf + 4, DV_PROFILE_BYTES - 4) != DV_PROFILE_BYTES - 4 ||
        avio_seek(s->pb, -DV_PROFILE_BYTES, SEEK_CUR) < 0)
        return AVERROR(EIO);

    c->dv_demux->sys = avpriv_dv_frame_profile(c->dv_demux->sys,
                                               c->buf,
                                               DV_PROFILE_BYTES);
    if (!c->dv_demux->sys) {
        av_log(s, AV_LOG_ERROR,
               "Can't determine profile of DV input stream.\n");
        return -1;
    }

    /* bitrate = frame_size * 8 bits / frame duration */
    s->bit_rate = av_rescale_q(c->dv_demux->sys->frame_size,
                               (AVRational) { 8, 1 },
                               c->dv_demux->sys->time_base);

    if (s->pb->seekable)
        dv_read_timecode(s);

    return 0;
}
 
/* Return the next packet: queued audio first; otherwise read one raw
 * frame from the input and demux it. */
static int dv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RawDVContext *c = s->priv_data;
    int size = avpriv_dv_get_packet(c->dv_demux, pkt);

    if (size >= 0)
        return size;

    if (!c->dv_demux->sys)
        return AVERROR(EIO);

    {
        int64_t pos        = avio_tell(s->pb);
        int     frame_size = c->dv_demux->sys->frame_size;

        if (avio_read(s->pb, c->buf, frame_size) <= 0)
            return AVERROR(EIO);

        size = avpriv_dv_produce_packet(c->dv_demux, pkt, c->buf,
                                        frame_size, pos);
    }

    return size;
}
 
/* Seek to the frame containing the requested timestamp and resync
 * the demuxer's frame/audio counters to the new position. */
static int dv_read_seek(AVFormatContext *s, int stream_index,
                        int64_t timestamp, int flags)
{
    RawDVContext *r     = s->priv_data;
    DVDemuxContext *dvc = r->dv_demux;
    int64_t pos         = dv_frame_offset(s, dvc, timestamp, flags);

    if (avio_seek(s->pb, pos, SEEK_SET) < 0)
        return -1;

    ff_dv_offset_reset(dvc, pos / dvc->sys->frame_size);
    return 0;
}
 
/* Release the demux context allocated in dv_read_header(). */
static int dv_read_close(AVFormatContext *s)
{
    RawDVContext *c = s->priv_data;

    av_free(c->dv_demux);
    return 0;
}
 
/* Probe raw DV by counting section headers (and secondary subcode
 * markers) in the buffer and checking their density against the
 * expected DV frame structure. */
static int dv_probe(AVProbeData *p)
{
    unsigned state, marker_pos = 0;
    int i;
    int matches = 0;
    int secondary_matches = 0;

    if (p->buf_size < 5)
        return 0;

    /* byte-wise sliding 32-bit window over the buffer */
    state = AV_RB32(p->buf);
    for (i = 4; i < p->buf_size; i++) {
        /* section header with the DSF bit masked out */
        if ((state & 0xffffff7f) == 0x1f07003f)
            matches++;
        // any section header, also with seq/chan num != 0,
        // should appear around every 12000 bytes, at least 10 per frame
        if ((state & 0xff07ff7f) == 0x1f07003f)
            secondary_matches++;
        /* two subcode markers exactly 80 bytes apart also count */
        if (state == 0x003f0700 || state == 0xff3f0700)
            marker_pos = i;
        if (state == 0xff3f0701 && i - marker_pos == 80)
            matches++;
        state = (state << 8) | p->buf[i];
    }

    /* require plausible header spacing (< 1 MiB per frame header) */
    if (matches && p->buf_size / matches < 1024 * 1024) {
        if (matches > 4 ||
            (secondary_matches >= 10 &&
             p->buf_size / secondary_matches < 24000))
            // not max to avoid dv in mov to match
            return AVPROBE_SCORE_MAX * 3 / 4;
        return AVPROBE_SCORE_MAX / 4;
    }
    return 0;
}
 
#if CONFIG_DV_DEMUXER
/* Demuxer registration for raw DV streams. */
AVInputFormat ff_dv_demuxer = {
    .name           = "dv",
    .long_name      = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
    .priv_data_size = sizeof(RawDVContext),
    .read_probe     = dv_probe,
    .read_header    = dv_read_header,
    .read_packet    = dv_read_packet,
    .read_close     = dv_read_close,
    .read_seek      = dv_read_seek,
    .extensions     = "dv,dif",
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/dv.h
0,0 → 1,41
/*
* General DV muxer/demuxer
* Copyright (c) 2003 Roman Shaposhnik
*
* Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth
* of DV technical info.
*
* Raw DV format
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_DV_H
#define AVFORMAT_DV_H

#include "avformat.h"

/* Opaque raw-DV demux helper, shared between the dv demuxer and
 * container demuxers that carry DV essence. Lifecycle: allocate with
 * avpriv_dv_init_demux(), feed frames via avpriv_dv_produce_packet(),
 * drain queued audio with avpriv_dv_get_packet(), resync after seeks
 * with ff_dv_offset_reset(); release with av_free(). */
typedef struct DVDemuxContext DVDemuxContext;
DVDemuxContext* avpriv_dv_init_demux(AVFormatContext* s);
int avpriv_dv_get_packet(DVDemuxContext*, AVPacket *);
int avpriv_dv_produce_packet(DVDemuxContext*, AVPacket*, uint8_t*, int, int64_t);
void ff_dv_offset_reset(DVDemuxContext *c, int64_t frame_offset);

/* Opaque muxer state, defined in dvenc.c. */
typedef struct DVMuxContext DVMuxContext;

#endif /* AVFORMAT_DV_H */
/contrib/sdk/sources/ffmpeg/libavformat/dvenc.c
0,0 → 1,419
/*
* General DV muxer/demuxer
* Copyright (c) 2003 Roman Shaposhnik
*
* Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth
* of DV technical info.
*
* Raw DV format
* Copyright (c) 2002 Fabrice Bellard
*
* 50 Mbps (DVCPRO50) support
* Copyright (c) 2006 Daniel Maas <dmaas@maasdigital.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <time.h>
#include <stdarg.h>
 
#include "avformat.h"
#include "internal.h"
#include "libavcodec/dv_profile.h"
#include "libavcodec/dvdata.h"
#include "dv.h"
#include "libavutil/fifo.h"
#include "libavutil/mathematics.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/timecode.h"
 
#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
 
/* Muxer state for assembling raw DV frames from separate
 * audio/video input packets. */
struct DVMuxContext {
    AVClass          *av_class;
    const DVprofile*  sys;           /* current DV profile, e.g.: 525/60, 625/50 */
    int               n_ast;         /* number of stereo audio streams (up to 2) */
    AVStream         *ast[2];        /* stereo audio streams */
    AVFifoBuffer     *audio_data[2]; /* FIFO for storing excessive amounts of PCM */
    int               frames;        /* current frame number */
    int64_t           start_time;    /* recording start time */
    int               has_audio;     /* frame under construction has audio */
    int               has_video;     /* frame under construction has video */
    uint8_t           frame_buf[DV_MAX_FRAME_SIZE]; /* frame under construction */
    AVTimecode        tc;            /* timecode context */
};
 
/* Distribution of AAUX pack ids (0x50-0x53, 0xff = none) over the
 * 9 audio DIF blocks of each of the 12 DIF segments; even and odd
 * segments alternate between the two layouts. */
static const int dv_aaux_packs_dist[12][9] = {
    { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
    { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
    { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
    { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
    { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
    { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
    { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
    { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
    { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
    { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
    { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
    { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
};
 
/* Number of audio samples in the given frame; sample counts cycle
 * through the profile's audio_samples_dist table. */
static int dv_audio_frame_size(const DVprofile* sys, int frame)
{
    int cycle_len = sizeof(sys->audio_samples_dist) /
                    sizeof(sys->audio_samples_dist[0]);

    return sys->audio_samples_dist[frame % cycle_len];
}
 
/* Write a 5-byte pack of the given type into buf. For dv_audio_source
 * one extra vararg (int audio mode) is consumed. Unknown pack ids get
 * a 0xff-filled body. Always returns 5 (bytes written). */
static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* buf, ...)
{
    struct tm tc;
    time_t ct;
    uint32_t timecode;
    va_list ap;

    buf[0] = (uint8_t)pack_id;
    switch (pack_id) {
    case dv_timecode:
        timecode  = av_timecode_get_smpte_from_framenum(&c->tc, c->frames);
        timecode |= 1<<23 | 1<<15 | 1<<7 | 1<<6; // biphase and binary group flags
        AV_WB32(buf + 1, timecode);
        break;
    case dv_audio_source:  /* AAUX source pack */
        va_start(ap, buf);
        buf[1] = (1 << 7) | /* locked mode -- SMPTE only supports locked mode */
                 (1 << 6) | /* reserved -- always 1 */
                 (dv_audio_frame_size(c->sys, c->frames) -
                  c->sys->audio_min_samples[0]);
                            /* # of samples      */
        buf[2] = (0 << 7) | /* multi-stereo      */
                 (0 << 5) | /* #of audio channels per block: 0 -- 1 channel */
                 (0 << 4) | /* pair bit: 0 -- one pair of channels */
                 !!va_arg(ap, int); /* audio mode        */
        buf[3] = (1 << 7) | /* res               */
                 (1 << 6) | /* multi-language flag */
                 (c->sys->dsf << 5) | /*  system: 60fields/50fields */
                 (c->sys->n_difchan & 2); /* definition: 0 -- 25Mbps, 2 -- 50Mbps */
        buf[4] = (1 << 7) | /* emphasis: 1 -- off */
                 (0 << 6) | /* emphasis time constant: 0 -- reserved */
                 (0 << 3) | /* frequency: 0 -- 48kHz, 1 -- 44,1kHz, 2 -- 32kHz */
                  0;        /* quantization: 0 -- 16bit linear, 1 -- 12bit nonlinear */
        va_end(ap);
        break;
    case dv_audio_control:
        buf[1] = (0 << 6) | /* copy protection: 0 -- unrestricted */
                 (1 << 4) | /* input source: 1 -- digital input */
                 (3 << 2) | /* compression: 3 -- no information */
                  0;        /* misc. info/SMPTE emphasis off */
        buf[2] = (1 << 7) | /* recording start point: 1 -- no */
                 (1 << 6) | /* recording end point: 1 -- no */
                 (1 << 3) | /* recording mode: 1 -- original */
                  7;
        buf[3] = (1 << 7) | /* direction: 1 -- forward */
                 (c->sys->pix_fmt == AV_PIX_FMT_YUV420P ? 0x20 : /* speed */
                                                          c->sys->ltc_divisor * 4);
        buf[4] = (1 << 7) | /* reserved -- always 1 */
                  0x7f;     /* genre category */
        break;
    case dv_audio_recdate:
    case dv_video_recdate:  /* VAUX recording date */
        /* wall-clock date of this frame, derived from the recording
         * start time plus the elapsed frame count */
        ct = c->start_time + av_rescale_rnd(c->frames, c->sys->time_base.num,
                                            c->sys->time_base.den, AV_ROUND_DOWN);
        ff_brktimegm(ct, &tc);
        buf[1] = 0xff;      /* ds, tm, tens of time zone, units of time zone */
                            /* 0xff is very likely to be "unknown" */
        buf[2] = (3 << 6) | /* reserved -- always 1 */
                 ((tc.tm_mday / 10) << 4) | /* Tens of day */
                 (tc.tm_mday % 10);         /* Units of day */
        buf[3] = /* we set high 4 bits to 0, shouldn't we set them to week? */
                 ((tc.tm_mon / 10) << 4) |  /* Tens of month */
                 (tc.tm_mon % 10);          /* Units of month */
        buf[4] = (((tc.tm_year % 100) / 10) << 4) | /* Tens of year */
                 (tc.tm_year % 10);                 /* Units of year */
        break;
    case dv_audio_rectime:  /* AAUX recording time */
    case dv_video_rectime:  /* VAUX recording time */
        ct = c->start_time + av_rescale_rnd(c->frames, c->sys->time_base.num,
                                            c->sys->time_base.den, AV_ROUND_DOWN);
        ff_brktimegm(ct, &tc);
        buf[1] = (3 << 6) | /* reserved -- always 1 */
                 0x3f;      /* tens of frame, units of frame: 0x3f - "unknown" ? */
        buf[2] = (1 << 7) | /* reserved -- always 1 */
                 ((tc.tm_sec / 10) << 4) | /* Tens of seconds */
                 (tc.tm_sec % 10);         /* Units of seconds */
        buf[3] = (1 << 7) | /* reserved -- always 1 */
                 ((tc.tm_min / 10) << 4) | /* Tens of minutes */
                 (tc.tm_min % 10);         /* Units of minutes */
        buf[4] = (3 << 6) | /* reserved -- always 1 */
                 ((tc.tm_hour / 10) << 4) | /* Tens of hours */
                 (tc.tm_hour % 10);         /* Units of hours */
        break;
    default:
        buf[1] = buf[2] = buf[3] = buf[4] = 0xff;
    }
    return 5;
}
 
/**
 * Shuffle queued PCM audio for one audio stream out of its FIFO into the
 * audio DIF blocks of the DV frame being assembled.
 *
 * @param c         muxer state (audio FIFOs, DV profile, frame counter)
 * @param channel   audio stream index; selects the DIF channel group
 * @param frame_ptr start of the DV frame buffer being filled
 */
static void dv_inject_audio(DVMuxContext *c, int channel, uint8_t* frame_ptr)
{
    int i, j, d, of, size;
    /* bytes of audio consumed for this frame (varies with the frame number
       via dv_audio_frame_size) */
    size = 4 * dv_audio_frame_size(c->sys, c->frames);
    /* each channel group spans difseg_size DIF sequences of 150 80-byte blocks */
    frame_ptr += channel * c->sys->difseg_size * 150 * 80;
    for (i = 0; i < c->sys->difseg_size; i++) {
        frame_ptr += 6 * 80; /* skip DIF segment header */
        for (j = 0; j < 9; j++) {
            /* write the AAUX pack at offset 3 of this audio DIF block */
            dv_write_pack(dv_aaux_packs_dist[i][j], c, &frame_ptr[3], i >= c->sys->difseg_size/2);
            for (d = 8; d < 80; d+=2) {
                /* profile's shuffle tables map (segment, block, offset)
                   to a sample index in the FIFO */
                of = c->sys->audio_shuffle[i][j] + (d - 8)/2 * c->sys->audio_stride;
                if (of*2 >= size)
                    continue;

                frame_ptr[d]   = *av_fifo_peek2(c->audio_data[channel], of*2+1); // FIXME: maybe we have to admit
                frame_ptr[d+1] = *av_fifo_peek2(c->audio_data[channel], of*2);   // that DV is a big-endian PCM
            }
            frame_ptr += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
        }
    }
}
 
/**
 * Write timecode and recording date/time packs into the subcode and VAUX
 * areas of every DIF sequence of an assembled DV frame.
 */
static void dv_inject_metadata(DVMuxContext *c, uint8_t* frame)
{
    int j, k;
    uint8_t* buf;

    /* walk the frame one DIF sequence (150 blocks of 80 bytes) at a time */
    for (buf = frame; buf < frame + c->sys->frame_size; buf += 150 * 80) {
        /* DV subcode: 2nd and 3d DIFs */
        for (j = 80; j < 80 * 3; j += 80) {
            for (k = 6; k < 6 * 8; k += 8)
                dv_write_pack(dv_timecode, c, &buf[j+k]);

            /* recdate/rectime packs only go into DIF sequences whose index
               within the channel is > 5 */
            if (((long)(buf-frame)/(c->sys->frame_size/(c->sys->difseg_size*c->sys->n_difchan))%c->sys->difseg_size) > 5) { /* FIXME: is this really needed ? */
                dv_write_pack(dv_video_recdate, c, &buf[j+14]);
                dv_write_pack(dv_video_rectime, c, &buf[j+22]);
                dv_write_pack(dv_video_recdate, c, &buf[j+38]);
                dv_write_pack(dv_video_rectime, c, &buf[j+46]);
            }
        }

        /* DV VAUX: 4th, 5th and 6th 3DIFs */
        for (j = 80*3 + 3; j < 80*6; j += 80) {
            dv_write_pack(dv_video_recdate, c, &buf[j+5*2]);
            dv_write_pack(dv_video_rectime, c, &buf[j+5*3]);
            dv_write_pack(dv_video_recdate, c, &buf[j+5*11]);
            dv_write_pack(dv_video_rectime, c, &buf[j+5*12]);
        }
    }
}
 
/*
* The following 3 functions constitute our interface to the world
*/
 
/**
 * Accumulate one incoming packet (video frame or audio samples) and, when
 * enough data is available, finalize a complete DV frame in c->frame_buf.
 *
 * @param c          muxer state
 * @param st         stream the packet belongs to
 * @param data       packet payload
 * @param data_size  payload size in bytes
 * @param frame      on return points at c->frame_buf
 * @return c->sys->frame_size when a complete frame is ready, 0 otherwise
 */
static int dv_assemble_frame(DVMuxContext *c, AVStream* st,
                             uint8_t* data, int data_size, uint8_t** frame)
{
    int i, reqasize;

    *frame = &c->frame_buf[0];
    /* bytes of audio required from each stream's FIFO for the current frame */
    reqasize = 4 * dv_audio_frame_size(c->sys, c->frames);

    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        /* FIXME: we have to have more sensible approach than this one */
        if (c->has_video)
            av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient audio data or severe sync problem.\n", c->frames);

        memcpy(*frame, data, c->sys->frame_size);
        c->has_video = 1;
        break;
    case AVMEDIA_TYPE_AUDIO:
        /* map the stream to its audio slot (0 or 1) */
        for (i = 0; i < c->n_ast && st != c->ast[i]; i++);

        /* FIXME: we have to have more sensible approach than this one */
        if (av_fifo_size(c->audio_data[i]) + data_size >= 100*MAX_AUDIO_FRAME_SIZE)
            av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient video data or severe sync problem.\n", c->frames);
        av_fifo_generic_write(c->audio_data[i], data, data_size, NULL);

        /* Let us see if we've got enough audio for one DV frame. */
        c->has_audio |= ((reqasize <= av_fifo_size(c->audio_data[i])) << i);

        break;
    default:
        break;
    }

    /* Let us see if we have enough data to construct one DV frame:
       has_audio must have one bit set per audio stream. */
    if (c->has_video == 1 && c->has_audio + 1 == 1 << c->n_ast) {
        dv_inject_metadata(c, *frame);
        c->has_audio = 0;
        for (i=0; i < c->n_ast; i++) {
            dv_inject_audio(c, i, *frame);
            av_fifo_drain(c->audio_data[i], reqasize);
            /* leftover FIFO data may already satisfy the next frame */
            c->has_audio |= ((reqasize <= av_fifo_size(c->audio_data[i])) << i);
        }

        c->has_video = 0;

        c->frames++;

        return c->sys->frame_size;
    }

    return 0;
}
 
/**
 * Validate the incoming streams and initialize the DV muxer state.
 *
 * DV accepts exactly one DVVIDEO stream plus up to two stereo 48kHz
 * PCM_S16LE audio streams (the second pair only in 50Mbps profiles).
 *
 * @param s the format context; its priv_data is the DVMuxContext
 * @return the initialized context, or NULL when the stream setup is
 *         unsupported or allocation fails
 */
static DVMuxContext* dv_init_mux(AVFormatContext* s)
{
    DVMuxContext *c = s->priv_data;
    AVStream *vst = NULL;
    AVDictionaryEntry *t;
    int i;

    /* we support at most 1 video and 2 audio streams */
    if (s->nb_streams > 3)
        return NULL;

    c->n_ast  = 0;
    c->ast[0] = c->ast[1] = NULL;

    /* We have to sort out where audio and where video stream is */
    for (i = 0; i < s->nb_streams; i++) {
        switch (s->streams[i]->codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            if (vst)                  /* at most one video stream */
                return NULL;
            vst = s->streams[i];
            break;
        case AVMEDIA_TYPE_AUDIO:
            if (c->n_ast > 1)         /* at most two audio streams */
                return NULL;
            c->ast[c->n_ast++] = s->streams[i];
            break;
        default:
            goto bail_out;
        }
    }

    /* Some checks -- DV format is very picky about its incoming streams */
    if (!vst || vst->codec->codec_id != AV_CODEC_ID_DVVIDEO)
        goto bail_out;
    for (i = 0; i < c->n_ast; i++) {
        if (c->ast[i] && (c->ast[i]->codec->codec_id    != AV_CODEC_ID_PCM_S16LE ||
                          c->ast[i]->codec->sample_rate != 48000 ||
                          c->ast[i]->codec->channels    != 2))
            goto bail_out;
    }
    c->sys = avpriv_dv_codec_profile(vst->codec);
    if (!c->sys)
        goto bail_out;

    if ((c->n_ast > 1) && (c->sys->n_difchan < 2)) {
        /* only 1 stereo pair is allowed in 25Mbps mode */
        goto bail_out;
    }

    /* Ok, everything seems to be in working order */
    c->frames    = 0;
    c->has_audio = 0;
    c->has_video = 0;
    /* extra parentheses: the assignment inside the condition is intentional */
    if ((t = av_dict_get(s->metadata, "creation_time", NULL, 0)))
        c->start_time = ff_iso8601_to_unix_time(t->value);

    for (i = 0; i < c->n_ast; i++) {
        if (c->ast[i] && !(c->audio_data[i] = av_fifo_alloc(100*MAX_AUDIO_FRAME_SIZE))) {
            /* allocation failed: release the FIFOs already allocated */
            while (i > 0) {
                i--;
                av_fifo_free(c->audio_data[i]);
            }
            goto bail_out;
        }
    }

    return c;

bail_out:
    return NULL;
}
 
/* Release the audio FIFOs owned by the DV muxer context. */
static void dv_delete_mux(DVMuxContext *c)
{
    int n = c->n_ast;

    while (n-- > 0)
        av_fifo_free(c->audio_data[n]);
}
 
/**
 * Muxer write_header callback: validate the streams, set up the DV
 * context and initialize the running timecode (from the container's
 * "timecode" metadata, a stream's metadata, or a zero default).
 * @return 0 on success, negative on failure
 */
static int dv_write_header(AVFormatContext *s)
{
    DVMuxContext *dvc = s->priv_data;
    AVDictionaryEntry *tcr = av_dict_get(s->metadata, "timecode", NULL, 0);
    AVRational rate;

    if (!dv_init_mux(s)) {
        av_log(s, AV_LOG_ERROR, "Can't initialize DV format!\n"
               "Make sure that you supply exactly two streams:\n"
               " video: 25fps or 29.97fps, audio: 2ch/48kHz/PCM\n"
               " (50Mbps allows an optional second audio stream)\n");
        return -1;
    }

    rate = (AVRational){ dvc->sys->ltc_divisor, 1 };

    /* no container-level timecode: fall back to per-stream metadata */
    if (!tcr) {
        int i;
        for (i = 0; i < s->nb_streams && !tcr; i++)
            tcr = av_dict_get(s->streams[i]->metadata, "timecode", NULL, 0);
    }

    if (tcr && av_timecode_init_from_string(&dvc->tc, rate, tcr->value, s) >= 0)
        return 0;
    return av_timecode_init(&dvc->tc, rate, 0, 0, s);
}
 
/* Feed one packet to the frame assembler; write the DV frame once complete. */
static int dv_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    uint8_t *frame;
    int len;

    len = dv_assemble_frame(s->priv_data, s->streams[pkt->stream_index],
                            pkt->data, pkt->size, &frame);
    if (len > 0)
        avio_write(s->pb, frame, len);

    return 0;
}
 
/*
* We might end up with some extra A/V data without matching counterpart.
* E.g. video data without enough audio to write the complete frame.
* Currently we simply drop the last frame. I don't know whether this
* is the best strategy of all
*/
/* Muxer trailer callback: any incomplete final frame is dropped along
 * with the muxer state. */
static int dv_write_trailer(struct AVFormatContext *s)
{
    dv_delete_mux(s->priv_data);

    return 0;
}
 
/* DV muxer registration: assembled DV frames are written back-to-back
 * (write_header itself emits no bytes). */
AVOutputFormat ff_dv_muxer = {
    .name              = "dv",
    .long_name         = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
    .extensions        = "dv",
    .priv_data_size    = sizeof(DVMuxContext),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_DVVIDEO,
    .write_header      = dv_write_header,
    .write_packet      = dv_write_packet,
    .write_trailer     = dv_write_trailer,
};
/contrib/sdk/sources/ffmpeg/libavformat/dxa.c
0,0 → 1,225
/*
* DXA demuxer
* Copyright (c) 2007 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "riff.h"
 
#define DXA_EXTRA_SIZE 9
 
/* Demuxer state for DXA files. */
typedef struct{
    int frames;              /* video frames remaining to demux */
    int has_sound;           /* non-zero when a WAVE block was found */
    int bpc;                 /* audio bytes emitted per video frame */
    uint32_t bytes_left;     /* audio payload bytes still to read */
    int64_t wavpos, vidpos;  /* current positions in audio / video areas */
    int readvid;             /* set when the next packet should be video */
}DXAContext;
 
/* Probe for the DXA container: "DEXA" magic plus sane, non-zero
 * big-endian dimensions at offsets 11 and 13. */
static int dxa_probe(AVProbeData *p)
{
    int width, height;

    if (p->buf_size < 15)
        return 0;

    width  = AV_RB16(p->buf + 11);
    height = AV_RB16(p->buf + 13);

    if (AV_RL32(p->buf) != MKTAG('D', 'E', 'X', 'A'))
        return 0;
    if (!width || width > 2048 || !height || height > 2048)
        return 0;

    return AVPROBE_SCORE_MAX;
}
 
/**
 * Parse the DXA file header: "DEXA" magic, flags byte, frame count,
 * frame-rate field, dimensions, and an optional embedded WAVE block
 * carrying the audio track. Creates the video stream (index 0) and,
 * when sound is present, the audio stream (index 1).
 * @return 0 on success, negative AVERROR on malformed input or OOM
 */
static int dxa_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    DXAContext *c = s->priv_data;
    AVStream *st, *ast;
    uint32_t tag;
    int32_t fps;
    int w, h;
    int num, den;
    int flags;
    int ret;

    tag = avio_rl32(pb);
    if (tag != MKTAG('D', 'E', 'X', 'A'))
        return AVERROR_INVALIDDATA;
    flags = avio_r8(pb);
    c->frames = avio_rb16(pb);
    if(!c->frames){
        av_log(s, AV_LOG_ERROR, "File contains no frames ???\n");
        return AVERROR_INVALIDDATA;
    }

    fps = avio_rb32(pb);
    if(fps > 0){
        /* positive: frame duration in milliseconds */
        den = 1000;
        num = fps;
    }else if (fps < 0){
        /* negative: frame duration in 1/100000 s units */
        den = 100000;
        num = -fps;
    }else{
        /* zero: fall back to 10 fps */
        den = 10;
        num = 1;
    }
    w = avio_rb16(pb);
    h = avio_rb16(pb);
    c->has_sound = 0;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    // Parse WAV data header
    if(avio_rl32(pb) == MKTAG('W', 'A', 'V', 'E')){
        uint32_t size, fsize;
        c->has_sound = 1;
        size = avio_rb32(pb);
        c->vidpos = avio_tell(pb) + size; /* video data follows the WAVE block */
        avio_skip(pb, 16);
        fsize = avio_rl32(pb);

        ast = avformat_new_stream(s, NULL);
        if (!ast)
            return AVERROR(ENOMEM);
        ret = ff_get_wav_header(pb, ast->codec, fsize);
        if (ret < 0)
            return ret;
        if (ast->codec->sample_rate > 0)
            avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
        // find 'data' chunk
        while(avio_tell(pb) < c->vidpos && !url_feof(pb)){
            tag = avio_rl32(pb);
            fsize = avio_rl32(pb);
            if(tag == MKTAG('d', 'a', 't', 'a')) break;
            avio_skip(pb, fsize);
        }
        /* NOTE(review): if no 'data' chunk is found, fsize holds the size of
           the last chunk scanned -- probably unintended; verify. */
        /* spread the audio evenly over the video frames, rounded up to the
           codec's block_align */
        c->bpc = (fsize + c->frames - 1) / c->frames;
        if(ast->codec->block_align)
            c->bpc = ((c->bpc + ast->codec->block_align - 1) / ast->codec->block_align) * ast->codec->block_align;
        c->bytes_left = fsize;
        c->wavpos = avio_tell(pb);
        avio_seek(pb, c->vidpos, SEEK_SET);
    }

    /* now we are ready: build format streams */
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_DXA;
    st->codec->width      = w;
    st->codec->height     = h;
    av_reduce(&den, &num, den, num, (1UL<<31)-1);
    avpriv_set_pts_info(st, 33, num, den);
    /* flags & 0x80 means that image is interlaced,
     * flags & 0x40 means that image has double height
     * either way set true height
     */
    if(flags & 0xC0){
        st->codec->height >>= 1;
    }
    c->readvid = !c->has_sound; /* start with video when there is no audio */
    c->vidpos  = avio_tell(pb);
    s->start_time = 0;
    s->duration   = (int64_t)c->frames * AV_TIME_BASE * num / den;
    av_log(s, AV_LOG_DEBUG, "%d frame(s)\n",c->frames);

    return 0;
}
 
/**
 * Demux one packet. Audio and video are interleaved by emitting one audio
 * slice (c->bpc bytes) before every video frame; video chunks are tagged
 * NULL (unchanged frame), CMAP (palette, prepended to the next video
 * packet) or FRAM (frame data).
 *
 * Fix: all intermediate avio_read() calls are now checked; previously a
 * short read at EOF left stale bytes in buf/pal that were then parsed as
 * valid chunk data.
 *
 * @return 0 on success, AVERROR_EOF at end of stream, negative on error
 */
static int dxa_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    DXAContext *c = s->priv_data;
    int ret;
    uint32_t size;
    uint8_t buf[DXA_EXTRA_SIZE], pal[768+4];
    int pal_size = 0;

    /* time for the next audio slice? */
    if(!c->readvid && c->has_sound && c->bytes_left){
        c->readvid = 1;
        avio_seek(s->pb, c->wavpos, SEEK_SET);
        size = FFMIN(c->bytes_left, c->bpc);
        ret = av_get_packet(s->pb, pkt, size);
        pkt->stream_index = 1;
        if(ret != size)
            return AVERROR(EIO);
        c->bytes_left -= size;
        c->wavpos = avio_tell(s->pb);
        return 0;
    }
    avio_seek(s->pb, c->vidpos, SEEK_SET);
    while(!url_feof(s->pb) && c->frames){
        ret = avio_read(s->pb, buf, 4);
        if(ret != 4)
            return ret < 0 ? ret : AVERROR_EOF;
        switch(AV_RL32(buf)){
        case MKTAG('N', 'U', 'L', 'L'):
            /* "no change" frame: pending palette (if any) plus the tag */
            if(av_new_packet(pkt, 4 + pal_size) < 0)
                return AVERROR(ENOMEM);
            pkt->stream_index = 0;
            if(pal_size) memcpy(pkt->data, pal, pal_size);
            memcpy(pkt->data + pal_size, buf, 4);
            c->frames--;
            c->vidpos = avio_tell(s->pb);
            c->readvid = 0;
            return 0;
        case MKTAG('C', 'M', 'A', 'P'):
            /* palette chunk: buffered, delivered with the next video packet */
            pal_size = 768+4;
            memcpy(pal, buf, 4);
            if(avio_read(s->pb, pal + 4, 768) != 768)
                return AVERROR(EIO);
            break;
        case MKTAG('F', 'R', 'A', 'M'):
            if(avio_read(s->pb, buf + 4, DXA_EXTRA_SIZE - 4) != DXA_EXTRA_SIZE - 4)
                return AVERROR(EIO);
            size = AV_RB32(buf + 5);
            if(size > 0xFFFFFF){
                av_log(s, AV_LOG_ERROR, "Frame size is too big: %d\n", size);
                return AVERROR_INVALIDDATA;
            }
            if(av_new_packet(pkt, size + DXA_EXTRA_SIZE + pal_size) < 0)
                return AVERROR(ENOMEM);
            memcpy(pkt->data + pal_size, buf, DXA_EXTRA_SIZE);
            ret = avio_read(s->pb, pkt->data + DXA_EXTRA_SIZE + pal_size, size);
            if(ret != size){
                av_free_packet(pkt);
                return AVERROR(EIO);
            }
            if(pal_size) memcpy(pkt->data, pal, pal_size);
            pkt->stream_index = 0;
            c->frames--;
            c->vidpos = avio_tell(s->pb);
            c->readvid = 0;
            return 0;
        default:
            av_log(s, AV_LOG_ERROR, "Unknown tag %c%c%c%c\n", buf[0], buf[1], buf[2], buf[3]);
            return AVERROR_INVALIDDATA;
        }
    }
    return AVERROR_EOF;
}
 
/* DXA demuxer registration: stream 0 = DXA video, stream 1 = optional audio. */
AVInputFormat ff_dxa_demuxer = {
    .name           = "dxa",
    .long_name      = NULL_IF_CONFIG_SMALL("DXA"),
    .priv_data_size = sizeof(DXAContext),
    .read_probe     = dxa_probe,
    .read_header    = dxa_read_header,
    .read_packet    = dxa_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/eacdata.c
0,0 → 1,105
/*
* Electronic Arts .cdata file Demuxer
* Copyright (c) 2007 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Electronic Arts cdata Format Demuxer
* by Peter Ross (pross@xvid.org)
*
* Technical details here:
* http://wiki.multimedia.cx/index.php?title=EA_Command_And_Conquer_3_Audio_Codec
*/
 
#include "avformat.h"
#include "internal.h"
 
/* Demuxer state for EA .cdata files. */
typedef struct {
    unsigned int channels;   /* channel count derived from the header word */
    unsigned int audio_pts;  /* running packet counter used as pts */
} CdataDemuxContext;
 
/* Score .cdata input: first byte 0x04 followed by one of the known
 * channel-configuration bytes. A weak signature, hence the low score. */
static int cdata_probe(AVProbeData *p)
{
    const uint8_t *d = p->buf;
    int config_ok;

    config_ok = d[1] == 0x00 || d[1] == 0x04 || d[1] == 0x0C || d[1] == 0x14;

    return (d[0] == 0x04 && config_ok) ? AVPROBE_SCORE_MAX/8 : 0;
}
 
/**
 * Parse the .cdata header: a 16-bit channel-configuration word, a 16-bit
 * sample rate, then a variable-size remainder. Creates the single
 * ADPCM_EA_XAS audio stream.
 * @return 0 on success, -1 on unknown header word, AVERROR on OOM
 */
static int cdata_read_header(AVFormatContext *s)
{
    CdataDemuxContext *cdata = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int sample_rate, header;
    AVStream *st;
    int64_t channel_layout = 0;

    header = avio_rb16(pb);
    switch (header) {
    case 0x0400: cdata->channels = 1; break;
    case 0x0404: cdata->channels = 2; break;
    case 0x040C: cdata->channels = 4; channel_layout = AV_CH_LAYOUT_QUAD; break;
    case 0x0414: cdata->channels = 6; channel_layout = AV_CH_LAYOUT_5POINT1_BACK; break;
    default:
        av_log(s, AV_LOG_INFO, "unknown header 0x%04x\n", header);
        return -1;
    } /* stray ';' after the switch removed */

    sample_rate = avio_rb16(pb);
    /* skip the rest of the header; its size depends on bit 0x20 of this byte */
    avio_skip(pb, (avio_r8(pb) & 0x20) ? 15 : 11);

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_tag      = 0; /* no fourcc */
    st->codec->codec_id       = AV_CODEC_ID_ADPCM_EA_XAS;
    st->codec->channels       = cdata->channels;
    st->codec->channel_layout = channel_layout;
    st->codec->sample_rate    = sample_rate;
    avpriv_set_pts_info(st, 64, 1, sample_rate);

    cdata->audio_pts = 0;
    return 0;
}
 
/* Read one fixed-size packet (76 bytes per channel) and stamp it with the
 * running packet counter as pts. */
static int cdata_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    CdataDemuxContext *cdata = s->priv_data;
    const int packet_size = 76 * cdata->channels;
    int ret;

    ret = av_get_packet(s->pb, pkt, packet_size);
    if (ret < 0)
        return ret;

    pkt->pts = cdata->audio_pts++;

    return 0;
}
 
/* EA .cdata demuxer registration (single ADPCM_EA_XAS audio stream). */
AVInputFormat ff_ea_cdata_demuxer = {
    .name           = "ea_cdata",
    .long_name      = NULL_IF_CONFIG_SMALL("Electronic Arts cdata"),
    .priv_data_size = sizeof(CdataDemuxContext),
    .read_probe     = cdata_probe,
    .read_header    = cdata_read_header,
    .read_packet    = cdata_read_packet,
    .extensions     = "cdata",
};
/contrib/sdk/sources/ffmpeg/libavformat/electronicarts.c
0,0 → 1,674
/* Electronic Arts Multimedia File Demuxer
* Copyright (c) 2004 The ffmpeg Project
* Copyright (c) 2006-2008 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Electronic Arts Multimedia file demuxer (WVE/UV2/etc.)
* by Robin Kay (komadori at gekkou.co.uk)
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define SCHl_TAG MKTAG('S', 'C', 'H', 'l')
#define SEAD_TAG MKTAG('S', 'E', 'A', 'D') /* Sxxx header */
#define SNDC_TAG MKTAG('S', 'N', 'D', 'C') /* Sxxx data */
#define SEND_TAG MKTAG('S', 'E', 'N', 'D') /* Sxxx end */
#define SHEN_TAG MKTAG('S', 'H', 'E', 'N') /* SxEN header */
#define SDEN_TAG MKTAG('S', 'D', 'E', 'N') /* SxEN data */
#define SEEN_TAG MKTAG('S', 'E', 'E', 'N') /* SxEN end */
#define ISNh_TAG MKTAG('1', 'S', 'N', 'h') /* 1SNx header */
#define EACS_TAG MKTAG('E', 'A', 'C', 'S')
#define ISNd_TAG MKTAG('1', 'S', 'N', 'd') /* 1SNx data */
#define ISNe_TAG MKTAG('1', 'S', 'N', 'e') /* 1SNx end */
#define PT00_TAG MKTAG('P', 'T', 0x0, 0x0)
#define GSTR_TAG MKTAG('G', 'S', 'T', 'R')
#define SCDl_TAG MKTAG('S', 'C', 'D', 'l')
#define SCEl_TAG MKTAG('S', 'C', 'E', 'l')
#define kVGT_TAG MKTAG('k', 'V', 'G', 'T') /* TGV I-frame */
#define fVGT_TAG MKTAG('f', 'V', 'G', 'T') /* TGV P-frame */
#define mTCD_TAG MKTAG('m', 'T', 'C', 'D') /* MDEC */
#define MADk_TAG MKTAG('M', 'A', 'D', 'k') /* MAD I-frame */
#define MADm_TAG MKTAG('M', 'A', 'D', 'm') /* MAD P-frame */
#define MADe_TAG MKTAG('M', 'A', 'D', 'e') /* MAD lqp-frame */
#define MPCh_TAG MKTAG('M', 'P', 'C', 'h') /* MPEG-2 */
#define TGQs_TAG MKTAG('T', 'G', 'Q', 's') /* TGQ I-frame (appears in .TGQ files) */
#define pQGT_TAG MKTAG('p', 'Q', 'G', 'T') /* TGQ I-frame (appears in .UV files) */
#define pIQT_TAG MKTAG('p', 'I', 'Q', 'T') /* TQI/UV2 I-frame (.UV2/.WVE) */
#define MVhd_TAG MKTAG('M', 'V', 'h', 'd')
#define MV0K_TAG MKTAG('M', 'V', '0', 'K')
#define MV0F_TAG MKTAG('M', 'V', '0', 'F')
#define MVIh_TAG MKTAG('M', 'V', 'I', 'h') /* CMV header */
#define MVIf_TAG MKTAG('M', 'V', 'I', 'f') /* CMV I-frame */
 
/* Demuxer state for EA multimedia containers. */
typedef struct EaDemuxContext {
    int big_endian;            /* set when header block sizes are big-endian */

    enum AVCodecID video_codec;
    AVRational time_base;      /* video frame duration */
    int width, height;
    int nb_frames;
    int video_stream_index;

    enum AVCodecID audio_codec;
    int audio_stream_index;

    int bytes;                 /* bytes per audio sample: 1=8-bit, 2=16-bit */
    int sample_rate;
    int num_channels;
    int num_samples;
} EaDemuxContext;
 
/* Read a variable-length big-endian integer: one length byte followed by
 * that many data bytes, most significant first. */
static uint32_t read_arbitrary(AVIOContext *pb)
{
    uint32_t value = 0;
    int count = avio_r8(pb);
    int i;

    for (i = 0; i < count; i++)
        value = (value << 8) | avio_r8(pb);

    return value;
}
 
/**
 * Parse the PT/GSTR audio header of SCHl/SHEN blocks: a stream of tagged
 * "elements", each introduced by a one-byte id with a read_arbitrary()
 * encoded value. Fills the EaDemuxContext audio fields and selects the
 * audio codec from the compression type / revision combination.
 * @return 1 on success, 0 when the stream type is unsupported
 */
static int process_audio_header_elements(AVFormatContext *s)
{
    EaDemuxContext *ea = s->priv_data;
    AVIOContext *pb = s->pb;
    int in_header = 1;
    int compression_type = -1, revision = -1, revision2 = -1;

    /* defaults used when the header does not override them */
    ea->bytes = 2;
    ea->sample_rate = -1;
    ea->num_channels = 1;

    while (!url_feof(pb) && in_header) {
        int in_subheader;
        uint8_t byte;
        byte = avio_r8(pb);

        switch (byte) {
        case 0xFD:
            av_log(s, AV_LOG_DEBUG, "entered audio subheader\n");
            in_subheader = 1;
            while (!url_feof(pb) && in_subheader) {
                uint8_t subbyte;
                subbyte = avio_r8(pb);

                switch (subbyte) {
                case 0x80:
                    revision = read_arbitrary(pb);
                    av_log(s, AV_LOG_DEBUG,
                           "revision (element 0x80) set to 0x%08x\n", revision);
                    break;
                case 0x82:
                    ea->num_channels = read_arbitrary(pb);
                    av_log(s, AV_LOG_DEBUG,
                           "num_channels (element 0x82) set to 0x%08x\n",
                           ea->num_channels);
                    break;
                case 0x83:
                    compression_type = read_arbitrary(pb);
                    av_log(s, AV_LOG_DEBUG,
                           "compression_type (element 0x83) set to 0x%08x\n",
                           compression_type);
                    break;
                case 0x84:
                    ea->sample_rate = read_arbitrary(pb);
                    av_log(s, AV_LOG_DEBUG,
                           "sample_rate (element 0x84) set to %i\n",
                           ea->sample_rate);
                    break;
                case 0x85:
                    ea->num_samples = read_arbitrary(pb);
                    av_log(s, AV_LOG_DEBUG,
                           "num_samples (element 0x85) set to 0x%08x\n",
                           ea->num_samples);
                    break;
                case 0x8A:
                    /* 0x8A terminates the subheader but not the header */
                    av_log(s, AV_LOG_DEBUG,
                           "element 0x%02x set to 0x%08x\n",
                           subbyte, read_arbitrary(pb));
                    av_log(s, AV_LOG_DEBUG, "exited audio subheader\n");
                    in_subheader = 0;
                    break;
                case 0xA0:
                    revision2 = read_arbitrary(pb);
                    av_log(s, AV_LOG_DEBUG,
                           "revision2 (element 0xA0) set to 0x%08x\n",
                           revision2);
                    break;
                case 0xFF:
                    av_log(s, AV_LOG_DEBUG,
                           "end of header block reached (within audio subheader)\n");
                    in_subheader = 0;
                    in_header = 0;
                    break;
                default:
                    /* unknown elements are logged and skipped */
                    av_log(s, AV_LOG_DEBUG,
                           "element 0x%02x set to 0x%08x\n",
                           subbyte, read_arbitrary(pb));
                    break;
                }
            }
            break;
        case 0xFF:
            av_log(s, AV_LOG_DEBUG, "end of header block reached\n");
            in_header = 0;
            break;
        default:
            av_log(s, AV_LOG_DEBUG,
                   "header element 0x%02x set to 0x%08x\n",
                   byte, read_arbitrary(pb));
            break;
        }
    }

    /* pick the audio codec from the collected fields */
    switch (compression_type) {
    case 0:
        ea->audio_codec = AV_CODEC_ID_PCM_S16LE;
        break;
    case 7:
        ea->audio_codec = AV_CODEC_ID_ADPCM_EA;
        break;
    case -1:
        /* no explicit compression type: infer codec from the revisions */
        switch (revision) {
        case 1:
            ea->audio_codec = AV_CODEC_ID_ADPCM_EA_R1;
            break;
        case 2:
            ea->audio_codec = AV_CODEC_ID_ADPCM_EA_R2;
            break;
        case 3:
            ea->audio_codec = AV_CODEC_ID_ADPCM_EA_R3;
            break;
        case -1:
            break;
        default:
            avpriv_request_sample(s, "stream type; revision=%i", revision);
            return 0;
        }
        /* revision2 may override the choice made above */
        switch (revision2) {
        case 8:
            ea->audio_codec = AV_CODEC_ID_PCM_S16LE_PLANAR;
            break;
        case 10:
            switch (revision) {
            case -1:
            case 2: ea->audio_codec = AV_CODEC_ID_ADPCM_EA_R1; break;
            case 3: ea->audio_codec = AV_CODEC_ID_ADPCM_EA_R2; break;
            default:
                avpriv_request_sample(s, "stream type; revision=%i, revision2=%i", revision, revision2);
                return 0;
            }
            break;
        case 16:
            ea->audio_codec = AV_CODEC_ID_MP3;
            break;
        case -1:
            break;
        default:
            ea->audio_codec = AV_CODEC_ID_NONE;
            avpriv_request_sample(s, "stream type; revision2=%i", revision2);
            return 0;
        }
        break;
    default:
        avpriv_request_sample(s,
                              "stream type; compression_type=%i",
                              compression_type);
        return 0;
    }

    /* default sample rate when the header did not specify one */
    if (ea->sample_rate == -1)
        ea->sample_rate = revision == 3 ? 48000 : 22050;

    return 1;
}
 
/**
 * Parse the EACS audio header of a 1SNh block: sample rate (in the file's
 * endianness), bytes-per-sample, channel count and compression type.
 * Unsupported compression types are reported but not fatal.
 */
static void process_audio_header_eacs(AVFormatContext *s)
{
    EaDemuxContext *ea = s->priv_data;
    AVIOContext *pb = s->pb;
    int compression_type;

    ea->sample_rate  = ea->big_endian ? avio_rb32(pb) : avio_rl32(pb);
    ea->bytes        = avio_r8(pb); /* 1=8-bit, 2=16-bit */
    ea->num_channels = avio_r8(pb);
    compression_type = avio_r8(pb);
    avio_skip(pb, 13);

    if (compression_type == 0) {
        if (ea->bytes == 1)
            ea->audio_codec = AV_CODEC_ID_PCM_S8;
        else if (ea->bytes == 2)
            ea->audio_codec = AV_CODEC_ID_PCM_S16LE;
    } else if (compression_type == 1) {
        ea->audio_codec = AV_CODEC_ID_PCM_MULAW;
        ea->bytes       = 1;
    } else if (compression_type == 2) {
        ea->audio_codec = AV_CODEC_ID_ADPCM_IMA_EA_EACS;
    } else {
        avpriv_request_sample(s,
                              "stream type; audio compression_type=%i",
                              compression_type);
    }
}
 
/* Parse the SEAD (Sxxx) audio header: three little-endian dwords. */
static void process_audio_header_sead(AVFormatContext *s)
{
    EaDemuxContext *ctx = s->priv_data;
    AVIOContext *io     = s->pb;

    ctx->sample_rate  = avio_rl32(io);
    ctx->bytes        = avio_rl32(io); /* 1=8-bit, 2=16-bit */
    ctx->num_channels = avio_rl32(io);
    ctx->audio_codec  = AV_CODEC_ID_ADPCM_IMA_EA_SEAD;
}
 
/* Parse the mTCD (MDEC) video header: 16-bit dimensions after a 4-byte
 * skip; the time base is fixed at 15 fps. */
static void process_video_header_mdec(AVFormatContext *s)
{
    EaDemuxContext *ctx = s->priv_data;
    AVIOContext *io     = s->pb;

    avio_skip(io, 4);
    ctx->width       = avio_rl16(io);
    ctx->height      = avio_rl16(io);
    ctx->time_base   = (AVRational) { 1, 15 };
    ctx->video_codec = AV_CODEC_ID_MDEC;
}
 
/**
 * Parse the MVhd (VP6) video header: frame count and time base.
 * @return 1 on success, AVERROR_INVALIDDATA on a non-positive time base
 */
static int process_video_header_vp6(AVFormatContext *s)
{
    EaDemuxContext *ctx = s->priv_data;
    AVIOContext *io     = s->pb;

    avio_skip(io, 8);
    ctx->nb_frames = avio_rl32(io);
    avio_skip(io, 4);
    ctx->time_base.den = avio_rl32(io);
    ctx->time_base.num = avio_rl32(io);

    if (ctx->time_base.num <= 0 || ctx->time_base.den <= 0) {
        av_log(s, AV_LOG_ERROR, "Timebase is invalid\n");
        return AVERROR_INVALIDDATA;
    }

    ctx->video_codec = AV_CODEC_ID_VP6;
    return 1;
}
 
/* Parse the MVIh (CMV) video header: 16-bit fps word at offset 10.
 * A zero fps leaves the time base untouched. */
static void process_video_header_cmv(AVFormatContext *s)
{
    EaDemuxContext *ctx = s->priv_data;
    int frame_rate;

    avio_skip(s->pb, 10);
    frame_rate = avio_rl16(s->pb);
    if (frame_rate)
        ctx->time_base = (AVRational) { 1, frame_rate };

    ctx->video_codec = AV_CODEC_ID_CMV;
}
 
/* Process EA file header: scan up to 5 leading blocks, dispatching each
 * recognized tag to its audio/video header parser, and stop early once
 * both an audio and a video codec have been identified.
 * Return 1 if the EA file is valid and successfully opened, 0 otherwise
 * (a negative sub-parser error is propagated). */
static int process_ea_header(AVFormatContext *s)
{
    uint32_t blockid, size = 0;
    EaDemuxContext *ea = s->priv_data;
    AVIOContext *pb = s->pb;
    int i;

    for (i = 0; i < 5 && (!ea->audio_codec || !ea->video_codec); i++) {
        unsigned int startpos = avio_tell(pb); /* NOTE(review): truncated to
                                                  32 bits for huge offsets */
        int err = 0;

        blockid = avio_rl32(pb);
        size = avio_rl32(pb);
        /* the first block's size field reveals the file's endianness */
        if (i == 0)
            ea->big_endian = size > 0x000FFFFF;
        if (ea->big_endian)
            size = av_bswap32(size);

        switch (blockid) {
        case ISNh_TAG:
            if (avio_rl32(pb) != EACS_TAG) {
                avpriv_request_sample(s, "unknown 1SNh headerid");
                return 0;
            }
            process_audio_header_eacs(s);
            break;

        case SCHl_TAG:
        case SHEN_TAG:
            /* sub-id: either GSTR, or a PT-prefixed tag */
            blockid = avio_rl32(pb);
            if (blockid == GSTR_TAG) {
                avio_skip(pb, 4);
            } else if ((blockid & 0xFFFF) != PT00_TAG) {
                avpriv_request_sample(s, "unknown SCHl headerid");
                return 0;
            }
            err = process_audio_header_elements(s);
            break;

        case SEAD_TAG:
            process_audio_header_sead(s);
            break;

        case MVIh_TAG:
            process_video_header_cmv(s);
            break;

        case kVGT_TAG:
            ea->video_codec = AV_CODEC_ID_TGV;
            break;

        case mTCD_TAG:
            process_video_header_mdec(s);
            break;

        case MPCh_TAG:
            ea->video_codec = AV_CODEC_ID_MPEG2VIDEO;
            break;

        case pQGT_TAG:
        case TGQs_TAG:
            ea->video_codec = AV_CODEC_ID_TGQ;
            break;

        case pIQT_TAG:
            ea->video_codec = AV_CODEC_ID_TQI;
            break;

        case MADk_TAG:
            ea->video_codec = AV_CODEC_ID_MAD;
            break;

        case MVhd_TAG:
            err = process_video_header_vp6(s);
            break;
        }

        if (err < 0) {
            av_log(s, AV_LOG_ERROR, "error parsing header: %i\n", err);
            return err;
        }

        /* jump to the next block regardless of how much the handler read */
        avio_seek(pb, startpos + size, SEEK_SET);
    }

    /* rewind so that packet reading starts from the beginning */
    avio_seek(pb, 0, SEEK_SET);

    return 1;
}
 
/* Probe EA containers: a known leading chunk tag plus a plausible block
 * size in either endianness. */
static int ea_probe(AVProbeData *p)
{
    uint32_t tag = AV_RL32(&p->buf[0]);

    if (tag != ISNh_TAG && tag != SCHl_TAG && tag != SEAD_TAG &&
        tag != SHEN_TAG && tag != kVGT_TAG && tag != MADk_TAG &&
        tag != MPCh_TAG && tag != MVhd_TAG && tag != MVIh_TAG)
        return 0;

    /* the second dword is a block size; reject absurd values in both
     * byte orders */
    if (AV_RL32(&p->buf[4]) > 0xfffff && AV_RB32(&p->buf[4]) > 0xfffff)
        return 0;

    return AVPROBE_SCORE_MAX;
}
 
/**
 * Read the EA container header and create the video and/or audio streams.
 * Unsupported audio parameters disable the audio stream but are not fatal.
 * Note: returns 1 rather than 0 on success.
 */
static int ea_read_header(AVFormatContext *s)
{
    EaDemuxContext *ea = s->priv_data;
    AVStream *st;

    if (process_ea_header(s)<=0)
        return AVERROR(EIO);

    if (ea->video_codec) {
        /* initialize the video decoder stream */
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        ea->video_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = ea->video_codec;
        // parsing is necessary to make FFmpeg generate correct timestamps
        if (st->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO)
            st->need_parsing = AVSTREAM_PARSE_HEADERS;
        st->codec->codec_tag = 0; /* no fourcc */
        st->codec->width = ea->width;
        st->codec->height = ea->height;
        st->duration = st->nb_frames = ea->nb_frames;
        if (ea->time_base.num)
            avpriv_set_pts_info(st, 64, ea->time_base.num, ea->time_base.den);
        st->r_frame_rate =
        st->avg_frame_rate = av_inv_q(ea->time_base);
    }

    if (ea->audio_codec) {
        /* out-of-range parameters drop the audio track instead of failing */
        if (ea->num_channels <= 0 || ea->num_channels > 2) {
            av_log(s, AV_LOG_WARNING,
                   "Unsupported number of channels: %d\n", ea->num_channels);
            ea->audio_codec = 0;
            return 1;
        }
        if (ea->sample_rate <= 0) {
            av_log(s, AV_LOG_ERROR,
                   "Unsupported sample rate: %d\n", ea->sample_rate);
            ea->audio_codec = 0;
            return 1;
        }
        if (ea->bytes <= 0) {
            av_log(s, AV_LOG_ERROR,
                   "Invalid number of bytes per sample: %d\n", ea->bytes);
            ea->audio_codec = AV_CODEC_ID_NONE;
            return 1;
        }

        /* initialize the audio decoder stream */
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        avpriv_set_pts_info(st, 33, 1, ea->sample_rate);
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = ea->audio_codec;
        st->codec->codec_tag = 0; /* no tag */
        st->codec->channels = ea->num_channels;
        st->codec->sample_rate = ea->sample_rate;
        st->codec->bits_per_coded_sample = ea->bytes * 8;
        st->codec->bit_rate = st->codec->channels *
                              st->codec->sample_rate *
                              st->codec->bits_per_coded_sample / 4;
        st->codec->block_align = st->codec->channels *
                                 st->codec->bits_per_coded_sample;
        ea->audio_stream_index = st->index;
        st->start_time = 0;
    }

    return 1;
}
 
/**
 * Read the next audio or video packet from the chunk stream. A video frame
 * split across an MVIh header chunk and its following data chunk is
 * accumulated via partial_packet until complete.
 */
static int ea_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    EaDemuxContext *ea = s->priv_data;
    AVIOContext *pb = s->pb;
    int partial_packet = 0;
    unsigned int chunk_type, chunk_size;
    int ret = 0, packet_read = 0, key = 0;
    int av_uninit(num_samples);

    while (!packet_read || partial_packet) {
        chunk_type = avio_rl32(pb);
        chunk_size = ea->big_endian ? avio_rb32(pb) : avio_rl32(pb);
        if (chunk_size <= 8)
            return AVERROR_INVALIDDATA;
        chunk_size -= 8; /* payload size, without the 8-byte preamble */

        switch (chunk_type) {
        /* audio data */
        case ISNh_TAG:
            /* header chunk also contains data; skip over the header portion */
            if (chunk_size < 32)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 32);
            chunk_size -= 32;
            /* fall through */
        case ISNd_TAG:
        case SCDl_TAG:
        case SNDC_TAG:
        case SDEN_TAG:
            if (!ea->audio_codec) {
                avio_skip(pb, chunk_size);
                break;
            } else if (ea->audio_codec == AV_CODEC_ID_PCM_S16LE_PLANAR ||
                       ea->audio_codec == AV_CODEC_ID_MP3) {
                /* 12-byte sub-header carrying the sample count */
                num_samples = avio_rl32(pb);
                avio_skip(pb, 8);
                chunk_size -= 12;
            }
            if (partial_packet) {
                avpriv_request_sample(s, "video header followed by audio packet");
                av_free_packet(pkt);
                partial_packet = 0;
            }
            ret = av_get_packet(pb, pkt, chunk_size);
            if (ret < 0)
                return ret;
            pkt->stream_index = ea->audio_stream_index;

            /* per-codec duration computation */
            switch (ea->audio_codec) {
            case AV_CODEC_ID_ADPCM_EA:
            case AV_CODEC_ID_ADPCM_EA_R1:
            case AV_CODEC_ID_ADPCM_EA_R2:
            case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
            case AV_CODEC_ID_ADPCM_EA_R3:
                if (pkt->size < 4) {
                    av_log(s, AV_LOG_ERROR, "Packet is too short\n");
                    av_free_packet(pkt);
                    return AVERROR_INVALIDDATA;
                }
                /* the duration is stored in the payload's first dword
                   (big-endian only for R3) */
                if (ea->audio_codec == AV_CODEC_ID_ADPCM_EA_R3)
                    pkt->duration = AV_RB32(pkt->data);
                else
                    pkt->duration = AV_RL32(pkt->data);
                break;
            case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
                pkt->duration = ret * 2 / ea->num_channels;
                break;
            case AV_CODEC_ID_PCM_S16LE_PLANAR:
            case AV_CODEC_ID_MP3:
                pkt->duration = num_samples;
                break;
            default:
                pkt->duration = chunk_size / (ea->bytes * ea->num_channels);
            }

            packet_read = 1;
            break;

        /* ending tag */
        case 0:
        case ISNe_TAG:
        case SCEl_TAG:
        case SEND_TAG:
        case SEEN_TAG:
            ret = AVERROR(EIO);
            packet_read = 1;
            break;

        /* video frames; the first group are key frames */
        case MVIh_TAG:
        case kVGT_TAG:
        case pQGT_TAG:
        case TGQs_TAG:
        case MADk_TAG:
            key = AV_PKT_FLAG_KEY;
            /* fall through */
        case MVIf_TAG:
        case fVGT_TAG:
        case MADm_TAG:
        case MADe_TAG:
            avio_seek(pb, -8, SEEK_CUR); // include chunk preamble
            chunk_size += 8;
            goto get_video_packet;

        case mTCD_TAG:
            avio_skip(pb, 8); // skip ea DCT header
            chunk_size -= 8;
            goto get_video_packet;

        case MV0K_TAG:
        case MPCh_TAG:
        case pIQT_TAG:
            key = AV_PKT_FLAG_KEY;
            /* fall through */
        case MV0F_TAG:
get_video_packet:
            if (partial_packet) {
                /* append the frame data to the buffered MVIh header */
                ret = av_append_packet(pb, pkt, chunk_size);
            } else
                ret = av_get_packet(pb, pkt, chunk_size);
            if (ret < 0) {
                packet_read = 1;
                break;
            }
            /* an MVIh chunk alone is incomplete; wait for its data chunk */
            partial_packet = chunk_type == MVIh_TAG;
            pkt->stream_index = ea->video_stream_index;
            pkt->flags |= key;
            packet_read = 1;
            break;

        default:
            /* unknown chunks are silently skipped */
            avio_skip(pb, chunk_size);
            break;
        }
    }

    if (ret < 0 && partial_packet)
        av_free_packet(pkt);
    return ret;
}
 
/* EA multimedia demuxer registration (WVE/UV2 and related containers). */
AVInputFormat ff_ea_demuxer = {
    .name           = "ea",
    .long_name      = NULL_IF_CONFIG_SMALL("Electronic Arts Multimedia"),
    .priv_data_size = sizeof(EaDemuxContext),
    .read_probe     = ea_probe,
    .read_header    = ea_read_header,
    .read_packet    = ea_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/epafdec.c
0,0 → 1,104
/*
* Ensoniq Paris Audio File demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "pcm.h"
 
/* Probe for Ensoniq Paris Audio Files: accept either the little-endian
 * ("fap ") or big-endian (" paf") magic, with the matching endianness
 * marker at offset 8, a zero word at offset 4 and non-zero words at
 * offsets 12 and 20. */
static int epaf_probe(AVProbeData *p)
{
    uint32_t magic = AV_RL32(p->buf);
    int le_ok      = magic == MKTAG('f','a','p',' ') && AV_RL32(p->buf + 8) == 1;
    int be_ok      = magic == MKTAG(' ','p','a','f') && AV_RN32(p->buf + 8) == 0;

    if (!le_ok && !be_ok)
        return 0;
    if (AV_RN32(p->buf + 4) || !AV_RN32(p->buf + 12) || !AV_RN32(p->buf + 20))
        return 0;
    return AVPROBE_SCORE_MAX / 4 * 3;
}
 
/* Parse the Ensoniq Paris Audio File header and create the single audio
 * stream.  The byte order of the numeric header fields is selected by
 * the endianness flag stored after the magic. */
static int epaf_read_header(AVFormatContext *s)
{
    int le, sample_rate, codec, channels;
    AVStream *st;

    avio_skip(s->pb, 4);          /* magic, already validated by probe */
    if (avio_rl32(s->pb))         /* this dword must be zero (see probe) */
        return AVERROR_INVALIDDATA;

    le = avio_rl32(s->pb);        /* endianness flag: 1 = LE, 0 = BE */
    if (le && le != 1)
        return AVERROR_INVALIDDATA;

    if (le) {
        sample_rate = avio_rl32(s->pb);
        codec       = avio_rl32(s->pb);
        channels    = avio_rl32(s->pb);
    } else {
        sample_rate = avio_rb32(s->pb);
        codec       = avio_rb32(s->pb);
        channels    = avio_rb32(s->pb);
    }

    if (!channels || !sample_rate)
        return AVERROR_INVALIDDATA;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->channels    = channels;
    st->codec->sample_rate = sample_rate;
    switch (codec) {
    case 0:
        /* 16-bit PCM, endianness follows the file's flag */
        st->codec->codec_id = le ? AV_CODEC_ID_PCM_S16LE : AV_CODEC_ID_PCM_S16BE;
        break;
    case 2:
        st->codec->codec_id = AV_CODEC_ID_PCM_S8;
        break;
    case 1:
        avpriv_request_sample(s, "24-bit Paris PCM format");
        /* intentional fallthrough: 24-bit samples are not supported yet */
    default:
        return AVERROR_INVALIDDATA;
    }

    st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id);
    st->codec->block_align = st->codec->bits_per_coded_sample * st->codec->channels / 8;

    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

    /* skip the remainder of the fixed-size header up to the sample data */
    if (avio_skip(s->pb, 2024) < 0)
        return AVERROR_INVALIDDATA;
    return 0;
}
 
/* Demuxer registration; packet reading/seeking is delegated to the
 * generic raw-PCM helpers from pcm.h. */
AVInputFormat ff_epaf_demuxer = {
    .name        = "epaf",
    .long_name   = NULL_IF_CONFIG_SMALL("Ensoniq Paris Audio File"),
    .read_probe  = epaf_probe,
    .read_header = epaf_read_header,
    .read_packet = ff_pcm_read_packet,
    .read_seek   = ff_pcm_read_seek,
    .extensions  = "paf,fap",
    .flags       = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/ffm.h
0,0 → 1,60
/*
* FFM (ffserver live feed) common header
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_FFM_H
#define AVFORMAT_FFM_H
 
#include <stdint.h>
#include "avformat.h"
#include "avio.h"
 
/* The FFM file is made of blocks of fixed size */
#define FFM_HEADER_SIZE 14
#define FFM_PACKET_SIZE 4096
#define PACKET_ID 0x666d
 
/* each packet contains frames (which can span several packets */
#define FRAME_HEADER_SIZE 16
#define FLAG_KEY_FRAME 0x01
#define FLAG_DTS 0x02
 
/* read_state values for the demuxer's two-phase packet parser */
enum {
    READ_HEADER,
    READ_DATA,
};

/* Shared muxer/demuxer state.  The file is a sequence of fixed-size
 * FFM_PACKET_SIZE blocks; one block at a time is buffered in packet[]
 * and consumed/filled through packet_ptr..packet_end. */
typedef struct FFMContext {
    /* only reading mode */
    int64_t write_index, file_size;       /* writer position / total file size */
    int read_state;                       /* READ_HEADER or READ_DATA */
    uint8_t header[FRAME_HEADER_SIZE+4];  /* current frame header (+4 for the pts-dts delta) */

    /* read and write */
    int first_packet; /* true if first packet, needed to set the discontinuity tag */
    int packet_size;
    int frame_offset;
    int64_t dts;
    uint8_t *packet_ptr, *packet_end;
    uint8_t packet[FFM_PACKET_SIZE];
    int64_t start_time;
} FFMContext;
 
#endif /* AVFORMAT_FFM_H */
/contrib/sdk/sources/ffmpeg/libavformat/ffmdec.c
0,0 → 1,635
/*
* FFM (ffserver live feed) demuxer
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "avformat.h"
#include "internal.h"
#include "ffm.h"
#include "avio_internal.h"
 
/* Check whether at least 'size' bytes of payload can be read without
 * blocking.  Returns 1 when available, AVERROR_EOF at true end of file,
 * AVERROR(EAGAIN) when the live writer has not produced enough data yet. */
static int ffm_is_avail_data(AVFormatContext *s, int size)
{
    FFMContext *ffm = s->priv_data;
    int buffered = ffm->packet_end - ffm->packet_ptr;
    int64_t pos, avail_size;

    /* already enough in the in-memory packet buffer */
    if (size <= buffered)
        return 1;

    pos = avio_tell(s->pb);
    if (!ffm->write_index) {
        if (pos == ffm->file_size)
            return AVERROR_EOF;
        avail_size = ffm->file_size - pos;
    } else if (pos == ffm->write_index) {
        /* exactly at the end of stream */
        return AVERROR(EAGAIN);
    } else if (pos < ffm->write_index) {
        avail_size = ffm->write_index - pos;
    } else {
        /* writer wrapped around: data to EOF plus data from the first packet */
        avail_size = (ffm->file_size - pos) + (ffm->write_index - FFM_PACKET_SIZE);
    }
    /* each file packet carries packet_size - FFM_HEADER_SIZE payload bytes */
    avail_size = (avail_size / ffm->packet_size) * (ffm->packet_size - FFM_HEADER_SIZE)
                 + buffered;
    return size <= avail_size ? 1 : AVERROR(EAGAIN);
}
 
/* Scan forward byte by byte until the 16-bit PACKET_ID sync word is
 * found in 'state'.  Returns 0 on success, -1 on EOF. */
static int ffm_resync(AVFormatContext *s, int state)
{
    av_log(s, AV_LOG_ERROR, "resyncing\n");
    for (;;) {
        if (state == PACKET_ID)
            return 0;
        if (url_feof(s->pb)) {
            av_log(s, AV_LOG_ERROR, "cannot find FFM syncword\n");
            return -1;
        }
        /* shift in the next byte and retest */
        state = (state << 8) | avio_r8(s->pb);
    }
}
 
/* Copy 'size' bytes of demultiplexed frame data into buf, refilling the
 * in-memory packet buffer from the file as needed.  'header' is true
 * when a frame header is being read: for a fresh/resync packet, reading
 * then restarts at the packet's first frame header so headers are never
 * taken from the middle of a frame.  Returns the number of bytes read
 * (possibly 0), or -1 on error. */
static int ffm_read_data(AVFormatContext *s,
                         uint8_t *buf, int size, int header)
{
    FFMContext *ffm = s->priv_data;
    AVIOContext *pb = s->pb;
    int len, fill_size, size1, frame_offset, id;

    size1 = size;
    while (size > 0) {
    redo:
        len = ffm->packet_end - ffm->packet_ptr;
        if (len < 0)
            return -1;
        if (len > size)
            len = size;
        if (len == 0) {
            /* buffer exhausted: load the next file packet, wrapping
               around to the first data packet at end of file */
            if (avio_tell(pb) == ffm->file_size)
                avio_seek(pb, ffm->packet_size, SEEK_SET);
    retry_read:
            /* align the avio buffer to whole FFM packets */
            if (pb->buffer_size != ffm->packet_size) {
                int64_t tell = avio_tell(pb);
                ffio_set_buf_size(pb, ffm->packet_size);
                avio_seek(pb, tell, SEEK_SET);
            }
            id = avio_rb16(pb); /* PACKET_ID */
            if (id != PACKET_ID)
                if (ffm_resync(s, id) < 0)
                    return -1;
            fill_size = avio_rb16(pb);           /* unused bytes at packet tail */
            ffm->dts = avio_rb64(pb);
            frame_offset = avio_rb16(pb);        /* offset of first frame header, 0x8000 = resync flag */
            avio_read(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE);
            ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size);
            if (ffm->packet_end < ffm->packet || frame_offset < 0)
                return -1;
            /* if first packet or resynchronization packet, we must
               handle it specifically */
            if (ffm->first_packet || (frame_offset & 0x8000)) {
                if (!frame_offset) {
                    /* This packet has no frame headers in it */
                    if (avio_tell(pb) >= ffm->packet_size * 3LL) {
                        /* step two packets back and retry there */
                        avio_seek(pb, -ffm->packet_size * 2LL, SEEK_CUR);
                        goto retry_read;
                    }
                    /* This is bad, we cannot find a valid frame header */
                    return 0;
                }
                ffm->first_packet = 0;
                if ((frame_offset & 0x7fff) < FFM_HEADER_SIZE)
                    return -1;
                /* position on the first frame header inside this packet */
                ffm->packet_ptr = ffm->packet + (frame_offset & 0x7fff) - FFM_HEADER_SIZE;
                if (!header)
                    break;
            } else {
                ffm->packet_ptr = ffm->packet;
            }
            goto redo;
        }
        memcpy(buf, ffm->packet_ptr, len);
        buf += len;
        ffm->packet_ptr += len;
        size -= len;
        header = 0;
    }
    return size1 - size;
}
 
/* Seek helper: clamp the target position into the valid data range
 * [FFM_PACKET_SIZE, file_size - FFM_PACKET_SIZE] before seeking, so we
 * never land inside the file header or past the last full packet. */
static int64_t ffm_seek1(AVFormatContext *s, int64_t pos1)
{
    FFMContext *ffm = s->priv_data;
    int64_t target = pos1;

    if (target > ffm->file_size - FFM_PACKET_SIZE)
        target = ffm->file_size - FFM_PACKET_SIZE;
    if (target < FFM_PACKET_SIZE)
        target = FFM_PACKET_SIZE;
    av_dlog(s, "seek to %"PRIx64" -> %"PRIx64"\n", pos1, target);
    return avio_seek(s->pb, target, SEEK_SET);
}
 
/* Return the DTS stored in the packet header at file position 'pos'
 * (the 64-bit field right after the 4 leading header bytes). */
static int64_t get_dts(AVFormatContext *s, int64_t pos)
{
    int64_t dts;

    ffm_seek1(s, pos);
    avio_skip(s->pb, 4);         /* PACKET_ID + fill_size */
    dts = avio_rb64(s->pb);
    av_dlog(s, "dts=%0.6f\n", dts / 1000000.0);
    return dts;
}
 
/* Heuristically fix up ffm->write_index for a live feed file whose
 * writer wrapped around: if the DTS near the end of the file is not
 * clearly newer than the DTS at the start, binary-search the wrap
 * point (the packet where timestamps jump backwards) and advance
 * write_index past it.  The read position is restored on exit. */
static void adjust_write_index(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t pts;
    //int64_t orig_write_index = ffm->write_index;
    int64_t pos_min, pos_max;
    int64_t pts_start;
    int64_t ptr = avio_tell(pb);   /* remember position so it can be restored */


    pos_min = 0;
    pos_max = ffm->file_size - 2 * FFM_PACKET_SIZE;

    pts_start = get_dts(s, pos_min);

    pts = get_dts(s, pos_max);

    /* end clearly newer than start (100 ms slack): index is consistent */
    if (pts - 100000 > pts_start)
        goto end;

    ffm->write_index = FFM_PACKET_SIZE;

    pts_start = get_dts(s, pos_min);

    pts = get_dts(s, pos_max);

    if (pts - 100000 <= pts_start) {
        /* binary search (packet-aligned) for the wrap point */
        while (1) {
            int64_t newpos;
            int64_t newpts;

            newpos = ((pos_max + pos_min) / (2 * FFM_PACKET_SIZE)) * FFM_PACKET_SIZE;

            if (newpos == pos_min)
                break;

            newpts = get_dts(s, newpos);

            if (newpts - 100000 <= pts) {
                pos_max = newpos;
                pts = newpts;
            } else {
                pos_min = newpos;
            }
        }
        ffm->write_index += pos_max;
    }

 end:
    avio_seek(pb, ptr, SEEK_SET);
}
 
 
/* Free the per-stream rc_eq strings allocated while parsing the header. */
static int ffm_close(AVFormatContext *s)
{
    int i = 0;

    while (i < s->nb_streams) {
        av_freep(&s->streams[i]->codec->rc_eq);
        i++;
    }

    return 0;
}
 
/* Parse an FFM2 header: a sequence of (id, size) chunks — MAIN with
 * global info, then per-stream COMM (generic codec parameters) followed
 * by STVI (video) or STAU (audio) specifics — terminated by a zero id.
 * The field order below must match ffm_write_header() exactly. */
static int ffm2_read_header(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    AVStream *st;
    AVIOContext *pb = s->pb;
    AVCodecContext *codec;

    ffm->packet_size = avio_rb32(pb);
    if (ffm->packet_size != FFM_PACKET_SIZE)
        goto fail;
    ffm->write_index = avio_rb64(pb);
    /* get also filesize */
    if (pb->seekable) {
        ffm->file_size = avio_size(pb);
        /* NOTE(review): adjust_write_index() is disabled via "&& 0" —
         * presumably deliberate; confirm before re-enabling */
        if (ffm->write_index && 0)
            adjust_write_index(s);
    } else {
        ffm->file_size = (UINT64_C(1) << 63) - 1;  /* effectively unbounded */
    }

    while(!url_feof(pb)) {
        unsigned id = avio_rb32(pb);
        unsigned size = avio_rb32(pb);
        int64_t next = avio_tell(pb) + size;   /* start of the following chunk */
        char rc_eq_buf[128];

        if(!id)
            break;   /* zero id terminates the header */

        switch(id) {
        case MKBETAG('M', 'A', 'I', 'N'):
            avio_rb32(pb); /* nb_streams */
            avio_rb32(pb); /* total bitrate */
            break;
        case MKBETAG('C', 'O', 'M', 'M'):
            st = avformat_new_stream(s, NULL);
            if (!st)
                goto fail;

            avpriv_set_pts_info(st, 64, 1, 1000000);  /* microsecond timestamps */

            codec = st->codec;
            /* generic info */
            codec->codec_id = avio_rb32(pb);
            codec->codec_type = avio_r8(pb);
            codec->bit_rate = avio_rb32(pb);
            codec->flags = avio_rb32(pb);
            codec->flags2 = avio_rb32(pb);
            codec->debug = avio_rb32(pb);
            if (codec->flags & CODEC_FLAG_GLOBAL_HEADER) {
                if (ff_alloc_extradata(codec, avio_rb32(pb)))
                    return AVERROR(ENOMEM);
                avio_read(pb, codec->extradata, codec->extradata_size);
            }
            /* the stream-specific chunk immediately follows COMM */
            avio_seek(pb, next, SEEK_SET);
            id = avio_rb32(pb);
            size = avio_rb32(pb);
            next = avio_tell(pb) + size;
            switch(id) {
            case MKBETAG('S', 'T', 'V', 'I'):
                codec->time_base.num = avio_rb32(pb);
                codec->time_base.den = avio_rb32(pb);
                codec->width = avio_rb16(pb);
                codec->height = avio_rb16(pb);
                codec->gop_size = avio_rb16(pb);
                codec->pix_fmt = avio_rb32(pb);
                codec->qmin = avio_r8(pb);
                codec->qmax = avio_r8(pb);
                codec->max_qdiff = avio_r8(pb);
                codec->qcompress = avio_rb16(pb) / 10000.0;
                codec->qblur = avio_rb16(pb) / 10000.0;
                codec->bit_rate_tolerance = avio_rb32(pb);
                avio_get_str(pb, INT_MAX, rc_eq_buf, sizeof(rc_eq_buf));
                codec->rc_eq = av_strdup(rc_eq_buf);   /* freed in ffm_close() */
                codec->rc_max_rate = avio_rb32(pb);
                codec->rc_min_rate = avio_rb32(pb);
                codec->rc_buffer_size = avio_rb32(pb);
                codec->i_quant_factor = av_int2double(avio_rb64(pb));
                codec->b_quant_factor = av_int2double(avio_rb64(pb));
                codec->i_quant_offset = av_int2double(avio_rb64(pb));
                codec->b_quant_offset = av_int2double(avio_rb64(pb));
                codec->dct_algo = avio_rb32(pb);
                codec->strict_std_compliance = avio_rb32(pb);
                codec->max_b_frames = avio_rb32(pb);
                codec->mpeg_quant = avio_rb32(pb);
                codec->intra_dc_precision = avio_rb32(pb);
                codec->me_method = avio_rb32(pb);
                codec->mb_decision = avio_rb32(pb);
                codec->nsse_weight = avio_rb32(pb);
                codec->frame_skip_cmp = avio_rb32(pb);
                codec->rc_buffer_aggressivity = av_int2double(avio_rb64(pb));
                codec->codec_tag = avio_rb32(pb);
                codec->thread_count = avio_r8(pb);
                codec->coder_type = avio_rb32(pb);
                codec->me_cmp = avio_rb32(pb);
                codec->me_subpel_quality = avio_rb32(pb);
                codec->me_range = avio_rb32(pb);
                codec->keyint_min = avio_rb32(pb);
                codec->scenechange_threshold = avio_rb32(pb);
                codec->b_frame_strategy = avio_rb32(pb);
                /* later revisions store qcompress/qblur again, as doubles */
                codec->qcompress = av_int2double(avio_rb64(pb));
                codec->qblur = av_int2double(avio_rb64(pb));
                codec->max_qdiff = avio_rb32(pb);
                codec->refs = avio_rb32(pb);
                break;
            case MKBETAG('S', 'T', 'A', 'U'):
                codec->sample_rate = avio_rb32(pb);
                codec->channels = avio_rl16(pb);
                codec->frame_size = avio_rl16(pb);
                break;
            }
            break;
        }
        avio_seek(pb, next, SEEK_SET);
    }

    /* get until end of block reached */
    while ((avio_tell(pb) % ffm->packet_size) != 0)
        avio_r8(pb);

    /* init packet demux */
    ffm->packet_ptr = ffm->packet;
    ffm->packet_end = ffm->packet;
    ffm->frame_offset = 0;
    ffm->dts = 0;
    ffm->read_state = READ_HEADER;
    ffm->first_packet = 1;
    return 0;
 fail:
    ffm_close(s);
    return -1;
}
 
/* Parse an FFM header.  FFM2 files are delegated to ffm2_read_header();
 * the legacy FFM1 layout read here is a flat (non-chunked) stream table
 * whose field order must match the historical writer exactly. */
static int ffm_read_header(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    AVStream *st;
    AVIOContext *pb = s->pb;
    AVCodecContext *codec;
    int i, nb_streams;
    uint32_t tag;

    /* header */
    tag = avio_rl32(pb);
    if (tag == MKTAG('F', 'F', 'M', '2'))
        return ffm2_read_header(s);
    if (tag != MKTAG('F', 'F', 'M', '1'))
        goto fail;
    ffm->packet_size = avio_rb32(pb);
    if (ffm->packet_size != FFM_PACKET_SIZE)
        goto fail;
    ffm->write_index = avio_rb64(pb);
    /* get also filesize */
    if (pb->seekable) {
        ffm->file_size = avio_size(pb);
        /* NOTE(review): adjust_write_index() is disabled via "&& 0" —
         * presumably deliberate; confirm before re-enabling */
        if (ffm->write_index && 0)
            adjust_write_index(s);
    } else {
        ffm->file_size = (UINT64_C(1) << 63) - 1;  /* effectively unbounded */
    }

    nb_streams = avio_rb32(pb);
    avio_rb32(pb); /* total bitrate */
    /* read each stream */
    for(i=0;i<nb_streams;i++) {
        char rc_eq_buf[128];

        st = avformat_new_stream(s, NULL);
        if (!st)
            goto fail;

        avpriv_set_pts_info(st, 64, 1, 1000000);  /* microsecond timestamps */

        codec = st->codec;
        /* generic info */
        codec->codec_id = avio_rb32(pb);
        codec->codec_type = avio_r8(pb); /* codec_type */
        codec->bit_rate = avio_rb32(pb);
        codec->flags = avio_rb32(pb);
        codec->flags2 = avio_rb32(pb);
        codec->debug = avio_rb32(pb);
        /* specific info */
        switch(codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            codec->time_base.num = avio_rb32(pb);
            codec->time_base.den = avio_rb32(pb);
            codec->width = avio_rb16(pb);
            codec->height = avio_rb16(pb);
            codec->gop_size = avio_rb16(pb);
            codec->pix_fmt = avio_rb32(pb);
            codec->qmin = avio_r8(pb);
            codec->qmax = avio_r8(pb);
            codec->max_qdiff = avio_r8(pb);
            codec->qcompress = avio_rb16(pb) / 10000.0;
            codec->qblur = avio_rb16(pb) / 10000.0;
            codec->bit_rate_tolerance = avio_rb32(pb);
            avio_get_str(pb, INT_MAX, rc_eq_buf, sizeof(rc_eq_buf));
            codec->rc_eq = av_strdup(rc_eq_buf);   /* freed in ffm_close() */
            codec->rc_max_rate = avio_rb32(pb);
            codec->rc_min_rate = avio_rb32(pb);
            codec->rc_buffer_size = avio_rb32(pb);
            codec->i_quant_factor = av_int2double(avio_rb64(pb));
            codec->b_quant_factor = av_int2double(avio_rb64(pb));
            codec->i_quant_offset = av_int2double(avio_rb64(pb));
            codec->b_quant_offset = av_int2double(avio_rb64(pb));
            codec->dct_algo = avio_rb32(pb);
            codec->strict_std_compliance = avio_rb32(pb);
            codec->max_b_frames = avio_rb32(pb);
            codec->mpeg_quant = avio_rb32(pb);
            codec->intra_dc_precision = avio_rb32(pb);
            codec->me_method = avio_rb32(pb);
            codec->mb_decision = avio_rb32(pb);
            codec->nsse_weight = avio_rb32(pb);
            codec->frame_skip_cmp = avio_rb32(pb);
            codec->rc_buffer_aggressivity = av_int2double(avio_rb64(pb));
            codec->codec_tag = avio_rb32(pb);
            codec->thread_count = avio_r8(pb);
            codec->coder_type = avio_rb32(pb);
            codec->me_cmp = avio_rb32(pb);
            codec->me_subpel_quality = avio_rb32(pb);
            codec->me_range = avio_rb32(pb);
            codec->keyint_min = avio_rb32(pb);
            codec->scenechange_threshold = avio_rb32(pb);
            codec->b_frame_strategy = avio_rb32(pb);
            /* later revisions store qcompress/qblur again, as doubles */
            codec->qcompress = av_int2double(avio_rb64(pb));
            codec->qblur = av_int2double(avio_rb64(pb));
            codec->max_qdiff = avio_rb32(pb);
            codec->refs = avio_rb32(pb);
            break;
        case AVMEDIA_TYPE_AUDIO:
            codec->sample_rate = avio_rb32(pb);
            codec->channels = avio_rl16(pb);
            codec->frame_size = avio_rl16(pb);
            break;
        default:
            goto fail;
        }
        if (codec->flags & CODEC_FLAG_GLOBAL_HEADER) {
            if (ff_alloc_extradata(codec, avio_rb32(pb)))
                return AVERROR(ENOMEM);
            avio_read(pb, codec->extradata, codec->extradata_size);
        }
    }

    /* get until end of block reached */
    while ((avio_tell(pb) % ffm->packet_size) != 0)
        avio_r8(pb);

    /* init packet demux */
    ffm->packet_ptr = ffm->packet;
    ffm->packet_end = ffm->packet;
    ffm->frame_offset = 0;
    ffm->dts = 0;
    ffm->read_state = READ_HEADER;
    ffm->first_packet = 1;
    return 0;
 fail:
    ffm_close(s);
    return -1;
}
 
/* Read one frame into pkt; returns < 0 on EOF/error/EAGAIN.
 * Implemented as a two-state machine (READ_HEADER / READ_DATA) so that
 * an EAGAIN while waiting for payload keeps the already-parsed header
 * in ffm->header for the next call. */
static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int size;
    FFMContext *ffm = s->priv_data;
    int duration, ret;

    switch(ffm->read_state) {
    case READ_HEADER:
        if ((ret = ffm_is_avail_data(s, FRAME_HEADER_SIZE+4)) < 0)
            return ret;

        av_dlog(s, "pos=%08"PRIx64" spos=%"PRIx64", write_index=%"PRIx64" size=%"PRIx64"\n",
                avio_tell(s->pb), s->pb->pos, ffm->write_index, ffm->file_size);
        if (ffm_read_data(s, ffm->header, FRAME_HEADER_SIZE, 1) !=
            FRAME_HEADER_SIZE)
            return -1;
        if (ffm->header[1] & FLAG_DTS)
            /* the header carries an extra 32-bit pts-dts delta */
            if (ffm_read_data(s, ffm->header+16, 4, 1) != 4)
                return -1;
        ffm->read_state = READ_DATA;
        /* fall thru */
    case READ_DATA:
        size = AV_RB24(ffm->header + 2);   /* payload size */
        if ((ret = ffm_is_avail_data(s, size)) < 0)
            return ret;

        duration = AV_RB24(ffm->header + 5);

        if (av_new_packet(pkt, size) < 0) {
            return AVERROR(ENOMEM);
        }
        pkt->stream_index = ffm->header[0];
        if ((unsigned)pkt->stream_index >= s->nb_streams) {
            av_log(s, AV_LOG_ERROR, "invalid stream index %d\n", pkt->stream_index);
            av_free_packet(pkt);
            ffm->read_state = READ_HEADER;
            return -1;
        }
        pkt->pos = avio_tell(s->pb);
        if (ffm->header[1] & FLAG_KEY_FRAME)
            pkt->flags |= AV_PKT_FLAG_KEY;

        ffm->read_state = READ_HEADER;
        if (ffm_read_data(s, pkt->data, size, 0) != size) {
            /* bad case: desynchronized packet. we cancel all the packet loading */
            av_free_packet(pkt);
            return -1;
        }
        pkt->pts = AV_RB64(ffm->header+8);
        if (ffm->header[1] & FLAG_DTS)
            pkt->dts = pkt->pts - AV_RB32(ffm->header+16);
        else
            pkt->dts = pkt->pts;
        pkt->duration = duration;
        break;
    }
    return 0;
}
 
/* seek to a given time in the file. The file read pointer is
   positioned at or before pts. XXX: the following code is quite
   approximative */
static int ffm_seek(AVFormatContext *s, int stream_index, int64_t wanted_pts, int flags)
{
    FFMContext *ffm = s->priv_data;
    int64_t pos_min, pos_max, pos;
    int64_t pts_min, pts_max, pts;
    double pos1;

    av_dlog(s, "wanted_pts=%0.6f\n", wanted_pts / 1000000.0);
    /* find the position using linear interpolation (better than
       dichotomy in typical cases) */
    if (ffm->write_index && ffm->write_index < ffm->file_size) {
        /* live feed with wraparound: search the half that can contain pts */
        if (get_dts(s, FFM_PACKET_SIZE) < wanted_pts) {
            pos_min = FFM_PACKET_SIZE;
            pos_max = ffm->write_index - FFM_PACKET_SIZE;
        } else {
            pos_min = ffm->write_index;
            pos_max = ffm->file_size - FFM_PACKET_SIZE;
        }
    } else {
        pos_min = FFM_PACKET_SIZE;
        pos_max = ffm->file_size - FFM_PACKET_SIZE;
    }
    while (pos_min <= pos_max) {
        pts_min = get_dts(s, pos_min);
        pts_max = get_dts(s, pos_max);
        /* wanted_pts outside the remaining interval: clamp to its edge */
        if (pts_min > wanted_pts || pts_max <= wanted_pts) {
            pos = pts_min > wanted_pts ? pos_min : pos_max;
            goto found;
        }
        /* linear interpolation */
        pos1 = (double)(pos_max - pos_min) * (double)(wanted_pts - pts_min) /
            (double)(pts_max - pts_min);
        pos = (((int64_t)pos1) / FFM_PACKET_SIZE) * FFM_PACKET_SIZE;  /* packet-align */
        if (pos <= pos_min)
            pos = pos_min;
        else if (pos >= pos_max)
            pos = pos_max;
        pts = get_dts(s, pos);
        /* check if we are lucky */
        if (pts == wanted_pts) {
            goto found;
        } else if (pts > wanted_pts) {
            pos_max = pos - FFM_PACKET_SIZE;
        } else {
            pos_min = pos + FFM_PACKET_SIZE;
        }
    }
    pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;

 found:
    if (ffm_seek1(s, pos) < 0)
        return -1;

    /* reset read state */
    ffm->read_state = READ_HEADER;
    ffm->packet_ptr = ffm->packet;
    ffm->packet_end = ffm->packet;
    ffm->first_packet = 1;

    return 0;
}
 
/* Probe for the "FFM1"/"FFM2" magic at the start of the buffer. */
static int ffm_probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;

    if (b[0] != 'F' || b[1] != 'F' || b[2] != 'M')
        return 0;
    if (b[3] != '1' && b[3] != '2')
        return 0;
    return AVPROBE_SCORE_MAX + 1;
}
 
/* Demuxer registration for the FFM (ffserver live feed) format. */
AVInputFormat ff_ffm_demuxer = {
    .name           = "ffm",
    .long_name      = NULL_IF_CONFIG_SMALL("FFM (FFserver live feed)"),
    .priv_data_size = sizeof(FFMContext),
    .read_probe     = ffm_probe,
    .read_header    = ffm_read_header,
    .read_packet    = ffm_read_packet,
    .read_close     = ffm_close,
    .read_seek      = ffm_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/ffmenc.c
0,0 → 1,281
/*
* FFM (ffserver live feed) muxer
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "libavutil/avassert.h"
#include "libavutil/parseutils.h"
#include "avformat.h"
#include "internal.h"
#include "ffm.h"
 
/* Zero-pad the current packet buffer, emit it to the output with its
 * fixed-size FFM header, and reset the buffer for the next packet. */
static void flush_packet(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    AVIOContext *pb = s->pb;
    int pad, flags;

    /* zero-fill the unused tail of the payload */
    pad = ffm->packet_end - ffm->packet_ptr;
    memset(ffm->packet_ptr, 0, pad);

    av_assert1(avio_tell(pb) % ffm->packet_size == 0);

    /* packet header: sync word, fill size, dts, frame offset (+flags) */
    avio_wb16(pb, PACKET_ID);
    avio_wb16(pb, pad);
    avio_wb64(pb, ffm->dts);
    flags = ffm->frame_offset;
    if (ffm->first_packet)
        flags |= 0x8000;    /* discontinuity/first-packet tag */
    avio_wb16(pb, flags);
    avio_write(pb, ffm->packet, ffm->packet_end - ffm->packet);
    avio_flush(pb);

    /* prepare next packet */
    ffm->frame_offset = 0; /* no key frame */
    ffm->packet_ptr   = ffm->packet;
    ffm->first_packet = 0;
}
 
/* 'first' is true if first data of a frame */
static void ffm_write_data(AVFormatContext *s,
const uint8_t *buf, int size,
int64_t dts, int header)
{
FFMContext *ffm = s->priv_data;
int len;
 
if (header && ffm->frame_offset == 0) {
ffm->frame_offset = ffm->packet_ptr - ffm->packet + FFM_HEADER_SIZE;
ffm->dts = dts;
}
 
/* write as many packets as needed */
while (size > 0) {
len = ffm->packet_end - ffm->packet_ptr;
if (len > size)
len = size;
memcpy(ffm->packet_ptr, buf, len);
 
ffm->packet_ptr += len;
buf += len;
size -= len;
if (ffm->packet_ptr >= ffm->packet_end)
flush_packet(s);
}
}
 
/* Close the dynamic buffer dpb and emit its contents to pb as a
 * size-prefixed chunk tagged with 'id'. */
static void write_header_chunk(AVIOContext *pb, AVIOContext *dpb, unsigned id)
{
    uint8_t *chunk;
    int chunk_size;

    chunk_size = avio_close_dyn_buf(dpb, &chunk);
    avio_wb32(pb, id);
    avio_wb32(pb, chunk_size);
    avio_write(pb, chunk, chunk_size);
    av_free(chunk);
}
 
/**
 * Write the FFM2 file header: a global MAIN chunk, then for each stream
 * a COMM chunk (generic codec parameters) followed by a STVI (video) or
 * STAU (audio) chunk, each serialized through a dynamic buffer so its
 * byte size can be prefixed.  The header is zero-padded up to the next
 * FFM_PACKET_SIZE boundary; the field order must match ffm2_read_header().
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int ffm_write_header(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    AVDictionaryEntry *t;
    AVStream *st;
    AVIOContext *pb = s->pb;
    AVCodecContext *codec;
    int bit_rate, i;

    /* extra parentheses: the assignment result is tested intentionally */
    if ((t = av_dict_get(s->metadata, "creation_time", NULL, 0))) {
        int ret = av_parse_time(&ffm->start_time, t->value, 0);
        if (ret < 0)
            return ret;
    }

    ffm->packet_size = FFM_PACKET_SIZE;

    /* header */
    avio_wl32(pb, MKTAG('F', 'F', 'M', '2'));
    avio_wb32(pb, ffm->packet_size);
    avio_wb64(pb, 0); /* current write position */

    /* MAIN chunk: stream count and total bitrate */
    if (avio_open_dyn_buf(&pb) < 0)
        return AVERROR(ENOMEM);

    avio_wb32(pb, s->nb_streams);
    bit_rate = 0;
    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        bit_rate += st->codec->bit_rate;
    }
    avio_wb32(pb, bit_rate);

    write_header_chunk(s->pb, pb, MKBETAG('M', 'A', 'I', 'N'));

    /* list of streams */
    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        avpriv_set_pts_info(st, 64, 1, 1000000);  /* microsecond timestamps */
        if (avio_open_dyn_buf(&pb) < 0)
            return AVERROR(ENOMEM);

        codec = st->codec;
        /* generic info */
        avio_wb32(pb, codec->codec_id);
        avio_w8(pb, codec->codec_type);
        avio_wb32(pb, codec->bit_rate);
        avio_wb32(pb, codec->flags);
        avio_wb32(pb, codec->flags2);
        avio_wb32(pb, codec->debug);
        if (codec->flags & CODEC_FLAG_GLOBAL_HEADER) {
            avio_wb32(pb, codec->extradata_size);
            avio_write(pb, codec->extradata, codec->extradata_size);
        }
        write_header_chunk(s->pb, pb, MKBETAG('C', 'O', 'M', 'M'));
        if (avio_open_dyn_buf(&pb) < 0)
            return AVERROR(ENOMEM);
        /* specific info */
        switch (codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            avio_wb32(pb, codec->time_base.num);
            avio_wb32(pb, codec->time_base.den);
            avio_wb16(pb, codec->width);
            avio_wb16(pb, codec->height);
            avio_wb16(pb, codec->gop_size);
            avio_wb32(pb, codec->pix_fmt);
            avio_w8(pb, codec->qmin);
            avio_w8(pb, codec->qmax);
            avio_w8(pb, codec->max_qdiff);
            avio_wb16(pb, (int) (codec->qcompress * 10000.0));
            avio_wb16(pb, (int) (codec->qblur * 10000.0));
            avio_wb32(pb, codec->bit_rate_tolerance);
            avio_put_str(pb, codec->rc_eq ? codec->rc_eq : "tex^qComp");
            avio_wb32(pb, codec->rc_max_rate);
            avio_wb32(pb, codec->rc_min_rate);
            avio_wb32(pb, codec->rc_buffer_size);
            avio_wb64(pb, av_double2int(codec->i_quant_factor));
            avio_wb64(pb, av_double2int(codec->b_quant_factor));
            avio_wb64(pb, av_double2int(codec->i_quant_offset));
            avio_wb64(pb, av_double2int(codec->b_quant_offset));
            avio_wb32(pb, codec->dct_algo);
            avio_wb32(pb, codec->strict_std_compliance);
            avio_wb32(pb, codec->max_b_frames);
            avio_wb32(pb, codec->mpeg_quant);
            avio_wb32(pb, codec->intra_dc_precision);
            avio_wb32(pb, codec->me_method);
            avio_wb32(pb, codec->mb_decision);
            avio_wb32(pb, codec->nsse_weight);
            avio_wb32(pb, codec->frame_skip_cmp);
            avio_wb64(pb, av_double2int(codec->rc_buffer_aggressivity));
            avio_wb32(pb, codec->codec_tag);
            avio_w8(pb, codec->thread_count);
            avio_wb32(pb, codec->coder_type);
            avio_wb32(pb, codec->me_cmp);
            avio_wb32(pb, codec->me_subpel_quality);
            avio_wb32(pb, codec->me_range);
            avio_wb32(pb, codec->keyint_min);
            avio_wb32(pb, codec->scenechange_threshold);
            avio_wb32(pb, codec->b_frame_strategy);
            /* qcompress/qblur stored again as full doubles */
            avio_wb64(pb, av_double2int(codec->qcompress));
            avio_wb64(pb, av_double2int(codec->qblur));
            avio_wb32(pb, codec->max_qdiff);
            avio_wb32(pb, codec->refs);
            write_header_chunk(s->pb, pb, MKBETAG('S', 'T', 'V', 'I'));
            break;
        case AVMEDIA_TYPE_AUDIO:
            avio_wb32(pb, codec->sample_rate);
            avio_wl16(pb, codec->channels);
            avio_wl16(pb, codec->frame_size);
            write_header_chunk(s->pb, pb, MKBETAG('S', 'T', 'A', 'U'));
            break;
        default:
            return -1;
        }
    }
    pb = s->pb;

    avio_wb64(pb, 0); // end of header

    /* flush until end of block reached */
    while ((avio_tell(pb) % ffm->packet_size) != 0)
        avio_w8(pb, 0);

    avio_flush(pb);

    /* init packet mux */
    ffm->packet_ptr = ffm->packet;
    ffm->packet_end = ffm->packet + ffm->packet_size - FFM_HEADER_SIZE;
    av_assert0(ffm->packet_end >= ffm->packet);
    ffm->frame_offset = 0;
    ffm->dts = 0;
    ffm->first_packet = 1;

    return 0;
}
 
/* Serialize one packet: a FRAME_HEADER_SIZE header (stream index, flags,
 * size, duration, pts), optionally extended by a 32-bit pts-dts delta,
 * followed by the payload, both routed through ffm_write_data(). */
static int ffm_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    FFMContext *ffm = s->priv_data;
    uint8_t header[FRAME_HEADER_SIZE+4];
    int header_size = FRAME_HEADER_SIZE;
    int64_t dts = ffm->start_time + pkt->dts;

    /* packet size & key_frame */
    header[0] = pkt->stream_index;
    header[1] = (pkt->flags & AV_PKT_FLAG_KEY) ? FLAG_KEY_FRAME : 0;
    AV_WB24(header+2, pkt->size);
    AV_WB24(header+5, pkt->duration);
    AV_WB64(header+8, ffm->start_time + pkt->pts);
    if (pkt->pts != pkt->dts) {
        header[1] |= FLAG_DTS;
        AV_WB32(header+16, pkt->pts - pkt->dts);
        header_size += 4;
    }
    ffm_write_data(s, header, header_size, dts, 1);
    ffm_write_data(s, pkt->data, pkt->size, dts, 0);

    return 0;
}
 
/* Flush any partially filled packet buffer at end of stream. */
static int ffm_write_trailer(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;

    if (ffm->packet_ptr != ffm->packet)
        flush_packet(s);

    return 0;
}
 
/* Muxer registration for the FFM (ffserver live feed) format. */
AVOutputFormat ff_ffm_muxer = {
    .name              = "ffm",
    .long_name         = NULL_IF_CONFIG_SMALL("FFM (FFserver live feed)"),
    .extensions        = "ffm",
    .priv_data_size    = sizeof(FFMContext),
    .audio_codec       = AV_CODEC_ID_MP2,
    .video_codec       = AV_CODEC_ID_MPEG1VIDEO,
    .write_header      = ffm_write_header,
    .write_packet      = ffm_write_packet,
    .write_trailer     = ffm_write_trailer,
    .flags             = AVFMT_TS_NEGATIVE,
};
/contrib/sdk/sources/ffmpeg/libavformat/ffmeta.h
0,0 → 1,29
/*
* Common data for metadata muxer/demuxer
* Copyright (c) 2010 Anton Khirnov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_FFMETA_H
#define AVFORMAT_FFMETA_H
 
#define ID_STRING ";FFMETADATA"
#define ID_CHAPTER "[CHAPTER]"
#define ID_STREAM "[STREAM]"
 
#endif /* AVFORMAT_FFMETA_H */
/contrib/sdk/sources/ffmpeg/libavformat/ffmetadec.c
0,0 → 1,175
/*
* Metadata demuxer
* Copyright (c) 2010 Anton Khirnov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/mathematics.h"
#include "avformat.h"
#include "ffmeta.h"
#include "internal.h"
#include "libavutil/dict.h"
 
/* Probe: the file must start with the ";FFMETADATA" signature. */
static int probe(AVProbeData *p)
{
    return memcmp(p->buf, ID_STRING, strlen(ID_STRING)) ? 0 : AVPROBE_SCORE_MAX;
}
 
/* Read one logical line into buf (NUL terminated, at most size-1 chars).
 * Backslash escapes are kept verbatim: the backslash and the following
 * byte are both copied, even if that byte is '\n'.  Comment lines
 * (starting with ';' or '#') and empty lines are skipped. */
static void get_line(AVIOContext *s, uint8_t *buf, int size)
{
    do {
        uint8_t c;
        int i = 0;

        while ((c = avio_r8(s))) {
            if (c == '\\') {
                /* keep the backslash, then unconditionally take the next byte */
                if (i < size - 1)
                    buf[i++] = c;
                c = avio_r8(s);
            } else if (c == '\n')
                break;

            if (i < size - 1)
                buf[i++] = c;
        }
        buf[i] = 0;
    } while (!url_feof(s) && (buf[0] == ';' || buf[0] == '#' || buf[0] == 0));
}
 
/* Parse a [CHAPTER] section: an optional TIMEBASE=num/den line, then
 * START= and END= timestamps.  A missing start falls back to the end of
 * the previous chapter (or 0); a missing end becomes AV_NOPTS_VALUE.
 * Returns the new chapter, or NULL on allocation failure. */
static AVChapter *read_chapter(AVFormatContext *s)
{
    uint8_t line[256];
    int64_t start, end;
    AVRational tb = {1, 1e9};   /* default timebase: nanoseconds */

    get_line(s->pb, line, sizeof(line));

    if (sscanf(line, "TIMEBASE=%d/%d", &tb.num, &tb.den))
        get_line(s->pb, line, sizeof(line));
    if (!sscanf(line, "START=%"SCNd64, &start)) {
        av_log(s, AV_LOG_ERROR, "Expected chapter start timestamp, found %s.\n", line);
        start = (s->nb_chapters && s->chapters[s->nb_chapters - 1]->end != AV_NOPTS_VALUE) ?
                 s->chapters[s->nb_chapters - 1]->end : 0;
    } else
        get_line(s->pb, line, sizeof(line));

    if (!sscanf(line, "END=%"SCNd64, &end)) {
        av_log(s, AV_LOG_ERROR, "Expected chapter end timestamp, found %s.\n", line);
        end = AV_NOPTS_VALUE;
    }

    return avpriv_new_chapter(s, s->nb_chapters, tb, start, end, NULL);
}
 
/* Return a newly allocated, NUL-terminated copy of buf[0..size) with
 * backslash escapes resolved (each '\\' is dropped and the following
 * byte copied literally).  Returns NULL on allocation failure; the
 * caller owns the returned buffer. */
static uint8_t *unescape(uint8_t *buf, int size)
{
    uint8_t *out = av_malloc(size + 1);
    uint8_t *dst = out;
    uint8_t *src = buf;

    if (!out)
        return NULL;

    while (src < buf + size) {
        if (*src == '\\')
            src++;
        *dst++ = *src++;
    }
    *dst = 0;
    return out;
}
 
/* Parse one "key=value" line into dictionary *m; '=' and '\\' may be
 * backslash-escaped in the key part.  Returns 0 on success (or when the
 * line has no unescaped '=', which is silently ignored), or
 * AVERROR(ENOMEM) on allocation failure. */
static int read_tag(uint8_t *line, AVDictionary **m)
{
    uint8_t *key, *value, *p = line;

    /* find first not escaped '=' */
    while (1) {
        if (*p == '=')
            break;
        else if (*p == '\\')
            p++;            /* skip over the escaped character */

        if (*p++)
            continue;

        return 0;           /* end of line reached without finding '=' */
    }

    if (!(key = unescape(line, p - line)))
        return AVERROR(ENOMEM);
    if (!(value = unescape(p + 1, strlen(p + 1)))) {
        av_free(key);
        return AVERROR(ENOMEM);
    }

    /* the dictionary takes ownership of both allocated strings */
    av_dict_set(m, key, value, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
    return 0;
}
 
/*
 * Read the whole ffmetadata file.  Global tags come first; each [STREAM]
 * or [CHAPTER] section header redirects `m` to the corresponding
 * metadata dictionary, so subsequent tag lines land in the right place.
 * Afterwards the container duration is derived from the last chapter's
 * end timestamp, if any chapters were present.
 */
static int read_header(AVFormatContext *s)
{
    AVDictionary **m = &s->metadata;   /* dictionary tags currently go to */
    uint8_t line[1024];

    while (!url_feof(s->pb)) {
        get_line(s->pb, line, sizeof(line));

        if (!memcmp(line, ID_STREAM, strlen(ID_STREAM))) {
            /* stream section: create a placeholder data stream */
            AVStream *st = avformat_new_stream(s, NULL);

            if (!st)
                return AVERROR(ENOMEM);

            st->codec->codec_type = AVMEDIA_TYPE_DATA;
            st->codec->codec_id   = AV_CODEC_ID_FFMETADATA;

            m = &st->metadata;
        } else if (!memcmp(line, ID_CHAPTER, strlen(ID_CHAPTER))) {
            AVChapter *ch = read_chapter(s);

            if (!ch)
                return AVERROR(ENOMEM);

            m = &ch->metadata;
        } else
            read_tag(line, m);   /* errors from malformed tags are ignored */
    }

    s->start_time = 0;
    if (s->nb_chapters)
        s->duration = av_rescale_q(s->chapters[s->nb_chapters - 1]->end,
                                   s->chapters[s->nb_chapters - 1]->time_base,
                                   AV_TIME_BASE_Q);

    return 0;
}
 
/* Pure-metadata format: there are never any data packets to deliver. */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return AVERROR_EOF;
}
 
/* Demuxer registration: parses FFmpeg's textual metadata format. */
AVInputFormat ff_ffmetadata_demuxer = {
    .name        = "ffmetadata",
    .long_name   = NULL_IF_CONFIG_SMALL("FFmpeg metadata in text"),
    .read_probe  = probe,
    .read_header = read_header,
    .read_packet = read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/ffmetaenc.c
0,0 → 1,99
/*
* Metadata muxer
* Copyright (c) 2010 Anton Khirnov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <inttypes.h>
 
#include "avformat.h"
#include "ffmeta.h"
#include "libavutil/dict.h"
 
 
/* Write str to s, prefixing every ffmetadata metacharacter
 * ('#', ';', '=', '\\', '\n') with a backslash. */
static void write_escape_str(AVIOContext *s, const uint8_t *str)
{
    const uint8_t *p;

    for (p = str; *p; p++) {
        switch (*p) {
        case '#':
        case ';':
        case '=':
        case '\\':
        case '\n':
            avio_w8(s, '\\');
            break;
        default:
            break;
        }
        avio_w8(s, *p);
    }
}
 
/* Emit every entry of m as an escaped "key=value" line. */
static void write_tags(AVIOContext *s, AVDictionary *m)
{
    AVDictionaryEntry *entry = NULL;

    /* "" with AV_DICT_IGNORE_SUFFIX enumerates all keys */
    while ((entry = av_dict_get(m, "", entry, AV_DICT_IGNORE_SUFFIX)) != NULL) {
        write_escape_str(s, entry->key);
        avio_w8(s, '=');
        write_escape_str(s, entry->value);
        avio_w8(s, '\n');
    }
}
 
/* Emit the ID_STRING magic, the format version digit and a newline. */
static int write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;

    avio_write(pb, ID_STRING, sizeof(ID_STRING) - 1);
    avio_w8(pb, '1');   /* format version */
    avio_w8(pb, '\n');
    avio_flush(pb);
    return 0;
}
 
/*
 * All actual output happens here: global tags first, then one [STREAM]
 * section per stream and one [CHAPTER] section per chapter (with its
 * timebase and start/end timestamps before the tags).
 */
static int write_trailer(AVFormatContext *s)
{
    int i;

    write_tags(s->pb, s->metadata);

    for (i = 0; i < s->nb_streams; i++) {
        avio_write(s->pb, ID_STREAM, sizeof(ID_STREAM) - 1);
        avio_w8(s->pb, '\n');
        write_tags(s->pb, s->streams[i]->metadata);
    }

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *ch = s->chapters[i];
        avio_write(s->pb, ID_CHAPTER, sizeof(ID_CHAPTER) - 1);
        avio_w8(s->pb, '\n');
        avio_printf(s->pb, "TIMEBASE=%d/%d\n", ch->time_base.num, ch->time_base.den);
        avio_printf(s->pb, "START=%"PRId64"\n", ch->start);
        avio_printf(s->pb, "END=%"PRId64"\n", ch->end);
        write_tags(s->pb, ch->metadata);
    }

    return 0;
}
 
/* Packets carry no payload for this muxer; accept and discard them. */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    return 0;
}
 
/* Muxer registration: writes FFmpeg's textual metadata format.
 * AVFMT_NOSTREAMS allows muxing with zero media streams. */
AVOutputFormat ff_ffmetadata_muxer = {
    .name          = "ffmetadata",
    .long_name     = NULL_IF_CONFIG_SMALL("FFmpeg metadata in text"),
    .extensions    = "ffmeta",
    .write_header  = write_header,
    .write_packet  = write_packet,
    .write_trailer = write_trailer,
    .flags         = AVFMT_NOTIMESTAMPS | AVFMT_NOSTREAMS,
};
/contrib/sdk/sources/ffmpeg/libavformat/file.c
0,0 → 1,221
/*
* buffered file I/O
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include <fcntl.h>
//#if HAVE_IO_H
//#include <io.h>
//#endif
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <sys/stat.h>
#include <stdlib.h>
#include "os_support.h"
#include "url.h"
 
/* Some systems may not have S_ISFIFO */
#ifndef S_ISFIFO
# ifdef S_IFIFO
# define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO)
# else
# define S_ISFIFO(m) 0
# endif
#endif
 
/* standard file protocol */
 
/* Private state shared by the file and pipe protocols. */
typedef struct FileContext {
    const AVClass *class;   /* required first member for AVOption support */
    int fd;                 /* underlying file descriptor */
    int trunc;              /* "truncate" option: O_TRUNC on write open */
    int blocksize;          /* cap on a single read()/write() size */
} FileContext;

/* Options for the "file" protocol (truncate + blocksize). */
static const AVOption file_options[] = {
    { "truncate", "Truncate existing files on write", offsetof(FileContext, trunc), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "blocksize", "set I/O operation maximum block size", offsetof(FileContext, blocksize), AV_OPT_TYPE_INT, { .i64 = INT_MAX }, 1, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL }
};

/* The "pipe" protocol only supports blocksize (truncation is meaningless). */
static const AVOption pipe_options[] = {
    { "blocksize", "set I/O operation maximum block size", offsetof(FileContext, blocksize), AV_OPT_TYPE_INT, { .i64 = INT_MAX }, 1, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL }
};

static const AVClass file_class = {
    .class_name = "file",
    .item_name  = av_default_item_name,
    .option     = file_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVClass pipe_class = {
    .class_name = "pipe",
    .item_name  = av_default_item_name,
    .option     = pipe_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Read up to blocksize bytes; map a read() failure to AVERROR(errno). */
static int file_read(URLContext *h, unsigned char *buf, int size)
{
    FileContext *c = h->priv_data;
    int n;

    if (size > c->blocksize)
        size = c->blocksize;
    n = read(c->fd, buf, size);
    return n < 0 ? AVERROR(errno) : n;
}
 
/* Write up to blocksize bytes; map a write() failure to AVERROR(errno). */
static int file_write(URLContext *h, const unsigned char *buf, int size)
{
    FileContext *c = h->priv_data;
    int n;

    if (size > c->blocksize)
        size = c->blocksize;
    n = write(c->fd, buf, size);
    return n < 0 ? AVERROR(errno) : n;
}
 
/* Expose the raw file descriptor to callers that need it. */
static int file_get_handle(URLContext *h)
{
    return ((FileContext *)h->priv_data)->fd;
}
 
/* Access checking is not implemented on this platform port: report the
 * resource as both readable and writable regardless of the requested mask.
 * NOTE(review): upstream implementations consult the filesystem here. */
static int file_check(URLContext *h, int mask)
{
    return AVIO_FLAG_READ | AVIO_FLAG_WRITE;
}
 
#if CONFIG_FILE_PROTOCOL
 
/*
 * Open a local file.  Strips the optional "file:" prefix, maps the AVIO
 * read/write flags to open(2) flags (honoring the "truncate" option for
 * write modes), and marks FIFOs as streamed since they cannot seek.
 * Returns 0 on success or AVERROR(errno) on failure.
 */
static int file_open(URLContext *h, const char *filename, int flags)
{
    FileContext *c = h->priv_data;
    int access;
    int fd;
    struct stat st;

    av_strstart(filename, "file:", &filename);

    if (flags & AVIO_FLAG_WRITE && flags & AVIO_FLAG_READ) {
        access = O_CREAT | O_RDWR;
        if (c->trunc)
            access |= O_TRUNC;
    } else if (flags & AVIO_FLAG_WRITE) {
        access = O_CREAT | O_WRONLY;
        if (c->trunc)
            access |= O_TRUNC;
    } else {
        access = O_RDONLY;
    }
#ifdef O_BINARY
    access |= O_BINARY;   /* avoid CR/LF translation where it exists */
#endif
    fd = avpriv_open(filename, access, 0666);
    if (fd == -1)
        return AVERROR(errno);
    c->fd = fd;

    /* FIFOs are not seekable; flag them so generic code won't try */
    h->is_streamed = !fstat(fd, &st) && S_ISFIFO(st.st_mode);

    return 0;
}
 
/* XXX: use llseek */
static int64_t file_seek(URLContext *h, int64_t pos, int whence)
{
FileContext *c = h->priv_data;
int64_t ret;
 
if (whence == AVSEEK_SIZE) {
struct stat st;
ret = fstat(c->fd, &st);
return ret < 0 ? AVERROR(errno) : (S_ISFIFO(st.st_mode) ? 0 : st.st_size);
}
 
ret = lseek(c->fd, pos, whence);
 
return ret < 0 ? AVERROR(errno) : ret;
}
 
/* Close the underlying descriptor; propagate close()'s return value. */
static int file_close(URLContext *h)
{
    FileContext *c = h->priv_data;

    return close(c->fd);
}
 
/* Protocol registration for "file:" URLs. */
URLProtocol ff_file_protocol = {
    .name                = "file",
    .url_open            = file_open,
    .url_read            = file_read,
    .url_write           = file_write,
    .url_seek            = file_seek,
    .url_close           = file_close,
    .url_get_file_handle = file_get_handle,
    .url_check           = file_check,
    .priv_data_size      = sizeof(FileContext),
    .priv_data_class     = &file_class,
};
 
#endif /* CONFIG_FILE_PROTOCOL */
 
#if CONFIG_PIPE_PROTOCOL
 
/*
 * Open a "pipe:N" URL.  N is parsed as a decimal file descriptor number;
 * when absent or malformed, stdout (1) is used for writing and stdin (0)
 * for reading.  Pipes are always streamed (non-seekable).
 */
static int pipe_open(URLContext *h, const char *filename, int flags)
{
    FileContext *c = h->priv_data;
    int fd;
    char *final;
    av_strstart(filename, "pipe:", &filename);

    fd = strtol(filename, &final, 10);
    if ((filename == final) || *final) { /* No digits found, or something like 10ab */
        if (flags & AVIO_FLAG_WRITE) {
            fd = 1;   /* stdout */
        } else {
            fd = 0;   /* stdin */
        }
    }
#if HAVE_SETMODE
    setmode(fd, O_BINARY);   /* Windows: disable CR/LF translation */
#endif
    c->fd = fd;
    h->is_streamed = 1;
    return 0;
}
 
/* Protocol registration for "pipe:" URLs.  No url_close: the descriptors
 * (often stdin/stdout) are not owned by this protocol. */
URLProtocol ff_pipe_protocol = {
    .name                = "pipe",
    .url_open            = pipe_open,
    .url_read            = file_read,
    .url_write           = file_write,
    .url_get_file_handle = file_get_handle,
    .url_check           = file_check,
    .priv_data_size      = sizeof(FileContext),
    .priv_data_class     = &pipe_class,
};
 
#endif /* CONFIG_PIPE_PROTOCOL */
/contrib/sdk/sources/ffmpeg/libavformat/file_open.c
0,0 → 1,0
#include "libavutil/file_open.c"
/contrib/sdk/sources/ffmpeg/libavformat/filmstripdec.c
0,0 → 1,110
/*
* Adobe Filmstrip demuxer
* Copyright (c) 2010 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Adobe Filmstrip demuxer
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define RAND_TAG MKBETAG('R','a','n','d')
 
typedef struct {
    int leading;   /* padding rows appended after each frame's pixel data */
} FilmstripDemuxContext;
 
/*
 * Parse the Adobe Filmstrip trailer (the 36-byte footer at EOF holds all
 * stream parameters) and set up a single raw RGBA video stream.  Requires
 * a seekable input, since the footer must be read before the frame data.
 * Returns 0 on success or a negative AVERROR code.
 */
static int read_header(AVFormatContext *s)
{
    FilmstripDemuxContext *film = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;

    if (!s->pb->seekable)
        return AVERROR(EIO);

    avio_seek(pb, avio_size(pb) - 36, SEEK_SET);
    if (avio_rb32(pb) != RAND_TAG) {
        av_log(s, AV_LOG_ERROR, "magic number not found\n");
        return AVERROR_INVALIDDATA;
    }

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->nb_frames = avio_rb32(pb);
    if (avio_rb16(pb) != 0) {
        avpriv_request_sample(s, "Unsupported packing method");
        return AVERROR_PATCHWELCOME;
    }

    avio_skip(pb, 2);   /* reserved */
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codec->pix_fmt    = AV_PIX_FMT_RGBA;
    st->codec->codec_tag  = 0; /* no fourcc */
    st->codec->width      = avio_rb16(pb);
    st->codec->height     = avio_rb16(pb);
    film->leading         = avio_rb16(pb);

    /* Reject zero dimensions: read_packet() divides the file position by
     * width * (height + leading) * 4 and requests width * height * 4
     * bytes per packet, so a zero width or height would cause a division
     * by zero / degenerate packets. */
    if (st->codec->width <= 0 || st->codec->height <= 0) {
        av_log(s, AV_LOG_ERROR, "invalid image dimensions %dx%d\n",
               st->codec->width, st->codec->height);
        return AVERROR_INVALIDDATA;
    }

    avpriv_set_pts_info(st, 64, 1, avio_rb16(pb));

    avio_seek(pb, 0, SEEK_SET);   /* rewind to the start of the frame data */

    return 0;
}
 
/*
 * Read one frame.  Frames are stored back to back from offset 0:
 * width*height*4 bytes of RGBA pixels, followed by width*leading*4 bytes
 * of padding that is skipped.  dts is derived from the file position.
 */
static int read_packet(AVFormatContext *s,
                       AVPacket *pkt)
{
    FilmstripDemuxContext *film = s->priv_data;
    AVStream *st = s->streams[0];

    if (url_feof(s->pb))
        return AVERROR(EIO);
    /* frame index = byte offset / full frame stride (pixels + leading) */
    pkt->dts = avio_tell(s->pb) / (st->codec->width * (st->codec->height + film->leading) * 4);
    pkt->size = av_get_packet(s->pb, pkt, st->codec->width * st->codec->height * 4);
    avio_skip(s->pb, st->codec->width * film->leading * 4);
    if (pkt->size < 0)
        return pkt->size;
    pkt->flags |= AV_PKT_FLAG_KEY;   /* raw video: every frame is a keyframe */
    return 0;
}
 
/* Seek to the frame at `timestamp` by computing its byte offset directly
 * (frames are fixed-size: width*height*4 bytes of RGBA). */
static int read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *st = s->streams[stream_index];
    int64_t offset = FFMAX(timestamp, 0) * st->codec->width * st->codec->height * 4;

    return avio_seek(s->pb, offset, SEEK_SET) < 0 ? -1 : 0;
}
 
/* Demuxer registration for Adobe Filmstrip (.flm). */
AVInputFormat ff_filmstrip_demuxer = {
    .name           = "filmstrip",
    .long_name      = NULL_IF_CONFIG_SMALL("Adobe Filmstrip"),
    .priv_data_size = sizeof(FilmstripDemuxContext),
    .read_header    = read_header,
    .read_packet    = read_packet,
    .read_seek      = read_seek,
    .extensions     = "flm",
};
/contrib/sdk/sources/ffmpeg/libavformat/filmstripenc.c
0,0 → 1,84
/*
* Adobe Filmstrip muxer
* Copyright (c) 2010 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Adobe Filmstrip muxer
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
 
#define RAND_TAG MKBETAG('R','a','n','d')
 
typedef struct {
    int nb_frames;   /* frames written so far; recorded in the trailer */
} FilmstripMuxContext;
 
/* Filmstrip stores raw 32-bit RGBA only; refuse any other pixel format. */
static int write_header(AVFormatContext *s)
{
    if (s->streams[0]->codec->pix_fmt == AV_PIX_FMT_RGBA)
        return 0;

    av_log(s, AV_LOG_ERROR, "only AV_PIX_FMT_RGBA is supported\n");
    return AVERROR_INVALIDDATA;
}
 
/* Append raw frame bytes and count the frame for the trailer. */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    FilmstripMuxContext *film = s->priv_data;

    avio_write(s->pb, pkt->data, pkt->size);
    film->nb_frames++;
    return 0;
}
 
/*
 * Write the 36-byte Filmstrip footer after all frame data: magic, frame
 * count, packing method, dimensions, leading, frame rate and 16 reserved
 * bytes.
 */
static int write_trailer(AVFormatContext *s)
{
    FilmstripMuxContext *film = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    int i;

    avio_wb32(pb, RAND_TAG);
    avio_wb32(pb, film->nb_frames);
    avio_wb16(pb, 0); // packing method
    avio_wb16(pb, 0); // reserved
    avio_wb16(pb, st->codec->width);
    avio_wb16(pb, st->codec->height);
    avio_wb16(pb, 0); // leading
    /* NOTE(review): divides by time_base.num — assumes the muxer is always
     * handed a stream with a non-zero timebase numerator; confirm callers. */
    avio_wb16(pb, st->codec->time_base.den / st->codec->time_base.num);
    for (i = 0; i < 16; i++)
        avio_w8(pb, 0x00); // reserved

    return 0;
}
 
/* Muxer registration for Adobe Filmstrip (.flm): raw RGBA video only. */
AVOutputFormat ff_filmstrip_muxer = {
    .name           = "filmstrip",
    .long_name      = NULL_IF_CONFIG_SMALL("Adobe Filmstrip"),
    .extensions     = "flm",
    .priv_data_size = sizeof(FilmstripMuxContext),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_RAWVIDEO,
    .write_header   = write_header,
    .write_packet   = write_packet,
    .write_trailer  = write_trailer,
};
/contrib/sdk/sources/ffmpeg/libavformat/flac_picture.c
0,0 → 1,151
/*
* Raw FLAC picture parser
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "avformat.h"
#include "flac_picture.h"
#include "id3v2.h"
#include "internal.h"
 
/*
 * Parse a FLAC PICTURE metadata block (buf/buf_size) and attach it to s
 * as a new stream with AV_DISPOSITION_ATTACHED_PIC.  The block layout is:
 * 32-bit picture type, length-prefixed mimetype, length-prefixed
 * description, width/height, 8 skipped bytes (depth/colors), then the
 * length-prefixed image data.  Most malformed fields are tolerated unless
 * AV_EF_EXPLODE is set, in which case they become hard errors.
 * Returns 0 on success or a negative AVERROR code.
 */
int ff_flac_parse_picture(AVFormatContext *s, uint8_t *buf, int buf_size)
{
    const CodecMime *mime = ff_id3v2_mime_tags;
    enum AVCodecID id = AV_CODEC_ID_NONE;
    AVBufferRef *data = NULL;
    uint8_t mimetype[64], *desc = NULL;
    AVIOContext *pb = NULL;
    AVStream *st;
    int type, width, height;
    int len, ret = 0;

    /* wrap the raw block in a read-only AVIOContext for convenience */
    pb = avio_alloc_context(buf, buf_size, 0, NULL, NULL, NULL, NULL);
    if (!pb)
        return AVERROR(ENOMEM);

    /* read the picture type */
    type = avio_rb32(pb);
    if (type >= FF_ARRAY_ELEMS(ff_id3v2_picture_types) || type < 0) {
        av_log(s, AV_LOG_ERROR, "Invalid picture type: %d.\n", type);
        if (s->error_recognition & AV_EF_EXPLODE) {
            RETURN_ERROR(AVERROR_INVALIDDATA);
        }
        type = 0;   /* fall back to "Other" */
    }

    /* picture mimetype: maps to a codec id via the id3v2 mime table */
    len = avio_rb32(pb);
    if (len <= 0 ||
        avio_read(pb, mimetype, FFMIN(len, sizeof(mimetype) - 1)) != len) {
        av_log(s, AV_LOG_ERROR, "Could not read mimetype from an attached "
               "picture.\n");
        if (s->error_recognition & AV_EF_EXPLODE)
            ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    av_assert0(len < sizeof(mimetype));
    mimetype[len] = 0;

    while (mime->id != AV_CODEC_ID_NONE) {
        if (!strncmp(mime->str, mimetype, sizeof(mimetype))) {
            id = mime->id;
            break;
        }
        mime++;
    }
    if (id == AV_CODEC_ID_NONE) {
        av_log(s, AV_LOG_ERROR, "Unknown attached picture mimetype: %s.\n",
               mimetype);
        if (s->error_recognition & AV_EF_EXPLODE)
            ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    /* picture description (optional, becomes the "title" tag) */
    len = avio_rb32(pb);
    if (len > 0) {
        if (!(desc = av_malloc(len + 1))) {
            RETURN_ERROR(AVERROR(ENOMEM));
        }

        if (avio_read(pb, desc, len) != len) {
            av_log(s, AV_LOG_ERROR, "Error reading attached picture description.\n");
            if (s->error_recognition & AV_EF_EXPLODE)
                ret = AVERROR(EIO);
            goto fail;
        }
        desc[len] = 0;
    }

    /* picture metadata; depth and colors (8 bytes) are not used */
    width  = avio_rb32(pb);
    height = avio_rb32(pb);
    avio_skip(pb, 8);

    /* picture data */
    len = avio_rb32(pb);
    if (len <= 0) {
        av_log(s, AV_LOG_ERROR, "Invalid attached picture size: %d.\n", len);
        if (s->error_recognition & AV_EF_EXPLODE)
            ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    if (!(data = av_buffer_alloc(len))) {
        RETURN_ERROR(AVERROR(ENOMEM));
    }
    if (avio_read(pb, data->data, len) != len) {
        av_log(s, AV_LOG_ERROR, "Error reading attached picture data.\n");
        if (s->error_recognition & AV_EF_EXPLODE)
            ret = AVERROR(EIO);
        goto fail;
    }

    st = avformat_new_stream(s, NULL);
    if (!st) {
        RETURN_ERROR(AVERROR(ENOMEM));
    }

    /* the picture is delivered as a one-packet attached-pic stream;
     * ownership of `data` transfers to the packet */
    av_init_packet(&st->attached_pic);
    st->attached_pic.buf          = data;
    st->attached_pic.data         = data->data;
    st->attached_pic.size         = len;
    st->attached_pic.stream_index = st->index;
    st->attached_pic.flags       |= AV_PKT_FLAG_KEY;

    st->disposition      |= AV_DISPOSITION_ATTACHED_PIC;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = id;
    st->codec->width      = width;
    st->codec->height     = height;
    av_dict_set(&st->metadata, "comment", ff_id3v2_picture_types[type], 0);
    if (desc)
        av_dict_set(&st->metadata, "title", desc, AV_DICT_DONT_STRDUP_VAL);

    av_freep(&pb);

    return 0;

fail:
    av_buffer_unref(&data);
    av_freep(&desc);
    av_freep(&pb);

    return ret;
}
/contrib/sdk/sources/ffmpeg/libavformat/flac_picture.h
0,0 → 1,31
/*
* Raw FLAC picture parser
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_FLAC_PICTURE_H
#define AVFORMAT_FLAC_PICTURE_H

#include "avformat.h"

/* Set the local `ret` variable and jump to the function's `fail` label. */
#define RETURN_ERROR(code) do { ret = (code); goto fail; } while (0)

/**
 * Parse a FLAC PICTURE metadata block and attach it to s as a stream
 * with AV_DISPOSITION_ATTACHED_PIC.
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_flac_parse_picture(AVFormatContext *s, uint8_t *buf, int buf_size);

#endif /* AVFORMAT_FLAC_PICTURE_H */
/contrib/sdk/sources/ffmpeg/libavformat/flacdec.c
0,0 → 1,169
/*
* Raw FLAC demuxer
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/flac.h"
#include "avformat.h"
#include "flac_picture.h"
#include "internal.h"
#include "rawdec.h"
#include "oggdec.h"
#include "vorbiscomment.h"
#include "libavcodec/bytestream.h"
 
/*
 * Read a raw FLAC file header.  If the "fLaC" marker is missing the file
 * is assumed headerless and parsing is left to the parser.  Otherwise all
 * metadata blocks are walked: STREAMINFO (must come first, exactly once)
 * becomes codec extradata, CUESHEET entries become chapters, PICTURE
 * blocks become attached-pic streams and VORBIS_COMMENT fills the
 * container metadata.  `buffer` is owned by this loop and must be freed
 * (or handed off) on every path; the `fail` label releases it on error.
 */
static int flac_read_header(AVFormatContext *s)
{
    int ret, metadata_last=0, metadata_type, metadata_size, found_streaminfo=0;
    uint8_t header[4];
    uint8_t *buffer=NULL;
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_FLAC;
    st->need_parsing = AVSTREAM_PARSE_FULL_RAW;
    /* the parameters will be extracted from the compressed bitstream */

    /* if fLaC marker is not found, assume there is no header */
    if (avio_rl32(s->pb) != MKTAG('f','L','a','C')) {
        avio_seek(s->pb, -4, SEEK_CUR);
        return 0;
    }

    /* process metadata blocks */
    while (!url_feof(s->pb) && !metadata_last) {
        avio_read(s->pb, header, 4);
        avpriv_flac_parse_block_header(header, &metadata_last, &metadata_type,
                                       &metadata_size);
        switch (metadata_type) {
        /* allocate and read metadata block for supported types */
        case FLAC_METADATA_TYPE_STREAMINFO:
        case FLAC_METADATA_TYPE_CUESHEET:
        case FLAC_METADATA_TYPE_PICTURE:
        case FLAC_METADATA_TYPE_VORBIS_COMMENT:
            buffer = av_mallocz(metadata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!buffer) {
                return AVERROR(ENOMEM);
            }
            if (avio_read(s->pb, buffer, metadata_size) != metadata_size) {
                RETURN_ERROR(AVERROR(EIO));
            }
            break;
        /* skip metadata block for unsupported types */
        default:
            ret = avio_skip(s->pb, metadata_size);
            if (ret < 0)
                return ret;
        }

        if (metadata_type == FLAC_METADATA_TYPE_STREAMINFO) {
            FLACStreaminfo si;
            /* STREAMINFO can only occur once */
            if (found_streaminfo) {
                RETURN_ERROR(AVERROR_INVALIDDATA);
            }
            if (metadata_size != FLAC_STREAMINFO_SIZE) {
                RETURN_ERROR(AVERROR_INVALIDDATA);
            }
            found_streaminfo = 1;
            /* hand ownership of the block buffer to the codec extradata */
            st->codec->extradata      = buffer;
            st->codec->extradata_size = metadata_size;
            buffer = NULL;

            /* get codec params from STREAMINFO header */
            avpriv_flac_parse_streaminfo(st->codec, &si, st->codec->extradata);

            /* set time base and duration */
            if (si.samplerate > 0) {
                avpriv_set_pts_info(st, 64, 1, si.samplerate);
                if (si.samples > 0)
                    st->duration = si.samples;
            }
        } else if (metadata_type == FLAC_METADATA_TYPE_CUESHEET) {
            uint8_t isrc[13];
            uint64_t start;
            const uint8_t *offset;
            int i, chapters, track, ti;
            /* fixed cuesheet prefix: 395 bytes before the track count */
            if (metadata_size < 431)
                RETURN_ERROR(AVERROR_INVALIDDATA);
            offset = buffer + 395;
            chapters = bytestream_get_byte(&offset) - 1;   /* minus lead-out */
            if (chapters <= 0)
                RETURN_ERROR(AVERROR_INVALIDDATA);
            for (i = 0; i < chapters; i++) {
                if (offset + 36 - buffer > metadata_size)
                    RETURN_ERROR(AVERROR_INVALIDDATA);
                start = bytestream_get_be64(&offset);
                track = bytestream_get_byte(&offset);
                bytestream_get_buffer(&offset, isrc, 12);
                isrc[12] = 0;
                offset += 14;   /* skip flags/reserved */
                ti = bytestream_get_byte(&offset);   /* index point count */
                if (ti <= 0) RETURN_ERROR(AVERROR_INVALIDDATA);
                offset += ti * 12;   /* skip the index points */
                avpriv_new_chapter(s, track, st->time_base, start, AV_NOPTS_VALUE, isrc);
            }
            av_freep(&buffer);
        } else if (metadata_type == FLAC_METADATA_TYPE_PICTURE) {
            ret = ff_flac_parse_picture(s, buffer, metadata_size);
            av_freep(&buffer);
            if (ret < 0) {
                av_log(s, AV_LOG_ERROR, "Error parsing attached picture.\n");
                return ret;
            }
        } else {
            /* STREAMINFO must be the first block */
            if (!found_streaminfo) {
                RETURN_ERROR(AVERROR_INVALIDDATA);
            }
            /* process supported blocks other than STREAMINFO */
            if (metadata_type == FLAC_METADATA_TYPE_VORBIS_COMMENT) {
                if (ff_vorbis_comment(s, &s->metadata, buffer, metadata_size)) {
                    av_log(s, AV_LOG_WARNING, "error parsing VorbisComment metadata\n");
                }
            }
            av_freep(&buffer);
        }
    }

    return 0;

fail:
    av_free(buffer);
    return ret;
}
 
/* A raw FLAC stream starts with the "fLaC" marker. */
static int flac_probe(AVProbeData *p)
{
    if (p->buf_size >= 4 && !memcmp(p->buf, "fLaC", 4))
        return AVPROBE_SCORE_EXTENSION;
    return 0;
}
 
/* Demuxer registration for raw FLAC; packets come from the generic
 * raw partial-packet reader and are reassembled by the FLAC parser. */
AVInputFormat ff_flac_demuxer = {
    .name         = "flac",
    .long_name    = NULL_IF_CONFIG_SMALL("raw FLAC"),
    .read_probe   = flac_probe,
    .read_header  = flac_read_header,
    .read_packet  = ff_raw_read_partial_packet,
    .flags        = AVFMT_GENERIC_INDEX,
    .extensions   = "flac",
    .raw_codec_id = AV_CODEC_ID_FLAC,
};
/contrib/sdk/sources/ffmpeg/libavformat/flacenc.c
0,0 → 1,137
/*
* raw FLAC muxer
* Copyright (c) 2006-2009 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/flac.h"
#include "avformat.h"
#include "avio_internal.h"
#include "flacenc.h"
#include "vorbiscomment.h"
#include "libavcodec/bytestream.h"
 
 
/* Emit a PADDING metadata block: 1-byte header (block type 1, top bit set
 * for the last block), 24-bit length, then n_padding_bytes of zeros. */
static int flac_write_block_padding(AVIOContext *pb, unsigned int n_padding_bytes,
                                    int last_block)
{
    avio_w8(pb, 0x01 | (last_block ? 0x80 : 0x00));
    avio_wb24(pb, n_padding_bytes);
    ffio_fill(pb, 0, n_padding_bytes);
    return 0;
}
 
/* Serialize *m as a VORBIS_COMMENT metadata block (block type 4) and
 * write it to pb.  The vendor string is fixed to "ffmpeg" in bitexact
 * mode.  Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
static int flac_write_block_comment(AVIOContext *pb, AVDictionary **m,
                                    int last_block, int bitexact)
{
    const char *vendor = bitexact ? "ffmpeg" : LIBAVFORMAT_IDENT;
    unsigned int total, count;
    uint8_t *block, *cursor;

    ff_metadata_conv(m, ff_vorbiscomment_metadata_conv, NULL);

    total = ff_vorbiscomment_length(*m, vendor, &count);
    block = av_malloc(total + 4);   /* +4: block header byte + 24-bit size */
    if (!block)
        return AVERROR(ENOMEM);

    cursor = block;
    bytestream_put_byte(&cursor, 0x04 | (last_block ? 0x80 : 0x00));
    bytestream_put_be24(&cursor, total);
    ff_vorbiscomment_write(&cursor, m, vendor, count);

    avio_write(pb, block, total + 4);
    av_freep(&block);

    return 0;
}
 
/*
 * Write the raw FLAC file header: "fLaC" marker + STREAMINFO (from the
 * codec extradata), then a VORBIS_COMMENT block and a final PADDING
 * block.  Only a single FLAC audio stream is accepted.
 */
static int flac_write_header(struct AVFormatContext *s)
{
    int ret;
    AVCodecContext *codec = s->streams[0]->codec;

    if (s->nb_streams > 1) {
        av_log(s, AV_LOG_ERROR, "only one stream is supported\n");
        return AVERROR(EINVAL);
    }
    if (codec->codec_id != AV_CODEC_ID_FLAC) {
        av_log(s, AV_LOG_ERROR, "unsupported codec\n");
        return AVERROR(EINVAL);
    }

    ret = ff_flac_write_header(s->pb, codec, 0);
    if (ret)
        return ret;

    ret = flac_write_block_comment(s->pb, &s->metadata, 0,
                                   codec->flags & CODEC_FLAG_BITEXACT);
    if (ret)
        return ret;

    /* The command line flac encoder defaults to placing a seekpoint
     * every 10s. So one might add padding to allow that later
     * but there seems to be no simple way to get the duration here.
     * So let's try the flac default of 8192 bytes */
    flac_write_block_padding(s->pb, 8192, 1);

    return ret;
}
 
/*
 * Rewrite the STREAMINFO block (at file offset 8, after "fLaC" and the
 * block header) with the final values from the codec extradata.  This
 * needs a seekable output; otherwise a warning is logged and the stale
 * header is left in place.
 */
static int flac_write_trailer(struct AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    uint8_t *streaminfo;
    enum FLACExtradataFormat format;
    int64_t file_size;

    if (!avpriv_flac_is_extradata_valid(s->streams[0]->codec, &format, &streaminfo))
        return -1;

    if (pb->seekable) {
        /* rewrite the STREAMINFO header block data */
        file_size = avio_tell(pb);
        avio_seek(pb, 8, SEEK_SET);
        avio_write(pb, streaminfo, FLAC_STREAMINFO_SIZE);
        avio_seek(pb, file_size, SEEK_SET);   /* restore write position */
        avio_flush(pb);
    } else {
        av_log(s, AV_LOG_WARNING, "unable to rewrite FLAC header.\n");
    }
    return 0;
}
 
/* Raw muxer: frames are already valid FLAC; copy them through verbatim. */
static int flac_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    avio_write(s->pb, pkt->data, pkt->size);
    return 0;
}
 
/* Muxer registration for raw FLAC (.flac). */
AVOutputFormat ff_flac_muxer = {
    .name          = "flac",
    .long_name     = NULL_IF_CONFIG_SMALL("raw FLAC"),
    .mime_type     = "audio/x-flac",
    .extensions    = "flac",
    .audio_codec   = AV_CODEC_ID_FLAC,
    .video_codec   = AV_CODEC_ID_NONE,
    .write_header  = flac_write_header,
    .write_packet  = flac_write_packet,
    .write_trailer = flac_write_trailer,
    .flags         = AVFMT_NOTIMESTAMPS,
};
/contrib/sdk/sources/ffmpeg/libavformat/flacenc.h
0,0 → 1,32
/*
* raw FLAC muxer
* Copyright (C) 2009 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_FLACENC_H
#define AVFORMAT_FLACENC_H

#include "libavcodec/flac.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"

/**
 * Write the "fLaC" stream marker and the STREAMINFO metadata block
 * (taken from the codec extradata) to pb.
 * @param last_block non-zero if STREAMINFO is the only metadata block
 * @return 0 on success, -1 if the codec extradata is invalid
 */
int ff_flac_write_header(AVIOContext *pb, AVCodecContext *codec,
                         int last_block);

#endif /* AVFORMAT_FLACENC_H */
/contrib/sdk/sources/ffmpeg/libavformat/flacenc_header.c
0,0 → 1,47
/*
* raw FLAC muxer
* Copyright (C) 2009 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/flac.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "flacenc.h"
 
/* Write the "fLaC" marker plus the STREAMINFO block header and body
 * (copied from the codec extradata).  Returns 0 on success, -1 when the
 * extradata does not contain a valid STREAMINFO. */
int ff_flac_write_header(AVIOContext *pb, AVCodecContext *codec,
                         int last_block)
{
    uint8_t marker_and_header[8] = {
        'f', 'L', 'a', 'C',     /* stream marker */
        0x00,                   /* block type 0 (STREAMINFO), last-block flag in bit 7 */
        0x00, 0x00, 0x22        /* 24-bit block size = 34 */
    };
    uint8_t *streaminfo;
    enum FLACExtradataFormat format;

    if (last_block)
        marker_and_header[4] |= 0x80;

    if (!avpriv_flac_is_extradata_valid(codec, &format, &streaminfo))
        return -1;

    /* write "fLaC" stream marker and first metadata block header */
    avio_write(pb, marker_and_header, 8);

    /* write STREAMINFO */
    avio_write(pb, streaminfo, FLAC_STREAMINFO_SIZE);

    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/flic.c
0,0 → 1,269
/*
* FLI/FLC Animation File Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* FLI/FLC file demuxer
* by Mike Melanson (melanson@pcisys.net)
* for more information on the .fli/.flc file format and all of its many
* variations, visit:
* http://www.compuphase.com/flic.htm
*
* This demuxer handles standard 0xAF11- and 0xAF12-type FLIs. It also handles
* special FLIs from the PC games "Magic Carpet" and "X-COM: Terror from the Deep".
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define FLIC_FILE_MAGIC_1 0xAF11
#define FLIC_FILE_MAGIC_2 0xAF12
#define FLIC_FILE_MAGIC_3 0xAF44 /* Flic Type for Extended FLX Format which
originated in Dave's Targa Animator (DTA) */
#define FLIC_CHUNK_MAGIC_1 0xF1FA
#define FLIC_CHUNK_MAGIC_2 0xF5FA
#define FLIC_MC_SPEED 5 /* speed for Magic Carpet game FLIs */
#define FLIC_DEFAULT_SPEED 5 /* for FLIs that have 0 speed */
#define FLIC_TFTD_CHUNK_AUDIO 0xAAAA /* Audio chunk. Used in Terror from the Deep.
Has 10 B extra header not accounted for in the chunk header */
#define FLIC_TFTD_SAMPLE_RATE 22050
 
#define FLIC_HEADER_SIZE 128
#define FLIC_PREAMBLE_SIZE 6
 
/* Demuxer private state, stored in AVFormatContext.priv_data. */
typedef struct FlicDemuxContext {
    int video_stream_index;  /* index of the FLIC video stream */
    int audio_stream_index;  /* index of the TFTD audio stream, when present */
    int frame_number;        /* running video frame counter, used as pts */
} FlicDemuxContext;
 
/* Probe for FLI/FLC/FLX files: check the magic number and sanity-check
 * the speed and frame dimensions found in the 128-byte header. */
static int flic_probe(AVProbeData *p)
{
    int magic;

    /* need a whole FLIC header to decide */
    if (p->buf_size < FLIC_HEADER_SIZE)
        return 0;

    /* accept the FLI, FLC and DTA FLX magic numbers */
    magic = AV_RL16(&p->buf[4]);
    if (magic != FLIC_FILE_MAGIC_1 &&
        magic != FLIC_FILE_MAGIC_2 &&
        magic != FLIC_FILE_MAGIC_3)
        return 0;

    /* offset 0x10 holds either the first chunk magic (Magic Carpet style)
     * or the playback speed; a speed above 2000 is implausible */
    if (AV_RL16(&p->buf[0x10]) != FLIC_CHUNK_MAGIC_1 &&
        AV_RL32(&p->buf[0x10]) > 2000)
        return 0;

    /* sanity-check the frame dimensions */
    if (AV_RL16(&p->buf[0x08]) > 4096 ||
        AV_RL16(&p->buf[0x0A]) > 4096)
        return 0;

    return AVPROBE_SCORE_MAX - 1;
}
 
/**
 * Read the 128-byte FLIC header, create the video stream (and the extra
 * audio stream for X-COM: Terror from the Deep files) and derive the
 * frame rate from the file variant.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int flic_read_header(AVFormatContext *s)
{
    FlicDemuxContext *flic = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char header[FLIC_HEADER_SIZE];
    AVStream *st, *ast;
    int speed;
    int magic_number;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    flic->frame_number = 0;

    /* load the whole header and pull out the width and height */
    if (avio_read(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
        return AVERROR(EIO);

    magic_number = AV_RL16(&header[4]);
    speed = AV_RL32(&header[0x10]);
    if (speed == 0)
        speed = FLIC_DEFAULT_SPEED;

    /* initialize the decoder streams */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    flic->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_FLIC;
    st->codec->codec_tag  = 0;  /* no fourcc */
    st->codec->width      = AV_RL16(&header[0x08]);
    st->codec->height     = AV_RL16(&header[0x0A]);

    if (!st->codec->width || !st->codec->height) {
        /* Ugly hack needed for the following sample: */
        /* http://samples.mplayerhq.hu/fli-flc/fli-bugs/specular.flc */
        av_log(s, AV_LOG_WARNING,
               "File with no specified width/height. Trying 640x480.\n");
        st->codec->width  = 640;
        st->codec->height = 480;
    }

    /* send over the whole 128-byte FLIC header */
    if (ff_alloc_extradata(st->codec, FLIC_HEADER_SIZE))
        return AVERROR(ENOMEM);
    memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);

    /* peek at the preamble to detect TFTD videos - they seem to always start with an audio chunk */
    if (avio_read(pb, preamble, FLIC_PREAMBLE_SIZE) != FLIC_PREAMBLE_SIZE) {
        av_log(s, AV_LOG_ERROR, "Failed to peek at preamble\n");
        return AVERROR(EIO);
    }

    avio_seek(pb, -FLIC_PREAMBLE_SIZE, SEEK_CUR);

    /* Time to figure out the framerate:
     * If the first preamble's magic number is 0xAAAA then this file is from
     * X-COM: Terror from the Deep. If on the other hand there is a FLIC chunk
     * magic number at offset 0x10 assume this file is from Magic Carpet instead.
     * If neither of the above is true then this is a normal FLIC file.
     */
    if (AV_RL16(&preamble[4]) == FLIC_TFTD_CHUNK_AUDIO) {
        /* TFTD videos have an extra 22050 Hz 8-bit mono audio stream */
        ast = avformat_new_stream(s, NULL);
        if (!ast)
            return AVERROR(ENOMEM);

        flic->audio_stream_index = ast->index;

        /* all audio frames are the same size, so use the size of the first chunk for block_align */
        ast->codec->block_align = AV_RL32(&preamble[0]);
        ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id    = AV_CODEC_ID_PCM_U8;
        ast->codec->codec_tag   = 0;
        ast->codec->sample_rate = FLIC_TFTD_SAMPLE_RATE;
        ast->codec->channels    = 1;
        /* BUGFIX: derive the bit rate from the audio stream's own sample
         * rate; the original read the (unset) video stream sample_rate */
        ast->codec->bit_rate    = ast->codec->sample_rate * 8;
        ast->codec->bits_per_coded_sample = 8;
        ast->codec->channel_layout = AV_CH_LAYOUT_MONO;
        ast->codec->extradata_size = 0;

        /* Since the header information is incorrect we have to figure out the
         * framerate using block_align and the fact that the audio is 22050 Hz.
         * We usually have two cases: 2205 -> 10 fps and 1470 -> 15 fps */
        avpriv_set_pts_info(st, 64, ast->codec->block_align, FLIC_TFTD_SAMPLE_RATE);
        avpriv_set_pts_info(ast, 64, 1, FLIC_TFTD_SAMPLE_RATE);
    } else if (AV_RL16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
        /* Magic Carpet FLIs always run at 70/5 fps */
        avpriv_set_pts_info(st, 64, FLIC_MC_SPEED, 70);

        /* rewind the stream since the first chunk is at offset 12 */
        avio_seek(pb, 12, SEEK_SET);

        /* send over abbreviated FLIC header chunk */
        av_free(st->codec->extradata);
        if (ff_alloc_extradata(st->codec, 12))
            return AVERROR(ENOMEM);
        memcpy(st->codec->extradata, header, 12);

    } else if (magic_number == FLIC_FILE_MAGIC_1) {
        /* classic FLI: speed is in 1/70 second ticks */
        avpriv_set_pts_info(st, 64, speed, 70);
    } else if ((magic_number == FLIC_FILE_MAGIC_2) ||
               (magic_number == FLIC_FILE_MAGIC_3)) {
        /* FLC/FLX: speed is in milliseconds */
        avpriv_set_pts_info(st, 64, speed, 1000);
    } else {
        av_log(s, AV_LOG_ERROR, "Invalid or unsupported magic chunk in file\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
 
/**
 * Read the next video or audio chunk into a packet; other chunk types
 * are skipped.
 *
 * @return the number of payload bytes read, or a negative AVERROR code
 */
static int flic_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    FlicDemuxContext *flic = s->priv_data;
    AVIOContext *pb = s->pb;
    int packet_read = 0;
    unsigned int size;
    int magic;
    int ret = 0;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    while (!packet_read) {

        if ((ret = avio_read(pb, preamble, FLIC_PREAMBLE_SIZE)) !=
            FLIC_PREAMBLE_SIZE) {
            ret = AVERROR(EIO);
            break;
        }

        size  = AV_RL32(&preamble[0]);
        magic = AV_RL16(&preamble[4]);

        /* BUGFIX: a chunk can never be smaller than its own preamble; a
         * corrupt size below 6 would make the skip branch seek backwards
         * and loop forever */
        if (size < FLIC_PREAMBLE_SIZE) {
            ret = AVERROR_INVALIDDATA;
            break;
        }

        if (((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) && size > FLIC_PREAMBLE_SIZE) {
            /* video frame chunk: the decoder wants the preamble too */
            if (av_new_packet(pkt, size)) {
                ret = AVERROR(EIO);
                break;
            }
            pkt->stream_index = flic->video_stream_index;
            pkt->pts = flic->frame_number++;
            pkt->pos = avio_tell(pb);
            memcpy(pkt->data, preamble, FLIC_PREAMBLE_SIZE);
            ret = avio_read(pb, pkt->data + FLIC_PREAMBLE_SIZE,
                            size - FLIC_PREAMBLE_SIZE);
            if (ret != size - FLIC_PREAMBLE_SIZE) {
                av_free_packet(pkt);
                ret = AVERROR(EIO);
            }
            packet_read = 1;
        } else if (magic == FLIC_TFTD_CHUNK_AUDIO) {
            if (av_new_packet(pkt, size)) {
                ret = AVERROR(EIO);
                break;
            }

            /* skip useless 10B sub-header (yes, it's not accounted for in the chunk header) */
            avio_skip(pb, 10);

            pkt->stream_index = flic->audio_stream_index;
            pkt->pos = avio_tell(pb);
            ret = avio_read(pb, pkt->data, size);

            if (ret != size) {
                av_free_packet(pkt);
                ret = AVERROR(EIO);
            }

            packet_read = 1;
        } else {
            /* not interested in this chunk */
            avio_skip(pb, size - 6);
        }
    }

    return ret;
}
 
/* Demuxer registration: probe-only format detection, no seeking support. */
AVInputFormat ff_flic_demuxer = {
    .name           = "flic",
    .long_name      = NULL_IF_CONFIG_SMALL("FLI/FLC/FLX animation"),
    .priv_data_size = sizeof(FlicDemuxContext),
    .read_probe     = flic_probe,
    .read_header    = flic_read_header,
    .read_packet    = flic_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/flv.h
0,0 → 1,138
/*
* FLV common header
*
* Copyright (c) 2006 The FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* FLV common header
*/
 
#ifndef AVFORMAT_FLV_H
#define AVFORMAT_FLV_H
 
/* offsets for packed values */
#define FLV_AUDIO_SAMPLESSIZE_OFFSET 1
#define FLV_AUDIO_SAMPLERATE_OFFSET  2
#define FLV_AUDIO_CODECID_OFFSET     4

#define FLV_VIDEO_FRAMETYPE_OFFSET   4

/* bitmasks to isolate specific values */
#define FLV_AUDIO_CHANNEL_MASK    0x01
#define FLV_AUDIO_SAMPLESIZE_MASK 0x02
#define FLV_AUDIO_SAMPLERATE_MASK 0x0c
#define FLV_AUDIO_CODECID_MASK    0xf0

#define FLV_VIDEO_CODECID_MASK    0x0f
#define FLV_VIDEO_FRAMETYPE_MASK  0xf0

/* AMF marker byte that terminates an object */
#define AMF_END_OF_OBJECT         0x09

/* property names used by the seekable-keyframe index in onMetaData */
#define KEYFRAMES_TAG            "keyframes"
#define KEYFRAMES_TIMESTAMP_TAG  "times"
#define KEYFRAMES_BYTEOFFSET_TAG "filepositions"


/* FLV file header flag bits (byte 4 of the file header) */
enum {
    FLV_HEADER_FLAG_HASVIDEO = 1,
    FLV_HEADER_FLAG_HASAUDIO = 4,
};

/* FLV tag types (first byte of each tag) */
enum {
    FLV_TAG_TYPE_AUDIO = 0x08,
    FLV_TAG_TYPE_VIDEO = 0x09,
    FLV_TAG_TYPE_META  = 0x12,
};

/* per-stream-kind indices used by the demuxer's extradata queues */
enum {
    FLV_STREAM_TYPE_VIDEO,
    FLV_STREAM_TYPE_AUDIO,
    FLV_STREAM_TYPE_DATA,
    FLV_STREAM_TYPE_NB,
};

/* audio flags: channel count */
enum {
    FLV_MONO   = 0,
    FLV_STEREO = 1,
};

/* audio flags: sample size */
enum {
    FLV_SAMPLESSIZE_8BIT  = 0,
    FLV_SAMPLESSIZE_16BIT = 1 << FLV_AUDIO_SAMPLESSIZE_OFFSET,
};

/* audio flags: sample rate */
enum {
    FLV_SAMPLERATE_SPECIAL = 0, /**< signifies 5512Hz and 8000Hz in the case of NELLYMOSER */
    FLV_SAMPLERATE_11025HZ = 1 << FLV_AUDIO_SAMPLERATE_OFFSET,
    FLV_SAMPLERATE_22050HZ = 2 << FLV_AUDIO_SAMPLERATE_OFFSET,
    FLV_SAMPLERATE_44100HZ = 3 << FLV_AUDIO_SAMPLERATE_OFFSET,
};

/* audio flags: codec id (stored in the high nibble) */
enum {
    FLV_CODECID_PCM                  = 0,
    FLV_CODECID_ADPCM                = 1 << FLV_AUDIO_CODECID_OFFSET,
    FLV_CODECID_MP3                  = 2 << FLV_AUDIO_CODECID_OFFSET,
    FLV_CODECID_PCM_LE               = 3 << FLV_AUDIO_CODECID_OFFSET,
    FLV_CODECID_NELLYMOSER_16KHZ_MONO = 4 << FLV_AUDIO_CODECID_OFFSET,
    FLV_CODECID_NELLYMOSER_8KHZ_MONO = 5 << FLV_AUDIO_CODECID_OFFSET,
    FLV_CODECID_NELLYMOSER           = 6 << FLV_AUDIO_CODECID_OFFSET,
    FLV_CODECID_PCM_ALAW             = 7 << FLV_AUDIO_CODECID_OFFSET,
    FLV_CODECID_PCM_MULAW            = 8 << FLV_AUDIO_CODECID_OFFSET,
    FLV_CODECID_AAC                  = 10<< FLV_AUDIO_CODECID_OFFSET,
    FLV_CODECID_SPEEX                = 11<< FLV_AUDIO_CODECID_OFFSET,
};

/* video flags: codec id (stored in the low nibble) */
enum {
    FLV_CODECID_H263     = 2,
    FLV_CODECID_SCREEN   = 3,
    FLV_CODECID_VP6      = 4,
    FLV_CODECID_VP6A     = 5,
    FLV_CODECID_SCREEN2  = 6,
    FLV_CODECID_H264     = 7,
    FLV_CODECID_REALH263 = 8,
    FLV_CODECID_MPEG4    = 9,
};

/* video flags: frame type (stored in the high nibble) */
enum {
    FLV_FRAME_KEY            = 1 << FLV_VIDEO_FRAMETYPE_OFFSET, ///< key frame (for AVC, a seekable frame)
    FLV_FRAME_INTER          = 2 << FLV_VIDEO_FRAMETYPE_OFFSET, ///< inter frame (for AVC, a non-seekable frame)
    FLV_FRAME_DISP_INTER     = 3 << FLV_VIDEO_FRAMETYPE_OFFSET, ///< disposable inter frame (H.263 only)
    FLV_FRAME_GENERATED_KEY  = 4 << FLV_VIDEO_FRAMETYPE_OFFSET, ///< generated key frame (reserved for server use only)
    FLV_FRAME_VIDEO_INFO_CMD = 5 << FLV_VIDEO_FRAMETYPE_OFFSET, ///< video info/command frame
};

/* AMF0 value type markers, as found in script-data tags */
typedef enum {
    AMF_DATA_TYPE_NUMBER      = 0x00,
    AMF_DATA_TYPE_BOOL        = 0x01,
    AMF_DATA_TYPE_STRING      = 0x02,
    AMF_DATA_TYPE_OBJECT      = 0x03,
    AMF_DATA_TYPE_NULL        = 0x05,
    AMF_DATA_TYPE_UNDEFINED   = 0x06,
    AMF_DATA_TYPE_REFERENCE   = 0x07,
    AMF_DATA_TYPE_MIXEDARRAY  = 0x08,
    AMF_DATA_TYPE_OBJECT_END  = 0x09,
    AMF_DATA_TYPE_ARRAY       = 0x0a,
    AMF_DATA_TYPE_DATE        = 0x0b,
    AMF_DATA_TYPE_LONG_STRING = 0x0c,
    AMF_DATA_TYPE_UNSUPPORTED = 0x0d,
} AMFDataType;

#endif /* AVFORMAT_FLV_H */
/contrib/sdk/sources/ffmpeg/libavformat/flvdec.c
0,0 → 1,1047
/*
* FLV demuxer
* Copyright (c) 2003 The FFmpeg Project
*
* This demuxer will generate a 1 byte extradata for VP6F content.
* It is composed of:
* - upper 4bits: difference between encoded width and visible width
* - lower 4bits: difference between encoded height and visible height
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/intfloat.h"
#include "libavutil/mathematics.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/mpeg4audio.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
#include "flv.h"
 
#define VALIDATE_INDEX_TS_THRESH 2500
 
/* FLV demuxer private state, stored in AVFormatContext.priv_data. */
typedef struct {
    const AVClass *class; ///< Class for private options.
    int trust_metadata;   ///< configure streams according onMetaData
    int wrong_dts;        ///< wrong dts due to negative cts
    uint8_t *new_extradata[FLV_STREAM_TYPE_NB]; ///< extradata found mid-stream, queued per stream type
    int new_extradata_size[FLV_STREAM_TYPE_NB]; ///< sizes of the queued extradata buffers
    int last_sample_rate; ///< sample rate seen in the previous audio tag
    int last_channels;    ///< channel count seen in the previous audio tag
    struct {
        int64_t dts;
        int64_t pos;
    } validate_index[2];  ///< first keyframe-index entries, checked against the real tags
    int validate_next;    ///< next validate_index entry to check
    int validate_count;   ///< number of filled validate_index entries
    int searched_for_end; ///< whether the end-of-file duration probe was already done
} FLVContext;
 
/* Probe for the FLV signature: "FLV", version below 5, reserved flag
 * bits clear, and a header size (big-endian bytes 5..8) larger than 8. */
static int flv_probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;

    if (b[0] == 'F' && b[1] == 'L' && b[2] == 'V' &&
        b[3] < 5 && b[5] == 0 && AV_RB32(b + 5) > 8)
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/* Create a new stream of the given media type with the FLV millisecond
 * time base, clearing AVFMTCTX_NOHEADER once all streams are known. */
static AVStream *create_stream(AVFormatContext *s, int codec_type)
{
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return NULL;
    st->codec->codec_type = codec_type;

    /* with three streams, or two streams neither of which is a data
     * stream, every header-declared stream now exists */
    if (s->nb_streams >= 3 ||
        (s->nb_streams == 2 &&
         s->streams[0]->codec->codec_type != AVMEDIA_TYPE_DATA &&
         s->streams[1]->codec->codec_type != AVMEDIA_TYPE_DATA))
        s->ctx_flags &= ~AVFMTCTX_NOHEADER;

    avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
    return st;
}
 
/* Check whether an existing audio stream matches the codec described by
 * an audio tag's flag byte. Returns non-zero on a match (or when the
 * stream is still unconfigured). Used to pair tags with streams. */
static int flv_same_audio_codec(AVCodecContext *acodec, int flags)
{
    int bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
    int flv_codecid = flags & FLV_AUDIO_CODECID_MASK;
    int codec_id;

    /* unconfigured stream: accept anything */
    if (!acodec->codec_id && !acodec->codec_tag)
        return 1;

    if (acodec->bits_per_coded_sample != bits_per_coded_sample)
        return 0;

    switch (flv_codecid) {
        // no distinction between S16 and S8 PCM codec flags
    case FLV_CODECID_PCM:
        codec_id = bits_per_coded_sample == 8
                   ? AV_CODEC_ID_PCM_U8
#if HAVE_BIGENDIAN
                   : AV_CODEC_ID_PCM_S16BE;
#else
                   : AV_CODEC_ID_PCM_S16LE;
#endif
        return codec_id == acodec->codec_id;
    case FLV_CODECID_PCM_LE:
        codec_id = bits_per_coded_sample == 8
                   ? AV_CODEC_ID_PCM_U8
                   : AV_CODEC_ID_PCM_S16LE;
        return codec_id == acodec->codec_id;
    case FLV_CODECID_AAC:
        return acodec->codec_id == AV_CODEC_ID_AAC;
    case FLV_CODECID_ADPCM:
        return acodec->codec_id == AV_CODEC_ID_ADPCM_SWF;
    case FLV_CODECID_SPEEX:
        return acodec->codec_id == AV_CODEC_ID_SPEEX;
    case FLV_CODECID_MP3:
        return acodec->codec_id == AV_CODEC_ID_MP3;
    case FLV_CODECID_NELLYMOSER_8KHZ_MONO:
    case FLV_CODECID_NELLYMOSER_16KHZ_MONO:
    case FLV_CODECID_NELLYMOSER:
        return acodec->codec_id == AV_CODEC_ID_NELLYMOSER;
    case FLV_CODECID_PCM_MULAW:
        /* mu-law/A-law in FLV is always 8 kHz */
        return acodec->sample_rate == 8000 &&
               acodec->codec_id == AV_CODEC_ID_PCM_MULAW;
    case FLV_CODECID_PCM_ALAW:
        return acodec->sample_rate == 8000 &&
               acodec->codec_id == AV_CODEC_ID_PCM_ALAW;
    default:
        /* unknown id: compare against the raw codec tag */
        return acodec->codec_tag == (flv_codecid >> FLV_AUDIO_CODECID_OFFSET);
    }
}
 
/* Configure an audio stream's codec id (and, for some codecs, its fixed
 * sample rate) from the FLV audio-tag codec id. Unknown ids are stored
 * in codec_tag and reported via avpriv_request_sample(). */
static void flv_set_audio_codec(AVFormatContext *s, AVStream *astream,
                                AVCodecContext *acodec, int flv_codecid)
{
    switch (flv_codecid) {
        // no distinction between S16 and S8 PCM codec flags
    case FLV_CODECID_PCM:
        acodec->codec_id = acodec->bits_per_coded_sample == 8
                           ? AV_CODEC_ID_PCM_U8
#if HAVE_BIGENDIAN
                           : AV_CODEC_ID_PCM_S16BE;
#else
                           : AV_CODEC_ID_PCM_S16LE;
#endif
        break;
    case FLV_CODECID_PCM_LE:
        acodec->codec_id = acodec->bits_per_coded_sample == 8
                           ? AV_CODEC_ID_PCM_U8
                           : AV_CODEC_ID_PCM_S16LE;
        break;
    case FLV_CODECID_AAC:
        acodec->codec_id = AV_CODEC_ID_AAC;
        break;
    case FLV_CODECID_ADPCM:
        acodec->codec_id = AV_CODEC_ID_ADPCM_SWF;
        break;
    case FLV_CODECID_SPEEX:
        /* FLV Speex is always 16 kHz regardless of the tag's rate bits */
        acodec->codec_id    = AV_CODEC_ID_SPEEX;
        acodec->sample_rate = 16000;
        break;
    case FLV_CODECID_MP3:
        acodec->codec_id      = AV_CODEC_ID_MP3;
        /* MP3 frames need a parser to find frame boundaries */
        astream->need_parsing = AVSTREAM_PARSE_FULL;
        break;
    case FLV_CODECID_NELLYMOSER_8KHZ_MONO:
        // in case metadata does not otherwise declare samplerate
        acodec->sample_rate = 8000;
        acodec->codec_id    = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_NELLYMOSER_16KHZ_MONO:
        acodec->sample_rate = 16000;
        acodec->codec_id    = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_NELLYMOSER:
        acodec->codec_id = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_PCM_MULAW:
        acodec->sample_rate = 8000;
        acodec->codec_id    = AV_CODEC_ID_PCM_MULAW;
        break;
    case FLV_CODECID_PCM_ALAW:
        acodec->sample_rate = 8000;
        acodec->codec_id    = AV_CODEC_ID_PCM_ALAW;
        break;
    default:
        /* unknown: keep the raw id in codec_tag for later inspection */
        avpriv_request_sample(s, "Audio codec (%x)",
               flv_codecid >> FLV_AUDIO_CODECID_OFFSET);
        acodec->codec_tag = flv_codecid >> FLV_AUDIO_CODECID_OFFSET;
    }
}
 
/* Check whether an existing video stream matches the codec described by
 * a video tag's flag byte. Returns non-zero on a match (or when the
 * stream is still unconfigured). */
static int flv_same_video_codec(AVCodecContext *vcodec, int flags)
{
    int flv_codecid = flags & FLV_VIDEO_CODECID_MASK;

    /* unconfigured stream: accept anything */
    if (!vcodec->codec_id && !vcodec->codec_tag)
        return 1;

    if (flv_codecid == FLV_CODECID_H263)
        return vcodec->codec_id == AV_CODEC_ID_FLV1;
    if (flv_codecid == FLV_CODECID_SCREEN)
        return vcodec->codec_id == AV_CODEC_ID_FLASHSV;
    if (flv_codecid == FLV_CODECID_SCREEN2)
        return vcodec->codec_id == AV_CODEC_ID_FLASHSV2;
    if (flv_codecid == FLV_CODECID_VP6)
        return vcodec->codec_id == AV_CODEC_ID_VP6F;
    if (flv_codecid == FLV_CODECID_VP6A)
        return vcodec->codec_id == AV_CODEC_ID_VP6A;
    if (flv_codecid == FLV_CODECID_H264)
        return vcodec->codec_id == AV_CODEC_ID_H264;

    /* unknown id: compare against the raw codec tag */
    return vcodec->codec_tag == flv_codecid;
}
 
/* Configure a video stream's codec id from the FLV video-tag codec id.
 * Returns the number of header bytes consumed (or to be consumed) by
 * the codec-specific prefix, so flv_read_packet() can adjust the
 * remaining payload size accordingly. */
static int flv_set_video_codec(AVFormatContext *s, AVStream *vstream,
                               int flv_codecid, int read)
{
    AVCodecContext *vcodec = vstream->codec;
    switch (flv_codecid) {
    case FLV_CODECID_H263:
        vcodec->codec_id = AV_CODEC_ID_FLV1;
        break;
    case FLV_CODECID_REALH263:
        vcodec->codec_id = AV_CODEC_ID_H263;
        break; // Really mean it this time
    case FLV_CODECID_SCREEN:
        vcodec->codec_id = AV_CODEC_ID_FLASHSV;
        break;
    case FLV_CODECID_SCREEN2:
        vcodec->codec_id = AV_CODEC_ID_FLASHSV2;
        break;
    case FLV_CODECID_VP6:
        vcodec->codec_id = AV_CODEC_ID_VP6F;
        /* fallthrough: VP6 and VP6A share the 1-byte size-adjust header */
    case FLV_CODECID_VP6A:
        if (flv_codecid == FLV_CODECID_VP6A)
            vcodec->codec_id = AV_CODEC_ID_VP6A;
        if (read) {
            /* the extra byte holds the width/height crop deltas and is
             * exposed to the decoder as 1 byte of extradata */
            if (vcodec->extradata_size != 1) {
                ff_alloc_extradata(vcodec, 1);
            }
            if (vcodec->extradata)
                vcodec->extradata[0] = avio_r8(s->pb);
            else
                avio_skip(s->pb, 1);
        }
        return 1; // 1 byte body size adjustment for flv_read_packet()
    case FLV_CODECID_H264:
        vcodec->codec_id = AV_CODEC_ID_H264;
        return 3; // not 4, reading packet type will consume one byte
    case FLV_CODECID_MPEG4:
        vcodec->codec_id = AV_CODEC_ID_MPEG4;
        return 3;
    default:
        /* unknown: keep the raw id in codec_tag for later inspection */
        avpriv_request_sample(s, "Video codec (%x)", flv_codecid);
        vcodec->codec_tag = flv_codecid;
    }

    return 0;
}
 
/**
 * Read a 16-bit-length-prefixed AMF string into buffer, always
 * NUL-terminating it.
 *
 * @return the string length on success; a negative value when the
 *         string does not fit in buffer (it is then skipped) or when
 *         the read is short or fails
 */
static int amf_get_string(AVIOContext *ioc, char *buffer, int buffsize)
{
    int ret;
    int length = avio_rb16(ioc);
    if (length >= buffsize) {
        avio_skip(ioc, length);
        return -1;
    }

    /* BUGFIX: check the read result; a short read previously left the
     * tail of the buffer uninitialized yet reported success */
    ret = avio_read(ioc, buffer, length);
    if (ret < 0)
        return ret;
    if (ret < length)
        return -1;

    buffer[length] = '\0';

    return length;
}
 
/* Parse the "keyframes" object from onMetaData, which carries two
 * parallel AMF arrays ("times" in seconds, "filepositions" in bytes),
 * and turn them into index entries on the given stream. The first two
 * entries are also remembered in flv->validate_index so they can later
 * be checked against real tag positions. The stream position is always
 * restored before returning. Returns 0 on success or if skipped, a
 * negative error code on failure. */
static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc,
                                 AVStream *vstream, int64_t max_pos)
{
    FLVContext *flv = s->priv_data;
    unsigned int timeslen = 0, fileposlen = 0, i;
    char str_val[256];
    int64_t *times = NULL;
    int64_t *filepositions = NULL;
    int ret = AVERROR(ENOSYS);
    int64_t initial_pos = avio_tell(ioc);

    if (vstream->nb_index_entries>0) {
        av_log(s, AV_LOG_WARNING, "Skiping duplicate index\n");
        return 0;
    }

    if (s->flags & AVFMT_FLAG_IGNIDX)
        return 0;

    while (avio_tell(ioc) < max_pos - 2 &&
           amf_get_string(ioc, str_val, sizeof(str_val)) > 0) {
        int64_t **current_array;
        unsigned int arraylen;

        // Expect array object in context
        if (avio_r8(ioc) != AMF_DATA_TYPE_ARRAY)
            break;

        arraylen = avio_rb32(ioc);
        /* reject absurd lengths to avoid huge allocations */
        if (arraylen>>28)
            break;

        if (!strcmp(KEYFRAMES_TIMESTAMP_TAG , str_val) && !times) {
            current_array = &times;
            timeslen      = arraylen;
        } else if (!strcmp(KEYFRAMES_BYTEOFFSET_TAG, str_val) &&
                   !filepositions) {
            current_array = &filepositions;
            fileposlen    = arraylen;
        } else
            // unexpected metatag inside keyframes, will not use such
            // metadata for indexing
            break;

        if (!(*current_array = av_mallocz(sizeof(**current_array) * arraylen))) {
            ret = AVERROR(ENOMEM);
            goto finish;
        }

        /* each element must be an AMF number (IEEE double) */
        for (i = 0; i < arraylen && avio_tell(ioc) < max_pos - 1; i++) {
            if (avio_r8(ioc) != AMF_DATA_TYPE_NUMBER)
                goto invalid;
            current_array[0][i] = av_int2double(avio_rb64(ioc));
        }
        if (times && filepositions) {
            // All done, exiting at a position allowing amf_parse_object
            // to finish parsing the object
            ret = 0;
            break;
        }
    }

    /* only use the index when both arrays match in length and the file
     * positions actually lie beyond the metadata itself */
    if (timeslen == fileposlen && fileposlen>1 && max_pos <= filepositions[0]) {
        for (i = 0; i < fileposlen; i++) {
            av_add_index_entry(vstream, filepositions[i], times[i] * 1000,
                               0, 0, AVINDEX_KEYFRAME);
            if (i < 2) {
                flv->validate_index[i].pos = filepositions[i];
                flv->validate_index[i].dts = times[i] * 1000;
                flv->validate_count        = i + 1;
            }
        }
    } else {
invalid:
        av_log(s, AV_LOG_WARNING, "Invalid keyframes object, skipping.\n");
    }

finish:
    av_freep(&times);
    av_freep(&filepositions);
    avio_seek(ioc, initial_pos, SEEK_SET);
    return ret;
}
 
/* Recursively parse a single AMF value from an onMetaData script tag.
 * At depth 1, well-known keys (duration, width, datarates, ...) are
 * applied to the format context / streams; remaining scalar keys are
 * stored in s->metadata. The "keyframes" object triggers the seek-index
 * parser. Returns 0 on success, -1 when the value cannot be parsed or
 * skipped. */
static int amf_parse_object(AVFormatContext *s, AVStream *astream,
                            AVStream *vstream, const char *key,
                            int64_t max_pos, int depth)
{
    AVCodecContext *acodec, *vcodec;
    FLVContext *flv = s->priv_data;
    AVIOContext *ioc;
    AMFDataType amf_type;
    char str_val[256];
    double num_val;

    num_val  = 0;
    ioc      = s->pb;
    amf_type = avio_r8(ioc);

    /* first: consume the value itself (recursing into containers) */
    switch (amf_type) {
    case AMF_DATA_TYPE_NUMBER:
        num_val = av_int2double(avio_rb64(ioc));
        break;
    case AMF_DATA_TYPE_BOOL:
        num_val = avio_r8(ioc);
        break;
    case AMF_DATA_TYPE_STRING:
        if (amf_get_string(ioc, str_val, sizeof(str_val)) < 0)
            return -1;
        break;
    case AMF_DATA_TYPE_OBJECT:
        /* the top-level "keyframes" object carries the seek index;
         * parsing it requires a seekable input */
        if ((vstream || astream) && key &&
            ioc->seekable &&
            !strcmp(KEYFRAMES_TAG, key) && depth == 1)
            if (parse_keyframes_index(s, ioc, vstream ? vstream : astream,
                                      max_pos) < 0)
                av_log(s, AV_LOG_ERROR, "Keyframe index parsing failed\n");

        while (avio_tell(ioc) < max_pos - 2 &&
               amf_get_string(ioc, str_val, sizeof(str_val)) > 0)
            if (amf_parse_object(s, astream, vstream, str_val, max_pos,
                                 depth + 1) < 0)
                return -1;     // if we couldn't skip, bomb out.
        if (avio_r8(ioc) != AMF_END_OF_OBJECT)
            return -1;
        break;
    case AMF_DATA_TYPE_NULL:
    case AMF_DATA_TYPE_UNDEFINED:
    case AMF_DATA_TYPE_UNSUPPORTED:
        break;     // these take up no additional space
    case AMF_DATA_TYPE_MIXEDARRAY:
        avio_skip(ioc, 4);     // skip 32-bit max array index
        while (avio_tell(ioc) < max_pos - 2 &&
               amf_get_string(ioc, str_val, sizeof(str_val)) > 0)
            // this is the only case in which we would want a nested
            // parse to not skip over the object
            if (amf_parse_object(s, astream, vstream, str_val, max_pos,
                                 depth + 1) < 0)
                return -1;
        if (avio_r8(ioc) != AMF_END_OF_OBJECT)
            return -1;
        break;
    case AMF_DATA_TYPE_ARRAY:
    {
        unsigned int arraylen, i;

        arraylen = avio_rb32(ioc);
        /* strict array: elements have no names, parse them anonymously */
        for (i = 0; i < arraylen && avio_tell(ioc) < max_pos - 1; i++)
            if (amf_parse_object(s, NULL, NULL, NULL, max_pos,
                                 depth + 1) < 0)
                return -1;      // if we couldn't skip, bomb out.
    }
    break;
    case AMF_DATA_TYPE_DATE:
        avio_skip(ioc, 8 + 2);  // timestamp (double) and UTC offset (int16)
        break;
    default:                    // unsupported type, we couldn't skip
        return -1;
    }

    // only look for metadata values when we are not nested and key != NULL
    if (depth == 1 && key) {
        acodec = astream ? astream->codec : NULL;
        vcodec = vstream ? vstream->codec : NULL;

        if (amf_type == AMF_DATA_TYPE_NUMBER ||
            amf_type == AMF_DATA_TYPE_BOOL) {
            if (!strcmp(key, "duration"))
                s->duration = num_val * AV_TIME_BASE;
            else if (!strcmp(key, "videodatarate") && vcodec &&
                     0 <= (int)(num_val * 1024.0))
                vcodec->bit_rate = num_val * 1024.0;
            else if (!strcmp(key, "audiodatarate") && acodec &&
                     0 <= (int)(num_val * 1024.0))
                acodec->bit_rate = num_val * 1024.0;
            else if (!strcmp(key, "datastream")) {
                AVStream *st = create_stream(s, AVMEDIA_TYPE_DATA);
                if (!st)
                    return AVERROR(ENOMEM);
                st->codec->codec_id = AV_CODEC_ID_TEXT;
            } else if (flv->trust_metadata) {
                /* optionally let onMetaData override the per-tag codec info */
                if (!strcmp(key, "videocodecid") && vcodec) {
                    flv_set_video_codec(s, vstream, num_val, 0);
                } else if (!strcmp(key, "audiocodecid") && acodec) {
                    int id = ((int)num_val) << FLV_AUDIO_CODECID_OFFSET;
                    flv_set_audio_codec(s, astream, acodec, id);
                } else if (!strcmp(key, "audiosamplerate") && acodec) {
                    acodec->sample_rate = num_val;
                } else if (!strcmp(key, "audiosamplesize") && acodec) {
                    acodec->bits_per_coded_sample = num_val;
                } else if (!strcmp(key, "stereo") && acodec) {
                    acodec->channels       = num_val + 1;
                    acodec->channel_layout = acodec->channels == 2 ?
                                             AV_CH_LAYOUT_STEREO :
                                             AV_CH_LAYOUT_MONO;
                } else if (!strcmp(key, "width") && vcodec) {
                    vcodec->width = num_val;
                } else if (!strcmp(key, "height") && vcodec) {
                    vcodec->height = num_val;
                }
            }
        }

        if (amf_type == AMF_DATA_TYPE_OBJECT && s->nb_streams == 1 &&
           ((!acodec && !strcmp(key, "audiocodecid")) ||
            (!vcodec && !strcmp(key, "videocodecid"))))
                s->ctx_flags &= ~AVFMTCTX_NOHEADER; //If there is either audio/video missing, codecid will be an empty object

        /* keys already applied above are not duplicated into metadata */
        if (!strcmp(key, "duration")        ||
            !strcmp(key, "filesize")        ||
            !strcmp(key, "width")           ||
            !strcmp(key, "height")          ||
            !strcmp(key, "videodatarate")   ||
            !strcmp(key, "framerate")       ||
            !strcmp(key, "videocodecid")    ||
            !strcmp(key, "audiodatarate")   ||
            !strcmp(key, "audiosamplerate") ||
            !strcmp(key, "audiosamplesize") ||
            !strcmp(key, "stereo")          ||
            !strcmp(key, "audiocodecid")    ||
            !strcmp(key, "datastream"))
            return 0;

        /* everything else becomes a metadata dictionary entry */
        if (amf_type == AMF_DATA_TYPE_BOOL) {
            av_strlcpy(str_val, num_val > 0 ? "true" : "false",
                       sizeof(str_val));
            av_dict_set(&s->metadata, key, str_val, 0);
        } else if (amf_type == AMF_DATA_TYPE_NUMBER) {
            snprintf(str_val, sizeof(str_val), "%.f", num_val);
            av_dict_set(&s->metadata, key, str_val, 0);
        } else if (amf_type == AMF_DATA_TYPE_STRING)
            av_dict_set(&s->metadata, key, str_val, 0);
    }

    return 0;
}
 
/* Parse the body of a script-data tag. Returns 0 when an "onMetaData"
 * object was parsed, 1 for "onTextData" (so the caller can turn it into
 * a data packet), and -1 for anything else or on parse failure. */
static int flv_read_metabody(AVFormatContext *s, int64_t next_pos)
{
    AMFDataType type;
    AVIOContext *ioc = s->pb;
    AVStream *astream = NULL;
    AVStream *vstream = NULL;
    AVStream av_unused *dstream = NULL;
    int i;
    // only needs to hold the string "onMetaData".
    // Anything longer is something we don't want.
    char buffer[11];

    /* the body must begin with a short AMF string naming the event */
    type = avio_r8(ioc);
    if (type != AMF_DATA_TYPE_STRING ||
        amf_get_string(ioc, buffer, sizeof(buffer)) < 0)
        return -1;

    if (!strcmp(buffer, "onTextData"))
        return 1;

    if (strcmp(buffer, "onMetaData"))
        return -1;

    /* cache the stream pointers so amf_parse_object() does not have to
     * search on every call */
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *cur = s->streams[i];
        switch (cur->codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            vstream = cur;
            break;
        case AVMEDIA_TYPE_AUDIO:
            astream = cur;
            break;
        case AVMEDIA_TYPE_DATA:
            dstream = cur;
            break;
        default:
            break;
        }
    }

    // parse the second object (we want a mixed array)
    if (amf_parse_object(s, astream, vstream, buffer, next_pos, 0) < 0)
        return -1;

    return 0;
}
 
/* Read the 9-byte FLV file header, create the streams announced by its
 * flag byte, and position the input at the first tag. */
static int flv_read_header(AVFormatContext *s)
{
    int offset, flags;

    avio_skip(s->pb, 4);        /* "FLV" signature + version byte */
    flags = avio_r8(s->pb);

    /* old flvtool cleared this field */
    /* FIXME: better fix needed */
    if (!flags) {
        flags = FLV_HEADER_FLAG_HASVIDEO | FLV_HEADER_FLAG_HASAUDIO;
        av_log(s, AV_LOG_WARNING,
               "Broken FLV file, which says no streams present, "
               "this might fail\n");
    }

    /* more streams may still appear mid-file */
    s->ctx_flags |= AVFMTCTX_NOHEADER;

    if ((flags & FLV_HEADER_FLAG_HASVIDEO) &&
        !create_stream(s, AVMEDIA_TYPE_VIDEO))
        return AVERROR(ENOMEM);
    if ((flags & FLV_HEADER_FLAG_HASAUDIO) &&
        !create_stream(s, AVMEDIA_TYPE_AUDIO))
        return AVERROR(ENOMEM);
    // Flag doesn't indicate whether or not there is script-data present. Must
    // create that stream if it's encountered.

    offset = avio_rb32(s->pb);
    avio_seek(s->pb, offset, SEEK_SET);
    avio_skip(s->pb, 4);        /* previous-tag-size field before the first tag */

    s->start_time = 0;

    return 0;
}
 
/* Release any extradata still queued for delivery with the next packet. */
static int flv_read_close(AVFormatContext *s)
{
    FLVContext *flv = s->priv_data;
    int i;

    for (i = 0; i < FLV_STREAM_TYPE_NB; i++)
        av_freep(&flv->new_extradata[i]);

    return 0;
}
 
/**
 * Replace a stream's extradata with `size` bytes read from the input.
 *
 * @return 0 on success, a negative AVERROR code on allocation failure
 *         or truncated input
 */
static int flv_get_extradata(AVFormatContext *s, AVStream *st, int size)
{
    int ret;

    av_free(st->codec->extradata);
    if (ff_alloc_extradata(st->codec, size))
        return AVERROR(ENOMEM);

    /* BUGFIX: check the read result; a short read previously left a
     * partially filled, zero-padded extradata buffer and reported success */
    ret = avio_read(s->pb, st->codec->extradata, st->codec->extradata_size);
    if (ret != st->codec->extradata_size)
        return ret < 0 ? ret : AVERROR(EIO);
    return 0;
}
 
/**
 * Read `size` bytes of new extradata for the given stream type and queue
 * it so it can be attached to the next packet of that stream.
 *
 * @return 0 on success, a negative AVERROR code on allocation failure
 *         or truncated input (nothing is queued in that case)
 */
static int flv_queue_extradata(FLVContext *flv, AVIOContext *pb, int stream,
                               int size)
{
    int ret;

    av_free(flv->new_extradata[stream]);
    flv->new_extradata[stream] = av_mallocz(size +
                                            FF_INPUT_BUFFER_PADDING_SIZE);
    if (!flv->new_extradata[stream])
        return AVERROR(ENOMEM);

    /* BUGFIX: check the read result and only publish the buffer once it
     * is completely filled; a short read previously queued garbage */
    ret = avio_read(pb, flv->new_extradata[stream], size);
    if (ret != size) {
        av_freep(&flv->new_extradata[stream]);
        flv->new_extradata_size[stream] = 0;
        return ret < 0 ? ret : AVERROR(EIO);
    }
    flv->new_extradata_size[stream] = size;
    return 0;
}
 
/* Drop every index entry at or beyond `pos` on all streams; used when a
 * metadata-provided keyframe index disagrees with the actual tags. */
static void clear_index_entries(AVFormatContext *s, int64_t pos)
{
    int i;

    av_log(s, AV_LOG_WARNING,
           "Found invalid index entries, clearing the index.\n");
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        int j, kept = 0;

        /* compact in place, keeping only entries located before pos */
        for (j = 0; j < st->nb_index_entries; j++) {
            if (st->index_entries[j].pos < pos)
                st->index_entries[kept++] = st->index_entries[j];
        }
        st->nb_index_entries = kept;
    }
}
 
/* Skip a single AMF value of the given type without interpreting it,
 * recursing through objects and arrays. Returns 0 on success or
 * AVERROR_INVALIDDATA for an unknown type marker. */
static int amf_skip_tag(AVIOContext *pb, AMFDataType type)
{
    int nb = -1, ret, parse_name = 1;

    switch (type) {
    case AMF_DATA_TYPE_NUMBER:
        avio_skip(pb, 8);                 /* IEEE double */
        break;
    case AMF_DATA_TYPE_BOOL:
        avio_skip(pb, 1);
        break;
    case AMF_DATA_TYPE_STRING:
        avio_skip(pb, avio_rb16(pb));     /* 16-bit length prefix + bytes */
        break;
    case AMF_DATA_TYPE_ARRAY:
        parse_name = 0;
        /* fallthrough: strict arrays carry a count but no element names */
    case AMF_DATA_TYPE_MIXEDARRAY:
        nb = avio_rb32(pb);
        /* fallthrough: both array kinds are then skipped like objects */
    case AMF_DATA_TYPE_OBJECT:
        while(!pb->eof_reached && (nb-- > 0 || type != AMF_DATA_TYPE_ARRAY)) {
            if (parse_name) {
                int size = avio_rb16(pb);
                if (!size) {
                    /* zero-length name: the object-end marker follows */
                    avio_skip(pb, 1);
                    break;
                }
                avio_skip(pb, size);
            }
            if ((ret = amf_skip_tag(pb, avio_r8(pb))) < 0)
                return ret;
        }
        break;
    case AMF_DATA_TYPE_NULL:
    case AMF_DATA_TYPE_OBJECT_END:
        break;
    default:
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
 
/* Extract the "text" string of an onTextData script tag into a packet
 * on the data stream (creating that stream on first use). On any
 * failure the tag is skipped; in all cases the input is positioned
 * after the tag and its trailing previous-tag-size field. Returns the
 * packet size or a negative error code. */
static int flv_data_packet(AVFormatContext *s, AVPacket *pkt,
                           int64_t dts, int64_t next)
{
    AVIOContext *pb = s->pb;
    AVStream *st    = NULL;
    char buf[20];               /* holds AMF property names */
    int ret = AVERROR_INVALIDDATA;
    int i, length = -1;

    /* the payload must be an AMF object or a mixed array */
    switch (avio_r8(pb)) {
    case AMF_DATA_TYPE_MIXEDARRAY:
        avio_seek(pb, 4, SEEK_CUR);     /* skip the 32-bit element count */
        /* fallthrough */
    case AMF_DATA_TYPE_OBJECT:
        break;
    default:
        goto skip;
    }

    /* scan the properties for a string field named "text" */
    while ((ret = amf_get_string(pb, buf, sizeof(buf))) > 0) {
        AMFDataType type = avio_r8(pb);
        if (type == AMF_DATA_TYPE_STRING && !strcmp(buf, "text")) {
            length = avio_rb16(pb);
            ret    = av_get_packet(pb, pkt, length);
            if (ret < 0)
                goto skip;
            else
                break;
        } else {
            /* not the field we want: skip over its value */
            if ((ret = amf_skip_tag(pb, type)) < 0)
                goto skip;
        }
    }

    if (length < 0) {
        ret = AVERROR_INVALIDDATA;
        goto skip;
    }

    /* reuse an existing data stream if one exists */
    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_DATA)
            break;
    }

    if (i == s->nb_streams) {
        st = create_stream(s, AVMEDIA_TYPE_DATA);
        if (!st)
            return AVERROR_INVALIDDATA;
        st->codec->codec_id = AV_CODEC_ID_TEXT;
    }

    pkt->dts  = dts;
    pkt->pts  = dts;
    pkt->size = ret;

    pkt->stream_index = st->index;
    pkt->flags       |= AV_PKT_FLAG_KEY;

skip:
    avio_seek(s->pb, next + 4, SEEK_SET);

    return ret;
}
 
static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
FLVContext *flv = s->priv_data;
int ret, i, type, size, flags;
int stream_type=-1;
int64_t next, pos, meta_pos;
int64_t dts, pts = AV_NOPTS_VALUE;
int av_uninit(channels);
int av_uninit(sample_rate);
AVStream *st = NULL;
 
/* pkt size is repeated at end. skip it */
for (;; avio_skip(s->pb, 4)) {
pos = avio_tell(s->pb);
type = avio_r8(s->pb);
size = avio_rb24(s->pb);
dts = avio_rb24(s->pb);
dts |= avio_r8(s->pb) << 24;
av_dlog(s, "type:%d, size:%d, dts:%"PRId64"\n", type, size, dts);
if (url_feof(s->pb))
return AVERROR_EOF;
avio_skip(s->pb, 3); /* stream id, always 0 */
flags = 0;
 
if (flv->validate_next < flv->validate_count) {
int64_t validate_pos = flv->validate_index[flv->validate_next].pos;
if (pos == validate_pos) {
if (FFABS(dts - flv->validate_index[flv->validate_next].dts) <=
VALIDATE_INDEX_TS_THRESH) {
flv->validate_next++;
} else {
clear_index_entries(s, validate_pos);
flv->validate_count = 0;
}
} else if (pos > validate_pos) {
clear_index_entries(s, validate_pos);
flv->validate_count = 0;
}
}
 
if (size == 0)
continue;
 
next = size + avio_tell(s->pb);
 
if (type == FLV_TAG_TYPE_AUDIO) {
stream_type = FLV_STREAM_TYPE_AUDIO;
flags = avio_r8(s->pb);
size--;
} else if (type == FLV_TAG_TYPE_VIDEO) {
stream_type = FLV_STREAM_TYPE_VIDEO;
flags = avio_r8(s->pb);
size--;
if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_VIDEO_INFO_CMD)
goto skip;
} else if (type == FLV_TAG_TYPE_META) {
stream_type=FLV_STREAM_TYPE_DATA;
if (size > 13 + 1 + 4 && dts == 0) { // Header-type metadata stuff
meta_pos = avio_tell(s->pb);
if (flv_read_metabody(s, next) == 0) {
goto skip;
}
avio_seek(s->pb, meta_pos, SEEK_SET);
}
} else {
av_log(s, AV_LOG_DEBUG,
"skipping flv packet: type %d, size %d, flags %d\n",
type, size, flags);
skip:
avio_seek(s->pb, next, SEEK_SET);
continue;
}
 
/* skip empty data packets */
if (!size)
continue;
 
/* now find stream */
for (i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
if (stream_type == FLV_STREAM_TYPE_AUDIO) {
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
(s->audio_codec_id || flv_same_audio_codec(st->codec, flags)))
break;
} else if (stream_type == FLV_STREAM_TYPE_VIDEO) {
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
(s->video_codec_id || flv_same_video_codec(st->codec, flags)))
break;
} else if (stream_type == FLV_STREAM_TYPE_DATA) {
if (st->codec->codec_type == AVMEDIA_TYPE_DATA)
break;
}
}
if (i == s->nb_streams) {
static const enum AVMediaType stream_types[] = {AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_DATA};
av_log(s, AV_LOG_WARNING, "Stream discovered after head already parsed\n");
st = create_stream(s, stream_types[stream_type]);
if (!st)
return AVERROR(ENOMEM);
 
}
av_dlog(s, "%d %X %d \n", stream_type, flags, st->discard);
if ( (st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || (stream_type == FLV_STREAM_TYPE_AUDIO)))
||(st->discard >= AVDISCARD_BIDIR && ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && (stream_type == FLV_STREAM_TYPE_VIDEO)))
|| st->discard >= AVDISCARD_ALL
) {
avio_seek(s->pb, next, SEEK_SET);
continue;
}
if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || stream_type == FLV_STREAM_TYPE_AUDIO)
av_add_index_entry(st, pos, dts, size, 0, AVINDEX_KEYFRAME);
break;
}
 
// if not streamed and no duration from metadata then seek to end to find
// the duration from the timestamps
if (s->pb->seekable && (!s->duration || s->duration == AV_NOPTS_VALUE) && !flv->searched_for_end) {
int size;
const int64_t pos = avio_tell(s->pb);
int64_t fsize = avio_size(s->pb);
retry_duration:
avio_seek(s->pb, fsize - 4, SEEK_SET);
size = avio_rb32(s->pb);
avio_seek(s->pb, fsize - 3 - size, SEEK_SET);
if (size == avio_rb24(s->pb) + 11) {
uint32_t ts = avio_rb24(s->pb);
ts |= avio_r8(s->pb) << 24;
if (ts)
s->duration = ts * (int64_t)AV_TIME_BASE / 1000;
else if (fsize >= 8 && fsize - 8 >= size) {
fsize -= size+4;
goto retry_duration;
}
}
 
avio_seek(s->pb, pos, SEEK_SET);
flv->searched_for_end = 1;
}
 
if (stream_type == FLV_STREAM_TYPE_AUDIO) {
int bits_per_coded_sample;
channels = (flags & FLV_AUDIO_CHANNEL_MASK) == FLV_STEREO ? 2 : 1;
sample_rate = 44100 << ((flags & FLV_AUDIO_SAMPLERATE_MASK) >>
FLV_AUDIO_SAMPLERATE_OFFSET) >> 3;
bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
if (!st->codec->channels || !st->codec->sample_rate ||
!st->codec->bits_per_coded_sample) {
st->codec->channels = channels;
st->codec->channel_layout = channels == 1
? AV_CH_LAYOUT_MONO
: AV_CH_LAYOUT_STEREO;
st->codec->sample_rate = sample_rate;
st->codec->bits_per_coded_sample = bits_per_coded_sample;
}
if (!st->codec->codec_id) {
flv_set_audio_codec(s, st, st->codec,
flags & FLV_AUDIO_CODECID_MASK);
flv->last_sample_rate =
sample_rate = st->codec->sample_rate;
flv->last_channels =
channels = st->codec->channels;
} else {
AVCodecContext ctx;
ctx.sample_rate = sample_rate;
flv_set_audio_codec(s, st, &ctx, flags & FLV_AUDIO_CODECID_MASK);
sample_rate = ctx.sample_rate;
}
} else if (stream_type == FLV_STREAM_TYPE_VIDEO) {
size -= flv_set_video_codec(s, st, flags & FLV_VIDEO_CODECID_MASK, 1);
}
 
if (st->codec->codec_id == AV_CODEC_ID_AAC ||
st->codec->codec_id == AV_CODEC_ID_H264 ||
st->codec->codec_id == AV_CODEC_ID_MPEG4) {
int type = avio_r8(s->pb);
size--;
if (st->codec->codec_id == AV_CODEC_ID_H264 || st->codec->codec_id == AV_CODEC_ID_MPEG4) {
// sign extension
int32_t cts = (avio_rb24(s->pb) + 0xff800000) ^ 0xff800000;
pts = dts + cts;
if (cts < 0) { // dts are wrong
flv->wrong_dts = 1;
av_log(s, AV_LOG_WARNING,
"negative cts, previous timestamps might be wrong\n");
}
if (flv->wrong_dts)
dts = AV_NOPTS_VALUE;
}
if (type == 0 && (!st->codec->extradata || st->codec->codec_id == AV_CODEC_ID_AAC)) {
if (st->codec->extradata) {
if ((ret = flv_queue_extradata(flv, s->pb, stream_type, size)) < 0)
return ret;
ret = AVERROR(EAGAIN);
goto leave;
}
if ((ret = flv_get_extradata(s, st, size)) < 0)
return ret;
if (st->codec->codec_id == AV_CODEC_ID_AAC && 0) {
MPEG4AudioConfig cfg;
if (avpriv_mpeg4audio_get_config(&cfg, st->codec->extradata,
st->codec->extradata_size * 8, 1) >= 0) {
st->codec->channels = cfg.channels;
st->codec->channel_layout = 0;
if (cfg.ext_sample_rate)
st->codec->sample_rate = cfg.ext_sample_rate;
else
st->codec->sample_rate = cfg.sample_rate;
av_dlog(s, "mp4a config channels %d sample rate %d\n",
st->codec->channels, st->codec->sample_rate);
}
}
 
ret = AVERROR(EAGAIN);
goto leave;
}
}
 
/* skip empty data packets */
if (!size) {
ret = AVERROR(EAGAIN);
goto leave;
}
 
ret = av_get_packet(s->pb, pkt, size);
if (ret < 0)
return ret;
pkt->dts = dts;
pkt->pts = pts == AV_NOPTS_VALUE ? dts : pts;
pkt->stream_index = st->index;
if (flv->new_extradata[stream_type]) {
uint8_t *side = av_packet_new_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA,
flv->new_extradata_size[stream_type]);
if (side) {
memcpy(side, flv->new_extradata[stream_type],
flv->new_extradata_size[stream_type]);
av_freep(&flv->new_extradata[stream_type]);
flv->new_extradata_size[stream_type] = 0;
}
}
if (stream_type == FLV_STREAM_TYPE_AUDIO &&
(sample_rate != flv->last_sample_rate ||
channels != flv->last_channels)) {
flv->last_sample_rate = sample_rate;
flv->last_channels = channels;
ff_add_param_change(pkt, channels, 0, sample_rate, 0, 0);
}
 
if ( stream_type == FLV_STREAM_TYPE_AUDIO ||
((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY) ||
stream_type == FLV_STREAM_TYPE_DATA)
pkt->flags |= AV_PKT_FLAG_KEY;
 
leave:
avio_skip(s->pb, 4);
return ret;
}
 
/* Seek by delegating to the protocol layer. Any index-validation state
 * collected so far refers to pre-seek positions, so it is discarded. */
static int flv_read_seek(AVFormatContext *s, int stream_index,
                         int64_t ts, int flags)
{
    FLVContext *ctx = s->priv_data;

    ctx->validate_count = 0;

    return avio_seek_time(s->pb, stream_index, ts, flags);
}
 
#define OFFSET(x) offsetof(FLVContext, x)  /* byte offset of a field inside FLVContext */
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
/* Demuxer private options, exposed through the AVOption system. */
static const AVOption options[] = {
    /* When set, streams are created up front from the onMetaData array
     * instead of being discovered from actual audio/video tags. */
    { "flv_metadata", "Allocate streams according to the onMetaData array", OFFSET(trust_metadata), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VD },
    { NULL }
};
 
/* AVClass wiring so the private options above are visible to av_opt_*. */
static const AVClass flv_class = {
    .class_name = "flvdec",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Registration record for the FLV demuxer. */
AVInputFormat ff_flv_demuxer = {
    .name           = "flv",
    .long_name      = NULL_IF_CONFIG_SMALL("FLV (Flash Video)"),
    .priv_data_size = sizeof(FLVContext),
    .read_probe     = flv_probe,
    .read_header    = flv_read_header,
    .read_packet    = flv_read_packet,
    .read_seek      = flv_read_seek,
    .read_close     = flv_read_close,
    .extensions     = "flv",
    .priv_class     = &flv_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/flvenc.c
0,0 → 1,604
/*
* FLV muxer
* Copyright (c) 2003 The FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/intfloat.h"
#include "libavutil/avassert.h"
#include "avc.h"
#include "avformat.h"
#include "flv.h"
#include "internal.h"
#include "metadata.h"
 
 
/* AVCodecID -> FLV VideoTagHeader CodecID mapping; zero-terminated. */
static const AVCodecTag flv_video_codec_ids[] = {
    { AV_CODEC_ID_FLV1,     FLV_CODECID_H263 },
    { AV_CODEC_ID_H263,     FLV_CODECID_REALH263 },
    { AV_CODEC_ID_MPEG4,    FLV_CODECID_MPEG4 },
    { AV_CODEC_ID_FLASHSV,  FLV_CODECID_SCREEN },
    { AV_CODEC_ID_FLASHSV2, FLV_CODECID_SCREEN2 },
    { AV_CODEC_ID_VP6F,     FLV_CODECID_VP6 },
    { AV_CODEC_ID_VP6A,     FLV_CODECID_VP6A },
    { AV_CODEC_ID_H264,     FLV_CODECID_H264 },
    { AV_CODEC_ID_NONE,     0 }
};
 
/* AVCodecID -> FLV SoundFormat mapping. The FLV_CODECID_* constants are
 * stored pre-shifted into the flags byte, hence the down-shift here. */
static const AVCodecTag flv_audio_codec_ids[] = {
    { AV_CODEC_ID_MP3,        FLV_CODECID_MP3        >> FLV_AUDIO_CODECID_OFFSET },
    { AV_CODEC_ID_PCM_U8,     FLV_CODECID_PCM        >> FLV_AUDIO_CODECID_OFFSET },
    { AV_CODEC_ID_PCM_S16BE,  FLV_CODECID_PCM        >> FLV_AUDIO_CODECID_OFFSET },
    { AV_CODEC_ID_PCM_S16LE,  FLV_CODECID_PCM_LE     >> FLV_AUDIO_CODECID_OFFSET },
    { AV_CODEC_ID_ADPCM_SWF,  FLV_CODECID_ADPCM      >> FLV_AUDIO_CODECID_OFFSET },
    { AV_CODEC_ID_AAC,        FLV_CODECID_AAC        >> FLV_AUDIO_CODECID_OFFSET },
    { AV_CODEC_ID_NELLYMOSER, FLV_CODECID_NELLYMOSER >> FLV_AUDIO_CODECID_OFFSET },
    { AV_CODEC_ID_PCM_MULAW,  FLV_CODECID_PCM_MULAW  >> FLV_AUDIO_CODECID_OFFSET },
    { AV_CODEC_ID_PCM_ALAW,   FLV_CODECID_PCM_ALAW   >> FLV_AUDIO_CODECID_OFFSET },
    { AV_CODEC_ID_NONE,       0 }
};
 
/* Muxer state: file offsets of AMF placeholders that the trailer patches,
 * plus running duration/delay bookkeeping. */
typedef struct FLVContext {
    int reserved;            /* stream-id value written into every tag header (5 for codec_tag==5 streams, else 0) */
    int64_t duration_offset; /* file position of the "duration" AMF number, patched in the trailer */
    int64_t filesize_offset; /* file position of the "filesize" AMF number, patched in the trailer */
    int64_t duration;        /* max (pts + delay + duration) seen so far, in ms */
    int64_t delay; ///< first dts delay (needed for AVC & Speex)
} FLVContext;
 
/* Per-stream muxer state. */
typedef struct FLVStreamContext {
    int64_t last_ts; ///< last timestamp for each stream
} FLVStreamContext;
 
/**
 * Build the FLV audio-tag flags byte (codec id | sample rate | sample
 * size | channel layout) for the given codec parameters.
 *
 * @return the flags byte (>= 0), or a negative AVERROR code if the
 *         codec/sample-rate/channel combination cannot be stored in FLV.
 */
static int get_audio_flags(AVFormatContext *s, AVCodecContext *enc)
{
    int flags = (enc->bits_per_coded_sample == 16) ? FLV_SAMPLESSIZE_16BIT
                                                   : FLV_SAMPLESSIZE_8BIT;

    if (enc->codec_id == AV_CODEC_ID_AAC) // specs force these parameters
        return FLV_CODECID_AAC | FLV_SAMPLERATE_44100HZ |
               FLV_SAMPLESSIZE_16BIT | FLV_STEREO;
    else if (enc->codec_id == AV_CODEC_ID_SPEEX) {
        if (enc->sample_rate != 16000) {
            av_log(s, AV_LOG_ERROR,
                   "FLV only supports wideband (16kHz) Speex audio\n");
            return AVERROR(EINVAL);
        }
        if (enc->channels != 1) {
            av_log(s, AV_LOG_ERROR, "FLV only supports mono Speex audio\n");
            return AVERROR(EINVAL);
        }
        return FLV_CODECID_SPEEX | FLV_SAMPLERATE_11025HZ | FLV_SAMPLESSIZE_16BIT;
    } else {
        switch (enc->sample_rate) {
        case 44100:
            flags |= FLV_SAMPLERATE_44100HZ;
            break;
        case 22050:
            flags |= FLV_SAMPLERATE_22050HZ;
            break;
        case 11025:
            flags |= FLV_SAMPLERATE_11025HZ;
            break;
        case 16000: // nellymoser only
        case 8000: // nellymoser only
        case 5512: // not MP3
            if (enc->codec_id != AV_CODEC_ID_MP3) {
                flags |= FLV_SAMPLERATE_SPECIAL;
                break;
            }
            /* intentional fallthrough: MP3 at these rates is rejected */
        default:
            av_log(s, AV_LOG_ERROR,
                   "FLV does not support sample rate %d, "
                   "choose from (44100, 22050, 11025)\n", enc->sample_rate);
            return AVERROR(EINVAL);
        }
    }

    if (enc->channels > 1)
        flags |= FLV_STEREO;

    switch (enc->codec_id) {
    case AV_CODEC_ID_MP3:
        flags |= FLV_CODECID_MP3 | FLV_SAMPLESSIZE_16BIT;
        break;
    case AV_CODEC_ID_PCM_U8:
        flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_8BIT;
        break;
    case AV_CODEC_ID_PCM_S16BE:
        flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_16BIT;
        break;
    case AV_CODEC_ID_PCM_S16LE:
        flags |= FLV_CODECID_PCM_LE | FLV_SAMPLESSIZE_16BIT;
        break;
    case AV_CODEC_ID_ADPCM_SWF:
        flags |= FLV_CODECID_ADPCM | FLV_SAMPLESSIZE_16BIT;
        break;
    case AV_CODEC_ID_NELLYMOSER:
        if (enc->sample_rate == 8000)
            flags |= FLV_CODECID_NELLYMOSER_8KHZ_MONO | FLV_SAMPLESSIZE_16BIT;
        else if (enc->sample_rate == 16000)
            flags |= FLV_CODECID_NELLYMOSER_16KHZ_MONO | FLV_SAMPLESSIZE_16BIT;
        else
            flags |= FLV_CODECID_NELLYMOSER | FLV_SAMPLESSIZE_16BIT;
        break;
    case AV_CODEC_ID_PCM_MULAW:
        /* note: '=' not '|=' — these codec ids fully define the flags byte */
        flags = FLV_CODECID_PCM_MULAW | FLV_SAMPLERATE_SPECIAL | FLV_SAMPLESSIZE_16BIT;
        break;
    case AV_CODEC_ID_PCM_ALAW:
        flags = FLV_CODECID_PCM_ALAW | FLV_SAMPLERATE_SPECIAL | FLV_SAMPLESSIZE_16BIT;
        break;
    case 0:
        /* unknown codec id: trust the caller-provided codec_tag */
        flags |= enc->codec_tag << 4;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "Audio codec '%s' not compatible with FLV\n",
               avcodec_get_name(enc->codec_id));
        return AVERROR(EINVAL);
    }

    return flags;
}
 
/* Write an AMF0 short string: 16-bit big-endian length, then the bytes
 * (no terminator). NOTE(review): a length >= 1<<16 would wrap the
 * 16-bit field while still writing all the bytes; callers only pass
 * short metadata keys/values here — confirm before reusing elsewhere. */
static void put_amf_string(AVIOContext *pb, const char *str)
{
    const size_t n = strlen(str);

    avio_wb16(pb, n);
    avio_write(pb, str, n);
}
 
/* Emit a complete FLV video tag carrying an AVC "end of sequence"
 * marker at the given millisecond timestamp. */
static void put_avc_eos_tag(AVIOContext *pb, unsigned ts)
{
    avio_w8(pb, FLV_TAG_TYPE_VIDEO);
    avio_wb24(pb, 5);               /* tag data size */
    avio_wb24(pb, ts);              /* low 24 bits of the ms timestamp */
    avio_w8(pb, (ts >> 24) & 0x7F); /* extended (high) timestamp byte */
    avio_wb24(pb, 0);               /* stream id, always 0 */
    avio_w8(pb, 23);                /* FrameType = 1 (key), CodecId = 7 (AVC) */
    avio_w8(pb, 2);                 /* AVCPacketType = end of sequence */
    avio_wb24(pb, 0);               /* composition time, 0 for EOS */
    avio_wb32(pb, 16);              /* previous tag size */
}
 
/* Write an AMF0 number: type marker followed by the big-endian
 * IEEE-754 bit pattern of the double. */
static void put_amf_double(AVIOContext *pb, double d)
{
    const uint64_t bits = av_double2int(d);

    avio_w8(pb, AMF_DATA_TYPE_NUMBER);
    avio_wb64(pb, bits);
}
 
/* Write an AMF0 boolean, normalizing any nonzero value to 1. */
static void put_amf_bool(AVIOContext *pb, int b)
{
    avio_w8(pb, AMF_DATA_TYPE_BOOL);
    avio_w8(pb, b ? 1 : 0);
}
 
/**
 * Write the FLV file header.
 *
 * Validates the stream layout (at most one audio, one video and one
 * AV_CODEC_ID_TEXT data stream), writes the 9-byte FLV header, an
 * onMetaData script tag whose "duration"/"filesize" numbers are
 * placeholders patched later (offsets saved in FLVContext), and the
 * AAC/AVC sequence-header tags built from each codec's extradata.
 *
 * @return 0 on success, a negative AVERROR code on invalid input.
 */
static int flv_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    AVCodecContext *audio_enc = NULL, *video_enc = NULL, *data_enc = NULL;
    int i, metadata_count = 0;
    double framerate = 0.0;
    int64_t metadata_size_pos, data_size, metadata_count_pos;
    AVDictionaryEntry *tag = NULL;

    /* First pass: validate streams and remember the codec contexts. */
    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        FLVStreamContext *sc;
        switch (enc->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            if (s->streams[i]->avg_frame_rate.den &&
                s->streams[i]->avg_frame_rate.num) {
                framerate = av_q2d(s->streams[i]->avg_frame_rate);
            } else {
                /* fall back to the inverse of the codec time base */
                framerate = 1 / av_q2d(s->streams[i]->codec->time_base);
            }
            if (video_enc) {
                av_log(s, AV_LOG_ERROR,
                       "at most one video stream is supported in flv\n");
                return AVERROR(EINVAL);
            }
            video_enc = enc;
            if (enc->codec_tag == 0) {
                av_log(s, AV_LOG_ERROR, "Video codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(enc->codec_id), i);
                return AVERROR(EINVAL);
            }
            break;
        case AVMEDIA_TYPE_AUDIO:
            if (audio_enc) {
                av_log(s, AV_LOG_ERROR,
                       "at most one audio stream is supported in flv\n");
                return AVERROR(EINVAL);
            }
            audio_enc = enc;
            if (get_audio_flags(s, enc) < 0)
                return AVERROR_INVALIDDATA;
            break;
        case AVMEDIA_TYPE_DATA:
            if (enc->codec_id != AV_CODEC_ID_TEXT) {
                av_log(s, AV_LOG_ERROR, "Data codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(enc->codec_id), i);
                return AVERROR_INVALIDDATA;
            }
            data_enc = enc;
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Codec type '%s' for stream %d is not compatible with FLV\n",
                   av_get_media_type_string(enc->codec_type), i);
            return AVERROR(EINVAL);
        }
        avpriv_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */

        sc = av_mallocz(sizeof(FLVStreamContext));
        if (!sc)
            return AVERROR(ENOMEM);
        s->streams[i]->priv_data = sc;
        sc->last_ts = -1;
    }

    flv->delay = AV_NOPTS_VALUE;

    /* FLV header: signature, version, audio/video presence flags,
     * header size (9), and the first "previous tag size" (0). */
    avio_write(pb, "FLV", 3);
    avio_w8(pb, 1);
    avio_w8(pb, FLV_HEADER_FLAG_HASAUDIO * !!audio_enc +
                FLV_HEADER_FLAG_HASVIDEO * !!video_enc);
    avio_wb32(pb, 9);
    avio_wb32(pb, 0);

    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->codec->codec_tag == 5) {
            avio_w8(pb, 8);     // message type
            avio_wb24(pb, 0);   // include flags
            avio_wb24(pb, 0);   // time stamp
            avio_wb32(pb, 0);   // reserved
            avio_wb32(pb, 11);  // size
            flv->reserved = 5;
        }

    /* write meta_tag */
    avio_w8(pb, 18);            // tag type META
    metadata_size_pos = avio_tell(pb);
    avio_wb24(pb, 0);           // size of data part (sum of all parts below)
    avio_wb24(pb, 0);           // timestamp
    avio_wb32(pb, 0);           // reserved

    /* now data of data_size size */

    /* first event name as a string */
    avio_w8(pb, AMF_DATA_TYPE_STRING);
    put_amf_string(pb, "onMetaData"); // 12 bytes

    /* mixed array (hash) with size and string/type/data tuples */
    avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY);
    metadata_count_pos = avio_tell(pb);
    /* provisional element count; re-written below once user metadata
     * tags have been counted */
    metadata_count = 5 * !!video_enc +
                     5 * !!audio_enc +
                     1 * !!data_enc  +
                     2; // +2 for duration and file size

    avio_wb32(pb, metadata_count);

    put_amf_string(pb, "duration");
    flv->duration_offset= avio_tell(pb);

    // fill in the guessed duration, it'll be corrected later if incorrect
    // NOTE(review): integer division truncates to whole seconds; the
    // trailer overwrites this value with the exact duration anyway
    put_amf_double(pb, s->duration / AV_TIME_BASE);

    if (video_enc) {
        put_amf_string(pb, "width");
        put_amf_double(pb, video_enc->width);

        put_amf_string(pb, "height");
        put_amf_double(pb, video_enc->height);

        put_amf_string(pb, "videodatarate");
        put_amf_double(pb, video_enc->bit_rate / 1024.0);

        put_amf_string(pb, "framerate");
        put_amf_double(pb, framerate);

        put_amf_string(pb, "videocodecid");
        put_amf_double(pb, video_enc->codec_tag);
    }

    if (audio_enc) {
        put_amf_string(pb, "audiodatarate");
        put_amf_double(pb, audio_enc->bit_rate / 1024.0);

        put_amf_string(pb, "audiosamplerate");
        put_amf_double(pb, audio_enc->sample_rate);

        put_amf_string(pb, "audiosamplesize");
        put_amf_double(pb, audio_enc->codec_id == AV_CODEC_ID_PCM_U8 ? 8 : 16);

        put_amf_string(pb, "stereo");
        put_amf_bool(pb, audio_enc->channels == 2);

        put_amf_string(pb, "audiocodecid");
        put_amf_double(pb, audio_enc->codec_tag);
    }

    if (data_enc) {
        put_amf_string(pb, "datastream");
        put_amf_double(pb, 0.0);
    }

    /* Copy user metadata, skipping keys that would clash with the
     * structural values written above. */
    while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        if(   !strcmp(tag->key, "width")
            ||!strcmp(tag->key, "height")
            ||!strcmp(tag->key, "videodatarate")
            ||!strcmp(tag->key, "framerate")
            ||!strcmp(tag->key, "videocodecid")
            ||!strcmp(tag->key, "audiodatarate")
            ||!strcmp(tag->key, "audiosamplerate")
            ||!strcmp(tag->key, "audiosamplesize")
            ||!strcmp(tag->key, "stereo")
            ||!strcmp(tag->key, "audiocodecid")
            ||!strcmp(tag->key, "duration")
            ||!strcmp(tag->key, "onMetaData")
        ){
            av_log(s, AV_LOG_DEBUG, "Ignoring metadata for %s\n", tag->key);
            continue;
        }
        put_amf_string(pb, tag->key);
        avio_w8(pb, AMF_DATA_TYPE_STRING);
        put_amf_string(pb, tag->value);
        metadata_count++;
    }

    put_amf_string(pb, "filesize");
    flv->filesize_offset = avio_tell(pb);
    put_amf_double(pb, 0); // delayed write

    put_amf_string(pb, "");
    avio_w8(pb, AMF_END_OF_OBJECT);

    /* write total size of tag */
    data_size = avio_tell(pb) - metadata_size_pos - 10;

    /* patch the element count and the 24-bit tag size, then write the
     * 32-bit "previous tag size" after the tag */
    avio_seek(pb, metadata_count_pos, SEEK_SET);
    avio_wb32(pb, metadata_count);

    avio_seek(pb, metadata_size_pos, SEEK_SET);
    avio_wb24(pb, data_size);
    avio_skip(pb, data_size + 10 - 3);
    avio_wb32(pb, data_size + 11);

    /* Sequence-header tags (AudioSpecificConfig / AVCDecoderConfigurationRecord)
     * so decoders can initialize before the first media tag. */
    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_id == AV_CODEC_ID_AAC || enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
            int64_t pos;
            avio_w8(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ?
                        FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO);
            avio_wb24(pb, 0); // size patched later
            avio_wb24(pb, 0); // ts
            avio_w8(pb, 0);   // ts ext
            avio_wb24(pb, 0); // streamid
            pos = avio_tell(pb);
            if (enc->codec_id == AV_CODEC_ID_AAC) {
                avio_w8(pb, get_audio_flags(s, enc));
                avio_w8(pb, 0); // AAC sequence header
                avio_write(pb, enc->extradata, enc->extradata_size);
            } else {
                avio_w8(pb, enc->codec_tag | FLV_FRAME_KEY); // flags
                avio_w8(pb, 0);   // AVC sequence header
                avio_wb24(pb, 0); // composition time
                ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size);
            }
            data_size = avio_tell(pb) - pos;
            avio_seek(pb, -data_size - 10, SEEK_CUR);
            avio_wb24(pb, data_size);
            avio_skip(pb, data_size + 10 - 3);
            avio_wb32(pb, data_size + 11); // previous tag size
        }
    }

    return 0;
}
 
/* Finalize the file: emit AVC end-of-sequence tags, then seek back and
 * patch the duration/filesize placeholders left by flv_write_header(). */
static int flv_write_trailer(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    int64_t end_pos;
    int i;

    /* Add EOS tag for every H.264/MPEG-4 video stream. */
    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc  = s->streams[i]->codec;
        FLVStreamContext *sc = s->streams[i]->priv_data;

        if (enc->codec_type != AVMEDIA_TYPE_VIDEO)
            continue;
        if (enc->codec_id == AV_CODEC_ID_H264 ||
            enc->codec_id == AV_CODEC_ID_MPEG4)
            put_avc_eos_tag(pb, sc->last_ts);
    }

    end_pos = avio_tell(pb);

    /* Patch the header; warn (but keep going) if the output is not seekable. */
    if (avio_seek(pb, flv->duration_offset, SEEK_SET) < 0)
        av_log(s, AV_LOG_WARNING, "Failed to update header with correct duration.\n");
    else
        put_amf_double(pb, flv->duration / (double)1000);

    if (avio_seek(pb, flv->filesize_offset, SEEK_SET) < 0)
        av_log(s, AV_LOG_WARNING, "Failed to update header with correct filesize.\n");
    else
        put_amf_double(pb, end_pos);

    avio_seek(pb, end_pos, SEEK_SET);
    return 0;
}
 
/**
 * Write one AVPacket as a complete FLV tag.
 *
 * Layout: 1-byte tag type, 24-bit payload size, 24+8-bit millisecond
 * timestamp, 24-bit stream id, codec-specific flag/prefix bytes, the
 * payload, and a trailing 32-bit "previous tag size".
 *
 * The first packet's dts defines flv->delay, which is added to every
 * subsequent timestamp so stored timestamps start at zero.
 *
 * Fix: metadata_size_pos was a plain int while avio_tell() returns
 * int64_t, so data tags written past the 2 GiB mark truncated the saved
 * position and patched the size field at the wrong offset; it is now
 * int64_t.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
    FLVContext *flv = s->priv_data;
    FLVStreamContext *sc = s->streams[pkt->stream_index]->priv_data;
    unsigned ts;
    int size = pkt->size;
    uint8_t *data = NULL;
    int flags = -1, flags_size, ret;

    /* number of codec-specific header bytes preceding the payload */
    if (enc->codec_id == AV_CODEC_ID_VP6F || enc->codec_id == AV_CODEC_ID_VP6A ||
        enc->codec_id == AV_CODEC_ID_AAC)
        flags_size = 2;
    else if (enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4)
        flags_size = 5;
    else
        flags_size = 1;

    switch (enc->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        avio_w8(pb, FLV_TAG_TYPE_VIDEO);

        flags = enc->codec_tag;
        if (flags == 0) {
            av_log(s, AV_LOG_ERROR,
                   "Video codec '%s' is not compatible with FLV\n",
                   avcodec_get_name(enc->codec_id));
            return AVERROR(EINVAL);
        }

        flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
        break;
    case AVMEDIA_TYPE_AUDIO:
        flags = get_audio_flags(s, enc);

        av_assert0(size);

        avio_w8(pb, FLV_TAG_TYPE_AUDIO);
        break;
    case AVMEDIA_TYPE_DATA:
        avio_w8(pb, FLV_TAG_TYPE_META);
        break;
    default:
        return AVERROR(EINVAL);
    }

    if (enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
        /* check if extradata looks like mp4 formated; if so, convert the
         * packet from length-prefixed NALUs */
        if (enc->extradata_size > 0 && *(uint8_t*)enc->extradata != 1)
            if ((ret = ff_avc_parse_nal_units_buf(pkt->data, &data, &size)) < 0)
                return ret;
    } else if (enc->codec_id == AV_CODEC_ID_AAC && pkt->size > 2 &&
               (AV_RB16(pkt->data) & 0xfff0) == 0xfff0) {
        /* ADTS syncword found: FLV requires raw AAC */
        if (!s->streams[pkt->stream_index]->nb_frames) {
            av_log(s, AV_LOG_ERROR, "Malformed AAC bitstream detected: "
                   "use audio bitstream filter 'aac_adtstoasc' to fix it "
                   "('-bsf:a aac_adtstoasc' option with ffmpeg)\n");
            return AVERROR_INVALIDDATA;
        }
        av_log(s, AV_LOG_WARNING, "aac bitstream error\n");
    }

    if (flv->delay == AV_NOPTS_VALUE)
        flv->delay = -pkt->dts;

    if (pkt->dts < -flv->delay) {
        av_log(s, AV_LOG_WARNING,
               "Packets are not in the proper order with respect to DTS\n");
        return AVERROR(EINVAL);
    }

    ts = pkt->dts + flv->delay; // add delay to force positive dts

    /* check Speex packet duration */
    if (enc->codec_id == AV_CODEC_ID_SPEEX && ts - sc->last_ts > 160)
        av_log(s, AV_LOG_WARNING, "Warning: Speex stream has more than "
                                  "8 frames per packet. Adobe Flash "
                                  "Player cannot handle this!\n");

    if (sc->last_ts < ts)
        sc->last_ts = ts;

    /* tag header */
    avio_wb24(pb, size + flags_size);
    avio_wb24(pb, ts);
    avio_w8(pb, (ts >> 24) & 0x7F); // timestamps are 32 bits _signed_
    avio_wb24(pb, flv->reserved);

    if (enc->codec_type == AVMEDIA_TYPE_DATA) {
        int data_size;
        /* int64_t: avio_tell() positions do not fit in int past 2 GiB */
        int64_t metadata_size_pos = avio_tell(pb);
        avio_w8(pb, AMF_DATA_TYPE_STRING);
        put_amf_string(pb, "onTextData");
        avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY);
        avio_wb32(pb, 2);
        put_amf_string(pb, "type");
        avio_w8(pb, AMF_DATA_TYPE_STRING);
        put_amf_string(pb, "Text");
        put_amf_string(pb, "text");
        avio_w8(pb, AMF_DATA_TYPE_STRING);
        put_amf_string(pb, pkt->data);
        put_amf_string(pb, "");
        avio_w8(pb, AMF_END_OF_OBJECT);
        /* write total size of tag */
        data_size = avio_tell(pb) - metadata_size_pos;
        avio_seek(pb, metadata_size_pos - 10, SEEK_SET);
        avio_wb24(pb, data_size);
        avio_seek(pb, data_size + 10 - 3, SEEK_CUR);
        avio_wb32(pb, data_size + 11);
    } else {
        av_assert1(flags >= 0);
        avio_w8(pb, flags);
        if (enc->codec_id == AV_CODEC_ID_VP6F || enc->codec_id == AV_CODEC_ID_VP6A) {
            /* VP6 adjustment byte: stored extradata byte, or crop values
             * derived from the 16-pixel-aligned dimensions */
            if (enc->extradata_size)
                avio_w8(pb, enc->extradata[0]);
            else
                avio_w8(pb, ((FFALIGN(enc->width, 16) - enc->width) << 4) |
                            (FFALIGN(enc->height, 16) - enc->height));
        } else if (enc->codec_id == AV_CODEC_ID_AAC)
            avio_w8(pb, 1); // AAC raw
        else if (enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
            avio_w8(pb, 1); // AVC NALU
            avio_wb24(pb, pkt->pts - pkt->dts); // composition time offset
        }

        avio_write(pb, data ? data : pkt->data, size);

        avio_wb32(pb, size + flags_size + 11); // previous tag size
        flv->duration = FFMAX(flv->duration,
                              pkt->pts + flv->delay + pkt->duration);
    }

    av_free(data);

    return pb->error;
}
 
/* Registration record for the FLV muxer. */
AVOutputFormat ff_flv_muxer = {
    .name           = "flv",
    .long_name      = NULL_IF_CONFIG_SMALL("FLV (Flash Video)"),
    .mime_type      = "video/x-flv",
    .extensions     = "flv",
    .priv_data_size = sizeof(FLVContext),
    /* default audio codec depends on whether libmp3lame was built in */
    .audio_codec    = CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_ADPCM_SWF,
    .video_codec    = AV_CODEC_ID_FLV1,
    .write_header   = flv_write_header,
    .write_packet   = flv_write_packet,
    .write_trailer  = flv_write_trailer,
    .codec_tag      = (const AVCodecTag* const []) {
                          flv_video_codec_ids, flv_audio_codec_ids, 0
                      },
    .flags          = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS |
                      AVFMT_TS_NONSTRICT,
};
/contrib/sdk/sources/ffmpeg/libavformat/format.c
0,0 → 1,184
/*
* Format register and lookup
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "libavutil/atomic.h"
#include "libavutil/avstring.h"
 
/**
* @file
* Format register and lookup
*/
/** head of registered input format linked list */
static AVInputFormat *first_iformat = NULL;
/** head of registered output format linked list */
static AVOutputFormat *first_oformat = NULL;
 
/* Iterate the registered demuxers: NULL starts the walk, otherwise
 * return the entry following f. */
AVInputFormat *av_iformat_next(AVInputFormat *f)
{
    return f ? f->next : first_iformat;
}
 
/* Iterate the registered muxers: NULL starts the walk, otherwise
 * return the entry following f. */
AVOutputFormat *av_oformat_next(AVOutputFormat *f)
{
    return f ? f->next : first_oformat;
}
 
/**
 * Append a demuxer to the global registration list.
 *
 * Lock-free: avpriv_atomic_ptr_cas() tries to swap the NULL tail for
 * the new node; if another thread appended concurrently the CAS fails
 * (returns the non-NULL current value) and we advance to the new tail
 * and retry.
 */
void av_register_input_format(AVInputFormat *format)
{
    AVInputFormat **p = &first_iformat;

    /* terminate the new node before publishing it */
    format->next = NULL;
    while(avpriv_atomic_ptr_cas((void * volatile *)p, NULL, format))
        p = &(*p)->next;
}
 
/**
 * Append a muxer to the global registration list.
 *
 * Same lock-free CAS-append scheme as av_register_input_format().
 */
void av_register_output_format(AVOutputFormat *format)
{
    AVOutputFormat **p = &first_oformat;

    /* terminate the new node before publishing it */
    format->next = NULL;
    while(avpriv_atomic_ptr_cas((void * volatile *)p, NULL, format))
        p = &(*p)->next;
}
 
/**
 * Check whether filename's extension matches one of the entries in a
 * comma-separated extension list (case-insensitive).
 *
 * Fix: a NULL extensions list was dereferenced when the filename
 * contained a '.'; it is now treated as "no match", mirroring the
 * existing NULL-filename guard.
 *
 * @param filename   file name to test; may be NULL (no match)
 * @param extensions comma-separated list, e.g. "mp3,ogg"; may be NULL
 * @return 1 on match, 0 otherwise
 */
int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename || !extensions)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for (;;) {
            /* copy the next list entry into ext1 (bounded, NUL-terminated) */
            q = ext1;
            while (*p != '\0' && *p != ',' && q - ext1 < sizeof(ext1) - 1)
                *q++ = *p++;
            *q = '\0';
            if (!av_strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++; /* skip the comma */
        }
    }
    return 0;
}
 
/* Case-insensitively match name against a comma-separated list of
 * format names. Comparing over max(entry length, strlen(name)) forces
 * an exact whole-entry match rather than a prefix match. */
static int match_format(const char *name, const char *names)
{
    const char *sep;
    int namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    for (sep = strchr(names, ','); sep; names = sep + 1, sep = strchr(names, ',')) {
        int cmp_len = FFMAX(sep - names, namelen);
        if (!av_strncasecmp(name, names, cmp_len))
            return 1;
    }
    /* last (or only) entry has no trailing comma */
    return !av_strcasecmp(name, names);
}
 
/* Pick the best-matching registered muxer by scoring each candidate:
 * short name (100) > MIME type (10) > filename extension (5). */
AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
                                const char *mime_type)
{
    AVOutputFormat *cur = NULL, *best = NULL;
    int best_score = 0;

    /* specific test for image sequences */
#if CONFIG_IMAGE2_MUXER
    if (!short_name && filename &&
        av_filename_number_test(filename) &&
        ff_guess_image2_codec(filename) != AV_CODEC_ID_NONE) {
        return av_guess_format("image2", NULL, NULL);
    }
#endif
    /* Find the proper file type. */
    while ((cur = av_oformat_next(cur))) {
        int score = 0;

        if (cur->name && short_name && match_format(short_name, cur->name))
            score += 100;
        if (cur->mime_type && mime_type && !strcmp(cur->mime_type, mime_type))
            score += 10;
        if (filename && cur->extensions &&
            av_match_ext(filename, cur->extensions))
            score += 5;

        if (score > best_score) {
            best_score = score;
            best       = cur;
        }
    }
    return best;
}
 
/* Return the default codec of the given media type for a muxer.
 * Segment muxers delegate to the format guessed from the filename;
 * image2 muxers pick the image codec implied by the filename. */
enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
                              const char *filename, const char *mime_type,
                              enum AVMediaType type)
{
    if (!strcmp(fmt->name, "segment") || !strcmp(fmt->name, "ssegment"))
        fmt = av_guess_format(NULL, filename, NULL);

    switch (type) {
    case AVMEDIA_TYPE_VIDEO: {
        enum AVCodecID codec_id = AV_CODEC_ID_NONE;

#if CONFIG_IMAGE2_MUXER
        if (!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe"))
            codec_id = ff_guess_image2_codec(filename);
#endif
        return codec_id == AV_CODEC_ID_NONE ? fmt->video_codec : codec_id;
    }
    case AVMEDIA_TYPE_AUDIO:
        return fmt->audio_codec;
    case AVMEDIA_TYPE_SUBTITLE:
        return fmt->subtitle_codec;
    default:
        return AV_CODEC_ID_NONE;
    }
}
 
/* Look up a registered demuxer by (comma-separated) short name. */
AVInputFormat *av_find_input_format(const char *short_name)
{
    AVInputFormat *fmt;

    for (fmt = av_iformat_next(NULL); fmt; fmt = av_iformat_next(fmt))
        if (match_format(short_name, fmt->name))
            return fmt;
    return NULL;
}
/contrib/sdk/sources/ffmpeg/libavformat/framecrcenc.c
0,0 → 1,70
/*
* frame CRC encoder (for codec/format testing)
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/adler32.h"
#include "libavutil/avstring.h"
#include "avformat.h"
#include "internal.h"
 
/* Print one line per packet: stream index, dts, pts, duration, size and
 * an Adler-32 of the payload, plus flags and side-data checksums when
 * present. Used by the FATE test harness. */
static int framecrc_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    char line[256];
    uint32_t payload_crc = av_adler32_update(0, pkt->data, pkt->size);

    snprintf(line, sizeof(line), "%d, %10"PRId64", %10"PRId64", %8d, %8d, 0x%08x",
             pkt->stream_index, pkt->dts, pkt->pts, pkt->duration, pkt->size,
             payload_crc);

    if (pkt->flags != AV_PKT_FLAG_KEY)
        av_strlcatf(line, sizeof(line), ", F=0x%0X", pkt->flags);

    if (pkt->side_data_elems) {
        int i;

        av_strlcatf(line, sizeof(line), ", S=%d", pkt->side_data_elems);

        for (i = 0; i < pkt->side_data_elems; i++) {
            uint32_t sd_crc;

            if (HAVE_BIGENDIAN && AV_PKT_DATA_PALETTE == pkt->side_data[i].type) {
                /* palettes are native-endian; checksum byte-swapped so
                 * big- and little-endian hosts produce the same value */
                int j;
                sd_crc = 0;
                for (j = 0; j < pkt->side_data[i].size; j++)
                    sd_crc = av_adler32_update(sd_crc,
                                               pkt->side_data[i].data + (j ^ 3),
                                               1);
            } else {
                sd_crc = av_adler32_update(0,
                                           pkt->side_data[i].data,
                                           pkt->side_data[i].size);
            }
            av_strlcatf(line, sizeof(line), ", %8d, 0x%08x",
                        pkt->side_data[i].size, sd_crc);
        }
    }
    av_strlcatf(line, sizeof(line), "\n");
    avio_write(s->pb, line, strlen(line));
    return 0;
}
 
/* Registration record for the framecrc testing muxer. */
AVOutputFormat ff_framecrc_muxer = {
    .name         = "framecrc",
    .long_name    = NULL_IF_CONFIG_SMALL("framecrc testing"),
    .audio_codec  = AV_CODEC_ID_PCM_S16LE,
    .video_codec  = AV_CODEC_ID_RAWVIDEO,
    .write_header = ff_framehash_write_header,
    .write_packet = framecrc_write_packet,
    .flags        = AVFMT_VARIABLE_FPS | AVFMT_TS_NONSTRICT |
                    AVFMT_TS_NEGATIVE,
};
/contrib/sdk/sources/ffmpeg/libavformat/framehash.c
0,0 → 1,36
/*
* Common functions for the frame{crc,md5} muxers
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "internal.h"
 
/* Shared header writer for the frame{crc,md5} muxers: emit a software
 * banner (unless bitexact) and one "#tb" time-base line per stream. */
int ff_framehash_write_header(AVFormatContext *s)
{
    int idx;

    if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT))
        avio_printf(s->pb, "#software: %s\n", LIBAVFORMAT_IDENT);

    for (idx = 0; idx < s->nb_streams; idx++) {
        AVStream *stream = s->streams[idx];

        avpriv_set_pts_info(stream, 64, stream->codec->time_base.num,
                            stream->codec->time_base.den);
        avio_printf(s->pb, "#tb %d: %d/%d\n", idx,
                    stream->time_base.num, stream->time_base.den);
        avio_flush(s->pb);
    }
    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/frmdec.c
0,0 → 1,110
/*
* Megalux Frame demuxer
* Copyright (c) 2010 Peter Ross <pross@xvid.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Megalux Frame demuxer
*/
 
#include "libavcodec/raw.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
 
/* Mapping from the Megalux Frame header's format byte to a pixel format.
 * The { AV_PIX_FMT_NONE, 0 } entry terminates the table for
 * avpriv_find_pix_fmt(). */
static const PixelFormatTag frm_pix_fmt_tags[] = {
    { AV_PIX_FMT_RGB555, 1 },
    { AV_PIX_FMT_RGB0,   2 },
    { AV_PIX_FMT_RGB24,  3 },
    { AV_PIX_FMT_BGR0,   4 },
    { AV_PIX_FMT_BGRA,   5 },
    { AV_PIX_FMT_NONE,   0 },
};

typedef struct {
    int count; /* number of packets already delivered; the format holds a single image */
} FrmContext;
 
/**
 * Probe for a Megalux Frame file: "FRM" magic followed by a format byte
 * and nonzero little-endian width and height.
 *
 * @return AVPROBE_SCORE_MAX/4 on a match, 0 otherwise
 */
static int frm_read_probe(AVProbeData *p)
{
    if (p->buf_size <= 8)
        return 0;
    if (p->buf[0] != 'F' || p->buf[1] != 'R' || p->buf[2] != 'M')
        return 0;
    /* width (buf[4..5]) and height (buf[6..7]) must be nonzero */
    if (!AV_RL16(&p->buf[4]) || !AV_RL16(&p->buf[6]))
        return 0;
    return AVPROBE_SCORE_MAX / 4;
}
 
/**
 * Parse the 8-byte Megalux Frame header: "FRM" magic, one pixel-format
 * byte, then little-endian width and height.
 *
 * @return 0 on success, AVERROR(ENOMEM) or AVERROR_INVALIDDATA on failure
 */
static int frm_read_header(AVFormatContext *avctx)
{
    AVIOContext *pb = avctx->pb;
    AVStream *st = avformat_new_stream(avctx, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    avio_skip(pb, 3); /* "FRM" magic */

    /* avpriv_find_pix_fmt() signals an unknown tag with AV_PIX_FMT_NONE
     * (-1); the previous "!pix_fmt" check compared against 0 and so never
     * rejected unknown format bytes. */
    st->codec->pix_fmt = avpriv_find_pix_fmt(frm_pix_fmt_tags, avio_r8(pb));
    if (st->codec->pix_fmt == AV_PIX_FMT_NONE)
        return AVERROR_INVALIDDATA;

    st->codec->codec_tag = 0;
    st->codec->width     = avio_rl16(pb);
    st->codec->height    = avio_rl16(pb);
    return 0;
}
 
/**
 * Deliver the single raw-video frame of a Megalux Frame file.
 * For BGRA input the stored alpha channel is inverted in place.
 *
 * @return 0 on success, AVERROR_EOF once the frame has been delivered,
 *         or a negative AVERROR on read/size errors
 */
static int frm_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    FrmContext *s = avctx->priv_data;
    AVCodecContext *codec = avctx->streams[0]->codec;
    int size, ret;

    /* single-image format: only ever emit one packet */
    if (s->count)
        return AVERROR_EOF;

    size = avpicture_get_size(codec->pix_fmt, codec->width, codec->height);
    if (size < 0)
        return AVERROR_INVALIDDATA;

    if ((ret = av_get_packet(avctx->pb, pkt, size)) < 0)
        return ret;

    /* file stores alpha inverted relative to BGRA */
    if (codec->pix_fmt == AV_PIX_FMT_BGRA) {
        int pos;
        for (pos = 3; pos + 1 <= pkt->size; pos += 4)
            pkt->data[pos] = 0xFF - pkt->data[pos];
    }

    pkt->stream_index = 0;
    s->count++;

    return 0;
}
 
/* Demuxer registration for the Megalux Frame single-image format. */
AVInputFormat ff_frm_demuxer = {
    .name           = "frm",
    .priv_data_size = sizeof(FrmContext),
    .long_name      = NULL_IF_CONFIG_SMALL("Megalux Frame"),
    .read_probe     = frm_read_probe,
    .read_header    = frm_read_header,
    .read_packet    = frm_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/ftp.c
0,0 → 1,721
/*
* Copyright (c) 2013 Lukasz Marek <lukasz.m.luki@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "avformat.h"
#include "internal.h"
#include "url.h"
#include "libavutil/opt.h"
#include "libavutil/bprint.h"
 
#define CONTROL_BUFFER_SIZE 1024
#define CREDENTIALS_BUFFER_SIZE 128

/* State of the FTP data connection. */
typedef enum {
    UNKNOWN,
    READY,          /* data connection open, no transfer started */
    DOWNLOADING,    /* RETR in progress */
    UPLOADING,      /* STOR in progress */
    DISCONNECTED    /* no data connection */
} FTPState;

/* Per-URL private context for the FTP protocol handler.
 * NOTE(review): "credencials" is a misspelling of "credentials" kept
 * for consistency with the rest of this file. */
typedef struct {
    const AVClass *class;
    URLContext *conn_control;                    /**< Control connection */
    URLContext *conn_data;                       /**< Data connection, NULL when not connected */
    uint8_t control_buffer[CONTROL_BUFFER_SIZE]; /**< Control connection buffer */
    uint8_t *control_buf_ptr, *control_buf_end;  /* read cursor / fill level of control_buffer */
    int server_data_port;                        /**< Data connection port opened by server, -1 on error. */
    int server_control_port;                     /**< Control connection port, default is 21 */
    char hostname[512];                          /**< Server address. */
    char credencials[CREDENTIALS_BUFFER_SIZE];   /**< Authentication data */
    char path[MAX_URL_SIZE];                     /**< Path to resource on server. */
    int64_t filesize;                            /**< Size of file on server, -1 on error. */
    int64_t position;                            /**< Current position, calculated. */
    int rw_timeout;                              /**< Network timeout. */
    const char *anonymous_password;              /**< Password to be used for anonymous user. An email should be used. */
    int write_seekable;                          /**< Control seekability, 0 = disable, 1 = enable. */
    FTPState state;                              /**< State of data connection */
} FTPContext;
 
#define OFFSET(x) offsetof(FTPContext, x)
#define D AV_OPT_FLAG_DECODING_PARAM
#define E AV_OPT_FLAG_ENCODING_PARAM
/* User-settable options for the ftp protocol (timeout, write seekability,
 * anonymous password). */
static const AVOption options[] = {
    {"timeout", "set timeout of socket I/O operations", OFFSET(rw_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D|E },
    {"ftp-write-seekable", "control seekability of connection during encoding", OFFSET(write_seekable), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, E },
    {"ftp-anonymous-password", "password for anonymous login. E-mail address should be used.", OFFSET(anonymous_password), AV_OPT_TYPE_STRING, { 0 }, 0, 0, D|E },
    {NULL}
};

static const AVClass ftp_context_class = {
    .class_name     = "ftp",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
};
 
/**
 * Read one byte from the control connection, buffered through
 * s->control_buffer.
 *
 * @return the byte read, a negative ffurl_read error, or -1 on EOF
 */
static int ftp_getc(FTPContext *s)
{
    if (s->control_buf_ptr >= s->control_buf_end) {
        /* buffer drained: refill from the control connection */
        int n = ffurl_read(s->conn_control, s->control_buffer,
                           CONTROL_BUFFER_SIZE);
        if (n < 0)
            return n;
        if (n == 0)
            return -1;
        s->control_buf_ptr = s->control_buffer;
        s->control_buf_end = s->control_buffer + n;
    }
    return *s->control_buf_ptr++;
}
 
/**
 * Read one CRLF-terminated line from the control connection into @p line,
 * stripping the line terminator; overlong lines are silently truncated
 * to line_size - 1 characters.
 *
 * @return 0 on success, or a negative value from ftp_getc()
 */
static int ftp_get_line(FTPContext *s, char *line, int line_size)
{
    char *dst = line;

    for (;;) {
        int c = ftp_getc(s);

        if (c < 0)
            return c;

        if (c != '\n') {
            /* keep the byte if it still fits (leave room for the NUL) */
            if (dst - line < line_size - 1)
                *dst++ = c;
            continue;
        }

        /* end of line: drop a trailing CR and terminate */
        if (dst > line && dst[-1] == '\r')
            dst--;
        *dst = '\0';
        return 0;
    }
}
 
/*
 * This routine returns ftp server response code.
 * Server may send more than one response for a certain command.
 * First expected code is returned.
 */
static int ftp_status(FTPContext *s, char **line, const int response_codes[])
{
    int err, i, dash = 0, result = 0, code_found = 0;
    char buf[CONTROL_BUFFER_SIZE];
    AVBPrint line_buffer;

    /* the caller may optionally collect the matching response text */
    if (line)
        av_bprint_init(&line_buffer, 0, AV_BPRINT_SIZE_AUTOMATIC);

    /* keep reading until an expected code appears; a '-' after the code
     * marks a multi-line reply, which must be consumed to its end */
    while (!code_found || dash) {
        if ((err = ftp_get_line(s, buf, sizeof(buf))) < 0) {
            if (line)
                av_bprint_finalize(&line_buffer, NULL);
            return err;
        }

        av_log(s, AV_LOG_DEBUG, "%s\n", buf);

        /* a valid reply is at least "NNN " or "NNN-" */
        if (strlen(buf) < 4)
            continue;

        /* parse the 3-digit code (non-digit characters are skipped) */
        err = 0;
        for (i = 0; i < 3; ++i) {
            if (buf[i] < '0' || buf[i] > '9')
                continue;
            err *= 10;
            err += buf[i] - '0';
        }
        dash = !!(buf[3] == '-');

        for (i = 0; response_codes[i]; ++i) {
            if (err == response_codes[i]) {
                if (line)
                    av_bprintf(&line_buffer, "%s", buf);
                code_found = 1;
                result = err;
                break;
            }
        }
    }

    /* hand the collected text (if any) to the caller, who owns it */
    if (line)
        av_bprint_finalize(&line_buffer, line);
    return result;
}
 
/**
 * Send a command over the control connection and, when @p response_codes
 * is non-NULL, wait for one of the listed reply codes via ftp_status().
 *
 * @return the matched reply code (or 0 when no codes were requested),
 *         a negative write error, or -1 when nothing was written
 */
static int ftp_send_command(FTPContext *s, const char *command,
                            const int response_codes[], char **response)
{
    int written = ffurl_write(s->conn_control, command, strlen(command));

    if (written < 0)
        return written;
    if (written == 0)
        return -1;

    /* collect the server's verdict when the caller asked for one */
    if (response_codes)
        return ftp_status(s, response, response_codes);

    return 0;
}
 
/* Close the data connection (if any) and reset position/state. */
static void ftp_close_data_connection(FTPContext *s)
{
    ffurl_closep(&s->conn_data);
    s->position = 0;
    s->state = DISCONNECTED;
}

/* Tear down both the control and the data connection. */
static void ftp_close_both_connections(FTPContext *s)
{
    ffurl_closep(&s->conn_control);
    ftp_close_data_connection(s);
}
 
/**
 * Authenticate on the control connection using USER/PASS.
 * Credentials come from s->credencials ("user:pass"); with none given,
 * anonymous login is attempted using the configured anonymous password.
 *
 * @return 0 on success, AVERROR(EACCES) when the server refuses the login
 */
static int ftp_auth(FTPContext *s)
{
    const char *user = NULL, *pass = NULL;
    char *end = NULL, buf[CONTROL_BUFFER_SIZE], credencials[CREDENTIALS_BUFFER_SIZE];
    int err;
    static const int user_codes[] = {331, 230, 500, 530, 0}; /* 500, 530 are incorrect codes */
    static const int pass_codes[] = {230, 503, 530, 0}; /* 503, 530 are incorrect codes */

    /* Authentication may be repeated, original string has to be saved
     * (av_strtok below mutates its input). */
    av_strlcpy(credencials, s->credencials, sizeof(credencials));

    user = av_strtok(credencials, ":", &end);
    pass = av_strtok(end, ":", &end);

    if (!user) {
        user = "anonymous";
        pass = s->anonymous_password ? s->anonymous_password : "nopassword";
    }

    snprintf(buf, sizeof(buf), "USER %s\r\n", user);
    err = ftp_send_command(s, buf, user_codes, NULL);
    /* 331 = "User name okay, need password" */
    if (err == 331) {
        if (pass) {
            snprintf(buf, sizeof(buf), "PASS %s\r\n", pass);
            err = ftp_send_command(s, buf, pass_codes, NULL);
        } else
            return AVERROR(EACCES);
    }
    /* 230 = "User logged in" — anything else is a failed login */
    if (err != 230)
        return AVERROR(EACCES);

    return 0;
}
 
/**
 * Issue PASV and parse the server's data port from the reply, which has
 * the form "227 ... (h1,h2,h3,h4,p1,p2)". The port is p1*256 + p2;
 * the IP part of the tuple is ignored (the control hostname is reused).
 *
 * On failure, s->server_data_port is set to -1. IPv6 (EPSV) is not
 * supported.
 *
 * @return 0 on success, AVERROR(EIO) on any parse or protocol failure
 */
static int ftp_passive_mode(FTPContext *s)
{
    char *res = NULL, *start = NULL, *end = NULL;
    int i;
    static const char *command = "PASV\r\n";
    static const int pasv_codes[] = {227, 501, 0}; /* 501 is incorrect code */

    if (ftp_send_command(s, command, pasv_codes, &res) != 227 || !res)
        goto fail;

    /* locate the parenthesized host/port tuple */
    for (i = 0; res[i]; ++i) {
        if (res[i] == '(') {
            start = res + i + 1;
        } else if (res[i] == ')') {
            end = res + i;
            break;
        }
    }
    if (!start || !end)
        goto fail;

    *end = '\0';
    /* skip ip */
    if (!av_strtok(start, ",", &end)) goto fail;
    if (!av_strtok(end, ",", &end)) goto fail;
    if (!av_strtok(end, ",", &end)) goto fail;
    if (!av_strtok(end, ",", &end)) goto fail;

    /* parse port number */
    start = av_strtok(end, ",", &end);
    if (!start) goto fail;
    s->server_data_port = atoi(start) * 256;
    start = av_strtok(end, ",", &end);
    if (!start) goto fail;
    s->server_data_port += atoi(start);
    av_dlog(s, "Server data port: %d\n", s->server_data_port);

    av_free(res);
    return 0;

  fail:
    av_free(res);
    s->server_data_port = -1;
    av_log(s, AV_LOG_ERROR, "Set passive mode failed\n"
                            "Your FTP server may use IPv6 which is not supported yet.\n");
    return AVERROR(EIO);
}
 
/**
 * Issue PWD and store the current remote directory in s->path.
 * The reply has the form: 257 "<dir>" ...; the text between the first
 * pair of double quotes is extracted and a trailing '/' is stripped.
 *
 * @return 0 on success, AVERROR(EIO) on protocol or parse failure
 */
static int ftp_current_dir(FTPContext *s)
{
    char *res = NULL, *start = NULL, *end = NULL;
    int i;
    static const char *command = "PWD\r\n";
    static const int pwd_codes[] = {257, 0};

    if (ftp_send_command(s, command, pwd_codes, &res) != 257 || !res)
        goto fail;

    /* find the text between the first pair of double quotes */
    for (i = 0; res[i]; ++i) {
        if (res[i] == '"') {
            if (!start) {
                start = res + i + 1;
                continue;
            }
            end = res + i;
            break;
        }
    }

    if (!end)
        goto fail;

    /* drop a trailing '/' so s->path can be concatenated with the URL path */
    if (end > res && end[-1] == '/') {
        end[-1] = '\0';
    } else
        *end = '\0';
    av_strlcpy(s->path, start, sizeof(s->path));

    av_free(res);
    return 0;

  fail:
    av_free(res);
    return AVERROR(EIO);
}
 
/**
 * Query the remote file size with SIZE and store it in s->filesize.
 * On failure s->filesize is set to -1.
 *
 * @return 0 on success, AVERROR(EIO) when the server rejects SIZE
 */
static int ftp_file_size(FTPContext *s)
{
    static const int size_codes[] = {213, 501, 550, 0}; /* 501, 550 are incorrect codes */
    char command[CONTROL_BUFFER_SIZE];
    char *response = NULL;
    int ok;

    snprintf(command, sizeof(command), "SIZE %s\r\n", s->path);
    /* a 213 reply looks like "213 <size>"; the number starts at offset 4 */
    ok = ftp_send_command(s, command, size_codes, &response) == 213 && response;
    if (ok)
        s->filesize = strtoll(&response[4], NULL, 10);
    else
        s->filesize = -1;

    av_free(response);
    return ok ? 0 : AVERROR(EIO);
}
 
/**
 * Start downloading s->path with RETR and switch to DOWNLOADING state.
 *
 * @return 0 on success, AVERROR(EIO) if the server does not answer 150
 */
static int ftp_retrieve(FTPContext *s)
{
    static const int retr_codes[] = {150, 550, 554, 0}; /* 550, 554 are incorrect codes */
    char command[CONTROL_BUFFER_SIZE];

    snprintf(command, sizeof(command), "RETR %s\r\n", s->path);
    if (ftp_send_command(s, command, retr_codes, NULL) != 150)
        return AVERROR(EIO);

    s->state = DOWNLOADING;
    return 0;
}
 
/**
 * Start uploading to s->path with STOR and switch to UPLOADING state.
 *
 * @return 0 on success, AVERROR(EIO) if the server does not answer 150
 */
static int ftp_store(FTPContext *s)
{
    static const int stor_codes[] = {150, 0};
    char command[CONTROL_BUFFER_SIZE];

    snprintf(command, sizeof(command), "STOR %s\r\n", s->path);
    if (ftp_send_command(s, command, stor_codes, NULL) != 150)
        return AVERROR(EIO);

    s->state = UPLOADING;
    return 0;
}
 
/**
 * Switch the transfer type to binary ("TYPE I").
 *
 * @return 0 on success, AVERROR(EIO) if the server does not answer 200
 */
static int ftp_type(FTPContext *s)
{
    static const char *command = "TYPE I\r\n";
    static const int type_codes[] = {200, 500, 504, 0}; /* 500, 504 are incorrect codes */

    return ftp_send_command(s, command, type_codes, NULL) == 200
           ? 0 : AVERROR(EIO);
}
 
/**
 * Request that the next transfer restart at byte offset @p pos (REST).
 *
 * @return 0 on success, AVERROR(EIO) if the server does not answer 350
 */
static int ftp_restart(FTPContext *s, int64_t pos)
{
    static const int rest_codes[] = {350, 500, 501, 0}; /* 500, 501 are incorrect codes */
    char command[CONTROL_BUFFER_SIZE];

    snprintf(command, sizeof(command), "REST %"PRId64"\r\n", pos);
    return ftp_send_command(s, command, rest_codes, NULL) == 350
           ? 0 : AVERROR(EIO);
}
 
/**
 * Open the control connection (if not already open): connect via TCP,
 * wait for the 220 greeting, authenticate, and select binary mode.
 * A no-op when s->conn_control already exists.
 *
 * @return 0 on success, a negative AVERROR otherwise
 */
static int ftp_connect_control_connection(URLContext *h)
{
    char buf[CONTROL_BUFFER_SIZE], opts_format[20], *response = NULL;
    int err;
    AVDictionary *opts = NULL;
    FTPContext *s = h->priv_data;
    static const int connect_codes[] = {220, 0};

    if (!s->conn_control) {
        ff_url_join(buf, sizeof(buf), "tcp", NULL,
                    s->hostname, s->server_control_port, NULL);
        if (s->rw_timeout != -1) {
            snprintf(opts_format, sizeof(opts_format), "%d", s->rw_timeout);
            av_dict_set(&opts, "timeout", opts_format, 0);
        } /* if option is not given, don't pass it and let tcp use its own default */
        err = ffurl_open(&s->conn_control, buf, AVIO_FLAG_READ_WRITE,
                         &h->interrupt_callback, &opts);
        av_dict_free(&opts);
        if (err < 0) {
            av_log(h, AV_LOG_ERROR, "Cannot open control connection\n");
            return err;
        }

        /* check if server is ready; the greeting text is only collected
         * for write mode, to detect Pure-FTPd below */
        if (ftp_status(s, ((h->flags & AVIO_FLAG_WRITE) ? &response : NULL), connect_codes) != 220) {
            av_log(h, AV_LOG_ERROR, "FTP server not ready for new users\n");
            return AVERROR(EACCES);
        }

        if ((h->flags & AVIO_FLAG_WRITE) && av_stristr(response, "pure-ftpd")) {
            av_log(h, AV_LOG_WARNING, "Pure-FTPd server is used as an output protocol. It is known issue this implementation may produce incorrect content and it cannot be fixed at this moment.");
        }
        av_free(response);

        if ((err = ftp_auth(s)) < 0) {
            av_log(h, AV_LOG_ERROR, "FTP authentication failed\n");
            return err;
        }

        if ((err = ftp_type(s)) < 0) {
            av_log(h, AV_LOG_ERROR, "Set content type failed\n");
            return err;
        }
    }
    return 0;
}
 
/**
 * Open the data connection (if not already open): enter passive mode,
 * connect via TCP to the port announced by the server, and re-issue
 * REST when resuming at a nonzero position. Sets state to READY.
 *
 * @return 0 on success, a negative AVERROR otherwise
 */
static int ftp_connect_data_connection(URLContext *h)
{
    int err;
    char buf[CONTROL_BUFFER_SIZE], opts_format[20];
    AVDictionary *opts = NULL;
    FTPContext *s = h->priv_data;

    if (!s->conn_data) {
        /* Enter passive mode */
        if ((err = ftp_passive_mode(s)) < 0)
            return err;
        /* Open data connection */
        ff_url_join(buf, sizeof(buf), "tcp", NULL, s->hostname, s->server_data_port, NULL);
        if (s->rw_timeout != -1) {
            snprintf(opts_format, sizeof(opts_format), "%d", s->rw_timeout);
            av_dict_set(&opts, "timeout", opts_format, 0);
        } /* if option is not given, don't pass it and let tcp use its own default */
        err = ffurl_open(&s->conn_data, buf, h->flags,
                         &h->interrupt_callback, &opts);
        av_dict_free(&opts);
        if (err < 0)
            return err;

        /* resume at the remembered offset */
        if (s->position)
            if ((err = ftp_restart(s, s->position)) < 0)
                return err;
    }
    s->state = READY;
    return 0;
}
 
/**
 * Abort the current transfer. Sends ABOR and closes the data connection;
 * if the server is unresponsive (or closes the control connection too),
 * the control connection is re-established from scratch.
 *
 * @return 0 on success, a negative AVERROR when reconnection fails
 */
static int ftp_abort(URLContext *h)
{
    static const char *command = "ABOR\r\n";
    int err;
    static const int abor_codes[] = {225, 226, 0};
    FTPContext *s = h->priv_data;

    /* According to RCF 959:
       "ABOR command tells the server to abort the previous FTP
       service command and any associated transfer of data."

       There are FTP server implementations that don't response
       to any commands during data transfer in passive mode (including ABOR).

       This implementation closes data connection by force.
    */

    if (ftp_send_command(s, command, NULL, NULL) < 0) {
        /* could not even send ABOR: tear everything down and reconnect */
        ftp_close_both_connections(s);
        if ((err = ftp_connect_control_connection(h)) < 0) {
            av_log(h, AV_LOG_ERROR, "Reconnect failed.\n");
            return err;
        }
    } else {
        ftp_close_data_connection(s);
        /* expect 225/226 acknowledging the abort */
        if (ftp_status(s, NULL, abor_codes) < 225) {
            /* wu-ftpd also closes control connection after data connection closing */
            ffurl_closep(&s->conn_control);
            if ((err = ftp_connect_control_connection(h)) < 0) {
                av_log(h, AV_LOG_ERROR, "Reconnect failed.\n");
                return err;
            }
        }
    }

    return 0;
}
 
/**
 * URLProtocol open callback: parse the ftp:// URL, establish the control
 * connection, resolve the remote path, and probe seekability (REST and
 * SIZE support). Marks the stream non-seekable when either probe fails.
 *
 * @return 0 on success, a negative AVERROR otherwise
 */
static int ftp_open(URLContext *h, const char *url, int flags)
{
    char proto[10], path[MAX_URL_SIZE];
    int err;
    FTPContext *s = h->priv_data;

    av_dlog(h, "ftp protocol open\n");

    s->state = DISCONNECTED;
    s->filesize = -1;
    s->position = 0;

    av_url_split(proto, sizeof(proto),
                 s->credencials, sizeof(s->credencials),
                 s->hostname, sizeof(s->hostname),
                 &s->server_control_port,
                 path, sizeof(path),
                 url);

    /* fall back to the standard FTP control port */
    if (s->server_control_port < 0 || s->server_control_port > 65535)
        s->server_control_port = 21;

    if ((err = ftp_connect_control_connection(h)) < 0)
        goto fail;

    /* remote path = server's current directory + path from the URL */
    if ((err = ftp_current_dir(s)) < 0)
        goto fail;
    av_strlcat(s->path, path, sizeof(s->path));

    /* probe REST support; without it the stream cannot seek at all */
    if (ftp_restart(s, 0) < 0) {
        h->is_streamed = 1;
    } else {
        if (ftp_file_size(s) < 0 && flags & AVIO_FLAG_READ)
            h->is_streamed = 1;
        if (s->write_seekable != 1 && flags & AVIO_FLAG_WRITE)
            h->is_streamed = 1;
    }

    return 0;

  fail:
    av_log(h, AV_LOG_ERROR, "FTP open failed\n");
    ffurl_closep(&s->conn_control);
    ffurl_closep(&s->conn_data);
    return err;
}
 
/**
 * URLProtocol seek callback. Computes the target position, aborts the
 * current transfer when the position actually changes, and relies on the
 * next read/write to reopen the data connection with REST.
 *
 * @return the new position, s->filesize for AVSEEK_SIZE, or a negative
 *         AVERROR (EIO when not seekable, EINVAL for a bad whence)
 */
static int64_t ftp_seek(URLContext *h, int64_t pos, int whence)
{
    FTPContext *s = h->priv_data;
    int err;
    int64_t new_pos, fake_pos;

    av_dlog(h, "ftp protocol seek %"PRId64" %d\n", pos, whence);

    switch(whence) {
    case AVSEEK_SIZE:
        return s->filesize;
    case SEEK_SET:
        new_pos = pos;
        break;
    case SEEK_CUR:
        new_pos = s->position + pos;
        break;
    case SEEK_END:
        if (s->filesize < 0)
            return AVERROR(EIO);
        new_pos = s->filesize + pos;
        break;
    default:
        return AVERROR(EINVAL);
    }

    if (h->is_streamed)
        return AVERROR(EIO);

    /* XXX: Simulate behaviour of lseek in file protocol, which could be treated as a reference */
    new_pos = FFMAX(0, new_pos);
    /* clamp the position we actually transfer from to the known file size
     * (seeking past EOF is reported but not performed) */
    fake_pos = s->filesize != -1 ? FFMIN(new_pos, s->filesize) : new_pos;

    if (fake_pos != s->position) {
        if ((err = ftp_abort(h)) < 0)
            return err;
        s->position = fake_pos;
    }
    return new_pos;
}
 
/**
 * URLProtocol read callback. Lazily (re)opens the data connection and
 * starts RETR, then reads from the data connection. Handles the server
 * closing the connection at EOF and reconnects once after an idle
 * disconnect mid-file.
 *
 * @return number of bytes read, 0 at EOF, or a negative AVERROR
 */
static int ftp_read(URLContext *h, unsigned char *buf, int size)
{
    FTPContext *s = h->priv_data;
    int read, err, retry_done = 0;

    av_dlog(h, "ftp protocol read %d bytes\n", size);
  retry:
    if (s->state == DISCONNECTED) {
        /* optimization: avoid reconnecting just to learn we are at EOF */
        if (s->position >= s->filesize)
            return 0;
        if ((err = ftp_connect_data_connection(h)) < 0)
            return err;
    }
    if (s->state == READY) {
        if (s->position >= s->filesize)
            return 0;
        if ((err = ftp_retrieve(s)) < 0)
            return err;
    }
    if (s->conn_data && s->state == DOWNLOADING) {
        read = ffurl_read(s->conn_data, buf, size);
        if (read >= 0) {
            s->position += read;
            if (s->position >= s->filesize) {
                /* server will terminate, but keep current position to avoid madness */
                /* save position to restart from it */
                int64_t pos = s->position;
                if (ftp_abort(h) < 0) {
                    s->position = pos;
                    return AVERROR(EIO);
                }
                s->position = pos;
            }
        }
        if (read <= 0 && s->position < s->filesize && !h->is_streamed) {
            /* Server closed connection. Probably due to inactivity */
            int64_t pos = s->position;
            av_log(h, AV_LOG_INFO, "Reconnect to FTP server.\n");
            if ((err = ftp_abort(h)) < 0)
                return err;
            if ((err = ftp_seek(h, pos, SEEK_SET)) < 0) {
                av_log(h, AV_LOG_ERROR, "Position cannot be restored.\n");
                return err;
            }
            /* only retry once to avoid an infinite reconnect loop */
            if (!retry_done) {
                retry_done = 1;
                goto retry;
            }
        }
        return read;
    }

    av_log(h, AV_LOG_DEBUG, "FTP read failed\n");
    return AVERROR(EIO);
}
 
/**
 * URLProtocol write callback. Lazily opens the data connection and
 * starts STOR, then writes to the data connection, tracking position
 * and growing the known file size.
 *
 * @return number of bytes written, or a negative AVERROR
 */
static int ftp_write(URLContext *h, const unsigned char *buf, int size)
{
    int err;
    FTPContext *s = h->priv_data;
    int written;

    av_dlog(h, "ftp protocol write %d bytes\n", size);

    if (s->state == DISCONNECTED) {
        if ((err = ftp_connect_data_connection(h)) < 0)
            return err;
    }
    if (s->state == READY) {
        if ((err = ftp_store(s)) < 0)
            return err;
    }
    if (s->conn_data && s->state == UPLOADING) {
        written = ffurl_write(s->conn_data, buf, size);
        if (written > 0) {
            s->position += written;
            /* writing may extend the file beyond its previous size */
            s->filesize = FFMAX(s->filesize, s->position);
        }
        return written;
    }

    av_log(h, AV_LOG_ERROR, "FTP write failed\n");
    return AVERROR(EIO);
}
 
/* URLProtocol close callback: drop control and data connections. */
static int ftp_close(URLContext *h)
{
    av_dlog(h, "ftp protocol close\n");

    ftp_close_both_connections(h->priv_data);

    return 0;
}
 
/**
 * URLProtocol get_file_handle callback: expose the underlying data
 * connection's file handle, or AVERROR(EIO) when none is open.
 */
static int ftp_get_file_handle(URLContext *h)
{
    FTPContext *s = h->priv_data;

    av_dlog(h, "ftp protocol get_file_handle\n");

    if (!s->conn_data)
        return AVERROR(EIO);

    return ffurl_get_file_handle(s->conn_data);
}
 
/**
 * URLProtocol shutdown callback: forward the shutdown to the data
 * connection, or report AVERROR(EIO) when none is open.
 */
static int ftp_shutdown(URLContext *h, int flags)
{
    FTPContext *s = h->priv_data;

    av_dlog(h, "ftp protocol shutdown\n");

    if (!s->conn_data)
        return AVERROR(EIO);

    return ffurl_shutdown(s->conn_data, flags);
}
 
/* Protocol registration for ftp:// URLs. */
URLProtocol ff_ftp_protocol = {
    .name                = "ftp",
    .url_open            = ftp_open,
    .url_read            = ftp_read,
    .url_write           = ftp_write,
    .url_seek            = ftp_seek,
    .url_close           = ftp_close,
    .url_get_file_handle = ftp_get_file_handle,
    .url_shutdown        = ftp_shutdown,
    .priv_data_size      = sizeof(FTPContext),
    .priv_data_class     = &ftp_context_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/g722.c
0,0 → 1,57
/*
* g722 raw demuxer
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "avformat.h"
#include "internal.h"
#include "rawdec.h"
 
/**
 * Set up the single audio stream for raw G.722: mono, 16 kHz, with the
 * codec's fixed bits-per-coded-sample and a sample-rate time base.
 *
 * @return 0 on success, AVERROR(ENOMEM) when the stream cannot be created
 */
static int g722_read_header(AVFormatContext *s)
{
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    stream->codec->codec_id    = AV_CODEC_ID_ADPCM_G722;
    stream->codec->sample_rate = 16000;
    stream->codec->channels    = 1;

    stream->codec->bits_per_coded_sample =
        av_get_bits_per_sample(stream->codec->codec_id);
    /* G.722 always has a known per-sample size */
    av_assert0(stream->codec->bits_per_coded_sample > 0);

    avpriv_set_pts_info(stream, 64, 1, stream->codec->sample_rate);
    return 0;
}
 
/* Demuxer registration for raw G.722; packets come from the generic
 * raw partial-packet reader. */
AVInputFormat ff_g722_demuxer = {
    .name           = "g722",
    .long_name      = NULL_IF_CONFIG_SMALL("raw G.722"),
    .read_header    = g722_read_header,
    .read_packet    = ff_raw_read_partial_packet,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "g722,722",
    .raw_codec_id   = AV_CODEC_ID_ADPCM_G722,
};
/contrib/sdk/sources/ffmpeg/libavformat/g723_1.c
0,0 → 1,86
/*
* G.723.1 demuxer
* Copyright (c) 2010 Mohamed Naufal Basheer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* G.723.1 demuxer
*/
 
#include "libavutil/attributes.h"
#include "libavutil/channel_layout.h"
#include "avformat.h"
#include "internal.h"
 
/* Total G.723.1 frame size in bytes (header byte included), indexed by
 * the two low bits of the frame's first byte: 6.3 kbit/s, 5.3 kbit/s,
 * SID, and untransmitted frames respectively. */
static const uint8_t frame_size[4] = { 24, 20, 4, 1 };
 
/**
 * Set up the single audio stream for a raw G.723.1 file:
 * mono, 8 kHz, sample-rate time base, start time 0.
 *
 * @return 0 on success, AVERROR(ENOMEM) when the stream cannot be created
 */
static av_cold int g723_1_init(AVFormatContext *s)
{
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    stream->codec->codec_id       = AV_CODEC_ID_G723_1;
    stream->codec->channel_layout = AV_CH_LAYOUT_MONO;
    stream->codec->channels       = 1;
    stream->codec->sample_rate    = 8000;

    avpriv_set_pts_info(stream, 64, 1, stream->codec->sample_rate);
    stream->start_time = 0;

    return 0;
}
 
/**
 * Read one G.723.1 frame. The first byte's two low bits select the
 * frame size (see frame_size[]); the remaining bytes are read after it.
 * Each frame decodes to 240 samples (30 ms at 8 kHz).
 *
 * @return packet size on success, or a negative AVERROR / AVERROR_EOF
 */
static int g723_1_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int header, frame_len, ret;

    pkt->pos  = avio_tell(s->pb);
    header    = avio_r8(s->pb);
    frame_len = frame_size[header & 3];

    if ((ret = av_new_packet(pkt, frame_len)) < 0)
        return ret;

    pkt->data[0]      = header;
    pkt->duration     = 240;
    pkt->stream_index = 0;

    ret = avio_read(s->pb, pkt->data + 1, frame_len - 1);
    if (ret < frame_len - 1) {
        /* short read: drop the partial frame */
        av_free_packet(pkt);
        return ret < 0 ? ret : AVERROR_EOF;
    }

    return pkt->size;
}
 
/* Demuxer registration for raw G.723.1 bitstreams. */
AVInputFormat ff_g723_1_demuxer = {
    .name        = "g723_1",
    .long_name   = NULL_IF_CONFIG_SMALL("G.723.1"),
    .read_header = g723_1_init,
    .read_packet = g723_1_read_packet,
    .extensions  = "tco,rco,g723_1",
    .flags       = AVFMT_GENERIC_INDEX
};
/contrib/sdk/sources/ffmpeg/libavformat/g729dec.c
0,0 → 1,103
/*
* G.729 raw format demuxer
* Copyright (c) 2011 Vladimir Voroshilov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
 
/* Private context for the raw G.729 demuxer. */
typedef struct G729DemuxerContext {
    AVClass *class;
    int bit_rate; /* user-selected bitrate option; 0 means "not set" */
} G729DemuxerContext;
 
/**
 * Set up the single audio stream for raw G.729: mono, 8 kHz.
 * The "bit_rate" option selects the frame size: 8000 b/s gives
 * 10-byte frames, 6400 b/s (G.729D) gives 8-byte frames; with no
 * option given, 8000 b/s is assumed.
 *
 * @return 0 on success, AVERROR(ENOMEM) or AVERROR_INVALIDDATA
 */
static int g729_read_header(AVFormatContext *s)
{
    G729DemuxerContext *ctx = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id    = AV_CODEC_ID_G729;
    st->codec->sample_rate = 8000;
    st->codec->channels    = 1;

    /* option overrides whatever was set on the format context */
    if (ctx && ctx->bit_rate)
        s->bit_rate = ctx->bit_rate;

    if (s->bit_rate == 0) {
        av_log(s, AV_LOG_DEBUG, "No bitrate specified. Assuming 8000 b/s\n");
        s->bit_rate = 8000;
    }

    switch (s->bit_rate) {
    case 6400:
        st->codec->block_align = 8;
        break;
    case 8000:
        st->codec->block_align = 10;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "Only 8000 b/s and 6400 b/s bitrates are supported. Provided: %d b/s\n", s->bit_rate);
        return AVERROR_INVALIDDATA;
    }

    /* one frame (block_align bytes) covers block_align*8 bits -> 10 ms */
    avpriv_set_pts_info(st, st->codec->block_align << 3, 1, st->codec->sample_rate);
    return 0;
}
/**
 * Read one fixed-size G.729 frame (block_align bytes) and derive the
 * timestamp from the byte position in the file.
 *
 * @return bytes read, or a negative AVERROR from av_get_packet()
 */
static int g729_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int block = s->streams[0]->codec->block_align;
    int ret   = av_get_packet(s->pb, pkt, block);

    pkt->stream_index = 0;
    if (ret < 0)
        return ret;

    /* frames are fixed-size, so the frame index is pos / block_align */
    pkt->dts = pkt->pts = pkt->pos / block;

    return ret;
}
 
/* "bit_rate" option: selects between 8000 b/s and 6400 b/s framing,
 * see g729_read_header(). */
static const AVOption g729_options[] = {
    { "bit_rate", "", offsetof(G729DemuxerContext, bit_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass g729_demuxer_class = {
    .class_name = "g729 demuxer",
    .item_name  = av_default_item_name,
    .option     = g729_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Demuxer registration for raw G.729 bitstreams. */
AVInputFormat ff_g729_demuxer = {
    .name           = "g729",
    .long_name      = NULL_IF_CONFIG_SMALL("G.729 raw format demuxer"),
    .priv_data_size = sizeof(G729DemuxerContext),
    .read_header    = g729_read_header,
    .read_packet    = g729_read_packet,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "g729",
    .priv_class     = &g729_demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/gif.c
0,0 → 1,222
/*
* Animated GIF muxer
* Copyright (c) 2000 Fabrice Bellard
*
* first version by Francois Revol <revol@free.fr>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
 
/**
 * Write the GIF89a file header: signature, logical screen descriptor,
 * optional 256-entry global palette, and — when loop_count >= 0 — the
 * NETSCAPE2.0 application extension enabling looped animation.
 *
 * @param palette 256 ARGB entries for the global color table, or NULL
 *                to emit no global palette
 * @return 0 (never fails)
 */
static int gif_image_write_header(AVIOContext *pb, int width, int height,
                                  int loop_count, uint32_t *palette)
{
    avio_write(pb, "GIF89a", 6);
    avio_wl16(pb, width);
    avio_wl16(pb, height);

    if (!palette) {
        avio_w8(pb, 0);    /* flags: no global color table */
        avio_w8(pb, 0);    /* background color index */
        avio_w8(pb, 0);    /* aspect ratio */
    } else {
        int i;

        avio_w8(pb, 0xf7); /* flags: global clut, 256 entries */
        avio_w8(pb, 0x1f); /* background color index */
        avio_w8(pb, 0);    /* aspect ratio */
        for (i = 0; i < 256; i++)
            avio_wb24(pb, palette[i] & 0xffffff); /* RGB, alpha dropped */
    }

    if (loop_count >= 0 ) {
        /* "NETSCAPE EXTENSION" for looped animation GIF */
        avio_w8(pb, 0x21);  /* GIF Extension code */
        avio_w8(pb, 0xff);  /* Application Extension Label */
        avio_w8(pb, 0x0b);  /* Length of Application Block */
        avio_write(pb, "NETSCAPE2.0", sizeof("NETSCAPE2.0") - 1);
        avio_w8(pb, 0x03);  /* Length of Data Sub-Block */
        avio_w8(pb, 0x01);
        avio_wl16(pb, (uint16_t)loop_count);
        avio_w8(pb, 0x00);  /* Data Sub-block Terminator */
    }

    return 0;
}
 
/* Private context for the animated GIF muxer. */
typedef struct {
    AVClass *class;
    int loop;           /* loop option: -1 no loop, 0 infinite, else count */
    int last_delay;     /* final_delay option (in 1/100 s), -1 = unset */
    AVPacket *prev_pkt; /* frame buffered until its duration is known */
    int duration;       /* duration of the buffered frame, in 1/100 s */
} GIFContext;
 
/**
 * Validate the stream layout (exactly one GIF video stream), set a
 * 1/100 s time base, and write the GIF file header — with a global
 * palette when the pixel format has a systematic one, otherwise
 * relying on per-frame PAL8 palettes.
 *
 * @return 0 on success, AVERROR(EINVAL) on an unsupported stream layout
 */
static int gif_write_header(AVFormatContext *s)
{
    GIFContext *gif = s->priv_data;
    AVIOContext *pb = s->pb;
    AVCodecContext *video_enc;
    int width, height;
    uint32_t palette[AVPALETTE_COUNT];

    if (s->nb_streams != 1 ||
        s->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
        s->streams[0]->codec->codec_id   != AV_CODEC_ID_GIF) {
        av_log(s, AV_LOG_ERROR,
               "GIF muxer supports only a single video GIF stream.\n");
        return AVERROR(EINVAL);
    }

    video_enc = s->streams[0]->codec;
    width  = video_enc->width;
    height = video_enc->height;

    /* GIF delays are expressed in hundredths of a second */
    avpriv_set_pts_info(s->streams[0], 64, 1, 100);
    if (avpriv_set_systematic_pal2(palette, video_enc->pix_fmt) < 0) {
        /* no systematic palette: must be PAL8 with per-frame palettes */
        av_assert0(video_enc->pix_fmt == AV_PIX_FMT_PAL8);
        gif_image_write_header(pb, width, height, gif->loop, NULL);
    } else {
        gif_image_write_header(pb, width, height, gif->loop, palette);
    }

    avio_flush(s->pb);
    return 0;
}
 
/**
 * Emit the buffered frame (gif->prev_pkt), deriving its on-screen
 * duration from the pts of the following frame, then buffer @p new in
 * its place.
 *
 * @param new the packet following the buffered one, or NULL at EOF
 *            (then the "final_delay" option, if set, gives the duration)
 * @return 0 on success, AVERROR_INVALIDDATA on malformed palette data
 */
static int flush_packet(AVFormatContext *s, AVPacket *new)
{
    GIFContext *gif = s->priv_data;
    int size;
    AVIOContext *pb = s->pb;
    uint8_t flags = 0x4, transparent_color_index = 0x1f;
    const uint32_t *palette;
    AVPacket *pkt = gif->prev_pkt;

    /* nothing buffered yet (first call) */
    if (!pkt)
        return 0;

    /* Mark one colour as transparent if the input palette contains at least
     * one colour that is more than 50% transparent. */
    palette = (uint32_t*)av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE, &size);
    if (palette && size != AVPALETTE_SIZE) {
        av_log(s, AV_LOG_ERROR, "Invalid palette extradata\n");
        return AVERROR_INVALIDDATA;
    }
    if (palette) {
        unsigned i, smallest_alpha = 0xff;

        for (i = 0; i < AVPALETTE_COUNT; i++) {
            const uint32_t v = palette[i];
            if (v >> 24 < smallest_alpha) {
                smallest_alpha = v >> 24;
                transparent_color_index = i;
            }
        }
        if (smallest_alpha < 128)
            flags |= 0x1; /* Transparent Color Flag */
    }

    /* duration in 1/100 s: pts gap to the next frame (the stream time
     * base is 1/100 s, see gif_write_header), clipped to 16 bits */
    if (new && new->pts != AV_NOPTS_VALUE)
        gif->duration = av_clip_uint16(new->pts - gif->prev_pkt->pts);
    else if (!new && gif->last_delay >= 0)
        gif->duration = gif->last_delay;

    /* graphic control extension block */
    avio_w8(pb, 0x21);
    avio_w8(pb, 0xf9);
    avio_w8(pb, 0x04); /* block size */
    avio_w8(pb, flags);
    avio_wl16(pb, gif->duration);
    avio_w8(pb, transparent_color_index);
    avio_w8(pb, 0x00);

    avio_write(pb, pkt->data, pkt->size);

    /* recycle the buffer slot: free the emitted frame, stash the new one */
    av_free_packet(gif->prev_pkt);
    if (new)
        av_copy_packet(gif->prev_pkt, new);

    return 0;
}
 
/**
 * Buffer the very first frame; every later frame flushes its
 * predecessor (whose duration is now known) and takes its place.
 *
 * @return 0 on success, a negative AVERROR on allocation/copy failure
 */
static int gif_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    GIFContext *gif = s->priv_data;

    if (gif->prev_pkt)
        return flush_packet(s, pkt);

    /* first frame: keep it until the next one reveals its duration */
    gif->prev_pkt = av_malloc(sizeof(*gif->prev_pkt));
    if (!gif->prev_pkt)
        return AVERROR(ENOMEM);
    return av_copy_packet(gif->prev_pkt, pkt);
}
 
/**
 * Flush the last buffered frame (using final_delay if configured) and
 * write the GIF trailer byte.
 *
 * @return 0 (never fails)
 */
static int gif_write_trailer(AVFormatContext *s)
{
    GIFContext *priv = s->priv_data;

    flush_packet(s, NULL);
    av_freep(&priv->prev_pkt);
    avio_w8(s->pb, 0x3b); /* GIF trailer */

    return 0;
}
 
#define OFFSET(x) offsetof(GIFContext, x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{ "loop", "Number of times to loop the output: -1 - no loop, 0 - infinite loop", OFFSET(loop),
AV_OPT_TYPE_INT, { .i64 = 0 }, -1, 65535, ENC },
{ "final_delay", "Force delay (in ms) after the last frame", OFFSET(last_delay),
AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 65535, ENC },
{ NULL },
};
 
/* AVClass wiring so the options above are visible to av_opt_*(). */
static const AVClass gif_muxer_class = {
    .class_name = "GIF muxer",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
    .option     = options,
};
 
/* Muxer registration: animated GIF output, single GIF video stream, no audio. */
AVOutputFormat ff_gif_muxer = {
    .name           = "gif",
    .long_name      = NULL_IF_CONFIG_SMALL("GIF Animation"),
    .mime_type      = "image/gif",
    .extensions     = "gif",
    .priv_data_size = sizeof(GIFContext),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_GIF,
    .write_header   = gif_write_header,
    .write_packet   = gif_write_packet,
    .write_trailer  = gif_write_trailer,
    .priv_class     = &gif_muxer_class,
    .flags          = AVFMT_VARIABLE_FPS,
};
/contrib/sdk/sources/ffmpeg/libavformat/gifdec.c
0,0 → 1,334
/*
* GIF demuxer
* Copyright (c) 2012 Vitaliy E Sugrobov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* GIF demuxer.
*/
 
#include "avformat.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "internal.h"
#include "libavcodec/gif.h"
 
typedef struct GIFDemuxContext {
    const AVClass *class;
    /**
     * Time span in hundredths of second before
     * the next frame should be drawn on screen.
     */
    int delay;
    /**
     * Minimum allowed delay between frames in hundredths of
     * second. Values below this threshold considered to be
     * invalid and set to value of default_delay.
     */
    int min_delay;
    int default_delay;  /**< delay substituted when a frame's delay is below min_delay */

    /**
     * loop options
     */
    int total_iter;     /**< iterations requested by the NETSCAPE extension; -1 = loop forever */
    int iter_count;     /**< full passes over the file performed so far */
    int ignore_loop;    /**< when non-zero, play the file exactly once */
} GIFDemuxContext;
 
/**
* Major web browsers display gifs at ~10-15fps when rate
* is not explicitly set or have too low values. We assume default rate to be 10.
* Default delay = 100hundredths of second / 10fps = 10hos per frame.
*/
#define GIF_DEFAULT_DELAY 10
/**
* By default delay values less than this threshold considered to be invalid.
*/
#define GIF_MIN_DELAY 2
 
/* Probe: accept only buffers that start with a valid GIF87a/GIF89a
 * signature and declare non-zero logical screen dimensions. */
static int gif_probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;

    /* reject anything whose signature is neither GIF87a nor GIF89a */
    if (memcmp(b, gif87a_sig, 6) != 0 && memcmp(b, gif89a_sig, 6) != 0)
        return 0;

    /* the logical screen descriptor must have non-zero width and height */
    if (AV_RL16(&b[6]) == 0 || AV_RL16(&b[8]) == 0)
        return 0;

    return AVPROBE_SCORE_MAX;
}
 
/* Scan forward until a complete GIF87a/GIF89a signature has been consumed.
 * On a mismatch the match position is restarted: to index 0 if the byte
 * was 'G' (it may begin a new signature, so it counts as the first match),
 * otherwise to -1 so the loop increment restarts matching from scratch.
 * Returns 0 on success, AVERROR_EOF if the stream ends first. */
static int resync(AVIOContext *pb)
{
    int i;
    for (i = 0; i < 6; i++) {
        int b = avio_r8(pb);
        if (b != gif87a_sig[i] && b != gif89a_sig[i])
            i = -(b != 'G');  /* 'G': retry with this byte as position 0 */
        if (url_feof(pb))
            return AVERROR_EOF;
    }
    return 0;
}
 
/* Locate the GIF signature, read the logical screen descriptor and create
 * the single video stream, then rewind so the decoder also sees the header. */
static int gif_read_header(AVFormatContext *s)
{
    GIFDemuxContext *gdc = s->priv_data;
    AVIOContext     *pb  = s->pb;
    AVStream        *st;
    int ret, width, height;

    ret = resync(pb);
    if (ret < 0)
        return ret;

    gdc->delay = gdc->default_delay;
    width      = avio_rl16(pb);
    height     = avio_rl16(pb);

    if (!width || !height)
        return AVERROR_INVALIDDATA;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    /* GIF format operates with time in "hundredths of second",
     * therefore timebase is 1/100 */
    avpriv_set_pts_info(st, 64, 1, 100);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_GIF;
    st->codec->width      = width;
    st->codec->height     = height;

    /* jump to start because gif decoder needs header data too */
    if (avio_seek(pb, 0, SEEK_SET) != 0)
        return AVERROR(EIO);

    return 0;
}
 
/* Skip a chain of GIF data sub-blocks: each is prefixed by its length,
 * and a zero length byte terminates the chain.  Returns the last
 * avio_skip() result (>= 0) or a negative error. */
static int gif_skip_subblocks(AVIOContext *pb)
{
    int len, ret = 0;

    for (;;) {
        len = avio_r8(pb);
        if (len == 0x00)
            break;
        ret = avio_skip(pb, len);
        if (ret < 0)
            return ret;
    }

    return ret;
}
 
/* Parse one extension block; the 0x21 extension introducer has already
 * been consumed by the caller.  A Graphic Control Extension updates the
 * per-frame delay, a NETSCAPE application extension updates the loop
 * count.  Any remaining sub-blocks are skipped at the end. */
static int gif_read_ext(AVFormatContext *s)
{
    GIFDemuxContext *gdc = s->priv_data;
    AVIOContext *pb = s->pb;
    int sb_size, ext_label = avio_r8(pb);
    int ret;

    if (ext_label == GIF_GCE_EXT_LABEL) {
        if ((sb_size = avio_r8(pb)) < 4) {
            av_log(s, AV_LOG_FATAL, "Graphic Control Extension block's size less than 4.\n");
            return AVERROR_INVALIDDATA;
        }

        /* skip packed fields */
        if ((ret = avio_skip(pb, 1)) < 0)
            return ret;

        gdc->delay = avio_rl16(pb);

        /* implausibly small delays are replaced by the default */
        if (gdc->delay < gdc->min_delay)
            gdc->delay = gdc->default_delay;

        /* skip the rest of the Graphic Control Extension block
         * (3 bytes of it were consumed above) */
        if ((ret = avio_skip(pb, sb_size - 3)) < 0 )
            return ret;
    } else if (ext_label == GIF_APP_EXT_LABEL) {
        uint8_t data[256];

        sb_size = avio_r8(pb);
        ret = avio_read(pb, data, sb_size);
        if (ret < 0 || !sb_size)
            return ret;

        /* application identifier sub-block must match NETSCAPE_EXT_STR's length */
        if (sb_size == strlen(NETSCAPE_EXT_STR)) {
            sb_size = avio_r8(pb);
            ret = avio_read(pb, data, sb_size);
            if (ret < 0 || !sb_size)
                return ret;

            /* sub-block id 1 carries a 16-bit little-endian loop count */
            if (sb_size == 3 && data[0] == 1) {
                gdc->total_iter = AV_RL16(data+1);

                /* a stored loop count of 0 means "loop forever" */
                if (gdc->total_iter == 0)
                    gdc->total_iter = -1;
            }
        }
    }

    if ((ret = gif_skip_subblocks(pb)) < 0)
        return ret;

    return 0;
}
 
/* Extract one GIF frame as a packet.
 *
 * Strategy: remember the frame start offset, walk the blocks (file header
 * if present, extensions, image descriptor + LZW data) to find where the
 * frame ends, then seek back and read the whole span as one packet so the
 * decoder also sees headers and extensions.  On a parse error, resync on
 * the next GIF signature.  At GIF_TRAILER/EOF, rewind to offset 0 when
 * looping is requested (NETSCAPE extension / ignore_loop option). */
static int gif_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    GIFDemuxContext *gdc = s->priv_data;
    AVIOContext *pb = s->pb;
    int packed_fields, block_label, ct_size,
        keyframe, frame_parsed = 0, ret;
    int64_t frame_start = avio_tell(pb), frame_end;
    unsigned char buf[6];

    /* a frame beginning with the file signature is a keyframe */
    if ((ret = avio_read(pb, buf, 6)) == 6) {
        keyframe = memcmp(buf, gif87a_sig, 6) == 0 ||
                   memcmp(buf, gif89a_sig, 6) == 0;
    } else if (ret < 0) {
        return ret;
    } else {
        keyframe = 0;
    }

    if (keyframe) {
parse_keyframe:
        /* skip 2 bytes of width and 2 of height */
        if ((ret = avio_skip(pb, 4)) < 0)
            return ret;

        packed_fields = avio_r8(pb);

        /* skip 1 byte of Background Color Index and 1 byte of Pixel Aspect Ratio */
        if ((ret = avio_skip(pb, 2)) < 0)
            return ret;

        /* global color table presence */
        if (packed_fields & 0x80) {
            ct_size = 3 * (1 << ((packed_fields & 0x07) + 1));

            if ((ret = avio_skip(pb, ct_size)) < 0)
                return ret;
        }
    } else {
        /* not a header: push back the signature bytes we consumed */
        avio_seek(pb, -ret, SEEK_CUR);
        ret = AVERROR_EOF;
    }

    while (GIF_TRAILER != (block_label = avio_r8(pb)) && !url_feof(pb)) {
        if (block_label == GIF_EXTENSION_INTRODUCER) {
            if ((ret = gif_read_ext (s)) < 0 )
                goto resync;
        } else if (block_label == GIF_IMAGE_SEPARATOR) {
            /* skip to last byte of Image Descriptor header */
            if ((ret = avio_skip(pb, 8)) < 0)
                return ret;

            packed_fields = avio_r8(pb);

            /* local color table presence */
            if (packed_fields & 0x80) {
                ct_size = 3 * (1 << ((packed_fields & 0x07) + 1));

                if ((ret = avio_skip(pb, ct_size)) < 0)
                    return ret;
            }

            /* read LZW Minimum Code Size */
            if (avio_r8(pb) < 1) {
                av_log(s, AV_LOG_ERROR, "lzw minimum code size must be >= 1\n");
                goto resync;
            }

            if ((ret = gif_skip_subblocks(pb)) < 0)
                goto resync;

            frame_end = avio_tell(pb);

            /* re-read the whole [frame_start, frame_end) span as one packet */
            if (avio_seek(pb, frame_start, SEEK_SET) != frame_start)
                return AVERROR(EIO);

            ret = av_get_packet(pb, pkt, frame_end - frame_start);
            if (ret < 0)
                return ret;

            if (keyframe)
                pkt->flags |= AV_PKT_FLAG_KEY;

            pkt->stream_index = 0;
            pkt->duration = gdc->delay;

            /* Graphic Control Extension's scope is single frame.
             * Remove its influence. */
            gdc->delay = gdc->default_delay;
            frame_parsed = 1;

            break;
        } else {
            av_log(s, AV_LOG_ERROR, "invalid block label\n");
resync:
            if (!keyframe)
                avio_seek(pb, frame_start, SEEK_SET);
            if ((ret = resync(pb)) < 0)
                return ret;
            /* resync() consumed the 6 signature bytes; restart from there */
            frame_start = avio_tell(pb) - 6;
            keyframe = 1;
            goto parse_keyframe;
        }
    }

    if ((ret >= 0 && !frame_parsed) || ret == AVERROR_EOF) {
        /* This might happen when there is no image block
         * between extension blocks and GIF_TRAILER or EOF */
        if (!gdc->ignore_loop && (block_label == GIF_TRAILER || url_feof(pb))
            && (gdc->total_iter < 0 || ++gdc->iter_count < gdc->total_iter))
            return avio_seek(pb, 0, SEEK_SET);
        return AVERROR_EOF;
    } else
        return ret;
}
 
/* Demuxer options controlling frame-delay sanitation and looping. */
static const AVOption options[] = {
    { "min_delay"    , "minimum valid delay between frames (in hundredths of second)", offsetof(GIFDemuxContext, min_delay)    , AV_OPT_TYPE_INT, {.i64 = GIF_MIN_DELAY}    , 0, 100 * 60, AV_OPT_FLAG_DECODING_PARAM },
    { "default_delay", "default delay between frames (in hundredths of second)"      , offsetof(GIFDemuxContext, default_delay), AV_OPT_TYPE_INT, {.i64 = GIF_DEFAULT_DELAY}, 0, 100 * 60, AV_OPT_FLAG_DECODING_PARAM },
    { "ignore_loop"  , "ignore loop setting (netscape extension)"                    , offsetof(GIFDemuxContext, ignore_loop)  , AV_OPT_TYPE_INT, {.i64 = 1}                , 0,        1, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};
 
/* AVClass wiring so the demuxer options above are visible to av_opt_*(). */
static const AVClass demuxer_class = {
    .class_name = "GIF demuxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEMUXER,
};
 
/* Demuxer registration for CompuServe GIF files. */
AVInputFormat ff_gif_demuxer = {
    .name           = "gif",
    .long_name      = NULL_IF_CONFIG_SMALL("CompuServe Graphics Interchange Format (GIF)"),
    .priv_data_size = sizeof(GIFDemuxContext),
    .read_probe     = gif_probe,
    .read_header    = gif_read_header,
    .read_packet    = gif_read_packet,
    .flags          = AVFMT_GENERIC_INDEX,
    .priv_class     = &demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/gopher.c
0,0 → 1,125
/*
* Gopher protocol
*
* Copyright (c) 2009 Toshimitsu Kimura
*
* based on libavformat/http.c, Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "avformat.h"
#include "internal.h"
#include "network.h"
#include "url.h"
 
/* Protocol private data: the underlying TCP connection. */
typedef struct {
    URLContext *hd;  /* TCP transport handle; NULL when closed */
} GopherContext;
 
/* Forward a raw write to the underlying TCP connection. */
static int gopher_write(URLContext *h, const uint8_t *buf, int size)
{
    GopherContext *ctx = h->priv_data;

    return ffurl_write(ctx->hd, buf, size);
}
 
/* Send the gopher selector for the given URL path.  Only binary item
 * types '5' and '9' are supported; the selector string that follows the
 * type character is terminated with CRLF on the wire. */
static int gopher_connect(URLContext *h, const char *path)
{
    char request[1024];

    if (!*path)
        return AVERROR(EINVAL);
    path++;                       /* step past the leading '/' */

    switch (*path) {
    case '5':
    case '9':
        /* binary types: the real selector starts at the next '/' */
        path = strchr(path, '/');
        if (!path)
            return AVERROR(EINVAL);
        break;
    default:
        av_log(h, AV_LOG_WARNING,
               "Gopher protocol type '%c' not supported yet!\n",
               *path);
        return AVERROR(EINVAL);
    }

    /* send gopher sector */
    snprintf(request, sizeof(request), "%s\r\n", path);

    if (gopher_write(h, request, strlen(request)) < 0)
        return AVERROR(EIO);

    return 0;
}
 
/* Tear down the TCP transport if it is still open.  Always succeeds. */
static int gopher_close(URLContext *h)
{
    GopherContext *ctx = h->priv_data;

    if (ctx->hd) {
        ffurl_close(ctx->hd);
        ctx->hd = NULL;       /* guard against double close */
    }
    return 0;
}
 
/* Open a gopher URL: connect a TCP transport to the host (default port 70)
 * and send the selector derived from the URL path. */
static int gopher_open(URLContext *h, const char *uri, int flags)
{
    GopherContext *ctx = h->priv_data;
    char hostname[1024], auth[1024], path[1024], tcp_url[1024];
    int port, err;

    h->is_streamed = 1;

    /* needed in any case to build the host string */
    av_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
                 path, sizeof(path), uri);

    if (port < 0)
        port = 70;               /* no explicit port: use the gopher default */

    ff_url_join(tcp_url, sizeof(tcp_url), "tcp", NULL, hostname, port, NULL);

    ctx->hd = NULL;
    err = ffurl_open(&ctx->hd, tcp_url, AVIO_FLAG_READ_WRITE,
                     &h->interrupt_callback, NULL);
    if (err < 0)
        goto fail;

    err = gopher_connect(h, path);
    if (err < 0)
        goto fail;
    return 0;

fail:
    gopher_close(h);
    return err;
}
 
/* Forward a raw read from the underlying TCP connection. */
static int gopher_read(URLContext *h, uint8_t *buf, int size)
{
    GopherContext *ctx = h->priv_data;

    return ffurl_read(ctx->hd, buf, size);
}
 
 
/* Protocol registration: gopher is read/write over a plain TCP link. */
URLProtocol ff_gopher_protocol = {
    .name           = "gopher",
    .url_open       = gopher_open,
    .url_read       = gopher_read,
    .url_write      = gopher_write,
    .url_close      = gopher_close,
    .priv_data_size = sizeof(GopherContext),
    .flags          = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/gsmdec.c
0,0 → 1,100
/*
* RAW GSM demuxer
* Copyright (c) 2011 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "internal.h"
 
#define GSM_BLOCK_SIZE 33
#define GSM_BLOCK_SAMPLES 160
#define GSM_SAMPLE_RATE 8000
 
/* Demuxer private data; raw GSM carries no header, so the sample rate
 * comes from the "sample_rate" option. */
typedef struct {
    AVClass *class;
    int sample_rate;  /* configured via AVOption, default GSM_SAMPLE_RATE */
} GSMDemuxerContext;
 
/* Read one fixed-size GSM block as a packet; pts is derived from the
 * byte offset, duration is one block (one timebase unit). */
static int gsm_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;
    const int block = GSM_BLOCK_SIZE;

    pkt->pos          = avio_tell(s->pb);
    pkt->stream_index = 0;

    ret = av_get_packet(s->pb, pkt, block);
    if (ret < GSM_BLOCK_SIZE) {
        /* short read: discard the partial block */
        av_free_packet(pkt);
        return ret < 0 ? ret : AVERROR(EIO);
    }
    pkt->duration = 1;
    pkt->pts      = pkt->pos / GSM_BLOCK_SIZE;

    return 0;
}
 
/* Create the single mono audio stream and derive bitrate/timebase from
 * the fixed GSM block geometry (33 bytes per 160 samples). */
static int gsm_read_header(AVFormatContext *s)
{
    GSMDemuxerContext *c = s->priv_data;
    AVCodecContext *codec;
    AVStream *st;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    codec = st->codec;

    codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    codec->codec_id       = s->iformat->raw_codec_id;
    codec->channels       = 1;
    codec->channel_layout = AV_CH_LAYOUT_MONO;
    codec->sample_rate    = c->sample_rate;
    codec->bit_rate       = GSM_BLOCK_SIZE * 8 * c->sample_rate / GSM_BLOCK_SAMPLES;

    avpriv_set_pts_info(st, 64, GSM_BLOCK_SAMPLES, GSM_SAMPLE_RATE);

    return 0;
}
 
/* Demuxer option: raw GSM has no header, so the rate must be supplied. */
static const AVOption options[] = {
    { "sample_rate", "", offsetof(GSMDemuxerContext, sample_rate),
       AV_OPT_TYPE_INT, {.i64 = GSM_SAMPLE_RATE}, 1, INT_MAX / GSM_BLOCK_SIZE,
       AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};
 
/* AVClass wiring so the sample_rate option is visible to av_opt_*(). */
static const AVClass gsm_class = {
    .class_name = "gsm demuxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Demuxer registration for headerless .gsm files. */
AVInputFormat ff_gsm_demuxer = {
    .name           = "gsm",
    .long_name      = NULL_IF_CONFIG_SMALL("raw GSM"),
    .priv_data_size = sizeof(GSMDemuxerContext),
    .read_header    = gsm_read_header,
    .read_packet    = gsm_read_packet,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "gsm",
    .raw_codec_id   = AV_CODEC_ID_GSM,
    .priv_class     = &gsm_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/gxf.c
0,0 → 1,604
/*
* GXF demuxer.
* Copyright (c) 2006 Reimar Doeffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "avformat.h"
#include "internal.h"
#include "gxf.h"
#include "libavcodec/mpeg12data.h"
 
/* Per-file state collected from the MAP packet's material and track tags. */
struct gxf_stream_info {
    int64_t first_field;           /**< MAT_FIRST_FIELD tag; AV_NOPTS_VALUE if absent */
    int64_t last_field;            /**< MAT_LAST_FIELD tag; AV_NOPTS_VALUE if absent */
    AVRational frames_per_second;  /**< from TRACK_FPS tag; {0,0} = unknown */
    int32_t fields_per_frame;      /**< from TRACK_FPF tag: 1 or 2; 0 = unknown */
    int64_t track_aux_data;        /**< from TRACK_AUX tag; low 32 bits hold timecode */
};
 
/**
* @brief parse gxf timecode and add it to metadata
*/
static int add_timecode_metadata(AVDictionary **pm, const char *key, uint32_t timecode, int fields_per_frame)
{
    char buf[128];
    /* unpack the packed BCD-like fields of the 32-bit timecode word */
    int field  =  timecode        & 0xff;
    int second = (timecode >> 8)  & 0xff;
    int minute = (timecode >> 16) & 0xff;
    int hour   = (timecode >> 24) & 0x1f;
    int drop   = (timecode >> 29) & 1;
    int frame  = fields_per_frame ? field / fields_per_frame : field;
    // bit 30: color_frame, unused
    // ignore invalid time code
    if (timecode >> 31)
        return 0;
    snprintf(buf, sizeof(buf), "%02d:%02d:%02d%c%02d",
             hour, minute, second, drop ? ';' : ':', frame);
    return av_dict_set(pm, key, buf, 0);
}
 
/**
* @brief parses a packet header, extracting type and length
* @param pb AVIOContext to read header from
* @param type detected packet type is stored here
* @param length detected packet length, excluding header is stored here
* @return 0 if header not found or contains invalid data, 1 otherwise
*/
static int parse_packet_header(AVIOContext *pb, GXFPktType *type, int *length) {
    if (avio_rb32(pb))                    /* leader: 32 zero bits */
        return 0;
    if (avio_r8(pb) != 1)                 /* fixed 0x01 marker byte */
        return 0;
    *type = avio_r8(pb);
    *length = avio_rb32(pb);
    /* total length must fit in 24 bits and include the 16-byte header */
    if ((*length >> 24) || *length < 16)
        return 0;
    *length -= 16;                        /* report payload size only */
    if (avio_rb32(pb))                    /* reserved word, must be zero */
        return 0;
    if (avio_r8(pb) != 0xe1)              /* header trailer magic bytes */
        return 0;
    if (avio_r8(pb) != 0xe2)
        return 0;
    return 1;
}
 
/**
* @brief check if file starts with a PKT_MAP header
*/
static int gxf_probe(AVProbeData *p) {
    /* a GXF file must open with a MAP packet header: zero leader,
     * 0x01 0xbc type bytes, and the 0xe1 0xe2 magic at offset 14 */
    static const uint8_t startcode[] = {0, 0, 0, 0, 1, 0xbc}; // start with map packet
    static const uint8_t endcode[]   = {0, 0, 0, 0, 0xe1, 0xe2};

    if (memcmp(p->buf, startcode, sizeof(startcode)) ||
        memcmp(&p->buf[16 - sizeof(endcode)], endcode, sizeof(endcode)))
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/**
* @brief gets the stream index for the track with the specified id, creates new
* stream if not found
* @param id id of stream to find / add
* @param format stream format identifier
*/
static int get_sindex(AVFormatContext *s, int id, int format) {
    int i;
    AVStream *st = NULL;

    /* reuse an existing stream with this track id if one was created before */
    i = ff_find_stream_index(s, id);
    if (i >= 0)
        return i;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->id = id;

    /* map the GXF track format code onto a codec */
    switch (format) {
        case 3:
        case 4:
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id = AV_CODEC_ID_MJPEG;
            break;
        case 13:
        case 14:
        case 15:
        case 16:
        case 25:
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id = AV_CODEC_ID_DVVIDEO;
            break;
        case 11:
        case 12:
        case 20:
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id = AV_CODEC_ID_MPEG2VIDEO;
            st->need_parsing = AVSTREAM_PARSE_HEADERS; //get keyframe flag etc.
            break;
        case 22:
        case 23:
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id = AV_CODEC_ID_MPEG1VIDEO;
            st->need_parsing = AVSTREAM_PARSE_HEADERS; //get keyframe flag etc.
            break;
        case 9:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_id = AV_CODEC_ID_PCM_S24LE;
            st->codec->channels = 1;
            st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            st->codec->sample_rate = 48000;
            st->codec->bit_rate = 3 * 1 * 48000 * 8;
            st->codec->block_align = 3 * 1;
            st->codec->bits_per_coded_sample = 24;
            break;
        case 10:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
            st->codec->channels = 1;
            st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            st->codec->sample_rate = 48000;
            st->codec->bit_rate = 2 * 1 * 48000 * 8;
            st->codec->block_align = 2 * 1;
            st->codec->bits_per_coded_sample = 16;
            break;
        case 17:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_id = AV_CODEC_ID_AC3;
            st->codec->channels = 2;
            st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            st->codec->sample_rate = 48000;
            break;
        case 26: /* AVCi50 / AVCi100 (AVC Intra) */
        case 29: /* AVCHD */
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            /* AV_-prefixed name for consistency with the rest of this file;
             * CODEC_ID_H264 is the deprecated compatibility alias */
            st->codec->codec_id = AV_CODEC_ID_H264;
            st->need_parsing = AVSTREAM_PARSE_HEADERS;
            break;
        // timecode tracks:
        case 7:
        case 8:
        case 24:
            st->codec->codec_type = AVMEDIA_TYPE_DATA;
            st->codec->codec_id = AV_CODEC_ID_NONE;
            break;
        case 30:
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id = AV_CODEC_ID_DNXHD;
            break;
        default:
            st->codec->codec_type = AVMEDIA_TYPE_UNKNOWN;
            st->codec->codec_id = AV_CODEC_ID_NONE;
            break;
    }
    return s->nb_streams - 1;
}
 
/**
* @brief filters out interesting tags from material information.
* @param len length of tag section, will be adjusted to contain remaining bytes
* @param si struct to store collected information into
*/
static void gxf_material_tags(AVIOContext *pb, int *len, struct gxf_stream_info *si) {
    si->first_field = AV_NOPTS_VALUE;
    si->last_field  = AV_NOPTS_VALUE;

    /* walk the (tag, length, payload) list until the section is exhausted */
    while (*len >= 2) {
        GXFMatTag tag = avio_r8(pb);
        int tlen      = avio_r8(pb);
        *len -= 2;
        if (tlen > *len)         /* tag claims more bytes than remain: bail out */
            return;
        *len -= tlen;
        if (tlen == 4) {
            uint32_t value = avio_rb32(pb);
            switch (tag) {
            case MAT_FIRST_FIELD:
                si->first_field = value;
                break;
            case MAT_LAST_FIELD:
                si->last_field = value;
                break;
            default:
                break;
            }
        } else
            avio_skip(pb, tlen);
    }
}
 
/* Frame rates addressed by the 1-based TRACK_FPS tag value;
 * the final {0, 0} entry is the "unknown" sentinel. */
static const AVRational frame_rate_tab[] = {
    {   60,    1},
    {60000, 1001},
    {   50,    1},
    {   30,    1},
    {30000, 1001},
    {   25,    1},
    {   24,    1},
    {24000, 1001},
    {    0,    0},
};
 
/**
* @brief convert fps tag value to AVRational fps
* @param fps fps value from tag
* @return fps as AVRational, or 0 / 0 if unknown
*/
static AVRational fps_tag2avr(int32_t fps) {
    /* out-of-range tag values map to the {0,0} sentinel (last entry) */
    if (fps < 1 || fps > 9)
        fps = 9;
    return frame_rate_tab[fps - 1];    /* tag values are 1-based */
}
 
/**
* @brief convert UMF attributes flags to AVRational fps
* @param flags UMF flags to convert
* @return fps as AVRational, or 0 / 0 if unknown
*/
static AVRational fps_umf2avr(uint32_t flags) {
    static const AVRational map[] = {{50, 1}, {60000, 1001}, {24, 1},
                                     {25, 1}, {30000, 1001}};
    /* bits 6..10 select the rate; av_log2 turns the set bit into an index */
    int idx =  av_log2((flags & 0x7c0) >> 6);
    return map[idx];
}
 
/**
* @brief filters out interesting tags from track information.
* @param len length of tag section, will be adjusted to contain remaining bytes
* @param si struct to store collected information into
*/
static void gxf_track_tags(AVIOContext *pb, int *len, struct gxf_stream_info *si) {
    /* reset to "unknown" before collecting this track's tags */
    si->frames_per_second = (AVRational){0, 0};
    si->fields_per_frame = 0;
    si->track_aux_data = 0x80000000;
    /* walk the (tag, length, payload) list until the section is exhausted */
    while (*len >= 2) {
        GXFTrackTag tag = avio_r8(pb);
        int tlen = avio_r8(pb);
        *len -= 2;
        if (tlen > *len)        /* tag claims more bytes than remain: bail out */
            return;
        *len -= tlen;
        if (tlen == 4) {
            uint32_t value = avio_rb32(pb);
            if (tag == TRACK_FPS)
                si->frames_per_second = fps_tag2avr(value);
            else if (tag == TRACK_FPF && (value == 1 || value == 2))
                si->fields_per_frame = value;
        } else if (tlen == 8 && tag == TRACK_AUX)
            si->track_aux_data = avio_rl64(pb);
        else
            avio_skip(pb, tlen);
    }
}
 
/**
* @brief read index from FLT packet into stream 0 av_index
*/
static void gxf_read_index(AVFormatContext *s, int pkt_len) {
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint32_t fields_per_map = avio_rl32(pb);
    uint32_t map_cnt = avio_rl32(pb);
    int i;
    pkt_len -= 8;
    /* honour the ignore-index flag; also bail if no stream exists yet */
    if ((s->flags & AVFMT_FLAG_IGNIDX) || !s->streams) {
        avio_skip(pb, pkt_len);
        return;
    }
    st = s->streams[0];
    /* clamp absurd entry counts to keep memory use bounded */
    if (map_cnt > 1000) {
        av_log(s, AV_LOG_ERROR, "too many index entries %u (%x)\n", map_cnt, map_cnt);
        map_cnt = 1000;
    }
    if (pkt_len < 4 * map_cnt) {
        av_log(s, AV_LOG_ERROR, "invalid index length\n");
        avio_skip(pb, pkt_len);
        return;
    }
    pkt_len -= 4 * map_cnt;
    av_add_index_entry(st, 0, 0, 0, 0, 0);
    /* each entry is a byte offset in 1024-byte units; timestamps are
     * field numbers spaced fields_per_map apart */
    for (i = 0; i < map_cnt; i++)
        av_add_index_entry(st, (uint64_t)avio_rl32(pb) * 1024,
                           i * (uint64_t)fields_per_map + 1, 0, 0, 0);
    avio_skip(pb, pkt_len);
}
 
/* Parse the opening MAP packet (material tags, track descriptions),
 * an optional FLT index packet and an optional UMF packet, creating
 * streams and deriving the common timebase.  The timebase is in
 * *fields*, hence the "* 2" on the fps denominator. */
static int gxf_header(AVFormatContext *s) {
    AVIOContext *pb = s->pb;
    GXFPktType pkt_type;
    int map_len;
    int len;
    AVRational main_timebase = {0, 0};
    struct gxf_stream_info *si = s->priv_data;
    int i;
    if (!parse_packet_header(pb, &pkt_type, &map_len) || pkt_type != PKT_MAP) {
        av_log(s, AV_LOG_ERROR, "map packet not found\n");
        return 0;
    }
    map_len -= 2;
    /* map preamble: version byte 0xe0 followed by 0xff */
    if (avio_r8(pb) != 0x0e0 || avio_r8(pb) != 0xff) {
        av_log(s, AV_LOG_ERROR, "unknown version or invalid map preamble\n");
        return 0;
    }
    map_len -= 2;
    len = avio_rb16(pb); // length of material data section
    if (len > map_len) {
        av_log(s, AV_LOG_ERROR, "material data longer than map data\n");
        return 0;
    }
    map_len -= len;
    gxf_material_tags(pb, &len, si);
    avio_skip(pb, len);
    map_len -= 2;
    len = avio_rb16(pb); // length of track description
    if (len > map_len) {
        av_log(s, AV_LOG_ERROR, "track description longer than map data\n");
        return 0;
    }
    map_len -= len;
    /* one iteration per track description entry */
    while (len > 0) {
        int track_type, track_id, track_len;
        AVStream *st;
        int idx;
        len -= 4;
        track_type = avio_r8(pb);
        track_id = avio_r8(pb);
        track_len = avio_rb16(pb);
        len -= track_len;
        /* track type must have the high bit set, track id the two high bits */
        if (!(track_type & 0x80)) {
            av_log(s, AV_LOG_ERROR, "invalid track type %x\n", track_type);
            continue;
        }
        track_type &= 0x7f;
        if ((track_id & 0xc0) != 0xc0) {
            av_log(s, AV_LOG_ERROR, "invalid track id %x\n", track_id);
            continue;
        }
        track_id &= 0x3f;
        gxf_track_tags(pb, &track_len, si);
        // check for timecode tracks
        if (track_type == 7 || track_type == 8 || track_type == 24) {
            add_timecode_metadata(&s->metadata, "timecode",
                                  si->track_aux_data & 0xffffffff,
                                  si->fields_per_frame);

        }
        avio_skip(pb, track_len);

        idx = get_sindex(s, track_id, track_type);
        if (idx < 0) continue;
        st = s->streams[idx];
        /* first track with a known fps defines the common timebase */
        if (!main_timebase.num || !main_timebase.den) {
            main_timebase.num = si->frames_per_second.den;
            main_timebase.den = si->frames_per_second.num * 2;
        }
        st->start_time = si->first_field;
        if (si->first_field != AV_NOPTS_VALUE && si->last_field != AV_NOPTS_VALUE)
            st->duration = si->last_field - si->first_field;
    }
    if (len < 0)
        av_log(s, AV_LOG_ERROR, "invalid track description length specified\n");
    if (map_len)
        avio_skip(pb, map_len);
    if (!parse_packet_header(pb, &pkt_type, &len)) {
        av_log(s, AV_LOG_ERROR, "sync lost in header\n");
        return -1;
    }
    /* optional FLT (seek index) packet right after the map */
    if (pkt_type == PKT_FLT) {
        gxf_read_index(s, len);
        if (!parse_packet_header(pb, &pkt_type, &len)) {
            av_log(s, AV_LOG_ERROR, "sync lost in header\n");
            return -1;
        }
    }
    /* optional UMF packet: fallback fps source and mark in/out timecodes */
    if (pkt_type == PKT_UMF) {
        if (len >= 0x39) {
            AVRational fps;
            len -= 0x39;
            avio_skip(pb, 5); // preamble
            avio_skip(pb, 0x30); // payload description
            fps = fps_umf2avr(avio_rl32(pb));
            if (!main_timebase.num || !main_timebase.den) {
                av_log(s, AV_LOG_WARNING, "No FPS track tag, using UMF fps tag."
                                          " This might give wrong results.\n");
                // this may not always be correct, but simply the best we can get
                main_timebase.num = fps.den;
                main_timebase.den = fps.num * 2;
            }

            if (len >= 0x18) {
                len -= 0x18;
                avio_skip(pb, 0x10);
                add_timecode_metadata(&s->metadata, "timecode_at_mark_in",
                                      avio_rl32(pb), si->fields_per_frame);
                add_timecode_metadata(&s->metadata, "timecode_at_mark_out",
                                      avio_rl32(pb), si->fields_per_frame);
            }
        } else
            av_log(s, AV_LOG_INFO, "UMF packet too short\n");
    } else
        av_log(s, AV_LOG_INFO, "UMF packet missing\n");
    avio_skip(pb, len);
    // set a fallback value, 60000/1001 is specified for audio-only files
    // so use that regardless of why we do not know the video frame rate.
    if (!main_timebase.num || !main_timebase.den)
        main_timebase = (AVRational){1001, 60000};
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        avpriv_set_pts_info(st, 32, main_timebase.num, main_timebase.den);
    }
    return 0;
}
 
/* Shift one more byte into the low end of tmp, jumping to the "out"
 * label of the enclosing function when the search budget (max_interval)
 * is exhausted or EOF is reached.  Used only by gxf_resync_media(). */
#define READ_ONE() \
    { \
        if (!max_interval-- || url_feof(pb)) \
            goto out; \
        tmp = tmp << 8 | avio_r8(pb); \
    }
 
/**
* @brief resync the stream on the next media packet with specified properties
* @param max_interval how many bytes to search for matching packet at most
* @param track track id the media packet must belong to, -1 for any
* @param timestamp minimum timestamp (== field number) the packet must have, -1 for any
* @return timestamp of packet found
*/
static int64_t gxf_resync_media(AVFormatContext *s, uint64_t max_interval, int track, int timestamp) {
    uint32_t tmp;
    uint64_t last_pos;
    uint64_t last_found_pos = 0;
    int cur_track;
    int64_t cur_timestamp = AV_NOPTS_VALUE;
    int len;
    AVIOContext *pb = s->pb;
    GXFPktType type;
    tmp = avio_rb32(pb);
start:
    /* scan byte by byte for the 00 00 00 00 01 packet start code */
    while (tmp)
        READ_ONE();
    READ_ONE();
    if (tmp != 1)
        goto start;
    /* candidate found: step back 5 bytes and re-parse the full header */
    last_pos = avio_tell(pb);
    if (avio_seek(pb, -5, SEEK_CUR) < 0)
        goto out;
    if (!parse_packet_header(pb, &type, &len) || type != PKT_MEDIA) {
        if (avio_seek(pb, last_pos, SEEK_SET) < 0)
            goto out;
        goto start;
    }
    avio_r8(pb);                     /* track type byte (cf. gxf_packet) */
    cur_track = avio_r8(pb);
    cur_timestamp = avio_rb32(pb);
    /* position of the packet start: rewind over 16-byte header + 6 bytes read */
    last_found_pos = avio_tell(pb) - 16 - 6;
    /* keep scanning until both the track and timestamp constraints match */
    if ((track >= 0 && track != cur_track) || (timestamp >= 0 && timestamp > cur_timestamp)) {
        if (avio_seek(pb, last_pos, SEEK_SET) >= 0)
            goto start;
    }
out:
    if (last_found_pos)
        avio_seek(pb, last_found_pos, SEEK_SET);
    return cur_timestamp;
}
 
/* Read the next media packet, consuming FLT (index) packets on the way
 * and skipping all other packet types.  For PCM audio tracks the
 * field_info word selects a sub-range of samples within the payload. */
static int gxf_packet(AVFormatContext *s, AVPacket *pkt) {
    AVIOContext *pb = s->pb;
    GXFPktType pkt_type;
    int pkt_len;
    struct gxf_stream_info *si = s->priv_data;

    while (!pb->eof_reached) {
        AVStream *st;
        int track_type, track_id, ret;
        int field_nr, field_info, skip = 0;
        int stream_index;
        if (!parse_packet_header(pb, &pkt_type, &pkt_len)) {
            if (!url_feof(pb))
                av_log(s, AV_LOG_ERROR, "sync lost\n");
            return -1;
        }
        if (pkt_type == PKT_FLT) {
            gxf_read_index(s, pkt_len);
            continue;
        }
        if (pkt_type != PKT_MEDIA) {
            avio_skip(pb, pkt_len);
            continue;
        }
        /* media payload starts with a 16-byte sub-header */
        if (pkt_len < 16) {
            av_log(s, AV_LOG_ERROR, "invalid media packet length\n");
            continue;
        }
        pkt_len -= 16;
        track_type = avio_r8(pb);
        track_id = avio_r8(pb);
        stream_index = get_sindex(s, track_id, track_type);
        if (stream_index < 0)
            return stream_index;
        st = s->streams[stream_index];
        field_nr = avio_rb32(pb);
        field_info = avio_rb32(pb);
        avio_rb32(pb); // "timeline" field number
        avio_r8(pb); // flags
        avio_r8(pb); // reserved
        if (st->codec->codec_id == AV_CODEC_ID_PCM_S24LE ||
            st->codec->codec_id == AV_CODEC_ID_PCM_S16LE) {
            /* field_info packs first/last valid sample indices */
            int first = field_info >> 16;
            int last = field_info & 0xffff; // last is exclusive
            int bps = av_get_bits_per_sample(st->codec->codec_id)>>3;
            if (first <= last && last*bps <= pkt_len) {
                avio_skip(pb, first*bps);
                skip = pkt_len - last*bps;
                pkt_len = (last-first)*bps;
            } else
                av_log(s, AV_LOG_ERROR, "invalid first and last sample values\n");
        }
        ret = av_get_packet(pb, pkt, pkt_len);
        if (skip)
            avio_skip(pb, skip);
        pkt->stream_index = stream_index;
        pkt->dts = field_nr;    /* timestamps are field numbers */

        //set duration manually for DV or else lavf misdetects the frame rate
        if (st->codec->codec_id == AV_CODEC_ID_DVVIDEO)
            pkt->duration = si->fields_per_frame;

        return ret;
    }
    return AVERROR_EOF;
}
 
/* Seek using stream 0's FLT-derived index: jump to the closest entry at
 * or before the requested timestamp, then scan forward for a media
 * packet whose field number is close enough. */
static int gxf_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) {
    int res = 0;
    uint64_t pos;
    uint64_t maxlen = 100 * 1024 * 1024;   /* scan budget when no next index entry */
    AVStream *st = s->streams[0];
    int64_t start_time = s->streams[stream_index]->start_time;
    int64_t found;
    int idx;
    if (timestamp < start_time) timestamp = start_time;
    idx = av_index_search_timestamp(st, timestamp - start_time,
                                    AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
    if (idx < 0)
        return -1;
    pos = st->index_entries[idx].pos;
    /* bound the scan by the position two index entries ahead, if known */
    if (idx < st->nb_index_entries - 2)
        maxlen = st->index_entries[idx + 2].pos - pos;
    maxlen = FFMAX(maxlen, 200 * 1024);    /* always allow a reasonable window */
    res = avio_seek(s->pb, pos, SEEK_SET);
    if (res < 0)
        return res;
    found = gxf_resync_media(s, maxlen, -1, timestamp);
    if (FFABS(found - timestamp) > 4)      /* tolerate up to 4 fields of error */
        return -1;
    return 0;
}
 
/* read_timestamp callback for generic seeking: position the stream at
 * *pos and return the timestamp of the next media packet found there. */
static int64_t gxf_read_timestamp(AVFormatContext *s, int stream_index,
                                  int64_t *pos, int64_t pos_limit) {
    AVIOContext *pb = s->pb;
    int64_t found;

    if (avio_seek(pb, *pos, SEEK_SET) < 0)
        return AV_NOPTS_VALUE;
    found = gxf_resync_media(s, pos_limit - *pos, -1, -1);
    *pos  = avio_tell(pb);
    return found;
}
 
/* Demuxer registration for GXF (SMPTE 360M) files. */
AVInputFormat ff_gxf_demuxer = {
    .name           = "gxf",
    .long_name      = NULL_IF_CONFIG_SMALL("GXF (General eXchange Format)"),
    .priv_data_size = sizeof(struct gxf_stream_info),
    .read_probe     = gxf_probe,
    .read_header    = gxf_header,
    .read_packet    = gxf_packet,
    .read_seek      = gxf_seek,
    .read_timestamp = gxf_read_timestamp,
};
/contrib/sdk/sources/ffmpeg/libavformat/gxf.h
0,0 → 1,52
/*
* GXF demuxer
* copyright (c) 2006 Reimar Doeffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_GXF_H
#define AVFORMAT_GXF_H
 
/* GXF packet types, i.e. the type byte of each SMPTE 360M packet header. */
typedef enum {
    PKT_MAP   = 0xbc,
    PKT_MEDIA = 0xbf,
    PKT_EOS   = 0xfb,
    PKT_FLT   = 0xfc,
    PKT_UMF   = 0xfd,
} GXFPktType;

/* Tag values used inside the MAP packet's material data section. */
typedef enum {
    MAT_NAME        = 0x40,
    MAT_FIRST_FIELD = 0x41,
    MAT_LAST_FIELD  = 0x42,
    MAT_MARK_IN     = 0x43,
    MAT_MARK_OUT    = 0x44,
    MAT_SIZE        = 0x45,
} GXFMatTag;

/* Tag values used inside the MAP packet's track description section. */
typedef enum {
    TRACK_NAME    = 0x4c,
    TRACK_AUX     = 0x4d,
    TRACK_VER     = 0x4e,
    TRACK_MPG_AUX = 0x4f,
    TRACK_FPS     = 0x50,
    TRACK_LINES   = 0x51,
    TRACK_FPF     = 0x52,
} GXFTrackTag;
 
#endif /* AVFORMAT_GXF_H */
/contrib/sdk/sources/ffmpeg/libavformat/gxfenc.c
0,0 → 1,1033
/*
* GXF muxer.
* Copyright (c) 2006 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/intfloat.h"
#include "libavutil/opt.h"
#include "libavutil/mathematics.h"
#include "libavutil/timecode.h"
#include "avformat.h"
#include "internal.h"
#include "gxf.h"
#include "audiointerleave.h"
 
/* Fixed payload size of one GXF audio media packet. */
#define GXF_AUDIO_PACKET_SIZE 65536

/* Pack a timecode into the 32-bit on-disk GXF representation:
 * color flag | drop flag | hours | minutes | seconds | frames. */
#define GXF_TIMECODE(c, d, h, m, s, f) \
    ((c) << 30 | (d) << 29 | (h) << 24 | (m) << 16 | (s) << 8 | (f))

/* Parsed start timecode, used for the timecode track and the UMF marks. */
typedef struct GXFTimecode{
    int hh;    // hours
    int mm;    // minutes
    int ss;    // seconds
    int ff;    // frames (doubled when the video has 2 fields per frame)
    int color; // color frame flag
    int drop;  // drop-frame flag
} GXFTimecode;

/* Per-stream muxing state, stored in AVStream.priv_data. */
typedef struct GXFStreamContext {
    AudioInterleaveContext aic;   // state for audio rechunking/interleaving
    uint32_t track_type;          // GXF track type (1=MJPEG, 2=PCM, 3=TC, 4=MPEG2, 5/6=DV, 9=MPEG1)
    uint32_t sample_size;         // bit rate for video, bits per sample for audio
    uint32_t sample_rate;         // fields per second for video, Hz for audio
    uint16_t media_type;          // GXF media type id (see gxf_media_types)
    uint16_t media_info;          // two-char track id, e.g. 'M0', 'A1'
    int frame_rate_index;         // GXF frame rate code (5=NTSC, 6=PAL)
    int lines_index;              // GXF lines-per-frame code (see gxf_lines_tab)
    int fields;                   // fields per frame (2 = interlaced)
    int iframes;                  // running count of I frames seen
    int pframes;                  // running count of P frames seen
    int bframes;                  // running count of B frames seen
    int p_per_gop;                // derived P frames per GOP, written in aux data
    int b_per_i_or_p; ///< number of B frames per I frame or P frame
    int first_gop_closed;         // closed_gop flag of the first GOP; -1 = unknown
    unsigned order;   ///< interleaving order
} GXFStreamContext;

/* Muxer-wide state, stored in AVFormatContext.priv_data. */
typedef struct GXFContext {
    AVClass *av_class;
    uint32_t nb_fields;           // total number of video fields written so far
    uint16_t audio_tracks;        // number of audio tracks
    uint16_t mpeg_tracks;         // number of MPEG video tracks
    int64_t creation_time;
    uint32_t umf_start_offset;    // file offset of the UMF payload
    uint32_t umf_track_offset;    // offset of the UMF track section, relative to umf_start_offset
    uint32_t umf_media_offset;    // offset of the UMF media section, relative to umf_start_offset
    uint32_t umf_length;          // total UMF payload length in bytes
    uint16_t umf_track_size;      // size of the UMF track section
    uint16_t umf_media_size;      // size of the UMF media section
    AVRational time_base;         // video time base (1/50 PAL, 1001/60000 NTSC)
    int flags;                    // material flags written in the UMF description
    GXFStreamContext timecode_track; // synthetic timecode track, placed after the real streams
    unsigned *flt_entries;    ///< offsets of packets /1024, starts after 2nd video field
    unsigned flt_entries_nb;
    uint64_t *map_offsets;    ///< offset of map packets
    unsigned map_offsets_nb;
    unsigned packet_count;        // media packets since the last MAP packet
    GXFTimecode tc;               // start timecode parsed from metadata
} GXFContext;
 
/* Mapping of frame heights to the GXF lines-per-frame index. */
static const struct {
    int height, index;
} gxf_lines_tab[] = {
    { 480,  1 }, /* NTSC */
    { 512,  1 }, /* NTSC + VBI */
    { 576,  2 }, /* PAL */
    { 608,  2 }, /* PAL + VBI */
    { 1080, 4 },
    { 720,  6 },
};

/* Mapping of codec ids to GXF media type ids. */
static const AVCodecTag gxf_media_types[] = {
    { AV_CODEC_ID_MJPEG     ,  3 }, /* NTSC */
    { AV_CODEC_ID_MJPEG     ,  4 }, /* PAL */
    { AV_CODEC_ID_PCM_S24LE ,  9 },
    { AV_CODEC_ID_PCM_S16LE , 10 },
    { AV_CODEC_ID_MPEG2VIDEO, 11 }, /* NTSC */
    { AV_CODEC_ID_MPEG2VIDEO, 12 }, /* PAL */
    { AV_CODEC_ID_DVVIDEO   , 13 }, /* NTSC */
    { AV_CODEC_ID_DVVIDEO   , 14 }, /* PAL */
    { AV_CODEC_ID_DVVIDEO   , 15 }, /* 50M NTSC */
    { AV_CODEC_ID_DVVIDEO   , 16 }, /* 50M PAL */
    { AV_CODEC_ID_AC3       , 17 },
    //{ AV_CODEC_ID_NONE,  ,   18 }, /* Non compressed 24 bit audio */
    { AV_CODEC_ID_MPEG2VIDEO, 20 }, /* MPEG HD */
    { AV_CODEC_ID_MPEG1VIDEO, 22 }, /* NTSC */
    { AV_CODEC_ID_MPEG1VIDEO, 23 }, /* PAL */
    { AV_CODEC_ID_NONE,        0 },
};

/* Server-side path prefixes written in the material and track names. */
#define SERVER_PATH "EXT:/PDR/default/"
#define ES_NAME_PATTERN "EXT:/PDR/default/ES."
 
/**
 * Look up the GXF lines-per-frame index for the stream's frame height
 * and store it in the stream context.
 * @return 0 on success, -1 when the height is not in gxf_lines_tab
 */
static int gxf_find_lines_index(AVStream *st)
{
    GXFStreamContext *sc = st->priv_data;
    size_t i;

    /* Derive the entry count from the table itself instead of
     * hard-coding it, so the loop stays correct if entries are added. */
    for (i = 0; i < sizeof(gxf_lines_tab) / sizeof(gxf_lines_tab[0]); ++i) {
        if (st->codec->height == gxf_lines_tab[i].height) {
            sc->lines_index = gxf_lines_tab[i].index;
            return 0;
        }
    }
    return -1;
}
 
/* Emit to_pad zero bytes; a no-op for to_pad <= 0. */
static void gxf_write_padding(AVIOContext *pb, int64_t to_pad)
{
    while (to_pad-- > 0)
        avio_w8(pb, 0);
}
 
/**
 * Finalize a packet started at pos: pad it to a 4-byte boundary, then
 * patch the 32-bit size field at offset pos+6 in the packet header.
 * @return the total packet size including padding
 */
static int64_t updatePacketSize(AVIOContext *pb, int64_t pos)
{
    int64_t curpos;
    int size;

    size = avio_tell(pb) - pos;
    if (size % 4) {
        /* GXF packets must be 32-bit aligned. */
        gxf_write_padding(pb, 4 - size % 4);
        size = avio_tell(pb) - pos;
    }
    curpos = avio_tell(pb);
    avio_seek(pb, pos + 6, SEEK_SET);
    avio_wb32(pb, size);
    avio_seek(pb, curpos, SEEK_SET);
    return curpos - pos;
}
 
/* Patch a 16-bit section size placeholder at pos (size excludes the
 * 2-byte field itself) and return the full section length. */
static int64_t updateSize(AVIOContext *pb, int64_t pos)
{
    int64_t end = avio_tell(pb);

    avio_seek(pb, pos, SEEK_SET);
    avio_wb16(pb, end - pos - 2);
    avio_seek(pb, end, SEEK_SET);
    return end - pos;
}
 
/* Write the fixed 16-byte SMPTE 360M packet header; the size field is a
 * placeholder later patched by updatePacketSize(). */
static void gxf_write_packet_header(AVIOContext *pb, GXFPktType type)
{
    avio_wb32(pb, 0);  /* packet leader for synchro */
    avio_w8(pb, 1);
    avio_w8(pb, type); /* map packet */
    avio_wb32(pb, 0);  /* size */
    avio_wb32(pb, 0);  /* reserved */
    avio_w8(pb, 0xE1); /* trailer 1 */
    avio_w8(pb, 0xE2); /* trailer 2 */
}
 
/**
 * Write the MPEG auxiliary track data: a textual description of the GOP
 * structure derived from the frame-type counters collected while muxing.
 * @return number of bytes written (tag + length byte + string + NUL)
 */
static int gxf_write_mpeg_auxiliary(AVIOContext *pb, AVStream *st)
{
    GXFStreamContext *sc = st->priv_data;
    char buffer[1024];
    int size, starting_line;

    if (sc->iframes) {
        /* Average P per GOP and B per I/P, rounded up. */
        sc->p_per_gop = sc->pframes / sc->iframes;
        if (sc->pframes % sc->iframes)
            sc->p_per_gop++;
        if (sc->pframes) {
            sc->b_per_i_or_p = sc->bframes / sc->pframes;
            if (sc->bframes % sc->pframes)
                sc->b_per_i_or_p++;
        }
        if (sc->p_per_gop > 9)
            sc->p_per_gop = 9; /* ensure value won't take more than one char */
        if (sc->b_per_i_or_p > 9)
            sc->b_per_i_or_p = 9; /* ensure value won't take more than one char */
    }
    /* First active video line depends on the raster variant. */
    if (st->codec->height == 512 || st->codec->height == 608)
        starting_line = 7; // VBI
    else if (st->codec->height == 480)
        starting_line = 20;
    else
        starting_line = 23; // default PAL

    size = snprintf(buffer, sizeof(buffer), "Ver 1\nBr %.6f\nIpg 1\nPpi %d\nBpiop %d\n"
                    "Pix 0\nCf %d\nCg %d\nSl %d\nnl16 %d\nVi 1\nf1 1\n",
                    (float)st->codec->bit_rate, sc->p_per_gop, sc->b_per_i_or_p,
                    st->codec->pix_fmt == AV_PIX_FMT_YUV422P ? 2 : 1, sc->first_gop_closed == 1,
                    starting_line, (st->codec->height + 15) / 16);
    av_assert0(size < sizeof(buffer));
    avio_w8(pb, TRACK_MPG_AUX);
    avio_w8(pb, size + 1);
    /* +1: the terminating NUL is part of the on-disk string. */
    avio_write(pb, (uint8_t *)buffer, size + 1);
    return size + 3;
}
 
/**
 * Write the 8-byte DV auxiliary track data (DVCAM/DVPRO flag).
 * @return number of payload bytes written (8)
 */
static int gxf_write_dv_auxiliary(AVIOContext *pb, AVStream *st)
{
    int64_t track_aux_data = 0;

    avio_w8(pb, TRACK_AUX);
    avio_w8(pb, 8);
    if (st->codec->pix_fmt == AV_PIX_FMT_YUV420P)
        track_aux_data |= 0x01; /* marks stream as DVCAM instead of DVPRO */
    track_aux_data |= 0x40000000; /* aux data is valid */
    avio_wl64(pb, track_aux_data);
    return 8;
}
 
/**
 * Write the 8-byte timecode auxiliary track data: the packed start
 * timecode followed by a reserved word.
 * @return number of payload bytes written (8)
 */
static int gxf_write_timecode_auxiliary(AVIOContext *pb, GXFContext *gxf)
{
    uint32_t timecode = GXF_TIMECODE(gxf->tc.color, gxf->tc.drop,
                                     gxf->tc.hh, gxf->tc.mm,
                                     gxf->tc.ss, gxf->tc.ff);

    avio_w8(pb, TRACK_AUX);
    avio_w8(pb, 8);
    avio_wl32(pb, timecode);
    /* reserved */
    avio_wl32(pb, 0);
    return 8;
}
 
/**
 * Write one track description inside the MAP packet: media file name,
 * codec-specific auxiliary data and the common tagged fields.
 * @param index track index; s->nb_streams selects the timecode track
 * @return size of the description written
 */
static int gxf_write_track_description(AVFormatContext *s, GXFStreamContext *sc, int index)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t pos;

    /* track description section */
    avio_w8(pb, sc->media_type + 0x80);
    avio_w8(pb, index + 0xC0);

    pos = avio_tell(pb);
    avio_wb16(pb, 0); /* size */

    /* media file name */
    avio_w8(pb, TRACK_NAME);
    avio_w8(pb, strlen(ES_NAME_PATTERN) + 3);
    avio_write(pb, ES_NAME_PATTERN, sizeof(ES_NAME_PATTERN) - 1);
    avio_wb16(pb, sc->media_info);
    avio_w8(pb, 0);

    /* auxiliary data depends on the track type */
    switch (sc->track_type) {
        case 3:     /* timecode */
            gxf_write_timecode_auxiliary(pb, gxf);
            break;
        case 4:     /* MPEG2 */
        case 9:     /* MPEG1 */
            gxf_write_mpeg_auxiliary(pb, s->streams[index]);
            break;
        case 5:     /* DV25 */
        case 6:     /* DV50 */
            gxf_write_dv_auxiliary(pb, s->streams[index]);
            break;
        default:
            avio_w8(pb, TRACK_AUX);
            avio_w8(pb, 8);
            avio_wl64(pb, 0);
    }

    /* file system version */
    avio_w8(pb, TRACK_VER);
    avio_w8(pb, 4);
    avio_wb32(pb, 0);

    /* frame rate */
    avio_w8(pb, TRACK_FPS);
    avio_w8(pb, 4);
    avio_wb32(pb, sc->frame_rate_index);

    /* lines per frame */
    avio_w8(pb, TRACK_LINES);
    avio_w8(pb, 4);
    avio_wb32(pb, sc->lines_index);

    /* fields per frame */
    avio_w8(pb, TRACK_FPF);
    avio_w8(pb, 4);
    avio_wb32(pb, sc->fields);

    return updateSize(pb, pos);
}
 
/**
 * Write the MAP packet's material data section: material name (server
 * path + basename of the output file), field range and estimated size.
 * @return size of the section written
 */
static int gxf_write_material_data_section(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t pos;
    int len;
    const char *filename = strrchr(s->filename, '/');

    pos = avio_tell(pb);
    avio_wb16(pb, 0); /* size */

    /* name: use the basename of the output path */
    if (filename)
        filename++;
    else
        filename = s->filename;
    len = strlen(filename);

    avio_w8(pb, MAT_NAME);
    avio_w8(pb, strlen(SERVER_PATH) + len + 1);
    avio_write(pb, SERVER_PATH, sizeof(SERVER_PATH) - 1);
    avio_write(pb, filename, len);
    avio_w8(pb, 0);

    /* first field */
    avio_w8(pb, MAT_FIRST_FIELD);
    avio_w8(pb, 4);
    avio_wb32(pb, 0);

    /* last field */
    avio_w8(pb, MAT_LAST_FIELD);
    avio_w8(pb, 4);
    avio_wb32(pb, gxf->nb_fields);

    /* reserved */
    avio_w8(pb, MAT_MARK_IN);
    avio_w8(pb, 4);
    avio_wb32(pb, 0);

    avio_w8(pb, MAT_MARK_OUT);
    avio_w8(pb, 4);
    avio_wb32(pb, gxf->nb_fields);

    /* estimated size in KB */
    avio_w8(pb, MAT_SIZE);
    avio_w8(pb, 4);
    avio_wb32(pb, avio_size(pb) / 1024);

    return updateSize(pb, pos);
}
 
/**
 * Write the MAP packet's track description section: one description per
 * stream plus the synthetic timecode track at index s->nb_streams.
 * @return size of the section written
 */
static int gxf_write_track_description_section(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t pos;
    int i;

    pos = avio_tell(pb);
    avio_wb16(pb, 0); /* size */
    for (i = 0; i < s->nb_streams; ++i)
        gxf_write_track_description(s, s->streams[i]->priv_data, i);

    gxf_write_track_description(s, &gxf->timecode_track, s->nb_streams);

    return updateSize(pb, pos);
}
 
/**
 * Write a MAP packet (material + track descriptions).
 * @param rewrite nonzero when overwriting an already recorded MAP packet;
 *                only fresh packets get their offset appended to
 *                gxf->map_offsets (grown in chunks of 30 entries).
 * @return packet size on success, negative AVERROR on allocation failure
 */
static int gxf_write_map_packet(AVFormatContext *s, int rewrite)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t pos = avio_tell(pb);

    if (!rewrite) {
        if (!(gxf->map_offsets_nb % 30)) {
            int err;
            if ((err = av_reallocp_array(&gxf->map_offsets,
                                         gxf->map_offsets_nb + 30,
                                         sizeof(*gxf->map_offsets))) < 0) {
                gxf->map_offsets_nb = 0;
                av_log(s, AV_LOG_ERROR, "could not realloc map offsets\n");
                return err;
            }
        }
        gxf->map_offsets[gxf->map_offsets_nb++] = pos; // do not increment here
    }

    gxf_write_packet_header(pb, PKT_MAP);

    /* preamble */
    avio_w8(pb, 0xE0); /* version */
    avio_w8(pb, 0xFF); /* reserved */

    gxf_write_material_data_section(s);
    gxf_write_track_description_section(s);

    return updatePacketSize(pb, pos);
}
 
/**
 * Write the FLT (field locator table) packet: up to 1000 entries mapping
 * field numbers to packet offsets/1024, zero-padded to the full table.
 * @return packet size
 */
static int gxf_write_flt_packet(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t pos = avio_tell(pb);
    /* choose an entry granularity so the table covers the whole clip */
    int fields_per_flt = (gxf->nb_fields+1) / 1000 + 1;
    int flt_entries = gxf->nb_fields / fields_per_flt;
    int i = 0;

    gxf_write_packet_header(pb, PKT_FLT);

    avio_wl32(pb, fields_per_flt); /* number of fields */
    avio_wl32(pb, flt_entries); /* number of active flt entries */

    if (gxf->flt_entries) {
        /* flt_entries is indexed per frame; >>1 converts fields to frames */
        for (i = 0; i < flt_entries; i++)
            avio_wl32(pb, gxf->flt_entries[(i*fields_per_flt)>>1]);
    }

    /* pad the table to its fixed 1000-entry size */
    for (; i < 1000; i++)
        avio_wl32(pb, 0);

    return updatePacketSize(pb, pos);
}
 
/**
 * Write the UMF material description: global flags, track lengths,
 * mark in/out, start/end timecodes and creation/modification times.
 * @return number of bytes written (always 48)
 */
static int gxf_write_umf_material_description(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int timecode_base = gxf->time_base.den == 60000 ? 60 : 50;
    int64_t timestamp = 0;
    AVDictionaryEntry *t;
    uint64_t nb_fields;
    uint32_t timecode_in; // timecode at mark in
    uint32_t timecode_out; // timecode at mark out

    /* Double parentheses make the intentional assignment-in-condition
     * explicit and silence -Wparentheses. */
    if ((t = av_dict_get(s->metadata, "creation_time", NULL, 0)))
        timestamp = ff_iso8601_to_unix_time(t->value);

    timecode_in = GXF_TIMECODE(gxf->tc.color, gxf->tc.drop,
                               gxf->tc.hh, gxf->tc.mm,
                               gxf->tc.ss, gxf->tc.ff);

    /* end timecode = start timecode advanced by the clip's field count */
    nb_fields = gxf->nb_fields +
                gxf->tc.hh * (timecode_base * 3600) +
                gxf->tc.mm * (timecode_base * 60)   +
                gxf->tc.ss * timecode_base          +
                gxf->tc.ff;

    timecode_out = GXF_TIMECODE(gxf->tc.color, gxf->tc.drop,
                                nb_fields / (timecode_base * 3600) % 24,
                                nb_fields / (timecode_base * 60)   % 60,
                                nb_fields /  timecode_base         % 60,
                                nb_fields %  timecode_base);

    avio_wl32(pb, gxf->flags);
    avio_wl32(pb, gxf->nb_fields); /* length of the longest track */
    avio_wl32(pb, gxf->nb_fields); /* length of the shortest track */
    avio_wl32(pb, 0); /* mark in */
    avio_wl32(pb, gxf->nb_fields); /* mark out */
    avio_wl32(pb, timecode_in); /* timecode mark in */
    avio_wl32(pb, timecode_out); /* timecode mark out */
    avio_wl64(pb, timestamp); /* modification time */
    avio_wl64(pb, timestamp); /* creation time */
    avio_wl16(pb, 0); /* reserved */
    avio_wl16(pb, 0); /* reserved */
    avio_wl16(pb, gxf->audio_tracks);
    avio_wl16(pb, 1); /* timecode track count */
    avio_wl16(pb, 0); /* reserved */
    avio_wl16(pb, gxf->mpeg_tracks);
    return 48;
}
 
/**
 * Write the UMF payload header: total length, version and the offsets and
 * sizes of the track and media sections (values come from the previous
 * pass; the UMF packet is rewritten in the trailer with final numbers).
 * @return number of bytes written (always 48)
 */
static int gxf_write_umf_payload(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;

    avio_wl32(pb, gxf->umf_length); /* total length of the umf data */
    avio_wl32(pb, 3); /* version */
    avio_wl32(pb, s->nb_streams+1); /* +1: the timecode track */
    avio_wl32(pb, gxf->umf_track_offset); /* umf track section offset */
    avio_wl32(pb, gxf->umf_track_size);
    avio_wl32(pb, s->nb_streams+1);
    avio_wl32(pb, gxf->umf_media_offset);
    avio_wl32(pb, gxf->umf_media_size);
    avio_wl32(pb, gxf->umf_length); /* user data offset */
    avio_wl32(pb, 0); /* user data size */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    return 48;
}
 
/**
 * Write the UMF track section: (media_info, 1) pairs for every stream
 * plus the timecode track; records the section offset in the context.
 * @return size of the section written
 */
static int gxf_write_umf_track_description(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    GXFContext *gxf = s->priv_data;
    int64_t pos = avio_tell(pb);
    int i;

    gxf->umf_track_offset = pos - gxf->umf_start_offset;
    for (i = 0; i < s->nb_streams; ++i) {
        GXFStreamContext *sc = s->streams[i]->priv_data;
        avio_wl16(pb, sc->media_info);
        avio_wl16(pb, 1);
    }

    avio_wl16(pb, gxf->timecode_track.media_info);
    avio_wl16(pb, 1);

    return avio_tell(pb) - pos;
}
 
/**
 * Write the MPEG-specific part of a UMF media description.
 * @return number of bytes written (always 32)
 */
static int gxf_write_umf_media_mpeg(AVIOContext *pb, AVStream *st)
{
    GXFStreamContext *sc = st->priv_data;

    if (st->codec->pix_fmt == AV_PIX_FMT_YUV422P)
        avio_wl32(pb, 2);
    else
        avio_wl32(pb, 1); /* default to 420 */
    avio_wl32(pb, sc->first_gop_closed == 1); /* closed = 1, open = 0, unknown = 255 */
    avio_wl32(pb, 3); /* top = 1, bottom = 2, frame = 3, unknown = 0 */
    avio_wl32(pb, 1); /* I picture per GOP */
    avio_wl32(pb, sc->p_per_gop);
    avio_wl32(pb, sc->b_per_i_or_p);
    /* MPEG version: 2 = MPEG-2, 1 = MPEG-1, 0 = unknown */
    if (st->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO)
        avio_wl32(pb, 2);
    else if (st->codec->codec_id == AV_CODEC_ID_MPEG1VIDEO)
        avio_wl32(pb, 1);
    else
        avio_wl32(pb, 0);
    avio_wl32(pb, 0); /* reserved */
    return 32;
}
 
/* Write the timecode-specific part of a UMF media description:
 * the drop-frame flag followed by seven reserved words (32 bytes). */
static int gxf_write_umf_media_timecode(AVIOContext *pb, int drop)
{
    int i;

    avio_wl32(pb, drop); /* drop frame */
    for (i = 0; i < 7; i++)
        avio_wl32(pb, 0); /* reserved */
    return 32;
}
 
/* Write the DV-specific part of a UMF media description:
 * a flag word (DVCAM vs DVPRO) followed by seven zero words (32 bytes). */
static int gxf_write_umf_media_dv(AVIOContext *pb, GXFStreamContext *sc, AVStream *st)
{
    int i;
    int flags = 0;

    if (st->codec->pix_fmt == AV_PIX_FMT_YUV420P)
        flags |= 0x20; /* marks as DVCAM instead of DVPRO */
    avio_wl32(pb, flags);
    for (i = 0; i < 7; i++)
        avio_wl32(pb, 0);
    return 32;
}
 
/**
 * Write the audio-specific part of a UMF media description:
 * unity sound levels and zero ramp lengths.
 * @return number of bytes written (always 32)
 */
static int gxf_write_umf_media_audio(AVIOContext *pb, GXFStreamContext *sc)
{
    avio_wl64(pb, av_double2int(1)); /* sound level to begin to */
    avio_wl64(pb, av_double2int(1)); /* sound level to begin to */
    avio_wl32(pb, 0); /* number of fields over which to ramp up sound level */
    avio_wl32(pb, 0); /* number of fields over which to ramp down sound level */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    return 32;
}
 
/**
 * Write the UMF media section: one fixed-layout description per stream
 * plus the timecode track, each followed by a codec-specific part and
 * back-patched with its own length.
 * @return size of the section written
 */
static int gxf_write_umf_media_description(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t pos;
    int i, j;

    pos = avio_tell(pb);
    gxf->umf_media_offset = pos - gxf->umf_start_offset;
    /* iterate streams plus one extra slot for the timecode track */
    for (i = 0; i <= s->nb_streams; ++i) {
        GXFStreamContext *sc;
        int64_t startpos, curpos;

        if (i == s->nb_streams)
            sc = &gxf->timecode_track;
        else
            sc = s->streams[i]->priv_data;

        startpos = avio_tell(pb);
        avio_wl16(pb, 0); /* length */
        avio_wl16(pb, sc->media_info);
        avio_wl16(pb, 0); /* reserved */
        avio_wl16(pb, 0); /* reserved */
        avio_wl32(pb, gxf->nb_fields);
        avio_wl32(pb, 0); /* attributes rw, ro */
        avio_wl32(pb, 0); /* mark in */
        avio_wl32(pb, gxf->nb_fields); /* mark out */
        /* media name, zero-padded to a fixed 88-byte field */
        avio_write(pb, ES_NAME_PATTERN, strlen(ES_NAME_PATTERN));
        avio_wb16(pb, sc->media_info);
        for (j = strlen(ES_NAME_PATTERN)+2; j < 88; j++)
            avio_w8(pb, 0);
        avio_wl32(pb, sc->track_type);
        avio_wl32(pb, sc->sample_rate);
        avio_wl32(pb, sc->sample_size);
        avio_wl32(pb, 0); /* reserved */

        /* codec-specific trailing part */
        if (sc == &gxf->timecode_track)
            gxf_write_umf_media_timecode(pb, gxf->tc.drop);
        else {
            AVStream *st = s->streams[i];
            switch (st->codec->codec_id) {
            case AV_CODEC_ID_MPEG1VIDEO:
            case AV_CODEC_ID_MPEG2VIDEO:
                gxf_write_umf_media_mpeg(pb, st);
                break;
            case AV_CODEC_ID_PCM_S16LE:
                gxf_write_umf_media_audio(pb, sc);
                break;
            case AV_CODEC_ID_DVVIDEO:
                gxf_write_umf_media_dv(pb, sc, st);
                break;
            }
        }

        /* back-patch this description's length */
        curpos = avio_tell(pb);
        avio_seek(pb, startpos, SEEK_SET);
        avio_wl16(pb, curpos - startpos);
        avio_seek(pb, curpos, SEEK_SET);
    }
    return avio_tell(pb) - pos;
}
 
/**
 * Write the UMF packet (payload header + material/track/media sections)
 * and record the section sizes for the rewrite pass in the trailer.
 * @return packet size
 */
static int gxf_write_umf_packet(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t pos = avio_tell(pb);

    gxf_write_packet_header(pb, PKT_UMF);

    /* preamble */
    avio_w8(pb, 3); /* first and last (only) packet */
    avio_wb32(pb, gxf->umf_length); /* data length */

    gxf->umf_start_offset = avio_tell(pb);
    gxf_write_umf_payload(s);
    gxf_write_umf_material_description(s);
    gxf->umf_track_size = gxf_write_umf_track_description(s);
    gxf->umf_media_size = gxf_write_umf_media_description(s);
    gxf->umf_length = avio_tell(pb) - gxf->umf_start_offset;
    return updatePacketSize(pb, pos);
}
 
/* Audio interleaving chunk size (samples per "frame"), zero-terminated. */
static const int GXF_samples_per_frame[] = { 32768, 0 };

/**
 * Initialize the synthetic timecode track from the video stream's
 * parameters; a no-op when there is no video stream.
 */
static void gxf_init_timecode_track(GXFStreamContext *sc, GXFStreamContext *vsc)
{
    if (!vsc)
        return;

    /* media type 7 = NTSC timecode, 8 = PAL timecode */
    sc->media_type = vsc->sample_rate == 60 ? 7 : 8;
    sc->sample_rate = vsc->sample_rate;
    sc->media_info = ('T'<<8) | '0';
    sc->track_type = 3;
    sc->frame_rate_index = vsc->frame_rate_index;
    sc->lines_index = vsc->lines_index;
    sc->sample_size = 16;
    sc->fields = vsc->fields;
}
 
/**
 * Parse a "hh:mm:ss[:;.]ff" timecode string into tc.
 * A separator other than ':' before the frame count marks drop-frame.
 * NOTE(review): field values are not range-checked here; out-of-range
 * components are written as-is — confirm callers supply sane timecodes.
 * @param fields fields per frame; for 2 the frame count is doubled
 *               because GXF timecodes count fields
 * @return 0 on success, -1 on parse failure
 */
static int gxf_init_timecode(AVFormatContext *s, GXFTimecode *tc, const char *tcstr, int fields)
{
    char c;

    if (sscanf(tcstr, "%d:%d:%d%c%d", &tc->hh, &tc->mm, &tc->ss, &c, &tc->ff) != 5) {
        av_log(s, AV_LOG_ERROR, "unable to parse timecode, "
                                "syntax: hh:mm:ss[:;.]ff\n");
        return -1;
    }

    tc->color = 0;
    tc->drop = c != ':';

    if (fields == 2)
        tc->ff = tc->ff * 2;

    return 0;
}
 
/**
 * Muxer write_header callback: validate the stream set (mono 48kHz
 * s16le audio; PAL/NTSC MJPEG/MPEG/DV video as the first track), set up
 * per-stream contexts and write the initial MAP, FLT and UMF packets.
 * Requires a seekable output since packets are rewritten in the trailer.
 * @return 0 on success, negative on error
 */
static int gxf_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    GXFContext *gxf = s->priv_data;
    GXFStreamContext *vsc = NULL;
    uint8_t tracks[255] = {0}; /* per-media-letter track counters */
    int i, media_info = 0;
    int ret;
    AVDictionaryEntry *tcr = av_dict_get(s->metadata, "timecode", NULL, 0);

    if (!pb->seekable) {
        av_log(s, AV_LOG_ERROR, "gxf muxer does not support streamed output, patch welcome\n");
        return -1;
    }

    gxf->flags |= 0x00080000; /* material is simple clip */
    for (i = 0; i < s->nb_streams; ++i) {
        AVStream *st = s->streams[i];
        GXFStreamContext *sc = av_mallocz(sizeof(*sc));
        if (!sc)
            return AVERROR(ENOMEM);
        st->priv_data = sc;

        sc->media_type = ff_codec_get_tag(gxf_media_types, st->codec->codec_id);
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* GXF audio is restricted to mono 48kHz 16-bit PCM */
            if (st->codec->codec_id != AV_CODEC_ID_PCM_S16LE) {
                av_log(s, AV_LOG_ERROR, "only 16 BIT PCM LE allowed for now\n");
                return -1;
            }
            if (st->codec->sample_rate != 48000) {
                av_log(s, AV_LOG_ERROR, "only 48000hz sampling rate is allowed\n");
                return -1;
            }
            if (st->codec->channels != 1) {
                av_log(s, AV_LOG_ERROR, "only mono tracks are allowed\n");
                return -1;
            }
            sc->track_type = 2;
            sc->sample_rate = st->codec->sample_rate;
            avpriv_set_pts_info(st, 64, 1, sc->sample_rate);
            sc->sample_size = 16;
            sc->frame_rate_index = -2;
            sc->lines_index = -2;
            sc->fields = -2;
            gxf->audio_tracks++;
            gxf->flags |= 0x04000000; /* audio is 16 bit pcm */
            media_info = 'A';
        } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (i != 0) {
                av_log(s, AV_LOG_ERROR, "video stream must be the first track\n");
                return -1;
            }
            /* FIXME check from time_base ? */
            if (st->codec->height == 480 || st->codec->height == 512) { /* NTSC or NTSC+VBI */
                sc->frame_rate_index = 5;
                sc->sample_rate = 60;
                gxf->flags |= 0x00000080;
                gxf->time_base = (AVRational){ 1001, 60000 };
            } else if (st->codec->height == 576 || st->codec->height == 608) { /* PAL or PAL+VBI */
                sc->frame_rate_index = 6;
                sc->media_type++; /* switch NTSC media type id to its PAL variant */
                sc->sample_rate = 50;
                gxf->flags |= 0x00000040;
                gxf->time_base = (AVRational){ 1, 50 };
            } else {
                av_log(s, AV_LOG_ERROR, "unsupported video resolution, "
                       "gxf muxer only accepts PAL or NTSC resolutions currently\n");
                return -1;
            }
            if (!tcr)
                tcr = av_dict_get(st->metadata, "timecode", NULL, 0);
            avpriv_set_pts_info(st, 64, gxf->time_base.num, gxf->time_base.den);
            if (gxf_find_lines_index(st) < 0)
                sc->lines_index = -1;
            sc->sample_size = st->codec->bit_rate;
            sc->fields = 2; /* interlaced */

            vsc = sc;

            switch (st->codec->codec_id) {
            case AV_CODEC_ID_MJPEG:
                sc->track_type = 1;
                gxf->flags |= 0x00004000;
                media_info = 'J';
                break;
            case AV_CODEC_ID_MPEG1VIDEO:
                sc->track_type = 9;
                gxf->mpeg_tracks++;
                media_info = 'L';
                break;
            case AV_CODEC_ID_MPEG2VIDEO:
                sc->first_gop_closed = -1; /* unknown until the first GOP is parsed */
                sc->track_type = 4;
                gxf->mpeg_tracks++;
                gxf->flags |= 0x00008000;
                media_info = 'M';
                break;
            case AV_CODEC_ID_DVVIDEO:
                if (st->codec->pix_fmt == AV_PIX_FMT_YUV422P) {
                    sc->media_type += 2;
                    sc->track_type = 6;
                    gxf->flags |= 0x00002000;
                    media_info = 'E';
                } else {
                    sc->track_type = 5;
                    gxf->flags |= 0x00001000;
                    media_info = 'D';
                }
                break;
            default:
                av_log(s, AV_LOG_ERROR, "video codec not supported\n");
                return -1;
            }
        }
        /* FIXME first 10 audio tracks are 0 to 9 next 22 are A to V */
        sc->media_info = media_info<<8 | ('0'+tracks[media_info]++);
        sc->order = s->nb_streams - st->index;
    }

    if (ff_audio_interleave_init(s, GXF_samples_per_frame, (AVRational){ 1, 48000 }) < 0)
        return -1;

    if (tcr && vsc)
        gxf_init_timecode(s, &gxf->tc, tcr->value, vsc->fields);

    gxf_init_timecode_track(&gxf->timecode_track, vsc);
    gxf->flags |= 0x200000; // time code track is non-drop frame

    /* initial MAP/FLT/UMF packets; they are rewritten with final values
     * in the trailer */
    if ((ret = gxf_write_map_packet(s, 0)) < 0)
        return ret;
    gxf_write_flt_packet(s);
    gxf_write_umf_packet(s);

    gxf->packet_count = 3;

    avio_flush(pb);
    return 0;
}
 
/* Write the end-of-stream packet (header only, no payload). */
static int gxf_write_eos_packet(AVIOContext *pb)
{
    int64_t pos = avio_tell(pb);

    gxf_write_packet_header(pb, PKT_EOS);
    return updatePacketSize(pb, pos);
}
 
/**
 * Muxer write_trailer callback: write EOS, then rewrite the initial MAP,
 * FLT and UMF packets with final durations/offsets, update the duration
 * in every periodic MAP packet, and free per-file tables.
 * @return 0 on success, negative on error
 */
static int gxf_write_trailer(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t end;
    int i;
    int ret;

    ff_audio_interleave_close(s);

    gxf_write_eos_packet(pb);
    end = avio_tell(pb);
    avio_seek(pb, 0, SEEK_SET);
    /* overwrite map, flt and umf packets with new values */
    if ((ret = gxf_write_map_packet(s, 1)) < 0)
        return ret;
    gxf_write_flt_packet(s);
    gxf_write_umf_packet(s);
    avio_flush(pb);
    /* update duration in all map packets; index 0 is the packet at file
     * start which was just rewritten above */
    for (i = 1; i < gxf->map_offsets_nb; i++) {
        avio_seek(pb, gxf->map_offsets[i], SEEK_SET);
        if ((ret = gxf_write_map_packet(s, 1)) < 0)
            return ret;
        avio_flush(pb);
    }

    avio_seek(pb, end, SEEK_SET);

    av_freep(&gxf->flt_entries);
    av_freep(&gxf->map_offsets);

    return 0;
}
 
/**
 * Scan an MPEG video frame for the picture start code and return the
 * picture_coding_type (I/P/B) from the picture header; also latches the
 * closed_gop flag of the first GOP header seen into sc->first_gop_closed.
 * @return picture_coding_type (1=I, 2=P, 3=B), 0 when undeterminable
 */
static int gxf_parse_mpeg_frame(GXFStreamContext *sc, const uint8_t *buf, int size)
{
    uint32_t c=-1;
    int i;

    /* guard the unconditional buf[i+1] read below (i stays 0 when the
     * scan loop cannot run) */
    if (size < 2)
        return 0;

    for(i=0; i<size-4 && c!=0x100; i++){
        c = (c<<8) + buf[i];
        if(c == 0x1B8 && sc->first_gop_closed == -1) /* GOP start code */
            sc->first_gop_closed= (buf[i+4]>>6)&1;
    }
    /* picture_coding_type bits of the picture header following 0x100 */
    return (buf[i+1]>>3)&7;
}
 
/**
 * Write the 16-byte media packet preamble: media type, track index,
 * field number and a codec-dependent size/type field.
 * @param size payload size including padding
 * @return number of bytes written (16)
 */
static int gxf_write_media_preamble(AVFormatContext *s, AVPacket *pkt, int size)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[pkt->stream_index];
    GXFStreamContext *sc = st->priv_data;
    unsigned field_nb;
    /* If the video is frame-encoded, the frame numbers shall be represented by
     * even field numbers.
     * see SMPTE360M-2004  6.4.2.1.3 Media field number */
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        field_nb = gxf->nb_fields;
    } else {
        /* convert the audio dts (48kHz ticks) into a field number */
        field_nb = av_rescale_rnd(pkt->dts, gxf->time_base.den,
                                  (int64_t)48000*gxf->time_base.num, AV_ROUND_UP);
    }

    avio_w8(pb, sc->media_type);
    avio_w8(pb, st->index);
    avio_wb32(pb, field_nb);
    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        avio_wb16(pb, 0);
        avio_wb16(pb, size / 2); /* size in 16-bit samples */
    } else if (st->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        int frame_type = gxf_parse_mpeg_frame(sc, pkt->data, pkt->size);
        if (frame_type == AV_PICTURE_TYPE_I) {
            avio_w8(pb, 0x0d);
            sc->iframes++;
        } else if (frame_type == AV_PICTURE_TYPE_B) {
            avio_w8(pb, 0x0f);
            sc->bframes++;
        } else {
            avio_w8(pb, 0x0e);
            sc->pframes++;
        }
        avio_wb24(pb, size);
    } else if (st->codec->codec_id == AV_CODEC_ID_DVVIDEO) {
        avio_w8(pb, size / 4096); /* DV frame size in 4k blocks */
        avio_wb24(pb, 0);
    } else
        avio_wb32(pb, size);
    avio_wb32(pb, field_nb);
    avio_w8(pb, 1); /* flags */
    avio_w8(pb, 0); /* reserved */
    return 16;
}
 
/**
 * Muxer write_packet callback: emit one media packet (preamble, payload,
 * padding), record video packet offsets for the FLT table, and insert a
 * periodic MAP packet every 100 media packets.
 * @return 0 on success, negative AVERROR on allocation failure
 */
static int gxf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[pkt->stream_index];
    int64_t pos = avio_tell(pb);
    int padding = 0;
    int packet_start_offset = avio_tell(pb) / 1024; /* FLT entries store offset/1024 */
    int ret;

    gxf_write_packet_header(pb, PKT_MEDIA);
    if (st->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO && pkt->size % 4) /* MPEG-2 frames must be padded */
        padding = 4 - pkt->size % 4;
    else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        padding = GXF_AUDIO_PACKET_SIZE - pkt->size; /* audio packets have a fixed size */
    gxf_write_media_preamble(s, pkt, pkt->size + padding);
    avio_write(pb, pkt->data, pkt->size);
    gxf_write_padding(pb, padding);

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* grow the FLT offset table in chunks of 500 entries */
        if (!(gxf->flt_entries_nb % 500)) {
            int err;
            if ((err = av_reallocp_array(&gxf->flt_entries,
                                         gxf->flt_entries_nb + 500,
                                         sizeof(*gxf->flt_entries))) < 0) {
                gxf->flt_entries_nb = 0;
                gxf->nb_fields = 0;
                av_log(s, AV_LOG_ERROR, "could not reallocate flt entries\n");
                return err;
            }
        }
        gxf->flt_entries[gxf->flt_entries_nb++] = packet_start_offset;
        gxf->nb_fields += 2; // count fields
    }

    updatePacketSize(pb, pos);

    gxf->packet_count++;
    if (gxf->packet_count == 100) {
        if ((ret = gxf_write_map_packet(s, 0)) < 0)
            return ret;
        gxf->packet_count = 0;
    }

    return 0;
}
 
/**
 * Interleaving comparator: order packets by their media field number;
 * ties are broken by the stream's interleaving order.
 * @return 1 when next should come after cur, 0 otherwise
 */
static int gxf_compare_field_nb(AVFormatContext *s, AVPacket *next, AVPacket *cur)
{
    GXFContext *gxf = s->priv_data;
    AVPacket *pkt[2] = { cur, next };
    int i, field_nb[2];
    GXFStreamContext *sc[2];

    for (i = 0; i < 2; i++) {
        AVStream *st = s->streams[pkt[i]->stream_index];
        sc[i] = st->priv_data;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* convert audio dts (48kHz ticks) into a field number */
            field_nb[i] = av_rescale_rnd(pkt[i]->dts, gxf->time_base.den,
                                         (int64_t)48000*gxf->time_base.num, AV_ROUND_UP);
            field_nb[i] &= ~1; // compare against even field number because audio must be before video
        } else
            field_nb[i] = pkt[i]->dts; // dts are field based
    }

    return field_nb[1] > field_nb[0] ||
           (field_nb[1] == field_nb[0] && sc[1]->order > sc[0]->order);
}
 
/**
 * Interleave callback: force a 2-field duration on video packets, then
 * delegate to the audio-rechunking interleaver with the field-number
 * comparator above.
 */
static int gxf_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush)
{
    if (pkt && s->streams[pkt->stream_index]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        pkt->duration = 2; // enforce 2 fields
    return ff_audio_rechunk_interleave(s, out, pkt, flush,
                                       ff_interleave_packet_per_dts, gxf_compare_field_nb);
}
 
/* Muxer registration for the GXF (SMPTE 360M) container. */
AVOutputFormat ff_gxf_muxer = {
    .name              = "gxf",
    .long_name         = NULL_IF_CONFIG_SMALL("GXF (General eXchange Format)"),
    .extensions        = "gxf",
    .priv_data_size    = sizeof(GXFContext),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_MPEG2VIDEO,
    .write_header      = gxf_write_header,
    .write_packet      = gxf_write_packet,
    .write_trailer     = gxf_write_trailer,
    .interleave_packet = gxf_interleave_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/h261dec.c
0,0 → 1,65
/*
* RAW H.261 video demuxer
* Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/get_bits.h"
#include "avformat.h"
#include "rawdec.h"
 
/**
 * Probe for raw H.261: bit-scan the buffer for picture/GOB start codes
 * (20-bit 0x00001) and score how often the group numbers follow the
 * expected CIF/QCIF sequence versus appearing out of order.
 * @return an AVPROBE_SCORE_* value, 0 when the data does not look like H.261
 */
static int h261_probe(AVProbeData *p)
{
    uint32_t code= -1;
    int i;
    int valid_psc=0;
    int invalid_psc=0;
    int next_gn=0;
    int src_fmt=0;
    GetBitContext gb;

    init_get_bits8(&gb, p->buf, p->buf_size);

    for(i=0; i<p->buf_size*8; i++){
        /* skip 8 bits at a time while a start code cannot begin here */
        if ((code & 0x01ff0000) || !(code & 0xff00)) {
            code = (code<<8) + get_bits(&gb, 8);
            i += 7;
        } else
            code = (code<<1) + get_bits1(&gb);
        if ((code & 0xffff0000) == 0x10000) {
            int gn= (code>>12)&0xf;
            if(!gn)
                src_fmt= code&8; /* picture header: source format bit */
            if(gn != next_gn) invalid_psc++;
            else              valid_psc++;

            if(src_fmt){ // CIF
                next_gn= (gn+1     )%13;
            }else{       //QCIF
                next_gn= (gn+1+!!gn)% 7;
            }
        }
    }
    if(valid_psc > 2*invalid_psc + 6){
        return AVPROBE_SCORE_EXTENSION;
    }else if(valid_psc > 2*invalid_psc + 2)
        return AVPROBE_SCORE_EXTENSION / 2;
    return 0;
}

/* Register the raw H.261 demuxer via the raw-video demuxer template. */
FF_DEF_RAWVIDEO_DEMUXER(h261, "raw H.261", h261_probe, "h261", AV_CODEC_ID_H261)
/contrib/sdk/sources/ffmpeg/libavformat/h263dec.c
0,0 → 1,65
/*
* RAW H.263 video demuxer
* Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rawdec.h"
 
/**
 * Probe for raw H.263: scan for picture start codes (22-bit 0x20) and
 * GOB headers, scoring consistent source formats and monotonically
 * increasing GOB numbers against invalid ones and resolution changes.
 * @return an AVPROBE_SCORE_* value, 0 when the data does not look like H.263
 */
static int h263_probe(AVProbeData *p)
{
    uint64_t code= -1;
    int i;
    int valid_psc=0;
    int invalid_psc=0;
    int res_change=0;
    int src_fmt, last_src_fmt=-1;
    int last_gn=0;

    for(i=0; i<p->buf_size; i++){
        code = (code<<8) + p->buf[i];
        if ((code & 0xfffffc0000) == 0x800000) { /* picture start code */
            src_fmt= (code>>2)&3;
            if(   src_fmt != last_src_fmt
               && last_src_fmt>0 && last_src_fmt<6
               && src_fmt<6)
                res_change++;

            if((code&0x300)==0x200 && src_fmt){
                valid_psc++;
                last_gn=0;
            }else
                invalid_psc++;
            last_src_fmt= src_fmt;
        } else if((code & 0xffff800000) == 0x800000) { /* GOB header */
            int gn= (code>>(23-5)) & 0x1F;
            if(gn<last_gn){ /* GOB numbers must not decrease within a picture */
                invalid_psc++;
            }else
                last_gn= gn;
        }
    }
    if(valid_psc > 2*invalid_psc + 2*res_change + 3){
        return AVPROBE_SCORE_EXTENSION;
    }else if(valid_psc > 2*invalid_psc)
        return AVPROBE_SCORE_EXTENSION / 2;
    return 0;
}

/* Register the raw H.263 demuxer via the raw-video demuxer template. */
FF_DEF_RAWVIDEO_DEMUXER(h263, "raw H.263", h263_probe, NULL, AV_CODEC_ID_H263)
/contrib/sdk/sources/ffmpeg/libavformat/h264dec.c
0,0 → 1,78
/*
* RAW H.264 video demuxer
* Copyright (c) 2008 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rawdec.h"
 
/*
 * Raw Annex-B H.264 probe: scan for 00 00 01 start codes and
 * sanity-check nal_ref_idc against each NAL unit type.
 */
static int h264_probe(AVProbeData *p)
{
    uint32_t code = -1;
    int sps = 0, pps = 0, idr = 0, res = 0, sli = 0;
    int i;

    for (i = 0; i < p->buf_size; i++) {
        code = (code << 8) + p->buf[i];
        if ((code & 0xffffff00) == 0x100) {     /* 00 00 01 start code */
            int ref_idc = (code >> 5) & 3;
            int type = code & 0x1F;
            /* per NAL type: 1 -> nal_ref_idc must be zero, -1 -> must be
             * nonzero, 0 -> either is valid, 2 -> reserved/unexpected */
            static const int8_t ref_zero[] = {
                2, 0, 0, 0, 0, -1, 1, -1,
                -1, 1, 1, 1, 1, -1, 2, 2,
                2, 2, 2, 0, 2, 2, 2, 2,
                2, 2, 2, 2, 2, 2, 2, 2
            };

            if (code & 0x80) // forbidden_bit
                return 0;

            if (ref_zero[type] == 1 && ref_idc)
                return 0;
            if (ref_zero[type] == -1 && !ref_idc)
                return 0;
            if (ref_zero[type] == 2)
                res++;          /* reserved NAL types weaken the score */

            switch (type) {
            case 1:             /* non-IDR slice */
                sli++;
                break;
            case 5:             /* IDR slice */
                idr++;
                break;
            case 7:             /* SPS */
                /* NOTE(review): reads up to two bytes past the current
                 * position; assumes the zero padding that AVProbeData
                 * buffers carry after buf_size — confirm */
                if (p->buf[i + 2] & 0x03)
                    return 0;
                sps++;
                break;
            case 8:             /* PPS */
                pps++;
                break;
            }
        }
    }

    /* require parameter sets plus key/regular slices, with few reserved
     * NAL types relative to the valid ones */
    if (sps && pps && (idr || sli > 3) && res < (sps + pps + idr))
        return AVPROBE_SCORE_EXTENSION + 1; // 1 more than .mpg

    return 0;
}

FF_DEF_RAWVIDEO_DEMUXER(h264, "raw H.264 video", h264_probe, "h26l,h264,264,avc", AV_CODEC_ID_H264)
/contrib/sdk/sources/ffmpeg/libavformat/hevcdec.c
0,0 → 1,66
/*
* RAW HEVC video demuxer
* Copyright (c) 2013 Dirk Farin <dirk.farin@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rawdec.h"
 
#include "libavcodec/hevc.h"
 
/*
 * Raw Annex-B HEVC probe: count parameter sets and IRAP pictures found
 * behind 00 00 01 start codes.
 */
static int hevc_probe(AVProbeData *p)
{
    uint32_t code = -1;
    int vps = 0, sps = 0, pps = 0, irap = 0;
    int i;

    /* stop at buf_size - 1 so reading the second NAL header byte below
     * stays inside the buffer */
    for (i = 0; i < p->buf_size - 1; i++) {
        code = (code << 8) + p->buf[i];
        if ((code & 0xffffff00) == 0x100) {     /* 00 00 01 start code */
            uint8_t nal2 = p->buf[i + 1];
            int type = (code & 0x7E) >> 1;      /* nal_unit_type */

            if (code & 0x81) // forbidden and reserved zero bits
                return 0;

            if (nal2 & 0xf8) // reserved zero
                return 0;

            switch (type) {
            case NAL_VPS: vps++; break;
            case NAL_SPS: sps++; break;
            case NAL_PPS: pps++; break;
            /* any of these marks a random access point */
            case NAL_BLA_N_LP:
            case NAL_BLA_W_LP:
            case NAL_BLA_W_RADL:
            case NAL_CRA_NUT:
            case NAL_IDR_N_LP:
            case NAL_IDR_W_RADL: irap++; break;
            }
        }
    }

    // printf("vps=%d, sps=%d, pps=%d, irap=%d\n", vps, sps, pps, irap);

    /* all parameter set types plus at least one random access point */
    if (vps && sps && pps && irap)
        return AVPROBE_SCORE_EXTENSION + 1; // 1 more than .mpg
    return 0;
}

FF_DEF_RAWVIDEO_DEMUXER(hevc, "raw HEVC video", hevc_probe, "hevc,h265,265", AV_CODEC_ID_HEVC)
/contrib/sdk/sources/ffmpeg/libavformat/hls.c
0,0 → 1,839
/*
* Apple HTTP Live Streaming demuxer
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Apple HTTP Live Streaming demuxer
* http://tools.ietf.org/html/draft-pantos-http-live-streaming
*/
 
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/time.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
#include "url.h"
 
#define INITIAL_BUFFER_SIZE 32768
 
/*
* An apple http stream consists of a playlist with media segment files,
* played sequentially. There may be several playlists with the same
* video content, in different bandwidth variants, that are played in
* parallel (preferably only one bandwidth variant at a time). In this case,
* the user supplied the url to a main playlist that only lists the variant
* playlists.
*
* If the main playlist doesn't point at any variants, we still create
* one anonymous toplevel variant for this, to maintain the structure.
*/
 
/* Encryption method applied to a media segment. */
enum KeyType {
    KEY_NONE,
    KEY_AES_128,
};

/* One media segment of a variant playlist. */
struct segment {
    int64_t duration;           /* in AV_TIME_BASE units */
    char url[MAX_URL_SIZE];
    char key[MAX_URL_SIZE];     /* absolute url of the key file, if any */
    enum KeyType key_type;
    uint8_t iv[16];             /* AES-128 initialization vector */
};

/*
 * Each variant has its own demuxer. If it currently is active,
 * it has an open AVIOContext too, and potentially an AVPacket
 * containing the next packet from this stream.
 */
struct variant {
    int bandwidth;              /* bandwidth from the master playlist */
    char url[MAX_URL_SIZE];
    AVIOContext pb;             /* byte context fed by read_data() */
    uint8_t* read_buffer;
    URLContext *input;          /* currently open segment, or NULL */
    AVFormatContext *parent;
    int index;
    AVFormatContext *ctx;       /* chained demuxer for this variant */
    AVPacket pkt;               /* buffered packet; pkt.data == NULL if none */
    int stream_offset;          /* index of first stream in parent context */

    int finished;               /* set once #EXT-X-ENDLIST was seen */
    int64_t target_duration;    /* in AV_TIME_BASE units */
    int start_seq_no;           /* sequence number of segments[0] */
    int n_segments;
    struct segment **segments;
    int needed, cur_needed;     /* whether any of its streams is wanted */
    int cur_seq_no;
    int64_t last_load_time;     /* when the playlist was last (re)loaded */

    char key_url[MAX_URL_SIZE]; /* url the cached key below came from */
    uint8_t key[16];
};

typedef struct HLSContext {
    int n_variants;
    struct variant **variants;
    int cur_seq_no;             /* global current segment sequence number */
    int end_of_segment;         /* set by read_data() at a segment boundary */
    int first_packet;
    int64_t first_timestamp;    /* first dts seen, in AV_TIME_BASE units */
    int64_t seek_timestamp;     /* pending seek target, or AV_NOPTS_VALUE */
    int seek_flags;
    AVIOInterruptCB *interrupt_callback;
    char *user_agent; ///< holds HTTP user agent set as an AVOption to the HTTP protocol context
    char *cookies; ///< holds HTTP cookie values set in either the initial response or as an AVOption to the HTTP protocol context
} HLSContext;
 
/* Read one line and strip trailing whitespace; returns the remaining
 * length of the line. */
static int read_chomp_line(AVIOContext *s, char *buf, int maxlen)
{
    int n = ff_get_line(s, buf, maxlen);

    for (; n > 0; n--) {
        if (!av_isspace(buf[n - 1]))
            break;
        buf[n - 1] = '\0';
    }
    return n;
}
 
/* Release every segment of a variant and reset its segment list. */
static void free_segment_list(struct variant *var)
{
    int n = var->n_segments;

    while (n > 0)
        av_free(var->segments[--n]);
    av_freep(&var->segments);
    var->n_segments = 0;
}
 
/*
 * Free every variant together with its owned resources, plus the
 * brokered HTTP option strings.
 */
static void free_variant_list(HLSContext *c)
{
    int i;
    for (i = 0; i < c->n_variants; i++) {
        struct variant *var = c->variants[i];
        free_segment_list(var);
        av_free_packet(&var->pkt);
        av_free(var->pb.buffer);
        if (var->input)
            ffurl_close(var->input);
        if (var->ctx) {
            /* pb points at the variant-owned AVIOContext above; detach
             * it so avformat_close_input does not try to free it */
            var->ctx->pb = NULL;
            avformat_close_input(&var->ctx);
        }
        av_free(var);
    }
    av_freep(&c->variants);
    av_freep(&c->cookies);
    av_freep(&c->user_agent);
    c->n_variants = 0;
}
 
/*
* Used to reset a statically allocated AVPacket to a clean slate,
* containing no data.
*/
static void reset_packet(AVPacket *pkt)
{
    av_init_packet(pkt);
    /* data == NULL is used throughout this file as the "no buffered
     * packet" marker */
    pkt->data = NULL;
}
 
/*
 * Allocate a new variant, resolve its url relative to base, and append
 * it to the context's variant list. Returns NULL on malloc failure.
 */
static struct variant *new_variant(HLSContext *c, int bandwidth,
                                   const char *url, const char *base)
{
    struct variant *var = av_mallocz(sizeof(struct variant));
    if (!var)
        return NULL;
    reset_packet(&var->pkt);
    var->bandwidth = bandwidth;
    ff_make_absolute_url(var->url, sizeof(var->url), base, url);
    dynarray_add(&c->variants, &c->n_variants, var);
    return var;
}
 
/* Attribute values parsed from an #EXT-X-STREAM-INF tag. */
struct variant_info {
    char bandwidth[20];
};

/* ff_parse_key_value callback: route the BANDWIDTH attribute value into
 * struct variant_info; other attributes are ignored. */
static void handle_variant_args(struct variant_info *info, const char *key,
                                int key_len, char **dest, int *dest_len)
{
    if (!strncmp(key, "BANDWIDTH=", key_len)) {
        *dest = info->bandwidth;
        *dest_len = sizeof(info->bandwidth);
    }
}
 
/* Attribute values parsed from an #EXT-X-KEY tag. */
struct key_info {
    char uri[MAX_URL_SIZE];
    char method[10];
    char iv[35];                /* hex string, e.g. "0x" + 32 digits */
};

/* ff_parse_key_value callback: route METHOD, URI and IV attribute
 * values into struct key_info. */
static void handle_key_args(struct key_info *info, const char *key,
                            int key_len, char **dest, int *dest_len)
{
    if (!strncmp(key, "METHOD=", key_len)) {
        *dest = info->method;
        *dest_len = sizeof(info->method);
    } else if (!strncmp(key, "URI=", key_len)) {
        *dest = info->uri;
        *dest_len = sizeof(info->uri);
    } else if (!strncmp(key, "IV=", key_len)) {
        *dest = info->iv;
        *dest_len = sizeof(info->iv);
    }
}
 
/*
 * Parse an M3U8 playlist — either a master playlist listing variants or
 * a media playlist listing segments — into the HLSContext.
 *
 * @param c   demuxer context receiving the parsed variants/segments
 * @param url playlist url, used to resolve relative segment/key urls
 * @param var variant to (re)populate, or NULL (an anonymous variant is
 *            created on demand when media-playlist tags are seen)
 * @param in  already-open AVIOContext for the playlist, or NULL to open url
 * @return 0 on success, a negative AVERROR code on failure
 */
static int parse_playlist(HLSContext *c, const char *url,
                          struct variant *var, AVIOContext *in)
{
    int ret = 0, is_segment = 0, is_variant = 0, bandwidth = 0;
    int64_t duration = 0;
    enum KeyType key_type = KEY_NONE;
    uint8_t iv[16] = "";
    int has_iv = 0;
    char key[MAX_URL_SIZE] = "";
    char line[MAX_URL_SIZE];
    const char *ptr;
    int close_in = 0;

    if (!in) {
        AVDictionary *opts = NULL;
        close_in = 1;
        /* Some HLS servers don't like being sent the range header */
        av_dict_set(&opts, "seekable", "0", 0);

        // broker prior HTTP options that should be consistent across requests
        av_dict_set(&opts, "user-agent", c->user_agent, 0);
        av_dict_set(&opts, "cookies", c->cookies, 0);

        ret = avio_open2(&in, url, AVIO_FLAG_READ,
                         c->interrupt_callback, &opts);
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
    }

    /* the first line must identify an extended M3U playlist */
    read_chomp_line(in, line, sizeof(line));
    if (strcmp(line, "#EXTM3U")) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    if (var) {
        /* reloading a live playlist: drop the previous segment list */
        free_segment_list(var);
        var->finished = 0;
    }
    while (!url_feof(in)) {
        read_chomp_line(in, line, sizeof(line));
        if (av_strstart(line, "#EXT-X-STREAM-INF:", &ptr)) {
            /* master playlist: the next uri line names a variant */
            struct variant_info info = {{0}};
            is_variant = 1;
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args,
                               &info);
            bandwidth = atoi(info.bandwidth);
        } else if (av_strstart(line, "#EXT-X-KEY:", &ptr)) {
            /* encryption parameters for the following segments */
            struct key_info info = {{0}};
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_key_args,
                               &info);
            key_type = KEY_NONE;
            has_iv = 0;
            if (!strcmp(info.method, "AES-128"))
                key_type = KEY_AES_128;
            if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
                ff_hex_to_data(iv, info.iv + 2);
                has_iv = 1;
            }
            av_strlcpy(key, info.uri, sizeof(key));
        } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) {
            if (!var) {
                /* media-playlist tag without a variant: create the
                 * anonymous toplevel variant */
                var = new_variant(c, 0, url, NULL);
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
            var->target_duration = atoi(ptr) * AV_TIME_BASE;
        } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) {
            if (!var) {
                var = new_variant(c, 0, url, NULL);
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
            var->start_seq_no = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-ENDLIST", &ptr)) {
            /* playlist is complete, i.e. not a live stream */
            if (var)
                var->finished = 1;
        } else if (av_strstart(line, "#EXTINF:", &ptr)) {
            /* duration of the segment named on the next uri line */
            is_segment = 1;
            duration = atof(ptr) * AV_TIME_BASE;
        } else if (av_strstart(line, "#", NULL)) {
            continue;           /* unknown tag or comment */
        } else if (line[0]) {
            /* non-tag, non-empty line: a variant or segment uri */
            if (is_variant) {
                if (!new_variant(c, bandwidth, line, url)) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                is_variant = 0;
                bandwidth = 0;
            }
            if (is_segment) {
                struct segment *seg;
                if (!var) {
                    var = new_variant(c, 0, url, NULL);
                    if (!var) {
                        ret = AVERROR(ENOMEM);
                        goto fail;
                    }
                }
                seg = av_malloc(sizeof(struct segment));
                if (!seg) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                seg->duration = duration;
                seg->key_type = key_type;
                if (has_iv) {
                    memcpy(seg->iv, iv, sizeof(iv));
                } else {
                    /* no explicit IV: use the segment sequence number,
                     * big-endian in the last 4 bytes of the IV */
                    int seq = var->start_seq_no + var->n_segments;
                    memset(seg->iv, 0, sizeof(seg->iv));
                    AV_WB32(seg->iv + 12, seq);
                }
                ff_make_absolute_url(seg->key, sizeof(seg->key), url, key);
                ff_make_absolute_url(seg->url, sizeof(seg->url), url, line);
                dynarray_add(&var->segments, &var->n_segments, seg);
                is_segment = 0;
            }
        }
    }
    if (var)
        var->last_load_time = av_gettime();

fail:
    if (close_in)
        avio_close(in);
    return ret;
}
 
/*
 * Open the URLContext for the variant's current segment, transparently
 * layering the crypto protocol on top for AES-128 encrypted segments.
 * Returns 0 on success or a negative AVERROR code.
 */
static int open_input(HLSContext *c, struct variant *var)
{
    AVDictionary *opts = NULL;
    int ret;
    struct segment *seg = var->segments[var->cur_seq_no - var->start_seq_no];

    // broker prior HTTP options that should be consistent across requests
    av_dict_set(&opts, "user-agent", c->user_agent, 0);
    av_dict_set(&opts, "cookies", c->cookies, 0);
    av_dict_set(&opts, "seekable", "0", 0);

    if (seg->key_type == KEY_NONE) {
        ret = ffurl_open(&var->input, seg->url, AVIO_FLAG_READ,
                          &var->parent->interrupt_callback, &opts);
        goto cleanup;
    } else if (seg->key_type == KEY_AES_128) {
        char iv[33], key[33], url[MAX_URL_SIZE];
        /* only re-fetch the key when its url differs from the cached one */
        if (strcmp(seg->key, var->key_url)) {
            URLContext *uc;
            if (ffurl_open(&uc, seg->key, AVIO_FLAG_READ,
                           &var->parent->interrupt_callback, &opts) == 0) {
                if (ffurl_read_complete(uc, var->key, sizeof(var->key))
                    != sizeof(var->key)) {
                    av_log(NULL, AV_LOG_ERROR, "Unable to read key file %s\n",
                           seg->key);
                }
                ffurl_close(uc);
            } else {
                av_log(NULL, AV_LOG_ERROR, "Unable to open key file %s\n",
                       seg->key);
            }
            av_strlcpy(var->key_url, seg->key, sizeof(var->key_url));
        }
        /* pass key and IV as hex strings to the crypto protocol */
        ff_data_to_hex(iv, seg->iv, sizeof(seg->iv), 0);
        ff_data_to_hex(key, var->key, sizeof(var->key), 0);
        iv[32] = key[32] = '\0';
        if (strstr(seg->url, "://"))
            snprintf(url, sizeof(url), "crypto+%s", seg->url);
        else
            snprintf(url, sizeof(url), "crypto:%s", seg->url);
        /* alloc first (not open) so the key/iv options can be set on the
         * protocol's priv_data before connecting */
        if ((ret = ffurl_alloc(&var->input, url, AVIO_FLAG_READ,
                               &var->parent->interrupt_callback)) < 0)
            goto cleanup;
        av_opt_set(var->input->priv_data, "key", key, 0);
        av_opt_set(var->input->priv_data, "iv", iv, 0);
        /* Need to repopulate options */
        av_dict_free(&opts);
        av_dict_set(&opts, "seekable", "0", 0);
        if ((ret = ffurl_connect(var->input, &opts)) < 0) {
            ffurl_close(var->input);
            var->input = NULL;
            goto cleanup;
        }
        ret = 0;
    }
    else
        ret = AVERROR(ENOSYS);

cleanup:
    av_dict_free(&opts);
    return ret;
}
 
/*
 * Read callback for the per-variant AVIOContext: reads from the current
 * segment, opening the next one (and reloading a live playlist when the
 * reload interval elapsed) as needed. Returns the number of bytes read,
 * or AVERROR_EOF when the variant ends or is no longer needed.
 */
static int read_data(void *opaque, uint8_t *buf, int buf_size)
{
    struct variant *v = opaque;
    HLSContext *c = v->parent->priv_data;
    int ret, i;

restart:
    if (!v->input) {
        /* If this is a live stream and the reload interval has elapsed since
         * the last playlist reload, reload the variant playlists now. */
        int64_t reload_interval = v->n_segments > 0 ?
                                  v->segments[v->n_segments - 1]->duration :
                                  v->target_duration;

reload:
        if (!v->finished &&
            av_gettime() - v->last_load_time >= reload_interval) {
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                return ret;
            /* If we need to reload the playlist again below (if
             * there's still no more segments), switch to a reload
             * interval of half the target duration. */
            reload_interval = v->target_duration / 2;
        }
        if (v->cur_seq_no < v->start_seq_no) {
            /* our position fell off the start of the live playlist */
            av_log(NULL, AV_LOG_WARNING,
                   "skipping %d segments ahead, expired from playlists\n",
                   v->start_seq_no - v->cur_seq_no);
            v->cur_seq_no = v->start_seq_no;
        }
        if (v->cur_seq_no >= v->start_seq_no + v->n_segments) {
            if (v->finished)
                return AVERROR_EOF;
            /* live stream: poll until a new segment may be available */
            while (av_gettime() - v->last_load_time < reload_interval) {
                if (ff_check_interrupt(c->interrupt_callback))
                    return AVERROR_EXIT;
                av_usleep(100*1000);
            }
            /* Enough time has elapsed since the last reload */
            goto reload;
        }

        ret = open_input(c, v);
        if (ret < 0)
            return ret;
    }
    ret = ffurl_read(v->input, buf, buf_size);
    if (ret > 0)
        return ret;
    /* current segment exhausted; advance to the next one */
    ffurl_close(v->input);
    v->input = NULL;
    v->cur_seq_no++;

    c->end_of_segment = 1;
    c->cur_seq_no = v->cur_seq_no;

    /* re-check whether the caller still wants any of this variant's
     * streams before opening the next segment */
    if (v->ctx && v->ctx->nb_streams &&
        v->parent->nb_streams >= v->stream_offset + v->ctx->nb_streams) {
        v->needed = 0;
        for (i = v->stream_offset; i < v->stream_offset + v->ctx->nb_streams;
             i++) {
            if (v->parent->streams[i]->discard < AVDISCARD_ALL)
                v->needed = 1;
        }
    }
    if (!v->needed) {
        av_log(v->parent, AV_LOG_INFO, "No longer receiving variant %d\n",
               v->index);
        return AVERROR_EOF;
    }
    goto restart;
}
 
/*
 * Demuxer read_header: parse the (master) playlist, open a chained
 * demuxer for every variant and expose all their streams on the parent
 * context. Returns 0 on success or a negative AVERROR code.
 *
 * Fixes vs. previous revision:
 *  - the av_malloc() of the per-variant read buffer was unchecked,
 *    leading to a NULL dereference inside ffio_init_context on OOM
 *  - av_new_program() failure jumped to fail with a non-negative ret,
 *    so the caller saw success while all variants had been freed
 */
static int hls_read_header(AVFormatContext *s)
{
    URLContext *u = (s->flags & AVFMT_FLAG_CUSTOM_IO) ? NULL : s->pb->opaque;
    HLSContext *c = s->priv_data;
    int ret = 0, i, j, stream_offset = 0;

    c->interrupt_callback = &s->interrupt_callback;

    // if the URL context is good, read important options we must broker later
    if (u && u->prot->priv_data_class) {
        // get the previous user agent & set back to null if string size is zero
        av_freep(&c->user_agent);
        av_opt_get(u->priv_data, "user-agent", 0, (uint8_t**)&(c->user_agent));
        if (c->user_agent && !strlen(c->user_agent))
            av_freep(&c->user_agent);

        // get the previous cookies & set back to null if string size is zero
        av_freep(&c->cookies);
        av_opt_get(u->priv_data, "cookies", 0, (uint8_t**)&(c->cookies));
        if (c->cookies && !strlen(c->cookies))
            av_freep(&c->cookies);
    }

    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
        goto fail;

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }
    /* If the playlist only contained variants, parse each individual
     * variant playlist. */
    if (c->n_variants > 1 || c->variants[0]->n_segments == 0) {
        for (i = 0; i < c->n_variants; i++) {
            struct variant *v = c->variants[i];
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->finished) {
        int64_t duration = 0;
        for (i = 0; i < c->variants[0]->n_segments; i++)
            duration += c->variants[0]->segments[i]->duration;
        s->duration = duration;
    }

    /* Open the demuxer for each variant */
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        AVInputFormat *in_fmt = NULL;
        char bitrate_str[20];
        AVProgram *program;

        if (v->n_segments == 0)
            continue;

        if (!(v->ctx = avformat_alloc_context())) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        v->index  = i;
        v->needed = 1;
        v->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        v->cur_seq_no = v->start_seq_no;
        if (!v->finished && v->n_segments > 3)
            v->cur_seq_no = v->start_seq_no + v->n_segments - 3;

        v->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        if (!v->read_buffer) {
            /* ctx is not fully initialized yet, so free it directly
             * instead of going through avformat_close_input */
            ret = AVERROR(ENOMEM);
            avformat_free_context(v->ctx);
            v->ctx = NULL;
            goto fail;
        }
        ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, v,
                          read_data, NULL, NULL);
        v->pb.seekable = 0;
        ret = av_probe_input_buffer(&v->pb, &in_fmt, v->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0) {
            /* Free the ctx - it isn't initialized properly at this point,
             * so avformat_close_input shouldn't be called. If
             * avformat_open_input fails below, it frees and zeros the
             * context, so it doesn't need any special treatment like this. */
            av_log(s, AV_LOG_ERROR, "Error when loading first segment '%s'\n", v->segments[0]->url);
            avformat_free_context(v->ctx);
            v->ctx = NULL;
            goto fail;
        }
        v->ctx->pb       = &v->pb;
        v->stream_offset = stream_offset;
        ret = avformat_open_input(&v->ctx, v->segments[0]->url, in_fmt, NULL);
        if (ret < 0)
            goto fail;

        v->ctx->ctx_flags &= ~AVFMTCTX_NOHEADER;
        ret = avformat_find_stream_info(v->ctx, NULL);
        if (ret < 0)
            goto fail;
        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);

        /* one AVProgram per variant */
        program = av_new_program(s, i);
        if (!program) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        av_dict_set(&program->metadata, "variant_bitrate", bitrate_str, 0);

        /* Create new AVStreams for each stream in this variant */
        for (j = 0; j < v->ctx->nb_streams; j++) {
            AVStream *st = avformat_new_stream(s, NULL);
            AVStream *ist = v->ctx->streams[j];
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            ff_program_add_stream_index(s, i, stream_offset + j);
            st->id = i;     /* remember which variant the stream belongs to */
            avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
            avcodec_copy_context(st->codec, v->ctx->streams[j]->codec);
            if (v->bandwidth)
                av_dict_set(&st->metadata, "variant_bitrate", bitrate_str,
                            0);
        }
        stream_offset += v->ctx->nb_streams;
    }

    c->first_packet    = 1;
    c->first_timestamp = AV_NOPTS_VALUE;
    c->seek_timestamp  = AV_NOPTS_VALUE;

    return 0;
fail:
    free_variant_list(c);
    return ret;
}
 
/*
 * Re-evaluate which variants are needed based on the streams' discard
 * flags, activating or deactivating variants accordingly.
 * @param first nonzero on the first call; only then may variants be dropped
 * @return nonzero if the set of needed variants changed
 */
static int recheck_discard_flags(AVFormatContext *s, int first)
{
    HLSContext *c = s->priv_data;
    int i, changed = 0;

    /* Check if any new streams are needed */
    for (i = 0; i < c->n_variants; i++)
        c->variants[i]->cur_needed = 0;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        /* st->id holds the index of the variant the stream belongs to
         * (set up in hls_read_header) */
        struct variant *var = c->variants[s->streams[i]->id];
        if (st->discard < AVDISCARD_ALL)
            var->cur_needed = 1;
    }
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        if (v->cur_needed && !v->needed) {
            /* variant becomes active: resume at the global sequence
             * number and clear the stale EOF state */
            v->needed = 1;
            changed = 1;
            v->cur_seq_no = c->cur_seq_no;
            v->pb.eof_reached = 0;
            av_log(s, AV_LOG_INFO, "Now receiving variant %d\n", i);
        } else if (first && !v->cur_needed && v->needed) {
            if (v->input)
                ffurl_close(v->input);
            v->input = NULL;
            v->needed = 0;
            changed = 1;
            av_log(s, AV_LOG_INFO, "No longer receiving variant %d\n", i);
        }
    }
    return changed;
}
 
/*
 * Demuxer read_packet: keep one buffered packet per active variant and
 * return the one on the earliest segment number, or, within the same
 * segment, the one with the lowest dts.
 *
 * Fix vs. previous revision: c->variants[minvariant] was computed
 * unconditionally, i.e. with minvariant == -1 on the first iteration —
 * a negative array index (undefined behavior). The element is now only
 * read once minvariant is known to be valid.
 */
static int hls_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    HLSContext *c = s->priv_data;
    int ret, i, minvariant = -1;

    if (c->first_packet) {
        recheck_discard_flags(s, 1);
        c->first_packet = 0;
    }

start:
    c->end_of_segment = 0;
    for (i = 0; i < c->n_variants; i++) {
        struct variant *var = c->variants[i];
        /* Make sure we've got one buffered packet from each open variant
         * stream */
        if (var->needed && !var->pkt.data) {
            while (1) {
                int64_t ts_diff;
                AVStream *st;
                ret = av_read_frame(var->ctx, &var->pkt);
                if (ret < 0) {
                    if (!url_feof(&var->pb) && ret != AVERROR_EOF)
                        return ret;
                    reset_packet(&var->pkt);
                    break;
                } else {
                    /* remember the first timestamp, used as seek origin */
                    if (c->first_timestamp == AV_NOPTS_VALUE &&
                        var->pkt.dts       != AV_NOPTS_VALUE)
                        c->first_timestamp = av_rescale_q(var->pkt.dts,
                            var->ctx->streams[var->pkt.stream_index]->time_base,
                            AV_TIME_BASE_Q);
                }

                if (c->seek_timestamp == AV_NOPTS_VALUE)
                    break;

                if (var->pkt.dts == AV_NOPTS_VALUE) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }

                /* after a seek: drop packets until the target timestamp
                 * (and, unless AVSEEK_FLAG_ANY, a keyframe) is reached */
                st = var->ctx->streams[var->pkt.stream_index];
                ts_diff = av_rescale_rnd(var->pkt.dts, AV_TIME_BASE,
                                         st->time_base.den, AV_ROUND_DOWN) -
                          c->seek_timestamp;
                if (ts_diff >= 0 && (c->seek_flags & AVSEEK_FLAG_ANY ||
                                     var->pkt.flags & AV_PKT_FLAG_KEY)) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }
                av_free_packet(&var->pkt);
                reset_packet(&var->pkt);
            }
        }
        /* Check if this stream still is on an earlier segment number, or
         * has the packet with the lowest dts */
        if (var->pkt.data) {
            struct variant *minvar = minvariant < 0 ?
                                     NULL : c->variants[minvariant];
            if (!minvar || var->cur_seq_no < minvar->cur_seq_no) {
                minvariant = i;
            } else if (var->cur_seq_no == minvar->cur_seq_no) {
                int64_t dts     = var->pkt.dts;
                int64_t mindts  = minvar->pkt.dts;
                AVStream *st    = var->ctx->streams[var->pkt.stream_index];
                AVStream *minst = minvar->ctx->streams[minvar->pkt.stream_index];

                if (dts == AV_NOPTS_VALUE) {
                    minvariant = i;
                } else if (mindts != AV_NOPTS_VALUE) {
                    /* compare dts normalized by each stream's start time */
                    if (st->start_time != AV_NOPTS_VALUE)
                        dts -= st->start_time;
                    if (minst->start_time != AV_NOPTS_VALUE)
                        mindts -= minst->start_time;

                    if (av_compare_ts(dts, st->time_base,
                                      mindts, minst->time_base) < 0)
                        minvariant = i;
                }
            }
        }
    }
    if (c->end_of_segment) {
        if (recheck_discard_flags(s, 0))
            goto start;
    }
    /* If we got a packet, return it */
    if (minvariant >= 0) {
        *pkt = c->variants[minvariant]->pkt;
        pkt->stream_index += c->variants[minvariant]->stream_offset;
        reset_packet(&c->variants[minvariant]->pkt);
        return 0;
    }
    return AVERROR_EOF;
}
 
/* Demuxer close callback: release all per-variant state. */
static int hls_close(AVFormatContext *s)
{
    free_variant_list(s->priv_data);
    return 0;
}
 
/*
 * Demuxer seek: only supported for finished (non-live) streams. Resets
 * every variant's io state and positions it on the segment containing
 * the target timestamp; fine positioning then happens in read_packet,
 * which drops packets until seek_timestamp is reached.
 */
static int hls_read_seek(AVFormatContext *s, int stream_index,
                         int64_t timestamp, int flags)
{
    HLSContext *c = s->priv_data;
    int i, j, ret;

    if ((flags & AVSEEK_FLAG_BYTE) || !c->variants[0]->finished)
        return AVERROR(ENOSYS);

    c->seek_flags = flags;
    /* convert the target into AV_TIME_BASE units */
    c->seek_timestamp = stream_index < 0 ? timestamp :
                        av_rescale_rnd(timestamp, AV_TIME_BASE,
                                       s->streams[stream_index]->time_base.den,
                                       flags & AVSEEK_FLAG_BACKWARD ?
                                       AV_ROUND_DOWN : AV_ROUND_UP);
    timestamp = av_rescale_rnd(timestamp, AV_TIME_BASE, stream_index >= 0 ?
                               s->streams[stream_index]->time_base.den :
                               AV_TIME_BASE, flags & AVSEEK_FLAG_BACKWARD ?
                               AV_ROUND_DOWN : AV_ROUND_UP);
    if (s->duration < c->seek_timestamp) {
        c->seek_timestamp = AV_NOPTS_VALUE;
        return AVERROR(EIO);
    }

    ret = AVERROR(EIO);
    for (i = 0; i < c->n_variants; i++) {
        /* Reset reading */
        struct variant *var = c->variants[i];
        /* accumulated duration up to the segment being examined */
        int64_t pos = c->first_timestamp == AV_NOPTS_VALUE ?
                      0 : c->first_timestamp;
        if (var->input) {
            ffurl_close(var->input);
            var->input = NULL;
        }
        av_free_packet(&var->pkt);
        reset_packet(&var->pkt);
        var->pb.eof_reached = 0;
        /* Clear any buffered data */
        var->pb.buf_end = var->pb.buf_ptr = var->pb.buffer;
        /* Reset the pos, to let the mpegts demuxer know we've seeked. */
        var->pb.pos = 0;

        /* Locate the segment that contains the target timestamp */
        for (j = 0; j < var->n_segments; j++) {
            if (timestamp >= pos &&
                timestamp < pos + var->segments[j]->duration) {
                var->cur_seq_no = var->start_seq_no + j;
                ret = 0;
                break;
            }
            pos += var->segments[j]->duration;
        }
        if (ret)
            c->seek_timestamp = AV_NOPTS_VALUE;
    }
    return ret;
}
 
/* Probe for an HLS playlist: #EXTM3U at the very start plus at least
 * one HLS-specific tag anywhere in the buffer. */
static int hls_probe(AVProbeData *p)
{
    if (strncmp(p->buf, "#EXTM3U", 7) != 0)
        return 0;

    if (strstr(p->buf, "#EXT-X-STREAM-INF:") ||
        strstr(p->buf, "#EXT-X-TARGETDURATION:") ||
        strstr(p->buf, "#EXT-X-MEDIA-SEQUENCE:"))
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/* Apple HTTP Live Streaming demuxer definition. */
AVInputFormat ff_hls_demuxer = {
    .name           = "hls,applehttp",
    .long_name      = NULL_IF_CONFIG_SMALL("Apple HTTP Live Streaming"),
    .priv_data_size = sizeof(HLSContext),
    .read_probe     = hls_probe,
    .read_header    = hls_read_header,
    .read_packet    = hls_read_packet,
    .read_close     = hls_close,
    .read_seek      = hls_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/hlsenc.c
0,0 → 1,349
/*
* Apple HTTP Live Streaming segmenter
* Copyright (c) 2012, Luca Barbato
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <float.h>
 
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/log.h"
 
#include "avformat.h"
#include "internal.h"
 
/* Singly linked playlist entry describing one finished segment. */
typedef struct ListEntry {
    char name[1024];            /* basename of the segment file */
    int duration;               /* segment duration in seconds */
    struct ListEntry *next;
} ListEntry;

typedef struct HLSContext {
    const AVClass *class;  // Class for private options.
    unsigned number;            /* index of the next segment to open */
    int64_t sequence;           /* media sequence number of the next entry */
    AVOutputFormat *oformat;    /* the mpegts muxer */
    AVFormatContext *avf;       /* chained muxer context */
    float time;            // Set by a private option.
    int size;              // Set by a private option.
    int wrap;              // Set by a private option.
    int64_t recording_time;     /* hls_time in AV_TIME_BASE units */
    int has_video;              /* number of video streams */
    int64_t start_pts;          /* pts of the very first packet */
    int64_t end_pts;            /* pts at the current segment's start */
    int64_t duration;      // last segment duration computed so far, in seconds
    int nb_entries;
    ListEntry *list;            /* head of the playlist entry list */
    ListEntry *end_list;        /* tail pointer, for O(1) append */
    char *basename;             /* segment filename template, e.g. "out%d.ts" */
    AVIOContext *pb;            /* io context of the playlist file */
} HLSContext;
 
/* Create the chained mpegts muxer context and mirror every input
 * stream onto it. */
static int hls_mux_init(AVFormatContext *s)
{
    HLSContext *hls = s->priv_data;
    AVFormatContext *oc = avformat_alloc_context();
    int i;

    hls->avf = oc;
    if (!oc)
        return AVERROR(ENOMEM);

    oc->oformat            = hls->oformat;
    oc->interrupt_callback = s->interrupt_callback;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *ist = s->streams[i];
        AVStream *ost = avformat_new_stream(oc, NULL);

        if (!ost)
            return AVERROR(ENOMEM);
        avcodec_copy_context(ost->codec, ist->codec);
        ost->sample_aspect_ratio = ist->sample_aspect_ratio;
    }

    return 0;
}
 
/*
 * Append a playlist entry for the segment that was just finished,
 * evicting the oldest entry once the list holds hls->size entries.
 * The media sequence number is incremented unconditionally so it keeps
 * counting across the sliding window.
 */
static int append_entry(HLSContext *hls, uint64_t duration)
{
    ListEntry *en = av_malloc(sizeof(*en));

    if (!en)
        return AVERROR(ENOMEM);

    av_strlcpy(en->name, av_basename(hls->avf->filename), sizeof(en->name));

    en->duration = duration;
    en->next     = NULL;

    if (!hls->list)
        hls->list = en;
    else
        hls->end_list->next = en;

    hls->end_list = en;

    /* sliding window: drop the oldest entry once the list is full */
    if (hls->nb_entries >= hls->size) {
        en = hls->list;
        hls->list = en->next;
        av_free(en);
    } else
        hls->nb_entries++;

    hls->sequence++;

    return 0;
}
 
/* Free every entry of the playlist entry list. */
static void free_entries(HLSContext *hls)
{
    ListEntry *cur = hls->list;

    while (cur) {
        ListEntry *next = cur->next;
        av_free(cur);
        cur = next;
    }
}
 
/*
 * Rewrite the m3u8 playlist file from the current entry list.
 * @param last nonzero to append #EXT-X-ENDLIST, marking the stream done
 * @return 0 on success or a negative AVERROR code from avio_open2
 */
static int hls_window(AVFormatContext *s, int last)
{
    HLSContext *hls = s->priv_data;
    ListEntry *en;
    int target_duration = 0;
    int ret = 0;

    if ((ret = avio_open2(&hls->pb, s->filename, AVIO_FLAG_WRITE,
                          &s->interrupt_callback, NULL)) < 0)
        goto fail;

    /* the advertised target duration must cover the longest segment */
    for (en = hls->list; en; en = en->next) {
        if (target_duration < en->duration)
            target_duration = en->duration;
    }

    avio_printf(hls->pb, "#EXTM3U\n");
    avio_printf(hls->pb, "#EXT-X-VERSION:3\n");
    avio_printf(hls->pb, "#EXT-X-TARGETDURATION:%d\n", target_duration);
    /* first sequence number still present in the sliding window */
    avio_printf(hls->pb, "#EXT-X-MEDIA-SEQUENCE:%"PRId64"\n",
                FFMAX(0, hls->sequence - hls->size));

    for (en = hls->list; en; en = en->next) {
        avio_printf(hls->pb, "#EXTINF:%d,\n", en->duration);
        avio_printf(hls->pb, "%s\n", en->name);
    }

    if (last)
        avio_printf(hls->pb, "#EXT-X-ENDLIST\n");

fail:
    /* safe even on the failure path: avio_closep on a NULL context is
     * a no-op */
    avio_closep(&hls->pb);
    return ret;
}
 
/*
 * Open the next segment file, named from the basename template, and
 * ask the mpegts muxer to resend its headers so each segment starts
 * independently decodable.
 */
static int hls_start(AVFormatContext *s)
{
    HLSContext *c = s->priv_data;
    AVFormatContext *oc = c->avf;
    int err = 0;

    /* expand the "%d" in the template; optionally wrap the index */
    if (av_get_frame_filename(oc->filename, sizeof(oc->filename),
                              c->basename, c->wrap ? c->number % c->wrap : c->number) < 0) {
        av_log(oc, AV_LOG_ERROR, "Invalid segment filename template '%s'\n", c->basename);
        return AVERROR(EINVAL);
    }
    c->number++;

    if ((err = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE,
                          &s->interrupt_callback, NULL)) < 0)
        return err;

    if (oc->oformat->priv_class && oc->priv_data)
        av_opt_set(oc->priv_data, "mpegts_flags", "resend_headers", 0);

    return 0;
}
 
/*
 * Muxer write_header: derive the segment filename template from the
 * playlist name, create the chained mpegts muxer and open + header-write
 * the first segment.
 */
static int hls_write_header(AVFormatContext *s)
{
    HLSContext *hls = s->priv_data;
    int ret, i;
    char *p;
    const char *pattern = "%d.ts";
    int basename_size = strlen(s->filename) + strlen(pattern) + 1;

    hls->number = 0;

    hls->recording_time = hls->time * AV_TIME_BASE;
    hls->start_pts = AV_NOPTS_VALUE;

    /* count video streams; segment splitting keys off video keyframes */
    for (i = 0; i < s->nb_streams; i++)
        hls->has_video +=
            s->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO;

    if (hls->has_video > 1)
        av_log(s, AV_LOG_WARNING,
               "More than a single video stream present, "
               "expect issues decoding it.\n");

    hls->oformat = av_guess_format("mpegts", NULL, NULL);

    if (!hls->oformat) {
        ret = AVERROR_MUXER_NOT_FOUND;
        goto fail;
    }

    hls->basename = av_malloc(basename_size);

    if (!hls->basename) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* replace the playlist's extension with the "%d.ts" pattern */
    strcpy(hls->basename, s->filename);

    p = strrchr(hls->basename, '.');

    if (p)
        *p = '\0';

    av_strlcat(hls->basename, pattern, basename_size);

    if ((ret = hls_mux_init(s)) < 0)
        goto fail;

    if ((ret = hls_start(s)) < 0)
        goto fail;

    if ((ret = avformat_write_header(hls->avf, NULL)) < 0)
        return ret;

    /* note: the success path falls through with ret == 0, so the
     * cleanup below only runs when a goto landed here with ret != 0 */
fail:
    if (ret) {
        av_free(hls->basename);
        if (hls->avf)
            avformat_free_context(hls->avf);
    }
    return ret;
}
 
/*
 * Muxer write_packet: forward the packet to the chained mpegts muxer,
 * starting a new segment (and rewriting the playlist) once the
 * per-segment recording time has elapsed at a splittable point.
 */
static int hls_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    HLSContext *hls = s->priv_data;
    AVFormatContext *oc = hls->avf;
    AVStream *st = s->streams[pkt->stream_index];
    /* target end time of the current segment, in AV_TIME_BASE units */
    int64_t end_pts = hls->recording_time * hls->number;
    int is_ref_pkt = 1;
    int ret, can_split = 1;

    if (hls->start_pts == AV_NOPTS_VALUE) {
        hls->start_pts = pkt->pts;  // first packet's pts, segmenting origin
        hls->end_pts   = pkt->pts;  // pts at the current segment's start
    }

    if (hls->has_video) {
        /* with video present: only split on video keyframes and track
         * duration from video packets only */
        can_split = st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                    pkt->flags & AV_PKT_FLAG_KEY;
        is_ref_pkt = st->codec->codec_type == AVMEDIA_TYPE_VIDEO;
    }
    if (pkt->pts == AV_NOPTS_VALUE)
        is_ref_pkt = can_split = 0;

    if (is_ref_pkt)
        /* current segment duration, in seconds */
        hls->duration = av_rescale(pkt->pts - hls->end_pts,
                                   st->time_base.num, st->time_base.den);

    if (can_split && av_compare_ts(pkt->pts - hls->start_pts, st->time_base,
                                   end_pts, AV_TIME_BASE_Q) >= 0) {
        ret = append_entry(hls, hls->duration);
        if (ret)
            return ret;

        hls->end_pts = pkt->pts;
        hls->duration = 0;

        av_write_frame(oc, NULL); /* Flush any buffered data */
        avio_close(oc->pb);

        ret = hls_start(s);

        if (ret)
            return ret;

        /* hls_start may only have changed oc->pb, but re-read to be safe */
        oc = hls->avf;

        /* publish the updated playlist */
        if ((ret = hls_window(s, 0)) < 0)
            return ret;
    }

    ret = ff_write_chained(oc, pkt->stream_index, pkt, s);

    return ret;
}
 
/*
 * Muxer write_trailer: finish the last segment, add it to the playlist
 * and write the final playlist with the #EXT-X-ENDLIST marker.
 */
static int hls_write_trailer(struct AVFormatContext *s)
{
    HLSContext *hls = s->priv_data;
    AVFormatContext *oc = hls->avf;

    av_write_trailer(oc);
    avio_closep(&oc->pb);
    avformat_free_context(oc);
    av_free(hls->basename);
    append_entry(hls, hls->duration);
    hls_window(s, 1);

    free_entries(hls);
    /* hls->pb was already closed and NULLed by hls_window(); closing a
     * NULL context here is a harmless no-op */
    avio_close(hls->pb);
    return 0;
}
 
#define OFFSET(x) offsetof(HLSContext, x)
#define E AV_OPT_FLAG_ENCODING_PARAM
/* User-settable options of the hls muxer. */
static const AVOption options[] = {
    /* first media sequence number used in the playlist */
    {"start_number",  "set first number in the sequence",       OFFSET(sequence),AV_OPT_TYPE_INT64,  {.i64 = 0},     0, INT64_MAX, E},
    /* target segment duration in seconds (default 2) */
    {"hls_time",      "set segment length in seconds",          OFFSET(time),    AV_OPT_TYPE_FLOAT,  {.dbl = 2},     0, FLT_MAX, E},
    {"hls_list_size", "set maximum number of playlist entries", OFFSET(size),    AV_OPT_TYPE_INT,    {.i64 = 5},     0, INT_MAX, E},
    {"hls_wrap",      "set number after which the index wraps", OFFSET(wrap),    AV_OPT_TYPE_INT,    {.i64 = 0},     0, INT_MAX, E},
    { NULL },
};
 
/* AVClass exposing the option table above to the av_opt API. */
static const AVClass hls_class = {
    .class_name = "hls muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
 
/* Apple HTTP Live Streaming segmenting muxer (writes .m3u8 playlists). */
AVOutputFormat ff_hls_muxer = {
    .name           = "hls",
    .long_name      = NULL_IF_CONFIG_SMALL("Apple HTTP Live Streaming"),
    .extensions     = "m3u8",
    .priv_data_size = sizeof(HLSContext),
    .audio_codec    = AV_CODEC_ID_MP2,
    .video_codec    = AV_CODEC_ID_MPEG2VIDEO,
    /* opens its own segment files; supports explicit flush packets */
    .flags          = AVFMT_NOFILE | AVFMT_ALLOW_FLUSH,
    .write_header   = hls_write_header,
    .write_packet   = hls_write_packet,
    .write_trailer  = hls_write_trailer,
    .priv_class     = &hls_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/hlsproto.c
0,0 → 1,323
/*
* Apple HTTP Live Streaming Protocol Handler
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Apple HTTP Live Streaming Protocol Handler
* http://tools.ietf.org/html/draft-pantos-http-live-streaming
*/
 
#include "libavutil/avstring.h"
#include "libavutil/time.h"
#include "avformat.h"
#include "internal.h"
#include "url.h"
#include "version.h"
 
/*
* An apple http stream consists of a playlist with media segment files,
* played sequentially. There may be several playlists with the same
* video content, in different bandwidth variants, that are played in
* parallel (preferably only one bandwidth variant at a time). In this case,
* the user supplied the url to a main playlist that only lists the variant
* playlists.
*
* If the main playlist doesn't point at any variants, we still create
* one anonymous toplevel variant for this, to maintain the structure.
*/
 
/* One media segment parsed from the playlist. */
struct segment {
    int64_t duration;       /* in AV_TIME_BASE units (see #EXTINF parsing) */
    char url[MAX_URL_SIZE];
};

/* One variant (bandwidth alternative) playlist. */
struct variant {
    int bandwidth;
    char url[MAX_URL_SIZE];
};

/* Protocol-handler state for one hls:// stream. */
typedef struct HLSContext {
    char playlisturl[MAX_URL_SIZE]; /* currently active playlist URL */
    int64_t target_duration;        /* in AV_TIME_BASE units */
    int start_seq_no;               /* sequence number of segments[0] */
    int finished;                   /* set when #EXT-X-ENDLIST was seen */
    int n_segments;
    struct segment **segments;
    int n_variants;
    struct variant **variants;
    int cur_seq_no;                 /* sequence number currently being read */
    URLContext *seg_hd;             /* handle of the currently open segment */
    int64_t last_load_time;         /* av_gettime() of last playlist load */
} HLSContext;
 
/* Read one line and strip all trailing whitespace (CR/LF included).
 * Returns the length after stripping. */
static int read_chomp_line(AVIOContext *s, char *buf, int maxlen)
{
    int len = ff_get_line(s, buf, maxlen);

    for (; len > 0 && av_isspace(buf[len - 1]); len--)
        buf[len - 1] = '\0';
    return len;
}
 
/* Free every segment entry and the list itself; leaves n_segments = 0. */
static void free_segment_list(HLSContext *s)
{
    while (s->n_segments > 0)
        av_free(s->segments[--s->n_segments]);
    av_freep(&s->segments);
}
 
/* Free every variant entry and the list itself; leaves n_variants = 0. */
static void free_variant_list(HLSContext *s)
{
    while (s->n_variants > 0)
        av_free(s->variants[--s->n_variants]);
    av_freep(&s->variants);
}
 
/* Scratch storage for #EXT-X-STREAM-INF attribute parsing. */
struct variant_info {
    char bandwidth[20]; /* BANDWIDTH= attribute, still as a string */
};
 
/* ff_parse_key_value callback: point the parser at the bandwidth
 * buffer when the BANDWIDTH attribute is encountered. */
static void handle_variant_args(struct variant_info *info, const char *key,
                                int key_len, char **dest, int *dest_len)
{
    if (strncmp(key, "BANDWIDTH=", key_len) != 0)
        return;
    *dest     = info->bandwidth;
    *dest_len = sizeof(info->bandwidth);
}
 
/**
 * Download and parse an (extended) M3U playlist.
 *
 * Variant entries (#EXT-X-STREAM-INF) are appended to s->variants and
 * media segments (#EXTINF) to s->segments; target duration, start
 * sequence number and the finished flag are updated in place.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int parse_playlist(URLContext *h, const char *url)
{
    HLSContext *s = h->priv_data;
    AVIOContext *in;
    int ret = 0, is_segment = 0, is_variant = 0, bandwidth = 0;
    int64_t duration = 0;
    char line[1024];
    const char *ptr;

    if ((ret = avio_open2(&in, url, AVIO_FLAG_READ,
                          &h->interrupt_callback, NULL)) < 0)
        return ret;

    read_chomp_line(in, line, sizeof(line));
    if (strcmp(line, "#EXTM3U")) {
        /* Fix: returning directly here leaked the AVIOContext;
         * close it via the common exit path. */
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    free_segment_list(s);
    s->finished = 0;
    while (!url_feof(in)) {
        read_chomp_line(in, line, sizeof(line));
        if (av_strstart(line, "#EXT-X-STREAM-INF:", &ptr)) {
            struct variant_info info = {{0}};
            is_variant = 1;
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args,
                               &info);
            bandwidth = atoi(info.bandwidth);
        } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) {
            s->target_duration = atoi(ptr) * AV_TIME_BASE;
        } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) {
            s->start_seq_no = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-ENDLIST", &ptr)) {
            s->finished = 1;
        } else if (av_strstart(line, "#EXTINF:", &ptr)) {
            is_segment = 1;
            duration = atof(ptr) * AV_TIME_BASE;
        } else if (av_strstart(line, "#", NULL)) {
            /* unrecognized tag/comment */
            continue;
        } else if (line[0]) {
            /* a plain URI line closes the preceding #EXTINF or
             * #EXT-X-STREAM-INF tag */
            if (is_segment) {
                struct segment *seg = av_malloc(sizeof(struct segment));
                if (!seg) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                seg->duration = duration;
                ff_make_absolute_url(seg->url, sizeof(seg->url), url, line);
                dynarray_add(&s->segments, &s->n_segments, seg);
                is_segment = 0;
            } else if (is_variant) {
                struct variant *var = av_malloc(sizeof(struct variant));
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                var->bandwidth = bandwidth;
                ff_make_absolute_url(var->url, sizeof(var->url), url, line);
                dynarray_add(&s->variants, &s->n_variants, var);
                is_variant = 0;
            }
        }
    }
    s->last_load_time = av_gettime();

fail:
    avio_close(in);
    return ret;
}
 
/* Tear down the protocol context: close the open segment (if any)
 * and release both playlist lists. */
static int hls_close(URLContext *h)
{
    HLSContext *s = h->priv_data;

    ffurl_close(s->seg_hd);
    free_variant_list(s);
    free_segment_list(s);
    return 0;
}
 
/**
 * Open an hls+proto:// stream: fetch and parse the playlist, pick the
 * highest-bandwidth variant when the main playlist only lists variants,
 * and position the sequence counter (near the live edge for live streams).
 */
static int hls_open(URLContext *h, const char *uri, int flags)
{
    HLSContext *s = h->priv_data;
    int ret, i;
    const char *nested_url;

    if (flags & AVIO_FLAG_WRITE)
        return AVERROR(ENOSYS);

    h->is_streamed = 1;

    if (av_strstart(uri, "hls+", &nested_url)) {
        av_strlcpy(s->playlisturl, nested_url, sizeof(s->playlisturl));
    } else if (av_strstart(uri, "hls://", &nested_url)) {
        av_log(h, AV_LOG_ERROR,
               "No nested protocol specified. Specify e.g. hls+http://%s\n",
               nested_url);
        ret = AVERROR(EINVAL);
        goto fail;
    } else {
        av_log(h, AV_LOG_ERROR, "Unsupported url %s\n", uri);
        ret = AVERROR(EINVAL);
        goto fail;
    }
    av_log(h, AV_LOG_WARNING,
           "Using the hls protocol is discouraged, please try using the "
           "hls demuxer instead. The hls demuxer should be more complete "
           "and work as well as the protocol implementation. (If not, "
           "please report it.) To use the demuxer, simply use %s as url.\n",
           s->playlisturl);

    if ((ret = parse_playlist(h, s->playlisturl)) < 0)
        goto fail;

    /* Main playlist lists only variants: select the one with the
     * highest bandwidth and load its media playlist instead. */
    if (s->n_segments == 0 && s->n_variants > 0) {
        int max_bandwidth = 0, maxvar = -1;
        for (i = 0; i < s->n_variants; i++) {
            if (s->variants[i]->bandwidth > max_bandwidth || i == 0) {
                max_bandwidth = s->variants[i]->bandwidth;
                maxvar = i;
            }
        }
        av_strlcpy(s->playlisturl, s->variants[maxvar]->url,
                   sizeof(s->playlisturl));
        if ((ret = parse_playlist(h, s->playlisturl)) < 0)
            goto fail;
    }

    if (s->n_segments == 0) {
        av_log(h, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR(EIO);
        goto fail;
    }
    s->cur_seq_no = s->start_seq_no;
    /* live stream: start three segments back from the live edge */
    if (!s->finished && s->n_segments >= 3)
        s->cur_seq_no = s->start_seq_no + s->n_segments - 3;

    return 0;

fail:
    hls_close(h);
    return ret;
}
 
/**
 * Read from the stream, transparently advancing across segments and
 * periodically reloading the playlist for live (unfinished) streams.
 */
static int hls_read(URLContext *h, uint8_t *buf, int size)
{
    HLSContext *s = h->priv_data;
    const char *url;
    int ret;
    int64_t reload_interval;

start:
    if (s->seg_hd) {
        ret = ffurl_read(s->seg_hd, buf, size);
        if (ret > 0)
            return ret;
    }
    if (s->seg_hd) {
        /* current segment drained — move to the next sequence number */
        ffurl_close(s->seg_hd);
        s->seg_hd = NULL;
        s->cur_seq_no++;
    }
    reload_interval = s->n_segments > 0 ?
                      s->segments[s->n_segments - 1]->duration :
                      s->target_duration;
retry:
    if (!s->finished) {
        int64_t now = av_gettime();
        if (now - s->last_load_time >= reload_interval) {
            if ((ret = parse_playlist(h, s->playlisturl)) < 0)
                return ret;
            /* If we need to reload the playlist again below (if
             * there's still no more segments), switch to a reload
             * interval of half the target duration. */
            reload_interval = s->target_duration / 2;
        }
    }
    if (s->cur_seq_no < s->start_seq_no) {
        av_log(h, AV_LOG_WARNING,
               "skipping %d segments ahead, expired from playlist\n",
               s->start_seq_no - s->cur_seq_no);
        s->cur_seq_no = s->start_seq_no;
    }
    if (s->cur_seq_no - s->start_seq_no >= s->n_segments) {
        if (s->finished)
            return AVERROR_EOF;
        /* live stream ran dry: wait for the next playlist refresh */
        while (av_gettime() - s->last_load_time < reload_interval) {
            if (ff_check_interrupt(&h->interrupt_callback))
                return AVERROR_EXIT;
            av_usleep(100*1000);
        }
        goto retry;
    }
    /* Fix: a stray comma operator fused this assignment with the
     * av_log() call into one expression statement. */
    url = s->segments[s->cur_seq_no - s->start_seq_no]->url;
    av_log(h, AV_LOG_DEBUG, "opening %s\n", url);
    ret = ffurl_open(&s->seg_hd, url, AVIO_FLAG_READ,
                     &h->interrupt_callback, NULL);
    if (ret < 0) {
        if (ff_check_interrupt(&h->interrupt_callback))
            return AVERROR_EXIT;
        av_log(h, AV_LOG_WARNING, "Unable to open %s\n", url);
        s->cur_seq_no++;
        goto retry;
    }
    goto start;
}
 
/* "hls" protocol handler: reads the segments of an HTTP Live Stream
 * sequentially, presenting them as one continuous byte stream. */
URLProtocol ff_hls_protocol = {
    .name           = "hls",
    .url_open       = hls_open,
    .url_read       = hls_read,
    .url_close      = hls_close,
    /* the real transport is the nested protocol (hls+http://...) */
    .flags          = URL_PROTOCOL_FLAG_NESTED_SCHEME,
    .priv_data_size = sizeof(HLSContext),
};
/contrib/sdk/sources/ffmpeg/libavformat/http.c
0,0 → 1,1095
/*
* HTTP protocol for ffmpeg client
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "avformat.h"
#include "internal.h"
#include "network.h"
#include "http.h"
#include "os_support.h"
#include "httpauth.h"
#include "url.h"
#include "libavutil/opt.h"
 
#if CONFIG_ZLIB
#include <zlib.h>
#endif
 
/* XXX: POST protocol is not completely implemented because ffmpeg uses
only a subset of it. */
 
/* The IO buffer size is unrelated to the max URL size in itself, but needs
* to be large enough to fit the full request headers (including long
* path names).
*/
#define BUFFER_SIZE MAX_URL_SIZE
#define MAX_REDIRECTS 8
 
/* Per-connection state of the http/https protocol handler. */
typedef struct {
    const AVClass *class;
    URLContext *hd; /* underlying transport connection */
    /* response read buffer; also reused to assemble the request */
    unsigned char buffer[BUFFER_SIZE], *buf_ptr, *buf_end;
    int line_count; /* response header lines parsed so far */
    int http_code;
    int64_t chunksize; /**< Used if "Transfer-Encoding: chunked" otherwise -1. */
    char *content_type;
    char *user_agent;
    int64_t off, filesize; /* current byte offset / total size (-1 unknown) */
    int icy_data_read; ///< how much data was read since last ICY metadata packet
    int icy_metaint; ///< after how many bytes of read data a new metadata packet will be found
    char location[MAX_URL_SIZE]; /* current URL, updated on redirects */
    HTTPAuthState auth_state;
    HTTPAuthState proxy_auth_state;
    char *headers; /* user-supplied extra request headers */
    int willclose; /**< Set if the server correctly handles Connection: close and will close the connection after feeding us the content. */
    int seekable; /**< Control seekability, 0 = disable, 1 = enable, -1 = probe. */
    int chunked_post;
    int end_chunked_post; /**< A flag which indicates if the end of chunked encoding has been sent. */
    int end_header; /**< A flag which indicates we have finished to read POST reply. */
    int multiple_requests; /**< A flag which indicates if we use persistent connections. */
    uint8_t *post_data;
    int post_datalen;
    int is_akamai;
    char *mime_type;
    char *cookies; ///< holds newline (\n) delimited Set-Cookie header field values (without the "Set-Cookie: " field name)
    int icy; /* request ICY metadata when set */
    char *icy_metadata_headers;
    char *icy_metadata_packet;
#if CONFIG_ZLIB
    int compressed; /* Content-Encoding: gzip/deflate seen */
    z_stream inflate_stream;
    uint8_t *inflate_buffer;
#endif
    AVDictionary *chained_options; /* options replayed on reconnect */
    int send_expect_100;
} HTTPContext;
 
#define OFFSET(x) offsetof(HTTPContext, x)
#define D AV_OPT_FLAG_DECODING_PARAM
#define E AV_OPT_FLAG_ENCODING_PARAM
#define DEFAULT_USER_AGENT "Lavf/" AV_STRINGIFY(LIBAVFORMAT_VERSION)
/* User-settable options of the http/https protocols. */
static const AVOption options[] = {
{"seekable", "control seekability of connection", OFFSET(seekable), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, D },
{"chunked_post", "use chunked transfer-encoding for posts", OFFSET(chunked_post), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, E },
{"headers", "set custom HTTP headers, can override built in default headers", OFFSET(headers), AV_OPT_TYPE_STRING, { 0 }, 0, 0, D|E },
{"content_type", "force a content type", OFFSET(content_type), AV_OPT_TYPE_STRING, { 0 }, 0, 0, D|E },
{"user-agent", "override User-Agent header", OFFSET(user_agent), AV_OPT_TYPE_STRING, {.str = DEFAULT_USER_AGENT}, 0, 0, D },
{"multiple_requests", "use persistent connections", OFFSET(multiple_requests), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, D|E },
{"post_data", "set custom HTTP post data", OFFSET(post_data), AV_OPT_TYPE_BINARY, .flags = D|E },
{"mime_type", "set MIME type", OFFSET(mime_type), AV_OPT_TYPE_STRING, {0}, 0, 0, 0 },
{"cookies", "set cookies to be sent in applicable future requests, use newline delimited Set-Cookie HTTP field value syntax", OFFSET(cookies), AV_OPT_TYPE_STRING, {0}, 0, 0, D },
{"icy", "request ICY metadata", OFFSET(icy), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, D },
{"icy_metadata_headers", "return ICY metadata headers", OFFSET(icy_metadata_headers), AV_OPT_TYPE_STRING, {0}, 0, 0, 0 },
{"icy_metadata_packet", "return current ICY metadata packet", OFFSET(icy_metadata_packet), AV_OPT_TYPE_STRING, {0}, 0, 0, 0 },
{"auth_type", "HTTP authentication type", OFFSET(auth_state.auth_type), AV_OPT_TYPE_INT, {.i64 = HTTP_AUTH_NONE}, HTTP_AUTH_NONE, HTTP_AUTH_BASIC, D|E, "auth_type" },
{"none", "No auth method set, autodetect", 0, AV_OPT_TYPE_CONST, {.i64 = HTTP_AUTH_NONE}, 0, 0, D|E, "auth_type" },
{"basic", "HTTP basic authentication", 0, AV_OPT_TYPE_CONST, {.i64 = HTTP_AUTH_BASIC}, 0, 0, D|E, "auth_type" },
/* Fix: removed a stray "auth_type" unit string here; send_expect_100 is
 * an independent boolean and the unit made string values resolve against
 * the auth_type named constants above. */
{"send_expect_100", "Force sending an Expect: 100-continue header for POST", OFFSET(send_expect_100), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, E },
{NULL}
};
/* Generate one AVClass per protocol flavor; both share the same
 * option table above. */
#define HTTP_CLASS(flavor)\
static const AVClass flavor ## _context_class = {\
    .class_name = #flavor,\
    .item_name  = av_default_item_name,\
    .option     = options,\
    .version    = LIBAVUTIL_VERSION_INT,\
}

HTTP_CLASS(http);
HTTP_CLASS(https);
 
static int http_connect(URLContext *h, const char *path, const char *local_path,
const char *hoststr, const char *auth,
const char *proxyauth, int *new_location);
 
/* Copy the (proxy) authentication state from one HTTP context to
 * another, e.g. when reopening a connection for a redirect. */
void ff_http_init_auth_state(URLContext *dest, const URLContext *src)
{
    HTTPContext *dst_ctx       = dest->priv_data;
    const HTTPContext *src_ctx = src->priv_data;

    memcpy(&dst_ctx->auth_state, &src_ctx->auth_state,
           sizeof(HTTPAuthState));
    memcpy(&dst_ctx->proxy_auth_state, &src_ctx->proxy_auth_state,
           sizeof(HTTPAuthState));
}
 
/**
 * Open (or re-open) the connection for s->location and perform the
 * request, transparently handling auth retries and redirects.
 *
 * @return 0 on success, non zero (negative AVERROR) on error
 */
static int http_open_cnx(URLContext *h, AVDictionary **options)
{
    const char *path, *proxy_path, *lower_proto = "tcp", *local_path;
    char hostname[1024], hoststr[1024], proto[10];
    char auth[1024], proxyauth[1024] = "";
    char path1[MAX_URL_SIZE];
    char buf[1024], urlbuf[MAX_URL_SIZE];
    int port, use_proxy, err, location_changed = 0, redirects = 0, attempts = 0;
    HTTPAuthType cur_auth_type, cur_proxy_auth_type;
    HTTPContext *s = h->priv_data;

    /* fill the dest addr */
 redo:
    /* needed in any case to build the host string */
    av_url_split(proto, sizeof(proto), auth, sizeof(auth),
                 hostname, sizeof(hostname), &port,
                 path1, sizeof(path1), s->location);
    ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL);

    proxy_path = getenv("http_proxy");
    use_proxy = !ff_http_match_no_proxy(getenv("no_proxy"), hostname) &&
                proxy_path != NULL && av_strstart(proxy_path, "http://", NULL);

    if (!strcmp(proto, "https")) {
        lower_proto = "tls";
        use_proxy = 0;
        if (port < 0)
            port = 443;
    }
    if (port < 0)
        port = 80;

    if (path1[0] == '\0')
        path = "/";
    else
        path = path1;
    local_path = path;
    if (use_proxy) {
        /* Reassemble the request URL without auth string - we don't
         * want to leak the auth to the proxy. */
        ff_url_join(urlbuf, sizeof(urlbuf), proto, NULL, hostname, port, "%s",
                    path1);
        path = urlbuf;
        av_url_split(NULL, 0, proxyauth, sizeof(proxyauth),
                     hostname, sizeof(hostname), &port, NULL, 0, proxy_path);
    }

    ff_url_join(buf, sizeof(buf), lower_proto, NULL, hostname, port, NULL);

    if (!s->hd) {
        err = ffurl_open(&s->hd, buf, AVIO_FLAG_READ_WRITE,
                         &h->interrupt_callback, options);
        if (err < 0)
            goto fail;
    }

    /* Remember the auth types known *before* this attempt, so we can
     * tell whether the 401/407 response taught us something new. */
    cur_auth_type       = s->auth_state.auth_type;
    /* Fix: previously this read s->auth_state.auth_type (copy-paste),
     * breaking the retry check for 407 proxy authentication below. */
    cur_proxy_auth_type = s->proxy_auth_state.auth_type;
    if (http_connect(h, path, local_path, hoststr, auth, proxyauth, &location_changed) < 0)
        goto fail;
    attempts++;
    if (s->http_code == 401) {
        if ((cur_auth_type == HTTP_AUTH_NONE || s->auth_state.stale) &&
            s->auth_state.auth_type != HTTP_AUTH_NONE && attempts < 4) {
            ffurl_closep(&s->hd);
            goto redo;
        } else
            goto fail;
    }
    if (s->http_code == 407) {
        if ((cur_proxy_auth_type == HTTP_AUTH_NONE || s->proxy_auth_state.stale) &&
            s->proxy_auth_state.auth_type != HTTP_AUTH_NONE && attempts < 4) {
            ffurl_closep(&s->hd);
            goto redo;
        } else
            goto fail;
    }
    if ((s->http_code == 301 || s->http_code == 302 || s->http_code == 303 || s->http_code == 307)
        && location_changed == 1) {
        /* url moved, get next */
        ffurl_closep(&s->hd);
        if (redirects++ >= MAX_REDIRECTS)
            return AVERROR(EIO);
        /* Restart the authentication process with the new target, which
         * might use a different auth mechanism. */
        memset(&s->auth_state, 0, sizeof(s->auth_state));
        attempts         = 0;
        location_changed = 0;
        goto redo;
    }
    return 0;
 fail:
    if (s->hd)
        ffurl_closep(&s->hd);
    return AVERROR(EIO);
}
 
/* Issue a new request for uri on an already-open persistent
 * connection, replaying the options used at open time. */
int ff_http_do_new_request(URLContext *h, const char *uri)
{
    HTTPContext *s = h->priv_data;
    AVDictionary *opts = NULL;
    int ret;

    s->off           = 0;
    s->icy_data_read = 0;
    av_strlcpy(s->location, uri, sizeof(s->location));

    av_dict_copy(&opts, s->chained_options, 0);
    ret = http_open_cnx(h, &opts);
    av_dict_free(&opts);
    return ret;
}
 
/* Open an http(s) URL; warns on custom headers missing the trailing CRLF. */
static int http_open(URLContext *h, const char *uri, int flags,
                     AVDictionary **options)
{
    HTTPContext *s = h->priv_data;
    int ret;

    /* only forced seekability (seekable == 1) makes the stream seekable
     * up front; probing may clear is_streamed later */
    h->is_streamed = (s->seekable != 1);

    s->filesize = -1;
    av_strlcpy(s->location, uri, sizeof(s->location));
    if (options)
        av_dict_copy(&s->chained_options, *options, 0);

    if (s->headers) {
        size_t hdr_len = strlen(s->headers);
        if (hdr_len < 2 || strcmp("\r\n", s->headers + hdr_len - 2))
            av_log(h, AV_LOG_WARNING, "No trailing CRLF found in HTTP header.\n");
    }

    ret = http_open_cnx(h, options);
    if (ret < 0)
        av_dict_free(&s->chained_options);
    return ret;
}
/* Fetch one byte from the buffered connection; refills the buffer on
 * demand. Returns the byte, -1 on EOF, or a negative error. */
static int http_getc(HTTPContext *s)
{
    if (s->buf_ptr >= s->buf_end) {
        int len = ffurl_read(s->hd, s->buffer, BUFFER_SIZE);
        if (len < 0)
            return len;
        if (len == 0)
            return -1;
        s->buf_ptr = s->buffer;
        s->buf_end = s->buffer + len;
    }
    return *s->buf_ptr++;
}
 
/* Read one CRLF/LF-terminated line into line (NUL-terminated, CR
 * stripped, silently truncated to line_size). Returns 0 or a negative
 * error from http_getc(). */
static int http_get_line(HTTPContext *s, char *line, int line_size)
{
    char *out = line;

    for (;;) {
        int ch = http_getc(s);
        if (ch < 0)
            return ch;
        if (ch == '\n')
            break;
        if (out - line < line_size - 1)
            *out++ = ch;
    }
    /* drop a trailing CR, then terminate */
    if (out > line && out[-1] == '\r')
        out--;
    *out = '\0';
    return 0;
}
 
/**
 * Parse one line of the HTTP response.
 *
 * @param line        the line, already stripped of its CRLF
 * @param line_count  0 for the status line, >0 for header fields
 * @param new_location set to 1 when a Location: redirect was seen
 * @return 0 at the end of the header block, 1 to keep reading,
 *         negative on fatal error
 */
static int process_line(URLContext *h, char *line, int line_count,
                        int *new_location)
{
    HTTPContext *s = h->priv_data;
    char *tag, *p, *end;
    char redirected_location[MAX_URL_SIZE];

    /* end of header */
    if (line[0] == '\0') {
        s->end_header = 1;
        return 0;
    }

    p = line;
    if (line_count == 0) {
        /* status line: skip the "HTTP/1.x" token, then parse the code */
        while (!av_isspace(*p) && *p != '\0')
            p++;
        while (av_isspace(*p))
            p++;
        s->http_code = strtol(p, &end, 10);

        av_dlog(NULL, "http_code=%d\n", s->http_code);

        /* error codes are 4xx and 5xx, but regard 401 as a success, so we
         * don't abort until all headers have been parsed. */
        if (s->http_code >= 400 && s->http_code < 600 && (s->http_code != 401
            || s->auth_state.auth_type != HTTP_AUTH_NONE) &&
            (s->http_code != 407 || s->proxy_auth_state.auth_type != HTTP_AUTH_NONE)) {
            end += strspn(end, SPACE_CHARS);
            av_log(h, AV_LOG_WARNING, "HTTP error %d %s\n",
                   s->http_code, end);
            return -1;
        }
    } else {
        /* header field: split at ':' into tag and value */
        while (*p != '\0' && *p != ':')
            p++;
        if (*p != ':')
            return 1;

        *p = '\0';
        tag = line;
        p++;
        while (av_isspace(*p))
            p++;
        if (!av_strcasecmp(tag, "Location")) {
            ff_make_absolute_url(redirected_location, sizeof(redirected_location), s->location, p);
            av_strlcpy(s->location, redirected_location, sizeof(s->location));
            *new_location = 1;
        } else if (!av_strcasecmp (tag, "Content-Length") && s->filesize == -1) {
            s->filesize = strtoll(p, NULL, 10);
        } else if (!av_strcasecmp (tag, "Content-Range")) {
            /* "bytes $from-$to/$document_size" */
            const char *slash;
            if (!strncmp (p, "bytes ", 6)) {
                p += 6;
                s->off = strtoll(p, NULL, 10);
                if ((slash = strchr(p, '/')) && strlen(slash) > 0)
                    s->filesize = strtoll(slash+1, NULL, 10);
            }
            /* Akamai with filesize INT32_MAX is treated as non-seekable */
            if (s->seekable == -1 && (!s->is_akamai || s->filesize != 2147483647))
                h->is_streamed = 0; /* we _can_ in fact seek */
        } else if (!av_strcasecmp(tag, "Accept-Ranges") && !strncmp(p, "bytes", 5) && s->seekable == -1) {
            h->is_streamed = 0;
        } else if (!av_strcasecmp (tag, "Transfer-Encoding") && !av_strncasecmp(p, "chunked", 7)) {
            /* chunked transfer: total size unknown, enable chunk parsing */
            s->filesize = -1;
            s->chunksize = 0;
        } else if (!av_strcasecmp (tag, "WWW-Authenticate")) {
            ff_http_auth_handle_header(&s->auth_state, tag, p);
        } else if (!av_strcasecmp (tag, "Authentication-Info")) {
            ff_http_auth_handle_header(&s->auth_state, tag, p);
        } else if (!av_strcasecmp (tag, "Proxy-Authenticate")) {
            ff_http_auth_handle_header(&s->proxy_auth_state, tag, p);
        } else if (!av_strcasecmp (tag, "Connection")) {
            if (!strcmp(p, "close"))
                s->willclose = 1;
        } else if (!av_strcasecmp (tag, "Server") && !av_strcasecmp (p, "AkamaiGHost")) {
            s->is_akamai = 1;
        } else if (!av_strcasecmp (tag, "Content-Type")) {
            av_free(s->mime_type); s->mime_type = av_strdup(p);
        } else if (!av_strcasecmp (tag, "Set-Cookie")) {
            /* append to the newline-delimited cookie list */
            if (!s->cookies) {
                if (!(s->cookies = av_strdup(p)))
                    return AVERROR(ENOMEM);
            } else {
                char *tmp = s->cookies;
                size_t str_size = strlen(tmp) + strlen(p) + 2;
                if (!(s->cookies = av_malloc(str_size))) {
                    s->cookies = tmp;
                    return AVERROR(ENOMEM);
                }
                snprintf(s->cookies, str_size, "%s\n%s", tmp, p);
                av_free(tmp);
            }
        } else if (!av_strcasecmp (tag, "Icy-MetaInt")) {
            s->icy_metaint = strtoll(p, NULL, 10);
        } else if (!av_strncasecmp(tag, "Icy-", 4)) {
            // Concat all Icy- header lines
            char *buf = av_asprintf("%s%s: %s\n",
                s->icy_metadata_headers ? s->icy_metadata_headers : "", tag, p);
            if (!buf)
                return AVERROR(ENOMEM);
            av_freep(&s->icy_metadata_headers);
            s->icy_metadata_headers = buf;
        } else if (!av_strcasecmp (tag, "Content-Encoding")) {
            if (!av_strncasecmp(p, "gzip", 4) || !av_strncasecmp(p, "deflate", 7)) {
#if CONFIG_ZLIB
                s->compressed = 1;
                /* windowBits 32 + 15 enables zlib/gzip auto-detection */
                inflateEnd(&s->inflate_stream);
                if (inflateInit2(&s->inflate_stream, 32 + 15) != Z_OK) {
                    av_log(h, AV_LOG_WARNING, "Error during zlib initialisation: %s\n",
                           s->inflate_stream.msg);
                    return AVERROR(ENOSYS);
                }
                if (zlibCompileFlags() & (1 << 17)) {
                    av_log(h, AV_LOG_WARNING, "Your zlib was compiled without gzip support.\n");
                    return AVERROR(ENOSYS);
                }
#else
                av_log(h, AV_LOG_WARNING, "Compressed (%s) content, need zlib with gzip support\n", p);
                return AVERROR(ENOSYS);
#endif
            } else if (!av_strncasecmp(p, "identity", 8)) {
                // The normal, no-encoding case (although servers shouldn't include
                // the header at all if this is the case).
            } else {
                av_log(h, AV_LOG_WARNING, "Unknown content coding: %s\n", p);
            }
        }
    }
    return 1;
}
 
/**
 * Create a string containing cookie values for use as a HTTP cookie header
 * field value for a particular path and domain from the cookie values stored in
 * the HTTP protocol context. The cookie string is stored in *cookies.
 *
 * @param path   request path the cookies must apply to
 * @param domain request host the cookies must apply to
 * @return a negative value if an error condition occurred, 0 otherwise
 */
static int get_cookies(HTTPContext *s, char **cookies, const char *path,
                       const char *domain)
{
    // cookie strings will look like Set-Cookie header field values. Multiple
    // Set-Cookie fields will result in multiple values delimited by a newline
    int ret = 0;
    char *next, *cookie, *set_cookies = av_strdup(s->cookies), *cset_cookies = set_cookies;

    if (!set_cookies) return AVERROR(EINVAL);

    *cookies = NULL;
    while ((cookie = av_strtok(set_cookies, "\n", &next))) {
        int domain_offset = 0;
        char *param, *next_param, *cdomain = NULL, *cpath = NULL, *cvalue = NULL;
        set_cookies = NULL; // av_strtok: NULL continues the previous string

        // split one Set-Cookie value into its "; "-separated attributes
        while ((param = av_strtok(cookie, "; ", &next_param))) {
            cookie = NULL;
            if (!av_strncasecmp("path=", param, 5)) {
                av_free(cpath);
                cpath = av_strdup(&param[5]);
            } else if (!av_strncasecmp("domain=", param, 7)) {
                av_free(cdomain);
                cdomain = av_strdup(&param[7]);
            } else if (!av_strncasecmp("secure", param, 6) ||
                       !av_strncasecmp("comment", param, 7) ||
                       !av_strncasecmp("max-age", param, 7) ||
                       !av_strncasecmp("version", param, 7)) {
                // ignore Comment, Max-Age, Secure and Version
            } else {
                av_free(cvalue);
                cvalue = av_strdup(param);
            }
        }
        // no explicit cookie domain: fall back to the request domain
        if (!cdomain)
            cdomain = av_strdup(domain);

        // ensure all of the necessary values are valid
        if (!cdomain || !cpath || !cvalue) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid cookie found, no value, path or domain specified\n");
            goto done_cookie;
        }

        // check if the request path matches the cookie path
        if (av_strncasecmp(path, cpath, strlen(cpath)))
            goto done_cookie;

        // the domain should be at least the size of our cookie domain
        domain_offset = strlen(domain) - strlen(cdomain);
        if (domain_offset < 0)
            goto done_cookie;

        // match the cookie domain
        if (av_strcasecmp(&domain[domain_offset], cdomain))
            goto done_cookie;

        // cookie parameters match, so copy the value
        if (!*cookies) {
            if (!(*cookies = av_strdup(cvalue))) {
                ret = AVERROR(ENOMEM);
                goto done_cookie;
            }
        } else {
            // append "; value" to the cookies collected so far
            char *tmp = *cookies;
            size_t str_size = strlen(cvalue) + strlen(*cookies) + 3;
            if (!(*cookies = av_malloc(str_size))) {
                ret = AVERROR(ENOMEM);
                goto done_cookie;
            }
            snprintf(*cookies, str_size, "%s; %s", tmp, cvalue);
            av_free(tmp);
        }

        done_cookie:
        av_free(cdomain);
        av_free(cpath);
        av_free(cvalue);
        if (ret < 0) {
            if (*cookies) av_freep(cookies);
            av_free(cset_cookies);
            return ret;
        }
    }

    av_free(cset_cookies);

    return 0;
}
 
/* Return non-zero when the given header (formatted as "\r\nName: ")
 * already occurs in str, either at the very start or mid-string. */
static inline int has_header(const char *str, const char *header)
{
    if (!str)
        return 0;
    /* header + 2 to skip over CRLF prefix. (make sure you have one!) */
    if (av_stristart(str, header + 2, NULL))
        return 1;
    return av_stristr(str, header) != NULL;
}
 
/* Read and process the full HTTP response header block.
 * Returns 0 on success or a negative error from the line reader/parser. */
static int http_read_header(URLContext *h, int *new_location)
{
    HTTPContext *s = h->priv_data;
    char line[MAX_URL_SIZE];
    int err;

    s->chunksize = -1; /* no chunked encoding until a header says so */

    do {
        if ((err = http_get_line(s, line, sizeof(line))) < 0)
            return err;

        av_dlog(NULL, "header='%s'\n", line);

        err = process_line(h, line, s->line_count, new_location);
        if (err < 0)
            return err;
        if (err > 0)
            s->line_count++;
    } while (err != 0); /* err == 0 means the empty end-of-header line */

    return err;
}
 
/**
 * Build and send the HTTP request for path, then read the response header.
 *
 * @param local_path path without proxy rewriting, used to compute the
 *                   authentication response
 * @param new_location set to 1 if a redirect Location header was seen
 * @return 0 on success, a negative error, or -1 when a requested Range
 *         was ignored by the server (s->off no longer matches)
 */
static int http_connect(URLContext *h, const char *path, const char *local_path,
                        const char *hoststr, const char *auth,
                        const char *proxyauth, int *new_location)
{
    HTTPContext *s = h->priv_data;
    int post, err;
    char headers[4096] = "";
    char *authstr = NULL, *proxyauthstr = NULL;
    int64_t off = s->off;
    int len = 0;
    const char *method;
    int send_expect_100 = 0;


    /* send http header */
    post = h->flags & AVIO_FLAG_WRITE;

    if (s->post_data) {
        /* force POST method and disable chunked encoding when
         * custom HTTP post data is set */
        post = 1;
        s->chunked_post = 0;
    }

    method = post ? "POST" : "GET";
    authstr = ff_http_auth_create_response(&s->auth_state, auth, local_path,
                                           method);
    proxyauthstr = ff_http_auth_create_response(&s->proxy_auth_state, proxyauth,
                                                local_path, method);
    if (post && !s->post_data) {
        send_expect_100 = s->send_expect_100;
        /* The user has supplied authentication but we don't know the auth type,
         * send Expect: 100-continue to get the 401 response including the
         * WWW-Authenticate header, or an 100 continue if no auth actually
         * is needed. */
        if (auth && *auth &&
            s->auth_state.auth_type == HTTP_AUTH_NONE &&
            s->http_code != 401)
            send_expect_100 = 1;
    }

    /* set default headers if needed */
    if (!has_header(s->headers, "\r\nUser-Agent: "))
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "User-Agent: %s\r\n", s->user_agent);
    if (!has_header(s->headers, "\r\nAccept: "))
        len += av_strlcpy(headers + len, "Accept: */*\r\n",
                          sizeof(headers) - len);
    // Note: we send this on purpose even when s->off is 0 when we're probing,
    // since it allows us to detect more reliably if a (non-conforming)
    // server supports seeking by analysing the reply headers.
    if (!has_header(s->headers, "\r\nRange: ") && !post && (s->off > 0 || s->seekable == -1))
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Range: bytes=%"PRId64"-\r\n", s->off);
    if (send_expect_100 && !has_header(s->headers, "\r\nExpect: "))
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Expect: 100-continue\r\n");

    if (!has_header(s->headers, "\r\nConnection: ")) {
        if (s->multiple_requests) {
            len += av_strlcpy(headers + len, "Connection: keep-alive\r\n",
                              sizeof(headers) - len);
        } else {
            len += av_strlcpy(headers + len, "Connection: close\r\n",
                              sizeof(headers) - len);
        }
    }

    if (!has_header(s->headers, "\r\nHost: "))
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Host: %s\r\n", hoststr);
    if (!has_header(s->headers, "\r\nContent-Length: ") && s->post_data)
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Content-Length: %d\r\n", s->post_datalen);
    if (!has_header(s->headers, "\r\nContent-Type: ") && s->content_type)
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Content-Type: %s\r\n", s->content_type);
    if (!has_header(s->headers, "\r\nCookie: ") && s->cookies) {
        char *cookies = NULL;
        if (!get_cookies(s, &cookies, path, hoststr)) {
            len += av_strlcatf(headers + len, sizeof(headers) - len,
                               "Cookie: %s\r\n", cookies);
            av_free(cookies);
        }
    }
    if (!has_header(s->headers, "\r\nIcy-MetaData: ") && s->icy) {
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Icy-MetaData: %d\r\n", 1);
    }

    /* now add in custom headers */
    if (s->headers)
        av_strlcpy(headers + len, s->headers, sizeof(headers) - len);

    snprintf(s->buffer, sizeof(s->buffer),
             "%s %s HTTP/1.1\r\n"
             "%s"
             "%s"
             "%s"
             "%s%s"
             "\r\n",
             method,
             path,
             post && s->chunked_post ? "Transfer-Encoding: chunked\r\n" : "",
             headers,
             authstr ? authstr : "",
             proxyauthstr ? "Proxy-" : "", proxyauthstr ? proxyauthstr : "");

    av_freep(&authstr);
    av_freep(&proxyauthstr);
    if ((err = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0)
        return err;

    if (s->post_data)
        if ((err = ffurl_write(s->hd, s->post_data, s->post_datalen)) < 0)
            return err;

    /* init input buffer */
    s->buf_ptr = s->buffer;
    s->buf_end = s->buffer;
    s->line_count = 0;
    s->off = 0;
    s->icy_data_read = 0;
    s->filesize = -1;
    s->willclose = 0;
    s->end_chunked_post = 0;
    s->end_header = 0;
    if (post && !s->post_data && !send_expect_100) {
        /* Pretend that it did work. We didn't read any header yet, since
         * we've still to send the POST data, but the code calling this
         * function will check http_code after we return. */
        s->http_code = 200;
        return 0;
    }

    /* wait for header */
    err = http_read_header(h, new_location);
    if (err < 0)
        return err;

    return (off == s->off) ? 0 : -1;
}
 
 
/* Read up to size bytes, serving leftover bytes from the header buffer
 * first, then reading from the connection. Maintains the byte offset,
 * the ICY byte counter and the remaining chunk size. */
static int http_buf_read(URLContext *h, uint8_t *buf, int size)
{
    HTTPContext *s = h->priv_data;
    int len;
    /* read bytes from input buffer first */
    len = s->buf_end - s->buf_ptr;
    if (len > 0) {
        if (len > size)
            len = size;
        memcpy(buf, s->buf_ptr, len);
        s->buf_ptr += len;
    } else {
        /* known content length fully consumed: report EOF rather than
         * reading on a possibly kept-alive connection */
        if (!s->willclose && s->filesize >= 0 && s->off >= s->filesize)
            return AVERROR_EOF;
        len = ffurl_read(s->hd, buf, size);
    }
    if (len > 0) {
        s->off += len;
        s->icy_data_read += len;
        if (s->chunksize > 0)
            s->chunksize -= len;
    }
    return len;
}
 
#if CONFIG_ZLIB
#define DECOMPRESS_BUF_SIZE (256 * 1024)
/* Read a block of data from the connection and inflate it.
 * Used when the reply body is compressed (s->compressed is set).
 * Returns the number of decompressed bytes placed in buf, 0 on EOF,
 * or a negative AVERROR code from the underlying read. */
static int http_buf_read_compressed(URLContext *h, uint8_t *buf, int size)
{
    HTTPContext *s = h->priv_data;
    int ret;

    /* lazily allocate the staging buffer for compressed input */
    if (!s->inflate_buffer) {
        s->inflate_buffer = av_malloc(DECOMPRESS_BUF_SIZE);
        if (!s->inflate_buffer)
            return AVERROR(ENOMEM);
    }

    /* refill zlib's input only once the previous batch is consumed */
    if (s->inflate_stream.avail_in == 0) {
        int read = http_buf_read(h, s->inflate_buffer, DECOMPRESS_BUF_SIZE);
        if (read <= 0)
            return read; /* propagate EOF / error unchanged */
        s->inflate_stream.next_in = s->inflate_buffer;
        s->inflate_stream.avail_in = read;
    }

    s->inflate_stream.avail_out = size;
    s->inflate_stream.next_out = buf;

    ret = inflate(&s->inflate_stream, Z_SYNC_FLUSH);
    if (ret != Z_OK && ret != Z_STREAM_END)
        av_log(h, AV_LOG_WARNING, "inflate return value: %d, %s\n", ret, s->inflate_stream.msg);

    /* bytes actually decompressed into buf */
    return size - s->inflate_stream.avail_out;
}
#endif
 
/* Read body data, applying chunked-transfer decoding and ICY metadata
 * extraction as needed, then delegating to the (possibly compressed)
 * buffered reader. Returns bytes read, 0 on EOF, negative AVERROR. */
static int http_read(URLContext *h, uint8_t *buf, int size)
{
    HTTPContext *s = h->priv_data;
    int err, new_location;

    if (!s->hd)
        return AVERROR_EOF;

    /* if a chunked POST was just finished, the reply header has not
     * been parsed yet; do that before reading any body data */
    if (s->end_chunked_post && !s->end_header) {
        err = http_read_header(h, &new_location);
        if (err < 0)
            return err;
    }

    if (s->chunksize >= 0) {
        if (!s->chunksize) {
            char line[32];

            /* read the next chunk-size line ("<hex>\r\n") */
            do {
                if ((err = http_get_line(s, line, sizeof(line))) < 0)
                    return err;
            } while (!*line); /* skip CR LF from last chunk */

            s->chunksize = strtoll(line, NULL, 16);

            av_dlog(NULL, "Chunked encoding data size: %"PRId64"'\n", s->chunksize);

            /* a zero-sized chunk terminates the body */
            if (!s->chunksize)
                return 0;
        }
        size = FFMIN(size, s->chunksize);
    }
    if (s->icy_metaint > 0) {
        int remaining = s->icy_metaint - s->icy_data_read; /* until next metadata packet */
        if (!remaining) {
            // The metadata packet is variable sized. It has a 1 byte header
            // which sets the length of the packet (divided by 16). If it's 0,
            // the metadata doesn't change. After the packet, icy_metaint bytes
            // of normal data follow.
            int ch = http_getc(s);
            if (ch < 0)
                return ch;
            if (ch > 0) {
                char data[255 * 16 + 1];
                int n;
                int ret;
                ch *= 16;
                for (n = 0; n < ch; n++) {
                    /* propagate I/O errors instead of storing them */
                    if ((ret = http_getc(s)) < 0)
                        return ret;
                    data[n] = ret;
                }
                /* Bug fix: terminate at data[ch]. The old data[ch + 1] = 0
                 * left data[ch] uninitialized and wrote one byte past the
                 * end of the array when ch == 255 * 16. */
                data[ch] = 0;
                if ((ret = av_opt_set(s, "icy_metadata_packet", data, 0)) < 0)
                    return ret;
            }
            s->icy_data_read = 0;
            remaining = s->icy_metaint;
        }
        size = FFMIN(size, remaining);
    }
#if CONFIG_ZLIB
    if (s->compressed)
        return http_buf_read_compressed(h, buf, size);
#endif
    return http_buf_read(h, buf, size);
}
 
/* used only when posting data */
/* Write outgoing request body data; when chunked_post is enabled each
 * call is wrapped in a "<hex-size>\r\n ... \r\n" chunk. Returns the
 * number of payload bytes accepted, or a negative AVERROR code. */
static int http_write(URLContext *h, const uint8_t *buf, int size)
{
    HTTPContext *s = h->priv_data;
    char sizehdr[11] = ""; /* 32-bit hex + CRLF + nul */
    char crlf[] = "\r\n";
    int ret;

    /* non-chunked data is sent without any special encoding */
    if (!s->chunked_post)
        return ffurl_write(s->hd, buf, size);

    /* silently ignore zero-size data since a zero-sized chunk would
     * signal EOF in chunked encoding */
    if (size <= 0)
        return size;

    /* upload data using chunked encoding */
    snprintf(sizehdr, sizeof(sizehdr), "%x\r\n", size);

    if ((ret = ffurl_write(s->hd, sizehdr, strlen(sizehdr))) < 0)
        return ret;
    if ((ret = ffurl_write(s->hd, buf, size)) < 0)
        return ret;
    if ((ret = ffurl_write(s->hd, crlf, sizeof(crlf) - 1)) < 0)
        return ret;
    return size;
}
 
/* Finish the write side: if a chunked POST is in progress, send the
 * terminating zero-length chunk. No-op otherwise. */
static int http_shutdown(URLContext *h, int flags)
{
    HTTPContext *s = h->priv_data;
    char footer[] = "0\r\n\r\n";
    int ret = 0;

    /* signal end of chunked encoding if used */
    if ((flags & AVIO_FLAG_WRITE) && s->chunked_post) {
        ret = ffurl_write(s->hd, footer, sizeof(footer) - 1);
        if (ret > 0)
            ret = 0;
        s->end_chunked_post = 1;
    }

    return ret;
}
 
/* Tear down the HTTP context: release zlib state, finish any pending
 * chunked POST with the terminating chunk, close the transport and
 * free the saved option dictionary. */
static int http_close(URLContext *h)
{
    int ret = 0;
    HTTPContext *s = h->priv_data;

#if CONFIG_ZLIB
    inflateEnd(&s->inflate_stream);
    av_freep(&s->inflate_buffer);
#endif

    if (!s->end_chunked_post) {
        /* Close the write direction by sending the end of chunked encoding. */
        ret = http_shutdown(h, h->flags);
    }

    if (s->hd)
        ffurl_closep(&s->hd);
    av_dict_free(&s->chained_options);
    return ret;
}
 
/* Seek by reopening the connection at the new offset (http_open_cnx
 * uses s->off). The old connection and its buffered bytes are saved
 * first so they can be restored if the reopen fails.
 * Returns the new offset, the file size for AVSEEK_SIZE, or -1. */
static int64_t http_seek(URLContext *h, int64_t off, int whence)
{
    HTTPContext *s = h->priv_data;
    URLContext *old_hd = s->hd;
    int64_t old_off = s->off;
    uint8_t old_buf[BUFFER_SIZE];
    int old_buf_size;
    AVDictionary *options = NULL;

    if (whence == AVSEEK_SIZE)
        return s->filesize;
    else if ((s->filesize == -1 && whence == SEEK_END) || h->is_streamed)
        return -1;

    /* we save the old context in case the seek fails */
    old_buf_size = s->buf_end - s->buf_ptr;
    memcpy(old_buf, s->buf_ptr, old_buf_size);
    s->hd = NULL;
    /* translate whence-relative positions to an absolute offset */
    if (whence == SEEK_CUR)
        off += s->off;
    else if (whence == SEEK_END)
        off += s->filesize;
    s->off = off;

    /* if it fails, continue on old connection */
    av_dict_copy(&options, s->chained_options, 0);
    if (http_open_cnx(h, &options) < 0) {
        av_dict_free(&options);
        /* restore the saved connection state byte-for-byte */
        memcpy(s->buffer, old_buf, old_buf_size);
        s->buf_ptr = s->buffer;
        s->buf_end = s->buffer + old_buf_size;
        s->hd = old_hd;
        s->off = old_off;
        return -1;
    }
    av_dict_free(&options);
    ffurl_close(old_hd);
    return off;
}
 
/* Expose the file descriptor of the underlying transport connection. */
static int http_get_file_handle(URLContext *h)
{
    HTTPContext *s = h->priv_data;

    return ffurl_get_file_handle(s->hd);
}
 
#if CONFIG_HTTP_PROTOCOL
/* Protocol handler for plain http:// URLs. */
URLProtocol ff_http_protocol = {
    .name                = "http",
    .url_open2           = http_open,
    .url_read            = http_read,
    .url_write           = http_write,
    .url_seek            = http_seek,
    .url_close           = http_close,
    .url_get_file_handle = http_get_file_handle,
    .url_shutdown        = http_shutdown,
    .priv_data_size      = sizeof(HTTPContext),
    .priv_data_class     = &http_context_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};
#endif
#if CONFIG_HTTPS_PROTOCOL
/* Protocol handler for https:// URLs; shares all callbacks with the
 * http handler and differs only in its private option class. */
URLProtocol ff_https_protocol = {
    .name                = "https",
    .url_open2           = http_open,
    .url_read            = http_read,
    .url_write           = http_write,
    .url_seek            = http_seek,
    .url_close           = http_close,
    .url_get_file_handle = http_get_file_handle,
    .url_shutdown        = http_shutdown,
    .priv_data_size      = sizeof(HTTPContext),
    .priv_data_class     = &https_context_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};
#endif
 
#if CONFIG_HTTPPROXY_PROTOCOL
/* Close the tunnelled transport connection, if one is open. */
static int http_proxy_close(URLContext *h)
{
    HTTPContext *s = h->priv_data;

    if (s->hd)
        ffurl_closep(&s->hd);

    return 0;
}
 
/* Open a tunnel through an HTTP proxy with the CONNECT method.
 * The tunnel target ("host:port") is carried in the path component of
 * the httpproxy:// URI. Retries once with Proxy-Authorization if the
 * proxy answers 407 and provided usable authentication parameters. */
static int http_proxy_open(URLContext *h, const char *uri, int flags)
{
    HTTPContext *s = h->priv_data;
    char hostname[1024], hoststr[1024];
    char auth[1024], pathbuf[1024], *path;
    char lower_url[100];
    int port, ret = 0, attempts = 0;
    HTTPAuthType cur_auth_type;
    char *authstr;
    int new_loc;

    if (s->seekable == 1)
        h->is_streamed = 0;
    else
        h->is_streamed = 1;

    av_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
                 pathbuf, sizeof(pathbuf), uri);
    ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL);
    path = pathbuf;
    /* the CONNECT target must not carry the leading '/' of the path */
    if (*path == '/')
        path++;

    ff_url_join(lower_url, sizeof(lower_url), "tcp", NULL, hostname, port,
                NULL);
redo:
    ret = ffurl_open(&s->hd, lower_url, AVIO_FLAG_READ_WRITE,
                     &h->interrupt_callback, NULL);
    if (ret < 0)
        return ret;

    authstr = ff_http_auth_create_response(&s->proxy_auth_state, auth,
                                           path, "CONNECT");
    snprintf(s->buffer, sizeof(s->buffer),
             "CONNECT %s HTTP/1.1\r\n"
             "Host: %s\r\n"
             "Connection: close\r\n"
             "%s%s"
             "\r\n",
             path,
             hoststr,
             authstr ? "Proxy-" : "", authstr ? authstr : "");
    av_freep(&authstr);

    if ((ret = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0)
        goto fail;

    /* reset buffered-reader state before parsing the proxy's reply */
    s->buf_ptr = s->buffer;
    s->buf_end = s->buffer;
    s->line_count = 0;
    s->filesize = -1;
    cur_auth_type = s->proxy_auth_state.auth_type;

    /* Note: This uses buffering, potentially reading more than the
     * HTTP header. If tunneling a protocol where the server starts
     * the conversation, we might buffer part of that here, too.
     * Reading that requires using the proper ffurl_read() function
     * on this URLContext, not using the fd directly (as the tls
     * protocol does). This shouldn't be an issue for tls though,
     * since the client starts the conversation there, so there
     * is no extra data that we might buffer up here.
     */
    ret = http_read_header(h, &new_loc);
    if (ret < 0)
        goto fail;

    attempts++;
    /* exactly one retry with credentials after a 407 challenge */
    if (s->http_code == 407 &&
        (cur_auth_type == HTTP_AUTH_NONE || s->proxy_auth_state.stale) &&
        s->proxy_auth_state.auth_type != HTTP_AUTH_NONE && attempts < 2) {
        ffurl_closep(&s->hd);
        goto redo;
    }

    if (s->http_code < 400)
        return 0;
    ret = AVERROR(EIO);

fail:
    http_proxy_close(h);
    return ret;
}
 
/* Pass writes straight through to the CONNECT tunnel. */
static int http_proxy_write(URLContext *h, const uint8_t *buf, int size)
{
    HTTPContext *s = h->priv_data;

    return ffurl_write(s->hd, buf, size);
}
 
/* Protocol handler for httpproxy:// (CONNECT tunnels). Reads bypass
 * http_read and use http_buf_read directly: after the CONNECT reply,
 * the stream is raw tunnelled bytes with no HTTP framing. */
URLProtocol ff_httpproxy_protocol = {
    .name                = "httpproxy",
    .url_open            = http_proxy_open,
    .url_read            = http_buf_read,
    .url_write           = http_proxy_write,
    .url_close           = http_proxy_close,
    .url_get_file_handle = http_get_file_handle,
    .priv_data_size      = sizeof(HTTPContext),
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/http.h
0,0 → 1,48
/*
* HTTP definitions
* Copyright (c) 2010 Josh Allmann
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_HTTP_H
#define AVFORMAT_HTTP_H
 
#include "url.h"
 
/**
* Initialize the authentication state based on another HTTP URLContext.
* This can be used to pre-initialize the authentication parameters if
* they are known beforehand, to avoid having to do an initial failing
* request just to get the parameters.
*
* @param dest URL context whose authentication state gets updated
* @param src URL context whose authentication state gets copied
*/
void ff_http_init_auth_state(URLContext *dest, const URLContext *src);
 
/**
* Send a new HTTP request, reusing the old connection.
*
* @param h pointer to the resource
* @param uri uri used to perform the request
* @return a negative value if an error condition occurred, 0
* otherwise
*/
int ff_http_do_new_request(URLContext *h, const char *uri);
 
#endif /* AVFORMAT_HTTP_H */
/contrib/sdk/sources/ffmpeg/libavformat/httpauth.c
0,0 → 1,287
/*
* HTTP authentication
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "httpauth.h"
#include "libavutil/base64.h"
#include "libavutil/avstring.h"
#include "internal.h"
#include "libavutil/random_seed.h"
#include "libavutil/md5.h"
#include "urldecode.h"
#include "avformat.h"
 
/* ff_parse_key_value callback for Basic auth challenges: the only
 * parameter of interest is the realm. */
static void handle_basic_params(HTTPAuthState *state, const char *key,
                                int key_len, char **dest, int *dest_len)
{
    if (strncmp(key, "realm=", key_len))
        return;

    *dest     = state->realm;
    *dest_len = sizeof(state->realm);
}
 
/* ff_parse_key_value callback for "Digest ..." challenge headers:
 * map each recognized parameter name to the state buffer that the
 * parser should copy its value into. Note key includes the trailing
 * '=' and key_len covers it, so the comparisons match exact names. */
static void handle_digest_params(HTTPAuthState *state, const char *key,
                                 int key_len, char **dest, int *dest_len)
{
    DigestParams *digest = &state->digest_params;

    if (!strncmp(key, "realm=", key_len)) {
        *dest = state->realm;
        *dest_len = sizeof(state->realm);
    } else if (!strncmp(key, "nonce=", key_len)) {
        *dest = digest->nonce;
        *dest_len = sizeof(digest->nonce);
    } else if (!strncmp(key, "opaque=", key_len)) {
        *dest = digest->opaque;
        *dest_len = sizeof(digest->opaque);
    } else if (!strncmp(key, "algorithm=", key_len)) {
        *dest = digest->algorithm;
        *dest_len = sizeof(digest->algorithm);
    } else if (!strncmp(key, "qop=", key_len)) {
        *dest = digest->qop;
        *dest_len = sizeof(digest->qop);
    } else if (!strncmp(key, "stale=", key_len)) {
        *dest = digest->stale;
        *dest_len = sizeof(digest->stale);
    }
}
 
/* ff_parse_key_value callback for Authentication-Info headers; only
 * "nextnonce" is of interest, it replaces the current digest nonce. */
static void handle_digest_update(HTTPAuthState *state, const char *key,
                                 int key_len, char **dest, int *dest_len)
{
    DigestParams *digest = &state->digest_params;

    if (strncmp(key, "nextnonce=", key_len))
        return;

    *dest     = digest->nonce;
    *dest_len = sizeof(digest->nonce);
}
 
/**
 * Reduce the server-supplied qop alternatives to the single value we
 * support: keep "auth" if it occurs as a complete list token, clear
 * the string otherwise ("auth-int" is not implemented).
 */
static void choose_qop(char *qop, int size)
{
    char *ptr = strstr(qop, "auth");

    /* Bug fix: only derive 'end' after checking ptr for NULL.
     * Computing NULL + strlen("auth") was undefined behavior whenever
     * "auth" did not occur in the qop string. */
    if (ptr) {
        char *end = ptr + strlen("auth");

        /* accept only a whole token: delimited by start/end of string,
         * whitespace, or ',' on both sides (rejects e.g. "auth-int") */
        if ((!*end || av_isspace(*end) || *end == ',') &&
            (ptr == qop || av_isspace(ptr[-1]) || ptr[-1] == ',')) {
            av_strlcpy(qop, "auth", size);
            return;
        }
    }

    qop[0] = 0;
}
 
/* Update the auth state from a response header: WWW-Authenticate /
 * Proxy-Authenticate select and configure an auth scheme (upgrading
 * only to an equal-or-stronger type), Authentication-Info refreshes
 * the digest nonce between requests. */
void ff_http_auth_handle_header(HTTPAuthState *state, const char *key,
                                const char *value)
{
    if (!strcmp(key, "WWW-Authenticate") || !strcmp(key, "Proxy-Authenticate")) {
        const char *p;
        if (av_stristart(value, "Basic ", &p) &&
            state->auth_type <= HTTP_AUTH_BASIC) {
            state->auth_type = HTTP_AUTH_BASIC;
            state->realm[0] = 0;
            state->stale = 0;
            ff_parse_key_value(p, (ff_parse_key_val_cb) handle_basic_params,
                               state);
        } else if (av_stristart(value, "Digest ", &p) &&
                   state->auth_type <= HTTP_AUTH_DIGEST) {
            state->auth_type = HTTP_AUTH_DIGEST;
            memset(&state->digest_params, 0, sizeof(DigestParams));
            state->realm[0] = 0;
            state->stale = 0;
            ff_parse_key_value(p, (ff_parse_key_val_cb) handle_digest_params,
                               state);
            /* keep only a qop value we can actually produce */
            choose_qop(state->digest_params.qop,
                       sizeof(state->digest_params.qop));
            if (!av_strcasecmp(state->digest_params.stale, "true"))
                state->stale = 1;
        }
    } else if (!strcmp(key, "Authentication-Info")) {
        ff_parse_key_value(value, (ff_parse_key_val_cb) handle_digest_update,
                           state);
    }
}
 
 
/* Feed a NULL-terminated variadic list of C strings into an MD5
 * context, in order. */
static void update_md5_strings(struct AVMD5 *md5ctx, ...)
{
    const char *str;
    va_list vl;

    va_start(vl, md5ctx);
    while ((str = va_arg(vl, const char *)))
        av_md5_update(md5ctx, str, strlen(str));
    va_end(vl);
}
 
/* Generate a digest reply, according to RFC 2617.
 * Builds the "Authorization: Digest ..." line (CRLF-terminated) from
 * the stored challenge parameters. Returns a malloced string the
 * caller frees, or NULL on error / unsupported algorithm or qop. */
static char *make_digest_auth(HTTPAuthState *state, const char *username,
                              const char *password, const char *uri,
                              const char *method)
{
    DigestParams *digest = &state->digest_params;
    int len;
    uint32_t cnonce_buf[2];
    char cnonce[17];
    char nc[9];
    int i;
    char A1hash[33], A2hash[33], response[33];
    struct AVMD5 *md5ctx;
    uint8_t hash[16];
    char *authstr;

    digest->nc++;
    snprintf(nc, sizeof(nc), "%08x", digest->nc);

    /* Generate a client nonce. */
    for (i = 0; i < 2; i++)
        cnonce_buf[i] = av_get_random_seed();
    ff_data_to_hex(cnonce, (const uint8_t*) cnonce_buf, sizeof(cnonce_buf), 1);
    cnonce[2*sizeof(cnonce_buf)] = 0;

    md5ctx = av_md5_alloc();
    if (!md5ctx)
        return NULL;

    /* A1 = MD5(username:realm:password) */
    av_md5_init(md5ctx);
    update_md5_strings(md5ctx, username, ":", state->realm, ":", password, NULL);
    av_md5_final(md5ctx, hash);
    ff_data_to_hex(A1hash, hash, 16, 1);
    A1hash[32] = 0;

    if (!strcmp(digest->algorithm, "") || !strcmp(digest->algorithm, "MD5")) {
        /* plain MD5: A1 is used as-is */
    } else if (!strcmp(digest->algorithm, "MD5-sess")) {
        /* MD5-sess: A1 = MD5(A1:nonce:cnonce) */
        av_md5_init(md5ctx);
        update_md5_strings(md5ctx, A1hash, ":", digest->nonce, ":", cnonce, NULL);
        av_md5_final(md5ctx, hash);
        ff_data_to_hex(A1hash, hash, 16, 1);
        A1hash[32] = 0;
    } else {
        /* Unsupported algorithm */
        av_free(md5ctx);
        return NULL;
    }

    /* A2 = MD5(method:uri) */
    av_md5_init(md5ctx);
    update_md5_strings(md5ctx, method, ":", uri, NULL);
    av_md5_final(md5ctx, hash);
    ff_data_to_hex(A2hash, hash, 16, 1);
    A2hash[32] = 0;

    /* response = MD5(A1:nonce[:nc:cnonce:qop]:A2) */
    av_md5_init(md5ctx);
    update_md5_strings(md5ctx, A1hash, ":", digest->nonce, NULL);
    if (!strcmp(digest->qop, "auth") || !strcmp(digest->qop, "auth-int")) {
        update_md5_strings(md5ctx, ":", nc, ":", cnonce, ":", digest->qop, NULL);
    }
    update_md5_strings(md5ctx, ":", A2hash, NULL);
    av_md5_final(md5ctx, hash);
    ff_data_to_hex(response, hash, 16, 1);
    response[32] = 0;

    av_free(md5ctx);

    if (!strcmp(digest->qop, "") || !strcmp(digest->qop, "auth")) {
        /* supported qop values; proceed */
    } else if (!strcmp(digest->qop, "auth-int")) {
        /* qop=auth-int not supported */
        return NULL;
    } else {
        /* Unsupported qop value. */
        return NULL;
    }

    /* upper bound for the header; 150 covers the constant parts */
    len = strlen(username) + strlen(state->realm) + strlen(digest->nonce) +
          strlen(uri) + strlen(response) + strlen(digest->algorithm) +
          strlen(digest->opaque) + strlen(digest->qop) + strlen(cnonce) +
          strlen(nc) + 150;

    authstr = av_malloc(len);
    if (!authstr)
        return NULL;
    snprintf(authstr, len, "Authorization: Digest ");

    /* TODO: Escape the quoted strings properly. */
    av_strlcatf(authstr, len, "username=\"%s\"", username);
    av_strlcatf(authstr, len, ",realm=\"%s\"", state->realm);
    av_strlcatf(authstr, len, ",nonce=\"%s\"", digest->nonce);
    av_strlcatf(authstr, len, ",uri=\"%s\"", uri);
    av_strlcatf(authstr, len, ",response=\"%s\"", response);
    if (digest->algorithm[0])
        av_strlcatf(authstr, len, ",algorithm=%s", digest->algorithm);
    if (digest->opaque[0])
        av_strlcatf(authstr, len, ",opaque=\"%s\"", digest->opaque);
    if (digest->qop[0]) {
        av_strlcatf(authstr, len, ",qop=\"%s\"", digest->qop);
        av_strlcatf(authstr, len, ",cnonce=\"%s\"", cnonce);
        av_strlcatf(authstr, len, ",nc=%s", nc);
    }

    av_strlcatf(authstr, len, "\r\n");

    return authstr;
}
 
/* Build an Authorization header line (CRLF-terminated) from a
 * URL-encoded "user:password" credential string, using the currently
 * selected auth type. Returns a malloced string the caller frees, or
 * NULL when no credentials are present or no auth type is selected. */
char *ff_http_auth_create_response(HTTPAuthState *state, const char *auth,
                                   const char *path, const char *method)
{
    char *authstr = NULL;

    /* Clear the stale flag, we assume the auth is ok now. It is reset
     * by the server headers if there's a new issue. */
    state->stale = 0;
    if (!auth || !strchr(auth, ':'))
        return NULL;

    if (state->auth_type == HTTP_AUTH_BASIC) {
        int auth_b64_len, len;
        char *ptr, *decoded_auth = ff_urldecode(auth);

        if (!decoded_auth)
            return NULL;

        auth_b64_len = AV_BASE64_SIZE(strlen(decoded_auth));
        len = auth_b64_len + 30;

        authstr = av_malloc(len);
        if (!authstr) {
            av_free(decoded_auth);
            return NULL;
        }

        /* base64("user:password") appended after the scheme prefix */
        snprintf(authstr, len, "Authorization: Basic ");
        ptr = authstr + strlen(authstr);
        av_base64_encode(ptr, auth_b64_len, decoded_auth, strlen(decoded_auth));
        av_strlcat(ptr, "\r\n", len - (ptr - authstr));
        av_free(decoded_auth);
    } else if (state->auth_type == HTTP_AUTH_DIGEST) {
        char *username = ff_urldecode(auth), *password;

        if (!username)
            return NULL;

        /* split "user:password" at the first ':' */
        if ((password = strchr(username, ':'))) {
            *password++ = 0;
            authstr = make_digest_auth(state, username, password, path, method);
        }
        av_free(username);
    }
    return authstr;
}
/contrib/sdk/sources/ffmpeg/libavformat/httpauth.h
0,0 → 1,79
/*
* HTTP authentication
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_HTTPAUTH_H
#define AVFORMAT_HTTPAUTH_H
 
/**
* Authentication types, ordered from weakest to strongest.
*/
typedef enum HTTPAuthType {
HTTP_AUTH_NONE = 0, /**< No authentication specified */
HTTP_AUTH_BASIC, /**< HTTP 1.0 Basic auth from RFC 1945
* (also in RFC 2617) */
HTTP_AUTH_DIGEST, /**< HTTP 1.1 Digest auth from RFC 2617 */
} HTTPAuthType;
 
typedef struct DigestParams {
char nonce[300]; /**< Server specified nonce */
char algorithm[10]; /**< Server specified digest algorithm */
char qop[30]; /**< Quality of protection, containing the one
* that we've chosen to use, from the
* alternatives that the server offered. */
char opaque[300]; /**< A server-specified string that should be
* included in authentication responses, not
* included in the actual digest calculation. */
char stale[10]; /**< The server indicated that the auth was ok,
* but needs to be redone with a new, non-stale
* nonce. */
int nc; /**< Nonce count, the number of earlier replies
* where this particular nonce has been used. */
} DigestParams;
 
/**
* HTTP Authentication state structure. Must be zero-initialized
* before used with the functions below.
*/
typedef struct HTTPAuthState {
/**
* The currently chosen auth type.
*/
HTTPAuthType auth_type;
/**
* Authentication realm
*/
char realm[200];
/**
* The parameters specifiec to digest authentication.
*/
DigestParams digest_params;
/**
* Auth ok, but needs to be resent with a new nonce.
*/
int stale;
} HTTPAuthState;
 
void ff_http_auth_handle_header(HTTPAuthState *state, const char *key,
const char *value);
char *ff_http_auth_create_response(HTTPAuthState *state, const char *auth,
const char *path, const char *method);
 
#endif /* AVFORMAT_HTTPAUTH_H */
/contrib/sdk/sources/ffmpeg/libavformat/icodec.c
0,0 → 1,182
/*
* Microsoft Windows ICO demuxer
* Copyright (c) 2011 Peter Ross (pross@xvid.org)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Microsoft Windows ICO demuxer
*/
 
#include "libavutil/intreadwrite.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/bmp.h"
#include "avformat.h"
#include "internal.h"
 
typedef struct {
int offset;
int size;
int nb_pal;
} IcoImage;
 
typedef struct {
int current_image;
int nb_images;
IcoImage * images;
} IcoDemuxContext;
 
/* An ICO starts with reserved == 0, type == 1 (icon) and a non-zero
 * image count, all little-endian 16-bit fields. */
static int probe(AVProbeData *p)
{
    int reserved = AV_RL16(p->buf);
    int type     = AV_RL16(p->buf + 2);
    int count    = AV_RL16(p->buf + 4);

    if (reserved == 0 && type == 1 && count != 0)
        return AVPROBE_SCORE_MAX / 3;

    return 0;
}
 
/* Parse the ICONDIR and one 16-byte ICONDIRENTRY per image, creating
 * one video stream per image and recording each image's size/offset
 * for read_packet. The payload type (PNG vs BMP) is sniffed from the
 * first dword at each image's offset. */
static int read_header(AVFormatContext *s)
{
    IcoDemuxContext *ico = s->priv_data;
    AVIOContext *pb = s->pb;
    int i, codec;

    avio_skip(pb, 4);               /* reserved (2 bytes) + type (2 bytes) */
    ico->nb_images = avio_rl16(pb);

    /* NOTE(review): this array is never freed -- the demuxer registers
     * no read_close callback and the error paths below leak it too;
     * confirm against the avformat cleanup rules. */
    ico->images = av_malloc(ico->nb_images * sizeof(IcoImage));
    if (!ico->images)
        return AVERROR(ENOMEM);

    for (i = 0; i < ico->nb_images; i++) {
        AVStream *st;
        int tmp;

        /* directory entry i lives at offset 6 + i * 16 */
        if (avio_seek(pb, 6 + i * 16, SEEK_SET) < 0)
            break;

        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);

        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->width = avio_r8(pb);
        st->codec->height = avio_r8(pb);
        ico->images[i].nb_pal = avio_r8(pb);
        if (ico->images[i].nb_pal == 255)  /* 255 palette entries treated as none */
            ico->images[i].nb_pal = 0;

        avio_skip(pb, 5);           /* 5 unused bytes of the directory entry */

        ico->images[i].size = avio_rl32(pb);
        ico->images[i].offset = avio_rl32(pb);

        if (avio_seek(pb, ico->images[i].offset, SEEK_SET) < 0)
            break;

        /* sniff payload: PNG signature tag or BITMAPINFOHEADER size (40) */
        codec = avio_rl32(pb);
        switch (codec) {
        case MKTAG(0x89, 'P', 'N', 'G'):
            st->codec->codec_id = AV_CODEC_ID_PNG;
            st->codec->width = 0;
            st->codec->height = 0;
            break;
        case 40:
            if (ico->images[i].size < 40)
                return AVERROR_INVALIDDATA;
            st->codec->codec_id = AV_CODEC_ID_BMP;
            tmp = avio_rl32(pb);
            if (tmp)
                st->codec->width = tmp;
            tmp = avio_rl32(pb);
            if (tmp)
                /* stored BMP height includes the AND mask, hence / 2 */
                st->codec->height = tmp / 2;
            break;
        default:
            avpriv_request_sample(s, "codec %d", codec);
            return AVERROR_INVALIDDATA;
        }
    }

    return 0;
}
 
/* Emit one packet per image, in directory order. PNG payloads pass
 * through untouched; BMP payloads get a 14-byte BITMAPFILEHEADER
 * prepended and a few header fields patched so a standalone BMP
 * decoder accepts them. */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    IcoDemuxContext *ico = s->priv_data;
    IcoImage *image;
    AVIOContext *pb = s->pb;
    /* NOTE(review): bits_per_coded_sample below is always written to
     * stream 0, not to streams[ico->current_image] -- looks suspicious,
     * confirm intent. */
    AVStream *st = s->streams[0];
    int ret;

    if (ico->current_image >= ico->nb_images)
        return AVERROR(EIO);

    image = &ico->images[ico->current_image];

    if ((ret = avio_seek(pb, image->offset, SEEK_SET)) < 0)
        return ret;

    if (s->streams[ico->current_image]->codec->codec_id == AV_CODEC_ID_PNG) {
        if ((ret = av_get_packet(pb, pkt, image->size)) < 0)
            return ret;
    } else {
        uint8_t *buf;
        if ((ret = av_new_packet(pkt, 14 + image->size)) < 0)
            return ret;
        buf = pkt->data;

        /* add BMP header */
        bytestream_put_byte(&buf, 'B');
        bytestream_put_byte(&buf, 'M');
        bytestream_put_le32(&buf, pkt->size);
        bytestream_put_le16(&buf, 0);
        bytestream_put_le16(&buf, 0);
        bytestream_put_le32(&buf, 0);

        /* buf now points at pkt->data + 14, where the stored
         * BITMAPINFOHEADER and pixel data are copied */
        if ((ret = avio_read(pb, buf, image->size)) < 0)
            return ret;

        st->codec->bits_per_coded_sample = AV_RL16(buf + 14);

        if (AV_RL32(buf + 32))
            image->nb_pal = AV_RL32(buf + 32);

        if (st->codec->bits_per_coded_sample <= 8 && !image->nb_pal) {
            /* zero palette entries with <= 8 bpp means a full palette */
            image->nb_pal = 1 << st->codec->bits_per_coded_sample;
            AV_WL32(buf + 32, image->nb_pal);
        }

        /* buf - 4 == pkt->data + 10: the pixel-data offset field of
         * the BITMAPFILEHEADER written above */
        AV_WL32(buf - 4, 14 + 40 + image->nb_pal * 4);
        /* stored height covers XOR + AND masks; halve it */
        AV_WL32(buf + 8, AV_RL32(buf + 8) / 2);
    }

    pkt->stream_index = ico->current_image++;
    pkt->flags |= AV_PKT_FLAG_KEY;

    return 0;
}
 
/* NOTE(review): no read_close callback is registered, so the images
 * array allocated in read_header is never freed -- verify against the
 * demuxer lifecycle before relying on this in long-running apps. */
AVInputFormat ff_ico_demuxer = {
    .name           = "ico",
    .long_name      = NULL_IF_CONFIG_SMALL("Microsoft Windows ICO"),
    .priv_data_size = sizeof(IcoDemuxContext),
    .read_probe     = probe,
    .read_header    = read_header,
    .read_packet    = read_packet,
    .flags          = AVFMT_NOTIMESTAMPS,
};
/contrib/sdk/sources/ffmpeg/libavformat/icoenc.c
0,0 → 1,202
/*
* Microsoft Windows ICO muxer
* Copyright (c) 2012 Michael Bradshaw <mjbshaw gmail com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Microsoft Windows ICO muxer
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/pixdesc.h"
#include "avformat.h"
 
typedef struct {
int offset;
int size;
unsigned char width;
unsigned char height;
short bits;
} IcoImage;
 
typedef struct {
int current_image;
int nb_images;
IcoImage *images;
} IcoMuxContext;
 
/* Validate that a stream's codec and pixel format can be stored in an
 * ICO file; logs and returns AVERROR(EINVAL) on unsupported input. */
static int ico_check_attributes(AVFormatContext *s, const AVCodecContext *c)
{
    switch (c->codec_id) {
    case AV_CODEC_ID_BMP:
        if (c->pix_fmt == AV_PIX_FMT_PAL8 && AV_PIX_FMT_RGB32 != AV_PIX_FMT_BGRA) {
            av_log(s, AV_LOG_ERROR, "Wrong endianness for bmp pixel format\n");
            return AVERROR(EINVAL);
        }
        if (c->pix_fmt != AV_PIX_FMT_PAL8     &&
            c->pix_fmt != AV_PIX_FMT_RGB555LE &&
            c->pix_fmt != AV_PIX_FMT_BGR24    &&
            c->pix_fmt != AV_PIX_FMT_BGRA) {
            av_log(s, AV_LOG_ERROR, "BMP must be 1bit, 4bit, 8bit, 16bit, 24bit, or 32bit\n");
            return AVERROR(EINVAL);
        }
        break;
    case AV_CODEC_ID_PNG:
        if (c->pix_fmt != AV_PIX_FMT_RGBA) {
            av_log(s, AV_LOG_ERROR, "PNG in ico requires pixel format to be rgba\n");
            return AVERROR(EINVAL);
        }
        break;
    default:
        av_log(s, AV_LOG_ERROR, "Unsupported codec %s\n", c->codec_name);
        return AVERROR(EINVAL);
    }

    if (c->width > 256 || c->height > 256) {
        av_log(s, AV_LOG_ERROR, "Unsupported dimensions %dx%d (dimensions cannot exceed 256x256)\n", c->width, c->height);
        return AVERROR(EINVAL);
    }

    return 0;
}
 
/* Write the 6-byte ICONDIR plus one 16-byte placeholder directory
 * entry per stream (filled in by ico_write_trailer) and allocate the
 * per-image bookkeeping array. */
static int ico_write_header(AVFormatContext *s)
{
    IcoMuxContext *ico = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret;
    int i;

    /* the trailer must seek back to patch the directory */
    if (!pb->seekable) {
        av_log(s, AV_LOG_ERROR, "Output is not seekable\n");
        return AVERROR(EINVAL);
    }

    ico->current_image = 0;
    ico->nb_images = s->nb_streams;

    avio_wl16(pb, 0); // reserved
    avio_wl16(pb, 1); // 1 == icon
    avio_skip(pb, 2); // skip the number of images

    for (i = 0; i < s->nb_streams; i++) {
        if ((ret = ico_check_attributes(s, s->streams[i]->codec)))
            return ret;

        // Fill in later when writing trailer...
        avio_skip(pb, 16);
    }

    /* Bug fix: size the array with sizeof(IcoImage); the old code used
     * sizeof(IcoMuxContext), the wrong (larger) struct. */
    ico->images = av_mallocz(ico->nb_images * sizeof(IcoImage));
    if (!ico->images)
        return AVERROR(ENOMEM);

    avio_flush(pb);

    return 0;
}
 
/* Write one image's payload. PNG is stored verbatim; a BMP has its
 * 14-byte BITMAPFILEHEADER stripped, its stored height doubled (the
 * ICO height field covers XOR + AND masks) and an all-opaque AND
 * bitmask appended. Directory data is recorded for the trailer. */
static int ico_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    IcoMuxContext *ico = s->priv_data;
    IcoImage *image;
    AVIOContext *pb = s->pb;
    AVCodecContext *c = s->streams[pkt->stream_index]->codec;
    int i;

    if (ico->current_image >= ico->nb_images) {
        av_log(s, AV_LOG_ERROR, "ICO already contains %d images\n", ico->current_image);
        return AVERROR(EIO);
    }

    image = &ico->images[ico->current_image++];

    image->offset = avio_tell(pb);
    /* 256 is stored as 0 in the 8-bit directory fields */
    image->width = (c->width == 256) ? 0 : c->width;
    image->height = (c->height == 256) ? 0 : c->height;

    if (c->codec_id == AV_CODEC_ID_PNG) {
        image->bits = c->bits_per_coded_sample;
        image->size = pkt->size;

        avio_write(pb, pkt->data, pkt->size);
    } else { // BMP
        if (AV_RL32(pkt->data + 14) != 40) { // must be BITMAPINFOHEADER
            av_log(s, AV_LOG_ERROR, "Invalid BMP\n");
            return AVERROR(EINVAL);
        }

        image->bits = AV_RL16(pkt->data + 28); // allows things like 1bit and 4bit images to be preserved
        /* payload minus file header plus the 1-bit AND mask rows */
        image->size = pkt->size - 14 + c->height * (c->width + 7) / 8;

        avio_write(pb, pkt->data + 14, 8); // Skip the BITMAPFILEHEADER header
        avio_wl32(pb, AV_RL32(pkt->data + 22) * 2); // rewrite height as 2 * height
        avio_write(pb, pkt->data + 26, pkt->size - 26);

        for (i = 0; i < c->height * (c->width + 7) / 8; ++i)
            avio_w8(pb, 0x00); // Write bitmask (opaque)
    }

    return 0;
}
 
/* Seek back to the ICONDIR and fill in the image count plus the
 * per-image directory entries skipped by ico_write_header. */
static int ico_write_trailer(AVFormatContext *s)
{
    IcoMuxContext *ico = s->priv_data;
    AVIOContext *pb = s->pb;
    int i;

    avio_seek(pb, 4, SEEK_SET);
    avio_wl16(pb, ico->current_image);

    for (i = 0; i < ico->nb_images; i++) {
        const IcoImage *img = &ico->images[i];
        int nb_colors = 0;

        /* palette size byte: meaningful only for paletted BMPs */
        if (s->streams[i]->codec->codec_id == AV_CODEC_ID_BMP &&
            s->streams[i]->codec->pix_fmt == AV_PIX_FMT_PAL8)
            nb_colors = (img->bits >= 8) ? 0 : 1 << img->bits;

        avio_w8(pb, img->width);
        avio_w8(pb, img->height);
        avio_w8(pb, nb_colors);
        avio_w8(pb, 0);           // reserved
        avio_wl16(pb, 1);         // color planes
        avio_wl16(pb, img->bits);
        avio_wl32(pb, img->size);
        avio_wl32(pb, img->offset);
    }

    av_freep(&ico->images);

    return 0;
}
 
/* Muxer registration; output must be seekable (the trailer patches
 * the directory written in the header). */
AVOutputFormat ff_ico_muxer = {
    .name           = "ico",
    .long_name      = NULL_IF_CONFIG_SMALL("Microsoft Windows ICO"),
    .priv_data_size = sizeof(IcoMuxContext),
    .mime_type      = "image/vnd.microsoft.icon",
    .extensions     = "ico",
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_BMP,
    .write_header   = ico_write_header,
    .write_packet   = ico_write_packet,
    .write_trailer  = ico_write_trailer,
    .flags          = AVFMT_NOTIMESTAMPS,
};
/contrib/sdk/sources/ffmpeg/libavformat/id3v1.c
0,0 → 1,246
/*
* ID3v1 header parser
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "id3v1.h"
#include "libavcodec/avcodec.h"
#include "libavutil/dict.h"
 
/* See Genre List at http://id3.org/id3v2.3.0 */
/* Mapping from the one-byte ID3v1 genre code to its display name.
 * Indexed directly by the genre byte; entries above 79 are commonly
 * attributed to Winamp extensions — NOTE(review): confirm against the
 * genre list referenced above. */
const char * const ff_id3v1_genre_str[ID3v1_GENRE_MAX + 1] = {
      [0] = "Blues",
      [1] = "Classic Rock",
      [2] = "Country",
      [3] = "Dance",
      [4] = "Disco",
      [5] = "Funk",
      [6] = "Grunge",
      [7] = "Hip-Hop",
      [8] = "Jazz",
      [9] = "Metal",
     [10] = "New Age",
     [11] = "Oldies",
     [12] = "Other",
     [13] = "Pop",
     [14] = "R&B",
     [15] = "Rap",
     [16] = "Reggae",
     [17] = "Rock",
     [18] = "Techno",
     [19] = "Industrial",
     [20] = "Alternative",
     [21] = "Ska",
     [22] = "Death Metal",
     [23] = "Pranks",
     [24] = "Soundtrack",
     [25] = "Euro-Techno",
     [26] = "Ambient",
     [27] = "Trip-Hop",
     [28] = "Vocal",
     [29] = "Jazz+Funk",
     [30] = "Fusion",
     [31] = "Trance",
     [32] = "Classical",
     [33] = "Instrumental",
     [34] = "Acid",
     [35] = "House",
     [36] = "Game",
     [37] = "Sound Clip",
     [38] = "Gospel",
     [39] = "Noise",
     [40] = "AlternRock",
     [41] = "Bass",
     [42] = "Soul",
     [43] = "Punk",
     [44] = "Space",
     [45] = "Meditative",
     [46] = "Instrumental Pop",
     [47] = "Instrumental Rock",
     [48] = "Ethnic",
     [49] = "Gothic",
     [50] = "Darkwave",
     [51] = "Techno-Industrial",
     [52] = "Electronic",
     [53] = "Pop-Folk",
     [54] = "Eurodance",
     [55] = "Dream",
     [56] = "Southern Rock",
     [57] = "Comedy",
     [58] = "Cult",
     [59] = "Gangsta",
     [60] = "Top 40",
     [61] = "Christian Rap",
     [62] = "Pop/Funk",
     [63] = "Jungle",
     [64] = "Native American",
     [65] = "Cabaret",
     [66] = "New Wave",
     [67] = "Psychadelic", /* sic, the misspelling is used in the specification */
     [68] = "Rave",
     [69] = "Showtunes",
     [70] = "Trailer",
     [71] = "Lo-Fi",
     [72] = "Tribal",
     [73] = "Acid Punk",
     [74] = "Acid Jazz",
     [75] = "Polka",
     [76] = "Retro",
     [77] = "Musical",
     [78] = "Rock & Roll",
     [79] = "Hard Rock",
     [80] = "Folk",
     [81] = "Folk-Rock",
     [82] = "National Folk",
     [83] = "Swing",
     [84] = "Fast Fusion",
     [85] = "Bebob",
     [86] = "Latin",
     [87] = "Revival",
     [88] = "Celtic",
     [89] = "Bluegrass",
     [90] = "Avantgarde",
     [91] = "Gothic Rock",
     [92] = "Progressive Rock",
     [93] = "Psychedelic Rock",
     [94] = "Symphonic Rock",
     [95] = "Slow Rock",
     [96] = "Big Band",
     [97] = "Chorus",
     [98] = "Easy Listening",
     [99] = "Acoustic",
    [100] = "Humour",
    [101] = "Speech",
    [102] = "Chanson",
    [103] = "Opera",
    [104] = "Chamber Music",
    [105] = "Sonata",
    [106] = "Symphony",
    [107] = "Booty Bass",
    [108] = "Primus",
    [109] = "Porn Groove",
    [110] = "Satire",
    [111] = "Slow Jam",
    [112] = "Club",
    [113] = "Tango",
    [114] = "Samba",
    [115] = "Folklore",
    [116] = "Ballad",
    [117] = "Power Ballad",
    [118] = "Rhythmic Soul",
    [119] = "Freestyle",
    [120] = "Duet",
    [121] = "Punk Rock",
    [122] = "Drum Solo",
    [123] = "A capella",
    [124] = "Euro-House",
    [125] = "Dance Hall",
    [126] = "Goa",
    [127] = "Drum & Bass",
    [128] = "Club-House",
    [129] = "Hardcore",
    [130] = "Terror",
    [131] = "Indie",
    [132] = "BritPop",
    [133] = "Negerpunk",
    [134] = "Polsk Punk",
    [135] = "Beat",
    [136] = "Christian Gangsta",
    [137] = "Heavy Metal",
    [138] = "Black Metal",
    [139] = "Crossover",
    [140] = "Contemporary Christian",
    [141] = "Christian Rock",
    [142] = "Merengue",
    [143] = "Salsa",
    [144] = "Thrash Metal",
    [145] = "Anime",
    [146] = "JPop",
    [147] = "SynthPop",
};
 
/**
 * Copy a fixed-width, possibly NUL-padded ID3v1 field into the metadata
 * dictionary. Copying stops at the first NUL byte, at buf_size bytes, or
 * when the local buffer is full; empty fields are not stored.
 */
static void get_string(AVFormatContext *s, const char *key,
                       const uint8_t *buf, int buf_size)
{
    char str[512];
    int len = 0;

    while (len < buf_size && buf[len] && len < (int)sizeof(str) - 1) {
        str[len] = buf[len];
        len++;
    }
    str[len] = '\0';

    if (str[0])
        av_dict_set(&s->metadata, key, str, 0);
}
 
/**
* Parse an ID3v1 tag
*
* @param buf ID3v1_TAG_SIZE long buffer containing the tag
*/
/**
 * Parse an ID3v1 tag
 *
 * @param buf ID3v1_TAG_SIZE long buffer containing the tag
 * @return 0 on success, -1 if the magic bytes are missing
 */
static int parse_tag(AVFormatContext *s, const uint8_t *buf)
{
    char track[5];
    int genre;

    /* every ID3v1 tag starts with the magic bytes "TAG" */
    if (buf[0] != 'T' || buf[1] != 'A' || buf[2] != 'G')
        return -1;

    get_string(s, "title",   buf +  3, 30);
    get_string(s, "artist",  buf + 33, 30);
    get_string(s, "album",   buf + 63, 30);
    get_string(s, "date",    buf + 93,  4);
    get_string(s, "comment", buf + 97, 30);

    /* ID3v1.1: a zero byte at offset 125 marks offset 126 as track number */
    if (buf[125] == 0 && buf[126] != 0) {
        snprintf(track, sizeof(track), "%d", buf[126]);
        av_dict_set(&s->metadata, "track", track, 0);
    }

    genre = buf[127];
    if (genre <= ID3v1_GENRE_MAX)
        av_dict_set(&s->metadata, "genre", ff_id3v1_genre_str[genre], 0);
    return 0;
}
 
/**
 * Read an ID3v1 tag from the last ID3v1_TAG_SIZE (128) bytes of the input,
 * if the stream is seekable. The stream position is restored afterwards.
 * Uses ID3v1_TAG_SIZE consistently instead of the bare constant 128.
 */
void ff_id3v1_read(AVFormatContext *s)
{
    int ret;
    uint8_t buf[ID3v1_TAG_SIZE];
    int64_t filesize, position = avio_tell(s->pb);

    if (s->pb->seekable) {
        /* XXX: change that */
        filesize = avio_size(s->pb);
        if (filesize > ID3v1_TAG_SIZE) {
            /* the tag occupies the final ID3v1_TAG_SIZE bytes of the file */
            avio_seek(s->pb, filesize - ID3v1_TAG_SIZE, SEEK_SET);
            ret = avio_read(s->pb, buf, ID3v1_TAG_SIZE);
            if (ret == ID3v1_TAG_SIZE)
                parse_tag(s, buf);
            avio_seek(s->pb, position, SEEK_SET);
        }
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/id3v1.h
0,0 → 1,41
/*
* ID3v1 header parser
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_ID3V1_H
#define AVFORMAT_ID3V1_H
 
#include "avformat.h"
 
#define ID3v1_TAG_SIZE 128
 
#define ID3v1_GENRE_MAX 147
 
/**
* ID3v1 genres
*/
extern const char * const ff_id3v1_genre_str[ID3v1_GENRE_MAX + 1];
 
/**
* Read an ID3v1 tag
*/
void ff_id3v1_read(AVFormatContext *s);
 
#endif /* AVFORMAT_ID3V1_H */
/contrib/sdk/sources/ffmpeg/libavformat/id3v2.c
0,0 → 1,897
/*
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* ID3v2 header parser
*
* Specifications available at:
* http://id3.org/Developer_Information
*/
 
#include "config.h"
 
#if CONFIG_ZLIB
#include <zlib.h>
#endif
 
#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
#include "avio_internal.h"
#include "internal.h"
#include "id3v1.h"
#include "id3v2.h"
 
/* Frame-ID -> generic metadata key mapping shared by ID3v2.3 and v2.4. */
const AVMetadataConv ff_id3v2_34_metadata_conv[] = {
    { "TALB", "album"        },
    { "TCOM", "composer"     },
    { "TCON", "genre"        },
    { "TCOP", "copyright"    },
    { "TENC", "encoded_by"   },
    { "TIT2", "title"        },
    { "TLAN", "language"     },
    { "TPE1", "artist"       },
    { "TPE2", "album_artist" },
    { "TPE3", "performer"    },
    { "TPOS", "disc"         },
    { "TPUB", "publisher"    },
    { "TRCK", "track"        },
    { "TSSE", "encoder"      },
    { 0 }
};
 
/* Frame-ID -> generic metadata key mapping for ID3v2.4-only frames. */
const AVMetadataConv ff_id3v2_4_metadata_conv[] = {
    { "TDRL", "date"          },
    { "TDRC", "date"          },
    { "TDEN", "creation_time" },
    { "TSOA", "album-sort"    },
    { "TSOP", "artist-sort"   },
    { "TSOT", "title-sort"    },
    { 0 }
};
 
/* Frame-ID -> generic metadata key mapping for ID3v2.2 (three-letter IDs). */
static const AVMetadataConv id3v2_2_metadata_conv[] = {
    { "TAL", "album"        },
    { "TCO", "genre"        },
    { "TT2", "title"        },
    { "TEN", "encoded_by"   },
    { "TP1", "artist"       },
    { "TP2", "album_artist" },
    { "TP3", "performer"    },
    { "TRK", "track"        },
    { 0 }
};
 
/* Text information frames valid in both ID3v2.3 and v2.4; zero-terminated. */
const char ff_id3v2_tags[][4] = {
    "TALB", "TBPM", "TCOM", "TCON", "TCOP", "TDLY", "TENC", "TEXT",
    "TFLT", "TIT1", "TIT2", "TIT3", "TKEY", "TLAN", "TLEN", "TMED",
    "TOAL", "TOFN", "TOLY", "TOPE", "TOWN", "TPE1", "TPE2", "TPE3",
    "TPE4", "TPOS", "TPUB", "TRCK", "TRSN", "TRSO", "TSRC", "TSSE",
    { 0 },
};
 
/* Text information frames introduced in ID3v2.4; zero-terminated. */
const char ff_id3v2_4_tags[][4] = {
    "TDEN", "TDOR", "TDRC", "TDRL", "TDTG", "TIPL", "TMCL", "TMOO",
    "TPRO", "TSOA", "TSOP", "TSOT", "TSST",
    { 0 },
};
 
/* Text information frames that exist only in ID3v2.3; zero-terminated. */
const char ff_id3v2_3_tags[][4] = {
    "TDAT", "TIME", "TORY", "TRDA", "TSIZ", "TYER",
    { 0 },
};
 
/* Human-readable names for the APIC picture-type byte, indexed by its value. */
const char *ff_id3v2_picture_types[21] = {
    "Other",
    "32x32 pixels 'file icon'",
    "Other file icon",
    "Cover (front)",
    "Cover (back)",
    "Leaflet page",
    "Media (e.g. label side of CD)",
    "Lead artist/lead performer/soloist",
    "Artist/performer",
    "Conductor",
    "Band/Orchestra",
    "Composer",
    "Lyricist/text writer",
    "Recording Location",
    "During recording",
    "During performance",
    "Movie/video screen capture",
    "A bright coloured fish",
    "Illustration",
    "Band/artist logotype",
    "Publisher/Studio logotype",
};
 
/* Mapping from APIC mimetype strings (or v2.2 3-letter image formats) to
 * codec IDs; terminated by an entry with AV_CODEC_ID_NONE. */
const CodecMime ff_id3v2_mime_tags[] = {
    { "image/gif",  AV_CODEC_ID_GIF   },
    { "image/jpeg", AV_CODEC_ID_MJPEG },
    { "image/jpg",  AV_CODEC_ID_MJPEG },
    { "image/png",  AV_CODEC_ID_PNG   },
    { "image/tiff", AV_CODEC_ID_TIFF  },
    { "image/bmp",  AV_CODEC_ID_BMP   },
    { "JPG",        AV_CODEC_ID_MJPEG }, /* ID3v2.2 */
    { "PNG",        AV_CODEC_ID_PNG   }, /* ID3v2.2 */
    { "",           AV_CODEC_ID_NONE  },
};
 
/**
 * Check whether buf starts with a plausible ID3v2 header: the three magic
 * bytes, a version/revision pair below 0xff, and a syncsafe size (top bit
 * of each of the four size bytes clear).
 *
 * @param buf must be at least ID3v2_HEADER_SIZE bytes long
 * @return 1 if the header matches, 0 otherwise
 */
int ff_id3v2_match(const uint8_t *buf, const char *magic)
{
    if (buf[0] != magic[0] || buf[1] != magic[1] || buf[2] != magic[2])
        return 0;
    if (buf[3] == 0xff || buf[4] == 0xff)
        return 0;
    /* all four size bytes must be syncsafe (MSB clear) */
    return ((buf[6] | buf[7] | buf[8] | buf[9]) & 0x80) == 0;
}
 
/**
 * Total size of an already-detected ID3v2 tag: header plus the syncsafe
 * 28-bit payload length, plus one extra header size when the footer flag
 * (bit 4 of the flags byte) is set.
 */
int ff_id3v2_tag_len(const uint8_t *buf)
{
    int len = ID3v2_HEADER_SIZE +
              (((buf[6] & 0x7f) << 21) |
               ((buf[7] & 0x7f) << 14) |
               ((buf[8] & 0x7f) <<  7) |
                (buf[9] & 0x7f));
    /* a footer, if present, mirrors the header's 10 bytes */
    return (buf[5] & 0x10) ? len + ID3v2_HEADER_SIZE : len;
}
 
/* Read a syncsafe integer of len bytes (7 significant bits each) from s. */
static unsigned int get_size(AVIOContext *s, int len)
{
    int v = 0;
    for (; len > 0; len--)
        v = (v << 7) + (avio_r8(s) & 0x7F);
    return v;
}
 
/**
* Free GEOB type extra metadata.
*/
/**
 * Free GEOB type extra metadata (the strings, the payload and the struct).
 */
static void free_geobtag(void *obj)
{
    ID3v2ExtraMetaGEOB *g = obj;

    av_free(g->mime_type);
    av_free(g->file_name);
    av_free(g->description);
    av_free(g->data);
    av_free(g);
}
 
/**
* Decode characters to UTF-8 according to encoding type. The decoded buffer is
* always null terminated. Stop reading when either *maxread bytes are read from
* pb or U+0000 character is found.
*
* @param dst Pointer where the address of the buffer with the decoded bytes is
* stored. Buffer must be freed by caller.
* @param maxread Pointer to maximum number of characters to read from the
* AVIOContext. After execution the value is decremented by the number of bytes
* actually read.
* @returns 0 if no error occurred, dst is uninitialized on error
*/
static int decode_str(AVFormatContext *s, AVIOContext *pb, int encoding,
                      uint8_t **dst, int *maxread)
{
    int ret;
    uint8_t tmp;
    /* ch doubles as the "last decoded character" and the loop-continue flag;
     * initialized non-zero so the loops run at least once */
    uint32_t ch = 1;
    int left = *maxread;
    /* UTF-16 word reader; defaults to big-endian, switched to little-endian
     * when a 0xfffe BOM is seen */
    unsigned int (*get)(AVIOContext*) = avio_rb16;
    AVIOContext *dynbuf;

    if ((ret = avio_open_dyn_buf(&dynbuf)) < 0) {
        av_log(s, AV_LOG_ERROR, "Error opening memory stream\n");
        return ret;
    }

    switch (encoding) {
    case ID3v2_ENCODING_ISO8859:
        /* each input byte is one Latin-1 code point; re-encode it as UTF-8 */
        while (left && ch) {
            ch = avio_r8(pb);
            PUT_UTF8(ch, tmp, avio_w8(dynbuf, tmp);)
            left--;
        }
        break;

    case ID3v2_ENCODING_UTF16BOM:
        if ((left -= 2) < 0) {
            av_log(s, AV_LOG_ERROR, "Cannot read BOM value, input too short\n");
            avio_close_dyn_buf(dynbuf, dst);
            av_freep(dst);
            return AVERROR_INVALIDDATA;
        }
        switch (avio_rb16(pb)) {
        case 0xfffe:
            /* byte-swapped BOM: switch the reader, then fall through to
             * the accepted-BOM case */
            get = avio_rl16;
        case 0xfeff:
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Incorrect BOM value\n");
            avio_close_dyn_buf(dynbuf, dst);
            av_freep(dst);
            *maxread = left;
            return AVERROR_INVALIDDATA;
        }
        // fall-through

    case ID3v2_ENCODING_UTF16BE:
        /* transcode UTF-16 (via the selected endianness reader) to UTF-8 */
        while ((left > 1) && ch) {
            GET_UTF16(ch, ((left -= 2) >= 0 ? get(pb) : 0), break;)
            PUT_UTF8(ch, tmp, avio_w8(dynbuf, tmp);)
        }
        if (left < 0)
            left += 2; /* did not read last char from pb */
        break;

    case ID3v2_ENCODING_UTF8:
        /* already UTF-8: copy bytes through unchanged */
        while (left && ch) {
            ch = avio_r8(pb);
            avio_w8(dynbuf, ch);
            left--;
        }
        break;
    default:
        av_log(s, AV_LOG_WARNING, "Unknown encoding\n");
    }

    /* if the loop ended on exhaustion rather than on a NUL, terminate */
    if (ch)
        avio_w8(dynbuf, 0);

    avio_close_dyn_buf(dynbuf, dst);
    *maxread = left;

    return 0;
}
 
/**
* Parse a text tag.
*/
/**
 * Parse a text tag.
 *
 * TCON/TCO values of the form "(NN)" or "NN" are mapped back to the ID3v1
 * genre string; TXXX/TXX frames carry a key string followed by a value.
 *
 * Fix: genre is now a signed int matching the "%d" sscanf conversion
 * (scanning into an unsigned with %d is undefined behavior); negative
 * parses are rejected by an explicit range check instead of relying on
 * unsigned wraparound.
 */
static void read_ttag(AVFormatContext *s, AVIOContext *pb, int taglen,
                      AVDictionary **metadata, const char *key)
{
    uint8_t *dst;
    int encoding, dict_flags = AV_DICT_DONT_OVERWRITE | AV_DICT_DONT_STRDUP_VAL;
    int genre;

    if (taglen < 1)
        return;

    encoding = avio_r8(pb);
    taglen--; /* account for encoding type byte */

    if (decode_str(s, pb, encoding, &dst, &taglen) < 0) {
        av_log(s, AV_LOG_ERROR, "Error reading frame %s, skipped\n", key);
        return;
    }

    if (!(strcmp(key, "TCON") && strcmp(key, "TCO")) &&
        (sscanf(dst, "(%d)", &genre) == 1 || sscanf(dst, "%d", &genre) == 1) &&
        genre >= 0 && genre <= ID3v1_GENRE_MAX) {
        /* numeric genre reference: replace it with the ID3v1 genre name */
        av_freep(&dst);
        dst = av_strdup(ff_id3v1_genre_str[genre]);
    } else if (!(strcmp(key, "TXXX") && strcmp(key, "TXX"))) {
        /* dst now contains the key, need to get value */
        key = dst;
        if (decode_str(s, pb, encoding, &dst, &taglen) < 0) {
            av_log(s, AV_LOG_ERROR, "Error reading frame %s, skipped\n", key);
            av_freep(&key);
            return;
        }
        dict_flags |= AV_DICT_DONT_STRDUP_KEY;
    } else if (!*dst)
        /* empty value: drop the buffer so nothing is stored */
        av_freep(&dst);

    if (dst)
        av_dict_set(metadata, key, dst, dict_flags); /* dict takes ownership of dst */
}
 
/**
* Parse GEOB tag into a ID3v2ExtraMetaGEOB struct.
*/
/**
 * Parse GEOB tag into a ID3v2ExtraMeta struct and prepend it to *extra_meta.
 *
 * Fix: the byte count returned by avio_read() is kept in a signed int.
 * Previously it was stored in an unsigned, so a negative (error) return
 * became a huge value that silently passed the `< taglen` truncation check
 * and was recorded as datasize. A read error now aborts the frame.
 */
static void read_geobtag(AVFormatContext *s, AVIOContext *pb, int taglen,
                         char *tag, ID3v2ExtraMeta **extra_meta)
{
    ID3v2ExtraMetaGEOB *geob_data = NULL;
    ID3v2ExtraMeta *new_extra     = NULL;
    char encoding;
    int len;

    if (taglen < 1)
        return;

    geob_data = av_mallocz(sizeof(ID3v2ExtraMetaGEOB));
    if (!geob_data) {
        av_log(s, AV_LOG_ERROR, "Failed to alloc %zu bytes\n",
               sizeof(ID3v2ExtraMetaGEOB));
        return;
    }

    new_extra = av_mallocz(sizeof(ID3v2ExtraMeta));
    if (!new_extra) {
        av_log(s, AV_LOG_ERROR, "Failed to alloc %zu bytes\n",
               sizeof(ID3v2ExtraMeta));
        goto fail;
    }

    /* read encoding type byte */
    encoding = avio_r8(pb);
    taglen--;

    /* read MIME type (always ISO-8859) */
    if (decode_str(s, pb, ID3v2_ENCODING_ISO8859, &geob_data->mime_type,
                   &taglen) < 0 ||
        taglen <= 0)
        goto fail;

    /* read file name */
    if (decode_str(s, pb, encoding, &geob_data->file_name, &taglen) < 0 ||
        taglen <= 0)
        goto fail;

    /* read content description */
    if (decode_str(s, pb, encoding, &geob_data->description, &taglen) < 0 ||
        taglen < 0)
        goto fail;

    if (taglen) {
        /* save encapsulated binary data */
        geob_data->data = av_malloc(taglen);
        if (!geob_data->data) {
            av_log(s, AV_LOG_ERROR, "Failed to alloc %d bytes\n", taglen);
            goto fail;
        }
        len = avio_read(pb, geob_data->data, taglen);
        if (len < 0)
            /* I/O error: discard the whole frame */
            goto fail;
        if (len < taglen)
            av_log(s, AV_LOG_WARNING,
                   "Error reading GEOB frame, data truncated.\n");
        geob_data->datasize = len;
    } else {
        geob_data->data     = NULL;
        geob_data->datasize = 0;
    }

    /* add data to the list */
    new_extra->tag  = "GEOB";
    new_extra->data = geob_data;
    new_extra->next = *extra_meta;
    *extra_meta     = new_extra;

    return;

fail:
    av_log(s, AV_LOG_ERROR, "Error reading frame %s, skipped\n", tag);
    free_geobtag(geob_data);
    av_free(new_extra);
    return;
}
 
/* Return 1 if str consists only of decimal digits (true for ""), else 0. */
static int is_number(const char *str)
{
    const char *p = str;

    while (*p >= '0' && *p <= '9')
        p++;
    return *p == '\0';
}
 
/* Look up tag in m and return its entry only when the value is exactly
 * four decimal digits (a plausible year/MMDD/HHMM field), else NULL. */
static AVDictionaryEntry *get_date_tag(AVDictionary *m, const char *tag)
{
    AVDictionaryEntry *e = av_dict_get(m, tag, NULL, AV_DICT_MATCH_CASE);

    if (e && strlen(e->value) == 4 && is_number(e->value))
        return e;
    return NULL;
}
 
/* Combine the legacy ID3v2.3 date frames (TYER/TDAT/TIME, or their v2.2
 * three-letter forms) into a single "date" metadata entry formatted
 * "YYYY-MM-DD hh:mm", consuming the source entries as they are merged.
 * Each later component is only appended if the earlier ones were present. */
static void merge_date(AVDictionary **m)
{
    AVDictionaryEntry *t;
    char date[17] = { 0 };      // YYYY-MM-DD hh:mm

    /* year is mandatory; without it nothing is merged */
    if (!(t = get_date_tag(*m, "TYER")) &&
        !(t = get_date_tag(*m, "TYE")))
        return;
    av_strlcpy(date, t->value, 5);
    av_dict_set(m, "TYER", NULL, 0);
    av_dict_set(m, "TYE",  NULL, 0);

    /* TDAT stores DDMM, so the two halves are swapped into MM-DD */
    if (!(t = get_date_tag(*m, "TDAT")) &&
        !(t = get_date_tag(*m, "TDA")))
        goto finish;
    snprintf(date + 4, sizeof(date) - 4, "-%.2s-%.2s", t->value + 2, t->value);
    av_dict_set(m, "TDAT", NULL, 0);
    av_dict_set(m, "TDA",  NULL, 0);

    /* TIME stores HHMM */
    if (!(t = get_date_tag(*m, "TIME")) &&
        !(t = get_date_tag(*m, "TIM")))
        goto finish;
    snprintf(date + 10, sizeof(date) - 10,
             " %.2s:%.2s", t->value, t->value + 2);
    av_dict_set(m, "TIME", NULL, 0);
    av_dict_set(m, "TIM",  NULL, 0);

finish:
    if (date[0])
        av_dict_set(m, "date", date, 0);
}
 
/* Release an ID3v2ExtraMetaAPIC: picture buffer, description and struct. */
static void free_apic(void *obj)
{
    ID3v2ExtraMetaAPIC *pic = obj;

    av_buffer_unref(&pic->buf);
    av_freep(&pic->description);
    av_freep(&pic);
}
 
/* Parse an APIC (attached picture) frame into an ID3v2ExtraMetaAPIC node
 * prepended to *extra_meta. On any failure the stream is repositioned to
 * the end of the frame so the caller can continue with the next one. */
static void read_apic(AVFormatContext *s, AVIOContext *pb, int taglen,
                      char *tag, ID3v2ExtraMeta **extra_meta)
{
    int enc, pic_type;
    char mimetype[64];
    const CodecMime *mime = ff_id3v2_mime_tags;
    enum AVCodecID id = AV_CODEC_ID_NONE;
    ID3v2ExtraMetaAPIC *apic  = NULL;
    ID3v2ExtraMeta *new_extra = NULL;
    int64_t end = avio_tell(pb) + taglen;   /* frame end, for error recovery */

    if (taglen <= 4)
        goto fail;

    new_extra = av_mallocz(sizeof(*new_extra));
    apic      = av_mallocz(sizeof(*apic));
    if (!new_extra || !apic)
        goto fail;

    enc = avio_r8(pb);
    taglen--;

    /* mimetype: look up the codec ID in the known-mimetype table */
    taglen -= avio_get_str(pb, taglen, mimetype, sizeof(mimetype));
    while (mime->id != AV_CODEC_ID_NONE) {
        if (!av_strncasecmp(mime->str, mimetype, sizeof(mimetype))) {
            id = mime->id;
            break;
        }
        mime++;
    }
    if (id == AV_CODEC_ID_NONE) {
        av_log(s, AV_LOG_WARNING,
               "Unknown attached picture mimetype: %s, skipping.\n", mimetype);
        goto fail;
    }
    apic->id = id;

    /* picture type: out-of-range values fall back to 0 ("Other") */
    pic_type = avio_r8(pb);
    taglen--;
    if (pic_type < 0 || pic_type >= FF_ARRAY_ELEMS(ff_id3v2_picture_types)) {
        av_log(s, AV_LOG_WARNING, "Unknown attached picture type %d.\n",
               pic_type);
        pic_type = 0;
    }
    apic->type = ff_id3v2_picture_types[pic_type];

    /* description and picture data; decode_str updates taglen so the
     * remainder is exactly the image payload */
    if (decode_str(s, pb, enc, &apic->description, &taglen) < 0) {
        av_log(s, AV_LOG_ERROR,
               "Error decoding attached picture description.\n");
        goto fail;
    }

    apic->buf = av_buffer_alloc(taglen + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!apic->buf || !taglen || avio_read(pb, apic->buf->data, taglen) != taglen)
        goto fail;
    memset(apic->buf->data + taglen, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    new_extra->tag  = "APIC";
    new_extra->data = apic;
    new_extra->next = *extra_meta;
    *extra_meta     = new_extra;

    return;

fail:
    if (apic)
        free_apic(apic);
    av_freep(&new_extra);
    avio_seek(pb, end, SEEK_SET);   /* skip to the end of the bad frame */
}
 
/**
 * Parse a CHAP frame: element ID string, start/end times in ms, then nested
 * text sub-frames that become the chapter's metadata.
 *
 * Fix: the element-ID buffer `dst` was leaked on the `len < 16` early
 * return; it is now freed on that path too.
 */
static void read_chapter(AVFormatContext *s, AVIOContext *pb, int len, char *ttag, ID3v2ExtraMeta **extra_meta)
{
    AVRational time_base = {1, 1000};
    uint32_t start, end;
    AVChapter *chapter;
    uint8_t *dst = NULL;
    int taglen;
    char tag[5];

    /* element ID (NUL-terminated); decode_str leaves dst unset on error */
    if (decode_str(s, pb, 0, &dst, &len) < 0)
        return;
    if (len < 16) {
        av_free(dst);   /* previously leaked on this path */
        return;
    }

    start = avio_rb32(pb);
    end   = avio_rb32(pb);
    avio_skip(pb, 8);   /* byte offsets, unused here */

    chapter = avpriv_new_chapter(s, s->nb_chapters + 1, time_base, start, end, dst);
    if (!chapter) {
        av_free(dst);
        return;
    }

    /* nested sub-frames: 4-byte ID, 4-byte size, 2-byte flags (10 bytes) */
    len -= 16;
    while (len > 10) {
        avio_read(pb, tag, 4);
        tag[4] = 0;
        taglen = avio_rb32(pb);
        avio_skip(pb, 2);
        len -= 10;
        if (taglen < 0 || taglen > len) {
            av_free(dst);
            return;
        }
        if (tag[0] == 'T')
            read_ttag(s, pb, taglen, &chapter->metadata, tag);
        else
            avio_skip(pb, taglen);
        len -= taglen;
    }

    ff_metadata_conv(&chapter->metadata, NULL, ff_id3v2_34_metadata_conv);
    ff_metadata_conv(&chapter->metadata, NULL, ff_id3v2_4_metadata_conv);
    av_free(dst);
}
 
/* Dispatch entry for a special (non-text) frame type: its v2.2 and v2.3/4
 * frame IDs, a parser, and a destructor for the parsed data (may be NULL). */
typedef struct ID3v2EMFunc {
    const char *tag3;   /* 3-letter ID3v2.2 frame ID */
    const char *tag4;   /* 4-letter ID3v2.3/2.4 frame ID */
    void (*read)(AVFormatContext *, AVIOContext *, int, char *,
                 ID3v2ExtraMeta **);
    void (*free)(void *obj);
} ID3v2EMFunc;
 
/* Table of supported special frames; terminated by a NULL tag3 entry.
 * CHAP has no free callback because read_chapter stores chapters directly
 * on the AVFormatContext instead of the extra_meta list. */
static const ID3v2EMFunc id3v2_extra_meta_funcs[] = {
    { "GEO",  "GEOB", read_geobtag, free_geobtag },
    { "PIC",  "APIC", read_apic,    free_apic    },
    { "CHAP", "CHAP", read_chapter, NULL         },
    { NULL }
};
 
/**
* Get the corresponding ID3v2EMFunc struct for a tag.
* @param isv34 Determines if v2.2 or v2.3/4 strings are used
* @return A pointer to the ID3v2EMFunc struct if found, NULL otherwise.
*/
static const ID3v2EMFunc *get_extra_meta_func(const char *tag, int isv34)
{
int i = 0;
while (id3v2_extra_meta_funcs[i].tag3) {
if (tag && !memcmp(tag,
(isv34 ? id3v2_extra_meta_funcs[i].tag4 :
id3v2_extra_meta_funcs[i].tag3),
(isv34 ? 4 : 3)))
return &id3v2_extra_meta_funcs[i];
i++;
}
return NULL;
}
 
/* Parse the body of one ID3v2 tag of the given version and header flags.
 * Walks every frame, handling unsynchronization and (with zlib) per-frame
 * compression, dispatching text frames to read_ttag() and special frames
 * (GEOB/APIC/CHAP) to their handlers. On exit the stream is positioned
 * just past the tag (including the optional v2.4 footer). */
static void id3v2_parse(AVFormatContext *s, int len, uint8_t version,
                        uint8_t flags, ID3v2ExtraMeta **extra_meta)
{
    int isv34, unsync;
    unsigned tlen;
    char tag[5];
    int64_t next, end = avio_tell(s->pb) + len;
    int taghdrlen;
    const char *reason = NULL;
    AVIOContext pb;
    AVIOContext *pbx;
    unsigned char *buffer = NULL;               /* de-unsynced frame bytes */
    int buffer_size = 0;
    const ID3v2EMFunc *extra_func = NULL;
    unsigned char *uncompressed_buffer = NULL;  /* zlib output, if used */
    int uncompressed_buffer_size = 0;

    av_log(s, AV_LOG_DEBUG, "id3v2 ver:%d flags:%02X len:%d\n", version, flags, len);

    switch (version) {
    case 2:
        /* v2.2 flag bit 0x40 means compression, which is unsupported here */
        if (flags & 0x40) {
            reason = "compression";
            goto error;
        }
        isv34     = 0;
        taghdrlen = 6;      /* 3-byte ID + 3-byte size */
        break;

    case 3:
    case 4:
        isv34     = 1;
        taghdrlen = 10;     /* 4-byte ID + 4-byte size + 2-byte flags */
        break;

    default:
        reason = "version";
        goto error;
    }

    unsync = flags & 0x80;  /* tag-wide unsynchronization flag */

    if (isv34 && flags & 0x40) { /* Extended header present, just skip over it */
        int extlen = get_size(s->pb, 4);
        if (version == 4)
            /* In v2.4 the length includes the length field we just read. */
            extlen -= 4;

        if (extlen < 0) {
            reason = "invalid extended header length";
            goto error;
        }
        avio_skip(s->pb, extlen);
        len -= extlen + 4;
        if (len < 0) {
            reason = "extended header too long.";
            goto error;
        }
    }

    /* main frame loop */
    while (len >= taghdrlen) {
        unsigned int tflags = 0;
        int tunsync         = 0;
        int tcomp           = 0;
        int tencr           = 0;
        unsigned long dlen;

        if (isv34) {
            avio_read(s->pb, tag, 4);
            tag[4] = 0;
            if (version == 3) {
                tlen = avio_rb32(s->pb);    /* v2.3 sizes are plain 32-bit */
            } else
                tlen = get_size(s->pb, 4);  /* v2.4 sizes are syncsafe */
            tflags  = avio_rb16(s->pb);
            tunsync = tflags & ID3v2_FLAG_UNSYNCH;
        } else {
            avio_read(s->pb, tag, 3);
            tag[3] = 0;
            tlen   = avio_rb24(s->pb);
        }
        /* reject implausibly large frame sizes */
        if (tlen > (1<<28))
            break;
        len -= taghdrlen + tlen;

        if (len < 0)
            break;

        next = avio_tell(s->pb) + tlen;

        if (!tlen) {
            if (tag[0])
                av_log(s, AV_LOG_DEBUG, "Invalid empty frame %s, skipping.\n",
                       tag);
            continue;
        }

        /* optional 4-byte uncompressed-data-length prefix */
        if (tflags & ID3v2_FLAG_DATALEN) {
            if (tlen < 4)
                break;
            dlen = avio_rb32(s->pb);
            tlen -= 4;
        } else
            dlen = tlen;

        tcomp = tflags & ID3v2_FLAG_COMPRESSION;
        tencr = tflags & ID3v2_FLAG_ENCRYPTION;

        /* skip encrypted tags and, if no zlib, compressed tags */
        if (tencr || (!CONFIG_ZLIB && tcomp)) {
            const char *type;
            if (!tcomp)
                type = "encrypted";
            else if (!tencr)
                type = "compressed";
            else
                type = "encrypted and compressed";

            av_log(s, AV_LOG_WARNING, "Skipping %s ID3v2 frame %s.\n", type, tag);
            avio_skip(s->pb, tlen);
        /* check for text tag or supported special meta tag */
        } else if (tag[0] == 'T' ||
                   (extra_meta &&
                    (extra_func = get_extra_meta_func(tag, isv34)))) {
            pbx = s->pb;    /* read straight from the input by default */

            if (unsync || tunsync || tcomp) {
                av_fast_malloc(&buffer, &buffer_size, tlen);
                if (!buffer) {
                    av_log(s, AV_LOG_ERROR, "Failed to alloc %d bytes\n", tlen);
                    goto seek;
                }
            }
            if (unsync || tunsync) {
                /* undo unsynchronization: drop the 0x00 stuffed after 0xff */
                int64_t end = avio_tell(s->pb) + tlen;
                uint8_t *b;

                b = buffer;
                while (avio_tell(s->pb) < end && b - buffer < tlen && !s->pb->eof_reached) {
                    *b++ = avio_r8(s->pb);
                    if (*(b - 1) == 0xff && avio_tell(s->pb) < end - 1 &&
                        b - buffer < tlen &&
                        !s->pb->eof_reached ) {
                        uint8_t val = avio_r8(s->pb);
                        *b++ = val ? val : avio_r8(s->pb);
                    }
                }
                ffio_init_context(&pb, buffer, b - buffer, 0, NULL, NULL, NULL,
                                  NULL);
                tlen = b - buffer;
                pbx  = &pb; // read from sync buffer
            }

#if CONFIG_ZLIB
            if (tcomp) {
                int err;

                av_log(s, AV_LOG_DEBUG, "Compresssed frame %s tlen=%d dlen=%ld\n", tag, tlen, dlen);

                av_fast_malloc(&uncompressed_buffer, &uncompressed_buffer_size, dlen);
                if (!uncompressed_buffer) {
                    av_log(s, AV_LOG_ERROR, "Failed to alloc %ld bytes\n", dlen);
                    goto seek;
                }

                /* if not already de-unsynced into buffer, read the raw bytes */
                if (!(unsync || tunsync)) {
                    err = avio_read(s->pb, buffer, tlen);
                    if (err < 0) {
                        av_log(s, AV_LOG_ERROR, "Failed to read compressed tag\n");
                        goto seek;
                    }
                    tlen = err;
                }

                err = uncompress(uncompressed_buffer, &dlen, buffer, tlen);
                if (err != Z_OK) {
                    av_log(s, AV_LOG_ERROR, "Failed to uncompress tag: %d\n", err);
                    goto seek;
                }
                ffio_init_context(&pb, uncompressed_buffer, dlen, 0, NULL, NULL, NULL, NULL);
                tlen = dlen;
                pbx  = &pb; // read from sync buffer
            }
#endif
            if (tag[0] == 'T')
                /* parse text tag */
                read_ttag(s, pbx, tlen, &s->metadata, tag);
            else
                /* parse special meta tag */
                extra_func->read(s, pbx, tlen, tag, extra_meta);
        } else if (!tag[0]) {
            /* a zero frame ID marks the start of padding */
            if (tag[1])
                av_log(s, AV_LOG_WARNING, "invalid frame id, assuming padding\n");
            avio_skip(s->pb, tlen);
            break;
        }
        /* Skip to end of tag */
seek:
        avio_seek(s->pb, next, SEEK_SET);
    }

    /* Footer preset, always 10 bytes, skip over it */
    if (version == 4 && flags & 0x10)
        end += 10;

error:
    if (reason)
        av_log(s, AV_LOG_INFO, "ID3v2.%d tag skipped, cannot handle %s\n",
               version, reason);
    avio_seek(s->pb, end, SEEK_SET);
    av_free(buffer);
    av_free(uncompressed_buffer);
    return;
}
 
/**
 * Read all consecutive ID3v2 tags at the current stream position, then
 * convert the collected frame IDs to generic metadata keys and merge the
 * legacy date frames. If no tag is found the stream position is restored.
 */
void ff_id3v2_read(AVFormatContext *s, const char *magic,
                   ID3v2ExtraMeta **extra_meta)
{
    uint8_t buf[ID3v2_HEADER_SIZE];
    int found_header = 0;

    do {
        /* remember where this candidate header starts so we can rewind */
        int64_t off = avio_tell(s->pb);

        if (avio_read(s->pb, buf, ID3v2_HEADER_SIZE) != ID3v2_HEADER_SIZE) {
            avio_seek(s->pb, off, SEEK_SET);
            break;
        }

        found_header = ff_id3v2_match(buf, magic);
        if (found_header) {
            /* syncsafe 28-bit tag length from the header */
            int len = ((buf[6] & 0x7f) << 21) |
                      ((buf[7] & 0x7f) << 14) |
                      ((buf[8] & 0x7f) <<  7) |
                       (buf[9] & 0x7f);
            id3v2_parse(s, len, buf[3], buf[5], extra_meta);
        } else {
            avio_seek(s->pb, off, SEEK_SET);
        }
    } while (found_header);

    ff_metadata_conv(&s->metadata, NULL, ff_id3v2_34_metadata_conv);
    ff_metadata_conv(&s->metadata, NULL, id3v2_2_metadata_conv);
    ff_metadata_conv(&s->metadata, NULL, ff_id3v2_4_metadata_conv);
    merge_date(&s->metadata);
}
 
/**
 * Free the whole extra-metadata list.
 *
 * Fixes: the header documents that *extra_meta is set to NULL, but the
 * code never reset it, leaving callers with a dangling pointer — it is now
 * cleared. Also guards against a NULL free callback (the CHAP table entry
 * has free == NULL), instead of calling through it unconditionally.
 */
void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta)
{
    ID3v2ExtraMeta *current = *extra_meta, *next;
    const ID3v2EMFunc *extra_func;

    while (current) {
        if ((extra_func = get_extra_meta_func(current->tag, 1)) &&
            extra_func->free)
            extra_func->free(current->data);
        next = current->next;
        av_freep(&current);
        current = next;
    }
    *extra_meta = NULL;
}
 
/**
 * Create one attached-picture video stream per APIC entry in the
 * extra-metadata list; ownership of each picture buffer moves to the
 * stream's attached_pic packet.
 *
 * @return 0 on success, AVERROR(ENOMEM) if a stream cannot be created
 */
int ff_id3v2_parse_apic(AVFormatContext *s, ID3v2ExtraMeta **extra_meta)
{
    ID3v2ExtraMeta *meta;

    for (meta = *extra_meta; meta; meta = meta->next) {
        ID3v2ExtraMetaAPIC *apic;
        AVStream *st;

        if (strcmp(meta->tag, "APIC"))
            continue;
        apic = meta->data;

        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);

        st->disposition      |= AV_DISPOSITION_ATTACHED_PIC;
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = apic->id;
        av_dict_set(&st->metadata, "title",   apic->description, 0);
        av_dict_set(&st->metadata, "comment", apic->type,        0);

        av_init_packet(&st->attached_pic);
        st->attached_pic.buf          = apic->buf;
        st->attached_pic.data         = apic->buf->data;
        /* the buffer was over-allocated by the input padding amount */
        st->attached_pic.size         = apic->buf->size - FF_INPUT_BUFFER_PADDING_SIZE;
        st->attached_pic.stream_index = st->index;
        st->attached_pic.flags       |= AV_PKT_FLAG_KEY;

        /* ownership transferred to the packet */
        apic->buf = NULL;
    }

    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/id3v2.h
0,0 → 1,163
/*
* ID3v2 header parser
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_ID3V2_H
#define AVFORMAT_ID3V2_H
 
#include <stdint.h>
#include "avformat.h"
#include "internal.h"
#include "metadata.h"
 
#define ID3v2_HEADER_SIZE 10
 
/**
* Default magic bytes for ID3v2 header: "ID3"
*/
#define ID3v2_DEFAULT_MAGIC "ID3"
 
#define ID3v2_FLAG_DATALEN 0x0001
#define ID3v2_FLAG_UNSYNCH 0x0002
#define ID3v2_FLAG_ENCRYPTION 0x0004
#define ID3v2_FLAG_COMPRESSION 0x0008
 
/** Text encoding byte values used at the start of ID3v2 text frames. */
enum ID3v2Encoding {
    ID3v2_ENCODING_ISO8859  = 0,
    ID3v2_ENCODING_UTF16BOM = 1,
    ID3v2_ENCODING_UTF16BE  = 2,
    ID3v2_ENCODING_UTF8     = 3,
};

/** State carried between ff_id3v2_start() and ff_id3v2_finish(). */
typedef struct ID3v2EncContext {
    int      version;   ///< ID3v2 minor version, either 3 or 4
    int64_t  size_pos;  ///< offset of the tag total size
    int      len;       ///< size of the tag written so far
} ID3v2EncContext;

/** Linked-list node for special (non-text) metadata collected while parsing. */
typedef struct ID3v2ExtraMeta {
    const char *tag;                 ///< frame ID, e.g. "GEOB" or "APIC"
    void *data;                      ///< tag-specific struct (GEOB/APIC below)
    struct ID3v2ExtraMeta *next;     ///< next node, or NULL at the list end
} ID3v2ExtraMeta;

/** Payload of a GEOB (general encapsulated object) frame. */
typedef struct ID3v2ExtraMetaGEOB {
    uint32_t datasize;               ///< number of bytes in data
    uint8_t *mime_type;
    uint8_t *file_name;
    uint8_t *description;
    uint8_t *data;                   ///< encapsulated binary payload
} ID3v2ExtraMetaGEOB;

/** Payload of an APIC (attached picture) frame. */
typedef struct ID3v2ExtraMetaAPIC {
    AVBufferRef *buf;                ///< image data (padded)
    const char *type;                ///< entry from ff_id3v2_picture_types
    uint8_t *description;
    enum AVCodecID id;               ///< codec deduced from the mimetype
} ID3v2ExtraMetaAPIC;
 
/**
* Detect ID3v2 Header.
* @param buf must be ID3v2_HEADER_SIZE byte long
* @param magic magic bytes to identify the header.
* If in doubt, use ID3v2_DEFAULT_MAGIC.
*/
int ff_id3v2_match(const uint8_t *buf, const char *magic);
 
/**
* Get the length of an ID3v2 tag.
* @param buf must be ID3v2_HEADER_SIZE bytes long and point to the start of an
* already detected ID3v2 tag
*/
int ff_id3v2_tag_len(const uint8_t *buf);
 
/**
* Read an ID3v2 tag, including supported extra metadata
* @param extra_meta If not NULL, extra metadata is parsed into a list of
* ID3v2ExtraMeta structs and *extra_meta points to the head of the list
*/
void ff_id3v2_read(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta);
 
/**
* Initialize an ID3v2 tag.
*/
void ff_id3v2_start(ID3v2EncContext *id3, AVIOContext *pb, int id3v2_version,
const char *magic);
 
/**
* Convert and write all global metadata from s into an ID3v2 tag.
*/
int ff_id3v2_write_metadata(AVFormatContext *s, ID3v2EncContext *id3);
 
/**
* Write an attached picture from pkt into an ID3v2 tag.
*/
int ff_id3v2_write_apic(AVFormatContext *s, ID3v2EncContext *id3, AVPacket *pkt);
 
/**
* Finalize an opened ID3v2 tag.
*/
void ff_id3v2_finish(ID3v2EncContext *id3, AVIOContext *pb);
 
/**
* Write an ID3v2 tag containing all global metadata from s.
* @param id3v2_version Subversion of ID3v2; supported values are 3 and 4
* @param magic magic bytes to identify the header
* If in doubt, use ID3v2_DEFAULT_MAGIC.
*/
int ff_id3v2_write_simple(struct AVFormatContext *s, int id3v2_version, const char *magic);
 
/**
* Free memory allocated parsing special (non-text) metadata.
* @param extra_meta Pointer to a pointer to the head of a ID3v2ExtraMeta list, *extra_meta is set to NULL.
*/
void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta);
 
/**
* Create a stream for each APIC (attached picture) extracted from the
* ID3v2 header.
*/
int ff_id3v2_parse_apic(AVFormatContext *s, ID3v2ExtraMeta **extra_meta);
 
extern const AVMetadataConv ff_id3v2_34_metadata_conv[];
extern const AVMetadataConv ff_id3v2_4_metadata_conv[];
 
/**
* A list of text information frames allowed in both ID3 v2.3 and v2.4
* http://www.id3.org/id3v2.4.0-frames
* http://www.id3.org/id3v2.4.0-changes
*/
extern const char ff_id3v2_tags[][4];
 
/**
* ID3v2.4-only text information frames.
*/
extern const char ff_id3v2_4_tags[][4];
 
/**
* ID3v2.3-only text information frames.
*/
extern const char ff_id3v2_3_tags[][4];
 
extern const CodecMime ff_id3v2_mime_tags[];
 
extern const char *ff_id3v2_picture_types[21];
 
#endif /* AVFORMAT_ID3V2_H */
/contrib/sdk/sources/ffmpeg/libavformat/id3v2enc.c
0,0 → 1,355
/*
* ID3v2 header writer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdint.h>
#include <string.h>
 
#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "avio.h"
#include "avio_internal.h"
#include "id3v2.h"
 
#define PADDING_BYTES 10
 
/**
 * Write a 28-bit ID3v2 "synchsafe" integer: four bytes carrying 7 payload
 * bits each, most significant group first.
 */
static void id3v2_put_size(AVIOContext *pb, int size)
{
    int shift;

    for (shift = 21; shift >= 0; shift -= 7)
        avio_w8(pb, (size >> shift) & 0x7f);
}
 
/** Return 1 if every byte of the NUL-terminated string is 7-bit ASCII, else 0. */
static int string_is_ascii(const uint8_t *str)
{
    for (; *str; str++)
        if (*str >= 128)
            return 0;
    return 1;
}
 
/**
 * Write str to pb in the requested encoding: UTF-16LE preceded by a BOM
 * for ID3v2_ENCODING_UTF16BOM, a plain byte string otherwise.
 */
static void id3v2_encode_string(AVIOContext *pb, const uint8_t *str,
                                enum ID3v2Encoding enc)
{
    if (enc == ID3v2_ENCODING_UTF16BOM) {
        avio_wl16(pb, 0xFEFF); /* byte order mark */
        avio_put_str16le(pb, str);
    } else {
        avio_put_str(pb, str);
    }
}
 
/**
 * Write a text frame with one (normal frames) or two (TXXX frames) strings
 * according to encoding (only UTF-8 or UTF-16+BOM supported).
 * @return number of bytes written or a negative error code.
 */
static int id3v2_put_ttag(ID3v2EncContext *id3, AVIOContext *avioc, const char *str1, const char *str2,
                          uint32_t tag, enum ID3v2Encoding enc)
{
    int len;
    uint8_t *pb;
    AVIOContext *dyn_buf;

    /* assemble the frame body in a dynamic buffer first, so the frame
     * size is known before the frame header has to be written */
    if (avio_open_dyn_buf(&dyn_buf) < 0)
        return AVERROR(ENOMEM);

    /* check if the strings are ASCII-only and use UTF16 only if
     * they're not */
    if (enc == ID3v2_ENCODING_UTF16BOM && string_is_ascii(str1) &&
        (!str2 || string_is_ascii(str2)))
        enc = ID3v2_ENCODING_ISO8859;

    avio_w8(dyn_buf, enc);  /* text encoding byte */
    id3v2_encode_string(dyn_buf, str1, enc);
    if (str2)
        id3v2_encode_string(dyn_buf, str2, enc);
    len = avio_close_dyn_buf(dyn_buf, &pb);

    /* frame header: 4-byte ID, 4-byte size, 2-byte flags */
    avio_wb32(avioc, tag);
    /* ID3v2.3 frame size is not synchsafe */
    if (id3->version == 3)
        avio_wb32(avioc, len);
    else
        id3v2_put_size(avioc, len);  /* v2.4 uses a synchsafe size */
    avio_wb16(avioc, 0);  /* frame flags: none */
    avio_write(avioc, pb, len);

    av_freep(&pb);
    return len + ID3v2_HEADER_SIZE;
}
 
/**
 * Write metadata entry t as a standard text frame if its 4-character key
 * (starting with 'T') appears in table.
 * @return bytes written on success, -1 if the key is not a known frame.
 */
static int id3v2_check_write_tag(ID3v2EncContext *id3, AVIOContext *pb, AVDictionaryEntry *t,
                                 const char table[][4], enum ID3v2Encoding enc)
{
    uint32_t tag;
    int i;

    /* all ID3v2 text information frames are 4 chars and start with 'T' */
    if (t->key[0] != 'T' || strlen(t->key) != 4)
        return -1;
    tag = AV_RB32(t->key);
    /* table is terminated by an all-zero entry */
    for (i = 0; *table[i]; i++)
        if (tag == AV_RB32(table[i]))
            return id3v2_put_ttag(id3, pb, t->value, NULL, tag, enc);
    return -1;
}
 
/**
 * ID3v2.3 has no single date frame: split a "date" entry in "YYYY-MM-DD"
 * form into the v2.3 TYER (year) and TDAT (DDMM) frames. All other
 * entries are copied unchanged; *pm is replaced by the new dictionary.
 */
static void id3v2_3_metadata_split_date(AVDictionary **pm)
{
    AVDictionaryEntry *mtag = NULL;
    AVDictionary *dst = NULL;
    const char *key, *value;
    char year[5] = {0}, day_month[5] = {0};
    int i;

    while ((mtag = av_dict_get(*pm, "", mtag, AV_DICT_IGNORE_SUFFIX))) {
        key = mtag->key;
        if (!av_strcasecmp(key, "date")) {
            /* split date tag using "YYYY-MM-DD" format into year and month/day segments */
            value = mtag->value;
            i = 0;
            /* scan past the leading digits (the year) */
            while (value[i] >= '0' && value[i] <= '9') i++;
            if (value[i] == '\0' || value[i] == '-') {
                /* at most 4 year digits are kept (year[] is 5 bytes) */
                av_strlcpy(year, value, sizeof(year));
                av_dict_set(&dst, "TYER", year, 0);

                /* accept "-MM-DD" after the year, optionally followed by
                 * a space and further text (e.g. a time) */
                if (value[i] == '-' &&
                    value[i+1] >= '0' && value[i+1] <= '1' &&
                    value[i+2] >= '0' && value[i+2] <= '9' &&
                    value[i+3] == '-' &&
                    value[i+4] >= '0' && value[i+4] <= '3' &&
                    value[i+5] >= '0' && value[i+5] <= '9' &&
                    (value[i+6] == '\0' || value[i+6] == ' ')) {
                    /* TDAT stores day first, then month (DDMM) */
                    snprintf(day_month, sizeof(day_month), "%.2s%.2s", value + i + 4, value + i + 1);
                    av_dict_set(&dst, "TDAT", day_month, 0);
                }
            } else
                av_dict_set(&dst, key, value, 0);
        } else
            av_dict_set(&dst, key, mtag->value, 0);
    }
    av_dict_free(pm);
    *pm = dst;
}
 
/**
 * Begin an ID3v2 tag: write the magic, version and flag bytes and record
 * where the total tag size must later be patched in by ff_id3v2_finish().
 */
void ff_id3v2_start(ID3v2EncContext *id3, AVIOContext *pb, int id3v2_version,
                    const char *magic)
{
    id3->version = id3v2_version;

    /* 3-byte magic followed by the major version byte */
    avio_wb32(pb, MKBETAG(magic[0], magic[1], magic[2], id3v2_version));
    avio_w8(pb, 0);  /* revision */
    avio_w8(pb, 0); /* flags */

    /* reserve space for size */
    id3->size_pos = avio_tell(pb);
    avio_wb32(pb, 0);
}
 
/**
 * Convert *metadata to ID3v2 key names and write every entry as a text
 * frame; entries with no matching standard frame become TXXX frames.
 * id3->len is increased by the number of bytes written.
 * @return 0 on success, a negative error code if a TXXX write fails.
 */
static int write_metadata(AVIOContext *pb, AVDictionary **metadata,
                          ID3v2EncContext *id3, int enc)
{
    AVDictionaryEntry *t = NULL;
    int ret;

    /* map generic metadata keys to ID3v2 frame names */
    ff_metadata_conv(metadata, ff_id3v2_34_metadata_conv, NULL);
    if (id3->version == 3)
        id3v2_3_metadata_split_date(metadata);
    else if (id3->version == 4)
        ff_metadata_conv(metadata, ff_id3v2_4_metadata_conv, NULL);

    while ((t = av_dict_get(*metadata, "", t, AV_DICT_IGNORE_SUFFIX))) {
        /* frames valid in both v2.3 and v2.4 */
        if ((ret = id3v2_check_write_tag(id3, pb, t, ff_id3v2_tags, enc)) > 0) {
            id3->len += ret;
            continue;
        }
        /* version-specific frames */
        if ((ret = id3v2_check_write_tag(id3, pb, t, id3->version == 3 ?
                                         ff_id3v2_3_tags : ff_id3v2_4_tags, enc)) > 0) {
            id3->len += ret;
            continue;
        }

        /* unknown tag, write as TXXX frame */
        if ((ret = id3v2_put_ttag(id3, pb, t->key, t->value, MKBETAG('T', 'X', 'X', 'X'), enc)) < 0)
            return ret;
        id3->len += ret;
    }

    return 0;
}
 
/**
 * Write one CHAP (chapter) frame for the chapter with index id.
 *
 * The frame body (element-ID string, start/end times in milliseconds, two
 * 0xFFFFFFFF byte offsets meaning "not set", plus the chapter's own
 * metadata frames) is assembled in a dynamic buffer so the frame size is
 * known before the header is written. id3->len is updated accordingly.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int write_chapter(AVFormatContext *s, ID3v2EncContext *id3, int id, int enc)
{
    const AVRational time_base = {1, 1000};  /* CHAP times are in ms */
    AVChapter *ch = s->chapters[id];
    uint8_t *dyn_buf = NULL;
    AVIOContext *dyn_bc = NULL;
    char name[123];
    int len, start, end, ret;

    if ((ret = avio_open_dyn_buf(&dyn_bc)) < 0)
        goto fail;

    start = av_rescale_q(ch->start, ch->time_base, time_base);
    end   = av_rescale_q(ch->end,   ch->time_base, time_base);

    /* element ID: a unique NUL-terminated identifier for this chapter.
     * Bound by the buffer size instead of a magic 122 so the two cannot
     * drift apart. */
    snprintf(name, sizeof(name), "ch%d", id);
    id3->len += avio_put_str(dyn_bc, name);
    avio_wb32(dyn_bc, start);
    avio_wb32(dyn_bc, end);
    avio_wb32(dyn_bc, 0xFFFFFFFFu);  /* start byte offset: not set */
    avio_wb32(dyn_bc, 0xFFFFFFFFu);  /* end byte offset: not set */

    if ((ret = write_metadata(dyn_bc, &ch->metadata, id3, enc)) < 0)
        goto fail;

    len = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    id3->len += 16 + ID3v2_HEADER_SIZE;  /* the fixed part of the body */

    avio_wb32(s->pb, MKBETAG('C', 'H', 'A', 'P'));
    avio_wb32(s->pb, len);
    avio_wb16(s->pb, 0);  /* frame flags */
    avio_write(s->pb, dyn_buf, len);

fail:
    /* on the error path the dynamic buffer may still be open */
    if (dyn_bc && !dyn_buf)
        avio_close_dyn_buf(dyn_bc, &dyn_buf);
    av_freep(&dyn_buf);

    return ret;
}
 
/**
 * Write all global metadata and all chapters of s as ID3v2 frames.
 * @return 0 on success, a negative AVERROR code on failure.
 */
int ff_id3v2_write_metadata(AVFormatContext *s, ID3v2EncContext *id3)
{
    int i, ret;
    int enc;

    /* v2.3 prefers UTF-16 with BOM, v2.4 allows UTF-8 */
    enc = (id3->version == 3) ? ID3v2_ENCODING_UTF16BOM
                              : ID3v2_ENCODING_UTF8;

    ret = write_metadata(s->pb, &s->metadata, id3, enc);
    if (ret < 0)
        return ret;

    for (i = 0; i < s->nb_chapters; i++) {
        ret = write_chapter(s, id3, i, enc);
        if (ret < 0)
            return ret;
    }

    return 0;
}
 
/**
 * Write the attached picture in pkt as an ID3v2 APIC frame.
 * The mimetype is derived from the picture stream's codec id, the picture
 * type from the stream's "comment" tag and the description from its
 * "title" tag. id3->len is increased by the frame size.
 * @return 0 on success, a negative AVERROR code on failure.
 */
int ff_id3v2_write_apic(AVFormatContext *s, ID3v2EncContext *id3, AVPacket *pkt)
{
    AVStream *st = s->streams[pkt->stream_index];
    AVDictionaryEntry *e;

    AVIOContext *dyn_buf;
    uint8_t *buf;
    const CodecMime *mime = ff_id3v2_mime_tags;
    const char *mimetype = NULL, *desc = "";
    int enc = id3->version == 3 ? ID3v2_ENCODING_UTF16BOM :
                                  ID3v2_ENCODING_UTF8;
    int i, len, type = 0;

    /* get the mimetype matching the picture stream's codec id */
    while (mime->id != AV_CODEC_ID_NONE) {
        if (mime->id == st->codec->codec_id) {
            mimetype = mime->str;
            break;
        }
        mime++;
    }
    if (!mimetype) {
        av_log(s, AV_LOG_ERROR, "No mimetype is known for stream %d, cannot "
               "write an attached picture.\n", st->index);
        return AVERROR(EINVAL);
    }

    /* get the picture type: prefix-match the "comment" tag against the
     * table of standard APIC type descriptions */
    e = av_dict_get(st->metadata, "comment", NULL, 0);
    for (i = 0; e && i < FF_ARRAY_ELEMS(ff_id3v2_picture_types); i++) {
        if (strstr(ff_id3v2_picture_types[i], e->value) == ff_id3v2_picture_types[i]) {
            type = i;
            break;
        }
    }

    /* get the description from the stream's "title" tag */
    if ((e = av_dict_get(st->metadata, "title", NULL, 0)))
        desc = e->value;

    /* use UTF16 only for non-ASCII strings */
    if (enc == ID3v2_ENCODING_UTF16BOM && string_is_ascii(desc))
        enc = ID3v2_ENCODING_ISO8859;

    /* start writing; frame body = encoding byte, mimetype, picture type,
     * description string, raw picture data */
    if (avio_open_dyn_buf(&dyn_buf) < 0)
        return AVERROR(ENOMEM);

    avio_w8(dyn_buf, enc);
    avio_put_str(dyn_buf, mimetype);
    avio_w8(dyn_buf, type);
    id3v2_encode_string(dyn_buf, desc, enc);
    avio_write(dyn_buf, pkt->data, pkt->size);
    len = avio_close_dyn_buf(dyn_buf, &buf);

    /* frame header: v2.3 sizes are plain big-endian, v2.4 synchsafe */
    avio_wb32(s->pb, MKBETAG('A', 'P', 'I', 'C'));
    if (id3->version == 3)
        avio_wb32(s->pb, len);
    else
        id3v2_put_size(s->pb, len);
    avio_wb16(s->pb, 0);  /* frame flags */
    avio_write(s->pb, buf, len);
    av_freep(&buf);

    id3->len += len + ID3v2_HEADER_SIZE;

    return 0;
}
 
/**
 * Finalize the ID3v2 tag: append padding, then seek back and patch the
 * total (synchsafe) tag size into the slot reserved by ff_id3v2_start().
 */
void ff_id3v2_finish(ID3v2EncContext *id3, AVIOContext *pb)
{
    int64_t cur_pos;

    /* adding an arbitrary amount of padding bytes at the end of the
     * ID3 metadata fixes cover art display for some software (iTunes,
     * Traktor, Serato, Torq) */
    ffio_fill(pb, 0, PADDING_BYTES);
    id3->len += PADDING_BYTES;

    /* patch the total tag size into the header, then restore position */
    cur_pos = avio_tell(pb);
    avio_seek(pb, id3->size_pos, SEEK_SET);
    id3v2_put_size(pb, id3->len);
    avio_seek(pb, cur_pos, SEEK_SET);
}
 
/**
 * Convenience wrapper: write a complete ID3v2 tag (header, all metadata
 * frames, padding and patched size) in one call.
 * @return 0 on success, a negative AVERROR code on failure.
 */
int ff_id3v2_write_simple(struct AVFormatContext *s, int id3v2_version,
                          const char *magic)
{
    int ret;
    ID3v2EncContext id3 = { 0 };

    ff_id3v2_start(&id3, s->pb, id3v2_version, magic);

    ret = ff_id3v2_write_metadata(s, &id3);
    if (ret < 0)
        return ret;

    ff_id3v2_finish(&id3, s->pb);
    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/idcin.c
0,0 → 1,380
/*
* id Quake II CIN File Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* id Quake II CIN file demuxer by Mike Melanson (melanson@pcisys.net)
* For more information about the id CIN format, visit:
* http://www.csse.monash.edu.au/~timf/
*
* CIN is a somewhat quirky and ill-defined format. Here are some notes
* for anyone trying to understand the technical details of this format:
*
* The format has no definite file signature. This is problematic for a
* general-purpose media player that wants to automatically detect file
* types. However, a CIN file does start with 5 32-bit numbers that
* specify audio and video parameters. This demuxer gets around the lack
* of file signature by performing sanity checks on those parameters.
* Probabalistically, this is a reasonable solution since the number of
* valid combinations of the 5 parameters is a very small subset of the
* total 160-bit number space.
*
* Refer to the function idcin_probe() for the precise A/V parameters
* that this demuxer allows.
*
* Next, each audio and video frame has a duration of 1/14 sec. If the
* audio sample rate is a multiple of the common frequency 22050 Hz it will
* divide evenly by 14. However, if the sample rate is 11025 Hz:
* 11025 (samples/sec) / 14 (frames/sec) = 787.5 (samples/frame)
* The way the CIN stores audio in this case is by storing 787 sample
* frames in the first audio frame and 788 sample frames in the second
* audio frame. Therefore, the total number of bytes in an audio frame
* is given as:
* audio frame #0: 787 * (bytes/sample) * (# channels) bytes in frame
* audio frame #1: 788 * (bytes/sample) * (# channels) bytes in frame
* audio frame #2: 787 * (bytes/sample) * (# channels) bytes in frame
* audio frame #3: 788 * (bytes/sample) * (# channels) bytes in frame
*
* Finally, not all id CIN creation tools agree on the resolution of the
* color palette, apparently. Some creation tools specify red, green, and
* blue palette components in terms of 6-bit VGA color DAC values which
* range from 0..63. Other tools specify the RGB components as full 8-bit
* values that range from 0..255. Since there are no markers in the file to
* differentiate between the two variants, this demuxer uses the following
* heuristic:
* - load the 768 palette bytes from disk
* - assume that they will need to be shifted left by 2 bits to
* transform them from 6-bit values to 8-bit values
* - scan through all 768 palette bytes
* - if any bytes exceed 63, do not shift the bytes at all before
* transmitting them to the video decoder
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define HUFFMAN_TABLE_SIZE (64 * 1024)
#define IDCIN_FPS 14
 
typedef struct IdcinDemuxContext {
    int video_stream_index;   ///< index of the video AVStream
    int audio_stream_index;   ///< index of the audio AVStream (if audio is present)
    int audio_chunk_size1;    ///< bytes in even-numbered audio chunks (floor(rate/14) samples)
    int audio_chunk_size2;    ///< bytes in odd-numbered audio chunks (one sample more when rate % 14 != 0)
    int block_align;          ///< bytes per sample frame: bytes_per_sample * channels

    /* demux state variables */
    int current_audio_chunk;  ///< toggles 0/1 to alternate the two chunk sizes
    int next_chunk_is_video;  ///< 1 if the next chunk to read is video
    int audio_present;        ///< nonzero if the file carries an audio stream
    int64_t first_pkt_pos;    ///< byte offset of the first packet, used for seeking back
} IdcinDemuxContext;
 
/**
 * Probe for an id CIN file by sanity-checking the 5-field header.
 * @return AVPROBE_SCORE_EXTENSION on a plausible header, 0 otherwise.
 */
static int idcin_probe(AVProbeData *p)
{
    unsigned int number, sample_rate;

    /*
     * This is what you could call a "probabilistic" file check: id CIN
     * files don't have a definite file signature. In lieu of such a marker,
     * perform sanity checks on the 5 32-bit header fields:
     *   width, height: greater than 0, less than or equal to 1024
     *   audio sample rate: greater than or equal to 8000, less than or
     *     equal to 48000, or 0 for no audio
     *   audio sample width (bytes/sample): 0 for no audio, or 1 or 2
     *   audio channels: 0 for no audio, or 1 or 2
     */

    /* check we have enough data to do all checks, otherwise the
       0-padding may cause a wrong recognition */
    if (p->buf_size < 20)
        return 0;

    /* check the video width */
    number = AV_RL32(&p->buf[0]);
    if ((number == 0) || (number > 1024))
        return 0;

    /* check the video height */
    number = AV_RL32(&p->buf[4]);
    if ((number == 0) || (number > 1024))
        return 0;

    /* check the audio sample rate */
    sample_rate = AV_RL32(&p->buf[8]);
    if (sample_rate && (sample_rate < 8000 || sample_rate > 48000))
        return 0;

    /* check the audio bytes/sample; must be non-zero when audio is
     * present (parentheses make the &&/|| precedence explicit) */
    number = AV_RL32(&p->buf[12]);
    if (number > 2 || (sample_rate && !number))
        return 0;

    /* check the audio channels; same constraint as bytes/sample */
    number = AV_RL32(&p->buf[16]);
    if (number > 2 || (sample_rate && !number))
        return 0;

    /* return half certainty since this check is a bit sketchy */
    return AVPROBE_SCORE_EXTENSION;
}
 
/**
 * Read and validate the 5-field CIN header, create the video stream
 * (loading the 64 KiB Huffman tables into extradata) and, when a sample
 * rate is present, the audio stream with its alternating chunk sizes.
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int idcin_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    IdcinDemuxContext *idcin = s->priv_data;
    AVStream *st;
    unsigned int width, height;
    unsigned int sample_rate, bytes_per_sample, channels;
    int ret;

    /* get the 5 header parameters */
    width = avio_rl32(pb);
    height = avio_rl32(pb);
    sample_rate = avio_rl32(pb);
    bytes_per_sample = avio_rl32(pb);
    channels = avio_rl32(pb);

    if (s->pb->eof_reached) {
        av_log(s, AV_LOG_ERROR, "incomplete header\n");
        return s->pb->error ? s->pb->error : AVERROR_EOF;
    }

    if (av_image_check_size(width, height, 0, s) < 0)
        return AVERROR_INVALIDDATA;
    if (sample_rate > 0) {
        /* the rate must yield at least one sample per 1/14 s frame */
        if (sample_rate < 14 || sample_rate > INT_MAX) {
            av_log(s, AV_LOG_ERROR, "invalid sample rate: %u\n", sample_rate);
            return AVERROR_INVALIDDATA;
        }
        if (bytes_per_sample < 1 || bytes_per_sample > 2) {
            av_log(s, AV_LOG_ERROR, "invalid bytes per sample: %u\n",
                   bytes_per_sample);
            return AVERROR_INVALIDDATA;
        }
        if (channels < 1 || channels > 2) {
            av_log(s, AV_LOG_ERROR, "invalid channels: %u\n", channels);
            return AVERROR_INVALIDDATA;
        }
        idcin->audio_present = 1;
    } else {
        /* if sample rate is 0, assume no audio */
        idcin->audio_present = 0;
    }

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 33, 1, IDCIN_FPS);
    st->start_time = 0;
    idcin->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_IDCIN;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = width;
    st->codec->height = height;

    /* load up the Huffman tables into extradata */
    if (ff_alloc_extradata(st->codec, HUFFMAN_TABLE_SIZE))
        return AVERROR(ENOMEM);
    ret = avio_read(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE);
    if (ret < 0) {
        return ret;
    } else if (ret != HUFFMAN_TABLE_SIZE) {
        av_log(s, AV_LOG_ERROR, "incomplete header\n");
        return AVERROR(EIO);
    }

    if (idcin->audio_present) {
        idcin->audio_present = 1;  /* NOTE: redundant, already set above */
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        avpriv_set_pts_info(st, 63, 1, sample_rate);
        st->start_time = 0;
        idcin->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag = 1;
        st->codec->channels = channels;
        st->codec->channel_layout = channels > 1 ? AV_CH_LAYOUT_STEREO :
                                                   AV_CH_LAYOUT_MONO;
        st->codec->sample_rate = sample_rate;
        st->codec->bits_per_coded_sample = bytes_per_sample * 8;
        st->codec->bit_rate = sample_rate * bytes_per_sample * 8 * channels;
        st->codec->block_align = idcin->block_align = bytes_per_sample * channels;
        if (bytes_per_sample == 1)
            st->codec->codec_id = AV_CODEC_ID_PCM_U8;
        else
            st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;

        /* when the rate does not divide evenly by 14 fps, audio chunks
         * alternate between floor and floor+1 samples per frame */
        if (sample_rate % 14 != 0) {
            idcin->audio_chunk_size1 = (sample_rate / 14) *
                bytes_per_sample * channels;
            idcin->audio_chunk_size2 = (sample_rate / 14 + 1) *
                bytes_per_sample * channels;
        } else {
            idcin->audio_chunk_size1 = idcin->audio_chunk_size2 =
                (sample_rate / 14) * bytes_per_sample * channels;
        }
        idcin->current_audio_chunk = 0;
    }

    idcin->next_chunk_is_video = 1;
    idcin->first_pkt_pos = avio_tell(s->pb);

    return 0;
}
 
/**
 * Read the next chunk, alternating between video and audio when audio is
 * present. A video chunk may start with a palette-change command; the
 * palette is then attached to the packet as AV_PKT_DATA_PALETTE side
 * data.
 * @return 0 on success, a negative AVERROR code on failure/EOF.
 */
static int idcin_read_packet(AVFormatContext *s,
                             AVPacket *pkt)
{
    int ret;
    unsigned int command;
    unsigned int chunk_size;
    IdcinDemuxContext *idcin = s->priv_data;
    AVIOContext *pb = s->pb;
    int i;
    int palette_scale;
    unsigned char r, g, b;
    unsigned char palette_buffer[768];
    uint32_t palette[256];

    if (url_feof(s->pb))
        return s->pb->error ? s->pb->error : AVERROR_EOF;

    if (idcin->next_chunk_is_video) {
        /* command word: 1 = palette change follows, 2 = end of stream */
        command = avio_rl32(pb);
        if (command == 2) {
            return AVERROR(EIO);
        } else if (command == 1) {
            /* trigger a palette change */
            ret = avio_read(pb, palette_buffer, 768);
            if (ret < 0) {
                return ret;
            } else if (ret != 768) {
                av_log(s, AV_LOG_ERROR, "incomplete packet\n");
                return AVERROR(EIO);
            }
            /* scale the palette as necessary: assume 6-bit VGA components
             * (shift left by 2) unless any component exceeds 63 */
            palette_scale = 2;
            for (i = 0; i < 768; i++)
                if (palette_buffer[i] > 63) {
                    palette_scale = 0;
                    break;
                }

            for (i = 0; i < 256; i++) {
                r = palette_buffer[i * 3    ] << palette_scale;
                g = palette_buffer[i * 3 + 1] << palette_scale;
                b = palette_buffer[i * 3 + 2] << palette_scale;
                palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | (b);
                if (palette_scale == 2)
                    /* replicate the top 2 bits into the low bits so that
                     * 6-bit 0x3F maps to 0xFF instead of 0xFC */
                    palette[i] |= palette[i] >> 6 & 0x30303;
            }
        }

        if (s->pb->eof_reached) {
            av_log(s, AV_LOG_ERROR, "incomplete packet\n");
            return s->pb->error ? s->pb->error : AVERROR_EOF;
        }
        chunk_size = avio_rl32(pb);
        if (chunk_size < 4 || chunk_size > INT_MAX - 4) {
            av_log(s, AV_LOG_ERROR, "invalid chunk size: %u\n", chunk_size);
            return AVERROR_INVALIDDATA;
        }
        /* skip the number of decoded bytes (always equal to width * height) */
        avio_skip(pb, 4);
        /* chunk_size >= 4 is guaranteed by the check above, so this
         * cannot underflow (a second chunk_size < 4 test was dead code) */
        chunk_size -= 4;
        ret= av_get_packet(pb, pkt, chunk_size);
        if (ret < 0)
            return ret;
        else if (ret != chunk_size) {
            av_log(s, AV_LOG_ERROR, "incomplete packet\n");
            av_free_packet(pkt);
            return AVERROR(EIO);
        }
        if (command == 1) {
            /* attach the new palette as packet side data */
            uint8_t *pal;

            pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                          AVPALETTE_SIZE);
            if (!pal) {
                av_free_packet(pkt);
                return AVERROR(ENOMEM);
            }
            memcpy(pal, palette, AVPALETTE_SIZE);
            pkt->flags |= AV_PKT_FLAG_KEY;
        }
        pkt->stream_index = idcin->video_stream_index;
        pkt->duration = 1;
    } else {
        /* send out the audio chunk; sizes alternate (see read_header) */
        if (idcin->current_audio_chunk)
            chunk_size = idcin->audio_chunk_size2;
        else
            chunk_size = idcin->audio_chunk_size1;
        ret= av_get_packet(pb, pkt, chunk_size);
        if (ret < 0)
            return ret;
        pkt->stream_index = idcin->audio_stream_index;
        pkt->duration = chunk_size / idcin->block_align;

        idcin->current_audio_chunk ^= 1;
    }

    if (idcin->audio_present)
        idcin->next_chunk_is_video ^= 1;

    return 0;
}
 
/**
 * Seek support: only rewinding to the first packet is possible, since the
 * format has no index. Resets the demux state accordingly.
 * @return 0 on success, a negative value on failure.
 */
static int idcin_read_seek(AVFormatContext *s, int stream_index,
                           int64_t timestamp, int flags)
{
    IdcinDemuxContext *idcin = s->priv_data;
    int ret;

    if (idcin->first_pkt_pos <= 0)
        return -1;

    ret = avio_seek(s->pb, idcin->first_pkt_pos, SEEK_SET);
    if (ret < 0)
        return ret;

    ff_update_cur_dts(s, s->streams[idcin->video_stream_index], 0);
    idcin->next_chunk_is_video = 1;
    idcin->current_audio_chunk = 0;
    return 0;
}
 
/* demuxer registration for id Quake II CIN files */
AVInputFormat ff_idcin_demuxer = {
    .name           = "idcin",
    .long_name      = NULL_IF_CONFIG_SMALL("id Cinematic"),
    .priv_data_size = sizeof(IdcinDemuxContext),
    .read_probe     = idcin_probe,
    .read_header    = idcin_read_header,
    .read_packet    = idcin_read_packet,
    .read_seek      = idcin_read_seek,
    .flags          = AVFMT_NO_BYTE_SEEK,
};
/contrib/sdk/sources/ffmpeg/libavformat/idroqdec.c
0,0 → 1,244
/*
* id RoQ (.roq) File Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* id RoQ format file demuxer
* by Mike Melanson (melanson@pcisys.net)
* for more information on the .roq file format, visit:
* http://www.csse.monash.edu.au/~timf/
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
 
#define RoQ_MAGIC_NUMBER 0x1084
#define RoQ_CHUNK_PREAMBLE_SIZE 8
#define RoQ_AUDIO_SAMPLE_RATE 22050
#define RoQ_CHUNKS_TO_SCAN 30
 
#define RoQ_INFO 0x1001
#define RoQ_QUAD_CODEBOOK 0x1002
#define RoQ_QUAD_VQ 0x1011
#define RoQ_SOUND_MONO 0x1020
#define RoQ_SOUND_STEREO 0x1021
 
typedef struct RoqDemuxContext {

    int frame_rate;       ///< video frame rate from the file header
    int width;            ///< video width (from the INFO chunk)
    int height;           ///< video height (from the INFO chunk)
    int audio_channels;   ///< 1 or 2 once a sound chunk has been seen, else 0

    int video_stream_index;  ///< AVStream index of the video stream, -1 until created
    int audio_stream_index;  ///< AVStream index of the audio stream, -1 until created

    int64_t video_pts;       ///< pts of the next video packet (frame counter)
    unsigned int audio_frame_count;  ///< audio pts accumulator (chunk bytes / channels)

} RoqDemuxContext;
 
/**
 * Probe for a RoQ file: magic 0x1084 followed by a 0xFFFFFFFF chunk size.
 */
static int roq_probe(AVProbeData *p)
{
    if (AV_RL16(&p->buf[0]) == RoQ_MAGIC_NUMBER &&
        AV_RL32(&p->buf[2]) == 0xFFFFFFFF)
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/**
 * Read the 8-byte RoQ file header (the frame rate lives in its last two
 * bytes) and initialize the demuxer state. Streams are created lazily
 * later, so AVFMTCTX_NOHEADER is set.
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int roq_read_header(AVFormatContext *s)
{
    RoqDemuxContext *roq = s->priv_data;
    unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];

    /* get the main header */
    if (avio_read(s->pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
        RoQ_CHUNK_PREAMBLE_SIZE)
        return AVERROR(EIO);

    roq->frame_rate = AV_RL16(&preamble[6]);

    /* init private context parameters */
    roq->width             = 0;
    roq->height            = 0;
    roq->audio_channels    = 0;
    roq->video_pts         = 0;
    roq->audio_frame_count = 0;
    roq->audio_stream_index = -1;
    roq->video_stream_index = -1;

    s->ctx_flags |= AVFMTCTX_NOHEADER;

    return 0;
}
 
/**
 * Read the next RoQ chunk and emit a packet. Streams are created lazily
 * when their INFO / sound chunks are first seen (AVFMTCTX_NOHEADER).
 * A codebook chunk is never emitted alone: the packet bundles the
 * codebook with the following VQ chunk.
 * @return >= 0 on success, a negative AVERROR code on failure.
 */
static int roq_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    RoqDemuxContext *roq = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = 0;
    unsigned int chunk_size;
    unsigned int chunk_type;
    unsigned int codebook_size;
    unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
    int packet_read = 0;
    int64_t codebook_offset;

    while (!packet_read) {

        if (url_feof(s->pb))
            return AVERROR(EIO);

        /* get the next chunk preamble */
        if ((ret = avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
            RoQ_CHUNK_PREAMBLE_SIZE)
            return AVERROR(EIO);

        chunk_type = AV_RL16(&preamble[0]);
        chunk_size = AV_RL32(&preamble[2]);
        if(chunk_size > INT_MAX)
            return AVERROR_INVALIDDATA;

        /* never request more than the input can still deliver */
        chunk_size = ffio_limit(pb, chunk_size);

        switch (chunk_type) {

        case RoQ_INFO:
            if (roq->video_stream_index == -1) {
                /* first INFO chunk: create the video stream and read
                 * width/height from the chunk payload */
                AVStream *st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                avpriv_set_pts_info(st, 63, 1, roq->frame_rate);
                roq->video_stream_index = st->index;
                st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                st->codec->codec_id = AV_CODEC_ID_ROQ;
                st->codec->codec_tag = 0;  /* no fourcc */

                if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) != RoQ_CHUNK_PREAMBLE_SIZE)
                    return AVERROR(EIO);
                st->codec->width  = roq->width  = AV_RL16(preamble);
                st->codec->height = roq->height = AV_RL16(preamble + 2);
                break;
            }
            /* don't care about this chunk anymore */
            avio_skip(pb, RoQ_CHUNK_PREAMBLE_SIZE);
            break;

        case RoQ_QUAD_CODEBOOK:
            if (roq->video_stream_index < 0)
                return AVERROR_INVALIDDATA;
            /* packet needs to contain both this codebook and next VQ chunk */
            codebook_offset = avio_tell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
            codebook_size = chunk_size;
            avio_skip(pb, codebook_size);
            /* peek at the next chunk's preamble to learn its size */
            if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
                RoQ_CHUNK_PREAMBLE_SIZE)
                return AVERROR(EIO);
            /* combined size: both preambles + codebook + VQ payload */
            chunk_size = AV_RL32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
                         codebook_size;

            /* rewind */
            avio_seek(pb, codebook_offset, SEEK_SET);

            /* load up the packet */
            ret= av_get_packet(pb, pkt, chunk_size);
            if (ret != chunk_size)
                return AVERROR(EIO);
            pkt->stream_index = roq->video_stream_index;
            pkt->pts = roq->video_pts++;

            packet_read = 1;
            break;

        case RoQ_SOUND_MONO:
        case RoQ_SOUND_STEREO:
            if (roq->audio_stream_index == -1) {
                /* first sound chunk: create the audio stream */
                AVStream *st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                avpriv_set_pts_info(st, 32, 1, RoQ_AUDIO_SAMPLE_RATE);
                roq->audio_stream_index = st->index;
                st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id = AV_CODEC_ID_ROQ_DPCM;
                st->codec->codec_tag = 0;  /* no tag */
                if (chunk_type == RoQ_SOUND_STEREO) {
                    st->codec->channels = 2;
                    st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
                } else {
                    st->codec->channels = 1;
                    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
                }
                roq->audio_channels = st->codec->channels;
                st->codec->sample_rate = RoQ_AUDIO_SAMPLE_RATE;
                st->codec->bits_per_coded_sample = 16;
                st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
                                      st->codec->bits_per_coded_sample;
                st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
            }
            /* fall through: sound chunks share the packet-loading path below */
        case RoQ_QUAD_VQ:
            if (chunk_type == RoQ_QUAD_VQ) {
                if (roq->video_stream_index < 0)
                    return AVERROR_INVALIDDATA;
            }

            /* load up the packet, keeping the preamble in front of the data */
            if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE))
                return AVERROR(EIO);
            /* copy over preamble */
            memcpy(pkt->data, preamble, RoQ_CHUNK_PREAMBLE_SIZE);

            if (chunk_type == RoQ_QUAD_VQ) {
                pkt->stream_index = roq->video_stream_index;
                pkt->pts = roq->video_pts++;
            } else {
                pkt->stream_index = roq->audio_stream_index;
                pkt->pts = roq->audio_frame_count;
                roq->audio_frame_count += (chunk_size / roq->audio_channels);
            }

            pkt->pos= avio_tell(pb);
            ret = avio_read(pb, pkt->data + RoQ_CHUNK_PREAMBLE_SIZE,
                            chunk_size);
            if (ret != chunk_size)
                ret = AVERROR(EIO);

            packet_read = 1;
            break;

        default:
            av_log(s, AV_LOG_ERROR, " unknown RoQ chunk (%04X)\n", chunk_type);
            return AVERROR_INVALIDDATA;
        }
    }

    return ret;
}
 
/* demuxer registration for id RoQ (.roq) files */
AVInputFormat ff_roq_demuxer = {
    .name           = "roq",
    .long_name      = NULL_IF_CONFIG_SMALL("id RoQ"),
    .priv_data_size = sizeof(RoqDemuxContext),
    .read_probe     = roq_probe,
    .read_header    = roq_read_header,
    .read_packet    = roq_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/idroqenc.c
0,0 → 1,46
/*
* id RoQ (.roq) File muxer
* Copyright (c) 2007 Vitor Sessak
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rawenc.h"
 
 
/**
 * Write the fixed 8-byte RoQ file signature: magic 0x1084 (little endian),
 * the 0xFFFFFFFF "whole file" chunk size, and a framerate of 30 (0x001E).
 */
static int roq_write_header(struct AVFormatContext *s)
{
    static const uint8_t header[] = {
        0x84, 0x10, 0xFF, 0xFF, 0xFF, 0xFF, 0x1E, 0x00
    };

    avio_write(s->pb, header, sizeof(header));
    avio_flush(s->pb);

    return 0;
}
 
/* muxer registration; packets are written raw after the fixed header */
AVOutputFormat ff_roq_muxer = {
    .name         = "roq",
    .long_name    = NULL_IF_CONFIG_SMALL("raw id RoQ"),
    .extensions   = "roq",
    .audio_codec  = AV_CODEC_ID_ROQ_DPCM,
    .video_codec  = AV_CODEC_ID_ROQ,
    .write_header = roq_write_header,
    .write_packet = ff_raw_write_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/iff.c
0,0 → 1,485
/*
* Copyright (c) 2008 Jaikrishnan Menon <realityman@gmx.net>
* Copyright (c) 2010 Peter Ross <pross@xvid.org>
* Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* IFF file demuxer
* by Jaikrishnan Menon
* for more information on the .iff file format, visit:
* http://wiki.multimedia.cx/index.php?title=IFF
*/
 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "internal.h"
 
#define ID_8SVX MKTAG('8','S','V','X')
#define ID_16SV MKTAG('1','6','S','V')
#define ID_MAUD MKTAG('M','A','U','D')
#define ID_MHDR MKTAG('M','H','D','R')
#define ID_MDAT MKTAG('M','D','A','T')
#define ID_VHDR MKTAG('V','H','D','R')
#define ID_ATAK MKTAG('A','T','A','K')
#define ID_RLSE MKTAG('R','L','S','E')
#define ID_CHAN MKTAG('C','H','A','N')
#define ID_PBM MKTAG('P','B','M',' ')
#define ID_ILBM MKTAG('I','L','B','M')
#define ID_BMHD MKTAG('B','M','H','D')
#define ID_DGBL MKTAG('D','G','B','L')
#define ID_CAMG MKTAG('C','A','M','G')
#define ID_CMAP MKTAG('C','M','A','P')
#define ID_ACBM MKTAG('A','C','B','M')
#define ID_DEEP MKTAG('D','E','E','P')
#define ID_RGB8 MKTAG('R','G','B','8')
#define ID_RGBN MKTAG('R','G','B','N')
 
#define ID_FORM MKTAG('F','O','R','M')
#define ID_ANNO MKTAG('A','N','N','O')
#define ID_AUTH MKTAG('A','U','T','H')
#define ID_CHRS MKTAG('C','H','R','S')
#define ID_COPYRIGHT MKTAG('(','c',')',' ')
#define ID_CSET MKTAG('C','S','E','T')
#define ID_FVER MKTAG('F','V','E','R')
#define ID_NAME MKTAG('N','A','M','E')
#define ID_TEXT MKTAG('T','E','X','T')
#define ID_ABIT MKTAG('A','B','I','T')
#define ID_BODY MKTAG('B','O','D','Y')
#define ID_DBOD MKTAG('D','B','O','D')
#define ID_DPEL MKTAG('D','P','E','L')
#define ID_DLOC MKTAG('D','L','O','C')
#define ID_TVDC MKTAG('T','V','D','C')
 
#define LEFT 2
#define RIGHT 4
#define STEREO 6
 
/**
* This number of bytes if added at the beginning of each AVPacket
* which contain additional information about video properties
* which has to be shared between demuxer and decoder.
* This number may change between frames, e.g. the demuxer might
* set it to smallest possible size of 2 to indicate that there's
* no extradata changing in this frame.
*/
#define IFF_EXTRA_VIDEO_SIZE 41
 
/* 8SVX audio compression methods, as read from the VHDR chunk */
typedef enum {
    COMP_NONE,  ///< uncompressed PCM
    COMP_FIB,   ///< Fibonacci-delta encoding
    COMP_EXP    ///< exponential-delta encoding
} svx8_compression_type;
 
typedef struct {
    int64_t body_pos;      ///< offset of the data payload chunk (presumably set when BODY/MDAT is found — not visible here)
    int64_t body_end;      ///< offset just past the data payload
    uint32_t body_size;    ///< size of the data payload in bytes
    svx8_compression_type svx8_compression; ///< 8SVX compression method from the VHDR chunk
    unsigned maud_bits;          ///< MAUD bits per sample
    unsigned maud_compression;   ///< MAUD compression method
    unsigned bitmap_compression; ///< delta compression method used
    unsigned bpp;          ///< bits per plane to decode (differs from bits_per_coded_sample if HAM)
    unsigned ham;          ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise)
    unsigned flags;        ///< 1 for EHB, 0 is no extra half darkening
    unsigned transparency; ///< transparency color index in palette
    unsigned masking;      ///< masking method used
    uint8_t tvdc[32];      ///< TVDC lookup table
} IffDemuxContext;
 
/**
 * Read data_size bytes from the input and store them, NUL-terminated,
 * under key tag in the demuxer's metadata dictionary. Ownership of the
 * buffer is passed to the dictionary (AV_DICT_DONT_STRDUP_VAL).
 *
 * @param s         format context to read from / attach metadata to
 * @param tag       metadata key
 * @param data_size number of payload bytes to read
 * @return 0 on success, a negative AVERROR code on failure
 */
static int get_metadata(AVFormatContext *s,
                        const char *const tag,
                        const unsigned data_size)
{
    /* guard against data_size + 1 overflowing to 0 before allocating */
    uint8_t *buf = ((data_size + 1) == 0) ? NULL : av_malloc(data_size + 1);

    if (!buf)
        return AVERROR(ENOMEM);

    /* reject short reads too, not only negative error codes: a partial
     * read would leave uninitialized heap bytes in the metadata value */
    if (avio_read(s->pb, buf, data_size) != data_size) {
        av_free(buf);
        return AVERROR(EIO);
    }
    buf[data_size] = 0;
    av_dict_set(&s->metadata, tag, buf, AV_DICT_DONT_STRDUP_VAL);
    return 0;
}
 
/**
 * Probe for an IFF file: a FORM container whose content type at offset 8
 * is one of the supported audio or image formats.
 */
static int iff_probe(AVProbeData *p)
{
    const uint8_t *d = p->buf;
    uint32_t content;

    if (AV_RL32(d) != ID_FORM)
        return 0;

    content = AV_RL32(d + 8);
    switch (content) {
    case ID_8SVX:
    case ID_16SV:
    case ID_MAUD:
    case ID_PBM:
    case ID_ACBM:
    case ID_DEEP:
    case ID_ILBM:
    case ID_RGB8:
    case ID_RGBN:
        return AVPROBE_SCORE_MAX;
    }
    return 0;
}
 
/* Byte patterns matched against the DPEL chunk contents to select a
 * pixel format (see the ID_DPEL case in iff_read_header). */
static const uint8_t deep_rgb24[] = {0, 0, 0, 3, 0, 1, 0, 8, 0, 2, 0, 8, 0, 3, 0, 8};
static const uint8_t deep_rgba[]  = {0, 0, 0, 4, 0, 1, 0, 8, 0, 2, 0, 8, 0, 3, 0, 8};
static const uint8_t deep_bgra[]  = {0, 0, 0, 4, 0, 3, 0, 8, 0, 2, 0, 8, 0, 1, 0, 8};
static const uint8_t deep_argb[]  = {0, 0, 0, 4, 0,17, 0, 8, 0, 1, 0, 8, 0, 2, 0, 8};
static const uint8_t deep_abgr[]  = {0, 0, 0, 4, 0,17, 0, 8, 0, 3, 0, 8, 0, 2, 0, 8};
 
/**
 * Parse the IFF FORM header and all top-level chunks, filling in the single
 * audio or video stream's parameters.  The payload (BODY/DBOD/ABIT/MDAT)
 * position is remembered in IffDemuxContext so iff_read_packet() can seek
 * back to it after header parsing reaches EOF.
 *
 * @return 0 on success, a negative AVERROR code on malformed input
 */
static int iff_read_header(AVFormatContext *s)
{
    IffDemuxContext *iff = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint8_t *buf;
    uint32_t chunk_id, data_size;
    uint32_t screenmode = 0, num, den;
    unsigned transparency = 0;
    unsigned masking = 0; // no mask
    uint8_t fmt[16];
    int fmt_size;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->channels = 1;
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    avio_skip(pb, 8); // "FORM" tag + FORM size
    // codec_tag used by ByteRun1 decoder to distinguish progressive (PBM) and interlaced (ILBM) content
    st->codec->codec_tag = avio_rl32(pb);
    // -1 means "not seen yet"; the format-specific chunks below overwrite these
    iff->bitmap_compression = -1;
    iff->svx8_compression = -1;
    iff->maud_bits = -1;
    iff->maud_compression = -1;

    // Each chunk is <id:4><big-endian size:4><payload, padded to even size>.
    while (!url_feof(pb)) {
        uint64_t orig_pos;
        int res;
        const char *metadata_tag = NULL;
        chunk_id = avio_rl32(pb);
        data_size = avio_rb32(pb);
        orig_pos = avio_tell(pb);

        switch (chunk_id) {
        case ID_VHDR: // 8SVX/16SV voice header: sample rate and compression
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

            if (data_size < 14)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 12);
            st->codec->sample_rate = avio_rb16(pb);
            if (data_size >= 16) {
                avio_skip(pb, 1);
                iff->svx8_compression = avio_r8(pb);
            }
            break;

        case ID_MHDR: // MAUD header: bit depth, rate fraction, channels, compression
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

            if (data_size < 32)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 4);
            iff->maud_bits = avio_rb16(pb);
            avio_skip(pb, 2);
            num = avio_rb32(pb);
            den = avio_rb16(pb);
            if (!den)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 2);
            st->codec->sample_rate = num / den; // rate stored as a fraction
            st->codec->channels = avio_rb16(pb);
            iff->maud_compression = avio_rb16(pb);
            if (st->codec->channels == 1)
                st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            else if (st->codec->channels == 2)
                st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            break;

        // Payload chunks: remember position/size; data is read per packet later.
        case ID_ABIT:
        case ID_BODY:
        case ID_DBOD:
        case ID_MDAT:
            iff->body_pos = avio_tell(pb);
            iff->body_end = iff->body_pos + data_size;
            iff->body_size = data_size;
            break;

        case ID_CHAN: // channel mask: values below STEREO (6) mean mono
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            if (avio_rb32(pb) < 6) {
                st->codec->channels = 1;
                st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            } else {
                st->codec->channels = 2;
                st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            }
            break;

        case ID_CAMG: // Amiga viewport mode; HAM/EHB bits evaluated after the loop
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            screenmode = avio_rb32(pb);
            break;

        case ID_CMAP: // palette, stored after the extradata header area
            if (data_size < 3 || data_size > 768 || data_size % 3) {
                av_log(s, AV_LOG_ERROR, "Invalid CMAP chunk size %d\n",
                       data_size);
                return AVERROR_INVALIDDATA;
            }
            st->codec->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            if (avio_read(pb, st->codec->extradata + IFF_EXTRA_VIDEO_SIZE, data_size) < 0)
                return AVERROR(EIO);
            break;

        case ID_BMHD: // ILBM/PBM bitmap header: size, depth, masking, compression
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            if (data_size <= 8)
                return AVERROR_INVALIDDATA;
            st->codec->width = avio_rb16(pb);
            st->codec->height = avio_rb16(pb);
            avio_skip(pb, 4); // x, y offset
            st->codec->bits_per_coded_sample = avio_r8(pb);
            // trailing fields are optional; only read what the chunk contains
            if (data_size >= 10)
                masking = avio_r8(pb);
            if (data_size >= 11)
                iff->bitmap_compression = avio_r8(pb);
            if (data_size >= 14) {
                avio_skip(pb, 1); // padding
                transparency = avio_rb16(pb);
            }
            if (data_size >= 16) {
                st->sample_aspect_ratio.num = avio_r8(pb);
                st->sample_aspect_ratio.den = avio_r8(pb);
            }
            break;

        case ID_DPEL: // DEEP pixel layout: match against the known tables above
            if (data_size < 4 || (data_size & 3))
                return AVERROR_INVALIDDATA;
            if ((fmt_size = avio_read(pb, fmt, sizeof(fmt))) < 0)
                return fmt_size;
            if (fmt_size == sizeof(deep_rgb24) && !memcmp(fmt, deep_rgb24, sizeof(deep_rgb24)))
                st->codec->pix_fmt = AV_PIX_FMT_RGB24;
            else if (fmt_size == sizeof(deep_rgba) && !memcmp(fmt, deep_rgba, sizeof(deep_rgba)))
                st->codec->pix_fmt = AV_PIX_FMT_RGBA;
            else if (fmt_size == sizeof(deep_bgra) && !memcmp(fmt, deep_bgra, sizeof(deep_bgra)))
                st->codec->pix_fmt = AV_PIX_FMT_BGRA;
            else if (fmt_size == sizeof(deep_argb) && !memcmp(fmt, deep_argb, sizeof(deep_argb)))
                st->codec->pix_fmt = AV_PIX_FMT_ARGB;
            else if (fmt_size == sizeof(deep_abgr) && !memcmp(fmt, deep_abgr, sizeof(deep_abgr)))
                st->codec->pix_fmt = AV_PIX_FMT_ABGR;
            else {
                avpriv_request_sample(s, "color format %.16s", fmt);
                return AVERROR_PATCHWELCOME;
            }
            break;

        case ID_DGBL: // DEEP global header
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            if (data_size < 8)
                return AVERROR_INVALIDDATA;
            st->codec->width = avio_rb16(pb);
            st->codec->height = avio_rb16(pb);
            iff->bitmap_compression = avio_rb16(pb);
            st->sample_aspect_ratio.num = avio_r8(pb);
            st->sample_aspect_ratio.den = avio_r8(pb);
            st->codec->bits_per_coded_sample = 24;
            break;

        case ID_DLOC: // display size override
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            st->codec->width = avio_rb16(pb);
            st->codec->height = avio_rb16(pb);
            break;

        case ID_TVDC: // 32-byte TVDC delta lookup table
            if (data_size < sizeof(iff->tvdc))
                return AVERROR_INVALIDDATA;
            res = avio_read(pb, iff->tvdc, sizeof(iff->tvdc));
            if (res < 0)
                return res;
            break;

        // Textual chunks become metadata entries (read below).
        case ID_ANNO:
        case ID_TEXT: metadata_tag = "comment"; break;
        case ID_AUTH: metadata_tag = "artist"; break;
        case ID_COPYRIGHT: metadata_tag = "copyright"; break;
        case ID_NAME: metadata_tag = "title"; break;
        }

        if (metadata_tag) {
            if ((res = get_metadata(s, metadata_tag, data_size)) < 0) {
                av_log(s, AV_LOG_ERROR, "cannot allocate metadata tag %s!\n", metadata_tag);
                return res;
            }
        }
        // Skip whatever is left of the chunk; chunks are padded to even length.
        avio_skip(pb, data_size - (avio_tell(pb) - orig_pos) + (data_size & 1));
    }

    avio_seek(pb, iff->body_pos, SEEK_SET);

    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate);

        if (st->codec->codec_tag == ID_16SV)
            st->codec->codec_id = AV_CODEC_ID_PCM_S16BE_PLANAR;
        else if (st->codec->codec_tag == ID_MAUD) {
            // Map MAUD bit depth + compression to a PCM codec id.
            if (iff->maud_bits == 8 && !iff->maud_compression) {
                st->codec->codec_id = AV_CODEC_ID_PCM_U8;
            } else if (iff->maud_bits == 16 && !iff->maud_compression) {
                st->codec->codec_id = AV_CODEC_ID_PCM_S16BE;
            } else if (iff->maud_bits == 8 && iff->maud_compression == 2) {
                st->codec->codec_id = AV_CODEC_ID_PCM_ALAW;
            } else if (iff->maud_bits == 8 && iff->maud_compression == 3) {
                st->codec->codec_id = AV_CODEC_ID_PCM_MULAW;
            } else {
                avpriv_request_sample(s, "compression %d and bit depth %d", iff->maud_compression, iff->maud_bits);
                return AVERROR_PATCHWELCOME;
            }

            st->codec->bits_per_coded_sample =
                av_get_bits_per_sample(st->codec->codec_id);

            st->codec->block_align =
                st->codec->bits_per_coded_sample * st->codec->channels / 8;
        } else { // plain 8SVX/16SV body
            switch (iff->svx8_compression) {
            case COMP_NONE:
                st->codec->codec_id = AV_CODEC_ID_PCM_S8_PLANAR;
                break;
            case COMP_FIB:
                st->codec->codec_id = AV_CODEC_ID_8SVX_FIB;
                break;
            case COMP_EXP:
                st->codec->codec_id = AV_CODEC_ID_8SVX_EXP;
                break;
            default:
                av_log(s, AV_LOG_ERROR,
                       "Unknown SVX8 compression method '%d'\n", iff->svx8_compression);
                return -1;
            }
        }

        // NOTE(review): this overwrites the MAUD block_align computed above
        // with channels * bits (no division by 8) -- confirm this is what the
        // decoders expect before changing it.
        st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id);
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
        break;

    case AVMEDIA_TYPE_VIDEO:
        iff->bpp = st->codec->bits_per_coded_sample;
        if ((screenmode & 0x800 /* Hold And Modify */) && iff->bpp <= 8) {
            iff->ham = iff->bpp > 6 ? 6 : 4;
            st->codec->bits_per_coded_sample = 24;
        }
        iff->flags = (screenmode & 0x80 /* Extra HalfBrite */) && iff->bpp <= 8;
        iff->masking = masking;
        iff->transparency = transparency;

        // No CMAP chunk seen: allocate extradata for the header area only.
        if (!st->codec->extradata) {
            st->codec->extradata_size = IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata = av_malloc(IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
        }
        av_assert0(st->codec->extradata_size >= IFF_EXTRA_VIDEO_SIZE);
        // Serialize the video-property header shared with the decoder.
        buf = st->codec->extradata;
        bytestream_put_be16(&buf, IFF_EXTRA_VIDEO_SIZE);
        bytestream_put_byte(&buf, iff->bitmap_compression);
        bytestream_put_byte(&buf, iff->bpp);
        bytestream_put_byte(&buf, iff->ham);
        bytestream_put_byte(&buf, iff->flags);
        bytestream_put_be16(&buf, iff->transparency);
        bytestream_put_byte(&buf, iff->masking);
        bytestream_put_buffer(&buf, iff->tvdc, sizeof(iff->tvdc));
        st->codec->codec_id = AV_CODEC_ID_IFF_ILBM;
        break;
    default:
        return -1;
    }

    return 0;
}
 
/**
 * Read one packet from the payload chunk.  MAUD audio is chunked into
 * 1024-block packets; other audio is delivered as one packet; video
 * packets are prefixed with a 2-byte extradata-size marker.
 */
static int iff_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    IffDemuxContext *iff = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    int64_t start = avio_tell(pb);
    int ret;

    if (start >= iff->body_end)
        return AVERROR_EOF;

    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (st->codec->codec_tag == ID_MAUD) {
            int64_t remaining = iff->body_end - start;
            ret = av_get_packet(pb, pkt,
                                FFMIN(remaining, 1024 * st->codec->block_align));
        } else {
            ret = av_get_packet(pb, pkt, iff->body_size);
        }
        break;

    case AVMEDIA_TYPE_VIDEO: {
        uint8_t *dst;

        if (av_new_packet(pkt, iff->body_size + 2) < 0)
            return AVERROR(ENOMEM);

        // Minimal 2-byte header: "no extradata change in this frame".
        dst = pkt->data;
        bytestream_put_be16(&dst, 2);
        ret = avio_read(pb, dst, iff->body_size);
        break;
    }

    default:
        av_assert0(0);
    }

    // The packet at the very start of the payload is the key frame.
    if (start == iff->body_pos)
        pkt->flags |= AV_PKT_FLAG_KEY;
    if (ret < 0)
        return ret;
    pkt->stream_index = 0;
    return ret;
}
 
/* IFF (8SVX/16SV/MAUD/ILBM/PBM/DEEP/...) demuxer registration. */
AVInputFormat ff_iff_demuxer = {
    .name           = "iff",
    .long_name      = NULL_IF_CONFIG_SMALL("IFF (Interchange File Format)"),
    .priv_data_size = sizeof(IffDemuxContext),
    .read_probe     = iff_probe,
    .read_header    = iff_read_header,
    .read_packet    = iff_read_packet,
    .flags          = AVFMT_GENERIC_INDEX | AVFMT_NO_BYTE_SEEK,
};
/contrib/sdk/sources/ffmpeg/libavformat/ilbc.c
0,0 → 1,140
/*
* iLBC storage file format
* Copyright (c) 2012 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
 
/* Magic file headers: 20 ms frame mode (38-byte blocks) and 30 ms frame
 * mode (50-byte blocks).  Both are 9 bytes long including the newline. */
static const char mode20_header[] = "#!iLBC20\n";
static const char mode30_header[] = "#!iLBC30\n";
 
/**
 * Write the iLBC magic header matching the encoder's block size.
 * Exactly one iLBC stream is required; the mode (20 ms vs 30 ms) is
 * derived from block_align (38 vs 50 bytes).
 */
static int ilbc_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVCodecContext *enc;

    if (s->nb_streams != 1) {
        av_log(s, AV_LOG_ERROR, "Unsupported number of streams\n");
        return AVERROR(EINVAL);
    }
    enc = s->streams[0]->codec;

    if (enc->codec_id != AV_CODEC_ID_ILBC) {
        av_log(s, AV_LOG_ERROR, "Unsupported codec\n");
        return AVERROR(EINVAL);
    }

    if (enc->block_align == 38) {
        avio_write(pb, mode20_header, sizeof(mode20_header) - 1);
    } else if (enc->block_align == 50) {
        avio_write(pb, mode30_header, sizeof(mode30_header) - 1);
    } else {
        av_log(s, AV_LOG_ERROR, "Unsupported mode\n");
        return AVERROR(EINVAL);
    }
    avio_flush(pb);
    return 0;
}
 
/* Frames are stored back to back with no per-packet framing. */
static int ilbc_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *out = s->pb;

    avio_write(out, pkt->data, pkt->size);
    return 0;
}
 
/* Probe by the leading "#!iLBC" shared by both mode headers. */
static int ilbc_probe(AVProbeData *p)
{
    return memcmp(p->buf, mode20_header, 6) ? 0 : AVPROBE_SCORE_MAX;
}
 
/**
 * Read and validate the 9-byte iLBC magic header, then configure the
 * single audio stream for the detected mode.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on truncated or unknown header,
 *         AVERROR(ENOMEM) if the stream cannot be allocated
 */
static int ilbc_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint8_t header[9];

    /* Fail cleanly on truncated input instead of comparing uninitialized
     * header bytes below. */
    if (avio_read(pb, header, 9) != 9)
        return AVERROR_INVALIDDATA;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_id = AV_CODEC_ID_ILBC;
    st->codec->sample_rate = 8000;
    st->codec->channels = 1;
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->start_time = 0;
    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    if (!memcmp(header, mode20_header, sizeof(mode20_header) - 1)) {
        /* 20 ms mode: 38-byte blocks at 15.2 kbit/s */
        st->codec->block_align = 38;
        st->codec->bit_rate = 15200;
    } else if (!memcmp(header, mode30_header, sizeof(mode30_header) - 1)) {
        /* 30 ms mode: 50-byte blocks at 13.33 kbit/s */
        st->codec->block_align = 50;
        st->codec->bit_rate = 13333;
    } else {
        av_log(s, AV_LOG_ERROR, "Unrecognized iLBC file header\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
 
/**
 * Read one fixed-size iLBC block.  The packet duration is 160 samples
 * in 20 ms mode (38-byte blocks) and 240 samples in 30 ms mode.
 */
static int ilbc_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    AVCodecContext *enc = s->streams[0]->codec;
    const int block = enc->block_align;
    int ret;

    ret = av_new_packet(pkt, block);
    if (ret < 0)
        return ret;

    pkt->stream_index = 0;
    pkt->pos = avio_tell(s->pb);
    pkt->duration = block == 38 ? 160 : 240;

    ret = avio_read(s->pb, pkt->data, block);
    if (ret != block) {
        // Short read or IO error: drop the partially filled packet.
        av_free_packet(pkt);
        return ret < 0 ? ret : AVERROR(EIO);
    }

    return 0;
}
 
/* iLBC storage-format demuxer registration. */
AVInputFormat ff_ilbc_demuxer = {
    .name         = "ilbc",
    .long_name    = NULL_IF_CONFIG_SMALL("iLBC storage"),
    .read_probe   = ilbc_probe,
    .read_header  = ilbc_read_header,
    .read_packet  = ilbc_read_packet,
    .flags        = AVFMT_GENERIC_INDEX,
};

/* iLBC storage-format muxer registration. */
AVOutputFormat ff_ilbc_muxer = {
    .name         = "ilbc",
    .long_name    = NULL_IF_CONFIG_SMALL("iLBC storage"),
    .mime_type    = "audio/iLBC",
    .extensions   = "lbc",
    .audio_codec  = AV_CODEC_ID_ILBC,
    .write_header = ilbc_write_header,
    .write_packet = ilbc_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
/contrib/sdk/sources/ffmpeg/libavformat/img2.c
0,0 → 1,102
/*
* Image format
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
* Copyright (c) 2004 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "internal.h"
 
/* Maps a file name extension (without the leading dot) to a codec id. */
typedef struct {
    enum AVCodecID id;
    const char *str;
} IdStrMap;
 
/* Extension -> codec-id table used by ff_guess_image2_codec();
 * terminated by the AV_CODEC_ID_NONE entry. */
static const IdStrMap img_tags[] = {
    { AV_CODEC_ID_MJPEG, "jpeg" },
    { AV_CODEC_ID_MJPEG, "jpg" },
    { AV_CODEC_ID_MJPEG, "jps" },
    { AV_CODEC_ID_LJPEG, "ljpg" },
    { AV_CODEC_ID_JPEGLS, "jls" },
    { AV_CODEC_ID_PNG, "png" },
    { AV_CODEC_ID_PNG, "pns" },
    { AV_CODEC_ID_PNG, "mng" },
    { AV_CODEC_ID_PPM, "ppm" },
    { AV_CODEC_ID_PPM, "pnm" },
    { AV_CODEC_ID_PGM, "pgm" },
    { AV_CODEC_ID_PGMYUV, "pgmyuv" },
    { AV_CODEC_ID_PBM, "pbm" },
    { AV_CODEC_ID_PAM, "pam" },
    { AV_CODEC_ID_MPEG1VIDEO, "mpg1-img" },
    { AV_CODEC_ID_MPEG2VIDEO, "mpg2-img" },
    { AV_CODEC_ID_MPEG4, "mpg4-img" },
    { AV_CODEC_ID_FFV1, "ffv1-img" },
    { AV_CODEC_ID_RAWVIDEO, "y" },
    { AV_CODEC_ID_RAWVIDEO, "raw" },
    { AV_CODEC_ID_BMP, "bmp" },
    { AV_CODEC_ID_TARGA, "tga" },
    { AV_CODEC_ID_TIFF, "tiff" },
    { AV_CODEC_ID_TIFF, "tif" },
    { AV_CODEC_ID_SGI, "sgi" },
    { AV_CODEC_ID_PTX, "ptx" },
    { AV_CODEC_ID_PCX, "pcx" },
    { AV_CODEC_ID_BRENDER_PIX, "pix" },
    { AV_CODEC_ID_SUNRAST, "sun" },
    { AV_CODEC_ID_SUNRAST, "ras" },
    { AV_CODEC_ID_SUNRAST, "rs" },
    { AV_CODEC_ID_SUNRAST, "im1" },
    { AV_CODEC_ID_SUNRAST, "im8" },
    { AV_CODEC_ID_SUNRAST, "im24" },
    { AV_CODEC_ID_SUNRAST, "im32" },
    { AV_CODEC_ID_SUNRAST, "sunras" },
    { AV_CODEC_ID_JPEG2000, "j2c" },
    { AV_CODEC_ID_JPEG2000, "jp2" },
    { AV_CODEC_ID_JPEG2000, "jpc" },
    { AV_CODEC_ID_JPEG2000, "j2k" },
    { AV_CODEC_ID_DPX, "dpx" },
    { AV_CODEC_ID_EXR, "exr" },
    { AV_CODEC_ID_PICTOR, "pic" },
    { AV_CODEC_ID_V210X, "yuv10" },
    { AV_CODEC_ID_WEBP, "webp" },
    { AV_CODEC_ID_XBM, "xbm" },
    { AV_CODEC_ID_XFACE, "xface" },
    { AV_CODEC_ID_XWD, "xwd" },
    { AV_CODEC_ID_NONE, NULL }
};
 
/**
 * Look up a codec id by the extension of @p str (the text after the
 * last '.', compared case-insensitively).
 *
 * @return the matching id, or AV_CODEC_ID_NONE when there is no '.' or
 *         the extension is not in @p tags
 */
static enum AVCodecID av_str2id(const IdStrMap *tags, const char *str)
{
    const IdStrMap *entry;
    const char *ext = strrchr(str, '.');

    if (!ext)
        return AV_CODEC_ID_NONE;
    ext++;

    for (entry = tags; entry->id; entry++)
        if (!av_strcasecmp(ext, entry->str))
            return entry->id;

    return AV_CODEC_ID_NONE;
}
 
/**
 * Guess the image codec from a file name's extension.
 *
 * @param filename path whose extension is examined
 * @return matching codec id, or AV_CODEC_ID_NONE if there is no extension
 *         or it is unknown
 */
enum AVCodecID ff_guess_image2_codec(const char *filename)
{
    return av_str2id(img_tags, filename);
}
/contrib/sdk/sources/ffmpeg/libavformat/img2dec.c
0,0 → 1,510
/*
* Image format
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
* Copyright (c) 2004 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <sys/stat.h>
#include "libavutil/avstring.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h"
#include "avformat.h"
#include "internal.h"
#if HAVE_GLOB
#include <glob.h>
 
/* Locally define as 0 (bitwise-OR no-op) any missing glob options that
are non-posix glibc/bsd extensions. */
#ifndef GLOB_NOMAGIC
#define GLOB_NOMAGIC 0
#endif
#ifndef GLOB_BRACE
#define GLOB_BRACE 0
#endif
 
#endif /* HAVE_GLOB */
 
/* Private state of the image2/image2pipe demuxers. */
typedef struct {
    const AVClass *class;  /**< Class for private options. */
    int img_first;         /**< index of the first image in the sequence */
    int img_last;          /**< index of the last image in the sequence */
    int img_number;        /**< index of the next image to read */
    int64_t pts;           /**< frame counter used as pts */
    int img_count;         /**< number of images read so far */
    int is_pipe;           /**< non-zero when reading a single piped stream */
    int split_planes;      /**< use independent file for each Y, U, V plane */
    char path[1024];       /**< file name pattern (copied from the URL) */
    char *pixel_format;    /**< Set by a private option. */
    int width, height;     /**< Set by a private option. */
    AVRational framerate;  /**< Set by a private option. */
    int loop;              /**< restart from img_first when the sequence ends */
    enum { PT_GLOB_SEQUENCE, PT_GLOB, PT_SEQUENCE } pattern_type; /**< how path is interpreted */
    int use_glob;          /**< non-zero when path was matched via glob(3) */
#if HAVE_GLOB
    glob_t globstate;      /**< glob(3) match results */
#endif
    int start_number;       /**< first index to probe when scanning the sequence */
    int start_number_range; /**< how many indices after start_number to probe */
    int frame_size;         /**< forced packet size for piped input (0 = default) */
    int ts_from_file;       /**< set frame timestamps from the file (private option) */
} VideoDemuxData;
 
/* Well-known frame resolutions used to guess raw video dimensions from a
 * bare pixel count. */
static const int sizes[][2] = {
    { 640, 480 },
    { 720, 480 },
    { 720, 576 },
    { 352, 288 },
    { 352, 240 },
    { 160, 128 },
    { 512, 384 },
    { 640, 352 },
    { 640, 240 },
};

/**
 * Guess width/height from a pixel count by matching it against the table
 * of well-known resolutions above.
 *
 * @param width_ptr  receives the width on a match
 * @param height_ptr receives the height on a match
 * @param size       total number of pixels (width * height)
 * @return 0 on a match (outputs written), -1 if no entry matches
 */
static int infer_size(int *width_ptr, int *height_ptr, int size)
{
    /* size_t index avoids the signed/unsigned comparison of the original
     * "int i < FF_ARRAY_ELEMS(...)" loop. */
    const size_t n = sizeof(sizes) / sizeof(sizes[0]);
    size_t i;

    for (i = 0; i < n; i++) {
        if ((sizes[i][0] * sizes[i][1]) == size) {
            *width_ptr  = sizes[i][0];
            *height_ptr = sizes[i][1];
            return 0;
        }
    }

    return -1;
}
 
/**
 * Report whether @p path contains a glob metacharacter introduced by '%'
 * ("%*", "%?", "%[", ...).  "%%" is a literal-percent escape and is
 * skipped.  Always 0 when globbing support is not compiled in.
 */
static int is_glob(const char *path)
{
#if HAVE_GLOB
    size_t span = 0;
    const char *p = path;

    while ((p = strchr(p, '%')) != NULL) {
        p++;
        if (*p == '%') { /* "%%" escape: not a pattern */
            p++;
            continue;
        }
        span = strspn(p, "*?[]{}");
        if (span)
            break;
    }
    /* Non-zero span means a glob char followed some '%'. */
    return span != 0;
#else
    return 0;
#endif
}
 
/**
* Get index range of image files matched by path.
*
* @param pfirst_index pointer to index updated with the first number in the range
* @param plast_index pointer to index updated with the last number in the range
* @param path path which has to be matched by the image files in the range
* @param start_index minimum accepted value for the first index in the range
* @return -1 if no image file could be found
*/
static int find_image_range(int *pfirst_index, int *plast_index,
                            const char *path, int start_index, int start_index_range)
{
    char buf[1024];
    int range, last_index, range1, first_index;

    /* find the first image by probing indices
     * [start_index, start_index + start_index_range) */
    for (first_index = start_index; first_index < start_index + start_index_range; first_index++) {
        if (av_get_frame_filename(buf, sizeof(buf), path, first_index) < 0) {
            /* no number pattern in path: treat it as a single file
             * (presumably buf holds the plain path here — TODO confirm
             * av_get_frame_filename's behavior on pattern-less paths) */
            *pfirst_index =
            *plast_index = 1;
            if (avio_check(buf, AVIO_FLAG_READ) > 0)
                return 0;
            return -1;
        }
        if (avio_check(buf, AVIO_FLAG_READ) > 0)
            break;
    }
    if (first_index == start_index + start_index_range)
        goto fail;

    /* find the last image: gallop with doubling probe distance while files
     * keep existing, then restart from the last confirmed index */
    last_index = first_index;
    for (;;) {
        range = 0;
        for (;;) {
            if (!range)
                range1 = 1;
            else
                range1 = 2 * range;
            if (av_get_frame_filename(buf, sizeof(buf), path,
                                      last_index + range1) < 0)
                goto fail;
            if (avio_check(buf, AVIO_FLAG_READ) <= 0)
                break;
            range = range1;
            /* just in case... */
            if (range >= (1 << 30))
                goto fail;
        }
        /* we are sure than image last_index + range exists */
        if (!range)
            break;
        last_index += range;
    }
    *pfirst_index = first_index;
    *plast_index = last_index;
    return 0;

fail:
    return -1;
}
 
/* Probe purely from the file name: a known image extension plus either a
 * frame-number pattern or a glob pattern scores highest. */
static int img_read_probe(AVProbeData *p)
{
    if (!p->filename || !ff_guess_image2_codec(p->filename))
        return 0;

    if (av_filename_number_test(p->filename))
        return AVPROBE_SCORE_MAX;
    if (is_glob(p->filename))
        return AVPROBE_SCORE_MAX;
    if (av_match_ext(p->filename, "raw") || av_match_ext(p->filename, "gif"))
        return 5;
    return AVPROBE_SCORE_EXTENSION;
}
 
/**
 * Open an image sequence: resolve the file name pattern (printf-style
 * sequence or glob), determine the first/last index, and set up the
 * single stream.
 */
static int img_read_header(AVFormatContext *s1)
{
    VideoDemuxData *s = s1->priv_data;
    int first_index, last_index;
    AVStream *st;
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;

    s1->ctx_flags |= AVFMTCTX_NOHEADER;

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        return AVERROR(ENOMEM);
    }

    if (s->pixel_format &&
        (pix_fmt = av_get_pix_fmt(s->pixel_format)) == AV_PIX_FMT_NONE) {
        av_log(s1, AV_LOG_ERROR, "No such pixel format: %s.\n",
               s->pixel_format);
        return AVERROR(EINVAL);
    }

    av_strlcpy(s->path, s1->filename, sizeof(s->path));
    s->img_number = 0;
    s->img_count = 0;

    /* find format: NOFILE input formats open each image file themselves */
    if (s1->iformat->flags & AVFMT_NOFILE)
        s->is_pipe = 0;
    else {
        s->is_pipe = 1;
        st->need_parsing = AVSTREAM_PARSE_FULL; // piped data needs a parser to split frames
    }

    if (s->ts_from_file)
        avpriv_set_pts_info(st, 64, 1, 1); // 1 Hz time base for file-based timestamps
    else
        avpriv_set_pts_info(st, 64, s->framerate.den, s->framerate.num);

    if (s->width && s->height) {
        st->codec->width = s->width;
        st->codec->height = s->height;
    }

    if (!s->is_pipe) {
        /* Deprecated combined mode: auto-detect glob vs sequence. */
        if (s->pattern_type == PT_GLOB_SEQUENCE) {
            s->use_glob = is_glob(s->path);
            /* Note: when HAVE_GLOB is unset, is_glob() always returns 0,
             * so this branch is unreachable in that configuration. */
            if (s->use_glob) {
                char *p = s->path, *q, *dup;
                int gerr;

                av_log(s1, AV_LOG_WARNING, "Pattern type 'glob_sequence' is deprecated: "
                       "use pattern_type 'glob' instead\n");
#if HAVE_GLOB
                /* Rewrite path in place: unescape "%X" and backslash-escape
                 * bare glob metacharacters. */
                dup = q = av_strdup(p);
                while (*q) {
                    /* Do we have room for the next char and a \ insertion? */
                    if ((p - s->path) >= (sizeof(s->path) - 2))
                        break;
                    if (*q == '%' && strspn(q + 1, "%*?[]{}"))
                        ++q;
                    else if (strspn(q, "\\*?[]{}"))
                        *p++ = '\\';
                    *p++ = *q++;
                }
                *p = 0;
                av_free(dup);

                gerr = glob(s->path, GLOB_NOCHECK|GLOB_BRACE|GLOB_NOMAGIC, NULL, &s->globstate);
                if (gerr != 0) {
                    return AVERROR(ENOENT);
                }
                first_index = 0;
                last_index = s->globstate.gl_pathc - 1;
#endif
            }
        }
        if ((s->pattern_type == PT_GLOB_SEQUENCE && !s->use_glob) || s->pattern_type == PT_SEQUENCE) {
            /* printf-style numbered sequence: scan for the index range */
            if (find_image_range(&first_index, &last_index, s->path,
                                 s->start_number, s->start_number_range) < 0) {
                av_log(s1, AV_LOG_ERROR,
                       "Could find no file with path '%s' and index in the range %d-%d\n",
                       s->path, s->start_number, s->start_number + s->start_number_range - 1);
                return AVERROR(ENOENT);
            }
        } else if (s->pattern_type == PT_GLOB) {
#if HAVE_GLOB
            int gerr;
            gerr = glob(s->path, GLOB_NOCHECK|GLOB_BRACE|GLOB_NOMAGIC, NULL, &s->globstate);
            if (gerr != 0) {
                return AVERROR(ENOENT);
            }
            first_index = 0;
            last_index = s->globstate.gl_pathc - 1;
            s->use_glob = 1;
#else
            av_log(s1, AV_LOG_ERROR,
                   "Pattern type 'glob' was selected but globbing "
                   "is not supported by this libavformat build\n");
            return AVERROR(ENOSYS);
#endif
        } else if (s->pattern_type != PT_GLOB_SEQUENCE) {
            av_log(s1, AV_LOG_ERROR,
                   "Unknown value '%d' for pattern_type option\n", s->pattern_type);
            return AVERROR(EINVAL);
        }
        s->img_first = first_index;
        s->img_last = last_index;
        s->img_number = first_index;
        /* compute duration (one tick per image) */
        if (!s->ts_from_file) {
            st->start_time = 0;
            st->duration = last_index - first_index + 1;
        }
    }

    /* Codec selection: explicit forced ids win, otherwise guess from the
     * file name extension. */
    if (s1->video_codec_id) {
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = s1->video_codec_id;
    } else if (s1->audio_codec_id) {
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = s1->audio_codec_id;
    } else {
        const char *str = strrchr(s->path, '.');
        s->split_planes = str && !av_strcasecmp(str + 1, "y"); // ".y" = separate Y/U/V files
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = ff_guess_image2_codec(s->path);
        if (st->codec->codec_id == AV_CODEC_ID_LJPEG)
            st->codec->codec_id = AV_CODEC_ID_MJPEG; // map LJPEG to the MJPEG decoder id
    }
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        pix_fmt != AV_PIX_FMT_NONE)
        st->codec->pix_fmt = pix_fmt;

    return 0;
}
 
/**
 * Read one image (or one chunk of piped data) into a packet.  With
 * split_planes set, the Y plane file plus separate 'U' and 'V' files are
 * concatenated into a single packet.
 */
static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoDemuxData *s = s1->priv_data;
    char filename_bytes[1024];
    char *filename = filename_bytes;
    int i;
    int size[3] = { 0 }, ret[3] = { 0 };
    AVIOContext *f[3] = { NULL }; // up to three plane files
    AVCodecContext *codec = s1->streams[0]->codec;

    if (!s->is_pipe) {
        /* loop over input */
        if (s->loop && s->img_number > s->img_last) {
            s->img_number = s->img_first;
        }
        if (s->img_number > s->img_last)
            return AVERROR_EOF;
        if (s->use_glob) {
#if HAVE_GLOB
            filename = s->globstate.gl_pathv[s->img_number];
#endif
        } else {
            if (av_get_frame_filename(filename_bytes, sizeof(filename_bytes),
                                      s->path,
                                      s->img_number) < 0 && s->img_number > 1)
                return AVERROR(EIO);
        }
        for (i = 0; i < 3; i++) {
            if (avio_open2(&f[i], filename, AVIO_FLAG_READ,
                           &s1->interrupt_callback, NULL) < 0) {
                if (i >= 1)
                    break; // missing chroma file: proceed with what was opened
                av_log(s1, AV_LOG_ERROR, "Could not open file : %s\n",
                       filename);
                return AVERROR(EIO);
            }
            size[i] = avio_size(f[i]);

            if (!s->split_planes)
                break;
            // replace the trailing plane letter: 'U' for i==0, 'V' for i==1
            filename[strlen(filename) - 1] = 'U' + i;
        }

        // Raw video with unknown size: guess dimensions from the byte count.
        if (codec->codec_id == AV_CODEC_ID_RAWVIDEO && !codec->width)
            infer_size(&codec->width, &codec->height, size[0]);
    } else {
        f[0] = s1->pb;
        if (url_feof(f[0]))
            return AVERROR(EIO);
        if (s->frame_size > 0) {
            size[0] = s->frame_size;
        } else {
            size[0] = 4096; // default chunk; the parser reassembles frames
        }
    }

    if (av_new_packet(pkt, size[0] + size[1] + size[2]) < 0)
        return AVERROR(ENOMEM);
    pkt->stream_index = 0;
    pkt->flags |= AV_PKT_FLAG_KEY;
    /*
    if (s->ts_from_file) {
        struct stat img_stat;
        if (stat(filename, &img_stat))
            return AVERROR(EIO);
        pkt->pts = (int64_t)img_stat.st_mtime;
        av_add_index_entry(s1->streams[0], s->img_number, pkt->pts, 0, 0, AVINDEX_KEYFRAME);
    } else if (!s->is_pipe) {
        pkt->pts = s->pts;
    }
    */
    // Concatenate the plane files (or the single file/chunk) into the packet.
    pkt->size = 0;
    for (i = 0; i < 3; i++) {
        if (f[i]) {
            ret[i] = avio_read(f[i], pkt->data + pkt->size, size[i]);
            if (!s->is_pipe)
                avio_close(f[i]);
            if (ret[i] > 0)
                pkt->size += ret[i];
        }
    }

    if (ret[0] <= 0 || ret[1] < 0 || ret[2] < 0) {
        av_free_packet(pkt);
        return AVERROR(EIO); /* signal EOF */
    } else {
        s->img_count++;
        s->img_number++;
        s->pts++;
        return 0;
    }
}
 
/* Release the glob(3) match list if pattern matching was used. */
static int img_read_close(struct AVFormatContext* s1)
{
#if HAVE_GLOB
    VideoDemuxData *s = s1->priv_data;

    if (s->use_glob)
        globfree(&s->globstate);
#endif
    return 0;
}
 
/**
 * Seek to a frame index.  With ts_from_file, resolve the timestamp through
 * the stream index; otherwise the timestamp is a frame number (wrapped into
 * range when looping is enabled).
 */
static int img_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    VideoDemuxData *priv = s->priv_data;
    AVStream *st = s->streams[0];
    int64_t span;

    if (priv->ts_from_file) {
        int idx = av_index_search_timestamp(st, timestamp, flags);
        if (idx < 0)
            return -1;
        priv->img_number = st->index_entries[idx].pos;
        return 0;
    }

    span = priv->img_last - priv->img_first;
    if (timestamp < 0 || (!priv->loop && timestamp > span))
        return -1;
    priv->img_number = timestamp % (span + 1) + priv->img_first;
    priv->pts = timestamp;
    return 0;
}
 
#define OFFSET(x) offsetof(VideoDemuxData, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Private options shared by the image2 and image2pipe demuxers. */
static const AVOption options[] = {
    { "framerate", "set the video framerate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, DEC },
    { "loop", "force loop over input file sequence", OFFSET(loop), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, DEC },

    { "pattern_type", "set pattern type", OFFSET(pattern_type), AV_OPT_TYPE_INT, {.i64=PT_GLOB_SEQUENCE}, 0, INT_MAX, DEC, "pattern_type"},
    { "glob_sequence","select glob/sequence pattern type", 0, AV_OPT_TYPE_CONST, {.i64=PT_GLOB_SEQUENCE}, INT_MIN, INT_MAX, DEC, "pattern_type" },
    { "glob", "select glob pattern type", 0, AV_OPT_TYPE_CONST, {.i64=PT_GLOB }, INT_MIN, INT_MAX, DEC, "pattern_type" },
    { "sequence", "select sequence pattern type", 0, AV_OPT_TYPE_CONST, {.i64=PT_SEQUENCE }, INT_MIN, INT_MAX, DEC, "pattern_type" },

    { "pixel_format", "set video pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
    { "start_number", "set first number in the sequence", OFFSET(start_number), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC },
    { "start_number_range", "set range for looking at the first sequence number", OFFSET(start_number_range), AV_OPT_TYPE_INT, {.i64 = 5}, 1, INT_MAX, DEC },
    { "video_size", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
    { "frame_size", "force frame size in bytes", OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC },
    { "ts_from_file", "set frame timestamp from file's one", OFFSET(ts_from_file), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, DEC },
    { NULL },
};
 
#if CONFIG_IMAGE2_DEMUXER
/* File-sequence variant: AVFMT_NOFILE, opens each image file itself. */
static const AVClass img2_class = {
    .class_name = "image2 demuxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_image2_demuxer = {
    .name           = "image2",
    .long_name      = NULL_IF_CONFIG_SMALL("image2 sequence"),
    .priv_data_size = sizeof(VideoDemuxData),
    .read_probe     = img_read_probe,
    .read_header    = img_read_header,
    .read_packet    = img_read_packet,
    .read_close     = img_read_close,
    .read_seek      = img_read_seek,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &img2_class,
};
#endif
#if CONFIG_IMAGE2PIPE_DEMUXER
/* Piped variant: reads concatenated images from a single input stream. */
static const AVClass img2pipe_class = {
    .class_name = "image2pipe demuxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_image2pipe_demuxer = {
    .name           = "image2pipe",
    .long_name      = NULL_IF_CONFIG_SMALL("piped image2 sequence"),
    .priv_data_size = sizeof(VideoDemuxData),
    .read_header    = img_read_header,
    .read_packet    = img_read_packet,
    .priv_class     = &img2pipe_class,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/img2enc.c
0,0 → 1,217
/*
* Image format
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
* Copyright (c) 2004 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
 
/* Private state of the image2 muxer. */
typedef struct {
    const AVClass *class;  /**< Class for private options. */
    int img_number;        /**< index of the next image to write */
    int is_pipe;           /**< non-zero when writing to a single output stream */
    int split_planes;      /**< use independent file for each Y, U, V plane */
    char path[1024];       /**< output file name pattern */
    int update;            /**< write to the same file instead of numbering */
    int use_strftime;      /**< expand strftime() specifiers in path */
    const char *muxer;     /**< per-frame muxer name (set to "gif" for GIF) */
} VideoMuxData;
 
/**
 * Initialize the image2 muxer: copy the output pattern, decide between
 * pipe and per-file output, and detect GIF / split-plane raw video.
 */
static int write_header(AVFormatContext *s)
{
    VideoMuxData *img = s->priv_data;
    AVStream *st = s->streams[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(st->codec->pix_fmt);

    av_strlcpy(img->path, s->filename, sizeof(img->path));

    /* NOFILE output formats open each image file themselves. */
    img->is_pipe = !(s->oformat->flags & AVFMT_NOFILE);

    if (st->codec->codec_id == AV_CODEC_ID_GIF) {
        img->muxer = "gif";
    } else if (st->codec->codec_id == AV_CODEC_ID_RAWVIDEO) {
        /* A ".y" target with a single planar >=3-component stream means
         * each plane goes to its own file. */
        const char *ext = strrchr(img->path, '.');
        img->split_planes = ext
                            && !av_strcasecmp(ext + 1, "y")
                            && s->nb_streams == 1
                            && desc
                            && (desc->flags & AV_PIX_FMT_FLAG_PLANAR)
                            && desc->nb_components >= 3;
    }
    return 0;
}
 
/**
 * Write one image (packet) to the output.
 *
 * Depending on the configuration set up in write_header():
 *  - pipe mode: the packet is written to the caller's single stream;
 *  - file mode: a filename is generated (fixed, strftime, or %d pattern)
 *    and one file (or one file per plane) is opened and written;
 *  - muxer mode: the packet is fed through a freshly created sub-muxer
 *    (e.g. "gif") whose output goes to the chosen AVIOContext.
 *
 * @param s   muxer context
 * @param pkt packet holding one complete encoded image
 * @return 0 on success, a negative AVERROR code on failure
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoMuxData *img = s->priv_data;
    AVIOContext *pb[4];
    char filename[1024];
    AVCodecContext *codec = s->streams[pkt->stream_index]->codec;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(codec->pix_fmt);
    int i;

    if (!img->is_pipe) {
        /* Build the output filename for this frame. */
        if (img->update) {
            av_strlcpy(filename, img->path, sizeof(filename));
        } else if (img->use_strftime) {
            time_t now0;
            struct tm *tm;
            time(&now0);
            tm = localtime(&now0);
            if (!strftime(filename, sizeof(filename), img->path, tm)) {
                av_log(s, AV_LOG_ERROR, "Could not get frame filename with strftime\n");
                return AVERROR(EINVAL);
            }
        } else if (av_get_frame_filename(filename, sizeof(filename), img->path, img->img_number) < 0 &&
                   img->img_number > 1) {
            /* A pattern without %d is tolerated for the very first image only. */
            av_log(s, AV_LOG_ERROR,
                   "Could not get frame filename number %d from pattern '%s' (either set updatefirst or use a pattern like %%03d within the filename pattern)\n",
                   img->img_number, img->path);
            return AVERROR(EINVAL);
        }
        /* Open one file, or one file per plane in split_planes mode; the
         * trailing 'y' of the name is replaced by 'U'/'V'/'A' for the
         * additional planes. */
        for (i = 0; i < 4; i++) {
            if (avio_open2(&pb[i], filename, AVIO_FLAG_WRITE,
                           &s->interrupt_callback, NULL) < 0) {
                av_log(s, AV_LOG_ERROR, "Could not open file : %s\n", filename);
                /* Fix: close any plane files already opened in this loop,
                 * which previously leaked on a mid-loop failure. */
                while (--i >= 0)
                    avio_close(pb[i]);
                return AVERROR(EIO);
            }

            if (!img->split_planes || i+1 >= desc->nb_components)
                break;
            filename[strlen(filename) - 1] = ((int[]){'U','V','A','x'})[i];
        }
    } else {
        pb[0] = s->pb;
    }

    if (img->split_planes) {
        /* NOTE(review): split_planes together with is_pipe would use the
         * uninitialized pb[1..3]; write_header() sets split_planes from the
         * filename only, so this relies on pipe outputs never ending in
         * ".y" — confirm. */
        int ysize = codec->width * codec->height;
        int usize = FF_CEIL_RSHIFT(codec->width, desc->log2_chroma_w) * FF_CEIL_RSHIFT(codec->height, desc->log2_chroma_h);
        if (desc->comp[0].depth_minus1 >= 8) {
            /* > 8-bit samples occupy two bytes each. */
            ysize *= 2;
            usize *= 2;
        }
        avio_write(pb[0], pkt->data                , ysize);
        avio_write(pb[1], pkt->data + ysize        , usize);
        avio_write(pb[2], pkt->data + ysize + usize, usize);
        avio_close(pb[1]);
        avio_close(pb[2]);
        if (desc->nb_components > 3) {
            avio_write(pb[3], pkt->data + ysize + 2*usize, ysize);
            avio_close(pb[3]);
        }
    } else if (img->muxer) {
        /* Delegate the frame to a one-shot sub-muxer writing into pb[0]. */
        int ret;
        AVStream *st;
        AVPacket pkt2 = {0};
        AVFormatContext *fmt = NULL;

        av_assert0(!img->split_planes);

        ret = avformat_alloc_output_context2(&fmt, NULL, img->muxer, s->filename);
        if (ret < 0)
            return ret;
        st = avformat_new_stream(fmt, NULL);
        if (!st) {
            avformat_free_context(fmt);
            return AVERROR(ENOMEM);
        }
        st->id = pkt->stream_index;

        fmt->pb = pb[0];
        if ((ret = av_copy_packet(&pkt2, pkt))                            < 0 ||
            (ret = av_dup_packet(&pkt2))                                  < 0 ||
            (ret = avcodec_copy_context(st->codec, s->streams[0]->codec)) < 0 ||
            (ret = avformat_write_header(fmt, NULL))                      < 0 ||
            (ret = av_interleaved_write_frame(fmt, &pkt2))                < 0 ||
            (ret = av_write_trailer(fmt))                                 < 0) {
            av_free_packet(&pkt2);
            avformat_free_context(fmt);
            return ret;
        }
        av_free_packet(&pkt2);
        avformat_free_context(fmt);
    } else {
        avio_write(pb[0], pkt->data, pkt->size);
    }
    avio_flush(pb[0]);
    if (!img->is_pipe) {
        avio_close(pb[0]);
    }

    img->img_number++;
    return 0;
}
 
#define OFFSET(x) offsetof(VideoMuxData, x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
/* Private muxer options; "updatefirst" and "update" both map to
 * VideoMuxData.update with the same help text (alias). */
static const AVOption muxoptions[] = {
    { "updatefirst",  "continuously overwrite one file", OFFSET(update),  AV_OPT_TYPE_INT, { .i64 = 0 }, 0,       1, ENC },
    { "update",       "continuously overwrite one file", OFFSET(update),  AV_OPT_TYPE_INT, { .i64 = 0 }, 0,       1, ENC },
    { "start_number", "set first number in the sequence", OFFSET(img_number), AV_OPT_TYPE_INT,  { .i64 = 1 }, 0, INT_MAX, ENC },
    { "strftime",     "use strftime for filename", OFFSET(use_strftime),  AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, ENC },
    { NULL },
};
 
#if CONFIG_IMAGE2_MUXER
/* AVClass exposing the private muxoptions above for the image2 muxer. */
static const AVClass img2mux_class = {
    .class_name = "image2 muxer",
    .item_name  = av_default_item_name,
    .option     = muxoptions,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* File-based image sequence muxer: AVFMT_NOFILE, so write_packet() opens
 * one output file per frame itself. */
AVOutputFormat ff_image2_muxer = {
    .name           = "image2",
    .long_name      = NULL_IF_CONFIG_SMALL("image2 sequence"),
    .extensions     = "bmp,dpx,jls,jpeg,jpg,ljpg,pam,pbm,pcx,pgm,pgmyuv,png,"
                      "ppm,sgi,tga,tif,tiff,jp2,j2c,j2k,xwd,sun,ras,rs,im1,im8,im24,"
                      "sunras,xbm,xface",
    .priv_data_size = sizeof(VideoMuxData),
    .video_codec    = AV_CODEC_ID_MJPEG,
    .write_header   = write_header,
    .write_packet   = write_packet,
    .flags          = AVFMT_NOTIMESTAMPS | AVFMT_NODIMENSIONS | AVFMT_NOFILE,
    .priv_class     = &img2mux_class,
};
#endif
#if CONFIG_IMAGE2PIPE_MUXER
/* Piped variant: no AVFMT_NOFILE, so all images are written back-to-back
 * to the single stream provided by the caller; no private options. */
AVOutputFormat ff_image2pipe_muxer = {
    .name           = "image2pipe",
    .long_name      = NULL_IF_CONFIG_SMALL("piped image2 sequence"),
    .priv_data_size = sizeof(VideoMuxData),
    .video_codec    = AV_CODEC_ID_MJPEG,
    .write_header   = write_header,
    .write_packet   = write_packet,
    .flags          = AVFMT_NOTIMESTAMPS | AVFMT_NODIMENSIONS
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/ingenientdec.c
0,0 → 1,66
/*
* RAW Ingenient MJPEG demuxer
* Copyright (c) 2005 Alex Beregszaszi
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rawdec.h"
 
// http://www.artificis.hu/files/texts/ingenient.txt
/**
 * Read one Ingenient MJPEG frame.
 *
 * Each frame is an 'MJPG' fourcc, a 32-bit little-endian payload size,
 * 16-bit width/height, some skipped header bytes (including an ASCII
 * timestamp), followed by the MJPEG payload itself.
 *
 * @param s   demuxer context
 * @param pkt receives the MJPEG payload (stream 0)
 * @return payload size on success, a negative AVERROR code on failure
 */
static int ingenient_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    int payload_size, width, height, field1, field2, ret;

    if (avio_rl32(pb) != MKTAG('M', 'J', 'P', 'G'))
        return AVERROR(EIO); // FIXME

    payload_size = avio_rl32(pb);
    width        = avio_rl16(pb);
    height       = avio_rl16(pb);

    avio_skip(pb, 8); // zero + size (padded?)
    avio_skip(pb, 2);
    field1 = avio_rl16(pb);
    field2 = avio_rl16(pb);
    avio_skip(pb, 22); // ASCII timestamp

    av_log(s, AV_LOG_DEBUG, "Ingenient packet: size=%d, width=%d, height=%d, unk1=%d unk2=%d\n",
           payload_size, width, height, field1, field2);

    ret = av_get_packet(pb, pkt, payload_size);
    if (ret < 0)
        return ret;
    pkt->stream_index = 0;
    return ret;
}
 
/* Expands to the ingenient_demuxer_class referenced below. */
FF_RAWVIDEO_DEMUXER_CLASS(ingenient)

/* Raw Ingenient MJPEG demuxer: generic raw-video header handling plus the
 * per-frame parser above; everything decodes as MJPEG. */
AVInputFormat ff_ingenient_demuxer = {
    .name           = "ingenient",
    .long_name      = NULL_IF_CONFIG_SMALL("raw Ingenient MJPEG"),
    .priv_data_size = sizeof(FFRawVideoDemuxerContext),
    .read_header    = ff_raw_video_read_header,
    .read_packet    = ingenient_read_packet,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "cgi", // FIXME
    .raw_codec_id   = AV_CODEC_ID_MJPEG,
    .priv_class     = &ingenient_demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/internal.h
0,0 → 1,373
/*
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_INTERNAL_H
#define AVFORMAT_INTERNAL_H
 
#include <stdint.h>
#include "avformat.h"
 
#define MAX_URL_SIZE 4096
 
/** size of probe buffer, for guessing file type from file contents */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)
 
#ifdef DEBUG
# define hex_dump_debug(class, buf, size) av_hex_dump_log(class, AV_LOG_DEBUG, buf, size)
#else
# define hex_dump_debug(class, buf, size)
#endif
 
/* Mapping between a codec ID and a container-specific numeric tag. */
typedef struct AVCodecTag {
    enum AVCodecID id;
    unsigned int tag;
} AVCodecTag;

/* Mapping between a MIME-type-style string and a codec ID. */
typedef struct CodecMime{
    char str[32];
    enum AVCodecID id;
} CodecMime;

#ifdef __GNUC__
/* Type-checked wrapper around av_dynarray_add(): the sizeof() comparison
 * refuses to compile when elem is not comparable with *tab's element type. */
#define dynarray_add(tab, nb_ptr, elem)\
do {\
    __typeof__(tab) _tab = (tab);\
    __typeof__(elem) _elem = (elem);\
    (void)sizeof(**_tab == _elem); /* check that types are compatible */\
    av_dynarray_add(_tab, nb_ptr, _elem);\
} while(0)
#else
/* Fallback without the compile-time type check for non-GNU compilers. */
#define dynarray_add(tab, nb_ptr, elem)\
do {\
    av_dynarray_add((tab), nb_ptr, (elem));\
} while(0)
#endif
 
/* Break a UTC timestamp into a struct tm.
 * NOTE(review): presumably a gmtime_r()-style helper that fills *tm and
 * returns it — confirm against the definition. */
struct tm *ff_brktimegm(time_t secs, struct tm *tm);

/* Write the hexadecimal representation of src[0..size) into buf; lowercase
 * selects a-f versus A-F digits.
 * NOTE(review): presumably returns buf NUL-terminated — confirm. */
char *ff_data_to_hex(char *buf, const uint8_t *src, int size, int lowercase);

/**
 * Parse a string of hexadecimal strings. Any space between the hexadecimal
 * digits is ignored.
 *
 * @param data if non-null, the parsed data is written to this pointer
 * @param p the string to parse
 * @return the number of bytes written (or to be written, if data is null)
 */
int ff_hex_to_data(uint8_t *data, const char *p);

/* Register stream index idx as belonging to the program with id progid.
 * NOTE(review): inferred from the name — confirm against the definition. */
void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx);

/**
 * Add packet to AVFormatContext->packet_buffer list, determining its
 * interleaved position using compare() function argument.
 * @return 0, or < 0 on error
 */
int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
                             int (*compare)(AVFormatContext *, AVPacket *, AVPacket *));

/* Discard internally buffered packets and parser state so that reading can
 * restart cleanly (e.g. after a seek).
 * NOTE(review): inferred from the name — confirm against the definition. */
void ff_read_frame_flush(AVFormatContext *s);

/* Seconds between the NTP epoch (1900-01-01) and the Unix epoch (1970-01-01). */
#define NTP_OFFSET 2208988800ULL
#define NTP_OFFSET_US (NTP_OFFSET * 1000000ULL)

/** Get the current time since NTP epoch in microseconds. */
uint64_t ff_ntp_time(void);
 
/**
* Append the media-specific SDP fragment for the media stream c
* to the buffer buff.
*
* Note, the buffer needs to be initialized, since it is appended to
* existing content.
*
* @param buff the buffer to append the SDP fragment to
* @param size the size of the buff buffer
* @param st the AVStream of the media to describe
* @param idx the global stream index
* @param dest_addr the destination address of the media stream, may be NULL
* @param dest_type the destination address type, may be NULL
* @param port the destination port of the media stream, 0 if unknown
* @param ttl the time to live of the stream, 0 if not multicast
* @param fmt the AVFormatContext, which might contain options modifying
* the generated SDP
*/
void ff_sdp_write_media(char *buff, int size, AVStream *st, int idx,
const char *dest_addr, const char *dest_type,
int port, int ttl, AVFormatContext *fmt);
 
/**
* Write a packet to another muxer than the one the user originally
* intended. Useful when chaining muxers, where one muxer internally
* writes a received packet to another muxer.
*
* @param dst the muxer to write the packet to
* @param dst_stream the stream index within dst to write the packet to
* @param pkt the packet to be written
* @param src the muxer the packet originally was intended for
* @return the value av_write_frame returned
*/
int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
AVFormatContext *src);
 
/**
* Get the length in bytes which is needed to store val as v.
*/
int ff_get_v_length(uint64_t val);
 
/**
* Put val using a variable number of bytes.
*/
void ff_put_v(AVIOContext *bc, uint64_t val);
 
/**
* Read a whole line of text from AVIOContext. Stop reading after reaching
* either a \\n, a \\0 or EOF. The returned string is always \\0-terminated,
* and may be truncated if the buffer is too small.
*
* @param s the read-only AVIOContext
* @param buf buffer to store the read line
* @param maxlen size of the buffer
* @return the length of the string written in the buffer, not including the
* final \\0
*/
int ff_get_line(AVIOContext *s, char *buf, int maxlen);
 
#define SPACE_CHARS " \t\r\n"
 
/**
* Callback function type for ff_parse_key_value.
*
* @param key a pointer to the key
* @param key_len the number of bytes that belong to the key, including the '='
* char
* @param dest return the destination pointer for the value in *dest, may
* be null to ignore the value
* @param dest_len the length of the *dest buffer
*/
typedef void (*ff_parse_key_val_cb)(void *context, const char *key,
int key_len, char **dest, int *dest_len);
/**
* Parse a string with comma-separated key=value pairs. The value strings
* may be quoted and may contain escaped characters within quoted strings.
*
* @param str the string to parse
* @param callback_get_buf function that returns where to store the
* unescaped value string.
* @param context the opaque context pointer to pass to callback_get_buf
*/
void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
void *context);
 
/**
* Find stream index based on format-specific stream ID
* @return stream index, or < 0 on error
*/
int ff_find_stream_index(AVFormatContext *s, int id);
 
/**
* Internal version of av_index_search_timestamp
*/
int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
int64_t wanted_timestamp, int flags);
 
/**
* Internal version of av_add_index_entry
*/
int ff_add_index_entry(AVIndexEntry **index_entries,
int *nb_index_entries,
unsigned int *index_entries_allocated_size,
int64_t pos, int64_t timestamp, int size, int distance, int flags);
 
/**
* Add a new chapter.
*
* @param s media file handle
* @param id unique ID for this chapter
* @param start chapter start time in time_base units
* @param end chapter end time in time_base units
* @param title chapter title
*
* @return AVChapter or NULL on error
*/
AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
int64_t start, int64_t end, const char *title);
 
/**
* Ensure the index uses less memory than the maximum specified in
* AVFormatContext.max_index_size by discarding entries if it grows
* too large.
*/
void ff_reduce_index(AVFormatContext *s, int stream_index);
 
enum AVCodecID ff_guess_image2_codec(const char *filename);
 
/**
* Convert a date string in ISO8601 format to Unix timestamp.
*/
int64_t ff_iso8601_to_unix_time(const char *datestr);
 
/**
* Perform a binary search using av_index_search_timestamp() and
* AVInputFormat.read_timestamp().
*
* @param target_ts target timestamp in the time base of the given stream
* @param stream_index stream number
*/
int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
int64_t target_ts, int flags);
 
/**
* Update cur_dts of all streams based on the given timestamp and AVStream.
*
* Stream ref_st unchanged, others set cur_dts in their native time base.
* Only needed for timestamp wrapping or if (dts not set and pts!=dts).
* @param timestamp new dts expressed in time_base of param ref_st
* @param ref_st reference stream giving time_base of param timestamp
*/
void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp);
 
int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ));
 
/**
* Perform a binary search using read_timestamp().
*
* @param target_ts target timestamp in the time base of the given stream
* @param stream_index stream number
*/
int64_t ff_gen_search(AVFormatContext *s, int stream_index,
int64_t target_ts, int64_t pos_min,
int64_t pos_max, int64_t pos_limit,
int64_t ts_min, int64_t ts_max,
int flags, int64_t *ts_ret,
int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ));
 
/**
* Set the time base and wrapping info for a given stream. This will be used
* to interpret the stream's timestamps. If the new time base is invalid
* (numerator or denominator are non-positive), it leaves the stream
* unchanged.
*
* @param s stream
* @param pts_wrap_bits number of bits effectively used by the pts
* (used for wrap control)
* @param pts_num time base numerator
* @param pts_den time base denominator
*/
void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
unsigned int pts_num, unsigned int pts_den);
 
/**
* Add side data to a packet for changing parameters to the given values.
* Parameters set to 0 aren't included in the change.
*/
int ff_add_param_change(AVPacket *pkt, int32_t channels,
uint64_t channel_layout, int32_t sample_rate,
int32_t width, int32_t height);
 
/**
* Set the timebase for each stream from the corresponding codec timebase and
* print it.
*/
int ff_framehash_write_header(AVFormatContext *s);
 
/**
* Read a transport packet from a media file.
*
* @param s media file handle
* @param pkt is filled
* @return 0 if OK, AVERROR_xxx on error
*/
int ff_read_packet(AVFormatContext *s, AVPacket *pkt);
 
/**
* Interleave a packet per dts in an output media file.
*
* Packets with pkt->destruct == av_destruct_packet will be freed inside this
* function, so they cannot be used after it. Note that calling av_free_packet()
* on them is still safe.
*
* @param s media file handle
* @param out the interleaved packet will be output here
* @param pkt the input packet
* @param flush 1 if no further packets are available as input and all
* remaining packets should be output
* @return 1 if a packet was output, 0 if no packet could be output,
* < 0 if an error occurred
*/
int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
AVPacket *pkt, int flush);
 
void ff_free_stream(AVFormatContext *s, AVStream *st);
 
/**
* Return the frame duration in seconds. Return 0 if not available.
*/
void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
AVCodecParserContext *pc, AVPacket *pkt);
 
int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux);
 
unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id);
 
enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag);
 
/**
* Select a PCM codec based on the given parameters.
*
* @param bps bits-per-sample
* @param flt floating-point
* @param be big-endian
* @param sflags signed flags. each bit corresponds to one byte of bit depth.
* e.g. the 1st bit indicates if 8-bit should be signed or
* unsigned, the 2nd bit indicates if 16-bit should be signed or
* unsigned, etc... This is useful for formats such as WAVE where
* only 8-bit is unsigned and all other bit depths are signed.
* @return a PCM codec id or AV_CODEC_ID_NONE
*/
enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags);
 
/**
 * Choose a timebase for muxing the specified stream.
 *
 * The chosen timebase allows sample-accurate timestamps based
 * on the framerate or sample rate for audio streams. It is also
 * at least as precise as 1/min_precission would be.
 */
AVRational ff_choose_timebase(AVFormatContext *s, AVStream *st, int min_precission);
 
/**
* Generate standard extradata for AVC-Intra based on width/height and field order.
*/
void ff_generate_avci_extradata(AVStream *st);
 
/**
* Allocate extradata with additional FF_INPUT_BUFFER_PADDING_SIZE at end
* which is always set to 0.
*
* @param size size of extradata
* @return 0 if OK, AVERROR_xxx on error
*/
int ff_alloc_extradata(AVCodecContext *avctx, int size);
 
#endif /* AVFORMAT_INTERNAL_H */
/contrib/sdk/sources/ffmpeg/libavformat/ipmovie.c
0,0 → 1,656
/*
* Interplay MVE File Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Interplay MVE file demuxer
* by Mike Melanson (melanson@pcisys.net)
* For more information regarding the Interplay MVE file format, visit:
* http://www.pcisys.net/~melanson/codecs/
* The aforementioned site also contains a command line utility for parsing
* IP MVE files so that you can get a good idea of the typical structure of
* such files. This demuxer is not the best example to use if you are trying
* to write your own as it uses a rather roundabout approach for splitting
* up and sending out the chunks.
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define CHUNK_PREAMBLE_SIZE 4
#define OPCODE_PREAMBLE_SIZE 4
 
#define CHUNK_INIT_AUDIO 0x0000
#define CHUNK_AUDIO_ONLY 0x0001
#define CHUNK_INIT_VIDEO 0x0002
#define CHUNK_VIDEO 0x0003
#define CHUNK_SHUTDOWN 0x0004
#define CHUNK_END 0x0005
/* these last types are used internally */
#define CHUNK_DONE 0xFFFC
#define CHUNK_NOMEM 0xFFFD
#define CHUNK_EOF 0xFFFE
#define CHUNK_BAD 0xFFFF
 
#define OPCODE_END_OF_STREAM 0x00
#define OPCODE_END_OF_CHUNK 0x01
#define OPCODE_CREATE_TIMER 0x02
#define OPCODE_INIT_AUDIO_BUFFERS 0x03
#define OPCODE_START_STOP_AUDIO 0x04
#define OPCODE_INIT_VIDEO_BUFFERS 0x05
#define OPCODE_UNKNOWN_06 0x06
#define OPCODE_SEND_BUFFER 0x07
#define OPCODE_AUDIO_FRAME 0x08
#define OPCODE_SILENCE_FRAME 0x09
#define OPCODE_INIT_VIDEO_MODE 0x0A
#define OPCODE_CREATE_GRADIENT 0x0B
#define OPCODE_SET_PALETTE 0x0C
#define OPCODE_SET_PALETTE_COMPRESSED 0x0D
#define OPCODE_UNKNOWN_0E 0x0E
#define OPCODE_SET_DECODING_MAP 0x0F
#define OPCODE_UNKNOWN_10 0x10
#define OPCODE_VIDEO_DATA 0x11
#define OPCODE_UNKNOWN_12 0x12
#define OPCODE_UNKNOWN_13 0x13
#define OPCODE_UNKNOWN_14 0x14
#define OPCODE_UNKNOWN_15 0x15
 
#define PALETTE_COUNT 256
 
typedef struct IPMVEContext {

    unsigned char *buf; /* scratch buffer (not used in the code visible here) */
    int buf_size;

    uint64_t frame_pts_inc;          /* pts increment per video frame, from the create-timer opcode (rate * subdivision) */

    unsigned int video_bpp;          /* 8 or 16, from the init-video-buffers opcode */
    unsigned int video_width;        /* in pixels (opcode stores them divided by 8) */
    unsigned int video_height;
    int64_t video_pts;               /* pts of the next video packet to emit */
    uint32_t palette[256];           /* current ARGB palette, 0xFF alpha */
    int has_palette;                 /* 1 when palette must be attached to the next video packet */
    int changed;                     /* nonzero when width/height changed; triggers a param-change side data */

    unsigned int audio_bits;         /* 8 or 16 */
    unsigned int audio_channels;     /* 1 or 2 */
    unsigned int audio_sample_rate;
    enum AVCodecID audio_type;       /* PCM_U8, PCM_S16LE or INTERPLAY_DPCM; NONE for silent files */
    unsigned int audio_frame_count;  /* running sample count, used as audio pts */

    int video_stream_index;
    int audio_stream_index;

    /* Offsets/sizes of pending payloads recorded while scanning opcodes;
     * an offset of 0 means "nothing pending" of that kind. */
    int64_t audio_chunk_offset;
    int audio_chunk_size;
    int64_t video_chunk_offset;
    int video_chunk_size;
    int64_t decode_map_chunk_offset;
    int decode_map_chunk_size;

    int64_t next_chunk_offset;       /* file position of the next chunk preamble */

} IPMVEContext;
 
/**
 * Emit a pending packet recorded by process_ipmovie_chunk().
 *
 * If an audio payload is pending, seek back to it and emit it as an audio
 * packet; otherwise, if a decoding map is pending, emit the decoding map
 * and the video data concatenated as one video packet (attaching palette
 * and size-change side data as needed). With nothing pending, seek to the
 * next chunk.
 *
 * @param s   demuxer state holding the pending offsets/sizes
 * @param pb  byte stream to read payloads from
 * @param pkt receives the emitted packet
 * @return CHUNK_VIDEO when a packet was produced, CHUNK_DONE when nothing
 *         was pending, or CHUNK_EOF / CHUNK_NOMEM / CHUNK_BAD on error
 */
static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
    AVPacket *pkt) {

    int chunk_type;

    if (s->audio_chunk_offset && s->audio_channels && s->audio_bits) {
        if (s->audio_type == AV_CODEC_ID_NONE) {
            /* Fix: the two literals previously concatenated without a
             * space, producing "beforeaudio codec". */
            av_log(NULL, AV_LOG_ERROR, "Can not read audio packet before "
                   "audio codec is known\n");
            return CHUNK_BAD;
        }

        /* adjust for PCM audio by skipping chunk header */
        if (s->audio_type != AV_CODEC_ID_INTERPLAY_DPCM) {
            s->audio_chunk_offset += 6;
            s->audio_chunk_size -= 6;
        }

        avio_seek(pb, s->audio_chunk_offset, SEEK_SET);
        s->audio_chunk_offset = 0;

        if (s->audio_chunk_size != av_get_packet(pb, pkt, s->audio_chunk_size))
            return CHUNK_EOF;

        pkt->stream_index = s->audio_stream_index;
        pkt->pts = s->audio_frame_count;

        /* audio frame maintenance: advance the sample counter so the next
         * audio packet's pts is correct */
        if (s->audio_type != AV_CODEC_ID_INTERPLAY_DPCM)
            s->audio_frame_count +=
                (s->audio_chunk_size / s->audio_channels / (s->audio_bits / 8));
        else
            s->audio_frame_count +=
                (s->audio_chunk_size - 6 - s->audio_channels) / s->audio_channels;

        av_dlog(NULL, "sending audio frame with pts %"PRId64" (%d audio frames)\n",
                pkt->pts, s->audio_frame_count);

        chunk_type = CHUNK_VIDEO;

    } else if (s->decode_map_chunk_offset) {

        /* send both the decode map and the video data together */

        if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size))
            return CHUNK_NOMEM;

        if (s->has_palette) {
            uint8_t *pal;

            pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                          AVPALETTE_SIZE);
            if (pal) {
                memcpy(pal, s->palette, AVPALETTE_SIZE);
                s->has_palette = 0;
            }
        }

        if (s->changed) {
            /* resolution changed since the last packet: notify decoders */
            ff_add_param_change(pkt, 0, 0, 0, s->video_width, s->video_height);
            s->changed = 0;
        }
        pkt->pos= s->decode_map_chunk_offset;
        avio_seek(pb, s->decode_map_chunk_offset, SEEK_SET);
        s->decode_map_chunk_offset = 0;

        if (avio_read(pb, pkt->data, s->decode_map_chunk_size) !=
            s->decode_map_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        avio_seek(pb, s->video_chunk_offset, SEEK_SET);
        s->video_chunk_offset = 0;

        if (avio_read(pb, pkt->data + s->decode_map_chunk_size,
            s->video_chunk_size) != s->video_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        pkt->stream_index = s->video_stream_index;
        pkt->pts = s->video_pts;

        av_dlog(NULL, "sending video frame with pts %"PRId64"\n", pkt->pts);

        s->video_pts += s->frame_pts_inc;

        chunk_type = CHUNK_VIDEO;

    } else {

        /* nothing pending: position the stream at the next chunk */
        avio_seek(pb, s->next_chunk_offset, SEEK_SET);
        chunk_type = CHUNK_DONE;

    }

    return chunk_type;
}
 
/* This function loads and processes a single chunk in an IP movie file.
 * It returns the type of chunk that was processed (a CHUNK_* constant,
 * CHUNK_BAD on any structural error).
 *
 * A chunk is a 4-byte preamble (16-bit LE size, 16-bit LE type) followed by
 * a sequence of opcodes, each with its own 4-byte preamble (16-bit LE size,
 * 8-bit type, 8-bit version). Most opcodes are only logged and skipped; the
 * audio-frame, decoding-map and video-data opcodes record their file offset
 * and size in *s so that load_ipmovie_packet() can seek back and emit them
 * as packets. */
static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
    AVPacket *pkt)
{
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type;
    int chunk_size;
    unsigned char opcode_preamble[OPCODE_PREAMBLE_SIZE];
    unsigned char opcode_type;
    unsigned char opcode_version;
    int opcode_size;
    unsigned char scratch[1024];
    int i, j;
    int first_color, last_color;
    int audio_flags;
    unsigned char r, g, b;
    unsigned int width, height;

    /* see if there are any pending packets */
    chunk_type = load_ipmovie_packet(s, pb, pkt);
    if (chunk_type != CHUNK_DONE)
        return chunk_type;

    /* read the next chunk, wherever the file happens to be pointing */
    if (url_feof(pb))
        return CHUNK_EOF;
    if (avio_read(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
        CHUNK_PREAMBLE_SIZE)
        return CHUNK_BAD;
    chunk_size = AV_RL16(&chunk_preamble[0]);
    chunk_type = AV_RL16(&chunk_preamble[2]);

    av_dlog(NULL, "chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size);

    /* validate the chunk type before walking its opcodes */
    switch (chunk_type) {

    case CHUNK_INIT_AUDIO:
        av_dlog(NULL, "initialize audio\n");
        break;

    case CHUNK_AUDIO_ONLY:
        av_dlog(NULL, "audio only\n");
        break;

    case CHUNK_INIT_VIDEO:
        av_dlog(NULL, "initialize video\n");
        break;

    case CHUNK_VIDEO:
        av_dlog(NULL, "video (and audio)\n");
        break;

    case CHUNK_SHUTDOWN:
        av_dlog(NULL, "shutdown\n");
        break;

    case CHUNK_END:
        av_dlog(NULL, "end\n");
        break;

    default:
        av_dlog(NULL, "invalid chunk\n");
        chunk_type = CHUNK_BAD;
        break;

    }

    /* walk the opcodes until the chunk's byte budget is exhausted */
    while ((chunk_size > 0) && (chunk_type != CHUNK_BAD)) {

        /* read the next chunk, wherever the file happens to be pointing */
        if (url_feof(pb)) {
            chunk_type = CHUNK_EOF;
            break;
        }
        if (avio_read(pb, opcode_preamble, CHUNK_PREAMBLE_SIZE) !=
            CHUNK_PREAMBLE_SIZE) {
            chunk_type = CHUNK_BAD;
            break;
        }

        opcode_size = AV_RL16(&opcode_preamble[0]);
        opcode_type = opcode_preamble[2];
        opcode_version = opcode_preamble[3];

        chunk_size -= OPCODE_PREAMBLE_SIZE;
        chunk_size -= opcode_size;
        if (chunk_size < 0) {
            av_dlog(NULL, "chunk_size countdown just went negative\n");
            chunk_type = CHUNK_BAD;
            break;
        }

        av_dlog(NULL, "  opcode type %02X, version %d, 0x%04X bytes: ",
                opcode_type, opcode_version, opcode_size);
        switch (opcode_type) {

        case OPCODE_END_OF_STREAM:
            av_dlog(NULL, "end of stream\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_END_OF_CHUNK:
            av_dlog(NULL, "end of chunk\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_CREATE_TIMER:
            av_dlog(NULL, "create timer\n");
            if ((opcode_version > 0) || (opcode_size > 6)) {
                av_dlog(NULL, "bad create_timer opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            /* pts increment = timer rate (32-bit) * subdivision (16-bit) */
            s->frame_pts_inc = ((uint64_t)AV_RL32(&scratch[0])) * AV_RL16(&scratch[4]);
            av_dlog(NULL, "  %.2f frames/second (timer div = %d, subdiv = %d)\n",
                    1000000.0 / s->frame_pts_inc, AV_RL32(&scratch[0]),
                    AV_RL16(&scratch[4]));
            break;

        case OPCODE_INIT_AUDIO_BUFFERS:
            av_dlog(NULL, "initialize audio buffers\n");
            if ((opcode_version > 1) || (opcode_size > 10)) {
                av_dlog(NULL, "bad init_audio_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->audio_sample_rate = AV_RL16(&scratch[4]);
            audio_flags = AV_RL16(&scratch[2]);
            /* bit 0 of the flags: 0 = mono, 1 = stereo */
            s->audio_channels = (audio_flags & 1) + 1;
            /* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */
            s->audio_bits = (((audio_flags >> 1) & 1) + 1) * 8;
            /* bit 2 indicates compressed audio in version 1 opcode */
            if ((opcode_version == 1) && (audio_flags & 0x4))
                s->audio_type = AV_CODEC_ID_INTERPLAY_DPCM;
            else if (s->audio_bits == 16)
                s->audio_type = AV_CODEC_ID_PCM_S16LE;
            else
                s->audio_type = AV_CODEC_ID_PCM_U8;
            av_dlog(NULL, "audio: %d bits, %d Hz, %s, %s format\n",
                    s->audio_bits, s->audio_sample_rate,
                    (s->audio_channels == 2) ? "stereo" : "mono",
                    (s->audio_type == AV_CODEC_ID_INTERPLAY_DPCM) ?
                    "Interplay audio" : "PCM");
            break;

        case OPCODE_START_STOP_AUDIO:
            av_dlog(NULL, "start/stop audio\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_INIT_VIDEO_BUFFERS:
            av_dlog(NULL, "initialize video buffers\n");
            if ((opcode_version > 2) || (opcode_size > 8)) {
                av_dlog(NULL, "bad init_video_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            /* dimensions are stored divided by 8 */
            width  = AV_RL16(&scratch[0]) * 8;
            height = AV_RL16(&scratch[2]) * 8;
            if (width != s->video_width) {
                s->video_width = width;
                s->changed++;
            }
            if (height != s->video_height) {
                s->video_height = height;
                s->changed++;
            }
            if (opcode_version < 2 || !AV_RL16(&scratch[6])) {
                s->video_bpp = 8;
            } else {
                s->video_bpp = 16;
            }
            av_dlog(NULL, "video resolution: %d x %d\n",
                    s->video_width, s->video_height);
            break;

        case OPCODE_UNKNOWN_06:
        case OPCODE_UNKNOWN_0E:
        case OPCODE_UNKNOWN_10:
        case OPCODE_UNKNOWN_12:
        case OPCODE_UNKNOWN_13:
        case OPCODE_UNKNOWN_14:
        case OPCODE_UNKNOWN_15:
            av_dlog(NULL, "unknown (but documented) opcode %02X\n", opcode_type);
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SEND_BUFFER:
            av_dlog(NULL, "send buffer\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_AUDIO_FRAME:
            av_dlog(NULL, "audio frame\n");

            /* log position and move on for now */
            s->audio_chunk_offset = avio_tell(pb);
            s->audio_chunk_size = opcode_size;
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SILENCE_FRAME:
            av_dlog(NULL, "silence frame\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_INIT_VIDEO_MODE:
            av_dlog(NULL, "initialize video mode\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_CREATE_GRADIENT:
            av_dlog(NULL, "create gradient\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SET_PALETTE:
            av_dlog(NULL, "set palette\n");
            /* check for the logical maximum palette size
             * (3 * 256 + 4 bytes) */
            if (opcode_size > 0x304) {
                av_dlog(NULL, "demux_ipmovie: set_palette opcode too large\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) != opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }

            /* load the palette into internal data structure */
            first_color = AV_RL16(&scratch[0]);
            last_color = first_color + AV_RL16(&scratch[2]) - 1;
            /* sanity check (since they are 16 bit values) */
            if ((first_color > 0xFF) || (last_color > 0xFF)) {
                av_dlog(NULL, "demux_ipmovie: set_palette indexes out of range (%d -> %d)\n",
                        first_color, last_color);
                chunk_type = CHUNK_BAD;
                break;
            }
            j = 4;  /* offset of first palette data */
            for (i = first_color; i <= last_color; i++) {
                /* the palette is stored as a 6-bit VGA palette, thus each
                 * component is shifted up to a 8-bit range */
                r = scratch[j++] * 4;
                g = scratch[j++] * 4;
                b = scratch[j++] * 4;
                s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | (b);
                /* replicate the top 2 bits into the low 2 bits so that a
                 * maximal 6-bit component maps to a full 0xFF */
                s->palette[i] |= s->palette[i] >> 6 & 0x30303;
            }
            s->has_palette = 1;
            break;

        case OPCODE_SET_PALETTE_COMPRESSED:
            av_dlog(NULL, "set palette compressed\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SET_DECODING_MAP:
            av_dlog(NULL, "set decoding map\n");

            /* log position and move on for now */
            s->decode_map_chunk_offset = avio_tell(pb);
            s->decode_map_chunk_size = opcode_size;
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_VIDEO_DATA:
            av_dlog(NULL, "set video data\n");

            /* log position and move on for now */
            s->video_chunk_offset = avio_tell(pb);
            s->video_chunk_size = opcode_size;
            avio_skip(pb, opcode_size);
            break;

        default:
            av_dlog(NULL, "*** unknown opcode type\n");
            chunk_type = CHUNK_BAD;
            break;

        }
    }

    /* make a note of where the stream is sitting */
    s->next_chunk_offset = avio_tell(pb);

    /* dispatch the first of any pending packets */
    if ((chunk_type == CHUNK_VIDEO) || (chunk_type == CHUNK_AUDIO_ONLY))
        chunk_type = load_ipmovie_packet(s, pb, pkt);

    return chunk_type;
}
 
static const char signature[] = "Interplay MVE File\x1A\0\x1A";

/* Probe for the Interplay MVE signature anywhere in the probe buffer.
 * Returns AVPROBE_SCORE_MAX when found, 0 otherwise. */
static int ipmovie_probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;
    const uint8_t *b_end;

    /* Refuse buffers smaller than the signature: the original do-while
     * ran memcmp() once before any bounds check, and for short buffers
     * b_end underflowed below b, causing an out-of-bounds read. */
    if (p->buf_size < sizeof(signature))
        return 0;
    b_end = p->buf + p->buf_size - sizeof(signature);

    while (b <= b_end) {
        if (b[0] == signature[0] && memcmp(b, signature, sizeof(signature)) == 0)
            return AVPROBE_SCORE_MAX;
        b++;
    }

    return 0;
}
 
/* Parse the MVE file header: scan for the file signature, process the
 * mandatory initial video-init chunk and the optional audio-init chunk,
 * then create the corresponding AVStream(s).
 * Returns 0 on success or a negative AVERROR code. */
static int ipmovie_read_header(AVFormatContext *s)
{
    IPMVEContext *ipmovie = s->priv_data;
    AVIOContext *pb = s->pb;
    AVPacket pkt;
    AVStream *st;
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type, i;
    uint8_t signature_buffer[sizeof(signature)];

    /* slide a signature-sized window forward one byte at a time until
     * the MVE signature is found or EOF is reached */
    avio_read(pb, signature_buffer, sizeof(signature_buffer));
    while (memcmp(signature_buffer, signature, sizeof(signature))) {
        memmove(signature_buffer, signature_buffer + 1, sizeof(signature_buffer) - 1);
        signature_buffer[sizeof(signature_buffer) - 1] = avio_r8(pb);
        if (url_feof(pb))
            return AVERROR_EOF;
    }
    /* initialize private context members */
    ipmovie->video_pts = ipmovie->audio_frame_count = 0;
    ipmovie->audio_chunk_offset = ipmovie->video_chunk_offset =
    ipmovie->decode_map_chunk_offset = 0;

    /* on the first read, this will position the stream at the first chunk */
    ipmovie->next_chunk_offset = avio_tell(pb) + 4;

    /* start from a fully-opaque (alpha-only) palette until a set-palette
     * opcode overwrites it */
    for (i = 0; i < 256; i++)
        ipmovie->palette[i] = 0xFFU << 24;

    /* process the first chunk which should be CHUNK_INIT_VIDEO */
    if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_VIDEO)
        return AVERROR_INVALIDDATA;

    /* peek ahead to the next chunk-- if it is an init audio chunk, process
     * it; if it is the first video chunk, this is a silent file */
    if (avio_read(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
        CHUNK_PREAMBLE_SIZE)
        return AVERROR(EIO);
    chunk_type = AV_RL16(&chunk_preamble[2]);
    /* rewind so the peeked chunk can still be consumed normally */
    avio_seek(pb, -CHUNK_PREAMBLE_SIZE, SEEK_CUR);

    if (chunk_type == CHUNK_VIDEO)
        ipmovie->audio_type = AV_CODEC_ID_NONE;  /* no audio */
    else if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_AUDIO)
        return AVERROR_INVALIDDATA;

    /* initialize the stream decoders */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 63, 1, 1000000);
    ipmovie->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_INTERPLAY_VIDEO;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = ipmovie->video_width;
    st->codec->height = ipmovie->video_height;
    st->codec->bits_per_coded_sample = ipmovie->video_bpp;

    if (ipmovie->audio_type) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        avpriv_set_pts_info(st, 32, 1, ipmovie->audio_sample_rate);
        ipmovie->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = ipmovie->audio_type;
        st->codec->codec_tag = 0;  /* no tag */
        st->codec->channels = ipmovie->audio_channels;
        st->codec->channel_layout = st->codec->channels == 1 ? AV_CH_LAYOUT_MONO :
                                                               AV_CH_LAYOUT_STEREO;
        st->codec->sample_rate = ipmovie->audio_sample_rate;
        st->codec->bits_per_coded_sample = ipmovie->audio_bits;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
        /* Interplay DPCM packs each sample into 4 bits, halving the rate */
        if (st->codec->codec_id == AV_CODEC_ID_INTERPLAY_DPCM)
            st->codec->bit_rate /= 2;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
    }

    return 0;
}
 
/* Fetch the next demuxed packet: pull chunks until one yields a packet
 * (CHUNK_VIDEO) or an error occurs; init chunks carry no payload and
 * are simply skipped. Returns 0 on success, negative AVERROR on error. */
static int ipmovie_read_packet(AVFormatContext *s,
                               AVPacket *pkt)
{
    IPMVEContext *ipmovie = s->priv_data;
    AVIOContext *pb = s->pb;

    for (;;) {
        int status = process_ipmovie_chunk(ipmovie, pb, pkt);

        switch (status) {
        case CHUNK_INIT_VIDEO:
        case CHUNK_INIT_AUDIO:
            continue;                        /* setup chunk; keep reading */
        case CHUNK_VIDEO:
            return 0;                        /* pkt now holds a frame */
        case CHUNK_BAD:
            return AVERROR_INVALIDDATA;
        case CHUNK_EOF:
            return AVERROR(EIO);
        case CHUNK_NOMEM:
            return AVERROR(ENOMEM);
        default:
            return -1;
        }
    }
}
 
/* Interplay MVE demuxer definition (no seeking support). */
AVInputFormat ff_ipmovie_demuxer = {
    .name           = "ipmovie",
    .long_name      = NULL_IF_CONFIG_SMALL("Interplay MVE"),
    .priv_data_size = sizeof(IPMVEContext),
    .read_probe     = ipmovie_probe,
    .read_header    = ipmovie_read_header,
    .read_packet    = ipmovie_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/ircam.c
0,0 → 1,47
/*
* IRCAM common code
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
 
/* codec-id <-> IRCAM format-tag map for little-endian files */
const AVCodecTag ff_codec_ircam_le_tags[] = {
    { AV_CODEC_ID_PCM_ALAW,  0x10001 },
    { AV_CODEC_ID_PCM_F32LE, 0x00004 },
    { AV_CODEC_ID_PCM_F64LE, 0x00008 },
    { AV_CODEC_ID_PCM_MULAW, 0x20001 },
    { AV_CODEC_ID_PCM_S16LE, 0x00002 },
    { AV_CODEC_ID_PCM_S24LE, 0x00003 },
    { AV_CODEC_ID_PCM_S32LE, 0x40004 },
    { AV_CODEC_ID_PCM_S8,    0x00001 },
    { AV_CODEC_ID_NONE,      0       },
};
 
/* codec-id <-> IRCAM format-tag map for big-endian files */
const AVCodecTag ff_codec_ircam_be_tags[] = {
    { AV_CODEC_ID_PCM_ALAW,  0x10001 },
    { AV_CODEC_ID_PCM_F32BE, 0x00004 },
    { AV_CODEC_ID_PCM_F64BE, 0x00008 },
    { AV_CODEC_ID_PCM_MULAW, 0x20001 },
    { AV_CODEC_ID_PCM_S16BE, 0x00002 },
    { AV_CODEC_ID_PCM_S24BE, 0x00003 },
    { AV_CODEC_ID_PCM_S32BE, 0x40004 },
    { AV_CODEC_ID_PCM_S8,    0x00001 },
    { AV_CODEC_ID_NONE,      0       },
};
/contrib/sdk/sources/ffmpeg/libavformat/ircam.h
0,0 → 1,30
/*
* IRCAM common code
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_IRCAM_H
#define AVFORMAT_IRCAM_H

#include "internal.h"

/* codec-id <-> IRCAM format-tag tables, defined in ircam.c */
extern const AVCodecTag ff_codec_ircam_be_tags[];
extern const AVCodecTag ff_codec_ircam_le_tags[];

#endif /* AVFORMAT_IRCAM_H */
/contrib/sdk/sources/ffmpeg/libavformat/ircamdec.c
0,0 → 1,115
/*
* IRCAM demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "pcm.h"
#include "ircam.h"
 
/* Probe for an IRCAM header: match either the big-endian magic
 * (64 A3 vv 00, version 1-4) or the little-endian magic
 * (00 vv A3 64, version 1-3), and require non-zero sample-rate and
 * channel-count fields at offsets 4 and 8.
 *
 * The field checks are now explicitly parenthesized: in the original
 * expression `A || B && C && D`, && bound tighter than ||, so the
 * AV_RN32 checks applied only to the little-endian branch. The header
 * parser rejects zero rate/channels in both byte orders, so applying
 * the checks to both branches only tightens probing. */
static int ircam_probe(AVProbeData *p)
{
    if (((p->buf[0] == 0x64 && p->buf[1] == 0xA3 && p->buf[3] == 0x00 &&
          p->buf[2] >= 1 && p->buf[2] <= 4) ||
         (p->buf[3] == 0x64 && p->buf[2] == 0xA3 && p->buf[0] == 0x00 &&
          p->buf[1] >= 1 && p->buf[1] <= 3)) &&
        AV_RN32(p->buf + 4) && AV_RN32(p->buf + 8))
        return AVPROBE_SCORE_MAX / 4 * 3;
    return 0;
}
 
/* Known IRCAM magic values and the byte order of the header fields that
 * follow them; is_le selects the reader for the rate/channels/tag
 * fields. NOTE(review): some "big-endian" magics (0x64A30200,
 * 0x64A30400) carry little-endian payloads — presumably different
 * host-platform variants of the format; verify against spec. */
static const struct endianess {
    uint32_t magic;
    int is_le;
} table[] = {
    { 0x64A30100, 0 },
    { 0x64A30200, 1 },
    { 0x64A30300, 0 },
    { 0x64A30400, 1 },
    { 0x0001A364, 1 },
    { 0x0002A364, 0 },
    { 0x0003A364, 1 },
};
 
/* Read the fixed 1024-byte IRCAM header: a 4-byte magic that selects the
 * byte order, a float sample rate, the channel count and a format tag,
 * followed by padding up to offset 1024. Creates the single audio
 * stream. Returns 0 on success or a negative AVERROR code. */
static int ircam_read_header(AVFormatContext *s)
{
    uint32_t magic, sample_rate, channels, tag;
    const AVCodecTag *tags;
    int le = -1, i;
    AVStream *st;

    /* identify the magic and, with it, the header byte order;
     * FF_ARRAY_ELEMS replaces the original hard-coded count of 7, which
     * could silently diverge from the table size */
    magic = avio_rl32(s->pb);
    for (i = 0; i < FF_ARRAY_ELEMS(table); i++) {
        if (magic == table[i].magic) {
            le = table[i].is_le;
            break;
        }
    }

    if (le == 1) {
        /* the rate is stored as an IEEE float; truncated to an integer here */
        sample_rate = av_int2float(avio_rl32(s->pb));
        channels    = avio_rl32(s->pb);
        tag         = avio_rl32(s->pb);
        tags        = ff_codec_ircam_le_tags;
    } else if (le == 0) {
        sample_rate = av_int2float(avio_rb32(s->pb));
        channels    = avio_rb32(s->pb);
        tag         = avio_rb32(s->pb);
        tags        = ff_codec_ircam_be_tags;
    } else {
        return AVERROR_INVALIDDATA;  /* unrecognized magic */
    }

    if (!channels || !sample_rate)
        return AVERROR_INVALIDDATA;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->channels    = channels;
    st->codec->sample_rate = sample_rate;

    st->codec->codec_id = ff_codec_get_id(tags, tag);
    if (st->codec->codec_id == AV_CODEC_ID_NONE) {
        av_log(s, AV_LOG_ERROR, "unknown tag %X\n", tag);
        return AVERROR_INVALIDDATA;
    }

    st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id);
    st->codec->block_align = st->codec->bits_per_coded_sample * st->codec->channels / 8;
    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    /* skip the remainder of the 1024-byte header (1024 - 16 read so far) */
    avio_skip(s->pb, 1008);

    return 0;
}
 
/* IRCAM demuxer definition; payload is raw PCM, handled by the generic
 * PCM read/seek helpers. */
AVInputFormat ff_ircam_demuxer = {
    .name           = "ircam",
    .long_name      = NULL_IF_CONFIG_SMALL("Berkeley/IRCAM/CARL Sound Format"),
    .read_probe     = ircam_probe,
    .read_header    = ircam_read_header,
    .read_packet    = ff_pcm_read_packet,
    .read_seek      = ff_pcm_read_seek,
    .extensions     = "sf,ircam",
    .flags          = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/ircamenc.c
0,0 → 1,62
/*
* IRCAM muxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "rawenc.h"
#include "ircam.h"
 
/* Write the fixed 1024-byte little-endian IRCAM header (magic, float
 * sample rate, channel count, format tag, zero padding) for the single
 * audio stream. Returns 0 on success or a negative AVERROR code. */
static int ircam_write_header(AVFormatContext *s)
{
    AVCodecContext *codec;
    uint32_t tag;

    /* validate the stream count BEFORE touching s->streams[0]; the
     * original dereferenced streams[0] first, reading out of bounds
     * when nb_streams == 0 */
    if (s->nb_streams != 1) {
        av_log(s, AV_LOG_ERROR, "only one stream is supported\n");
        return AVERROR(EINVAL);
    }
    codec = s->streams[0]->codec;

    tag = ff_codec_get_tag(ff_codec_ircam_le_tags, codec->codec_id);
    if (!tag) {
        av_log(s, AV_LOG_ERROR, "unsupported codec\n");
        return AVERROR(EINVAL);
    }

    avio_wl32(s->pb, 0x0001A364);                      /* LE magic, version 1 */
    avio_wl32(s->pb, av_float2int(codec->sample_rate)); /* rate stored as float bits */
    avio_wl32(s->pb, codec->channels);
    avio_wl32(s->pb, tag);
    ffio_fill(s->pb, 0, 1008);                          /* pad header to 1024 bytes */
    return 0;
}
 
/* IRCAM muxer definition; packets are written as raw PCM after the
 * 1024-byte header. */
AVOutputFormat ff_ircam_muxer = {
    .name          = "ircam",
    .extensions    = "sf,ircam",
    .long_name     = NULL_IF_CONFIG_SMALL("Berkeley/IRCAM/CARL Sound Format"),
    .audio_codec   = AV_CODEC_ID_PCM_S16LE,
    .video_codec   = AV_CODEC_ID_NONE,
    .write_header  = ircam_write_header,
    .write_packet  = ff_raw_write_packet,
    .codec_tag     = (const AVCodecTag *const []){ ff_codec_ircam_le_tags, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/isom.c
0,0 → 1,575
/*
* ISO Media common code
* Copyright (c) 2001 Fabrice Bellard
* Copyright (c) 2002 Francois Revol <revol@free.fr>
* Copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "isom.h"
#include "libavcodec/mpeg4audio.h"
#include "libavcodec/mpegaudiodata.h"
 
/* http://www.mp4ra.org */
/* ordered by muxing preference */
/* MP4 ObjectTypeIndication <-> codec-id map (used by the esds
 * DecoderConfigDescriptor parser and muxer). */
const AVCodecTag ff_mp4_obj_type[] = {
    { AV_CODEC_ID_MOV_TEXT    , 0x08 },
    { AV_CODEC_ID_MPEG4       , 0x20 },
    { AV_CODEC_ID_H264        , 0x21 },
    { AV_CODEC_ID_AAC         , 0x40 },
    { AV_CODEC_ID_MP4ALS      , 0x40 }, /* 14496-3 ALS */
    { AV_CODEC_ID_MPEG2VIDEO  , 0x61 }, /* MPEG2 Main */
    { AV_CODEC_ID_MPEG2VIDEO  , 0x60 }, /* MPEG2 Simple */
    { AV_CODEC_ID_MPEG2VIDEO  , 0x62 }, /* MPEG2 SNR */
    { AV_CODEC_ID_MPEG2VIDEO  , 0x63 }, /* MPEG2 Spatial */
    { AV_CODEC_ID_MPEG2VIDEO  , 0x64 }, /* MPEG2 High */
    { AV_CODEC_ID_MPEG2VIDEO  , 0x65 }, /* MPEG2 422 */
    { AV_CODEC_ID_AAC         , 0x66 }, /* MPEG2 AAC Main */
    { AV_CODEC_ID_AAC         , 0x67 }, /* MPEG2 AAC Low */
    { AV_CODEC_ID_AAC         , 0x68 }, /* MPEG2 AAC SSR */
    { AV_CODEC_ID_MP3         , 0x69 }, /* 13818-3 */
    { AV_CODEC_ID_MP2         , 0x69 }, /* 11172-3 */
    { AV_CODEC_ID_MPEG1VIDEO  , 0x6A }, /* 11172-2 */
    { AV_CODEC_ID_MP3         , 0x6B }, /* 11172-3 */
    { AV_CODEC_ID_MJPEG       , 0x6C }, /* 10918-1 */
    { AV_CODEC_ID_PNG         , 0x6D },
    { AV_CODEC_ID_JPEG2000    , 0x6E }, /* 15444-1 */
    { AV_CODEC_ID_VC1         , 0xA3 },
    { AV_CODEC_ID_DIRAC       , 0xA4 },
    { AV_CODEC_ID_AC3         , 0xA5 },
    { AV_CODEC_ID_DTS         , 0xA9 }, /* mp4ra.org */
    { AV_CODEC_ID_VORBIS      , 0xDD }, /* non standard, gpac uses it */
    { AV_CODEC_ID_DVD_SUBTITLE, 0xE0 }, /* non standard, see unsupported-embedded-subs-2.mp4 */
    { AV_CODEC_ID_QCELP       , 0xE1 },
    { AV_CODEC_ID_MPEG4SYSTEMS, 0x01 },
    { AV_CODEC_ID_MPEG4SYSTEMS, 0x02 },
    { AV_CODEC_ID_NONE        ,    0 },
};
 
/* fourcc <-> codec-id map for video sample descriptions in MOV/MP4;
 * ordered by muxing preference within each codec group. */
const AVCodecTag ff_codec_movvideo_tags[] = {
/*  { AV_CODEC_ID_, MKTAG('I', 'V', '5', '0') }, *//* Indeo 5.0 */

    { AV_CODEC_ID_RAWVIDEO, MKTAG('r', 'a', 'w', ' ') }, /* Uncompressed RGB */
    { AV_CODEC_ID_RAWVIDEO, MKTAG('y', 'u', 'v', '2') }, /* Uncompressed YUV422 */
    { AV_CODEC_ID_RAWVIDEO, MKTAG('2', 'v', 'u', 'y') }, /* UNCOMPRESSED 8BIT 4:2:2 */
    { AV_CODEC_ID_RAWVIDEO, MKTAG('y', 'u', 'v', 's') }, /* same as 2vuy but byte swapped */

    { AV_CODEC_ID_RAWVIDEO, MKTAG('L', '5', '5', '5') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('L', '5', '6', '5') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', '5', '6', '5') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('2', '4', 'B', 'G') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', 'G', 'R', 'A') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', 'A') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('A', 'B', 'G', 'R') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('b', '1', '6', 'g') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('b', '4', '8', 'r') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('b', 'x', 'b', 'g') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('b', 'x', 'r', 'g') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('b', 'x', 'y', 'v') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('N', 'O', '1', '6') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('D', 'V', 'O', 'O') }, /* Digital Voodoo SD 8 Bit */
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', '4', '2', '0') }, /* Radius DV YUV PAL */
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', '4', '1', '1') }, /* Radius DV YUV NTSC */

    { AV_CODEC_ID_R10K,   MKTAG('R', '1', '0', 'k') }, /* UNCOMPRESSED 10BIT RGB */
    { AV_CODEC_ID_R10K,   MKTAG('R', '1', '0', 'g') }, /* UNCOMPRESSED 10BIT RGB */
    { AV_CODEC_ID_R210,   MKTAG('r', '2', '1', '0') }, /* UNCOMPRESSED 10BIT RGB */
    { AV_CODEC_ID_AVUI,   MKTAG('A', 'V', 'U', 'I') }, /* AVID Uncompressed deinterleaved UYVY422 */
    { AV_CODEC_ID_AVRP,   MKTAG('A', 'V', 'r', 'p') }, /* Avid 1:1 10-bit RGB Packer */
    { AV_CODEC_ID_AVRP,   MKTAG('S', 'U', 'D', 'S') }, /* Avid DS Uncompressed */
    { AV_CODEC_ID_V210,   MKTAG('v', '2', '1', '0') }, /* UNCOMPRESSED 10BIT 4:2:2 */
    { AV_CODEC_ID_V210,   MKTAG('b', 'x', 'y', '2') }, /* BOXX 10BIT 4:2:2 */
    { AV_CODEC_ID_V308,   MKTAG('v', '3', '0', '8') }, /* UNCOMPRESSED  8BIT 4:4:4 */
    { AV_CODEC_ID_V408,   MKTAG('v', '4', '0', '8') }, /* UNCOMPRESSED  8BIT 4:4:4:4 */
    { AV_CODEC_ID_V410,   MKTAG('v', '4', '1', '0') }, /* UNCOMPRESSED 10BIT 4:4:4 */
    { AV_CODEC_ID_Y41P,   MKTAG('Y', '4', '1', 'P') }, /* UNCOMPRESSED 12BIT 4:1:1 */
    { AV_CODEC_ID_YUV4,   MKTAG('y', 'u', 'v', '4') }, /* libquicktime packed yuv420p */
    { AV_CODEC_ID_TARGA_Y216, MKTAG('Y', '2', '1', '6') },

    { AV_CODEC_ID_MJPEG,  MKTAG('j', 'p', 'e', 'g') }, /* PhotoJPEG */
    { AV_CODEC_ID_MJPEG,  MKTAG('m', 'j', 'p', 'a') }, /* Motion-JPEG (format A) */
    { AV_CODEC_ID_MJPEG,  MKTAG('A', 'V', 'D', 'J') }, /* MJPEG with alpha-channel (AVID JFIF meridien compressed) */
/*  { AV_CODEC_ID_MJPEG,  MKTAG('A', 'V', 'R', 'n') }, *//* MJPEG with alpha-channel (AVID ABVB/Truevision NuVista) */
    { AV_CODEC_ID_MJPEG,  MKTAG('d', 'm', 'b', '1') }, /* Motion JPEG OpenDML */
    { AV_CODEC_ID_MJPEGB, MKTAG('m', 'j', 'p', 'b') }, /* Motion-JPEG (format B) */

    { AV_CODEC_ID_SVQ1, MKTAG('S', 'V', 'Q', '1') }, /* Sorenson Video v1 */
    { AV_CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') }, /* Sorenson Video v1 */
    { AV_CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', 'i') }, /* Sorenson Video v1 (from QT specs)*/
    { AV_CODEC_ID_SVQ3, MKTAG('S', 'V', 'Q', '3') }, /* Sorenson Video v3 */

    { AV_CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
    { AV_CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') }, /* OpenDiVX *//* sample files at http://heroinewarrior.com/xmovie.php3 use this tag */
    { AV_CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') },
    { AV_CODEC_ID_MPEG4, MKTAG('3', 'I', 'V', '2') }, /* experimental: 3IVX files before ivx D4 4.5.1 */

    { AV_CODEC_ID_H263, MKTAG('h', '2', '6', '3') }, /* H263 */
    { AV_CODEC_ID_H263, MKTAG('s', '2', '6', '3') }, /* H263 ?? works */

    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', 'p') }, /* DV PAL */
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', ' ') }, /* DV NTSC */
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'p', 'p') }, /* DVCPRO PAL produced by FCP */
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'p') }, /* DVCPRO50 PAL produced by FCP */
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'n') }, /* DVCPRO50 NTSC produced by FCP */
    { AV_CODEC_ID_DVVIDEO, MKTAG('A', 'V', 'd', 'v') }, /* AVID DV */
    { AV_CODEC_ID_DVVIDEO, MKTAG('A', 'V', 'd', '1') }, /* AVID DV100 */
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'q') }, /* DVCPRO HD 720p50 */
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'p') }, /* DVCPRO HD 720p60 */
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '1') },
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '2') },
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '4') },
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '5') }, /* DVCPRO HD 50i produced by FCP */
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '6') }, /* DVCPRO HD 60i produced by FCP */
    { AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '3') }, /* DVCPRO HD 30p produced by FCP */

    { AV_CODEC_ID_VP3,     MKTAG('V', 'P', '3', '1') }, /* On2 VP3 */
    { AV_CODEC_ID_RPZA,    MKTAG('r', 'p', 'z', 'a') }, /* Apple Video (RPZA) */
    { AV_CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') }, /* Cinepak */
    { AV_CODEC_ID_8BPS,    MKTAG('8', 'B', 'P', 'S') }, /* Planar RGB (8BPS) */
    { AV_CODEC_ID_SMC,     MKTAG('s', 'm', 'c', ' ') }, /* Apple Graphics (SMC) */
    { AV_CODEC_ID_QTRLE,   MKTAG('r', 'l', 'e', ' ') }, /* Apple Animation (RLE) */
    { AV_CODEC_ID_SGIRLE,  MKTAG('r', 'l', 'e', '1') }, /* SGI RLE 8-bit */
    { AV_CODEC_ID_MSRLE,   MKTAG('W', 'R', 'L', 'E') },
    { AV_CODEC_ID_QDRAW,   MKTAG('q', 'd', 'r', 'w') }, /* QuickDraw */

    { AV_CODEC_ID_RAWVIDEO, MKTAG('W', 'R', 'A', 'W') },

    { AV_CODEC_ID_HEVC, MKTAG('h', 'v', 'c', '1') }, /* HEVC/H.265 which indicates parameter sets shall not be in ES */
    { AV_CODEC_ID_HEVC, MKTAG('h', 'e', 'v', '1') }, /* HEVC/H.265 which indicates parameter sets may be in ES */

    { AV_CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') }, /* AVC-1/H.264 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '5', 'p') }, /* AVC-Intra  50M 720p24/30/60 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '5', 'q') }, /* AVC-Intra  50M 720p25/50 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '5', '2') }, /* AVC-Intra  50M 1080p25/50 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '5', '3') }, /* AVC-Intra  50M 1080p24/30/60 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '5', '5') }, /* AVC-Intra  50M 1080i50 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '5', '6') }, /* AVC-Intra  50M 1080i60 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '1', 'p') }, /* AVC-Intra 100M 720p24/30/60 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '1', 'q') }, /* AVC-Intra 100M 720p25/50 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '1', '2') }, /* AVC-Intra 100M 1080p25/50 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '1', '3') }, /* AVC-Intra 100M 1080p24/30/60 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '1', '5') }, /* AVC-Intra 100M 1080i50 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', '1', '6') }, /* AVC-Intra 100M 1080i60 */
    { AV_CODEC_ID_H264, MKTAG('a', 'i', 'v', 'x') }, /* XAVC 4:2:2 10bit */
    { AV_CODEC_ID_H264, MKTAG('A', 'V', 'i', 'n') }, /* AVC-Intra with implicit SPS/PPS */

    { AV_CODEC_ID_MPEG1VIDEO, MKTAG('m', '1', 'v', '1') }, /* Apple MPEG-1 Camcorder */
    { AV_CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'e', 'g') }, /* MPEG */
    { AV_CODEC_ID_MPEG1VIDEO, MKTAG('m', '1', 'v', ' ') },
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', '2', 'v', '1') }, /* Apple MPEG-2 Camcorder */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '1') }, /* MPEG2 HDV 720p30 */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '2') }, /* MPEG2 HDV 1080i60 */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '3') }, /* MPEG2 HDV 1080i50 */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '4') }, /* MPEG2 HDV 720p24 */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '5') }, /* MPEG2 HDV 720p25 */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '6') }, /* MPEG2 HDV 1080p24 */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '7') }, /* MPEG2 HDV 1080p25 */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '8') }, /* MPEG2 HDV 1080p30 */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '9') }, /* MPEG2 HDV 720p60 JVC */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', 'a') }, /* MPEG2 HDV 720p50 */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'n') }, /* MPEG2 IMX NTSC 525/60 50mb/s produced by FCP */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'p') }, /* MPEG2 IMX PAL 625/50 50mb/s produced by FCP */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '4', 'n') }, /* MPEG2 IMX NTSC 525/60 40mb/s produced by FCP */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '4', 'p') }, /* MPEG2 IMX PAL 625/50 40mb/s produced by FCP */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'n') }, /* MPEG2 IMX NTSC 525/60 30mb/s produced by FCP */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'p') }, /* MPEG2 IMX PAL 625/50 30mb/s produced by FCP */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', '1') }, /* XDCAM HD422 720p30 CBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', '4') }, /* XDCAM HD422 720p24 CBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', '5') }, /* XDCAM HD422 720p25 CBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', '9') }, /* XDCAM HD422 720p60 CBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'a') }, /* XDCAM HD422 720p50 CBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'b') }, /* XDCAM HD422 1080i60 CBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'c') }, /* XDCAM HD422 1080i50 CBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'd') }, /* XDCAM HD422 1080p24 CBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'e') }, /* XDCAM HD422 1080p25 CBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'f') }, /* XDCAM HD422 1080p30 CBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '1') }, /* XDCAM EX 720p30 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '2') }, /* XDCAM HD 1080i60 */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '3') }, /* XDCAM HD 1080i50 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '4') }, /* XDCAM EX 720p24 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '5') }, /* XDCAM EX 720p25 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '6') }, /* XDCAM HD 1080p24 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '7') }, /* XDCAM HD 1080p25 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '8') }, /* XDCAM HD 1080p30 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '9') }, /* XDCAM EX 720p60 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'a') }, /* XDCAM EX 720p50 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'b') }, /* XDCAM EX 1080i60 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'c') }, /* XDCAM EX 1080i50 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'd') }, /* XDCAM EX 1080p24 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'e') }, /* XDCAM EX 1080p25 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'f') }, /* XDCAM EX 1080p30 VBR */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'h', 'd') }, /* XDCAM HD 540p */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'h', '2') }, /* XDCAM HD422 540p */
    { AV_CODEC_ID_MPEG2VIDEO, MKTAG('A', 'V', 'm', 'p') }, /* AVID IMX PAL */

    { AV_CODEC_ID_JPEG2000, MKTAG('m', 'j', 'p', '2') }, /* JPEG 2000 produced by FCP */

    { AV_CODEC_ID_TARGA, MKTAG('t', 'g', 'a', ' ') }, /* Truevision Targa */
    { AV_CODEC_ID_TIFF,  MKTAG('t', 'i', 'f', 'f') }, /* TIFF embedded in MOV */
    { AV_CODEC_ID_GIF,   MKTAG('g', 'i', 'f', ' ') }, /* embedded gif files as frames (usually one "click to play movie" frame) */
    { AV_CODEC_ID_PNG,   MKTAG('p', 'n', 'g', ' ') },
    { AV_CODEC_ID_PNG,   MKTAG('M', 'N', 'G', ' ') },

    { AV_CODEC_ID_VC1,  MKTAG('v', 'c', '-', '1') }, /* SMPTE RP 2025 */
    { AV_CODEC_ID_CAVS, MKTAG('a', 'v', 's', '2') },

    { AV_CODEC_ID_DIRAC,     MKTAG('d', 'r', 'a', 'c') },
    { AV_CODEC_ID_DNXHD,     MKTAG('A', 'V', 'd', 'n') }, /* AVID DNxHD */
    { AV_CODEC_ID_H263,      MKTAG('H', '2', '6', '3') },
    { AV_CODEC_ID_MSMPEG4V3, MKTAG('3', 'I', 'V', 'D') }, /* 3ivx DivX Doctor */
    { AV_CODEC_ID_RAWVIDEO,  MKTAG('A', 'V', '1', 'x') }, /* AVID 1:1x */
    { AV_CODEC_ID_RAWVIDEO,  MKTAG('A', 'V', 'u', 'p') },
    { AV_CODEC_ID_SGI,       MKTAG('s', 'g', 'i', ' ') }, /* SGI */
    { AV_CODEC_ID_DPX,       MKTAG('d', 'p', 'x', ' ') }, /* DPX */
    { AV_CODEC_ID_EXR,       MKTAG('e', 'x', 'r', ' ') }, /* OpenEXR */

    { AV_CODEC_ID_PRORES, MKTAG('a', 'p', 'c', 'h') }, /* Apple ProRes 422 High Quality */
    { AV_CODEC_ID_PRORES, MKTAG('a', 'p', 'c', 'n') }, /* Apple ProRes 422 Standard Definition */
    { AV_CODEC_ID_PRORES, MKTAG('a', 'p', 'c', 's') }, /* Apple ProRes 422 LT */
    { AV_CODEC_ID_PRORES, MKTAG('a', 'p', 'c', 'o') }, /* Apple ProRes 422 Proxy */
    { AV_CODEC_ID_PRORES, MKTAG('a', 'p', '4', 'h') }, /* Apple ProRes 4444 */
    { AV_CODEC_ID_FLIC,   MKTAG('f', 'l', 'i', 'c') },

    { AV_CODEC_ID_AIC, MKTAG('i', 'c', 'o', 'd') },

    { AV_CODEC_ID_NONE, 0 },
};
 
/* fourcc <-> codec-id map for audio sample descriptions in MOV/MP4 */
const AVCodecTag ff_codec_movaudio_tags[] = {
    { AV_CODEC_ID_AAC,           MKTAG('m', 'p', '4', 'a') },
    { AV_CODEC_ID_AC3,           MKTAG('a', 'c', '-', '3') }, /* ETSI TS 102 366 Annex F */
    { AV_CODEC_ID_AC3,           MKTAG('s', 'a', 'c', '3') }, /* Nero Recode */
    { AV_CODEC_ID_ADPCM_IMA_QT,  MKTAG('i', 'm', 'a', '4') },
    { AV_CODEC_ID_ALAC,          MKTAG('a', 'l', 'a', 'c') },
    { AV_CODEC_ID_AMR_NB,        MKTAG('s', 'a', 'm', 'r') }, /* AMR-NB 3gp */
    { AV_CODEC_ID_AMR_WB,        MKTAG('s', 'a', 'w', 'b') }, /* AMR-WB 3gp */
    { AV_CODEC_ID_DTS,           MKTAG('d', 't', 's', 'c') }, /* DTS formats prior to DTS-HD */
    { AV_CODEC_ID_DTS,           MKTAG('d', 't', 's', 'h') }, /* DTS-HD audio formats */
    { AV_CODEC_ID_DTS,           MKTAG('d', 't', 's', 'l') }, /* DTS-HD Lossless formats */
    { AV_CODEC_ID_DTS,           MKTAG('D', 'T', 'S', ' ') }, /* non-standard */
    { AV_CODEC_ID_EAC3,          MKTAG('e', 'c', '-', '3') }, /* ETSI TS 102 366 Annex F (only valid in ISOBMFF) */
    { AV_CODEC_ID_DVAUDIO,       MKTAG('v', 'd', 'v', 'a') },
    { AV_CODEC_ID_DVAUDIO,       MKTAG('d', 'v', 'c', 'a') },
    { AV_CODEC_ID_GSM,           MKTAG('a', 'g', 's', 'm') },
    { AV_CODEC_ID_ILBC,          MKTAG('i', 'l', 'b', 'c') },
    { AV_CODEC_ID_MACE3,         MKTAG('M', 'A', 'C', '3') },
    { AV_CODEC_ID_MACE6,         MKTAG('M', 'A', 'C', '6') },
    { AV_CODEC_ID_MP1,           MKTAG('.', 'm', 'p', '1') },
    { AV_CODEC_ID_MP2,           MKTAG('.', 'm', 'p', '2') },
    { AV_CODEC_ID_MP3,           MKTAG('.', 'm', 'p', '3') },
    { AV_CODEC_ID_MP3,           0x6D730055                }, /* WAVE_FORMAT_MPEGLAYER3, numeric tag */
    { AV_CODEC_ID_NELLYMOSER,    MKTAG('n', 'm', 'o', 's') }, /* Flash Media Server */
    { AV_CODEC_ID_PCM_ALAW,      MKTAG('a', 'l', 'a', 'w') },
    { AV_CODEC_ID_PCM_F32BE,     MKTAG('f', 'l', '3', '2') },
    { AV_CODEC_ID_PCM_F32LE,     MKTAG('f', 'l', '3', '2') },
    { AV_CODEC_ID_PCM_F64BE,     MKTAG('f', 'l', '6', '4') },
    { AV_CODEC_ID_PCM_F64LE,     MKTAG('f', 'l', '6', '4') },
    { AV_CODEC_ID_PCM_MULAW,     MKTAG('u', 'l', 'a', 'w') },
    { AV_CODEC_ID_PCM_S16BE,     MKTAG('t', 'w', 'o', 's') },
    { AV_CODEC_ID_PCM_S16LE,     MKTAG('s', 'o', 'w', 't') },
    { AV_CODEC_ID_PCM_S16LE,     MKTAG('l', 'p', 'c', 'm') },
    { AV_CODEC_ID_PCM_S24BE,     MKTAG('i', 'n', '2', '4') },
    { AV_CODEC_ID_PCM_S24LE,     MKTAG('i', 'n', '2', '4') },
    { AV_CODEC_ID_PCM_S32BE,     MKTAG('i', 'n', '3', '2') },
    { AV_CODEC_ID_PCM_S32LE,     MKTAG('i', 'n', '3', '2') },
    { AV_CODEC_ID_PCM_S8,        MKTAG('s', 'o', 'w', 't') },
    { AV_CODEC_ID_PCM_U8,        MKTAG('r', 'a', 'w', ' ') },
    { AV_CODEC_ID_PCM_U8,        MKTAG('N', 'O', 'N', 'E') },
    { AV_CODEC_ID_QCELP,         MKTAG('Q', 'c', 'l', 'p') },
    { AV_CODEC_ID_QCELP,         MKTAG('Q', 'c', 'l', 'q') },
    { AV_CODEC_ID_QCELP,         MKTAG('s', 'q', 'c', 'p') }, /* ISO Media fourcc */
    { AV_CODEC_ID_QDM2,          MKTAG('Q', 'D', 'M', '2') },
    { AV_CODEC_ID_QDMC,          MKTAG('Q', 'D', 'M', 'C') },
    { AV_CODEC_ID_SPEEX,         MKTAG('s', 'p', 'e', 'x') }, /* Flash Media Server */
    { AV_CODEC_ID_SPEEX,         MKTAG('S', 'P', 'X', 'N') },
    { AV_CODEC_ID_WMAV2,         MKTAG('W', 'M', 'A', '2') },
    { AV_CODEC_ID_EVRC,          MKTAG('s', 'e', 'v', 'c') }, /* 3GPP2 */
    { AV_CODEC_ID_SMV,           MKTAG('s', 's', 'm', 'v') }, /* 3GPP2 */
    { AV_CODEC_ID_NONE, 0 },
};
 
/* fourcc <-> codec-id map for subtitle sample descriptions in MOV/MP4 */
const AVCodecTag ff_codec_movsubtitle_tags[] = {
    { AV_CODEC_ID_MOV_TEXT, MKTAG('t', 'e', 'x', 't') },
    { AV_CODEC_ID_MOV_TEXT, MKTAG('t', 'x', '3', 'g') },
    { AV_CODEC_ID_EIA_608,  MKTAG('c', '6', '0', '8') },
    { AV_CODEC_ID_NONE, 0 },
};
 
/* map numeric codes from mdhd atom to ISO 639 */
/* cf. QTFileFormat.pdf p253, qtff.pdf p205 */
/* http://developer.apple.com/documentation/mac/Text/Text-368.html */
/* deprecated by putting the code as 3*5bit ascii */
/* Apple numeric language code -> ISO 639 string; the array index IS the
 * legacy QuickTime language code, empty entries mark unassigned codes. */
static const char mov_mdhd_language_map[][4] = {
    /* 0-9 */
    "eng", "fra", "ger", "ita", "dut", "sve", "spa", "dan", "por", "nor",
    "heb", "jpn", "ara", "fin", "gre", "ice", "mlt", "tur", "hr "/*scr*/, "chi"/*ace?*/,
    "urd", "hin", "tha", "kor", "lit", "pol", "hun", "est", "lav", "",
    "fo ", "", "rus", "chi", "", "iri", "alb", "ron", "ces", "slk",
    "slv", "yid", "sr ", "mac", "bul", "ukr", "bel", "uzb", "kaz", "aze",
    /*?*/
    "aze", "arm", "geo", "mol", "kir", "tgk", "tuk", "mon", "", "pus",
    "kur", "kas", "snd", "tib", "nep", "san", "mar", "ben", "asm", "guj",
    "pa ", "ori", "mal", "kan", "tam", "tel", "", "bur", "khm", "lao",
    /* roman? arabic? */
    "vie", "ind", "tgl", "may", "may", "amh", "tir", "orm", "som", "swa",
    /*==rundi?*/
    "", "run", "", "mlg", "epo", "", "", "", "", "",
    /* 100 */
    "", "", "", "", "", "", "", "", "", "",
    "", "", "", "", "", "", "", "", "", "",
    "", "", "", "", "", "", "", "", "wel", "baq",
    "cat", "lat", "que", "grn", "aym", "tat", "uig", "dzo", "jav"
};
 
/**
 * Convert an ISO 639 language string to a QuickTime/MP4 language code.
 *
 * @param lang NUL-terminated 3-letter language code
 * @param mp4  non-zero: produce the MP4 packed 5-bit-per-letter code;
 *             zero: look up the legacy QuickTime numeric code
 * @return the language code, or -1 if the string cannot be encoded
 */
int ff_mov_iso639_to_lang(const char lang[4], int mp4)
{
    int idx, packed = 0;

    /* legacy QuickTime path: linear search of the numeric-code table */
    if (!mp4) {
        if (lang[0]) {
            for (idx = 0; idx < FF_ARRAY_ELEMS(mov_mdhd_language_map); idx++)
                if (!strcmp(lang, mov_mdhd_language_map[idx]))
                    return idx;
        }
        /* XXX:can we do that in mov too? */
        return -1;
    }

    /* handle undefined as such */
    if (lang[0] == '\0')
        lang = "und";

    /* MP4 path: pack three letters as 5-bit values biased by 0x60 */
    for (idx = 0; idx < 3; idx++) {
        uint8_t c = lang[idx];
        c -= 0x60;
        if (c > 0x1f)
            return -1;
        packed = (packed << 5) | c;
    }
    return packed;
}
 
/**
 * Decode a QuickTime/MP4 language code into an ISO 639 string.
 *
 * @param code language code as read from an mdhd atom
 * @param to   receives the NUL-terminated 3-letter code (4 bytes written)
 * @return 1 on success, 0 if the code has no mapping
 */
int ff_mov_lang_to_iso639(unsigned code, char to[4])
{
    memset(to, 0, 4);

    /* packed 5-bit-per-letter form? (>= 0x400; 0x7fff means unspecified) */
    /* see http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt */
    if (code >= 0x400 && code != 0x7fff) {
        int pos;
        for (pos = 2; pos >= 0; pos--) {
            to[pos] = 0x60 + (code & 0x1f);
            code >>= 5;
        }
        return 1;
    }

    /* old fashion apple lang code: table lookup, rejecting out-of-range
     * codes and unassigned (empty) entries */
    if (code >= FF_ARRAY_ELEMS(mov_mdhd_language_map) ||
        !mov_mdhd_language_map[code][0])
        return 0;
    memcpy(to, mov_mdhd_language_map[code], 4);
    return 1;
}
 
/**
 * Read an MPEG-4 Systems "expandable" descriptor length:
 * up to 4 bytes of 7 payload bits each; the MSB of a byte signals
 * that another length byte follows.
 */
int ff_mp4_read_descr_len(AVIOContext *pb)
{
    int len = 0;
    int i;

    for (i = 0; i < 4; i++) {
        int byte = avio_r8(pb);
        len = (len << 7) | (byte & 0x7f);
        if (!(byte & 0x80))
            break;
    }
    return len;
}
 
/**
 * Read an MPEG-4 Systems descriptor header.
 *
 * @param tag receives the descriptor tag byte
 * @return descriptor payload length
 */
int ff_mp4_read_descr(AVFormatContext *fc, AVIOContext *pb, int *tag)
{
    int descr_len;

    *tag      = avio_r8(pb);
    descr_len = ff_mp4_read_descr_len(pb);
    av_dlog(fc, "MPEG4 description: tag=0x%02x len=%d\n", *tag, descr_len);
    return descr_len;
}
 
/**
 * Parse an ES_Descriptor header, skipping the optional fields
 * selected by its flag byte.
 *
 * @param es_id if non-NULL, receives the elementary stream id
 */
void ff_mp4_parse_es_descr(AVIOContext *pb, int *es_id)
{
    int id    = avio_rb16(pb);
    int flags = avio_r8(pb);

    if (es_id)
        *es_id = id;
    if (flags & 0x80) /* streamDependenceFlag: dependsOn_ES_ID */
        avio_rb16(pb);
    if (flags & 0x40) { /* URL_Flag: length-prefixed URL string */
        int url_len = avio_r8(pb);
        avio_skip(pb, url_len);
    }
    if (flags & 0x20) /* OCRstreamFlag: OCR_ES_Id */
        avio_rb16(pb);
}
 
/* MPEG-4 audio object types that map to a codec other than plain AAC. */
static const AVCodecTag mp4_audio_types[] = {
    { AV_CODEC_ID_MP3ON4, AOT_PS   }, /* old mp3on4 draft */
    { AV_CODEC_ID_MP3ON4, AOT_L1   }, /* layer 1 */
    { AV_CODEC_ID_MP3ON4, AOT_L2   }, /* layer 2 */
    { AV_CODEC_ID_MP3ON4, AOT_L3   }, /* layer 3 */
    { AV_CODEC_ID_MP4ALS, AOT_ALS  }, /* MPEG-4 ALS */
    { AV_CODEC_ID_NONE,   AOT_NULL },
};
 
/**
 * Parse a DecoderConfigDescriptor (from an 'esds' atom): set the stream's
 * codec id from the object type, and copy the DecoderSpecificInfo into
 * extradata.  For AAC, also derive channels/sample rate from that config.
 *
 * @return 0 on success, -1 or AVERROR(ENOMEM) on failure
 */
int ff_mp4_read_dec_config_descr(AVFormatContext *fc, AVStream *st, AVIOContext *pb)
{
    int len, tag;
    int object_type_id = avio_r8(pb);
    avio_r8(pb); /* stream type */
    avio_rb24(pb); /* buffer size db */
    avio_rb32(pb); /* max bitrate */
    avio_rb32(pb); /* avg bitrate */

    /* refuse to touch codec parameters once a decoder is already open */
    if(avcodec_is_open(st->codec)) {
        av_log(fc, AV_LOG_DEBUG, "codec open in read_dec_config_descr\n");
        return -1;
    }

    st->codec->codec_id= ff_codec_get_id(ff_mp4_obj_type, object_type_id);
    av_dlog(fc, "esds object type id 0x%02x\n", object_type_id);
    len = ff_mp4_read_descr(fc, pb, &tag);
    if (tag == MP4DecSpecificDescrTag) {
        av_dlog(fc, "Specific MPEG4 header len=%d\n", len);
        /* reject empty or absurdly large (>1 GiB) extradata */
        if (!len || (uint64_t)len > (1<<30))
            return -1;
        av_free(st->codec->extradata);
        if (ff_alloc_extradata(st->codec, len))
            return AVERROR(ENOMEM);
        avio_read(pb, st->codec->extradata, len);
        if (st->codec->codec_id == AV_CODEC_ID_AAC) {
            MPEG4AudioConfig cfg;
            avpriv_mpeg4audio_get_config(&cfg, st->codec->extradata,
                                         st->codec->extradata_size * 8, 1);
            st->codec->channels = cfg.channels;
            if (cfg.object_type == 29 && cfg.sampling_index < 3) // old mp3on4
                st->codec->sample_rate = avpriv_mpa_freq_tab[cfg.sampling_index];
            else if (cfg.ext_sample_rate)
                st->codec->sample_rate = cfg.ext_sample_rate;
            else
                st->codec->sample_rate = cfg.sample_rate;
            av_dlog(fc, "mp4a config channels %d obj %d ext obj %d "
                    "sample rate %d ext sample rate %d\n", st->codec->channels,
                    cfg.object_type, cfg.ext_object_type,
                    cfg.sample_rate, cfg.ext_sample_rate);
            /* remap object types that are not plain AAC (mp3on4, ALS) */
            if (!(st->codec->codec_id = ff_codec_get_id(mp4_audio_types,
                                                        cfg.object_type)))
                st->codec->codec_id = AV_CODEC_ID_AAC;
        }
    }
    return 0;
}
 
/* Pairing of an FFmpeg channel layout with a CAF/MOV layout tag
 * (tag in the high 16 bits, channel count in the low 16). */
typedef struct MovChannelLayout {
    int64_t  channel_layout;
    uint32_t layout_tag;
} MovChannelLayout;

/* Known CAF channel layout tags; terminated by a zero entry. */
static const MovChannelLayout mov_channel_layout[] = {
    { AV_CH_LAYOUT_MONO,         (100<<16) | 1}, // kCAFChannelLayoutTag_Mono
    { AV_CH_LAYOUT_STEREO,       (101<<16) | 2}, // kCAFChannelLayoutTag_Stereo
    { AV_CH_LAYOUT_STEREO,       (102<<16) | 2}, // kCAFChannelLayoutTag_StereoHeadphones
    { AV_CH_LAYOUT_2_1,          (131<<16) | 3}, // kCAFChannelLayoutTag_ITU_2_1
    { AV_CH_LAYOUT_QUAD,         (132<<16) | 4}, // kCAFChannelLayoutTag_ITU_2_2
    { AV_CH_LAYOUT_2_2,          (132<<16) | 4}, // kCAFChannelLayoutTag_ITU_2_2
    { AV_CH_LAYOUT_QUAD,         (108<<16) | 4}, // kCAFChannelLayoutTag_Quadraphonic
    { AV_CH_LAYOUT_SURROUND,     (113<<16) | 3}, // kCAFChannelLayoutTag_MPEG_3_0_A
    { AV_CH_LAYOUT_4POINT0,      (115<<16) | 4}, // kCAFChannelLayoutTag_MPEG_4_0_A
    { AV_CH_LAYOUT_5POINT0_BACK, (117<<16) | 5}, // kCAFChannelLayoutTag_MPEG_5_0_A
    { AV_CH_LAYOUT_5POINT0,      (117<<16) | 5}, // kCAFChannelLayoutTag_MPEG_5_0_A
    { AV_CH_LAYOUT_5POINT1_BACK, (121<<16) | 6}, // kCAFChannelLayoutTag_MPEG_5_1_A
    { AV_CH_LAYOUT_5POINT1,      (121<<16) | 6}, // kCAFChannelLayoutTag_MPEG_5_1_A
    { AV_CH_LAYOUT_7POINT1,      (128<<16) | 8}, // kCAFChannelLayoutTag_MPEG_7_1_C
    { AV_CH_LAYOUT_7POINT1_WIDE, (126<<16) | 8}, // kCAFChannelLayoutTag_MPEG_7_1_A
    { AV_CH_LAYOUT_5POINT1_BACK|AV_CH_LAYOUT_STEREO_DOWNMIX, (130<<16) | 8}, // kCAFChannelLayoutTag_SMPTE_DTV
    { AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY,   (133<<16) | 3}, // kCAFChannelLayoutTag_DVD_4
    { AV_CH_LAYOUT_2_1|AV_CH_LOW_FREQUENCY,      (134<<16) | 4}, // kCAFChannelLayoutTag_DVD_5
    { AV_CH_LAYOUT_QUAD|AV_CH_LOW_FREQUENCY,     (135<<16) | 4}, // kCAFChannelLayoutTag_DVD_6
    { AV_CH_LAYOUT_2_2|AV_CH_LOW_FREQUENCY,      (135<<16) | 4}, // kCAFChannelLayoutTag_DVD_6
    { AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY, (136<<16) | 4}, // kCAFChannelLayoutTag_DVD_10
    { AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY,  (137<<16) | 5}, // kCAFChannelLayoutTag_DVD_11
    { 0, 0},
};
#if 0 /* disabled in this port; kept for reference only */
/* Parse a CAF/MOV 'chan' atom of `size` bytes and set the stream's
 * channel layout from the layout tag, bitmap, or the table above. */
int ff_mov_read_chan(AVFormatContext *s, AVStream *st, int64_t size)
{
    AVCodecContext *codec= st->codec;
    uint32_t layout_tag;
    AVIOContext *pb = s->pb;
    const MovChannelLayout *layouts = mov_channel_layout;

    if (size < 12)
        return AVERROR_INVALIDDATA;

    layout_tag = avio_rb32(pb);
    size -= 4;
    if (layout_tag == 0) { // kCAFChannelLayoutTag_UseChannelDescriptions
        // Channel descriptions not implemented
        av_log_ask_for_sample(s, "Unimplemented container channel layout.\n");
        avio_skip(pb, size);
        return 0;
    }
    if (layout_tag == 0x10000) { // kCAFChannelLayoutTag_UseChannelBitmap
        codec->channel_layout = avio_rb32(pb);
        size -= 4;
        avio_skip(pb, size);
        return 0;
    }
    /* otherwise look the tag up in the static table */
    while (layouts->channel_layout) {
        if (layout_tag == layouts->layout_tag) {
            codec->channel_layout = layouts->channel_layout;
            break;
        }
        layouts++;
    }
    if (!codec->channel_layout)
        av_log(s, AV_LOG_WARNING, "Unknown container channel layout.\n");
    avio_skip(pb, size);

    return 0;
}
#endif
 
/**
 * Write a 12-byte CAF channel layout (tag, bitmap, description count)
 * for the given FFmpeg channel layout.  Falls back to the raw bitmap
 * when no known layout tag matches.
 */
void ff_mov_write_chan(AVIOContext *pb, int64_t channel_layout)
{
    const MovChannelLayout *entry;
    uint32_t tag = 0;

    /* look for an exact layout-tag match in the static table */
    for (entry = mov_channel_layout; entry->channel_layout; entry++) {
        if (entry->channel_layout == channel_layout) {
            tag = entry->layout_tag;
            break;
        }
    }

    if (!tag) {
        avio_wb32(pb, 0x10000);        // kCAFChannelLayoutTag_UseChannelBitmap
        avio_wb32(pb, channel_layout); // mChannelBitmap
    } else {
        avio_wb32(pb, tag);            // mChannelLayoutTag
        avio_wb32(pb, 0);              // mChannelBitmap
    }
    avio_wb32(pb, 0);                  // mNumberChannelDescriptions
}
 
/contrib/sdk/sources/ffmpeg/libavformat/isom.h
0,0 → 1,216
/*
* ISO Media common code
* copyright (c) 2001 Fabrice Bellard
* copyright (c) 2002 Francois Revol <revol@free.fr>
* copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_ISOM_H
#define AVFORMAT_ISOM_H
 
#include "avio.h"
#include "internal.h"
#include "dv.h"
 
/* isom.c */
extern const AVCodecTag ff_mp4_obj_type[];
extern const AVCodecTag ff_codec_movvideo_tags[];
extern const AVCodecTag ff_codec_movaudio_tags[];
extern const AVCodecTag ff_codec_movsubtitle_tags[];
 
int ff_mov_iso639_to_lang(const char lang[4], int mp4);
int ff_mov_lang_to_iso639(unsigned code, char to[4]);
 
/* the QuickTime file format is quite convoluted...
* it has lots of index tables, each indexing something in another one...
* Here we just use what is needed to read the chunks
*/
 
/* One 'stts' (time-to-sample) run: `count` consecutive samples that all
 * have the same `duration`. */
typedef struct MOVStts {
    int count;
    int duration;
} MOVStts;

/* One 'stsc' (sample-to-chunk) table entry. */
typedef struct MOVStsc {
    int first;  ///< first chunk this entry applies to
    int count;  ///< samples per chunk
    int id;     ///< sample description index
} MOVStsc;

/* Data reference ('dref') entry: where a track's media data lives. */
typedef struct MOVDref {
    uint32_t type;
    char *path;
    char *dir;
    char volume[28];
    char filename[64];
    int16_t nlvl_to, nlvl_from;
} MOVDref;

/* Parsed atom header. */
typedef struct MOVAtom {
    uint32_t type;
    int64_t size; /* total size (excluding the size and type fields) */
} MOVAtom;

struct MOVParseTableEntry;

/* State of the movie fragment ('moof') currently being parsed,
 * including defaults from the 'tfhd' atom. */
typedef struct MOVFragment {
    unsigned track_id;
    uint64_t base_data_offset;
    uint64_t moof_offset;
    unsigned stsd_id;
    unsigned duration;
    unsigned size;
    unsigned flags;
} MOVFragment;

/* Per-track defaults from a 'trex' atom (movie extends box). */
typedef struct MOVTrackExt {
    unsigned track_id;
    unsigned stsd_id;
    unsigned duration;
    unsigned size;
    unsigned flags;
} MOVTrackExt;

/* One 'sbgp' (sample-to-group) entry. */
typedef struct MOVSbgp {
    unsigned int count;
    unsigned int index;
} MOVSbgp;
 
/* Per-stream demuxing state: the sample tables and derived indices
 * needed to turn MOV chunk/sample metadata into packets. */
typedef struct MOVStreamContext {
    AVIOContext *pb;
    int pb_is_copied;
    int ffindex;          ///< AVStream index
    int next_chunk;
    unsigned int chunk_count;
    int64_t *chunk_offsets;
    unsigned int stts_count;
    MOVStts *stts_data;
    unsigned int ctts_count;
    MOVStts *ctts_data;
    unsigned int stsc_count;
    MOVStsc *stsc_data;
    unsigned int stps_count;
    unsigned *stps_data;  ///< partial sync sample for mpeg-2 open gop
    int ctts_index;
    int ctts_sample;
    unsigned int sample_size; ///< may contain value calculated from stsd or value from stsz atom
    unsigned int stsz_sample_size; ///< always contains sample size from stsz atom
    unsigned int sample_count;
    int *sample_sizes;
    int keyframe_absent;
    unsigned int keyframe_count;
    int *keyframes;
    int time_scale;
    int64_t empty_duration; ///< empty duration of the first edit list entry
    int64_t start_time;   ///< start time of the media
    int64_t time_offset;  ///< time offset of the edit list entries
    int current_sample;
    unsigned int bytes_per_frame;
    unsigned int samples_per_frame;
    int dv_audio_container;
    int pseudo_stream_id; ///< -1 means demux all ids
    int16_t audio_cid;    ///< stsd audio compression id
    unsigned drefs_count;
    MOVDref *drefs;
    int dref_id;
    int timecode_track;
    int wrong_dts;        ///< dts are wrong due to huge ctts offset (iMovie files)
    int width;            ///< tkhd width
    int height;           ///< tkhd height
    int dts_shift;        ///< dts shift when ctts is negative
    uint32_t palette[256];
    int has_palette;
    int64_t data_size;
    uint32_t tmcd_flags;  ///< tmcd track flags
    int64_t track_end;    ///< used for dts generation in fragmented movie files
    int start_pad;        ///< amount of samples to skip due to enc-dec delay
    unsigned int rap_group_count;
    MOVSbgp *rap_group;
} MOVStreamContext;

/* File-level demuxing state shared by all tracks of a MOV/MP4 file. */
typedef struct MOVContext {
    AVClass *avclass;
    AVFormatContext *fc;
    int time_scale;
    int64_t duration;     ///< duration of the longest track
    int found_moov;       ///< 'moov' atom has been found
    int found_mdat;       ///< 'mdat' atom has been found
    DVDemuxContext *dv_demux;
    AVFormatContext *dv_fctx;
    int isom;             ///< 1 if file is ISO Media (mp4/3gp)
    MOVFragment fragment; ///< current fragment in moof atom
    MOVTrackExt *trex_data;
    unsigned trex_count;
    int itunes_metadata;  ///< metadata are itunes style
    int chapter_track;
    int use_absolute_path;
    int ignore_editlist;
    int64_t next_root_atom; ///< offset of the next root atom
    int *bitrates;        ///< bitrates read before streams creation
    int bitrates_count;
} MOVContext;
 
int ff_mp4_read_descr_len(AVIOContext *pb);
int ff_mp4_read_descr(AVFormatContext *fc, AVIOContext *pb, int *tag);
int ff_mp4_read_dec_config_descr(AVFormatContext *fc, AVStream *st, AVIOContext *pb);
void ff_mp4_parse_es_descr(AVIOContext *pb, int *es_id);
 
/* MPEG-4 Systems descriptor tags (ISO/IEC 14496-1), as found in 'esds' */
#define MP4ODescrTag                    0x01
#define MP4IODescrTag                   0x02
#define MP4ESDescrTag                   0x03
#define MP4DecConfigDescrTag            0x04
#define MP4DecSpecificDescrTag          0x05
#define MP4SLDescrTag                   0x06

/* 'tfhd' (track fragment header) flag bits */
#define MOV_TFHD_BASE_DATA_OFFSET       0x01
#define MOV_TFHD_STSD_ID                0x02
#define MOV_TFHD_DEFAULT_DURATION       0x08
#define MOV_TFHD_DEFAULT_SIZE           0x10
#define MOV_TFHD_DEFAULT_FLAGS          0x20
#define MOV_TFHD_DURATION_IS_EMPTY  0x010000

/* 'trun' (track fragment run) flag bits */
#define MOV_TRUN_DATA_OFFSET            0x01
#define MOV_TRUN_FIRST_SAMPLE_FLAGS     0x04
#define MOV_TRUN_SAMPLE_DURATION       0x100
#define MOV_TRUN_SAMPLE_SIZE           0x200
#define MOV_TRUN_SAMPLE_FLAGS          0x400
#define MOV_TRUN_SAMPLE_CTS            0x800

/* per-sample flag fields used inside movie fragments */
#define MOV_FRAG_SAMPLE_FLAG_DEGRADATION_PRIORITY_MASK 0x0000ffff
#define MOV_FRAG_SAMPLE_FLAG_IS_NON_SYNC               0x00010000
#define MOV_FRAG_SAMPLE_FLAG_PADDING_MASK              0x000e0000
#define MOV_FRAG_SAMPLE_FLAG_REDUNDANCY_MASK           0x00300000
#define MOV_FRAG_SAMPLE_FLAG_DEPENDED_MASK             0x00c00000
#define MOV_FRAG_SAMPLE_FLAG_DEPENDS_MASK              0x03000000

#define MOV_FRAG_SAMPLE_FLAG_DEPENDS_NO                0x02000000
#define MOV_FRAG_SAMPLE_FLAG_DEPENDS_YES               0x01000000

/* 'tkhd' (track header) flag bits */
#define MOV_TKHD_FLAG_ENABLED       0x0001
#define MOV_TKHD_FLAG_IN_MOVIE      0x0002
#define MOV_TKHD_FLAG_IN_PREVIEW    0x0004
#define MOV_TKHD_FLAG_IN_POSTER     0x0008
 
int ff_mov_read_esds(AVFormatContext *fc, AVIOContext *pb, MOVAtom atom);
enum AVCodecID ff_mov_get_lpcm_codec_id(int bps, int flags);
 
int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries);
void ff_mov_write_chan(AVIOContext *pb, int64_t channel_layout);
 
#endif /* AVFORMAT_ISOM_H */
/contrib/sdk/sources/ffmpeg/libavformat/iss.c
0,0 → 1,144
/*
* ISS (.iss) file demuxer
* Copyright (c) 2008 Jaikrishnan Menon <realityman@gmx.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Funcom ISS file demuxer
* @author Jaikrishnan Menon
* @see http://wiki.multimedia.cx/index.php?title=FunCom_ISS
*/
 
#include "libavutil/channel_layout.h"
#include "avformat.h"
#include "internal.h"
#include "libavutil/avstring.h"
 
#define ISS_SIG "IMA_ADPCM_Sound"
#define ISS_SIG_LEN 15
#define MAX_TOKEN_SIZE 20
 
typedef struct {
int packet_size;
int sample_start_pos;
} IssDemuxContext;
 
/**
 * Read one space-separated token from the ISS header into buf.
 * Stops at a space or NUL byte; when terminated by NUL, one extra pad
 * byte is consumed.  The output is always NUL-terminated and silently
 * truncated to maxlen-1 characters.
 */
static void get_token(AVIOContext *s, char *buf, int maxlen)
{
    int pos = 0;
    char ch;

    for (;;) {
        ch = avio_r8(s);
        if (!ch || ch == ' ')
            break;
        if (pos < maxlen - 1)
            buf[pos++] = ch;
    }

    if (!ch)
        avio_r8(s); /* skip the pad byte after a NUL terminator */

    buf[pos] = 0;
}
 
/* Probe: the file must begin with the literal "IMA_ADPCM_Sound" signature. */
static int iss_probe(AVProbeData *p)
{
    return strncmp(p->buf, ISS_SIG, ISS_SIG_LEN) ? 0 : AVPROBE_SCORE_MAX;
}
 
/* Parse the space-separated ASCII header of a Funcom ISS file and create
 * the single IMA ADPCM audio stream.  Header fields are read strictly in
 * file order; most are ignored. */
static av_cold int iss_read_header(AVFormatContext *s)
{
    IssDemuxContext *iss = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    char token[MAX_TOKEN_SIZE];
    int stereo, rate_divisor;
    /* NOTE(review): stereo/rate_divisor stay uninitialized if the sscanf
     * below fails on a malformed header — worth hardening. */

    get_token(pb, token, sizeof(token)); //"IMA_ADPCM_Sound"
    get_token(pb, token, sizeof(token)); //packet size
    sscanf(token, "%d", &iss->packet_size);
    get_token(pb, token, sizeof(token)); //File ID
    get_token(pb, token, sizeof(token)); //out size
    get_token(pb, token, sizeof(token)); //stereo
    sscanf(token, "%d", &stereo);
    get_token(pb, token, sizeof(token)); //Unknown1
    get_token(pb, token, sizeof(token)); //RateDivisor
    sscanf(token, "%d", &rate_divisor);
    get_token(pb, token, sizeof(token)); //Unknown2
    get_token(pb, token, sizeof(token)); //Version ID
    get_token(pb, token, sizeof(token)); //Size

    if (iss->packet_size <= 0) {
        av_log(s, AV_LOG_ERROR, "packet_size %d is invalid\n", iss->packet_size);
        return AVERROR_INVALIDDATA;
    }

    /* audio data starts right after the header tokens */
    iss->sample_start_pos = avio_tell(pb);

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_ISS;
    if (stereo) {
        st->codec->channels = 2;
        st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    } else {
        st->codec->channels = 1;
        st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    }
    /* base rate 44100 Hz, optionally divided by the RateDivisor field */
    st->codec->sample_rate = 44100;
    if(rate_divisor > 0)
        st->codec->sample_rate /= rate_divisor;
    st->codec->bits_per_coded_sample = 4;
    st->codec->bit_rate = st->codec->channels * st->codec->sample_rate
                          * st->codec->bits_per_coded_sample;
    st->codec->block_align = iss->packet_size;
    avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate);

    return 0;
}
 
/**
 * Read one fixed-size ADPCM packet.  The pts is derived from the byte
 * position past the header, scaled by channel count (4-bit samples,
 * two per byte).
 */
static int iss_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    IssDemuxContext *iss = s->priv_data;
    int channels;
    int ret = av_get_packet(s->pb, pkt, iss->packet_size);

    if (ret != iss->packet_size)
        return AVERROR(EIO);

    pkt->stream_index = 0;
    pkt->pts = avio_tell(s->pb) - iss->sample_start_pos;
    channels = s->streams[0]->codec->channels;
    if (channels > 0)
        pkt->pts /= channels * 2;
    return 0;
}
 
/* Demuxer registration for Funcom ISS files. */
AVInputFormat ff_iss_demuxer = {
    .name           = "iss",
    .long_name      = NULL_IF_CONFIG_SMALL("Funcom ISS"),
    .priv_data_size = sizeof(IssDemuxContext),
    .read_probe     = iss_probe,
    .read_header    = iss_read_header,
    .read_packet    = iss_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/iv8.c
0,0 → 1,118
/*
* Copyright (c) 2009 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
 
 
/* Probe: match the fixed 6-byte prefix observed in the single known
 * sample file; other files may start differently. */
static int probe(AVProbeData *p)
{
    static const uint8_t sig[6] = { 1, 1, 3, 0xB8, 0x80, 0x60 };

    if (!memcmp(p->buf, sig, sizeof(sig)))
        return AVPROBE_SCORE_MAX - 2;
    return 0;
}
 
/* Create the single MPEG-4 video stream; frames carry 90 kHz timestamps. */
static int read_header(AVFormatContext *s)
{
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_MPEG4;
    st->need_parsing      = AVSTREAM_PARSE_FULL;
    avpriv_set_pts_info(st, 64, 1, 90000);

    return 0;
}
 
/* Reassemble one video frame from a sequence of 12-byte-headed chunks.
 * Chunks of type 258 are skipped; type-257 payloads are concatenated
 * until a chunk with the end-of-frame flag (0x80) is seen. */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, size, pts, type, flags;
    int first_pkt = 0;
    int frame_complete = 0;

    while (!frame_complete) {

        type  = avio_rb16(s->pb); // 257 or 258
        size  = avio_rb16(s->pb);
        flags = avio_rb16(s->pb); //some flags, 0x80 indicates end of frame
                avio_rb16(s->pb); //packet number
        pts   = avio_rb32(s->pb);
                avio_rb32(s->pb); //6A 13 E3 88

        frame_complete = flags & 0x80;

        size -= 12; /* remaining payload after the chunk header */
        if (size < 1)
            return -1;

        if (type == 258) { /* non-video chunk: skip and keep scanning */
            avio_skip(s->pb, size);
            frame_complete = 0;
            continue;
        }

        if (!first_pkt) {
            /* first chunk starts the packet; pos points at the chunk header */
            ret = av_get_packet(s->pb, pkt, size);
            if (ret < 0)
                return ret;
            first_pkt = 1;
            pkt->pts  = pts;
            pkt->pos -= 16;
        } else {
            /* subsequent chunks are appended to the same packet */
            ret = av_append_packet(s->pb, pkt, size);
            if (ret < 0) {
                av_log(s, AV_LOG_ERROR, "failed to grow packet\n");
                av_free_packet(pkt);
                return ret;
            }
        }
        if (ret < size) {
            /* short read: deliver what we have, flagged as corrupt */
            av_log(s, AV_LOG_ERROR, "Truncated packet! Read %d of %d bytes\n",
                   ret, size);
            pkt->flags |= AV_PKT_FLAG_CORRUPT;
            break;
        }
    }
    pkt->stream_index = 0;

    return 0;
}
 
/* Demuxer registration for IndigoVision 8000 video streams. */
AVInputFormat ff_iv8_demuxer = {
    .name        = "iv8",
    .long_name   = NULL_IF_CONFIG_SMALL("IndigoVision 8000 video"),
    .read_probe  = probe,
    .read_header = read_header,
    .read_packet = read_packet,
    .flags       = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/ivfdec.c
0,0 → 1,91
/*
* Copyright (c) 2010 David Conrad
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "riff.h"
#include "libavutil/intreadwrite.h"
 
/* Probe: "DKIF" magic, version 0, 32-byte header length. */
static int probe(AVProbeData *p)
{
    if (AV_RL32(p->buf) != MKTAG('D','K','I','F'))
        return 0;
    if (AV_RL16(p->buf + 4) != 0 || AV_RL16(p->buf + 6) != 32)
        return 0;
    return AVPROBE_SCORE_MAX - 2;
}
 
/* Parse the 32-byte IVF file header (codec tag, dimensions, time base,
 * duration) and create the single video stream. */
static int read_header(AVFormatContext *s)
{
    AVStream *st;
    AVRational time_base;

    avio_rl32(s->pb); // DKIF
    avio_rl16(s->pb); // version
    avio_rl16(s->pb); // header size

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_tag  = avio_rl32(s->pb);
    st->codec->codec_id   = ff_codec_get_id(ff_codec_bmp_tags, st->codec->codec_tag);
    st->codec->width      = avio_rl16(s->pb);
    st->codec->height     = avio_rl16(s->pb);
    time_base.den         = avio_rl32(s->pb);
    time_base.num         = avio_rl32(s->pb);
    st->duration          = avio_rl64(s->pb);

    st->need_parsing      = AVSTREAM_PARSE_HEADERS;

    /* a zero numerator or denominator would make the time base unusable */
    if (!time_base.den || !time_base.num) {
        av_log(s, AV_LOG_ERROR, "Invalid frame rate\n");
        return AVERROR_INVALIDDATA;
    }

    avpriv_set_pts_info(st, 64, time_base.num, time_base.den);

    return 0;
}
 
/* Read one IVF frame: a 12-byte header (LE32 payload size, LE64 pts)
 * followed by the payload.  pkt->pos is rewound to the frame header. */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, size = avio_rl32(s->pb);
    int64_t pts   = avio_rl64(s->pb);

    ret = av_get_packet(s->pb, pkt, size);
    /* FIX: bail out before touching pkt fields when the read failed;
     * the old code adjusted pos/pts on an invalid packet. */
    if (ret < 0)
        return ret;

    pkt->stream_index = 0;
    pkt->pts          = pts;
    pkt->pos         -= 12;

    return ret;
}
 
/* Demuxer registration for On2 IVF files. */
AVInputFormat ff_ivf_demuxer = {
    .name        = "ivf",
    .long_name   = NULL_IF_CONFIG_SMALL("On2 IVF"),
    .read_probe  = probe,
    .read_header = read_header,
    .read_packet = read_packet,
    .flags       = AVFMT_GENERIC_INDEX,
    .codec_tag   = (const AVCodecTag* const []){ ff_codec_bmp_tags, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/ivfenc.c
0,0 → 1,68
/*
* Copyright (c) 2010 Reimar Döffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "libavutil/intreadwrite.h"
 
/**
 * Validate the input (exactly one VP8 video stream) and emit the
 * 32-byte IVF file header.
 */
static int ivf_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVStream *st;
    AVCodecContext *ctx;

    if (s->nb_streams != 1) {
        av_log(s, AV_LOG_ERROR, "Format supports only exactly one video stream\n");
        return AVERROR(EINVAL);
    }

    st  = s->streams[0];
    ctx = st->codec;
    if (ctx->codec_type != AVMEDIA_TYPE_VIDEO || ctx->codec_id != AV_CODEC_ID_VP8) {
        av_log(s, AV_LOG_ERROR, "Currently only VP8 is supported!\n");
        return AVERROR(EINVAL);
    }

    avio_write(pb, "DKIF", 4);
    avio_wl16(pb, 0);   /* version */
    avio_wl16(pb, 32);  /* header length */
    avio_wl32(pb, ctx->codec_tag ? ctx->codec_tag : AV_RL32("VP80"));
    avio_wl16(pb, ctx->width);
    avio_wl16(pb, ctx->height);
    avio_wl32(pb, st->time_base.den);
    avio_wl32(pb, st->time_base.num);
    avio_wl64(pb, st->duration); // TODO: duration or number of frames?!?

    return 0;
}
 
/* Write one frame: 12-byte header (LE32 size, LE64 pts) then the payload. */
static int ivf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    avio_wl32(s->pb, pkt->size);
    avio_wl64(s->pb, pkt->pts);
    avio_write(s->pb, pkt->data, pkt->size);

    return 0;
}
 
/* Muxer registration for On2 IVF files (VP8 only). */
AVOutputFormat ff_ivf_muxer = {
    .name         = "ivf",
    .long_name    = NULL_IF_CONFIG_SMALL("On2 IVF"),
    .extensions   = "ivf",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_VP8,
    .write_header = ivf_write_header,
    .write_packet = ivf_write_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/jacosubdec.c
0,0 → 1,270
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* JACOsub subtitle demuxer
* @see http://unicorn.us.com/jacosub/jscripts.html
* @todo Support P[ALETTE] directive.
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
#include "libavcodec/internal.h"
#include "libavcodec/jacosub.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/intreadwrite.h"
 
/* Demuxer state: global SHIFT/TIMERES script parameters plus the
 * queue of parsed subtitle packets. */
typedef struct {
    int shift;           // global time shift, in timeres units
    unsigned timeres;    // timestamps-per-second resolution (default 30)
    FFDemuxSubtitlesQueue q;
} JACOsubContext;
 
/**
 * Return non-zero if the line starts with a pair of JACOsub timestamps
 * ("H:M:S.F H:M:S.F" or "@start @end") followed by at least one more
 * character.
 */
static int timed_line(const char *ptr)
{
    char dummy;

    if (sscanf(ptr, "%*u:%*u:%*u.%*u %*u:%*u:%*u.%*u %c", &dummy) == 1)
        return 1;
    return sscanf(ptr, "@%*u @%*u %c", &dummy) == 1;
}
 
/* Probe: skip a UTF-8 BOM, blank lines and '#' comments, then check
 * whether the first real line carries JACOsub timestamps. */
static int jacosub_probe(AVProbeData *p)
{
    const char *cur = p->buf;
    const char *end = p->buf + p->buf_size;

    if (AV_RB24(cur) == 0xEFBBBF)
        cur += 3; /* skip UTF-8 BOM */

    while (cur < end) {
        while (jss_whitespace(*cur))
            cur++;
        if (*cur == '#' || *cur == '\n') {
            cur += ff_subtitles_next_line(cur);
            continue;
        }
        return timed_line(cur) ? AVPROBE_SCORE_EXTENSION + 1 : 0;
    }
    return 0;
}
 
/* Script-level '#' directives, sorted so each starts with a distinct
 * letter (get_jss_cmd() dispatches on the first character only). */
static const char * const cmds[] = {
    "CLOCKPAUSE",
    "DIRECTIVE",
    "FONT",
    "HRES",
    "INCLUDE",
    "PALETTE",
    "QUANTIZE",
    "RAMP",
    "SHIFT",
    "TIMERES",
};
 
/* Map a directive's first letter (case-insensitive) to its index in
 * cmds[], or -1 when unknown. */
static int get_jss_cmd(char k)
{
    int i;
    char upper = av_toupper(k);

    for (i = 0; i < FF_ARRAY_ELEMS(cmds); i++) {
        if (cmds[i][0] == upper)
            return i;
    }
    return -1;
}
 
/* Free the queued subtitle packets. */
static int jacosub_read_close(AVFormatContext *s)
{
    JACOsubContext *ctx = s->priv_data;

    ff_subtitles_queue_clean(&ctx->q);
    return 0;
}
 
/**
 * Parse the leading timestamp pair of a subtitle line and convert it to
 * the stream time base (1/100 s), applying the global SHIFT.
 *
 * @param start    receives the start time in centiseconds
 * @param duration receives the duration in centiseconds
 * @return pointer past the timestamps, or NULL if none were found
 */
static const char *read_ts(JACOsubContext *jacosub, const char *buf,
                           int64_t *start, int *duration)
{
    int len;
    unsigned hs, ms, ss, fs; // hours, minutes, seconds, frame start
    unsigned he, me, se, fe; // hours, minutes, seconds, frame end
    int ts_start, ts_end;

    /* timed format: "H:M:S.F H:M:S.F" */
    if (sscanf(buf, "%u:%u:%u.%u %u:%u:%u.%u %n",
               &hs, &ms, &ss, &fs,
               &he, &me, &se, &fe, &len) == 8) {
        ts_start = (hs*3600 + ms*60 + ss) * jacosub->timeres + fs;
        ts_end   = (he*3600 + me*60 + se) * jacosub->timeres + fe;
        goto shift_and_ret;
    }

    /* timestamps format: "@start @end" (in timeres units) */
    if (sscanf(buf, "@%u @%u %n", &ts_start, &ts_end, &len) == 2)
        goto shift_and_ret;

    return NULL;

shift_and_ret:
    ts_start = (ts_start + jacosub->shift) * 100 / jacosub->timeres;
    ts_end   = (ts_end   + jacosub->shift) * 100 / jacosub->timeres;
    *start   = ts_start;
    /* FIX: duration is end minus start; the previous "ts_start + ts_end"
     * overstated every duration by twice the start time. */
    *duration = ts_end - ts_start;
    return buf + len;
}
 
/**
 * Parse a SHIFT argument of the form [-]H:M:S.F, [-]M:S.F or [-]S.F
 * ('.' and ':' are interchangeable separators) and return the shift
 * expressed in timeres units; 0 when the field count is unsupported.
 */
static int get_shift(int timeres, const char *buf)
{
    int a = 0, b = 0, c = 0, d = 0;
    int negative = (*buf == '-');
    int value;
#define SSEP "%*1[.:]"
    int nb_fields = sscanf(buf, "%d"SSEP"%d"SSEP"%d"SSEP"%d", &a, &b, &c, &d);
#undef SSEP

    if (a < 0)
        negative = 1;
    a = FFABS(a);

    switch (nb_fields) {
    case 4: value = (a*3600 + b*60 + c) * timeres + d; break;
    case 3: value = (       a*60 + b) * timeres + c; break;
    case 2: value = (              a) * timeres + b; break;
    default: return 0;
    }
    return negative ? -value : value;
}
 
/* First pass: read the whole script, queueing timed lines verbatim and
 * collecting global '#' directives (SHIFT/TIMERES into the context, all
 * relevant directives into the extradata header).  Second pass: compute
 * packet timing, since SHIFT and TIMERES apply retroactively. */
static int jacosub_read_header(AVFormatContext *s)
{
    AVBPrint header;
    AVIOContext *pb = s->pb;
    char line[JSS_MAX_LINESIZE];
    JACOsubContext *jacosub = s->priv_data;
    int shift_set = 0; // only the first shift matters
    int merge_line = 0;
    int i, ret;

    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 100);
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_JACOSUB;

    jacosub->timeres = 30; /* format default */

    av_bprint_init(&header, 1024+FF_INPUT_BUFFER_PADDING_SIZE, 4096);

    while (!url_feof(pb)) {
        int cmd_len;
        const char *p = line;
        int64_t pos = avio_tell(pb);
        int len = ff_get_line(pb, line, sizeof(line));

        p = jss_skip_whitespace(p);

        /* queue timed line (or continuation of one ending in "\<newline>") */
        if (merge_line || timed_line(p)) {
            AVPacket *sub;

            sub = ff_subtitles_queue_insert(&jacosub->q, line, len, merge_line);
            if (!sub)
                return AVERROR(ENOMEM);
            sub->pos = pos;
            merge_line = len > 1 && !strcmp(&line[len - 2], "\\\n");
            continue;
        }

        /* skip all non-compiler commands and focus on the command */
        if (*p != '#')
            continue;
        p++;
        i = get_jss_cmd(p[0]);
        if (i == -1)
            continue;

        /* trim command + spaces */
        cmd_len = strlen(cmds[i]);
        if (av_strncasecmp(p, cmds[i], cmd_len) == 0)
            p += cmd_len;
        else
            p++; /* single-letter abbreviation */
        p = jss_skip_whitespace(p);

        /* handle commands which affect the whole script */
        switch (cmds[i][0]) {
        case 'S': // SHIFT command affect the whole script...
            if (!shift_set) {
                jacosub->shift = get_shift(jacosub->timeres, p);
                shift_set = 1;
            }
            av_bprintf(&header, "#S %s", p);
            break;
        case 'T': // ...but must be placed after TIMERES
            jacosub->timeres = strtol(p, NULL, 10);
            if (!jacosub->timeres)
                jacosub->timeres = 30;
            else
                av_bprintf(&header, "#T %s", p);
            break;
        }
    }

    /* general/essential directives in the extradata */
    ret = avpriv_bprint_to_extradata(st->codec, &header);
    if (ret < 0)
        return ret;

    /* SHIFT and TIMERES affect the whole script so packet timing can only be
     * done in a second pass */
    for (i = 0; i < jacosub->q.nb_subs; i++) {
        AVPacket *sub = &jacosub->q.subs[i];
        read_ts(jacosub, sub->data, &sub->pts, &sub->duration);
    }
    ff_subtitles_queue_finalize(&jacosub->q);

    return 0;
}
 
/* Hand out the next queued subtitle packet. */
static int jacosub_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    JACOsubContext *ctx = s->priv_data;

    return ff_subtitles_queue_read_packet(&ctx->q, pkt);
}
 
/* Seek within the pre-built subtitle queue. */
static int jacosub_read_seek(AVFormatContext *s, int stream_index,
                             int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    JACOsubContext *ctx = s->priv_data;

    return ff_subtitles_queue_seek(&ctx->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* Demuxer registration for JACOsub subtitle scripts. */
AVInputFormat ff_jacosub_demuxer = {
    .name           = "jacosub",
    .long_name      = NULL_IF_CONFIG_SMALL("JACOsub subtitle format"),
    .priv_data_size = sizeof(JACOsubContext),
    .read_probe     = jacosub_probe,
    .read_header    = jacosub_read_header,
    .read_packet    = jacosub_read_packet,
    .read_seek2     = jacosub_read_seek,
    .read_close     = jacosub_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/jacosubenc.c
0,0 → 1,42
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rawenc.h"
 
/* Emit the script header stored in the codec extradata, dropping its
 * trailing NUL byte. */
static int jacosub_write_header(AVFormatContext *s)
{
    const AVCodecContext *avctx = s->streams[0]->codec;
    int header_size = avctx->extradata_size;

    if (!header_size)
        return 0;

    avio_write(s->pb, avctx->extradata, header_size - 1);
    avio_flush(s->pb);
    return 0;
}
 
/* JACOsub subtitle muxer: header from extradata, packets written raw. */
AVOutputFormat ff_jacosub_muxer = {
    .name           = "jacosub",
    .long_name      = NULL_IF_CONFIG_SMALL("JACOsub subtitle format"),
    .mime_type      = "text/x-jacosub",
    .extensions     = "jss,js",
    .write_header   = jacosub_write_header,
    .write_packet   = ff_raw_write_packet,
    .flags          = AVFMT_TS_NONSTRICT,
    .subtitle_codec = AV_CODEC_ID_JACOSUB,
};
/contrib/sdk/sources/ffmpeg/libavformat/jvdec.c
0,0 → 1,239
/*
* Bitmap Brothers JV demuxer
* Copyright (c) 2005, 2011 Peter Ross <pross@xvid.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Bitmap Brothers JV demuxer
* @author Peter Ross <pross@xvid.org>
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define JV_PREAMBLE_SIZE 5
 
/** Per-frame sub-packet sizes parsed from the JV frame table in read_header(). */
typedef struct {
    int audio_size;    /**< audio packet size (bytes) */
    int video_size;    /**< video packet size (bytes) */
    int palette_size;  /**< palette size (bytes) */
    int video_type;    /**< per-frame video compression type */
} JVFrame;
 
/** Demuxer state: one JVFrame per index entry plus the interleave cursor. */
typedef struct {
    JVFrame *frames;   /* one entry per frame, owned by the demuxer */
    enum {
        JV_AUDIO = 0,  /* next sub-packet to emit is audio */
        JV_VIDEO,      /* then video */
        JV_PADDING     /* then skip trailing padding and advance pts */
    } state;
    int64_t pts;       /* index of the frame currently being emitted */
} JVDemuxContext;
 
#define MAGIC " Compression by John M Phillips Copyright (C) 1995 The Bitmap Brothers Ltd."
 
/*
 * Probe for the Bitmap Brothers JV signature: 'J','V' at offset 0 and the
 * copyright magic string at offset 4.
 *
 * The original test `strlen(MAGIC) <= pd->buf_size - 4` subtracted from a
 * signed int and compared against a size_t: for buf_size < 4 the negative
 * result converted to a huge unsigned value, the check passed, and memcmp()
 * read past the end of the probe buffer.  Check the length first, unsigned-
 * safely, before touching the buffer.
 */
static int read_probe(AVProbeData *pd)
{
    const size_t magic_len = strlen(MAGIC);

    if (pd->buf_size < 4 || (size_t)pd->buf_size - 4 < magic_len)
        return 0;
    if (pd->buf[0] == 'J' && pd->buf[1] == 'V' &&
        !memcmp(pd->buf + 4, MAGIC, magic_len))
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
/*
 * Parse the JV file header and the per-frame size table, creating one audio
 * and one video stream, the seek index (attached to the audio stream) and the
 * jv->frames side table consumed by read_packet().
 */
static int read_header(AVFormatContext *s)
{
    JVDemuxContext *jv = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *vst, *ast;
    int64_t audio_pts = 0;
    int64_t offset;
    int i;

    /* 'JV' signature + copyright string, already validated by read_probe() */
    avio_skip(pb, 80);

    ast = avformat_new_stream(s, NULL);
    vst = avformat_new_stream(s, NULL);
    if (!ast || !vst)
        return AVERROR(ENOMEM);

    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = AV_CODEC_ID_JV;
    vst->codec->codec_tag  = 0; /* no fourcc */
    vst->codec->width      = avio_rl16(pb);
    vst->codec->height     = avio_rl16(pb);
    /* one 16-bit field gives the frame count; reused as index size below */
    vst->duration          =
    vst->nb_frames         =
    ast->nb_index_entries  = avio_rl16(pb);
    avpriv_set_pts_info(vst, 64, avio_rl16(pb), 1000);

    avio_skip(pb, 4);

    ast->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    ast->codec->codec_id       = AV_CODEC_ID_PCM_U8;
    ast->codec->codec_tag      = 0; /* no fourcc */
    ast->codec->sample_rate    = avio_rl16(pb);
    ast->codec->channels       = 1;
    ast->codec->channel_layout = AV_CH_LAYOUT_MONO;
    /* audio timestamps are in samples (timebase 1/sample_rate) */
    avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);

    avio_skip(pb, 10);

    /* NOTE(review): the index is allocated by hand; index_entries_allocated_size
     * is left 0 — TODO confirm no later av_add_index_entry() call reallocates. */
    ast->index_entries = av_malloc(ast->nb_index_entries * sizeof(*ast->index_entries));
    if (!ast->index_entries)
        return AVERROR(ENOMEM);

    jv->frames = av_malloc(ast->nb_index_entries * sizeof(JVFrame));
    if (!jv->frames)
        return AVERROR(ENOMEM);

    /* frame data starts right after the fixed header and the 16-byte-per-frame table */
    offset = 0x68 + ast->nb_index_entries * 16;
    for(i = 0; i < ast->nb_index_entries; i++) {
        AVIndexEntry *e = ast->index_entries + i;
        JVFrame *jvf = jv->frames + i;

        /* total frame size including audio, video, palette data and padding */
        e->size = avio_rl32(pb);
        e->timestamp = i;
        e->pos = offset;
        offset += e->size;

        jvf->audio_size = avio_rl32(pb);
        jvf->video_size = avio_rl32(pb);
        jvf->palette_size = avio_r8(pb) ? 768 : 0;
        /* clamp so video_size + palette + preamble cannot overflow an int */
        jvf->video_size = FFMIN(FFMAX(jvf->video_size, 0),
                                INT_MAX - JV_PREAMBLE_SIZE - jvf->palette_size);
        if (avio_r8(pb))
            av_log(s, AV_LOG_WARNING, "unsupported audio codec\n");
        jvf->video_type = avio_r8(pb);
        avio_skip(pb, 1);

        /* index timestamps are audio sample counts; frames without audio
         * get no usable timestamp */
        e->timestamp = jvf->audio_size ? audio_pts : AV_NOPTS_VALUE;
        audio_pts += jvf->audio_size;

        /* video_type 1 is a delta frame; everything else is a keyframe */
        e->flags = jvf->video_type != 1 ? AVINDEX_KEYFRAME : 0;
    }

    jv->state = JV_AUDIO;
    return 0;
}
 
/*
 * Emit, for each indexed frame, first the audio chunk (stream 0), then the
 * video chunk with a 5-byte preamble (stream 1), then skip any padding and
 * advance to the next frame.  The switch cases fall through on purpose when
 * a sub-packet is absent.
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    JVDemuxContext *jv = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *ast = s->streams[0];

    while (!url_feof(s->pb) && jv->pts < ast->nb_index_entries) {
        const AVIndexEntry *e = ast->index_entries + jv->pts;
        const JVFrame *jvf = jv->frames + jv->pts;

        switch(jv->state) {
        case JV_AUDIO:
            jv->state++;
            if (jvf->audio_size ) {
                /* NOTE(review): a short read is reported as ENOMEM here —
                 * probably should propagate the real error; verify upstream. */
                if (av_get_packet(s->pb, pkt, jvf->audio_size) < 0)
                    return AVERROR(ENOMEM);
                pkt->stream_index = 0;
                pkt->pts          = e->timestamp;
                pkt->flags       |= AV_PKT_FLAG_KEY;
                return 0;
            }
            /* fall through: no audio in this frame */
        case JV_VIDEO:
            jv->state++;
            if (jvf->video_size || jvf->palette_size) {
                int size = jvf->video_size + jvf->palette_size;
                if (av_new_packet(pkt, size + JV_PREAMBLE_SIZE))
                    return AVERROR(ENOMEM);

                /* preamble for the decoder: payload size + compression type */
                AV_WL32(pkt->data, jvf->video_size);
                pkt->data[4]      = jvf->video_type;
                if ((size = avio_read(pb, pkt->data + JV_PREAMBLE_SIZE, size)) < 0)
                    return AVERROR(EIO);

                pkt->size         = size + JV_PREAMBLE_SIZE;
                pkt->stream_index = 1;
                pkt->pts          = jv->pts;
                if (jvf->video_type != 1)
                    pkt->flags |= AV_PKT_FLAG_KEY;
                return 0;
            }
            /* fall through: no video in this frame */
        case JV_PADDING:
            avio_skip(pb, FFMAX(e->size - jvf->audio_size - jvf->video_size
                                        - jvf->palette_size, 0));
            jv->state = JV_AUDIO;
            jv->pts++;
        }
    }

    return AVERROR(EIO);
}
 
/*
 * Seek to a frame.  Audio timestamps (stream 0) are resolved through the
 * index; video timestamps (stream 1) are frame numbers and index directly.
 */
static int read_seek(AVFormatContext *s, int stream_index,
                     int64_t ts, int flags)
{
    JVDemuxContext *jv = s->priv_data;
    AVStream *ast = s->streams[0];
    int idx;

    if (flags & (AVSEEK_FLAG_BYTE | AVSEEK_FLAG_FRAME))
        return AVERROR(ENOSYS);

    if (stream_index == 0)
        idx = av_index_search_timestamp(ast, ts, flags);
    else if (stream_index == 1)
        idx = ts;
    else
        return 0;

    if (idx < 0 || idx >= ast->nb_index_entries)
        return 0;
    if (avio_seek(s->pb, ast->index_entries[idx].pos, SEEK_SET) < 0)
        return -1;

    jv->state = JV_AUDIO;
    jv->pts   = idx;
    return 0;
}
 
/* Release the per-frame table; the index itself is freed by lavf. */
static int read_close(AVFormatContext *s)
{
    JVDemuxContext *ctx = s->priv_data;

    av_freep(&ctx->frames);
    return 0;
}
 
/* Bitmap Brothers JV demuxer registration. */
AVInputFormat ff_jv_demuxer = {
    .name           = "jv",
    .long_name      = NULL_IF_CONFIG_SMALL("Bitmap Brothers JV"),
    .priv_data_size = sizeof(JVDemuxContext),
    .read_probe     = read_probe,
    .read_header    = read_header,
    .read_packet    = read_packet,
    .read_seek      = read_seek,
    .read_close     = read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/latmenc.c
0,0 → 1,230
/*
* LATM/LOAS muxer
* Copyright (c) 2011 Kieran Kunhya <kieran@kunhya.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/get_bits.h"
#include "libavcodec/put_bits.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/mpeg4audio.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "rawenc.h"
 
#define MAX_EXTRADATA_SIZE 1024
 
/** LATM/LOAS muxer state. */
typedef struct {
    AVClass *av_class;
    int off;           /* bit offset into extradata past the AudioSpecificConfig header */
    int channel_conf;  /* channel configuration from the ASC; 0 means PCE in-band */
    int object_type;   /* MPEG-4 audio object type from the ASC */
    int counter;       /* frames since the last StreamMuxConfig was emitted */
    int mod;           /* StreamMuxConfig repetition interval (smc-interval option) */
    /* scratch buffer: max payload (0x1fff) + config + slack */
    uint8_t buffer[0x1fff + MAX_EXTRADATA_SIZE + 1024];
} LATMContext;
 
/* Muxer options: how often (in frames) the StreamMuxConfig is repeated. */
static const AVOption options[] = {
    {"smc-interval", "StreamMuxConfig interval.",
     offsetof(LATMContext, mod), AV_OPT_TYPE_INT, {.i64 = 0x0014}, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
    {NULL},
};
 
/* AVClass wiring so the options above are reachable through AVOption. */
static const AVClass latm_muxer_class = {
    .class_name = "LATM/LOAS muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/*
 * Parse the AudioSpecificConfig in the stream's extradata and cache the
 * fields the frame-header writer needs (config offset, channel config,
 * object type).  Returns 0 on success, a negative AVERROR otherwise.
 *
 * Fix: the ALS byte-alignment sanity check used to test ctx->object_type,
 * which is only assigned at the *end* of this function and is still 0
 * (AOT_NULL) on the single call made from latm_write_header() — so the
 * check could never fire.  Test the freshly parsed m4ac.object_type instead.
 */
static int latm_decode_extradata(LATMContext *ctx, uint8_t *buf, int size)
{
    MPEG4AudioConfig m4ac;

    if (size > MAX_EXTRADATA_SIZE) {
        av_log(ctx, AV_LOG_ERROR, "Extradata is larger than currently supported.\n");
        return AVERROR_INVALIDDATA;
    }
    ctx->off = avpriv_mpeg4audio_get_config(&m4ac, buf, size * 8, 1);
    if (ctx->off < 0)
        return ctx->off;

    if (m4ac.object_type == AOT_ALS && (ctx->off & 7)) {
        /* as long as avpriv_mpeg4audio_get_config works correctly this is impossible */
        av_log(ctx, AV_LOG_ERROR, "BUG: ALS offset is not byte-aligned\n");
        return AVERROR_INVALIDDATA;
    }
    /* FIXME: are any formats not allowed in LATM? */

    if (m4ac.object_type > AOT_SBR && m4ac.object_type != AOT_ALS) {
        av_log(ctx, AV_LOG_ERROR, "Muxing MPEG-4 AOT %d in LATM is not supported\n", m4ac.object_type);
        return AVERROR_INVALIDDATA;
    }
    ctx->channel_conf = m4ac.chan_config;
    ctx->object_type  = m4ac.object_type;

    return 0;
}
 
/*
 * Muxer init: parse the AAC extradata once so per-frame headers can be
 * written.  Pre-muxed LATM input needs no setup (it is passed through).
 */
static int latm_write_header(AVFormatContext *s)
{
    LATMContext *ctx = s->priv_data;
    AVCodecContext *avctx = s->streams[0]->codec;

    if (avctx->codec_id == AV_CODEC_ID_AAC_LATM)
        return 0;
    if (avctx->extradata_size <= 0)
        return 0;

    if (latm_decode_extradata(ctx, avctx->extradata, avctx->extradata_size) < 0)
        return AVERROR_INVALIDDATA;
    return 0;
}
 
/*
 * Write the AudioMuxElement header bits: the useSameStreamMux flag, and —
 * every ctx->mod frames — a full StreamMuxConfig including the stream's
 * AudioSpecificConfig copied from extradata.
 */
static void latm_write_frame_header(AVFormatContext *s, PutBitContext *bs)
{
    LATMContext *ctx = s->priv_data;
    AVCodecContext *avctx = s->streams[0]->codec;
    int header_size;

    /* AudioMuxElement */
    put_bits(bs, 1, !!ctx->counter);  /* useSameStreamMux: 0 forces a new config */

    if (!ctx->counter) {
        /* StreamMuxConfig */
        put_bits(bs, 1, 0);        /* audioMuxVersion */
        put_bits(bs, 1, 1);        /* allStreamsSameTimeFraming */
        put_bits(bs, 6, 0);        /* numSubFrames */
        put_bits(bs, 4, 0);        /* numProgram */
        put_bits(bs, 3, 0);        /* numLayer */

        /* AudioSpecificConfig */
        if (ctx->object_type == AOT_ALS) {
            /* ALS config is byte-aligned at ctx->off; copy it verbatim */
            header_size = avctx->extradata_size-(ctx->off >> 3);
            avpriv_copy_bits(bs, &avctx->extradata[ctx->off >> 3], header_size);
        } else {
            // + 3 assumes not scalable and dependsOnCoreCoder == 0,
            // see decode_ga_specific_config in libavcodec/aacdec.c
            avpriv_copy_bits(bs, avctx->extradata, ctx->off + 3);

            if (!ctx->channel_conf) {
                /* no channel config in the ASC: forward the in-band PCE */
                GetBitContext gb;
                init_get_bits8(&gb, avctx->extradata, avctx->extradata_size);
                skip_bits_long(&gb, ctx->off + 3);
                avpriv_copy_pce_data(bs, &gb);
            }
        }

        put_bits(bs, 3, 0);        /* frameLengthType */
        put_bits(bs, 8, 0xff);     /* latmBufferFullness */

        put_bits(bs, 1, 0);        /* otherDataPresent */
        put_bits(bs, 1, 0);        /* crcCheckPresent */
    }

    ctx->counter++;
    ctx->counter %= ctx->mod;      /* wraps to 0 -> next frame carries a config */
}
 
/*
 * Wrap one AAC frame into a LOAS/LATM AudioSyncStream packet:
 * 3-byte LOAS header (sync word + 13-bit length) followed by the
 * AudioMuxElement built in ctx->buffer.  Raw LATM input is passed through.
 */
static int latm_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    LATMContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    PutBitContext bs;
    int i, len;
    uint8_t loas_header[] = "\x56\xe0\x00";   /* 11-bit sync 0x2b7, length filled below */

    if (s->streams[0]->codec->codec_id == AV_CODEC_ID_AAC_LATM)
        return ff_raw_write_packet(s, pkt);

    /* reject ADTS framing: it must be stripped before LATM muxing */
    if (pkt->size > 2 && pkt->data[0] == 0xff && (pkt->data[1] >> 4) == 0xf) {
        av_log(s, AV_LOG_ERROR, "ADTS header detected - ADTS will not be incorrectly muxed into LATM\n");
        return AVERROR_INVALIDDATA;
    }

    /* without extradata we can only pass through data that already looks
     * like a complete LOAS packet (sync + matching length field) */
    if (!s->streams[0]->codec->extradata) {
        if(pkt->size > 2 && pkt->data[0] == 0x56 && (pkt->data[1] >> 4) == 0xe &&
            (AV_RB16(pkt->data + 1) & 0x1FFF) + 3 == pkt->size)
            return ff_raw_write_packet(s, pkt);
        else
            return AVERROR_INVALIDDATA;
    }

    if (pkt->size > 0x1fff)       /* 13-bit LOAS length field limit */
        goto too_large;

    init_put_bits(&bs, ctx->buffer, pkt->size+1024+MAX_EXTRADATA_SIZE);

    latm_write_frame_header(s, &bs);

    /* PayloadLengthInfo(): length coded as runs of 255 plus remainder */
    for (i = 0; i <= pkt->size-255; i+=255)
        put_bits(&bs, 8, 255);

    put_bits(&bs, 8, pkt->size-i);

    /* The LATM payload is written unaligned */

    /* PayloadMux() */
    if (pkt->size && (pkt->data[0] & 0xe1) == 0x81) {
        // Convert byte-aligned DSE to non-aligned.
        // Due to the input format encoding we know that
        // it is naturally byte-aligned in the input stream,
        // so there are no padding bits to account for.
        // To avoid having to add padding bits and rearrange
        // the whole stream we just remove the byte-align flag.
        // This allows us to remux our FATE AAC samples into latm
        // files that are still playable with minimal effort.
        put_bits(&bs, 8, pkt->data[0] & 0xfe);
        avpriv_copy_bits(&bs, pkt->data + 1, 8*pkt->size - 8);
    } else
        avpriv_copy_bits(&bs, pkt->data, 8*pkt->size);

    avpriv_align_put_bits(&bs);
    flush_put_bits(&bs);

    len = put_bits_count(&bs) >> 3;

    if (len > 0x1fff)             /* config may have pushed us over the limit */
        goto too_large;

    /* patch the 13-bit audioMuxLengthBytes into the LOAS header */
    loas_header[1] |= (len >> 8) & 0x1f;
    loas_header[2] |= len & 0xff;

    avio_write(pb, loas_header, 3);
    avio_write(pb, ctx->buffer, len);

    return 0;

too_large:
    av_log(s, AV_LOG_ERROR, "LATM packet size larger than maximum size 0x1fff\n");
    return AVERROR_INVALIDDATA;
}
 
/* LOAS/LATM muxer registration (AAC only). */
AVOutputFormat ff_latm_muxer = {
    .name           = "latm",
    .long_name      = NULL_IF_CONFIG_SMALL("LOAS/LATM"),
    .mime_type      = "audio/MP4A-LATM",
    .extensions     = "latm,loas",
    .priv_data_size = sizeof(LATMContext),
    .audio_codec    = AV_CODEC_ID_AAC,
    .video_codec    = AV_CODEC_ID_NONE,
    .write_header   = latm_write_header,
    .write_packet   = latm_write_packet,
    .priv_class     = &latm_muxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/libavformat.pc
0,0 → 1,14
prefix=/usr/local
exec_prefix=${prefix}
libdir=${prefix}/lib
includedir=${prefix}/include
 
Name: libavformat
Description: FFmpeg container format library
Version: 55.19.104
Requires:
Requires.private: libavcodec = 55.39.101
Conflicts:
Libs: -L${libdir} -lavformat
Libs.private: -lm -lz -lpsapi -ladvapi32 -lshell32
Cflags: -I${includedir}
/contrib/sdk/sources/ffmpeg/libavformat/libavformat.v
0,0 → 1,38
LIBAVFORMAT_$MAJOR {
global: DllStartup;
av*;
#FIXME those are for ffserver
ff_inet_aton;
ff_socket_nonblock;
ffm_set_write_index;
ffm_read_write_index;
ffm_write_write_index;
ff_mpegts_parse_close;
ff_mpegts_parse_open;
ff_mpegts_parse_packet;
ff_rtsp_parse_line;
ff_rtp_get_local_rtp_port;
ff_rtp_get_local_rtcp_port;
ffio_open_dyn_packet_buf;
ffio_set_buf_size;
ffurl_close;
ffurl_open;
ffurl_read_complete;
ffurl_seek;
ffurl_size;
ffurl_write;
ffurl_protocol_next;
url_open;
url_close;
url_write;
#those are deprecated, remove on next bump
url_*;
ff_timefilter_destroy;
ff_timefilter_new;
ff_timefilter_update;
ff_timefilter_reset;
get_*;
put_*;
ff_codec_get_id;
local: *;
};
/contrib/sdk/sources/ffmpeg/libavformat/libavformat.ver
0,0 → 1,38
LIBAVFORMAT_55 {
global: DllStartup;
av*;
#FIXME those are for ffserver
ff_inet_aton;
ff_socket_nonblock;
ffm_set_write_index;
ffm_read_write_index;
ffm_write_write_index;
ff_mpegts_parse_close;
ff_mpegts_parse_open;
ff_mpegts_parse_packet;
ff_rtsp_parse_line;
ff_rtp_get_local_rtp_port;
ff_rtp_get_local_rtcp_port;
ffio_open_dyn_packet_buf;
ffio_set_buf_size;
ffurl_close;
ffurl_open;
ffurl_read_complete;
ffurl_seek;
ffurl_size;
ffurl_write;
ffurl_protocol_next;
url_open;
url_close;
url_write;
#those are deprecated, remove on next bump
url_*;
ff_timefilter_destroy;
ff_timefilter_new;
ff_timefilter_update;
ff_timefilter_reset;
get_*;
put_*;
ff_codec_get_id;
local: *;
};
/contrib/sdk/sources/ffmpeg/libavformat/libgme.c
0,0 → 1,201
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* libgme demuxer
*/
 
#include <gme/gme.h>
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "internal.h"
 
/** Game Music Emu demuxer state. */
typedef struct GMEContext {
    const AVClass *class;
    Music_Emu *music_emu;  /* libgme emulator instance, owned by this demuxer */
    gme_info_t *info;      ///< selected track

    /* options */
    int track_index;       /* which sub-track of the file to play */
    int sample_rate;       /* output sample rate requested from libgme */
    int64_t max_size;      /* upper bound on bytes read into memory */
} GMEContext;
 
#define OFFSET(x) offsetof(GMEContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define D AV_OPT_FLAG_DECODING_PARAM
/* User options; note the whole file is buffered in memory, hence max_size.
 * NOTE(review): SIZE_MAX is stored in the option's double-typed max field —
 * loses precision on 64-bit; confirm intended upper bound. */
static const AVOption options[] = {
    {"track_index", "set track that should be played", OFFSET(track_index), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, A|D},
    {"sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 44100}, 1000, 999999, A|D},
    {"max_size", "set max file size supported (in bytes)", OFFSET(max_size), AV_OPT_TYPE_INT64, {.i64 = 50 * 1024 * 1024}, 0, SIZE_MAX, A|D},
    {NULL}
};
 
/* Store a metadata entry, skipping NULL or empty values from libgme. */
static void add_meta(AVFormatContext *s, const char *name, const char *value)
{
    if (!value || !value[0])
        return;
    av_dict_set(&s->metadata, name, value, 0);
}
 
/* Export the libgme track info fields as stream metadata. */
static int load_metadata(AVFormatContext *s)
{
    GMEContext *gme = s->priv_data;
    gme_info_t *info = gme->info;
    char buf[30];
    const struct { const char *key; const char *val; } tags[] = {
        { "system",    info->system    },
        { "game",      info->game      },
        { "song",      info->song      },
        { "author",    info->author    },
        { "copyright", info->copyright },
        { "comment",   info->comment   },
        { "dumper",    info->dumper    },
    };
    size_t i;

    for (i = 0; i < sizeof(tags) / sizeof(tags[0]); i++)
        add_meta(s, tags[i].key, tags[i].val);

    snprintf(buf, sizeof(buf), "%d", (int)gme_track_count(gme->music_emu));
    add_meta(s, "tracks", buf);

    return 0;
}
 
#define AUDIO_PKT_SIZE 512
 
/*
 * Read the whole input into memory, hand it to libgme, select the requested
 * track and create the single PCM stereo stream.
 *
 * Fixes:
 *  - `buf` was leaked when the file exceeded max_size (the
 *    AVERROR_BUFFER_TOO_SMALL early return skipped the free).
 *  - the duration guard tested `st->duration > 0` — the freshly created
 *    *target* field — instead of the track info value, so the duration was
 *    effectively never taken from gme->info->length.
 */
static int read_header_gme(AVFormatContext *s)
{
    AVStream *st;
    AVIOContext *pb = s->pb;
    GMEContext *gme = s->priv_data;
    int64_t sz = avio_size(pb);
    char *buf;
    char dummy;

    if (sz < 0) {
        av_log(s, AV_LOG_WARNING, "Could not determine file size\n");
        sz = gme->max_size;
    } else if (gme->max_size && sz > gme->max_size) {
        sz = gme->max_size;
    }

    buf = av_malloc(sz);
    if (!buf)
        return AVERROR(ENOMEM);
    sz = avio_read(pb, buf, sz);

    // Data left means our buffer (the max_size option) is too small
    if (avio_read(pb, &dummy, 1) == 1) {
        av_log(s, AV_LOG_ERROR, "File size is larger than max_size option "
               "value %"PRIi64", consider increasing the max_size option\n",
               gme->max_size);
        av_freep(&buf);   /* was leaked on this path */
        return AVERROR_BUFFER_TOO_SMALL;
    }

    if (gme_open_data(buf, sz, &gme->music_emu, gme->sample_rate)) {
        av_freep(&buf);
        return AVERROR_INVALIDDATA;
    }
    av_freep(&buf);       /* libgme keeps its own copy */

    if (gme_track_info(gme->music_emu, &gme->info, gme->track_index))
        return AVERROR_STREAM_NOT_FOUND;

    if (gme_start_track(gme->music_emu, gme->track_index))
        return AVERROR_UNKNOWN;

    load_metadata(s);

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1000);  /* libgme reports lengths in ms */
    if (gme->info->length > 0)             /* was: st->duration > 0 */
        st->duration = gme->info->length;
    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id    = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE);
    st->codec->channels    = 2;
    st->codec->sample_rate = gme->sample_rate;

    return 0;
}
 
/* Render one fixed-size chunk of 16-bit samples from the emulator. */
static int read_packet_gme(AVFormatContext *s, AVPacket *pkt)
{
    GMEContext *gme = s->priv_data;
    const int n_samples = AUDIO_PKT_SIZE / 2;   /* 2 bytes per sample */
    int ret;

    if (gme_track_ended(gme->music_emu))
        return AVERROR_EOF;

    ret = av_new_packet(pkt, AUDIO_PKT_SIZE);
    if (ret < 0)
        return ret;

    if (gme_play(gme->music_emu, n_samples, (short *)pkt->data))
        return AVERROR_EXTERNAL;

    pkt->size = AUDIO_PKT_SIZE;
    return 0;
}
 
/* Free the libgme track info and emulator instance. */
static int read_close_gme(AVFormatContext *s)
{
    GMEContext *ctx = s->priv_data;

    gme_free_info(ctx->info);
    gme_delete(ctx->music_emu);
    return 0;
}
 
/* Seek the emulator to the given timestamp (stream timebase is 1/1000). */
static int read_seek_gme(AVFormatContext *s, int stream_idx, int64_t ts, int flags)
{
    GMEContext *ctx = s->priv_data;

    return gme_seek(ctx->music_emu, (int)ts) ? 0 : AVERROR_EXTERNAL;
}
 
static int probe_gme(AVProbeData *p)
{
    // Reads 4 bytes - returns "" if unknown format.
    if (!gme_identify_header(p->buf)[0])
        return 0;
    /* lower confidence when the probe buffer is too small to be sure */
    return p->buf_size < 16384 ? AVPROBE_SCORE_MAX / 4 + 1
                               : AVPROBE_SCORE_MAX / 2;
}
 
/* AVClass wiring exposing the option table above. */
static const AVClass class_gme = {
    .class_name = "Game Music Emu demuxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* libgme-backed game-music demuxer registration. */
AVInputFormat ff_libgme_demuxer = {
    .name           = "libgme",
    .long_name      = NULL_IF_CONFIG_SMALL("Game Music Emu demuxer"),
    .priv_data_size = sizeof(GMEContext),
    .read_probe     = probe_gme,
    .read_header    = read_header_gme,
    .read_packet    = read_packet_gme,
    .read_close     = read_close_gme,
    .read_seek      = read_seek_gme,
    .priv_class     = &class_gme,
};
/contrib/sdk/sources/ffmpeg/libavformat/libmodplug.c
0,0 → 1,381
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* ModPlug demuxer
* @todo better probing than extensions matching
*/
 
#include <libmodplug/modplug.h>
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "internal.h"
 
/** ModPlug demuxer state: libmodplug handle plus the optional XBIN video feed. */
typedef struct ModPlugContext {
    const AVClass *class;
    ModPlugFile *f;          /* libmodplug handle, owns decoding state */
    uint8_t *buf;            ///< input file content

    /* options */
    int noise_reduction;
    int reverb_depth;
    int reverb_delay;
    int bass_amount;
    int bass_range;
    int surround_depth;
    int surround_delay;

    int max_size;            ///< max file size to allocate

    /* optional video stream */
    double ts_per_packet;    ///< used to define the pts/dts using packet_count;
    int packet_count;        ///< total number of audio packets
    int print_textinfo;      ///< bool flag for printing speed, tempo, order, ...
    int video_stream;        ///< 1 if the user want a video stream, otherwise 0
    int w;                   ///< video stream width in char (one char = 8x8px)
    int h;                   ///< video stream height in char (one char = 8x8px)
    int video_switch;        ///< 1 if current packet is video, otherwise 0
    int fsize;               ///< constant frame size
    int linesize;            ///< line size in bytes
    char *color_eval;        ///< color eval user input expression
    AVExpr *expr;            ///< parsed color eval expression
} ModPlugContext;
 
/* Variables available in the video_stream_expr color expression.
 * Must stay in the same order as the var_name enum below. */
static const char *var_names[] = {
    "x", "y",
    "w", "h",
    "t",
    "speed", "tempo", "order", "pattern", "row",
    NULL
};

enum var_name {
    VAR_X, VAR_Y,
    VAR_W, VAR_H,
    VAR_TIME,
    VAR_SPEED, VAR_TEMPO, VAR_ORDER, VAR_PATTERN, VAR_ROW,
    VAR_VARS_NB
};
 
/* Hard limit and default for the in-memory module file buffer. */
#define FF_MODPLUG_MAX_FILE_SIZE (100 * 1<<20) // 100M
#define FF_MODPLUG_DEF_FILE_SIZE (  5 * 1<<20) //   5M

#define OFFSET(x) offsetof(ModPlugContext, x)
#define D AV_OPT_FLAG_DECODING_PARAM
/* libmodplug mixing options plus the optional XBIN video-stream settings. */
static const AVOption options[] = {
    {"noise_reduction", "Enable noise reduction 0(off)-1(on)",  OFFSET(noise_reduction), AV_OPT_TYPE_INT, {.i64 = 0}, 0,       1, D},
    {"reverb_depth",    "Reverb level 0(quiet)-100(loud)",      OFFSET(reverb_depth),    AV_OPT_TYPE_INT, {.i64 = 0}, 0,     100, D},
    {"reverb_delay",    "Reverb delay in ms, usually 40-200ms", OFFSET(reverb_delay),    AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, D},
    {"bass_amount",     "XBass level 0(quiet)-100(loud)",       OFFSET(bass_amount),     AV_OPT_TYPE_INT, {.i64 = 0}, 0,     100, D},
    {"bass_range",      "XBass cutoff in Hz 10-100",            OFFSET(bass_range),      AV_OPT_TYPE_INT, {.i64 = 0}, 0,     100, D},
    {"surround_depth",  "Surround level 0(quiet)-100(heavy)",   OFFSET(surround_depth),  AV_OPT_TYPE_INT, {.i64 = 0}, 0,     100, D},
    {"surround_delay",  "Surround delay in ms, usually 5-40ms", OFFSET(surround_delay),  AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, D},
    {"max_size",        "Max file size supported (in bytes). Default is 5MB. Set to 0 for no limit (not recommended)",
     OFFSET(max_size), AV_OPT_TYPE_INT, {.i64 = FF_MODPLUG_DEF_FILE_SIZE}, 0, FF_MODPLUG_MAX_FILE_SIZE, D},
    {"video_stream_expr", "Color formula",                                  OFFSET(color_eval),     AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, D},
    {"video_stream",      "Make demuxer output a video stream",             OFFSET(video_stream),   AV_OPT_TYPE_INT, {.i64 = 0},   0,   1, D},
    {"video_stream_w",    "Video stream width in char (one char = 8x8px)",  OFFSET(w),              AV_OPT_TYPE_INT, {.i64 = 30}, 20, 512, D},
    {"video_stream_h",    "Video stream height in char (one char = 8x8px)", OFFSET(h),              AV_OPT_TYPE_INT, {.i64 = 30}, 20, 512, D},
    {"video_stream_ptxt", "Print speed, tempo, order, ... in video stream", OFFSET(print_textinfo), AV_OPT_TYPE_INT, {.i64 = 1},   0,   1, D},
    {NULL},
};
 
/* Copy an option value into the ModPlug_Settings field and raise the
 * corresponding feature flag, but only when the user actually set it. */
#define SET_OPT_IF_REQUESTED(libopt, opt, flag) do {        \
    if (modplug->opt) {                                     \
        settings.libopt  = modplug->opt;                    \
        settings.mFlags |= flag;                            \
    }                                                       \
} while (0)

/* Append every named sub-entry (instrument or sample names) of the module
 * as one newline-joined metadata value, and record the used/total count in
 * the "extra info" metadata entry. */
#define ADD_META_MULTIPLE_ENTRIES(entry_name, fname) do {                      \
    if (n_## entry_name ##s) {                                                 \
        unsigned i, n = 0;                                                     \
                                                                               \
        for (i = 0; i < n_## entry_name ##s; i++) {                            \
            char item_name[64] = {0};                                          \
            fname(f, i, item_name);                                            \
            if (!*item_name)                                                   \
                continue;                                                      \
            if (n)                                                             \
                av_dict_set(&s->metadata, #entry_name, "\n", AV_DICT_APPEND);  \
            av_dict_set(&s->metadata, #entry_name, item_name, AV_DICT_APPEND); \
            n++;                                                               \
        }                                                                      \
                                                                               \
        extra = av_asprintf(", %u/%u " #entry_name "%s",                       \
                            n, n_## entry_name ##s, n > 1 ? "s" : "");         \
        if (!extra)                                                            \
            return AVERROR(ENOMEM);                                            \
        av_dict_set(&s->metadata, "extra info", extra, AV_DICT_APPEND);        \
        av_free(extra);                                                        \
    }                                                                          \
} while (0)
 
/* Export module name, message text, and pattern/channel/instrument/sample
 * statistics as stream metadata. */
static int modplug_load_metadata(AVFormatContext *s)
{
    ModPlugContext *modplug = s->priv_data;
    ModPlugFile *f = modplug->f;
    char *extra;
    const char *name = ModPlug_GetName(f);
    const char *msg  = ModPlug_GetMessage(f);

    unsigned n_instruments = ModPlug_NumInstruments(f);
    unsigned n_samples     = ModPlug_NumSamples(f);
    unsigned n_patterns    = ModPlug_NumPatterns(f);
    unsigned n_channels    = ModPlug_NumChannels(f);

    if (name && *name) av_dict_set(&s->metadata, "name",    name, 0);
    if (msg  && *msg)  av_dict_set(&s->metadata, "message", msg,  0);

    extra = av_asprintf("%u pattern%s, %u channel%s",
                        n_patterns, n_patterns > 1 ? "s" : "",
                        n_channels, n_channels > 1 ? "s" : "");
    if (!extra)
        return AVERROR(ENOMEM);
    av_dict_set(&s->metadata, "extra info", extra, AV_DICT_DONT_STRDUP_VAL);

    /* these append to "extra info" and may return ENOMEM */
    ADD_META_MULTIPLE_ENTRIES(instrument, ModPlug_InstrumentName);
    ADD_META_MULTIPLE_ENTRIES(sample,     ModPlug_SampleName);

    return 0;
}
 
#define AUDIO_PKT_SIZE 512
 
/*
 * Buffer the whole module file in memory, configure libmodplug, load the
 * module, and create the audio stream (plus the optional XBIN video stream).
 *
 * Fix: the buffer was allocated with av_malloc(modplug->max_size) but then
 * filled with up to `sz` bytes; with max_size = 0 ("no limit") and a known
 * file size that was a zero-byte allocation followed by an out-of-bounds
 * write in avio_read().  Allocate `sz` bytes — on every path sz is already
 * clamped to max_size when a limit is set.  Also free the buffer when
 * ModPlug_Load() fails, since read_close() is not reached on header failure.
 */
static int modplug_read_header(AVFormatContext *s)
{
    AVStream *st;
    AVIOContext *pb = s->pb;
    ModPlug_Settings settings;
    ModPlugContext *modplug = s->priv_data;
    int64_t sz = avio_size(pb);

    if (sz < 0) {
        av_log(s, AV_LOG_WARNING, "Could not determine file size\n");
        sz = modplug->max_size;
    } else if (modplug->max_size && sz > modplug->max_size) {
        sz = modplug->max_size;
        av_log(s, AV_LOG_WARNING, "Max file size reach%s, allocating %"PRIi64"B "
               "but demuxing is likely to fail due to incomplete buffer\n",
               sz == FF_MODPLUG_DEF_FILE_SIZE ? " (see -max_size)" : "", sz);
    }

    /* unknown size with no limit set: nothing sensible to allocate */
    if (sz <= 0)
        return AVERROR_INVALIDDATA;

    if (modplug->color_eval) {
        int r = av_expr_parse(&modplug->expr, modplug->color_eval, var_names,
                              NULL, NULL, NULL, NULL, 0, s);
        if (r < 0)
            return r;
    }

    modplug->buf = av_malloc(sz);   /* was: av_malloc(modplug->max_size) */
    if (!modplug->buf)
        return AVERROR(ENOMEM);
    sz = avio_read(pb, modplug->buf, sz);
    if (sz < 0) {
        av_freep(&modplug->buf);
        return sz;
    }

    ModPlug_GetSettings(&settings);
    settings.mChannels       = 2;
    settings.mBits           = 16;
    settings.mFrequency      = 44100;
    settings.mResamplingMode = MODPLUG_RESAMPLE_FIR; // best quality
    settings.mLoopCount      = 0; // prevents looping forever

    if (modplug->noise_reduction) settings.mFlags |= MODPLUG_ENABLE_NOISE_REDUCTION;
    SET_OPT_IF_REQUESTED(mReverbDepth,   reverb_depth,   MODPLUG_ENABLE_REVERB);
    SET_OPT_IF_REQUESTED(mReverbDelay,   reverb_delay,   MODPLUG_ENABLE_REVERB);
    SET_OPT_IF_REQUESTED(mBassAmount,    bass_amount,    MODPLUG_ENABLE_MEGABASS);
    SET_OPT_IF_REQUESTED(mBassRange,     bass_range,     MODPLUG_ENABLE_MEGABASS);
    SET_OPT_IF_REQUESTED(mSurroundDepth, surround_depth, MODPLUG_ENABLE_SURROUND);
    SET_OPT_IF_REQUESTED(mSurroundDelay, surround_delay, MODPLUG_ENABLE_SURROUND);

    if (modplug->reverb_depth)   settings.mReverbDepth   = modplug->reverb_depth;
    if (modplug->reverb_delay)   settings.mReverbDelay   = modplug->reverb_delay;
    if (modplug->bass_amount)    settings.mBassAmount    = modplug->bass_amount;
    if (modplug->bass_range)     settings.mBassRange     = modplug->bass_range;
    if (modplug->surround_depth) settings.mSurroundDepth = modplug->surround_depth;
    if (modplug->surround_delay) settings.mSurroundDelay = modplug->surround_delay;

    ModPlug_SetSettings(&settings);

    modplug->f = ModPlug_Load(modplug->buf, sz);
    if (!modplug->f) {
        av_freep(&modplug->buf);  /* read_close() will not run on this failure */
        return AVERROR_INVALIDDATA;
    }

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1000);   /* libmodplug reports ms */
    st->duration = ModPlug_GetLength(modplug->f);
    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id    = AV_CODEC_ID_PCM_S16LE;
    st->codec->channels    = settings.mChannels;
    st->codec->sample_rate = settings.mFrequency;

    // timebase = 1/1000, 2ch 16bits 44.1kHz-> 2*2*44100
    modplug->ts_per_packet = 1000*AUDIO_PKT_SIZE / (4*44100.);

    if (modplug->video_stream) {
        AVStream *vst = avformat_new_stream(s, NULL);
        if (!vst)
            return AVERROR(ENOMEM);
        avpriv_set_pts_info(vst, 64, 1, 1000);
        vst->duration = st->duration;
        vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        vst->codec->codec_id   = AV_CODEC_ID_XBIN;
        vst->codec->width      = modplug->w << 3;   /* 8 px per char cell */
        vst->codec->height     = modplug->h << 3;
        modplug->linesize = modplug->w * 3;         /* 3 bytes per XBIN cell */
        modplug->fsize    = modplug->linesize * modplug->h;
    }

    return modplug_load_metadata(s);
}
 
/*
 * Render the NUL-terminated string s into the XBIN frame buffer dst at
 * character cell (x, y).  Each cell is 3 bytes: run count - 1, the
 * character, and the attribute byte (background/foreground).
 */
static void write_text(uint8_t *dst, const char *s, int linesize, int x, int y)
{
    uint8_t *cell = dst + y * linesize + x * 3;

    while (*s) {
        *cell++ = 0x0;   // count - 1
        *cell++ = *s++;  // char
        *cell++ = 0x0f;  // background / foreground
    }
}
 
/* Print "name: value" (value taken from var_values[idvalue]) on the given
 * text line of the video frame, with a 1-cell border offset. */
#define PRINT_INFO(line, name, idvalue) do {                            \
    snprintf(intbuf, sizeof(intbuf), "%.0f", var_values[idvalue]);      \
    write_text(pkt->data, name ":", modplug->linesize,  0+1, line+1);   \
    write_text(pkt->data, intbuf,   modplug->linesize, 10+1, line+1);   \
} while (0)
 
/*
 * Emit packets.  Without a video stream: one PCM chunk per call.  With a
 * video stream: strictly alternate one XBIN video frame (rendered from the
 * current tracker state and the optional color expression) with one audio
 * chunk, toggled via video_switch.
 */
static int modplug_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    ModPlugContext *modplug = s->priv_data;

    if (modplug->video_stream) {
        modplug->video_switch ^= 1; // one video packet for one audio packet
        if (modplug->video_switch) {
            double var_values[VAR_VARS_NB];

            var_values[VAR_W      ] = modplug->w;
            var_values[VAR_H      ] = modplug->h;
            var_values[VAR_TIME   ] = modplug->packet_count * modplug->ts_per_packet;
            var_values[VAR_SPEED  ] = ModPlug_GetCurrentSpeed  (modplug->f);
            var_values[VAR_TEMPO  ] = ModPlug_GetCurrentTempo  (modplug->f);
            var_values[VAR_ORDER  ] = ModPlug_GetCurrentOrder  (modplug->f);
            var_values[VAR_PATTERN] = ModPlug_GetCurrentPattern(modplug->f);
            var_values[VAR_ROW    ] = ModPlug_GetCurrentRow    (modplug->f);

            if (av_new_packet(pkt, modplug->fsize) < 0)
                return AVERROR(ENOMEM);
            pkt->stream_index = 1;
            memset(pkt->data, 0, modplug->fsize);   /* blank XBIN frame */

            if (modplug->print_textinfo) {
                char intbuf[32];
                PRINT_INFO(0, "speed",   VAR_SPEED);
                PRINT_INFO(1, "tempo",   VAR_TEMPO);
                PRINT_INFO(2, "order",   VAR_ORDER);
                PRINT_INFO(3, "pattern", VAR_PATTERN);
                PRINT_INFO(4, "row",     VAR_ROW);
                PRINT_INFO(5, "ts",      VAR_TIME);
            }

            if (modplug->expr) {
                int x, y;
                /* evaluate the user color expression per character cell and
                 * store the result in the cell's background nibble */
                for (y = 0; y < modplug->h; y++) {
                    for (x = 0; x < modplug->w; x++) {
                        double color;
                        var_values[VAR_X] = x;
                        var_values[VAR_Y] = y;
                        color = av_expr_eval(modplug->expr, var_values, NULL);
                        pkt->data[y*modplug->linesize + x*3 + 2] |= av_clip((int)color, 0, 0xf)<<4;
                    }
                }
            }
            pkt->pts = pkt->dts = var_values[VAR_TIME];
            pkt->flags |= AV_PKT_FLAG_KEY;
            return 0;
        }
    }

    if (av_new_packet(pkt, AUDIO_PKT_SIZE) < 0)
        return AVERROR(ENOMEM);

    /* audio pts only needed for interleaving with the video stream */
    if (modplug->video_stream)
        pkt->pts = pkt->dts = modplug->packet_count++ * modplug->ts_per_packet;

    pkt->size = ModPlug_Read(modplug->f, pkt->data, AUDIO_PKT_SIZE);
    if (pkt->size <= 0) {
        av_free_packet(pkt);
        return pkt->size == 0 ? AVERROR_EOF : AVERROR(EIO);
    }
    return 0;
}
 
/* Unload the module and release the in-memory file buffer. */
static int modplug_read_close(AVFormatContext *s)
{
    ModPlugContext *ctx = s->priv_data;

    ModPlug_Unload(ctx->f);
    av_freep(&ctx->buf);
    return 0;
}
 
/* Seek the module player; resync the video pts counter if a video stream exists. */
static int modplug_read_seek(AVFormatContext *s, int stream_idx, int64_t ts, int flags)
{
    ModPlugContext *ctx = s->priv_data;

    ModPlug_Seek(ctx->f, (int)ts);
    if (ctx->video_stream)
        ctx->packet_count = ts / ctx->ts_per_packet;
    return 0;
}
 
/* File extensions recognized by libmodplug, including the compressed
 * variants (e.g. itgz/itz, xmgz/xmz are gzipped/renamed module files). */
static const char modplug_extensions[] = "669,abc,amf,ams,dbm,dmf,dsm,far,it,mdl,med,mid,mod,mt2,mtm,okt,psm,ptm,s3m,stm,ult,umx,xm,itgz,itr,itz,mdgz,mdr,mdz,s3gz,s3r,s3z,xmgz,xmr,xmz";
 
/* Probe purely by file extension: module formats carry no single
 * reliable magic number.  A short probe buffer gets a slightly lower
 * score so a more specific demuxer can still win. */
static int modplug_probe(AVProbeData *p)
{
    if (!av_match_ext(p->filename, modplug_extensions))
        return 0;

    return p->buf_size < 16384 ? AVPROBE_SCORE_EXTENSION / 2 - 1
                               : AVPROBE_SCORE_EXTENSION;
}
 
/* AVClass exposing the demuxer's AVOptions (the `options` table). */
static const AVClass modplug_class = {
    .class_name = "ModPlug demuxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Demuxer definition for module files decoded through libmodplug. */
AVInputFormat ff_libmodplug_demuxer = {
    .name           = "libmodplug",
    .long_name      = NULL_IF_CONFIG_SMALL("ModPlug demuxer"),
    .priv_data_size = sizeof(ModPlugContext),
    .read_probe     = modplug_probe,
    .read_header    = modplug_read_header,
    .read_packet    = modplug_read_packet,
    .read_close     = modplug_read_close,
    .read_seek      = modplug_read_seek,
    .extensions     = modplug_extensions,
    .priv_class     = &modplug_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/libnut.c
0,0 → 1,324
/*
* NUT (de)muxing via libnut
* copyright (c) 2006 Oded Shimon <ods15@ods15.dyndns.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* NUT demuxing and muxing via libnut.
* @author Oded Shimon <ods15@ods15.dyndns.org>
*/
 
#include "avformat.h"
#include "internal.h"
#include "riff.h"
#include <libnut.h>
 
/* Magic string at the start of every NUT file; the length includes the
 * terminating NUL, which is part of the on-disk identifier. */
#define ID_STRING "nut/multimedia container"
#define ID_LENGTH (strlen(ID_STRING) + 1)

/* Private data: the libnut context plus the stream header array that is
 * shared with the library (terminated by an entry with type == -1). */
typedef struct {
    nut_context_tt * nut;
    nut_stream_header_tt * s;
} NUTContext;
 
/* Native NUT fourcc tags; codecs not listed here fall back to the
 * generic RIFF video/audio tag tables. */
static const AVCodecTag nut_tags[] = {
    { AV_CODEC_ID_MPEG4,  MKTAG('m', 'p', '4', 'v') },
    { AV_CODEC_ID_MP3,    MKTAG('m', 'p', '3', ' ') },
    { AV_CODEC_ID_VORBIS, MKTAG('v', 'r', 'b', 's') },
    { 0, 0 },
};
 
#if CONFIG_LIBNUT_MUXER
/* libnut output callback: forward a buffer to the AVIOContext. */
static int av_write(void * h, size_t len, const uint8_t * buf) {
    AVIOContext *bc = h;

    avio_write(bc, buf, len);
    return len;
}
 
/* Write the NUT file header: translate every AVStream into a libnut
 * nut_stream_header_tt, terminate the array with type == -1 and hand it
 * to nut_muxer_init().
 *
 * Fix: the original ignored av_malloc() failure for the fourcc buffer
 * and would have dereferenced NULL in the byte-copy loop below; on OOM
 * we now unwind everything allocated so far and return AVERROR(ENOMEM).
 *
 * Returns 0 on success, a negative AVERROR code on allocation failure. */
static int nut_write_header(AVFormatContext * avf) {
    NUTContext * priv = avf->priv_data;
    AVIOContext * bc = avf->pb;
    nut_muxer_opts_tt mopts = {
        .output = {
            .priv  = bc,
            .write = av_write,
        },
        .alloc = { av_malloc, av_realloc, av_free },
        .write_index     = 1,
        .realtime_stream = 0,
        .max_distance    = 32768,
        .fti             = NULL,
    };
    nut_stream_header_tt * s;
    int i;

    /* One extra zeroed entry acts as the type == -1 terminator below. */
    priv->s = s = av_mallocz((avf->nb_streams + 1) * sizeof*s);
    if (!s)
        return AVERROR(ENOMEM);

    for (i = 0; i < avf->nb_streams; i++) {
        AVCodecContext * codec = avf->streams[i]->codec;
        int j;
        int fourcc = 0;
        int num, denom, ssize;

        s[i].type = codec->codec_type == AVMEDIA_TYPE_VIDEO ? NUT_VIDEO_CLASS : NUT_AUDIO_CLASS;

        /* Prefer an explicit codec tag, then the native NUT tags, then
         * the generic RIFF video/audio tables. */
        if (codec->codec_tag) fourcc = codec->codec_tag;
        else fourcc = ff_codec_get_tag(nut_tags, codec->codec_id);

        if (!fourcc) {
            if (codec->codec_type == AVMEDIA_TYPE_VIDEO) fourcc = ff_codec_get_tag(ff_codec_bmp_tags, codec->codec_id);
            if (codec->codec_type == AVMEDIA_TYPE_AUDIO) fourcc = ff_codec_get_tag(ff_codec_wav_tags, codec->codec_id);
        }

        s[i].fourcc_len = 4;
        s[i].fourcc = av_malloc(s[i].fourcc_len);
        if (!s[i].fourcc) {
            /* Unwind fourcc buffers of the streams processed so far. */
            while (i--)
                av_freep(&s[i].fourcc);
            av_freep(&priv->s);
            return AVERROR(ENOMEM);
        }
        /* Store the fourcc little-endian, byte by byte. */
        for (j = 0; j < s[i].fourcc_len; j++) s[i].fourcc[j] = (fourcc >> (j*8)) & 0xFF;

        ff_parse_specific_params(codec, &num, &ssize, &denom);
        avpriv_set_pts_info(avf->streams[i], 60, denom, num);

        s[i].time_base.num = denom;
        s[i].time_base.den = num;

        s[i].fixed_fps          = 0;
        s[i].decode_delay       = codec->has_b_frames;
        s[i].codec_specific_len = codec->extradata_size;
        s[i].codec_specific     = codec->extradata;

        if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            s[i].width           = codec->width;
            s[i].height          = codec->height;
            s[i].sample_width    = 0;
            s[i].sample_height   = 0;
            s[i].colorspace_type = 0;
        } else {
            s[i].samplerate_num   = codec->sample_rate;
            s[i].samplerate_denom = 1;
            s[i].channel_count    = codec->channels;
        }
    }

    /* Terminator entry expected by libnut. */
    s[avf->nb_streams].type = -1;
    priv->nut = nut_muxer_init(&mopts, s, NULL);

    return 0;
}
 
/* Convert an AVPacket into a libnut packet descriptor and queue it
 * through libnut's reordering frame writer. */
static int nut_write_packet(AVFormatContext * avf, AVPacket * pkt) {
    NUTContext * priv = avf->priv_data;
    nut_packet_tt p = {
        .len      = pkt->size,
        .stream   = pkt->stream_index,
        .pts      = pkt->pts,
        .flags    = (pkt->flags & AV_PKT_FLAG_KEY) ? NUT_FLAG_KEY : 0,
        .next_pts = 0,
    };

    nut_write_frame_reorder(priv->nut, &p, pkt->data);

    return 0;
}
 
/* Flush pending reordered frames, finalize the muxer and release the
 * stream header array built in nut_write_header(). */
static int nut_write_trailer(AVFormatContext * avf) {
    NUTContext * priv = avf->priv_data;
    int i;

    nut_muxer_uninit_reorder(priv->nut);
    avio_flush(avf->pb);

    /* The stream array is terminated by an entry with type == -1. */
    for (i = 0; priv->s[i].type != -1; i++)
        av_freep(&priv->s[i].fourcc);
    av_freep(&priv->s);

    return 0;
}
 
/* Muxer definition for NUT via libnut (compiled only with
 * CONFIG_LIBNUT_MUXER). */
AVOutputFormat ff_libnut_muxer = {
    .name           = "libnut",
    .long_name      = "nut format",
    .mime_type      = "video/x-nut",
    .extensions     = "nut",
    .priv_data_size = sizeof(NUTContext),
    .audio_codec    = AV_CODEC_ID_VORBIS,
    .video_codec    = AV_CODEC_ID_MPEG4,
    .write_header   = nut_write_header,
    .write_packet   = nut_write_packet,
    .write_trailer  = nut_write_trailer,
    .flags          = AVFMT_GLOBALHEADER,
};
#endif /* CONFIG_LIBNUT_MUXER */
 
/* Probe for the NUT magic string at the start of the buffer.
 * Robustness fix: explicitly refuse to compare when the probe buffer is
 * shorter than the ID string instead of relying on the zero padding of
 * AVProbeData buffers. */
static int nut_probe(AVProbeData *p) {
    if (p->buf_size >= ID_LENGTH && !memcmp(p->buf, ID_STRING, ID_LENGTH))
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
static size_t av_read(void * h, size_t len, uint8_t * buf) {
AVIOContext * bc = h;
return avio_read(bc, buf, len);
}
 
/* libnut seek callback.  End-relative seeks are rewritten as absolute
 * positions because the underlying avio layer is given SEEK_SET here. */
static off_t av_seek(void * h, long long pos, int whence) {
    AVIOContext *bc = h;

    if (whence == SEEK_END) {
        pos += avio_size(bc);
        whence = SEEK_SET;
    }

    return avio_seek(bc, pos, whence);
}
 
/* Read the NUT headers through libnut and create the corresponding
 * AVStreams.  Returns 0 on success, a negative value on error. */
static int nut_read_header(AVFormatContext * avf) {
    NUTContext * priv = avf->priv_data;
    AVIOContext * bc = avf->pb;
    nut_demuxer_opts_tt dopts = {
        .input = {
            .priv     = bc,
            .seek     = av_seek,
            .read     = av_read,
            .eof      = NULL,
            .file_pos = 0,
        },
        .alloc = { av_malloc, av_realloc, av_free },
        .read_index       = 1,
        .cache_syncpoints = 1,
    };
    nut_context_tt * nut = priv->nut = nut_demuxer_init(&dopts);
    nut_stream_header_tt * s;
    int ret, i;

    if (!nut)
        return -1;

    if ((ret = nut_read_headers(nut, &s, NULL))) {
        av_log(avf, AV_LOG_ERROR, " NUT error: %s\n", nut_error(ret));
        nut_demuxer_uninit(nut);
        priv->nut = NULL;
        return -1;
    }

    /* The header array is owned by libnut; keep a reference for seeking. */
    priv->s = s;

    /* NOTE(review): the array is terminated by type == -1, but only the
     * first two streams are imported here. */
    for (i = 0; s[i].type != -1 && i < 2; i++) {
        AVStream * st = avformat_new_stream(avf, NULL);
        int j;

        if (!st)
            return AVERROR(ENOMEM);

        /* Rebuild the little-endian fourcc from its byte array form. */
        for (j = 0; j < s[i].fourcc_len && j < 8; j++) st->codec->codec_tag |= s[i].fourcc[j]<<(j*8);

        st->codec->has_b_frames = s[i].decode_delay;

        st->codec->extradata_size = s[i].codec_specific_len;
        if (st->codec->extradata_size) {
            if (ff_alloc_extradata(st->codec, st->codec->extradata_size)) {
                nut_demuxer_uninit(nut);
                priv->nut = NULL;
                return AVERROR(ENOMEM);
            }
            memcpy(st->codec->extradata, s[i].codec_specific, st->codec->extradata_size);
        }

        avpriv_set_pts_info(avf->streams[i], 60, s[i].time_base.num, s[i].time_base.den);
        st->start_time = 0;
        st->duration   = s[i].max_pts;

        /* Try NUT-native tags first; class-specific fallbacks below. */
        st->codec->codec_id = ff_codec_get_id(nut_tags, st->codec->codec_tag);

        switch (s[i].type) {
        case NUT_AUDIO_CLASS:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            if (st->codec->codec_id == AV_CODEC_ID_NONE) st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, st->codec->codec_tag);

            st->codec->channels    = s[i].channel_count;
            st->codec->sample_rate = s[i].samplerate_num / s[i].samplerate_denom;
            break;
        case NUT_VIDEO_CLASS:
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            if (st->codec->codec_id == AV_CODEC_ID_NONE) st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, st->codec->codec_tag);

            st->codec->width  = s[i].width;
            st->codec->height = s[i].height;
            st->sample_aspect_ratio.num = s[i].sample_width;
            st->sample_aspect_ratio.den = s[i].sample_height;
            break;
        }
        if (st->codec->codec_id == AV_CODEC_ID_NONE) av_log(avf, AV_LOG_ERROR, "Unknown codec?!\n");
    }

    return 0;
}
 
/* Read the next frame from libnut into an AVPacket.
 * NOTE(review): if av_new_packet() fails while nut_read_next_packet()
 * succeeded (ret == 0), this logs nut_error(0) and returns -1, so the
 * log message is misleading in that corner case. */
static int nut_read_packet(AVFormatContext * avf, AVPacket * pkt) {
    NUTContext * priv = avf->priv_data;
    nut_packet_tt pd;
    int ret;

    ret = nut_read_next_packet(priv->nut, &pd);

    if (ret || av_new_packet(pkt, pd.len) < 0) {
        if (ret != NUT_ERR_EOF)
            av_log(avf, AV_LOG_ERROR, " NUT error: %s\n", nut_error(ret));
        return -1;
    }

    if (pd.flags & NUT_FLAG_KEY) pkt->flags |= AV_PKT_FLAG_KEY;
    pkt->pts          = pd.pts;
    pkt->stream_index = pd.stream;
    pkt->pos          = avio_tell(avf->pb);

    /* Copy the frame payload into the freshly allocated packet. */
    ret = nut_read_frame(priv->nut, &pd.len, pkt->data);

    return ret;
}
 
/* Seek to target_ts (in the stream's time base) on the given stream. */
static int nut_read_seek(AVFormatContext * avf, int stream_index, int64_t target_ts, int flags) {
    NUTContext * priv = avf->priv_data;
    int active_streams[] = { stream_index, -1 };
    nut_stream_header_tt *hdr = &priv->s[stream_index];
    /* Convert to seconds for libnut. */
    double time_pos = target_ts * hdr->time_base.num / (double)hdr->time_base.den;
    int direction = 2 * !(flags & AVSEEK_FLAG_BACKWARD);

    return nut_seek(priv->nut, time_pos, direction, active_streams) ? -1 : 0;
}
 
/* Release the libnut demuxer context. */
static int nut_read_close(AVFormatContext *s) {
    NUTContext *priv = s->priv_data;

    nut_demuxer_uninit(priv->nut);
    return 0;
}
 
/* Demuxer definition for NUT via libnut. */
AVInputFormat ff_libnut_demuxer = {
    .name           = "libnut",
    .long_name      = NULL_IF_CONFIG_SMALL("NUT format"),
    .priv_data_size = sizeof(NUTContext),
    .read_probe     = nut_probe,
    .read_header    = nut_read_header,
    .read_packet    = nut_read_packet,
    .read_close     = nut_read_close,
    .read_seek      = nut_read_seek,
    .extensions     = "nut",
};
/contrib/sdk/sources/ffmpeg/libavformat/libquvi.c
0,0 → 1,146
/*
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <quvi/quvi.h>
 
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/opt.h"
 
/* Private data: requested quality/format string (AVOption) and the
 * nested demuxer context opened on the resolved media URL. */
typedef struct {
    const AVClass *class;
    char *format;
    AVFormatContext *fmtctx;
} LibQuviContext;
 
#define OFFSET(x) offsetof(LibQuviContext, x)
#define FLAGS AV_OPT_FLAG_DECODING_PARAM
/* -format selects the libquvi quality/format string; "best" by default. */
static const AVOption libquvi_options[] = {
    { "format", "request specific format", OFFSET(format), AV_OPT_TYPE_STRING, {.str="best"}, .flags = FLAGS },
    { NULL }
};

/* AVClass exposing the options above. */
static const AVClass libquvi_context_class = {
    .class_name = "libquvi",
    .item_name  = av_default_item_name,
    .option     = libquvi_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Close the nested demuxer context if one was opened. */
static int libquvi_close(AVFormatContext *s)
{
    LibQuviContext *qc = s->priv_data;

    if (qc->fmtctx)
        avformat_close_input(&qc->fmtctx);

    return 0;
}
 
/* Resolve the media URL of the page given as s->filename through
 * libquvi, open the resolved URL with a nested demuxer and mirror its
 * streams into this context.
 *
 * Fixes over the original:
 *  - q and m are NULL-initialized and guarded in the cleanup path; the
 *    original called quvi_parse_close() on an uninitialized m (and
 *    quvi_strerror() on an uninitialized q) when quvi_init() or
 *    quvi_parse() failed.
 *  - the libquvi handles are now also released on the success path
 *    (the nested context and metadata keep their own copies of the
 *    strings obtained from libquvi). */
static int libquvi_read_header(AVFormatContext *s)
{
    int i, ret;
    quvi_t q = NULL;
    quvi_media_t m = NULL;
    QUVIcode rc;
    LibQuviContext *qc = s->priv_data;
    char *media_url, *pagetitle;

    rc = quvi_init(&q);
    if (rc != QUVI_OK)
        goto quvi_fail;

    quvi_setopt(q, QUVIOPT_FORMAT, qc->format);

    rc = quvi_parse(q, s->filename, &m);
    if (rc != QUVI_OK)
        goto quvi_fail;

    rc = quvi_getprop(m, QUVIPROP_MEDIAURL, &media_url);
    if (rc != QUVI_OK)
        goto quvi_fail;

    /* avformat_open_input duplicates the URL, so m may be freed later. */
    ret = avformat_open_input(&qc->fmtctx, media_url, NULL, NULL);
    if (ret < 0)
        goto end;

    rc = quvi_getprop(m, QUVIPROP_PAGETITLE, &pagetitle);
    if (rc == QUVI_OK)
        av_dict_set(&s->metadata, "title", pagetitle, 0); /* av_dict_set copies the string */

    for (i = 0; i < qc->fmtctx->nb_streams; i++) {
        AVStream *st  = avformat_new_stream(s, NULL);
        AVStream *ist = qc->fmtctx->streams[i];
        if (!st) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
        avcodec_copy_context(st->codec, qc->fmtctx->streams[i]->codec);
    }

    ret = 0;
    goto end;

quvi_fail:
    av_log(s, AV_LOG_ERROR, "%s\n", quvi_strerror(q, rc));
    ret = AVERROR_EXTERNAL;

end:
    if (m)
        quvi_parse_close(&m);
    if (q)
        quvi_close(&q);
    return ret;
}
 
/* Delegate packet reading to the nested demuxer. */
static int libquvi_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    LibQuviContext *qc = s->priv_data;

    return av_read_frame(qc->fmtctx, pkt);
}
 
/* Delegate seeking to the nested demuxer. */
static int libquvi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    LibQuviContext *qc = s->priv_data;

    return av_seek_frame(qc->fmtctx, stream_index, timestamp, flags);
}
 
/* Ask libquvi whether it supports the given URL. */
static int libquvi_probe(AVProbeData *p)
{
    quvi_t q;
    int score = 0;

    if (quvi_init(&q) != QUVI_OK)
        return AVERROR(ENOMEM);
    if (quvi_supported(q, (char *)p->filename) == QUVI_OK)
        score = AVPROBE_SCORE_EXTENSION;
    quvi_close(&q);
    return score;
}
 
/* Demuxer definition; AVFMT_NOFILE because the real I/O happens inside
 * the nested demuxer opened on the resolved media URL. */
AVInputFormat ff_libquvi_demuxer = {
    .name           = "libquvi",
    .long_name      = NULL_IF_CONFIG_SMALL("libquvi demuxer"),
    .priv_data_size = sizeof(LibQuviContext),
    .read_probe     = libquvi_probe,
    .read_header    = libquvi_read_header,
    .read_packet    = libquvi_read_packet,
    .read_close     = libquvi_close,
    .read_seek      = libquvi_read_seek,
    .priv_class     = &libquvi_context_class,
    .flags          = AVFMT_NOFILE,
};
/contrib/sdk/sources/ffmpeg/libavformat/librtmp.c
0,0 → 1,283
/*
* RTMP network protocol
* Copyright (c) 2010 Howard Chu
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* RTMP protocol based on http://rtmpdump.mplayerhq.hu/ librtmp
*/
 
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "url.h"
 
#include <librtmp/rtmp.h>
#include <librtmp/log.h>
 
/* Private data: the librtmp session plus the app/playpath AVOptions
 * that are appended to the URL as librtmp key=value words. */
typedef struct LibRTMPContext {
    const AVClass *class;
    RTMP rtmp;
    char *app;
    char *playpath;
} LibRTMPContext;
 
/* Forward librtmp log output through av_log, translating the levels.
 * Unknown levels are treated as fatal. */
static void rtmp_log(int level, const char *fmt, va_list args)
{
    int av_level;

    switch (level) {
    case RTMP_LOGERROR:   av_level = AV_LOG_ERROR;   break;
    case RTMP_LOGWARNING: av_level = AV_LOG_WARNING; break;
    case RTMP_LOGINFO:    av_level = AV_LOG_INFO;    break;
    case RTMP_LOGDEBUG:   av_level = AV_LOG_VERBOSE; break;
    case RTMP_LOGDEBUG2:  av_level = AV_LOG_DEBUG;   break;
    case RTMP_LOGCRIT:
    default:              av_level = AV_LOG_FATAL;   break;
    }

    av_vlog(NULL, av_level, fmt, args);
    av_log(NULL, av_level, "\n"); /* librtmp messages carry no newline */
}
 
/* Tear down the RTMP session. */
static int rtmp_close(URLContext *s)
{
    LibRTMPContext *ctx = s->priv_data;

    RTMP_Close(&ctx->rtmp);
    return 0;
}
 
/**
* Open RTMP connection and verify that the stream can be played.
*
* URL syntax: rtmp://server[:port][/app][/playpath][ keyword=value]...
* where 'app' is first one or two directories in the path
* (e.g. /ondemand/, /flash/live/, etc.)
* and 'playpath' is a file name (the rest of the path,
* may be prefixed with "mp4:")
*
* Additional RTMP library options may be appended as
* space-separated key-value pairs.
*/
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
    LibRTMPContext *ctx = s->priv_data;
    RTMP *r = &ctx->rtmp;
    int rc = 0, level;
    char *filename = s->filename;

    /* Map the current av_log level onto librtmp's levels and install
     * the forwarding callback before any library call can log. */
    switch (av_log_get_level()) {
    default:
    case AV_LOG_FATAL:   level = RTMP_LOGCRIT;    break;
    case AV_LOG_ERROR:   level = RTMP_LOGERROR;   break;
    case AV_LOG_WARNING: level = RTMP_LOGWARNING; break;
    case AV_LOG_INFO:    level = RTMP_LOGINFO;    break;
    case AV_LOG_VERBOSE: level = RTMP_LOGDEBUG;   break;
    case AV_LOG_DEBUG:   level = RTMP_LOGDEBUG2;  break;
    }
    RTMP_LogSetLevel(level);
    RTMP_LogSetCallback(rtmp_log);

    /* librtmp takes extra options as space-separated "key=value" words
     * appended to the URL; build an augmented copy when -rtmp_app or
     * -rtmp_playpath were given.  sizeof(" app=") counts the NUL, so the
     * length budget already includes the terminator. */
    if (ctx->app || ctx->playpath) {
        int len = strlen(s->filename) + 1;
        if (ctx->app)      len += strlen(ctx->app)      + sizeof(" app=");
        if (ctx->playpath) len += strlen(ctx->playpath) + sizeof(" playpath=");

        if (!(filename = av_malloc(len)))
            return AVERROR(ENOMEM);

        av_strlcpy(filename, s->filename, len);
        if (ctx->app) {
            av_strlcat(filename, " app=", len);
            av_strlcat(filename, ctx->app, len);
        }
        if (ctx->playpath) {
            av_strlcat(filename, " playpath=", len);
            av_strlcat(filename, ctx->playpath, len);
        }
    }

    RTMP_Init(r);
    if (!RTMP_SetupURL(r, filename)) {
        rc = AVERROR_UNKNOWN;
        goto fail;
    }

    if (flags & AVIO_FLAG_WRITE)
        RTMP_EnableWrite(r);

    if (!RTMP_Connect(r, NULL) || !RTMP_ConnectStream(r, 0)) {
        rc = AVERROR_UNKNOWN;
        goto fail;
    }

    s->is_streamed = 1;
    rc = 0;
fail:
    /* Only free the augmented copy, never s->filename itself. */
    if (filename != s->filename)
        av_freep(&filename);
    return rc;
}
 
/* Write a buffer to the RTMP stream. */
static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
{
    LibRTMPContext *ctx = s->priv_data;

    return RTMP_Write(&ctx->rtmp, buf, size);
}
 
/* Read from the RTMP stream into buf. */
static int rtmp_read(URLContext *s, uint8_t *buf, int size)
{
    LibRTMPContext *ctx = s->priv_data;

    return RTMP_Read(&ctx->rtmp, buf, size);
}
 
/* Pause or resume playback of the stream. */
static int rtmp_read_pause(URLContext *s, int pause)
{
    LibRTMPContext *ctx = s->priv_data;

    return RTMP_Pause(&ctx->rtmp, pause) ? 0 : AVERROR_UNKNOWN;
}
 
/* Seek within the stream.  Byte seeking is not supported; when no
 * stream index is given, the AV_TIME_BASE timestamp is rescaled to the
 * milliseconds librtmp expects. */
static int64_t rtmp_read_seek(URLContext *s, int stream_index,
                              int64_t timestamp, int flags)
{
    LibRTMPContext *ctx = s->priv_data;

    if (flags & AVSEEK_FLAG_BYTE)
        return AVERROR(ENOSYS);

    if (stream_index < 0) {
        int round = flags & AVSEEK_FLAG_BACKWARD ? AV_ROUND_DOWN : AV_ROUND_UP;
        /* seeks are in milliseconds */
        timestamp = av_rescale_rnd(timestamp, 1000, AV_TIME_BASE, round);
    }

    if (!RTMP_SendSeek(&ctx->rtmp, timestamp))
        return AVERROR_UNKNOWN;

    return timestamp;
}
 
/* Expose the underlying socket descriptor for polling. */
static int rtmp_get_file_handle(URLContext *s)
{
    LibRTMPContext *ctx = s->priv_data;

    return RTMP_Socket(&ctx->rtmp);
}
 
#define OFFSET(x) offsetof(LibRTMPContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
#define ENC AV_OPT_FLAG_ENCODING_PARAM
/* Options shared by all librtmp-based protocol flavors. */
static const AVOption options[] = {
    {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    { NULL },
};

/* Generates one AVClass per protocol flavor (rtmp, rtmpt, ...), all
 * sharing the same option table. */
#define RTMP_CLASS(flavor)\
static const AVClass lib ## flavor ## _class = {\
    .class_name = "lib" #flavor " protocol",\
    .item_name  = av_default_item_name,\
    .option     = options,\
    .version    = LIBAVUTIL_VERSION_INT,\
};
 
/* One URLProtocol per RTMP flavor.  All five share the same callbacks;
 * librtmp dispatches on the scheme embedded in the URL. */
RTMP_CLASS(rtmp)
URLProtocol ff_librtmp_protocol = {
    .name                = "rtmp",
    .url_open            = rtmp_open,
    .url_read            = rtmp_read,
    .url_write           = rtmp_write,
    .url_close           = rtmp_close,
    .url_read_pause      = rtmp_read_pause,
    .url_read_seek       = rtmp_read_seek,
    .url_get_file_handle = rtmp_get_file_handle,
    .priv_data_size      = sizeof(LibRTMPContext),
    .priv_data_class     = &librtmp_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};

/* RTMP tunneled over HTTP. */
RTMP_CLASS(rtmpt)
URLProtocol ff_librtmpt_protocol = {
    .name                = "rtmpt",
    .url_open            = rtmp_open,
    .url_read            = rtmp_read,
    .url_write           = rtmp_write,
    .url_close           = rtmp_close,
    .url_read_pause      = rtmp_read_pause,
    .url_read_seek       = rtmp_read_seek,
    .url_get_file_handle = rtmp_get_file_handle,
    .priv_data_size      = sizeof(LibRTMPContext),
    .priv_data_class     = &librtmpt_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};

/* Encrypted RTMP. */
RTMP_CLASS(rtmpe)
URLProtocol ff_librtmpe_protocol = {
    .name                = "rtmpe",
    .url_open            = rtmp_open,
    .url_read            = rtmp_read,
    .url_write           = rtmp_write,
    .url_close           = rtmp_close,
    .url_read_pause      = rtmp_read_pause,
    .url_read_seek       = rtmp_read_seek,
    .url_get_file_handle = rtmp_get_file_handle,
    .priv_data_size      = sizeof(LibRTMPContext),
    .priv_data_class     = &librtmpe_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};

/* Encrypted RTMP tunneled over HTTP. */
RTMP_CLASS(rtmpte)
URLProtocol ff_librtmpte_protocol = {
    .name                = "rtmpte",
    .url_open            = rtmp_open,
    .url_read            = rtmp_read,
    .url_write           = rtmp_write,
    .url_close           = rtmp_close,
    .url_read_pause      = rtmp_read_pause,
    .url_read_seek       = rtmp_read_seek,
    .url_get_file_handle = rtmp_get_file_handle,
    .priv_data_size      = sizeof(LibRTMPContext),
    .priv_data_class     = &librtmpte_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};

/* RTMP over SSL/TLS. */
RTMP_CLASS(rtmps)
URLProtocol ff_librtmps_protocol = {
    .name                = "rtmps",
    .url_open            = rtmp_open,
    .url_read            = rtmp_read,
    .url_write           = rtmp_write,
    .url_close           = rtmp_close,
    .url_read_pause      = rtmp_read_pause,
    .url_read_seek       = rtmp_read_seek,
    .url_get_file_handle = rtmp_get_file_handle,
    .priv_data_size      = sizeof(LibRTMPContext),
    .priv_data_class     = &librtmps_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/libssh.c
0,0 → 1,229
/*
* Copyright (c) 2013 Lukasz Marek <lukasz.m.luki@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <fcntl.h>
#include <libssh/sftp.h>
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "internal.h"
#include "url.h"
 
/* Private data: the ssh/sftp session handles, the opened remote file,
 * its cached size (-1 if unknown), and the timeout/truncate AVOptions. */
typedef struct {
    const AVClass *class;
    ssh_session session;
    sftp_session sftp;
    sftp_file file;
    int64_t filesize;
    int rw_timeout;
    int trunc;
} LIBSSHContext;
 
/* Close the remote file, free the sftp session and tear down the ssh
 * connection.  Safe on partially-initialized contexts, so it also
 * serves as the failure path of libssh_open(). */
static int libssh_close(URLContext *h)
{
    LIBSSHContext *s = h->priv_data;

    if (s->file)
        sftp_close(s->file);
    if (s->sftp)
        sftp_free(s->sftp);
    if (s->session) {
        ssh_disconnect(s->session);
        ssh_free(s->session);
    }

    return 0;
}
 
/* Open sftp://[user[:pass]@]host[:port]/path.  Establishes the ssh
 * session, authenticates (password only, when given), starts sftp,
 * opens the remote file with flags derived from the avio flags and
 * caches its size.  On any failure, libssh_close() unwinds whatever
 * was set up so far. */
static int libssh_open(URLContext *h, const char *url, int flags)
{
    static const int verbosity = SSH_LOG_NOLOG;
    LIBSSHContext *s = h->priv_data;
    char proto[10], path[MAX_URL_SIZE], hostname[1024], credencials[1024];
    int port = 22, access, ret;
    long timeout = s->rw_timeout * 1000; /* AVOption is ms; libssh wants usec */
    const char *user = NULL, *pass = NULL;
    char *end = NULL;
    sftp_attributes stat;

    av_url_split(proto, sizeof(proto),
                 credencials, sizeof(credencials),
                 hostname, sizeof(hostname),
                 &port,
                 path, sizeof(path),
                 url);

    /* Fall back to the standard ssh port on absent/invalid values. */
    if (port <= 0 || port > 65535)
        port = 22;

    if (!(s->session = ssh_new())) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    /* Split "user:pass" from the URL's credentials part. */
    user = av_strtok(credencials, ":", &end);
    pass = av_strtok(end, ":", &end);
    ssh_options_set(s->session, SSH_OPTIONS_HOST, hostname);
    ssh_options_set(s->session, SSH_OPTIONS_PORT, &port);
    ssh_options_set(s->session, SSH_OPTIONS_LOG_VERBOSITY, &verbosity);
    if (timeout > 0)
        ssh_options_set(s->session, SSH_OPTIONS_TIMEOUT_USEC, &timeout);
    if (user)
        ssh_options_set(s->session, SSH_OPTIONS_USER, user);

    if (ssh_connect(s->session) != SSH_OK) {
        av_log(h, AV_LOG_ERROR, "Connection failed. %s\n", ssh_get_error(s->session));
        ret = AVERROR(EIO);
        goto fail;
    }

    /* NOTE(review): only password authentication is attempted here;
     * without a password the session proceeds unauthenticated. */
    if (pass && ssh_userauth_password(s->session, NULL, pass) != SSH_AUTH_SUCCESS) {
        av_log(h, AV_LOG_ERROR, "Error authenticating with password: %s\n", ssh_get_error(s->session));
        ret = AVERROR(EACCES);
        goto fail;
    }

    if (!(s->sftp = sftp_new(s->session))) {
        av_log(h, AV_LOG_ERROR, "SFTP session creation failed: %s\n", ssh_get_error(s->session));
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (sftp_init(s->sftp) != SSH_OK) {
        av_log(h, AV_LOG_ERROR, "Error initializing sftp session: %s\n", ssh_get_error(s->session));
        ret = AVERROR(EIO);
        goto fail;
    }

    /* Translate avio flags to POSIX open flags; truncation is governed
     * by the "truncate" AVOption. */
    if ((flags & AVIO_FLAG_WRITE) && (flags & AVIO_FLAG_READ)) {
        access = O_CREAT | O_RDWR;
        if (s->trunc)
            access |= O_TRUNC;
    } else if (flags & AVIO_FLAG_WRITE) {
        access = O_CREAT | O_WRONLY;
        if (s->trunc)
            access |= O_TRUNC;
    } else {
        access = O_RDONLY;
    }

    if (!(s->file = sftp_open(s->sftp, path, access, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH))) {
        av_log(h, AV_LOG_ERROR, "Error opening sftp file: %s\n", ssh_get_error(s->session));
        ret = AVERROR(EIO);
        goto fail;
    }

    /* Cache the file size for AVSEEK_SIZE/SEEK_END; -1 means unknown. */
    if (!(stat = sftp_fstat(s->file))) {
        av_log(h, AV_LOG_WARNING, "Cannot stat remote file %s.\n", path);
        s->filesize = -1;
    } else {
        s->filesize = stat->size;
        sftp_attributes_free(stat);
    }

    return 0;

fail:
    libssh_close(h);
    return ret;
}
 
/* Seek within the remote file.
 *
 * Fix: SEEK_CUR previously ignored the requested offset and always
 * seeked to the current position; it now adds `pos` to sftp_tell64()
 * (this matches the later upstream FFmpeg fix).  A resulting negative
 * position is rejected with EINVAL instead of being handed to sftp.
 *
 * Returns the new absolute position, the file size for AVSEEK_SIZE, or
 * a negative AVERROR code. */
static int64_t libssh_seek(URLContext *h, int64_t pos, int whence)
{
    LIBSSHContext *s = h->priv_data;
    int64_t newpos;

    /* Size-relative seeks are impossible when the size is unknown. */
    if (s->filesize == -1 && (whence == AVSEEK_SIZE || whence == SEEK_END)) {
        av_log(h, AV_LOG_ERROR, "Error during seeking.\n");
        return AVERROR(EIO);
    }

    switch (whence) {
    case AVSEEK_SIZE:
        return s->filesize;
    case SEEK_SET:
        newpos = pos;
        break;
    case SEEK_CUR:
        newpos = sftp_tell64(s->file) + pos;
        break;
    case SEEK_END:
        newpos = s->filesize + pos;
        break;
    default:
        return AVERROR(EINVAL);
    }

    if (newpos < 0) {
        av_log(h, AV_LOG_ERROR, "Seeking to a negative position.\n");
        return AVERROR(EINVAL);
    }

    if (sftp_seek64(s->file, newpos)) {
        av_log(h, AV_LOG_ERROR, "Error during seeking.\n");
        return AVERROR(EIO);
    }

    return newpos;
}
 
/* Read up to size bytes from the remote file.
 * Returns the number of bytes read or AVERROR(EIO). */
static int libssh_read(URLContext *h, unsigned char *buf, int size)
{
    LIBSSHContext *s = h->priv_data;
    int n = sftp_read(s->file, buf, size);

    if (n < 0) {
        av_log(h, AV_LOG_ERROR, "Read error.\n");
        return AVERROR(EIO);
    }
    return n;
}
 
/* Write size bytes to the remote file.
 * Returns the number of bytes written or AVERROR(EIO). */
static int libssh_write(URLContext *h, const unsigned char *buf, int size)
{
    LIBSSHContext *s = h->priv_data;
    int n = sftp_write(s->file, buf, size);

    if (n < 0) {
        av_log(h, AV_LOG_ERROR, "Write error.\n");
        return AVERROR(EIO);
    }
    return n;
}
 
#define OFFSET(x) offsetof(LIBSSHContext, x)
#define D AV_OPT_FLAG_DECODING_PARAM
#define E AV_OPT_FLAG_ENCODING_PARAM
/* -timeout is in milliseconds (-1 = library default); -truncate
 * controls O_TRUNC on write opens. */
static const AVOption options[] = {
    {"timeout", "set timeout of socket I/O operations", OFFSET(rw_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D|E },
    {"truncate", "Truncate existing files on write", OFFSET(trunc), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, E },
    {NULL}
};

/* AVClass exposing the options above. */
static const AVClass libssh_context_class = {
    .class_name = "libssh",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Protocol definition: the scheme is "sftp", implemented via libssh. */
URLProtocol ff_libssh_protocol = {
    .name            = "sftp",
    .url_open        = libssh_open,
    .url_read        = libssh_read,
    .url_write       = libssh_write,
    .url_seek        = libssh_seek,
    .url_close       = libssh_close,
    .priv_data_size  = sizeof(LIBSSHContext),
    .priv_data_class = &libssh_context_class,
    .flags           = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/lmlm4.c
0,0 → 1,127
/*
* Linux Media Labs MPEG-4 demuxer
* Copyright (c) 2008 Ivo van Poorten
*
* Due to a lack of sample files, only files with one channel are supported.
* u-law and ADPCM audio are unsupported for the same reason.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
/* Frame type codes from the 16-bit type field of the LMLM4 packet
 * header. */
#define LMLM4_I_FRAME  0x00
#define LMLM4_P_FRAME  0x01
#define LMLM4_B_FRAME  0x02
#define LMLM4_INVALID  0x03
#define LMLM4_MPEG1L2  0x04

/* NOTE(review): expansion is unparenthesized; safe in the comparisons
 * below, but parenthesize before using it in other expressions. */
#define LMLM4_MAX_PACKET_SIZE 1024 * 1024
 
/* Probe an LMLM4 stream: a zero channel word, a known frame type and a
 * sane packet size, then cross-check the start of the payload (either
 * an MPEG-1 layer II syncword or a PES start code). */
static int lmlm4_probe(AVProbeData * pd) {
    const unsigned char *buf   = pd->buf;
    unsigned int frame_type    = AV_RB16(buf + 2);
    unsigned int packet_size   = AV_RB32(buf + 4);

    if (AV_RB16(buf) || frame_type > LMLM4_MPEG1L2 ||
        frame_type == LMLM4_INVALID || !packet_size ||
        packet_size > LMLM4_MAX_PACKET_SIZE)
        return 0;

    if (frame_type == LMLM4_MPEG1L2) {
        if ((AV_RB16(buf + 8) & 0xfffe) != 0xfffc)
            return 0;
        /* Computing the audio frame size and comparing it against
         * packet_size - 8 would be possible, but seems overkill. */
        return AVPROBE_SCORE_MAX / 3;
    }

    if (AV_RB24(buf + 8) == 0x000001) /* PES start code */
        return AVPROBE_SCORE_MAX / 5;

    return 0;
}
 
/* Register the fixed pair of streams: MPEG-4 video at 30000/1001 fps
 * and MP2 audio.  The actual parameters are extracted later from the
 * compressed bitstream by the parsers. */
static int lmlm4_read_header(AVFormatContext *s) {
    AVStream *st;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_MPEG4;
    st->need_parsing      = AVSTREAM_PARSE_HEADERS;
    avpriv_set_pts_info(st, 64, 1001, 30000);

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = AV_CODEC_ID_MP2;
    st->need_parsing      = AVSTREAM_PARSE_HEADERS;

    return 0;
}
 
/* Read one LMLM4 packet: 8-byte header (channel, frame type, packet
 * size), the payload, then zero padding up to a 512-byte boundary. */
static int lmlm4_read_packet(AVFormatContext *s, AVPacket *pkt) {
    AVIOContext *pb = s->pb;
    int ret;
    unsigned int frame_type, packet_size, padding, frame_size;

    avio_rb16(pb);                     /* channel number */
    frame_type  = avio_rb16(pb);
    packet_size = avio_rb32(pb);
    padding     = -packet_size & 511;  /* bytes up to the next 512 boundary */
    frame_size  = packet_size - 8;     /* payload without the header */

    if (frame_type > LMLM4_MPEG1L2 || frame_type == LMLM4_INVALID) {
        av_log(s, AV_LOG_ERROR, "invalid or unsupported frame_type\n");
        return AVERROR(EIO);
    }
    if (packet_size > LMLM4_MAX_PACKET_SIZE || packet_size<=8) {
        av_log(s, AV_LOG_ERROR, "packet size %d is invalid\n", packet_size);
        return AVERROR(EIO);
    }

    if ((ret = av_get_packet(pb, pkt, frame_size)) <= 0)
        return AVERROR(EIO);

    avio_skip(pb, padding);

    switch (frame_type) {
    case LMLM4_I_FRAME:
        pkt->flags = AV_PKT_FLAG_KEY;
        /* fallthrough: I/P/B frames all belong to the video stream */
    case LMLM4_P_FRAME:
    case LMLM4_B_FRAME:
        pkt->stream_index = 0;
        break;
    case LMLM4_MPEG1L2:
        pkt->stream_index = 1;
        break;
    }

    return ret;
}
 
/* Demuxer definition for Linux Media Labs MPEG-4 streams. */
AVInputFormat ff_lmlm4_demuxer = {
    .name        = "lmlm4",
    .long_name   = NULL_IF_CONFIG_SMALL("raw lmlm4"),
    .read_probe  = lmlm4_probe,
    .read_header = lmlm4_read_header,
    .read_packet = lmlm4_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/loasdec.c
0,0 → 1,87
/*
* LOAS AudioSyncStream demuxer
* Copyright (c) 2008 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/internal.h"
#include "avformat.h"
#include "internal.h"
#include "rawdec.h"
 
/* Probe for LOAS AudioSyncStream: scan for runs of consecutive frames,
 * each starting with the 11-bit 0x2B7 syncword followed by a 13-bit
 * frame length.  The score depends on how many consecutive frames were
 * found and whether the first run starts at offset 0. */
static int loas_probe(AVProbeData *p)
{
    int max_frames = 0, first_frames = 0;
    int fsize, frames;
    const uint8_t *buf0 = p->buf;
    const uint8_t *buf2;
    const uint8_t *buf;
    const uint8_t *end = buf0 + p->buf_size - 3; /* 3 bytes needed per header read */
    buf = buf0;

    /* Try every byte offset as a potential first syncword. */
    for(; buf < end; buf= buf2+1) {
        buf2 = buf;

        /* Count consecutive valid frames starting at buf. */
        for(frames = 0; buf2 < end; frames++) {
            uint32_t header = AV_RB24(buf2);
            if((header >> 13) != 0x2B7)        /* 11-bit LOAS syncword */
                break;
            fsize = (header & 0x1FFF) + 3;     /* frame length incl. 3 header bytes */
            if(fsize < 7)
                break;
            fsize = FFMIN(fsize, end - buf2);  /* clamp to the probe buffer */
            buf2 += fsize;
        }
        max_frames = FFMAX(max_frames, frames);
        if(buf == buf0)
            first_frames= frames;
    }
    if   (first_frames>=3) return AVPROBE_SCORE_EXTENSION+1;
    else if(max_frames>100)return AVPROBE_SCORE_EXTENSION;
    else if(max_frames>=3) return AVPROBE_SCORE_EXTENSION / 2;
    else                   return 0;
}
 
/**
 * Create the single audio stream for a LOAS file and mark it for full
 * parsing so the LATM parser can derive the real parameters.
 *
 * @return 0 on success, AVERROR(ENOMEM) if the stream cannot be allocated
 */
static int loas_read_header(AVFormatContext *s)
{
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    stream->codec->codec_id   = s->iformat->raw_codec_id;
    stream->need_parsing      = AVSTREAM_PARSE_FULL_RAW;

    /* timebase: LCM of all possible AAC sample rates */
    avpriv_set_pts_info(stream, 64, 1, 28224000);

    return 0;
}
 
/* Demuxer registration: packets are delivered via the shared raw-partial
 * reader (rawdec.h) and tagged as AAC-LATM for the parser. */
AVInputFormat ff_loas_demuxer = {
    .name         = "loas",
    .long_name    = NULL_IF_CONFIG_SMALL("LOAS AudioSyncStream"),
    .read_probe   = loas_probe,
    .read_header  = loas_read_header,
    .read_packet  = ff_raw_read_partial_packet,
    .flags        = AVFMT_GENERIC_INDEX,
    .raw_codec_id = AV_CODEC_ID_AAC_LATM,
};
/contrib/sdk/sources/ffmpeg/libavformat/log2_tab.c
0,0 → 1,0
#include "libavutil/log2_tab.c"
/contrib/sdk/sources/ffmpeg/libavformat/lvfdec.c
0,0 → 1,148
/*
* LVF demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "riff.h"
 
/* An LVF file starts with the four-byte tag "LVFF". */
static int lvf_probe(AVProbeData *p)
{
    if (AV_RL32(p->buf) != MKTAG('L', 'V', 'F', 'F'))
        return 0;
    return AVPROBE_SCORE_EXTENSION;
}
 
/**
 * Parse the LVF file header.
 *
 * After the 16-byte signature area and a 1012-byte reserved region, the
 * header is a sequence of (id, size) chunks: "00fm" describes the video
 * stream, "01fm" the audio stream, and an id of 0 terminates the header
 * (data starts at fixed offset 2048 + 8).
 *
 * @return 0 on success, a negative AVERROR code on error or truncation
 */
static int lvf_read_header(AVFormatContext *s)
{
    AVStream *st;
    int64_t next_offset;
    unsigned size, nb_streams, id;

    avio_skip(s->pb, 16);
    nb_streams = avio_rl32(s->pb);
    if (!nb_streams)
        return AVERROR_INVALIDDATA;
    if (nb_streams > 2) {
        /* only the 1- and 2-stream layouts handled below are supported */
        avpriv_request_sample(s, "%d streams", nb_streams);
        return AVERROR_PATCHWELCOME;
    }

    avio_skip(s->pb, 1012);

    while (!url_feof(s->pb)) {
        id          = avio_rl32(s->pb);
        size        = avio_rl32(s->pb);
        next_offset = avio_tell(s->pb) + size;

        switch (id) {
        case MKTAG('0', '0', 'f', 'm'):
            /* video format chunk */
            st = avformat_new_stream(s, 0);
            if (!st)
                return AVERROR(ENOMEM);

            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            avio_skip(s->pb, 4);
            st->codec->width      = avio_rl32(s->pb);
            st->codec->height     = avio_rl32(s->pb);
            avio_skip(s->pb, 4);
            st->codec->codec_tag  = avio_rl32(s->pb);
            /* codec tag is a BMP/AVI-style fourcc */
            st->codec->codec_id   = ff_codec_get_id(ff_codec_bmp_tags,
                                                    st->codec->codec_tag);
            avpriv_set_pts_info(st, 32, 1, 1000);
            break;
        case MKTAG('0', '1', 'f', 'm'):
            /* audio format chunk */
            st = avformat_new_stream(s, 0);
            if (!st)
                return AVERROR(ENOMEM);

            st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_tag   = avio_rl16(s->pb);
            st->codec->channels    = avio_rl16(s->pb);
            st->codec->sample_rate = avio_rl16(s->pb);
            avio_skip(s->pb, 8);
            st->codec->bits_per_coded_sample = avio_r8(s->pb);
            /* codec tag is a WAV-style format tag */
            st->codec->codec_id    = ff_codec_get_id(ff_codec_wav_tags,
                                                     st->codec->codec_tag);
            avpriv_set_pts_info(st, 32, 1, 1000);
            break;
        case 0:
            /* end of header: jump to the fixed start of packet data */
            avio_seek(s->pb, 2048 + 8, SEEK_SET);
            return 0;
        default:
            avpriv_request_sample(s, "id %d", id);
            return AVERROR_PATCHWELCOME;
        }

        avio_seek(s->pb, next_offset, SEEK_SET);
    }

    return AVERROR_EOF;
}
 
/**
 * Read the next LVF packet.
 *
 * Scans (id, size) chunks: "00dc" is a video frame, "01wb" an audio frame;
 * each carries an 8-byte (timestamp, flags) prefix before the payload.
 * Unknown chunks are skipped; a size of 0xFFFFFFFF marks end of stream.
 *
 * @return payload size read, or a negative AVERROR code
 */
static int lvf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    unsigned size, flags, timestamp, id;
    int64_t pos;
    int ret, is_video = 0;

    pos = avio_tell(s->pb);
    while (!url_feof(s->pb)) {
        id   = avio_rl32(s->pb);
        size = avio_rl32(s->pb);

        if (size == 0xFFFFFFFFu)
            return AVERROR_EOF;

        switch (id) {
        case MKTAG('0', '0', 'd', 'c'):
            is_video = 1;
            /* fallthrough */
        case MKTAG('0', '1', 'w', 'b'):
            if (size < 8)
                return AVERROR_INVALIDDATA;
            timestamp = avio_rl32(s->pb);
            flags     = avio_rl32(s->pb);
            ret = av_get_packet(s->pb, pkt, size - 8);
            /* bit 12 of the chunk flags marks a keyframe */
            if (flags & (1 << 12))
                pkt->flags |= AV_PKT_FLAG_KEY;
            pkt->stream_index = is_video ? 0 : 1;
            pkt->pts          = timestamp;
            pkt->pos          = pos;
            return ret;
        default:
            /* unknown chunk: skip its payload and keep scanning */
            ret = avio_skip(s->pb, size);
        }

        if (ret < 0)
            return ret;
    }

    return AVERROR_EOF;
}
 
/* Demuxer registration for the LVF container. */
AVInputFormat ff_lvf_demuxer = {
    .name        = "lvf",
    .long_name   = NULL_IF_CONFIG_SMALL("LVF"),
    .read_probe  = lvf_probe,
    .read_header = lvf_read_header,
    .read_packet = lvf_read_packet,
    .extensions  = "lvf",
    .flags       = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/lxfdec.c
0,0 → 1,342
/*
* LXF demuxer
* Copyright (c) 2010 Tomas Härdin
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "internal.h"
 
#define LXF_MAX_PACKET_HEADER_SIZE 256
#define LXF_HEADER_DATA_SIZE 120
#define LXF_IDENT "LEITCH\0"
#define LXF_IDENT_LENGTH 8
#define LXF_SAMPLERATE 48000
 
/* Mapping from the LXF header's numeric video format field to FFmpeg
 * codec IDs; terminated by the AV_CODEC_ID_NONE entry. */
static const AVCodecTag lxf_tags[] = {
    { AV_CODEC_ID_MJPEG,       0 },
    { AV_CODEC_ID_MPEG1VIDEO,  1 },
    { AV_CODEC_ID_MPEG2VIDEO,  2 },    //MpMl, 4:2:0
    { AV_CODEC_ID_MPEG2VIDEO,  3 },    //MpPl, 4:2:2
    { AV_CODEC_ID_DVVIDEO,     4 },    //DV25
    { AV_CODEC_ID_DVVIDEO,     5 },    //DVCPRO
    { AV_CODEC_ID_DVVIDEO,     6 },    //DVCPRO50
    { AV_CODEC_ID_RAWVIDEO,    7 },    //AV_PIX_FMT_ARGB, where alpha is used for chroma keying
    { AV_CODEC_ID_RAWVIDEO,    8 },    //16-bit chroma key
    { AV_CODEC_ID_MPEG2VIDEO,  9 },    //4:2:2 CBP ("Constrained Bytes per Gop")
    { AV_CODEC_ID_NONE,        0 },
};
 
/* Per-file demuxer state; the uint32_t fields mirror values parsed from
 * the most recent packet header by get_packet_header(). */
typedef struct {
    int channels;                       ///< number of audio channels. zero means no audio
    int frame_number;                   ///< current video frame
    uint32_t video_format, packet_type, extended_size;
} LXFDemuxContext;
 
/* An LXF stream always begins with the 8-byte "LEITCH\0\0" ident. */
static int lxf_probe(AVProbeData *p)
{
    if (memcmp(p->buf, LXF_IDENT, LXF_IDENT_LENGTH) != 0)
        return 0;

    return AVPROBE_SCORE_MAX;
}
 
/**
* Verify the checksum of an LXF packet header
*
* @param[in] header the packet header to check
* @return zero if the checksum is OK, non-zero otherwise
*/
/**
 * Verify the checksum of an LXF packet header.
 *
 * Sums the header's little-endian 32-bit words with wraparound; the caller
 * guarantees size is a multiple of four.
 *
 * @param[in] header the packet header to check
 * @param[in] size   header length in bytes
 * @return zero if the checksum is OK, non-zero otherwise
 */
static int check_checksum(const uint8_t *header, int size)
{
    uint32_t sum = 0;
    int i = 0;

    while (i < size) {
        sum += AV_RL32(&header[i]);
        i   += 4;
    }

    return sum;
}
 
/**
* Read input until we find the next ident. If found, copy it to the header buffer
*
* @param[out] header where to copy the ident to
* @return 0 if an ident was found, < 0 on I/O error
*/
/**
 * Read input until we find the next ident. If found, copy it to the header
 * buffer.
 *
 * Keeps an 8-byte sliding window over the stream, advancing one byte at a
 * time until the window equals LXF_IDENT.
 *
 * @param[out] header where to copy the ident to
 * @return 0 if an ident was found, < 0 on I/O error or EOF
 */
static int sync(AVFormatContext *s, uint8_t *header)
{
    uint8_t window[LXF_IDENT_LENGTH];
    int ret = avio_read(s->pb, window, LXF_IDENT_LENGTH);

    if (ret != LXF_IDENT_LENGTH)
        return ret < 0 ? ret : AVERROR_EOF;

    while (memcmp(window, LXF_IDENT, LXF_IDENT_LENGTH) != 0) {
        if (url_feof(s->pb))
            return AVERROR_EOF;

        memmove(window, window + 1, LXF_IDENT_LENGTH - 1);
        window[LXF_IDENT_LENGTH - 1] = avio_r8(s->pb);
    }

    memcpy(header, LXF_IDENT, LXF_IDENT_LENGTH);

    return 0;
}
 
/**
* Read and checksum the next packet header
*
* @return the size of the payload following the header or < 0 on failure
*/
static int get_packet_header(AVFormatContext *s)
{
    LXFDemuxContext *lxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int track_size, samples, ret;
    uint32_t version, audio_format, header_size, channels, tmp;
    AVStream *st;
    uint8_t header[LXF_MAX_PACKET_HEADER_SIZE];
    const uint8_t *p = header + LXF_IDENT_LENGTH;

    //find and read the ident
    if ((ret = sync(s, header)) < 0)
        return ret;

    //read the 4-byte version and 4-byte header size that follow the ident
    ret = avio_read(pb, header + LXF_IDENT_LENGTH, 8);
    if (ret != 8)
        return ret < 0 ? ret : AVERROR_EOF;

    version     = bytestream_get_le32(&p);
    header_size = bytestream_get_le32(&p);
    if (version > 1)
        avpriv_request_sample(s, "Unknown format version %i\n", version);

    //minimum sane size depends on version; size must also be 4-byte aligned
    if (header_size < (version ? 72 : 60) ||
        header_size > LXF_MAX_PACKET_HEADER_SIZE ||
        (header_size & 3)) {
        av_log(s, AV_LOG_ERROR, "Invalid header size 0x%x\n", header_size);
        return AVERROR_INVALIDDATA;
    }

    //read the rest of the packet header
    if ((ret = avio_read(pb, header + (p - header),
                         header_size - (p - header))) !=
        header_size - (p - header))
        return ret < 0 ? ret : AVERROR_EOF;

    //checksum failure is only warned about; demuxing continues regardless
    if (check_checksum(header, header_size))
        av_log(s, AV_LOG_ERROR, "checksum error\n");

    lxf->packet_type = bytestream_get_le32(&p);
    //skip fields we do not use; their size differs between versions
    p += version ? 20 : 12;

    lxf->extended_size = 0;
    switch (lxf->packet_type) {
    case 0:
        //video
        lxf->video_format = bytestream_get_le32(&p);
        ret = bytestream_get_le32(&p);   //payload size returned to the caller
        //skip VBI data and metadata
        avio_skip(pb, (int64_t)(uint32_t)AV_RL32(p + 4) +
                      (int64_t)(uint32_t)AV_RL32(p + 12));
        break;
    case 1:
        //audio
        if (s->nb_streams < 2) {
            av_log(s, AV_LOG_INFO, "got audio packet, but no audio stream present\n");
            break;
        }

        //version 0 has 8 extra bytes before the audio fields
        if (version == 0)
            p += 8;
        audio_format = bytestream_get_le32(&p);
        channels     = bytestream_get_le32(&p);
        track_size   = bytestream_get_le32(&p);

        st = s->streams[1];

        //set codec based on specified audio bitdepth
        //we only support tightly packed 16-, 20-, 24- and 32-bit PCM at the moment
        st->codec->bits_per_coded_sample = (audio_format >> 6) & 0x3F;

        if (st->codec->bits_per_coded_sample != (audio_format & 0x3F)) {
            av_log(s, AV_LOG_WARNING, "only tightly packed PCM currently supported\n");
            return AVERROR_PATCHWELCOME;
        }

        switch (st->codec->bits_per_coded_sample) {
        case 16: st->codec->codec_id = AV_CODEC_ID_PCM_S16LE_PLANAR; break;
        case 20: st->codec->codec_id = AV_CODEC_ID_PCM_LXF;          break;
        case 24: st->codec->codec_id = AV_CODEC_ID_PCM_S24LE_PLANAR; break;
        case 32: st->codec->codec_id = AV_CODEC_ID_PCM_S32LE_PLANAR; break;
        default:
            av_log(s, AV_LOG_WARNING,
                   "only 16-, 20-, 24- and 32-bit PCM currently supported\n");
            return AVERROR_PATCHWELCOME;
        }

        samples = track_size * 8 / st->codec->bits_per_coded_sample;

        //use audio packet size to determine video standard
        //for NTSC we have one 8008-sample audio frame per five video frames
        if (samples == LXF_SAMPLERATE * 5005 / 30000) {
            avpriv_set_pts_info(s->streams[0], 64, 1001, 30000);
        } else {
            //assume PAL, but warn if we don't have 1920 samples
            if (samples != LXF_SAMPLERATE / 25)
                av_log(s, AV_LOG_WARNING,
                       "video doesn't seem to be PAL or NTSC. guessing PAL\n");

            avpriv_set_pts_info(s->streams[0], 64, 1, 25);
        }

        //TODO: warning if track mask != (1 << channels) - 1?
        //payload size = one track per set bit in the channel mask
        ret = av_popcount(channels) * track_size;

        break;
    default:
        //unknown packet type: payload size still follows; tmp == 1 signals
        //an extended payload whose size replaces the packet body
        tmp = bytestream_get_le32(&p);
        ret = bytestream_get_le32(&p);
        if (tmp == 1)
            lxf->extended_size = bytestream_get_le32(&p);
        break;
    }

    return ret;
}
 
/**
 * Parse the LXF file header packet and create the video stream (always)
 * and the audio stream (channel count derived from the disk parameters).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int lxf_read_header(AVFormatContext *s)
{
    LXFDemuxContext *lxf = s->priv_data;
    AVIOContext *pb = s->pb;
    uint8_t header_data[LXF_HEADER_DATA_SIZE];
    int ret;
    AVStream *st;
    uint32_t video_params, disk_params;
    uint16_t record_date, expiration_date;

    if ((ret = get_packet_header(s)) < 0)
        return ret;

    //the first packet's payload must be exactly the fixed-size header data
    if (ret != LXF_HEADER_DATA_SIZE) {
        av_log(s, AV_LOG_ERROR, "expected %d B size header, got %d\n",
               LXF_HEADER_DATA_SIZE, ret);
        return AVERROR_INVALIDDATA;
    }

    if ((ret = avio_read(pb, header_data, LXF_HEADER_DATA_SIZE)) != LXF_HEADER_DATA_SIZE)
        return ret < 0 ? ret : AVERROR_EOF;

    if (!(st = avformat_new_stream(s, NULL)))
        return AVERROR(ENOMEM);

    //fixed offsets within the 120-byte header data block
    st->duration    = AV_RL32(&header_data[32]);
    video_params    = AV_RL32(&header_data[40]);
    record_date     = AV_RL16(&header_data[56]);
    expiration_date = AV_RL16(&header_data[58]);
    disk_params     = AV_RL32(&header_data[116]);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->bit_rate   = 1000000 * ((video_params >> 14) & 0xFF);
    st->codec->codec_tag  = video_params & 0xF;
    st->codec->codec_id   = ff_codec_get_id(lxf_tags, st->codec->codec_tag);
    st->need_parsing      = AVSTREAM_PARSE_HEADERS;

    av_log(s, AV_LOG_DEBUG, "record: %x = %i-%02i-%02i\n",
           record_date, 1900 + (record_date & 0x7F), (record_date >> 7) & 0xF,
           (record_date >> 11) & 0x1F);

    av_log(s, AV_LOG_DEBUG, "expire: %x = %i-%02i-%02i\n",
           expiration_date, 1900 + (expiration_date & 0x7F), (expiration_date >> 7) & 0xF,
           (expiration_date >> 11) & 0x1F);

    if ((video_params >> 22) & 1)
        av_log(s, AV_LOG_WARNING, "VBI data not yet supported\n");

    /* NOTE(review): '+' binds tighter than '<<', so this computes
     * 1 << ((disk_params >> 4 & 3) + 1), i.e. 2/4/8/16 channels, and the
     * condition is therefore always true — confirm this is intended. */
    if ((lxf->channels = 1 << (disk_params >> 4 & 3) + 1)) {
        if (!(st = avformat_new_stream(s, NULL)))
            return AVERROR(ENOMEM);

        st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
        st->codec->sample_rate = LXF_SAMPLERATE;
        st->codec->channels    = lxf->channels;

        avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    }

    //skip any extended payload announced by the header packet
    avio_skip(s->pb, lxf->extended_size);

    return 0;
}
 
/**
 * Read one LXF packet: parse the next packet header, then read the payload
 * into @p pkt. Packet type 0 is video (stream 0), type 1 audio (stream 1).
 *
 * @return payload size on success, a negative AVERROR code otherwise
 */
static int lxf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    LXFDemuxContext *lxf = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t stream;
    int ret, ret2;

    if ((ret = get_packet_header(s)) < 0)
        return ret;

    stream = lxf->packet_type;

    if (stream > 1) {
        /* unknown packet type: skip it by asking the caller to retry */
        av_log(s, AV_LOG_WARNING, "got packet with illegal stream index %u\n", stream);
        return AVERROR(EAGAIN);
    }

    if (stream == 1 && s->nb_streams < 2) {
        av_log(s, AV_LOG_ERROR, "got audio packet without having an audio stream\n");
        return AVERROR_INVALIDDATA;
    }

    if ((ret2 = av_new_packet(pkt, ret)) < 0)
        return ret2;

    if ((ret2 = avio_read(pb, pkt->data, ret)) != ret) {
        av_free_packet(pkt);
        return ret2 < 0 ? ret2 : AVERROR_EOF;
    }

    pkt->stream_index = stream;

    if (!stream) {
        //picture type (0 = closed I, 1 = open I, 2 = P, 3 = B)
        if (((lxf->video_format >> 22) & 0x3) < 2)
            pkt->flags |= AV_PKT_FLAG_KEY;

        //video carries no timestamps; use a simple frame counter as DTS
        pkt->dts = lxf->frame_number++;
    }

    return ret;
}
 
/* Demuxer registration for the Leitch/Harris LXF container. */
AVInputFormat ff_lxf_demuxer = {
    .name           = "lxf",
    .long_name      = NULL_IF_CONFIG_SMALL("VR native stream (LXF)"),
    .priv_data_size = sizeof(LXFDemuxContext),
    .read_probe     = lxf_probe,
    .read_header    = lxf_read_header,
    .read_packet    = lxf_read_packet,
    .codec_tag      = (const AVCodecTag* const []){lxf_tags, 0},
};
/contrib/sdk/sources/ffmpeg/libavformat/m4vdec.c
0,0 → 1,52
/*
* RAW MPEG-4 video demuxer
* Copyright (c) 2006 Thijs Vermeir <thijs.vermeir@barco.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rawdec.h"
 
#define VISUAL_OBJECT_START_CODE 0x000001b5
#define VOP_START_CODE 0x000001b6
 
/**
 * Probe for raw MPEG-4 visual elementary streams by scanning for
 * 0x000001xx start codes and tallying them by category; the buffer is
 * accepted when the counts look like a plausible bitstream and no start
 * code falls outside the ranges MPEG-4 defines.
 */
static int mpeg4video_probe(AVProbeData *probe_packet)
{
    uint32_t code = 0xFFFFFFFF;
    int vo = 0, vol = 0, vop = 0, viso = 0, invalid = 0;
    int i;

    for (i = 0; i < probe_packet->buf_size; i++) {
        code = (code << 8) | probe_packet->buf[i];
        if ((code & 0xffffff00) != 0x100)
            continue;

        if (code == VOP_START_CODE) {
            vop++;
        } else if (code == VISUAL_OBJECT_START_CODE) {
            viso++;
        } else if (code < 0x120) {
            vo++;
        } else if (code < 0x130) {
            vol++;
        } else if (!(code > 0x1AF && code < 0x1B7) &&
                   !(code > 0x1B9 && code < 0x1C4)) {
            /* start code outside every range defined by MPEG-4 */
            invalid++;
        }
    }

    if (vop >= viso && vop >= vol && vo >= vol && vol > 0 && !invalid)
        return vop + vo > 3 ? AVPROBE_SCORE_EXTENSION
                            : AVPROBE_SCORE_EXTENSION / 2;
    return 0;
}
 
FF_DEF_RAWVIDEO_DEMUXER(m4v, "raw MPEG-4 video", mpeg4video_probe, "m4v", AV_CODEC_ID_MPEG4)
/contrib/sdk/sources/ffmpeg/libavformat/matroska.c
0,0 → 1,148
/*
* Matroska common data
* Copyright (c) 2003-2004 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "matroska.h"
 
/* If you add a tag here that is not in ff_codec_bmp_tags[]
or ff_codec_wav_tags[], add it also to additional_audio_tags[]
or additional_video_tags[] in matroskaenc.c */
/* Matroska CodecID string -> FFmpeg codec ID mapping; terminated by the
 * empty-string / AV_CODEC_ID_NONE entry. Some CodecIDs appear multiple
 * times because one string covers several FFmpeg codec IDs (e.g. the PCM
 * variants), and vice versa. Do not reorder without checking users. */
const CodecTags ff_mkv_codec_tags[]={
    {"A_AAC"            , AV_CODEC_ID_AAC},
    {"A_AC3"            , AV_CODEC_ID_AC3},
    {"A_ALAC"           , AV_CODEC_ID_ALAC},
    {"A_DTS"            , AV_CODEC_ID_DTS},
    {"A_EAC3"           , AV_CODEC_ID_EAC3},
    {"A_FLAC"           , AV_CODEC_ID_FLAC},
    {"A_MLP"            , AV_CODEC_ID_MLP},
    {"A_MPEG/L2"        , AV_CODEC_ID_MP2},
    {"A_MPEG/L1"        , AV_CODEC_ID_MP2},
    {"A_MPEG/L3"        , AV_CODEC_ID_MP3},
    {"A_OPUS",            AV_CODEC_ID_OPUS},
    {"A_OPUS/EXPERIMENTAL",AV_CODEC_ID_OPUS},
    {"A_PCM/FLOAT/IEEE" , AV_CODEC_ID_PCM_F32LE},
    {"A_PCM/FLOAT/IEEE" , AV_CODEC_ID_PCM_F64LE},
    {"A_PCM/INT/BIG"    , AV_CODEC_ID_PCM_S16BE},
    {"A_PCM/INT/BIG"    , AV_CODEC_ID_PCM_S24BE},
    {"A_PCM/INT/BIG"    , AV_CODEC_ID_PCM_S32BE},
    {"A_PCM/INT/LIT"    , AV_CODEC_ID_PCM_S16LE},
    {"A_PCM/INT/LIT"    , AV_CODEC_ID_PCM_S24LE},
    {"A_PCM/INT/LIT"    , AV_CODEC_ID_PCM_S32LE},
    {"A_PCM/INT/LIT"    , AV_CODEC_ID_PCM_U8},
    {"A_QUICKTIME/QDM2" , AV_CODEC_ID_QDM2},
    {"A_REAL/14_4"      , AV_CODEC_ID_RA_144},
    {"A_REAL/28_8"      , AV_CODEC_ID_RA_288},
    {"A_REAL/ATRC"      , AV_CODEC_ID_ATRAC3},
    {"A_REAL/COOK"      , AV_CODEC_ID_COOK},
    {"A_REAL/SIPR"      , AV_CODEC_ID_SIPR},
    {"A_TRUEHD"         , AV_CODEC_ID_TRUEHD},
    {"A_TTA1"           , AV_CODEC_ID_TTA},
    {"A_VORBIS"         , AV_CODEC_ID_VORBIS},
    {"A_WAVPACK4"       , AV_CODEC_ID_WAVPACK},

    {"D_WEBVTT/SUBTITLES"   , AV_CODEC_ID_WEBVTT},
    {"D_WEBVTT/CAPTIONS"    , AV_CODEC_ID_WEBVTT},
    {"D_WEBVTT/DESCRIPTIONS", AV_CODEC_ID_WEBVTT},
    {"D_WEBVTT/METADATA"    , AV_CODEC_ID_WEBVTT},

    {"S_TEXT/UTF8"      , AV_CODEC_ID_SUBRIP},
    {"S_TEXT/UTF8"      , AV_CODEC_ID_TEXT},
    {"S_TEXT/UTF8"      , AV_CODEC_ID_SRT},
    {"S_TEXT/ASCII"     , AV_CODEC_ID_TEXT},
#if FF_API_ASS_SSA
    /* legacy SSA codec IDs, kept while the deprecated API is enabled */
    {"S_TEXT/ASS"       , AV_CODEC_ID_SSA},
    {"S_TEXT/SSA"       , AV_CODEC_ID_SSA},
    {"S_ASS"            , AV_CODEC_ID_SSA},
    {"S_SSA"            , AV_CODEC_ID_SSA},
#endif
    {"S_TEXT/ASS"       , AV_CODEC_ID_ASS},
    {"S_TEXT/SSA"       , AV_CODEC_ID_ASS},
    {"S_ASS"            , AV_CODEC_ID_ASS},
    {"S_SSA"            , AV_CODEC_ID_ASS},
    {"S_VOBSUB"         , AV_CODEC_ID_DVD_SUBTITLE},
    {"S_DVBSUB"         , AV_CODEC_ID_DVB_SUBTITLE},
    {"S_HDMV/PGS"       , AV_CODEC_ID_HDMV_PGS_SUBTITLE},

    {"V_DIRAC"          , AV_CODEC_ID_DIRAC},
    {"V_MJPEG"          , AV_CODEC_ID_MJPEG},
    {"V_MPEG1"          , AV_CODEC_ID_MPEG1VIDEO},
    {"V_MPEG2"          , AV_CODEC_ID_MPEG2VIDEO},
    {"V_MPEG4/ISO/ASP"  , AV_CODEC_ID_MPEG4},
    {"V_MPEG4/ISO/AP"   , AV_CODEC_ID_MPEG4},
    {"V_MPEG4/ISO/SP"   , AV_CODEC_ID_MPEG4},
    {"V_MPEG4/ISO/AVC"  , AV_CODEC_ID_H264},
    {"V_MPEGH/ISO/HEVC" , AV_CODEC_ID_HEVC},
    {"V_MPEG4/MS/V3"    , AV_CODEC_ID_MSMPEG4V3},
    {"V_PRORES"         , AV_CODEC_ID_PRORES},
    {"V_REAL/RV10"      , AV_CODEC_ID_RV10},
    {"V_REAL/RV20"      , AV_CODEC_ID_RV20},
    {"V_REAL/RV30"      , AV_CODEC_ID_RV30},
    {"V_REAL/RV40"      , AV_CODEC_ID_RV40},
    {"V_SNOW"           , AV_CODEC_ID_SNOW},
    {"V_THEORA"         , AV_CODEC_ID_THEORA},
    {"V_UNCOMPRESSED"   , AV_CODEC_ID_RAWVIDEO},
    {"V_VP8"            , AV_CODEC_ID_VP8},
    {"V_VP9"            , AV_CODEC_ID_VP9},

    {""                 , AV_CODEC_ID_NONE}
};
 
/* MIME type -> FFmpeg codec ID mapping for Matroska attachments;
 * terminated by the empty-string / AV_CODEC_ID_NONE entry. */
const CodecMime ff_mkv_mime_tags[] = {
    {"text/plain"                 , AV_CODEC_ID_TEXT},
    {"image/gif"                  , AV_CODEC_ID_GIF},
    {"image/jpeg"                 , AV_CODEC_ID_MJPEG},
    {"image/png"                  , AV_CODEC_ID_PNG},
    {"image/tiff"                 , AV_CODEC_ID_TIFF},
    {"application/x-truetype-font", AV_CODEC_ID_TTF},
    {"application/x-font"         , AV_CODEC_ID_TTF},
    {"application/vnd.ms-opentype", AV_CODEC_ID_OTF},

    {""                           , AV_CODEC_ID_NONE}
};
 
/* Matroska tag name -> generic FFmpeg metadata key conversions;
 * zero-terminated. */
const AVMetadataConv ff_mkv_metadata_conv[] = {
    { "LEAD_PERFORMER", "performer" },
    { "PART_NUMBER"   , "track"     },
    { 0 }
};
 
/* Human-readable names for StereoMode values; the array index matches the
 * MatroskaVideoStereoModeType enum in matroska.h, so keep both in sync. */
const char * const ff_matroska_video_stereo_mode[MATROSKA_VIDEO_STEREO_MODE_COUNT] = {
    "mono",
    "left_right",
    "bottom_top",
    "top_bottom",
    "checkerboard_rl",
    "checkerboard_lr",
    "row_interleaved_rl",
    "row_interleaved_lr",
    "col_interleaved_rl",
    "col_interleaved_lr",
    "anaglyph_cyan_red",
    "right_left",
    "anaglyph_green_magenta",
    "block_lr",
    "block_rl",
};
 
/* Human-readable names for TrackPlaneType values (stereo-3D plane roles). */
const char * const ff_matroska_video_stereo_plane[MATROSKA_VIDEO_STEREO_PLANE_COUNT] = {
    "left",
    "right",
    "background",
};
/contrib/sdk/sources/ffmpeg/libavformat/matroska.h
0,0 → 1,287
/*
* Matroska constants
* Copyright (c) 2003-2004 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_MATROSKA_H
#define AVFORMAT_MATROSKA_H
 
#include "libavcodec/avcodec.h"
#include "metadata.h"
#include "internal.h"
 
/* EBML version supported */
#define EBML_VERSION 1
 
/* top-level master-IDs */
#define EBML_ID_HEADER 0x1A45DFA3
 
/* IDs in the HEADER master */
#define EBML_ID_EBMLVERSION 0x4286
#define EBML_ID_EBMLREADVERSION 0x42F7
#define EBML_ID_EBMLMAXIDLENGTH 0x42F2
#define EBML_ID_EBMLMAXSIZELENGTH 0x42F3
#define EBML_ID_DOCTYPE 0x4282
#define EBML_ID_DOCTYPEVERSION 0x4287
#define EBML_ID_DOCTYPEREADVERSION 0x4285
 
/* general EBML types */
#define EBML_ID_VOID 0xEC
#define EBML_ID_CRC32 0xBF
 
/*
* Matroska element IDs, max. 32 bits
*/
 
/* toplevel segment */
#define MATROSKA_ID_SEGMENT 0x18538067
 
/* Matroska top-level master IDs */
#define MATROSKA_ID_INFO 0x1549A966
#define MATROSKA_ID_TRACKS 0x1654AE6B
#define MATROSKA_ID_CUES 0x1C53BB6B
#define MATROSKA_ID_TAGS 0x1254C367
#define MATROSKA_ID_SEEKHEAD 0x114D9B74
#define MATROSKA_ID_ATTACHMENTS 0x1941A469
#define MATROSKA_ID_CLUSTER 0x1F43B675
#define MATROSKA_ID_CHAPTERS 0x1043A770
 
/* IDs in the info master */
#define MATROSKA_ID_TIMECODESCALE 0x2AD7B1
#define MATROSKA_ID_DURATION 0x4489
#define MATROSKA_ID_TITLE 0x7BA9
#define MATROSKA_ID_WRITINGAPP 0x5741
#define MATROSKA_ID_MUXINGAPP 0x4D80
#define MATROSKA_ID_DATEUTC 0x4461
#define MATROSKA_ID_SEGMENTUID 0x73A4
 
/* ID in the tracks master */
#define MATROSKA_ID_TRACKENTRY 0xAE
 
/* IDs in the trackentry master */
#define MATROSKA_ID_TRACKNUMBER 0xD7
#define MATROSKA_ID_TRACKUID 0x73C5
#define MATROSKA_ID_TRACKTYPE 0x83
#define MATROSKA_ID_TRACKVIDEO 0xE0
#define MATROSKA_ID_TRACKAUDIO 0xE1
#define MATROSKA_ID_TRACKOPERATION 0xE2
#define MATROSKA_ID_TRACKCOMBINEPLANES 0xE3
#define MATROSKA_ID_TRACKPLANE 0xE4
#define MATROSKA_ID_TRACKPLANEUID 0xE5
#define MATROSKA_ID_TRACKPLANETYPE 0xE6
#define MATROSKA_ID_CODECID 0x86
#define MATROSKA_ID_CODECPRIVATE 0x63A2
#define MATROSKA_ID_CODECNAME 0x258688
#define MATROSKA_ID_CODECINFOURL 0x3B4040
#define MATROSKA_ID_CODECDOWNLOADURL 0x26B240
#define MATROSKA_ID_CODECDECODEALL 0xAA
#define MATROSKA_ID_CODECDELAY 0x56AA
#define MATROSKA_ID_SEEKPREROLL 0x56BB
#define MATROSKA_ID_TRACKNAME 0x536E
#define MATROSKA_ID_TRACKLANGUAGE 0x22B59C
#define MATROSKA_ID_TRACKFLAGENABLED 0xB9
#define MATROSKA_ID_TRACKFLAGDEFAULT 0x88
#define MATROSKA_ID_TRACKFLAGFORCED 0x55AA
#define MATROSKA_ID_TRACKFLAGLACING 0x9C
#define MATROSKA_ID_TRACKMINCACHE 0x6DE7
#define MATROSKA_ID_TRACKMAXCACHE 0x6DF8
#define MATROSKA_ID_TRACKDEFAULTDURATION 0x23E383
#define MATROSKA_ID_TRACKCONTENTENCODINGS 0x6D80
#define MATROSKA_ID_TRACKCONTENTENCODING 0x6240
#define MATROSKA_ID_TRACKTIMECODESCALE 0x23314F
#define MATROSKA_ID_TRACKMAXBLKADDID 0x55EE
 
/* IDs in the trackvideo master */
#define MATROSKA_ID_VIDEOFRAMERATE 0x2383E3
#define MATROSKA_ID_VIDEODISPLAYWIDTH 0x54B0
#define MATROSKA_ID_VIDEODISPLAYHEIGHT 0x54BA
#define MATROSKA_ID_VIDEOPIXELWIDTH 0xB0
#define MATROSKA_ID_VIDEOPIXELHEIGHT 0xBA
#define MATROSKA_ID_VIDEOPIXELCROPB 0x54AA
#define MATROSKA_ID_VIDEOPIXELCROPT 0x54BB
#define MATROSKA_ID_VIDEOPIXELCROPL 0x54CC
#define MATROSKA_ID_VIDEOPIXELCROPR 0x54DD
#define MATROSKA_ID_VIDEODISPLAYUNIT 0x54B2
#define MATROSKA_ID_VIDEOFLAGINTERLACED 0x9A
#define MATROSKA_ID_VIDEOSTEREOMODE 0x53B8
#define MATROSKA_ID_VIDEOALPHAMODE 0x53C0
#define MATROSKA_ID_VIDEOASPECTRATIO 0x54B3
#define MATROSKA_ID_VIDEOCOLORSPACE 0x2EB524
 
/* IDs in the trackaudio master */
#define MATROSKA_ID_AUDIOSAMPLINGFREQ 0xB5
#define MATROSKA_ID_AUDIOOUTSAMPLINGFREQ 0x78B5
 
#define MATROSKA_ID_AUDIOBITDEPTH 0x6264
#define MATROSKA_ID_AUDIOCHANNELS 0x9F
 
/* IDs in the content encoding master */
#define MATROSKA_ID_ENCODINGORDER 0x5031
#define MATROSKA_ID_ENCODINGSCOPE 0x5032
#define MATROSKA_ID_ENCODINGTYPE 0x5033
#define MATROSKA_ID_ENCODINGCOMPRESSION 0x5034
#define MATROSKA_ID_ENCODINGCOMPALGO 0x4254
#define MATROSKA_ID_ENCODINGCOMPSETTINGS 0x4255
 
#define MATROSKA_ID_ENCODINGENCRYPTION 0x5035
#define MATROSKA_ID_ENCODINGENCAESSETTINGS 0x47E7
#define MATROSKA_ID_ENCODINGENCALGO 0x47E1
#define MATROSKA_ID_ENCODINGENCKEYID 0x47E2
#define MATROSKA_ID_ENCODINGSIGALGO 0x47E5
#define MATROSKA_ID_ENCODINGSIGHASHALGO 0x47E6
#define MATROSKA_ID_ENCODINGSIGKEYID 0x47E4
#define MATROSKA_ID_ENCODINGSIGNATURE 0x47E3
 
/* ID in the cues master */
#define MATROSKA_ID_POINTENTRY 0xBB
 
/* IDs in the pointentry master */
#define MATROSKA_ID_CUETIME 0xB3
#define MATROSKA_ID_CUETRACKPOSITION 0xB7
 
/* IDs in the cuetrackposition master */
#define MATROSKA_ID_CUETRACK 0xF7
#define MATROSKA_ID_CUECLUSTERPOSITION 0xF1
#define MATROSKA_ID_CUERELATIVEPOSITION 0xF0
#define MATROSKA_ID_CUEDURATION 0xB2
#define MATROSKA_ID_CUEBLOCKNUMBER 0x5378
 
/* IDs in the tags master */
#define MATROSKA_ID_TAG 0x7373
#define MATROSKA_ID_SIMPLETAG 0x67C8
#define MATROSKA_ID_TAGNAME 0x45A3
#define MATROSKA_ID_TAGSTRING 0x4487
#define MATROSKA_ID_TAGLANG 0x447A
#define MATROSKA_ID_TAGDEFAULT 0x4484
#define MATROSKA_ID_TAGDEFAULT_BUG 0x44B4
#define MATROSKA_ID_TAGTARGETS 0x63C0
#define MATROSKA_ID_TAGTARGETS_TYPE 0x63CA
#define MATROSKA_ID_TAGTARGETS_TYPEVALUE 0x68CA
#define MATROSKA_ID_TAGTARGETS_TRACKUID 0x63C5
#define MATROSKA_ID_TAGTARGETS_CHAPTERUID 0x63C4
#define MATROSKA_ID_TAGTARGETS_ATTACHUID 0x63C6
 
/* IDs in the seekhead master */
#define MATROSKA_ID_SEEKENTRY 0x4DBB
 
/* IDs in the seekpoint master */
#define MATROSKA_ID_SEEKID 0x53AB
#define MATROSKA_ID_SEEKPOSITION 0x53AC
 
/* IDs in the cluster master */
#define MATROSKA_ID_CLUSTERTIMECODE 0xE7
#define MATROSKA_ID_CLUSTERPOSITION 0xA7
#define MATROSKA_ID_CLUSTERPREVSIZE 0xAB
#define MATROSKA_ID_BLOCKGROUP 0xA0
#define MATROSKA_ID_BLOCKADDITIONS 0x75A1
#define MATROSKA_ID_BLOCKMORE 0xA6
#define MATROSKA_ID_BLOCKADDID 0xEE
#define MATROSKA_ID_BLOCKADDITIONAL 0xA5
#define MATROSKA_ID_SIMPLEBLOCK 0xA3
 
/* IDs in the blockgroup master */
#define MATROSKA_ID_BLOCK 0xA1
#define MATROSKA_ID_BLOCKDURATION 0x9B
#define MATROSKA_ID_BLOCKREFERENCE 0xFB
#define MATROSKA_ID_CODECSTATE 0xA4
#define MATROSKA_ID_DISCARDPADDING 0x75A2
 
/* IDs in the attachments master */
#define MATROSKA_ID_ATTACHEDFILE 0x61A7
#define MATROSKA_ID_FILEDESC 0x467E
#define MATROSKA_ID_FILENAME 0x466E
#define MATROSKA_ID_FILEMIMETYPE 0x4660
#define MATROSKA_ID_FILEDATA 0x465C
#define MATROSKA_ID_FILEUID 0x46AE
 
/* IDs in the chapters master */
#define MATROSKA_ID_EDITIONENTRY 0x45B9
#define MATROSKA_ID_CHAPTERATOM 0xB6
#define MATROSKA_ID_CHAPTERTIMESTART 0x91
#define MATROSKA_ID_CHAPTERTIMEEND 0x92
#define MATROSKA_ID_CHAPTERDISPLAY 0x80
#define MATROSKA_ID_CHAPSTRING 0x85
#define MATROSKA_ID_CHAPLANG 0x437C
#define MATROSKA_ID_EDITIONUID 0x45BC
#define MATROSKA_ID_EDITIONFLAGHIDDEN 0x45BD
#define MATROSKA_ID_EDITIONFLAGDEFAULT 0x45DB
#define MATROSKA_ID_EDITIONFLAGORDERED 0x45DD
#define MATROSKA_ID_CHAPTERUID 0x73C4
#define MATROSKA_ID_CHAPTERFLAGHIDDEN 0x98
#define MATROSKA_ID_CHAPTERFLAGENABLED 0x4598
#define MATROSKA_ID_CHAPTERPHYSEQUIV 0x63C3
 
/* TrackType element values as defined by the Matroska specification. */
typedef enum {
    MATROSKA_TRACK_TYPE_NONE     = 0x0,
    MATROSKA_TRACK_TYPE_VIDEO    = 0x1,
    MATROSKA_TRACK_TYPE_AUDIO    = 0x2,
    MATROSKA_TRACK_TYPE_COMPLEX  = 0x3,
    MATROSKA_TRACK_TYPE_LOGO     = 0x10,
    MATROSKA_TRACK_TYPE_SUBTITLE = 0x11,
    MATROSKA_TRACK_TYPE_CONTROL  = 0x20,
    MATROSKA_TRACK_TYPE_METADATA = 0x21,
} MatroskaTrackType;
 
/* ContentCompAlgo element values (track content compression algorithms). */
typedef enum {
    MATROSKA_TRACK_ENCODING_COMP_ZLIB        = 0,
    MATROSKA_TRACK_ENCODING_COMP_BZLIB       = 1,
    MATROSKA_TRACK_ENCODING_COMP_LZO         = 2,
    MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP = 3,
} MatroskaTrackEncodingCompAlgo;
 
/* StereoMode element values; must stay in sync with the
 * ff_matroska_video_stereo_mode name table in matroska.c. */
typedef enum {
    MATROSKA_VIDEO_STEREOMODE_TYPE_MONO               = 0,
    MATROSKA_VIDEO_STEREOMODE_TYPE_LEFT_RIGHT         = 1,
    MATROSKA_VIDEO_STEREOMODE_TYPE_BOTTOM_TOP         = 2,
    MATROSKA_VIDEO_STEREOMODE_TYPE_TOP_BOTTOM         = 3,
    MATROSKA_VIDEO_STEREOMODE_TYPE_CHECKERBOARD_RL    = 4,
    MATROSKA_VIDEO_STEREOMODE_TYPE_CHECKERBOARD_LR    = 5,
    MATROSKA_VIDEO_STEREOMODE_TYPE_ROW_INTERLEAVED_RL = 6,
    MATROSKA_VIDEO_STEREOMODE_TYPE_ROW_INTERLEAVED_LR = 7,
    MATROSKA_VIDEO_STEREOMODE_TYPE_COL_INTERLEAVED_RL = 8,
    MATROSKA_VIDEO_STEREOMODE_TYPE_COL_INTERLEAVED_LR = 9,
    MATROSKA_VIDEO_STEREOMODE_TYPE_ANAGLYPH_CYAN_RED  = 10,
    MATROSKA_VIDEO_STEREOMODE_TYPE_RIGHT_LEFT         = 11,
    MATROSKA_VIDEO_STEREOMODE_TYPE_ANAGLYPH_GREEN_MAG = 12,
    MATROSKA_VIDEO_STEREOMODE_TYPE_BOTH_EYES_BLOCK_LR = 13,
    MATROSKA_VIDEO_STEREOMODE_TYPE_BOTH_EYES_BLOCK_RL = 14,
} MatroskaVideoStereoModeType;
 
/*
* Matroska Codec IDs, strings
*/
 
/* One entry of the Matroska CodecID string -> codec ID table
 * (ff_mkv_codec_tags); str is a fixed-size, NUL-terminated buffer. */
typedef struct CodecTags{
    char str[22];
    enum AVCodecID id;
}CodecTags;
 
/* max. depth in the EBML tree structure */
#define EBML_MAX_DEPTH 16
 
#define MATROSKA_VIDEO_STEREO_MODE_COUNT 15
#define MATROSKA_VIDEO_STEREO_PLANE_COUNT 3
 
extern const CodecTags ff_mkv_codec_tags[];
extern const CodecMime ff_mkv_mime_tags[];
extern const AVMetadataConv ff_mkv_metadata_conv[];
extern const char * const ff_matroska_video_stereo_mode[MATROSKA_VIDEO_STEREO_MODE_COUNT];
extern const char * const ff_matroska_video_stereo_plane[MATROSKA_VIDEO_STEREO_PLANE_COUNT];
 
#endif /* AVFORMAT_MATROSKA_H */
/contrib/sdk/sources/ffmpeg/libavformat/matroskadec.c
0,0 → 1,2862
/*
* Matroska file demuxer
* Copyright (c) 2003-2008 The FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Matroska file demuxer
* @author Ronald Bultje <rbultje@ronald.bitfreak.net>
* @author with a little help from Moritz Bunkus <moritz@bunkus.org>
* @author totally reworked by Aurelien Jacobs <aurel@gnuage.org>
* @see specs available on the Matroska project page: http://www.matroska.org/
*/
 
#include <stdio.h>
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
/* For ff_codec_get_id(). */
#include "riff.h"
#include "isom.h"
#include "rmsipr.h"
#include "matroska.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/mpeg4audio.h"
#include "libavutil/base64.h"
#include "libavutil/intfloat.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/lzo.h"
#include "libavutil/dict.h"
#if CONFIG_ZLIB
#include <zlib.h>
#endif
#if CONFIG_BZLIB
#include <bzlib.h>
#endif
 
/* Payload types an EBML element can be parsed into; used as the 'type'
 * field of EbmlSyntax table entries. */
typedef enum {
    EBML_NONE,       // known element, payload is skipped
    EBML_UINT,       // big-endian unsigned integer
    EBML_FLOAT,      // 4- or 8-byte IEEE float
    EBML_STR,        // ASCII string
    EBML_UTF8,       // UTF-8 string
    EBML_BIN,        // raw binary payload
    EBML_NEST,       // master element containing sub-elements
    EBML_PASS,       // re-dispatch the same ID against a nested table
    EBML_STOP,       // stop parsing when this element is met
    EBML_TYPE_COUNT  // number of types; sizes lookup tables
} EbmlType;
 
/*
 * One entry of a declarative parsing table: maps an EBML element ID to
 * its payload type and the place to store the decoded value.
 */
typedef const struct EbmlSyntax {
    uint32_t id;         // EBML element ID this entry matches (0 terminates a table)
    EbmlType type;       // how the payload is decoded
    int list_elem_size;  // >0: destination is an EbmlList of elements this size
    int data_offset;     // offsetof() the destination within the context struct
    union {
        uint64_t u;                  // default for EBML_UINT
        double f;                    // default for EBML_FLOAT
        const char *s;               // default for EBML_STR/EBML_UTF8 (may be NULL)
        const struct EbmlSyntax *n;  // sub-table for EBML_NEST/EBML_PASS
    } def;
} EbmlSyntax;
 
/* Growable array of parsed elements; the element size comes from the
 * matching EbmlSyntax entry's list_elem_size. */
typedef struct {
    int nb_elem;  // number of elements stored
    void *elem;   // contiguous element storage
} EbmlList;

/* Binary payload of an EBML_BIN element. */
typedef struct {
    int size;       // payload size in bytes
    uint8_t *data;  // payload bytes (padded alloc via av_fast_padded_malloc)
    int64_t pos;    // file offset the payload was read from
} EbmlBin;
 
/* Parsed fields of the top-level EBML file header (see ebml_header table). */
typedef struct {
    uint64_t version;          // EBMLReadVersion
    uint64_t max_size;         // EBMLMaxSizeLength
    uint64_t id_length;        // EBMLMaxIDLength
    char *doctype;             // DocType string, e.g. "matroska" or "webm"
    uint64_t doctype_version;  // DocTypeReadVersion
} Ebml;
 
/* ContentCompression settings of one track content encoding. */
typedef struct {
    uint64_t algo;     // compression algorithm identifier
    EbmlBin settings;  // algorithm-specific settings payload
} MatroskaTrackCompression;

/* ContentEncryption settings of one track content encoding. */
typedef struct {
    uint64_t algo;   // encryption algorithm identifier
    EbmlBin key_id;  // ContentEncKeyID payload
} MatroskaTrackEncryption;

/* One ContentEncoding element of a track. */
typedef struct {
    uint64_t scope;
    uint64_t type;
    MatroskaTrackCompression compression;
    MatroskaTrackEncryption encryption;
} MatroskaTrackEncoding;

/* Video-specific track properties. */
typedef struct {
    double frame_rate;
    uint64_t display_width;
    uint64_t display_height;
    uint64_t pixel_width;
    uint64_t pixel_height;
    EbmlBin color_space;
    uint64_t stereo_mode;
    uint64_t alpha_mode;
} MatroskaTrackVideo;

/* Audio-specific track properties. */
typedef struct {
    double samplerate;
    double out_samplerate;
    uint64_t bitdepth;
    uint64_t channels;

    /* real audio header (extracted from extradata) */
    int coded_framesize;
    int sub_packet_h;
    int frame_size;
    int sub_packet_size;
    int sub_packet_cnt;
    int pkt_cnt;
    uint64_t buf_timecode;
    uint8_t *buf;
} MatroskaTrackAudio;

/* One TrackPlane entry of a TrackCombinePlanes operation. */
typedef struct {
    uint64_t uid;
    uint64_t type;
} MatroskaTrackPlane;

/* TrackOperation: set of planes combined into this track. */
typedef struct {
    EbmlList combine_planes;  // list of MatroskaTrackPlane
} MatroskaTrackOperation;

/* One TrackEntry, plus demuxer-side state for the track. */
typedef struct {
    uint64_t num;   // TrackNumber (looked up by matroska_find_track_by_num)
    uint64_t uid;
    uint64_t type;
    char *name;
    char *codec_id;
    EbmlBin codec_priv;
    char *language;
    double time_scale;
    uint64_t default_duration;
    uint64_t flag_default;
    uint64_t flag_forced;
    uint64_t codec_delay;
    uint64_t seek_preroll;
    MatroskaTrackVideo video;
    MatroskaTrackAudio audio;
    MatroskaTrackOperation operation;
    EbmlList encodings;  // list of MatroskaTrackEncoding

    AVStream *stream;    // lavf stream this track maps to
    int64_t end_timecode;
    int ms_compat;
    uint64_t max_block_additional_id;
} MatroskaTrack;

/* One AttachedFile element (type name keeps the original misspelling —
 * it is referenced by the syntax tables below). */
typedef struct {
    uint64_t uid;
    char *filename;
    char *mime;
    EbmlBin bin;  // attached file payload

    AVStream *stream;
} MatroskaAttachement;

/* One ChapterAtom. */
typedef struct {
    uint64_t start;  // ChapterTimeStart
    uint64_t end;    // ChapterTimeEnd
    uint64_t uid;
    char *title;

    AVChapter *chapter;
} MatroskaChapter;

/* One CueTrackPositions entry. */
typedef struct {
    uint64_t track;
    uint64_t pos;  // CueClusterPosition
} MatroskaIndexPos;

/* One CuePoint: a timestamp plus its per-track positions. */
typedef struct {
    uint64_t time;
    EbmlList pos;  // list of MatroskaIndexPos
} MatroskaIndex;

/* One SimpleTag; may nest further SimpleTags in 'sub'. */
typedef struct {
    char *name;
    char *string;
    char *lang;
    uint64_t def;
    EbmlList sub;  // list of nested MatroskaTag
} MatroskaTag;

/* Targets element of a Tag: what the tag applies to. */
typedef struct {
    char *type;
    uint64_t typevalue;
    uint64_t trackuid;
    uint64_t chapteruid;
    uint64_t attachuid;
} MatroskaTagTarget;

/* One Tag element: a target plus its SimpleTags. */
typedef struct {
    MatroskaTagTarget target;
    EbmlList tag;  // list of MatroskaTag
} MatroskaTags;

/* One Seek entry of the SeekHead: element ID and its file position. */
typedef struct {
    uint64_t id;
    uint64_t pos;
} MatroskaSeekhead;

/* One open master-element level during EBML parsing. */
typedef struct {
    uint64_t start;   // file offset where the element's payload starts
    uint64_t length;  // payload length (0xffffffffffffff = unknown size)
} MatroskaLevel;

/* One Cluster: its timecode and the blocks collected from it. */
typedef struct {
    uint64_t timecode;
    EbmlList blocks;  // list of MatroskaBlock
} MatroskaCluster;
 
/* Overall demuxer state; also the destination struct for the
 * segment-level EBML syntax tables. */
typedef struct {
    AVFormatContext *ctx;

    /* EBML stuff */
    int num_levels;                        // depth of the level stack below
    MatroskaLevel levels[EBML_MAX_DEPTH];  // stack of open master elements
    int level_up;
    uint32_t current_id;                   // pre-read element ID, 0 if none pending

    uint64_t time_scale;
    double duration;
    char *title;
    EbmlBin date_utc;
    EbmlList tracks;       // list of MatroskaTrack
    EbmlList attachments;  // list of MatroskaAttachement
    EbmlList chapters;     // list of MatroskaChapter
    EbmlList index;        // list of MatroskaIndex
    EbmlList tags;         // list of MatroskaTags
    EbmlList seekhead;     // list of MatroskaSeekhead

    /* byte position of the segment inside the stream */
    int64_t segment_start;

    /* the packet queue */
    AVPacket **packets;
    int num_packets;
    AVPacket *prev_pkt;

    int done;  // set once EOF is reached / resync fails

    /* What to skip before effectively reading a packet. */
    int skip_to_keyframe;
    uint64_t skip_to_timecode;

    /* File has a CUES element, but we defer parsing until it is needed. */
    int cues_parsing_deferred;

    int current_cluster_num_blocks;
    int64_t current_cluster_pos;
    MatroskaCluster current_cluster;

    /* File has SSA subtitles which prevent incremental cluster parsing. */
    int contains_ssa;
} MatroskaDemuxContext;
 
/* One Block/SimpleBlock collected while parsing a cluster. */
typedef struct {
    uint64_t duration;        // BlockDuration value
    int64_t reference;        // BlockReference value
    uint64_t non_simple;      // set to 1 when parsed via a BlockGroup (table default)
    EbmlBin bin;              // raw block payload
    uint64_t additional_id;   // BlockAddID
    EbmlBin additional;       // BlockAdditional payload
    uint64_t discard_padding; // DiscardPadding value
} MatroskaBlock;
 
static EbmlSyntax ebml_header[] = {
{ EBML_ID_EBMLREADVERSION, EBML_UINT, 0, offsetof(Ebml,version), {.u=EBML_VERSION} },
{ EBML_ID_EBMLMAXSIZELENGTH, EBML_UINT, 0, offsetof(Ebml,max_size), {.u=8} },
{ EBML_ID_EBMLMAXIDLENGTH, EBML_UINT, 0, offsetof(Ebml,id_length), {.u=4} },
{ EBML_ID_DOCTYPE, EBML_STR, 0, offsetof(Ebml,doctype), {.s="(none)"} },
{ EBML_ID_DOCTYPEREADVERSION, EBML_UINT, 0, offsetof(Ebml,doctype_version), {.u=1} },
{ EBML_ID_EBMLVERSION, EBML_NONE },
{ EBML_ID_DOCTYPEVERSION, EBML_NONE },
{ 0 }
};
 
static EbmlSyntax ebml_syntax[] = {
{ EBML_ID_HEADER, EBML_NEST, 0, 0, {.n=ebml_header} },
{ 0 }
};
 
static EbmlSyntax matroska_info[] = {
{ MATROSKA_ID_TIMECODESCALE, EBML_UINT, 0, offsetof(MatroskaDemuxContext,time_scale), {.u=1000000} },
{ MATROSKA_ID_DURATION, EBML_FLOAT, 0, offsetof(MatroskaDemuxContext,duration) },
{ MATROSKA_ID_TITLE, EBML_UTF8, 0, offsetof(MatroskaDemuxContext,title) },
{ MATROSKA_ID_WRITINGAPP, EBML_NONE },
{ MATROSKA_ID_MUXINGAPP, EBML_NONE },
{ MATROSKA_ID_DATEUTC, EBML_BIN, 0, offsetof(MatroskaDemuxContext,date_utc) },
{ MATROSKA_ID_SEGMENTUID, EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_track_video[] = {
{ MATROSKA_ID_VIDEOFRAMERATE, EBML_FLOAT,0, offsetof(MatroskaTrackVideo,frame_rate) },
{ MATROSKA_ID_VIDEODISPLAYWIDTH, EBML_UINT, 0, offsetof(MatroskaTrackVideo,display_width), {.u=-1} },
{ MATROSKA_ID_VIDEODISPLAYHEIGHT, EBML_UINT, 0, offsetof(MatroskaTrackVideo,display_height), {.u=-1} },
{ MATROSKA_ID_VIDEOPIXELWIDTH, EBML_UINT, 0, offsetof(MatroskaTrackVideo,pixel_width) },
{ MATROSKA_ID_VIDEOPIXELHEIGHT, EBML_UINT, 0, offsetof(MatroskaTrackVideo,pixel_height) },
{ MATROSKA_ID_VIDEOCOLORSPACE, EBML_BIN, 0, offsetof(MatroskaTrackVideo,color_space) },
{ MATROSKA_ID_VIDEOSTEREOMODE, EBML_UINT, 0, offsetof(MatroskaTrackVideo,stereo_mode) },
{ MATROSKA_ID_VIDEOALPHAMODE, EBML_UINT, 0, offsetof(MatroskaTrackVideo,alpha_mode) },
{ MATROSKA_ID_VIDEOPIXELCROPB, EBML_NONE },
{ MATROSKA_ID_VIDEOPIXELCROPT, EBML_NONE },
{ MATROSKA_ID_VIDEOPIXELCROPL, EBML_NONE },
{ MATROSKA_ID_VIDEOPIXELCROPR, EBML_NONE },
{ MATROSKA_ID_VIDEODISPLAYUNIT, EBML_NONE },
{ MATROSKA_ID_VIDEOFLAGINTERLACED,EBML_NONE },
{ MATROSKA_ID_VIDEOASPECTRATIO, EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_track_audio[] = {
{ MATROSKA_ID_AUDIOSAMPLINGFREQ, EBML_FLOAT,0, offsetof(MatroskaTrackAudio,samplerate), {.f=8000.0} },
{ MATROSKA_ID_AUDIOOUTSAMPLINGFREQ,EBML_FLOAT,0,offsetof(MatroskaTrackAudio,out_samplerate) },
{ MATROSKA_ID_AUDIOBITDEPTH, EBML_UINT, 0, offsetof(MatroskaTrackAudio,bitdepth) },
{ MATROSKA_ID_AUDIOCHANNELS, EBML_UINT, 0, offsetof(MatroskaTrackAudio,channels), {.u=1} },
{ 0 }
};
 
static EbmlSyntax matroska_track_encoding_compression[] = {
{ MATROSKA_ID_ENCODINGCOMPALGO, EBML_UINT, 0, offsetof(MatroskaTrackCompression,algo), {.u=0} },
{ MATROSKA_ID_ENCODINGCOMPSETTINGS,EBML_BIN, 0, offsetof(MatroskaTrackCompression,settings) },
{ 0 }
};
 
static EbmlSyntax matroska_track_encoding_encryption[] = {
{ MATROSKA_ID_ENCODINGENCALGO, EBML_UINT, 0, offsetof(MatroskaTrackEncryption,algo), {.u=0} },
{ MATROSKA_ID_ENCODINGENCKEYID, EBML_BIN, 0, offsetof(MatroskaTrackEncryption,key_id) },
{ MATROSKA_ID_ENCODINGENCAESSETTINGS, EBML_NONE },
{ MATROSKA_ID_ENCODINGSIGALGO, EBML_NONE },
{ MATROSKA_ID_ENCODINGSIGHASHALGO, EBML_NONE },
{ MATROSKA_ID_ENCODINGSIGKEYID, EBML_NONE },
{ MATROSKA_ID_ENCODINGSIGNATURE, EBML_NONE },
{ 0 }
};
static EbmlSyntax matroska_track_encoding[] = {
{ MATROSKA_ID_ENCODINGSCOPE, EBML_UINT, 0, offsetof(MatroskaTrackEncoding,scope), {.u=1} },
{ MATROSKA_ID_ENCODINGTYPE, EBML_UINT, 0, offsetof(MatroskaTrackEncoding,type), {.u=0} },
{ MATROSKA_ID_ENCODINGCOMPRESSION,EBML_NEST, 0, offsetof(MatroskaTrackEncoding,compression), {.n=matroska_track_encoding_compression} },
{ MATROSKA_ID_ENCODINGENCRYPTION, EBML_NEST, 0, offsetof(MatroskaTrackEncoding,encryption), {.n=matroska_track_encoding_encryption} },
{ MATROSKA_ID_ENCODINGORDER, EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_track_encodings[] = {
{ MATROSKA_ID_TRACKCONTENTENCODING, EBML_NEST, sizeof(MatroskaTrackEncoding), offsetof(MatroskaTrack,encodings), {.n=matroska_track_encoding} },
{ 0 }
};
 
static EbmlSyntax matroska_track_plane[] = {
{ MATROSKA_ID_TRACKPLANEUID, EBML_UINT, 0, offsetof(MatroskaTrackPlane,uid) },
{ MATROSKA_ID_TRACKPLANETYPE, EBML_UINT, 0, offsetof(MatroskaTrackPlane,type) },
{ 0 }
};
 
static EbmlSyntax matroska_track_combine_planes[] = {
{ MATROSKA_ID_TRACKPLANE, EBML_NEST, sizeof(MatroskaTrackPlane), offsetof(MatroskaTrackOperation,combine_planes), {.n=matroska_track_plane} },
{ 0 }
};
 
static EbmlSyntax matroska_track_operation[] = {
{ MATROSKA_ID_TRACKCOMBINEPLANES, EBML_NEST, 0, 0, {.n=matroska_track_combine_planes} },
{ 0 }
};
 
static EbmlSyntax matroska_track[] = {
{ MATROSKA_ID_TRACKNUMBER, EBML_UINT, 0, offsetof(MatroskaTrack,num) },
{ MATROSKA_ID_TRACKNAME, EBML_UTF8, 0, offsetof(MatroskaTrack,name) },
{ MATROSKA_ID_TRACKUID, EBML_UINT, 0, offsetof(MatroskaTrack,uid) },
{ MATROSKA_ID_TRACKTYPE, EBML_UINT, 0, offsetof(MatroskaTrack,type) },
{ MATROSKA_ID_CODECID, EBML_STR, 0, offsetof(MatroskaTrack,codec_id) },
{ MATROSKA_ID_CODECPRIVATE, EBML_BIN, 0, offsetof(MatroskaTrack,codec_priv) },
{ MATROSKA_ID_TRACKLANGUAGE, EBML_UTF8, 0, offsetof(MatroskaTrack,language), {.s="eng"} },
{ MATROSKA_ID_TRACKDEFAULTDURATION, EBML_UINT, 0, offsetof(MatroskaTrack,default_duration) },
{ MATROSKA_ID_TRACKTIMECODESCALE, EBML_FLOAT,0, offsetof(MatroskaTrack,time_scale), {.f=1.0} },
{ MATROSKA_ID_TRACKFLAGDEFAULT, EBML_UINT, 0, offsetof(MatroskaTrack,flag_default), {.u=1} },
{ MATROSKA_ID_TRACKFLAGFORCED, EBML_UINT, 0, offsetof(MatroskaTrack,flag_forced), {.u=0} },
{ MATROSKA_ID_TRACKVIDEO, EBML_NEST, 0, offsetof(MatroskaTrack,video), {.n=matroska_track_video} },
{ MATROSKA_ID_TRACKAUDIO, EBML_NEST, 0, offsetof(MatroskaTrack,audio), {.n=matroska_track_audio} },
{ MATROSKA_ID_TRACKOPERATION, EBML_NEST, 0, offsetof(MatroskaTrack,operation), {.n=matroska_track_operation} },
{ MATROSKA_ID_TRACKCONTENTENCODINGS,EBML_NEST, 0, 0, {.n=matroska_track_encodings} },
{ MATROSKA_ID_TRACKMAXBLKADDID, EBML_UINT, 0, offsetof(MatroskaTrack,max_block_additional_id) },
{ MATROSKA_ID_CODECDELAY, EBML_UINT, 0, offsetof(MatroskaTrack,codec_delay) },
{ MATROSKA_ID_SEEKPREROLL, EBML_UINT, 0, offsetof(MatroskaTrack,seek_preroll) },
{ MATROSKA_ID_TRACKFLAGENABLED, EBML_NONE },
{ MATROSKA_ID_TRACKFLAGLACING, EBML_NONE },
{ MATROSKA_ID_CODECNAME, EBML_NONE },
{ MATROSKA_ID_CODECDECODEALL, EBML_NONE },
{ MATROSKA_ID_CODECINFOURL, EBML_NONE },
{ MATROSKA_ID_CODECDOWNLOADURL, EBML_NONE },
{ MATROSKA_ID_TRACKMINCACHE, EBML_NONE },
{ MATROSKA_ID_TRACKMAXCACHE, EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_tracks[] = {
{ MATROSKA_ID_TRACKENTRY, EBML_NEST, sizeof(MatroskaTrack), offsetof(MatroskaDemuxContext,tracks), {.n=matroska_track} },
{ 0 }
};
 
static EbmlSyntax matroska_attachment[] = {
{ MATROSKA_ID_FILEUID, EBML_UINT, 0, offsetof(MatroskaAttachement,uid) },
{ MATROSKA_ID_FILENAME, EBML_UTF8, 0, offsetof(MatroskaAttachement,filename) },
{ MATROSKA_ID_FILEMIMETYPE, EBML_STR, 0, offsetof(MatroskaAttachement,mime) },
{ MATROSKA_ID_FILEDATA, EBML_BIN, 0, offsetof(MatroskaAttachement,bin) },
{ MATROSKA_ID_FILEDESC, EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_attachments[] = {
{ MATROSKA_ID_ATTACHEDFILE, EBML_NEST, sizeof(MatroskaAttachement), offsetof(MatroskaDemuxContext,attachments), {.n=matroska_attachment} },
{ 0 }
};
 
static EbmlSyntax matroska_chapter_display[] = {
{ MATROSKA_ID_CHAPSTRING, EBML_UTF8, 0, offsetof(MatroskaChapter,title) },
{ MATROSKA_ID_CHAPLANG, EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_chapter_entry[] = {
{ MATROSKA_ID_CHAPTERTIMESTART, EBML_UINT, 0, offsetof(MatroskaChapter,start), {.u=AV_NOPTS_VALUE} },
{ MATROSKA_ID_CHAPTERTIMEEND, EBML_UINT, 0, offsetof(MatroskaChapter,end), {.u=AV_NOPTS_VALUE} },
{ MATROSKA_ID_CHAPTERUID, EBML_UINT, 0, offsetof(MatroskaChapter,uid) },
{ MATROSKA_ID_CHAPTERDISPLAY, EBML_NEST, 0, 0, {.n=matroska_chapter_display} },
{ MATROSKA_ID_CHAPTERFLAGHIDDEN, EBML_NONE },
{ MATROSKA_ID_CHAPTERFLAGENABLED, EBML_NONE },
{ MATROSKA_ID_CHAPTERPHYSEQUIV, EBML_NONE },
{ MATROSKA_ID_CHAPTERATOM, EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_chapter[] = {
{ MATROSKA_ID_CHAPTERATOM, EBML_NEST, sizeof(MatroskaChapter), offsetof(MatroskaDemuxContext,chapters), {.n=matroska_chapter_entry} },
{ MATROSKA_ID_EDITIONUID, EBML_NONE },
{ MATROSKA_ID_EDITIONFLAGHIDDEN, EBML_NONE },
{ MATROSKA_ID_EDITIONFLAGDEFAULT, EBML_NONE },
{ MATROSKA_ID_EDITIONFLAGORDERED, EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_chapters[] = {
{ MATROSKA_ID_EDITIONENTRY, EBML_NEST, 0, 0, {.n=matroska_chapter} },
{ 0 }
};
 
static EbmlSyntax matroska_index_pos[] = {
{ MATROSKA_ID_CUETRACK, EBML_UINT, 0, offsetof(MatroskaIndexPos,track) },
{ MATROSKA_ID_CUECLUSTERPOSITION, EBML_UINT, 0, offsetof(MatroskaIndexPos,pos) },
{ MATROSKA_ID_CUERELATIVEPOSITION,EBML_NONE },
{ MATROSKA_ID_CUEDURATION, EBML_NONE },
{ MATROSKA_ID_CUEBLOCKNUMBER, EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_index_entry[] = {
{ MATROSKA_ID_CUETIME, EBML_UINT, 0, offsetof(MatroskaIndex,time) },
{ MATROSKA_ID_CUETRACKPOSITION, EBML_NEST, sizeof(MatroskaIndexPos), offsetof(MatroskaIndex,pos), {.n=matroska_index_pos} },
{ 0 }
};
 
static EbmlSyntax matroska_index[] = {
{ MATROSKA_ID_POINTENTRY, EBML_NEST, sizeof(MatroskaIndex), offsetof(MatroskaDemuxContext,index), {.n=matroska_index_entry} },
{ 0 }
};
 
static EbmlSyntax matroska_simpletag[] = {
{ MATROSKA_ID_TAGNAME, EBML_UTF8, 0, offsetof(MatroskaTag,name) },
{ MATROSKA_ID_TAGSTRING, EBML_UTF8, 0, offsetof(MatroskaTag,string) },
{ MATROSKA_ID_TAGLANG, EBML_STR, 0, offsetof(MatroskaTag,lang), {.s="und"} },
{ MATROSKA_ID_TAGDEFAULT, EBML_UINT, 0, offsetof(MatroskaTag,def) },
{ MATROSKA_ID_TAGDEFAULT_BUG, EBML_UINT, 0, offsetof(MatroskaTag,def) },
{ MATROSKA_ID_SIMPLETAG, EBML_NEST, sizeof(MatroskaTag), offsetof(MatroskaTag,sub), {.n=matroska_simpletag} },
{ 0 }
};
 
static EbmlSyntax matroska_tagtargets[] = {
{ MATROSKA_ID_TAGTARGETS_TYPE, EBML_STR, 0, offsetof(MatroskaTagTarget,type) },
{ MATROSKA_ID_TAGTARGETS_TYPEVALUE, EBML_UINT, 0, offsetof(MatroskaTagTarget,typevalue), {.u=50} },
{ MATROSKA_ID_TAGTARGETS_TRACKUID, EBML_UINT, 0, offsetof(MatroskaTagTarget,trackuid) },
{ MATROSKA_ID_TAGTARGETS_CHAPTERUID,EBML_UINT, 0, offsetof(MatroskaTagTarget,chapteruid) },
{ MATROSKA_ID_TAGTARGETS_ATTACHUID, EBML_UINT, 0, offsetof(MatroskaTagTarget,attachuid) },
{ 0 }
};
 
static EbmlSyntax matroska_tag[] = {
{ MATROSKA_ID_SIMPLETAG, EBML_NEST, sizeof(MatroskaTag), offsetof(MatroskaTags,tag), {.n=matroska_simpletag} },
{ MATROSKA_ID_TAGTARGETS, EBML_NEST, 0, offsetof(MatroskaTags,target), {.n=matroska_tagtargets} },
{ 0 }
};
 
static EbmlSyntax matroska_tags[] = {
{ MATROSKA_ID_TAG, EBML_NEST, sizeof(MatroskaTags), offsetof(MatroskaDemuxContext,tags), {.n=matroska_tag} },
{ 0 }
};
 
static EbmlSyntax matroska_seekhead_entry[] = {
{ MATROSKA_ID_SEEKID, EBML_UINT, 0, offsetof(MatroskaSeekhead,id) },
{ MATROSKA_ID_SEEKPOSITION, EBML_UINT, 0, offsetof(MatroskaSeekhead,pos), {.u=-1} },
{ 0 }
};
 
static EbmlSyntax matroska_seekhead[] = {
{ MATROSKA_ID_SEEKENTRY, EBML_NEST, sizeof(MatroskaSeekhead), offsetof(MatroskaDemuxContext,seekhead), {.n=matroska_seekhead_entry} },
{ 0 }
};
 
static EbmlSyntax matroska_segment[] = {
{ MATROSKA_ID_INFO, EBML_NEST, 0, 0, {.n=matroska_info } },
{ MATROSKA_ID_TRACKS, EBML_NEST, 0, 0, {.n=matroska_tracks } },
{ MATROSKA_ID_ATTACHMENTS, EBML_NEST, 0, 0, {.n=matroska_attachments} },
{ MATROSKA_ID_CHAPTERS, EBML_NEST, 0, 0, {.n=matroska_chapters } },
{ MATROSKA_ID_CUES, EBML_NEST, 0, 0, {.n=matroska_index } },
{ MATROSKA_ID_TAGS, EBML_NEST, 0, 0, {.n=matroska_tags } },
{ MATROSKA_ID_SEEKHEAD, EBML_NEST, 0, 0, {.n=matroska_seekhead } },
{ MATROSKA_ID_CLUSTER, EBML_STOP },
{ 0 }
};
 
static EbmlSyntax matroska_segments[] = {
{ MATROSKA_ID_SEGMENT, EBML_NEST, 0, 0, {.n=matroska_segment } },
{ 0 }
};
 
static EbmlSyntax matroska_blockmore[] = {
{ MATROSKA_ID_BLOCKADDID, EBML_UINT, 0, offsetof(MatroskaBlock,additional_id) },
{ MATROSKA_ID_BLOCKADDITIONAL, EBML_BIN, 0, offsetof(MatroskaBlock,additional) },
{ 0 }
};
 
static EbmlSyntax matroska_blockadditions[] = {
{ MATROSKA_ID_BLOCKMORE, EBML_NEST, 0, 0, {.n=matroska_blockmore} },
{ 0 }
};
 
static EbmlSyntax matroska_blockgroup[] = {
{ MATROSKA_ID_BLOCK, EBML_BIN, 0, offsetof(MatroskaBlock,bin) },
{ MATROSKA_ID_BLOCKADDITIONS, EBML_NEST, 0, 0, {.n=matroska_blockadditions} },
{ MATROSKA_ID_SIMPLEBLOCK, EBML_BIN, 0, offsetof(MatroskaBlock,bin) },
{ MATROSKA_ID_BLOCKDURATION, EBML_UINT, 0, offsetof(MatroskaBlock,duration) },
{ MATROSKA_ID_DISCARDPADDING, EBML_UINT, 0, offsetof(MatroskaBlock,discard_padding) },
{ MATROSKA_ID_BLOCKREFERENCE, EBML_UINT, 0, offsetof(MatroskaBlock,reference) },
{ MATROSKA_ID_CODECSTATE, EBML_NONE },
{ 1, EBML_UINT, 0, offsetof(MatroskaBlock,non_simple), {.u=1} },
{ 0 }
};
 
static EbmlSyntax matroska_cluster[] = {
{ MATROSKA_ID_CLUSTERTIMECODE,EBML_UINT,0, offsetof(MatroskaCluster,timecode) },
{ MATROSKA_ID_BLOCKGROUP, EBML_NEST, sizeof(MatroskaBlock), offsetof(MatroskaCluster,blocks), {.n=matroska_blockgroup} },
{ MATROSKA_ID_SIMPLEBLOCK, EBML_PASS, sizeof(MatroskaBlock), offsetof(MatroskaCluster,blocks), {.n=matroska_blockgroup} },
{ MATROSKA_ID_CLUSTERPOSITION,EBML_NONE },
{ MATROSKA_ID_CLUSTERPREVSIZE,EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_clusters[] = {
{ MATROSKA_ID_CLUSTER, EBML_NEST, 0, 0, {.n=matroska_cluster} },
{ MATROSKA_ID_INFO, EBML_NONE },
{ MATROSKA_ID_CUES, EBML_NONE },
{ MATROSKA_ID_TAGS, EBML_NONE },
{ MATROSKA_ID_SEEKHEAD, EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_cluster_incremental_parsing[] = {
{ MATROSKA_ID_CLUSTERTIMECODE,EBML_UINT,0, offsetof(MatroskaCluster,timecode) },
{ MATROSKA_ID_BLOCKGROUP, EBML_NEST, sizeof(MatroskaBlock), offsetof(MatroskaCluster,blocks), {.n=matroska_blockgroup} },
{ MATROSKA_ID_SIMPLEBLOCK, EBML_PASS, sizeof(MatroskaBlock), offsetof(MatroskaCluster,blocks), {.n=matroska_blockgroup} },
{ MATROSKA_ID_CLUSTERPOSITION,EBML_NONE },
{ MATROSKA_ID_CLUSTERPREVSIZE,EBML_NONE },
{ MATROSKA_ID_INFO, EBML_NONE },
{ MATROSKA_ID_CUES, EBML_NONE },
{ MATROSKA_ID_TAGS, EBML_NONE },
{ MATROSKA_ID_SEEKHEAD, EBML_NONE },
{ MATROSKA_ID_CLUSTER, EBML_STOP },
{ 0 }
};
 
static EbmlSyntax matroska_cluster_incremental[] = {
{ MATROSKA_ID_CLUSTERTIMECODE,EBML_UINT,0, offsetof(MatroskaCluster,timecode) },
{ MATROSKA_ID_BLOCKGROUP, EBML_STOP },
{ MATROSKA_ID_SIMPLEBLOCK, EBML_STOP },
{ MATROSKA_ID_CLUSTERPOSITION,EBML_NONE },
{ MATROSKA_ID_CLUSTERPREVSIZE,EBML_NONE },
{ 0 }
};
 
static EbmlSyntax matroska_clusters_incremental[] = {
{ MATROSKA_ID_CLUSTER, EBML_NEST, 0, 0, {.n=matroska_cluster_incremental} },
{ MATROSKA_ID_INFO, EBML_NONE },
{ MATROSKA_ID_CUES, EBML_NONE },
{ MATROSKA_ID_TAGS, EBML_NONE },
{ MATROSKA_ID_SEEKHEAD, EBML_NONE },
{ 0 }
};
 
static const char *const matroska_doctypes[] = { "matroska", "webm" };
 
/*
 * Skip ahead in the stream until a known top-level Matroska element ID
 * is found, then make it the current element.
 * Returns 0 on success, AVERROR_EOF when the end of the stream is hit.
 */
static int matroska_resync(MatroskaDemuxContext *matroska, int64_t last_pos)
{
    AVIOContext *pb = matroska->ctx->pb;
    uint32_t id;

    matroska->current_id = 0;
    matroska->num_levels = 0;

    /* restart the scan one byte past the position that failed */
    if (avio_seek(pb, last_pos + 1, SEEK_SET) < 0) {
        matroska->done = 1;
        return AVERROR_EOF;
    }

    /* slide a 32-bit window over the stream looking for a level-1 ID */
    for (id = avio_rb32(pb); !url_feof(pb); id = (id << 8) | avio_r8(pb)) {
        switch (id) {
        case MATROSKA_ID_INFO:
        case MATROSKA_ID_TRACKS:
        case MATROSKA_ID_CUES:
        case MATROSKA_ID_TAGS:
        case MATROSKA_ID_SEEKHEAD:
        case MATROSKA_ID_ATTACHMENTS:
        case MATROSKA_ID_CLUSTER:
        case MATROSKA_ID_CHAPTERS:
            matroska->current_id = id;
            return 0;
        }
    }

    matroska->done = 1;
    return AVERROR_EOF;
}
 
/*
 * Check whether the current file position has moved past the end of the
 * innermost open master element, and pop that level if so.
 * Return: 1 if a level ended, 0 otherwise.
 */
static int ebml_level_end(MatroskaDemuxContext *matroska)
{
    AVIOContext *pb = matroska->ctx->pb;
    int64_t pos = avio_tell(pb);
    MatroskaLevel *level;

    if (matroska->num_levels <= 0)
        return 0;

    level = &matroska->levels[matroska->num_levels - 1];
    /* a pending (already-read) element ID also terminates the level */
    if (pos - level->start >= level->length || matroska->current_id) {
        matroska->num_levels--;
        return 1;
    }
    return 0;
}
 
/*
 * Read: an "EBML number", which is defined as a variable-length
 * array of bytes. The first byte indicates the length by giving a
 * number of 0-bits followed by a one. The position of the first
 * "one" bit inside the first byte indicates the length of this
 * number. The descriptor bit itself is stripped from the result.
 * Returns: number of bytes read, < 0 on error
 */
static int ebml_read_num(MatroskaDemuxContext *matroska, AVIOContext *pb,
                         int max_size, uint64_t *number)
{
    int read = 1, n = 1;
    uint64_t total = 0;

    /* The first byte tells us the length in bytes - avio_r8() can normally
     * return 0, but since that's not a valid first ebmlID byte, we can
     * use it safely here to catch EOS. */
    if (!(total = avio_r8(pb))) {
        /* we might encounter EOS here */
        if (!url_feof(pb)) {
            int64_t pos = avio_tell(pb);
            av_log(matroska->ctx, AV_LOG_ERROR,
                   "Read error at pos. %"PRIu64" (0x%"PRIx64")\n",
                   pos, pos);
            return pb->error ? pb->error : AVERROR(EIO);
        }
        return AVERROR_EOF;
    }

    /* get the length of the EBML number */
    /* ff_log2_tab gives the index of the highest set bit of the first
     * byte, so this counts the leading zero bits plus one */
    read = 8 - ff_log2_tab[total];
    if (read > max_size) {
        int64_t pos = avio_tell(pb) - 1;
        av_log(matroska->ctx, AV_LOG_ERROR,
               "Invalid EBML number size tag 0x%02x at pos %"PRIu64" (0x%"PRIx64")\n",
               (uint8_t) total, pos, pos);
        return AVERROR_INVALIDDATA;
    }

    /* read out length */
    /* clear the length-descriptor bit, then append the remaining bytes */
    total ^= 1 << ff_log2_tab[total];
    while (n++ < read)
        total = (total << 8) | avio_r8(pb);

    *number = total;

    return read;
}
 
/**
 * Read a EBML length value.
 * All "unknown length" encodings (every value bit set for the given
 * descriptor size) are canonicalized to 0xffffffffffffff.
 */
static int ebml_read_length(MatroskaDemuxContext *matroska, AVIOContext *pb,
                            uint64_t *number)
{
    int res = ebml_read_num(matroska, pb, 8, number);

    if (res > 0) {
        uint64_t unknown = (1ULL << (7 * res)) - 1;
        if (*number == unknown)
            *number = 0xffffffffffffffULL;
    }
    return res;
}
 
/*
 * Read the next element as an unsigned int (big-endian, up to 8 bytes).
 * 0 is success, < 0 is failure.
 */
static int ebml_read_uint(AVIOContext *pb, int size, uint64_t *num)
{
    uint64_t value = 0;
    int i;

    if (size > 8)
        return AVERROR_INVALIDDATA;

    /* accumulate bytes, most significant first */
    for (i = 0; i < size; i++)
        value = (value << 8) | avio_r8(pb);

    *num = value;
    return 0;
}
 
/*
 * Read the next element as a float (0, 4 or 8 bytes; other sizes are
 * invalid). 0 is success, < 0 is failure.
 */
static int ebml_read_float(AVIOContext *pb, int size, double *num)
{
    switch (size) {
    case 0:
        *num = 0;
        break;
    case 4:
        *num = av_int2float(avio_rb32(pb));
        break;
    case 8:
        *num = av_int2double(avio_rb64(pb));
        break;
    default:
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
 
/*
 * Read the next element as an ASCII string.
 * The previous value of *str (e.g. a table default) is freed and
 * replaced. 0 is success, < 0 is failure.
 */
static int ebml_read_ascii(AVIOContext *pb, int size, char **str)
{
    /* EBML strings carry no terminator in the file; allocate one extra
     * byte and NUL-terminate ourselves */
    char *buf = av_malloc(size + 1);

    if (!buf)
        return AVERROR(ENOMEM);
    if (avio_read(pb, (uint8_t *) buf, size) != size) {
        av_free(buf);
        return AVERROR(EIO);
    }
    buf[size] = '\0';

    av_free(*str);
    *str = buf;
    return 0;
}
 
/*
 * Read the next element as binary data, remembering its file position.
 * 0 is success, < 0 is failure.
 */
static int ebml_read_binary(AVIOContext *pb, int length, EbmlBin *bin)
{
    /* reuse/grow the existing buffer (padded allocation) */
    av_fast_padded_malloc(&bin->data, &bin->size, length);
    if (!bin->data)
        return AVERROR(ENOMEM);

    bin->size = length;
    bin->pos  = avio_tell(pb);
    if (avio_read(pb, bin->data, length) == length)
        return 0;

    /* short read: drop the partial buffer */
    av_freep(&bin->data);
    bin->size = 0;
    return AVERROR(EIO);
}
 
/*
 * Read the next element, but only the header. The contents
 * are supposed to be sub-elements which can be read separately.
 * Pushes a new entry (start offset + length) on the level stack.
 * 0 is success, < 0 is failure.
 */
static int ebml_read_master(MatroskaDemuxContext *matroska, uint64_t length)
{
    AVIOContext *pb = matroska->ctx->pb;
    MatroskaLevel *level;

    if (matroska->num_levels >= EBML_MAX_DEPTH) {
        av_log(matroska->ctx, AV_LOG_ERROR,
               "File moves beyond max. allowed depth (%d)\n", EBML_MAX_DEPTH);
        return AVERROR(ENOSYS);
    }

    level         = &matroska->levels[matroska->num_levels];
    level->start  = avio_tell(pb);
    level->length = length;
    matroska->num_levels++;

    return 0;
}
 
/*
 * Read an unsigned "EBML" number out of an in-memory buffer.
 * Return: number of bytes processed, < 0 on error
 */
static int matroska_ebmlnum_uint(MatroskaDemuxContext *matroska,
                                 uint8_t *data, uint32_t size, uint64_t *num)
{
    AVIOContext pb;

    /* wrap the buffer in a temporary byte context so ebml_read_num()
     * can be reused */
    ffio_init_context(&pb, data, size, 0, NULL, NULL, NULL, NULL);
    return ebml_read_num(matroska, &pb, size < 8 ? size : 8, num);
}
 
/*
 * Same as matroska_ebmlnum_uint(), but the value is interpreted as a
 * signed number by re-centering the unsigned range on zero.
 */
static int matroska_ebmlnum_sint(MatroskaDemuxContext *matroska,
                                 uint8_t *data, uint32_t size, int64_t *num)
{
    uint64_t unum;
    int res = matroska_ebmlnum_uint(matroska, data, size, &unum);

    if (res < 0)
        return res;

    /* make signed: subtract half the range of a (7*res)-bit value */
    *num = unum - ((1LL << (7 * res - 1)) - 1);

    return res;
}
 
static int ebml_parse_elem(MatroskaDemuxContext *matroska,
EbmlSyntax *syntax, void *data);
 
/*
 * Look up an element ID in a syntax table and parse the element with
 * the matching entry. The terminating {0} entry doubles as the handler
 * for unknown elements (its EBML_NONE type makes the payload be skipped).
 */
static int ebml_parse_id(MatroskaDemuxContext *matroska, EbmlSyntax *syntax,
                         uint32_t id, void *data)
{
    int i;
    for (i=0; syntax[i].id; i++)
        if (id == syntax[i].id)
            break;
    /* a new top-level ID inside an unknown-size cluster means the
     * cluster just ended */
    if (!syntax[i].id && id == MATROSKA_ID_CLUSTER &&
        matroska->num_levels > 0 &&
        matroska->levels[matroska->num_levels-1].length == 0xffffffffffffff)
        return 0; // we reached the end of an unknown size cluster
    if (!syntax[i].id && id != EBML_ID_VOID && id != EBML_ID_CRC32) {
        av_log(matroska->ctx, AV_LOG_INFO, "Unknown entry 0x%X\n", id);
        if (matroska->ctx->error_recognition & AV_EF_EXPLODE)
            return AVERROR_INVALIDDATA;
    }
    return ebml_parse_elem(matroska, &syntax[i], data);
}
 
/*
 * Parse one element: read its ID (unless one is already pending in
 * matroska->current_id) and dispatch it against the syntax table.
 */
static int ebml_parse(MatroskaDemuxContext *matroska, EbmlSyntax *syntax,
                      void *data)
{
    uint64_t id;
    int res;

    if (matroska->current_id)
        return ebml_parse_id(matroska, syntax, matroska->current_id, data);

    res = ebml_read_num(matroska, matroska->ctx->pb, 4, &id);
    if (res < 0)
        return res;
    /* restore the length-descriptor bit stripped by ebml_read_num() */
    matroska->current_id = id | 1 << 7*res;
    return ebml_parse_id(matroska, syntax, matroska->current_id, data);
}
 
/*
 * Parse the children of a master element: first seed every destination
 * field with its declared default value, then consume sub-elements until
 * the master element's extent ends (or a sub-parse returns non-zero).
 */
static int ebml_parse_nest(MatroskaDemuxContext *matroska, EbmlSyntax *syntax,
                           void *data)
{
    int i, res = 0;

    /* install table defaults before any element is read */
    for (i=0; syntax[i].id; i++)
        switch (syntax[i].type) {
        case EBML_UINT:
            *(uint64_t *)((char *)data+syntax[i].data_offset) = syntax[i].def.u;
            break;
        case EBML_FLOAT:
            *(double *)((char *)data+syntax[i].data_offset) = syntax[i].def.f;
            break;
        case EBML_STR:
        case EBML_UTF8:
            // the default may be NULL
            if (syntax[i].def.s) {
                uint8_t **dst = (uint8_t**)((uint8_t*)data + syntax[i].data_offset);
                *dst = av_strdup(syntax[i].def.s);
                if (!*dst)
                    return AVERROR(ENOMEM);
            }
            break;
        }

    while (!res && !ebml_level_end(matroska))
        res = ebml_parse(matroska, syntax, data);

    return res;
}
 
/*
 * Parse a single element whose syntax entry has already been selected.
 * Scalars are decoded into the destination field, EBML_NEST recurses
 * into the sub-table, EBML_PASS re-dispatches the current ID into the
 * sub-table, EBML_STOP aborts the enclosing parse loop, and anything
 * else (unknown / EBML_NONE) is skipped.
 * Returns 0 on success, 1 for EBML_STOP, < 0 on error.
 */
static int ebml_parse_elem(MatroskaDemuxContext *matroska,
                           EbmlSyntax *syntax, void *data)
{
    static const uint64_t max_lengths[EBML_TYPE_COUNT] = {
        [EBML_UINT]  = 8,
        [EBML_FLOAT] = 8,
        // max. 16 MB for strings
        [EBML_STR]   = 0x1000000,
        [EBML_UTF8]  = 0x1000000,
        // max. 256 MB for binary data
        [EBML_BIN]   = 0x10000000,
        // no limits for anything else
    };
    AVIOContext *pb = matroska->ctx->pb;
    uint32_t id = syntax->id;
    uint64_t length;
    int res;
    void *newelem;

    data = (char *)data + syntax->data_offset;
    /* list destination: append a zeroed element and decode into it */
    if (syntax->list_elem_size) {
        EbmlList *list = data;
        newelem = av_realloc_array(list->elem, list->nb_elem+1, syntax->list_elem_size);
        if (!newelem)
            return AVERROR(ENOMEM);
        list->elem = newelem;
        data = (char*)list->elem + list->nb_elem*syntax->list_elem_size;
        memset(data, 0, syntax->list_elem_size);
        list->nb_elem++;
    }

    /* EBML_PASS and EBML_STOP reuse the already-read ID, so no length
     * field is consumed for them */
    if (syntax->type != EBML_PASS && syntax->type != EBML_STOP) {
        matroska->current_id = 0;
        if ((res = ebml_read_length(matroska, pb, &length)) < 0)
            return res;
        if (max_lengths[syntax->type] && length > max_lengths[syntax->type]) {
            av_log(matroska->ctx, AV_LOG_ERROR,
                   "Invalid length 0x%"PRIx64" > 0x%"PRIx64" for syntax element %i\n",
                   length, max_lengths[syntax->type], syntax->type);
            return AVERROR_INVALIDDATA;
        }
    }

    switch (syntax->type) {
    case EBML_UINT:  res = ebml_read_uint  (pb, length, data); break;
    case EBML_FLOAT: res = ebml_read_float (pb, length, data); break;
    case EBML_STR:
    case EBML_UTF8:  res = ebml_read_ascii (pb, length, data); break;
    case EBML_BIN:   res = ebml_read_binary(pb, length, data); break;
    case EBML_NEST:  if ((res=ebml_read_master(matroska, length)) < 0)
                         return res;
                     if (id == MATROSKA_ID_SEGMENT)
                         matroska->segment_start = avio_tell(matroska->ctx->pb);
                     return ebml_parse_nest(matroska, syntax->def.n, data);
    case EBML_PASS:  return ebml_parse_id(matroska, syntax->def.n, id, data);
    case EBML_STOP:  return 1;
    default:
        /* unknown or EBML_NONE: skip the payload */
        if(ffio_limit(pb, length) != length)
            return AVERROR(EIO);
        return avio_skip(pb,length)<0 ? AVERROR(EIO) : 0;
    }
    if (res == AVERROR_INVALIDDATA)
        av_log(matroska->ctx, AV_LOG_ERROR, "Invalid element\n");
    else if (res == AVERROR(EIO))
        av_log(matroska->ctx, AV_LOG_ERROR, "Read error\n");
    return res;
}
 
/*
 * Recursively free all heap data referenced by a parsed structure,
 * walking the same syntax table that was used to fill it.
 */
static void ebml_free(EbmlSyntax *syntax, void *data)
{
    int i, j;
    for (i=0; syntax[i].id; i++) {
        void *data_off = (char *)data + syntax[i].data_offset;
        switch (syntax[i].type) {
        case EBML_STR:
        case EBML_UTF8:  av_freep(data_off);                      break;
        case EBML_BIN:   av_freep(&((EbmlBin *)data_off)->data);  break;
        case EBML_NEST:
            if (syntax[i].list_elem_size) {
                EbmlList *list = data_off;
                char *ptr = list->elem;
                /* free each list element via the nested table, then the
                 * element array itself */
                for (j=0; j<list->nb_elem; j++, ptr+=syntax[i].list_elem_size)
                    ebml_free(syntax[i].def.n, ptr);
                av_free(list->elem);
            } else
                ebml_free(syntax[i].def.n, data_off);
        default:         break;
        }
    }
}
 
 
/*
 * Probe callback: recognize the stream as Matroska/WebM.
 */
static int matroska_probe(AVProbeData *p)
{
    uint64_t hdr_len;
    int len_bytes = 1, mask = 0x80, i, n;

    /* the stream must open with the EBML header magic */
    if (AV_RB32(p->buf) != EBML_ID_HEADER)
        return 0;

    /* decode the variable-size length descriptor of the header */
    hdr_len = p->buf[4];
    while (len_bytes <= 8 && !(hdr_len & mask)) {
        len_bytes++;
        mask >>= 1;
    }
    if (len_bytes > 8)
        return 0;
    hdr_len &= (mask - 1);
    for (n = 1; n < len_bytes; n++)
        hdr_len = (hdr_len << 8) | p->buf[4 + n];

    /* Does the probe data contain the whole header? */
    if (p->buf_size < 4 + len_bytes + hdr_len)
        return 0;

    /* The header should contain a known document type. For now,
     * we don't parse the whole header but simply check for the
     * availability of that array of characters inside the header.
     * Not fully fool-proof, but good enough. */
    for (i = 0; i < FF_ARRAY_ELEMS(matroska_doctypes); i++) {
        int probelen = strlen(matroska_doctypes[i]);
        if (hdr_len < probelen)
            continue;
        for (n = 4 + len_bytes; n <= 4 + len_bytes + hdr_len - probelen; n++)
            if (!memcmp(p->buf + n, matroska_doctypes[i], probelen))
                return AVPROBE_SCORE_MAX;
    }

    // probably valid EBML header but no recognized doctype
    return AVPROBE_SCORE_EXTENSION;
}
 
/*
 * Map a TrackNumber to its MatroskaTrack entry, or return NULL (with an
 * error log) if no track with that number exists.
 */
static MatroskaTrack *matroska_find_track_by_num(MatroskaDemuxContext *matroska,
                                                 int num)
{
    MatroskaTrack *tracks = matroska->tracks.elem;
    int i = 0;

    while (i < matroska->tracks.nb_elem) {
        if (tracks[i].num == num)
            return &tracks[i];
        i++;
    }

    av_log(matroska->ctx, AV_LOG_ERROR, "Invalid track number %d\n", num);
    return NULL;
}
 
/*
 * Reverse the track's ContentEncoding on a packet payload: either
 * re-prepend the stripped header bytes, or decompress with lzo/zlib/bzlib.
 *
 * On success, *buf / *buf_size are replaced by a newly allocated buffer
 * owned by the caller (except header stripping with an empty header,
 * which returns 0 and leaves them untouched) and 0 is returned.
 * On failure a negative error code (or -1 for some legacy paths) is
 * returned and *buf / *buf_size are left unchanged.
 */
static int matroska_decode_buffer(uint8_t** buf, int* buf_size,
                                  MatroskaTrack *track)
{
    MatroskaTrackEncoding *encodings = track->encodings.elem;
    uint8_t* data = *buf;
    int isize = *buf_size;
    uint8_t* pkt_data = NULL;
    uint8_t av_unused *newpktdata;
    int pkt_size = isize;
    int result = 0;
    int olen;

    /* cap the input so the *3 growth loops below cannot overflow int */
    if (pkt_size >= 10000000U)
        return AVERROR_INVALIDDATA;

    switch (encodings[0].compression.algo) {
    case MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP: {
        int header_size = encodings[0].compression.settings.size;
        uint8_t *header = encodings[0].compression.settings.data;

        if (header_size && !header) {
            av_log(NULL, AV_LOG_ERROR, "Compression size but no data in headerstrip\n");
            return -1;
        }

        /* nothing was stripped: caller keeps the original buffer */
        if (!header_size)
            return 0;

        pkt_size = isize + header_size;
        pkt_data = av_malloc(pkt_size);
        if (!pkt_data)
            return AVERROR(ENOMEM);

        memcpy(pkt_data, header, header_size);
        memcpy(pkt_data + header_size, data, isize);
        break;
    }
#if CONFIG_LZO
    case MATROSKA_TRACK_ENCODING_COMP_LZO:
        /* grow the output buffer until the decoder no longer overflows it */
        do {
            olen = pkt_size *= 3;
            newpktdata = av_realloc(pkt_data, pkt_size + AV_LZO_OUTPUT_PADDING);
            if (!newpktdata) {
                result = AVERROR(ENOMEM);
                goto failed;
            }
            pkt_data = newpktdata;
            result = av_lzo1x_decode(pkt_data, &olen, data, &isize);
        } while (result==AV_LZO_OUTPUT_FULL && pkt_size<10000000);
        if (result) {
            result = AVERROR_INVALIDDATA;
            goto failed;
        }
        /* olen holds the unused tail; the decoded size is the difference */
        pkt_size -= olen;
        break;
#endif
#if CONFIG_ZLIB
    case MATROSKA_TRACK_ENCODING_COMP_ZLIB: {
        z_stream zstream = {0};
        if (inflateInit(&zstream) != Z_OK)
            return -1;
        zstream.next_in = data;
        zstream.avail_in = isize;
        do {
            pkt_size *= 3;
            newpktdata = av_realloc(pkt_data, pkt_size);
            if (!newpktdata) {
                inflateEnd(&zstream);
                /* bug fix: without this, result could still be Z_OK (0)
                 * and the allocation failure would be reported as success */
                result = AVERROR(ENOMEM);
                goto failed;
            }
            pkt_data = newpktdata;
            zstream.avail_out = pkt_size - zstream.total_out;
            zstream.next_out  = pkt_data + zstream.total_out;
            result = inflate(&zstream, Z_NO_FLUSH);
        } while (result==Z_OK && pkt_size<10000000);
        pkt_size = zstream.total_out;
        inflateEnd(&zstream);
        if (result != Z_STREAM_END) {
            if (result == Z_MEM_ERROR)
                result = AVERROR(ENOMEM);
            else
                result = AVERROR_INVALIDDATA;
            goto failed;
        }
        break;
    }
#endif
#if CONFIG_BZLIB
    case MATROSKA_TRACK_ENCODING_COMP_BZLIB: {
        bz_stream bzstream = {0};
        if (BZ2_bzDecompressInit(&bzstream, 0, 0) != BZ_OK)
            return -1;
        bzstream.next_in = data;
        bzstream.avail_in = isize;
        do {
            pkt_size *= 3;
            newpktdata = av_realloc(pkt_data, pkt_size);
            if (!newpktdata) {
                BZ2_bzDecompressEnd(&bzstream);
                /* bug fix: same as the zlib path above — propagate ENOMEM
                 * instead of a stale BZ_OK */
                result = AVERROR(ENOMEM);
                goto failed;
            }
            pkt_data = newpktdata;
            bzstream.avail_out = pkt_size - bzstream.total_out_lo32;
            bzstream.next_out  = pkt_data + bzstream.total_out_lo32;
            result = BZ2_bzDecompress(&bzstream);
        } while (result==BZ_OK && pkt_size<10000000);
        pkt_size = bzstream.total_out_lo32;
        BZ2_bzDecompressEnd(&bzstream);
        if (result != BZ_STREAM_END) {
            if (result == BZ_MEM_ERROR)
                result = AVERROR(ENOMEM);
            else
                result = AVERROR_INVALIDDATA;
            goto failed;
        }
        break;
    }
#endif
    default:
        return AVERROR_INVALIDDATA;
    }

    *buf = pkt_data;
    *buf_size = pkt_size;
    return 0;

failed:
    av_free(pkt_data);
    return result;
}
 
#if FF_API_ASS_SSA
/**
 * Rewrite a Matroska SSA/ASS packet into a full "Dialogue:" line.
 *
 * Matroska stores the event without timestamps; this skips the leading
 * read-order field, then reformats layer + start/end times (derived from
 * pkt->pts and display_duration, converted to centiseconds) + the rest of
 * the original payload into a new buffer that replaces pkt->buf/data/size.
 * On allocation failure the packet is left unmodified.
 */
static void matroska_fix_ass_packet(MatroskaDemuxContext *matroska,
                                    AVPacket *pkt, uint64_t display_duration)
{
    AVBufferRef *line;
    char *layer, *ptr = pkt->data, *end = ptr+pkt->size;
    /* skip the first comma-separated field (read order) */
    for (; *ptr!=',' && ptr<end-1; ptr++);
    if (*ptr == ',')
        ptr++;
    layer = ptr;
    /* find the end of the layer field */
    for (; *ptr!=',' && ptr<end-1; ptr++);
    if (*ptr == ',') {
        int64_t end_pts = pkt->pts + display_duration;
        /* pts is in track time base; time_scale/1e7 yields centiseconds */
        int sc = matroska->time_scale * pkt->pts / 10000000;
        int ec = matroska->time_scale * end_pts / 10000000;
        int sh, sm, ss, eh, em, es, len;
        /* split start/end centiseconds into h:mm:ss.cc components */
        sh = sc/360000;  sc -= 360000*sh;
        sm = sc/  6000;  sc -=   6000*sm;
        ss = sc/   100;  sc -=    100*ss;
        eh = ec/360000;  ec -= 360000*eh;
        em = ec/  6000;  ec -=   6000*em;
        es = ec/   100;  ec -=    100*es;
        *ptr++ = '\0';   /* terminate the layer string in place */
        len = 50 + end-ptr + FF_INPUT_BUFFER_PADDING_SIZE;
        if (!(line = av_buffer_alloc(len)))
            return;
        snprintf(line->data, len,"Dialogue: %s,%d:%02d:%02d.%02d,%d:%02d:%02d.%02d,%s\r\n",
                 layer, sh, sm, ss, sc, eh, em, es, ec, ptr);
        /* swap the new buffer into the packet */
        av_buffer_unref(&pkt->buf);
        pkt->buf  = line;
        pkt->data = line->data;
        pkt->size = strlen(line->data);
    }
}
 
/* Append the payload of `in` to `out`, then release `in` entirely
 * (its data and the AVPacket struct itself).
 * Returns 0 on success or a negative error code from av_grow_packet(). */
static int matroska_merge_packets(AVPacket *out, AVPacket *in)
{
    uint8_t *dst;
    int err;

    err = av_grow_packet(out, in->size);
    if (err < 0)
        return err;

    dst = out->data + out->size - in->size;
    memcpy(dst, in->data, in->size);

    av_free_packet(in);
    av_free(in);
    return 0;
}
#endif
 
/**
 * Flatten one list of Matroska tags into an AVDictionary.
 *
 * Each tag becomes a metadata entry keyed by its TagName, prefixed with
 * "prefix/" when recursing into sub-tag lists.  The plain key is written
 * for default tags or tags without a meaningful language; a language-
 * qualified "key-lang" entry is additionally written when a language is
 * set.  Sub-tags recurse with the current key as the new prefix.
 */
static void matroska_convert_tag(AVFormatContext *s, EbmlList *list,
                                 AVDictionary **metadata, char *prefix)
{
    MatroskaTag *tags = list->elem;
    char key[1024];
    int i;

    for (i=0; i < list->nb_elem; i++) {
        /* "und" (undetermined) is treated the same as no language */
        const char *lang = tags[i].lang && strcmp(tags[i].lang, "und") ?
            tags[i].lang : NULL;

        if (!tags[i].name) {
            av_log(s, AV_LOG_WARNING, "Skipping invalid tag with no TagName.\n");
            continue;
        }
        if (prefix) snprintf(key, sizeof(key), "%s/%s", prefix, tags[i].name);
        else        av_strlcpy(key, tags[i].name, sizeof(key));
        if (tags[i].def || !lang) {
            av_dict_set(metadata, key, tags[i].string, 0);
            if (tags[i].sub.nb_elem)
                matroska_convert_tag(s, &tags[i].sub, metadata, key);
        }
        if (lang) {
            /* also store the language-qualified variant of the key */
            av_strlcat(key, "-", sizeof(key));
            av_strlcat(key, lang, sizeof(key));
            av_dict_set(metadata, key, tags[i].string, 0);
            if (tags[i].sub.nb_elem)
                matroska_convert_tag(s, &tags[i].sub, metadata, key);
        }
    }
    /* map Matroska tag names onto generic FFmpeg metadata keys */
    ff_metadata_conv(metadata, NULL, ff_mkv_metadata_conv);
}
 
/* Dispatch every parsed tag list to the metadata dictionary of its
 * target: a specific attachment, chapter or track when a matching UID
 * exists, or the container-level metadata otherwise. */
static void matroska_convert_tags(AVFormatContext *s)
{
    MatroskaDemuxContext *matroska = s->priv_data;
    MatroskaTags *tags = matroska->tags.elem;
    int i, k;

    for (i = 0; i < matroska->tags.nb_elem; i++) {
        MatroskaTags *t = &tags[i];

        if (t->target.attachuid) {
            MatroskaAttachement *att = matroska->attachments.elem;
            for (k = 0; k < matroska->attachments.nb_elem; k++) {
                if (att[k].uid != t->target.attachuid || !att[k].stream)
                    continue;
                matroska_convert_tag(s, &t->tag,
                                     &att[k].stream->metadata, NULL);
            }
        } else if (t->target.chapteruid) {
            MatroskaChapter *chap = matroska->chapters.elem;
            for (k = 0; k < matroska->chapters.nb_elem; k++) {
                if (chap[k].uid != t->target.chapteruid || !chap[k].chapter)
                    continue;
                matroska_convert_tag(s, &t->tag,
                                     &chap[k].chapter->metadata, NULL);
            }
        } else if (t->target.trackuid) {
            MatroskaTrack *trk = matroska->tracks.elem;
            for (k = 0; k < matroska->tracks.nb_elem; k++) {
                if (trk[k].uid != t->target.trackuid || !trk[k].stream)
                    continue;
                matroska_convert_tag(s, &t->tag,
                                     &trk[k].stream->metadata, NULL);
            }
        } else {
            /* no specific target: attach to the container metadata */
            matroska_convert_tag(s, &t->tag, &s->metadata, t->target.type);
        }
    }
}
 
/**
 * Seek to and parse the level-1 element referenced by SeekHead entry idx,
 * then restore the previous file position and parser state.
 *
 * SeekHead and Cluster entries are deliberately skipped (return 0): the
 * former to avoid recursing into index tables, the latter because clusters
 * are parsed during normal packet reading.  Out-of-range idx also returns 0.
 *
 * @return 0 on success or skip, a negative error code when parsing at the
 *         referenced offset failed or the EBML depth limit was hit.
 */
static int matroska_parse_seekhead_entry(MatroskaDemuxContext *matroska, int idx)
{
    EbmlList *seekhead_list = &matroska->seekhead;
    MatroskaSeekhead *seekhead = seekhead_list->elem;
    /* save parser state so we can resume exactly where we were */
    uint32_t level_up = matroska->level_up;
    int64_t before_pos = avio_tell(matroska->ctx->pb);
    uint32_t saved_id = matroska->current_id;
    MatroskaLevel level;
    int64_t offset;
    int ret = 0;

    if (idx >= seekhead_list->nb_elem
            || seekhead[idx].id == MATROSKA_ID_SEEKHEAD
            || seekhead[idx].id == MATROSKA_ID_CLUSTER)
        return 0;

    /* seek */
    offset = seekhead[idx].pos + matroska->segment_start;
    if (avio_seek(matroska->ctx->pb, offset, SEEK_SET) == offset) {
        /* We don't want to lose our seekhead level, so we add
         * a dummy. This is a crude hack. */
        if (matroska->num_levels == EBML_MAX_DEPTH) {
            av_log(matroska->ctx, AV_LOG_INFO,
                   "Max EBML element depth (%d) reached, "
                   "cannot parse further.\n", EBML_MAX_DEPTH);
            ret = AVERROR_INVALIDDATA;
        } else {
            /* dummy level with "unknown" length marker */
            level.start = 0;
            level.length = (uint64_t)-1;
            matroska->levels[matroska->num_levels] = level;
            matroska->num_levels++;
            matroska->current_id = 0;

            ret = ebml_parse(matroska, matroska_segment, matroska);

            /* remove dummy level */
            while (matroska->num_levels) {
                uint64_t length = matroska->levels[--matroska->num_levels].length;
                if (length == (uint64_t)-1)
                    break;
            }
        }
    }
    /* seek back */
    avio_seek(matroska->ctx->pb, before_pos, SEEK_SET);
    matroska->level_up = level_up;
    matroska->current_id = saved_id;

    return ret;
}
 
/* Walk the SeekHead and parse every referenced element that lies beyond
 * the current position.  Cues are not parsed here — only flagged for
 * deferred parsing.  A failing entry marks the whole index as broken.
 * No-op for non-seekable input or when index use is disabled. */
static void matroska_execute_seekhead(MatroskaDemuxContext *matroska)
{
    EbmlList *seekhead_list = &matroska->seekhead;
    int64_t before_pos = avio_tell(matroska->ctx->pb);
    int i;

    /* we should not do any seeking in the streaming case */
    if (!matroska->ctx->pb->seekable ||
        (matroska->ctx->flags & AVFMT_FLAG_IGNIDX))
        return;

    for (i = 0; i < seekhead_list->nb_elem; i++) {
        /* re-fetch elem each pass: parsing may append entries and
         * reallocate the array */
        MatroskaSeekhead *seekhead = seekhead_list->elem;

        if (seekhead[i].pos <= before_pos)
            continue;

        if (seekhead[i].id == MATROSKA_ID_CUES) {
            /* defer cues parsing until we actually need cue data */
            matroska->cues_parsing_deferred = 1;
            continue;
        }

        if (matroska_parse_seekhead_entry(matroska, i) < 0) {
            /* mark index as broken */
            matroska->cues_parsing_deferred = -1;
            break;
        }
    }
}
 
/* Turn the parsed Cues into av_add_index_entry() calls on the matching
 * streams.  Detects files whose cue times were mistakenly written in
 * time_scale units and rescales them ("broken index" workaround). */
static void matroska_add_index_entries(MatroskaDemuxContext *matroska) {
    EbmlList *index_list = &matroska->index;
    MatroskaIndex *index = index_list->elem;
    int index_scale = 1;
    int i, j;

    if (index_list->nb_elem
        && index[0].time > 1E14/matroska->time_scale) {
        av_log(matroska->ctx, AV_LOG_WARNING, "Working around broken index.\n");
        index_scale = matroska->time_scale;
    }

    for (i = 0; i < index_list->nb_elem; i++) {
        EbmlList *pos_list = &index[i].pos;
        MatroskaIndexPos *pos = pos_list->elem;

        for (j = 0; j < pos_list->nb_elem; j++) {
            MatroskaTrack *track =
                matroska_find_track_by_num(matroska, pos[j].track);
            if (!track || !track->stream)
                continue;
            av_add_index_entry(track->stream,
                               pos[j].pos + matroska->segment_start,
                               index[i].time / index_scale, 0, 0,
                               AVINDEX_KEYFRAME);
        }
    }
}
 
/* Parse the (deferred) Cues element referenced by the SeekHead and add
 * the resulting index entries.  If no Cues entry exists, the out-of-range
 * index is harmlessly rejected by matroska_parse_seekhead_entry(). */
static void matroska_parse_cues(MatroskaDemuxContext *matroska) {
    EbmlList *seekhead_list = &matroska->seekhead;
    MatroskaSeekhead *seekhead = seekhead_list->elem;
    int idx = 0;

    while (idx < seekhead_list->nb_elem &&
           seekhead[idx].id != MATROSKA_ID_CUES)
        idx++;
    av_assert1(idx <= seekhead_list->nb_elem);

    if (matroska_parse_seekhead_entry(matroska, idx) < 0)
        matroska->cues_parsing_deferred = -1;   /* index is broken */
    matroska_add_index_entries(matroska);
}
 
/* Map an AAC codec-id string onto a profile index by substring match:
 * "MAIN" -> 1, "LC" -> 2, "SSR" -> 3; 4 when none of them occurs. */
static int matroska_aac_profile(char *codec_id)
{
    static const char * const aac_profiles[] = { "MAIN", "LC", "SSR" };
    size_t p = 0;

    while (p < sizeof(aac_profiles)/sizeof(aac_profiles[0]) &&
           !strstr(codec_id, aac_profiles[p]))
        p++;
    return (int)p + 1;
}
 
/* Return the MPEG-4 audio sampling-rate index for samplerate; when the
 * rate is not a standard one, returns the table length (one past end). */
static int matroska_aac_sri(int samplerate)
{
    int idx = 0;

    while (idx < FF_ARRAY_ELEMS(avpriv_mpeg4audio_sample_rates) &&
           avpriv_mpeg4audio_sample_rates[idx] != samplerate)
        idx++;
    return idx;
}
 
/* Store a Matroska DateUTC value (nanoseconds since 2001-01-01) as a
 * "creation_time" metadata entry formatted "YYYY-MM-DD HH:MM:SS". */
static void matroska_metadata_creation_time(AVDictionary **metadata, int64_t date_utc)
{
    char buffer[32];
    /* 978307200 = seconds between 2001-01-01 and the Unix epoch */
    time_t creation_time = date_utc / 1000000000 + 978307200;
    struct tm *ptm = gmtime(&creation_time);

    if (!ptm)
        return;
    strftime(buffer, sizeof(buffer), "%Y-%m-%d %H:%M:%S", ptm);
    av_dict_set(metadata, "creation_time", buffer, 0);
}
 
/**
 * Read the EBML header and the segment's level-1 elements, then create
 * AVStreams for all tracks, attachments and chapters and export the
 * global metadata.
 *
 * Fix: the TTA branch leaked the freshly allocated `extradata` buffer
 * when bailing out on an invalid output sample rate; it is now freed
 * before returning AVERROR_INVALIDDATA.
 *
 * @return 0 on success, a negative error code on failure.
 */
static int matroska_read_header(AVFormatContext *s)
{
    MatroskaDemuxContext *matroska = s->priv_data;
    EbmlList *attachements_list = &matroska->attachments;
    MatroskaAttachement *attachements;
    EbmlList *chapters_list = &matroska->chapters;
    MatroskaChapter *chapters;
    MatroskaTrack *tracks;
    uint64_t max_start = 0;
    int64_t pos;
    Ebml ebml = { 0 };
    AVStream *st;
    int i, j, k, res;

    matroska->ctx = s;

    /* First read the EBML header. */
    if (ebml_parse(matroska, ebml_syntax, &ebml)
        || ebml.version > EBML_VERSION || ebml.max_size > sizeof(uint64_t)
        || ebml.id_length > sizeof(uint32_t) || ebml.doctype_version > 3 || !ebml.doctype) {
        av_log(matroska->ctx, AV_LOG_ERROR,
               "EBML header using unsupported features\n"
               "(EBML version %"PRIu64", doctype %s, doc version %"PRIu64")\n",
               ebml.version, ebml.doctype, ebml.doctype_version);
        ebml_free(ebml_syntax, &ebml);
        return AVERROR_PATCHWELCOME;
    } else if (ebml.doctype_version == 3) {
        av_log(matroska->ctx, AV_LOG_WARNING,
               "EBML header using unsupported features\n"
               "(EBML version %"PRIu64", doctype %s, doc version %"PRIu64")\n",
               ebml.version, ebml.doctype, ebml.doctype_version);
    }
    for (i = 0; i < FF_ARRAY_ELEMS(matroska_doctypes); i++)
        if (!strcmp(ebml.doctype, matroska_doctypes[i]))
            break;
    if (i >= FF_ARRAY_ELEMS(matroska_doctypes)) {
        av_log(s, AV_LOG_WARNING, "Unknown EBML doctype '%s'\n", ebml.doctype);
        if (matroska->ctx->error_recognition & AV_EF_EXPLODE) {
            ebml_free(ebml_syntax, &ebml);
            return AVERROR_INVALIDDATA;
        }
    }
    ebml_free(ebml_syntax, &ebml);

    /* The next thing is a segment. */
    pos = avio_tell(matroska->ctx->pb);
    res = ebml_parse(matroska, matroska_segments, matroska);
    /* try resyncing until we find a EBML_STOP type element. */
    while (res != 1) {
        res = matroska_resync(matroska, pos);
        if (res < 0)
            return res;
        pos = avio_tell(matroska->ctx->pb);
        res = ebml_parse(matroska, matroska_segment, matroska);
    }
    matroska_execute_seekhead(matroska);

    if (!matroska->time_scale)
        matroska->time_scale = 1000000;   /* Matroska default: 1 ms */
    if (matroska->duration)
        matroska->ctx->duration = matroska->duration * matroska->time_scale
                                  * 1000 / AV_TIME_BASE;
    av_dict_set(&s->metadata, "title", matroska->title, 0);

    if (matroska->date_utc.size == 8)
        matroska_metadata_creation_time(&s->metadata, AV_RB64(matroska->date_utc.data));

    /* Set up one AVStream per track. */
    tracks = matroska->tracks.elem;
    for (i=0; i < matroska->tracks.nb_elem; i++) {
        MatroskaTrack *track = &tracks[i];
        enum AVCodecID codec_id = AV_CODEC_ID_NONE;
        EbmlList *encodings_list = &track->encodings;
        MatroskaTrackEncoding *encodings = encodings_list->elem;
        uint8_t *extradata = NULL;
        int extradata_size = 0;
        int extradata_offset = 0;
        uint32_t fourcc = 0;
        AVIOContext b;
        char* key_id_base64 = NULL;

        /* Apply some sanity checks. */
        if (track->type != MATROSKA_TRACK_TYPE_VIDEO &&
            track->type != MATROSKA_TRACK_TYPE_AUDIO &&
            track->type != MATROSKA_TRACK_TYPE_SUBTITLE &&
            track->type != MATROSKA_TRACK_TYPE_METADATA) {
            av_log(matroska->ctx, AV_LOG_INFO,
                   "Unknown or unsupported track type %"PRIu64"\n",
                   track->type);
            continue;
        }
        if (track->codec_id == NULL)
            continue;

        if (track->type == MATROSKA_TRACK_TYPE_VIDEO) {
            if (!track->default_duration && track->video.frame_rate > 0)
                track->default_duration = 1000000000/track->video.frame_rate;
            if (track->video.display_width == -1)
                track->video.display_width = track->video.pixel_width;
            if (track->video.display_height == -1)
                track->video.display_height = track->video.pixel_height;
            if (track->video.color_space.size == 4)
                fourcc = AV_RL32(track->video.color_space.data);
        } else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) {
            if (!track->audio.out_samplerate)
                track->audio.out_samplerate = track->audio.samplerate;
        }

        /* Handle content encodings (encryption / compression). */
        if (encodings_list->nb_elem > 1) {
            av_log(matroska->ctx, AV_LOG_ERROR,
                   "Multiple combined encodings not supported");
        } else if (encodings_list->nb_elem == 1) {
            if (encodings[0].type) {
                if (encodings[0].encryption.key_id.size > 0) {
                    /* Save the encryption key id to be stored later as a
                       metadata tag. */
                    const int b64_size = AV_BASE64_SIZE(encodings[0].encryption.key_id.size);
                    key_id_base64 = av_malloc(b64_size);
                    if (key_id_base64 == NULL)
                        return AVERROR(ENOMEM);

                    av_base64_encode(key_id_base64, b64_size,
                                     encodings[0].encryption.key_id.data,
                                     encodings[0].encryption.key_id.size);
                } else {
                    encodings[0].scope = 0;
                    av_log(matroska->ctx, AV_LOG_ERROR,
                           "Unsupported encoding type");
                }
            } else if (
#if CONFIG_ZLIB
                encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_ZLIB &&
#endif
#if CONFIG_BZLIB
                encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_BZLIB &&
#endif
#if CONFIG_LZO
                encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_LZO &&
#endif
                encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP) {
                encodings[0].scope = 0;
                av_log(matroska->ctx, AV_LOG_ERROR,
                       "Unsupported encoding type");
            } else if (track->codec_priv.size && encodings[0].scope&2) {
                /* scope bit 2: the encoding also applies to codec private data */
                uint8_t *codec_priv = track->codec_priv.data;
                int ret = matroska_decode_buffer(&track->codec_priv.data,
                                                 &track->codec_priv.size,
                                                 track);
                if (ret < 0) {
                    track->codec_priv.data = NULL;
                    track->codec_priv.size = 0;
                    av_log(matroska->ctx, AV_LOG_ERROR,
                           "Failed to decode codec private data\n");
                }

                if (codec_priv != track->codec_priv.data)
                    av_free(codec_priv);
            }
        }

        /* Map the Matroska codec id string onto an AVCodecID (prefix match). */
        for(j=0; ff_mkv_codec_tags[j].id != AV_CODEC_ID_NONE; j++){
            if(!strncmp(ff_mkv_codec_tags[j].str, track->codec_id,
                        strlen(ff_mkv_codec_tags[j].str))){
                codec_id= ff_mkv_codec_tags[j].id;
                break;
            }
        }

        st = track->stream = avformat_new_stream(s, NULL);
        if (st == NULL) {
            av_free(key_id_base64);
            return AVERROR(ENOMEM);
        }

        if (key_id_base64) {
            /* export encryption key id as base64 metadata tag */
            av_dict_set(&st->metadata, "enc_key_id", key_id_base64, 0);
            av_freep(&key_id_base64);
        }

        /* Codec-specific setup of extradata / fourcc / parameters. */
        if (!strcmp(track->codec_id, "V_MS/VFW/FOURCC")
            && track->codec_priv.size >= 40
            && track->codec_priv.data != NULL) {
            track->ms_compat = 1;
            fourcc = AV_RL32(track->codec_priv.data + 16);
            codec_id = ff_codec_get_id(ff_codec_bmp_tags, fourcc);
            extradata_offset = 40;
        } else if (!strcmp(track->codec_id, "A_MS/ACM")
                   && track->codec_priv.size >= 14
                   && track->codec_priv.data != NULL) {
            int ret;
            ffio_init_context(&b, track->codec_priv.data, track->codec_priv.size,
                              0, NULL, NULL, NULL, NULL);
            ret = ff_get_wav_header(&b, st->codec, track->codec_priv.size);
            if (ret < 0)
                return ret;
            codec_id = st->codec->codec_id;
            extradata_offset = FFMIN(track->codec_priv.size, 18);
        } else if (!strcmp(track->codec_id, "V_QUICKTIME")
                   && (track->codec_priv.size >= 86)
                   && (track->codec_priv.data != NULL)) {
            fourcc = AV_RL32(track->codec_priv.data);
            codec_id = ff_codec_get_id(ff_codec_movvideo_tags, fourcc);
        } else if (codec_id == AV_CODEC_ID_PCM_S16BE) {
            switch (track->audio.bitdepth) {
            case 8:  codec_id = AV_CODEC_ID_PCM_U8;     break;
            case 24: codec_id = AV_CODEC_ID_PCM_S24BE;  break;
            case 32: codec_id = AV_CODEC_ID_PCM_S32BE;  break;
            }
        } else if (codec_id == AV_CODEC_ID_PCM_S16LE) {
            switch (track->audio.bitdepth) {
            case 8:  codec_id = AV_CODEC_ID_PCM_U8;     break;
            case 24: codec_id = AV_CODEC_ID_PCM_S24LE;  break;
            case 32: codec_id = AV_CODEC_ID_PCM_S32LE;  break;
            }
        } else if (codec_id==AV_CODEC_ID_PCM_F32LE && track->audio.bitdepth==64) {
            codec_id = AV_CODEC_ID_PCM_F64LE;
        } else if (codec_id == AV_CODEC_ID_AAC && !track->codec_priv.size) {
            /* Synthesize an AudioSpecificConfig from the codec id string. */
            int profile = matroska_aac_profile(track->codec_id);
            int sri = matroska_aac_sri(track->audio.samplerate);
            extradata = av_mallocz(5 + FF_INPUT_BUFFER_PADDING_SIZE);
            if (extradata == NULL)
                return AVERROR(ENOMEM);
            extradata[0] = (profile << 3) | ((sri&0x0E) >> 1);
            extradata[1] = ((sri&0x01) << 7) | (track->audio.channels<<3);
            if (strstr(track->codec_id, "SBR")) {
                sri = matroska_aac_sri(track->audio.out_samplerate);
                extradata[2] = 0x56;
                extradata[3] = 0xE5;
                extradata[4] = 0x80 | (sri<<3);
                extradata_size = 5;
            } else
                extradata_size = 2;
        } else if (codec_id == AV_CODEC_ID_ALAC && track->codec_priv.size && track->codec_priv.size < INT_MAX - 12 - FF_INPUT_BUFFER_PADDING_SIZE) {
            /* Only ALAC's magic cookie is stored in Matroska's track headers.
               Create the "atom size", "tag", and "tag version" fields the
               decoder expects manually. */
            extradata_size = 12 + track->codec_priv.size;
            extradata = av_mallocz(extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (extradata == NULL)
                return AVERROR(ENOMEM);
            AV_WB32(extradata, extradata_size);
            memcpy(&extradata[4], "alac", 4);
            AV_WB32(&extradata[8], 0);
            memcpy(&extradata[12], track->codec_priv.data,
                   track->codec_priv.size);
        } else if (codec_id == AV_CODEC_ID_TTA) {
            /* Rebuild the TTA1 file header the decoder expects. */
            extradata_size = 30;
            extradata = av_mallocz(extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (extradata == NULL)
                return AVERROR(ENOMEM);
            ffio_init_context(&b, extradata, extradata_size, 1,
                              NULL, NULL, NULL, NULL);
            avio_write(&b, "TTA1", 4);
            avio_wl16(&b, 1);
            avio_wl16(&b, track->audio.channels);
            avio_wl16(&b, track->audio.bitdepth);
            if (track->audio.out_samplerate < 0 || track->audio.out_samplerate > INT_MAX) {
                av_freep(&extradata);   /* fix: do not leak on invalid rate */
                return AVERROR_INVALIDDATA;
            }
            avio_wl32(&b, track->audio.out_samplerate);
            avio_wl32(&b, av_rescale((matroska->duration * matroska->time_scale), track->audio.out_samplerate, AV_TIME_BASE * 1000));
        } else if (codec_id == AV_CODEC_ID_RV10 || codec_id == AV_CODEC_ID_RV20 ||
                   codec_id == AV_CODEC_ID_RV30 || codec_id == AV_CODEC_ID_RV40) {
            extradata_offset = 26;
        } else if (codec_id == AV_CODEC_ID_RA_144) {
            track->audio.out_samplerate = 8000;
            track->audio.channels = 1;
        } else if ((codec_id == AV_CODEC_ID_RA_288 || codec_id == AV_CODEC_ID_COOK ||
                    codec_id == AV_CODEC_ID_ATRAC3 || codec_id == AV_CODEC_ID_SIPR)
                   && track->codec_priv.data) {
            /* Parse the RealAudio interleaving parameters from codec_priv. */
            int flavor;

            ffio_init_context(&b, track->codec_priv.data,track->codec_priv.size,
                              0, NULL, NULL, NULL, NULL);
            avio_skip(&b, 22);
            flavor                       = avio_rb16(&b);
            track->audio.coded_framesize = avio_rb32(&b);
            avio_skip(&b, 12);
            track->audio.sub_packet_h    = avio_rb16(&b);
            track->audio.frame_size      = avio_rb16(&b);
            track->audio.sub_packet_size = avio_rb16(&b);
            if (flavor <= 0 || track->audio.coded_framesize <= 0 ||
                track->audio.sub_packet_h <= 0 || track->audio.frame_size <= 0 ||
                track->audio.sub_packet_size <= 0)
                return AVERROR_INVALIDDATA;
            track->audio.buf = av_malloc_array(track->audio.sub_packet_h, track->audio.frame_size);
            if (!track->audio.buf)
                return AVERROR(ENOMEM);
            if (codec_id == AV_CODEC_ID_RA_288) {
                st->codec->block_align = track->audio.coded_framesize;
                track->codec_priv.size = 0;
            } else {
                if (codec_id == AV_CODEC_ID_SIPR && flavor < 4) {
                    static const int sipr_bit_rate[4] = { 6504, 8496, 5000, 16000 };
                    track->audio.sub_packet_size = ff_sipr_subpk_size[flavor];
                    st->codec->bit_rate = sipr_bit_rate[flavor];
                }
                st->codec->block_align = track->audio.sub_packet_size;
                extradata_offset = 78;
            }
        }
        track->codec_priv.size -= extradata_offset;

        if (codec_id == AV_CODEC_ID_NONE)
            av_log(matroska->ctx, AV_LOG_INFO,
                   "Unknown/unsupported AVCodecID %s.\n", track->codec_id);

        if (track->time_scale < 0.01)
            track->time_scale = 1.0;
        avpriv_set_pts_info(st, 64, matroska->time_scale*track->time_scale, 1000*1000*1000); /* 64 bit pts in ns */

        st->codec->codec_id = codec_id;
        st->start_time = 0;
        if (strcmp(track->language, "und"))
            av_dict_set(&st->metadata, "language", track->language, 0);
        av_dict_set(&st->metadata, "title", track->name, 0);

        if (track->flag_default)
            st->disposition |= AV_DISPOSITION_DEFAULT;
        if (track->flag_forced)
            st->disposition |= AV_DISPOSITION_FORCED;

        if (!st->codec->extradata) {
            if(extradata){
                st->codec->extradata = extradata;
                st->codec->extradata_size = extradata_size;
            } else if(track->codec_priv.data && track->codec_priv.size > 0){
                if (ff_alloc_extradata(st->codec, track->codec_priv.size))
                    return AVERROR(ENOMEM);
                memcpy(st->codec->extradata,
                       track->codec_priv.data + extradata_offset,
                       track->codec_priv.size);
            }
        }

        if (track->type == MATROSKA_TRACK_TYPE_VIDEO) {
            MatroskaTrackPlane *planes = track->operation.combine_planes.elem;

            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_tag  = fourcc;
            st->codec->width  = track->video.pixel_width;
            st->codec->height = track->video.pixel_height;
            av_reduce(&st->sample_aspect_ratio.num,
                      &st->sample_aspect_ratio.den,
                      st->codec->height * track->video.display_width,
                      st->codec-> width * track->video.display_height,
                      255);
            if (st->codec->codec_id != AV_CODEC_ID_HEVC)
                st->need_parsing = AVSTREAM_PARSE_HEADERS;
            if (track->default_duration) {
                av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
                          1000000000, track->default_duration, 30000);
#if FF_API_R_FRAME_RATE
                if (st->avg_frame_rate.num < st->avg_frame_rate.den * 1000L)
                    st->r_frame_rate = st->avg_frame_rate;
#endif
            }

            /* export stereo mode flag as metadata tag */
            if (track->video.stereo_mode && track->video.stereo_mode < MATROSKA_VIDEO_STEREO_MODE_COUNT)
                av_dict_set(&st->metadata, "stereo_mode", ff_matroska_video_stereo_mode[track->video.stereo_mode], 0);

            /* export alpha mode flag as metadata tag */
            if (track->video.alpha_mode)
                av_dict_set(&st->metadata, "alpha_mode", "1", 0);

            /* if we have virtual track, mark the real tracks */
            for (j=0; j < track->operation.combine_planes.nb_elem; j++) {
                char buf[32];
                if (planes[j].type >= MATROSKA_VIDEO_STEREO_PLANE_COUNT)
                    continue;
                snprintf(buf, sizeof(buf), "%s_%d",
                         ff_matroska_video_stereo_plane[planes[j].type], i);
                for (k=0; k < matroska->tracks.nb_elem; k++)
                    if (planes[j].uid == tracks[k].uid) {
                        av_dict_set(&s->streams[k]->metadata,
                                    "stereo_mode", buf, 0);
                        break;
                    }
            }
        } else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) {
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->sample_rate = track->audio.out_samplerate;
            st->codec->channels = track->audio.channels;
            st->codec->bits_per_coded_sample = track->audio.bitdepth;
            if (st->codec->codec_id != AV_CODEC_ID_AAC)
                st->need_parsing = AVSTREAM_PARSE_HEADERS;
            if (track->codec_delay > 0) {
                st->codec->delay = av_rescale_q(track->codec_delay,
                                                (AVRational){1, 1000000000},
                                                (AVRational){1, st->codec->sample_rate});
            }
            if (track->seek_preroll > 0) {
                av_codec_set_seek_preroll(st->codec,
                                          av_rescale_q(track->seek_preroll,
                                                       (AVRational){1, 1000000000},
                                                       (AVRational){1, st->codec->sample_rate}));
            }
        } else if (codec_id == AV_CODEC_ID_WEBVTT) {
            st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;

            if (!strcmp(track->codec_id, "D_WEBVTT/CAPTIONS")) {
                st->disposition |= AV_DISPOSITION_CAPTIONS;
            } else if (!strcmp(track->codec_id, "D_WEBVTT/DESCRIPTIONS")) {
                st->disposition |= AV_DISPOSITION_DESCRIPTIONS;
            } else if (!strcmp(track->codec_id, "D_WEBVTT/METADATA")) {
                st->disposition |= AV_DISPOSITION_METADATA;
            }
        } else if (track->type == MATROSKA_TRACK_TYPE_SUBTITLE) {
            st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
#if FF_API_ASS_SSA
            if (st->codec->codec_id == AV_CODEC_ID_SSA ||
                st->codec->codec_id == AV_CODEC_ID_ASS)
#else
            if (st->codec->codec_id == AV_CODEC_ID_ASS)
#endif
                matroska->contains_ssa = 1;
        }
    }

    /* Expose attachments as AVMEDIA_TYPE_ATTACHMENT streams. */
    attachements = attachements_list->elem;
    for (j=0; j<attachements_list->nb_elem; j++) {
        if (!(attachements[j].filename && attachements[j].mime &&
              attachements[j].bin.data && attachements[j].bin.size > 0)) {
            av_log(matroska->ctx, AV_LOG_ERROR, "incomplete attachment\n");
        } else {
            AVStream *st = avformat_new_stream(s, NULL);
            if (st == NULL)
                break;
            av_dict_set(&st->metadata, "filename",attachements[j].filename, 0);
            av_dict_set(&st->metadata, "mimetype", attachements[j].mime, 0);
            st->codec->codec_id = AV_CODEC_ID_NONE;
            st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT;
            if (ff_alloc_extradata(st->codec, attachements[j].bin.size))
                break;
            memcpy(st->codec->extradata, attachements[j].bin.data, attachements[j].bin.size);

            for (i=0; ff_mkv_mime_tags[i].id != AV_CODEC_ID_NONE; i++) {
                if (!strncmp(ff_mkv_mime_tags[i].str, attachements[j].mime,
                             strlen(ff_mkv_mime_tags[i].str))) {
                    st->codec->codec_id = ff_mkv_mime_tags[i].id;
                    break;
                }
            }
            attachements[j].stream = st;
        }
    }

    /* Create AVChapters; only forward-ordered chapters are kept. */
    chapters = chapters_list->elem;
    for (i=0; i<chapters_list->nb_elem; i++)
        if (chapters[i].start != AV_NOPTS_VALUE && chapters[i].uid
            && (max_start==0 || chapters[i].start > max_start)) {
            chapters[i].chapter =
                avpriv_new_chapter(s, chapters[i].uid, (AVRational){1, 1000000000},
                                   chapters[i].start, chapters[i].end,
                                   chapters[i].title);
            av_dict_set(&chapters[i].chapter->metadata,
                        "title", chapters[i].title, 0);
            max_start = chapters[i].start;
        }

    matroska_add_index_entries(matroska);

    matroska_convert_tags(s);

    return 0;
}
 
/*
* Put one packet in an application-supplied AVPacket struct.
* Returns 0 on success or -1 on failure.
*/
/*
 * Hand the oldest queued packet over to the caller and shrink the queue.
 * Returns 0 on success or -1 when the queue is empty.
 */
static int matroska_deliver_packet(MatroskaDemuxContext *matroska,
                                   AVPacket *pkt)
{
    if (matroska->num_packets <= 0)
        return -1;

    /* copy the struct out, then free the queued wrapper */
    memcpy(pkt, matroska->packets[0], sizeof(AVPacket));
    av_free(matroska->packets[0]);

    if (matroska->num_packets > 1) {
        void *shrunk;
        memmove(&matroska->packets[0], &matroska->packets[1],
                (matroska->num_packets - 1) * sizeof(AVPacket *));
        shrunk = av_realloc(matroska->packets,
                            (matroska->num_packets - 1) * sizeof(AVPacket *));
        /* a failed shrink is harmless: keep the larger array */
        if (shrunk)
            matroska->packets = shrunk;
    } else {
        av_freep(&matroska->packets);
        matroska->prev_pkt = NULL;
    }
    matroska->num_packets--;
    return 0;
}
 
/*
* Free all packets in our internal queue.
*/
/* Free every packet queued inside the demuxer and reset the queue. */
static void matroska_clear_queue(MatroskaDemuxContext *matroska)
{
    int idx;

    matroska->prev_pkt = NULL;
    if (!matroska->packets)
        return;

    for (idx = 0; idx < matroska->num_packets; idx++) {
        av_free_packet(matroska->packets[idx]);
        av_free(matroska->packets[idx]);
    }
    av_freep(&matroska->packets);
    matroska->num_packets = 0;
}
 
/**
 * Parse the lacing information of a Matroska block.
 *
 * @param buf      in: start of the lacing header; out: advanced past it
 * @param buf_size in: bytes remaining; out: bytes left for the frame data
 * @param type     lacing type: 0 none, 1 Xiph, 2 fixed-size, 3 EBML
 * @param lace_buf on success: freshly allocated array of frame sizes
 *                 (caller frees)
 * @param laces    on success: number of laced frames
 * @return 0 on success, a negative error code on malformed lacing
 */
static int matroska_parse_laces(MatroskaDemuxContext *matroska, uint8_t **buf,
                                int* buf_size, int type,
                                uint32_t **lace_buf, int *laces)
{
    int res = 0, n, size = *buf_size;
    uint8_t *data = *buf;
    uint32_t *lace_size;

    if (!type) {
        /* no lacing: a single frame covering the whole payload */
        *laces = 1;
        *lace_buf = av_mallocz(sizeof(int));
        if (!*lace_buf)
            return AVERROR(ENOMEM);

        *lace_buf[0] = size;
        return 0;
    }

    av_assert0(size > 0);
    /* first byte of the lacing header is (frame count - 1) */
    *laces = *data + 1;
    data += 1;
    size -= 1;
    lace_size = av_mallocz(*laces * sizeof(int));
    if (!lace_size)
        return AVERROR(ENOMEM);

    switch (type) {
    case 0x1: /* Xiph lacing */ {
        /* each size is a run of 0xff bytes plus a terminating byte;
         * the last frame's size is implicit (remainder) */
        uint8_t temp;
        uint32_t total = 0;
        for (n = 0; res == 0 && n < *laces - 1; n++) {
            while (1) {
                if (size <= total) {
                    res = AVERROR_INVALIDDATA;
                    break;
                }
                temp = *data;
                total += temp;
                lace_size[n] += temp;
                data += 1;
                size -= 1;
                if (temp != 0xff)
                    break;
            }
        }
        if (size <= total) {
            res = AVERROR_INVALIDDATA;
            break;
        }

        lace_size[n] = size - total;
        break;
    }

    case 0x2: /* fixed-size lacing */
        /* payload must divide evenly among the frames */
        if (size % (*laces)) {
            res = AVERROR_INVALIDDATA;
            break;
        }
        for (n = 0; n < *laces; n++)
            lace_size[n] = size / *laces;
        break;

    case 0x3: /* EBML lacing */ {
        /* first size is an EBML uint; the rest are signed deltas from
         * the previous size; the last frame gets the remainder */
        uint64_t num;
        uint64_t total;
        n = matroska_ebmlnum_uint(matroska, data, size, &num);
        if (n < 0 || num > INT_MAX) {
            av_log(matroska->ctx, AV_LOG_INFO,
                   "EBML block data error\n");
            res = n<0 ? n : AVERROR_INVALIDDATA;
            break;
        }
        data += n;
        size -= n;
        total = lace_size[0] = num;
        for (n = 1; res == 0 && n < *laces - 1; n++) {
            int64_t snum;
            int r;
            r = matroska_ebmlnum_sint(matroska, data, size, &snum);
            if (r < 0 || lace_size[n - 1] + snum > (uint64_t)INT_MAX) {
                av_log(matroska->ctx, AV_LOG_INFO,
                       "EBML block data error\n");
                res = r<0 ? r : AVERROR_INVALIDDATA;
                break;
            }
            data += r;
            size -= r;
            lace_size[n] = lace_size[n - 1] + snum;
            total += lace_size[n];
        }
        if (size <= total) {
            res = AVERROR_INVALIDDATA;
            break;
        }
        lace_size[*laces - 1] = size - total;
        break;
    }
    }

    *buf = data;
    *lace_buf = lace_size;
    *buf_size = size;

    return res;
}
 
/**
 * De-interleave RealAudio-style audio (RA 28.8 / SIPR / COOK / ATRAC3)
 * carried in Matroska.
 *
 * Sub-packets are collected into track->audio.buf across successive calls
 * using the interleaving scheme selected by the codec id; once a full
 * super-block of sub_packet_h rows is assembled, it is split into
 * block_align-sized packets that are appended to the demuxer queue.
 *
 * @return 0 on success, a negative error code on truncated input or OOM.
 */
static int matroska_parse_rm_audio(MatroskaDemuxContext *matroska,
                                   MatroskaTrack *track,
                                   AVStream *st,
                                   uint8_t *data, int size,
                                   uint64_t timecode,
                                   int64_t pos)
{
    int a = st->codec->block_align;
    int sps = track->audio.sub_packet_size;
    int cfs = track->audio.coded_framesize;
    int h = track->audio.sub_packet_h;
    int y = track->audio.sub_packet_cnt;   /* current row in the block */
    int w = track->audio.frame_size;
    int x;

    if (!track->audio.pkt_cnt) {
        /* remember the timestamp of the first row of a new super-block */
        if (track->audio.sub_packet_cnt == 0)
            track->audio.buf_timecode = timecode;
        if (st->codec->codec_id == AV_CODEC_ID_RA_288) {
            /* "int4" interleaving: h/2 chunks of cfs bytes per call */
            if (size < cfs * h / 2) {
                av_log(matroska->ctx, AV_LOG_ERROR,
                       "Corrupt int4 RM-style audio packet size\n");
                return AVERROR_INVALIDDATA;
            }
            for (x=0; x<h/2; x++)
                memcpy(track->audio.buf+x*2*w+y*cfs,
                       data+x*cfs, cfs);
        } else if (st->codec->codec_id == AV_CODEC_ID_SIPR) {
            /* SIPR rows are stored contiguously; reordered at block end */
            if (size < w) {
                av_log(matroska->ctx, AV_LOG_ERROR,
                       "Corrupt sipr RM-style audio packet size\n");
                return AVERROR_INVALIDDATA;
            }
            memcpy(track->audio.buf + y*w, data, w);
        } else {
            /* generic "genr" interleaving in sps-sized cells */
            if (size < sps * w / sps || h<=0) {
                av_log(matroska->ctx, AV_LOG_ERROR,
                       "Corrupt generic RM-style audio packet size\n");
                return AVERROR_INVALIDDATA;
            }
            for (x=0; x<w/sps; x++)
                memcpy(track->audio.buf+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), data+x*sps, sps);
        }

        /* super-block complete: make its packets deliverable */
        if (++track->audio.sub_packet_cnt >= h) {
            if (st->codec->codec_id == AV_CODEC_ID_SIPR)
                ff_rm_reorder_sipr_data(track->audio.buf, h, w);
            track->audio.sub_packet_cnt = 0;
            track->audio.pkt_cnt = h*w / a;
        }
    }

    /* flush the assembled super-block as block_align-sized packets */
    while (track->audio.pkt_cnt) {
        AVPacket *pkt = NULL;
        if (!(pkt = av_mallocz(sizeof(AVPacket))) || av_new_packet(pkt, a) < 0){
            av_free(pkt);
            return AVERROR(ENOMEM);
        }
        memcpy(pkt->data, track->audio.buf
               + a * (h*w / a - track->audio.pkt_cnt--), a);
        /* only the first packet of the block carries the timestamp */
        pkt->pts = track->audio.buf_timecode;
        track->audio.buf_timecode = AV_NOPTS_VALUE;
        pkt->pos = pos;
        pkt->stream_index = st->index;
        dynarray_add(&matroska->packets,&matroska->num_packets,pkt);
    }

    return 0;
}
 
/* reconstruct full wavpack blocks from mangled matroska ones */
static int matroska_parse_wavpack(MatroskaTrack *track, uint8_t *src,
uint8_t **pdst, int *size)
{
uint8_t *dst = NULL;
int dstlen = 0;
int srclen = *size;
uint32_t samples;
uint16_t ver;
int ret, offset = 0;
 
if (srclen < 12 || track->stream->codec->extradata_size < 2)
return AVERROR_INVALIDDATA;
 
ver = AV_RL16(track->stream->codec->extradata);
 
samples = AV_RL32(src);
src += 4;
srclen -= 4;
 
while (srclen >= 8) {
int multiblock;
uint32_t blocksize;
uint8_t *tmp;
 
uint32_t flags = AV_RL32(src);
uint32_t crc = AV_RL32(src + 4);
src += 8;
srclen -= 8;
 
multiblock = (flags & 0x1800) != 0x1800;
if (multiblock) {
if (srclen < 4) {
ret = AVERROR_INVALIDDATA;
goto fail;
}
blocksize = AV_RL32(src);
src += 4;
srclen -= 4;
} else
blocksize = srclen;
 
if (blocksize > srclen) {
ret = AVERROR_INVALIDDATA;
goto fail;
}
 
tmp = av_realloc(dst, dstlen + blocksize + 32);
if (!tmp) {
ret = AVERROR(ENOMEM);
goto fail;
}
dst = tmp;
dstlen += blocksize + 32;
 
AV_WL32(dst + offset, MKTAG('w', 'v', 'p', 'k')); // tag
AV_WL32(dst + offset + 4, blocksize + 24); // blocksize - 8
AV_WL16(dst + offset + 8, ver); // version
AV_WL16(dst + offset + 10, 0); // track/index_no
AV_WL32(dst + offset + 12, 0); // total samples
AV_WL32(dst + offset + 16, 0); // block index
AV_WL32(dst + offset + 20, samples); // number of samples
AV_WL32(dst + offset + 24, flags); // flags
AV_WL32(dst + offset + 28, crc); // crc
memcpy (dst + offset + 32, src, blocksize); // block data
 
src += blocksize;
srclen -= blocksize;
offset += blocksize + 32;
}
 
*pdst = dst;
*size = dstlen;
 
return 0;
 
fail:
av_freep(&dst);
return ret;
}
 
/* Split a Matroska WebVTT block into its three parts -- cue identifier
 * line, cue settings line, cue payload text -- and queue the payload as
 * one packet, attaching identifier/settings as packet side data.
 * Lines are terminated by "\n" or "\r\n". Returns 0 or negative AVERROR. */
static int matroska_parse_webvtt(MatroskaDemuxContext *matroska,
                                 MatroskaTrack *track,
                                 AVStream *st,
                                 uint8_t *data, int data_len,
                                 uint64_t timecode,
                                 uint64_t duration,
                                 int64_t pos)
{
    AVPacket *pkt;
    uint8_t *id, *settings, *text, *buf;
    int id_len, settings_len, text_len;
    uint8_t *p, *q;
    int err;

    if (data_len <= 0)
        return AVERROR_INVALIDDATA;

    p = data;
    q = data + data_len;

    /* first line: cue identifier (may be empty) */
    id = p;
    id_len = -1;
    while (p < q) {
        if (*p == '\r' || *p == '\n') {
            id_len = p - id;
            if (*p == '\r')
                p++;
            break;
        }
        p++;
    }

    if (p >= q || *p != '\n')
        return AVERROR_INVALIDDATA;
    p++;

    /* second line: cue settings (may be empty) */
    settings = p;
    settings_len = -1;
    while (p < q) {
        if (*p == '\r' || *p == '\n') {
            settings_len = p - settings;
            if (*p == '\r')
                p++;
            break;
        }
        p++;
    }

    if (p >= q || *p != '\n')
        return AVERROR_INVALIDDATA;
    p++;

    /* remainder: cue text, with trailing line terminators stripped */
    text = p;
    text_len = q - p;
    while (text_len > 0) {
        const int len = text_len - 1;
        const uint8_t c = p[len];
        if (c != '\r' && c != '\n')
            break;
        text_len = len;
    }

    if (text_len <= 0)
        return AVERROR_INVALIDDATA;

    pkt = av_mallocz(sizeof(*pkt));
    if (!pkt)          /* fix: av_mallocz() can fail; was dereferenced unchecked */
        return AVERROR(ENOMEM);
    err = av_new_packet(pkt, text_len);
    if (err < 0) {
        av_free(pkt);
        /* fix: err is already a negative AVERROR code; wrapping it in
         * AVERROR() again would negate it into a bogus positive value */
        return err;
    }

    memcpy(pkt->data, text, text_len);

    if (id_len > 0) {
        buf = av_packet_new_side_data(pkt,
                                      AV_PKT_DATA_WEBVTT_IDENTIFIER,
                                      id_len);
        if (buf == NULL) {
            /* fix: free the packet payload too, not just the AVPacket shell */
            av_free_packet(pkt);
            av_free(pkt);
            return AVERROR(ENOMEM);
        }
        memcpy(buf, id, id_len);
    }

    if (settings_len > 0) {
        buf = av_packet_new_side_data(pkt,
                                      AV_PKT_DATA_WEBVTT_SETTINGS,
                                      settings_len);
        if (buf == NULL) {
            av_free_packet(pkt);
            av_free(pkt);
            return AVERROR(ENOMEM);
        }
        memcpy(buf, settings, settings_len);
    }

    // Do we need this for subtitles?
    // pkt->flags = AV_PKT_FLAG_KEY;

    pkt->stream_index = st->index;
    pkt->pts = timecode;

    // Do we need this for subtitles?
    // pkt->dts = timecode;

    pkt->duration = duration;
    pkt->pos = pos;

    dynarray_add(&matroska->packets, &matroska->num_packets, pkt);
    matroska->prev_pkt = pkt;

    return 0;
}
 
/* Turn one laced frame into an AVPacket and queue it: undoes track-level
 * content encodings, rebuilds WavPack/ProRes framing, attaches
 * BlockAdditional / discard-padding side data, and sets timestamps.
 * Returns 0 or a negative AVERROR code. */
static int matroska_parse_frame(MatroskaDemuxContext *matroska,
                                MatroskaTrack *track,
                                AVStream *st,
                                uint8_t *data, int pkt_size,
                                uint64_t timecode, uint64_t lace_duration,
                                int64_t pos, int is_keyframe,
                                uint8_t *additional, uint64_t additional_id, int additional_size,
                                uint64_t discard_padding)
{
    MatroskaTrackEncoding *encodings = track->encodings.elem;
    uint8_t *pkt_data = data;
    int offset = 0, res;
    AVPacket *pkt;

    /* undo track-level compression; may replace pkt_data with a new buffer */
    if (encodings && !encodings->type && encodings->scope & 1) {
        res = matroska_decode_buffer(&pkt_data, &pkt_size, track);
        if (res < 0)
            return res;
    }

    if (st->codec->codec_id == AV_CODEC_ID_WAVPACK) {
        uint8_t *wv_data;
        res = matroska_parse_wavpack(track, pkt_data, &wv_data, &pkt_size);
        if (res < 0) {
            av_log(matroska->ctx, AV_LOG_ERROR, "Error parsing a wavpack block.\n");
            goto fail;
        }
        if (pkt_data != data)
            av_freep(&pkt_data);
        pkt_data = wv_data;
    }

    /* ProRes frames in Matroska lack the 8-byte atom header; re-add it */
    if (st->codec->codec_id == AV_CODEC_ID_PRORES)
        offset = 8;

    pkt = av_mallocz(sizeof(AVPacket));
    if (!pkt) {        /* fix: av_mallocz() can fail; was dereferenced unchecked */
        res = AVERROR(ENOMEM);
        goto fail;
    }
    /* XXX: prevent data copy... */
    if (av_new_packet(pkt, pkt_size + offset) < 0) {
        av_free(pkt);
        res = AVERROR(ENOMEM);
        goto fail;
    }

    if (st->codec->codec_id == AV_CODEC_ID_PRORES) {
        uint8_t *buf = pkt->data;
        bytestream_put_be32(&buf, pkt_size);
        bytestream_put_be32(&buf, MKBETAG('i', 'c', 'p', 'f'));
    }

    memcpy(pkt->data + offset, pkt_data, pkt_size);

    if (pkt_data != data)
        av_freep(&pkt_data);

    pkt->flags = is_keyframe;
    pkt->stream_index = st->index;

    if (additional_size > 0) {
        /* side data layout: 8-byte BlockAddID followed by the raw payload */
        uint8_t *side_data = av_packet_new_side_data(pkt,
                                                     AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
                                                     additional_size + 8);
        if (side_data == NULL) {
            av_free_packet(pkt);
            av_free(pkt);
            return AVERROR(ENOMEM);
        }
        AV_WB64(side_data, additional_id);
        memcpy(side_data + 8, additional, additional_size);
    }

    if (discard_padding) {
        uint8_t *side_data = av_packet_new_side_data(pkt,
                                                     AV_PKT_DATA_SKIP_SAMPLES,
                                                     10);
        if (side_data == NULL) {
            av_free_packet(pkt);
            av_free(pkt);
            return AVERROR(ENOMEM);
        }
        /* skip 0 samples at start; convert padding from ns to samples */
        AV_WL32(side_data, 0);
        AV_WL32(side_data + 4, av_rescale_q(discard_padding,
                                            (AVRational){1, 1000000000},
                                            (AVRational){1, st->codec->sample_rate}));
    }

    if (track->ms_compat)
        pkt->dts = timecode;
    else
        pkt->pts = timecode;
    pkt->pos = pos;
    if (st->codec->codec_id == AV_CODEC_ID_SUBRIP) {
        /*
         * For backward compatibility.
         * Historically, we have put subtitle duration
         * in convergence_duration, on the off chance
         * that the time_scale is less than 1us, which
         * could result in a 32bit overflow on the
         * normal duration field.
         */
        pkt->convergence_duration = lace_duration;
    }

    if (track->type != MATROSKA_TRACK_TYPE_SUBTITLE ||
        lace_duration <= INT_MAX) {
        /*
         * For non subtitle tracks, just store the duration
         * as normal.
         *
         * If it's a subtitle track and duration value does
         * not overflow a uint32, then also store it normally.
         */
        pkt->duration = lace_duration;
    }

#if FF_API_ASS_SSA
    if (st->codec->codec_id == AV_CODEC_ID_SSA)
        matroska_fix_ass_packet(matroska, pkt, lace_duration);

    /* merge consecutive SSA packets sharing a timestamp into one packet */
    if (matroska->prev_pkt &&
        timecode != AV_NOPTS_VALUE &&
        matroska->prev_pkt->pts == timecode &&
        matroska->prev_pkt->stream_index == st->index &&
        st->codec->codec_id == AV_CODEC_ID_SSA)
        matroska_merge_packets(matroska->prev_pkt, pkt);
    else {
        dynarray_add(&matroska->packets, &matroska->num_packets, pkt);
        matroska->prev_pkt = pkt;
    }
#else
    dynarray_add(&matroska->packets, &matroska->num_packets, pkt);
    matroska->prev_pkt = pkt;
#endif

    return 0;
fail:
    if (pkt_data != data)
        av_freep(&pkt_data);
    return res;
}
 
/* Parse one Block/SimpleBlock element: track number (EBML varint),
 * 16-bit relative timecode, flags byte, then optionally laced frame
 * data. Dispatches each lace to the RealAudio / WebVTT / generic frame
 * handlers. Returns 0 or a negative AVERROR code. */
static int matroska_parse_block(MatroskaDemuxContext *matroska, uint8_t *data,
                                int size, int64_t pos, uint64_t cluster_time,
                                uint64_t block_duration, int is_keyframe,
                                uint8_t *additional, uint64_t additional_id, int additional_size,
                                int64_t cluster_pos, uint64_t discard_padding)
{
    uint64_t timecode = AV_NOPTS_VALUE;
    MatroskaTrack *track;
    int res = 0;
    AVStream *st;
    int16_t block_time;
    uint32_t *lace_size = NULL;
    int n, flags, laces = 0;
    uint64_t num;
    int trust_default_duration = 1;

    /* track number comes first, as an EBML variable-length integer */
    if ((n = matroska_ebmlnum_uint(matroska, data, size, &num)) < 0) {
        av_log(matroska->ctx, AV_LOG_ERROR, "EBML block data error\n");
        return n;
    }
    data += n;
    size -= n;

    track = matroska_find_track_by_num(matroska, num);
    if (!track || !track->stream) {
        av_log(matroska->ctx, AV_LOG_INFO,
               "Invalid stream %"PRIu64" or size %u\n", num, size);
        return AVERROR_INVALIDDATA;
    } else if (size <= 3)  /* not even timecode + flags + 1 data byte */
        return 0;
    st = track->stream;
    if (st->discard >= AVDISCARD_ALL)
        return res;
    av_assert1(block_duration != AV_NOPTS_VALUE);

    /* signed 16-bit timecode relative to the cluster timecode */
    block_time = sign_extend(AV_RB16(data), 16);
    data += 2;
    flags = *data++;
    size -= 3;
    /* is_keyframe == -1 means SimpleBlock: key flag is bit 0x80 */
    if (is_keyframe == -1)
        is_keyframe = flags & 0x80 ? AV_PKT_FLAG_KEY : 0;

    if (cluster_time != (uint64_t)-1
        && (block_time >= 0 || cluster_time >= -block_time)) {
        timecode = cluster_time + block_time;
        if (track->type == MATROSKA_TRACK_TYPE_SUBTITLE
            && timecode < track->end_timecode)
            is_keyframe = 0; /* overlapping subtitles are not key frame */
        if (is_keyframe)
            av_add_index_entry(st, cluster_pos, timecode, 0,0,AVINDEX_KEYFRAME);
    }

    /* after a seek, drop frames until the requested keyframe is reached */
    if (matroska->skip_to_keyframe && track->type != MATROSKA_TRACK_TYPE_SUBTITLE) {
        if (timecode < matroska->skip_to_timecode)
            return res;
        if (is_keyframe)
            matroska->skip_to_keyframe = 0;
        else if (!st->skip_to_keyframe) {
            av_log(matroska->ctx, AV_LOG_ERROR, "File is broken, keyframes not correctly marked!\n");
            matroska->skip_to_keyframe = 0;
        }
    }

    /* split the payload per the lacing mode in flags bits 1-2 */
    res = matroska_parse_laces(matroska, &data, &size, (flags & 0x06) >> 1,
                               &lace_size, &laces);

    if (res)
        goto end;

    if (track->audio.samplerate == 8000) {
        // If this is needed for more codecs, then add them here
        if (st->codec->codec_id == AV_CODEC_ID_AC3) {
            if (track->audio.samplerate != st->codec->sample_rate || !st->codec->frame_size)
                trust_default_duration = 0;
        }
    }

    if (!block_duration && trust_default_duration)
        block_duration = track->default_duration * laces / matroska->time_scale;

    if (cluster_time != (uint64_t)-1 && (block_time >= 0 || cluster_time >= -block_time))
        track->end_timecode =
            FFMAX(track->end_timecode, timecode + block_duration);

    for (n = 0; n < laces; n++) {
        /* distribute block_duration across laces without rounding drift */
        int64_t lace_duration = block_duration*(n+1) / laces - block_duration*n / laces;

        if (lace_size[n] > size) {
            av_log(matroska->ctx, AV_LOG_ERROR, "Invalid packet size\n");
            break;
        }

        if ((st->codec->codec_id == AV_CODEC_ID_RA_288 ||
             st->codec->codec_id == AV_CODEC_ID_COOK ||
             st->codec->codec_id == AV_CODEC_ID_SIPR ||
             st->codec->codec_id == AV_CODEC_ID_ATRAC3) &&
            st->codec->block_align && track->audio.sub_packet_size) {

            res = matroska_parse_rm_audio(matroska, track, st, data,
                                          lace_size[n],
                                          timecode, pos);
            if (res)
                goto end;

        } else if (st->codec->codec_id == AV_CODEC_ID_WEBVTT) {
            res = matroska_parse_webvtt(matroska, track, st,
                                        data, lace_size[n],
                                        timecode, lace_duration,
                                        pos);
            if (res)
                goto end;

        } else {
            /* only the first lace can carry the keyframe flag */
            res = matroska_parse_frame(matroska, track, st, data, lace_size[n],
                                       timecode, lace_duration,
                                       pos, !n? is_keyframe : 0,
                                       additional, additional_id, additional_size,
                                       discard_padding);
            if (res)
                goto end;
        }

        if (timecode != AV_NOPTS_VALUE)
            timecode = lace_duration ? timecode + lace_duration : AV_NOPTS_VALUE;
        data += lace_size[n];
        size -= lace_size[n];
    }

end:
    av_free(lace_size);
    return res;
}
 
/* Incrementally parse the current cluster: pull one more element from the
 * EBML parser, detect cluster boundaries, and hand any newly appeared
 * block to matroska_parse_block(). Returns 0/negative AVERROR; keeps
 * partial cluster state in matroska->current_cluster between calls. */
static int matroska_parse_cluster_incremental(MatroskaDemuxContext *matroska)
{
    EbmlList *blocks_list;
    MatroskaBlock *blocks;
    int i, res;
    res = ebml_parse(matroska,
                     matroska_cluster_incremental_parsing,
                     &matroska->current_cluster);
    if (res == 1) {
        /* New Cluster */
        if (matroska->current_cluster_pos)
            ebml_level_end(matroska);
        /* discard the finished cluster's state and start fresh */
        ebml_free(matroska_cluster, &matroska->current_cluster);
        memset(&matroska->current_cluster, 0, sizeof(MatroskaCluster));
        matroska->current_cluster_num_blocks = 0;
        matroska->current_cluster_pos = avio_tell(matroska->ctx->pb);
        matroska->prev_pkt = NULL;
        /* sizeof the ID which was already read */
        if (matroska->current_id)
            matroska->current_cluster_pos -= 4;
        res = ebml_parse(matroska,
                         matroska_clusters_incremental,
                         &matroska->current_cluster);
        /* Try parsing the block again. */
        if (res == 1)
            res = ebml_parse(matroska,
                             matroska_cluster_incremental_parsing,
                             &matroska->current_cluster);
    }

    /* if the blocks list grew, parse the newest (last) block */
    if (!res &&
        matroska->current_cluster_num_blocks <
        matroska->current_cluster.blocks.nb_elem) {
        blocks_list = &matroska->current_cluster.blocks;
        blocks = blocks_list->elem;

        matroska->current_cluster_num_blocks = blocks_list->nb_elem;
        i = blocks_list->nb_elem - 1;
        if (blocks[i].bin.size > 0 && blocks[i].bin.data) {
            /* SimpleBlock (non_simple == 0): keyframe flag decided later (-1) */
            int is_keyframe = blocks[i].non_simple ? !blocks[i].reference : -1;
            uint8_t* additional = blocks[i].additional.size > 0 ?
                                  blocks[i].additional.data : NULL;
            if (!blocks[i].non_simple)
                blocks[i].duration = 0;
            res = matroska_parse_block(matroska,
                                       blocks[i].bin.data, blocks[i].bin.size,
                                       blocks[i].bin.pos,
                                       matroska->current_cluster.timecode,
                                       blocks[i].duration, is_keyframe,
                                       additional, blocks[i].additional_id,
                                       blocks[i].additional.size,
                                       matroska->current_cluster_pos,
                                       blocks[i].discard_padding);
        }
    }

    return res;
}
 
/* Parse one whole cluster at once. Normally delegates to the incremental
 * parser; the whole-cluster path remains only for SSA content, which
 * needs all blocks of a cluster available for packet merging. */
static int matroska_parse_cluster(MatroskaDemuxContext *matroska)
{
    MatroskaCluster cluster = { 0 };
    EbmlList *blocks_list;
    MatroskaBlock *blocks;
    int i, res;
    int64_t pos;
    if (!matroska->contains_ssa)
        return matroska_parse_cluster_incremental(matroska);
    pos = avio_tell(matroska->ctx->pb);
    matroska->prev_pkt = NULL;
    if (matroska->current_id)
        pos -= 4;  /* sizeof the ID which was already read */
    res = ebml_parse(matroska, matroska_clusters, &cluster);
    blocks_list = &cluster.blocks;
    blocks = blocks_list->elem;
    for (i=0; i<blocks_list->nb_elem; i++)
        if (blocks[i].bin.size > 0 && blocks[i].bin.data) {
            /* SimpleBlock (non_simple == 0): keyframe flag decided later (-1) */
            int is_keyframe = blocks[i].non_simple ? !blocks[i].reference : -1;
            res=matroska_parse_block(matroska,
                                     blocks[i].bin.data, blocks[i].bin.size,
                                     blocks[i].bin.pos, cluster.timecode,
                                     blocks[i].duration, is_keyframe, NULL, 0, 0,
                                     pos, blocks[i].discard_padding);
        }
    ebml_free(matroska_cluster, &cluster);
    return res;
}
 
/* AVInputFormat.read_packet: deliver the next queued packet, demuxing
 * further clusters (with error resync) until one becomes available. */
static int matroska_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MatroskaDemuxContext *matroska = s->priv_data;

    for (;;) {
        int64_t resync_pos;

        if (!matroska_deliver_packet(matroska, pkt))
            return 0;

        resync_pos = avio_tell(matroska->ctx->pb);
        if (matroska->done)
            return AVERROR_EOF;
        if (matroska_parse_cluster(matroska) < 0)
            matroska_resync(matroska, resync_pos);
    }
}
 
/* AVInputFormat.read_seek: seek to the keyframe index entry at or near
 * timestamp, rewinding further for subtitle tracks so overlapping cues
 * starting shortly before the target are not lost. Falls back to the
 * generic seek code by returning -1. */
static int matroska_read_seek(AVFormatContext *s, int stream_index,
                              int64_t timestamp, int flags)
{
    MatroskaDemuxContext *matroska = s->priv_data;
    MatroskaTrack *tracks = matroska->tracks.elem;
    AVStream *st = s->streams[stream_index];
    int i, index, index_sub, index_min;

    /* Parse the CUES now since we need the index data to seek. */
    if (matroska->cues_parsing_deferred > 0) {
        matroska->cues_parsing_deferred = 0;
        matroska_parse_cues(matroska);
    }

    if (!st->nb_index_entries)
        goto err;
    timestamp = FFMAX(timestamp, st->index_entries[0].timestamp);

    /* target beyond the index: keep demuxing clusters from the last
     * indexed position until an entry covering timestamp appears */
    if ((index = av_index_search_timestamp(st, timestamp, flags)) < 0) {
        avio_seek(s->pb, st->index_entries[st->nb_index_entries-1].pos, SEEK_SET);
        matroska->current_id = 0;
        while ((index = av_index_search_timestamp(st, timestamp, flags)) < 0) {
            matroska_clear_queue(matroska);
            if (matroska_parse_cluster(matroska) < 0)
                break;
        }
    }

    matroska_clear_queue(matroska);
    if (index < 0 || (matroska->cues_parsing_deferred < 0 && index == st->nb_index_entries - 1))
        goto err;

    /* reset per-track state; for subtitle tracks, walk index_min back so
     * cues up to 30s before the target are re-read */
    index_min = index;
    for (i=0; i < matroska->tracks.nb_elem; i++) {
        tracks[i].audio.pkt_cnt = 0;
        tracks[i].audio.sub_packet_cnt = 0;
        tracks[i].audio.buf_timecode = AV_NOPTS_VALUE;
        tracks[i].end_timecode = 0;
        if (tracks[i].type == MATROSKA_TRACK_TYPE_SUBTITLE
            && tracks[i].stream->discard != AVDISCARD_ALL) {
            index_sub = av_index_search_timestamp(tracks[i].stream, st->index_entries[index].timestamp, AVSEEK_FLAG_BACKWARD);
            while(index_sub >= 0
                  && index_min >= 0
                  && tracks[i].stream->index_entries[index_sub].pos < st->index_entries[index_min].pos
                  && st->index_entries[index].timestamp - tracks[i].stream->index_entries[index_sub].timestamp < 30000000000/matroska->time_scale)
                index_min--;
        }
    }

    avio_seek(s->pb, st->index_entries[index_min].pos, SEEK_SET);
    matroska->current_id = 0;
    if (flags & AVSEEK_FLAG_ANY) {
        st->skip_to_keyframe = 0;
        matroska->skip_to_timecode = timestamp;
    } else {
        st->skip_to_keyframe = 1;
        matroska->skip_to_timecode = st->index_entries[index].timestamp;
    }
    matroska->skip_to_keyframe = 1;
    matroska->done = 0;
    matroska->num_levels = 0;
    ff_update_cur_dts(s, st, st->index_entries[index].timestamp);
    return 0;
err:
    // slightly hackish but allows proper fallback to
    // the generic seeking code.
    matroska_clear_queue(matroska);
    matroska->current_id = 0;
    st->skip_to_keyframe =
    matroska->skip_to_keyframe = 0;
    matroska->done = 0;
    matroska->num_levels = 0;
    return -1;
}
 
/* AVInputFormat.read_close: release queued packets, per-track audio
 * reordering buffers, and all parsed segment/cluster state. */
static int matroska_read_close(AVFormatContext *s)
{
    MatroskaDemuxContext *matroska = s->priv_data;
    MatroskaTrack *tracks = matroska->tracks.elem;
    int i;

    matroska_clear_queue(matroska);

    for (i = 0; i < matroska->tracks.nb_elem; i++) {
        if (tracks[i].type != MATROSKA_TRACK_TYPE_AUDIO)
            continue;
        av_free(tracks[i].audio.buf);
    }

    ebml_free(matroska_cluster, &matroska->current_cluster);
    ebml_free(matroska_segment, matroska);

    return 0;
}
 
/* Demuxer registration: one AVInputFormat covering both Matroska and WebM. */
AVInputFormat ff_matroska_demuxer = {
    .name           = "matroska,webm",
    .long_name      = NULL_IF_CONFIG_SMALL("Matroska / WebM"),
    .priv_data_size = sizeof(MatroskaDemuxContext),
    .read_probe     = matroska_probe,
    .read_header    = matroska_read_header,
    .read_packet    = matroska_read_packet,
    .read_close     = matroska_read_close,
    .read_seek      = matroska_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/matroskaenc.c
0,0 → 1,1844
/*
* Matroska muxer
* Copyright (c) 2007 David Conrad
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avc.h"
#include "avformat.h"
#include "avio_internal.h"
#include "avlanguage.h"
#include "flacenc.h"
#include "internal.h"
#include "isom.h"
#include "matroska.h"
#include "riff.h"
#include "subtitles.h"
#include "wv.h"
 
#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/intfloat.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/lfg.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/random_seed.h"
#include "libavutil/samplefmt.h"
#include "libavutil/sha.h"
 
#include "libavcodec/xiph.h"
#include "libavcodec/mpeg4audio.h"
#include "libavcodec/internal.h"
 
/* Bookkeeping for an open EBML master element whose size is patched in
 * afterwards by end_ebml_master(). */
typedef struct ebml_master {
    int64_t         pos;                ///< absolute offset in the file where the master's elements start
    int             sizebytes;          ///< how many bytes were reserved for the size
} ebml_master;

/* One SeekHead entry: a level-1 element ID and its position. */
typedef struct mkv_seekhead_entry {
    unsigned int    elementid;
    uint64_t        segmentpos;         ///< position relative to the segment start
} mkv_seekhead_entry;

typedef struct mkv_seekhead {
    int64_t                 filepos;        ///< absolute offset where the seek head (or its reserved space) lives
    int64_t                 segment_offset; ///< the file offset to the beginning of the segment
    int                     reserved_size;  ///< -1 if appending to file
    int                     max_entries;    ///< maximum entries that fit in the reserved space (0 = unlimited)
    mkv_seekhead_entry      *entries;
    int                     num_entries;
} mkv_seekhead;

/* One cue point candidate collected while muxing. */
typedef struct {
    uint64_t        pts;
    int             tracknum;           ///< 1-based Matroska track number
    int64_t         cluster_pos;        ///< file offset of the cluster containing the block
    int64_t         relative_pos;       ///< relative offset from the position of the cluster containing the block
    int64_t         duration;           ///< duration of the block according to time base
} mkv_cuepoint;

typedef struct {
    int64_t         segment_offset;     ///< cue cluster positions are stored relative to this
    mkv_cuepoint    *entries;
    int             num_entries;
} mkv_cues;

/* Per-track muxer state. */
typedef struct {
    int             write_dts;  // NOTE(review): presumably selects DTS-based timestamps — confirm at use site
    int             has_cue;    ///< scratch flag used while grouping cue entries per CuePoint
} mkv_track;

#define MODE_MATROSKAv2 0x01
#define MODE_WEBM       0x02

typedef struct MatroskaMuxContext {
    const AVClass  *class;
    int             mode;               ///< MODE_MATROSKAv2 or MODE_WEBM
    AVIOContext   *dyn_bc;
    ebml_master     segment;
    int64_t         segment_offset;
    ebml_master     cluster;
    int64_t         cluster_pos;        ///< file offset of the current cluster
    int64_t         cluster_pts;
    int64_t         duration_offset;    ///< where the duration placeholder was written
    int64_t         duration;
    mkv_seekhead    *main_seekhead;
    mkv_cues        *cues;
    mkv_track       *tracks;

    AVPacket        cur_audio_pkt;      ///< one buffered audio packet (for interleaving)

    int have_attachments;

    int reserve_cues_space;
    int cluster_size_limit;
    int64_t cues_pos;
    int64_t cluster_time_limit;

    uint32_t chapter_id_offset;
    int wrote_chapters;
} MatroskaMuxContext;


/** 2 bytes * 3 for EBML IDs, 3 1-byte EBML lengths, 8 bytes for 64 bit
 * offset, 4 bytes for target EBML ID */
#define MAX_SEEKENTRY_SIZE 21

/** per-cuepoint-track - 5 1-byte EBML IDs, 5 1-byte EBML sizes, 4
 * 8-byte uint max */
#define MAX_CUETRACKPOS_SIZE 42

/** per-cuepoint - 2 1-byte EBML IDs, 2 1-byte EBML sizes, 8-byte uint max */
#define MAX_CUEPOINT_SIZE(num_tracks) 12 + MAX_CUETRACKPOS_SIZE*num_tracks

/** Seek preroll value for opus */
#define OPUS_SEEK_PREROLL 80000000
 
 
/* Number of bytes an EBML ID occupies on disk: the smallest count whose
 * 7 payload bits per byte (plus the length-marker bit) can hold it. */
static int ebml_id_size(unsigned int id)
{
    uint64_t v = (uint64_t)id + 1;
    int bytes = 1;

    while (v >> (7 * bytes + 1))
        bytes++;
    return bytes;
}
 
/* Write an EBML element ID, most significant byte first. */
static void put_ebml_id(AVIOContext *pb, unsigned int id)
{
    int remaining = ebml_id_size(id);

    while (remaining-- > 0)
        avio_w8(pb, (uint8_t)(id >> (remaining * 8)));
}
 
/**
* Write an EBML size meaning "unknown size".
*
* @param bytes The number of bytes the size should occupy (maximum: 8).
*/
static void put_ebml_size_unknown(AVIOContext *pb, int bytes)
{
    av_assert0(bytes <= 8);
    /* first byte: length-marker bit followed by all ones; e.g. bytes==1
     * yields 0xFF, bytes==2 yields 0x7F then 0xFF */
    avio_w8(pb, 0x1ff >> bytes);
    ffio_fill(pb, 0xff, bytes - 1);
}
 
/**
* Calculate how many bytes are needed to represent a given number in EBML.
*/
/* Minimal byte count for an EBML variable-length number: each byte gives
 * 7 payload bits; num+1 accounts for the all-ones pattern being reserved. */
static int ebml_num_size(uint64_t num)
{
    int needed;

    for (needed = 1; (num + 1) >> (7 * needed); needed++)
        ;
    return needed;
}
 
/**
* Write a number in EBML variable length format.
*
* @param bytes The number of bytes that need to be used to write the number.
* If zero, any number of bytes can be used.
*/
static void put_ebml_num(AVIOContext *pb, uint64_t num, int bytes)
{
    int i, needed_bytes = ebml_num_size(num);

    // sizes larger than this are currently undefined in EBML
    av_assert0(num < (1ULL<<56)-1);

    if (bytes == 0)
        // don't care how many bytes are used, so use the min
        bytes = needed_bytes;
    // the bytes needed to write the given size would exceed the bytes
    // that we need to use, so write unknown size. This shouldn't happen.
    av_assert0(bytes >= needed_bytes);

    /* set the length-marker bit, then emit big-endian */
    num |= 1ULL << bytes*7;
    for (i = bytes - 1; i >= 0; i--)
        avio_w8(pb, (uint8_t)(num >> i*8));
}
 
/* Write an unsigned integer element using its minimal big-endian length
 * (zero still occupies one byte). */
static void put_ebml_uint(AVIOContext *pb, unsigned int elementid, uint64_t val)
{
    int shift, len = 1;

    for (shift = 8; shift < 64 && (val >> shift); shift += 8)
        len++;

    put_ebml_id(pb, elementid);
    put_ebml_num(pb, len, 0);
    for (shift = (len - 1) * 8; shift >= 0; shift -= 8)
        avio_w8(pb, (uint8_t)(val >> shift));
}
 
/* Write a floating point element as an 8-byte big-endian IEEE-754 double. */
static void put_ebml_float(AVIOContext *pb, unsigned int elementid, double val)
{
    put_ebml_id(pb, elementid);
    put_ebml_num(pb, 8, 0);
    avio_wb64(pb, av_double2int(val));  // raw bit pattern of the double
}
 
/* Write a binary element: ID, length, then the raw payload bytes. */
static void put_ebml_binary(AVIOContext *pb, unsigned int elementid,
                            const void *buf, int size)
{
    put_ebml_id(pb, elementid);
    put_ebml_num(pb, size, 0);
    avio_write(pb, buf, size);
}
 
/* Write a string element (no NUL terminator is stored). */
static void put_ebml_string(AVIOContext *pb, unsigned int elementid, const char *str)
{
    put_ebml_binary(pb, elementid, str, strlen(str));
}
 
/**
* Write a void element of a given size. Useful for reserving space in
* the file to be written to later.
*
* @param size The number of bytes to reserve, which must be at least 2.
*/
static void put_ebml_void(AVIOContext *pb, uint64_t size)
{
    int64_t currentpos = avio_tell(pb);

    av_assert0(size >= 2);  /* 1 byte ID + at least a 1-byte length */

    put_ebml_id(pb, EBML_ID_VOID);
    // we need to subtract the length needed to store the size from the
    // size we need to reserve so 2 cases, we use 8 bytes to store the
    // size if possible, 1 byte otherwise
    if (size < 10)
        put_ebml_num(pb, size-1, 0);
    else
        put_ebml_num(pb, size-9, 8);
    /* zero-fill whatever of the reserved span remains */
    ffio_fill(pb, 0, currentpos + size - avio_tell(pb));
}
 
/* Open a master element with an unknown-size placeholder; the real size
 * is patched in later by end_ebml_master(). expectedsize (0 = unknown)
 * only bounds how many size bytes are reserved. */
static ebml_master start_ebml_master(AVIOContext *pb, unsigned int elementid, uint64_t expectedsize)
{
    int bytes = expectedsize ? ebml_num_size(expectedsize) : 8;
    put_ebml_id(pb, elementid);
    put_ebml_size_unknown(pb, bytes);
    return (ebml_master){ avio_tell(pb), bytes };
}
 
/* Close a master element: seek back to its reserved size field, write the
 * now-known content size, and return to the current position. */
static void end_ebml_master(AVIOContext *pb, ebml_master master)
{
    int64_t pos = avio_tell(pb);

    if (avio_seek(pb, master.pos - master.sizebytes, SEEK_SET) < 0)
        return;  // unseekable output: leave the unknown-size marker in place
    put_ebml_num(pb, pos - master.pos, master.sizebytes);
    avio_seek(pb, pos, SEEK_SET);
}
 
/* Write a size in Xiph lacing style: a run of 255 bytes followed by the
 * final remainder byte (which is < 255). */
static void put_xiph_size(AVIOContext *pb, int size)
{
    int left = size;

    while (left >= 255) {
        avio_w8(pb, 255);
        left -= 255;
    }
    avio_w8(pb, left);
}
 
/**
* Initialize a mkv_seekhead element to be ready to index level 1 Matroska
* elements. If a maximum number of elements is specified, enough space
* will be reserved at the current file location to write a seek head of
* that size.
*
* @param segment_offset The absolute offset to the position in the file
* where the segment begins.
* @param numelements The maximum number of elements that will be indexed
* by this seek head, 0 if unlimited.
*/
static mkv_seekhead * mkv_start_seekhead(AVIOContext *pb, int64_t segment_offset, int numelements)
{
    mkv_seekhead *sh = av_mallocz(sizeof(mkv_seekhead));

    if (!sh)
        return NULL;

    sh->segment_offset = segment_offset;

    if (numelements > 0) {
        sh->filepos = avio_tell(pb);
        /* 21 bytes max per seek entry, 10 bytes max for the SeekHead ID
         * and size, and 3 more so an EBML void element fits afterwards */
        sh->reserved_size = numelements * MAX_SEEKENTRY_SIZE + 13;
        sh->max_entries   = numelements;
        put_ebml_void(pb, sh->reserved_size);
    }
    return sh;
}
 
/* Append one (elementid, position) pair to the seek head. Positions are
 * stored relative to the segment start. Returns 0, -1 when the reserved
 * space is full, or AVERROR(ENOMEM). */
static int mkv_add_seekhead_entry(mkv_seekhead *seekhead, unsigned int elementid, uint64_t filepos)
{
    mkv_seekhead_entry *entries = seekhead->entries;

    // don't store more elements than we reserved space for
    if (seekhead->max_entries > 0 && seekhead->max_entries <= seekhead->num_entries)
        return -1;

    entries = av_realloc_array(entries, seekhead->num_entries + 1, sizeof(mkv_seekhead_entry));
    if (entries == NULL)
        return AVERROR(ENOMEM);  // original array stays valid and owned by seekhead
    seekhead->entries = entries;

    seekhead->entries[seekhead->num_entries].elementid    = elementid;
    seekhead->entries[seekhead->num_entries++].segmentpos = filepos - seekhead->segment_offset;

    return 0;
}
 
/**
* Write the seek head to the file and free it. If a maximum number of
* elements was specified to mkv_start_seekhead(), the seek head will
* be written at the location reserved for it. Otherwise, it is written
* at the current location in the file.
*
* @return The file offset where the seekhead was written,
* -1 if an error occurred.
*/
static int64_t mkv_write_seekhead(AVIOContext *pb, mkv_seekhead *seekhead)
{
    ebml_master metaseek, seekentry;
    int64_t currentpos;
    int i;

    currentpos = avio_tell(pb);

    /* space was reserved earlier: jump back and overwrite it */
    if (seekhead->reserved_size > 0) {
        if (avio_seek(pb, seekhead->filepos, SEEK_SET) < 0) {
            currentpos = -1;
            goto fail;
        }
    }

    metaseek = start_ebml_master(pb, MATROSKA_ID_SEEKHEAD, seekhead->reserved_size);
    for (i = 0; i < seekhead->num_entries; i++) {
        mkv_seekhead_entry *entry = &seekhead->entries[i];

        seekentry = start_ebml_master(pb, MATROSKA_ID_SEEKENTRY, MAX_SEEKENTRY_SIZE);

        /* SeekID payload is the raw EBML ID bytes of the target element */
        put_ebml_id(pb, MATROSKA_ID_SEEKID);
        put_ebml_num(pb, ebml_id_size(entry->elementid), 0);
        put_ebml_id(pb, entry->elementid);

        put_ebml_uint(pb, MATROSKA_ID_SEEKPOSITION, entry->segmentpos);
        end_ebml_master(pb, seekentry);
    }
    end_ebml_master(pb, metaseek);

    if (seekhead->reserved_size > 0) {
        /* pad the unused remainder of the reservation and seek back */
        uint64_t remaining = seekhead->filepos + seekhead->reserved_size - avio_tell(pb);
        put_ebml_void(pb, remaining);
        avio_seek(pb, currentpos, SEEK_SET);

        currentpos = seekhead->filepos;
    }
fail:
    /* the seekhead is consumed either way */
    av_freep(&seekhead->entries);
    av_free(seekhead);

    return currentpos;
}
 
/* Allocate an empty cue index anchored at the segment start offset;
 * returns NULL on allocation failure. */
static mkv_cues * mkv_start_cues(int64_t segment_offset)
{
    mkv_cues *c = av_mallocz(sizeof(mkv_cues));

    if (!c)
        return NULL;
    c->segment_offset = segment_offset;
    return c;
}
 
/* Record a cue point for a keyframe. Negative timestamps are silently
 * skipped. Returns 0 or AVERROR(ENOMEM). */
static int mkv_add_cuepoint(mkv_cues *cues, int stream, int64_t ts, int64_t cluster_pos, int64_t relative_pos,
                            int64_t duration)
{
    mkv_cuepoint *entries = cues->entries;

    if (ts < 0)
        return 0;

    entries = av_realloc_array(entries, cues->num_entries + 1, sizeof(mkv_cuepoint));
    if (entries == NULL)
        return AVERROR(ENOMEM);  // original array stays valid and owned by cues
    cues->entries = entries;

    cues->entries[cues->num_entries].pts          = ts;
    cues->entries[cues->num_entries].tracknum     = stream + 1;  // Matroska track numbers are 1-based
    cues->entries[cues->num_entries].cluster_pos  = cluster_pos - cues->segment_offset;
    cues->entries[cues->num_entries].relative_pos = relative_pos;
    cues->entries[cues->num_entries++].duration   = duration;

    return 0;
}
 
/* Write the Cues element at the current position, merging entries from
 * different tracks with identical timestamps into a single CuePoint.
 * Assumes cues->entries is sorted by pts. Returns the element's offset. */
static int64_t mkv_write_cues(AVIOContext *pb, mkv_cues *cues, mkv_track *tracks, int num_tracks)
{
    ebml_master cues_element;
    int64_t currentpos;
    int i, j;

    currentpos = avio_tell(pb);
    cues_element = start_ebml_master(pb, MATROSKA_ID_CUES, 0);

    for (i = 0; i < cues->num_entries; i++) {
        ebml_master cuepoint, track_positions;
        mkv_cuepoint *entry = &cues->entries[i];
        uint64_t pts = entry->pts;

        cuepoint = start_ebml_master(pb, MATROSKA_ID_POINTENTRY, MAX_CUEPOINT_SIZE(num_tracks));
        put_ebml_uint(pb, MATROSKA_ID_CUETIME, pts);

        // put all the entries from different tracks that have the exact same
        // timestamp into the same CuePoint
        for (j = 0; j < num_tracks; j++)
            tracks[j].has_cue = 0;
        for (j = 0; j < cues->num_entries - i && entry[j].pts == pts; j++) {
            int tracknum = entry[j].tracknum - 1;
            av_assert0(tracknum>=0 && tracknum<num_tracks);
            if (tracks[tracknum].has_cue)
                continue;  // at most one position per track per CuePoint
            tracks[tracknum].has_cue = 1;
            track_positions = start_ebml_master(pb, MATROSKA_ID_CUETRACKPOSITION, MAX_CUETRACKPOS_SIZE);
            put_ebml_uint(pb, MATROSKA_ID_CUETRACK           , entry[j].tracknum   );
            put_ebml_uint(pb, MATROSKA_ID_CUECLUSTERPOSITION , entry[j].cluster_pos);
            put_ebml_uint(pb, MATROSKA_ID_CUERELATIVEPOSITION, entry[j].relative_pos);
            if (entry[j].duration != -1)
                put_ebml_uint(pb, MATROSKA_ID_CUEDURATION    , entry[j].duration);
            end_ebml_master(pb, track_positions);
        }
        i += j - 1;  // skip the entries merged into this CuePoint
        end_ebml_master(pb, cuepoint);
    }
    end_ebml_master(pb, cues_element);

    return currentpos;
}
 
/* Write Vorbis/Theora CodecPrivate: the three Xiph headers joined with
 * Xiph lacing (sizes of the first two headers only; the last is implicit). */
static int put_xiph_codecpriv(AVFormatContext *s, AVIOContext *pb, AVCodecContext *codec)
{
    uint8_t *header_start[3];
    int header_len[3];
    int first_header_size;
    int j;

    /* fixed identification-header sizes used to split the extradata */
    if (codec->codec_id == AV_CODEC_ID_VORBIS)
        first_header_size = 30;
    else
        first_header_size = 42;

    if (avpriv_split_xiph_headers(codec->extradata, codec->extradata_size,
                                  first_header_size, header_start, header_len) < 0) {
        av_log(s, AV_LOG_ERROR, "Extradata corrupt.\n");
        return -1;
    }

    avio_w8(pb, 2);  // number packets - 1
    for (j = 0; j < 2; j++) {
        put_xiph_size(pb, header_len[j]);
    }
    for (j = 0; j < 3; j++)
        avio_write(pb, header_start[j], header_len[j]);

    return 0;
}
 
/* WavPack CodecPrivate: the 2-byte little-endian encoder version, taken
 * from extradata when available. */
static int put_wv_codecpriv(AVIOContext *pb, AVCodecContext *codec)
{
    int have_version = codec->extradata && codec->extradata_size == 2;

    if (have_version)
        avio_write(pb, codec->extradata, 2);
    else
        avio_wl16(pb, 0x403); /* fallback to the version mentioned in matroska specs */
    return 0;
}
 
/* Extract the (possibly SBR-extended) sample rates from AAC extradata.
 * On parse failure the output parameters are left untouched. */
static void get_aac_sample_rates(AVFormatContext *s, AVCodecContext *codec, int *sample_rate, int *output_sample_rate)
{
    MPEG4AudioConfig mp4ac;

    if (avpriv_mpeg4audio_get_config(&mp4ac, codec->extradata,
                                     codec->extradata_size * 8, 1) < 0) {
        av_log(s, AV_LOG_WARNING, "Error parsing AAC extradata, unable to determine samplerate.\n");
        return;
    }

    *sample_rate        = mp4ac.sample_rate;
    *output_sample_rate = mp4ac.ext_sample_rate;
}
 
/* Build and write the CodecPrivate element into a dynamic buffer:
 * codec-specific extradata for natively mapped codecs, or a BITMAPINFOHEADER /
 * WAVEFORMATEX structure for VfW/ACM-compatibility track types. */
static int mkv_write_codecprivate(AVFormatContext *s, AVIOContext *pb, AVCodecContext *codec, int native_id, int qt_id)
{
    AVIOContext *dyn_cp;
    uint8_t *codecpriv;
    int ret, codecpriv_size;

    ret = avio_open_dyn_buf(&dyn_cp);
    if (ret < 0)
        return ret;

    if (native_id) {
        if (codec->codec_id == AV_CODEC_ID_VORBIS || codec->codec_id == AV_CODEC_ID_THEORA)
            ret = put_xiph_codecpriv(s, dyn_cp, codec);
        else if (codec->codec_id == AV_CODEC_ID_FLAC)
            ret = ff_flac_write_header(dyn_cp, codec, 1);
        else if (codec->codec_id == AV_CODEC_ID_WAVPACK)
            ret = put_wv_codecpriv(dyn_cp, codec);
        else if (codec->codec_id == AV_CODEC_ID_H264)
            ret = ff_isom_write_avcc(dyn_cp, codec->extradata, codec->extradata_size);
        else if (codec->codec_id == AV_CODEC_ID_ALAC) {
            if (codec->extradata_size < 36) {
                av_log(s, AV_LOG_ERROR,
                       "Invalid extradata found, ALAC expects a 36-byte "
                       "QuickTime atom.");
                ret = AVERROR_INVALIDDATA;
            } else
                /* strip the 12-byte QuickTime atom header */
                avio_write(dyn_cp, codec->extradata + 12,
                           codec->extradata_size - 12);
        }
        else if (codec->extradata_size && codec->codec_id != AV_CODEC_ID_TTA)
            avio_write(dyn_cp, codec->extradata, codec->extradata_size);
    } else if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        if (qt_id) {
            if (!codec->codec_tag)
                codec->codec_tag = ff_codec_get_tag(ff_codec_movvideo_tags, codec->codec_id);
            if (codec->extradata_size)
                avio_write(dyn_cp, codec->extradata, codec->extradata_size);
        } else {
            if (!codec->codec_tag)
                codec->codec_tag = ff_codec_get_tag(ff_codec_bmp_tags, codec->codec_id);
            if (!codec->codec_tag) {
                av_log(s, AV_LOG_ERROR, "No bmp codec tag found for codec %s\n",
                       avcodec_get_name(codec->codec_id));
                ret = AVERROR(EINVAL);
            }

            /* NOTE(review): header is written even when ret is EINVAL above;
             * the error is still propagated via the return value */
            ff_put_bmp_header(dyn_cp, codec, ff_codec_bmp_tags, 0);
        }

    } else if (codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        unsigned int tag;
        tag = ff_codec_get_tag(ff_codec_wav_tags, codec->codec_id);
        if (!tag) {
            av_log(s, AV_LOG_ERROR, "No wav codec tag found for codec %s\n",
                   avcodec_get_name(codec->codec_id));
            ret = AVERROR(EINVAL);
        }
        if (!codec->codec_tag)
            codec->codec_tag = tag;

        ff_put_wav_header(dyn_cp, codec);
    }

    /* flush the dynamic buffer into a real CodecPrivate element */
    codecpriv_size = avio_close_dyn_buf(dyn_cp, &codecpriv);
    if (codecpriv_size)
        put_ebml_binary(pb, MATROSKA_ID_CODECPRIVATE, codecpriv, codecpriv_size);
    av_free(codecpriv);
    return ret;
}
 
/**
 * Write the Tracks master element: one TrackEntry per stream.
 *
 * For each stream this picks a Matroska CodecID (native mkv ID if one
 * exists, otherwise VFW/ACM passthrough), validates WebM restrictions,
 * and writes the type-specific sub-elements (video size / stereo mode,
 * audio rate / depth, subtitle type).  Attachment streams are skipped
 * here and handled later by mkv_write_attachments().
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mkv_write_tracks(AVFormatContext *s)
{
    MatroskaMuxContext *mkv = s->priv_data;
    AVIOContext *pb = s->pb;
    ebml_master tracks;
    int i, j, ret, default_stream_exists = 0;

    ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_TRACKS, avio_tell(pb));
    if (ret < 0) return ret;

    tracks = start_ebml_master(pb, MATROSKA_ID_TRACKS, 0);
    /* First pass: remember whether any stream is flagged default, so we
     * know when FlagDefault must be explicitly cleared below. */
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        default_stream_exists |= st->disposition & AV_DISPOSITION_DEFAULT;
    }
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        AVCodecContext *codec = st->codec;
        ebml_master subinfo, track;
        int native_id = 0;
        int qt_id = 0;
        int bit_depth = av_get_bits_per_sample(codec->codec_id);
        int sample_rate = codec->sample_rate;
        int output_sample_rate = 0;
        AVDictionaryEntry *tag;

        if (codec->codec_type == AVMEDIA_TYPE_ATTACHMENT) {
            mkv->have_attachments = 1;
            continue;
        }

        /* Fall back through increasingly indirect sources for the
         * audio bit depth. */
        if (!bit_depth)
            bit_depth = av_get_bytes_per_sample(codec->sample_fmt) << 3;
        if (!bit_depth)
            bit_depth = codec->bits_per_coded_sample;

        /* SBR (HE-AAC) may make the output rate differ from the coded rate. */
        if (codec->codec_id == AV_CODEC_ID_AAC)
            get_aac_sample_rates(s, codec, &sample_rate, &output_sample_rate);

        track = start_ebml_master(pb, MATROSKA_ID_TRACKENTRY, 0);
        put_ebml_uint (pb, MATROSKA_ID_TRACKNUMBER , i + 1);
        put_ebml_uint (pb, MATROSKA_ID_TRACKUID , i + 1);
        put_ebml_uint (pb, MATROSKA_ID_TRACKFLAGLACING , 0); // no lacing (yet)

        if ((tag = av_dict_get(st->metadata, "title", NULL, 0)))
            put_ebml_string(pb, MATROSKA_ID_TRACKNAME, tag->value);
        tag = av_dict_get(st->metadata, "language", NULL, 0);
        if (mkv->mode != MODE_WEBM || codec->codec_id != AV_CODEC_ID_WEBVTT) {
            put_ebml_string(pb, MATROSKA_ID_TRACKLANGUAGE, tag ? tag->value:"und");
        } else if (tag && tag->value) {
            put_ebml_string(pb, MATROSKA_ID_TRACKLANGUAGE, tag->value);
        }

        // The default value for TRACKFLAGDEFAULT is 1, so add element
        // if we need to clear it.
        if (default_stream_exists && !(st->disposition & AV_DISPOSITION_DEFAULT))
            put_ebml_uint(pb, MATROSKA_ID_TRACKFLAGDEFAULT, !!(st->disposition & AV_DISPOSITION_DEFAULT));

        if (st->disposition & AV_DISPOSITION_FORCED)
            put_ebml_uint(pb, MATROSKA_ID_TRACKFLAGFORCED, 1);

        if (mkv->mode == MODE_WEBM && codec->codec_id == AV_CODEC_ID_WEBVTT) {
            /* WebVTT in WebM uses dedicated CodecIDs selected by the
             * stream disposition rather than the generic lookup below. */
            const char *codec_id;
            if (st->disposition & AV_DISPOSITION_CAPTIONS) {
                codec_id = "D_WEBVTT/CAPTIONS";
                native_id = MATROSKA_TRACK_TYPE_SUBTITLE;
            } else if (st->disposition & AV_DISPOSITION_DESCRIPTIONS) {
                codec_id = "D_WEBVTT/DESCRIPTIONS";
                native_id = MATROSKA_TRACK_TYPE_METADATA;
            } else if (st->disposition & AV_DISPOSITION_METADATA) {
                codec_id = "D_WEBVTT/METADATA";
                native_id = MATROSKA_TRACK_TYPE_METADATA;
            } else {
                codec_id = "D_WEBVTT/SUBTITLES";
                native_id = MATROSKA_TRACK_TYPE_SUBTITLE;
            }
            put_ebml_string(pb, MATROSKA_ID_CODECID, codec_id);
        } else {
            // look for a codec ID string specific to mkv to use,
            // if none are found, use AVI codes
            for (j = 0; ff_mkv_codec_tags[j].id != AV_CODEC_ID_NONE; j++) {
                if (ff_mkv_codec_tags[j].id == codec->codec_id) {
                    put_ebml_string(pb, MATROSKA_ID_CODECID, ff_mkv_codec_tags[j].str);
                    native_id = 1;
                    break;
                }
            }
        }

        if (codec->codec_id == AV_CODEC_ID_OPUS) {
            /* Opus encoder delay, converted from samples to nanoseconds. */
            uint64_t codec_delay =av_rescale_q(codec->delay,
                                               (AVRational){1, codec->sample_rate},
                                               (AVRational){1, 1000000000});
            put_ebml_uint(pb, MATROSKA_ID_CODECDELAY, codec_delay);
            put_ebml_uint(pb, MATROSKA_ID_SEEKPREROLL, OPUS_SEEK_PREROLL);

        }

        /* WebM only permits a small whitelist of codecs. */
        if (mkv->mode == MODE_WEBM && !(codec->codec_id == AV_CODEC_ID_VP8 ||
                                        codec->codec_id == AV_CODEC_ID_VP9 ||
                                      ((codec->codec_id == AV_CODEC_ID_OPUS)&&(codec->strict_std_compliance <= FF_COMPLIANCE_EXPERIMENTAL)) ||
                                        codec->codec_id == AV_CODEC_ID_VORBIS ||
                                        codec->codec_id == AV_CODEC_ID_WEBVTT)) {
            av_log(s, AV_LOG_ERROR,
                   "Only VP8,VP9 video and Vorbis,Opus(experimental, use -strict -2) audio and WebVTT subtitles are supported for WebM.\n");
            return AVERROR(EINVAL);
        }

        switch (codec->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, MATROSKA_TRACK_TYPE_VIDEO);
                /* Prefer the average frame rate for DefaultDuration when it
                 * implies a longer frame duration than the time base. */
                if(st->avg_frame_rate.num && st->avg_frame_rate.den && 1.0/av_q2d(st->avg_frame_rate) > av_q2d(codec->time_base))
                    put_ebml_uint(pb, MATROSKA_ID_TRACKDEFAULTDURATION, 1E9/av_q2d(st->avg_frame_rate));
                else
                    put_ebml_uint(pb, MATROSKA_ID_TRACKDEFAULTDURATION, av_q2d(codec->time_base)*1E9);

                /* QuickTime-style storage is used when there is a mov fourcc
                 * but no usable BMP tag (or for a few codecs whose BMP tag
                 * is not the preferred representation). */
                if (!native_id &&
                    ff_codec_get_tag(ff_codec_movvideo_tags, codec->codec_id) &&
                    (!ff_codec_get_tag(ff_codec_bmp_tags, codec->codec_id)
                     || codec->codec_id == AV_CODEC_ID_SVQ1
                     || codec->codec_id == AV_CODEC_ID_SVQ3
                     || codec->codec_id == AV_CODEC_ID_CINEPAK))
                    qt_id = 1;

                if (qt_id)
                    put_ebml_string(pb, MATROSKA_ID_CODECID, "V_QUICKTIME");
                else if (!native_id) {
                    // if there is no mkv-specific codec ID, use VFW mode
                    put_ebml_string(pb, MATROSKA_ID_CODECID, "V_MS/VFW/FOURCC");
                    mkv->tracks[i].write_dts = 1;
                }

                subinfo = start_ebml_master(pb, MATROSKA_ID_TRACKVIDEO, 0);
                // XXX: interlace flag?
                put_ebml_uint (pb, MATROSKA_ID_VIDEOPIXELWIDTH , codec->width);
                put_ebml_uint (pb, MATROSKA_ID_VIDEOPIXELHEIGHT, codec->height);

                if ((tag = av_dict_get(st->metadata, "stereo_mode", NULL, 0)) ||
                    (tag = av_dict_get( s->metadata, "stereo_mode", NULL, 0))) {
                    // save stereo mode flag
                    uint64_t st_mode = MATROSKA_VIDEO_STEREO_MODE_COUNT;

                    for (j=0; j<MATROSKA_VIDEO_STEREO_MODE_COUNT; j++)
                        if (!strcmp(tag->value, ff_matroska_video_stereo_mode[j])){
                            st_mode = j;
                            break;
                        }

                    /* WebM only allows a subset of stereo modes. */
                    if ((mkv->mode == MODE_WEBM && st_mode > 3 && st_mode != 11)
                        || st_mode >= MATROSKA_VIDEO_STEREO_MODE_COUNT) {
                        av_log(s, AV_LOG_ERROR,
                               "The specified stereo mode is not valid.\n");
                        return AVERROR(EINVAL);
                    } else
                        put_ebml_uint(pb, MATROSKA_ID_VIDEOSTEREOMODE, st_mode);
                }

                if ((tag = av_dict_get(st->metadata, "alpha_mode", NULL, 0)) ||
                    (tag = av_dict_get( s->metadata, "alpha_mode", NULL, 0)) ||
                    (codec->pix_fmt == AV_PIX_FMT_YUVA420P)) {
                    put_ebml_uint(pb, MATROSKA_ID_VIDEOALPHAMODE, 1);
                }

                /* Non-square pixels: publish the display size derived from
                 * the sample aspect ratio. */
                if (st->sample_aspect_ratio.num) {
                    int64_t d_width = av_rescale(codec->width, st->sample_aspect_ratio.num, st->sample_aspect_ratio.den);
                    if (d_width > INT_MAX) {
                        av_log(s, AV_LOG_ERROR, "Overflow in display width\n");
                        return AVERROR(EINVAL);
                    }
                    put_ebml_uint(pb, MATROSKA_ID_VIDEODISPLAYWIDTH , d_width);
                    put_ebml_uint(pb, MATROSKA_ID_VIDEODISPLAYHEIGHT, codec->height);
                }

                if (codec->codec_id == AV_CODEC_ID_RAWVIDEO) {
                    uint32_t color_space = av_le2ne32(codec->codec_tag);
                    put_ebml_binary(pb, MATROSKA_ID_VIDEOCOLORSPACE, &color_space, sizeof(color_space));
                }
                end_ebml_master(pb, subinfo);
                break;

            case AVMEDIA_TYPE_AUDIO:
                put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, MATROSKA_TRACK_TYPE_AUDIO);

                if (!native_id)
                    // no mkv-specific ID, use ACM mode
                    put_ebml_string(pb, MATROSKA_ID_CODECID, "A_MS/ACM");

                subinfo = start_ebml_master(pb, MATROSKA_ID_TRACKAUDIO, 0);
                put_ebml_uint (pb, MATROSKA_ID_AUDIOCHANNELS , codec->channels);
                put_ebml_float (pb, MATROSKA_ID_AUDIOSAMPLINGFREQ, sample_rate);
                if (output_sample_rate)
                    put_ebml_float(pb, MATROSKA_ID_AUDIOOUTSAMPLINGFREQ, output_sample_rate);
                if (bit_depth)
                    put_ebml_uint(pb, MATROSKA_ID_AUDIOBITDEPTH, bit_depth);
                end_ebml_master(pb, subinfo);
                break;

            case AVMEDIA_TYPE_SUBTITLE:
                if (!native_id) {
                    av_log(s, AV_LOG_ERROR, "Subtitle codec %d is not supported.\n", codec->codec_id);
                    return AVERROR(ENOSYS);
                }

                /* For WebM WebVTT, native_id already holds the track type
                 * chosen from the disposition above. */
                if (mkv->mode != MODE_WEBM || codec->codec_id != AV_CODEC_ID_WEBVTT)
                    native_id = MATROSKA_TRACK_TYPE_SUBTITLE;

                put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, native_id);
                break;
            default:
                av_log(s, AV_LOG_ERROR, "Only audio, video, and subtitles are supported for Matroska.\n");
                return AVERROR(EINVAL);
        }

        if (mkv->mode != MODE_WEBM || codec->codec_id != AV_CODEC_ID_WEBVTT) {
            ret = mkv_write_codecprivate(s, pb, codec, native_id, qt_id);
            if (ret < 0) return ret;
        }

        end_ebml_master(pb, track);

        // ms precision is the de-facto standard timescale for mkv files
        avpriv_set_pts_info(st, 64, 1, 1000);
    }
    end_ebml_master(pb, tracks);
    return 0;
}
 
/**
 * Write the Chapters master element: a single EditionEntry containing
 * one ChapterAtom per AVChapter.  A no-op when the file has no chapters
 * or they have already been written (this is called from both
 * mkv_write_header() and mkv_write_trailer()).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mkv_write_chapters(AVFormatContext *s)
{
    MatroskaMuxContext *mkv = s->priv_data;
    AVIOContext *pb = s->pb;
    ebml_master chapters, editionentry;
    AVRational scale = {1, 1E9}; // chapter timestamps are stored in ns
    int i, ret;

    if (!s->nb_chapters || mkv->wrote_chapters)
        return 0;

    ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_CHAPTERS, avio_tell(pb));
    if (ret < 0) return ret;

    chapters = start_ebml_master(pb, MATROSKA_ID_CHAPTERS , 0);
    editionentry = start_ebml_master(pb, MATROSKA_ID_EDITIONENTRY, 0);
    put_ebml_uint(pb, MATROSKA_ID_EDITIONFLAGDEFAULT, 1);
    put_ebml_uint(pb, MATROSKA_ID_EDITIONFLAGHIDDEN , 0);
    for (i = 0; i < s->nb_chapters; i++) {
        ebml_master chapteratom, chapterdisplay;
        AVChapter *c = s->chapters[i];
        AVDictionaryEntry *t = NULL;

        chapteratom = start_ebml_master(pb, MATROSKA_ID_CHAPTERATOM, 0);
        /* chapter_id_offset keeps ChapterUIDs strictly positive even if
         * AVChapter ids are zero or negative. */
        put_ebml_uint(pb, MATROSKA_ID_CHAPTERUID, c->id + mkv->chapter_id_offset);
        put_ebml_uint(pb, MATROSKA_ID_CHAPTERTIMESTART,
                      av_rescale_q(c->start, c->time_base, scale));
        put_ebml_uint(pb, MATROSKA_ID_CHAPTERTIMEEND,
                      av_rescale_q(c->end, c->time_base, scale));
        put_ebml_uint(pb, MATROSKA_ID_CHAPTERFLAGHIDDEN , 0);
        put_ebml_uint(pb, MATROSKA_ID_CHAPTERFLAGENABLED, 1);
        if ((t = av_dict_get(c->metadata, "title", NULL, 0))) {
            chapterdisplay = start_ebml_master(pb, MATROSKA_ID_CHAPTERDISPLAY, 0);
            put_ebml_string(pb, MATROSKA_ID_CHAPSTRING, t->value);
            put_ebml_string(pb, MATROSKA_ID_CHAPLANG , "und");
            end_ebml_master(pb, chapterdisplay);
        }
        end_ebml_master(pb, chapteratom);
    }
    end_ebml_master(pb, editionentry);
    end_ebml_master(pb, chapters);

    mkv->wrote_chapters = 1;
    return 0;
}
 
/**
 * Write one SimpleTag element for a metadata entry.
 *
 * The key is normalized to the Matroska convention (uppercase, spaces
 * replaced by underscores).  A trailing "-xxx" suffix that maps to an
 * ISO 639-2 bibliographic code is stripped from the key and emitted as
 * the TagLanguage instead.
 *
 * @param pb output context
 * @param t  dictionary entry to write (key and value must be non-NULL)
 */
static void mkv_write_simpletag(AVIOContext *pb, AVDictionaryEntry *t)
{
    uint8_t *key = av_strdup(t->key);
    uint8_t *p = key;
    const uint8_t *lang = NULL;
    ebml_master tag;

    /* av_strdup() can fail; without this guard the strrchr()/loop below
     * would dereference NULL.  Skip the tag on OOM. */
    if (!key)
        return;

    if ((p = strrchr(p, '-')) &&
        (lang = av_convert_lang_to(p + 1, AV_LANG_ISO639_2_BIBL)))
        *p = 0;

    /* Normalize the key: uppercase, '_' instead of ' '. */
    p = key;
    while (*p) {
        if (*p == ' ')
            *p = '_';
        else if (*p >= 'a' && *p <= 'z')
            *p -= 'a' - 'A';
        p++;
    }

    tag = start_ebml_master(pb, MATROSKA_ID_SIMPLETAG, 0);
    put_ebml_string(pb, MATROSKA_ID_TAGNAME, key);
    if (lang)
        put_ebml_string(pb, MATROSKA_ID_TAGLANG, lang);
    put_ebml_string(pb, MATROSKA_ID_TAGSTRING, t->value);
    end_ebml_master(pb, tag);

    av_freep(&key);
}
 
/**
 * Write one Tag element (with its Targets) for the given dictionary.
 *
 * @param s         format context
 * @param m         metadata dictionary to serialize
 * @param elementid target EBML id (e.g. TAGTARGETS_TRACKUID), 0 for global
 * @param uid       uid written under elementid (ignored if elementid is 0)
 * @param tags      caller-owned Tags master; opened lazily on first use
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mkv_write_tag(AVFormatContext *s, AVDictionary *m, unsigned int elementid,
                         unsigned int uid, ebml_master *tags)
{
    MatroskaMuxContext *mkv = s->priv_data;
    ebml_master tag_master, targets_master;
    AVDictionaryEntry *entry = NULL;
    int ret;

    /* Open the file-level Tags element the first time a tag is written. */
    if (!tags->pos) {
        ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_TAGS,
                                     avio_tell(s->pb));
        if (ret < 0)
            return ret;

        *tags = start_ebml_master(s->pb, MATROSKA_ID_TAGS, 0);
    }

    tag_master     = start_ebml_master(s->pb, MATROSKA_ID_TAG, 0);
    targets_master = start_ebml_master(s->pb, MATROSKA_ID_TAGTARGETS, 0);
    if (elementid)
        put_ebml_uint(s->pb, elementid, uid);
    end_ebml_master(s->pb, targets_master);

    /* "title" and "stereo_mode" are stored elsewhere in the file, so
     * they are not duplicated as SimpleTags. */
    while ((entry = av_dict_get(m, "", entry, AV_DICT_IGNORE_SUFFIX))) {
        if (av_strcasecmp(entry->key, "title") &&
            av_strcasecmp(entry->key, "stereo_mode"))
            mkv_write_simpletag(s->pb, entry);
    }

    end_ebml_master(s->pb, tag_master);
    return 0;
}
 
/**
 * Check whether a dictionary contains at least one entry that would be
 * written as a SimpleTag ("title" and "stereo_mode" are stored elsewhere
 * in the file and therefore do not count).
 *
 * @return 1 if a Tag element is needed, 0 otherwise
 */
static int mkv_check_tag(AVDictionary *m)
{
    AVDictionaryEntry *entry = NULL;

    while ((entry = av_dict_get(m, "", entry, AV_DICT_IGNORE_SUFFIX))) {
        if (av_strcasecmp(entry->key, "title") &&
            av_strcasecmp(entry->key, "stereo_mode"))
            return 1;
    }

    return 0;
}
 
/**
 * Write all Tags: global (container) metadata, per-stream metadata and
 * per-chapter metadata.  The Tags master element is only opened (inside
 * mkv_write_tag) if at least one dictionary has something to write.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mkv_write_tags(AVFormatContext *s)
{
    MatroskaMuxContext *mkv = s->priv_data;
    ebml_master tags = {0};
    int i, ret;

    ff_metadata_conv_ctx(s, ff_mkv_metadata_conv, NULL);

    if (mkv_check_tag(s->metadata)) {
        ret = mkv_write_tag(s, s->metadata, 0, 0, &tags);
        if (ret < 0) return ret;
    }

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

        if (!mkv_check_tag(st->metadata))
            continue;

        // track UIDs are 1-based (see mkv_write_tracks)
        ret = mkv_write_tag(s, st->metadata, MATROSKA_ID_TAGTARGETS_TRACKUID, i + 1, &tags);
        if (ret < 0) return ret;
    }

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *ch = s->chapters[i];

        if (!mkv_check_tag(ch->metadata))
            continue;

        // same UID offset as used when writing the chapters themselves
        ret = mkv_write_tag(s, ch->metadata, MATROSKA_ID_TAGTARGETS_CHAPTERUID, ch->id + mkv->chapter_id_offset, &tags);
        if (ret < 0) return ret;
    }

    // tags.pos is nonzero only if mkv_write_tag opened the master element
    if (tags.pos)
        end_ebml_master(s->pb, tags);
    return 0;
}
 
/**
 * Write the Attachments master element, one AttachedFile per attachment
 * stream.  The filename tag is mandatory; the mimetype is taken from
 * metadata or deduced from the codec id.  FileUIDs are random, except in
 * bitexact mode where a SHA-160 of the attachment data is used so output
 * is reproducible.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mkv_write_attachments(AVFormatContext *s)
{
    MatroskaMuxContext *mkv = s->priv_data;
    AVIOContext *pb = s->pb;
    ebml_master attachments;
    AVLFG c;
    int i, ret;

    if (!mkv->have_attachments)
        return 0;

    av_lfg_init(&c, av_get_random_seed());

    ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_ATTACHMENTS, avio_tell(pb));
    if (ret < 0) return ret;

    attachments = start_ebml_master(pb, MATROSKA_ID_ATTACHMENTS, 0);

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        ebml_master attached_file;
        AVDictionaryEntry *t;
        const char *mimetype = NULL;
        uint64_t fileuid;

        if (st->codec->codec_type != AVMEDIA_TYPE_ATTACHMENT)
            continue;

        attached_file = start_ebml_master(pb, MATROSKA_ID_ATTACHEDFILE, 0);

        /* extra parentheses: assignment used as condition, on purpose */
        if ((t = av_dict_get(st->metadata, "title", NULL, 0)))
            put_ebml_string(pb, MATROSKA_ID_FILEDESC, t->value);
        if (!(t = av_dict_get(st->metadata, "filename", NULL, 0))) {
            av_log(s, AV_LOG_ERROR, "Attachment stream %d has no filename tag.\n", i);
            return AVERROR(EINVAL);
        }
        put_ebml_string(pb, MATROSKA_ID_FILENAME, t->value);
        if ((t = av_dict_get(st->metadata, "mimetype", NULL, 0)))
            mimetype = t->value;
        else if (st->codec->codec_id != AV_CODEC_ID_NONE ) {
            int j;  /* was 'i': do not shadow the outer stream index */
            for (j = 0; ff_mkv_mime_tags[j].id != AV_CODEC_ID_NONE; j++)
                if (ff_mkv_mime_tags[j].id == st->codec->codec_id) {
                    mimetype = ff_mkv_mime_tags[j].str;
                    break;
                }
        }
        if (!mimetype) {
            av_log(s, AV_LOG_ERROR, "Attachment stream %d has no mimetype tag and "
                                    "it cannot be deduced from the codec id.\n", i);
            return AVERROR(EINVAL);
        }

        if (st->codec->flags & CODEC_FLAG_BITEXACT) {
            /* deterministic UID: SHA-160 digest of the attachment data */
            struct AVSHA *sha = av_sha_alloc();
            uint8_t digest[20];
            if (!sha)
                return AVERROR(ENOMEM);
            av_sha_init(sha, 160);
            av_sha_update(sha, st->codec->extradata, st->codec->extradata_size);
            av_sha_final(sha, digest);
            av_free(sha);
            fileuid = AV_RL64(digest);
        } else {
            fileuid = av_lfg_get(&c);
        }
        av_log(s, AV_LOG_VERBOSE, "Using %.16"PRIx64" for attachment %d\n",
               fileuid, i);

        put_ebml_string(pb, MATROSKA_ID_FILEMIMETYPE, mimetype);
        put_ebml_binary(pb, MATROSKA_ID_FILEDATA, st->codec->extradata, st->codec->extradata_size);
        put_ebml_uint(pb, MATROSKA_ID_FILEUID, fileuid);
        end_ebml_master(pb, attached_file);
    }
    end_ebml_master(pb, attachments);

    return 0;
}
 
/**
 * Write the EBML header, open the Segment and write everything that
 * precedes the first Cluster: SegmentInfo, Tracks and (for Matroska,
 * not WebM) Chapters, Tags and Attachments.  Also sets up the seek
 * head, the cues structure and the cluster limits.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mkv_write_header(AVFormatContext *s)
{
    MatroskaMuxContext *mkv = s->priv_data;
    AVIOContext *pb = s->pb;
    ebml_master ebml_header, segment_info;
    AVDictionaryEntry *tag;
    int ret, i;

    // the muxer mode follows the selected output format name
    if (!strcmp(s->oformat->name, "webm")) mkv->mode = MODE_WEBM;
    else                                   mkv->mode = MODE_MATROSKAv2;

    if (s->avoid_negative_ts < 0)
        s->avoid_negative_ts = 1;

    /* Reject codecs this muxer cannot store yet. */
    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->codec->codec_id == AV_CODEC_ID_ATRAC3 ||
            s->streams[i]->codec->codec_id == AV_CODEC_ID_COOK ||
            s->streams[i]->codec->codec_id == AV_CODEC_ID_RA_288 ||
            s->streams[i]->codec->codec_id == AV_CODEC_ID_SIPR ||
            s->streams[i]->codec->codec_id == AV_CODEC_ID_RV10 ||
            s->streams[i]->codec->codec_id == AV_CODEC_ID_RV20) {
            av_log(s, AV_LOG_ERROR,
                   "The Matroska muxer does not yet support muxing %s\n",
                   avcodec_get_name(s->streams[i]->codec->codec_id));
            return AVERROR_PATCHWELCOME;
        }

    mkv->tracks = av_mallocz(s->nb_streams * sizeof(*mkv->tracks));
    if (!mkv->tracks)
        return AVERROR(ENOMEM);

    ebml_header = start_ebml_master(pb, EBML_ID_HEADER, 0);
    put_ebml_uint (pb, EBML_ID_EBMLVERSION , 1);
    put_ebml_uint (pb, EBML_ID_EBMLREADVERSION , 1);
    put_ebml_uint (pb, EBML_ID_EBMLMAXIDLENGTH , 4);
    put_ebml_uint (pb, EBML_ID_EBMLMAXSIZELENGTH , 8);
    put_ebml_string (pb, EBML_ID_DOCTYPE , s->oformat->name);
    put_ebml_uint (pb, EBML_ID_DOCTYPEVERSION , 4);
    put_ebml_uint (pb, EBML_ID_DOCTYPEREADVERSION , 2);
    end_ebml_master(pb, ebml_header);

    mkv->segment = start_ebml_master(pb, MATROSKA_ID_SEGMENT, 0);
    mkv->segment_offset = avio_tell(pb);

    // we write 2 seek heads - one at the end of the file to point to each
    // cluster, and one at the beginning to point to all other level one
    // elements (including the seek head at the end of the file), which
    // isn't more than 10 elements if we only write one of each other
    // currently defined level 1 element
    mkv->main_seekhead = mkv_start_seekhead(pb, mkv->segment_offset, 10);
    if (!mkv->main_seekhead)
        return AVERROR(ENOMEM);

    ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_INFO, avio_tell(pb));
    if (ret < 0) return ret;

    segment_info = start_ebml_master(pb, MATROSKA_ID_INFO, 0);
    put_ebml_uint(pb, MATROSKA_ID_TIMECODESCALE, 1000000);
    if ((tag = av_dict_get(s->metadata, "title", NULL, 0)))
        put_ebml_string(pb, MATROSKA_ID_TITLE, tag->value);
    /* In bitexact mode, skip the random SegmentUID and use a fixed
     * application string so output is reproducible. */
    if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
        uint32_t segment_uid[4];
        AVLFG lfg;

        av_lfg_init(&lfg, av_get_random_seed());

        for (i = 0; i < 4; i++)
            segment_uid[i] = av_lfg_get(&lfg);

        put_ebml_string(pb, MATROSKA_ID_MUXINGAPP , LIBAVFORMAT_IDENT);
        put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, LIBAVFORMAT_IDENT);
        put_ebml_binary(pb, MATROSKA_ID_SEGMENTUID, segment_uid, 16);
    } else {
        const char *ident = "Lavf";
        put_ebml_string(pb, MATROSKA_ID_MUXINGAPP , ident);
        put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, ident);
    }

    if (tag = av_dict_get(s->metadata, "creation_time", NULL, 0)) {
        // Adjust time so it's relative to 2001-01-01 and convert to nanoseconds.
        int64_t date_utc = (ff_iso8601_to_unix_time(tag->value) - 978307200) * 1000000000;
        uint8_t date_utc_buf[8];
        AV_WB64(date_utc_buf, date_utc);
        put_ebml_binary(pb, MATROSKA_ID_DATEUTC, date_utc_buf, 8);
    }

    // reserve space for the duration
    mkv->duration = 0;
    mkv->duration_offset = avio_tell(pb);
    put_ebml_void(pb, 11); // assumes double-precision float to be written
    end_ebml_master(pb, segment_info);

    ret = mkv_write_tracks(s);
    if (ret < 0) return ret;

    /* Offset makes every ChapterUID >= 1 (AVChapter ids may be <= 0). */
    for (i = 0; i < s->nb_chapters; i++)
        mkv->chapter_id_offset = FFMAX(mkv->chapter_id_offset, 1LL - s->chapters[i]->id);

    if (mkv->mode != MODE_WEBM) {
        ret = mkv_write_chapters(s);
        if (ret < 0) return ret;

        ret = mkv_write_tags(s);
        if (ret < 0) return ret;

        ret = mkv_write_attachments(s);
        if (ret < 0) return ret;
    }

    /* Non-seekable output: the seek head cannot be rewritten later, so
     * emit it now. */
    if (!s->pb->seekable)
        mkv_write_seekhead(pb, mkv->main_seekhead);

    mkv->cues = mkv_start_cues(mkv->segment_offset);
    if (mkv->cues == NULL)
        return AVERROR(ENOMEM);

    /* Optionally reserve space up front so the cues can be written near
     * the start of the file in the trailer. */
    if (pb->seekable && mkv->reserve_cues_space) {
        mkv->cues_pos = avio_tell(pb);
        put_ebml_void(pb, mkv->reserve_cues_space);
    }

    av_init_packet(&mkv->cur_audio_pkt);
    mkv->cur_audio_pkt.size = 0;
    mkv->cluster_pos = -1;  // no cluster open yet

    avio_flush(pb);

    // start a new cluster every 5 MB or 5 sec, or 32k / 1 sec for streaming or
    // after 4k and on a keyframe
    if (pb->seekable) {
        if (mkv->cluster_time_limit < 0)
            mkv->cluster_time_limit = 5000;
        if (mkv->cluster_size_limit < 0)
            mkv->cluster_size_limit = 5 * 1024 * 1024;
    } else {
        if (mkv->cluster_time_limit < 0)
            mkv->cluster_time_limit = 1000;
        if (mkv->cluster_size_limit < 0)
            mkv->cluster_size_limit = 32 * 1024;
    }

    return 0;
}
 
/**
 * Compute an upper bound for the size of a BlockGroup that will hold a
 * Block of pkt_size payload bytes plus a BlockDuration element, so the
 * group can be written with a known length instead of an unknown one.
 */
static int mkv_blockgroup_size(int pkt_size)
{
    /* Block payload: packet data + 4 fixed bytes (track number,
     * 16-bit relative timecode, flags). */
    int total = pkt_size + 4;

    total += ebml_num_size(total); /* length field of the Block          */
    total += 2;                    /* EBML IDs of Block + BlockDuration  */
    total += 8;                    /* worst-case BlockDuration payload   */
    total += ebml_num_size(total); /* length field of the BlockGroup     */
    total += 1;                    /* BlockGroup EBML ID                 */
    return total;
}
 
/**
 * Parse the start and end timestamps out of an ASS "Dialogue" line
 * ("...,H:MM:SS.CC,H:MM:SS.CC,...") and return the cue duration.
 *
 * @param p beginning of the dialogue line
 * @return duration in milliseconds, or 0 if the line does not parse
 */
static int ass_get_duration(const uint8_t *p)
{
    int h0, m0, s0, c0, h1, m1, s1, c1;
    uint64_t t_start, t_end;

    /* Skip the leading field up to the first comma, then read both
     * timestamps (centisecond precision). */
    if (sscanf(p, "%*[^,],%d:%d:%d%*c%d,%d:%d:%d%*c%d",
               &h0, &m0, &s0, &c0, &h1, &m1, &s1, &c1) != 8)
        return 0;

    t_start = 10LL*c0 + 1000LL*s0 + 60000LL*m0 + 3600000LL*h0;
    t_end   = 10LL*c1 + 1000LL*s1 + 60000LL*m1 + 3600000LL*h1;
    return t_end - t_start;
}
 
#if FF_API_ASS_SSA
/**
 * Split an old-style SSA packet (which may contain several newline
 * separated "Dialogue" lines) into one BlockGroup per line.
 *
 * Each line is rewritten as "ReadOrder,Layer,<rest>" where the first
 * three comma-separated fields of the original line are dropped.
 *
 * @return the longest duration among the written events, in ms
 */
static int mkv_write_ass_blocks(AVFormatContext *s, AVIOContext *pb, AVPacket *pkt)
{
    MatroskaMuxContext *mkv = s->priv_data;
    int i, layer = 0, max_duration = 0, size, line_size, data_size = pkt->size;
    uint8_t *start, *end, *data = pkt->data;
    ebml_master blockgroup;
    char buffer[2048];

    while (data_size) {
        int duration = ass_get_duration(data);
        max_duration = FFMAX(duration, max_duration);
        // isolate one line (line_size includes the '\n')
        end = memchr(data, '\n', data_size);
        size = line_size = end ? end-data+1 : data_size;
        // drop the trailing "\r\n" or "\n" from the copied portion
        size -= end ? (end[-1]=='\r')+1 : 0;
        start = data;
        // skip the first three comma-separated fields of the line
        for (i=0; i<3; i++, start++)
            if (!(start = memchr(start, ',', size-(start-data))))
                return max_duration;
        size -= start - data;
        sscanf(data, "Dialogue: %d,", &layer);
        // prepend "ReadOrder,Layer," as required by the mkv SSA storage
        i = snprintf(buffer, sizeof(buffer), "%"PRId64",%d,",
                     s->streams[pkt->stream_index]->nb_frames, layer);
        size = FFMIN(i+size, sizeof(buffer));
        memcpy(buffer+i, start, size-i);

        av_log(s, AV_LOG_DEBUG, "Writing block at offset %" PRIu64 ", size %d, "
               "pts %" PRId64 ", duration %d\n",
               avio_tell(pb), size, pkt->pts, duration);
        blockgroup = start_ebml_master(pb, MATROSKA_ID_BLOCKGROUP, mkv_blockgroup_size(size));
        put_ebml_id(pb, MATROSKA_ID_BLOCK);
        put_ebml_num(pb, size+4, 0);
        avio_w8(pb, 0x80 | (pkt->stream_index + 1));     // this assumes stream_index is less than 126
        avio_wb16(pb, pkt->pts - mkv->cluster_pts);
        avio_w8(pb, 0);
        avio_write(pb, buffer, size);
        put_ebml_uint(pb, MATROSKA_ID_BLOCKDURATION, duration);
        end_ebml_master(pb, blockgroup);

        data += line_size;
        data_size -= line_size;
    }

    return max_duration;
}
#endif
 
/**
 * Re-pack a WavPack packet for Matroska storage: for every WavPack
 * block, the full 32-byte header is replaced by the compact form
 * (samples count only on the initial block, then flags + crc, and the
 * block size only when the frame is split over several blocks).
 *
 * @param src  input packet data (sequence of WavPack blocks)
 * @param pdst on success, *pdst is a newly allocated buffer the caller
 *             must free
 * @param size in: input size, out: size of the repacked data
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mkv_strip_wavpack(const uint8_t *src, uint8_t **pdst, int *size)
{
    uint8_t *dst;
    int srclen = *size;
    int offset = 0;
    int ret;

    /* The stripped form is never larger than the input (each 32-byte
     * header shrinks to at most 16 bytes), so srclen is enough. */
    dst = av_malloc(srclen);
    if (!dst)
        return AVERROR(ENOMEM);

    while (srclen >= WV_HEADER_SIZE) {
        WvHeader header;

        ret = ff_wv_parse_header(&header, src);
        if (ret < 0)
            goto fail;
        src    += WV_HEADER_SIZE;
        srclen -= WV_HEADER_SIZE;

        // truncated block: the declared payload is not all there
        if (srclen < header.blocksize) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        if (header.initial) {
            AV_WL32(dst + offset, header.samples);
            offset += 4;
        }
        AV_WL32(dst + offset,     header.flags);
        AV_WL32(dst + offset + 4, header.crc);
        offset += 8;

        // multi-block frame: each partial block keeps its size field
        if (!(header.initial && header.final)) {
            AV_WL32(dst + offset, header.blocksize);
            offset += 4;
        }

        memcpy(dst + offset, src, header.blocksize);
        src    += header.blocksize;
        srclen -= header.blocksize;
        offset += header.blocksize;
    }

    *pdst = dst;
    *size = offset;

    return 0;
fail:
    av_freep(&dst);
    return ret;
}
 
/**
 * Write one packet as a (Simple)Block.
 *
 * Handles codec-specific payload rewriting (H.264 Annex B -> length
 * prefixed, WavPack header stripping, ProRes atom removal) and wraps
 * the block in a BlockGroup when DiscardPadding or BlockAdditions
 * side data must be written alongside it.
 *
 * @param blockid MATROSKA_ID_SIMPLEBLOCK or MATROSKA_ID_BLOCK
 * @param flags   block flags byte (e.g. keyframe bit for SimpleBlock)
 */
static void mkv_write_block(AVFormatContext *s, AVIOContext *pb,
                            unsigned int blockid, AVPacket *pkt, int flags)
{
    MatroskaMuxContext *mkv = s->priv_data;
    AVCodecContext *codec = s->streams[pkt->stream_index]->codec;
    uint8_t *data = NULL, *side_data = NULL;
    int offset = 0, size = pkt->size, side_data_size = 0;
    int64_t ts = mkv->tracks[pkt->stream_index].write_dts ? pkt->dts : pkt->pts;
    uint64_t additional_id = 0, discard_padding = 0;
    ebml_master block_group, block_additions, block_more;

    av_log(s, AV_LOG_DEBUG, "Writing block at offset %" PRIu64 ", size %d, "
           "pts %" PRId64 ", dts %" PRId64 ", duration %d, flags %d\n",
           avio_tell(pb), pkt->size, pkt->pts, pkt->dts, pkt->duration, flags);
    /* H.264 with Annex B start codes in extradata: convert the payload
     * to the length-prefixed form Matroska expects. */
    if (codec->codec_id == AV_CODEC_ID_H264 && codec->extradata_size > 0 &&
        (AV_RB24(codec->extradata) == 1 || AV_RB32(codec->extradata) == 1))
        ff_avc_parse_nal_units_buf(pkt->data, &data, &size);
    else if (codec->codec_id == AV_CODEC_ID_WAVPACK) {
        int ret = mkv_strip_wavpack(pkt->data, &data, &size);
        if (ret < 0) {
            av_log(s, AV_LOG_ERROR, "Error stripping a WavPack packet.\n");
            return;
        }
    } else
        data = pkt->data;

    if (codec->codec_id == AV_CODEC_ID_PRORES) {
        /* Matroska specification requires to remove the first QuickTime atom
         */
        size -= 8;
        offset = 8;
    }

    side_data = av_packet_get_side_data(pkt,
                                        AV_PKT_DATA_SKIP_SAMPLES,
                                        &side_data_size);

    if (side_data && side_data_size >= 10) {
        // bytes 4..7 of the side data hold the trailing skip, in samples
        discard_padding = av_rescale_q(AV_RL32(side_data + 4),
                                       (AVRational){1, codec->sample_rate},
                                       (AVRational){1, 1000000000});
    }

    side_data = av_packet_get_side_data(pkt,
                                        AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
                                        &side_data_size);
    if (side_data) {
        // first 8 bytes: the BlockAddID; the rest is the additional data
        additional_id = AV_RB64(side_data);
        side_data += 8;
        side_data_size -= 8;
    }

    /* These extra elements only exist inside a BlockGroup, so switch
     * from SimpleBlock to Block when either is present. */
    if ((side_data_size && additional_id == 1) || discard_padding) {
        block_group = start_ebml_master(pb, MATROSKA_ID_BLOCKGROUP, 0);
        blockid = MATROSKA_ID_BLOCK;
    }

    put_ebml_id(pb, blockid);
    put_ebml_num(pb, size+4, 0);
    avio_w8(pb, 0x80 | (pkt->stream_index + 1));     // this assumes stream_index is less than 126
    avio_wb16(pb, ts - mkv->cluster_pts);
    avio_w8(pb, flags);
    avio_write(pb, data + offset, size);
    if (data != pkt->data)
        av_free(data);

    if (discard_padding) {
        put_ebml_uint(pb, MATROSKA_ID_DISCARDPADDING, discard_padding);
    }

    if (side_data_size && additional_id == 1) {
        block_additions = start_ebml_master(pb, MATROSKA_ID_BLOCKADDITIONS, 0);
        block_more = start_ebml_master(pb, MATROSKA_ID_BLOCKMORE, 0);
        put_ebml_uint(pb, MATROSKA_ID_BLOCKADDID, 1);
        put_ebml_id(pb, MATROSKA_ID_BLOCKADDITIONAL);
        put_ebml_num(pb, side_data_size, 0);
        avio_write(pb, side_data, side_data_size);
        end_ebml_master(pb, block_more);
        end_ebml_master(pb, block_additions);
    }
    if ((side_data_size && additional_id == 1) || discard_padding) {
        end_ebml_master(pb, block_group);
    }
}
 
/**
 * Locate the SRT timing line ("HH:MM:SS,mmm --> HH:MM:SS,mmm") within
 * the first two lines of the packet and compute the cue duration.
 * *buf is advanced past every line examined (so it ends up pointing at
 * the subtitle text when the timing line was found).
 *
 * @return duration in milliseconds, or 0 if no timing line was found
 */
static int srt_get_duration(uint8_t **buf)
{
    int line, duration = 0;

    for (line = 0; line < 2 && !duration; line++) {
        int sh, sm, ss, sms, eh, em, es, ems;
        if (sscanf(*buf, "%d:%2d:%2d%*1[,.]%3d --> %d:%2d:%2d%*1[,.]%3d",
                   &sh, &sm, &ss, &sms, &eh, &em, &es, &ems) == 8) {
            /* collapse each timestamp down to milliseconds */
            sm  += 60   * sh;   em  += 60   * eh;
            ss  += 60   * sm;   es  += 60   * em;
            sms += 1000 * ss;   ems += 1000 * es;
            duration = ems - sms;
        }
        *buf += ff_subtitles_next_line(*buf);
    }
    return duration;
}
 
/**
 * Write one SRT packet as a BlockGroup with a BlockDuration.
 * The cue index and timing lines are stripped; only the subtitle text
 * is stored in the Block.
 *
 * @return the cue duration in milliseconds
 */
static int mkv_write_srt_blocks(AVFormatContext *s, AVIOContext *pb, AVPacket *pkt)
{
    ebml_master group;
    AVPacket trimmed = *pkt;
    /* srt_get_duration() advances trimmed.data past the header lines. */
    int64_t duration = srt_get_duration(&trimmed.data);

    trimmed.size -= trimmed.data - pkt->data;

    group = start_ebml_master(pb, MATROSKA_ID_BLOCKGROUP,
                              mkv_blockgroup_size(trimmed.size));
    mkv_write_block(s, pb, MATROSKA_ID_BLOCK, &trimmed, 0);
    put_ebml_uint(pb, MATROSKA_ID_BLOCKDURATION, duration);
    end_ebml_master(pb, group);

    return duration;
}
 
/**
 * Write one WebVTT cue as a BlockGroup.  The Block payload is the
 * identifier, settings and cue text joined by newlines, as required by
 * the WebVTT-in-WebM storage format.
 *
 * @return the cue duration (pkt->duration)
 */
static int mkv_write_vtt_blocks(AVFormatContext *s, AVIOContext *pb, AVPacket *pkt)
{
    MatroskaMuxContext *mkv = s->priv_data;
    ebml_master blockgroup;
    int id_size, settings_size, size;
    uint8_t *id, *settings;
    int64_t ts = mkv->tracks[pkt->stream_index].write_dts ? pkt->dts : pkt->pts;
    const int flags = 0;

    id_size = 0;
    id = av_packet_get_side_data(pkt, AV_PKT_DATA_WEBVTT_IDENTIFIER,
                                 &id_size);

    settings_size = 0;
    settings = av_packet_get_side_data(pkt, AV_PKT_DATA_WEBVTT_SETTINGS,
                                       &settings_size);

    // "<id>\n<settings>\n<text>": two separators plus the three parts
    size = id_size + 1 + settings_size + 1 + pkt->size;

    av_log(s, AV_LOG_DEBUG, "Writing block at offset %" PRIu64 ", size %d, "
           "pts %" PRId64 ", dts %" PRId64 ", duration %d, flags %d\n",
           avio_tell(pb), size, pkt->pts, pkt->dts, pkt->duration, flags);

    blockgroup = start_ebml_master(pb, MATROSKA_ID_BLOCKGROUP, mkv_blockgroup_size(size));

    put_ebml_id(pb, MATROSKA_ID_BLOCK);
    put_ebml_num(pb, size+4, 0);
    avio_w8(pb, 0x80 | (pkt->stream_index + 1));     // this assumes stream_index is less than 126
    avio_wb16(pb, ts - mkv->cluster_pts);
    avio_w8(pb, flags);
    avio_printf(pb, "%.*s\n%.*s\n%.*s", id_size, id, settings_size, settings, pkt->size, pkt->data);

    put_ebml_uint(pb, MATROSKA_ID_BLOCKDURATION, pkt->duration);
    end_ebml_master(pb, blockgroup);

    return pkt->duration;
}
 
/**
 * Drain the dynamic (in-memory) buffer used for non-seekable output
 * into the real output context and release it.  No-op when no dynamic
 * buffer is open.
 */
static void mkv_flush_dynbuf(AVFormatContext *s)
{
    MatroskaMuxContext *mkv = s->priv_data;
    uint8_t *buffered;
    int buffered_len;

    if (!mkv->dyn_bc)
        return;

    buffered_len = avio_close_dyn_buf(mkv->dyn_bc, &buffered);
    avio_write(s->pb, buffered, buffered_len);
    av_free(buffered);
    mkv->dyn_bc = NULL;
}
 
/**
 * Actually write one packet into the current cluster, opening a new
 * cluster if none is open.  Audio/video go out as SimpleBlocks;
 * subtitles get codec-specific BlockGroup handling.  Cue points are
 * recorded for video keyframes and all subtitles.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mkv_write_packet_internal(AVFormatContext *s, AVPacket *pkt)
{
    MatroskaMuxContext *mkv = s->priv_data;
    AVIOContext *pb = s->pb;
    AVCodecContext *codec = s->streams[pkt->stream_index]->codec;
    int keyframe = !!(pkt->flags & AV_PKT_FLAG_KEY);
    int duration = pkt->duration;
    int ret;
    int64_t ts = mkv->tracks[pkt->stream_index].write_dts ? pkt->dts : pkt->pts;
    int64_t relative_packet_pos;

    if (ts == AV_NOPTS_VALUE) {
        av_log(s, AV_LOG_ERROR, "Can't write packet with unknown timestamp\n");
        return AVERROR(EINVAL);
    }

    /* Non-seekable output is staged in a dynamic buffer so complete
     * clusters can be written out in one go. */
    if (!s->pb->seekable) {
        if (!mkv->dyn_bc) {
            if ((ret = avio_open_dyn_buf(&mkv->dyn_bc)) < 0) {
                av_log(s, AV_LOG_ERROR, "Failed to open dynamic buffer\n");
                return ret;
            }
        }
        pb = mkv->dyn_bc;
    }

    // open a new cluster if needed; its timecode is the first packet's ts
    if (mkv->cluster_pos == -1) {
        mkv->cluster_pos = avio_tell(s->pb);
        mkv->cluster = start_ebml_master(pb, MATROSKA_ID_CLUSTER, 0);
        put_ebml_uint(pb, MATROSKA_ID_CLUSTERTIMECODE, FFMAX(0, ts));
        mkv->cluster_pts = FFMAX(0, ts);
    }

    relative_packet_pos = avio_tell(s->pb) - mkv->cluster.pos;

    if (codec->codec_type != AVMEDIA_TYPE_SUBTITLE) {
        mkv_write_block(s, pb, MATROSKA_ID_SIMPLEBLOCK, pkt, keyframe << 7);
#if FF_API_ASS_SSA
    } else if (codec->codec_id == AV_CODEC_ID_SSA) {
        duration = mkv_write_ass_blocks(s, pb, pkt);
#endif
    } else if (codec->codec_id == AV_CODEC_ID_SRT) {
        duration = mkv_write_srt_blocks(s, pb, pkt);
    } else if (codec->codec_id == AV_CODEC_ID_WEBVTT) {
        duration = mkv_write_vtt_blocks(s, pb, pkt);
    } else {
        ebml_master blockgroup = start_ebml_master(pb, MATROSKA_ID_BLOCKGROUP, mkv_blockgroup_size(pkt->size));
        /* For backward compatibility, prefer convergence_duration. */
        if (pkt->convergence_duration > 0) {
            duration = pkt->convergence_duration;
        }
        mkv_write_block(s, pb, MATROSKA_ID_BLOCK, pkt, 0);
        put_ebml_uint(pb, MATROSKA_ID_BLOCKDURATION, duration);
        end_ebml_master(pb, blockgroup);
    }

    // index video keyframes and every subtitle (subtitles carry duration)
    if ((codec->codec_type == AVMEDIA_TYPE_VIDEO && keyframe) || codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
        ret = mkv_add_cuepoint(mkv->cues, pkt->stream_index, ts, mkv->cluster_pos, relative_packet_pos,
                               codec->codec_type == AVMEDIA_TYPE_SUBTITLE ? duration : -1);
        if (ret < 0) return ret;
    }

    // running total used to patch the Duration element in the trailer
    mkv->duration = FFMAX(mkv->duration, ts + duration);
    return 0;
}
 
/**
 * Top-level packet entry point: close the current cluster when size or
 * time limits are exceeded, then write the packet.  Audio packets are
 * cached for one call so that a following video keyframe's timecode
 * lands in the same cluster (required for WebM).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mkv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    MatroskaMuxContext *mkv = s->priv_data;
    int codec_type = s->streams[pkt->stream_index]->codec->codec_type;
    int keyframe = !!(pkt->flags & AV_PKT_FLAG_KEY);
    int cluster_size;
    int64_t cluster_time;
    AVIOContext *pb;
    int ret;

    if (mkv->tracks[pkt->stream_index].write_dts)
        cluster_time = pkt->dts - mkv->cluster_pts;
    else
        cluster_time = pkt->pts - mkv->cluster_pts;

    // start a new cluster every 5 MB or 5 sec, or 32k / 1 sec for streaming or
    // after 4k and on a keyframe
    if (s->pb->seekable) {
        pb = s->pb;
        cluster_size = avio_tell(pb) - mkv->cluster_pos;
    } else {
        pb = mkv->dyn_bc;
        cluster_size = avio_tell(pb);
    }

    if (mkv->cluster_pos != -1 &&
        (cluster_size > mkv->cluster_size_limit ||
         cluster_time > mkv->cluster_time_limit ||
         (codec_type == AVMEDIA_TYPE_VIDEO && keyframe &&
          cluster_size > 4 * 1024))) {
        /* Fixed: pts/dts (and avio_tell) are signed int64_t, so PRId64
         * is the matching conversion; also add the missing separator
         * between the pts and dts values. */
        av_log(s, AV_LOG_DEBUG, "Starting new cluster at offset %" PRId64
               " bytes, pts %" PRId64 ", dts %" PRId64 "\n",
               avio_tell(pb), pkt->pts, pkt->dts);
        end_ebml_master(pb, mkv->cluster);
        mkv->cluster_pos = -1;
        if (mkv->dyn_bc)
            mkv_flush_dynbuf(s);
        avio_flush(s->pb);
    }

    // check if we have an audio packet cached
    if (mkv->cur_audio_pkt.size > 0) {
        ret = mkv_write_packet_internal(s, &mkv->cur_audio_pkt);
        av_free_packet(&mkv->cur_audio_pkt);
        if (ret < 0) {
            av_log(s, AV_LOG_ERROR, "Could not write cached audio packet ret:%d\n", ret);
            return ret;
        }
    }

    // buffer an audio packet to ensure the packet containing the video
    // keyframe's timecode is contained in the same cluster for WebM
    if (codec_type == AVMEDIA_TYPE_AUDIO) {
        mkv->cur_audio_pkt = *pkt;
        if (pkt->buf) {
            mkv->cur_audio_pkt.buf = av_buffer_ref(pkt->buf);
            ret = mkv->cur_audio_pkt.buf ? 0 : AVERROR(ENOMEM);
        } else
            ret = av_dup_packet(&mkv->cur_audio_pkt);
        if (mkv->cur_audio_pkt.side_data_elems > 0) {
            /* NOTE(review): src == dst here; presumably this duplicates
             * the side data buffers in place so the cached packet owns
             * its copies — confirm against av_copy_packet_side_data(). */
            ret = av_copy_packet_side_data(&mkv->cur_audio_pkt, &mkv->cur_audio_pkt);
        }
    } else
        ret = mkv_write_packet_internal(s, pkt);
    return ret;
}
 
/**
 * write_packet callback with flush support: a NULL packet requests a
 * flush, which closes the currently open cluster (if any); otherwise
 * the packet is forwarded to mkv_write_packet().
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mkv_write_flush_packet(AVFormatContext *s, AVPacket *pkt)
{
    MatroskaMuxContext *mkv = s->priv_data;
    AVIOContext *out = s->pb->seekable ? s->pb : mkv->dyn_bc;

    if (pkt)
        return mkv_write_packet(s, pkt);

    /* Flush request: finish the open cluster and push it downstream. */
    if (mkv->cluster_pos != -1) {
        av_log(s, AV_LOG_DEBUG, "Flushing cluster at offset %" PRIu64
               " bytes\n", avio_tell(out));
        end_ebml_master(out, mkv->cluster);
        mkv->cluster_pos = -1;
        if (mkv->dyn_bc)
            mkv_flush_dynbuf(s);
        avio_flush(s->pb);
    }
    return 0;
}
 
/**
 * Finalize the file: flush the cached audio packet and the open
 * cluster, write chapters added during muxing, write the Cues (into
 * reserved space if requested), emit the seek head, patch the Duration
 * element, close the Segment and free muxer state.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mkv_write_trailer(AVFormatContext *s)
{
    MatroskaMuxContext *mkv = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t currentpos, cuespos;
    int ret;

    // check if we have an audio packet cached
    if (mkv->cur_audio_pkt.size > 0) {
        ret = mkv_write_packet_internal(s, &mkv->cur_audio_pkt);
        av_free_packet(&mkv->cur_audio_pkt);
        if (ret < 0) {
            av_log(s, AV_LOG_ERROR, "Could not write cached audio packet ret:%d\n", ret);
            return ret;
        }
    }

    // close whichever cluster is still open (buffered or direct)
    if (mkv->dyn_bc) {
        end_ebml_master(mkv->dyn_bc, mkv->cluster);
        mkv_flush_dynbuf(s);
    } else if (mkv->cluster_pos != -1) {
        end_ebml_master(pb, mkv->cluster);
    }

    // chapters created while muxing (mkv_write_chapters is a no-op if
    // they were already written in the header)
    if (mkv->mode != MODE_WEBM) {
        ret = mkv_write_chapters(s);
        if (ret < 0) return ret;
    }

    if (pb->seekable) {
        if (mkv->cues->num_entries) {
            if (mkv->reserve_cues_space) {
                int64_t cues_end;

                /* Write the cues into the space reserved near the start
                 * of the file, then pad any leftover with Void. */
                currentpos = avio_tell(pb);
                avio_seek(pb, mkv->cues_pos, SEEK_SET);

                cuespos  = mkv_write_cues(pb, mkv->cues, mkv->tracks, s->nb_streams);
                cues_end = avio_tell(pb);
                if (cues_end > cuespos + mkv->reserve_cues_space) {
                    av_log(s, AV_LOG_ERROR, "Insufficient space reserved for cues: %d "
                           "(needed: %"PRId64").\n", mkv->reserve_cues_space,
                           cues_end - cuespos);
                    return AVERROR(EINVAL);
                }

                if (cues_end < cuespos + mkv->reserve_cues_space)
                    put_ebml_void(pb, mkv->reserve_cues_space - (cues_end - cuespos));

                avio_seek(pb, currentpos, SEEK_SET);
            } else {
                cuespos = mkv_write_cues(pb, mkv->cues, mkv->tracks, s->nb_streams);
            }

            ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_CUES, cuespos);
            if (ret < 0) return ret;
        }

        mkv_write_seekhead(pb, mkv->main_seekhead);

        // update the duration
        av_log(s, AV_LOG_DEBUG, "end duration = %" PRIu64 "\n", mkv->duration);
        currentpos = avio_tell(pb);
        avio_seek(pb, mkv->duration_offset, SEEK_SET);
        // overwrites the Void placeholder written in mkv_write_header
        put_ebml_float(pb, MATROSKA_ID_DURATION, mkv->duration);

        avio_seek(pb, currentpos, SEEK_SET);
    }

    end_ebml_master(pb, mkv->segment);
    av_freep(&mkv->tracks);
    av_freep(&mkv->cues->entries);
    av_freep(&mkv->cues);

    return 0;
}
 
static int mkv_query_codec(enum AVCodecID codec_id, int std_compliance)
{
int i;
for (i = 0; ff_mkv_codec_tags[i].id != AV_CODEC_ID_NONE; i++)
if (ff_mkv_codec_tags[i].id == codec_id)
return 1;
 
if (std_compliance < FF_COMPLIANCE_NORMAL) { // mkv theoretically supports any
enum AVMediaType type = avcodec_get_type(codec_id); // video/audio through VFW/ACM
if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)
return 1;
}
 
return 0;
}
 
/* Codecs that are muxable although they have no native tag mapping;
 * 0xFFFFFFFF marks "any tag accepted". */
static const AVCodecTag additional_audio_tags[] = {
    { AV_CODEC_ID_ALAC,      0xFFFFFFFF },
    { AV_CODEC_ID_EAC3,      0xFFFFFFFF },
    { AV_CODEC_ID_MLP,       0xFFFFFFFF },
    { AV_CODEC_ID_OPUS,      0xFFFFFFFF },
    { AV_CODEC_ID_PCM_S16BE, 0xFFFFFFFF },
    { AV_CODEC_ID_PCM_S24BE, 0xFFFFFFFF },
    { AV_CODEC_ID_PCM_S32BE, 0xFFFFFFFF },
    { AV_CODEC_ID_QDM2,      0xFFFFFFFF },
    { AV_CODEC_ID_RA_144,    0xFFFFFFFF },
    { AV_CODEC_ID_RA_288,    0xFFFFFFFF },
    { AV_CODEC_ID_COOK,      0xFFFFFFFF },
    { AV_CODEC_ID_TRUEHD,    0xFFFFFFFF },
    { AV_CODEC_ID_NONE,      0xFFFFFFFF }
};

static const AVCodecTag additional_video_tags[] = {
    { AV_CODEC_ID_RV10, 0xFFFFFFFF },
    { AV_CODEC_ID_RV20, 0xFFFFFFFF },
    { AV_CODEC_ID_RV30, 0xFFFFFFFF },
    { AV_CODEC_ID_RV40, 0xFFFFFFFF },
    { AV_CODEC_ID_VP9,  0xFFFFFFFF },
    { AV_CODEC_ID_NONE, 0xFFFFFFFF }
};
 
#define OFFSET(x) offsetof(MatroskaMuxContext, x)
#define FLAGS AV_OPT_FLAG_ENCODING_PARAM
/* Private options shared by the matroska, webm and mka muxers below. */
static const AVOption options[] = {
    { "reserve_index_space", "Reserve a given amount of space (in bytes) at the beginning of the file for the index (cues).", OFFSET(reserve_cues_space), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "cluster_size_limit", "Store at most the provided amount of bytes in a cluster. ", OFFSET(cluster_size_limit), AV_OPT_TYPE_INT , { .i64 = -1 }, -1, INT_MAX, FLAGS },
    { "cluster_time_limit", "Store at most the provided number of milliseconds in a cluster.", OFFSET(cluster_time_limit), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
    { NULL },
};
 
#if CONFIG_MATROSKA_MUXER
static const AVClass matroska_class = {
    .class_name = "matroska muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVOutputFormat ff_matroska_muxer = {
    .name              = "matroska",
    .long_name         = NULL_IF_CONFIG_SMALL("Matroska"),
    .mime_type         = "video/x-matroska",
    .extensions        = "mkv",
    .priv_data_size    = sizeof(MatroskaMuxContext),
    /* Prefer the external encoders when they were compiled in. */
    .audio_codec       = CONFIG_LIBVORBIS_ENCODER ?
                         AV_CODEC_ID_VORBIS : AV_CODEC_ID_AC3,
    .video_codec       = CONFIG_LIBX264_ENCODER ?
                         AV_CODEC_ID_H264 : AV_CODEC_ID_MPEG4,
    .write_header      = mkv_write_header,
    .write_packet      = mkv_write_flush_packet,
    .write_trailer     = mkv_write_trailer,
    .flags             = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS |
                         AVFMT_TS_NONSTRICT | AVFMT_ALLOW_FLUSH,
    .codec_tag         = (const AVCodecTag* const []){
         ff_codec_bmp_tags, ff_codec_wav_tags,
         additional_audio_tags, additional_video_tags, 0
    },
#if FF_API_ASS_SSA
    .subtitle_codec    = AV_CODEC_ID_SSA,
#else
    .subtitle_codec    = AV_CODEC_ID_ASS,
#endif
    .query_codec       = mkv_query_codec,
    .priv_class        = &matroska_class,
};
#endif
 
#if CONFIG_WEBM_MUXER
static const AVClass webm_class = {
    .class_name = "webm muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* WebM variant: restricted codec set (VP8/Vorbis/WebVTT), no codec_tag
 * tables and no query_codec callback. */
AVOutputFormat ff_webm_muxer = {
    .name              = "webm",
    .long_name         = NULL_IF_CONFIG_SMALL("WebM"),
    .mime_type         = "video/webm",
    .extensions        = "webm",
    .priv_data_size    = sizeof(MatroskaMuxContext),
    .audio_codec       = AV_CODEC_ID_VORBIS,
    .video_codec       = AV_CODEC_ID_VP8,
    .subtitle_codec    = AV_CODEC_ID_WEBVTT,
    .write_header      = mkv_write_header,
    .write_packet      = mkv_write_flush_packet,
    .write_trailer     = mkv_write_trailer,
    .flags             = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS |
                         AVFMT_TS_NONSTRICT | AVFMT_ALLOW_FLUSH,
    .priv_class        = &webm_class,
};
#endif
 
#if CONFIG_MATROSKA_AUDIO_MUXER
static const AVClass mka_class = {
    .class_name = "matroska audio muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
/* Audio-only Matroska (.mka) variant of the muxer above. */
AVOutputFormat ff_matroska_audio_muxer = {
    /* NOTE(review): shares the "matroska" name with the A/V muxer above;
     * presumably selection happens via extension/mime — confirm intended. */
    .name              = "matroska",
    .long_name         = NULL_IF_CONFIG_SMALL("Matroska Audio"),
    .mime_type         = "audio/x-matroska",
    .extensions        = "mka",
    .priv_data_size    = sizeof(MatroskaMuxContext),
    .audio_codec       = CONFIG_LIBVORBIS_ENCODER ?
                         AV_CODEC_ID_VORBIS : AV_CODEC_ID_AC3,
    .video_codec       = AV_CODEC_ID_NONE,
    .write_header      = mkv_write_header,
    .write_packet      = mkv_write_flush_packet,
    .write_trailer     = mkv_write_trailer,
    .flags             = AVFMT_GLOBALHEADER | AVFMT_TS_NONSTRICT |
                         AVFMT_ALLOW_FLUSH,
    .codec_tag         = (const AVCodecTag* const []){
        ff_codec_wav_tags, additional_audio_tags, 0
    },
    .priv_class        = &mka_class,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/md5enc.c
0,0 → 1,168
/*
* MD5 encoder (for codec/format testing)
* Copyright (c) 2009 Reimar Döffinger, based on crcenc (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/hash.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "internal.h"
 
/* Muxer private data: a generic hash context (MD5 by default) plus the
 * user-selected hash name from the "hash" option. */
struct MD5Context {
    const AVClass *avclass;
    struct AVHashContext *hash;  /* running hash over all packet payloads */
    char *hash_name;             /* option value, e.g. "md5" */
};
 
/*
 * Finalize the hash, append its hex digest plus a newline to the prefix
 * already present in buf, and write the whole line to the output.
 * buf must be large enough for prefix + 2*hash_size + 2 bytes.
 */
static void md5_finish(struct AVFormatContext *s, char *buf)
{
    struct MD5Context *c = s->priv_data;
    uint8_t digest[AV_HASH_MAX_SIZE];
    int hash_len = av_hash_get_size(c->hash);
    int pos = strlen(buf);
    int i;

    av_assert0(hash_len > 0 && hash_len <= sizeof(digest));
    av_hash_final(c->hash, digest);

    for (i = 0; i < hash_len; i++, pos += 2)
        snprintf(buf + pos, 3, "%02"PRIx8, digest[i]);
    buf[pos++] = '\n';
    buf[pos]   = 0;

    avio_write(s->pb, buf, strlen(buf));
    avio_flush(s->pb);
}
 
#define OFFSET(x) offsetof(struct MD5Context, x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
/* Shared by the md5 and framemd5 muxers: selects the hash algorithm. */
static const AVOption hash_options[] = {
    { "hash", "set hash to use", OFFSET(hash_name), AV_OPT_TYPE_STRING, {.str = "md5"}, 0, 0, ENC },
    { NULL },
};

static const AVClass md5enc_class = {
    .class_name = "hash encoder class",
    .item_name  = av_default_item_name,
    .option     = hash_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
#if CONFIG_MD5_MUXER
/* Allocate the selected hash and start a fresh digest. */
static int write_header(struct AVFormatContext *s)
{
    struct MD5Context *c = s->priv_data;
    int ret = av_hash_alloc(&c->hash, c->hash_name);

    if (ret < 0)
        return ret;
    av_hash_init(c->hash);
    return 0;
}
 
/* Fold the packet payload into the running hash; nothing is written yet. */
static int write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    struct MD5Context *ctx = s->priv_data;

    av_hash_update(ctx->hash, pkt->data, pkt->size);
    return 0;
}
 
/* Emit one "<hashname>=<hexdigest>" line and release the hash context. */
static int write_trailer(struct AVFormatContext *s)
{
    struct MD5Context *ctx = s->priv_data;
    char line[256];

    /* The reduced size bound leaves room for the hex digest that
     * md5_finish() appends after the "<name>=" prefix. */
    av_strlcpy(line, av_hash_get_name(ctx->hash), sizeof(line) - 200);
    av_strlcat(line, "=", sizeof(line) - 200);

    md5_finish(s, line);

    av_hash_freep(&ctx->hash);
    return 0;
}
 
/* Whole-stream hash muxer: one digest line for the entire output. */
AVOutputFormat ff_md5_muxer = {
    .name              = "md5",
    .long_name         = NULL_IF_CONFIG_SMALL("MD5 testing"),
    .priv_data_size    = sizeof(struct MD5Context),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_RAWVIDEO,
    .write_header      = write_header,
    .write_packet      = write_packet,
    .write_trailer     = write_trailer,
    .flags             = AVFMT_NOTIMESTAMPS,
    .priv_class        = &md5enc_class,
};
#endif
 
#if CONFIG_FRAMEMD5_MUXER
/* Per-frame variant: print a header describing the line format, then
 * one hash line per packet. */
static int framemd5_write_header(struct AVFormatContext *s)
{
    struct MD5Context *c = s->priv_data;
    int res = av_hash_alloc(&c->hash, c->hash_name);
    if (res < 0)
        return res;
    avio_printf(s->pb, "#format: frame checksums\n");
    avio_printf(s->pb, "#version: 1\n");
    avio_printf(s->pb, "#hash: %s\n", av_hash_get_name(c->hash));
    ff_framehash_write_header(s);
    avio_printf(s->pb, "#stream#, dts, pts, duration, size, hash\n");
    return 0;
}

/* Hash each packet independently (hash is re-initialized every time)
 * and emit "stream, dts, pts, duration, size, digest". */
static int framemd5_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    struct MD5Context *c = s->priv_data;
    char buf[256];
    av_hash_init(c->hash);
    av_hash_update(c->hash, pkt->data, pkt->size);

    /* sizeof(buf) - 64 keeps room for the hex digest md5_finish appends */
    snprintf(buf, sizeof(buf) - 64, "%d, %10"PRId64", %10"PRId64", %8d, %8d, ",
             pkt->stream_index, pkt->dts, pkt->pts, pkt->duration, pkt->size);
    md5_finish(s, buf);
    return 0;
}

static int framemd5_write_trailer(struct AVFormatContext *s)
{
    struct MD5Context *c = s->priv_data;
    av_hash_freep(&c->hash);
    return 0;
}

static const AVClass framemd5_class = {
    .class_name = "frame hash encoder class",
    .item_name  = av_default_item_name,
    .option     = hash_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVOutputFormat ff_framemd5_muxer = {
    .name              = "framemd5",
    .long_name         = NULL_IF_CONFIG_SMALL("Per-frame MD5 testing"),
    .priv_data_size    = sizeof(struct MD5Context),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_RAWVIDEO,
    .write_header      = framemd5_write_header,
    .write_packet      = framemd5_write_packet,
    .write_trailer     = framemd5_write_trailer,
    .flags             = AVFMT_VARIABLE_FPS | AVFMT_TS_NONSTRICT |
                         AVFMT_TS_NEGATIVE,
    .priv_class        = &framemd5_class,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/md5proto.c
0,0 → 1,95
/*
* Copyright (c) 2010 Mans Rullgard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdio.h>
#include "libavutil/avstring.h"
#include "libavutil/md5.h"
#include "libavutil/mem.h"
#include "libavutil/error.h"
#include "avformat.h"
#include "avio.h"
#include "url.h"
 
/* Protocol private data. The tag "MD5Context" is also used in md5enc.c;
 * both are translation-unit local, so they do not clash at link time. */
struct MD5Context {
    struct AVMD5 *md5;  /* running MD5 state over all written bytes */
};
 
/* Open callback: the md5: protocol is write-only — it consumes data and
 * produces a digest on close. */
static int md5_open(URLContext *h, const char *filename, int flags)
{
    struct MD5Context *c = h->priv_data;

    if (!(flags & AVIO_FLAG_WRITE))
        return AVERROR(EINVAL);

    if (!(c->md5 = av_md5_alloc()))
        return AVERROR(ENOMEM);
    av_md5_init(c->md5);

    return 0;
}
 
/* Fold the buffer into the digest; every byte is always "written". */
static int md5_write(URLContext *h, const unsigned char *buf, int size)
{
    struct MD5Context *ctx = h->priv_data;

    av_md5_update(ctx->md5, buf, size);
    return size;
}
 
/*
 * Finalize the digest and emit it as a hex line, either to the URL
 * following the "md5:" prefix or to stdout when no target was given.
 */
static int md5_close(URLContext *h)
{
    struct MD5Context *c = h->priv_data;
    const char *target = h->filename;
    uint8_t digest[16], line[64];
    URLContext *out;
    int i, err = 0;

    av_md5_final(c->md5, digest);
    for (i = 0; i < sizeof(digest); i++)
        snprintf(line + i * 2, 3, "%02x", digest[i]);
    line[i * 2] = '\n';

    /* strip the "md5:" scheme prefix to get the output target */
    av_strstart(target, "md5:", &target);

    if (*target) {
        err = ffurl_open(&out, target, AVIO_FLAG_WRITE,
                         &h->interrupt_callback, NULL);
        if (err)
            return err;
        err = ffurl_write(out, line, i * 2 + 1);
        ffurl_close(out);
    } else {
        if (fwrite(line, 1, i * 2 + 1, stdout) < i * 2 + 1)
            err = AVERROR(errno);
    }

    av_freep(&c->md5);

    return err;
}
 
 
/* "md5:<target>" pseudo-protocol for codec/format regression testing. */
URLProtocol ff_md5_protocol = {
    .name            = "md5",
    .url_open        = md5_open,
    .url_write       = md5_write,
    .url_close       = md5_close,
    .priv_data_size  = sizeof(struct MD5Context),
};
/contrib/sdk/sources/ffmpeg/libavformat/metadata.c
0,0 → 1,70
/*
* copyright (c) 2009 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "metadata.h"
#include "libavutil/dict.h"
#include "libavutil/avstring.h"
 
/*
 * Rename the keys of *pm from s_conv-native naming to d_conv-native
 * naming, going through the generic names as the pivot. The dictionary
 * is rebuilt; values are copied unchanged. No-op when both tables are
 * the same (including both NULL).
 */
void ff_metadata_conv(AVDictionary **pm, const AVMetadataConv *d_conv,
                      const AVMetadataConv *s_conv)
{
    /* TODO: use binary search to look up the two conversion tables
       if the tables are getting big enough that it would matter speed wise */
    AVDictionaryEntry *entry = NULL;
    AVDictionary *converted = NULL;

    if (d_conv == s_conv)
        return;

    while ((entry = av_dict_get(*pm, "", entry, AV_DICT_IGNORE_SUFFIX))) {
        const char *key = entry->key;
        const AVMetadataConv *conv;

        /* source-native key -> generic key */
        if (s_conv)
            for (conv = s_conv; conv->native; conv++)
                if (!av_strcasecmp(key, conv->native)) {
                    key = conv->generic;
                    break;
                }
        /* generic key -> destination-native key */
        if (d_conv)
            for (conv = d_conv; conv->native; conv++)
                if (!av_strcasecmp(key, conv->generic)) {
                    key = conv->native;
                    break;
                }
        av_dict_set(&converted, key, entry->value, 0);
    }
    av_dict_free(pm);
    *pm = converted;
}
 
/* Apply ff_metadata_conv() to the context itself and to every stream,
 * chapter and program it contains. */
void ff_metadata_conv_ctx(AVFormatContext *ctx, const AVMetadataConv *d_conv,
                          const AVMetadataConv *s_conv)
{
    int idx;

    ff_metadata_conv(&ctx->metadata, d_conv, s_conv);
    for (idx = 0; idx < ctx->nb_streams; idx++)
        ff_metadata_conv(&ctx->streams[idx]->metadata, d_conv, s_conv);
    for (idx = 0; idx < ctx->nb_chapters; idx++)
        ff_metadata_conv(&ctx->chapters[idx]->metadata, d_conv, s_conv);
    for (idx = 0; idx < ctx->nb_programs; idx++)
        ff_metadata_conv(&ctx->programs[idx]->metadata, d_conv, s_conv);
}
/contrib/sdk/sources/ffmpeg/libavformat/metadata.h
0,0 → 1,44
/*
* copyright (c) 2009 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_METADATA_H
#define AVFORMAT_METADATA_H

/**
 * @file
 * internal metadata API header
 * see avformat.h or the public API!
 */


#include "avformat.h"
#include "libavutil/dict.h"

/**
 * One mapping between a (de)muxer-native metadata key and its generic,
 * format-independent equivalent. Tables of these are terminated by an
 * entry whose native pointer is NULL.
 */
typedef struct AVMetadataConv {
    const char *native;
    const char *generic;
} AVMetadataConv;

/**
 * Convert the keys of *pm from s_conv naming to d_conv naming via the
 * generic names; no-op when d_conv == s_conv.
 */
void ff_metadata_conv(AVDictionary **pm, const AVMetadataConv *d_conv,
                                         const AVMetadataConv *s_conv);
/**
 * Run ff_metadata_conv() on a format context's metadata and on the
 * metadata of all its streams, chapters and programs.
 */
void ff_metadata_conv_ctx(AVFormatContext *ctx, const AVMetadataConv *d_conv,
                                                const AVMetadataConv *s_conv);

#endif /* AVFORMAT_METADATA_H */
/contrib/sdk/sources/ffmpeg/libavformat/mgsts.c
0,0 → 1,106
/*
 * Metal Gear Solid: The Twin Snakes demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "avformat.h"
#include "riff.h"
 
/* Probe: three fixed big-endian 32-bit header words identify the format. */
static int read_probe(AVProbeData *p)
{
    if (AV_RB32(p->buf)      == 0x000E &&
        AV_RB32(p->buf + 4)  == 0x0050 &&
        AV_RB32(p->buf + 12) == 0x0034)
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
/* Parse the fixed 80-byte header chunk and set up the single video stream. */
static int read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVStream *st;
    AVRational fps;
    uint32_t chunk_size;

    /* 4 unknown bytes, then the chunk size, which must be 80 */
    avio_skip(pb, 4);
    chunk_size = avio_rb32(pb);
    if (chunk_size != 80)
        return AVERROR(EIO);
    avio_skip(pb, 20);

    st = avformat_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->need_parsing = AVSTREAM_PARSE_HEADERS;
    st->start_time = 0;
    st->nb_frames =
    st->duration = avio_rb32(pb);   /* total frame count */
    /* frame rate is stored as a big-endian IEEE float */
    fps = av_d2q(av_int2float(avio_rb32(pb)), INT_MAX);
    st->codec->width  = avio_rb32(pb);
    st->codec->height = avio_rb32(pb);
    avio_skip(pb, 12);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_tag  = avio_rb32(pb);  /* BMP-style video fourcc */
    st->codec->codec_id   = ff_codec_get_id(ff_codec_bmp_tags,
                                            st->codec->codec_tag);
    avpriv_set_pts_info(st, 64, fps.den, fps.num);
    avio_skip(pb, 20);

    return 0;
}
 
/*
 * Read one chunk: 4 unknown bytes, chunk size, 4 unknown bytes, payload
 * size, payload, optional padding. Returns the payload size read.
 *
 * Fix: the original size check "chunk_size < payload_size + 16" wraps
 * around for payload_size > UINT32_MAX - 16, accepting corrupt files;
 * the check is rewritten without unsigned overflow and the payload is
 * bounded to INT_MAX before being passed to av_get_packet().
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    uint32_t chunk_size, payload_size;
    int ret;

    if (url_feof(pb))
        return AVERROR_EOF;

    avio_skip(pb, 4);
    chunk_size   = avio_rb32(pb);
    avio_skip(pb, 4);
    payload_size = avio_rb32(pb);

    /* overflow-safe form of: chunk_size >= payload_size + 16 */
    if (chunk_size < 16 || chunk_size - 16 < payload_size)
        return AVERROR(EIO);
    if (payload_size > INT_MAX)
        return AVERROR(EIO);

    ret = av_get_packet(pb, pkt, payload_size);
    if (ret < 0)
        return ret;

    pkt->pos     -= 16;  /* point at the start of the chunk header */
    pkt->duration = 1;
    avio_skip(pb, chunk_size - (ret + 16));  /* skip trailing padding */

    return ret;
}
 
/* Demuxer registration; no read_close needed — nothing is allocated. */
AVInputFormat ff_mgsts_demuxer = {
    .name        = "mgsts",
    .long_name   = NULL_IF_CONFIG_SMALL("Metal Gear Solid: The Twin Snakes"),
    .read_probe  = read_probe,
    .read_header = read_header,
    .read_packet = read_packet,
    .flags       = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/microdvddec.c
0,0 → 1,167
/*
* MicroDVD subtitle demuxer
* Copyright (c) 2010 Aurelien Jacobs <aurel@gnuage.org>
* Copyright (c) 2012 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
#include "libavutil/intreadwrite.h"
 
#define MAX_LINESIZE 2048
 
 
/* Demuxer private data: all events are parsed at header time into q. */
typedef struct {
    FFDemuxSubtitlesQueue q;
} MicroDVDContext;
 
 
/* Probe: accept only if the first three lines each look like a
 * MicroDVD event ("{n}{}", "{n}{m}" or "{DEFAULT}{}"). */
static int microdvd_probe(AVProbeData *p)
{
    const uint8_t *ptr = p->buf;
    unsigned char c;
    int line;

    if (AV_RB24(ptr) == 0xEFBBBF)
        ptr += 3; /* skip UTF-8 BOM */

    for (line = 0; line < 3; line++) {
        if (sscanf(ptr, "{%*d}{}%c", &c) != 1 &&
            sscanf(ptr, "{%*d}{%*d}%c", &c) != 1 &&
            sscanf(ptr, "{DEFAULT}{}%c", &c) != 1)
            return 0;
        ptr += ff_subtitles_next_line(ptr);
    }
    return AVPROBE_SCORE_MAX;
}
 
/* An event line starts with "{<frame>}{"; the start frame is the pts. */
static int64_t get_pts(const char *buf)
{
    int frame;
    char next;

    if (sscanf(buf, "{%d}{%c", &frame, &next) != 2)
        return AV_NOPTS_VALUE;
    return frame;
}
 
/* "{start}{end}" -> duration in frames, or -1 when no end frame given. */
static int get_duration(const char *buf)
{
    int start, end;

    if (sscanf(buf, "{%d}{%d}", &start, &end) != 2)
        return -1;
    return end - start;
}
 
/*
 * Read the whole file at header time: detect an optional fps hint and a
 * "{DEFAULT}{}" global-style line within the first three lines, queue
 * every other event as a subtitle packet, then create the stream.
 */
static int microdvd_read_header(AVFormatContext *s)
{
    AVRational pts_info = (AVRational){ 2997, 125 }; /* default: 23.976 fps */
    MicroDVDContext *microdvd = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    int i = 0;
    char line[MAX_LINESIZE];

    if (!st)
        return AVERROR(ENOMEM);

    while (!url_feof(s->pb)) {
        char *p = line;
        AVPacket *sub;
        int64_t pos = avio_tell(s->pb);
        int len = ff_get_line(s->pb, line, sizeof(line));

        if (!len)
            break;
        line[strcspn(line, "\r\n")] = 0;  /* strip the line terminator */
        if (i++ < 3) {
            int frame;
            double fps;
            char c;

            /* fps override: text after the frame ids on an early line,
             * e.g. "{1}{1}25.000"; %6lf limits the parse to 6 chars */
            if ((sscanf(line, "{%d}{}%6lf", &frame, &fps) == 2 ||
                 sscanf(line, "{%d}{%*d}%6lf", &frame, &fps) == 2)
                && frame <= 1 && fps > 3 && fps < 100)
                pts_info = av_d2q(fps, 100000);
            /* global style line, stored as extradata (skip "{DEFAULT}{}") */
            if (!st->codec->extradata && sscanf(line, "{DEFAULT}{}%c", &c) == 1) {
                st->codec->extradata = av_strdup(line + 11);
                if (!st->codec->extradata)
                    return AVERROR(ENOMEM);
                st->codec->extradata_size = strlen(st->codec->extradata) + 1;
                continue;
            }
        }
/* Advance p past one "{...}" frame id, or skip the malformed line. */
#define SKIP_FRAME_ID \
    p = strchr(p, '}'); \
    if (!p) { \
        av_log(s, AV_LOG_WARNING, "Invalid event \"%s\"" \
               " at line %d\n", line, i); \
        continue; \
    } \
    p++
        SKIP_FRAME_ID;
        SKIP_FRAME_ID;
        if (!*p)
            continue;
        sub = ff_subtitles_queue_insert(&microdvd->q, p, strlen(p), 0);
        if (!sub)
            return AVERROR(ENOMEM);
        sub->pos = pos;
        sub->pts = get_pts(line);
        sub->duration = get_duration(line);
    }
    ff_subtitles_queue_finalize(&microdvd->q);
    avpriv_set_pts_info(st, 64, pts_info.den, pts_info.num);
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_MICRODVD;
    return 0;
}
 
/* Hand out the next queued subtitle event. */
static int microdvd_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MicroDVDContext *ctx = s->priv_data;

    return ff_subtitles_queue_read_packet(&ctx->q, pkt);
}
 
/* Seek within the pre-parsed subtitle queue. */
static int microdvd_read_seek(AVFormatContext *s, int stream_index,
                              int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    MicroDVDContext *ctx = s->priv_data;

    return ff_subtitles_queue_seek(&ctx->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* Release every event buffered in the subtitle queue. */
static int microdvd_read_close(AVFormatContext *s)
{
    MicroDVDContext *ctx = s->priv_data;

    ff_subtitles_queue_clean(&ctx->q);
    return 0;
}
 
/* MicroDVD demuxer registration. */
AVInputFormat ff_microdvd_demuxer = {
    .name           = "microdvd",
    .long_name      = NULL_IF_CONFIG_SMALL("MicroDVD subtitle format"),
    .priv_data_size = sizeof(MicroDVDContext),
    .read_probe     = microdvd_probe,
    .read_header    = microdvd_read_header,
    .read_packet    = microdvd_read_packet,
    .read_seek2     = microdvd_read_seek,
    .read_close     = microdvd_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/microdvdenc.c
0,0 → 1,67
/*
* MicroDVD subtitle muxer
* Copyright (c) 2010 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <inttypes.h>
#include "avformat.h"
#include "internal.h"
 
static int microdvd_write_header(struct AVFormatContext *s)
{
AVCodecContext *avctx = s->streams[0]->codec;
AVRational tb = avctx->time_base;
 
if (s->nb_streams != 1 || avctx->codec_id != AV_CODEC_ID_MICRODVD) {
av_log(s, AV_LOG_ERROR, "Exactly one MicroDVD stream is needed.\n");
return -1;
}
 
if (avctx->extradata && avctx->extradata_size > 0) {
avio_write(s->pb, "{DEFAULT}{}", 11);
avio_write(s->pb, avctx->extradata, avctx->extradata_size);
avio_flush(s->pb);
}
 
avpriv_set_pts_info(s->streams[0], 64, tb.num, tb.den);
return 0;
}
 
/* Emit one event line: "{start}{end}text\n"; the end braces are left
 * empty when no duration is known. */
static int microdvd_write_packet(AVFormatContext *avf, AVPacket *pkt)
{
    avio_printf(avf->pb, "{%"PRId64"}", pkt->pts);
    if (pkt->duration >= 0)
        avio_printf(avf->pb, "{%"PRId64"}", pkt->pts + pkt->duration);
    else
        avio_write(avf->pb, "{}", 2);
    avio_write(avf->pb, pkt->data, pkt->size);
    avio_write(avf->pb, "\n", 1);
    return 0;
}
 
/* MicroDVD muxer registration. */
AVOutputFormat ff_microdvd_muxer = {
    .name           = "microdvd",
    .long_name      = NULL_IF_CONFIG_SMALL("MicroDVD subtitle format"),
    .mime_type      = "text/x-microdvd",
    .extensions     = "sub",
    .write_header   = microdvd_write_header,
    .write_packet   = microdvd_write_packet,
    .flags          = AVFMT_NOTIMESTAMPS,
    .subtitle_codec = AV_CODEC_ID_MICRODVD,
};
/contrib/sdk/sources/ffmpeg/libavformat/mkvtimestamp_v2.c
0,0 → 1,50
/*
* extract pts as timecode v2, as defined by mkvtoolnix
* Copyright (c) 2009 David Conrad
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
 
/* Emit the mkvtoolnix "timecode format v2" banner; timestamps are
 * written in milliseconds. */
static int write_header(AVFormatContext *s)
{
    static const char *banner = "# timecode format v2\n";

    avio_write(s->pb, banner, strlen(banner));
    avpriv_set_pts_info(s->streams[0], 64, 1, 1000);
    return 0;
}
 
/* Print one decimal dts per packet; packets from extra streams are
 * still printed but provoke a warning. */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    char line[256];

    if (pkt->stream_index)
        av_log(s, AV_LOG_WARNING, "More than one stream unsupported\n");
    snprintf(line, sizeof(line), "%" PRId64 "\n", pkt->dts);
    avio_write(s->pb, line, strlen(line));
    return 0;
}
 
/* Timestamp-extraction pseudo-muxer registration. */
AVOutputFormat ff_mkvtimestamp_v2_muxer = {
    .name         = "mkvtimestamp_v2",
    .long_name    = NULL_IF_CONFIG_SMALL("extract pts as timecode v2 format, as defined by mkvtoolnix"),
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_RAWVIDEO,
    .write_header = write_header,
    .write_packet = write_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/mm.c
0,0 → 1,197
/*
* American Laser Games MM Format Demuxer
* Copyright (c) 2006 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* American Laser Games MM Format Demuxer
* by Peter Ross (pross@xvid.org)
*
* The MM format was used by IBM-PC ports of ALG's "arcade shooter" games,
* including Mad Dog McCree and Crime Patrol.
*
* Technical details here:
* http://wiki.multimedia.cx/index.php?title=American_Laser_Games_MM
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define MM_PREAMBLE_SIZE 6
 
#define MM_TYPE_HEADER 0x0
#define MM_TYPE_INTER 0x5
#define MM_TYPE_INTRA 0x8
#define MM_TYPE_INTRA_HH 0xc
#define MM_TYPE_INTER_HH 0xd
#define MM_TYPE_INTRA_HHV 0xe
#define MM_TYPE_INTER_HHV 0xf
#define MM_TYPE_AUDIO 0x15
#define MM_TYPE_PALETTE 0x31
 
#define MM_HEADER_LEN_V 0x16 /* video only */
#define MM_HEADER_LEN_AV 0x18 /* video + audio */
 
#define MM_PALETTE_COUNT 128
#define MM_PALETTE_SIZE (MM_PALETTE_COUNT*3)
 
/* Per-stream frame counters used as pts (video in frames, audio in samples
 * per packet index — see read_packet). */
typedef struct {
  unsigned int audio_pts, video_pts;
} MmDemuxContext;
 
/* Probe: validate the header chunk fields and the type of the chunk
 * that follows it. */
static int probe(AVProbeData *p)
{
    int hdr_len, next_type, fps, width, height;

    if (p->buf_size < MM_HEADER_LEN_AV + MM_PREAMBLE_SIZE)
        return 0;
    /* the first chunk is always the header */
    if (AV_RL16(&p->buf[0]) != MM_TYPE_HEADER)
        return 0;
    hdr_len = AV_RL32(&p->buf[2]);
    if (hdr_len != MM_HEADER_LEN_V && hdr_len != MM_HEADER_LEN_AV)
        return 0;

    fps    = AV_RL16(&p->buf[8]);
    width  = AV_RL16(&p->buf[12]);
    height = AV_RL16(&p->buf[14]);
    if (!fps || fps > 60 || !width || width > 2048 || !height || height > 2048)
        return 0;

    next_type = AV_RL16(&p->buf[hdr_len]);
    if (!next_type || next_type > 0x31)
        return 0;

    /* only return half certainty since this check is a bit sketchy */
    return AVPROBE_SCORE_EXTENSION;
}
 
/* Parse the header chunk and create the video stream, plus an audio
 * stream when the longer header variant indicates one is present. */
static int read_header(AVFormatContext *s)
{
    MmDemuxContext *mm = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;

    unsigned int type, length;
    unsigned int frame_rate, width, height;

    type = avio_rl16(pb);
    length = avio_rl32(pb);

    if (type != MM_TYPE_HEADER)
        return AVERROR_INVALIDDATA;

    /* read header */
    avio_rl16(pb);   /* total number of chunks */
    frame_rate = avio_rl16(pb);
    avio_rl16(pb);   /* ibm-pc video bios mode */
    width = avio_rl16(pb);
    height = avio_rl16(pb);
    avio_skip(pb, length - 10);  /* unknown data */

    /* video stream */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_MMVIDEO;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = width;
    st->codec->height = height;
    avpriv_set_pts_info(st, 64, 1, frame_rate);

    /* audio stream — only present with the longer header variant */
    if (length == MM_HEADER_LEN_AV) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag = 0; /* no fourcc */
        st->codec->codec_id = AV_CODEC_ID_PCM_U8;
        st->codec->channels = 1;
        st->codec->channel_layout = AV_CH_LAYOUT_MONO;
        st->codec->sample_rate = 8000;
        avpriv_set_pts_info(st, 64, 1, 8000); /* 8000 hz */
    }

    mm->audio_pts = 0;
    mm->video_pts = 0;
    return 0;
}
 
/*
 * Read chunks until one produces a packet: video/palette chunks are
 * returned with their preamble prepended (the decoder needs it), audio
 * chunks are returned bare, anything else is skipped.
 *
 * Fix: the audio branch returned AVERROR(ENOMEM) for every
 * av_get_packet() failure; the real error code is now propagated.
 */
static int read_packet(AVFormatContext *s,
                       AVPacket *pkt)
{
    MmDemuxContext *mm = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char preamble[MM_PREAMBLE_SIZE];
    unsigned int type, length;
    int ret;

    while (1) {

        if (avio_read(pb, preamble, MM_PREAMBLE_SIZE) != MM_PREAMBLE_SIZE) {
            return AVERROR(EIO);
        }

        type   = AV_RL16(&preamble[0]);
        length = AV_RL16(&preamble[2]);

        switch (type) {
        case MM_TYPE_PALETTE:
        case MM_TYPE_INTER:
        case MM_TYPE_INTRA:
        case MM_TYPE_INTRA_HH:
        case MM_TYPE_INTER_HH:
        case MM_TYPE_INTRA_HHV:
        case MM_TYPE_INTER_HHV:
            /* output preamble + data */
            if (av_new_packet(pkt, length + MM_PREAMBLE_SIZE))
                return AVERROR(ENOMEM);
            memcpy(pkt->data, preamble, MM_PREAMBLE_SIZE);
            if (avio_read(pb, pkt->data + MM_PREAMBLE_SIZE, length) != length)
                return AVERROR(EIO);
            pkt->size = length + MM_PREAMBLE_SIZE;
            pkt->stream_index = 0;
            pkt->pts = mm->video_pts;
            /* palette chunks share the pts of the following frame */
            if (type != MM_TYPE_PALETTE)
                mm->video_pts++;
            return 0;

        case MM_TYPE_AUDIO:
            if ((ret = av_get_packet(s->pb, pkt, length)) < 0)
                return ret;
            pkt->stream_index = 1;
            pkt->pts = mm->audio_pts++;
            return 0;

        default:
            av_log(s, AV_LOG_INFO, "unknown chunk type 0x%x\n", type);
            avio_skip(pb, length);
        }
    }
}
 
/* American Laser Games MM demuxer registration. */
AVInputFormat ff_mm_demuxer = {
    .name           = "mm",
    .long_name      = NULL_IF_CONFIG_SMALL("American Laser Games MM"),
    .priv_data_size = sizeof(MmDemuxContext),
    .read_probe     = probe,
    .read_header    = read_header,
    .read_packet    = read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/mmf.c
0,0 → 1,326
/*
* Yamaha SMAF format
* Copyright (c) 2005 Vidar Madsen
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "pcm.h"
#include "rawenc.h"
#include "riff.h"
 
/* Shared muxer/demuxer state for Yamaha SMAF (.mmf) files. */
typedef struct {
    int64_t atrpos, atsqpos, awapos; ///< offsets of the ATR, Atsq and Awa chunk payloads (muxer)
    int64_t data_end;                ///< absolute end offset of the Awa wave data (demuxer)
    int stereo;                      ///< nonzero if the audio has more than one channel
} MMFContext;
 
/* Sample rates supported by SMAF, indexed by the 4-bit rate code. */
static const int mmf_rates[] = { 4000, 8000, 11025, 22050, 44100 };

/**
 * Map a SMAF rate code to a sample rate in Hz.
 *
 * @param code rate code (low nibble of the ATR params byte)
 * @return sample rate in Hz, or -1 if the code is out of range
 */
static int mmf_rate(int code)
{
    /* Bound-check against the table size instead of a hard-coded 4 so the
     * check stays correct if the table ever grows. */
    if (code < 0 || code >= (int)(sizeof(mmf_rates) / sizeof(mmf_rates[0])))
        return -1;
    return mmf_rates[code];
}
 
#if CONFIG_MMF_MUXER
/**
 * Map a sample rate in Hz to its SMAF rate code.
 *
 * @param rate sample rate in Hz
 * @return 4-bit rate code, or -1 if the rate is not supported by SMAF
 */
static int mmf_rate_code(int rate)
{
    int i;
    /* Search the whole table instead of a hard-coded count of 5 so this
     * stays in sync with mmf_rates[]. */
    for (i = 0; i < (int)(sizeof(mmf_rates) / sizeof(mmf_rates[0])); i++)
        if (mmf_rates[i] == rate)
            return i;
    return -1;
}
 
/* Copy of end_tag() from avienc.c, but patching a big-endian chunk size:
 * writes (current position - start) into the 4 bytes just before 'start',
 * then restores the write position. */
static void end_tag_be(AVIOContext *pb, int64_t start)
{
    int64_t here = avio_tell(pb);

    avio_seek(pb, start - 4, SEEK_SET);
    avio_wb32(pb, (uint32_t)(here - start));
    avio_seek(pb, here, SEEK_SET);
}
 
/* Write the SMAF file layout: MMMD container, CNTI content info, OPDA
 * metadata, the "ATR" audio track header, a fixed 16-byte Atsq sequence
 * chunk (patched in the trailer) and the start of the Awa wave-data chunk.
 * Returns 0 on success or AVERROR(EINVAL) for unsupported parameters. */
static int mmf_write_header(AVFormatContext *s)
{
    MMFContext *mmf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t pos;
    int rate;
    /* With -bitexact, avoid embedding the exact library version string. */
    const char *version = s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT ?
                          "VN:Lavf," :
                          "VN:"LIBAVFORMAT_IDENT",";

    rate = mmf_rate_code(s->streams[0]->codec->sample_rate);
    if (rate < 0) {
        av_log(s, AV_LOG_ERROR, "Unsupported sample rate %d, supported are 4000, 8000, 11025, 22050 and 44100\n",
               s->streams[0]->codec->sample_rate);
        return AVERROR(EINVAL);
    }

    mmf->stereo = s->streams[0]->codec->channels > 1;
    if (mmf->stereo &&
        s->streams[0]->codec->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(s, AV_LOG_ERROR, "Yamaha SMAF stereo is experimental, "
               "add '-strict %d' if you want to use it.\n",
               FF_COMPLIANCE_EXPERIMENTAL);
        return AVERROR(EINVAL);
    }

    ffio_wfourcc(pb, "MMMD");
    avio_wb32(pb, 0);          /* total file size, patched via end_tag_be(pb, 8) in the trailer */
    pos = ff_start_tag(pb, "CNTI");
    avio_w8(pb, 0); /* class */
    avio_w8(pb, 1); /* type */
    avio_w8(pb, 1); /* code type */
    avio_w8(pb, 0); /* status */
    avio_w8(pb, 0); /* counts */
    end_tag_be(pb, pos);

    pos = ff_start_tag(pb, "OPDA");
    avio_write(pb, version, strlen(version)); /* metadata ("ST:songtitle,VN:version,...") */
    end_tag_be(pb, pos);

    avio_write(pb, "ATR\x00", 4);
    avio_wb32(pb, 0);          /* ATR chunk size, patched in the trailer */
    mmf->atrpos = avio_tell(pb);
    avio_w8(pb, 0); /* format type */
    avio_w8(pb, 0); /* sequence type */
    avio_w8(pb, (mmf->stereo << 7) | (1 << 4) | rate); /* (channel << 7) | (format << 4) | rate */
    avio_w8(pb, 0); /* wave base bit */
    avio_w8(pb, 2); /* time base d */
    avio_w8(pb, 2); /* time base g */

    ffio_wfourcc(pb, "Atsq");
    avio_wb32(pb, 16);
    mmf->atsqpos = avio_tell(pb);
    /* Will be filled on close */
    avio_write(pb, "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 16);

    mmf->awapos = ff_start_tag(pb, "Awa\x01");

    avpriv_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);

    avio_flush(pb);

    return 0;
}
 
/* Write a SMAF variable-length quantity: one byte for values below 128,
 * otherwise two bytes with the high bit of the first byte set. */
static void put_varlength(AVIOContext *pb, int val)
{
    if (val >= 128) {
        int rem = val - 128;
        avio_w8(pb, 0x80 | (rem >> 7));
        avio_w8(pb, rem & 0x7f);
    } else {
        avio_w8(pb, val);
    }
}
 
/* Patch the chunk sizes written as placeholders in the header, and fill
 * the Atsq chunk with a minimal "play wave / nop / end" sequence sized to
 * the amount of audio data actually written. Only possible on seekable
 * output; otherwise the file is left with zero-length fields. */
static int mmf_write_trailer(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    MMFContext *mmf = s->priv_data;
    int64_t pos, size;
    int gatetime;

    if (s->pb->seekable) {
        /* Fill in length fields */
        end_tag_be(pb, mmf->awapos);
        end_tag_be(pb, mmf->atrpos);
        end_tag_be(pb, 8);

        pos  = avio_tell(pb);
        size = pos - mmf->awapos;

        /* Fill Atsq chunk */
        avio_seek(pb, mmf->atsqpos, SEEK_SET);

        /* "play wav" */
        avio_w8(pb, 0);                      /* start time */
        avio_w8(pb, (mmf->stereo << 6) | 1); /* (channel << 6) | wavenum */
        gatetime = size * 500 / s->streams[0]->codec->sample_rate;
        put_varlength(pb, gatetime);         /* duration */

        /* "nop" */
        put_varlength(pb, gatetime);         /* start time */
        avio_write(pb, "\xff\x00", 2);       /* nop */

        /* "end of sequence" */
        avio_write(pb, "\x00\x00\x00\x00", 4);

        avio_seek(pb, pos, SEEK_SET);

        avio_flush(pb);
    }
    return 0;
}
#endif /* CONFIG_MMF_MUXER */
 
/* Probe for the SMAF signature: "MMMD" magic at offset 0, followed by a
 * 32-bit size and the mandatory "CNTI" chunk tag at offset 8. */
static int mmf_probe(AVProbeData *p)
{
    if (p->buf[0] != 'M' || p->buf[1] != 'M' ||
        p->buf[2] != 'M' || p->buf[3] != 'D')
        return 0;
    if (p->buf[8] != 'C' || p->buf[9]  != 'N' ||
        p->buf[10] != 'T' || p->buf[11] != 'I')
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/* mmf input */
/* Parse the SMAF header: verify the MMMD magic, skip optional CNTI/OPDA
 * chunks, read the audio parameters from the ATR chunk, skip optional
 * Atsq/AspI chunks, and leave the stream positioned at the Awa wave data.
 * Creates one Yamaha ADPCM audio stream. Returns 0 or a negative AVERROR. */
static int mmf_read_header(AVFormatContext *s)
{
    MMFContext *mmf = s->priv_data;
    unsigned int tag;
    AVIOContext *pb = s->pb;
    AVStream *st;
    int64_t size;
    int rate, params;

    tag = avio_rl32(pb);
    if (tag != MKTAG('M', 'M', 'M', 'D'))
        return AVERROR_INVALIDDATA;
    avio_skip(pb, 4); /* file_size */

    /* Skip some unused chunks that may or may not be present */
    for (;; avio_skip(pb, size)) {
        tag  = avio_rl32(pb);
        size = avio_rb32(pb);
        if (tag == MKTAG('C', 'N', 'T', 'I'))
            continue;
        if (tag == MKTAG('O', 'P', 'D', 'A'))
            continue;
        break;
    }

    /* Tag = "ATRx", where "x" = track number */
    if ((tag & 0xffffff) == MKTAG('M', 'T', 'R', 0)) {
        av_log(s, AV_LOG_ERROR, "MIDI like format found, unsupported\n");
        return AVERROR_PATCHWELCOME;
    }
    if ((tag & 0xffffff) != MKTAG('A', 'T', 'R', 0)) {
        av_log(s, AV_LOG_ERROR, "Unsupported SMAF chunk %08x\n", tag);
        return AVERROR_PATCHWELCOME;
    }

    avio_r8(pb); /* format type */
    avio_r8(pb); /* sequence type */
    params = avio_r8(pb); /* (channel << 7) | (format << 4) | rate */
    rate   = mmf_rate(params & 0x0f);
    if (rate < 0) {
        av_log(s, AV_LOG_ERROR, "Invalid sample rate\n");
        return AVERROR_INVALIDDATA;
    }
    avio_r8(pb); /* wave base bit */
    avio_r8(pb); /* time base d */
    avio_r8(pb); /* time base g */

    /* Skip some unused chunks that may or may not be present */
    for (;; avio_skip(pb, size)) {
        tag  = avio_rl32(pb);
        size = avio_rb32(pb);
        if (tag == MKTAG('A', 't', 's', 'q'))
            continue;
        if (tag == MKTAG('A', 's', 'p', 'I'))
            continue;
        break;
    }

    /* Make sure it's followed by an Awa chunk, aka wave data */
    if ((tag & 0xffffff) != MKTAG('A', 'w', 'a', 0)) {
        av_log(s, AV_LOG_ERROR, "Unexpected SMAF chunk %08x\n", tag);
        return AVERROR_INVALIDDATA;
    }
    /* End offset of the wave data; mmf_read_packet() reads up to here. */
    mmf->data_end = avio_tell(pb) + size;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id              = AV_CODEC_ID_ADPCM_YAMAHA;
    st->codec->sample_rate           = rate;
    st->codec->channels              = (params >> 7) + 1;
    st->codec->channel_layout        = params >> 7 ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
    st->codec->bits_per_coded_sample = 4;
    st->codec->bit_rate              = st->codec->sample_rate *
                                       st->codec->bits_per_coded_sample;

    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

    return 0;
}
 
#define MAX_SIZE 4096

/* Hand out raw ADPCM data in chunks of at most MAX_SIZE bytes, stopping at
 * the end of the Awa wave data recorded in mmf->data_end. */
static int mmf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MMFContext *mmf    = s->priv_data;
    int64_t remaining  = mmf->data_end - avio_tell(s->pb);
    int64_t chunk_size = FFMIN(remaining, MAX_SIZE);
    int ret;

    if (url_feof(s->pb) || chunk_size <= 0)
        return AVERROR_EOF;

    ret = av_get_packet(s->pb, pkt, chunk_size);
    if (ret < 0)
        return ret;

    pkt->stream_index = 0;
    return ret;
}
 
#if CONFIG_MMF_DEMUXER
/* Demuxer registration for Yamaha SMAF files. */
AVInputFormat ff_mmf_demuxer = {
    .name           = "mmf",
    .long_name      = NULL_IF_CONFIG_SMALL("Yamaha SMAF"),
    .priv_data_size = sizeof(MMFContext),
    .read_probe     = mmf_probe,
    .read_header    = mmf_read_header,
    .read_packet    = mmf_read_packet,
    .flags          = AVFMT_GENERIC_INDEX,
};
#endif
 
#if CONFIG_MMF_MUXER
/* Muxer registration for Yamaha SMAF files (Yamaha ADPCM audio only,
 * no video). Packets are written raw; framing lives in header/trailer. */
AVOutputFormat ff_mmf_muxer = {
    .name           = "mmf",
    .long_name      = NULL_IF_CONFIG_SMALL("Yamaha SMAF"),
    .mime_type      = "application/vnd.smaf",
    .extensions     = "mmf",
    .priv_data_size = sizeof(MMFContext),
    .audio_codec    = AV_CODEC_ID_ADPCM_YAMAHA,
    .video_codec    = AV_CODEC_ID_NONE,
    .write_header   = mmf_write_header,
    .write_packet   = ff_raw_write_packet,
    .write_trailer  = mmf_write_trailer,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/mms.c
0,0 → 1,149
/*
* MMS protocol common definitions.
* Copyright (c) 2006,2007 Ryan Martell
* Copyright (c) 2007 Björn Axelsson
* Copyright (c) 2010 Zhentan Feng <spyfeng at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "mms.h"
#include "asf.h"
#include "libavutil/intreadwrite.h"
 
#define MMS_MAX_STREAMS 256 /**< arbitrary sanity check value */
 
/* Copy up to 'size' bytes of the stored ASF header into 'buf', advancing
 * the read offset. Returns the number of bytes copied (0 once exhausted). */
int ff_mms_read_header(MMSContext *mms, uint8_t *buf, const int size)
{
    char *pos;
    int size_to_copy;
    int remaining_size = mms->asf_header_size - mms->asf_header_read_size;
    size_to_copy = FFMIN(size, remaining_size);
    pos = mms->asf_header + mms->asf_header_read_size;
    memcpy(buf, pos, size_to_copy);
    /* NOTE(review): the buffer is freed when read_size already equals
     * header_size on entry, i.e. on the first call after full consumption.
     * Subsequent calls then compute 'pos' from a NULL base; size_to_copy is
     * 0 so nothing is read, but confirm this ordering is intentional. */
    if (mms->asf_header_read_size == mms->asf_header_size) {
        av_freep(&mms->asf_header); // which contains asf header
    }
    mms->asf_header_read_size += size_to_copy;
    return size_to_copy;
}
 
/* Hand out at most 'size' bytes from the buffered incoming media packet,
 * advancing the read pointer. Returns the number of bytes copied. */
int ff_mms_read_data(MMSContext *mms, uint8_t *buf, const int size)
{
    int count = FFMIN(size, mms->remaining_in_len);

    memcpy(buf, mms->read_in_ptr, count);
    mms->read_in_ptr      += count;
    mms->remaining_in_len -= count;
    return count;
}
 
/**
 * Parse the ASF header stored in mms->asf_header: pick up the packet size
 * from the file properties object and collect the stream ids from every
 * stream properties object into mms->streams.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on a corrupt header,
 *         AVERROR(ENOMEM) on allocation failure.
 */
int ff_mms_asf_header_parser(MMSContext *mms)
{
    uint8_t *p = mms->asf_header;
    uint8_t *end;
    int flags, stream_id;
    mms->stream_num = 0;

    if (mms->asf_header_size < sizeof(ff_asf_guid) * 2 + 22 ||
        memcmp(p, ff_asf_header, sizeof(ff_asf_guid))) {
        av_log(NULL, AV_LOG_ERROR,
               "Corrupt stream (invalid ASF header, size=%d)\n",
               mms->asf_header_size);
        return AVERROR_INVALIDDATA;
    }

    end = mms->asf_header + mms->asf_header_size;

    /* Skip the top-level header object GUID, size and object count. */
    p += sizeof(ff_asf_guid) + 14;
    while (end - p >= sizeof(ff_asf_guid) + 8) {
        uint64_t chunksize;
        if (!memcmp(p, ff_asf_data_header, sizeof(ff_asf_guid))) {
            chunksize = 50; // see Reference [2] section 5.1
        } else {
            chunksize = AV_RL64(p + sizeof(ff_asf_guid));
        }
        if (!chunksize || chunksize > end - p) {
            av_log(NULL, AV_LOG_ERROR,
                   "Corrupt stream (header chunksize %"PRId64" is invalid)\n",
                   chunksize);
            return AVERROR_INVALIDDATA;
        }
        if (!memcmp(p, ff_asf_file_header, sizeof(ff_asf_guid))) {
            /* read packet size */
            if (end - p > sizeof(ff_asf_guid) * 2 + 68) {
                mms->asf_packet_len = AV_RL32(p + sizeof(ff_asf_guid) * 2 + 64);
                if (mms->asf_packet_len <= 0 || mms->asf_packet_len > sizeof(mms->in_buffer)) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Corrupt stream (too large pkt_len %d)\n",
                           mms->asf_packet_len);
                    return AVERROR_INVALIDDATA;
                }
            }
        } else if (!memcmp(p, ff_asf_stream_header, sizeof(ff_asf_guid))) {
            flags     = AV_RL16(p + sizeof(ff_asf_guid) * 3 + 24);
            stream_id = flags & 0x7F;
            //The second condition is for checking CS_PKT_STREAM_ID_REQUEST packet size,
            //we can calcuate the packet size by stream_num.
            //Please see function send_stream_selection_request().
            if (mms->stream_num < MMS_MAX_STREAMS &&
                46 + mms->stream_num * 6 < sizeof(mms->out_buffer)) {
                /* Check the reallocation result before use: the previous
                 * code dereferenced it unconditionally and crashed on OOM. */
                void *tmp = av_fast_realloc(mms->streams,
                                            &mms->nb_streams_allocated,
                                            (mms->stream_num + 1) * sizeof(MMSStream));
                if (!tmp)
                    return AVERROR(ENOMEM);
                mms->streams = tmp;
                mms->streams[mms->stream_num].id = stream_id;
                mms->stream_num++;
            } else {
                av_log(NULL, AV_LOG_ERROR,
                       "Corrupt stream (too many A/V streams)\n");
                return AVERROR_INVALIDDATA;
            }
        } else if (!memcmp(p, ff_asf_ext_stream_header, sizeof(ff_asf_guid))) {
            if (end - p >= 88) {
                int stream_count = AV_RL16(p + 84), ext_len_count = AV_RL16(p + 86);
                uint64_t skip_bytes = 88;
                /* Walk past the variable-length stream names... */
                while (stream_count--) {
                    if (end - p < skip_bytes + 4) {
                        av_log(NULL, AV_LOG_ERROR,
                               "Corrupt stream (next stream name length is not in the buffer)\n");
                        return AVERROR_INVALIDDATA;
                    }
                    skip_bytes += 4 + AV_RL16(p + skip_bytes + 2);
                }
                /* ...and the payload extension system descriptors. */
                while (ext_len_count--) {
                    if (end - p < skip_bytes + 22) {
                        av_log(NULL, AV_LOG_ERROR,
                               "Corrupt stream (next extension system info length is not in the buffer)\n");
                        return AVERROR_INVALIDDATA;
                    }
                    skip_bytes += 22 + AV_RL32(p + skip_bytes + 18);
                }
                if (end - p < skip_bytes) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Corrupt stream (the last extension system info length is invalid)\n");
                    return AVERROR_INVALIDDATA;
                }
                if (chunksize - skip_bytes > 24)
                    chunksize = skip_bytes;
            }
        } else if (!memcmp(p, ff_asf_head1_guid, sizeof(ff_asf_guid))) {
            chunksize = 46; // see references [2] section 3.4. This should be set 46.
        }
        p += chunksize;
    }

    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/mms.h
0,0 → 1,64
/*
* MMS protocol common definitions.
* Copyright (c) 2010 Zhentan Feng <spyfeng at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_MMS_H
#define AVFORMAT_MMS_H
 
#include "url.h"
 
/** One A/V stream announced in the ASF header. */
typedef struct MMSStream {
    int id;  ///< stream id (low 7 bits of the ASF stream-properties flags)
}MMSStream;
 
/** State shared by the MMST and MMSH protocol implementations. */
typedef struct MMSContext {
    URLContext *mms_hd;                  ///< TCP connection handle
    MMSStream *streams;                  ///< per-stream info, stream_num entries

    /** Buffer for outgoing packets. */
    /*@{*/
    uint8_t *write_out_ptr;              ///< Pointer for writing the buffer.
    uint8_t out_buffer[512];             ///< Buffer for outgoing packet.
    /*@}*/

    /** Buffer for incoming packets. */
    /*@{*/
    uint8_t in_buffer[65536];            ///< Buffer for incoming packets.
    uint8_t *read_in_ptr;                ///< Pointer for reading from incoming buffer.
    int remaining_in_len;                ///< Reading length from incoming buffer.
    /*@}*/

    /** Internal handling of the ASF header */
    /*@{*/
    uint8_t *asf_header;                 ///< Stored ASF header.
    int asf_header_size;                 ///< Size of stored ASF header.
    int header_parsed;                   ///< The header has been received and parsed.
    int asf_packet_len;                  ///< ASF packet size from the file properties object
    int asf_header_read_size;            ///< How much of the stored header was handed to the caller.
    /*@}*/

    int stream_num;                      ///< stream numbers.
    unsigned int nb_streams_allocated;   ///< allocated size of streams
} MMSContext;
 
int ff_mms_asf_header_parser(MMSContext * mms);
int ff_mms_read_data(MMSContext *mms, uint8_t *buf, const int size);
int ff_mms_read_header(MMSContext * mms, uint8_t * buf, const int size);
 
#endif /* AVFORMAT_MMS_H */
/contrib/sdk/sources/ffmpeg/libavformat/mmsh.c
0,0 → 1,413
/*
* MMS protocol over HTTP
* Copyright (c) 2010 Zhentan Feng <spyfeng at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/*
* Reference
* Windows Media HTTP Streaming Protocol.
* http://msdn.microsoft.com/en-us/library/cc251059(PROT.10).aspx
*/
 
#include <string.h>
#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "internal.h"
#include "mms.h"
#include "asf.h"
#include "http.h"
#include "url.h"
 
#define CHUNK_HEADER_LENGTH 4 // 2bytes chunk type and 2bytes chunk length.
#define EXT_HEADER_LENGTH 8 // 4bytes sequence, 2bytes useless and 2bytes chunk length.
 
// see Ref 2.2.1.8
#define USERAGENT "User-Agent: NSPlayer/4.1.0.3856\r\n"
// see Ref 2.2.1.4.33
// the guid value can be changed to any valid value.
#define CLIENTGUID "Pragma: xClientGUID={c77e7400-738a-11d2-9add-0020af0a3278}\r\n"
 
// see Ref 2.2.3 for packet type define:
// chunk type contains 2 fields: Frame and PacketID.
// Frame is 0x24 or 0xA4(rarely), different PacketID indicates different packet type.
// Values below encode (PacketID << 8) | Frame as read little-endian.
typedef enum {
    CHUNK_TYPE_DATA          = 0x4424,
    CHUNK_TYPE_ASF_HEADER    = 0x4824,
    CHUNK_TYPE_END           = 0x4524,
    CHUNK_TYPE_STREAM_CHANGE = 0x4324,
} ChunkType;
 
/** Per-connection state of the MMS-over-HTTP protocol handler. */
typedef struct {
    MMSContext mms;         ///< shared MMS state (buffers, streams, ASF header)
    uint8_t location[1024]; ///< original mmsh:// URL, kept for reopening on seek
    int request_seq;        ///< request packet sequence
    int chunk_seq;          ///< data packet sequence
} MMSHContext;
 
/* Tear down the HTTP connection and release all buffered session state. */
static int mmsh_close(URLContext *h)
{
    MMSHContext *mmsh = h->priv_data;
    MMSContext *mms   = &mmsh->mms;

    if (mms->mms_hd)
        ffurl_closep(&mms->mms_hd);

    av_freep(&mms->asf_header);
    av_freep(&mms->streams);
    return 0;
}
 
/* Read one chunk header (2-byte type + 2-byte length) and its extension
 * header from the stream. Stores the remaining payload length in *len and,
 * for data/end chunks, records the packet sequence number in chunk_seq.
 * Returns the chunk type, or a negative AVERROR code (cast to ChunkType)
 * on read failure or an unknown chunk type. */
static ChunkType get_chunk_header(MMSHContext *mmsh, int *len)
{
    MMSContext *mms = &mmsh->mms;
    uint8_t chunk_header[CHUNK_HEADER_LENGTH];
    uint8_t ext_header[EXT_HEADER_LENGTH];
    ChunkType chunk_type;
    int chunk_len, res, ext_header_len;

    res = ffurl_read_complete(mms->mms_hd, chunk_header, CHUNK_HEADER_LENGTH);
    if (res != CHUNK_HEADER_LENGTH) {
        av_log(NULL, AV_LOG_ERROR, "Read data packet header failed!\n");
        return AVERROR(EIO);
    }
    chunk_type = AV_RL16(chunk_header);
    chunk_len  = AV_RL16(chunk_header + 2);

    /* Extension header size depends on the chunk type (see Ref 2.2.3). */
    switch (chunk_type) {
    case CHUNK_TYPE_END:
    case CHUNK_TYPE_STREAM_CHANGE:
        ext_header_len = 4;
        break;
    case CHUNK_TYPE_ASF_HEADER:
    case CHUNK_TYPE_DATA:
        ext_header_len = 8;
        break;
    default:
        av_log(NULL, AV_LOG_ERROR, "Strange chunk type %d\n", chunk_type);
        return AVERROR_INVALIDDATA;
    }

    res = ffurl_read_complete(mms->mms_hd, ext_header, ext_header_len);
    if (res != ext_header_len) {
        av_log(NULL, AV_LOG_ERROR, "Read ext header failed!\n");
        return AVERROR(EIO);
    }
    *len = chunk_len - ext_header_len;
    if (chunk_type == CHUNK_TYPE_END || chunk_type == CHUNK_TYPE_DATA)
        mmsh->chunk_seq = AV_RL32(ext_header);
    return chunk_type;
}
 
/* Read 'len' bytes of media data into in_buffer and zero-pad the tail up
 * to asf_packet_len so the ASF demuxer always sees full-sized packets.
 * Resets read_in_ptr/remaining_in_len for subsequent ff_mms_read_data()
 * calls. Returns 0 or a negative AVERROR code. */
static int read_data_packet(MMSHContext *mmsh, const int len)
{
    MMSContext *mms = &mmsh->mms;
    int res;
    if (len > sizeof(mms->in_buffer)) {
        av_log(NULL, AV_LOG_ERROR,
               "Data packet length %d exceeds the in_buffer size %zu\n",
               len, sizeof(mms->in_buffer));
        return AVERROR(EIO);
    }
    res = ffurl_read_complete(mms->mms_hd, mms->in_buffer, len);
    av_dlog(NULL, "Data packet len = %d\n", len);
    if (res != len) {
        av_log(NULL, AV_LOG_ERROR, "Read data packet failed!\n");
        return AVERROR(EIO);
    }
    if (len > mms->asf_packet_len) {
        av_log(NULL, AV_LOG_ERROR,
               "Chunk length %d exceed packet length %d\n",len, mms->asf_packet_len);
        return AVERROR_INVALIDDATA;
    } else {
        memset(mms->in_buffer + len, 0, mms->asf_packet_len - len); // padding
    }
    mms->read_in_ptr = mms->in_buffer;
    mms->remaining_in_len = mms->asf_packet_len;
    return 0;
}
 
/**
 * Consume chunks from the HTTP response until the ASF header has been
 * stored (and, on first reception, parsed) or a data packet has been
 * buffered. Other chunk types are read and skipped.
 *
 * @return 0 on success or a negative AVERROR code.
 */
static int get_http_header_data(MMSHContext *mmsh)
{
    MMSContext *mms = &mmsh->mms;
    int res, len;
    ChunkType chunk_type;

    for (;;) {
        len = 0;
        res = chunk_type = get_chunk_header(mmsh, &len);
        if (res < 0) {
            return res;
        } else if (chunk_type == CHUNK_TYPE_ASF_HEADER){
            // get asf header and stored it
            if (!mms->header_parsed) {
                if (mms->asf_header) {
                    if (len != mms->asf_header_size)
                        av_dlog(NULL, "Header len changed from %d to %d\n",
                                mms->asf_header_size, len);
                    /* Always release the old buffer: the previous code only
                     * freed it when the size changed, leaking it whenever a
                     * same-sized header was re-sent (the pointer was then
                     * overwritten by av_mallocz() below). It also logged the
                     * old size after already overwriting it. */
                    av_freep(&mms->asf_header);
                }
                mms->asf_header = av_mallocz(len);
                if (!mms->asf_header) {
                    return AVERROR(ENOMEM);
                }
                mms->asf_header_size = len;
            }
            if (len > mms->asf_header_size) {
                av_log(NULL, AV_LOG_ERROR,
                       "Asf header packet len = %d exceed the asf header buf size %d\n",
                       len, mms->asf_header_size);
                return AVERROR(EIO);
            }
            res = ffurl_read_complete(mms->mms_hd, mms->asf_header, len);
            if (res != len) {
                av_log(NULL, AV_LOG_ERROR,
                       "Recv asf header data len %d != expected len %d\n", res, len);
                return AVERROR(EIO);
            }
            mms->asf_header_size = len;
            if (!mms->header_parsed) {
                res = ff_mms_asf_header_parser(mms);
                mms->header_parsed = 1;
                return res;
            }
        } else if (chunk_type == CHUNK_TYPE_DATA) {
            // read data packet and do padding
            return read_data_packet(mmsh, len);
        } else {
            if (len) {
                if (len > sizeof(mms->in_buffer)) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Other packet len = %d exceed the in_buffer size %zu\n",
                           len, sizeof(mms->in_buffer));
                    return AVERROR(EIO);
                }
                res = ffurl_read_complete(mms->mms_hd, mms->in_buffer, len);
                if (res != len) {
                    av_log(NULL, AV_LOG_ERROR, "Read other chunk type data failed!\n");
                    return AVERROR(EIO);
                } else {
                    av_dlog(NULL, "Skip chunk type %d \n", chunk_type);
                    continue;
                }
            }
        }
    }
}
 
/* Open an MMSH session: send the describe request and parse the ASF header,
 * then reconnect and send the play request for all discovered streams.
 * 'timestamp' is the stream time to start from (used by read_seek).
 * Returns 0 on success or a negative AVERROR code. */
static int mmsh_open_internal(URLContext *h, const char *uri, int flags, int timestamp, int64_t pos)
{
    int i, port, err;
    char httpname[256], path[256], host[128];
    char *stream_selection = NULL;
    char headers[1024];
    MMSHContext *mmsh = h->priv_data;
    MMSContext *mms;

    mmsh->request_seq = h->is_streamed = 1;
    mms = &mmsh->mms;
    av_strlcpy(mmsh->location, uri, sizeof(mmsh->location));

    av_url_split(NULL, 0, NULL, 0,
                 host, sizeof(host), &port, path, sizeof(path), mmsh->location);
    if (port<0)
        port = 80; // default mmsh protocol port
    ff_url_join(httpname, sizeof(httpname), "http", NULL, host, port, "%s", path);

    if (ffurl_alloc(&mms->mms_hd, httpname, AVIO_FLAG_READ,
                    &h->interrupt_callback) < 0) {
        return AVERROR(EIO);
    }

    /* First request: describe, to obtain the ASF header. */
    snprintf(headers, sizeof(headers),
             "Accept: */*\r\n"
             USERAGENT
             "Host: %s:%d\r\n"
             "Pragma: no-cache,rate=1.000000,stream-time=0,"
             "stream-offset=0:0,request-context=%u,max-duration=0\r\n"
             CLIENTGUID
             "Connection: Close\r\n",
             host, port, mmsh->request_seq++);
    av_opt_set(mms->mms_hd->priv_data, "headers", headers, 0);

    err = ffurl_connect(mms->mms_hd, NULL);
    if (err) {
        goto fail;
    }
    err = get_http_header_data(mmsh);
    if (err) {
        av_log(NULL, AV_LOG_ERROR, "Get http header data failed!\n");
        goto fail;
    }

    // close the socket and then reopen it for sending the second play request.
    ffurl_close(mms->mms_hd);
    memset(headers, 0, sizeof(headers));
    if ((err = ffurl_alloc(&mms->mms_hd, httpname, AVIO_FLAG_READ,
                           &h->interrupt_callback)) < 0) {
        goto fail;
    }
    stream_selection = av_mallocz(mms->stream_num * 19 + 1);
    if (!stream_selection) {
        /* Fix: go through the common cleanup path; returning directly here
         * leaked the connection handle allocated just above. */
        err = AVERROR(ENOMEM);
        goto fail;
    }
    for (i = 0; i < mms->stream_num; i++) {
        char tmp[20];
        err = snprintf(tmp, sizeof(tmp), "ffff:%d:0 ", mms->streams[i].id);
        if (err < 0)
            goto fail;
        av_strlcat(stream_selection, tmp, mms->stream_num * 19 + 1);
    }
    // send play request
    err = snprintf(headers, sizeof(headers),
                   "Accept: */*\r\n"
                   USERAGENT
                   "Host: %s:%d\r\n"
                   "Pragma: no-cache,rate=1.000000,request-context=%u\r\n"
                   "Pragma: xPlayStrm=1\r\n"
                   CLIENTGUID
                   "Pragma: stream-switch-count=%d\r\n"
                   "Pragma: stream-switch-entry=%s\r\n"
                   /* Fix: terminate the header line; the missing CRLF fused
                    * this Pragma with the following Connection header. */
                   "Pragma: no-cache,rate=1.000000,stream-time=%u\r\n"
                   "Connection: Close\r\n",
                   host, port, mmsh->request_seq++, mms->stream_num, stream_selection, timestamp);
    av_freep(&stream_selection);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Build play request failed!\n");
        goto fail;
    }
    av_dlog(NULL, "out_buffer is %s", headers);
    av_opt_set(mms->mms_hd->priv_data, "headers", headers, 0);

    err = ffurl_connect(mms->mms_hd, NULL);
    if (err) {
        goto fail;
    }

    err = get_http_header_data(mmsh);
    if (err) {
        av_log(NULL, AV_LOG_ERROR, "Get http header data failed!\n");
        goto fail;
    }

    av_dlog(NULL, "Connection successfully open\n");
    return 0;
fail:
    av_freep(&stream_selection);
    mmsh_close(h);
    av_dlog(NULL, "Connection failed with error %d\n", err);
    return err;
}
 
/* Open an mmsh:// resource for playback from the beginning of the stream. */
static int mmsh_open(URLContext *h, const char *uri, int flags)
{
    return mmsh_open_internal(h, uri, flags, 0, 0);
}
 
/**
 * Dispatch on the next chunk from the server: end of stream, stream change
 * (re-fetch the new ASF header) or a media data packet.
 *
 * @return 0 on success or a negative AVERROR code.
 */
static int handle_chunk_type(MMSHContext *mmsh)
{
    MMSContext *mms = &mmsh->mms;
    int res, len = 0;
    ChunkType chunk_type;
    chunk_type = get_chunk_header(mmsh, &len);

    /* get_chunk_header() reports failures as negative AVERROR codes cast to
     * ChunkType; propagate them instead of letting the default case below
     * mask them as AVERROR_INVALIDDATA with a bogus "other type" message. */
    if ((int)chunk_type < 0)
        return (int)chunk_type;

    switch (chunk_type) {
    case CHUNK_TYPE_END:
        mmsh->chunk_seq = 0;
        av_log(NULL, AV_LOG_ERROR, "Stream ended!\n");
        return AVERROR(EIO);
    case CHUNK_TYPE_STREAM_CHANGE:
        mms->header_parsed = 0;
        if (res = get_http_header_data(mmsh)) {
            av_log(NULL, AV_LOG_ERROR,"Stream changed! Failed to get new header!\n");
            return res;
        }
        break;
    case CHUNK_TYPE_DATA:
        return read_data_packet(mmsh, len);
    default:
        av_log(NULL, AV_LOG_ERROR, "Recv other type packet %d\n", chunk_type);
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
 
/* Protocol read callback: first serves the stored ASF header, then media
 * data, refilling the incoming buffer via handle_chunk_type() whenever it
 * runs dry. Loops until at least one byte is produced or an error occurs. */
static int mmsh_read(URLContext *h, uint8_t *buf, int size)
{
    int res = 0;
    MMSHContext *mmsh = h->priv_data;
    MMSContext *mms = &mmsh->mms;
    do {
        if (mms->asf_header_read_size < mms->asf_header_size) {
            // copy asf header into buffer
            res = ff_mms_read_header(mms, buf, size);
        } else {
            if (!mms->remaining_in_len && (res = handle_chunk_type(mmsh)))
                return res;
            res = ff_mms_read_data(mms, buf, size);
        }
    } while (!res);
    return res;
}
 
/* Timestamp seek: reopen the whole session at 'timestamp' in a fresh
 * context. Only on success is the old context closed and replaced; on
 * failure the original connection is left untouched. The stored ASF header
 * is marked as fully read so it is not re-emitted after the seek. */
static int64_t mmsh_read_seek(URLContext *h, int stream_index,
                              int64_t timestamp, int flags)
{
    MMSHContext *mmsh_old = h->priv_data;
    MMSHContext *mmsh     = av_mallocz(sizeof(*mmsh));
    int ret;

    if (!mmsh)
        return AVERROR(ENOMEM);

    h->priv_data = mmsh;
    ret= mmsh_open_internal(h, mmsh_old->location, 0, FFMAX(timestamp, 0), 0);
    if(ret>=0){
        /* Close the old session (temporarily swapping it back in so
         * mmsh_close() operates on the right context). */
        h->priv_data = mmsh_old;
        mmsh_close(h);
        h->priv_data = mmsh;
        av_free(mmsh_old);
        mmsh->mms.asf_header_read_size = mmsh->mms.asf_header_size;
    }else {
        h->priv_data = mmsh_old;
        av_free(mmsh);
    }

    return ret;
}
 
/* Byte-seek callback. Only "tell" (SEEK_CUR with offset 0) is supported:
 * the position is reported as header bytes handed out, plus buffered
 * payload, plus full packets already consumed (chunk_seq * packet size). */
static int64_t mmsh_seek(URLContext *h, int64_t pos, int whence)
{
    MMSHContext *mmsh = h->priv_data;
    MMSContext *mms = &mmsh->mms;

    if (whence != SEEK_CUR || pos != 0)
        return AVERROR(ENOSYS);

    return mms->asf_header_read_size + mms->remaining_in_len +
           mmsh->chunk_seq * (int64_t)mms->asf_packet_len;
}
 
/* Protocol registration for mmsh:// (MMS over HTTP). */
URLProtocol ff_mmsh_protocol = {
    .name           = "mmsh",
    .url_open       = mmsh_open,
    .url_read       = mmsh_read,
    .url_seek       = mmsh_seek,
    .url_close      = mmsh_close,
    .url_read_seek  = mmsh_read_seek,
    .priv_data_size = sizeof(MMSHContext),
    .flags          = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/mmst.c
0,0 → 1,629
/*
* MMS protocol over TCP
* Copyright (c) 2006,2007 Ryan Martell
* Copyright (c) 2007 Björn Axelsson
* Copyright (c) 2010 Zhentan Feng <spyfeng at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/* References
* MMS protocol specification:
* [1]http://msdn.microsoft.com/en-us/library/cc234711(PROT.10).aspx
* ASF specification. Revision 01.20.03.
* [2]http://msdn.microsoft.com/en-us/library/bb643323.aspx
*/
 
#include "avformat.h"
#include "mms.h"
#include "internal.h"
#include "avio_internal.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/bytestream.h"
#include "network.h"
#include "url.h"
 
#define LOCAL_ADDRESS 0xc0a80081 // FIXME get and use correct local ip address.
#define LOCAL_PORT 1037 // as above.
/** Client to server packet types (command ids; see reference [1]). */
typedef enum {
    CS_PKT_INITIAL              = 0x01,
    CS_PKT_PROTOCOL_SELECT      = 0x02,
    CS_PKT_MEDIA_FILE_REQUEST   = 0x05,
    CS_PKT_START_FROM_PKT_ID    = 0x07,
    CS_PKT_STREAM_PAUSE         = 0x09,
    CS_PKT_STREAM_CLOSE         = 0x0d,
    CS_PKT_MEDIA_HEADER_REQUEST = 0x15,
    CS_PKT_TIMING_DATA_REQUEST  = 0x18,
    CS_PKT_USER_PASSWORD        = 0x1a,
    CS_PKT_KEEPALIVE            = 0x1b,
    CS_PKT_STREAM_ID_REQUEST    = 0x33,
} MMSCSPacketType;
 
/** Server to client packet types (command ids; see reference [1]). */
typedef enum {
    /** Control packets. */
    /*@{*/
    SC_PKT_CLIENT_ACCEPTED         = 0x01,
    SC_PKT_PROTOCOL_ACCEPTED       = 0x02,
    SC_PKT_PROTOCOL_FAILED         = 0x03,
    SC_PKT_MEDIA_PKT_FOLLOWS       = 0x05,
    SC_PKT_MEDIA_FILE_DETAILS      = 0x06,
    SC_PKT_HEADER_REQUEST_ACCEPTED = 0x11,
    SC_PKT_TIMING_TEST_REPLY       = 0x15,
    SC_PKT_PASSWORD_REQUIRED       = 0x1a,
    SC_PKT_KEEPALIVE               = 0x1b,
    SC_PKT_STREAM_STOPPED          = 0x1e,
    SC_PKT_STREAM_CHANGING         = 0x20,
    SC_PKT_STREAM_ID_ACCEPTED      = 0x21,
    /*@}*/

    /** Pseudo packets (internal status codes, never sent on the wire). */
    /*@{*/
    SC_PKT_CANCEL                  = -1,
    SC_PKT_NO_DATA                 = -2,
    /*@}*/

    /** Data packets. */
    /*@{*/
    SC_PKT_ASF_HEADER              = 0x010000,// make it bigger than 0xFF in case of
    SC_PKT_ASF_MEDIA               = 0x010001,// receiving false data packets.
    /*@}*/
} MMSSCPacketType;
 
/** Per-connection state of the MMS-over-TCP protocol handler. */
typedef struct {
    MMSContext mms;                ///< shared MMS state (buffers, streams, ASF header)
    int outgoing_packet_seq;       ///< Outgoing packet sequence number.
    char path[256];                ///< Path of the resource being asked for.
    char host[128];                ///< Host of the resources.
    int incoming_packet_seq;       ///< Incoming packet sequence number.
    int incoming_flags;            ///< Incoming packet flags.
    int packet_id;                 ///< Identifier for packets in the current stream.
    unsigned int header_packet_id; ///< default is 2.
} MMSTContext;
 
/** Create MMST command packet header.
 * Resets the outgoing buffer and writes the fixed command preamble; the
 * three length fields are patched later by send_command_packet(). */
static void start_command_packet(MMSTContext *mmst, MMSCSPacketType packet_type)
{
    MMSContext *mms = &mmst->mms;
    mms->write_out_ptr = mms->out_buffer;

    bytestream_put_le32(&mms->write_out_ptr, 1); // start sequence
    bytestream_put_le32(&mms->write_out_ptr, 0xb00bface);
    bytestream_put_le32(&mms->write_out_ptr, 0); // Length starts from after the protocol type bytes
    bytestream_put_le32(&mms->write_out_ptr, MKTAG('M','M','S',' '));
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, mmst->outgoing_packet_seq++);
    bytestream_put_le64(&mms->write_out_ptr, 0); // timestamp
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le16(&mms->write_out_ptr, packet_type);
    bytestream_put_le16(&mms->write_out_ptr, 3); // direction to server
}
 
/** Append the two 32-bit little-endian prefix words that follow the MMST
 * command header. */
static void insert_command_prefixes(MMSContext *mms,
                                    uint32_t prefix1, uint32_t prefix2)
{
    bytestream_put_le32(&mms->write_out_ptr, prefix1);
    bytestream_put_le32(&mms->write_out_ptr, prefix2);
}
 
/** Send a prepared MMST command packet.
 * Zero-pads the packet to a multiple of 8 bytes, patches the three length
 * fields in the header, and writes it to the TCP connection.
 * @return 0 on success, AVERROR(EIO) on a short or failed write. */
static int send_command_packet(MMSTContext *mmst)
{
    MMSContext *mms  = &mmst->mms;
    int len          = mms->write_out_ptr - mms->out_buffer;
    int exact_length = FFALIGN(len, 8);
    int first_length = exact_length - 16;
    int len8         = first_length/8;
    int write_result;

    // update packet length fields.
    AV_WL32(mms->out_buffer + 8, first_length);
    AV_WL32(mms->out_buffer + 16, len8);
    AV_WL32(mms->out_buffer + 32, len8-2);
    memset(mms->write_out_ptr, 0, exact_length - len);

    // write it out.
    write_result= ffurl_write(mms->mms_hd, mms->out_buffer, exact_length);
    if(write_result != exact_length) {
        av_log(NULL, AV_LOG_ERROR,
               "Failed to write data of length %d: %d (%s)\n",
               exact_length, write_result,
               write_result < 0 ? strerror(AVUNERROR(write_result)) :
               "The server closed the connection");
        return AVERROR(EIO);
    }

    return 0;
}
 
/** Append 'src' to the outgoing packet as a little-endian UTF-16 string,
 * using a temporary AVIOContext over the free tail of out_buffer. */
static void mms_put_utf16(MMSContext *mms, const uint8_t *src)
{
    AVIOContext bic;
    int size = mms->write_out_ptr - mms->out_buffer;
    int len;
    ffio_init_context(&bic, mms->write_out_ptr,
                      sizeof(mms->out_buffer) - size, 1, NULL, NULL, NULL, NULL);

    len = avio_put_str16le(&bic, src);
    mms->write_out_ptr += len;
}
 
/** Reply to the server's timing test with a CS_PKT_TIMING_DATA_REQUEST. */
static int send_time_test_data(MMSTContext *mmst)
{
    start_command_packet(mmst, CS_PKT_TIMING_DATA_REQUEST);
    insert_command_prefixes(&mmst->mms, 0x00f0f0f0, 0x0004000b);
    return send_command_packet(mmst);
}
 
/** Send the CS_PKT_PROTOCOL_SELECT command announcing transport (TCP),
 * client address/port and the maximum accepted bitrate. */
static int send_protocol_select(MMSTContext *mmst)
{
    char data_string[256];
    MMSContext *mms = &mmst->mms;

    start_command_packet(mmst, CS_PKT_PROTOCOL_SELECT);
    insert_command_prefixes(mms, 0, 0xffffffff);
    bytestream_put_le32(&mms->write_out_ptr, 0);          // maxFunnelBytes
    bytestream_put_le32(&mms->write_out_ptr, 0x00989680); // maxbitRate
    bytestream_put_le32(&mms->write_out_ptr, 2);          // funnelMode
    /* "\\ip.ad.dr.ess\TCP\port" — built from the hard-coded LOCAL_ADDRESS
     * and LOCAL_PORT (see the FIXME at their definitions). */
    snprintf(data_string, sizeof(data_string), "\\\\%d.%d.%d.%d\\%s\\%d",
             (LOCAL_ADDRESS>>24)&0xff,
             (LOCAL_ADDRESS>>16)&0xff,
             (LOCAL_ADDRESS>>8)&0xff,
             LOCAL_ADDRESS&0xff,
             "TCP", // or UDP
             LOCAL_PORT);

    mms_put_utf16(mms, data_string);
    return send_command_packet(mmst);
}
 
/** Request details for the media file named by the URL path.
 *  The path is sent as UTF-16 with the leading '/' stripped. */
static int send_media_file_request(MMSTContext *mmst)
{
    MMSContext *mms = &mmst->mms;
    start_command_packet(mmst, CS_PKT_MEDIA_FILE_REQUEST);
    insert_command_prefixes(mms, 1, 0xffffffff);
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0);
    mms_put_utf16(mms, mmst->path + 1); // +1 for skip "/"

    return send_command_packet(mmst);
}
 
/** React to a stream-change notification: remember the packet id that
 * will carry the new ASF header. */
static void handle_packet_stream_changing_type(MMSTContext *mmst)
{
    av_dlog(NULL, "Stream changing!\n");

    // The new header packet id sits just past the 40-byte packet header
    // and the 7-byte command prefix.
    mmst->header_packet_id = AV_RL32(mmst->mms.in_buffer + 40 + 7);
    av_dlog(NULL, "Changed header prefix to 0x%x", mmst->header_packet_id);
}
 
/** Answer a server keepalive with a client keepalive so the
 *  connection is not dropped as idle. */
static int send_keepalive_packet(MMSTContext *mmst)
{
    // respond to a keepalive with a keepalive...
    start_command_packet(mmst, CS_PKT_KEEPALIVE);
    insert_command_prefixes(&mmst->mms, 1, 0x100FFFF);
    return send_command_packet(mmst);
}
 
/** Zero-pad an incoming media packet that is shorter than the ASF
 * packet size so downstream code always sees full-size packets. */
static void pad_media_packet(MMSContext *mms)
{
    int shortfall = mms->asf_packet_len - mms->remaining_in_len;

    if (shortfall > 0) {
        memset(mms->in_buffer + mms->remaining_in_len, 0, shortfall);
        mms->remaining_in_len = mms->asf_packet_len;
    }
}
 
/** Read incoming MMST media, header or command packet.
 *
 * Loops until a packet worth returning arrives:
 *  - command packets (magic 0xb00bface at offset 4) are fully read and
 *    their type/error code extracted;
 *  - data packets are classified as ASF header or ASF media by their
 *    packet id; header fragments are accumulated into mms->asf_header;
 *  - keepalives are answered in place and stale packet ids are skipped.
 *
 * @return the received MMSSCPacketType, or a negative AVERROR on
 *         read/validation failure (SC_PKT_CANCEL / SC_PKT_NO_DATA for
 *         preamble read problems).
 */
static MMSSCPacketType get_tcp_server_response(MMSTContext *mmst)
{
    int read_result;
    MMSSCPacketType packet_type= -1;
    MMSContext *mms = &mmst->mms;
    for(;;) {
        // every packet starts with an 8-byte preamble
        read_result = ffurl_read_complete(mms->mms_hd, mms->in_buffer, 8);
        if (read_result != 8) {
            if(read_result < 0) {
                av_log(NULL, AV_LOG_ERROR,
                       "Error reading packet header: %d (%s)\n",
                       read_result, strerror(AVUNERROR(read_result)));
                packet_type = SC_PKT_CANCEL;
            } else {
                av_log(NULL, AV_LOG_ERROR,
                       "The server closed the connection\n");
                packet_type = SC_PKT_NO_DATA;
            }
            return packet_type;
        }

        // handle command packet.
        if(AV_RL32(mms->in_buffer + 4)==0xb00bface) {
            int length_remaining, hr;

            mmst->incoming_flags= mms->in_buffer[3];
            // next 4 bytes hold the remaining command length
            read_result= ffurl_read_complete(mms->mms_hd, mms->in_buffer+8, 4);
            if(read_result != 4) {
                av_log(NULL, AV_LOG_ERROR,
                       "Reading command packet length failed: %d (%s)\n",
                       read_result,
                       read_result < 0 ? strerror(AVUNERROR(read_result)) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR(EIO);
            }

            length_remaining= AV_RL32(mms->in_buffer+8) + 4;
            av_dlog(NULL, "Length remaining is %d\n", length_remaining);
            // read the rest of the packet.
            if (length_remaining < 0
                || length_remaining > sizeof(mms->in_buffer) - 12) {
                av_log(NULL, AV_LOG_ERROR,
                       "Incoming packet length %d exceeds bufsize %zu\n",
                       length_remaining, sizeof(mms->in_buffer) - 12);
                return AVERROR_INVALIDDATA;
            }
            read_result = ffurl_read_complete(mms->mms_hd, mms->in_buffer + 12,
                                              length_remaining) ;
            if (read_result != length_remaining) {
                av_log(NULL, AV_LOG_ERROR,
                       "Reading pkt data (length=%d) failed: %d (%s)\n",
                       length_remaining, read_result,
                       read_result < 0 ? strerror(AVUNERROR(read_result)) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR(EIO);
            }
            packet_type= AV_RL16(mms->in_buffer+36);
            // offset 40 carries an HRESULT-style status; nonzero is an error
            if (read_result >= 44 && (hr = AV_RL32(mms->in_buffer + 40))) {
                av_log(NULL, AV_LOG_ERROR,
                       "Server sent a message with packet type 0x%x and error status code 0x%08x\n", packet_type, hr);
                return AVERROR(EINVAL);
            }
        } else {
            int length_remaining;
            int packet_id_type;
            int tmp;

            // note we cache the first 8 bytes,
            // then fill up the buffer with the others
            tmp                       = AV_RL16(mms->in_buffer + 6);
            length_remaining          = (tmp - 8) & 0xffff;
            mmst->incoming_packet_seq = AV_RL32(mms->in_buffer);
            packet_id_type            = mms->in_buffer[4];
            mmst->incoming_flags      = mms->in_buffer[5];

            if (length_remaining < 0
                || length_remaining > sizeof(mms->in_buffer) - 8) {
                av_log(NULL, AV_LOG_ERROR,
                       "Data length %d is invalid or too large (max=%zu)\n",
                       length_remaining, sizeof(mms->in_buffer));
                return AVERROR_INVALIDDATA;
            }
            mms->remaining_in_len    = length_remaining;
            mms->read_in_ptr         = mms->in_buffer;
            read_result= ffurl_read_complete(mms->mms_hd, mms->in_buffer, length_remaining);
            if(read_result != length_remaining) {
                av_log(NULL, AV_LOG_ERROR,
                       "Failed to read packet data of size %d: %d (%s)\n",
                       length_remaining, read_result,
                       read_result < 0 ? strerror(AVUNERROR(read_result)) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR(EIO);
            }

            // if we successfully read everything.
            if(packet_id_type == mmst->header_packet_id) {
                int err;
                packet_type = SC_PKT_ASF_HEADER;
                // Store the asf header
                if(!mms->header_parsed) {
                    if ((err = av_reallocp(&mms->asf_header,
                                           mms->asf_header_size +
                                           mms->remaining_in_len)) < 0) {
                        mms->asf_header_size = 0;
                        return err;
                    }
                    memcpy(mms->asf_header + mms->asf_header_size,
                           mms->read_in_ptr, mms->remaining_in_len);
                    mms->asf_header_size += mms->remaining_in_len;
                }
                // 0x04 means asf header is sent in multiple packets.
                if (mmst->incoming_flags == 0x04)
                    continue;
            } else if(packet_id_type == mmst->packet_id) {
                packet_type = SC_PKT_ASF_MEDIA;
            } else {
                // packet belongs to a superseded stream selection; drop it
                av_dlog(NULL, "packet id type %d is old.", packet_id_type);
                continue;
            }
        }

        // preprocess some packet type
        if(packet_type == SC_PKT_KEEPALIVE) {
            send_keepalive_packet(mmst);
            continue;
        } else if(packet_type == SC_PKT_STREAM_CHANGING) {
            handle_packet_stream_changing_type(mmst);
        } else if(packet_type == SC_PKT_ASF_MEDIA) {
            pad_media_packet(mms);
        }
        return packet_type;
    }
}
 
/** Optionally send a command and then require a specific response type.
 *
 * @param send_fun    command sender to invoke first, or NULL to only receive
 * @param expect_type packet type the server must answer with
 * @return 0 if the expected packet arrived, the send error, or
 *         AVERROR_INVALIDDATA on an unexpected packet type.
 */
static int mms_safe_send_recv(MMSTContext *mmst,
                              int (*send_fun)(MMSTContext *mmst),
                              const MMSSCPacketType expect_type)
{
    MMSSCPacketType received;

    if (send_fun) {
        int ret = send_fun(mmst);
        if (ret < 0) {
            av_dlog(NULL, "Send Packet error before expecting recv packet %d\n", expect_type);
            return ret;
        }
    }

    received = get_tcp_server_response(mmst);
    if (received == expect_type)
        return 0;

    av_log(NULL, AV_LOG_ERROR,
           "Corrupt stream (unexpected packet type 0x%x, expected 0x%x)\n",
           received, expect_type);
    return AVERROR_INVALIDDATA;
}
 
/** Ask the server to start sending the ASF header for the opened file.
 *  The constant fields mirror the values expected by WMS servers;
 *  their exact semantics are mostly undocumented. */
static int send_media_header_request(MMSTContext *mmst)
{
    MMSContext *mms = &mmst->mms;
    start_command_packet(mmst, CS_PKT_MEDIA_HEADER_REQUEST);
    insert_command_prefixes(mms, 1, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0x00800000);
    bytestream_put_le32(&mms->write_out_ptr, 0xffffffff);
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0);

    // the media preroll value in milliseconds?
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0x40AC2000);
    bytestream_put_le32(&mms->write_out_ptr, 2);
    bytestream_put_le32(&mms->write_out_ptr, 0);

    return send_command_packet(mmst);
}
 
/** Send the initial handshake.
 *
 * Identifies the client with an NSPlayer user-agent string, a fixed
 * GUID and the target host name.
 */
static int send_startup_packet(MMSTContext *mmst)
{
    char data_string[256];
    MMSContext *mms = &mmst->mms;
    // SubscriberName is defined in MS specification linked below.
    // The guid value can be any valid value.
    // http://download.microsoft.com/
    // download/9/5/E/95EF66AF-9026-4BB0-A41D-A4F81802D92C/%5BMS-WMSP%5D.pdf
    snprintf(data_string, sizeof(data_string),
            "NSPlayer/7.0.0.1956; {%s}; Host: %s",
            "7E667F5D-A661-495E-A512-F55686DDA178", mmst->host);

    start_command_packet(mmst, CS_PKT_INITIAL);
    insert_command_prefixes(mms, 0, 0x0004000b);
    bytestream_put_le32(&mms->write_out_ptr, 0x0003001c);
    mms_put_utf16(mms, data_string);
    return send_command_packet(mmst);
}
 
/** Send MMST stream selection command based on the AVStream->discard values.
 *  Note: this implementation unconditionally selects every known stream
 *  (selection field is always 0 = "on"). */
static int send_stream_selection_request(MMSTContext *mmst)
{
    int i;
    MMSContext *mms = &mmst->mms;
    // send the streams we want back...
    start_command_packet(mmst, CS_PKT_STREAM_ID_REQUEST);
    bytestream_put_le32(&mms->write_out_ptr, mms->stream_num);         // stream nums
    for(i= 0; i<mms->stream_num; i++) {
        bytestream_put_le16(&mms->write_out_ptr, 0xffff);              // flags
        bytestream_put_le16(&mms->write_out_ptr, mms->streams[i].id);  // stream id
        bytestream_put_le16(&mms->write_out_ptr, 0);                   // selection
    }
    return send_command_packet(mmst);
}
 
/** Politely tell the server that we are closing the stream. */
static int send_close_packet(MMSTContext *mmst)
{
    start_command_packet(mmst, CS_PKT_STREAM_CLOSE);
    insert_command_prefixes(&mmst->mms, 1, 1);

    return send_command_packet(mmst);
}
 
/** Close the MMSH/MMST connection.
 *  Sends a stream-close command if the TCP handle is still open, then
 *  releases the per-connection allocations. Always returns 0. */
static int mms_close(URLContext *h)
{
    MMSTContext *mmst = (MMSTContext *)h->priv_data;
    MMSContext *mms   = &mmst->mms;
    if(mms->mms_hd) {
        send_close_packet(mmst);
        ffurl_close(mms->mms_hd);
    }

    /* free all separately allocated pointers in mms */
    av_free(mms->streams);
    av_free(mms->asf_header);

    return 0;
}
 
/** Request the start of media-packet delivery (play from the beginning).
 *  A fresh packet_id is allocated so stale packets from any previous
 *  request can be recognized and dropped. */
static int send_media_packet_request(MMSTContext *mmst)
{
    MMSContext *mms = &mmst->mms;
    start_command_packet(mmst, CS_PKT_START_FROM_PKT_ID);
    insert_command_prefixes(mms, 1, 0x0001FFFF);
    bytestream_put_le64(&mms->write_out_ptr, 0);          // seek timestamp
    bytestream_put_le32(&mms->write_out_ptr, 0xffffffff); // unknown
    bytestream_put_le32(&mms->write_out_ptr, 0xffffffff); // packet offset
    bytestream_put_byte(&mms->write_out_ptr, 0xff);       // max stream time limit
    bytestream_put_byte(&mms->write_out_ptr, 0xff);       // max stream time limit
    bytestream_put_byte(&mms->write_out_ptr, 0xff);       // max stream time limit
    bytestream_put_byte(&mms->write_out_ptr, 0x00);       // stream time limit flag

    mmst->packet_id++;                                    // new packet_id
    bytestream_put_le32(&mms->write_out_ptr, mmst->packet_id);
    return send_command_packet(mmst);
}
 
 
/** Discard any buffered incoming data and reset the read cursor. */
static void clear_stream_buffers(MMSContext *mms)
{
    mms->remaining_in_len = 0;
    mms->read_in_ptr      = mms->in_buffer;
}
 
/** Open an MMST (MMS over TCP) connection and run the full handshake.
 *
 * Parses host/port/path from @p uri, connects over TCP, performs the
 * startup / timing / protocol-select / file-request / header exchange,
 * parses the ASF header and finally requests media packets.
 *
 * @return 0 on success, a negative AVERROR code on failure (the
 *         connection is torn down via mms_close() before returning).
 */
static int mms_open(URLContext *h, const char *uri, int flags)
{
    MMSTContext *mmst = h->priv_data;
    MMSContext *mms;
    int port, err;
    char tcpname[256];

    h->is_streamed = 1;
    mms = &mmst->mms;

    // only for MMS over TCP, so set proto = NULL
    av_url_split(NULL, 0, NULL, 0,
            mmst->host, sizeof(mmst->host), &port, mmst->path,
            sizeof(mmst->path), uri);

    if(port<0)
        port = 1755; // default mms protocol port

    // establish tcp connection.
    ff_url_join(tcpname, sizeof(tcpname), "tcp", NULL, mmst->host, port, NULL);
    err = ffurl_open(&mms->mms_hd, tcpname, AVIO_FLAG_READ_WRITE,
                     &h->interrupt_callback, NULL);
    if (err)
        goto fail;

    mmst->packet_id        = 3;          // default, initial value.
    mmst->header_packet_id = 2;          // default, initial value.
    err = mms_safe_send_recv(mmst, send_startup_packet, SC_PKT_CLIENT_ACCEPTED);
    if (err)
        goto fail;
    err = mms_safe_send_recv(mmst, send_time_test_data, SC_PKT_TIMING_TEST_REPLY);
    if (err)
        goto fail;
    err = mms_safe_send_recv(mmst, send_protocol_select, SC_PKT_PROTOCOL_ACCEPTED);
    if (err)
        goto fail;
    err = mms_safe_send_recv(mmst, send_media_file_request, SC_PKT_MEDIA_FILE_DETAILS);
    if (err)
        goto fail;
    err = mms_safe_send_recv(mmst, send_media_header_request, SC_PKT_HEADER_REQUEST_ACCEPTED);
    if (err)
        goto fail;
    err = mms_safe_send_recv(mmst, NULL, SC_PKT_ASF_HEADER);
    if (err)
        goto fail;
    if((mmst->incoming_flags != 0X08) && (mmst->incoming_flags != 0X0C)) {
        av_log(NULL, AV_LOG_ERROR,
               "The server does not support MMST (try MMSH or RTSP)\n");
        err = AVERROR(EINVAL);
        goto fail;
    }
    err = ff_mms_asf_header_parser(mms);
    if (err) {
        av_dlog(NULL, "asf header parsed failed!\n");
        goto fail;
    }
    mms->header_parsed = 1;

    if (!mms->asf_packet_len || !mms->stream_num) {
        // BUGFIX: err was previously left at 0 here, so mms_open()
        // reported success even though the connection was torn down.
        err = AVERROR_INVALIDDATA;
        goto fail;
    }

    clear_stream_buffers(mms);
    err = mms_safe_send_recv(mmst, send_stream_selection_request, SC_PKT_STREAM_ID_ACCEPTED);
    if (err)
        goto fail;
    // send media packet request
    err = mms_safe_send_recv(mmst, send_media_packet_request, SC_PKT_MEDIA_PKT_FOLLOWS);
    if (err) {
        goto fail;
    }
    av_dlog(NULL, "Leaving open (success)\n");
    return 0;
fail:
    mms_close(h);
    av_dlog(NULL, "Leaving open (failure: %d)\n", err);
    return err;
}
 
/** Read ASF data through the protocol.
 *
 * Serves data in priority order: remaining bytes of the cached ASF
 * header first, then any buffered packet data, and only then reads a
 * new media packet from the network. Returns at most one packet's
 * worth of data per call.
 *
 * @return number of bytes copied into @p buf, 0 on clean end of data,
 *         or a negative AVERROR code.
 */
static int mms_read(URLContext *h, uint8_t *buf, int size)
{
    /* TODO: see tcp.c:tcp_read() about a possible timeout scheme */
    MMSTContext *mmst = h->priv_data;
    MMSContext *mms   = &mmst->mms;
    int result = 0;

    do {
        if(mms->asf_header_read_size < mms->asf_header_size) {
            /* Read from ASF header buffer */
            result = ff_mms_read_header(mms, buf, size);
        } else if(mms->remaining_in_len) {
            /* Read remaining packet data to buffer.
             * the result can not be zero because remaining_in_len is positive.*/
            result = ff_mms_read_data(mms, buf, size);
        } else {
            /* Read from network */
            int err = mms_safe_send_recv(mmst, NULL, SC_PKT_ASF_MEDIA);
            if (err == 0) {
                if(mms->remaining_in_len>mms->asf_packet_len) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Incoming pktlen %d is larger than ASF pktsize %d\n",
                           mms->remaining_in_len, mms->asf_packet_len);
                    result= AVERROR(EIO);
                } else {
                    // copy the data to the packet buffer.
                    result = ff_mms_read_data(mms, buf, size);
                    if (result == 0) {
                        av_dlog(NULL, "Read ASF media packet size is zero!\n");
                        break;
                    }
                }
            } else {
                av_dlog(NULL, "read packet error!\n");
                break;
            }
        }
    } while(!result); // only return one packet.
    return result;
}
 
/* Protocol registration entry for "mmst://" URLs (MMS over TCP). */
URLProtocol ff_mmst_protocol = {
    .name           = "mmst",
    .url_open       = mms_open,
    .url_read       = mms_read,
    .url_close      = mms_close,
    .priv_data_size = sizeof(MMSTContext),
    .flags          = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/mov.c
0,0 → 1,3618
/*
* MOV demuxer
* Copyright (c) 2001 Fabrice Bellard
* Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
*
* first version by Francois Revol <revol@free.fr>
* seek function by Gael Chardon <gael.dev@4now.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <limits.h>
 
//#define MOV_EXPORT_ALL_METADATA
 
#include "libavutil/attributes.h"
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "libavutil/mathematics.h"
#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/timecode.h"
#include "libavcodec/ac3tab.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
#include "riff.h"
#include "isom.h"
#include "libavcodec/get_bits.h"
#include "id3v1.h"
#include "mov_chan.h"
 
#if CONFIG_ZLIB
#include <zlib.h>
#endif
 
#include "qtpalette.h"
 
 
#undef NDEBUG
#include <assert.h>
 
/* those functions parse an atom */
/* links atom IDs to parse functions */
typedef struct MOVParseTableEntry {
    uint32_t type;                                              ///< atom fourcc
    int (*parse)(MOVContext *ctx, AVIOContext *pb, MOVAtom atom); ///< handler for that atom
} MOVParseTableEntry;
 
static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom);
 
/* Parse a 'trkn'/'disk' payload ("current of total") and store it as
 * "N" or "N/M" under @p key in the file-level metadata dictionary. */
static int mov_metadata_track_or_disc_number(MOVContext *c, AVIOContext *pb,
                                             unsigned len, const char *key)
{
    char text[16];
    short total = 0, current;

    avio_rb16(pb); // unknown
    current = avio_rb16(pb);
    if (len >= 6)
        total = avio_rb16(pb);

    if (total)
        snprintf(text, sizeof(text), "%d/%d", current, total);
    else
        snprintf(text, sizeof(text), "%d", current);
    av_dict_set(&c->fc->metadata, key, text, 0);

    return 0;
}
 
/* Parse a metadata value stored as 3 padding bytes followed by one
 * uint8 value, and store the value as a decimal string under @p key. */
static int mov_metadata_int8_bypass_padding(MOVContext *c, AVIOContext *pb,
                                            unsigned len, const char *key)
{
    char text[16];
    int i;

    /* skip the three leading padding bytes */
    for (i = 0; i < 3; i++)
        avio_r8(pb);

    snprintf(text, sizeof(text), "%d", avio_r8(pb));
    av_dict_set(&c->fc->metadata, key, text, 0);

    return 0;
}
 
/* Parse a single uint8 metadata value and store it as a decimal
 * string under @p key in the file-level metadata dictionary. */
static int mov_metadata_int8_no_padding(MOVContext *c, AVIOContext *pb,
                                        unsigned len, const char *key)
{
    char text[16];

    snprintf(text, sizeof(text), "%d", avio_r8(pb));
    av_dict_set(&c->fc->metadata, key, text, 0);

    return 0;
}
 
/* Parse a 'gnre' atom: one unknown byte then an ID3v1 genre index
 * (1-based). Out-of-range indices are silently ignored. */
static int mov_metadata_gnre(MOVContext *c, AVIOContext *pb,
                             unsigned len, const char *key)
{
    int index;
    char name[20];

    avio_r8(pb); // unknown

    index = avio_r8(pb);
    if (index >= 1 && index <= ID3v1_GENRE_MAX) {
        snprintf(name, sizeof(name), "%s", ff_id3v1_genre_str[index - 1]);
        av_dict_set(&c->fc->metadata, key, name, 0);
    }

    return 0;
}
 
/* Parse an iTunes '----' custom metadata atom.
 *
 * Scans up to three sub-atoms for a 'name'/'data' pair; the only keys
 * acted upon are "iTunSMPB" (extracts the encoder priming value into
 * sc->start_pad) and "cdec" (recognized but ignored).
 *
 * Returns 1 when a recognized key was consumed, 0 otherwise.
 */
static int mov_read_custom_metadata(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    char key[1024]={0}, data[1024]={0};
    int i;
    AVStream *st;
    MOVStreamContext *sc;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    if (atom.size <= 8) return 0;

    for (i = 0; i < 3; i++) { // Parse up to three sub-atoms looking for name and data.
        int data_size = avio_rb32(pb);
        int tag = avio_rl32(pb);
        int str_size = 0, skip_size = 0;
        char *target = NULL;

        switch (tag) {
        case MKTAG('n','a','m','e'):
            avio_rb32(pb); // version/flags
            str_size = skip_size = data_size - 12;
            atom.size -= 12;
            target = key;
            break;
        case MKTAG('d','a','t','a'):
            avio_rb32(pb); // version/flags
            avio_rb32(pb); // reserved (zero)
            str_size = skip_size = data_size - 16;
            atom.size -= 16;
            target = data;
            break;
        default:
            skip_size = data_size - 8;
            str_size = 0;
            break;
        }

        if (target) {
            // clamp to buffer capacity and to what is left of the atom
            str_size = FFMIN3(sizeof(data)-1, str_size, atom.size);
            avio_read(pb, target, str_size);
            target[str_size] = 0;
        }
        atom.size -= skip_size;

        // If we didn't read the full data chunk for the sub-atom, skip to the end of it.
        if (skip_size > str_size) avio_skip(pb, skip_size - str_size);
    }

    if (*key && *data) {
        if (strcmp(key, "iTunSMPB") == 0) {
            int priming, remainder, samples;
            if(sscanf(data, "%*X %X %X %X", &priming, &remainder, &samples) == 3){
                // sanity-bound the priming value before trusting it
                if(priming>0 && priming<16384)
                    sc->start_pad = priming;
                return 1;
            }
        }
        if (strcmp(key, "cdec") == 0) {
//             av_dict_set(&st->metadata, key, data, 0);
            return 1;
        }
    }
    return 0;
}
 
/* Mapping of MacRoman code points 0x80-0xFF to Unicode, used when
 * converting Mac-encoded metadata strings to UTF-8. */
static const uint32_t mac_to_unicode[128] = {
    0x00C4,0x00C5,0x00C7,0x00C9,0x00D1,0x00D6,0x00DC,0x00E1,
    0x00E0,0x00E2,0x00E4,0x00E3,0x00E5,0x00E7,0x00E9,0x00E8,
    0x00EA,0x00EB,0x00ED,0x00EC,0x00EE,0x00EF,0x00F1,0x00F3,
    0x00F2,0x00F4,0x00F6,0x00F5,0x00FA,0x00F9,0x00FB,0x00FC,
    0x2020,0x00B0,0x00A2,0x00A3,0x00A7,0x2022,0x00B6,0x00DF,
    0x00AE,0x00A9,0x2122,0x00B4,0x00A8,0x2260,0x00C6,0x00D8,
    0x221E,0x00B1,0x2264,0x2265,0x00A5,0x00B5,0x2202,0x2211,
    0x220F,0x03C0,0x222B,0x00AA,0x00BA,0x03A9,0x00E6,0x00F8,
    0x00BF,0x00A1,0x00AC,0x221A,0x0192,0x2248,0x2206,0x00AB,
    0x00BB,0x2026,0x00A0,0x00C0,0x00C3,0x00D5,0x0152,0x0153,
    0x2013,0x2014,0x201C,0x201D,0x2018,0x2019,0x00F7,0x25CA,
    0x00FF,0x0178,0x2044,0x20AC,0x2039,0x203A,0xFB01,0xFB02,
    0x2021,0x00B7,0x201A,0x201E,0x2030,0x00C2,0x00CA,0x00C1,
    0x00CB,0x00C8,0x00CD,0x00CE,0x00CF,0x00CC,0x00D3,0x00D4,
    0xF8FF,0x00D2,0x00DA,0x00DB,0x00D9,0x0131,0x02C6,0x02DC,
    0x00AF,0x02D8,0x02D9,0x02DA,0x00B8,0x02DD,0x02DB,0x02C7,
};
 
/* Read @p len MacRoman-encoded bytes from @p pb and store them in
 * @p dst as a NUL-terminated UTF-8 string of at most dstlen-1 bytes.
 * Returns the number of bytes written (excluding the terminator). */
static int mov_read_mac_string(MOVContext *c, AVIOContext *pb, int len,
                               char *dst, int dstlen)
{
    char *out = dst;
    char *end = dst + dstlen - 1;
    int i;

    for (i = 0; i < len; i++) {
        uint8_t tmp, ch = avio_r8(pb);
        if (ch < 0x80 && out < end)
            *out++ = ch;                 /* plain ASCII passes through */
        else if (out < end)              /* high half goes through the table */
            PUT_UTF8(mac_to_unicode[ch - 0x80], tmp, if (out < end) *out++ = tmp;);
    }
    *out = 0;
    return out - dst;
}
 
/* Parse embedded cover art ('covr'): create a new stream whose attached
 * picture holds the image data. @p type is the iTunes data type code
 * (0xd JPEG, 0xe PNG, 0x1b BMP); unknown types are skipped. */
static int mov_read_covr(MOVContext *c, AVIOContext *pb, int type, int len)
{
    AVPacket pkt;
    AVStream *st;
    MOVStreamContext *sc;
    enum AVCodecID id;
    int ret;

    switch (type) {
    case 0xd:  id = AV_CODEC_ID_MJPEG; break;
    case 0xe:  id = AV_CODEC_ID_PNG;   break;
    case 0x1b: id = AV_CODEC_ID_BMP;   break;
    default:
        av_log(c->fc, AV_LOG_WARNING, "Unknown cover type: 0x%x.\n", type);
        avio_skip(pb, len);
        return 0;
    }

    st = avformat_new_stream(c->fc, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    sc = av_mallocz(sizeof(*sc));
    if (!sc)
        return AVERROR(ENOMEM);
    st->priv_data = sc;

    ret = av_get_packet(pb, &pkt, len);
    if (ret < 0)
        return ret;

    st->disposition              |= AV_DISPOSITION_ATTACHED_PIC;

    st->attached_pic              = pkt;
    st->attached_pic.stream_index = st->index;
    st->attached_pic.flags       |= AV_PKT_FLAG_KEY;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = id;

    return 0;
}
 
/* Store @p len raw bytes from @p pb verbatim as metadata under @p key.
 * Ownership of the allocated string is transferred to the dictionary
 * (AV_DICT_DONT_STRDUP_VAL). */
static int mov_metadata_raw(MOVContext *c, AVIOContext *pb,
                            unsigned len, const char *key)
{
    char *value;

    // BUGFIX: len + 1 could wrap to 0 for len == UINT_MAX, making
    // value[len] an out-of-bounds write; reject absurd lengths.
    if (len >= INT_MAX)
        return AVERROR_INVALIDDATA;
    value = av_malloc(len + 1);
    if (!value)
        return AVERROR(ENOMEM);
    avio_read(pb, value, len);
    value[len] = 0;
    return av_dict_set(&c->fc->metadata, key, value, AV_DICT_DONT_STRDUP_VAL);
}
 
/* Parse a user-data metadata atom into the file-level metadata dictionary.
 *
 * Maps the atom fourcc to a metadata key, then reads the payload either
 * in iTunes layout ('data' sub-atom with type field) or classic QuickTime
 * layout (16-bit length + language code). Values are decoded from
 * MacRoman when the data type / language code indicates it; a per-language
 * "key-lang" entry is added for non-"und" languages. Specialized payloads
 * are delegated to the mov_metadata_* helpers via @c parse.
 */
static int mov_read_udta_string(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
#ifdef MOV_EXPORT_ALL_METADATA
    char tmp_key[5];
#endif
    char str[1024], key2[16], language[4] = {0};
    const char *key = NULL;
    uint16_t langcode = 0;
    uint32_t data_type = 0, str_size;
    int (*parse)(MOVContext*, AVIOContext*, unsigned, const char*) = NULL;

    if (c->itunes_metadata && atom.type == MKTAG('-','-','-','-'))
        return mov_read_custom_metadata(c, pb, atom);

    switch (atom.type) {
    case MKTAG(0xa9,'n','a','m'): key = "title";     break;
    case MKTAG(0xa9,'a','u','t'):
    case MKTAG(0xa9,'A','R','T'): key = "artist";    break;
    case MKTAG( 'a','A','R','T'): key = "album_artist";    break;
    case MKTAG(0xa9,'w','r','t'): key = "composer";  break;
    case MKTAG( 'c','p','r','t'):
    case MKTAG(0xa9,'c','p','y'): key = "copyright"; break;
    case MKTAG(0xa9,'g','r','p'): key = "grouping"; break;
    case MKTAG(0xa9,'l','y','r'): key = "lyrics"; break;
    case MKTAG(0xa9,'c','m','t'):
    case MKTAG(0xa9,'i','n','f'): key = "comment";   break;
    case MKTAG(0xa9,'a','l','b'): key = "album";     break;
    case MKTAG(0xa9,'d','a','y'): key = "date";      break;
    case MKTAG(0xa9,'g','e','n'): key = "genre";     break;
    case MKTAG( 'g','n','r','e'): key = "genre";
        parse = mov_metadata_gnre; break;
    case MKTAG(0xa9,'t','o','o'):
    case MKTAG(0xa9,'s','w','r'): key = "encoder";   break;
    case MKTAG(0xa9,'e','n','c'): key = "encoder";   break;
    case MKTAG(0xa9,'m','a','k'): key = "make";      break;
    case MKTAG(0xa9,'m','o','d'): key = "model";     break;
    case MKTAG(0xa9,'x','y','z'): key = "location";  break;
    case MKTAG( 'd','e','s','c'): key = "description";break;
    case MKTAG( 'l','d','e','s'): key = "synopsis";  break;
    case MKTAG( 't','v','s','h'): key = "show";      break;
    case MKTAG( 't','v','e','n'): key = "episode_id";break;
    case MKTAG( 't','v','n','n'): key = "network";   break;
    case MKTAG( 't','r','k','n'): key = "track";
        parse = mov_metadata_track_or_disc_number; break;
    case MKTAG( 'd','i','s','k'): key = "disc";
        parse = mov_metadata_track_or_disc_number; break;
    case MKTAG( 't','v','e','s'): key = "episode_sort";
        parse = mov_metadata_int8_bypass_padding; break;
    case MKTAG( 't','v','s','n'): key = "season_number";
        parse = mov_metadata_int8_bypass_padding; break;
    case MKTAG( 's','t','i','k'): key = "media_type";
        parse = mov_metadata_int8_no_padding; break;
    case MKTAG( 'h','d','v','d'): key = "hd_video";
        parse = mov_metadata_int8_no_padding; break;
    case MKTAG( 'p','g','a','p'): key = "gapless_playback";
        parse = mov_metadata_int8_no_padding; break;
    case MKTAG( '@','P','R','M'):
        return mov_metadata_raw(c, pb, atom.size, "premiere_version");
    case MKTAG( '@','P','R','Q'):
        return mov_metadata_raw(c, pb, atom.size, "quicktime_version");
    }

    if (c->itunes_metadata && atom.size > 8) {
        // iTunes layout: the value lives in a nested 'data' sub-atom
        int data_size = avio_rb32(pb);
        int tag = avio_rl32(pb);
        if (tag == MKTAG('d','a','t','a')) {
            data_type = avio_rb32(pb); // type
            avio_rb32(pb); // unknown
            str_size = data_size - 16;
            atom.size -= 16;

            if (atom.type == MKTAG('c', 'o', 'v', 'r')) {
                int ret = mov_read_covr(c, pb, data_type, str_size);
                if (ret < 0) {
                    av_log(c->fc, AV_LOG_ERROR, "Error parsing cover art.\n");
                    return ret;
                }
            }
        } else return 0;
    } else if (atom.size > 4 && key && !c->itunes_metadata) {
        // classic QuickTime layout: 16-bit size + 16-bit language code
        str_size = avio_rb16(pb); // string length
        langcode = avio_rb16(pb);
        ff_mov_lang_to_iso639(langcode, language);
        atom.size -= 4;
    } else
        str_size = atom.size;

#ifdef MOV_EXPORT_ALL_METADATA
    if (!key) {
        snprintf(tmp_key, 5, "%.4s", (char*)&atom.type);
        key = tmp_key;
    }
#endif

    if (!key)
        return 0;
    if (atom.size < 0)
        return AVERROR_INVALIDDATA;

    str_size = FFMIN3(sizeof(str)-1, str_size, atom.size);

    if (parse)
        parse(c, pb, str_size, key);
    else {
        if (data_type == 3 || (data_type == 0 && (langcode < 0x400 || langcode == 0x7fff))) { // MAC Encoded
            mov_read_mac_string(c, pb, str_size, str, sizeof(str));
        } else {
            avio_read(pb, str, str_size);
            str[str_size] = 0;
        }
        av_dict_set(&c->fc->metadata, key, str, 0);
        if (*language && strcmp(language, "und")) {
            snprintf(key2, sizeof(key2), "%s-%s", key, language);
            av_dict_set(&c->fc->metadata, key2, str, 0);
        }
    }
    av_dlog(c->fc, "lang \"%3s\" ", language);
    av_dlog(c->fc, "tag \"%s\" value \"%s\" atom \"%.4s\" %d %"PRId64"\n",
            key, str, (char*)&atom.type, str_size, atom.size);

    return 0;
}
 
/* Parse a Nero 'chpl' chapter-list atom: for each chapter, a 64-bit
 * start time in 100ns units and a Pascal-style (length-prefixed) title.
 * Truncated atoms end parsing silently (returns 0, never an error). */
static int mov_read_chpl(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    int64_t start;
    int i, nb_chapters, str_len, version;
    char str[256+1];   // str_len comes from avio_r8() so max 255 + NUL

    if ((atom.size -= 5) < 0)
        return 0;

    version = avio_r8(pb);
    avio_rb24(pb);
    if (version)
        avio_rb32(pb); // ???
    nb_chapters = avio_r8(pb);

    for (i = 0; i < nb_chapters; i++) {
        if (atom.size < 9)
            return 0;

        start = avio_rb64(pb);
        str_len = avio_r8(pb);

        if ((atom.size -= 9+str_len) < 0)
            return 0;

        avio_read(pb, str, str_len);
        str[str_len] = 0;
        // timebase 1/10000000 matches the 100ns start units
        avpriv_new_chapter(c->fc, i, (AVRational){1,10000000}, start, AV_NOPTS_VALUE, str);
    }
    return 0;
}
 
#define MIN_DATA_ENTRY_BOX_SIZE 12
/* Parse a 'dref' data-reference atom for the current stream.
 *
 * Allocates sc->drefs and fills one MOVDref per entry. For Macintosh
 * 'alis' alias records it extracts the volume name, file name,
 * navigation levels and the absolute path / directory name sub-records
 * (converting ':' path separators to '/').
 */
static int mov_read_dref(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    int entries, i, j;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    avio_rb32(pb); // version + flags
    entries = avio_rb32(pb);
    // entry count must be plausible for the atom size and not overflow
    if (entries > (atom.size - 1) / MIN_DATA_ENTRY_BOX_SIZE + 1 ||
        entries >= UINT_MAX / sizeof(*sc->drefs))
        return AVERROR_INVALIDDATA;
    av_free(sc->drefs);
    sc->drefs_count = 0;
    sc->drefs = av_mallocz(entries * sizeof(*sc->drefs));
    if (!sc->drefs)
        return AVERROR(ENOMEM);
    sc->drefs_count = entries;

    for (i = 0; i < sc->drefs_count; i++) {
        MOVDref *dref = &sc->drefs[i];
        uint32_t size = avio_rb32(pb);
        int64_t next = avio_tell(pb) + size - 4;

        if (size < 12)
            return AVERROR_INVALIDDATA;

        dref->type = avio_rl32(pb);
        avio_rb32(pb); // version + flags
        av_dlog(c->fc, "type %.4s size %d\n", (char*)&dref->type, size);

        if (dref->type == MKTAG('a','l','i','s') && size > 150) {
            /* macintosh alias record */
            uint16_t volume_len, len;
            int16_t type;

            avio_skip(pb, 10);

            volume_len = avio_r8(pb);
            volume_len = FFMIN(volume_len, 27);   // field is 27 bytes on disk
            avio_read(pb, dref->volume, 27);
            dref->volume[volume_len] = 0;
            av_log(c->fc, AV_LOG_DEBUG, "volume %s, len %d\n", dref->volume, volume_len);

            avio_skip(pb, 12);

            len = avio_r8(pb);
            len = FFMIN(len, 63);                 // field is 63 bytes on disk
            avio_read(pb, dref->filename, 63);
            dref->filename[len] = 0;
            av_log(c->fc, AV_LOG_DEBUG, "filename %s, len %d\n", dref->filename, len);

            avio_skip(pb, 16);

            /* read next level up_from_alias/down_to_target */
            dref->nlvl_from = avio_rb16(pb);
            dref->nlvl_to   = avio_rb16(pb);
            av_log(c->fc, AV_LOG_DEBUG, "nlvl from %d, nlvl to %d\n",
                   dref->nlvl_from, dref->nlvl_to);

            avio_skip(pb, 16);

            // variable-length sub-records: 16-bit type, 16-bit length;
            // type -1 terminates the list
            for (type = 0; type != -1 && avio_tell(pb) < next; ) {
                if(url_feof(pb))
                    return AVERROR_EOF;
                type = avio_rb16(pb);
                len = avio_rb16(pb);
                av_log(c->fc, AV_LOG_DEBUG, "type %d, len %d\n", type, len);
                if (len&1)
                    len += 1;   // records are padded to even length
                if (type == 2) { // absolute path
                    av_free(dref->path);
                    dref->path = av_mallocz(len+1);
                    if (!dref->path)
                        return AVERROR(ENOMEM);
                    avio_read(pb, dref->path, len);
                    // strip the volume-name prefix if present
                    if (len > volume_len && !strncmp(dref->path, dref->volume, volume_len)) {
                        len -= volume_len;
                        memmove(dref->path, dref->path+volume_len, len);
                        dref->path[len] = 0;
                    }
                    for (j = 0; j < len; j++)
                        if (dref->path[j] == ':')
                            dref->path[j] = '/';
                    av_log(c->fc, AV_LOG_DEBUG, "path %s\n", dref->path);
                } else if (type == 0) { // directory name
                    av_free(dref->dir);
                    dref->dir = av_malloc(len+1);
                    if (!dref->dir)
                        return AVERROR(ENOMEM);
                    avio_read(pb, dref->dir, len);
                    dref->dir[len] = 0;
                    for (j = 0; j < len; j++)
                        if (dref->dir[j] == ':')
                            dref->dir[j] = '/';
                    av_log(c->fc, AV_LOG_DEBUG, "dir %s\n", dref->dir);
                } else
                    avio_skip(pb, len);
            }
        }
        avio_seek(pb, next, SEEK_SET);
    }
    return 0;
}
 
/* Parse a 'hdlr' handler atom: sets the codec type of the current
 * stream from the component subtype ('vide'/'soun'/'subp'/'clcp', or
 * MP2 audio for 'm1a ') and stores the trailing handler name as
 * stream metadata. A 'meta'-level hdlr before any trak is ignored. */
static int mov_read_hdlr(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    uint32_t type;
    uint32_t av_unused ctype;
    int title_size;
    char *title_str;

    if (c->fc->nb_streams < 1) // meta before first trak
        return 0;

    st = c->fc->streams[c->fc->nb_streams-1];

    avio_r8(pb); /* version */
    avio_rb24(pb); /* flags */

    /* component type */
    ctype = avio_rl32(pb);
    type = avio_rl32(pb); /* component subtype */

    av_dlog(c->fc, "ctype= %.4s (0x%08x)\n", (char*)&ctype, ctype);
    av_dlog(c->fc, "stype= %.4s\n", (char*)&type);

    if     (type == MKTAG('v','i','d','e'))
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    else if (type == MKTAG('s','o','u','n'))
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    else if (type == MKTAG('m','1','a',' '))
        st->codec->codec_id = AV_CODEC_ID_MP2;
    else if ((type == MKTAG('s','u','b','p')) || (type == MKTAG('c','l','c','p')))
        st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;

    avio_rb32(pb); /* component  manufacture */
    avio_rb32(pb); /* component flags */
    avio_rb32(pb); /* component flags mask */

    title_size = atom.size - 24;
    if (title_size > 0) {
        title_str = av_malloc(title_size + 1); /* Add null terminator */
        if (!title_str)
            return AVERROR(ENOMEM);
        avio_read(pb, title_str, title_size);
        title_str[title_size] = 0;
        if (title_str[0])
            // QuickTime files store the name as a Pascal string; skip the
            // leading length byte when it matches (non-isom case)
            av_dict_set(&st->metadata, "handler_name", title_str +
                        (!c->isom && title_str[0] == title_size - 1), 0);
        av_freep(&title_str);
    }

    return 0;
}
 
/* Parse an MPEG-4 'esds' elementary stream descriptor atom for the
 * current stream: walks the ES descriptor (or bare 16-bit ID) and, if
 * present, the decoder configuration descriptor. Shared with other
 * demuxers, hence the public ff_ prefix. */
int ff_mov_read_esds(AVFormatContext *fc, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    int tag;

    if (fc->nb_streams < 1)
        return 0;
    st = fc->streams[fc->nb_streams-1];

    avio_rb32(pb); /* version + flags */
    ff_mp4_read_descr(fc, pb, &tag);
    if (tag == MP4ESDescrTag) {
        ff_mp4_parse_es_descr(pb, NULL);
    } else
        avio_rb16(pb); /* ID */

    ff_mp4_read_descr(fc, pb, &tag);
    if (tag == MP4DecConfigDescrTag)
        ff_mp4_read_dec_config_descr(fc, st, pb);
    return 0;
}
 
/* Atom-table adapter: forward 'esds' parsing to the shared helper. */
static int mov_read_esds(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    return ff_mov_read_esds(c->fc, pb, atom);
}
 
/* Parse a 'dac3' AC-3 specific atom: derive channel count, channel
 * layout and audio service type from the 24-bit info field
 * (bsmod bits 14-16, acmod bits 11-13, lfeon bit 10). */
static int mov_read_dac3(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    int ac3info, acmod, lfeon, bsmod;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];

    ac3info = avio_rb24(pb);
    bsmod = (ac3info >> 14) & 0x7;
    acmod = (ac3info >> 11) & 0x7;
    lfeon = (ac3info >> 10) & 0x1;
    // channel count per acmod, plus one for the LFE channel
    st->codec->channels = ((int[]){2,1,2,3,3,4,4,5})[acmod] + lfeon;
    st->codec->channel_layout = avpriv_ac3_channel_layout_tab[acmod];
    if (lfeon)
        st->codec->channel_layout |= AV_CH_LOW_FREQUENCY;
    st->codec->audio_service_type = bsmod;
    // bsmod 7 with >1 channel means karaoke service
    if (st->codec->channels > 1 && bsmod == 0x7)
        st->codec->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;

    return 0;
}
 
/* Parse a 'dec3' E-AC-3 specific atom: derive channel layout, channel
 * count and audio service type from the first independent substream's
 * 24-bit info field (bsmod bits 12-16, acmod bits 9-11, lfeon bit 8). */
static int mov_read_dec3(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    int eac3info, acmod, lfeon, bsmod;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];

    /* No need to parse fields for additional independent substreams and its
     * associated dependent substreams since libavcodec's E-AC-3 decoder
     * does not support them yet. */
    avio_rb16(pb); /* data_rate and num_ind_sub */
    eac3info = avio_rb24(pb);
    bsmod = (eac3info >> 12) & 0x1f;
    acmod = (eac3info >>  9) & 0x7;
    lfeon = (eac3info >>  8) & 0x1;
    st->codec->channel_layout = avpriv_ac3_channel_layout_tab[acmod];
    if (lfeon)
        st->codec->channel_layout |= AV_CH_LOW_FREQUENCY;
    st->codec->channels = av_get_channel_layout_nb_channels(st->codec->channel_layout);
    st->codec->audio_service_type = bsmod;
    // bsmod 7 with >1 channel means karaoke service
    if (st->codec->channels > 1 && bsmod == 0x7)
        st->codec->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;

    return 0;
}
 
/* Parse a 'chan' channel-layout atom for the current stream.
 * Undersized atoms (< 16 bytes) are ignored. */
static int mov_read_chan(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *stream;

    if (c->fc->nb_streams < 1)
        return 0;
    if (atom.size < 16)
        return 0;

    stream = c->fc->streams[c->fc->nb_streams - 1];

    /* skip version and flags */
    avio_skip(pb, 4);

    ff_mov_read_chan(c->fc, pb, stream, atom.size - 4);

    return 0;
}
 
/* Parse a 'wfex' atom: a WAVEFORMATEX structure describing the
 * current stream's audio parameters. A parse failure only warns. */
static int mov_read_wfex(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *stream;

    if (c->fc->nb_streams < 1)
        return 0;
    stream = c->fc->streams[c->fc->nb_streams - 1];

    if (ff_get_wav_header(pb, stream->codec, atom.size) < 0)
        av_log(c->fc, AV_LOG_WARNING, "get_wav_header failed\n");

    return 0;
}
 
/* Parse the 'pasp' atom: pixel (sample) aspect ratio as hSpacing:vSpacing.
 * A value already set on the stream wins; a conflicting pasp is ignored
 * with a warning. */
static int mov_read_pasp(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    const int num = avio_rb32(pb); /* hSpacing */
    const int den = avio_rb32(pb); /* vSpacing */
    AVStream *st;
    int already_set;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];

    /* anything other than the 0:1 default counts as previously set */
    already_set = st->sample_aspect_ratio.den != 1 || st->sample_aspect_ratio.num;
    if (already_set &&
        (den != st->sample_aspect_ratio.den || num != st->sample_aspect_ratio.num)) {
        av_log(c->fc, AV_LOG_WARNING,
               "sample aspect ratio already set to %d:%d, ignoring 'pasp' atom (%d:%d)\n",
               st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
               num, den);
    } else if (den != 0) {
        st->sample_aspect_ratio.num = num;
        st->sample_aspect_ratio.den = den;
    }
    return 0;
}
 
/* this atom contains actual media data */
/* The 'mdat' atom holds the actual media payload; we only record that we
 * have seen it so parsing can stop once the moov has also been found. */
static int mov_read_mdat(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    /* a zero-sized mdat (MP4 quirk) is not the real one */
    if (atom.size != 0)
        c->found_mdat = 1;
    return 0; /* now go for moov */
}
 
/* read major brand, minor version and compatible brands and store them as metadata */
/* Parse the 'ftyp' atom: read the major brand, minor version and the list
 * of compatible brands and expose them as demuxer metadata. Any major
 * brand other than the QuickTime brand "qt  " marks the file as ISO media. */
static int mov_read_ftyp(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    uint32_t minor_ver;
    int comp_brand_size;
    char minor_ver_str[11]; /* 32 bit integer -> 10 digits + null */
    char* comp_brands_str;
    uint8_t type[5] = {0};

    avio_read(pb, type, 4);
    /* FIX: the brand is 4 bytes, space padded; comparing against "qt  "
     * (two trailing spaces) so genuine QuickTime files are not mis-flagged
     * as ISOM */
    if (strcmp(type, "qt  "))
        c->isom = 1;
    av_log(c->fc, AV_LOG_DEBUG, "ISO: File Type Major Brand: %.4s\n",(char *)&type);
    av_dict_set(&c->fc->metadata, "major_brand", type, 0);
    minor_ver = avio_rb32(pb); /* minor version */
    /* FIX: minor_ver is unsigned; "%d" invoked undefined behavior for
     * values above INT_MAX */
    snprintf(minor_ver_str, sizeof(minor_ver_str), "%"PRIu32, minor_ver);
    av_dict_set(&c->fc->metadata, "minor_version", minor_ver_str, 0);

    comp_brand_size = atom.size - 8;
    if (comp_brand_size < 0)
        return AVERROR_INVALIDDATA;
    comp_brands_str = av_malloc(comp_brand_size + 1); /* Add null terminator */
    if (!comp_brands_str)
        return AVERROR(ENOMEM);
    avio_read(pb, comp_brands_str, comp_brand_size);
    comp_brands_str[comp_brand_size] = 0;
    av_dict_set(&c->fc->metadata, "compatible_brands", comp_brands_str, 0);
    av_freep(&comp_brands_str);

    return 0;
}
 
/* this atom should contain all header atoms */
/* Parse the 'moov' atom, which contains all header atoms. Only the first
 * moov is honoured; duplicates are skipped with a warning. */
static int mov_read_moov(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    int err;

    if (c->found_moov) {
        av_log(c->fc, AV_LOG_WARNING, "Found duplicated MOOV Atom. Skipped it\n");
        avio_skip(pb, atom.size);
        return 0;
    }

    err = mov_read_default(c, pb, atom);
    if (err < 0)
        return err;

    /* we parsed the 'moov' atom, we can terminate the parsing as soon as we find the 'mdat' */
    /* so we don't parse the whole file if over a network */
    c->found_moov = 1;
    return 0; /* now go for mdat */
}
 
/* Parse a 'moof' (movie fragment) atom: remember its file offset (the 8
 * bytes of header have already been consumed) then descend into it. */
static int mov_read_moof(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    c->fragment.moof_offset = avio_tell(pb) - 8;
    av_dlog(c->fc, "moof offset %"PRIx64"\n", c->fragment.moof_offset);
    return mov_read_default(c, pb, atom);
}
 
/* Convert a MOV creation time to a "YYYY-MM-DD HH:MM:SS" string and store
 * it under the "creation_time" metadata key. A zero time is ignored. */
static void mov_metadata_creation_time(AVDictionary **metadata, int64_t time)
{
    char buffer[32];
    struct tm *ptm;
    time_t timet;

    if (!time)
        return;
    /* MOV times count from 1904-01-01; rebase onto the Unix epoch */
    if (time >= 2082844800)
        time -= 2082844800; /* seconds between 1904-01-01 and Epoch */
    timet = time;
    ptm   = gmtime(&timet);
    if (!ptm)
        return;
    strftime(buffer, sizeof(buffer), "%Y-%m-%d %H:%M:%S", ptm);
    av_dict_set(metadata, "creation_time", buffer, 0);
}
 
/* Parse the 'mdhd' atom (per-track media header): record the track's time
 * scale, duration, language and creation time on the current stream. */
static int mov_read_mdhd(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    int version;
    char language[4] = {0};
    unsigned lang;
    int64_t creation_time;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    /* a track must carry exactly one mdhd */
    if (sc->time_scale) {
        av_log(c->fc, AV_LOG_ERROR, "Multiple mdhd?\n");
        return AVERROR_INVALIDDATA;
    }

    version = avio_r8(pb);
    if (version > 1) {
        avpriv_request_sample(c->fc, "Version %d", version);
        return AVERROR_PATCHWELCOME;
    }
    avio_rb24(pb); /* flags */
    /* version 1 stores 64-bit times, version 0 stores 32-bit times */
    if (version == 1) {
        creation_time = avio_rb64(pb);
        avio_rb64(pb); /* modification time */
    } else {
        creation_time = avio_rb32(pb);
        avio_rb32(pb); /* modification time */
    }
    mov_metadata_creation_time(&st->metadata, creation_time);

    sc->time_scale = avio_rb32(pb);
    st->duration = (version == 1) ? avio_rb64(pb) : avio_rb32(pb); /* duration */

    lang = avio_rb16(pb); /* language */
    if (ff_mov_lang_to_iso639(lang, language))
        av_dict_set(&st->metadata, "language", language, 0);
    avio_rb16(pb); /* quality */

    return 0;
}
 
/* Parse the 'mvhd' atom (movie header): establish the global movie time
 * scale and duration; the remaining presentation fields are skipped. */
static int mov_read_mvhd(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    int64_t creation_time;
    int version = avio_r8(pb); /* version */
    avio_rb24(pb); /* flags */

    /* version 1 stores 64-bit times/durations, version 0 stores 32-bit */
    if (version == 1) {
        creation_time = avio_rb64(pb);
        avio_rb64(pb); /* modification time */
    } else {
        creation_time = avio_rb32(pb);
        avio_rb32(pb); /* modification time */
    }
    mov_metadata_creation_time(&c->fc->metadata, creation_time);
    c->time_scale = avio_rb32(pb); /* time scale */

    av_dlog(c->fc, "time scale = %i\n", c->time_scale);

    c->duration = (version == 1) ? avio_rb64(pb) : avio_rb32(pb); /* duration */
    // set the AVCodecContext duration because the duration of individual tracks
    // may be inaccurate
    if (c->time_scale > 0 && !c->trex_data)
        c->fc->duration = av_rescale(c->duration, AV_TIME_BASE, c->time_scale);
    avio_rb32(pb); /* preferred scale */

    avio_rb16(pb); /* preferred volume */

    avio_skip(pb, 10); /* reserved */

    avio_skip(pb, 36); /* display matrix */

    avio_rb32(pb); /* preview time */
    avio_rb32(pb); /* preview duration */
    avio_rb32(pb); /* poster time */
    avio_rb32(pb); /* selection time */
    avio_rb32(pb); /* selection duration */
    avio_rb32(pb); /* current time */
    avio_rb32(pb); /* next track ID */
    return 0;
}
 
/* Parse the 'enda' atom: endianness marker for raw PCM tracks. When the
 * little-endian flag is set, remap the codec id from the big-endian PCM
 * variant picked from the stsd tag to its little-endian counterpart. */
static int mov_read_enda(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    int little_endian;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];

    little_endian = avio_rb16(pb) & 0xFF;
    av_dlog(c->fc, "enda %d\n", little_endian);
    if (little_endian != 1)
        return 0;

    switch (st->codec->codec_id) {
    case AV_CODEC_ID_PCM_S24BE:
        st->codec->codec_id = AV_CODEC_ID_PCM_S24LE;
        break;
    case AV_CODEC_ID_PCM_S32BE:
        st->codec->codec_id = AV_CODEC_ID_PCM_S32LE;
        break;
    case AV_CODEC_ID_PCM_F32BE:
        st->codec->codec_id = AV_CODEC_ID_PCM_F32LE;
        break;
    case AV_CODEC_ID_PCM_F64BE:
        st->codec->codec_id = AV_CODEC_ID_PCM_F64LE;
        break;
    default:
        break;
    }
    return 0;
}
 
/* Parse the 'fiel' atom (QuickTime field/interlace information) and map it
 * onto an AVFieldOrder value for the current video stream. */
static int mov_read_fiel(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    unsigned fiel;
    enum AVFieldOrder order = AV_FIELD_UNKNOWN;

    if (c->fc->nb_streams < 1) // will happen with jp2 files
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    if (atom.size < 2)
        return AVERROR_INVALIDDATA;

    fiel = avio_rb16(pb);
    /* high byte: 1 = progressive, 2 = interlaced */
    if ((fiel & 0xFF00) == 0x0100) {
        order = AV_FIELD_PROGRESSIVE;
    } else if ((fiel & 0xFF00) == 0x0200) {
        /* low byte encodes the temporal/spatial ordering of the fields */
        switch (fiel & 0xFF) {
        case 0x01: order = AV_FIELD_TT; break;
        case 0x06: order = AV_FIELD_BB; break;
        case 0x09: order = AV_FIELD_TB; break;
        case 0x0E: order = AV_FIELD_BT; break;
        }
    }
    if (order == AV_FIELD_UNKNOWN && fiel)
        av_log(NULL, AV_LOG_ERROR, "Unknown MOV field order 0x%04x\n", fiel);
    st->codec->field_order = order;

    return 0;
}
 
/* FIXME modify qdm2/svq3/h264 decoders to take full atom as extradata */
/* FIXME modify qdm2/svq3/h264 decoders to take full atom as extradata */
/* Append the whole atom — with a re-synthesized 8-byte size/type header —
 * to the current stream's extradata, but only when the stream's codec
 * matches codec_id. */
static int mov_read_extradata(MOVContext *c, AVIOContext *pb, MOVAtom atom,
                              enum AVCodecID codec_id)
{
    AVStream *st;
    uint64_t size;
    uint8_t *buf;
    int err;

    if (c->fc->nb_streams < 1) // will happen with jp2 files
        return 0;
    st= c->fc->streams[c->fc->nb_streams-1];

    if (st->codec->codec_id != codec_id)
        return 0; /* unexpected codec_id - don't mess with extradata */

    /* existing extradata + 8-byte atom header + payload + decoder padding */
    size= (uint64_t)st->codec->extradata_size + atom.size + 8 + FF_INPUT_BUFFER_PADDING_SIZE;
    if (size > INT_MAX || (uint64_t)atom.size > INT_MAX)
        return AVERROR_INVALIDDATA;
    if ((err = av_reallocp(&st->codec->extradata, size)) < 0) {
        st->codec->extradata_size = 0;
        return err;
    }
    buf = st->codec->extradata + st->codec->extradata_size;
    st->codec->extradata_size= size - FF_INPUT_BUFFER_PADDING_SIZE;
    /* reconstruct the atom header (big-endian size, native-order tag) */
    AV_WB32(       buf    , atom.size + 8);
    AV_WL32(       buf + 4, atom.type);
    avio_read(pb, buf + 8, atom.size);
    return 0;
}
 
/* wrapper functions for reading ALAC/AVS/MJPEG/MJPEG2000 extradata atoms only for those codecs */
/* Store a raw 'alac' atom as extradata, only for ALAC streams. */
static int mov_read_alac(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    return mov_read_extradata(c, pb, atom, AV_CODEC_ID_ALAC);
}
 
/* Store a raw 'avss' atom as extradata, only for AVS streams. */
static int mov_read_avss(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    return mov_read_extradata(c, pb, atom, AV_CODEC_ID_AVS);
}
 
/* Store a raw 'jp2h' atom as extradata, only for JPEG 2000 streams. */
static int mov_read_jp2h(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    return mov_read_extradata(c, pb, atom, AV_CODEC_ID_JPEG2000);
}
 
/* Store a raw Avid atom as extradata, only for AVUI streams. */
static int mov_read_avid(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    return mov_read_extradata(c, pb, atom, AV_CODEC_ID_AVUI);
}
 
/* Store the atom as Targa Y216 extradata; the real frame dimensions are
 * embedded in the extradata, so patch width/height from it when present. */
static int mov_read_targa_y216(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    int ret = mov_read_extradata(c, pb, atom, AV_CODEC_ID_TARGA_Y216);

    if (!ret && c->fc->nb_streams >= 1) {
        AVCodecContext *avctx = c->fc->streams[c->fc->nb_streams-1]->codec;
        /* height at offset 36, width at offset 38, both big-endian 16-bit */
        if (avctx->extradata_size >= 40) {
            avctx->height = AV_RB16(&avctx->extradata[36]);
            avctx->width  = AV_RB16(&avctx->extradata[38]);
        }
    }
    return ret;
}
 
/* Parse the Avid 'ARES' atom. For AVID AVCI50 (AVin/H.264) streams it
 * carries the real coded width; for anything else it is stored as AVUI
 * extradata via mov_read_avid(). */
static int mov_read_ares(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVCodecContext *codec;

    /* FIX: every sibling atom handler guards against an empty stream list;
     * without this check, an ARES atom appearing before any 'trak' caused
     * an out-of-bounds access on streams[nb_streams-1]. */
    if (c->fc->nb_streams < 1)
        return 0;
    codec = c->fc->streams[c->fc->nb_streams-1]->codec;

    if (codec->codec_tag == MKTAG('A', 'V', 'i', 'n') &&
        codec->codec_id == AV_CODEC_ID_H264 &&
        atom.size > 11) {
        avio_skip(pb, 10);
        /* For AVID AVCI50, force width of 1440 to be able to select the correct SPS and PPS */
        if (avio_rb16(pb) == 0xd4d)
            codec->width = 1440;
        return 0;
    }

    return mov_read_avid(c, pb, atom);
}
 
/* Store a raw 'SMI ' atom as extradata, only for SVQ3 streams. */
static int mov_read_svq3(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    return mov_read_extradata(c, pb, atom, AV_CODEC_ID_SVQ3);
}
 
/* Parse the 'wave' atom. For QDM2/QDMC/Speex the entire atom is handed to
 * the decoder as extradata; otherwise descend into it to pick up the
 * contained frma/esds atoms, or skip it when too small to hold any. */
static int mov_read_wave(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];

    /* reject absurdly large atoms before allocating */
    if ((uint64_t)atom.size > (1<<30))
        return AVERROR_INVALIDDATA;

    if (st->codec->codec_id == AV_CODEC_ID_QDM2 ||
        st->codec->codec_id == AV_CODEC_ID_QDMC ||
        st->codec->codec_id == AV_CODEC_ID_SPEEX) {
        // pass all frma atom to codec, needed at least for QDMC and QDM2
        av_free(st->codec->extradata);
        if (ff_alloc_extradata(st->codec, atom.size))
            return AVERROR(ENOMEM);
        avio_read(pb, st->codec->extradata, atom.size);
    } else if (atom.size > 8) { /* to read frma, esds atoms */
        int ret;
        if ((ret = mov_read_default(c, pb, atom)) < 0)
            return ret;
    } else
        avio_skip(pb, atom.size);
    return 0;
}
 
/**
* This function reads atom content and puts data in extradata without tag
* nor size unlike mov_read_extradata.
*/
/**
 * This function reads atom content and puts data in extradata without tag
 * nor size unlike mov_read_extradata.
 */
static int mov_read_glbl(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];

    /* reject absurdly large atoms before allocating */
    if ((uint64_t)atom.size > (1<<30))
        return AVERROR_INVALIDDATA;

    if (atom.size >= 10) {
        // Broken files created by legacy versions of libavformat will
        // wrap a whole fiel atom inside of a glbl atom.
        unsigned size = avio_rb32(pb);
        unsigned type = avio_rl32(pb);
        avio_seek(pb, -8, SEEK_CUR); /* rewind the peeked header */
        if (type == MKTAG('f','i','e','l') && size == atom.size)
            return mov_read_default(c, pb, atom);
    }
    /* replace any previous extradata with the raw atom payload */
    av_free(st->codec->extradata);
    if (ff_alloc_extradata(st->codec, atom.size))
        return AVERROR(ENOMEM);
    avio_read(pb, st->codec->extradata, atom.size);
    return 0;
}
 
/* Parse the 'dvc1' atom (VC-1 configuration): for advanced-profile streams
 * (profile_level 0xCx) store the sequence-header payload as extradata. */
static int mov_read_dvc1(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    uint8_t profile_level;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];

    /* need at least the 7-byte fixed header; cap the size for allocation */
    if (atom.size >= (1<<28) || atom.size < 7)
        return AVERROR_INVALIDDATA;

    profile_level = avio_r8(pb);
    if ((profile_level & 0xf0) != 0xc0)
        return 0;

    av_free(st->codec->extradata);
    if (ff_alloc_extradata(st->codec, atom.size - 7))
        return AVERROR(ENOMEM);
    /* skip the remaining 6 header bytes, then read the payload */
    avio_seek(pb, 6, SEEK_CUR);
    avio_read(pb, st->codec->extradata, st->codec->extradata_size);
    return 0;
}
 
/**
* An strf atom is a BITMAPINFOHEADER struct. This struct is 40 bytes itself,
* but can have extradata appended at the end after the 40 bytes belonging
* to the struct.
*/
/**
 * An strf atom is a BITMAPINFOHEADER struct. This struct is 40 bytes itself,
 * but can have extradata appended at the end after the 40 bytes belonging
 * to the struct.
 */
static int mov_read_strf(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;

    if (c->fc->nb_streams < 1)
        return 0;
    /* nothing beyond the 40-byte BITMAPINFOHEADER -> no extradata to keep */
    if (atom.size <= 40)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];

    if ((uint64_t)atom.size > (1<<30))
        return AVERROR_INVALIDDATA;

    av_free(st->codec->extradata);
    if (ff_alloc_extradata(st->codec, atom.size - 40))
        return AVERROR(ENOMEM);
    avio_skip(pb, 40); /* skip the BITMAPINFOHEADER itself */
    avio_read(pb, st->codec->extradata, atom.size - 40);
    return 0;
}
 
/* Parse an 'stco' (32-bit) or 'co64' (64-bit) chunk offset table into
 * sc->chunk_offsets for the current stream. */
static int mov_read_stco(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    avio_r8(pb); /* version */
    avio_rb24(pb); /* flags */

    entries = avio_rb32(pb);

    if (!entries)
        return 0;
    /* overflow guard for the allocation below */
    if (entries >= UINT_MAX/sizeof(int64_t))
        return AVERROR_INVALIDDATA;

    sc->chunk_offsets = av_malloc(entries * sizeof(int64_t));
    if (!sc->chunk_offsets)
        return AVERROR(ENOMEM);
    sc->chunk_count = entries;

    if (atom.type == MKTAG('s','t','c','o'))
        for (i = 0; i < entries && !pb->eof_reached; i++)
            sc->chunk_offsets[i] = avio_rb32(pb);
    else if (atom.type == MKTAG('c','o','6','4'))
        for (i = 0; i < entries && !pb->eof_reached; i++)
            sc->chunk_offsets[i] = avio_rb64(pb);
    else
        return AVERROR_INVALIDDATA;

    /* keep only the entries actually read in case of a truncated file */
    sc->chunk_count = i;

    if (pb->eof_reached)
        return AVERROR_EOF;

    return 0;
}
 
/**
* Compute codec id for 'lpcm' tag.
* See CoreAudioTypes and AudioStreamBasicDescription at Apple.
*/
/**
 * Compute codec id for 'lpcm' tag.
 * See CoreAudioTypes and AudioStreamBasicDescription at Apple.
 */
enum AVCodecID ff_mov_get_lpcm_codec_id(int bps, int flags)
{
    /* lpcm flags: 0x1 = float, 0x2 = big-endian, 0x4 = signed */
    const int is_float  = flags & 1;
    const int is_be     = flags & 2;
    const int sign_flag = (flags & 4) ? -1 : 0;

    return ff_get_pcm_codec_id(bps, is_float, is_be, sign_flag);
}
 
/* Map an stsd fourcc to a codec id, trying audio tags first, then video,
 * then BMP and subtitle tags; also fixes up the stream's codec_type to
 * match whichever table produced a hit. Returns the codec id (<= 0 when
 * unknown) and always records the fourcc in codec_tag. */
static int mov_codec_id(AVStream *st, uint32_t format)
{
    int id = ff_codec_get_id(ff_codec_movaudio_tags, format);

    /* "ms\xx\xx" / "TS\xx\xx" tags embed a WAV/MPEG-TS id in the high bytes */
    if (id <= 0 &&
        ((format & 0xFFFF) == 'm' + ('s' << 8) ||
         (format & 0xFFFF) == 'T' + ('S' << 8)))
        id = ff_codec_get_id(ff_codec_wav_tags, av_bswap32(format) & 0xFFFF);

    if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO && id > 0) {
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    } else if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO &&
               /* skip old asf mpeg4 tag */
               format && format != MKTAG('m','p','4','s')) {
        id = ff_codec_get_id(ff_codec_movvideo_tags, format);
        if (id <= 0)
            id = ff_codec_get_id(ff_codec_bmp_tags, format);
        if (id > 0)
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        else if (st->codec->codec_type == AVMEDIA_TYPE_DATA ||
                 (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE &&
                  st->codec->codec_id == AV_CODEC_ID_NONE)) {
            id = ff_codec_get_id(ff_codec_movsubtitle_tags, format);
            if (id > 0)
                st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
        }
    }

    st->codec->codec_tag = format;

    return id;
}
 
/* Parse the video-specific part of an stsd sample description entry:
 * dimensions, codec name, bit depth, and — for palettized depths of
 * 2/4/8 bpp — the (greyscale, default or file-supplied) palette. */
static void mov_parse_stsd_video(MOVContext *c, AVIOContext *pb,
                                 AVStream *st, MOVStreamContext *sc)
{
    unsigned int color_depth, len, j;
    int color_greyscale;
    int color_table_id;

    avio_rb16(pb); /* version */
    avio_rb16(pb); /* revision level */
    avio_rb32(pb); /* vendor */
    avio_rb32(pb); /* temporal quality */
    avio_rb32(pb); /* spatial quality */

    st->codec->width  = avio_rb16(pb); /* width */
    st->codec->height = avio_rb16(pb); /* height */

    avio_rb32(pb); /* horiz resolution */
    avio_rb32(pb); /* vert resolution */
    avio_rb32(pb); /* data size, always 0 */
    avio_rb16(pb); /* frames per samples */

    len = avio_r8(pb); /* codec name, pascal string */
    if (len > 31)
        len = 31;
    mov_read_mac_string(c, pb, len, st->codec->codec_name, 32);
    /* the name field is a fixed 31-byte slot; skip the unused remainder */
    if (len < 31)
        avio_skip(pb, 31 - len);
    /* codec_tag YV12 triggers an UV swap in rawdec.c */
    if (!memcmp(st->codec->codec_name, "Planar Y'CbCr 8-bit 4:2:0", 25)) {
        st->codec->codec_tag = MKTAG('I', '4', '2', '0');
        st->codec->width  &= ~1;
        st->codec->height &= ~1;
    }
    /* Flash Media Server uses tag H263 with Sorenson Spark */
    if (st->codec->codec_tag == MKTAG('H','2','6','3') &&
        !memcmp(st->codec->codec_name, "Sorenson H263", 13))
        st->codec->codec_id = AV_CODEC_ID_FLV1;

    st->codec->bits_per_coded_sample = avio_rb16(pb); /* depth */
    color_table_id = avio_rb16(pb); /* colortable id */
    av_dlog(c->fc, "depth %d, ctab id %d\n",
            st->codec->bits_per_coded_sample, color_table_id);
    /* figure out the palette situation */
    color_depth     = st->codec->bits_per_coded_sample & 0x1F;
    color_greyscale = st->codec->bits_per_coded_sample & 0x20;

    /* if the depth is 2, 4, or 8 bpp, file is palettized */
    if ((color_depth == 2) || (color_depth == 4) || (color_depth == 8)) {
        /* for palette traversal */
        unsigned int color_start, color_count, color_end;
        unsigned char a, r, g, b;

        if (color_greyscale) {
            int color_index, color_dec;
            /* compute the greyscale palette */
            st->codec->bits_per_coded_sample = color_depth;
            color_count = 1 << color_depth;
            color_index = 255;
            color_dec   = 256 / (color_count - 1);
            for (j = 0; j < color_count; j++) {
                /* Cinepak expects the ramp inverted */
                if (st->codec->codec_id == AV_CODEC_ID_CINEPAK){
                    r = g = b = color_count - 1 - color_index;
                } else
                    r = g = b = color_index;
                sc->palette[j] = (0xFFU << 24) | (r << 16) | (g << 8) | (b);
                color_index -= color_dec;
                if (color_index < 0)
                    color_index = 0;
            }
        } else if (color_table_id) {
            const uint8_t *color_table;
            /* if flag bit 3 is set, use the default palette */
            color_count = 1 << color_depth;
            if (color_depth == 2)
                color_table = ff_qt_default_palette_4;
            else if (color_depth == 4)
                color_table = ff_qt_default_palette_16;
            else
                color_table = ff_qt_default_palette_256;

            for (j = 0; j < color_count; j++) {
                r = color_table[j * 3 + 0];
                g = color_table[j * 3 + 1];
                b = color_table[j * 3 + 2];
                sc->palette[j] = (0xFFU << 24) | (r << 16) | (g << 8) | (b);
            }
        } else {
            /* load the palette from the file */
            color_start = avio_rb32(pb);
            color_count = avio_rb16(pb);
            color_end   = avio_rb16(pb);
            if ((color_start <= 255) && (color_end <= 255)) {
                for (j = color_start; j <= color_end; j++) {
                    /* each A, R, G, or B component is 16 bits;
                     * only use the top 8 bits */
                    a = avio_r8(pb);
                    avio_r8(pb);
                    r = avio_r8(pb);
                    avio_r8(pb);
                    g = avio_r8(pb);
                    avio_r8(pb);
                    b = avio_r8(pb);
                    avio_r8(pb);
                    sc->palette[j] = (a << 24 ) | (r << 16) | (g << 8) | (b);
                }
            }
        }
        sc->has_palette = 1;
    }
}
 
/* Parse the audio-specific part of an stsd sample description entry:
 * channels, sample size/rate, the QT version 1/2 extensions, and fix up
 * codec ids and per-frame sizes for legacy codecs. */
static void mov_parse_stsd_audio(MOVContext *c, AVIOContext *pb,
                                 AVStream *st, MOVStreamContext *sc)
{
    int bits_per_sample, flags;
    uint16_t version = avio_rb16(pb);
    AVDictionaryEntry *compatible_brands = av_dict_get(c->fc->metadata, "compatible_brands", NULL, AV_DICT_MATCH_CASE);

    avio_rb16(pb); /* revision level */
    avio_rb32(pb); /* vendor */

    st->codec->channels              = avio_rb16(pb); /* channel count */
    st->codec->bits_per_coded_sample = avio_rb16(pb); /* sample size */
    av_dlog(c->fc, "audio channels %d\n", st->codec->channels);

    sc->audio_cid = avio_rb16(pb);
    avio_rb16(pb); /* packet size = 0 */

    /* sample rate is stored as 16.16 fixed point; keep the integer part */
    st->codec->sample_rate = ((avio_rb32(pb) >> 16));

    // Read QT version 1 fields. In version 0 these do not exist.
    av_dlog(c->fc, "version =%d, isom =%d\n", version, c->isom);
    if (!c->isom ||
        (compatible_brands && strstr(compatible_brands->value, "qt "))) {

        if (version == 1) {
            sc->samples_per_frame = avio_rb32(pb);
            avio_rb32(pb); /* bytes per packet */
            sc->bytes_per_frame = avio_rb32(pb);
            avio_rb32(pb); /* bytes per sample */
        } else if (version == 2) {
            avio_rb32(pb); /* sizeof struct only */
            /* v2 stores the rate as a 64-bit IEEE double */
            st->codec->sample_rate = av_int2double(avio_rb64(pb));
            st->codec->channels    = avio_rb32(pb);
            avio_rb32(pb); /* always 0x7F000000 */
            st->codec->bits_per_coded_sample = avio_rb32(pb);

            flags = avio_rb32(pb); /* lpcm format specific flag */
            sc->bytes_per_frame   = avio_rb32(pb);
            sc->samples_per_frame = avio_rb32(pb);
            if (st->codec->codec_tag == MKTAG('l','p','c','m'))
                st->codec->codec_id =
                    ff_mov_get_lpcm_codec_id(st->codec->bits_per_coded_sample,
                                             flags);
        }
    }

    /* reconcile the codec id picked from the tag with the actual depth */
    switch (st->codec->codec_id) {
    case AV_CODEC_ID_PCM_S8:
    case AV_CODEC_ID_PCM_U8:
        if (st->codec->bits_per_coded_sample == 16)
            st->codec->codec_id = AV_CODEC_ID_PCM_S16BE;
        break;
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S16BE:
        if (st->codec->bits_per_coded_sample == 8)
            st->codec->codec_id = AV_CODEC_ID_PCM_S8;
        else if (st->codec->bits_per_coded_sample == 24)
            st->codec->codec_id =
                st->codec->codec_id == AV_CODEC_ID_PCM_S16BE ?
                AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
        break;
    /* set values for old format before stsd version 1 appeared */
    case AV_CODEC_ID_MACE3:
        sc->samples_per_frame = 6;
        sc->bytes_per_frame   = 2 * st->codec->channels;
        break;
    case AV_CODEC_ID_MACE6:
        sc->samples_per_frame = 6;
        sc->bytes_per_frame   = 1 * st->codec->channels;
        break;
    case AV_CODEC_ID_ADPCM_IMA_QT:
        sc->samples_per_frame = 64;
        sc->bytes_per_frame   = 34 * st->codec->channels;
        break;
    case AV_CODEC_ID_GSM:
        sc->samples_per_frame = 160;
        sc->bytes_per_frame   = 33;
        break;
    default:
        break;
    }

    bits_per_sample = av_get_bits_per_sample(st->codec->codec_id);
    if (bits_per_sample) {
        st->codec->bits_per_coded_sample = bits_per_sample;
        sc->sample_size = (bits_per_sample >> 3) * st->codec->channels;
    }
}
 
/* Parse the subtitle-specific part of an stsd entry and inherit the track
 * dimensions recorded in tkhd. */
static void mov_parse_stsd_subtitle(MOVContext *c, AVIOContext *pb,
                                    AVStream *st, MOVStreamContext *sc,
                                    int size)
{
    // ttxt stsd contains display flags, justification, background
    // color, fonts, and default styles, so fake an atom to read it
    MOVAtom fake_atom = { .size = size };
    // mp4s contains a regular esds atom
    if (st->codec->codec_tag != AV_RL32("mp4s"))
        mov_read_glbl(c, pb, fake_atom);
    st->codec->width  = sc->width;
    st->codec->height = sc->height;
}
 
/* Parse the data-track part of an stsd entry. Only 'tmcd' (timecode) is
 * handled — its payload becomes extradata and supplies flags/frame rate;
 * everything else is skipped. */
static int mov_parse_stsd_data(MOVContext *c, AVIOContext *pb,
                               AVStream *st, MOVStreamContext *sc,
                               int size)
{
    if (st->codec->codec_tag == MKTAG('t','m','c','d')) {
        if (ff_alloc_extradata(st->codec, size))
            return AVERROR(ENOMEM);
        avio_read(pb, st->codec->extradata, size);
        if (size > 16) {
            MOVStreamContext *tmcd_ctx = st->priv_data;
            int val;
            val = AV_RB32(st->codec->extradata + 4); /* tmcd flags word */
            tmcd_ctx->tmcd_flags = val;
            /* bit 0 selects drop-frame timecode */
            if (val & 1)
                st->codec->flags2 |= CODEC_FLAG2_DROP_FRAME_TIMECODE;
            st->codec->time_base.den = st->codec->extradata[16]; /* number of frame */
            st->codec->time_base.num = 1;
        }
    } else {
        /* other codec type, just skip (rtp, mp4s ...) */
        avio_skip(pb, size);
    }
    return 0;
}
 
/* Post-process the stream after all stsd entries have been read: derive a
 * missing audio sample rate from the track time scale and apply per-codec
 * fixups (forced rates/channels, block_align, full parsing, ...). */
static int mov_finalize_stsd_codec(MOVContext *c, AVIOContext *pb,
                                   AVStream *st, MOVStreamContext *sc)
{
    /* fall back to the media time scale when the rate was not stored */
    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
        !st->codec->sample_rate && sc->time_scale > 1)
        st->codec->sample_rate = sc->time_scale;

    /* special codec parameters handling */
    switch (st->codec->codec_id) {
#if CONFIG_DV_DEMUXER
    case AV_CODEC_ID_DVAUDIO:
        /* DV audio is demuxed through a dedicated DV demux context */
        c->dv_fctx = avformat_alloc_context();
        c->dv_demux = avpriv_dv_init_demux(c->dv_fctx);
        if (!c->dv_demux) {
            av_log(c->fc, AV_LOG_ERROR, "dv demux context init error\n");
            return AVERROR(ENOMEM);
        }
        sc->dv_audio_container = 1;
        st->codec->codec_id    = AV_CODEC_ID_PCM_S16LE;
        break;
#endif
    /* no ifdef since parameters are always those */
    case AV_CODEC_ID_QCELP:
        st->codec->channels = 1;
        // force sample rate for qcelp when not stored in mov
        if (st->codec->codec_tag != MKTAG('Q','c','l','p'))
            st->codec->sample_rate = 8000;
        break;
    case AV_CODEC_ID_AMR_NB:
        st->codec->channels = 1;
        /* force sample rate for amr, stsd in 3gp does not store sample rate */
        st->codec->sample_rate = 8000;
        break;
    case AV_CODEC_ID_AMR_WB:
        st->codec->channels    = 1;
        st->codec->sample_rate = 16000;
        break;
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
        /* force type after stsd for m1a hdlr */
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->need_parsing      = AVSTREAM_PARSE_FULL;
        break;
    case AV_CODEC_ID_GSM:
    case AV_CODEC_ID_ADPCM_MS:
    case AV_CODEC_ID_ADPCM_IMA_WAV:
    case AV_CODEC_ID_ILBC:
    case AV_CODEC_ID_MACE3:
    case AV_CODEC_ID_MACE6:
    case AV_CODEC_ID_QDM2:
        /* constant frame size codecs: block_align equals one frame */
        st->codec->block_align = sc->bytes_per_frame;
        break;
    case AV_CODEC_ID_ALAC:
        /* the magic cookie holds the real channel count and sample rate */
        if (st->codec->extradata_size == 36) {
            st->codec->channels    = AV_RB8 (st->codec->extradata + 21);
            st->codec->sample_rate = AV_RB32(st->codec->extradata + 32);
        }
        break;
    case AV_CODEC_ID_AC3:
        st->need_parsing = AVSTREAM_PARSE_FULL;
        break;
    case AV_CODEC_ID_MPEG1VIDEO:
        st->need_parsing = AVSTREAM_PARSE_FULL;
        break;
    case AV_CODEC_ID_VC1:
        st->need_parsing = AVSTREAM_PARSE_FULL;
        break;
    default:
        break;
    }
    return 0;
}
 
/* Decide whether an additional stsd entry should be skipped. Returns 1
 * (and consumes `size` bytes) when the stream already has a different
 * fourcc, since multiple sample descriptions are not supported. */
static int mov_skip_multiple_stsd(MOVContext *c, AVIOContext *pb,
                                  int codec_tag, int format,
                                  int size)
{
    int video_codec_id = ff_codec_get_id(ff_codec_movvideo_tags, format);

    if (codec_tag &&
         (codec_tag != format &&
          (c->fc->video_codec_id ? video_codec_id != c->fc->video_codec_id
                                 : codec_tag != MKTAG('j','p','e','g')))) {
        /* Multiple fourcc, we skip JPEG. This is not correct, we should
         * export it as a separate AVStream but this needs a few changes
         * in the MOV demuxer, patch welcome. */

        av_log(c->fc, AV_LOG_WARNING, "multiple fourcc not supported\n");
        avio_skip(pb, size);
        return 1;
    }
    /* concatenated H.26x streams may carry incompatible parameter sets */
    if ( codec_tag == AV_RL32("avc1") ||
         codec_tag == AV_RL32("hvc1") ||
         codec_tag == AV_RL32("hev1")
    )
        av_log(c->fc, AV_LOG_WARNING, "Concatenated H.264 or H.265 might not play correctly.\n");

    return 0;
}
 
/* Read `entries` sample description entries for the current stream,
 * dispatching to the video/audio/subtitle/data parsers by codec_type and
 * then descending into any trailing child atoms (wave, alac, avcC, ...). */
int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
{
    AVStream *st;
    MOVStreamContext *sc;
    int pseudo_stream_id;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    for (pseudo_stream_id = 0;
         pseudo_stream_id < entries && !pb->eof_reached;
         pseudo_stream_id++) {
        //Parsing Sample description table
        enum AVCodecID id;
        int ret, dref_id = 1;
        MOVAtom a = { AV_RL32("stsd") };
        int64_t start_pos = avio_tell(pb);
        int64_t size = avio_rb32(pb); /* size */
        uint32_t format = avio_rl32(pb); /* data format */

        if (size >= 16) {
            avio_rb32(pb); /* reserved */
            avio_rb16(pb); /* reserved */
            dref_id = avio_rb16(pb); /* data reference index */
        } else if (size <= 7) {
            av_log(c->fc, AV_LOG_ERROR, "invalid size %"PRId64" in stsd\n", size);
            return AVERROR_INVALIDDATA;
        }

        if (mov_skip_multiple_stsd(c, pb, st->codec->codec_tag, format,
                                   size - (avio_tell(pb) - start_pos)))
            continue;

        /* first entry with no prior fourcc gets its index remembered */
        sc->pseudo_stream_id = st->codec->codec_tag ? -1 : pseudo_stream_id;
        sc->dref_id= dref_id;

        id = mov_codec_id(st, format);

        av_dlog(c->fc, "size=%"PRId64" 4CC= %c%c%c%c codec_type=%d\n", size,
                (format >> 0) & 0xff, (format >> 8) & 0xff, (format >> 16) & 0xff,
                (format >> 24) & 0xff, st->codec->codec_type);

        if (st->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            st->codec->codec_id = id;
            mov_parse_stsd_video(c, pb, st, sc);
        } else if (st->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
            st->codec->codec_id = id;
            mov_parse_stsd_audio(c, pb, st, sc);
        } else if (st->codec->codec_type==AVMEDIA_TYPE_SUBTITLE){
            st->codec->codec_id = id;
            mov_parse_stsd_subtitle(c, pb, st, sc,
                                    size - (avio_tell(pb) - start_pos));
        } else {
            ret = mov_parse_stsd_data(c, pb, st, sc,
                                      size - (avio_tell(pb) - start_pos));
            if (ret < 0)
                return ret;
        }
        /* this will read extra atoms at the end (wave, alac, damr, avcC, hvcC, SMI ...) */
        a.size = size - (avio_tell(pb) - start_pos);
        if (a.size > 8) {
            if ((ret = mov_read_default(c, pb, a)) < 0)
                return ret;
        } else if (a.size > 0)
            avio_skip(pb, a.size);
    }

    if (pb->eof_reached)
        return AVERROR_EOF;

    return mov_finalize_stsd_codec(c, pb, st, sc);
}
 
/* Parse the 'stsd' atom header and hand the entry list over to
 * ff_mov_read_stsd_entries(). */
static int mov_read_stsd(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    int nb_entries;

    avio_rb32(pb); /* version + flags */
    nb_entries = avio_rb32(pb);

    return ff_mov_read_stsd_entries(c, pb, nb_entries);
}
 
/* Parse the 'stsc' atom (sample-to-chunk map) into sc->stsc_data. */
static int mov_read_stsc(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    avio_r8(pb); /* version */
    avio_rb24(pb); /* flags */

    entries = avio_rb32(pb);

    av_dlog(c->fc, "track[%i].stsc.entries = %i\n", c->fc->nb_streams-1, entries);

    if (!entries)
        return 0;
    /* overflow guard for the allocation below */
    if (entries >= UINT_MAX / sizeof(*sc->stsc_data))
        return AVERROR_INVALIDDATA;
    sc->stsc_data = av_malloc(entries * sizeof(*sc->stsc_data));
    if (!sc->stsc_data)
        return AVERROR(ENOMEM);

    for (i = 0; i < entries && !pb->eof_reached; i++) {
        sc->stsc_data[i].first = avio_rb32(pb); /* first chunk using this run */
        sc->stsc_data[i].count = avio_rb32(pb); /* samples per chunk */
        sc->stsc_data[i].id = avio_rb32(pb);    /* sample description index */
    }

    /* keep only the entries actually read in case of a truncated file */
    sc->stsc_count = i;

    if (pb->eof_reached)
        return AVERROR_EOF;

    return 0;
}
 
/* Parse the 'stps' atom (partial sync sample table) into sc->stps_data. */
static int mov_read_stps(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned i, entries;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    avio_rb32(pb); // version + flags

    entries = avio_rb32(pb);
    /* FIX: bail out on an empty table instead of calling av_malloc(0),
     * matching the handling in the stco/stsc/ctts parsers */
    if (!entries)
        return 0;
    /* overflow guard for the allocation below */
    if (entries >= UINT_MAX / sizeof(*sc->stps_data))
        return AVERROR_INVALIDDATA;
    sc->stps_data = av_malloc(entries * sizeof(*sc->stps_data));
    if (!sc->stps_data)
        return AVERROR(ENOMEM);

    for (i = 0; i < entries && !pb->eof_reached; i++) {
        sc->stps_data[i] = avio_rb32(pb);
        //av_dlog(c->fc, "stps %d\n", sc->stps_data[i]);
    }

    /* keep only the entries actually read in case of a truncated file */
    sc->stps_count = i;

    if (pb->eof_reached)
        return AVERROR_EOF;

    return 0;
}
 
/* Parse the 'stss' atom (sync/keyframe sample table) into sc->keyframes.
 * An empty table means every sample may be a keyframe; a parser is then
 * needed to recover the real keyframe flags. */
static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    avio_r8(pb); /* version */
    avio_rb24(pb); /* flags */

    entries = avio_rb32(pb);

    av_dlog(c->fc, "keyframe_count = %d\n", entries);

    if (!entries)
    {
        sc->keyframe_absent = 1;
        if (!st->need_parsing)
            st->need_parsing = AVSTREAM_PARSE_HEADERS;
        return 0;
    }
    /* overflow guard for the allocation below */
    if (entries >= UINT_MAX / sizeof(int))
        return AVERROR_INVALIDDATA;
    sc->keyframes = av_malloc(entries * sizeof(int));
    if (!sc->keyframes)
        return AVERROR(ENOMEM);

    for (i = 0; i < entries && !pb->eof_reached; i++) {
        sc->keyframes[i] = avio_rb32(pb);
        //av_dlog(c->fc, "keyframes[]=%d\n", sc->keyframes[i]);
    }

    /* keep only the entries actually read in case of a truncated file */
    sc->keyframe_count = i;

    if (pb->eof_reached)
        return AVERROR_EOF;

    return 0;
}
 
/* Parse an 'stsz' or 'stz2' atom (sample size table). A non-zero constant
 * sample_size means no per-sample table follows; otherwise the packed
 * per-sample sizes (4/8/16/32-bit fields) are unpacked into
 * sc->sample_sizes. */
static int mov_read_stsz(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries, sample_size, field_size, num_bytes;
    GetBitContext gb;
    unsigned char* buf;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    avio_r8(pb); /* version */
    avio_rb24(pb); /* flags */

    if (atom.type == MKTAG('s','t','s','z')) {
        sample_size = avio_rb32(pb);
        if (!sc->sample_size) /* do not overwrite value computed in stsd */
            sc->sample_size = sample_size;
        sc->stsz_sample_size = sample_size;
        field_size = 32;
    } else {
        /* stz2: per-sample field width is stored explicitly */
        sample_size = 0;
        avio_rb24(pb); /* reserved */
        field_size = avio_r8(pb);
    }
    entries = avio_rb32(pb);

    av_dlog(c->fc, "sample_size = %d sample_count = %d\n", sc->sample_size, entries);

    sc->sample_count = entries;
    if (sample_size)
        return 0;

    if (field_size != 4 && field_size != 8 && field_size != 16 && field_size != 32) {
        av_log(c->fc, AV_LOG_ERROR, "Invalid sample field size %d\n", field_size);
        return AVERROR_INVALIDDATA;
    }

    if (!entries)
        return 0;
    /* overflow guards for both allocations below */
    if (entries >= UINT_MAX / sizeof(int) || entries >= (UINT_MAX - 4) / field_size)
        return AVERROR_INVALIDDATA;
    sc->sample_sizes = av_malloc(entries * sizeof(int));
    if (!sc->sample_sizes)
        return AVERROR(ENOMEM);

    num_bytes = (entries*field_size+4)>>3;

    buf = av_malloc(num_bytes+FF_INPUT_BUFFER_PADDING_SIZE);
    if (!buf) {
        av_freep(&sc->sample_sizes);
        return AVERROR(ENOMEM);
    }

    if (avio_read(pb, buf, num_bytes) < num_bytes) {
        av_freep(&sc->sample_sizes);
        av_free(buf);
        return AVERROR_INVALIDDATA;
    }

    init_get_bits(&gb, buf, 8*num_bytes);

    for (i = 0; i < entries && !pb->eof_reached; i++) {
        sc->sample_sizes[i] = get_bits_long(&gb, field_size);
        sc->data_size += sc->sample_sizes[i];
    }

    sc->sample_count = i;

    /* FIX: free the scratch buffer before the EOF return path too;
     * previously buf leaked whenever pb->eof_reached was set */
    av_free(buf);

    if (pb->eof_reached)
        return AVERROR_EOF;

    return 0;
}
 
/* Parse the 'stts' atom (decode time-to-sample table) into sc->stts_data
 * and derive the stream's frame count and duration from it. */
static int mov_read_stts(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries;
    int64_t duration=0;
    int64_t total_sample_count=0;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    avio_r8(pb); /* version */
    avio_rb24(pb); /* flags */
    entries = avio_rb32(pb);

    av_dlog(c->fc, "track[%i].stts.entries = %i\n",
            c->fc->nb_streams-1, entries);

    /* FIX: return a proper AVERROR code on overflow instead of a bare -1
     * (== AVERROR(EPERM)), consistent with the other table parsers */
    if (entries >= UINT_MAX / sizeof(*sc->stts_data))
        return AVERROR_INVALIDDATA;

    sc->stts_data = av_malloc(entries * sizeof(*sc->stts_data));
    if (!sc->stts_data)
        return AVERROR(ENOMEM);

    for (i = 0; i < entries && !pb->eof_reached; i++) {
        int sample_duration;
        int sample_count;

        sample_count=avio_rb32(pb);
        sample_duration = avio_rb32(pb);

        /* sample_duration < 0 is invalid based on the spec */
        if (sample_duration < 0) {
            av_log(c->fc, AV_LOG_ERROR, "Invalid SampleDelta in STTS %d\n", sample_duration);
            sample_duration = 1;
        }
        if (sample_count < 0) {
            av_log(c->fc, AV_LOG_ERROR, "Invalid sample_count=%d\n", sample_count);
            return AVERROR_INVALIDDATA;
        }
        sc->stts_data[i].count= sample_count;
        sc->stts_data[i].duration= sample_duration;

        av_dlog(c->fc, "sample_count=%d, sample_duration=%d\n",
                sample_count, sample_duration);

        duration+=(int64_t)sample_duration*sample_count;
        total_sample_count+=sample_count;
    }

    /* keep only the entries actually read in case of a truncated file */
    sc->stts_count = i;

    if (pb->eof_reached)
        return AVERROR_EOF;

    st->nb_frames= total_sample_count;
    if (duration)
        st->duration= duration;
    sc->track_end = duration;
    return 0;
}
 
/* Track the largest negative composition offset seen so far; dts_shift is
 * later subtracted from all DTS values to keep them non-negative. */
static void mov_update_dts_shift(MOVStreamContext *sc, int duration)
{
    if (duration >= 0)
        return;
    if (-duration > sc->dts_shift)
        sc->dts_shift = -duration;
}
 
/* Parse the 'ctts' (composition time-to-sample) atom into sc->ctts_data.
 * Each entry is a (count, duration) pair; negative durations feed the
 * global dts_shift via mov_update_dts_shift(). */
static int mov_read_ctts(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    avio_r8(pb); /* version */
    avio_rb24(pb); /* flags */
    entries = avio_rb32(pb);

    av_dlog(c->fc, "track[%i].ctts.entries = %i\n", c->fc->nb_streams-1, entries);

    if (!entries)
        return 0;
    /* overflow-safe bound on the allocation size */
    if (entries >= UINT_MAX / sizeof(*sc->ctts_data))
        return AVERROR_INVALIDDATA;
    sc->ctts_data = av_malloc(entries * sizeof(*sc->ctts_data));
    if (!sc->ctts_data)
        return AVERROR(ENOMEM);

    for (i = 0; i < entries && !pb->eof_reached; i++) {
        int count =avio_rb32(pb);
        int duration =avio_rb32(pb);

        sc->ctts_data[i].count = count;
        sc->ctts_data[i].duration= duration;

        av_dlog(c->fc, "count=%d, duration=%d\n",
                count, duration);

        /* absurdly large offsets mean the table is garbage: drop it entirely
         * rather than produce wildly wrong timestamps (the i+2<entries guard
         * tolerates bogus values in the final entries) */
        if (FFABS(duration) > (1<<28) && i+2<entries) {
            av_log(c->fc, AV_LOG_WARNING, "CTTS invalid\n");
            av_freep(&sc->ctts_data);
            sc->ctts_count = 0;
            return 0;
        }

        if (i+2<entries)
            mov_update_dts_shift(sc, duration);
    }

    /* keep only the entries actually read (may be short on EOF) */
    sc->ctts_count = i;

    if (pb->eof_reached)
        return AVERROR_EOF;

    av_dlog(c->fc, "dts shift %d\n", sc->dts_shift);

    return 0;
}
 
/* Parse the 'sbgp' (sample-to-group) atom. Only the 'rap ' grouping type
 * (random access point marking) is supported; other groupings are ignored.
 * Fills sc->rap_group with (sample_count, group_description_index) pairs
 * later used by mov_build_index() to flag keyframes. */
static int mov_read_sbgp(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries;
    uint8_t version;
    uint32_t grouping_type;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    version = avio_r8(pb); /* version */
    avio_rb24(pb); /* flags */
    grouping_type = avio_rl32(pb);
    if (grouping_type != MKTAG( 'r','a','p',' '))
        return 0; /* only support 'rap ' grouping */
    if (version == 1)
        avio_rb32(pb); /* grouping_type_parameter */

    entries = avio_rb32(pb);
    if (!entries)
        return 0;
    /* overflow-safe bound on the allocation size */
    if (entries >= UINT_MAX / sizeof(*sc->rap_group))
        return AVERROR_INVALIDDATA;
    sc->rap_group = av_malloc(entries * sizeof(*sc->rap_group));
    if (!sc->rap_group)
        return AVERROR(ENOMEM);

    for (i = 0; i < entries && !pb->eof_reached; i++) {
        sc->rap_group[i].count = avio_rb32(pb); /* sample_count */
        sc->rap_group[i].index = avio_rb32(pb); /* group_description_index */
    }

    /* keep only the entries actually read (may be short on EOF) */
    sc->rap_group_count = i;

    return pb->eof_reached ? AVERROR_EOF : 0;
}
 
/* Build the stream's seek index from the parsed sample tables.
 *
 * Two strategies:
 *  - general case: walk stsc/stsz/stts/stss/stps/sbgp together, emitting one
 *    index entry per sample;
 *  - old-style uncompressed audio (stts has a single entry of duration 1):
 *    emit one entry per chunk of up to 1024 samples (or per GSM frame).
 *
 * On any inconsistency the function logs and returns early, leaving a
 * partial (possibly empty) index; it reports no error to the caller. */
static void mov_build_index(MOVContext *mov, AVStream *st)
{
    MOVStreamContext *sc = st->priv_data;
    int64_t current_offset;
    int64_t current_dts = 0;
    unsigned int stts_index = 0;
    unsigned int stsc_index = 0;
    unsigned int stss_index = 0;
    unsigned int stps_index = 0;
    unsigned int i, j;
    uint64_t stream_size = 0;

    /* adjust first dts according to edit list */
    if ((sc->empty_duration || sc->start_time) && mov->time_scale > 0) {
        if (sc->empty_duration)
            sc->empty_duration = av_rescale(sc->empty_duration, sc->time_scale, mov->time_scale);
        sc->time_offset = sc->start_time - sc->empty_duration;
        current_dts = -sc->time_offset;
        if (sc->ctts_count>0 && sc->stts_count>0 &&
            sc->ctts_data[0].duration / FFMAX(sc->stts_data[0].duration, 1) > 16) {
            /* more than 16 frames delay, dts are likely wrong
               this happens with files created by iMovie */
            sc->wrong_dts = 1;
            st->codec->has_b_frames = 1;
        }
    }

    /* only use old uncompressed audio chunk demuxing when stts specifies it */
    if (!(st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
          sc->stts_count == 1 && sc->stts_data[0].duration == 1)) {
        unsigned int current_sample = 0;
        unsigned int stts_sample = 0;
        unsigned int sample_size;
        unsigned int distance = 0;
        unsigned int rap_group_index = 0;
        unsigned int rap_group_sample = 0;
        int rap_group_present = sc->rap_group_count && sc->rap_group;
        /* some files index keyframes starting from 1 instead of 0 */
        int key_off = (sc->keyframe_count && sc->keyframes[0] > 0) || (sc->stps_count && sc->stps_data[0] > 0);

        current_dts -= sc->dts_shift;

        if (!sc->sample_count || st->nb_index_entries)
            return;
        /* overflow-safe bound before growing the index array */
        if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries)
            return;
        if (av_reallocp_array(&st->index_entries,
                              st->nb_index_entries + sc->sample_count,
                              sizeof(*st->index_entries)) < 0) {
            st->nb_index_entries = 0;
            return;
        }
        st->index_entries_allocated_size = (st->nb_index_entries + sc->sample_count) * sizeof(*st->index_entries);

        for (i = 0; i < sc->chunk_count; i++) {
            int64_t next_offset = i+1 < sc->chunk_count ? sc->chunk_offsets[i+1] : INT64_MAX;
            current_offset = sc->chunk_offsets[i];
            /* advance to the stsc run covering chunk i (stsc 'first' is 1-based) */
            while (stsc_index + 1 < sc->stsc_count &&
                   i + 1 == sc->stsc_data[stsc_index + 1].first)
                stsc_index++;

            /* sanity-check the constant stsz sample size against the chunk span */
            if (next_offset > current_offset && sc->sample_size>0 && sc->sample_size < sc->stsz_sample_size &&
                sc->stsc_data[stsc_index].count * (int64_t)sc->stsz_sample_size > next_offset - current_offset) {
                av_log(mov->fc, AV_LOG_WARNING, "STSZ sample size %d invalid (too large), ignoring\n", sc->stsz_sample_size);
                sc->stsz_sample_size = sc->sample_size;
            }
            if (sc->stsz_sample_size>0 && sc->stsz_sample_size < sc->sample_size) {
                av_log(mov->fc, AV_LOG_WARNING, "STSZ sample size %d invalid (too small), ignoring\n", sc->stsz_sample_size);
                sc->stsz_sample_size = sc->sample_size;
            }

            for (j = 0; j < sc->stsc_data[stsc_index].count; j++) {
                int keyframe = 0;
                if (current_sample >= sc->sample_count) {
                    av_log(mov->fc, AV_LOG_ERROR, "wrong sample count\n");
                    return;
                }

                /* keyframe if listed in stss, or if there is no stss at all */
                if (!sc->keyframe_absent && (!sc->keyframe_count || current_sample+key_off == sc->keyframes[stss_index])) {
                    keyframe = 1;
                    if (stss_index + 1 < sc->keyframe_count)
                        stss_index++;
                } else if (sc->stps_count && current_sample+key_off == sc->stps_data[stps_index]) {
                    keyframe = 1;
                    if (stps_index + 1 < sc->stps_count)
                        stps_index++;
                }
                /* 'rap ' sample group can also mark random access points */
                if (rap_group_present && rap_group_index < sc->rap_group_count) {
                    if (sc->rap_group[rap_group_index].index > 0)
                        keyframe = 1;
                    if (++rap_group_sample == sc->rap_group[rap_group_index].count) {
                        rap_group_sample = 0;
                        rap_group_index++;
                    }
                }
                if (keyframe)
                    distance = 0;
                sample_size = sc->stsz_sample_size > 0 ? sc->stsz_sample_size : sc->sample_sizes[current_sample];
                /* pseudo_stream_id filters samples when one trak maps to
                 * several AVStreams (multiple stsd entries) */
                if (sc->pseudo_stream_id == -1 ||
                    sc->stsc_data[stsc_index].id - 1 == sc->pseudo_stream_id) {
                    AVIndexEntry *e = &st->index_entries[st->nb_index_entries++];
                    e->pos = current_offset;
                    e->timestamp = current_dts;
                    e->size = sample_size;
                    e->min_distance = distance;
                    e->flags = keyframe ? AVINDEX_KEYFRAME : 0;
                    av_dlog(mov->fc, "AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", "
                            "size %d, distance %d, keyframe %d\n", st->index, current_sample,
                            current_offset, current_dts, sample_size, distance, keyframe);
                }

                current_offset += sample_size;
                stream_size += sample_size;
                current_dts += sc->stts_data[stts_index].duration;
                distance++;
                stts_sample++;
                current_sample++;
                if (stts_index + 1 < sc->stts_count && stts_sample == sc->stts_data[stts_index].count) {
                    stts_sample = 0;
                    stts_index++;
                }
            }
        }
        if (st->duration > 0)
            st->codec->bit_rate = stream_size*8*sc->time_scale/st->duration;
    } else {
        unsigned chunk_samples, total = 0;

        // compute total chunk count
        for (i = 0; i < sc->stsc_count; i++) {
            unsigned count, chunk_count;

            chunk_samples = sc->stsc_data[i].count;
            if (i != sc->stsc_count - 1 &&
                sc->samples_per_frame && chunk_samples % sc->samples_per_frame) {
                av_log(mov->fc, AV_LOG_ERROR, "error unaligned chunk\n");
                return;
            }

            if (sc->samples_per_frame >= 160) { // gsm
                count = chunk_samples / sc->samples_per_frame;
            } else if (sc->samples_per_frame > 1) {
                /* pack whole frames into index entries of <= 1024 samples */
                unsigned samples = (1024/sc->samples_per_frame)*sc->samples_per_frame;
                count = (chunk_samples+samples-1) / samples;
            } else {
                count = (chunk_samples+1023) / 1024;
            }

            if (i < sc->stsc_count - 1)
                chunk_count = sc->stsc_data[i+1].first - sc->stsc_data[i].first;
            else
                chunk_count = sc->chunk_count - (sc->stsc_data[i].first - 1);
            total += chunk_count * count;
        }

        av_dlog(mov->fc, "chunk count %d\n", total);
        /* overflow-safe bound before growing the index array */
        if (total >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries)
            return;
        if (av_reallocp_array(&st->index_entries,
                              st->nb_index_entries + total,
                              sizeof(*st->index_entries)) < 0) {
            st->nb_index_entries = 0;
            return;
        }
        st->index_entries_allocated_size = (st->nb_index_entries + total) * sizeof(*st->index_entries);

        // populate index
        for (i = 0; i < sc->chunk_count; i++) {
            current_offset = sc->chunk_offsets[i];
            if (stsc_index + 1 < sc->stsc_count &&
                i + 1 == sc->stsc_data[stsc_index + 1].first)
                stsc_index++;
            chunk_samples = sc->stsc_data[stsc_index].count;

            while (chunk_samples > 0) {
                AVIndexEntry *e;
                unsigned size, samples;

                if (sc->samples_per_frame >= 160) { // gsm
                    samples = sc->samples_per_frame;
                    size = sc->bytes_per_frame;
                } else {
                    if (sc->samples_per_frame > 1) {
                        samples = FFMIN((1024 / sc->samples_per_frame)*
                                        sc->samples_per_frame, chunk_samples);
                        size = (samples / sc->samples_per_frame) * sc->bytes_per_frame;
                    } else {
                        samples = FFMIN(1024, chunk_samples);
                        size = samples * sc->sample_size;
                    }
                }

                if (st->nb_index_entries >= total) {
                    av_log(mov->fc, AV_LOG_ERROR, "wrong chunk count %d\n", total);
                    return;
                }
                e = &st->index_entries[st->nb_index_entries++];
                e->pos = current_offset;
                e->timestamp = current_dts;
                e->size = size;
                e->min_distance = 0;
                e->flags = AVINDEX_KEYFRAME;
                av_dlog(mov->fc, "AVIndex stream %d, chunk %d, offset %"PRIx64", dts %"PRId64", "
                        "size %d, duration %d\n", st->index, i, current_offset, current_dts,
                        size, samples);

                current_offset += size;
                current_dts += samples;
                chunk_samples -= samples;
            }
        }
    }
}
 
/**
 * Open an external data-reference ('dref') target file.
 *
 * Only a relative path (derived from the source file's directory plus the
 * dref's nlvl_from/nlvl_to level counts) is tried by default; absolute
 * paths are honored only on explicit user request since they could be used
 * to probe the local filesystem. Fix vs. original: the av_strlcat() size
 * argument now uses sizeof(filename) instead of the magic constant 1024,
 * so the bound cannot silently desynchronize from the buffer size.
 *
 * @return 0 on success, AVERROR(ENOENT) if no candidate could be opened.
 */
static int mov_open_dref(AVIOContext **pb, const char *src, MOVDref *ref,
                         AVIOInterruptCB *int_cb, int use_absolute_path, AVFormatContext *fc)
{
    /* try relative path, we do not try the absolute because it can leak information about our
       system to an attacker */
    if (ref->nlvl_to > 0 && ref->nlvl_from > 0) {
        char filename[1024];
        const char *src_path;
        int i, l;

        /* find a source dir */
        src_path = strrchr(src, '/');
        if (src_path)
            src_path++;
        else
            src_path = src;

        /* find a next level down to target */
        for (i = 0, l = strlen(ref->path) - 1; l >= 0; l--)
            if (ref->path[l] == '/') {
                if (i == ref->nlvl_to - 1)
                    break;
                else
                    i++;
            }

        /* compose filename if next level down to target was found */
        if (i == ref->nlvl_to - 1 && src_path - src < sizeof(filename)) {
            memcpy(filename, src, src_path - src);
            filename[src_path - src] = 0;

            /* climb nlvl_from-1 levels up from the source directory */
            for (i = 1; i < ref->nlvl_from; i++)
                av_strlcat(filename, "../", sizeof(filename));

            av_strlcat(filename, ref->path + l + 1, sizeof(filename));

            if (!avio_open2(pb, filename, AVIO_FLAG_READ, int_cb, NULL))
                return 0;
        }
    } else if (use_absolute_path) {
        av_log(fc, AV_LOG_WARNING, "Using absolute path on user request, "
               "this is a possible security issue\n");
        if (!avio_open2(pb, ref->path, AVIO_FLAG_READ, int_cb, NULL))
            return 0;
    }

    return AVERROR(ENOENT);
}
 
/* Ensure the stream has a usable (positive) timescale: fall back to the
 * movie-level timescale, and finally to 1, warning when unset. */
static void fix_timescale(MOVContext *c, MOVStreamContext *sc)
{
    if (sc->time_scale > 0)
        return;
    av_log(c->fc, AV_LOG_WARNING, "stream %d, timescale not set\n", sc->ffindex);
    sc->time_scale = c->time_scale > 0 ? c->time_scale : 1;
}
 
/* Parse a 'trak' atom: create the AVStream and its MOVStreamContext, parse
 * all child atoms via mov_read_default(), then post-process the track
 * (timescale fixup, seek index, external data references, aspect ratio,
 * frame rates) and free the raw sample tables that are no longer needed. */
static int mov_read_trak(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    int ret;

    st = avformat_new_stream(c->fc, NULL);
    if (!st) return AVERROR(ENOMEM);
    /* provisional id; overwritten by the real track id in mov_read_tkhd() */
    st->id = c->fc->nb_streams;
    sc = av_mallocz(sizeof(MOVStreamContext));
    if (!sc) return AVERROR(ENOMEM);

    st->priv_data = sc;
    st->codec->codec_type = AVMEDIA_TYPE_DATA;
    sc->ffindex = st->index;

    if ((ret = mov_read_default(c, pb, atom)) < 0)
        return ret;

    /* sanity checks */
    if (sc->chunk_count && (!sc->stts_count || !sc->stsc_count ||
                            (!sc->sample_size && !sc->sample_count))) {
        av_log(c->fc, AV_LOG_ERROR, "stream %d, missing mandatory atoms, broken header\n",
               st->index);
        return 0;
    }

    fix_timescale(c, sc);

    avpriv_set_pts_info(st, 64, 1, sc->time_scale);

    mov_build_index(c, st);

    /* media data may live in an external file referenced by 'dref' */
    if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) {
        MOVDref *dref = &sc->drefs[sc->dref_id - 1];
        if (mov_open_dref(&sc->pb, c->fc->filename, dref, &c->fc->interrupt_callback,
                          c->use_absolute_path, c->fc) < 0)
            av_log(c->fc, AV_LOG_ERROR,
                   "stream %d, error opening alias: path='%s', dir='%s', "
                   "filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d\n",
                   st->index, dref->path, dref->dir, dref->filename,
                   dref->volume, dref->nlvl_from, dref->nlvl_to);
    } else {
        sc->pb = c->fc->pb;
        sc->pb_is_copied = 1;
    }

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* derive SAR when tkhd display size disagrees with coded size */
        if (!st->sample_aspect_ratio.num &&
            (st->codec->width != sc->width || st->codec->height != sc->height)) {
            st->sample_aspect_ratio = av_d2q(((double)st->codec->height * sc->width) /
                                             ((double)st->codec->width * sc->height), INT_MAX);
        }

        if (st->duration > 0)
            av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
                      sc->time_scale*st->nb_frames, st->duration, INT_MAX);

#if FF_API_R_FRAME_RATE
        /* constant frame duration (possibly except the last sample) */
        if (sc->stts_count == 1 || (sc->stts_count == 2 && sc->stts_data[1].count == 1))
            av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den,
                      sc->time_scale, sc->stts_data[0].duration, INT_MAX);
#endif
    }

    // done for ai5q, ai52, ai55, ai1q, ai12 and ai15.
    if (!st->codec->extradata_size && st->codec->codec_id == AV_CODEC_ID_H264 &&
        st->codec->codec_tag != MKTAG('a', 'v', 'c', '1')) {
        ff_generate_avci_extradata(st);
    }

    switch (st->codec->codec_id) {
#if CONFIG_H261_DECODER
    case AV_CODEC_ID_H261:
#endif
#if CONFIG_H263_DECODER
    case AV_CODEC_ID_H263:
#endif
#if CONFIG_MPEG4_DECODER
    case AV_CODEC_ID_MPEG4:
#endif
        st->codec->width = 0; /* let decoder init width/height */
        st->codec->height= 0;
        break;
    }

    /* Do not need those anymore. */
    av_freep(&sc->chunk_offsets);
    av_freep(&sc->stsc_data);
    av_freep(&sc->sample_sizes);
    av_freep(&sc->keyframes);
    av_freep(&sc->stts_data);
    av_freep(&sc->stps_data);
    av_freep(&sc->rap_group);

    return 0;
}
 
/* Parse an 'ilst' atom: its children are iTunes-style metadata, so the
 * itunes_metadata flag is raised around the recursive parse and always
 * cleared afterwards, regardless of the parse result. */
static int mov_read_ilst(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    int err;

    c->itunes_metadata = 1;
    err = mov_read_default(c, pb, atom);
    c->itunes_metadata = 0;

    return err;
}
 
/* Parse a 'meta' atom by scanning forward for an 'hdlr' child; when found,
 * rewind over its 8-byte header and hand the remainder to the generic
 * parser. If no 'hdlr' appears before the payload runs out, do nothing. */
static int mov_read_meta(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    for (;;) {
        uint32_t child;

        if (atom.size <= 8)
            return 0;
        child = avio_rl32(pb);
        atom.size -= 4;
        if (child == MKTAG('h','d','l','r')) {
            /* step back over the size+tag we consumed */
            avio_seek(pb, -8, SEEK_CUR);
            atom.size += 8;
            return mov_read_default(c, pb, atom);
        }
    }
}
 
/* Parse a 'tkhd' (track header) atom: track id, enabled flag, display
 * width/height and the 3x3 display matrix (ISO 14496-12 section 6.2.2).
 * The matrix yields a "rotate" metadata tag for the common 90/180/270
 * degree cases and, for plain scaling matrices, a sample aspect ratio. */
static int mov_read_tkhd(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    int i;
    int width;
    int height;
    int64_t disp_transform[2];
    int display_matrix[3][2];
    AVStream *st;
    MOVStreamContext *sc;
    int version;
    int flags;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    version = avio_r8(pb);
    flags = avio_rb24(pb);
    st->disposition |= (flags & MOV_TKHD_FLAG_ENABLED) ? AV_DISPOSITION_DEFAULT : 0;

    /* version 1 uses 64-bit timestamps, version 0 uses 32-bit */
    if (version == 1) {
        avio_rb64(pb);
        avio_rb64(pb);
    } else {
        avio_rb32(pb); /* creation time */
        avio_rb32(pb); /* modification time */
    }
    st->id = (int)avio_rb32(pb); /* track id (NOT 0 !)*/
    avio_rb32(pb); /* reserved */

    /* highlevel (considering edits) duration in movie timebase */
    (version == 1) ? avio_rb64(pb) : avio_rb32(pb);
    avio_rb32(pb); /* reserved */
    avio_rb32(pb); /* reserved */

    avio_rb16(pb); /* layer */
    avio_rb16(pb); /* alternate group */
    avio_rb16(pb); /* volume */
    avio_rb16(pb); /* reserved */

    //read in the display matrix (outlined in ISO 14496-12, Section 6.2.2)
    // they're kept in fixed point format through all calculations
    // ignore u,v,z b/c we don't need the scale factor to calc aspect ratio
    for (i = 0; i < 3; i++) {
        display_matrix[i][0] = avio_rb32(pb); // 16.16 fixed point
        display_matrix[i][1] = avio_rb32(pb); // 16.16 fixed point
        avio_rb32(pb); // 2.30 fixed point (not used)
    }

    width = avio_rb32(pb); // 16.16 fixed point track width
    height = avio_rb32(pb); // 16.16 fixed point track height
    sc->width = width >> 16;
    sc->height = height >> 16;

    //Assign clockwise rotate values based on transform matrix so that
    //we can compensate for iPhone orientation during capture.

    /* -65536/65536 are -1.0/1.0 in 16.16 fixed point */
    if (display_matrix[1][0] == -65536 && display_matrix[0][1] == 65536) {
        av_dict_set(&st->metadata, "rotate", "90", 0);
    }

    if (display_matrix[0][0] == -65536 && display_matrix[1][1] == -65536) {
        av_dict_set(&st->metadata, "rotate", "180", 0);
    }

    if (display_matrix[1][0] == 65536 && display_matrix[0][1] == -65536) {
        av_dict_set(&st->metadata, "rotate", "270", 0);
    }

    // transform the display width/height according to the matrix
    // skip this if the display matrix is the default identity matrix
    // or if it is rotating the picture, ex iPhone 3GS
    // to keep the same scale, use [width height 1<<16]
    if (width && height &&
        ((display_matrix[0][0] != 65536 ||
          display_matrix[1][1] != 65536) &&
         !display_matrix[0][1] &&
         !display_matrix[1][0] &&
         !display_matrix[2][0] && !display_matrix[2][1])) {
        for (i = 0; i < 2; i++)
            disp_transform[i] =
                (int64_t) width * display_matrix[0][i] +
                (int64_t) height * display_matrix[1][i] +
                ((int64_t) display_matrix[2][i] << 16);

        //sample aspect ratio is new width/height divided by old width/height
        st->sample_aspect_ratio = av_d2q(
            ((double) disp_transform[0] * height) /
            ((double) disp_transform[1] * width), INT_MAX);
    }
    return 0;
}
 
/* Parse a 'tfhd' (track fragment header) atom: select the matching trex
 * defaults by track id, then override base data offset, stsd id, default
 * duration/size/flags according to the tfhd flag bits. The result is kept
 * in c->fragment for use by subsequent 'trun' atoms. */
static int mov_read_tfhd(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    MOVFragment *frag = &c->fragment;
    MOVTrackExt *trex = NULL;
    int flags, track_id, i;

    avio_r8(pb); /* version */
    flags = avio_rb24(pb);

    track_id = avio_rb32(pb);
    if (!track_id)
        return AVERROR_INVALIDDATA;
    frag->track_id = track_id;
    /* fragment defaults come from the matching 'trex' entry */
    for (i = 0; i < c->trex_count; i++)
        if (c->trex_data[i].track_id == frag->track_id) {
            trex = &c->trex_data[i];
            break;
        }
    if (!trex) {
        av_log(c->fc, AV_LOG_ERROR, "could not find corresponding trex\n");
        return AVERROR_INVALIDDATA;
    }

    frag->base_data_offset = flags & MOV_TFHD_BASE_DATA_OFFSET ?
                             avio_rb64(pb) : frag->moof_offset;
    frag->stsd_id  = flags & MOV_TFHD_STSD_ID ? avio_rb32(pb) : trex->stsd_id;

    frag->duration = flags & MOV_TFHD_DEFAULT_DURATION ?
                     avio_rb32(pb) : trex->duration;
    frag->size     = flags & MOV_TFHD_DEFAULT_SIZE ?
                     avio_rb32(pb) : trex->size;
    frag->flags    = flags & MOV_TFHD_DEFAULT_FLAGS ?
                     avio_rb32(pb) : trex->flags;
    av_dlog(c->fc, "frag flags 0x%x\n", frag->flags);
    return 0;
}
 
/* Parse a 'chap' track reference: record the id of the track that carries
 * the chapter markers so it can be resolved after all traks are read. */
static int mov_read_chap(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    unsigned int track_id = avio_rb32(pb);

    c->chapter_track = track_id;
    return 0;
}
 
/* Parse a 'trex' (track extends) atom: append one entry of per-track
 * fragment defaults (stsd id, sample duration/size/flags) to c->trex_data.
 * Presence of trex implies a fragmented file, so the mvhd duration is
 * invalidated. */
static int mov_read_trex(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    MOVTrackExt *trex;
    int err;

    /* overflow-safe bound before growing the array */
    if ((uint64_t)c->trex_count+1 >= UINT_MAX / sizeof(*c->trex_data))
        return AVERROR_INVALIDDATA;
    if ((err = av_reallocp_array(&c->trex_data, c->trex_count + 1,
                                 sizeof(*c->trex_data))) < 0) {
        c->trex_count = 0;
        return err;
    }

    c->fc->duration = AV_NOPTS_VALUE; // the duration from mvhd is not representing the whole file when fragments are used.

    trex = &c->trex_data[c->trex_count++];
    avio_r8(pb); /* version */
    avio_rb24(pb); /* flags */
    trex->track_id = avio_rb32(pb);
    trex->stsd_id  = avio_rb32(pb);
    trex->duration = avio_rb32(pb);
    trex->size     = avio_rb32(pb);
    trex->flags    = avio_rb32(pb);
    return 0;
}
 
/* Parse a 'trun' (track fragment run) atom: append index entries for each
 * sample in the run, using per-sample fields when present in the trun and
 * the tfhd/trex defaults otherwise. Also extends the ctts table with one
 * entry per sample (composition offsets are always assumed present, see
 * the comment below). Updates sc->track_end and the stream duration. */
static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    MOVFragment *frag = &c->fragment;
    AVStream *st = NULL;
    MOVStreamContext *sc;
    MOVStts *ctts_data;
    uint64_t offset;
    int64_t dts;
    int data_offset = 0;
    unsigned entries, first_sample_flags = frag->flags;
    int flags, distance, i, found_keyframe = 0, err;

    /* locate the stream the current fragment (tfhd) refers to */
    for (i = 0; i < c->fc->nb_streams; i++) {
        if (c->fc->streams[i]->id == frag->track_id) {
            st = c->fc->streams[i];
            break;
        }
    }
    if (!st) {
        av_log(c->fc, AV_LOG_ERROR, "could not find corresponding track id %d\n", frag->track_id);
        return AVERROR_INVALIDDATA;
    }
    sc = st->priv_data;
    if (sc->pseudo_stream_id+1 != frag->stsd_id && sc->pseudo_stream_id != -1)
        return 0;
    avio_r8(pb); /* version */
    flags = avio_rb24(pb);
    entries = avio_rb32(pb);
    av_dlog(c->fc, "flags 0x%x entries %d\n", flags, entries);

    /* Always assume the presence of composition time offsets.
     * Without this assumption, for instance, we cannot deal with a track in fragmented movies that meet the following.
     *  1) in the initial movie, there are no samples.
     *  2) in the first movie fragment, there is only one sample without composition time offset.
     *  3) in the subsequent movie fragments, there are samples with composition time offset. */
    if (!sc->ctts_count && sc->sample_count)
    {
        /* Complement ctts table if moov atom doesn't have ctts atom. */
        ctts_data = av_realloc(NULL, sizeof(*sc->ctts_data));
        if (!ctts_data)
            return AVERROR(ENOMEM);
        sc->ctts_data = ctts_data;
        sc->ctts_data[sc->ctts_count].count = sc->sample_count;
        sc->ctts_data[sc->ctts_count].duration = 0;
        sc->ctts_count++;
    }
    /* overflow-safe bound before growing the ctts table */
    if ((uint64_t)entries+sc->ctts_count >= UINT_MAX/sizeof(*sc->ctts_data))
        return AVERROR_INVALIDDATA;
    if ((err = av_reallocp_array(&sc->ctts_data, entries + sc->ctts_count,
                                 sizeof(*sc->ctts_data))) < 0) {
        sc->ctts_count = 0;
        return err;
    }
    if (flags & MOV_TRUN_DATA_OFFSET)        data_offset        = avio_rb32(pb);
    if (flags & MOV_TRUN_FIRST_SAMPLE_FLAGS) first_sample_flags = avio_rb32(pb);
    dts    = sc->track_end - sc->time_offset;
    offset = frag->base_data_offset + data_offset;
    distance = 0;
    av_dlog(c->fc, "first sample flags 0x%x\n", first_sample_flags);
    for (i = 0; i < entries && !pb->eof_reached; i++) {
        /* per-sample fields override the tfhd/trex fragment defaults */
        unsigned sample_size = frag->size;
        int sample_flags = i ? frag->flags : first_sample_flags;
        unsigned sample_duration = frag->duration;
        int keyframe = 0;

        if (flags & MOV_TRUN_SAMPLE_DURATION) sample_duration = avio_rb32(pb);
        if (flags & MOV_TRUN_SAMPLE_SIZE)     sample_size     = avio_rb32(pb);
        if (flags & MOV_TRUN_SAMPLE_FLAGS)    sample_flags    = avio_rb32(pb);
        sc->ctts_data[sc->ctts_count].count = 1;
        sc->ctts_data[sc->ctts_count].duration = (flags & MOV_TRUN_SAMPLE_CTS) ?
                                                 avio_rb32(pb) : 0;
        mov_update_dts_shift(sc, sc->ctts_data[sc->ctts_count].duration);
        sc->ctts_count++;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            keyframe = 1;
        else if (!found_keyframe)
            /* only the first sync sample of the run is marked as keyframe */
            keyframe = found_keyframe =
                !(sample_flags & (MOV_FRAG_SAMPLE_FLAG_IS_NON_SYNC |
                                  MOV_FRAG_SAMPLE_FLAG_DEPENDS_YES));
        if (keyframe)
            distance = 0;
        av_add_index_entry(st, offset, dts, sample_size, distance,
                           keyframe ? AVINDEX_KEYFRAME : 0);
        av_dlog(c->fc, "AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", "
                "size %d, distance %d, keyframe %d\n", st->index, sc->sample_count+i,
                offset, dts, sample_size, distance, keyframe);
        distance++;
        dts += sample_duration;
        offset += sample_size;
        sc->data_size += sample_size;
    }

    if (pb->eof_reached)
        return AVERROR_EOF;

    frag->moof_offset = offset;
    st->duration = sc->track_end = dts + sc->time_offset;
    return 0;
}
 
/* this atom should be null (from specs), but some buggy files put the 'moov' atom inside it... */
/* like the files created with Adobe Premiere 5.0, for samples see */
/* http://graphics.tudelft.nl/~wouter/publications/soundtests/ */
/* Parse a 'wide' atom. It should be empty per the spec, but broken muxers
 * (e.g. Adobe Premiere 5.0) hide a zero-sized 'mdat' header inside it;
 * in that case treat the remainder as the mdat payload. */
static int mov_read_wide(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    if (atom.size < 8)
        return 0; /* continue */

    if (avio_rb32(pb) != 0) { /* 0 sized mdat atom... use the 'wide' atom size */
        avio_skip(pb, atom.size - 4);
        return 0;
    }

    atom.type = avio_rl32(pb);
    atom.size -= 8;
    if (atom.type != MKTAG('m','d','a','t')) {
        avio_skip(pb, atom.size);
        return 0;
    }
    return mov_read_mdat(c, pb, atom);
}
 
/**
 * Parse a 'cmov' (compressed moov) atom: inflate the zlib-compressed
 * payload and re-parse it as a regular 'moov' atom.
 *
 * Fixes vs. original:
 *  - reject atoms too small to contain the 6 header words, which previously
 *    made cmov_len negative and turned the av_malloc() size into a huge
 *    value after conversion to size_t;
 *  - check the avio_read() result so a short read no longer feeds
 *    uninitialized buffer contents to uncompress().
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int mov_read_cmov(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
#if CONFIG_ZLIB
    AVIOContext ctx;
    uint8_t *cmov_data;
    uint8_t *moov_data; /* uncompressed data */
    long cmov_len, moov_len;
    int ret = -1;

    avio_rb32(pb); /* dcom atom */
    if (avio_rl32(pb) != MKTAG('d','c','o','m'))
        return AVERROR_INVALIDDATA;
    if (avio_rl32(pb) != MKTAG('z','l','i','b')) {
        av_log(c->fc, AV_LOG_ERROR, "unknown compression for cmov atom !\n");
        return AVERROR_INVALIDDATA;
    }
    avio_rb32(pb); /* cmvd atom */
    if (avio_rl32(pb) != MKTAG('c','m','v','d'))
        return AVERROR_INVALIDDATA;
    moov_len = avio_rb32(pb); /* uncompressed size */
    /* the 6 words consumed above must fit inside the atom */
    if (atom.size < 6 * 4)
        return AVERROR_INVALIDDATA;
    cmov_len = atom.size - 6 * 4;

    cmov_data = av_malloc(cmov_len);
    if (!cmov_data)
        return AVERROR(ENOMEM);
    moov_data = av_malloc(moov_len);
    if (!moov_data) {
        av_free(cmov_data);
        return AVERROR(ENOMEM);
    }
    /* a short read means a truncated file; do not inflate garbage */
    if (avio_read(pb, cmov_data, cmov_len) != cmov_len) {
        ret = AVERROR_INVALIDDATA;
        goto free_and_return;
    }
    if (uncompress (moov_data, (uLongf *) &moov_len, (const Bytef *)cmov_data, cmov_len) != Z_OK)
        goto free_and_return;
    if (ffio_init_context(&ctx, moov_data, moov_len, 0, NULL, NULL, NULL, NULL) != 0)
        goto free_and_return;
    ctx.seekable = AVIO_SEEKABLE_NORMAL;
    atom.type = MKTAG('m','o','o','v');
    atom.size = moov_len;
    ret = mov_read_default(c, &ctx, atom);
free_and_return:
    av_free(moov_data);
    av_free(cmov_data);
    return ret;
#else
    av_log(c->fc, AV_LOG_ERROR, "this file requires zlib support compiled in\n");
    return AVERROR(ENOSYS);
#endif
}
 
/* edit list atom */
/**
 * Parse an 'elst' (edit list) atom.
 *
 * Records an initial empty edit (media time -1) as sc->empty_duration and
 * the first real edit's media time as sc->start_time; anything beyond that
 * is unsupported and only triggers a warning.
 *
 * Fix vs. original: the entry-count validation assumed 12 bytes per entry,
 * but version-1 entries are 20 bytes (64-bit duration and media time per
 * ISO/IEC 14496-12), so truncated version-1 atoms slipped through. The
 * bound now depends on the box version.
 *
 * @return 0 on success (including unsupported-but-tolerated lists),
 *         AVERROR_INVALIDDATA if the entry count exceeds the atom size.
 */
static int mov_read_elst(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    MOVStreamContext *sc;
    int i, edit_count, version, edit_start_index = 0;
    int unsupported = 0;
    unsigned entry_size;

    if (c->fc->nb_streams < 1 || c->ignore_editlist)
        return 0;
    sc = c->fc->streams[c->fc->nb_streams-1]->priv_data;

    version = avio_r8(pb); /* version */
    avio_rb24(pb); /* flags */
    edit_count = avio_rb32(pb); /* entries */

    /* v1 entries hold two 64-bit fields (20 bytes), v0 two 32-bit (12 bytes) */
    entry_size = version == 1 ? 20 : 12;
    if ((uint64_t)edit_count*entry_size+8 > atom.size)
        return AVERROR_INVALIDDATA;

    av_dlog(c->fc, "track[%i].edit_count = %i\n", c->fc->nb_streams-1, edit_count);
    for (i=0; i<edit_count; i++){
        int64_t time;
        int64_t duration;
        int rate;
        if (version == 1) {
            duration = avio_rb64(pb);
            time     = avio_rb64(pb);
        } else {
            duration = avio_rb32(pb); /* segment duration */
            time     = (int32_t)avio_rb32(pb); /* media time */
        }
        rate = avio_rb32(pb);
        /* media time -1 in the first entry means "start with silence/black" */
        if (i == 0 && time == -1) {
            sc->empty_duration = duration;
            edit_start_index = 1;
        } else if (i == edit_start_index && time >= 0)
            sc->start_time = time;
        else
            unsupported = 1;

        av_dlog(c->fc, "duration=%"PRId64" time=%"PRId64" rate=%f\n",
                duration, time, rate / 65536.0);
    }

    if (unsupported)
        av_log(c->fc, AV_LOG_WARNING, "multiple edit list entries, "
               "a/v desync might occur, patch welcome\n");

    return 0;
}
 
/* Parse a 'tmcd' track reference: remember which track id holds the
 * timecode data for the current (last created) stream. */
static int mov_read_tmcd(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;

    if (c->fc->nb_streams < 1)
        return AVERROR_INVALIDDATA;

    st = c->fc->streams[c->fc->nb_streams - 1];
    ((MOVStreamContext *)st->priv_data)->timecode_track = avio_rb32(pb);
    return 0;
}
 
/* Parse a 'uuid' atom. Only the ISML (Smooth Streaming) manifest UUID is
 * recognized; its XML payload is scanned for systemBitrate="N" attributes,
 * which are collected into c->bitrates for later per-stream assignment. */
static int mov_read_uuid(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    int ret;
    uint8_t uuid[16];
    static const uint8_t uuid_isml_manifest[] = {
        0xa5, 0xd4, 0x0b, 0x30, 0xe8, 0x14, 0x11, 0xdd,
        0xba, 0x2f, 0x08, 0x00, 0x20, 0x0c, 0x9a, 0x66
    };

    if (atom.size < sizeof(uuid) || atom.size == INT64_MAX)
        return AVERROR_INVALIDDATA;

    ret = avio_read(pb, uuid, sizeof(uuid));
    if (ret < 0) {
        return ret;
    } else if (ret != sizeof(uuid)) {
        return AVERROR_INVALIDDATA;
    }
    if (!memcmp(uuid, uuid_isml_manifest, sizeof(uuid))) {
        uint8_t *buffer, *ptr;
        char *endptr;
        size_t len = atom.size - sizeof(uuid);

        if (len < 4) {
            return AVERROR_INVALIDDATA;
        }
        ret = avio_skip(pb, 4); // zeroes
        len -= 4;

        /* +1 so the XML payload is NUL-terminated for string scanning */
        buffer = av_mallocz(len + 1);
        if (!buffer) {
            return AVERROR(ENOMEM);
        }
        ret = avio_read(pb, buffer, len);
        if (ret < 0) {
            av_free(buffer);
            return ret;
        } else if (ret != len) {
            av_free(buffer);
            return AVERROR_INVALIDDATA;
        }

        ptr = buffer;
        while ((ptr = av_stristr(ptr, "systemBitrate=\"")) != NULL) {
            ptr += sizeof("systemBitrate=\"") - 1;
            c->bitrates_count++;
            c->bitrates = av_realloc_f(c->bitrates, c->bitrates_count, sizeof(*c->bitrates));
            if (!c->bitrates) {
                c->bitrates_count = 0;
                av_free(buffer);
                return AVERROR(ENOMEM);
            }
            errno = 0;
            ret = strtol(ptr, &endptr, 10);
            /* unparsable values are recorded as bitrate 0 (unknown) */
            if (ret < 0 || errno || *endptr != '"') {
                c->bitrates[c->bitrates_count - 1] = 0;
            } else {
                c->bitrates[c->bitrates_count - 1] = ret;
            }
        }

        av_free(buffer);
    }
    return 0;
}
 
/* Dispatch table mapping atom fourccs to their parser functions; scanned
 * linearly by mov_read_default() and terminated by a zero-type sentinel. */
static const MOVParseTableEntry mov_default_parse_table[] = {
{ MKTAG('A','C','L','R'), mov_read_avid },
{ MKTAG('A','P','R','G'), mov_read_avid },
{ MKTAG('A','A','L','P'), mov_read_avid },
{ MKTAG('A','R','E','S'), mov_read_ares },
{ MKTAG('a','v','s','s'), mov_read_avss },
{ MKTAG('c','h','p','l'), mov_read_chpl },
{ MKTAG('c','o','6','4'), mov_read_stco },
{ MKTAG('c','t','t','s'), mov_read_ctts }, /* composition time to sample */
{ MKTAG('d','i','n','f'), mov_read_default },
{ MKTAG('d','r','e','f'), mov_read_dref },
{ MKTAG('e','d','t','s'), mov_read_default },
{ MKTAG('e','l','s','t'), mov_read_elst },
{ MKTAG('e','n','d','a'), mov_read_enda },
{ MKTAG('f','i','e','l'), mov_read_fiel },
{ MKTAG('f','t','y','p'), mov_read_ftyp },
{ MKTAG('g','l','b','l'), mov_read_glbl },
{ MKTAG('h','d','l','r'), mov_read_hdlr },
{ MKTAG('i','l','s','t'), mov_read_ilst },
{ MKTAG('j','p','2','h'), mov_read_jp2h },
{ MKTAG('m','d','a','t'), mov_read_mdat },
{ MKTAG('m','d','h','d'), mov_read_mdhd },
{ MKTAG('m','d','i','a'), mov_read_default },
{ MKTAG('m','e','t','a'), mov_read_meta },
{ MKTAG('m','i','n','f'), mov_read_default },
{ MKTAG('m','o','o','f'), mov_read_moof },
{ MKTAG('m','o','o','v'), mov_read_moov },
{ MKTAG('m','v','e','x'), mov_read_default },
{ MKTAG('m','v','h','d'), mov_read_mvhd },
{ MKTAG('S','M','I',' '), mov_read_svq3 },
{ MKTAG('a','l','a','c'), mov_read_alac }, /* alac specific atom */
{ MKTAG('a','v','c','C'), mov_read_glbl },
{ MKTAG('p','a','s','p'), mov_read_pasp },
{ MKTAG('s','t','b','l'), mov_read_default },
{ MKTAG('s','t','c','o'), mov_read_stco },
{ MKTAG('s','t','p','s'), mov_read_stps },
{ MKTAG('s','t','r','f'), mov_read_strf },
{ MKTAG('s','t','s','c'), mov_read_stsc },
{ MKTAG('s','t','s','d'), mov_read_stsd }, /* sample description */
{ MKTAG('s','t','s','s'), mov_read_stss }, /* sync sample */
{ MKTAG('s','t','s','z'), mov_read_stsz }, /* sample size */
{ MKTAG('s','t','t','s'), mov_read_stts },
{ MKTAG('s','t','z','2'), mov_read_stsz }, /* compact sample size */
{ MKTAG('t','k','h','d'), mov_read_tkhd }, /* track header */
{ MKTAG('t','f','h','d'), mov_read_tfhd }, /* track fragment header */
{ MKTAG('t','r','a','k'), mov_read_trak },
{ MKTAG('t','r','a','f'), mov_read_default },
{ MKTAG('t','r','e','f'), mov_read_default },
{ MKTAG('t','m','c','d'), mov_read_tmcd },
{ MKTAG('c','h','a','p'), mov_read_chap },
{ MKTAG('t','r','e','x'), mov_read_trex },
{ MKTAG('t','r','u','n'), mov_read_trun },
{ MKTAG('u','d','t','a'), mov_read_default },
{ MKTAG('w','a','v','e'), mov_read_wave },
{ MKTAG('e','s','d','s'), mov_read_esds },
{ MKTAG('d','a','c','3'), mov_read_dac3 }, /* AC-3 info */
{ MKTAG('d','e','c','3'), mov_read_dec3 }, /* EAC-3 info */
{ MKTAG('w','i','d','e'), mov_read_wide }, /* place holder */
{ MKTAG('w','f','e','x'), mov_read_wfex },
{ MKTAG('c','m','o','v'), mov_read_cmov },
{ MKTAG('c','h','a','n'), mov_read_chan }, /* channel layout */
{ MKTAG('d','v','c','1'), mov_read_dvc1 },
{ MKTAG('s','b','g','p'), mov_read_sbgp },
{ MKTAG('h','v','c','C'), mov_read_glbl },
{ MKTAG('u','u','i','d'), mov_read_uuid },
{ MKTAG('C','i','n', 0x8e), mov_read_targa_y216 },
{ 0, NULL }
};
 
/* Generic container parser: iterate over the child atoms of 'atom',
 * dispatching each to its handler via mov_default_parse_table (or to the
 * udta string reader inside 'udta'/'ilst'), skipping unknown leaves, and
 * resynchronizing on handlers that under- or over-read their payload. */
static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    int64_t total_size = 0;
    MOVAtom a;
    int i;

    if (atom.size < 0)
        atom.size = INT64_MAX;
    while (total_size + 8 <= atom.size && !url_feof(pb)) {
        int (*parse)(MOVContext*, AVIOContext*, MOVAtom) = NULL;
        a.size = atom.size;
        a.type=0;
        if (atom.size >= 8) {
            a.size = avio_rb32(pb);
            a.type = avio_rl32(pb);
            /* trak/mdat are only valid at top level; hitting them elsewhere
             * means the parent atom's size was bogus -- rewind and bail */
            if (atom.type != MKTAG('r','o','o','t') &&
                atom.type != MKTAG('m','o','o','v'))
            {
                if (a.type == MKTAG('t','r','a','k') || a.type == MKTAG('m','d','a','t'))
                {
                    av_log(c->fc, AV_LOG_ERROR, "Broken file, trak/mdat not at top-level\n");
                    avio_skip(pb, -8);
                    return 0;
                }
            }
            total_size += 8;
            if (a.size == 1) { /* 64 bit extended size */
                a.size = avio_rb64(pb) - 8;
                total_size += 8;
            }
        }
        av_dlog(c->fc, "type: %08x '%.4s' parent:'%.4s' sz: %"PRId64" %"PRId64" %"PRId64"\n",
                a.type, (char*)&a.type, (char*)&atom.type, a.size, total_size, atom.size);
        if (a.size == 0) {
            /* size 0 means "extends to the end of the enclosing atom" */
            a.size = atom.size - total_size + 8;
        }
        a.size -= 8;
        if (a.size < 0)
            break;
        a.size = FFMIN(a.size, atom.size - total_size);

        for (i = 0; mov_default_parse_table[i].type; i++)
            if (mov_default_parse_table[i].type == a.type) {
                parse = mov_default_parse_table[i].parse;
                break;
            }

        // container is user data
        if (!parse && (atom.type == MKTAG('u','d','t','a') ||
                       atom.type == MKTAG('i','l','s','t')))
            parse = mov_read_udta_string;

        if (!parse) { /* skip leaf atoms data */
            avio_skip(pb, a.size);
        } else {
            int64_t start_pos = avio_tell(pb);
            int64_t left;
            int err = parse(c, pb, a);
            if (err < 0)
                return err;
            /* once both moov and mdat are found, stop early on unseekable
             * input and remember where the next root atom starts */
            if (c->found_moov && c->found_mdat &&
                ((!pb->seekable || c->fc->flags & AVFMT_FLAG_IGNIDX) ||
                 start_pos + a.size == avio_size(pb))) {
                if (!pb->seekable || c->fc->flags & AVFMT_FLAG_IGNIDX)
                    c->next_root_atom = start_pos + a.size;
                return 0;
            }
            left = a.size - avio_tell(pb) + start_pos;
            if (left > 0) /* skip garbage at atom end */
                avio_skip(pb, left);
            else if (left < 0) {
                av_log(c->fc, AV_LOG_WARNING,
                       "overread end of atom '%.4s' by %"PRId64" bytes\n",
                       (char*)&a.type, -left);
                avio_seek(pb, left, SEEK_CUR);
            }
        }

        total_size += a.size;
    }

    /* skip any trailing bytes the loop did not consume (small atoms only) */
    if (total_size < atom.size && atom.size < 0x7ffff)
        avio_skip(pb, atom.size - total_size);

    return 0;
}
 
/* Probe callback: walk the buffer as a chain of (size, fourcc) atom headers
 * and score by which top-level atom types appear.  If a moov was seen, also
 * scan for an 'hdlr'/'mhlr'/'MPEG' handler atom to detect MOV-wrapped
 * MPEG-PS, returning a deliberately low score so mpegps_probe wins. */
static int mov_probe(AVProbeData *p)
{
    int64_t offset;
    uint32_t tag;
    int score = 0;
    int moov_offset = -1; /* buffer offset just past the 'moov' size field */

    /* check file header */
    offset = 0;
    for (;;) {
        /* ignore invalid offset */
        if ((offset + 8) > (unsigned int)p->buf_size)
            break;
        tag = AV_RL32(p->buf + offset + 4);
        switch(tag) {
        /* check for obvious tags */
        case MKTAG('m','o','o','v'):
            moov_offset = offset + 4;
            /* fallthrough — 'moov' is scored like the strong tags below */
        case MKTAG('j','P',' ',' '): /* jpeg 2000 signature */
        case MKTAG('m','d','a','t'):
        case MKTAG('p','n','o','t'): /* detect movs with preview pics like ew.mov and april.mov */
        case MKTAG('u','d','t','a'): /* Packet Video PVAuthor adds this and a lot of more junk */
        case MKTAG('f','t','y','p'):
            /* an implausible atom size (<8 that is not the 64-bit-size
             * marker 1 with a nonzero extended size) only earns the
             * extension-level score */
            if (AV_RB32(p->buf+offset) < 8 &&
                (AV_RB32(p->buf+offset) != 1 ||
                 offset + 12 > (unsigned int)p->buf_size ||
                 AV_RB64(p->buf+offset + 8) == 0)) {
                score = FFMAX(score, AVPROBE_SCORE_EXTENSION);
            } else {
                score = AVPROBE_SCORE_MAX;
            }
            /* advance by the atom size, at least 4 to guarantee progress */
            offset = FFMAX(4, AV_RB32(p->buf+offset)) + offset;
            break;
        /* those are more common words, so rate then a bit less */
        case MKTAG('e','d','i','w'): /* xdcam files have reverted first tags */
        case MKTAG('w','i','d','e'):
        case MKTAG('f','r','e','e'):
        case MKTAG('j','u','n','k'):
        case MKTAG('p','i','c','t'):
            score = FFMAX(score, AVPROBE_SCORE_MAX - 5);
            offset = FFMAX(4, AV_RB32(p->buf+offset)) + offset;
            break;
        case MKTAG(0x82,0x82,0x7f,0x7d):
        case MKTAG('s','k','i','p'):
        case MKTAG('u','u','i','d'):
        case MKTAG('p','r','f','l'):
            /* if we only find those cause probedata is too small at least rate them */
            score = FFMAX(score, AVPROBE_SCORE_EXTENSION);
            offset = FFMAX(4, AV_RB32(p->buf+offset)) + offset;
            break;
        default:
            offset = FFMAX(4, AV_RB32(p->buf+offset)) + offset;
        }
    }
    if (score > AVPROBE_SCORE_MAX - 50 && moov_offset != -1) {
        /* moov atom in the header - we should make sure that this is not a
         * MOV-packed MPEG-PS */
        offset = moov_offset;

        while (offset < (p->buf_size - 16)) { /* Sufficient space */
            /* We found an actual hdlr atom */
            if (AV_RL32(p->buf + offset     ) == MKTAG('h','d','l','r') &&
                AV_RL32(p->buf + offset +  8) == MKTAG('m','h','l','r') &&
                AV_RL32(p->buf + offset + 12) == MKTAG('M','P','E','G')) {
                av_log(NULL, AV_LOG_WARNING, "Found media data tag MPEG indicating this is a MOV-packed MPEG-PS.\n");
                /* We found a media handler reference atom describing an
                 * MPEG-PS-in-MOV, return a
                 * low score to force expanding the probe window until
                 * mpegps_probe finds what it needs */
                return 5;
            } else
                /* Keep looking */
                offset += 2;
        }
    }

    return score;
}
 
// must be done after parsing all trak because there's no order requirement
/* Turn the samples of the referenced QT chapter track into AVChapters.
 * Each sample is a 16-bit big-endian length followed by the title, which is
 * UTF-16 (BE/LE by BOM) or otherwise treated as raw/UTF-8 bytes. */
static void mov_read_chapters(AVFormatContext *s)
{
    MOVContext *mov = s->priv_data;
    AVStream *st = NULL;
    MOVStreamContext *sc;
    int64_t cur_pos;
    int i;

    /* locate the stream whose track id matches chapter_track */
    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->id == mov->chapter_track) {
            st = s->streams[i];
            break;
        }
    if (!st) {
        av_log(s, AV_LOG_ERROR, "Referenced QT chapter track not found\n");
        return;
    }

    /* the chapter track itself should not be demuxed as packets */
    st->discard = AVDISCARD_ALL;
    sc = st->priv_data;
    cur_pos = avio_tell(sc->pb); /* restored on exit */

    for (i = 0; i < st->nb_index_entries; i++) {
        AVIndexEntry *sample = &st->index_entries[i];
        /* a chapter ends where the next one begins; the last one ends at
         * the track duration */
        int64_t end = i+1 < st->nb_index_entries ? st->index_entries[i+1].timestamp : st->duration;
        uint8_t *title;
        uint16_t ch;
        int len, title_len;

        if (avio_seek(sc->pb, sample->pos, SEEK_SET) != sample->pos) {
            av_log(s, AV_LOG_ERROR, "Chapter %d not found in file\n", i);
            goto finish;
        }

        // the first two bytes are the length of the title
        len = avio_rb16(sc->pb);
        if (len > sample->size-2)
            continue; /* stated title length exceeds the sample: skip entry */
        title_len = 2*len + 1; /* room for UTF-16 expansion plus NUL */
        if (!(title = av_mallocz(title_len)))
            goto finish;

        // The samples could theoretically be in any encoding if there's an encd
        // atom following, but in practice are only utf-8 or utf-16, distinguished
        // instead by the presence of a BOM
        if (!len) {
            title[0] = 0;
        } else {
            ch = avio_rb16(sc->pb);
            if (ch == 0xfeff)
                avio_get_str16be(sc->pb, len, title, title_len);
            else if (ch == 0xfffe)
                avio_get_str16le(sc->pb, len, title, title_len);
            else {
                /* no BOM: the two bytes already read are the first two
                 * title bytes; copy them back and read the rest verbatim */
                AV_WB16(title, ch);
                if (len == 1 || len == 2)
                    title[len] = 0;
                else
                    avio_get_str(sc->pb, INT_MAX, title + 2, len - 1);
            }
        }

        avpriv_new_chapter(s, i, st->time_base, sample->timestamp, end, title);
        av_freep(&title);
    }
finish:
    avio_seek(sc->pb, cur_pos, SEEK_SET);
}
 
/* Format the frame-number counter 'value' as a timecode string (using the
 * stream's codec time base as the rate) and store it in the stream's
 * "timecode" metadata.  Returns 0 on success or the av_timecode_init()
 * error code. */
static int parse_timecode_in_framenum_format(AVFormatContext *s, AVStream *st,
                                             uint32_t value, int flags)
{
    char tcbuf[AV_TIMECODE_STR_SIZE];
    AVTimecode tc;
    AVRational rate = { st->codec->time_base.den, st->codec->time_base.num };
    int err = av_timecode_init(&tc, rate, flags, 0, s);

    if (err < 0)
        return err;
    av_dict_set(&st->metadata, "timecode",
                av_timecode_make_string(&tc, tcbuf, value), 0);
    return 0;
}
 
/* Read the first 32-bit counter value of a tmcd track, map the tmcd flags
 * to AV_TIMECODE_FLAG_* and export the result as "timecode" metadata on
 * the stream.  Returns 0 on success, -1 if the track has no samples. */
static int mov_read_timecode_track(AVFormatContext *s, AVStream *st)
{
    MOVStreamContext *sc = st->priv_data;
    int flags = 0;
    int64_t cur_pos = avio_tell(sc->pb); /* restored before returning */
    uint32_t value;

    if (!st->nb_index_entries)
        return -1;

    avio_seek(sc->pb, st->index_entries->pos, SEEK_SET);
    /* NOTE(review): the seek above is on sc->pb but the read below uses
     * s->pb; equivalent only when the track data lives in the main file —
     * verify for tracks that use an external data reference. */
    value = avio_rb32(s->pb);

    if (sc->tmcd_flags & 0x0001) flags |= AV_TIMECODE_FLAG_DROPFRAME;
    if (sc->tmcd_flags & 0x0002) flags |= AV_TIMECODE_FLAG_24HOURSMAX;
    if (sc->tmcd_flags & 0x0004) flags |= AV_TIMECODE_FLAG_ALLOWNEGATIVE;

    /* Assume Counter flag is set to 1 in tmcd track (even though it is likely
     * not the case) and thus assume "frame number format" instead of QT one.
     * No sample with tmcd track can be found with a QT timecode at the moment,
     * despite what the tmcd track "suggests" (Counter flag set to 0 means QT
     * format). */
    parse_timecode_in_framenum_format(s, st, value, flags);

    avio_seek(sc->pb, cur_pos, SEEK_SET);
    return 0;
}
 
/* read_close callback: release everything the demuxer allocated — the
 * per-stream sample tables and data references, any per-stream AVIOContext
 * that was opened (not merely copied), the embedded DV demuxer state, and
 * the context-level trex/bitrate arrays.  Always returns 0. */
static int mov_read_close(AVFormatContext *s)
{
    MOVContext *mov = s->priv_data;
    int n, k;

    for (n = 0; n < s->nb_streams; n++) {
        MOVStreamContext *sc = s->streams[n]->priv_data;

        /* per-entry dref strings, then the dref array itself */
        for (k = 0; k < sc->drefs_count; k++) {
            av_freep(&sc->drefs[k].path);
            av_freep(&sc->drefs[k].dir);
        }
        av_freep(&sc->drefs);

        /* only close the byte context if this stream owns it */
        if (!sc->pb_is_copied)
            avio_close(sc->pb);
        sc->pb = NULL;

        /* sample/chunk tables */
        av_freep(&sc->ctts_data);
        av_freep(&sc->chunk_offsets);
        av_freep(&sc->keyframes);
        av_freep(&sc->sample_sizes);
        av_freep(&sc->stps_data);
        av_freep(&sc->stsc_data);
        av_freep(&sc->stts_data);
    }

    if (mov->dv_demux) {
        for (n = 0; n < mov->dv_fctx->nb_streams; n++) {
            av_freep(&mov->dv_fctx->streams[n]->codec);
            av_freep(&mov->dv_fctx->streams[n]);
        }
        av_freep(&mov->dv_fctx);
        av_freep(&mov->dv_demux);
    }

    av_freep(&mov->trex_data);
    av_freep(&mov->bitrates);

    return 0;
}
 
/* Return 1 if any video stream's timecode_track references the timecode
 * track id 'tmcd_id', 0 otherwise. */
static int tmcd_is_referenced(AVFormatContext *s, int tmcd_id)
{
    int n;

    for (n = 0; n < s->nb_streams; n++) {
        AVStream *st = s->streams[n];
        MOVStreamContext *sc = st->priv_data;

        if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO)
            continue;
        if (sc->timecode_track == tmcd_id)
            return 1;
    }
    return 0;
}
 
/* look for a tmcd track not referenced by any video track, and export it globally */
static void export_orphan_timecode(AVFormatContext *s)
{
    int n;

    for (n = 0; n < s->nb_streams; n++) {
        AVStream *st = s->streams[n];
        AVDictionaryEntry *tcr;

        if (st->codec->codec_tag != MKTAG('t','m','c','d'))
            continue;                       /* not a timecode track */
        if (tmcd_is_referenced(s, n + 1))
            continue;                       /* owned by some video stream */

        tcr = av_dict_get(st->metadata, "timecode", NULL, 0);
        if (tcr) {
            /* first orphan with a timecode wins */
            av_dict_set(&s->metadata, "timecode", tcr->value, 0);
            break;
        }
    }
}
 
/* read_header callback: parse all top-level atoms under a synthetic 'root'
 * atom, then post-process — chapters, tmcd timecode export, timescale
 * fixup, AAC start padding, and bitrate derivation. */
static int mov_read_header(AVFormatContext *s)
{
    MOVContext *mov = s->priv_data;
    AVIOContext *pb = s->pb;
    int i, j, err;
    MOVAtom atom = { AV_RL32("root") };

    mov->fc = s;
    /* .mov and .mp4 aren't streamable anyway (only progressive download if moov is before mdat) */
    if (pb->seekable)
        atom.size = avio_size(pb);
    else
        atom.size = INT64_MAX;

    /* check MOV header */
    if ((err = mov_read_default(mov, pb, atom)) < 0) {
        av_log(s, AV_LOG_ERROR, "error reading header: %d\n", err);
        mov_read_close(s);
        return err;
    }
    if (!mov->found_moov) {
        av_log(s, AV_LOG_ERROR, "moov atom not found\n");
        mov_read_close(s);
        return AVERROR_INVALIDDATA;
    }
    av_dlog(mov->fc, "on_parse_exit_offset=%"PRId64"\n", avio_tell(pb));

    /* chapter and timecode extraction need to seek back into the file */
    if (pb->seekable) {
        if (mov->chapter_track > 0)
            mov_read_chapters(s);
        for (i = 0; i < s->nb_streams; i++)
            if (s->streams[i]->codec->codec_tag == AV_RL32("tmcd"))
                mov_read_timecode_track(s, s->streams[i]);
    }

    /* copy timecode metadata from tmcd tracks to the related video streams */
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        MOVStreamContext *sc = st->priv_data;
        if (sc->timecode_track > 0) {
            AVDictionaryEntry *tcr;
            int tmcd_st_id = -1;

            /* find the stream whose track id is the referenced tmcd track */
            for (j = 0; j < s->nb_streams; j++)
                if (s->streams[j]->id == sc->timecode_track)
                    tmcd_st_id = j;

            if (tmcd_st_id < 0 || tmcd_st_id == i)
                continue;
            tcr = av_dict_get(s->streams[tmcd_st_id]->metadata, "timecode", NULL, 0);
            if (tcr)
                av_dict_set(&st->metadata, "timecode", tcr->value, 0);
        }
    }
    export_orphan_timecode(s);

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        MOVStreamContext *sc = st->priv_data;
        fix_timescale(mov, sc);
        /* export AAC priming samples as initial skip */
        if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->codec->codec_id == AV_CODEC_ID_AAC) {
            st->skip_samples = sc->start_pad;
        }
    }

    /* for fragmented files (trex present) derive bitrates from data size */
    if (mov->trex_data) {
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            MOVStreamContext *sc = st->priv_data;
            if (st->duration > 0)
                st->codec->bit_rate = sc->data_size * 8 * sc->time_scale / st->duration;
        }
    }

    /* apply per-stream bitrates collected during atom parsing, if any */
    for (i = 0; i < mov->bitrates_count && i < s->nb_streams; i++) {
        if (mov->bitrates[i]) {
            s->streams[i]->codec->bit_rate = mov->bitrates[i];
        }
    }

    return 0;
}
 
/* Choose the next sample to demux across all streams and store its stream
 * in *st.  Non-seekable input: the sample with the lowest file offset wins
 * (strictly sequential reading).  Seekable input: lowest dts wins, except
 * that between samples on the main byte context whose dts are within one
 * second of each other, file order is preferred to reduce seeking.
 * Returns NULL when every stream is exhausted. */
static AVIndexEntry *mov_find_next_sample(AVFormatContext *s, AVStream **st)
{
    AVIndexEntry *sample = NULL;
    int64_t best_dts = INT64_MAX;
    int i;
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *avst = s->streams[i];
        MOVStreamContext *msc = avst->priv_data;
        if (msc->pb && msc->current_sample < avst->nb_index_entries) {
            AVIndexEntry *current_sample = &avst->index_entries[msc->current_sample];
            /* dts rescaled to a common AV_TIME_BASE for cross-stream compare */
            int64_t dts = av_rescale(current_sample->timestamp, AV_TIME_BASE, msc->time_scale);
            av_dlog(s, "stream %d, sample %d, dts %"PRId64"\n", i, msc->current_sample, dts);
            if (!sample || (!s->pb->seekable && current_sample->pos < sample->pos) ||
                (s->pb->seekable &&
                 ((msc->pb != s->pb && dts < best_dts) || (msc->pb == s->pb &&
                 ((FFABS(best_dts - dts) <= AV_TIME_BASE && current_sample->pos < sample->pos) ||
                  (FFABS(best_dts - dts) > AV_TIME_BASE && dts < best_dts)))))) {
                sample = current_sample;
                best_dts = dts;
                *st = avst;
            }
        }
    }
    return sample;
}
 
/* read_packet callback: pick the next sample across all streams, read its
 * data, attach palette side data if pending, route DV-audio samples through
 * the embedded DV demuxer, and compute pts/dts/duration (with ctts-based
 * pts offsets when present).  When the current fragment is exhausted, parse
 * the next root-level atom recorded in next_root_atom and retry. */
static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MOVContext *mov = s->priv_data;
    MOVStreamContext *sc;
    AVIndexEntry *sample;
    AVStream *st = NULL;
    int ret;
    mov->fc = s;
 retry:
    sample = mov_find_next_sample(s, &st);
    if (!sample) {
        /* no samples left: try to parse the next fragment, if any */
        mov->found_mdat = 0;
        if (!mov->next_root_atom)
            return AVERROR_EOF;
        avio_seek(s->pb, mov->next_root_atom, SEEK_SET);
        mov->next_root_atom = 0;
        if (mov_read_default(mov, s->pb, (MOVAtom){ AV_RL32("root"), INT64_MAX }) < 0 ||
            url_feof(s->pb))
            return AVERROR_EOF;
        av_dlog(s, "read fragments, offset 0x%"PRIx64"\n", avio_tell(s->pb));
        goto retry;
    }
    sc = st->priv_data;
    /* must be done just before reading, to avoid infinite loop on sample */
    sc->current_sample++;

    /* clamp the sample so it cannot extend into the next fragment */
    if (mov->next_root_atom) {
        sample->pos  = FFMIN(sample->pos, mov->next_root_atom);
        sample->size = FFMIN(sample->size, (mov->next_root_atom - sample->pos));
    }

    if (st->discard != AVDISCARD_ALL) {
        if (avio_seek(sc->pb, sample->pos, SEEK_SET) != sample->pos) {
            av_log(mov->fc, AV_LOG_ERROR, "stream %d, offset 0x%"PRIx64": partial file\n",
                   sc->ffindex, sample->pos);
            return AVERROR_INVALIDDATA;
        }
        ret = av_get_packet(sc->pb, pkt, sample->size);
        if (ret < 0)
            return ret;
        /* attach a pending palette (once) as packet side data */
        if (sc->has_palette) {
            uint8_t *pal;

            pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
            if (!pal) {
                av_log(mov->fc, AV_LOG_ERROR, "Cannot append palette to packet\n");
            } else {
                memcpy(pal, sc->palette, AVPALETTE_SIZE);
                sc->has_palette = 0;
            }
        }
#if CONFIG_DV_DEMUXER
        /* DV audio is stored muxed with video: feed the raw data to the
         * DV demuxer and take its audio packet instead */
        if (mov->dv_demux && sc->dv_audio_container) {
            avpriv_dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size, pkt->pos);
            av_free(pkt->data);
            pkt->size = 0;
            ret = avpriv_dv_get_packet(mov->dv_demux, pkt);
            if (ret < 0)
                return ret;
        }
#endif
    }

    pkt->stream_index = sc->ffindex;
    pkt->dts = sample->timestamp;
    if (sc->ctts_data && sc->ctts_index < sc->ctts_count) {
        /* pts = dts + composition time offset from the ctts table */
        pkt->pts = pkt->dts + sc->dts_shift + sc->ctts_data[sc->ctts_index].duration;
        /* update ctts context */
        sc->ctts_sample++;
        if (sc->ctts_index < sc->ctts_count &&
            sc->ctts_data[sc->ctts_index].count == sc->ctts_sample) {
            sc->ctts_index++;
            sc->ctts_sample = 0;
        }
        if (sc->wrong_dts)
            pkt->dts = AV_NOPTS_VALUE;
    } else {
        /* no ctts: pts == dts, duration up to the next sample (or stream end) */
        int64_t next_dts = (sc->current_sample < st->nb_index_entries) ?
            st->index_entries[sc->current_sample].timestamp : st->duration;
        pkt->duration = next_dts - pkt->dts;
        pkt->pts = pkt->dts;
    }
    if (st->discard == AVDISCARD_ALL)
        goto retry;
    pkt->flags |= sample->flags & AVINDEX_KEYFRAME ? AV_PKT_FLAG_KEY : 0;
    pkt->pos = sample->pos;
    av_dlog(s, "stream %d, pts %"PRId64", dts %"PRId64", pos 0x%"PRIx64", duration %d\n",
            pkt->stream_index, pkt->pts, pkt->dts, pkt->pos, pkt->duration);
    return 0;
}
 
/* Position one stream at the index entry matching 'timestamp' (per the
 * av_index_search_timestamp() flags) and re-align its ctts cursor to that
 * sample.  Returns the sample index, or AVERROR_INVALIDDATA if no suitable
 * entry exists. */
static int mov_seek_stream(AVFormatContext *s, AVStream *st, int64_t timestamp, int flags)
{
    MOVStreamContext *sc = st->priv_data;
    int found;
    int i;

    found = av_index_search_timestamp(st, timestamp, flags);
    av_dlog(s, "stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, found);
    /* seeking before the first entry clamps to the first sample */
    if (found < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
        found = 0;
    if (found < 0) /* not sure what to do */
        return AVERROR_INVALIDDATA;

    sc->current_sample = found;
    av_dlog(s, "stream %d, found sample %d\n", st->index, sc->current_sample);

    /* adjust ctts index */
    if (sc->ctts_data) {
        int time_sample = 0;
        for (i = 0; i < sc->ctts_count; i++) {
            int next = time_sample + sc->ctts_data[i].count;
            if (next > sc->current_sample) {
                sc->ctts_index  = i;
                sc->ctts_sample = sc->current_sample - time_sample;
                break;
            }
            time_sample = next;
        }
    }
    return found;
}
 
/* read_seek callback: seek the requested stream first, then bring every
 * other stream to the timestamp actually found (rescaled into its own time
 * base) so all streams stay in sync.  Also re-arms AAC start-padding skip
 * when seeking back to the beginning. */
static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
{
    AVStream *st;
    int64_t seek_timestamp, timestamp;
    int sample;
    int i;

    if (stream_index >= s->nb_streams)
        return AVERROR_INVALIDDATA;

    st = s->streams[stream_index];
    sample = mov_seek_stream(s, st, sample_time, flags);
    if (sample < 0)
        return sample;

    /* adjust seek timestamp to found sample timestamp */
    seek_timestamp = st->index_entries[sample].timestamp;

    for (i = 0; i < s->nb_streams; i++) {
        MOVStreamContext *sc = s->streams[i]->priv_data;
        st = s->streams[i];
        /* seeking to (or before) the start re-enables priming-sample skip */
        st->skip_samples = (sample_time <= 0) ? sc->start_pad : 0;

        if (stream_index == i)
            continue;

        timestamp = av_rescale_q(seek_timestamp, s->streams[stream_index]->time_base, st->time_base);
        mov_seek_stream(s, st, timestamp, flags);
    }
    return 0;
}
 
/* Demuxer private options, exposed through mov_class. */
static const AVOption options[] = {
    {"use_absolute_path",
        "allow using absolute path when opening alias, this is a possible security issue",
        offsetof(MOVContext, use_absolute_path), FF_OPT_TYPE_INT, {.i64 = 0},
        0, 1, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_DECODING_PARAM},
    {"ignore_editlist", "", offsetof(MOVContext, ignore_editlist), FF_OPT_TYPE_INT, {.i64 = 0},
        0, 1, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_DECODING_PARAM},
    {NULL}
};
 
/* AVClass binding the private options above to the demuxer context. */
static const AVClass mov_class = {
    .class_name = "mov,mp4,m4a,3gp,3g2,mj2",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Demuxer registration for the QuickTime/ISO media family of containers. */
AVInputFormat ff_mov_demuxer = {
    .name           = "mov,mp4,m4a,3gp,3g2,mj2",
    .long_name      = NULL_IF_CONFIG_SMALL("QuickTime / MOV"),
    .priv_data_size = sizeof(MOVContext),
    .read_probe     = mov_probe,
    .read_header    = mov_read_header,
    .read_packet    = mov_read_packet,
    .read_close     = mov_read_close,
    .read_seek      = mov_read_seek,
    .priv_class     = &mov_class,
    .flags          = AVFMT_NO_BYTE_SEEK,
};
/contrib/sdk/sources/ffmpeg/libavformat/mov_chan.c
0,0 → 1,592
/*
* Copyright (c) 2011 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* mov 'chan' tag reading/writing.
* @author Justin Ruggles
*/
 
#include <stdint.h>
 
#include "libavutil/channel_layout.h"
#include "libavcodec/avcodec.h"
#include "mov_chan.h"
 
/**
 * Channel Layout Tag
 * This tells which channels are present in the audio stream and the order in
 * which they appear.
 *
 * @note We're using the channel layout tag to indicate channel order
 * when the value is greater than 0x10000. The Apple documentation has
 * some contradictions as to how this is actually supposed to be handled.
 *
 * Core Audio File Format Spec:
 * "The high 16 bits indicates a specific ordering of the channels."
 * Core Audio Data Types Reference:
 * "These identifiers specify the channels included in a layout but
 * do not specify a particular ordering of those channels."
 */
/* Each value encodes (layout id << 16) | channel count; the low 16 bits
 * are read back as the channel count in ff_mov_get_channel_layout(). */
enum MovChannelLayoutTag {
    MOV_CH_LAYOUT_UNKNOWN              = 0xFFFF0000,
    MOV_CH_LAYOUT_USE_DESCRIPTIONS     = (  0 << 16) | 0,
    MOV_CH_LAYOUT_USE_BITMAP           = (  1 << 16) | 0,
    MOV_CH_LAYOUT_DISCRETEINORDER      = (147 << 16) | 0,
    MOV_CH_LAYOUT_MONO                 = (100 << 16) | 1,
    MOV_CH_LAYOUT_STEREO               = (101 << 16) | 2,
    MOV_CH_LAYOUT_STEREOHEADPHONES     = (102 << 16) | 2,
    MOV_CH_LAYOUT_MATRIXSTEREO         = (103 << 16) | 2,
    MOV_CH_LAYOUT_MIDSIDE              = (104 << 16) | 2,
    MOV_CH_LAYOUT_XY                   = (105 << 16) | 2,
    MOV_CH_LAYOUT_BINAURAL             = (106 << 16) | 2,
    MOV_CH_LAYOUT_AMBISONIC_B_FORMAT   = (107 << 16) | 4,
    MOV_CH_LAYOUT_QUADRAPHONIC         = (108 << 16) | 4,
    MOV_CH_LAYOUT_PENTAGONAL           = (109 << 16) | 5,
    MOV_CH_LAYOUT_HEXAGONAL            = (110 << 16) | 6,
    MOV_CH_LAYOUT_OCTAGONAL            = (111 << 16) | 8,
    MOV_CH_LAYOUT_CUBE                 = (112 << 16) | 8,
    MOV_CH_LAYOUT_MPEG_3_0_A           = (113 << 16) | 3,
    MOV_CH_LAYOUT_MPEG_3_0_B           = (114 << 16) | 3,
    MOV_CH_LAYOUT_MPEG_4_0_A           = (115 << 16) | 4,
    MOV_CH_LAYOUT_MPEG_4_0_B           = (116 << 16) | 4,
    MOV_CH_LAYOUT_MPEG_5_0_A           = (117 << 16) | 5,
    MOV_CH_LAYOUT_MPEG_5_0_B           = (118 << 16) | 5,
    MOV_CH_LAYOUT_MPEG_5_0_C           = (119 << 16) | 5,
    MOV_CH_LAYOUT_MPEG_5_0_D           = (120 << 16) | 5,
    MOV_CH_LAYOUT_MPEG_5_1_A           = (121 << 16) | 6,
    MOV_CH_LAYOUT_MPEG_5_1_B           = (122 << 16) | 6,
    MOV_CH_LAYOUT_MPEG_5_1_C           = (123 << 16) | 6,
    MOV_CH_LAYOUT_MPEG_5_1_D           = (124 << 16) | 6,
    MOV_CH_LAYOUT_MPEG_6_1_A           = (125 << 16) | 7,
    MOV_CH_LAYOUT_MPEG_7_1_A           = (126 << 16) | 8,
    MOV_CH_LAYOUT_MPEG_7_1_B           = (127 << 16) | 8,
    MOV_CH_LAYOUT_MPEG_7_1_C           = (128 << 16) | 8,
    MOV_CH_LAYOUT_EMAGIC_DEFAULT_7_1   = (129 << 16) | 8,
    MOV_CH_LAYOUT_SMPTE_DTV            = (130 << 16) | 8,
    MOV_CH_LAYOUT_ITU_2_1              = (131 << 16) | 3,
    MOV_CH_LAYOUT_ITU_2_2              = (132 << 16) | 4,
    MOV_CH_LAYOUT_DVD_4                = (133 << 16) | 3,
    MOV_CH_LAYOUT_DVD_5                = (134 << 16) | 4,
    MOV_CH_LAYOUT_DVD_6                = (135 << 16) | 5,
    MOV_CH_LAYOUT_DVD_10               = (136 << 16) | 4,
    MOV_CH_LAYOUT_DVD_11               = (137 << 16) | 5,
    MOV_CH_LAYOUT_DVD_18               = (138 << 16) | 5,
    MOV_CH_LAYOUT_AUDIOUNIT_6_0        = (139 << 16) | 6,
    MOV_CH_LAYOUT_AUDIOUNIT_7_0        = (140 << 16) | 7,
    MOV_CH_LAYOUT_AUDIOUNIT_7_0_FRONT  = (148 << 16) | 7,
    MOV_CH_LAYOUT_AAC_6_0              = (141 << 16) | 6,
    MOV_CH_LAYOUT_AAC_6_1              = (142 << 16) | 7,
    MOV_CH_LAYOUT_AAC_7_0              = (143 << 16) | 7,
    MOV_CH_LAYOUT_AAC_OCTAGONAL        = (144 << 16) | 8,
    MOV_CH_LAYOUT_TMH_10_2_STD         = (145 << 16) | 16,
    MOV_CH_LAYOUT_TMH_10_2_FULL        = (146 << 16) | 21,
    MOV_CH_LAYOUT_AC3_1_0_1            = (149 << 16) | 2,
    MOV_CH_LAYOUT_AC3_3_0              = (150 << 16) | 3,
    MOV_CH_LAYOUT_AC3_3_1              = (151 << 16) | 4,
    MOV_CH_LAYOUT_AC3_3_0_1            = (152 << 16) | 4,
    MOV_CH_LAYOUT_AC3_2_1_1            = (153 << 16) | 4,
    MOV_CH_LAYOUT_AC3_3_1_1            = (154 << 16) | 5,
    MOV_CH_LAYOUT_EAC3_6_0_A           = (155 << 16) | 6,
    MOV_CH_LAYOUT_EAC3_7_0_A           = (156 << 16) | 7,
    MOV_CH_LAYOUT_EAC3_6_1_A           = (157 << 16) | 7,
    MOV_CH_LAYOUT_EAC3_6_1_B           = (158 << 16) | 7,
    MOV_CH_LAYOUT_EAC3_6_1_C           = (159 << 16) | 7,
    MOV_CH_LAYOUT_EAC3_7_1_A           = (160 << 16) | 8,
    MOV_CH_LAYOUT_EAC3_7_1_B           = (161 << 16) | 8,
    MOV_CH_LAYOUT_EAC3_7_1_C           = (162 << 16) | 8,
    MOV_CH_LAYOUT_EAC3_7_1_D           = (163 << 16) | 8,
    MOV_CH_LAYOUT_EAC3_7_1_E           = (164 << 16) | 8,
    MOV_CH_LAYOUT_EAC3_7_1_F           = (165 << 16) | 8,
    MOV_CH_LAYOUT_EAC3_7_1_G           = (166 << 16) | 8,
    MOV_CH_LAYOUT_EAC3_7_1_H           = (167 << 16) | 8,
    MOV_CH_LAYOUT_DTS_3_1              = (168 << 16) | 4,
    MOV_CH_LAYOUT_DTS_4_1              = (169 << 16) | 5,
    MOV_CH_LAYOUT_DTS_6_0_A            = (170 << 16) | 6,
    MOV_CH_LAYOUT_DTS_6_0_B            = (171 << 16) | 6,
    MOV_CH_LAYOUT_DTS_6_0_C            = (172 << 16) | 6,
    MOV_CH_LAYOUT_DTS_6_1_A            = (173 << 16) | 7,
    MOV_CH_LAYOUT_DTS_6_1_B            = (174 << 16) | 7,
    MOV_CH_LAYOUT_DTS_6_1_C            = (175 << 16) | 7,
    MOV_CH_LAYOUT_DTS_6_1_D            = (182 << 16) | 7,
    MOV_CH_LAYOUT_DTS_7_0              = (176 << 16) | 7,
    MOV_CH_LAYOUT_DTS_7_1              = (177 << 16) | 8,
    MOV_CH_LAYOUT_DTS_8_0_A            = (178 << 16) | 8,
    MOV_CH_LAYOUT_DTS_8_0_B            = (179 << 16) | 8,
    MOV_CH_LAYOUT_DTS_8_1_A            = (180 << 16) | 9,
    MOV_CH_LAYOUT_DTS_8_1_B            = (181 << 16) | 9,
};
 
/* One entry of a tag → layout translation table. */
struct MovChannelLayoutMap {
    uint32_t tag;    /* MOV channel layout tag (enum MovChannelLayoutTag) */
    uint64_t layout; /* equivalent libavutil AV_CH_* channel mask, or 0 */
};
 
/* Tags with no libavutil channel-mask equivalent (all map to 0);
 * tables are terminated by a {0, 0} sentinel entry. */
static const struct MovChannelLayoutMap mov_ch_layout_map_misc[] = {
    { MOV_CH_LAYOUT_USE_DESCRIPTIONS,   0 },
    { MOV_CH_LAYOUT_USE_BITMAP,         0 },
    { MOV_CH_LAYOUT_DISCRETEINORDER,    0 },
    { MOV_CH_LAYOUT_UNKNOWN,            0 },
    { MOV_CH_LAYOUT_TMH_10_2_STD,       0 }, // L,   R,   C,    Vhc, Lsd, Rsd,
                                             // Ls,  Rs,  Vhl,  Vhr, Lw,  Rw,
                                             // Csd, Cs,  LFE1, LFE2
    { MOV_CH_LAYOUT_TMH_10_2_FULL,      0 }, // L,   R,   C,    Vhc,  Lsd, Rsd,
                                             // Ls,  Rs,  Vhl,  Vhr,  Lw,  Rw,
                                             // Csd, Cs,  LFE1, LFE2, Lc,  Rc,
                                             // HI,  VI,  Haptic
    { 0, 0 },
};
 
/* Tag → mask table for 1-channel layouts (zero-terminated). */
static const struct MovChannelLayoutMap mov_ch_layout_map_1ch[] = {
    { MOV_CH_LAYOUT_MONO,               AV_CH_LAYOUT_MONO }, // C
    { 0, 0 },
};
 
/* Tag → mask table for 2-channel layouts (zero-terminated). */
static const struct MovChannelLayoutMap mov_ch_layout_map_2ch[] = {
    { MOV_CH_LAYOUT_STEREO,             AV_CH_LAYOUT_STEREO         }, // L, R
    { MOV_CH_LAYOUT_STEREOHEADPHONES,   AV_CH_LAYOUT_STEREO         }, // L, R
    { MOV_CH_LAYOUT_BINAURAL,           AV_CH_LAYOUT_STEREO         }, // L, R
    { MOV_CH_LAYOUT_MIDSIDE,            AV_CH_LAYOUT_STEREO         }, // C, sides
    { MOV_CH_LAYOUT_XY,                 AV_CH_LAYOUT_STEREO         }, // X (left), Y (right)

    { MOV_CH_LAYOUT_MATRIXSTEREO,       AV_CH_LAYOUT_STEREO_DOWNMIX }, // Lt, Rt

    { MOV_CH_LAYOUT_AC3_1_0_1,          AV_CH_LAYOUT_MONO |            // C, LFE
                                        AV_CH_LOW_FREQUENCY         },
    { 0, 0 },
};
 
/* Tag → mask table for 3-channel layouts (zero-terminated). */
static const struct MovChannelLayoutMap mov_ch_layout_map_3ch[] = {
    { MOV_CH_LAYOUT_MPEG_3_0_A,         AV_CH_LAYOUT_SURROUND }, // L, R, C
    { MOV_CH_LAYOUT_MPEG_3_0_B,         AV_CH_LAYOUT_SURROUND }, // C, L, R
    { MOV_CH_LAYOUT_AC3_3_0,            AV_CH_LAYOUT_SURROUND }, // L, C, R

    { MOV_CH_LAYOUT_ITU_2_1,            AV_CH_LAYOUT_2_1      }, // L, R, Cs

    { MOV_CH_LAYOUT_DVD_4,              AV_CH_LAYOUT_2POINT1  }, // L, R, LFE
    { 0, 0 },
};
 
/* Tag → mask table for 4-channel layouts (zero-terminated). */
static const struct MovChannelLayoutMap mov_ch_layout_map_4ch[] = {
    { MOV_CH_LAYOUT_AMBISONIC_B_FORMAT, 0 },                      // W, X, Y, Z

    { MOV_CH_LAYOUT_QUADRAPHONIC,       AV_CH_LAYOUT_QUAD    },   // L, R, Rls, Rrs

    { MOV_CH_LAYOUT_MPEG_4_0_A,         AV_CH_LAYOUT_4POINT0 },   // L, R, C, Cs
    { MOV_CH_LAYOUT_MPEG_4_0_B,         AV_CH_LAYOUT_4POINT0 },   // C, L, R, Cs
    { MOV_CH_LAYOUT_AC3_3_1,            AV_CH_LAYOUT_4POINT0 },   // L, C, R, Cs

    { MOV_CH_LAYOUT_ITU_2_2,            AV_CH_LAYOUT_2_2     },   // L, R, Ls, Rs

    { MOV_CH_LAYOUT_DVD_5,              AV_CH_LAYOUT_2_1 |        // L, R, LFE, Cs
                                        AV_CH_LOW_FREQUENCY  },
    { MOV_CH_LAYOUT_AC3_2_1_1,          AV_CH_LAYOUT_2_1 |        // L, R, Cs, LFE
                                        AV_CH_LOW_FREQUENCY  },

    { MOV_CH_LAYOUT_DVD_10,             AV_CH_LAYOUT_3POINT1 },   // L, R, C, LFE
    { MOV_CH_LAYOUT_AC3_3_0_1,          AV_CH_LAYOUT_3POINT1 },   // L, C, R, LFE
    { MOV_CH_LAYOUT_DTS_3_1,            AV_CH_LAYOUT_3POINT1 },   // C, L, R, LFE
    { 0, 0 },
};
 
/* Tag → mask table for 5-channel layouts (zero-terminated). */
static const struct MovChannelLayoutMap mov_ch_layout_map_5ch[] = {
    { MOV_CH_LAYOUT_PENTAGONAL,         AV_CH_LAYOUT_5POINT0_BACK }, // L, R, Rls, Rrs, C

    { MOV_CH_LAYOUT_MPEG_5_0_A,         AV_CH_LAYOUT_5POINT0      }, // L, R, C, Ls, Rs
    { MOV_CH_LAYOUT_MPEG_5_0_B,         AV_CH_LAYOUT_5POINT0      }, // L, R, Ls, Rs, C
    { MOV_CH_LAYOUT_MPEG_5_0_C,         AV_CH_LAYOUT_5POINT0      }, // L, C, R, Ls, Rs
    { MOV_CH_LAYOUT_MPEG_5_0_D,         AV_CH_LAYOUT_5POINT0      }, // C, L, R, Ls, Rs

    { MOV_CH_LAYOUT_DVD_6,              AV_CH_LAYOUT_2_2 |           // L, R, LFE, Ls, Rs
                                        AV_CH_LOW_FREQUENCY       },
    { MOV_CH_LAYOUT_DVD_18,             AV_CH_LAYOUT_2_2 |           // L, R, Ls, Rs, LFE
                                        AV_CH_LOW_FREQUENCY       },

    { MOV_CH_LAYOUT_DVD_11,             AV_CH_LAYOUT_4POINT1      }, // L, R, C, LFE, Cs
    { MOV_CH_LAYOUT_AC3_3_1_1,          AV_CH_LAYOUT_4POINT1      }, // L, C, R, Cs, LFE
    { MOV_CH_LAYOUT_DTS_4_1,            AV_CH_LAYOUT_4POINT1      }, // C, L, R, Cs, LFE
    { 0, 0 },
};
 
/* Tag → mask table for 6-channel layouts (zero-terminated). */
static const struct MovChannelLayoutMap mov_ch_layout_map_6ch[] = {
    { MOV_CH_LAYOUT_HEXAGONAL,          AV_CH_LAYOUT_HEXAGONAL      }, // L, R, Rls, Rrs, C, Cs
    { MOV_CH_LAYOUT_DTS_6_0_C,          AV_CH_LAYOUT_HEXAGONAL      }, // C, Cs, L, R, Rls, Rrs

    { MOV_CH_LAYOUT_MPEG_5_1_A,         AV_CH_LAYOUT_5POINT1        }, // L, R, C, LFE, Ls, Rs
    { MOV_CH_LAYOUT_MPEG_5_1_B,         AV_CH_LAYOUT_5POINT1        }, // L, R, Ls, Rs, C, LFE
    { MOV_CH_LAYOUT_MPEG_5_1_C,         AV_CH_LAYOUT_5POINT1        }, // L, C, R, Ls, Rs, LFE
    { MOV_CH_LAYOUT_MPEG_5_1_D,         AV_CH_LAYOUT_5POINT1        }, // C, L, R, Ls, Rs, LFE

    { MOV_CH_LAYOUT_AUDIOUNIT_6_0,      AV_CH_LAYOUT_6POINT0        }, // L, R, Ls, Rs, C, Cs
    { MOV_CH_LAYOUT_AAC_6_0,            AV_CH_LAYOUT_6POINT0        }, // C, L, R, Ls, Rs, Cs
    { MOV_CH_LAYOUT_EAC3_6_0_A,         AV_CH_LAYOUT_6POINT0        }, // L, C, R, Ls, Rs, Cs

    { MOV_CH_LAYOUT_DTS_6_0_A,          AV_CH_LAYOUT_6POINT0_FRONT  }, // Lc, Rc, L, R, Ls, Rs

    { MOV_CH_LAYOUT_DTS_6_0_B,          AV_CH_LAYOUT_5POINT0_BACK |    // C, L, R, Rls, Rrs, Ts
                                        AV_CH_TOP_CENTER            },
    { 0, 0 },
};
 
/* Tag → mask table for 7-channel layouts (zero-terminated). */
static const struct MovChannelLayoutMap mov_ch_layout_map_7ch[] = {
    { MOV_CH_LAYOUT_MPEG_6_1_A,          AV_CH_LAYOUT_6POINT1        }, // L, R, C, LFE, Ls, Rs, Cs
    { MOV_CH_LAYOUT_AAC_6_1,             AV_CH_LAYOUT_6POINT1        }, // C, L, R, Ls, Rs, Cs, LFE
    { MOV_CH_LAYOUT_EAC3_6_1_A,          AV_CH_LAYOUT_6POINT1        }, // L, C, R, Ls, Rs, LFE, Cs
    { MOV_CH_LAYOUT_DTS_6_1_D,           AV_CH_LAYOUT_6POINT1        }, // C, L, R, Ls, Rs, LFE, Cs

    { MOV_CH_LAYOUT_AUDIOUNIT_7_0,       AV_CH_LAYOUT_7POINT0        }, // L, R, Ls, Rs, C, Rls, Rrs
    { MOV_CH_LAYOUT_AAC_7_0,             AV_CH_LAYOUT_7POINT0        }, // C, L, R, Ls, Rs, Rls, Rrs
    { MOV_CH_LAYOUT_EAC3_7_0_A,          AV_CH_LAYOUT_7POINT0        }, // L, C, R, Ls, Rs, Rls, Rrs

    { MOV_CH_LAYOUT_AUDIOUNIT_7_0_FRONT, AV_CH_LAYOUT_7POINT0_FRONT  }, // L, R, Ls, Rs, C, Lc, Rc
    { MOV_CH_LAYOUT_DTS_7_0,             AV_CH_LAYOUT_7POINT0_FRONT  }, // Lc, C, Rc, L, R, Ls, Rs

    { MOV_CH_LAYOUT_EAC3_6_1_B,          AV_CH_LAYOUT_5POINT1 |         // L, C, R, Ls, Rs, LFE, Ts
                                         AV_CH_TOP_CENTER            },

    { MOV_CH_LAYOUT_EAC3_6_1_C,          AV_CH_LAYOUT_5POINT1 |         // L, C, R, Ls, Rs, LFE, Vhc
                                         AV_CH_TOP_FRONT_CENTER      },

    { MOV_CH_LAYOUT_DTS_6_1_A,           AV_CH_LAYOUT_6POINT1_FRONT  }, // Lc, Rc, L, R, Ls, Rs, LFE

    { MOV_CH_LAYOUT_DTS_6_1_B,           AV_CH_LAYOUT_5POINT1_BACK |    // C, L, R, Rls, Rrs, Ts, LFE
                                         AV_CH_TOP_CENTER            },

    { MOV_CH_LAYOUT_DTS_6_1_C,           AV_CH_LAYOUT_6POINT1_BACK   }, // C, Cs, L, R, Rls, Rrs, LFE
    { 0, 0 },
};
 
/* Tag → mask table for 8-channel layouts (zero-terminated). */
static const struct MovChannelLayoutMap mov_ch_layout_map_8ch[] = {
    { MOV_CH_LAYOUT_OCTAGONAL,           AV_CH_LAYOUT_OCTAGONAL      },  // L, R, Rls, Rrs, C, Cs, Ls, Rs
    { MOV_CH_LAYOUT_AAC_OCTAGONAL,       AV_CH_LAYOUT_OCTAGONAL      },  // C, L, R, Ls, Rs, Rls, Rrs, Cs

    { MOV_CH_LAYOUT_CUBE,                AV_CH_LAYOUT_QUAD |             // L, R, Rls, Rrs, Vhl, Vhr, Rlt, Rrt
                                         AV_CH_TOP_FRONT_LEFT |
                                         AV_CH_TOP_FRONT_RIGHT |
                                         AV_CH_TOP_BACK_LEFT |
                                         AV_CH_TOP_BACK_RIGHT        },

    { MOV_CH_LAYOUT_MPEG_7_1_A,          AV_CH_LAYOUT_7POINT1_WIDE   },  // L, R, C, LFE, Ls, Rs, Lc, Rc
    { MOV_CH_LAYOUT_MPEG_7_1_B,          AV_CH_LAYOUT_7POINT1_WIDE   },  // C, Lc, Rc, L, R, Ls, Rs, LFE
    { MOV_CH_LAYOUT_EMAGIC_DEFAULT_7_1,  AV_CH_LAYOUT_7POINT1_WIDE   },  // L, R, Ls, Rs, C, LFE, Lc, Rc
    { MOV_CH_LAYOUT_EAC3_7_1_B,          AV_CH_LAYOUT_7POINT1_WIDE   },  // L, C, R, Ls, Rs, LFE, Lc, Rc
    { MOV_CH_LAYOUT_DTS_7_1,             AV_CH_LAYOUT_7POINT1_WIDE   },  // Lc, C, Rc, L, R, Ls, Rs, LFE

    { MOV_CH_LAYOUT_MPEG_7_1_C,          AV_CH_LAYOUT_7POINT1        },  // L, R, C, LFE, Ls, Rs, Rls, Rrs
    { MOV_CH_LAYOUT_EAC3_7_1_A,          AV_CH_LAYOUT_7POINT1        },  // L, C, R, Ls, Rs, LFE, Rls, Rrs

    { MOV_CH_LAYOUT_SMPTE_DTV,           AV_CH_LAYOUT_5POINT1 |          // L, R, C, LFE, Ls, Rs, Lt, Rt
                                         AV_CH_LAYOUT_STEREO_DOWNMIX },

    { MOV_CH_LAYOUT_EAC3_7_1_C,          AV_CH_LAYOUT_5POINT1 |          // L, C, R, Ls, Rs, LFE, Lsd, Rsd
                                         AV_CH_SURROUND_DIRECT_LEFT |
                                         AV_CH_SURROUND_DIRECT_RIGHT },

    { MOV_CH_LAYOUT_EAC3_7_1_D,          AV_CH_LAYOUT_5POINT1 |          // L, C, R, Ls, Rs, LFE, Lw, Rw
                                         AV_CH_WIDE_LEFT |
                                         AV_CH_WIDE_RIGHT            },

    { MOV_CH_LAYOUT_EAC3_7_1_E,          AV_CH_LAYOUT_5POINT1 |          // L, C, R, Ls, Rs, LFE, Vhl, Vhr
                                         AV_CH_TOP_FRONT_LEFT |
                                         AV_CH_TOP_FRONT_RIGHT       },

    { MOV_CH_LAYOUT_EAC3_7_1_F,          AV_CH_LAYOUT_5POINT1 |          // L, C, R, Ls, Rs, LFE, Cs, Ts
                                         AV_CH_BACK_CENTER |
                                         AV_CH_TOP_CENTER            },

    { MOV_CH_LAYOUT_EAC3_7_1_G,          AV_CH_LAYOUT_5POINT1 |          // L, C, R, Ls, Rs, LFE, Cs, Vhc
                                         AV_CH_BACK_CENTER |
                                         AV_CH_TOP_FRONT_CENTER      },

    { MOV_CH_LAYOUT_EAC3_7_1_H,          AV_CH_LAYOUT_5POINT1 |          // L, C, R, Ls, Rs, LFE, Ts, Vhc
                                         AV_CH_TOP_CENTER |
                                         AV_CH_TOP_FRONT_CENTER      },

    { MOV_CH_LAYOUT_DTS_8_0_A,           AV_CH_LAYOUT_2_2 |              // Lc, Rc, L, R, Ls, Rs, Rls, Rrs
                                         AV_CH_BACK_LEFT |
                                         AV_CH_BACK_RIGHT |
                                         AV_CH_FRONT_LEFT_OF_CENTER |
                                         AV_CH_FRONT_RIGHT_OF_CENTER },

    { MOV_CH_LAYOUT_DTS_8_0_B,           AV_CH_LAYOUT_5POINT0 |          // Lc, C, Rc, L, R, Ls, Cs, Rs
                                         AV_CH_FRONT_LEFT_OF_CENTER |
                                         AV_CH_FRONT_RIGHT_OF_CENTER |
                                         AV_CH_BACK_CENTER           },
    { 0, 0 },
};
 
/* Tag → mask table for 9-channel layouts (zero-terminated). */
static const struct MovChannelLayoutMap mov_ch_layout_map_9ch[] = {
    { MOV_CH_LAYOUT_DTS_8_1_A,           AV_CH_LAYOUT_2_2 |              // Lc, Rc, L, R, Ls, Rs, Rls, Rrs, LFE
                                         AV_CH_BACK_LEFT |
                                         AV_CH_BACK_RIGHT |
                                         AV_CH_FRONT_LEFT_OF_CENTER |
                                         AV_CH_FRONT_RIGHT_OF_CENTER |
                                         AV_CH_LOW_FREQUENCY         },

    { MOV_CH_LAYOUT_DTS_8_1_B,           AV_CH_LAYOUT_7POINT1_WIDE |     // Lc, C, Rc, L, R, Ls, Cs, Rs, LFE
                                         AV_CH_BACK_CENTER           },
    { 0, 0 },
};
 
/* Per-channel-count dispatch: index 0 holds the "misc" table, index N the
 * table for N-channel layouts (indexed by tag & 0xFFFF, clamped to 0..9). */
static const struct MovChannelLayoutMap *mov_ch_layout_map[] = {
    mov_ch_layout_map_misc,
    mov_ch_layout_map_1ch,
    mov_ch_layout_map_2ch,
    mov_ch_layout_map_3ch,
    mov_ch_layout_map_4ch,
    mov_ch_layout_map_5ch,
    mov_ch_layout_map_6ch,
    mov_ch_layout_map_7ch,
    mov_ch_layout_map_8ch,
    mov_ch_layout_map_9ch,
};
 
/* Zero-terminated list of layout tags usable with AAC. */
static const enum MovChannelLayoutTag mov_ch_layouts_aac[] = {
    MOV_CH_LAYOUT_MONO,
    MOV_CH_LAYOUT_STEREO,
    MOV_CH_LAYOUT_AC3_1_0_1,
    MOV_CH_LAYOUT_MPEG_3_0_B,
    MOV_CH_LAYOUT_ITU_2_1,
    MOV_CH_LAYOUT_DVD_4,
    MOV_CH_LAYOUT_QUADRAPHONIC,
    MOV_CH_LAYOUT_MPEG_4_0_B,
    MOV_CH_LAYOUT_ITU_2_2,
    MOV_CH_LAYOUT_AC3_2_1_1,
    MOV_CH_LAYOUT_DTS_3_1,
    MOV_CH_LAYOUT_MPEG_5_0_D,
    MOV_CH_LAYOUT_DVD_18,
    MOV_CH_LAYOUT_DTS_4_1,
    MOV_CH_LAYOUT_MPEG_5_1_D,
    MOV_CH_LAYOUT_AAC_6_0,
    MOV_CH_LAYOUT_DTS_6_0_A,
    MOV_CH_LAYOUT_AAC_6_1,
    MOV_CH_LAYOUT_AAC_7_0,
    MOV_CH_LAYOUT_DTS_6_1_A,
    MOV_CH_LAYOUT_AAC_OCTAGONAL,
    MOV_CH_LAYOUT_MPEG_7_1_B,
    MOV_CH_LAYOUT_DTS_8_0_A,
    0,
};
 
/* Zero-terminated list of layout tags usable with AC-3. */
static const enum MovChannelLayoutTag mov_ch_layouts_ac3[] = {
    MOV_CH_LAYOUT_MONO,
    MOV_CH_LAYOUT_STEREO,
    MOV_CH_LAYOUT_AC3_1_0_1,
    MOV_CH_LAYOUT_AC3_3_0,
    MOV_CH_LAYOUT_ITU_2_1,
    MOV_CH_LAYOUT_DVD_4,
    MOV_CH_LAYOUT_AC3_3_1,
    MOV_CH_LAYOUT_ITU_2_2,
    MOV_CH_LAYOUT_AC3_2_1_1,
    MOV_CH_LAYOUT_AC3_3_0_1,
    MOV_CH_LAYOUT_MPEG_5_0_C,
    MOV_CH_LAYOUT_DVD_18,
    MOV_CH_LAYOUT_AC3_3_1_1,
    MOV_CH_LAYOUT_MPEG_5_1_C,
    0,
};
 
/* Zero-terminated list of layout tags usable with ALAC. */
static const enum MovChannelLayoutTag mov_ch_layouts_alac[] = {
    MOV_CH_LAYOUT_MONO,
    MOV_CH_LAYOUT_STEREO,
    MOV_CH_LAYOUT_MPEG_3_0_B,
    MOV_CH_LAYOUT_MPEG_4_0_B,
    MOV_CH_LAYOUT_MPEG_5_0_D,
    MOV_CH_LAYOUT_MPEG_5_1_D,
    MOV_CH_LAYOUT_AAC_6_1,
    MOV_CH_LAYOUT_MPEG_7_1_B,
    0,
};
 
/* Zero-terminated list of layout tags usable with PCM ("wav") codecs. */
static const enum MovChannelLayoutTag mov_ch_layouts_wav[] = {
    MOV_CH_LAYOUT_MONO,
    MOV_CH_LAYOUT_STEREO,
    MOV_CH_LAYOUT_MATRIXSTEREO,
    MOV_CH_LAYOUT_MPEG_3_0_A,
    MOV_CH_LAYOUT_QUADRAPHONIC,
    MOV_CH_LAYOUT_MPEG_5_0_A,
    MOV_CH_LAYOUT_MPEG_5_1_A,
    MOV_CH_LAYOUT_MPEG_6_1_A,
    MOV_CH_LAYOUT_MPEG_7_1_A,
    MOV_CH_LAYOUT_MPEG_7_1_C,
    MOV_CH_LAYOUT_SMPTE_DTV,
    0,
};
 
/* Per-codec list of allowed layout tags; terminated by AV_CODEC_ID_NONE.
 * All PCM variants share the "wav" list. */
static const struct {
    enum AVCodecID codec_id;
    const enum MovChannelLayoutTag *layouts;
} mov_codec_ch_layouts[] = {
    { AV_CODEC_ID_AAC,     mov_ch_layouts_aac  },
    { AV_CODEC_ID_AC3,     mov_ch_layouts_ac3  },
    { AV_CODEC_ID_ALAC,    mov_ch_layouts_alac },
    { AV_CODEC_ID_PCM_U8,    mov_ch_layouts_wav },
    { AV_CODEC_ID_PCM_S8,    mov_ch_layouts_wav },
    { AV_CODEC_ID_PCM_S16LE, mov_ch_layouts_wav },
    { AV_CODEC_ID_PCM_S16BE, mov_ch_layouts_wav },
    { AV_CODEC_ID_PCM_S24LE, mov_ch_layouts_wav },
    { AV_CODEC_ID_PCM_S24BE, mov_ch_layouts_wav },
    { AV_CODEC_ID_PCM_S32LE, mov_ch_layouts_wav },
    { AV_CODEC_ID_PCM_S32BE, mov_ch_layouts_wav },
    { AV_CODEC_ID_PCM_F32LE, mov_ch_layouts_wav },
    { AV_CODEC_ID_PCM_F32BE, mov_ch_layouts_wav },
    { AV_CODEC_ID_PCM_F64LE, mov_ch_layouts_wav },
    { AV_CODEC_ID_PCM_F64BE, mov_ch_layouts_wav },
    { AV_CODEC_ID_NONE,      NULL               },
};
 
/**
 * Translate a mov 'chan' layout tag into a channel-layout bitmask.
 *
 * @param tag    layout tag read from the file
 * @param bitmap channel bitmap, consulted only for MOV_CH_LAYOUT_USE_BITMAP
 * @return channel layout mask, or 0 when the tag is unknown or the layout
 *         must be built from per-channel descriptions instead
 */
uint64_t ff_mov_get_channel_layout(uint32_t tag, uint32_t bitmap)
{
    const struct MovChannelLayoutMap *map;
    int nb_channels;
    int idx = 0;

    /* per-channel descriptions: caller builds the layout via
     * ff_mov_get_channel_label() instead */
    if (tag == MOV_CH_LAYOUT_USE_DESCRIPTIONS)
        return 0;

    /* the bitmap itself is the layout (reject out-of-range bits) */
    if (tag == MOV_CH_LAYOUT_USE_BITMAP)
        return (bitmap < 0x40000) ? bitmap : 0;

    /* the low 16 bits of a layout tag carry the channel count */
    nb_channels = tag & 0xFFFF;
    if (nb_channels > 9)
        nb_channels = 0;
    map = mov_ch_layout_map[nb_channels];

    /* scan the per-count table; the {0,0} sentinel yields layout 0 */
    while (map[idx].tag && map[idx].tag != tag)
        idx++;
    return map[idx].layout;
}
 
/* Map one CoreAudio channel-label value onto its AV_CH_* bit,
 * or return 0 for labels we cannot represent. */
static uint32_t mov_get_channel_label(uint32_t label)
{
    /* labels 1..18 correspond directly to AV_CH_* bit positions */
    if (label >= 1 && label <= 18)
        return 1U << (label - 1);

    switch (label) {
    case 38:
        return AV_CH_STEREO_LEFT;
    case 39:
        return AV_CH_STEREO_RIGHT;
    default:
        return 0;
    }
}
 
/**
 * Pick the mov 'chan' layout tag to write for a codec/layout combination.
 * Falls back to MOV_CH_LAYOUT_USE_BITMAP when no tag matches but the layout
 * fits into a 18-bit channel bitmap.
 *
 * @param codec_id       codec of the track being written
 * @param channel_layout AV_CH_* layout mask of the track
 * @param[out] bitmap    set to the channel bitmap when the bitmap fallback
 *                       is used, 0 otherwise
 * @return layout tag, or 0 if neither a tag nor a bitmap applies
 */
uint32_t ff_mov_get_channel_layout_tag(enum AVCodecID codec_id,
                                       uint64_t channel_layout,
                                       uint32_t *bitmap)
{
    int i, j;
    uint32_t tag = 0;
    const enum MovChannelLayoutTag *layouts = NULL;

    /* find the layout list for the specified codec */
    for (i = 0; mov_codec_ch_layouts[i].codec_id != AV_CODEC_ID_NONE; i++) {
        if (mov_codec_ch_layouts[i].codec_id == codec_id)
            break;
    }
    if (mov_codec_ch_layouts[i].codec_id != AV_CODEC_ID_NONE)
        layouts = mov_codec_ch_layouts[i].layouts;

    if (layouts) {
        int channels;
        const struct MovChannelLayoutMap *layout_map;

        /* get the layout map based on the channel count */
        channels = av_get_channel_layout_nb_channels(channel_layout);
        if (channels > 9)
            channels = 0;
        layout_map = mov_ch_layout_map[channels];

        /* find the layout tag for the specified channel layout:
         * only consider tags whose embedded channel count matches, then
         * verify the tag resolves to exactly this layout mask */
        for (i = 0; layouts[i] != 0; i++) {
            if ((layouts[i] & 0xFFFF) != channels)
                continue;
            for (j = 0; layout_map[j].tag != 0; j++) {
                if (layout_map[j].tag == layouts[i] &&
                    layout_map[j].layout == channel_layout)
                    break;
            }
            if (layout_map[j].tag)
                break;
        }
        /* layouts[i] is 0 here when the scan exhausted the list */
        tag = layouts[i];
    }

    /* if no tag was found, use channel bitmap as a backup if possible
     * (only bits below 0x40000 are representable in the bitmap) */
    if (tag == 0 && channel_layout > 0 && channel_layout < 0x40000) {
        tag     = MOV_CH_LAYOUT_USE_BITMAP;
        *bitmap = (uint32_t)channel_layout;
    } else
        *bitmap = 0;

    /* TODO: set channel descriptions as a secondary backup */

    return tag;
}
 
/**
 * Parse a mov 'chan' atom and set the stream's channel layout.
 *
 * Reads the layout tag, bitmap and channel-description count, then either
 * builds a layout mask from the per-channel labels (layout_tag == 0) or
 * resolves the tag via ff_mov_get_channel_layout().  Any bytes of the atom
 * not consumed here are skipped.
 *
 * @param s    format context (used for logging)
 * @param pb   input, positioned just after the 'chan' atom header
 * @param st   stream whose codec channel_layout is updated
 * @param size remaining payload size of the atom
 * @return 0 on success, negative AVERROR on malformed data
 */
int ff_mov_read_chan(AVFormatContext *s, AVIOContext *pb, AVStream *st,
                     int64_t size)
{
    uint32_t layout_tag, bitmap, num_descr, label_mask;
    /* unsigned counter: num_descr is a uint32_t read from the file, so an
     * int counter would mix signedness and could overflow (UB) for huge
     * descriptor counts */
    uint32_t i;

    if (size < 12)
        return AVERROR_INVALIDDATA;

    layout_tag = avio_rb32(pb);
    bitmap     = avio_rb32(pb);
    num_descr  = avio_rb32(pb);

    av_dlog(s, "chan: layout=%u bitmap=%u num_descr=%u\n",
            layout_tag, bitmap, num_descr);

    /* descriptions that don't fit in the atom: ignore the whole atom */
    if (size < 12ULL + num_descr * 20ULL)
        return 0;

    label_mask = 0;
    for (i = 0; i < num_descr; i++) {
        uint32_t label;
        label = avio_rb32(pb);          // mChannelLabel
        avio_rb32(pb);                  // mChannelFlags
        avio_rl32(pb);                  // mCoordinates[0]
        avio_rl32(pb);                  // mCoordinates[1]
        avio_rl32(pb);                  // mCoordinates[2]
        size -= 20;
        if (layout_tag == 0) {
            uint32_t mask_incr = mov_get_channel_label(label);
            if (mask_incr == 0) {
                /* one unmappable label invalidates the whole mask */
                label_mask = 0;
                break;
            }
            label_mask |= mask_incr;
        }
    }
    if (layout_tag == 0) {
        if (label_mask)
            st->codec->channel_layout = label_mask;
    } else
        st->codec->channel_layout = ff_mov_get_channel_layout(layout_tag, bitmap);
    /* skip whatever remains of the atom (size was reduced per description) */
    avio_skip(pb, size - 12);

    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/mov_chan.h
0,0 → 1,68
/*
* Copyright (c) 2011 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* mov 'chan' tag reading/writing.
* @author Justin Ruggles
*/
 
#ifndef AVFORMAT_MOV_CHAN_H
#define AVFORMAT_MOV_CHAN_H
 
#include <stdint.h>
 
#include "libavcodec/avcodec.h"
#include "avformat.h"
 
/**
* Get the channel layout for the specified channel layout tag.
*
* @param[in] tag channel layout tag
* @param[out] bitmap channel bitmap (only used if needed)
* @return channel layout
*/
uint64_t ff_mov_get_channel_layout(uint32_t tag, uint32_t bitmap);
 
/**
* Get the channel layout tag for the specified codec id and channel layout.
* If the layout tag was not found, use a channel bitmap if possible.
*
* @param[in] codec_id codec id
* @param[in] channel_layout channel layout
* @param[out] bitmap channel bitmap
* @return channel layout tag
*/
uint32_t ff_mov_get_channel_layout_tag(enum AVCodecID codec_id,
uint64_t channel_layout,
uint32_t *bitmap);
 
/**
* Read 'chan' tag from the input stream.
*
* @param s AVFormatContext
* @param pb AVIOContext
* @param st The stream to set codec values for
* @param size Remaining size in the 'chan' tag
* @return 0 if ok, or negative AVERROR code on failure
*/
int ff_mov_read_chan(AVFormatContext *s, AVIOContext *pb, AVStream *st,
int64_t size);
 
#endif /* AVFORMAT_MOV_CHAN_H */
/contrib/sdk/sources/ffmpeg/libavformat/movenc.c
0,0 → 1,4344
/*
* MOV, 3GP, MP4 muxer
* Copyright (c) 2003 Thomas Raivio
* Copyright (c) 2004 Gildas Bazin <gbazin at videolan dot org>
* Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "movenc.h"
#include "avformat.h"
#include "avio_internal.h"
#include "riff.h"
#include "avio.h"
#include "isom.h"
#include "avc.h"
#include "libavcodec/get_bits.h"
#include "libavcodec/put_bits.h"
#include "libavcodec/vc1.h"
#include "internal.h"
#include "libavutil/avstring.h"
#include "libavutil/intfloat.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "rtpenc.h"
#include "mov_chan.h"
 
#undef NDEBUG
#include <assert.h>
 
/* AVOptions shared by all mov-family muxers (mov/mp4/3gp/ipod/ismv);
 * "movflags" is a flag group, the entries referencing it via the unit
 * string "movflags" are its named constants. */
static const AVOption options[] = {
    { "movflags", "MOV muxer flags", offsetof(MOVMuxContext, flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "rtphint", "Add RTP hint tracks", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_RTP_HINT}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "moov_size", "maximum moov size so it can be placed at the begin", offsetof(MOVMuxContext, reserved_moov_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, 0 },
    { "empty_moov", "Make the initial moov atom empty (not supported by QuickTime)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_EMPTY_MOOV}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "frag_keyframe", "Fragment at video keyframes", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FRAG_KEYFRAME}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "separate_moof", "Write separate moof/mdat atoms for each track", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_SEPARATE_MOOF}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "frag_custom", "Flush fragments on caller requests", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FRAG_CUSTOM}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "isml", "Create a live smooth streaming feed (for pushing to a publishing point)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_ISML}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "faststart", "Run a second pass to put the index (moov atom) at the beginning of the file", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FASTSTART}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "omit_tfhd_offset", "Omit the base data offset in tfhd atoms", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_OMIT_TFHD_OFFSET}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    FF_RTP_FLAG_OPTS(MOVMuxContext, rtp_flags),
    { "skip_iods", "Skip writing iods atom.", offsetof(MOVMuxContext, iods_skip), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM},
    { "iods_audio_profile", "iods audio profile atom.", offsetof(MOVMuxContext, iods_audio_profile), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 255, AV_OPT_FLAG_ENCODING_PARAM},
    { "iods_video_profile", "iods video profile atom.", offsetof(MOVMuxContext, iods_video_profile), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 255, AV_OPT_FLAG_ENCODING_PARAM},
    { "frag_duration", "Maximum fragment duration", offsetof(MOVMuxContext, max_fragment_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
    { "min_frag_duration", "Minimum fragment duration", offsetof(MOVMuxContext, min_fragment_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
    { "frag_size", "Maximum fragment size", offsetof(MOVMuxContext, max_fragment_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
    { "ism_lookahead", "Number of lookahead entries for ISM files", offsetof(MOVMuxContext, ism_lookahead), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
    { "use_editlist", "use edit list", offsetof(MOVMuxContext, use_editlist), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, AV_OPT_FLAG_ENCODING_PARAM},
    { "video_track_timescale", "set timescale of all video tracks", offsetof(MOVMuxContext, video_track_timescale), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
    { NULL },
};
 
/* Declares the AVClass for one muxer flavor (mov, mp4, ...); all flavors
 * share the same options[] table above. */
#define MOV_CLASS(flavor)\
static const AVClass flavor ## _muxer_class = {\
    .class_name = #flavor " muxer",\
    .item_name  = av_default_item_name,\
    .option     = options,\
    .version    = LIBAVUTIL_VERSION_INT,\
};
 
static int get_moov_size(AVFormatContext *s);
 
//FIXME support 64 bit variant with wide placeholders
//FIXME support 64 bit variant with wide placeholders
/* Backpatch the 32-bit size field of the atom that started at @pos with the
 * number of bytes written since, then restore the write position.
 * Returns the atom size. */
static int64_t update_size(AVIOContext *pb, int64_t pos)
{
    int64_t end       = avio_tell(pb);
    int64_t atom_size = end - pos;

    avio_seek(pb, pos, SEEK_SET);
    avio_wb32(pb, atom_size); /* rewrite size placeholder */
    avio_seek(pb, end, SEEK_SET);

    return atom_size;
}
 
/* Decide whether an edit list (edts) may be written: explicitly requested,
 * or left to default while not fragmenting (fragment duration is unknown
 * when the edts would have to be written). */
static int supports_edts(MOVMuxContext *mov)
{
    if (mov->use_editlist > 0)
        return 1;
    return mov->use_editlist < 0 && !(mov->flags & FF_MOV_FLAG_FRAGMENT);
}
 
/* A 64-bit chunk-offset atom (co64) is needed once the last written chunk
 * lands beyond what a 32-bit stco offset can address. */
static int co64_required(const MOVTrack *track)
{
    if (track->entry <= 0)
        return 0;
    return track->cluster[track->entry - 1].pos + track->data_offset > UINT32_MAX;
}
 
/* Chunk offset atom: writes "stco" (32-bit offsets) or "co64" (64-bit)
 * depending on whether any chunk lies beyond UINT32_MAX.
 * Returns the number of bytes written. */
static int mov_write_stco_tag(AVIOContext *pb, MOVTrack *track)
{
    int i;
    int mode64 = co64_required(track); // use 32 bit size variant if possible
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size */
    if (mode64)
        ffio_wfourcc(pb, "co64");
    else
        ffio_wfourcc(pb, "stco");
    avio_wb32(pb, 0); /* version & flags */
    avio_wb32(pb, track->chunkCount); /* entry count */
    for (i = 0; i < track->entry; i++) {
        /* only clusters that start a chunk contribute an offset entry */
        if (!track->cluster[i].chunkNum)
            continue;
        if (mode64 == 1)
            avio_wb64(pb, track->cluster[i].pos + track->data_offset);
        else
            avio_wb32(pb, track->cluster[i].pos + track->data_offset);
    }
    return update_size(pb, pos);
}
 
/* Sample size atom ("stsz"): if every sample has the same size, write the
 * compact form (one size + count); otherwise write a per-sample size table.
 * A cluster may hold several samples (cluster[i].entries), each assumed to
 * be size/entries bytes. Returns bytes written. */
static int mov_write_stsz_tag(AVIOContext *pb, MOVTrack *track)
{
    int equalChunks = 1;
    int i, j, entries = 0, tst = -1, oldtst = -1;

    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size */
    ffio_wfourcc(pb, "stsz");
    avio_wb32(pb, 0); /* version & flags */

    /* detect whether all samples share one size while counting them */
    for (i = 0; i < track->entry; i++) {
        tst = track->cluster[i].size / track->cluster[i].entries;
        if (oldtst != -1 && tst != oldtst)
            equalChunks = 0;
        oldtst = tst;
        entries += track->cluster[i].entries;
    }
    if (equalChunks && track->entry) {
        /* NOTE(review): the ternary is redundant here (track->entry was just
         * checked) but kept as-is */
        int sSize = track->entry ? track->cluster[0].size / track->cluster[0].entries : 0;
        sSize = FFMAX(1, sSize); // adpcm mono case could make sSize == 0
        avio_wb32(pb, sSize); // sample size
        avio_wb32(pb, entries); // sample count
    } else {
        avio_wb32(pb, 0); // sample size
        avio_wb32(pb, entries); // sample count
        for (i = 0; i < track->entry; i++) {
            for (j = 0; j < track->cluster[i].entries; j++) {
                avio_wb32(pb, track->cluster[i].size /
                          track->cluster[i].entries);
            }
        }
    }
    return update_size(pb, pos);
}
 
/* Sample to chunk atom ("stsc"): run-length encodes samples-per-chunk;
 * a new table entry is emitted whenever the per-chunk sample count changes.
 * The entry count is backpatched once known. Returns bytes written. */
static int mov_write_stsc_tag(AVIOContext *pb, MOVTrack *track)
{
    int index = 0, oldval = -1, i;
    int64_t entryPos, curpos;

    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size */
    ffio_wfourcc(pb, "stsc");
    avio_wb32(pb, 0); // version & flags
    entryPos = avio_tell(pb);
    avio_wb32(pb, track->chunkCount); // entry count (placeholder)
    for (i = 0; i < track->entry; i++) {
        if (oldval != track->cluster[i].samples_in_chunk && track->cluster[i].chunkNum) {
            avio_wb32(pb, track->cluster[i].chunkNum); // first chunk
            avio_wb32(pb, track->cluster[i].samples_in_chunk); // samples per chunk
            avio_wb32(pb, 0x1); // sample description index
            oldval = track->cluster[i].samples_in_chunk;
            index++;
        }
    }
    /* backpatch the real entry count */
    curpos = avio_tell(pb);
    avio_seek(pb, entryPos, SEEK_SET);
    avio_wb32(pb, index); // rewrite size
    avio_seek(pb, curpos, SEEK_SET);

    return update_size(pb, pos);
}
 
/* Sync sample atom: lists 1-based indices of samples carrying @flag —
 * "stss" for key frames (MOV_SYNC_SAMPLE), "stps" for partial sync.
 * The entry count is backpatched after the scan. Returns bytes written. */
static int mov_write_stss_tag(AVIOContext *pb, MOVTrack *track, uint32_t flag)
{
    int64_t curpos, entryPos;
    int i, index = 0;
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); // size
    ffio_wfourcc(pb, flag == MOV_SYNC_SAMPLE ? "stss" : "stps");
    avio_wb32(pb, 0); // version & flags
    entryPos = avio_tell(pb);
    avio_wb32(pb, track->entry); // entry count (placeholder)
    for (i = 0; i < track->entry; i++) {
        if (track->cluster[i].flags & flag) {
            avio_wb32(pb, i + 1); // sample numbers are 1-based
            index++;
        }
    }
    curpos = avio_tell(pb);
    avio_seek(pb, entryPos, SEEK_SET);
    avio_wb32(pb, index); // rewrite size
    avio_seek(pb, curpos, SEEK_SET);
    return update_size(pb, pos);
}
 
/* AMR decoder-config atom: "samr" in plain MOV, "damr" in mp4/3gp.
 * Fixed 0x11-byte payload; returns that size. */
static int mov_write_amr_tag(AVIOContext *pb, MOVTrack *track)
{
    avio_wb32(pb, 0x11); /* size */
    if (track->mode == MODE_MOV) ffio_wfourcc(pb, "samr");
    else                         ffio_wfourcc(pb, "damr");
    ffio_wfourcc(pb, "FFMP");
    avio_w8(pb, 0); /* decoder version */

    avio_wb16(pb, 0x81FF); /* Mode set (all modes for AMR_NB) */
    avio_w8(pb, 0x00);     /* Mode change period (no restriction) */
    avio_w8(pb, 0x01);     /* Frames per sample */
    return 0x11;
}
 
/* "dac3" atom: re-parse the first AC-3 syncframe header stored in vos_data
 * (skipping the 4-byte syncinfo) and repack the fields into the 3-byte
 * AC3SpecificBox. Returns 11 (atom size) or -1 when vos_data is too short. */
static int mov_write_ac3_tag(AVIOContext *pb, MOVTrack *track)
{
    GetBitContext gbc;
    PutBitContext pbc;
    uint8_t buf[3];
    int fscod, bsid, bsmod, acmod, lfeon, frmsizecod;

    if (track->vos_len < 7)
        return -1;

    avio_wb32(pb, 11);
    ffio_wfourcc(pb, "dac3");

    init_get_bits(&gbc, track->vos_data + 4, (track->vos_len - 4) * 8);
    fscod      = get_bits(&gbc, 2);
    frmsizecod = get_bits(&gbc, 6);
    bsid       = get_bits(&gbc, 5);
    bsmod      = get_bits(&gbc, 3);
    acmod      = get_bits(&gbc, 3);
    /* conditional mix-level fields depend on the channel mode (acmod) */
    if (acmod == 2) {
        skip_bits(&gbc, 2); // dsurmod
    } else {
        if ((acmod & 1) && acmod != 1)
            skip_bits(&gbc, 2); // cmixlev
        if (acmod & 4)
            skip_bits(&gbc, 2); // surmixlev
    }
    lfeon = get_bits1(&gbc);

    /* repack into the dac3 payload layout */
    init_put_bits(&pbc, buf, sizeof(buf));
    put_bits(&pbc, 2, fscod);
    put_bits(&pbc, 5, bsid);
    put_bits(&pbc, 3, bsmod);
    put_bits(&pbc, 3, acmod);
    put_bits(&pbc, 1, lfeon);
    put_bits(&pbc, 5, frmsizecod >> 1); // bit_rate_code
    put_bits(&pbc, 5, 0); // reserved

    flush_put_bits(&pbc);
    avio_write(pb, buf, sizeof(buf));

    return 11;
}
 
/**
 * This function writes extradata "as is".
 * Extradata must be formatted like a valid atom (with size and tag).
 *
 * @return number of bytes written (the extradata size)
 */
static int mov_write_extradata_tag(AVIOContext *pb, MOVTrack *track)
{
    avio_write(pb, track->enc->extradata, track->enc->extradata_size);
    return track->enc->extradata_size;
}
 
/* "enda" atom flagging little-endian PCM sample data; returns atom size. */
static int mov_write_enda_tag(AVIOContext *pb)
{
    avio_wb32(pb, 10);        /* atom size */
    ffio_wfourcc(pb, "enda");
    avio_wb16(pb, 1);         /* 1 = little endian */
    return 10;
}
 
/* "enda" atom flagging big-endian PCM sample data; returns atom size. */
static int mov_write_enda_tag_be(AVIOContext *pb)
{
    avio_wb32(pb, 10);        /* atom size */
    ffio_wfourcc(pb, "enda");
    avio_wb16(pb, 0);         /* 0 = big endian */
    return 10;
}
 
/* Write an MPEG-4 descriptor header: 1-byte tag followed by the length in
 * four 7-bit groups, continuation bit set on all but the last byte. */
static void put_descr(AVIOContext *pb, int tag, unsigned int size)
{
    int shift;

    avio_w8(pb, tag);
    for (shift = 21; shift > 0; shift -= 7)
        avio_w8(pb, (size >> shift) | 0x80);
    avio_w8(pb, size & 0x7F);
}
 
/* Average bitrate in bits/s over the whole track, from the accumulated
 * cluster sizes; 0 when the track has no duration yet. */
static unsigned compute_avg_bitrate(MOVTrack *track)
{
    uint64_t total_bytes = 0;
    int idx;

    if (!track->track_duration)
        return 0;
    for (idx = 0; idx < track->entry; idx++)
        total_bytes += track->cluster[idx].size;
    return total_bytes * 8 * track->timescale / track->track_duration;
}
 
/* "esds" atom: ES descriptor -> DecoderConfig descriptor (object type,
 * stream type, buffer size, bitrates, optional DecoderSpecificInfo from
 * vos_data) -> SL descriptor. Descriptor sizes are precomputed, so the
 * write order is fixed. Returns bytes written. */
static int mov_write_esds_tag(AVIOContext *pb, MOVTrack *track) // Basic
{
    int64_t pos = avio_tell(pb);
    int decoder_specific_info_len = track->vos_len ? 5 + track->vos_len : 0;
    unsigned avg_bitrate;

    avio_wb32(pb, 0); // size
    ffio_wfourcc(pb, "esds");
    avio_wb32(pb, 0); // Version

    // ES descriptor
    put_descr(pb, 0x03, 3 + 5+13 + decoder_specific_info_len + 5+1);
    avio_wb16(pb, track->track_id);
    avio_w8(pb, 0x00); // flags (= no flags)

    // DecoderConfig descriptor
    put_descr(pb, 0x04, 13 + decoder_specific_info_len);

    // Object type indication
    // MP2/MP3 above 24 kHz must be signalled as 11172-3 (0x6B)
    if ((track->enc->codec_id == AV_CODEC_ID_MP2 ||
         track->enc->codec_id == AV_CODEC_ID_MP3) &&
        track->enc->sample_rate > 24000)
        avio_w8(pb, 0x6B); // 11172-3
    else
        avio_w8(pb, ff_codec_get_tag(ff_mp4_obj_type, track->enc->codec_id));

    // the following fields is made of 6 bits to identify the streamtype (4 for video, 5 for audio)
    // plus 1 bit to indicate upstream and 1 bit set to 1 (reserved)
    if (track->enc->codec_type == AVMEDIA_TYPE_AUDIO)
        avio_w8(pb, 0x15); // flags (= Audiostream)
    else
        avio_w8(pb, 0x11); // flags (= Visualstream)

    avio_wb24(pb, track->enc->rc_buffer_size >> 3); // Buffersize DB

    avg_bitrate = compute_avg_bitrate(track);
    // maxbitrate (FIXME should be max rate in any 1 sec window)
    avio_wb32(pb, FFMAX3(track->enc->bit_rate, track->enc->rc_max_rate, avg_bitrate));
    avio_wb32(pb, avg_bitrate);

    if (track->vos_len) {
        // DecoderSpecific info descriptor
        put_descr(pb, 0x05, track->vos_len);
        avio_write(pb, track->vos_data, track->vos_len);
    }

    // SL descriptor
    put_descr(pb, 0x06, 1);
    avio_w8(pb, 0x02);
    return update_size(pb, pos);
}
 
/* True for little-endian PCM formats wider than 16 bits per sample. */
static int mov_pcm_le_gt16(enum AVCodecID codec_id)
{
    switch (codec_id) {
    case AV_CODEC_ID_PCM_S24LE:
    case AV_CODEC_ID_PCM_S32LE:
    case AV_CODEC_ID_PCM_F32LE:
    case AV_CODEC_ID_PCM_F64LE:
        return 1;
    default:
        return 0;
    }
}
 
/* True for big-endian PCM formats wider than 16 bits per sample. */
static int mov_pcm_be_gt16(enum AVCodecID codec_id)
{
    switch (codec_id) {
    case AV_CODEC_ID_PCM_S24BE:
    case AV_CODEC_ID_PCM_S32BE:
    case AV_CODEC_ID_PCM_F32BE:
    case AV_CODEC_ID_PCM_F64BE:
        return 1;
    default:
        return 0;
    }
}
 
/* Microsoft-style codec atom: the fourcc is the tag itself (byteswapped),
 * followed by a WAVEFORMATEX header. Also updates enc->codec_tag so the
 * WAV header is written with the right wFormatTag. Returns bytes written. */
static int mov_write_ms_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0);
    avio_wl32(pb, track->tag); // store it byteswapped
    track->enc->codec_tag = av_bswap16(track->tag >> 16);
    ff_put_wav_header(pb, track->enc);
    return update_size(pb, pos);
}
 
/* "wfex" atom: wraps a WAVEFORMATEX header (used e.g. for WMA Pro).
 * Returns bytes written. */
static int mov_write_wfex_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0);
    ffio_wfourcc(pb, "wfex");
    ff_put_wav_header(pb, track->enc);
    return update_size(pb, pos);
}
 
/* "chan" atom: writes the CoreAudio channel layout tag (and bitmap when the
 * bitmap fallback is used) for the track. Writes nothing and returns 0 when
 * no layout tag can be derived. Returns bytes written otherwise. */
static int mov_write_chan_tag(AVIOContext *pb, MOVTrack *track)
{
    uint32_t layout_tag, bitmap;
    int64_t pos = avio_tell(pb);

    layout_tag = ff_mov_get_channel_layout_tag(track->enc->codec_id,
                                               track->enc->channel_layout,
                                               &bitmap);
    if (!layout_tag) {
        av_log(track->enc, AV_LOG_WARNING, "not writing 'chan' tag due to "
               "lack of channel information\n");
        return 0;
    }

    avio_wb32(pb, 0);           // Size
    ffio_wfourcc(pb, "chan");   // Type
    avio_w8(pb, 0);             // Version
    avio_wb24(pb, 0);           // Flags
    avio_wb32(pb, layout_tag);  // mChannelLayoutTag
    avio_wb32(pb, bitmap);      // mChannelBitmap
    avio_wb32(pb, 0);           // mNumberChannelDescriptions

    return update_size(pb, pos);
}
 
/* QuickTime "wave" extension atom: "frma" (original format fourcc, except
 * for QDM2 whose extradata already embeds it), one codec-specific child
 * atom, then the mandatory null terminator atom. Returns bytes written. */
static int mov_write_wave_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);

    avio_wb32(pb, 0); /* size */
    ffio_wfourcc(pb, "wave");

    if (track->enc->codec_id != AV_CODEC_ID_QDM2) {
        avio_wb32(pb, 12); /* size */
        ffio_wfourcc(pb, "frma");
        avio_wl32(pb, track->tag);
    }

    if (track->enc->codec_id == AV_CODEC_ID_AAC) {
        /* useless atom needed by mplayer, ipod, not needed by quicktime */
        avio_wb32(pb, 12); /* size */
        ffio_wfourcc(pb, "mp4a");
        avio_wb32(pb, 0);
        mov_write_esds_tag(pb, track);
    } else if (mov_pcm_le_gt16(track->enc->codec_id)) {
        mov_write_enda_tag(pb);
    } else if (mov_pcm_be_gt16(track->enc->codec_id)) {
        mov_write_enda_tag_be(pb);
    } else if (track->enc->codec_id == AV_CODEC_ID_AMR_NB) {
        mov_write_amr_tag(pb, track);
    } else if (track->enc->codec_id == AV_CODEC_ID_AC3) {
        mov_write_ac3_tag(pb, track);
    } else if (track->enc->codec_id == AV_CODEC_ID_ALAC ||
               track->enc->codec_id == AV_CODEC_ID_QDM2) {
        mov_write_extradata_tag(pb, track);
    } else if (track->enc->codec_id == AV_CODEC_ID_ADPCM_MS ||
               track->enc->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV) {
        mov_write_ms_tag(pb, track);
    }

    /* terminator atom required at the end of a wave atom */
    avio_wb32(pb, 8); /* size */
    avio_wb32(pb, 0); /* null tag */

    return update_size(pb, pos);
}
 
/* Build the 7-byte VC1DecSpecStruc/VC1AdvDecSpecStruc for the "dvc1" atom
 * into @buf by scanning vos_data for an (advanced-profile) sequence header.
 * Only advanced profile is supported.
 *
 * @return 0 on success, AVERROR(ENOSYS) if no advanced-profile sequence
 *         header is found, AVERROR(ENOMEM) on allocation failure
 */
static int mov_write_dvc1_structs(MOVTrack *track, uint8_t *buf)
{
    uint8_t *unescaped;
    const uint8_t *start, *next, *end = track->vos_data + track->vos_len;
    int unescaped_size, seq_found = 0;
    int level = 0, interlace = 0;
    int packet_seq   = track->vc1_info.packet_seq;
    int packet_entry = track->vc1_info.packet_entry;
    int slices       = track->vc1_info.slices;
    PutBitContext pbc;

    if (track->start_dts == AV_NOPTS_VALUE) {
        /* No packets written yet, vc1_info isn't authoritative yet. */
        /* Assume inline sequence and entry headers. This will be
         * overwritten at the end if the file is seekable. */
        packet_seq = packet_entry = 1;
    }

    unescaped = av_mallocz(track->vos_len + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!unescaped)
        return AVERROR(ENOMEM);
    /* walk the start-code delimited units in the extradata */
    start = find_next_marker(track->vos_data, end);
    for (next = start; next < end; start = next) {
        GetBitContext gb;
        int size;
        next = find_next_marker(start + 4, end);
        size = next - start - 4;
        if (size <= 0)
            continue;
        unescaped_size = vc1_unescape_buffer(start + 4, size, unescaped);
        init_get_bits(&gb, unescaped, 8 * unescaped_size);
        if (AV_RB32(start) == VC1_CODE_SEQHDR) {
            int profile = get_bits(&gb, 2);
            if (profile != PROFILE_ADVANCED) {
                av_free(unescaped);
                return AVERROR(ENOSYS);
            }
            seq_found = 1;
            level = get_bits(&gb, 3);
            /* chromaformat, frmrtq_postproc, bitrtq_postproc, postprocflag,
             * width, height */
            skip_bits_long(&gb, 2 + 3 + 5 + 1 + 2*12);
            skip_bits(&gb, 1); /* broadcast */
            interlace = get_bits1(&gb);
            skip_bits(&gb, 4); /* tfcntrflag, finterpflag, reserved, psf */
        }
    }
    if (!seq_found) {
        av_free(unescaped);
        return AVERROR(ENOSYS);
    }

    init_put_bits(&pbc, buf, 7);
    /* VC1DecSpecStruc */
    put_bits(&pbc, 4, 12); /* profile - advanced */
    put_bits(&pbc, 3, level);
    put_bits(&pbc, 1, 0); /* reserved */
    /* VC1AdvDecSpecStruc */
    put_bits(&pbc, 3, level);
    put_bits(&pbc, 1, 0); /* cbr */
    put_bits(&pbc, 6, 0); /* reserved */
    put_bits(&pbc, 1, !interlace); /* no interlace */
    put_bits(&pbc, 1, !packet_seq); /* no multiple seq */
    put_bits(&pbc, 1, !packet_entry); /* no multiple entry */
    put_bits(&pbc, 1, !slices); /* no slice code */
    put_bits(&pbc, 1, 0); /* no bframe */
    put_bits(&pbc, 1, 0); /* reserved */
    put_bits32(&pbc, track->enc->time_base.den); /* framerate */
    flush_put_bits(&pbc);

    av_free(unescaped);

    return 0;
}
 
/* "dvc1" atom: decoder-spec struct built by mov_write_dvc1_structs()
 * followed by the raw VC-1 sequence headers. Records struct_offset so the
 * struct can be rewritten on finalize. Returns 0 or a negative error. */
static int mov_write_dvc1_tag(AVIOContext *pb, MOVTrack *track)
{
    uint8_t buf[7] = { 0 };
    int ret;

    if ((ret = mov_write_dvc1_structs(track, buf)) < 0)
        return ret;

    avio_wb32(pb, track->vos_len + 8 + sizeof(buf));
    ffio_wfourcc(pb, "dvc1");
    track->vc1_info.struct_offset = avio_tell(pb);
    avio_write(pb, buf, sizeof(buf));
    avio_write(pb, track->vos_data, track->vos_len);

    return 0;
}
 
/* Generic "glbl" atom carrying the codec extradata verbatim;
 * returns the atom size (header + payload). */
static int mov_write_glbl_tag(AVIOContext *pb, MOVTrack *track)
{
    avio_wb32(pb, track->vos_len + 8);          /* atom size */
    ffio_wfourcc(pb, "glbl");
    avio_write(pb, track->vos_data, track->vos_len);
    return 8 + track->vos_len;
}
 
/**
 * Compute flags for 'lpcm' tag.
 * See CoreAudioTypes and AudioStreamBasicDescription at Apple.
 * Flag bits: 1 = float, 2 = big endian, 4 = signed integer, 8 = packed.
 * Returns 0 for codecs that have no lpcm representation.
 */
static int mov_get_lpcm_flags(enum AVCodecID codec_id)
{
    switch (codec_id) {
    case AV_CODEC_ID_PCM_F32BE:
    case AV_CODEC_ID_PCM_F64BE:
        return 11;          /* float | big endian | packed */
    case AV_CODEC_ID_PCM_F32LE:
    case AV_CODEC_ID_PCM_F64LE:
        return 9;           /* float | packed */
    case AV_CODEC_ID_PCM_U8:
        return 10;          /* big endian | packed */
    case AV_CODEC_ID_PCM_S16BE:
    case AV_CODEC_ID_PCM_S24BE:
    case AV_CODEC_ID_PCM_S32BE:
        return 14;          /* big endian | signed | packed */
    case AV_CODEC_ID_PCM_S8:
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S24LE:
    case AV_CODEC_ID_PCM_S32LE:
        return 12;          /* signed | packed */
    default:
        return 0;
    }
}
 
/* Duration (in track timescale units) of one cluster: the DTS distance to
 * the next cluster, or to the end of the track for the last one.
 * Returns 0 for an out-of-range index. */
static int get_cluster_duration(MOVTrack *track, int cluster_idx)
{
    int64_t following_dts;

    if (cluster_idx >= track->entry)
        return 0;

    following_dts = (cluster_idx + 1 == track->entry)
                    ? track->track_duration + track->start_dts
                    : track->cluster[cluster_idx + 1].dts;

    return following_dts - track->cluster[cluster_idx].dts;
}
 
/* Samples-per-packet for the lpcm sample description: 1 for raw (CBR) PCM,
 * the common cluster duration when it is constant over the whole track,
 * 0 otherwise. */
static int get_samples_per_packet(MOVTrack *track)
{
    int idx, first_duration;

    /* use 1 for raw PCM */
    if (!track->audio_vbr)
        return 1;

    if (!track->entry)
        return 0;

    /* a constant per-cluster duration is the packet size; bail on the
     * first mismatch */
    first_duration = get_cluster_duration(track, 0);
    for (idx = 1; idx < track->entry; idx++)
        if (get_cluster_duration(track, idx) != first_duration)
            return 0;
    return first_duration;
}
 
/* Audio sample description entry (SoundDescription v0/v1/v2) plus the
 * codec-specific child atom. Version selection: v2 for timescales beyond
 * 16 bits (switching lpcm-capable codecs to the 'lpcm' tag), v1 for VBR
 * and >16-bit/ADPCM/QDM2 codecs in plain MOV, v0 otherwise.
 * Returns bytes written. */
static int mov_write_audio_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    int version = 0;
    uint32_t tag = track->tag;

    if (track->mode == MODE_MOV) {
        if (track->timescale > UINT16_MAX) {
            /* v0/v1 store the sample rate in 16 bits; must go to v2 */
            if (mov_get_lpcm_flags(track->enc->codec_id))
                tag = AV_RL32("lpcm");
            version = 2;
        } else if (track->audio_vbr || mov_pcm_le_gt16(track->enc->codec_id) ||
                   mov_pcm_be_gt16(track->enc->codec_id) ||
                   track->enc->codec_id == AV_CODEC_ID_ADPCM_MS ||
                   track->enc->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV ||
                   track->enc->codec_id == AV_CODEC_ID_QDM2) {
            version = 1;
        }
    }

    avio_wb32(pb, 0); /* size */
    avio_wl32(pb, tag); // store it byteswapped
    avio_wb32(pb, 0); /* Reserved */
    avio_wb16(pb, 0); /* Reserved */
    avio_wb16(pb, 1); /* Data-reference index, XXX  == 1 */

    /* SoundDescription */
    avio_wb16(pb, version); /* Version */
    avio_wb16(pb, 0); /* Revision level */
    avio_wb32(pb, 0); /* Reserved */

    if (version == 2) {
        /* v2: fixed stub fields, then 64-bit float rate and 32-bit fields */
        avio_wb16(pb, 3);
        avio_wb16(pb, 16);
        avio_wb16(pb, 0xfffe);
        avio_wb16(pb, 0);
        avio_wb32(pb, 0x00010000);
        avio_wb32(pb, 72);
        avio_wb64(pb, av_double2int(track->enc->sample_rate));
        avio_wb32(pb, track->enc->channels);
        avio_wb32(pb, 0x7F000000);
        avio_wb32(pb, av_get_bits_per_sample(track->enc->codec_id));
        avio_wb32(pb, mov_get_lpcm_flags(track->enc->codec_id));
        avio_wb32(pb, track->sample_size);
        avio_wb32(pb, get_samples_per_packet(track));
    } else {
        if (track->mode == MODE_MOV) {
            avio_wb16(pb, track->enc->channels);
            if (track->enc->codec_id == AV_CODEC_ID_PCM_U8 ||
                track->enc->codec_id == AV_CODEC_ID_PCM_S8)
                avio_wb16(pb, 8); /* bits per sample */
            else
                avio_wb16(pb, 16);
            avio_wb16(pb, track->audio_vbr ? -2 : 0); /* compression ID */
        } else { /* reserved for mp4/3gp */
            avio_wb16(pb, 2);
            avio_wb16(pb, 16);
            avio_wb16(pb, 0);
        }

        avio_wb16(pb, 0); /* packet size (= 0) */
        /* sample rate only fits when <= 16 bits */
        avio_wb16(pb, track->enc->sample_rate <= UINT16_MAX ?
                      track->enc->sample_rate : 0);
        avio_wb16(pb, 0); /* Reserved */
    }

    if (version == 1) { /* SoundDescription V1 extended info */
        if (mov_pcm_le_gt16(track->enc->codec_id) ||
            mov_pcm_be_gt16(track->enc->codec_id))
            avio_wb32(pb, 1); /*  must be 1 for  uncompressed formats */
        else
            avio_wb32(pb, track->enc->frame_size); /* Samples per packet */
        avio_wb32(pb, track->sample_size / track->enc->channels); /* Bytes per packet */
        avio_wb32(pb, track->sample_size); /* Bytes per frame */
        avio_wb32(pb, 2); /* Bytes per sample */
    }

    /* codec-specific child atom */
    if (track->mode == MODE_MOV &&
        (track->enc->codec_id == AV_CODEC_ID_AAC           ||
         track->enc->codec_id == AV_CODEC_ID_AC3           ||
         track->enc->codec_id == AV_CODEC_ID_AMR_NB        ||
         track->enc->codec_id == AV_CODEC_ID_ALAC          ||
         track->enc->codec_id == AV_CODEC_ID_ADPCM_MS      ||
         track->enc->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV ||
         track->enc->codec_id == AV_CODEC_ID_QDM2          ||
         (mov_pcm_le_gt16(track->enc->codec_id) && version==1) ||
         (mov_pcm_be_gt16(track->enc->codec_id) && version==1)))
        mov_write_wave_tag(pb, track);
    else if (track->tag == MKTAG('m','p','4','a'))
        mov_write_esds_tag(pb, track);
    else if (track->enc->codec_id == AV_CODEC_ID_AMR_NB)
        mov_write_amr_tag(pb, track);
    else if (track->enc->codec_id == AV_CODEC_ID_AC3)
        mov_write_ac3_tag(pb, track);
    else if (track->enc->codec_id == AV_CODEC_ID_ALAC)
        mov_write_extradata_tag(pb, track);
    else if (track->enc->codec_id == AV_CODEC_ID_WMAPRO)
        mov_write_wfex_tag(pb, track);
    else if (track->vos_len > 0)
        mov_write_glbl_tag(pb, track);

    if (track->mode == MODE_MOV && track->enc->codec_type == AVMEDIA_TYPE_AUDIO)
        mov_write_chan_tag(pb, track);

    return update_size(pb, pos);
}
 
/* H.263 decoder-info atom ("d263"); fixed 0xf-byte payload, returns it. */
static int mov_write_d263_tag(AVIOContext *pb)
{
    avio_wb32(pb, 0xf); /* size */
    ffio_wfourcc(pb, "d263");
    ffio_wfourcc(pb, "FFMP");
    avio_w8(pb, 0); /* decoder version */
    /* FIXME use AVCodecContext level/profile, when encoder will set values */
    avio_w8(pb, 0xa); /* level */
    avio_w8(pb, 0); /* profile */
    return 0xf;
}
 
/* "avcC" atom: H.264 AVCDecoderConfigurationRecord built from vos_data.
 * Returns bytes written. */
static int mov_write_avcc_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);

    avio_wb32(pb, 0);
    ffio_wfourcc(pb, "avcC");
    ff_isom_write_avcc(pb, track->vos_data, track->vos_len);
    return update_size(pb, pos);
}
 
/* also used by all avid codecs (dv, imx, meridien) and their variants */
/* Writes the Avid private atoms ACLR/APRG/ARES; several fields are reverse
 * engineered from QuickTime/Avid sample files (hence the "unknown"s).
 * Reads the dnxhd header in vos_data for the cid and interlace flag.
 * Always returns 0. */
static int mov_write_avid_tag(AVIOContext *pb, MOVTrack *track)
{
    int i;
    avio_wb32(pb, 24); /* size */
    ffio_wfourcc(pb, "ACLR");
    ffio_wfourcc(pb, "ACLR");
    ffio_wfourcc(pb, "0001");
    avio_wb32(pb, 2); /* yuv range: full 1 / normal 2 */
    avio_wb32(pb, 0); /* unknown */

    avio_wb32(pb, 24); /* size */
    ffio_wfourcc(pb, "APRG");
    ffio_wfourcc(pb, "APRG");
    ffio_wfourcc(pb, "0001");
    avio_wb32(pb, 1); /* unknown */
    avio_wb32(pb, 0); /* unknown */

    avio_wb32(pb, 120); /* size */
    ffio_wfourcc(pb, "ARES");
    ffio_wfourcc(pb, "ARES");
    ffio_wfourcc(pb, "0001");
    avio_wb32(pb, AV_RB32(track->vos_data + 0x28)); /* dnxhd cid, some id ? */
    avio_wb32(pb, track->enc->width);
    /* values below are based on samples created with quicktime and avid codecs */
    if (track->vos_data[5] & 2) { // interlaced
        avio_wb32(pb, track->enc->height / 2);
        avio_wb32(pb, 2); /* unknown */
        avio_wb32(pb, 0); /* unknown */
        avio_wb32(pb, 4); /* unknown */
    } else {
        avio_wb32(pb, track->enc->height);
        avio_wb32(pb, 1); /* unknown */
        avio_wb32(pb, 0); /* unknown */
        if (track->enc->height == 1080)
            avio_wb32(pb, 5); /* unknown */
        else
            avio_wb32(pb, 6); /* unknown */
    }
    /* padding */
    for (i = 0; i < 10; i++)
        avio_wb64(pb, 0);

    /* extra padding for stsd needed */
    avio_wb32(pb, 0);
    return 0;
}
 
/* Pick the mp4 sample-entry fourcc for the track's codec; returns 0 when
 * the codec has no mp4 object type and therefore cannot be stored. */
static int mp4_get_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = track->enc->codec_tag;

    if (!ff_codec_get_tag(ff_mp4_obj_type, track->enc->codec_id))
        return 0;

    switch (track->enc->codec_id) {
    case AV_CODEC_ID_H264:     tag = MKTAG('a','v','c','1'); break;
    case AV_CODEC_ID_AC3:      tag = MKTAG('a','c','-','3'); break;
    case AV_CODEC_ID_DIRAC:    tag = MKTAG('d','r','a','c'); break;
    case AV_CODEC_ID_MOV_TEXT: tag = MKTAG('t','x','3','g'); break;
    case AV_CODEC_ID_VC1:      tag = MKTAG('v','c','-','1'); break;
    default:
        /* generic mpeg-4 entries by media type */
        if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO)
            tag = MKTAG('m','p','4','v');
        else if (track->enc->codec_type == AVMEDIA_TYPE_AUDIO)
            tag = MKTAG('m','p','4','a');
        break;
    }

    return tag;
}
 
/* Codec-id → fourcc mapping for iPod-flavoured MP4 files. */
static const AVCodecTag codec_ipod_tags[] = {
    { AV_CODEC_ID_H264,     MKTAG('a','v','c','1') },
    { AV_CODEC_ID_MPEG4,    MKTAG('m','p','4','v') },
    { AV_CODEC_ID_AAC,      MKTAG('m','p','4','a') },
    { AV_CODEC_ID_ALAC,     MKTAG('a','l','a','c') },
    { AV_CODEC_ID_AC3,      MKTAG('a','c','-','3') },
    { AV_CODEC_ID_MOV_TEXT, MKTAG('t','x','3','g') }, /* subtitles: both tags supported */
    { AV_CODEC_ID_MOV_TEXT, MKTAG('t','e','x','t') },
    { AV_CODEC_ID_NONE, 0 },
};
 
/* Choose the sample-entry fourcc for iPod-mode files and warn about
 * extensions Quicktime/iPod may reject. */
static int ipod_get_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = track->enc->codec_tag;
    int keep_subs_tag = track->enc->codec_type == AVMEDIA_TYPE_SUBTITLE &&
                        (tag == MKTAG('t', 'x', '3', 'g') ||
                         tag == MKTAG('t', 'e', 'x', 't'));

    /* iPod plays both subtitle formats, so keep whichever was supplied */
    if (!keep_subs_tag)
        tag = ff_codec_get_tag(codec_ipod_tags, track->enc->codec_id);

    if (!av_match_ext(s->filename, "m4a") && !av_match_ext(s->filename, "m4v"))
        av_log(s, AV_LOG_WARNING, "Warning, extension is not .m4a nor .m4v "
               "Quicktime/Ipod might not play the file\n");

    return tag;
}
 
/* Pick the DV fourcc from resolution, pixel format and frame rate.
 * Returns 0 (with an error log) for unsupported geometries. */
static int mov_get_dv_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag;

    if (track->enc->width == 720) {
        /* SD material */
        if (track->enc->height == 480) {
            /* NTSC */
            tag = track->enc->pix_fmt == AV_PIX_FMT_YUV422P
                  ? MKTAG('d','v','5','n') : MKTAG('d','v','c',' ');
        } else if (track->enc->pix_fmt == AV_PIX_FMT_YUV422P) {
            tag = MKTAG('d','v','5','p');
        } else if (track->enc->pix_fmt == AV_PIX_FMT_YUV420P) {
            tag = MKTAG('d','v','c','p');
        } else {
            tag = MKTAG('d','v','p','p');
        }
    } else if (track->enc->height == 720) {
        /* HD, 720 lines */
        tag = track->enc->time_base.den == 50
              ? MKTAG('d','v','h','q') : MKTAG('d','v','h','p');
    } else if (track->enc->height == 1080) {
        /* HD, 1080 lines */
        tag = track->enc->time_base.den == 25
              ? MKTAG('d','v','h','5') : MKTAG('d','v','h','6');
    } else {
        av_log(s, AV_LOG_ERROR, "unsupported height for dv codec\n");
        return 0;
    }

    return tag;
}
 
/* Derive a frame rate: invert the codec time base, falling back on the
 * stream's average frame rate when the result is not timecode-compatible. */
static AVRational find_fps(AVFormatContext *s, AVStream *st)
{
    AVRational rate = { st->codec->time_base.den, st->codec->time_base.num };

    if (av_timecode_check_frame_rate(rate) >= 0)
        return rate;

    /* codec time base makes no sense; use the stream frame rate instead */
    av_log(s, AV_LOG_DEBUG, "timecode: tbc=%d/%d invalid, fallback on %d/%d\n",
           rate.num, rate.den, st->avg_frame_rate.num, st->avg_frame_rate.den);
    return st->avg_frame_rate;
}
 
/* Pick the XDCAM fourcc for MPEG-2 video from pixel format, geometry,
 * interlacing and frame rate; falls back on the generic 'm2v1' tag. */
static int mov_get_mpeg2_xdcam_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = MKTAG('m', '2', 'v', '1'); //fallback tag
    int interlaced = track->enc->field_order > AV_FIELD_PROGRESSIVE;
    AVStream *st = track->st;
    /* Round to the nearest integer so NTSC-family rates (23.976, 29.97,
     * 59.94 fps) match the 24/30/60 entries below; plain truncation made
     * them fall through to the generic tag. */
    int rate = (int)(av_q2d(find_fps(s, st)) + 0.5);

    if (track->enc->pix_fmt == AV_PIX_FMT_YUV420P) {
        /* XDCAM EX / XDCAM HD (4:2:0) */
        if (track->enc->width == 1280 && track->enc->height == 720) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('x','d','v','4');
                else if (rate == 25) tag = MKTAG('x','d','v','5');
                else if (rate == 30) tag = MKTAG('x','d','v','1');
                else if (rate == 50) tag = MKTAG('x','d','v','a');
                else if (rate == 60) tag = MKTAG('x','d','v','9');
            }
        } else if (track->enc->width == 1440 && track->enc->height == 1080) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('x','d','v','6');
                else if (rate == 25) tag = MKTAG('x','d','v','7');
                else if (rate == 30) tag = MKTAG('x','d','v','8');
            } else {
                if      (rate == 25) tag = MKTAG('x','d','v','3');
                else if (rate == 30) tag = MKTAG('x','d','v','2');
            }
        } else if (track->enc->width == 1920 && track->enc->height == 1080) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('x','d','v','d');
                else if (rate == 25) tag = MKTAG('x','d','v','e');
                else if (rate == 30) tag = MKTAG('x','d','v','f');
            } else {
                if      (rate == 25) tag = MKTAG('x','d','v','c');
                else if (rate == 30) tag = MKTAG('x','d','v','b');
            }
        }
    } else if (track->enc->pix_fmt == AV_PIX_FMT_YUV422P) {
        /* XDCAM HD422 (4:2:2) */
        if (track->enc->width == 1280 && track->enc->height == 720) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('x','d','5','4');
                else if (rate == 25) tag = MKTAG('x','d','5','5');
                else if (rate == 30) tag = MKTAG('x','d','5','1');
                else if (rate == 50) tag = MKTAG('x','d','5','a');
                else if (rate == 60) tag = MKTAG('x','d','5','9');
            }
        } else if (track->enc->width == 1920 && track->enc->height == 1080) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('x','d','5','d');
                else if (rate == 25) tag = MKTAG('x','d','5','e');
                else if (rate == 30) tag = MKTAG('x','d','5','f');
            } else {
                if      (rate == 25) tag = MKTAG('x','d','5','c');
                else if (rate == 30) tag = MKTAG('x','d','5','b');
            }
        }
    }

    return tag;
}
 
/* Raw-video pixel format → QuickTime fourcc and bits-per-pixel mapping.
 * bps == 0 means bits_per_coded_sample is left untouched for that entry. */
static const struct {
    enum AVPixelFormat pix_fmt;
    uint32_t tag;
    unsigned bps;
} mov_pix_fmt_tags[] = {
    { AV_PIX_FMT_YUYV422, MKTAG('y','u','v','2'),  0 },
    { AV_PIX_FMT_YUYV422, MKTAG('y','u','v','s'),  0 },
    { AV_PIX_FMT_UYVY422, MKTAG('2','v','u','y'),  0 },
    { AV_PIX_FMT_RGB555BE,MKTAG('r','a','w',' '), 16 },
    { AV_PIX_FMT_RGB555LE,MKTAG('L','5','5','5'), 16 },
    { AV_PIX_FMT_RGB565LE,MKTAG('L','5','6','5'), 16 },
    { AV_PIX_FMT_RGB565BE,MKTAG('B','5','6','5'), 16 },
    { AV_PIX_FMT_GRAY16BE,MKTAG('b','1','6','g'), 16 },
    { AV_PIX_FMT_RGB24,   MKTAG('r','a','w',' '), 24 },
    { AV_PIX_FMT_BGR24,   MKTAG('2','4','B','G'), 24 },
    { AV_PIX_FMT_ARGB,    MKTAG('r','a','w',' '), 32 },
    { AV_PIX_FMT_BGRA,    MKTAG('B','G','R','A'), 32 },
    { AV_PIX_FMT_RGBA,    MKTAG('R','G','B','A'), 32 },
    { AV_PIX_FMT_ABGR,    MKTAG('A','B','G','R'), 32 },
    { AV_PIX_FMT_RGB48BE, MKTAG('b','4','8','r'), 48 },
};
 
/* Map a raw-video pixel format onto a QuickTime fourcc, also updating
 * bits_per_coded_sample. When several table entries match the pixel format,
 * the last one wins unless one of them equals the preset codec tag. */
static int mov_get_rawvideo_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = track->enc->codec_tag;
    int idx;

    for (idx = 0; idx < FF_ARRAY_ELEMS(mov_pix_fmt_tags); idx++) {
        if (track->enc->pix_fmt != mov_pix_fmt_tags[idx].pix_fmt)
            continue;
        tag = mov_pix_fmt_tags[idx].tag;
        track->enc->bits_per_coded_sample = mov_pix_fmt_tags[idx].bps;
        if (track->enc->codec_tag == mov_pix_fmt_tags[idx].tag)
            break; /* exact match with the preset tag: stop searching */
    }

    return tag;
}
 
/* Choose the sample-entry fourcc for plain MOV output. Keeps a supplied tag
 * except for codec families where strict compliance requires a specific
 * fourcc (DV, rawvideo, H.263, MPEG-2 and PCM audio). */
static int mov_get_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = track->enc->codec_tag;

    if (!tag || (track->enc->strict_std_compliance >= FF_COMPLIANCE_NORMAL &&
                 (track->enc->codec_id == AV_CODEC_ID_DVVIDEO ||
                  track->enc->codec_id == AV_CODEC_ID_RAWVIDEO ||
                  track->enc->codec_id == AV_CODEC_ID_H263 ||
                  track->enc->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
                  av_get_bits_per_sample(track->enc->codec_id)))) { // pcm audio
        if (track->enc->codec_id == AV_CODEC_ID_DVVIDEO)
            tag = mov_get_dv_codec_tag(s, track);
        else if (track->enc->codec_id == AV_CODEC_ID_RAWVIDEO)
            tag = mov_get_rawvideo_codec_tag(s, track);
        else if (track->enc->codec_id == AV_CODEC_ID_MPEG2VIDEO)
            tag = mov_get_mpeg2_xdcam_codec_tag(s, track);
        else if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            tag = ff_codec_get_tag(ff_codec_movvideo_tags, track->enc->codec_id);
            if (!tag) { // if no mac fcc found, try with Microsoft tags
                tag = ff_codec_get_tag(ff_codec_bmp_tags, track->enc->codec_id);
                if (tag)
                    av_log(s, AV_LOG_WARNING, "Using MS style video codec tag, "
                           "the file may be unplayable!\n");
            }
        } else if (track->enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            tag = ff_codec_get_tag(ff_codec_movaudio_tags, track->enc->codec_id);
            if (!tag) { // if no mac fcc found, try with Microsoft tags
                int ms_tag = ff_codec_get_tag(ff_codec_wav_tags, track->enc->codec_id);
                if (ms_tag) {
                    /* WAVE format ids are wrapped in an 'ms??' fourcc */
                    tag = MKTAG('m', 's', ((ms_tag >> 8) & 0xff), (ms_tag & 0xff));
                    av_log(s, AV_LOG_WARNING, "Using MS style audio codec tag, "
                           "the file may be unplayable!\n");
                }
            }
        } else if (track->enc->codec_type == AVMEDIA_TYPE_SUBTITLE)
            tag = ff_codec_get_tag(ff_codec_movsubtitle_tags, track->enc->codec_id);
    }

    return tag;
}
 
/* Codec-id → fourcc mapping for 3GP/3G2 files. */
static const AVCodecTag codec_3gp_tags[] = {
    { AV_CODEC_ID_H263,     MKTAG('s','2','6','3') },
    { AV_CODEC_ID_H264,     MKTAG('a','v','c','1') },
    { AV_CODEC_ID_MPEG4,    MKTAG('m','p','4','v') },
    { AV_CODEC_ID_AAC,      MKTAG('m','p','4','a') },
    { AV_CODEC_ID_AMR_NB,   MKTAG('s','a','m','r') },
    { AV_CODEC_ID_AMR_WB,   MKTAG('s','a','w','b') },
    { AV_CODEC_ID_MOV_TEXT, MKTAG('t','x','3','g') },
    { AV_CODEC_ID_NONE, 0 },
};
 
/* Codec-id → fourcc mapping for Flash F4V files. */
static const AVCodecTag codec_f4v_tags[] = { // XXX: add GIF/PNG/JPEG?
    { AV_CODEC_ID_MP3,  MKTAG('.','m','p','3') },
    { AV_CODEC_ID_AAC,  MKTAG('m','p','4','a') },
    { AV_CODEC_ID_H264, MKTAG('a','v','c','1') },
    { AV_CODEC_ID_VP6A, MKTAG('V','P','6','A') },
    { AV_CODEC_ID_VP6F, MKTAG('V','P','6','F') },
    { AV_CODEC_ID_NONE, 0 },
};
 
/* Dispatch codec-tag selection on the container flavour (track->mode). */
static int mov_find_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag;

    if (track->mode == MODE_MP4 || track->mode == MODE_PSP)
        return mp4_get_codec_tag(s, track);

    if (track->mode == MODE_ISM) {
        tag = mp4_get_codec_tag(s, track);
        /* Smooth Streaming additionally accepts WMA Pro */
        if (!tag && track->enc->codec_id == AV_CODEC_ID_WMAPRO)
            tag = MKTAG('w', 'm', 'a', ' ');
        return tag;
    }

    if (track->mode == MODE_IPOD)
        return ipod_get_codec_tag(s, track);

    if (track->mode & MODE_3GP) /* covers both 3GP and 3G2 */
        return ff_codec_get_tag(codec_3gp_tags, track->enc->codec_id);

    if (track->mode == MODE_F4V)
        return ff_codec_get_tag(codec_f4v_tags, track->enc->codec_id);

    return mov_get_codec_tag(s, track);
}
 
/** Write uuid atom.
 * Needed to make file play in iPods running newest firmware
 * goes after avcC atom in moov.trak.mdia.minf.stbl.stsd.avc1
 */
static int mov_write_uuid_tag_ipod(AVIOContext *pb)
{
    const int size = 28;

    avio_wb32(pb, size);
    ffio_wfourcc(pb, "uuid");
    /* fixed 128-bit UUID marking the file as iPod compatible */
    avio_wb32(pb, 0x6b6840f2);
    avio_wb32(pb, 0x5f244fc5);
    avio_wb32(pb, 0xba39a51b);
    avio_wb32(pb, 0xcf0323f3);
    avio_wb32(pb, 0x0); /* payload */
    return size;
}
 
/* AVFieldOrder → QuickTime "fiel" atom value (field count / detail bytes),
 * indexed by the AVFieldOrder enum. */
static const uint16_t fiel_data[] = {
    0x0000, 0x0100, 0x0201, 0x0206, 0x0209, 0x020e
};
 
/* Write the "fiel" field-order atom; field orders outside the table are
 * simply skipped. Returns the bytes written (10 or 0). */
static int mov_write_fiel_tag(AVIOContext *pb, MOVTrack *track)
{
    unsigned mov_field_order;

    if (track->enc->field_order >= FF_ARRAY_ELEMS(fiel_data))
        return 0;
    mov_field_order = fiel_data[track->enc->field_order];

    avio_wb32(pb, 10);
    ffio_wfourcc(pb, "fiel");
    avio_wb16(pb, mov_field_order);
    return 10;
}
 
/* Write the subtitle sample entry, appending any codec extradata. */
static int mov_write_subtitle_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t start = avio_tell(pb);

    avio_wb32(pb, 0);           /* size placeholder */
    avio_wl32(pb, track->tag);  // store it byteswapped
    avio_wb32(pb, 0);           /* Reserved */
    avio_wb16(pb, 0);           /* Reserved */
    avio_wb16(pb, 1);           /* Data-reference index */

    if (track->enc->extradata_size)
        avio_write(pb, track->enc->extradata, track->enc->extradata_size);

    return update_size(pb, start);
}
 
/* Write the "pasp" pixel-aspect-ratio atom with the SAR in lowest terms. */
static int mov_write_pasp_tag(AVIOContext *pb, MOVTrack *track)
{
    AVRational sar;

    av_reduce(&sar.num, &sar.den,
              track->enc->sample_aspect_ratio.num,
              track->enc->sample_aspect_ratio.den, INT_MAX);

    avio_wb32(pb, 16);        /* size */
    ffio_wfourcc(pb, "pasp");
    avio_wb32(pb, sar.num);   /* hSpacing */
    avio_wb32(pb, sar.den);   /* vSpacing */
    return 16;
}
 
/* Fill compressor_name (a buffer of len bytes) with a human-readable codec
 * name: the encoder name for MOV output, or a synthesized "XDCAM ..." label
 * for XDCAM-resolution MPEG-2. Leaves the buffer untouched otherwise. */
static void find_compressor(char * compressor_name, int len, MOVTrack *track)
{
    int xdcam_res = (track->enc->width == 1280 && track->enc->height == 720)
                 || (track->enc->width == 1440 && track->enc->height == 1080)
                 || (track->enc->width == 1920 && track->enc->height == 1080);

    if (track->mode == MODE_MOV && track->enc->codec && track->enc->codec->name) {
        /* honour the caller-provided buffer size instead of a hard-coded 32 */
        av_strlcpy(compressor_name, track->enc->codec->name, len);
    } else if (track->enc->codec_id == AV_CODEC_ID_MPEG2VIDEO && xdcam_res) {
        int interlaced = track->enc->field_order > AV_FIELD_PROGRESSIVE;
        AVStream *st = track->st;
        int rate = av_q2d(find_fps(NULL, st));
        av_strlcatf(compressor_name, len, "XDCAM");
        if (track->enc->pix_fmt == AV_PIX_FMT_YUV422P) {
            av_strlcatf(compressor_name, len, " HD422");
        } else if(track->enc->width == 1440) {
            av_strlcatf(compressor_name, len, " HD");
        } else
            av_strlcatf(compressor_name, len, " EX");

        av_strlcatf(compressor_name, len, " %d%c", track->enc->height, interlaced ? 'i' : 'p');

        /* interlaced material is conventionally labelled with the field rate */
        av_strlcatf(compressor_name, len, "%d", rate * (interlaced + 1));
    }
}
 
/* Write the video sample entry (stsd child) including the codec-specific
 * configuration atom (esds/d263/avcC/...), fiel and pasp. */
static int mov_write_video_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    char compressor_name[32] = { 0 };

    avio_wb32(pb, 0); /* size */
    avio_wl32(pb, track->tag); // store it byteswapped
    avio_wb32(pb, 0); /* Reserved */
    avio_wb16(pb, 0); /* Reserved */
    avio_wb16(pb, 1); /* Data-reference index */

    avio_wb16(pb, 0); /* Codec stream version */
    avio_wb16(pb, 0); /* Codec stream revision (=0) */
    if (track->mode == MODE_MOV) {
        ffio_wfourcc(pb, "FFMP"); /* Vendor */
        if (track->enc->codec_id == AV_CODEC_ID_RAWVIDEO) {
            avio_wb32(pb, 0); /* Temporal Quality */
            avio_wb32(pb, 0x400); /* Spatial Quality = lossless*/
        } else {
            avio_wb32(pb, 0x200); /* Temporal Quality = normal */
            avio_wb32(pb, 0x200); /* Spatial Quality = normal */
        }
    } else {
        avio_wb32(pb, 0); /* Reserved */
        avio_wb32(pb, 0); /* Reserved */
        avio_wb32(pb, 0); /* Reserved */
    }
    avio_wb16(pb, track->enc->width); /* Video width */
    avio_wb16(pb, track->height); /* Video height */
    avio_wb32(pb, 0x00480000); /* Horizontal resolution 72dpi */
    avio_wb32(pb, 0x00480000); /* Vertical resolution 72dpi */
    avio_wb32(pb, 0); /* Data size (= 0) */
    avio_wb16(pb, 1); /* Frame count (= 1) */

    /* FIXME not sure, ISO 14496-1 draft where it shall be set to 0 */
    /* compressor name: 31-byte pascal string, padded with zeros */
    find_compressor(compressor_name, 32, track);
    avio_w8(pb, strlen(compressor_name));
    avio_write(pb, compressor_name, 31);

    if (track->mode == MODE_MOV && track->enc->bits_per_coded_sample)
        avio_wb16(pb, track->enc->bits_per_coded_sample);
    else
        avio_wb16(pb, 0x18); /* Reserved */
    avio_wb16(pb, 0xffff); /* Reserved */
    /* codec-specific configuration atom */
    if (track->tag == MKTAG('m','p','4','v'))
        mov_write_esds_tag(pb, track);
    else if (track->enc->codec_id == AV_CODEC_ID_H263)
        mov_write_d263_tag(pb);
    else if (track->enc->codec_id == AV_CODEC_ID_AVUI ||
             track->enc->codec_id == AV_CODEC_ID_SVQ3) {
        mov_write_extradata_tag(pb, track);
        avio_wb32(pb, 0);
    } else if (track->enc->codec_id == AV_CODEC_ID_DNXHD)
        mov_write_avid_tag(pb, track);
    else if (track->enc->codec_id == AV_CODEC_ID_H264) {
        mov_write_avcc_tag(pb, track);
        if (track->mode == MODE_IPOD)
            mov_write_uuid_tag_ipod(pb);
    } else if (track->enc->codec_id == AV_CODEC_ID_VC1 && track->vos_len > 0)
        mov_write_dvc1_tag(pb, track);
    else if (track->enc->codec_id == AV_CODEC_ID_VP6F ||
             track->enc->codec_id == AV_CODEC_ID_VP6A) {
        /* Don't write any potential extradata here - the cropping
         * is signalled via the normal width/height fields. */
    } else if (track->vos_len > 0)
        mov_write_glbl_tag(pb, track);

    /* field-order atom, except for codecs that signal it in-stream */
    if (track->enc->codec_id != AV_CODEC_ID_H264 &&
        track->enc->codec_id != AV_CODEC_ID_MPEG4 &&
        track->enc->codec_id != AV_CODEC_ID_DNXHD)
        if (track->enc->field_order != AV_FIELD_UNKNOWN)
            mov_write_fiel_tag(pb, track);

    /* pixel aspect ratio, only for anamorphic content */
    if (track->enc->sample_aspect_ratio.den && track->enc->sample_aspect_ratio.num &&
        track->enc->sample_aspect_ratio.den != track->enc->sample_aspect_ratio.num) {
        mov_write_pasp_tag(pb, track);
    }

    return update_size(pb, pos);
}
 
/* Write the "rtp " hint sample entry with its "tims" timescale sub-atom. */
static int mov_write_rtp_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t start = avio_tell(pb);

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "rtp ");
    avio_wb32(pb, 0); /* Reserved */
    avio_wb16(pb, 0); /* Reserved */
    avio_wb16(pb, 1); /* Data-reference index */

    avio_wb16(pb, 1); /* Hint track version */
    avio_wb16(pb, 1); /* Highest compatible version */
    avio_wb32(pb, track->max_packet_size); /* Max packet size */

    /* timescale sub-atom */
    avio_wb32(pb, 12); /* size */
    ffio_wfourcc(pb, "tims");
    avio_wb32(pb, track->timescale);

    return update_size(pb, start);
}
 
/* Write the "tmcd" timecode sample entry. The active (#if 1) branch builds
 * the fields from the track time base; the disabled branch would copy raw
 * extradata instead. */
static int mov_write_tmcd_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
#if 1
    /* ticks per frame in the track timescale */
    int frame_duration = av_rescale(track->timescale, track->enc->time_base.num, track->enc->time_base.den);
    /* nominal fps, rounded to the nearest integer */
    int nb_frames = 1.0/av_q2d(track->enc->time_base) + 0.5;

    if (nb_frames > 255) { /* "number of frames" is an 8-bit field */
        av_log(NULL, AV_LOG_ERROR, "fps %d is too large\n", nb_frames);
        return AVERROR(EINVAL);
    }

    avio_wb32(pb, 0);                      /* size */
    ffio_wfourcc(pb, "tmcd");              /* Data format */
    avio_wb32(pb, 0);                      /* Reserved */
    avio_wb32(pb, 1);                      /* Data reference index */
    avio_wb32(pb, 0);                      /* Flags */
    avio_wb32(pb, track->timecode_flags);  /* Flags (timecode) */
    avio_wb32(pb, track->timescale);       /* Timescale */
    avio_wb32(pb, frame_duration);         /* Frame duration */
    avio_w8(pb, nb_frames);                /* Number of frames */
    avio_wb24(pb, 0);                      /* Reserved */
    /* TODO: source reference string */
#else

    avio_wb32(pb, 0);                      /* size */
    ffio_wfourcc(pb, "tmcd");              /* Data format */
    avio_wb32(pb, 0);                      /* Reserved */
    avio_wb32(pb, 1);                      /* Data reference index */
    if (track->enc->extradata_size)
        avio_write(pb, track->enc->extradata, track->enc->extradata_size);
#endif
    return update_size(pb, pos);
}
 
/* Write the "stsd" sample-description atom with its single entry, dispatched
 * on the track's media type (or codec tag for hint/timecode tracks). */
static int mov_write_stsd_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t start = avio_tell(pb);

    avio_wb32(pb, 0);         /* size placeholder */
    ffio_wfourcc(pb, "stsd");
    avio_wb32(pb, 0);         /* version & flags */
    avio_wb32(pb, 1);         /* entry count */

    switch (track->enc->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        mov_write_video_tag(pb, track);
        break;
    case AVMEDIA_TYPE_AUDIO:
        mov_write_audio_tag(pb, track);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        mov_write_subtitle_tag(pb, track);
        break;
    default:
        if (track->enc->codec_tag == MKTAG('r','t','p',' '))
            mov_write_rtp_tag(pb, track);
        else if (track->enc->codec_tag == MKTAG('t','m','c','d'))
            mov_write_tmcd_tag(pb, track);
        break;
    }

    return update_size(pb, start);
}
 
/* Write the "ctts" composition-offset atom, run-length compressing equal
 * consecutive CTS offsets. Returns the atom size, or AVERROR(ENOMEM). */
static int mov_write_ctts_tag(AVIOContext *pb, MOVTrack *track)
{
    MOVStts *ctts_entries;
    uint32_t entries = 0;
    uint32_t atom_size;
    int i;

    ctts_entries = av_malloc((track->entry + 1) * sizeof(*ctts_entries)); /* worst case */
    if (!ctts_entries) /* the old code dereferenced a NULL pointer on OOM */
        return AVERROR(ENOMEM);
    ctts_entries[0].count    = 1;
    ctts_entries[0].duration = track->cluster[0].cts;
    for (i = 1; i < track->entry; i++) {
        if (track->cluster[i].cts == ctts_entries[entries].duration) {
            ctts_entries[entries].count++; /* compress */
        } else {
            entries++;
            ctts_entries[entries].duration = track->cluster[i].cts;
            ctts_entries[entries].count    = 1;
        }
    }
    entries++; /* last one */
    atom_size = 16 + (entries * 8);
    avio_wb32(pb, atom_size); /* size */
    ffio_wfourcc(pb, "ctts");
    avio_wb32(pb, 0);         /* version & flags */
    avio_wb32(pb, entries);   /* entry count */
    for (i = 0; i < entries; i++) {
        avio_wb32(pb, ctts_entries[i].count);
        avio_wb32(pb, ctts_entries[i].duration);
    }
    av_free(ctts_entries);
    return atom_size;
}
 
/* Time to sample atom: run-length compressed sample durations. CBR audio is
 * special-cased to a single entry covering every sample. Returns the atom
 * size, or AVERROR(ENOMEM). */
static int mov_write_stts_tag(AVIOContext *pb, MOVTrack *track)
{
    MOVStts *stts_entries;
    uint32_t entries = -1; /* pre-incremented before the first store */
    uint32_t atom_size;
    int i;

    if (track->enc->codec_type == AVMEDIA_TYPE_AUDIO && !track->audio_vbr) {
        stts_entries = av_malloc(sizeof(*stts_entries)); /* one entry */
        if (!stts_entries) /* the old code dereferenced a NULL pointer on OOM */
            return AVERROR(ENOMEM);
        stts_entries[0].count    = track->sample_count;
        stts_entries[0].duration = 1;
        entries = 1;
    } else {
        stts_entries = track->entry ?
                       av_malloc(track->entry * sizeof(*stts_entries)) : /* worst case */
                       NULL;
        if (track->entry && !stts_entries)
            return AVERROR(ENOMEM);
        for (i = 0; i < track->entry; i++) {
            int duration = get_cluster_duration(track, i);
            if (i && duration == stts_entries[entries].duration) {
                stts_entries[entries].count++; /* compress */
            } else {
                entries++;
                stts_entries[entries].duration = duration;
                stts_entries[entries].count    = 1;
            }
        }
        entries++; /* last one */
    }
    atom_size = 16 + (entries * 8);
    avio_wb32(pb, atom_size); /* size */
    ffio_wfourcc(pb, "stts");
    avio_wb32(pb, 0);         /* version & flags */
    avio_wb32(pb, entries);   /* entry count */
    for (i = 0; i < entries; i++) {
        avio_wb32(pb, stts_entries[i].count);
        avio_wb32(pb, stts_entries[i].duration);
    }
    av_free(stts_entries);
    return atom_size;
}
 
/* Write the "dref" data-reference atom with a single self-contained
 * "url " entry. Returns the fixed atom size. */
static int mov_write_dref_tag(AVIOContext *pb)
{
    const int size = 28;

    avio_wb32(pb, size);      /* size */
    ffio_wfourcc(pb, "dref");
    avio_wb32(pb, 0);         /* version & flags */
    avio_wb32(pb, 1);         /* entry count */

    avio_wb32(pb, 0xc);       /* sub-atom size */
    //FIXME add the alis and rsrc atom
    ffio_wfourcc(pb, "url ");
    avio_wb32(pb, 1);         /* version & flags: data is in this file */

    return size;
}
 
/* Write the "stbl" sample-table atom and its mandatory/conditional children. */
static int mov_write_stbl_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t start = avio_tell(pb);
    int is_video_or_hint = track->enc->codec_type == AVMEDIA_TYPE_VIDEO ||
                           track->enc->codec_tag  == MKTAG('r','t','p',' ');

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "stbl");
    mov_write_stsd_tag(pb, track);
    mov_write_stts_tag(pb, track);
    /* sync-sample table only needed when not every sample is a keyframe */
    if (is_video_or_hint &&
        track->has_keyframes && track->has_keyframes < track->entry)
        mov_write_stss_tag(pb, track, MOV_SYNC_SAMPLE);
    if (track->mode == MODE_MOV && track->flags & MOV_TRACK_STPS)
        mov_write_stss_tag(pb, track, MOV_PARTIAL_SYNC_SAMPLE);
    if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO &&
        track->flags & MOV_TRACK_CTTS)
        mov_write_ctts_tag(pb, track);
    mov_write_stsc_tag(pb, track);
    mov_write_stsz_tag(pb, track);
    mov_write_stco_tag(pb, track);
    return update_size(pb, start);
}
 
/* Write the "dinf" data-information atom wrapping a dref. */
static int mov_write_dinf_tag(AVIOContext *pb)
{
    int64_t start = avio_tell(pb);

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "dinf");
    mov_write_dref_tag(pb);
    return update_size(pb, start);
}
 
/* Write an empty "nmhd" null-media-header atom. */
static int mov_write_nmhd_tag(AVIOContext *pb)
{
    const int size = 12;

    avio_wb32(pb, size);
    ffio_wfourcc(pb, "nmhd");
    avio_wb32(pb, 0); /* version & flags */
    return size;
}
 
/* Write the "tcmi" timecode media information atom: black text on a white
 * background in 12pt Lucida Grande. */
static int mov_write_tcmi_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t start = avio_tell(pb);
    const char *font = "Lucida Grande";
    int font_len = strlen(font);

    avio_wb32(pb, 0);           /* size placeholder */
    ffio_wfourcc(pb, "tcmi");   /* timecode media information atom */
    avio_wb32(pb, 0);           /* version & flags */
    avio_wb16(pb, 0);           /* text font */
    avio_wb16(pb, 0);           /* text face */
    avio_wb16(pb, 12);          /* text size */
    avio_wb16(pb, 0);           /* (unknown, not in the QT specs...) */
    avio_wb16(pb, 0x0000);      /* text color (red) */
    avio_wb16(pb, 0x0000);      /* text color (green) */
    avio_wb16(pb, 0x0000);      /* text color (blue) */
    avio_wb16(pb, 0xffff);      /* background color (red) */
    avio_wb16(pb, 0xffff);      /* background color (green) */
    avio_wb16(pb, 0xffff);      /* background color (blue) */
    avio_w8(pb, font_len);      /* font len (part of the pascal string) */
    avio_write(pb, font, font_len); /* font name */
    return update_size(pb, start);
}
 
/* Write the "gmhd" generic media header, with the verbatim "text" atom
 * needed for QuickTime chapters and a "tmcd" sub-atom for timecode tracks. */
static int mov_write_gmhd_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0);        /* size */
    ffio_wfourcc(pb, "gmhd");
    avio_wb32(pb, 0x18);     /* gmin size */
    ffio_wfourcc(pb, "gmin");/* generic media info */
    avio_wb32(pb, 0);        /* version & flags */
    avio_wb16(pb, 0x40);     /* graphics mode = */
    avio_wb16(pb, 0x8000);   /* opColor (r?) */
    avio_wb16(pb, 0x8000);   /* opColor (g?) */
    avio_wb16(pb, 0x8000);   /* opColor (b?) */
    avio_wb16(pb, 0);        /* balance */
    avio_wb16(pb, 0);        /* reserved */

    /*
     * This special text atom is required for
     * Apple Quicktime chapters. The contents
     * don't appear to be documented, so the
     * bytes are copied verbatim.
     */
    if (track->tag != MKTAG('c','6','0','8')) {
        avio_wb32(pb, 0x2C);   /* size */
        ffio_wfourcc(pb, "text");
        avio_wb16(pb, 0x01);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x01);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x00004000);
        avio_wb16(pb, 0x0000);
    }

    if (track->enc->codec_tag == MKTAG('t','m','c','d')) {
        /* timecode media info lives in a nested "tmcd" container */
        int64_t tmcd_pos = avio_tell(pb);
        avio_wb32(pb, 0); /* size */
        ffio_wfourcc(pb, "tmcd");
        mov_write_tcmi_tag(pb, track);
        update_size(pb, tmcd_pos);
    }
    return update_size(pb, pos);
}
 
/* Write the "smhd" sound media header (balance centered). */
static int mov_write_smhd_tag(AVIOContext *pb)
{
    const int size = 16;

    avio_wb32(pb, size);      /* size */
    ffio_wfourcc(pb, "smhd");
    avio_wb32(pb, 0);         /* version & flags */
    avio_wb16(pb, 0);         /* reserved (balance, normally = 0) */
    avio_wb16(pb, 0);         /* reserved */
    return size;
}
 
/* Write the "vmhd" video media header (graphics mode "copy"). */
static int mov_write_vmhd_tag(AVIOContext *pb)
{
    const int size = 0x14;

    avio_wb32(pb, size);      /* size (always 0x14) */
    ffio_wfourcc(pb, "vmhd");
    avio_wb32(pb, 0x01);      /* version & flags */
    avio_wb64(pb, 0);         /* reserved (graphics mode = copy) */
    return size;
}
 
/* Write a "hdlr" handler atom. With track == NULL this emits the data
 * handler used inside minf; otherwise the media handler chosen from the
 * track's type/tag. The description is a pascal string for MOV and a
 * C string for ISO variants. */
static int mov_write_hdlr_tag(AVIOContext *pb, MOVTrack *track)
{
    const char *hdlr, *descr = NULL, *hdlr_type = NULL;
    int64_t pos = avio_tell(pb);

    /* defaults: data handler */
    hdlr      = "dhlr";
    hdlr_type = "url ";
    descr     = "DataHandler";

    if (track) {
        hdlr = (track->mode == MODE_MOV) ? "mhlr" : "\0\0\0\0";
        if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            hdlr_type = "vide";
            descr     = "VideoHandler";
        } else if (track->enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            hdlr_type = "soun";
            descr     = "SoundHandler";
        } else if (track->enc->codec_type == AVMEDIA_TYPE_SUBTITLE) {
            if (track->tag == MKTAG('c','6','0','8')) {
                hdlr_type = "clcp";
                descr     = "ClosedCaptionHandler";
            } else {
                if (track->tag == MKTAG('t','x','3','g')) hdlr_type = "sbtl";
                else                                      hdlr_type = "text";
                descr = "SubtitleHandler";
            }
        } else if (track->enc->codec_tag == MKTAG('r','t','p',' ')) {
            hdlr_type = "hint";
            descr     = "HintHandler";
        } else if (track->enc->codec_tag == MKTAG('t','m','c','d')) {
            hdlr_type = "tmcd";
            descr     = "TimeCodeHandler";
        } else {
            /* unknown track type: keep the "url " defaults but warn */
            char tag_buf[32];
            av_get_codec_tag_string(tag_buf, sizeof(tag_buf),
                                    track->enc->codec_tag);

            av_log(track->enc, AV_LOG_WARNING,
                   "Unknown hldr_type for %s / 0x%04X, writing dummy values\n",
                   tag_buf, track->enc->codec_tag);
        }
    }

    avio_wb32(pb, 0); /* size */
    ffio_wfourcc(pb, "hdlr");
    avio_wb32(pb, 0); /* Version & flags */
    avio_write(pb, hdlr, 4); /* handler */
    ffio_wfourcc(pb, hdlr_type); /* handler type */
    avio_wb32(pb, 0); /* reserved */
    avio_wb32(pb, 0); /* reserved */
    avio_wb32(pb, 0); /* reserved */
    if (!track || track->mode == MODE_MOV)
        avio_w8(pb, strlen(descr)); /* pascal string */
    avio_write(pb, descr, strlen(descr)); /* handler description */
    if (track && track->mode != MODE_MOV)
        avio_w8(pb, 0); /* c string */
    return update_size(pb, pos);
}
 
/* Write a zeroed "hmhd" hint media header. */
static int mov_write_hmhd_tag(AVIOContext *pb)
{
    /* This atom must be present, but leaving the values at zero
     * seems harmless. */
    const int size = 28;

    avio_wb32(pb, size);      /* size */
    ffio_wfourcc(pb, "hmhd");
    avio_wb32(pb, 0);         /* version, flags */
    avio_wb16(pb, 0);         /* maxPDUsize */
    avio_wb16(pb, 0);         /* avgPDUsize */
    avio_wb32(pb, 0);         /* maxbitrate */
    avio_wb32(pb, 0);         /* avgbitrate */
    avio_wb32(pb, 0);         /* reserved */
    return size;
}
 
/* Write the "minf" media information atom: the type-specific media header,
 * an optional data handler (MOV only), dinf and stbl. */
static int mov_write_minf_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t start = avio_tell(pb);

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "minf");

    switch (track->enc->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        mov_write_vmhd_tag(pb);
        break;
    case AVMEDIA_TYPE_AUDIO:
        mov_write_smhd_tag(pb);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        if (track->tag == MKTAG('t','e','x','t') ||
            track->tag == MKTAG('c','6','0','8'))
            mov_write_gmhd_tag(pb, track);
        else
            mov_write_nmhd_tag(pb);
        break;
    default:
        if (track->tag == MKTAG('r','t','p',' '))
            mov_write_hmhd_tag(pb);
        else if (track->tag == MKTAG('t','m','c','d'))
            mov_write_gmhd_tag(pb, track);
        break;
    }

    if (track->mode == MODE_MOV) /* FIXME: Why do it for MODE_MOV only ? */
        mov_write_hdlr_tag(pb, NULL);
    mov_write_dinf_tag(pb);
    mov_write_stbl_tag(pb, track);
    return update_size(pb, start);
}
 
/* Write the "mdhd" media header. Version 1 (64-bit times) is used for ISM
 * output or when the duration overflows 32 bits. Returns the bytes written. */
static int mov_write_mdhd_tag(AVIOContext *pb, MOVTrack *track)
{
    int version = track->track_duration < INT32_MAX ? 0 : 1;

    if (track->mode == MODE_ISM)
        version = 1;

    avio_wb32(pb, version == 1 ? 44 : 32); /* size */
    ffio_wfourcc(pb, "mdhd");
    avio_w8(pb, version);
    avio_wb24(pb, 0); /* flags */
    if (version == 1) {
        avio_wb64(pb, track->time); /* creation time */
        avio_wb64(pb, track->time); /* modification time */
    } else {
        avio_wb32(pb, track->time); /* creation time */
        avio_wb32(pb, track->time); /* modification time */
    }
    avio_wb32(pb, track->timescale); /* time scale (sample rate for audio) */
    /* duration: all-ones sentinel for "unknown" when no samples were written */
    if (!track->entry) {
        if (version == 1)
            avio_wb64(pb, UINT64_C(0xffffffffffffffff));
        else
            avio_wb32(pb, 0xffffffff);
    } else {
        if (version == 1)
            avio_wb64(pb, track->track_duration);
        else
            avio_wb32(pb, track->track_duration);
    }
    avio_wb16(pb, track->language); /* language */
    avio_wb16(pb, 0); /* reserved (quality) */

    if (version != 0 && track->mode == MODE_MOV) {
        av_log(NULL, AV_LOG_ERROR,
               "FATAL error, file duration too long for timebase, this file will not be\n"
               "playable with quicktime. Choose a different timebase or a different\n"
               "container format\n");
    }

    /* report the size actually written; the old code always claimed 32,
     * which was wrong for 44-byte version-1 atoms */
    return version == 1 ? 44 : 32;
}
 
/* Write the "mdia" media atom: mdhd, hdlr and minf. */
static int mov_write_mdia_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t start = avio_tell(pb);

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "mdia");
    mov_write_mdhd_tag(pb, track);
    mov_write_hdlr_tag(pb, track);
    mov_write_minf_tag(pb, track);
    return update_size(pb, start);
}
 
/* transformation matrix
      |a  b  u|
      |c  d  v|
      |tx ty w| */
static void write_matrix(AVIOContext *pb, int16_t a, int16_t b, int16_t c,
                         int16_t d, int16_t tx, int16_t ty)
{
    /* Multiply instead of shifting: the rotation matrices pass -1, and
     * left-shifting a negative value is undefined behavior in C. The
     * product fits in int and yields the same 16.16 bit pattern. */
    avio_wb32(pb, a  * 0x10000); /* 16.16 format */
    avio_wb32(pb, b  * 0x10000); /* 16.16 format */
    avio_wb32(pb, 0);            /* u in 2.30 format */
    avio_wb32(pb, c  * 0x10000); /* 16.16 format */
    avio_wb32(pb, d  * 0x10000); /* 16.16 format */
    avio_wb32(pb, 0);            /* v in 2.30 format */
    avio_wb32(pb, tx * 0x10000); /* 16.16 format */
    avio_wb32(pb, ty * 0x10000); /* 16.16 format */
    avio_wb32(pb, 1 << 30);      /* w in 2.30 format */
}
 
/* Write the "tkhd" track header: timing, track id, volume, the display
 * matrix (honouring a "rotate" metadata entry) and the presentation size. */
static int mov_write_tkhd_tag(AVIOContext *pb, MOVTrack *track, AVStream *st)
{
    /* duration expressed in the movie timescale */
    int64_t duration = av_rescale_rnd(track->track_duration, MOV_TIMESCALE,
                                      track->timescale, AV_ROUND_UP);
    int version = duration < INT32_MAX ? 0 : 1;
    int rotation = 0;

    if (track->mode == MODE_ISM)
        version = 1; /* ISM always uses 64-bit fields */

    (version == 1) ? avio_wb32(pb, 104) : avio_wb32(pb, 92); /* size */
    ffio_wfourcc(pb, "tkhd");
    avio_w8(pb, version);
    avio_wb24(pb, (track->flags & MOV_TRACK_ENABLED) ?
                  MOV_TKHD_FLAG_ENABLED | MOV_TKHD_FLAG_IN_MOVIE :
                  MOV_TKHD_FLAG_IN_MOVIE);
    if (version == 1) {
        avio_wb64(pb, track->time);
        avio_wb64(pb, track->time);
    } else {
        avio_wb32(pb, track->time); /* creation time */
        avio_wb32(pb, track->time); /* modification time */
    }
    avio_wb32(pb, track->track_id); /* track-id */
    avio_wb32(pb, 0); /* reserved */
    /* all-ones sentinel for "unknown duration" when no samples were written */
    if (!track->entry)
        (version == 1) ? avio_wb64(pb, UINT64_C(0xffffffffffffffff)) : avio_wb32(pb, 0xffffffff);
    else
        (version == 1) ? avio_wb64(pb, duration) : avio_wb32(pb, duration);

    avio_wb32(pb, 0); /* reserved */
    avio_wb32(pb, 0); /* reserved */
    avio_wb16(pb, 0); /* layer */
    avio_wb16(pb, st ? st->codec->codec_type : 0); /* alternate group) */
    /* Volume, only for audio */
    if (track->enc->codec_type == AVMEDIA_TYPE_AUDIO)
        avio_wb16(pb, 0x0100);
    else
        avio_wb16(pb, 0);
    avio_wb16(pb, 0); /* reserved */

    /* Matrix structure */
    if (st && st->metadata) {
        AVDictionaryEntry *rot = av_dict_get(st->metadata, "rotate", NULL, 0);
        rotation = (rot && rot->value) ? atoi(rot->value) : 0;
    }
    /* only the four cardinal rotations are representable here */
    if (rotation == 90) {
        write_matrix(pb,  0,  1, -1,  0, track->enc->height, 0);
    } else if (rotation == 180) {
        write_matrix(pb, -1,  0,  0, -1, track->enc->width, track->enc->height);
    } else if (rotation == 270) {
        write_matrix(pb,  0, -1,  1,  0, 0, track->enc->width);
    } else {
        write_matrix(pb,  1,  0,  0,  1, 0, 0); /* identity */
    }
    /* Track width and height, for visual only */
    if (st && (track->enc->codec_type == AVMEDIA_TYPE_VIDEO ||
               track->enc->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
        if (track->mode == MODE_MOV) {
            avio_wb32(pb, track->enc->width << 16);
            avio_wb32(pb, track->height << 16);
        } else {
            /* ISO variants store the SAR-scaled display width */
            double sample_aspect_ratio = av_q2d(st->sample_aspect_ratio);
            if (!sample_aspect_ratio || track->height != track->enc->height)
                sample_aspect_ratio = 1;
            avio_wb32(pb, sample_aspect_ratio * track->enc->width * 0x10000);
            avio_wb32(pb, track->height * 0x10000);
        }
    } else {
        avio_wb32(pb, 0);
        avio_wb32(pb, 0);
    }
    return 0x5c;
}
 
/* Write the "tapt" track aperture atom. Clean ("clef") and production
 * ("prof") apertures use the SAR-corrected display width; the encoded-pixels
 * aperture ("enof") uses the coded width. */
static int mov_write_tapt_tag(AVIOContext *pb, MOVTrack *track)
{
    static const char *const aperture_types[3] = { "clef", "prof", "enof" };
    int32_t display_width = av_rescale(track->enc->sample_aspect_ratio.num,
                                       track->enc->width,
                                       track->enc->sample_aspect_ratio.den);
    int64_t start = avio_tell(pb);
    int i;

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "tapt");

    for (i = 0; i < 3; i++) {
        avio_wb32(pb, 20); /* sub-atom size */
        ffio_wfourcc(pb, aperture_types[i]);
        avio_wb32(pb, 0);  /* version & flags */
        /* dimensions as 16.16 fixed point */
        avio_wb32(pb, (i < 2 ? display_width : track->enc->width) << 16);
        avio_wb32(pb, track->enc->height << 16);
    }

    return update_size(pb, start);
}
 
// This box seems important for the psp playback ... without it the movie seems to hang
// This box seems important for the psp playback ... without it the movie seems to hang
/* Write the "edts"/"elst" edit list: an optional empty edit to delay
 * presentation when the first DTS+CTS is positive, followed by the media
 * edit covering the whole track. */
static int mov_write_edts_tag(AVIOContext *pb, MOVTrack *track)
{
    /* track duration in the movie timescale */
    int64_t duration = av_rescale_rnd(track->track_duration, MOV_TIMESCALE,
                                      track->timescale, AV_ROUND_UP);
    int version = duration < INT32_MAX ? 0 : 1;
    int entry_size, entry_count, size;
    int64_t delay, start_ct = track->cluster[0].cts;
    /* presentation delay of the first sample, in the movie timescale */
    delay = av_rescale_rnd(track->cluster[0].dts + start_ct, MOV_TIMESCALE,
                           track->timescale, AV_ROUND_DOWN);
    version |= delay < INT32_MAX ? 0 : 1; /* 64-bit fields if delay overflows */

    entry_size = (version == 1) ? 20 : 12;
    entry_count = 1 + (delay > 0);
    size = 24 + entry_count * entry_size;

    /* write the atom data */
    avio_wb32(pb, size);
    ffio_wfourcc(pb, "edts");
    avio_wb32(pb, size - 8);
    ffio_wfourcc(pb, "elst");
    avio_w8(pb, version);
    avio_wb24(pb, 0); /* flags */

    avio_wb32(pb, entry_count);
    if (delay > 0) { /* add an empty edit to delay presentation */
        if (version == 1) {
            avio_wb64(pb, delay);
            avio_wb64(pb, -1); /* media time -1: empty edit */
        } else {
            avio_wb32(pb, delay);
            avio_wb32(pb, -1);
        }
        avio_wb32(pb, 0x00010000); /* media rate 1.0 */
    } else {
        /* no delay: start inside the media instead, shortening the edit */
        av_assert0(av_rescale_rnd(track->cluster[0].dts, MOV_TIMESCALE, track->timescale, AV_ROUND_DOWN) <= 0);
        start_ct = -FFMIN(track->cluster[0].dts, 0); //FFMIN needed due to rounding
        duration += delay;
    }

    /* duration */
    if (version == 1) {
        avio_wb64(pb, duration);
        avio_wb64(pb, start_ct); /* media start time */
    } else {
        avio_wb32(pb, duration);
        avio_wb32(pb, start_ct);
    }
    avio_wb32(pb, 0x00010000); /* media rate 1.0 */
    return size;
}
 
/* Write a fixed 20-byte 'tref' (track reference) box containing one
 * nested reference atom of type track->tref_tag pointing at tref_id. */
static int mov_write_tref_tag(AVIOContext *pb, MOVTrack *track)
{
    const int box_size = 20;

    avio_wb32(pb, box_size);
    ffio_wfourcc(pb, "tref");
    avio_wb32(pb, box_size - 8);    /* nested reference atom size */
    avio_wl32(pb, track->tref_tag); /* reference type fourcc */
    avio_wb32(pb, track->tref_id);  /* referenced track id */
    return box_size;
}
 
// goes at the end of each track! ... Critical for PSP playback ("Incompatible data" without it)
// goes at the end of each track! ... Critical for PSP playback ("Incompatible data" without it)
static int mov_write_uuid_tag_psp(AVIOContext *pb, MOVTrack *mov)
{
    /* 96-bit USMT uuid tail, written big-endian word by word */
    static const uint32_t usmt_uuid[3] = { 0x21d24fce, 0xbb88695c, 0xfac9c740 };
    int i;

    avio_wb32(pb, 0x34);            /* size ... reports as 28 in mp4box! */
    ffio_wfourcc(pb, "uuid");
    ffio_wfourcc(pb, "USMT");
    for (i = 0; i < 3; i++)
        avio_wb32(pb, usmt_uuid[i]);
    avio_wb32(pb, 0x1c);            /* nested MTDT box size */
    ffio_wfourcc(pb, "MTDT");
    avio_wb32(pb, 0x00010012);
    avio_wb32(pb, 0x0a);
    avio_wb32(pb, 0x55c40000);
    avio_wb32(pb, 0x1);
    avio_wb32(pb, 0x0);
    return 0x34;
}
 
/* Write a per-track 'udta'/'hnti'/'sdp ' chain carrying the SDP media
 * description for an RTP hint track, plus a streamid control attribute.
 * NOTE(review): the SDP is built into a fixed 1000-byte buffer; output is
 * silently truncated if ff_sdp_write_media needs more — confirm acceptable. */
static int mov_write_udta_sdp(AVIOContext *pb, MOVTrack *track)
{
    AVFormatContext *ctx = track->rtp_ctx;
    char buf[1000] = "";
    int len;

    ff_sdp_write_media(buf, sizeof(buf), ctx->streams[0], track->src_track,
                       NULL, NULL, 0, 0, ctx);
    av_strlcatf(buf, sizeof(buf), "a=control:streamid=%d\r\n", track->track_id);
    len = strlen(buf);

    /* three nested boxes, each 8 bytes of header around the payload */
    avio_wb32(pb, len + 24);
    ffio_wfourcc(pb, "udta");
    avio_wb32(pb, len + 16);
    ffio_wfourcc(pb, "hnti");
    avio_wb32(pb, len + 8);
    ffio_wfourcc(pb, "sdp ");
    avio_write(pb, buf, len);
    return len + 24;
}
 
/* Write one 'trak' box: tkhd, optional edts (PSP and others), optional
 * tref, mdia, plus mode-specific extras (PSP uuid, RTP sdp, MOV tapt). */
static int mov_write_trak_tag(AVIOContext *pb, MOVMuxContext *mov,
                              MOVTrack *track, AVStream *st)
{
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size */
    ffio_wfourcc(pb, "trak");
    mov_write_tkhd_tag(pb, track, st);
    if (supports_edts(mov))
        mov_write_edts_tag(pb, track);  // PSP Movies and several other cases require edts box
    if (track->tref_tag)
        mov_write_tref_tag(pb, track);
    mov_write_mdia_tag(pb, track);
    if (track->mode == MODE_PSP)
        mov_write_uuid_tag_psp(pb, track);  // PSP Movies require this uuid box
    if (track->tag == MKTAG('r','t','p',' '))
        mov_write_udta_sdp(pb, track);
    if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO && track->mode == MODE_MOV) {
        /* only emit tapt when a non-trivial sample aspect ratio is set */
        double sample_aspect_ratio = av_q2d(st->sample_aspect_ratio);
        if (st->sample_aspect_ratio.num && 1.0 != sample_aspect_ratio)
            mov_write_tapt_tag(pb, track);
    }
    return update_size(pb, pos);
}
 
/* Write the MP4 'iods' (initial object descriptor) box. Audio/video
 * profile levels come from the muxer options; when unset (< 0) they
 * default to 0xFF (no profile) or 0xFE if a track of that type exists. */
static int mov_write_iods_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    int i, has_audio = 0, has_video = 0;
    int64_t pos = avio_tell(pb);
    int audio_profile = mov->iods_audio_profile;
    int video_profile = mov->iods_video_profile;
    for (i = 0; i < mov->nb_streams; i++) {
        if (mov->tracks[i].entry > 0) { /* only tracks with samples count */
            has_audio |= mov->tracks[i].enc->codec_type == AVMEDIA_TYPE_AUDIO;
            has_video |= mov->tracks[i].enc->codec_type == AVMEDIA_TYPE_VIDEO;
        }
    }
    if (audio_profile < 0)
        audio_profile = 0xFF - has_audio;
    if (video_profile < 0)
        video_profile = 0xFF - has_video;
    avio_wb32(pb, 0x0); /* size */
    ffio_wfourcc(pb, "iods");
    avio_wb32(pb, 0);   /* version & flags */
    put_descr(pb, 0x10, 7); /* MP4_IOD_Tag, 7 payload bytes */
    avio_wb16(pb, 0x004f);  /* ObjectDescriptorID */
    avio_w8(pb, 0xff);      /* OD profile */
    avio_w8(pb, 0xff);      /* scene profile */
    avio_w8(pb, audio_profile);
    avio_w8(pb, video_profile);
    avio_w8(pb, 0xff);      /* graphics profile */
    return update_size(pb, pos);
}
 
/* Write one 'trex' (track extends) box with all fragment defaults zeroed
 * except the sample description index, which is 1. */
static int mov_write_trex_tag(AVIOContext *pb, MOVTrack *track)
{
    int i;

    avio_wb32(pb, 0x20);            /* fixed box size */
    ffio_wfourcc(pb, "trex");
    avio_wb32(pb, 0);               /* version & flags */
    avio_wb32(pb, track->track_id);
    avio_wb32(pb, 1);               /* default sample description index */
    for (i = 0; i < 3; i++)         /* default duration / size / flags */
        avio_wb32(pb, 0);
    return 0;
}
 
/* Write the 'mvex' box: one trex entry per stream, size patched at the end. */
static int mov_write_mvex_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    int64_t start = avio_tell(pb);
    int trk;

    avio_wb32(pb, 0x0);             /* size, patched by update_size() */
    ffio_wfourcc(pb, "mvex");
    for (trk = 0; trk < mov->nb_streams; trk++)
        mov_write_trex_tag(pb, &mov->tracks[trk]);
    return update_size(pb, start);
}
 
/* Write the 'mvhd' (movie header) box. The movie duration is the longest
 * track duration rescaled to MOV_TIMESCALE; version 1 (64-bit times) is
 * used once it no longer fits in 32 bits.
 * NOTE(review): returns 0x6c even for version 1, whose box is 120 bytes;
 * the caller ignores the return value, but confirm before relying on it. */
static int mov_write_mvhd_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    int max_track_id = 1, i;
    int64_t max_track_len_temp, max_track_len = 0;
    int version;

    for (i = 0; i < mov->nb_streams; i++) {
        if (mov->tracks[i].entry > 0 && mov->tracks[i].timescale) {
            max_track_len_temp = av_rescale_rnd(mov->tracks[i].track_duration,
                                                MOV_TIMESCALE,
                                                mov->tracks[i].timescale,
                                                AV_ROUND_UP);
            if (max_track_len < max_track_len_temp)
                max_track_len = max_track_len_temp;
            if (max_track_id < mov->tracks[i].track_id)
                max_track_id = mov->tracks[i].track_id;
        }
    }

    version = max_track_len < UINT32_MAX ? 0 : 1;
    (version == 1) ? avio_wb32(pb, 120) : avio_wb32(pb, 108); /* size */
    ffio_wfourcc(pb, "mvhd");
    avio_w8(pb, version);
    avio_wb24(pb, 0); /* flags */
    if (version == 1) {
        avio_wb64(pb, mov->time);
        avio_wb64(pb, mov->time);
    } else {
        avio_wb32(pb, mov->time); /* creation time */
        avio_wb32(pb, mov->time); /* modification time */
    }
    avio_wb32(pb, MOV_TIMESCALE);
    (version == 1) ? avio_wb64(pb, max_track_len) : avio_wb32(pb, max_track_len); /* duration of longest track */

    avio_wb32(pb, 0x00010000); /* reserved (preferred rate) 1.0 = normal */
    avio_wb16(pb, 0x0100); /* reserved (preferred volume) 1.0 = normal */
    avio_wb16(pb, 0); /* reserved */
    avio_wb32(pb, 0); /* reserved */
    avio_wb32(pb, 0); /* reserved */

    /* Matrix structure: identity transform */
    write_matrix(pb, 1, 0, 0, 1, 0, 0);

    avio_wb32(pb, 0); /* reserved (preview time) */
    avio_wb32(pb, 0); /* reserved (preview duration) */
    avio_wb32(pb, 0); /* reserved (poster time) */
    avio_wb32(pb, 0); /* reserved (selection time) */
    avio_wb32(pb, 0); /* reserved (selection duration) */
    avio_wb32(pb, 0); /* reserved (current time) */
    avio_wb32(pb, max_track_id + 1); /* Next track id */
    return 0x6c;
}
 
/* Write the fixed 33-byte iTunes metadata handler box ("mdir"/"appl"). */
static int mov_write_itunes_hdlr_tag(AVIOContext *pb, MOVMuxContext *mov,
                                     AVFormatContext *s)
{
    const int hdlr_size = 33;

    avio_wb32(pb, hdlr_size);
    ffio_wfourcc(pb, "hdlr");
    avio_wb32(pb, 0);           /* version & flags */
    avio_wb32(pb, 0);           /* predefined */
    ffio_wfourcc(pb, "mdir");   /* handler type: metadata directory */
    ffio_wfourcc(pb, "appl");   /* manufacturer */
    avio_wb32(pb, 0);           /* reserved */
    avio_wb32(pb, 0);           /* reserved */
    avio_w8(pb, 0);             /* empty name string */
    return hdlr_size;
}
 
/* helper function to write a data tag with the specified string as data */
static int mov_write_string_data_tag(AVIOContext *pb, const char *data, int lang, int long_style)
{
if (long_style) {
int size = 16 + strlen(data);
avio_wb32(pb, size); /* size */
ffio_wfourcc(pb, "data");
avio_wb32(pb, 1);
avio_wb32(pb, 0);
avio_write(pb, data, strlen(data));
return size;
} else {
if (!lang)
lang = ff_mov_iso639_to_lang("und", 1);
avio_wb16(pb, strlen(data)); /* string length */
avio_wb16(pb, lang);
avio_write(pb, data, strlen(data));
return strlen(data) + 4;
}
}
 
/* Write a named metadata box wrapping mov_write_string_data_tag.
 * Returns 0 (and writes nothing) for a NULL or empty value. */
static int mov_write_string_tag(AVIOContext *pb, const char *name,
                                const char *value, int lang, int long_style)
{
    int64_t start;

    if (!value || !value[0])
        return 0;

    start = avio_tell(pb);
    avio_wb32(pb, 0); /* size, patched below */
    ffio_wfourcc(pb, name);
    mov_write_string_data_tag(pb, value, lang, long_style);
    return update_size(pb, start);
}
 
/* Look up metadata key `tag` and write it as box `name`. Also scans for a
 * language-suffixed variant ("tag-xyz") carrying the same value so the
 * ISO 639 language code can be written alongside the short-style string. */
static int mov_write_string_metadata(AVFormatContext *s, AVIOContext *pb,
                                     const char *name, const char *tag,
                                     int long_style)
{
    int l, lang = 0, len, len2;
    AVDictionaryEntry *t, *t2 = NULL;
    char tag2[16];

    if (!(t = av_dict_get(s->metadata, tag, NULL, 0)))
        return 0;

    len = strlen(t->key);
    snprintf(tag2, sizeof(tag2), "%s-", tag);
    while ((t2 = av_dict_get(s->metadata, tag2, t2, AV_DICT_IGNORE_SUFFIX))) {
        len2 = strlen(t2->key);
        /* "tag-" + 3-letter language suffix, identical value */
        if (len2 == len + 4 && !strcmp(t->value, t2->value)
            && (l = ff_mov_iso639_to_lang(&t2->key[len2 - 3], 1)) >= 0) {
            lang = l;
            break;
        }
    }
    return mov_write_string_tag(pb, name, t->value, lang, long_style);
}
 
/* iTunes bpm number */
static int mov_write_tmpo_tag(AVIOContext *pb, AVFormatContext *s)
{
AVDictionaryEntry *t = av_dict_get(s->metadata, "tmpo", NULL, 0);
int size = 0, tmpo = t ? atoi(t->value) : 0;
if (tmpo) {
size = 26;
avio_wb32(pb, size);
ffio_wfourcc(pb, "tmpo");
avio_wb32(pb, size-8); /* size */
ffio_wfourcc(pb, "data");
avio_wb32(pb, 0x15); //type specifier
avio_wb32(pb, 0);
avio_wb16(pb, tmpo); // data
}
return size;
}
 
/* iTunes track or disc number */
static int mov_write_trkn_tag(AVIOContext *pb, MOVMuxContext *mov,
AVFormatContext *s, int disc)
{
AVDictionaryEntry *t = av_dict_get(s->metadata,
disc ? "disc" : "track",
NULL, 0);
int size = 0, track = t ? atoi(t->value) : 0;
if (track) {
int tracks = 0;
char *slash = strchr(t->value, '/');
if (slash)
tracks = atoi(slash + 1);
avio_wb32(pb, 32); /* size */
ffio_wfourcc(pb, disc ? "disk" : "trkn");
avio_wb32(pb, 24); /* size */
ffio_wfourcc(pb, "data");
avio_wb32(pb, 0); // 8 bytes empty
avio_wb32(pb, 0);
avio_wb16(pb, 0); // empty
avio_wb16(pb, track); // track / disc number
avio_wb16(pb, tracks); // total track / disc number
avio_wb16(pb, 0); // empty
size = 32;
}
return size;
}
 
/* Write an integer metadata box (1- or 4-byte payload) from a metadata
 * entry, e.g. tves/tvsn/stik. Returns the box size, 0 when the tag is
 * absent, -1 for an unsupported length. */
static int mov_write_int8_metadata(AVFormatContext *s, AVIOContext *pb,
                                   const char *name, const char *tag,
                                   int len)
{
    AVDictionaryEntry *t = NULL;
    /* was uint8_t: values > 255 were truncated even for 4-byte payloads
     * (e.g. episode_sort/season_number), so avio_wb32 wrote a wrong value */
    int num;
    int size = 24 + len;

    if (len != 1 && len != 4)
        return -1;

    if (!(t = av_dict_get(s->metadata, tag, NULL, 0)))
        return 0;
    num = atoi(t->value);

    avio_wb32(pb, size);
    ffio_wfourcc(pb, name);
    avio_wb32(pb, size - 8);
    ffio_wfourcc(pb, "data");
    avio_wb32(pb, 0x15); /* data type: big-endian integer */
    avio_wb32(pb, 0);    /* locale */
    if (len == 4) avio_wb32(pb, num);
    else          avio_w8 (pb, num); /* low byte only, as before */

    return size;
}
 
/* iTunes meta data list */
static int mov_write_ilst_tag(AVIOContext *pb, MOVMuxContext *mov,
AVFormatContext *s)
{
int64_t pos = avio_tell(pb);
avio_wb32(pb, 0); /* size */
ffio_wfourcc(pb, "ilst");
mov_write_string_metadata(s, pb, "\251nam", "title" , 1);
mov_write_string_metadata(s, pb, "\251ART", "artist" , 1);
mov_write_string_metadata(s, pb, "aART", "album_artist", 1);
mov_write_string_metadata(s, pb, "\251wrt", "composer" , 1);
mov_write_string_metadata(s, pb, "\251alb", "album" , 1);
mov_write_string_metadata(s, pb, "\251day", "date" , 1);
mov_write_string_tag(pb, "\251too", LIBAVFORMAT_IDENT, 0, 1);
mov_write_string_metadata(s, pb, "\251cmt", "comment" , 1);
mov_write_string_metadata(s, pb, "\251gen", "genre" , 1);
mov_write_string_metadata(s, pb, "\251cpy", "copyright", 1);
mov_write_string_metadata(s, pb, "\251grp", "grouping" , 1);
mov_write_string_metadata(s, pb, "\251lyr", "lyrics" , 1);
mov_write_string_metadata(s, pb, "desc", "description",1);
mov_write_string_metadata(s, pb, "ldes", "synopsis" , 1);
mov_write_string_metadata(s, pb, "tvsh", "show" , 1);
mov_write_string_metadata(s, pb, "tven", "episode_id",1);
mov_write_string_metadata(s, pb, "tvnn", "network" , 1);
mov_write_int8_metadata (s, pb, "tves", "episode_sort",4);
mov_write_int8_metadata (s, pb, "tvsn", "season_number",4);
mov_write_int8_metadata (s, pb, "stik", "media_type",1);
mov_write_int8_metadata (s, pb, "hdvd", "hd_video", 1);
mov_write_int8_metadata (s, pb, "pgap", "gapless_playback",1);
mov_write_trkn_tag(pb, mov, s, 0); // track number
mov_write_trkn_tag(pb, mov, s, 1); // disc number
mov_write_tmpo_tag(pb, s);
return update_size(pb, pos);
}
 
/* iTunes meta data tag */
static int mov_write_meta_tag(AVIOContext *pb, MOVMuxContext *mov,
AVFormatContext *s)
{
int size = 0;
int64_t pos = avio_tell(pb);
avio_wb32(pb, 0); /* size */
ffio_wfourcc(pb, "meta");
avio_wb32(pb, 0);
mov_write_itunes_hdlr_tag(pb, mov, s);
mov_write_ilst_tag(pb, mov, s);
size = update_size(pb, pos);
return size;
}
 
static int utf8len(const uint8_t *b)
{
int len = 0;
int val;
while (*b) {
GET_UTF8(val, *b++, return -1;)
len++;
}
return len;
}
 
/* Decode a NUL-terminated UTF-8 string and emit each code point as a
 * big-endian 16-bit value, followed by a 16-bit NUL terminator.
 * Returns -1 on an invalid byte sequence, 0 otherwise. */
static int ascii_to_wc(AVIOContext *pb, const uint8_t *b)
{
    int cp;

    while (*b) {
        GET_UTF8(cp, *b++, return -1;)
        avio_wb16(pb, cp);
    }
    avio_wb16(pb, 0x00); /* terminator */
    return 0;
}
 
/* Pack a 3-letter ISO 639-2 language code into the 15-bit MOV encoding:
 * three 5-bit fields of (letter - 0x60). */
static uint16_t language_code(const char *str)
{
    uint16_t code = 0;
    int i;

    for (i = 0; i < 3; i++)
        code = (code << 5) | ((str[i] - 0x60) & 0x1F);
    return code;
}
 
/* Write one 3GPP user-data box (`tag`) from metadata key `str`.
 * "yrrc" stores the year as a 16-bit integer; all other tags store an
 * "eng"-tagged UTF-8 string, and "albm" appends the track number byte. */
static int mov_write_3gp_udta_tag(AVIOContext *pb, AVFormatContext *s,
                                  const char *tag, const char *str)
{
    int64_t pos = avio_tell(pb);
    AVDictionaryEntry *t = av_dict_get(s->metadata, str, NULL, 0);
    if (!t || !utf8len(t->value)) /* skip absent, empty or invalid UTF-8 */
        return 0;
    avio_wb32(pb, 0);   /* size */
    ffio_wfourcc(pb, tag); /* type */
    avio_wb32(pb, 0);   /* version + flags */
    if (!strcmp(tag, "yrrc"))
        avio_wb16(pb, atoi(t->value));
    else {
        avio_wb16(pb, language_code("eng")); /* language */
        avio_write(pb, t->value, strlen(t->value) + 1); /* UTF8 string value */
        if (!strcmp(tag, "albm") &&
            (t = av_dict_get(s->metadata, "track", NULL, 0)))
            avio_w8(pb, atoi(t->value));
    }
    return update_size(pb, pos);
}
 
/* Write a Nero-style 'chpl' chapter list: up to 255 chapters, each with
 * a 64-bit start time in 100ns units and a length-prefixed title. */
static int mov_write_chpl_tag(AVIOContext *pb, AVFormatContext *s)
{
    int64_t pos = avio_tell(pb);
    int i, nb_chapters = FFMIN(s->nb_chapters, 255); /* 8-bit count field */

    avio_wb32(pb, 0);            // size
    ffio_wfourcc(pb, "chpl");
    avio_wb32(pb, 0x01000000);   // version + flags
    avio_wb32(pb, 0);            // unknown
    avio_w8(pb, nb_chapters);

    for (i = 0; i < nb_chapters; i++) {
        AVChapter *c = s->chapters[i];
        AVDictionaryEntry *t;
        /* start time rescaled to 100ns ticks */
        avio_wb64(pb, av_rescale_q(c->start, c->time_base, (AVRational){1,10000000}));

        if ((t = av_dict_get(c->metadata, "title", NULL, 0))) {
            int len = FFMIN(strlen(t->value), 255); /* 8-bit length prefix */
            avio_w8(pb, len);
            avio_write(pb, t->value, len);
        } else
            avio_w8(pb, 0); /* untitled chapter */
    }
    return update_size(pb, pos);
}
 
/* Write the movie-level 'udta' box into a dynamic buffer first, so the
 * box is omitted entirely when no metadata produced any output. Skipped
 * altogether when any track is in bitexact mode (reproducible output).
 * Metadata layout depends on the container flavor: 3GP boxes, classic
 * MOV short-style strings, or an iTunes 'meta' box for MP4-like modes. */
static int mov_write_udta_tag(AVIOContext *pb, MOVMuxContext *mov,
                              AVFormatContext *s)
{
    AVIOContext *pb_buf;
    int i, ret, size;
    uint8_t *buf;

    for (i = 0; i < s->nb_streams; i++)
        if (mov->tracks[i].enc->flags & CODEC_FLAG_BITEXACT) {
            return 0;
        }

    ret = avio_open_dyn_buf(&pb_buf);
    if (ret < 0)
        return ret;

    if (mov->mode & MODE_3GP) {
        mov_write_3gp_udta_tag(pb_buf, s, "perf", "artist");
        mov_write_3gp_udta_tag(pb_buf, s, "titl", "title");
        mov_write_3gp_udta_tag(pb_buf, s, "auth", "author");
        mov_write_3gp_udta_tag(pb_buf, s, "gnre", "genre");
        mov_write_3gp_udta_tag(pb_buf, s, "dscp", "comment");
        mov_write_3gp_udta_tag(pb_buf, s, "albm", "album");
        mov_write_3gp_udta_tag(pb_buf, s, "cprt", "copyright");
        mov_write_3gp_udta_tag(pb_buf, s, "yrrc", "date");
    } else if (mov->mode == MODE_MOV) { // the title field breaks gtkpod with mp4 and my suspicion is that stuff is not valid in mp4
        mov_write_string_metadata(s, pb_buf, "\251ART", "artist", 0);
        mov_write_string_metadata(s, pb_buf, "\251nam", "title", 0);
        mov_write_string_metadata(s, pb_buf, "\251aut", "author", 0);
        mov_write_string_metadata(s, pb_buf, "\251alb", "album", 0);
        mov_write_string_metadata(s, pb_buf, "\251day", "date", 0);
        mov_write_string_metadata(s, pb_buf, "\251swr", "encoder", 0);
        // currently ignored by mov.c
        mov_write_string_metadata(s, pb_buf, "\251des", "comment", 0);
        // add support for libquicktime, this atom is also actually read by mov.c
        mov_write_string_metadata(s, pb_buf, "\251cmt", "comment", 0);
        mov_write_string_metadata(s, pb_buf, "\251gen", "genre", 0);
        mov_write_string_metadata(s, pb_buf, "\251cpy", "copyright", 0);
    } else {
        /* iTunes meta data */
        mov_write_meta_tag(pb_buf, mov, s);
    }

    if (s->nb_chapters)
        mov_write_chpl_tag(pb_buf, s);

    /* only emit udta when the buffered children are non-empty */
    if ((size = avio_close_dyn_buf(pb_buf, &buf)) > 0) {
        avio_wb32(pb, size + 8);
        ffio_wfourcc(pb, "udta");
        avio_write(pb, buf, size);
    }
    av_free(buf);

    return 0;
}
 
/* Write one PSP user-data record: 16-bit size, 32-bit type, language
 * code, and the string as big-endian UTF-16 (via ascii_to_wc).
 * Silently writes nothing for an empty or invalid UTF-8 string. */
static void mov_write_psp_udta_tag(AVIOContext *pb,
                                   const char *str, const char *lang, int type)
{
    int len = utf8len(str) + 1; /* code points + NUL; 0 if utf8len failed */
    if (len <= 0)
        return;
    avio_wb16(pb, len * 2 + 10);        /* size */
    avio_wb32(pb, type);                /* type */
    avio_wb16(pb, language_code(lang)); /* language */
    avio_wb16(pb, 0x01);                /* ? */
    ascii_to_wc(pb, str);
}
 
/* Write the PSP movie-level 'uuid'/USMT/MTDT metadata box, but only when
 * a "title" metadata entry exists; otherwise nothing is written. */
static int mov_write_uuidusmt_tag(AVIOContext *pb, AVFormatContext *s)
{
    AVDictionaryEntry *title = av_dict_get(s->metadata, "title", NULL, 0);
    int64_t pos, pos2;

    if (title) {
        pos = avio_tell(pb);
        avio_wb32(pb, 0); /* size placeholder*/
        ffio_wfourcc(pb, "uuid");
        ffio_wfourcc(pb, "USMT");
        avio_wb32(pb, 0x21d24fce); /* 96 bit UUID */
        avio_wb32(pb, 0xbb88695c);
        avio_wb32(pb, 0xfac9c740);

        pos2 = avio_tell(pb);
        avio_wb32(pb, 0); /* size placeholder*/
        ffio_wfourcc(pb, "MTDT");
        avio_wb16(pb, 4); /* number of records */

        // ?
        avio_wb16(pb, 0x0C);                 /* size */
        avio_wb32(pb, 0x0B);                 /* type */
        avio_wb16(pb, language_code("und")); /* language */
        avio_wb16(pb, 0x0);                  /* ? */
        avio_wb16(pb, 0x021C);               /* data */

        mov_write_psp_udta_tag(pb, LIBAVCODEC_IDENT,      "eng", 0x04);
        mov_write_psp_udta_tag(pb, title->value,          "eng", 0x01);
        mov_write_psp_udta_tag(pb, "2006/04/01 11:11:11", "und", 0x03);

        update_size(pb, pos2); /* patch inner MTDT size first */
        return update_size(pb, pos);
    }

    return 0;
}
 
/* Group consecutive cluster entries into chunks: entries that are
 * physically contiguous in the file are merged into one chunk as long as
 * the chunk stays under 1 MiB. Sets chunkNum on each chunk-starting entry,
 * accumulates samples_in_chunk, and counts chunks in trk->chunkCount.
 * Idempotent: returns early if chunks were already built. */
static void build_chunks(MOVTrack *trk)
{
    int i;
    MOVIentry *chunk = &trk->cluster[0];
    uint64_t chunkSize = chunk->size;
    chunk->chunkNum = 1;
    if (trk->chunkCount)
        return;
    trk->chunkCount = 1;
    for (i = 1; i<trk->entry; i++){
        /* contiguous on disk and merged chunk stays below 1 MiB */
        if (chunk->pos + chunkSize == trk->cluster[i].pos &&
            chunkSize + trk->cluster[i].size < (1<<20)){
            chunkSize             += trk->cluster[i].size;
            chunk->samples_in_chunk += trk->cluster[i].entries;
        } else {
            /* start a new chunk at this entry */
            trk->cluster[i].chunkNum = chunk->chunkNum+1;
            chunk=&trk->cluster[i];
            chunkSize = chunk->size;
            trk->chunkCount++;
        }
    }
}
 
/* Write the top-level 'moov' box: assigns track ids, builds chunk tables,
 * wires up chapter/hint/timecode track references, then emits mvhd, iods
 * (non-MOV), one trak per used track, mvex for fragmented output, and the
 * mode-specific user data (PSP uuid or udta). */
static int mov_write_moov_tag(AVIOContext *pb, MOVMuxContext *mov,
                              AVFormatContext *s)
{
    int i;
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size placeholder*/
    ffio_wfourcc(pb, "moov");

    for (i = 0; i < mov->nb_streams; i++) {
        /* empty tracks are skipped unless we are writing fragments */
        if (mov->tracks[i].entry <= 0 && !(mov->flags & FF_MOV_FLAG_FRAGMENT))
            continue;

        mov->tracks[i].time     = mov->time;
        mov->tracks[i].track_id = i + 1;

        if (mov->tracks[i].entry)
            build_chunks(&mov->tracks[i]);
    }

    /* every stream references the chapter track, if present */
    if (mov->chapter_track)
        for (i = 0; i < s->nb_streams; i++) {
            mov->tracks[i].tref_tag = MKTAG('c','h','a','p');
            mov->tracks[i].tref_id  = mov->tracks[mov->chapter_track].track_id;
        }
    for (i = 0; i < mov->nb_streams; i++) {
        /* rtp hint tracks reference their source track */
        if (mov->tracks[i].tag == MKTAG('r','t','p',' ')) {
            mov->tracks[i].tref_tag = MKTAG('h','i','n','t');
            mov->tracks[i].tref_id =
                mov->tracks[mov->tracks[i].src_track].track_id;
        }
    }
    for (i = 0; i < mov->nb_streams; i++) {
        /* the media track references its timecode track, and the timecode
         * track inherits the media track's duration */
        if (mov->tracks[i].tag == MKTAG('t','m','c','d')) {
            int src_trk = mov->tracks[i].src_track;
            mov->tracks[src_trk].tref_tag = mov->tracks[i].tag;
            mov->tracks[src_trk].tref_id  = mov->tracks[i].track_id;
            mov->tracks[i].track_duration = mov->tracks[src_trk].track_duration;
        }
    }

    mov_write_mvhd_tag(pb, mov);
    if (mov->mode != MODE_MOV && !mov->iods_skip)
        mov_write_iods_tag(pb, mov);
    for (i = 0; i < mov->nb_streams; i++) {
        if (mov->tracks[i].entry > 0 || mov->flags & FF_MOV_FLAG_FRAGMENT) {
            mov_write_trak_tag(pb, mov, &(mov->tracks[i]), i < s->nb_streams ? s->streams[i] : NULL);
        }
    }
    if (mov->flags & FF_MOV_FLAG_FRAGMENT)
        mov_write_mvex_tag(pb, mov); /* QuickTime requires trak to precede this */

    if (mov->mode == MODE_PSP)
        mov_write_uuidusmt_tag(pb, s);
    else
        mov_write_udta_tag(pb, mov, s);

    return update_size(pb, pos);
}
 
/* Emit an integer <param> element into the ISML manifest XML. */
static void param_write_int(AVIOContext *pb, const char *name, int value)
{
    avio_printf(pb, "<param name=\"%s\" value=\"%d\" valuetype=\"data\"/>\n", name, value);
}
 
/* Emit a string <param> element into the ISML manifest XML.
 * NOTE(review): value is not XML-escaped — confirm inputs cannot contain
 * markup characters. */
static void param_write_string(AVIOContext *pb, const char *name, const char *value)
{
    avio_printf(pb, "<param name=\"%s\" value=\"%s\" valuetype=\"data\"/>\n", name, value);
}
 
/* Emit a hex-encoded binary <param> element; input is truncated to what
 * fits in the local buffer (74 bytes -> 148 hex chars). */
static void param_write_hex(AVIOContext *pb, const char *name, const uint8_t *value, int len)
{
    char buf[150];
    /* Clamp negative lengths first: FFMIN compares against sizeof(), so a
     * negative int would be promoted to a huge size_t, survive the min and
     * cause a large out-of-bounds read in ff_data_to_hex. */
    if (len < 0)
        len = 0;
    len = FFMIN((int)(sizeof(buf) / 2 - 1), len);
    ff_data_to_hex(buf, value, len, 0);
    buf[2 * len] = '\0';
    avio_printf(pb, "<param name=\"%s\" value=\"%s\" valuetype=\"data\"/>\n", name, buf);
}
 
/* Write the Smooth Streaming (ISML) manifest as a 'uuid' box containing a
 * SMIL XML document: one <video>/<audio> element per A/V track with codec
 * private data, fourcc and stream parameters. Other track types are
 * skipped. */
static int mov_write_isml_manifest(AVIOContext *pb, MOVMuxContext *mov)
{
    int64_t pos = avio_tell(pb);
    int i;
    static const uint8_t uuid[] = {
        0xa5, 0xd4, 0x0b, 0x30, 0xe8, 0x14, 0x11, 0xdd,
        0xba, 0x2f, 0x08, 0x00, 0x20, 0x0c, 0x9a, 0x66
    };

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "uuid");
    avio_write(pb, uuid, sizeof(uuid));
    avio_wb32(pb, 0); /* version & flags */

    avio_printf(pb, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
    avio_printf(pb, "<smil xmlns=\"http://www.w3.org/2001/SMIL20/Language\">\n");
    avio_printf(pb, "<head>\n");
    avio_printf(pb, "<meta name=\"creator\" content=\"%s\" />\n",
                    LIBAVFORMAT_IDENT);
    avio_printf(pb, "</head>\n");
    avio_printf(pb, "<body>\n");
    avio_printf(pb, "<switch>\n");
    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        const char *type;
        /* track->track_id is initialized in write_moov, and thus isn't known
         * here yet */
        int track_id = i + 1;

        if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            type = "video";
        } else if (track->enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            type = "audio";
        } else {
            continue; /* only A/V tracks appear in the manifest */
        }
        avio_printf(pb, "<%s systemBitrate=\"%d\">\n", type,
                    track->enc->bit_rate);
        param_write_int(pb, "systemBitrate", track->enc->bit_rate);
        param_write_int(pb, "trackID", track_id);
        if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (track->enc->codec_id == AV_CODEC_ID_H264) {
                uint8_t *ptr;
                int size = track->enc->extradata_size;
                /* convert avcC extradata to Annex B for the manifest */
                if (!ff_avc_write_annexb_extradata(track->enc->extradata, &ptr,
                                                   &size)) {
                    param_write_hex(pb, "CodecPrivateData",
                                    ptr ? ptr : track->enc->extradata,
                                    size);
                    av_free(ptr);
                }
                param_write_string(pb, "FourCC", "H264");
            } else if (track->enc->codec_id == AV_CODEC_ID_VC1) {
                param_write_string(pb, "FourCC", "WVC1");
                param_write_hex(pb, "CodecPrivateData", track->enc->extradata,
                                track->enc->extradata_size);
            }
            param_write_int(pb, "MaxWidth", track->enc->width);
            param_write_int(pb, "MaxHeight", track->enc->height);
            param_write_int(pb, "DisplayWidth", track->enc->width);
            param_write_int(pb, "DisplayHeight", track->enc->height);
        } else {
            if (track->enc->codec_id == AV_CODEC_ID_AAC) {
                param_write_string(pb, "FourCC", "AACL");
            } else if (track->enc->codec_id == AV_CODEC_ID_WMAPRO) {
                param_write_string(pb, "FourCC", "WMAP");
            }
            param_write_hex(pb, "CodecPrivateData", track->enc->extradata,
                            track->enc->extradata_size);
            param_write_int(pb, "AudioTag", ff_codec_get_tag(ff_codec_wav_tags,
                                                             track->enc->codec_id));
            param_write_int(pb, "Channels", track->enc->channels);
            param_write_int(pb, "SamplingRate", track->enc->sample_rate);
            param_write_int(pb, "BitsPerSample", 16);
            param_write_int(pb, "PacketSize", track->enc->block_align ?
                                              track->enc->block_align : 4);
        }
        avio_printf(pb, "</%s>\n", type);
    }
    avio_printf(pb, "</switch>\n");
    avio_printf(pb, "</body>\n");
    avio_printf(pb, "</smil>\n");

    return update_size(pb, pos);
}
 
/* Write the fixed 16-byte 'mfhd' box carrying the fragment sequence number. */
static int mov_write_mfhd_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    avio_wb32(pb, 16);              /* fixed box size */
    ffio_wfourcc(pb, "mfhd");
    avio_wb32(pb, 0);               /* version & flags */
    avio_wb32(pb, mov->fragments);  /* fragment sequence number */
    return 0;
}
 
/* Write the 'tfhd' (track fragment header) box. Default duration/size/
 * flags are derived from the first cluster entry; which optional fields
 * are present is controlled by the flags word, adjusted for empty
 * fragments, the omit-tfhd-offset flag and ISM/PIFF restrictions. */
static int mov_write_tfhd_tag(AVIOContext *pb, MOVMuxContext *mov,
                              MOVTrack *track, int64_t moof_offset)
{
    int64_t pos = avio_tell(pb);
    uint32_t flags = MOV_TFHD_DEFAULT_SIZE | MOV_TFHD_DEFAULT_DURATION |
                     MOV_TFHD_BASE_DATA_OFFSET;
    if (!track->entry) {
        flags |= MOV_TFHD_DURATION_IS_EMPTY;
    } else {
        flags |= MOV_TFHD_DEFAULT_FLAGS;
    }
    if (mov->flags & FF_MOV_FLAG_OMIT_TFHD_OFFSET)
        flags &= ~MOV_TFHD_BASE_DATA_OFFSET;

    /* Don't set a default sample size, the silverlight player refuses
     * to play files with that set. Don't set a default sample duration,
     * WMP freaks out if it is set. Don't set a base data offset, PIFF
     * file format says it MUST NOT be set. */
    if (track->mode == MODE_ISM)
        flags &= ~(MOV_TFHD_DEFAULT_SIZE | MOV_TFHD_DEFAULT_DURATION |
                   MOV_TFHD_BASE_DATA_OFFSET);

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "tfhd");
    avio_w8(pb, 0); /* version */
    avio_wb24(pb, flags);

    avio_wb32(pb, track->track_id); /* track-id */
    if (flags & MOV_TFHD_BASE_DATA_OFFSET)
        avio_wb64(pb, moof_offset);
    if (flags & MOV_TFHD_DEFAULT_DURATION) {
        track->default_duration = get_cluster_duration(track, 0);
        avio_wb32(pb, track->default_duration);
    }
    if (flags & MOV_TFHD_DEFAULT_SIZE) {
        track->default_size = track->entry ? track->cluster[0].size : 1;
        avio_wb32(pb, track->default_size);
    } else
        track->default_size = -1; /* force per-sample sizes in trun */

    if (flags & MOV_TFHD_DEFAULT_FLAGS) {
        /* video defaults to non-sync dependent samples, audio to independent */
        track->default_sample_flags =
            track->enc->codec_type == AVMEDIA_TYPE_VIDEO ?
            (MOV_FRAG_SAMPLE_FLAG_DEPENDS_YES | MOV_FRAG_SAMPLE_FLAG_IS_NON_SYNC) :
            MOV_FRAG_SAMPLE_FLAG_DEPENDS_NO;
        avio_wb32(pb, track->default_sample_flags);
    }

    return update_size(pb, pos);
}
 
/* Map a cluster entry to its fragment sample flags: sync samples are
 * independent, everything else depends on other samples and is non-sync. */
static uint32_t get_sample_flags(MOVTrack *track, MOVIentry *entry)
{
    if (entry->flags & MOV_SYNC_SAMPLE)
        return MOV_FRAG_SAMPLE_FLAG_DEPENDS_NO;
    return MOV_FRAG_SAMPLE_FLAG_DEPENDS_YES | MOV_FRAG_SAMPLE_FLAG_IS_NON_SYNC;
}
 
/* Write the 'trun' (track fragment run) box. First pass over the samples
 * decides which per-sample fields differ from the tfhd defaults and thus
 * must be present; then the header and the per-sample table are written.
 * The data offset points past the moof into the following mdat, except
 * for secondary tracks in omit-tfhd-offset mode which use offset 0. */
static int mov_write_trun_tag(AVIOContext *pb, MOVMuxContext *mov,
                              MOVTrack *track, int moof_size)
{
    int64_t pos = avio_tell(pb);
    uint32_t flags = MOV_TRUN_DATA_OFFSET;
    int i;

    for (i = 0; i < track->entry; i++) {
        if (get_cluster_duration(track, i) != track->default_duration)
            flags |= MOV_TRUN_SAMPLE_DURATION;
        if (track->cluster[i].size != track->default_size)
            flags |= MOV_TRUN_SAMPLE_SIZE;
        if (i > 0 && get_sample_flags(track, &track->cluster[i]) != track->default_sample_flags)
            flags |= MOV_TRUN_SAMPLE_FLAGS;
    }
    if (!(flags & MOV_TRUN_SAMPLE_FLAGS))
        flags |= MOV_TRUN_FIRST_SAMPLE_FLAGS; /* only first sample differs */
    if (track->flags & MOV_TRACK_CTTS)
        flags |= MOV_TRUN_SAMPLE_CTS;

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "trun");
    avio_w8(pb, 0); /* version */
    avio_wb24(pb, flags);

    avio_wb32(pb, track->entry); /* sample count */
    if (mov->flags & FF_MOV_FLAG_OMIT_TFHD_OFFSET &&
        !(mov->flags & FF_MOV_FLAG_SEPARATE_MOOF) &&
        track->track_id != 1)
        avio_wb32(pb, 0); /* Later tracks follow immediately after the previous one */
    else
        avio_wb32(pb, moof_size + 8 + track->data_offset +
                      track->cluster[0].pos); /* data offset */
    if (flags & MOV_TRUN_FIRST_SAMPLE_FLAGS)
        avio_wb32(pb, get_sample_flags(track, &track->cluster[0]));

    /* per-sample table: only the fields flagged above are present */
    for (i = 0; i < track->entry; i++) {
        if (flags & MOV_TRUN_SAMPLE_DURATION)
            avio_wb32(pb, get_cluster_duration(track, i));
        if (flags & MOV_TRUN_SAMPLE_SIZE)
            avio_wb32(pb, track->cluster[i].size);
        if (flags & MOV_TRUN_SAMPLE_FLAGS)
            avio_wb32(pb, get_sample_flags(track, &track->cluster[i]));
        if (flags & MOV_TRUN_SAMPLE_CTS)
            avio_wb32(pb, track->cluster[i].cts);
    }

    return update_size(pb, pos);
}
 
/* Write the Smooth Streaming TfxdBox ('uuid' with the tfxd UUID):
 * 64-bit fragment start time and fragment duration. */
static int mov_write_tfxd_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    static const uint8_t uuid[] = {
        0x6d, 0x1d, 0x9b, 0x05, 0x42, 0xd5, 0x44, 0xe6,
        0x80, 0xe2, 0x14, 0x1d, 0xaf, 0xf7, 0x57, 0xb2
    };

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "uuid");
    avio_write(pb, uuid, sizeof(uuid));
    avio_w8(pb, 1); /* version */
    avio_wb24(pb, 0); /* flags */
    avio_wb64(pb, track->frag_start); /* fragment absolute time */
    avio_wb64(pb, track->start_dts + track->track_duration -
                  track->cluster[0].dts); /* fragment duration */

    return update_size(pb, pos);
}
 
/* Rewrite the Smooth Streaming TfrfBox previously reserved for fragment
 * `entry`: seeks back to its saved offset and fills in the time/duration
 * of the fragments that followed it, padding any unused lookahead slots
 * with a 'free' box. Does NOT restore the stream position — callers
 * (mov_write_tfrf_tags) seek back themselves. */
static int mov_write_tfrf_tag(AVIOContext *pb, MOVMuxContext *mov,
                              MOVTrack *track, int entry)
{
    int n = track->nb_frag_info - 1 - entry, i;
    int size = 8 + 16 + 4 + 1 + 16*n; /* header + uuid + ver/flags + count + entries */
    static const uint8_t uuid[] = {
        0xd4, 0x80, 0x7e, 0xf2, 0xca, 0x39, 0x46, 0x95,
        0x8e, 0x54, 0x26, 0xcb, 0x9e, 0x46, 0xa7, 0x9f
    };

    if (entry < 0)
        return 0;

    avio_seek(pb, track->frag_info[entry].tfrf_offset, SEEK_SET);
    avio_wb32(pb, size);
    ffio_wfourcc(pb, "uuid");
    avio_write(pb, uuid, sizeof(uuid));
    avio_w8(pb, 1); /* version */
    avio_wb24(pb, 0); /* flags */
    avio_w8(pb, n); /* fragment count */
    for (i = 0; i < n; i++) {
        int index = entry + 1 + i;
        avio_wb64(pb, track->frag_info[index].time);
        avio_wb64(pb, track->frag_info[index].duration);
    }
    if (n < mov->ism_lookahead) {
        /* pad the remaining reserved space with a free box */
        int free_size = 16 * (mov->ism_lookahead - n);
        avio_wb32(pb, free_size);
        ffio_wfourcc(pb, "free");
        ffio_fill(pb, 0, free_size - 8);
    }

    return 0;
}
 
/* Refresh the tfrf boxes of the last ism_lookahead fragments, then
 * restore the stream position (mov_write_tfrf_tag seeks around). */
static int mov_write_tfrf_tags(AVIOContext *pb, MOVMuxContext *mov,
                               MOVTrack *track)
{
    int64_t saved_pos = avio_tell(pb);
    int i;

    /* nb_frag_info - 1 is the next fragment to be written */
    for (i = 0; i < mov->ism_lookahead; i++)
        mov_write_tfrf_tag(pb, mov, track, track->nb_frag_info - 2 - i);

    avio_seek(pb, saved_pos, SEEK_SET);
    return 0;
}
 
/* Write one 'traf' (track fragment) box: tfhd + trun, and for ISM mode
 * additionally a tfxd box plus a reserved 'free' placeholder where the
 * tfrf lookahead box will later be written in place. */
static int mov_write_traf_tag(AVIOContext *pb, MOVMuxContext *mov,
                              MOVTrack *track, int64_t moof_offset,
                              int moof_size)
{
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "traf");

    mov_write_tfhd_tag(pb, mov, track, moof_offset);
    mov_write_trun_tag(pb, mov, track, moof_size);
    if (mov->mode == MODE_ISM) {
        mov_write_tfxd_tag(pb, track);

        if (mov->ism_lookahead) {
            /* reserve space for a later in-place tfrf rewrite */
            int i, size = 16 + 4 + 1 + 16 * mov->ism_lookahead;

            track->tfrf_offset = avio_tell(pb);
            avio_wb32(pb, 8 + size);
            ffio_wfourcc(pb, "free");
            for (i = 0; i < size; i++)
                avio_w8(pb, 0);
        }
    }

    return update_size(pb, pos);
}
 
/* Write a 'moof' box: mfhd plus one traf per track with samples.
 * `tracks` >= 0 restricts output to that single track index; `moof_size`
 * is the total moof size (0 during the sizing pass, see
 * mov_write_moof_tag) used for trun data offsets. */
static int mov_write_moof_tag_internal(AVIOContext *pb, MOVMuxContext *mov,
                                       int tracks, int moof_size)
{
    int64_t pos = avio_tell(pb);
    int i;

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "moof");

    mov_write_mfhd_tag(pb, mov);
    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        if (tracks >= 0 && i != tracks)
            continue;
        if (!track->entry)
            continue;
        mov_write_traf_tag(pb, mov, track, pos, moof_size);
    }

    return update_size(pb, pos);
}
 
/* Write a 'moof' box in two passes: a sizing pass into a null buffer to
 * learn the final moof size (needed for trun data offsets), then the
 * real write with that size filled in. */
static int mov_write_moof_tag(AVIOContext *pb, MOVMuxContext *mov, int tracks)
{
    AVIOContext *measure;
    int err, moof_size;

    if ((err = ffio_open_null_buf(&measure)) < 0)
        return err;
    mov_write_moof_tag_internal(measure, mov, tracks, 0);
    moof_size = ffio_close_null_buf(measure);

    return mov_write_moof_tag_internal(pb, mov, tracks, moof_size);
}
 
/* Write one 'tfra' (track fragment random access) box, version 1:
 * one (time, moof offset) pair per recorded fragment, with traf/trun/
 * sample numbers all fixed to 1. */
static int mov_write_tfra_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    int i;

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "tfra");
    avio_w8(pb, 1); /* version */
    avio_wb24(pb, 0); /* flags */

    avio_wb32(pb, track->track_id);
    avio_wb32(pb, 0); /* length of traf/trun/sample num */
    avio_wb32(pb, track->nb_frag_info);
    for (i = 0; i < track->nb_frag_info; i++) {
        avio_wb64(pb, track->frag_info[i].time);
        avio_wb64(pb, track->frag_info[i].offset);
        avio_w8(pb, 1); /* traf number */
        avio_w8(pb, 1); /* trun number */
        avio_w8(pb, 1); /* sample number */
    }

    return update_size(pb, pos);
}
 
/* Write the 'mfra' (movie fragment random access) box: one tfra per
 * track that recorded fragment info, terminated by the mandatory mfro
 * box whose last field is the total mfra size. */
static int mov_write_mfra_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    int64_t pos = avio_tell(pb);
    int i;

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "mfra");
    /* An empty mfra atom is enough to indicate to the publishing point that
     * the stream has ended. */
    if (mov->flags & FF_MOV_FLAG_ISML)
        return update_size(pb, pos);

    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        if (track->nb_frag_info)
            mov_write_tfra_tag(pb, track);
    }

    avio_wb32(pb, 16); /* mfro box, fixed size */
    ffio_wfourcc(pb, "mfro");
    avio_wb32(pb, 0); /* version + flags */
    avio_wb32(pb, avio_tell(pb) + 4 - pos); /* total mfra size */

    return update_size(pb, pos);
}
 
/* Open the 'mdat' box, preceded by an 8-byte placeholder box ("wide" for
 * MOV, "free" otherwise) that can later become the 64-bit extended-size
 * header if the payload outgrows 32 bits. Records mdat_pos for patching. */
static int mov_write_mdat_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    avio_wb32(pb, 8); /* placeholder for extended size field (64 bit) */
    ffio_wfourcc(pb, mov->mode == MODE_MOV ? "wide" : "free");

    mov->mdat_pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size placeholder, patched on finalize */
    ffio_wfourcc(pb, "mdat");
    return 0;
}
 
/* TODO: This needs to be more general */
/* Write the 'ftyp' box: major brand, minor version and compatible brands
 * depending on the muxer mode and on whether H.264/video tracks exist. */
static int mov_write_ftyp_tag(AVIOContext *pb, AVFormatContext *s)
{
    MOVMuxContext *mov = s->priv_data;
    int64_t pos = avio_tell(pb);
    int has_h264 = 0, has_video = 0;
    int minor = 0x200;
    int i;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            has_video = 1;
        if (st->codec->codec_id == AV_CODEC_ID_H264)
            has_h264 = 1;
    }

    avio_wb32(pb, 0); /* size */
    ffio_wfourcc(pb, "ftyp");

    /* major brand */
    if (mov->mode == MODE_3GP) {
        ffio_wfourcc(pb, has_h264 ? "3gp6" : "3gp4");
        minor = has_h264 ? 0x100 : 0x200;
    } else if (mov->mode & MODE_3G2) {
        ffio_wfourcc(pb, has_h264 ? "3g2b" : "3g2a");
        minor = has_h264 ? 0x20000 : 0x10000;
    } else if (mov->mode == MODE_PSP)
        ffio_wfourcc(pb, "MSNV");
    else if (mov->mode == MODE_MP4)
        ffio_wfourcc(pb, "isom");
    else if (mov->mode == MODE_IPOD)
        ffio_wfourcc(pb, has_video ? "M4V ":"M4A ");
    else if (mov->mode == MODE_ISM)
        ffio_wfourcc(pb, "isml");
    else if (mov->mode == MODE_F4V)
        ffio_wfourcc(pb, "f4v ");
    else
        ffio_wfourcc(pb, "qt  ");

    avio_wb32(pb, minor); /* minor version */

    /* compatible brands */
    if (mov->mode == MODE_MOV)
        ffio_wfourcc(pb, "qt  ");
    else if (mov->mode == MODE_ISM) {
        ffio_wfourcc(pb, "piff");
        ffio_wfourcc(pb, "iso2");
    } else {
        ffio_wfourcc(pb, "isom");
        ffio_wfourcc(pb, "iso2");
        if (has_h264)
            ffio_wfourcc(pb, "avc1");
    }

    if (mov->mode == MODE_3GP)
        ffio_wfourcc(pb, has_h264 ? "3gp6":"3gp4");
    else if (mov->mode & MODE_3G2)
        ffio_wfourcc(pb, has_h264 ? "3g2b":"3g2a");
    else if (mov->mode == MODE_PSP)
        ffio_wfourcc(pb, "MSNV");
    else if (mov->mode == MODE_MP4)
        ffio_wfourcc(pb, "mp41");
    return update_size(pb, pos);
}
 
/* Write the PSP-specific "PROF" uuid atom describing the A/V streams.
 * NOTE(review): assumes stream 0 is the video stream and stream 1 the
 * audio stream; the caller (mov_write_header, PSP mode) only validates
 * the stream counts, not their order — confirm. */
static void mov_write_uuidprof_tag(AVIOContext *pb, AVFormatContext *s)
{
    AVCodecContext *video_codec = s->streams[0]->codec;
    AVCodecContext *audio_codec = s->streams[1]->codec;
    int audio_rate = audio_codec->sample_rate;
    /* frame rate encoded as a 16.16 fixed-point value */
    int frame_rate = ((video_codec->time_base.den) * (0x10000)) / (video_codec->time_base.num);
    int audio_kbitrate = audio_codec->bit_rate / 1000;
    /* cap the combined bitrate at 800 kbit/s */
    int video_kbitrate = FFMIN(video_codec->bit_rate / 1000, 800 - audio_kbitrate);

    avio_wb32(pb, 0x94); /* size */
    ffio_wfourcc(pb, "uuid");
    ffio_wfourcc(pb, "PROF");

    avio_wb32(pb, 0x21d24fce); /* 96 bit UUID */
    avio_wb32(pb, 0xbb88695c);
    avio_wb32(pb, 0xfac9c740);

    avio_wb32(pb, 0x0); /* ? */
    avio_wb32(pb, 0x3); /* 3 sections ? */

    /* file profile section */
    avio_wb32(pb, 0x14); /* size */
    ffio_wfourcc(pb, "FPRF");
    avio_wb32(pb, 0x0); /* ? */
    avio_wb32(pb, 0x0); /* ? */
    avio_wb32(pb, 0x0); /* ? */

    /* audio profile section */
    avio_wb32(pb, 0x2c); /* size */
    ffio_wfourcc(pb, "APRF"); /* audio */
    avio_wb32(pb, 0x0);
    avio_wb32(pb, 0x2); /* TrackID */
    ffio_wfourcc(pb, "mp4a");
    avio_wb32(pb, 0x20f);
    avio_wb32(pb, 0x0);
    avio_wb32(pb, audio_kbitrate);
    avio_wb32(pb, audio_kbitrate);
    avio_wb32(pb, audio_rate);
    avio_wb32(pb, audio_codec->channels);

    /* video profile section */
    avio_wb32(pb, 0x34); /* size */
    ffio_wfourcc(pb, "VPRF"); /* video */
    avio_wb32(pb, 0x0);
    avio_wb32(pb, 0x1); /* TrackID */
    if (video_codec->codec_id == AV_CODEC_ID_H264) {
        ffio_wfourcc(pb, "avc1");
        avio_wb16(pb, 0x014D); /* profile/compat */
        avio_wb16(pb, 0x0015); /* level */
    } else {
        ffio_wfourcc(pb, "mp4v");
        avio_wb16(pb, 0x0000);
        avio_wb16(pb, 0x0103);
    }
    avio_wb32(pb, 0x0);
    avio_wb32(pb, video_kbitrate);
    avio_wb32(pb, video_kbitrate);
    avio_wb32(pb, frame_rate);
    avio_wb32(pb, frame_rate);
    avio_wb16(pb, video_codec->width);
    avio_wb16(pb, video_codec->height);
    avio_wb32(pb, 0x010001); /* ? */
}
 
static int mov_parse_mpeg2_frame(AVPacket *pkt, uint32_t *flags)
{
    uint32_t state = -1;
    int pos, closed_gop = 0;

    /* Scan MPEG-2 start codes to classify the first coded picture. */
    for (pos = 0; pos < pkt->size - 4; pos++) {
        state = (state << 8) + pkt->data[pos];
        if (state == 0x1b8) {
            /* GOP header: remember whether this GOP is closed. */
            closed_gop = pkt->data[pos + 4] >> 6 & 0x01;
        } else if (state == 0x100) {
            /* Picture header: temporal_reference == 0 (or a closed GOP)
             * means the I picture is not reordered. */
            int temp_ref = (pkt->data[pos + 1] << 2) | (pkt->data[pos + 2] >> 6);
            *flags = (!temp_ref || closed_gop) ? MOV_SYNC_SAMPLE
                                               : MOV_PARTIAL_SYNC_SAMPLE;
            break;
        }
    }
    return 0;
}
 
/* Inspect a VC-1 packet for sequence/entry-point headers and maintain the
 * per-track key-frame bookkeeping. Whether a packet counts as a sync sample
 * depends on whether earlier packets in the stream carried in-band headers,
 * so sync flags of already-buffered samples may be revised retroactively. */
static void mov_parse_vc1_frame(AVPacket *pkt, MOVTrack *trk, int fragment)
{
    const uint8_t *start, *next, *end = pkt->data + pkt->size;
    int seq = 0, entry = 0;
    int key = pkt->flags & AV_PKT_FLAG_KEY;
    /* Walk the start-code markers and note which header types appear. */
    start = find_next_marker(pkt->data, end);
    for (next = start; next < end; start = next) {
        next = find_next_marker(start + 4, end);
        switch (AV_RB32(start)) {
        case VC1_CODE_SEQHDR:
            seq = 1;
            break;
        case VC1_CODE_ENTRYPOINT:
            entry = 1;
            break;
        case VC1_CODE_SLICE:
            trk->vc1_info.slices = 1;
            break;
        }
    }
    if (!trk->entry && !fragment) {
        /* First packet in first fragment */
        trk->vc1_info.first_packet_seq   = seq;
        trk->vc1_info.first_packet_entry = entry;
    } else if ((seq && !trk->vc1_info.packet_seq) ||
               (entry && !trk->vc1_info.packet_entry)) {
        /* First time we see in-band headers mid-stream: previously flagged
         * sync samples were decided under the wrong assumption, clear them. */
        int i;
        for (i = 0; i < trk->entry; i++)
            trk->cluster[i].flags &= ~MOV_SYNC_SAMPLE;
        trk->has_keyframes = 0;
        if (seq)
            trk->vc1_info.packet_seq = 1;
        if (entry)
            trk->vc1_info.packet_entry = 1;
        if (!fragment) {
            /* First fragment */
            if ((!seq || trk->vc1_info.first_packet_seq) &&
                (!entry || trk->vc1_info.first_packet_entry)) {
                /* First packet had the same headers as this one, re-add the
                 * sync sample flag. */
                trk->cluster[0].flags |= MOV_SYNC_SAMPLE;
                trk->has_keyframes = 1;
            }
        }
    }
    /* A packet is a key frame only if it carries every header type that
     * this stream is known to put in-band. */
    if (trk->vc1_info.packet_seq && trk->vc1_info.packet_entry)
        key = seq && entry;
    else if (trk->vc1_info.packet_seq)
        key = seq;
    else if (trk->vc1_info.packet_entry)
        key = entry;
    if (key) {
        trk->cluster[trk->entry].flags |= MOV_SYNC_SAMPLE;
        trk->has_keyframes++;
    }
}
 
/* Flush all buffered samples as a new fragment. In the non-empty-moov case
 * the very first flush writes the initial moov + mdat; subsequent flushes
 * write one moof/mdat pair (or one pair per track with separate-moof). */
static int mov_flush_fragment(AVFormatContext *s)
{
    MOVMuxContext *mov = s->priv_data;
    int i, first_track = -1;
    int64_t mdat_size = 0;

    if (!(mov->flags & FF_MOV_FLAG_FRAGMENT))
        return 0;

    if (!(mov->flags & FF_MOV_FLAG_EMPTY_MOOV) && mov->fragments == 0) {
        /* First fragment without empty_moov: emit the initial moov followed
         * by the buffered samples as a plain mdat. */
        int64_t pos = avio_tell(s->pb);
        uint8_t *buf;
        int buf_size, moov_size;

        for (i = 0; i < mov->nb_streams; i++)
            if (!mov->tracks[i].entry)
                break;
        /* Don't write the initial moov unless all tracks have data */
        if (i < mov->nb_streams)
            return 0;

        /* Chunk offsets must account for the moov that precedes the data. */
        moov_size = get_moov_size(s);
        for (i = 0; i < mov->nb_streams; i++)
            mov->tracks[i].data_offset = pos + moov_size + 8;

        mov_write_moov_tag(s->pb, mov, s);

        buf_size = avio_close_dyn_buf(mov->mdat_buf, &buf);
        mov->mdat_buf = NULL;
        avio_wb32(s->pb, buf_size + 8);
        ffio_wfourcc(s->pb, "mdat");
        avio_write(s->pb, buf, buf_size);
        av_free(buf);

        mov->fragments++;
        mov->mdat_size = 0;
        for (i = 0; i < mov->nb_streams; i++) {
            /* Advance each track's fragment start time past the samples
             * just written, then reset its sample table. */
            if (mov->tracks[i].entry)
                mov->tracks[i].frag_start += mov->tracks[i].start_dts +
                                             mov->tracks[i].track_duration -
                                             mov->tracks[i].cluster[0].dts;
            mov->tracks[i].entry = 0;
        }
        avio_flush(s->pb);
        return 0;
    }

    /* Compute per-track data offsets inside the upcoming shared mdat and
     * find the first track that actually buffered data. */
    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        if (mov->flags & FF_MOV_FLAG_SEPARATE_MOOF)
            track->data_offset = 0;
        else
            track->data_offset = mdat_size;
        if (!track->mdat_buf)
            continue;
        mdat_size += avio_tell(track->mdat_buf);
        if (first_track < 0)
            first_track = i;
    }

    if (!mdat_size)
        return 0;

    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        int buf_size, write_moof = 1, moof_tracks = -1;
        uint8_t *buf;
        int64_t duration = 0;

        if (track->entry)
            duration = track->start_dts + track->track_duration -
                       track->cluster[0].dts;
        if (mov->flags & FF_MOV_FLAG_SEPARATE_MOOF) {
            /* One moof/mdat pair per track. */
            if (!track->mdat_buf)
                continue;
            mdat_size = avio_tell(track->mdat_buf);
            moof_tracks = i;
        } else {
            /* A single moof covering all tracks, written once. */
            write_moof = i == first_track;
        }

        if (write_moof) {
            MOVFragmentInfo *info;
            avio_flush(s->pb);
            /* Record this fragment's position/time for tfra/tfrf boxes. */
            track->nb_frag_info++;
            if (track->nb_frag_info >= track->frag_info_capacity) {
                unsigned new_capacity = track->nb_frag_info + MOV_FRAG_INFO_ALLOC_INCREMENT;
                if (av_reallocp_array(&track->frag_info,
                                      new_capacity,
                                      sizeof(*track->frag_info)))
                    return AVERROR(ENOMEM);
                track->frag_info_capacity = new_capacity;
            }
            info = &track->frag_info[track->nb_frag_info - 1];
            info->offset   = avio_tell(s->pb);
            info->time     = mov->tracks[i].frag_start;
            info->duration = duration;
            mov_write_tfrf_tags(s->pb, mov, track);

            mov_write_moof_tag(s->pb, mov, moof_tracks);
            info->tfrf_offset = track->tfrf_offset;
            mov->fragments++;

            avio_wb32(s->pb, mdat_size + 8);
            ffio_wfourcc(s->pb, "mdat");
        }

        if (track->entry)
            track->frag_start += duration;
        track->entry = 0;
        if (!track->mdat_buf)
            continue;
        buf_size = avio_close_dyn_buf(track->mdat_buf, &buf);
        track->mdat_buf = NULL;

        avio_write(s->pb, buf, buf_size);
        av_free(buf);
    }

    mov->mdat_size = 0;

    avio_flush(s->pb);
    return 0;
}
 
/*
 * Append one packet's payload to the output (or to a fragment buffer) and
 * record its sample-table entry on the track.
 * Returns 0 on success, a negative AVERROR code (or -1) on failure.
 */
int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    MOVMuxContext *mov = s->priv_data;
    AVIOContext *pb = s->pb;
    MOVTrack *trk = &mov->tracks[pkt->stream_index];
    AVCodecContext *enc = trk->enc;
    unsigned int samples_in_chunk = 0;
    int size = pkt->size;
    uint8_t *reformatted_data = NULL;

    /* In fragmented mode, samples go into a dynamic buffer that is later
     * emitted by mov_flush_fragment(): a shared buffer before the first
     * fragment, per-track buffers afterwards. */
    if (mov->flags & FF_MOV_FLAG_FRAGMENT) {
        int ret;
        if (mov->fragments > 0) {
            if (!trk->mdat_buf) {
                if ((ret = avio_open_dyn_buf(&trk->mdat_buf)) < 0)
                    return ret;
            }
            pb = trk->mdat_buf;
        } else {
            if (!mov->mdat_buf) {
                if ((ret = avio_open_dyn_buf(&mov->mdat_buf)) < 0)
                    return ret;
            }
            pb = mov->mdat_buf;
        }
    }

    if (enc->codec_id == AV_CODEC_ID_AMR_NB) {
        /* We must find out how many AMR blocks there are in one packet */
        static const uint16_t packed_size[16] =
            {13, 14, 16, 18, 20, 21, 27, 32, 6, 0, 0, 0, 0, 0, 0, 1};
        int len = 0;

        while (len < size && samples_in_chunk < 100) {
            len += packed_size[(pkt->data[len] >> 3) & 0x0F];
            samples_in_chunk++;
        }
        if (samples_in_chunk > 1) {
            av_log(s, AV_LOG_ERROR, "fatal error, input is not a single packet, implement a AVParser for it\n");
            return -1;
        }
    } else if (enc->codec_id == AV_CODEC_ID_ADPCM_MS ||
               enc->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV) {
        samples_in_chunk = enc->frame_size;
    } else if (trk->sample_size)
        samples_in_chunk = size / trk->sample_size;
    else
        samples_in_chunk = 1;

    /* copy extradata if it exists */
    if (trk->vos_len == 0 && enc->extradata_size > 0) {
        trk->vos_len = enc->extradata_size;
        trk->vos_data = av_malloc(trk->vos_len);
        /* Fixed: allocation result was previously passed to memcpy()
         * unchecked (the DNXHD/AC3 copy below already checked it). */
        if (!trk->vos_data) {
            trk->vos_len = 0;
            return AVERROR(ENOMEM);
        }
        memcpy(trk->vos_data, enc->extradata, trk->vos_len);
    }

    if (enc->codec_id == AV_CODEC_ID_AAC && pkt->size > 2 &&
        (AV_RB16(pkt->data) & 0xfff0) == 0xfff0) {
        /* ADTS syncword found: raw AAC must not carry ADTS headers. */
        if (!s->streams[pkt->stream_index]->nb_frames) {
            av_log(s, AV_LOG_ERROR, "Malformed AAC bitstream detected: "
                   "use audio bitstream filter 'aac_adtstoasc' to fix it "
                   "('-bsf:a aac_adtstoasc' option with ffmpeg)\n");
            return -1;
        }
        av_log(s, AV_LOG_WARNING, "aac bitstream error\n");
    }
    if (enc->codec_id == AV_CODEC_ID_H264 && trk->vos_len > 0 && *(uint8_t *)trk->vos_data != 1) {
        /* from x264 or from bytestream h264 */
        /* nal reformating needed */
        if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) {
            /* Keep a reformatted copy around for the hint track below. */
            ff_avc_parse_nal_units_buf(pkt->data, &reformatted_data,
                                       &size);
            avio_write(pb, reformatted_data, size);
        } else {
            size = ff_avc_parse_nal_units(pb, pkt->data, pkt->size);
        }
    } else {
        avio_write(pb, pkt->data, size);
    }

    if ((enc->codec_id == AV_CODEC_ID_DNXHD ||
         enc->codec_id == AV_CODEC_ID_AC3) && !trk->vos_len) {
        /* copy frame to create needed atoms */
        trk->vos_len = size;
        trk->vos_data = av_malloc(size);
        if (!trk->vos_data) {
            av_free(reformatted_data); /* fixed: don't leak on OOM */
            return AVERROR(ENOMEM);
        }
        memcpy(trk->vos_data, pkt->data, size);
    }

    /* Grow the sample table if needed. */
    if (trk->entry >= trk->cluster_capacity) {
        unsigned new_capacity = 2 * (trk->entry + MOV_INDEX_CLUSTER_SIZE);
        if (av_reallocp_array(&trk->cluster, new_capacity,
                              sizeof(*trk->cluster))) {
            av_free(reformatted_data); /* fixed: don't leak on OOM */
            return AVERROR(ENOMEM);
        }
        trk->cluster_capacity = new_capacity;
    }

    trk->cluster[trk->entry].pos              = avio_tell(pb) - size;
    trk->cluster[trk->entry].samples_in_chunk = samples_in_chunk;
    trk->cluster[trk->entry].chunkNum         = 0;
    trk->cluster[trk->entry].size             = size;
    trk->cluster[trk->entry].entries          = samples_in_chunk;
    trk->cluster[trk->entry].dts              = pkt->dts;
    if (!trk->entry && trk->start_dts != AV_NOPTS_VALUE) {
        /* First packet of a new fragment. We already wrote the duration
         * of the last packet of the previous fragment based on track_duration,
         * which might not exactly match our dts. Therefore adjust the dts
         * of this packet to be what the previous packets duration implies. */
        trk->cluster[trk->entry].dts = trk->start_dts + trk->track_duration;
    }
    if (!trk->entry && trk->start_dts == AV_NOPTS_VALUE && !supports_edts(mov)) {
        trk->cluster[trk->entry].dts = trk->start_dts = 0;
    }
    if (trk->start_dts == AV_NOPTS_VALUE)
        trk->start_dts = pkt->dts;
    trk->track_duration = pkt->dts - trk->start_dts + pkt->duration;
    trk->last_sample_is_subtitle_end = 0;

    if (pkt->pts == AV_NOPTS_VALUE) {
        av_log(s, AV_LOG_WARNING, "pts has no value\n");
        pkt->pts = pkt->dts;
    }
    if (pkt->dts != pkt->pts)
        trk->flags |= MOV_TRACK_CTTS;
    trk->cluster[trk->entry].cts   = pkt->pts - pkt->dts;
    trk->cluster[trk->entry].flags = 0;
    if (enc->codec_id == AV_CODEC_ID_VC1) {
        mov_parse_vc1_frame(pkt, trk, mov->fragments);
    } else if (pkt->flags & AV_PKT_FLAG_KEY) {
        if (mov->mode == MODE_MOV && enc->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
            trk->entry > 0) { // force sync sample for the first key frame
            mov_parse_mpeg2_frame(pkt, &trk->cluster[trk->entry].flags);
            if (trk->cluster[trk->entry].flags & MOV_PARTIAL_SYNC_SAMPLE)
                trk->flags |= MOV_TRACK_STPS;
        } else {
            trk->cluster[trk->entry].flags = MOV_SYNC_SAMPLE;
        }
        if (trk->cluster[trk->entry].flags & MOV_SYNC_SAMPLE)
            trk->has_keyframes++;
    }
    trk->entry++;
    trk->sample_count += samples_in_chunk;
    mov->mdat_size    += size;

    if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams)
        ff_mov_add_hinted_packet(s, pkt, trk->hint_track, trk->entry,
                                 reformatted_data, size);
    av_free(reformatted_data);
    return 0;
}
 
static int mov_write_single_packet(AVFormatContext *s, AVPacket *pkt)
{
    MOVMuxContext *mov = s->priv_data;
    MOVTrack *track = &mov->tracks[pkt->stream_index];
    AVCodecContext *codec = track->enc;
    int64_t elapsed = 0;
    int pkt_size = pkt->size;
    int hit_duration, hit_size, hit_keyframe;

    /* Zero-sized packets carry no samples; drop them silently. */
    if (!pkt->size)
        return 0;

    /* Duration of the current fragment so far, in AV_TIME_BASE units. */
    if (track->entry && pkt->stream_index < s->nb_streams)
        elapsed = av_rescale_q(pkt->dts - track->cluster[0].dts,
                               s->streams[pkt->stream_index]->time_base,
                               AV_TIME_BASE_Q);

    hit_duration = mov->max_fragment_duration &&
                   elapsed >= mov->max_fragment_duration;
    hit_size     = mov->max_fragment_size &&
                   mov->mdat_size + pkt_size >= mov->max_fragment_size;
    hit_keyframe = (mov->flags & FF_MOV_FLAG_FRAG_KEYFRAME) &&
                   codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                   track->entry && (pkt->flags & AV_PKT_FLAG_KEY);

    /* Cut a new fragment when any limit is reached, but never produce
     * fragments shorter than the configured minimum duration. */
    if ((hit_duration || hit_size || hit_keyframe) &&
        elapsed >= mov->min_fragment_duration)
        mov_flush_fragment(s);

    return ff_mov_write_packet(s, pkt);
}
 
static int mov_write_subtitle_end_packet(AVFormatContext *s,
                                         int stream_index,
                                         int64_t dts) {
    uint8_t payload[2] = { 0 };
    AVPacket end_pkt;
    int ret;

    /* Build a minimal two-byte empty text sample that terminates the
     * previous subtitle at the given timestamp. */
    av_init_packet(&end_pkt);
    end_pkt.data         = payload;
    end_pkt.size         = sizeof(payload);
    end_pkt.pts          = dts;
    end_pkt.dts          = dts;
    end_pkt.duration     = 0;
    end_pkt.stream_index = stream_index;

    ret = mov_write_single_packet(s, &end_pkt);
    av_free_packet(&end_pkt);

    return ret;
}
 
/* Muxer entry point for one packet. A NULL packet requests a fragment
 * flush; otherwise dangling subtitle samples are closed off before the
 * packet is forwarded to mov_write_single_packet(). */
static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    if (!pkt) {
        mov_flush_fragment(s);
        return 1;
    } else {
        int i;
        MOVMuxContext *mov = s->priv_data;

        if (!pkt->size) return 0; /* Discard 0 sized packets */

        /*
         * Subtitles require special handling.
         *
         * 1) For full compliance, every track must have a sample at
         * dts == 0, which is rarely true for subtitles. So, as soon
         * as we see any packet with dts > 0, write an empty subtitle
         * at dts == 0 for any subtitle track with no samples in it.
         *
         * 2) For each subtitle track, check if the current packet's
         * dts is past the duration of the last subtitle sample. If
         * so, we now need to write an end sample for that subtitle.
         *
         * This must be done conditionally to allow for subtitles that
         * immediately replace each other, in which case an end sample
         * is not needed, and is, in fact, actively harmful.
         *
         * 3) See mov_write_trailer for how the final end sample is
         * handled.
         */
        for (i = 0; i < mov->nb_streams; i++) {
            MOVTrack *trk = &mov->tracks[i];
            int ret;

            if (trk->enc->codec_id == AV_CODEC_ID_MOV_TEXT &&
                trk->track_duration < pkt->dts &&
                (trk->entry == 0 || !trk->last_sample_is_subtitle_end)) {
                ret = mov_write_subtitle_end_packet(s, i, trk->track_duration);
                if (ret < 0) return ret;
                trk->last_sample_is_subtitle_end = 1;
            }
        }

        return mov_write_single_packet(s, pkt);
    }
}
 
// QuickTime chapters involve an additional text track with the chapter names
// as samples, and a tref pointing from the other tracks to the chapter one.
// Returns 0 on success, a negative AVERROR code on allocation failure.
static int mov_create_chapter_track(AVFormatContext *s, int tracknum)
{
    AVIOContext *pb;

    MOVMuxContext *mov = s->priv_data;
    MOVTrack *track = &mov->tracks[tracknum];
    AVPacket pkt = { .stream_index = tracknum, .flags = AV_PKT_FLAG_KEY };
    int i, len;

    track->mode = mov->mode;
    track->tag = MKTAG('t','e','x','t');
    track->timescale = MOV_TIMESCALE;
    track->enc = avcodec_alloc_context3(NULL);
    if (!track->enc)
        return AVERROR(ENOMEM);
    track->enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
#if 0
    // These properties are required to make QT recognize the chapter track
    uint8_t chapter_properties[43] = { 0, 0, 0, 0, 0, 0, 0, 1, };
    if (ff_alloc_extradata(track->enc, sizeof(chapter_properties)))
        return AVERROR(ENOMEM);
    memcpy(track->enc->extradata, chapter_properties, sizeof(chapter_properties));
#else
    if (avio_open_dyn_buf(&pb) >= 0) {
        int size;
        uint8_t *buf;

        /* Stub header (usually for Quicktime chapter track) */
        // TextSampleEntry
        avio_wb32(pb, 0x01); // displayFlags
        avio_w8(pb, 0x00);   // horizontal justification
        avio_w8(pb, 0x00);   // vertical justification
        avio_w8(pb, 0x00);   // bgColourRed
        avio_w8(pb, 0x00);   // bgColourGreen
        avio_w8(pb, 0x00);   // bgColourBlue
        avio_w8(pb, 0x00);   // bgColourAlpha
        // BoxRecord
        avio_wb16(pb, 0x00); // defTextBoxTop
        avio_wb16(pb, 0x00); // defTextBoxLeft
        avio_wb16(pb, 0x00); // defTextBoxBottom
        avio_wb16(pb, 0x00); // defTextBoxRight
        // StyleRecord
        avio_wb16(pb, 0x00); // startChar
        avio_wb16(pb, 0x00); // endChar
        avio_wb16(pb, 0x01); // fontID
        avio_w8(pb, 0x00);   // fontStyleFlags
        avio_w8(pb, 0x00);   // fontSize
        avio_w8(pb, 0x00);   // fgColourRed
        avio_w8(pb, 0x00);   // fgColourGreen
        avio_w8(pb, 0x00);   // fgColourBlue
        avio_w8(pb, 0x00);   // fgColourAlpha
        // FontTableBox
        avio_wb32(pb, 0x0D); // box size
        ffio_wfourcc(pb, "ftab"); // box atom name
        avio_wb16(pb, 0x01); // entry count
        // FontRecord
        avio_wb16(pb, 0x01); // font ID
        avio_w8(pb, 0x00);   // font name length

        if ((size = avio_close_dyn_buf(pb, &buf)) > 0) {
            track->enc->extradata = buf;
            track->enc->extradata_size = size;
        } else {
            /* Fixed: was av_free(&buf), which freed the address of the
             * local pointer variable instead of the buffer itself. */
            av_free(buf);
        }
    }
#endif

    /* One text sample per chapter: 16-bit length prefix + title bytes. */
    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *c = s->chapters[i];
        AVDictionaryEntry *t;

        int64_t end = av_rescale_q(c->end, c->time_base, (AVRational){1,MOV_TIMESCALE});
        pkt.pts = pkt.dts = av_rescale_q(c->start, c->time_base, (AVRational){1,MOV_TIMESCALE});
        pkt.duration = end - pkt.dts;

        if ((t = av_dict_get(c->metadata, "title", NULL, 0))) {
            len      = strlen(t->value);
            pkt.size = len + 2;
            pkt.data = av_malloc(pkt.size);
            if (!pkt.data)
                return AVERROR(ENOMEM);
            AV_WB16(pkt.data, len);
            memcpy(pkt.data + 2, t->value, len);
            ff_mov_write_packet(s, &pkt);
            av_freep(&pkt.data);
        }
    }

    return 0;
}
 
/*
 * Create a "tmcd" timecode track tied to video stream src_index and write
 * its single 4-byte sample (the starting frame number).
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int mov_create_timecode_track(AVFormatContext *s, int index, int src_index, const char *tcstr)
{
    int ret;
    MOVMuxContext *mov = s->priv_data;
    MOVTrack *track = &mov->tracks[index];
    AVStream *src_st = s->streams[src_index];
    AVTimecode tc;
    AVPacket pkt = {.stream_index = index, .flags = AV_PKT_FLAG_KEY, .size = 4};
    AVRational rate = find_fps(s, src_st);

    /* compute the frame number */
    ret = av_timecode_init_from_string(&tc, rate, tcstr, s);
    if (ret < 0)
        return ret;

    /* tmcd track based on video stream */
    track->mode = mov->mode;
    track->tag = MKTAG('t','m','c','d');
    track->src_track = src_index;
    track->timescale = mov->tracks[src_index].timescale;
    if (tc.flags & AV_TIMECODE_FLAG_DROPFRAME)
        track->timecode_flags |= MOV_TIMECODE_FLAG_DROPFRAME;

    /* encode context: tmcd data stream */
    track->enc = avcodec_alloc_context3(NULL);
    /* Fixed: allocation result was dereferenced without a NULL check. */
    if (!track->enc)
        return AVERROR(ENOMEM);
    track->enc->codec_type = AVMEDIA_TYPE_DATA;
    track->enc->codec_tag = track->tag;
    track->enc->time_base = av_inv_q(rate);

    /* the tmcd track just contains one packet with the frame number */
    pkt.data = av_malloc(pkt.size);
    /* Fixed: AV_WB32 would otherwise write through a NULL pointer. */
    if (!pkt.data)
        return AVERROR(ENOMEM);
    AV_WB32(pkt.data, tc.start);
    ret = ff_mov_write_packet(s, &pkt);
    av_free(pkt.data);
    return ret;
}
 
/*
 * st->disposition controls the "enabled" flag in the tkhd tag.
 * QuickTime will not play a track if it is not enabled. So make sure
 * that one track of each type (audio, video, subtitle) is enabled.
 *
 * Subtitles are special. For audio and video, setting "enabled" also
 * makes the track "default" (i.e. it is rendered when played). For
 * subtitles, an "enabled" subtitle is not rendered by default, but
 * if no subtitle is enabled, the subtitle menu in QuickTime will be
 * empty!
 */
static void enable_tracks(AVFormatContext *s)
{
    MOVMuxContext *mov = s->priv_data;
    int idx;
    uint8_t have_enabled[AVMEDIA_TYPE_NB];
    int first_of_type[AVMEDIA_TYPE_NB];

    for (idx = 0; idx < AVMEDIA_TYPE_NB; idx++) {
        have_enabled[idx] = 0;
        first_of_type[idx] = -1;
    }

    /* First pass: honor explicit default dispositions and remember the
     * first stream of each media type. */
    for (idx = 0; idx < s->nb_streams; idx++) {
        AVStream *st = s->streams[idx];
        enum AVMediaType type = st->codec->codec_type;

        if (type <= AVMEDIA_TYPE_UNKNOWN || type >= AVMEDIA_TYPE_NB)
            continue;

        if (first_of_type[type] < 0)
            first_of_type[type] = idx;
        if (st->disposition & AV_DISPOSITION_DEFAULT) {
            mov->tracks[idx].flags |= MOV_TRACK_ENABLED;
            have_enabled[type] = 1;
        }
    }

    /* Second pass: if no track of a type was enabled explicitly, enable
     * the first one of that type. */
    for (idx = 0; idx < AVMEDIA_TYPE_NB; idx++) {
        switch (idx) {
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_AUDIO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!have_enabled[idx] && first_of_type[idx] >= 0)
                mov->tracks[first_of_type[idx]].flags |= MOV_TRACK_ENABLED;
            break;
        }
    }
}
 
static void mov_free(AVFormatContext *s)
{
    MOVMuxContext *mov = s->priv_data;
    int idx;

    /* The chapter track owns its own codec context (and extradata). */
    if (mov->chapter_track) {
        if (mov->tracks[mov->chapter_track].enc)
            av_freep(&mov->tracks[mov->chapter_track].enc->extradata);
        av_freep(&mov->tracks[mov->chapter_track].enc);
    }

    for (idx = 0; idx < mov->nb_streams; idx++) {
        MOVTrack *trk = &mov->tracks[idx];

        if (trk->tag == MKTAG('r','t','p',' '))
            ff_mov_close_hinting(trk);
        else if (trk->tag == MKTAG('t','m','c','d') && mov->nb_meta_tmcd)
            /* tmcd tracks created from metadata own their codec context */
            av_freep(&trk->enc);
        av_freep(&trk->cluster);
        av_freep(&trk->frag_info);

        if (trk->vos_len)
            av_freep(&trk->vos_data);
    }

    av_freep(&mov->tracks);
}
 
/* Muxer init: pick the container mode, validate options, size and set up
 * the track array (streams + chapter/hint/tmcd extras), and write the
 * leading ftyp (and, depending on mode, mdat/moov) atoms.
 * Returns 0 on success, a negative error code on failure. */
static int mov_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    MOVMuxContext *mov = s->priv_data;
    AVDictionaryEntry *t, *global_tcr = av_dict_get(s->metadata, "timecode", NULL, 0);
    int i, hint_track = 0, tmcd_track = 0;

    /* Default mode == MP4 */
    mov->mode = MODE_MP4;

    if (s->oformat != NULL) {
        if (!strcmp("3gp", s->oformat->name)) mov->mode = MODE_3GP;
        else if (!strcmp("3g2", s->oformat->name)) mov->mode = MODE_3GP|MODE_3G2;
        else if (!strcmp("mov", s->oformat->name)) mov->mode = MODE_MOV;
        else if (!strcmp("psp", s->oformat->name)) mov->mode = MODE_PSP;
        else if (!strcmp("ipod",s->oformat->name)) mov->mode = MODE_IPOD;
        else if (!strcmp("ismv",s->oformat->name)) mov->mode = MODE_ISM;
        else if (!strcmp("f4v", s->oformat->name)) mov->mode = MODE_F4V;
    }

    /* Set the FRAGMENT flag if any of the fragmentation methods are
     * enabled. */
    if (mov->max_fragment_duration || mov->max_fragment_size ||
        mov->flags & (FF_MOV_FLAG_EMPTY_MOOV |
                      FF_MOV_FLAG_FRAG_KEYFRAME |
                      FF_MOV_FLAG_FRAG_CUSTOM))
        mov->flags |= FF_MOV_FLAG_FRAGMENT;

    /* Set other implicit flags immediately */
    if (mov->mode == MODE_ISM)
        mov->flags |= FF_MOV_FLAG_EMPTY_MOOV | FF_MOV_FLAG_SEPARATE_MOOF |
                      FF_MOV_FLAG_FRAGMENT;

    /* faststart: moov at the beginning of the file, if supported */
    if (mov->flags & FF_MOV_FLAG_FASTSTART) {
        if ((mov->flags & FF_MOV_FLAG_FRAGMENT) ||
            (s->flags & AVFMT_FLAG_CUSTOM_IO)) {
            av_log(s, AV_LOG_WARNING, "The faststart flag is incompatible "
                   "with fragmentation and custom IO, disabling faststart\n");
            mov->flags &= ~FF_MOV_FLAG_FASTSTART;
        } else
            mov->reserved_moov_size = -1;
    }

    if (!supports_edts(mov) && s->avoid_negative_ts < 0) {
        s->avoid_negative_ts = 1;
    }

    /* Non-seekable output is ok if using fragmentation. If ism_lookahead
     * is enabled, we don't support non-seekable output at all. */
    if (!s->pb->seekable &&
        (!(mov->flags & FF_MOV_FLAG_FRAGMENT) || mov->ism_lookahead)) {
        av_log(s, AV_LOG_ERROR, "muxer does not support non seekable output\n");
        return AVERROR(EINVAL);
    }

    mov_write_ftyp_tag(pb,s);
    if (mov->mode == MODE_PSP) {
        /* PSP mode demands exactly one video and one audio stream. */
        int video_streams_nb = 0, audio_streams_nb = 0, other_streams_nb = 0;
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
                video_streams_nb++;
            else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
                audio_streams_nb++;
            else
                other_streams_nb++;
        }

        if (video_streams_nb != 1 || audio_streams_nb != 1 || other_streams_nb) {
            av_log(s, AV_LOG_ERROR, "PSP mode need one video and one audio stream\n");
            return AVERROR(EINVAL);
        }
        mov_write_uuidprof_tag(pb, s);
    }

    /* Count the extra (non-AVStream) tracks: chapters, hints, tmcd. */
    mov->nb_streams = s->nb_streams;
    if (mov->mode & (MODE_MP4|MODE_MOV|MODE_IPOD) && s->nb_chapters)
        mov->chapter_track = mov->nb_streams++;

    if (mov->flags & FF_MOV_FLAG_RTP_HINT) {
        /* Add hint tracks for each audio and video stream */
        hint_track = mov->nb_streams;
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
                st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                mov->nb_streams++;
            }
        }
    }

    if (mov->mode == MODE_MOV) {
        tmcd_track = mov->nb_streams;

        /* +1 tmcd track for each video stream with a timecode */
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                (global_tcr || av_dict_get(st->metadata, "timecode", NULL, 0)))
                mov->nb_meta_tmcd++;
        }

        /* check if there is already a tmcd track to remux */
        if (mov->nb_meta_tmcd) {
            for (i = 0; i < s->nb_streams; i++) {
                AVStream *st = s->streams[i];
                if (st->codec->codec_tag == MKTAG('t','m','c','d')) {
                    av_log(s, AV_LOG_WARNING, "You requested a copy of the original timecode track "
                           "so timecode metadata are now ignored\n");
                    mov->nb_meta_tmcd = 0;
                }
            }
        }

        mov->nb_streams += mov->nb_meta_tmcd;
    }

    // Reserve an extra stream for chapters for the case where chapters
    // are written in the trailer
    mov->tracks = av_mallocz((mov->nb_streams + 1) * sizeof(*mov->tracks));
    if (!mov->tracks)
        return AVERROR(ENOMEM);

    /* Per-stream track setup: tag, language, timescale, extradata. */
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st= s->streams[i];
        MOVTrack *track= &mov->tracks[i];
        AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL,0);

        track->enc = st->codec;
        track->st = st;
        track->language = ff_mov_iso639_to_lang(lang?lang->value:"und", mov->mode!=MODE_MOV);
        if (track->language < 0)
            track->language = 0;
        track->mode = mov->mode;
        track->tag = mov_find_codec_tag(s, track);
        if (!track->tag) {
            av_log(s, AV_LOG_ERROR, "track %d: could not find tag, "
                   "codec not currently supported in container\n", i);
            goto error;
        }
        /* If hinting of this track is enabled by a later hint track,
         * this is updated. */
        track->hint_track = -1;
        track->start_dts = AV_NOPTS_VALUE;
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (track->tag == MKTAG('m','x','3','p') || track->tag == MKTAG('m','x','3','n') ||
                track->tag == MKTAG('m','x','4','p') || track->tag == MKTAG('m','x','4','n') ||
                track->tag == MKTAG('m','x','5','p') || track->tag == MKTAG('m','x','5','n')) {
                if (st->codec->width != 720 || (st->codec->height != 608 && st->codec->height != 512)) {
                    av_log(s, AV_LOG_ERROR, "D-10/IMX must use 720x608 or 720x512 video resolution\n");
                    goto error;
                }
                track->height = track->tag >> 24 == 'n' ? 486 : 576;
            }
            if (mov->video_track_timescale) {
                track->timescale = mov->video_track_timescale;
            } else {
                /* Default: codec timebase scaled up to at least 10000. */
                track->timescale = st->codec->time_base.den;
                while(track->timescale < 10000)
                    track->timescale *= 2;
            }
            if (track->mode == MODE_MOV && track->timescale > 100000)
                av_log(s, AV_LOG_WARNING,
                       "WARNING codec timebase is very high. If duration is too long,\n"
                       "file may not be playable by quicktime. Specify a shorter timebase\n"
                       "or choose different container.\n");
        } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            track->timescale = st->codec->sample_rate;
            if (!st->codec->frame_size && !av_get_bits_per_sample(st->codec->codec_id)) {
                av_log(s, AV_LOG_WARNING, "track %d: codec frame size is not set\n", i);
                track->audio_vbr = 1;
            }else if (st->codec->codec_id == AV_CODEC_ID_ADPCM_MS ||
                      st->codec->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV ||
                      st->codec->codec_id == AV_CODEC_ID_ILBC){
                if (!st->codec->block_align) {
                    av_log(s, AV_LOG_ERROR, "track %d: codec block align is not set for adpcm\n", i);
                    goto error;
                }
                track->sample_size = st->codec->block_align;
            }else if (st->codec->frame_size > 1){ /* assume compressed audio */
                track->audio_vbr = 1;
            }else{
                track->sample_size = (av_get_bits_per_sample(st->codec->codec_id) >> 3) * st->codec->channels;
            }
            if (st->codec->codec_id == AV_CODEC_ID_ILBC) {
                track->audio_vbr = 1;
            }
            if (track->mode != MODE_MOV &&
                track->enc->codec_id == AV_CODEC_ID_MP3 && track->timescale < 16000) {
                av_log(s, AV_LOG_ERROR, "track %d: muxing mp3 at %dhz is not supported\n",
                       i, track->enc->sample_rate);
                goto error;
            }
        } else if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
            track->timescale = st->codec->time_base.den;
        } else if (st->codec->codec_type == AVMEDIA_TYPE_DATA) {
            track->timescale = st->codec->time_base.den;
        } else {
            track->timescale = MOV_TIMESCALE;
        }
        if (!track->height)
            track->height = st->codec->height;
        /* The ism specific timescale isn't mandatory, but is assumed by
         * some tools, such as mp4split. */
        if (mov->mode == MODE_ISM)
            track->timescale = 10000000;

        avpriv_set_pts_info(st, 64, 1, track->timescale);

        /* copy extradata if it exists */
        if (st->codec->extradata_size) {
            track->vos_len = st->codec->extradata_size;
            track->vos_data = av_malloc(track->vos_len);
            /* NOTE(review): allocation result is not checked before the
             * memcpy below — confirm and consider handling ENOMEM. */
            memcpy(track->vos_data, st->codec->extradata, track->vos_len);
        }
    }

    enable_tracks(s);

    if (mov->mode == MODE_ISM) {
        /* If no fragmentation options have been set, set a default. */
        if (!(mov->flags & (FF_MOV_FLAG_FRAG_KEYFRAME |
                            FF_MOV_FLAG_FRAG_CUSTOM)) &&
            !mov->max_fragment_duration && !mov->max_fragment_size)
            mov->flags |= FF_MOV_FLAG_FRAG_KEYFRAME;
    }

    if (mov->reserved_moov_size){
        mov->reserved_moov_pos= avio_tell(pb);
        if (mov->reserved_moov_size > 0)
            avio_skip(pb, mov->reserved_moov_size);
    }

    if (!(mov->flags & FF_MOV_FLAG_FRAGMENT)) {
        if (mov->flags & FF_MOV_FLAG_FASTSTART)
            mov->reserved_moov_pos = avio_tell(pb);
        mov_write_mdat_tag(pb, mov);
    }

    if (t = av_dict_get(s->metadata, "creation_time", NULL, 0))
        mov->time = ff_iso8601_to_unix_time(t->value);
    if (mov->time)
        mov->time += 0x7C25B080; // 1970 based -> 1904 based

    if (mov->chapter_track)
        if (mov_create_chapter_track(s, mov->chapter_track) < 0)
            goto error;

    if (mov->flags & FF_MOV_FLAG_RTP_HINT) {
        /* Initialize the hint tracks for each audio and video stream */
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
                st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                if (ff_mov_init_hinting(s, hint_track, i) < 0)
                    goto error;
                hint_track++;
            }
        }
    }

    if (mov->nb_meta_tmcd) {
        /* Initialize the tmcd tracks */
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            t = global_tcr;

            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                if (!t)
                    t = av_dict_get(st->metadata, "timecode", NULL, 0);
                if (!t)
                    continue;
                if (mov_create_timecode_track(s, tmcd_track, i, t->value) < 0)
                    goto error;
                tmcd_track++;
            }
        }
    }

    avio_flush(pb);

    if (mov->flags & FF_MOV_FLAG_ISML)
        mov_write_isml_manifest(pb, mov);

    if (mov->flags & FF_MOV_FLAG_EMPTY_MOOV) {
        mov_write_moov_tag(pb, mov, s);
        mov->fragments++;
    }

    return 0;
error:
    mov_free(s);
    return -1;
}
 
static int get_moov_size(AVFormatContext *s)
{
    AVIOContext *counter;
    MOVMuxContext *mov = s->priv_data;
    int err;

    /* Serialize the moov into a byte-counting sink to measure its size. */
    err = ffio_open_null_buf(&counter);
    if (err < 0)
        return err;
    mov_write_moov_tag(counter, mov, s);
    return ffio_close_null_buf(counter);
}
 
/*
 * Size of the moov atom if it were relocated to the start of the file.
 * Moving the moov shifts every chunk offset, which can push the chunk
 * offset table from 32-bit stco entries to 64-bit co64 entries and
 * thereby grow the moov itself, so the size is measured twice. The
 * tracks' chunk offsets are updated as a side effect.
 */
static int compute_moov_size(AVFormatContext *s)
{
    int i, size_first, size_second;
    MOVMuxContext *mov = s->priv_data;

    size_first = get_moov_size(s);
    if (size_first < 0)
        return size_first;

    /* Shift all chunk offsets by the measured moov size... */
    for (i = 0; i < mov->nb_streams; i++)
        mov->tracks[i].data_offset += size_first;

    /* ...then measure again in case stco switched to co64. */
    size_second = get_moov_size(s);
    if (size_second < 0)
        return size_second;

    if (size_second != size_first)
        for (i = 0; i < mov->nb_streams; i++)
            mov->tracks[i].data_offset += size_second - size_first;

    return size_second;
}
 
/*
 * Second pass for "faststart": shift every byte written after the
 * reserved moov position forward by the size of the relocated moov,
 * making room at the front of the file for the moov atom.
 *
 * The output AVIOContext is write-only, so the same file is re-opened
 * for reading and the copy is done with two ping-pong buffers of
 * moov_size bytes each.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int shift_data(AVFormatContext *s)
{
    int ret = 0, moov_size;
    MOVMuxContext *mov = s->priv_data;
    int64_t pos, pos_end;
    uint8_t *buf, *read_buf[2];
    int read_buf_id = 0;
    int read_size[2];
    AVIOContext *read_pb;

    moov_size = compute_moov_size(s);
    if (moov_size < 0)
        return moov_size;

    /* Promote to size_t before doubling: moov_size * 2 would be computed
     * in int and could overflow for very large moov atoms. */
    buf = av_malloc(2 * (size_t)moov_size);
    if (!buf)
        return AVERROR(ENOMEM);
    read_buf[0] = buf;
    read_buf[1] = buf + moov_size;

    /* Shift the data: the AVIO context of the output can only be used for
     * writing, so we re-open the same output, but for reading. It also avoids
     * a read/seek/write/seek back and forth. */
    avio_flush(s->pb);
    ret = avio_open(&read_pb, s->filename, AVIO_FLAG_READ);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "Unable to re-open %s output file for "
               "the second pass (faststart)\n", s->filename);
        goto end;
    }

    /* mark the end of the shift to up to the last data we wrote, and get ready
     * for writing */
    pos_end = avio_tell(s->pb);
    avio_seek(s->pb, mov->reserved_moov_pos + moov_size, SEEK_SET);

    /* start reading at where the new moov will be placed */
    avio_seek(read_pb, mov->reserved_moov_pos, SEEK_SET);
    pos = avio_tell(read_pb);

#define READ_BLOCK do {                                                       \
    read_size[read_buf_id] = avio_read(read_pb, read_buf[read_buf_id], moov_size); \
    read_buf_id ^= 1;                                                         \
} while (0)

    /* shift data by chunk of at most moov_size; one block is always kept
     * in flight so the write never clobbers bytes not yet read */
    READ_BLOCK;
    do {
        int n;
        READ_BLOCK;
        n = read_size[read_buf_id];
        if (n <= 0)
            break;
        avio_write(s->pb, read_buf[read_buf_id], n);
        pos += n;
    } while (pos < pos_end);
    avio_close(read_pb);

end:
    av_free(buf);
    return ret;
}
 
/*
 * Finalize the output: terminate dangling subtitle samples, patch the
 * mdat size, and write the moov atom (possibly relocated for faststart
 * or into pre-reserved space); in fragmented mode, flush the final
 * fragment and append the mfra atom instead.
 *
 * @return 0 on success, a negative value on failure
 */
static int mov_write_trailer(AVFormatContext *s)
{
    MOVMuxContext *mov = s->priv_data;
    AVIOContext *pb = s->pb;
    int res = 0;
    int i;
    int64_t moov_pos;

    /*
     * Before actually writing the trailer, make sure that there are no
     * dangling subtitles, that need a terminating sample.
     */
    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *trk = &mov->tracks[i];
        if (trk->enc->codec_id == AV_CODEC_ID_MOV_TEXT &&
            !trk->last_sample_is_subtitle_end) {
            mov_write_subtitle_end_packet(s, i, trk->track_duration);
            trk->last_sample_is_subtitle_end = 1;
        }
    }

    // If there were no chapters when the header was written, but there
    // are chapters now, write them in the trailer. This only works
    // when we are not doing fragments.
    if (!mov->chapter_track && !(mov->flags & FF_MOV_FLAG_FRAGMENT)) {
        if (mov->mode & (MODE_MP4|MODE_MOV|MODE_IPOD) && s->nb_chapters) {
            mov->chapter_track = mov->nb_streams++;
            if ((res = mov_create_chapter_track(s, mov->chapter_track)) < 0)
                goto error;
        }
    }

    if (!(mov->flags & FF_MOV_FLAG_FRAGMENT)) {
        moov_pos = avio_tell(pb);

        /* Write size of mdat tag */
        if (mov->mdat_size + 8 <= UINT32_MAX) {
            avio_seek(pb, mov->mdat_pos, SEEK_SET);
            avio_wb32(pb, mov->mdat_size + 8);
        } else {
            /* overwrite 'wide' placeholder atom */
            avio_seek(pb, mov->mdat_pos - 8, SEEK_SET);
            /* special value: real atom size will be 64 bit value after
             * tag field */
            avio_wb32(pb, 1);
            ffio_wfourcc(pb, "mdat");
            avio_wb64(pb, mov->mdat_size + 16);
        }
        avio_seek(pb, mov->reserved_moov_size > 0 ? mov->reserved_moov_pos : moov_pos, SEEK_SET);

        if (mov->flags & FF_MOV_FLAG_FASTSTART) {
            av_log(s, AV_LOG_INFO, "Starting second pass: moving the moov atom to the beginning of the file\n");
            res = shift_data(s);
            if (res == 0) {
                avio_seek(s->pb, mov->reserved_moov_pos, SEEK_SET);
                mov_write_moov_tag(pb, mov, s);
            }
        } else if (mov->reserved_moov_size > 0) {
            int64_t size;
            mov_write_moov_tag(pb, mov, s);
            size = mov->reserved_moov_size - (avio_tell(pb) - mov->reserved_moov_pos);
            if (size < 8){
                av_log(s, AV_LOG_ERROR, "reserved_moov_size is too small, needed %"PRId64" additional\n", 8-size);
                /* was a bare "return -1" here, which skipped mov_free()
                 * and leaked every track's resources */
                res = -1;
                goto error;
            }
            avio_wb32(pb, size);
            ffio_wfourcc(pb, "free");
            for (i = 0; i < size; i++)
                avio_w8(pb, 0);
            avio_seek(pb, moov_pos, SEEK_SET);
        } else {
            mov_write_moov_tag(pb, mov, s);
        }
    } else {
        mov_flush_fragment(s);
        mov_write_mfra_tag(pb, mov);
    }

    /* Patch in the final VC-1 sequence header info for fragmented files,
     * now that the whole stream has been seen. */
    for (i = 0; i < mov->nb_streams; i++) {
        if (mov->flags & FF_MOV_FLAG_FRAGMENT &&
            mov->tracks[i].vc1_info.struct_offset && s->pb->seekable) {
            int64_t off = avio_tell(pb);
            uint8_t buf[7];
            if (mov_write_dvc1_structs(&mov->tracks[i], buf) >= 0) {
                avio_seek(pb, mov->tracks[i].vc1_info.struct_offset, SEEK_SET);
                avio_write(pb, buf, 7);
                avio_seek(pb, off, SEEK_SET);
            }
        }
    }

error:
    mov_free(s);

    return res;
}
 
/* Registration tables for all muxer variants sharing this implementation.
 * They differ only in container brand, default codecs and codec tag lists. */
#if CONFIG_MOV_MUXER
MOV_CLASS(mov)
/* Classic QuickTime .mov */
AVOutputFormat ff_mov_muxer = {
    .name              = "mov",
    .long_name         = NULL_IF_CONFIG_SMALL("QuickTime / MOV"),
    .extensions        = "mov",
    .priv_data_size    = sizeof(MOVMuxContext),
    .audio_codec       = AV_CODEC_ID_AAC,
    .video_codec       = CONFIG_LIBX264_ENCODER ?
                         AV_CODEC_ID_H264 : AV_CODEC_ID_MPEG4,
    .write_header      = mov_write_header,
    .write_packet      = mov_write_packet,
    .write_trailer     = mov_write_trailer,
    .flags             = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE,
    .codec_tag         = (const AVCodecTag* const []){
        ff_codec_movvideo_tags, ff_codec_movaudio_tags, 0
    },
    .priv_class        = &mov_muxer_class,
};
#endif
#if CONFIG_TGP_MUXER
MOV_CLASS(tgp)
/* 3GPP mobile container (AMR-NB / H.263 defaults) */
AVOutputFormat ff_tgp_muxer = {
    .name              = "3gp",
    .long_name         = NULL_IF_CONFIG_SMALL("3GP (3GPP file format)"),
    .extensions        = "3gp",
    .priv_data_size    = sizeof(MOVMuxContext),
    .audio_codec       = AV_CODEC_ID_AMR_NB,
    .video_codec       = AV_CODEC_ID_H263,
    .write_header      = mov_write_header,
    .write_packet      = mov_write_packet,
    .write_trailer     = mov_write_trailer,
    .flags             = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE,
    .codec_tag         = (const AVCodecTag* const []){ codec_3gp_tags, 0 },
    .priv_class        = &tgp_muxer_class,
};
#endif
#if CONFIG_MP4_MUXER
MOV_CLASS(mp4)
/* ISO MP4 (MPEG-4 Part 14) */
AVOutputFormat ff_mp4_muxer = {
    .name              = "mp4",
    .long_name         = NULL_IF_CONFIG_SMALL("MP4 (MPEG-4 Part 14)"),
    .mime_type         = "application/mp4",
    .extensions        = "mp4",
    .priv_data_size    = sizeof(MOVMuxContext),
    .audio_codec       = AV_CODEC_ID_AAC,
    .video_codec       = CONFIG_LIBX264_ENCODER ?
                         AV_CODEC_ID_H264 : AV_CODEC_ID_MPEG4,
    .write_header      = mov_write_header,
    .write_packet      = mov_write_packet,
    .write_trailer     = mov_write_trailer,
    .flags             = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE,
    .codec_tag         = (const AVCodecTag* const []){ ff_mp4_obj_type, 0 },
    .priv_class        = &mp4_muxer_class,
};
#endif
#if CONFIG_PSP_MUXER
MOV_CLASS(psp)
/* Sony PSP flavoured MP4 */
AVOutputFormat ff_psp_muxer = {
    .name              = "psp",
    .long_name         = NULL_IF_CONFIG_SMALL("PSP MP4 (MPEG-4 Part 14)"),
    .extensions        = "mp4,psp",
    .priv_data_size    = sizeof(MOVMuxContext),
    .audio_codec       = AV_CODEC_ID_AAC,
    .video_codec       = CONFIG_LIBX264_ENCODER ?
                         AV_CODEC_ID_H264 : AV_CODEC_ID_MPEG4,
    .write_header      = mov_write_header,
    .write_packet      = mov_write_packet,
    .write_trailer     = mov_write_trailer,
    .flags             = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE,
    .codec_tag         = (const AVCodecTag* const []){ ff_mp4_obj_type, 0 },
    .priv_class        = &psp_muxer_class,
};
#endif
#if CONFIG_TG2_MUXER
MOV_CLASS(tg2)
/* 3GPP2 (CDMA) mobile container */
AVOutputFormat ff_tg2_muxer = {
    .name              = "3g2",
    .long_name         = NULL_IF_CONFIG_SMALL("3GP2 (3GPP2 file format)"),
    .extensions        = "3g2",
    .priv_data_size    = sizeof(MOVMuxContext),
    .audio_codec       = AV_CODEC_ID_AMR_NB,
    .video_codec       = AV_CODEC_ID_H263,
    .write_header      = mov_write_header,
    .write_packet      = mov_write_packet,
    .write_trailer     = mov_write_trailer,
    .flags             = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE,
    .codec_tag         = (const AVCodecTag* const []){ codec_3gp_tags, 0 },
    .priv_class        = &tg2_muxer_class,
};
#endif
#if CONFIG_IPOD_MUXER
MOV_CLASS(ipod)
/* iPod/iTunes-compatible MP4 (m4v/m4a) */
AVOutputFormat ff_ipod_muxer = {
    .name              = "ipod",
    .long_name         = NULL_IF_CONFIG_SMALL("iPod H.264 MP4 (MPEG-4 Part 14)"),
    .mime_type         = "application/mp4",
    .extensions        = "m4v,m4a",
    .priv_data_size    = sizeof(MOVMuxContext),
    .audio_codec       = AV_CODEC_ID_AAC,
    .video_codec       = AV_CODEC_ID_H264,
    .write_header      = mov_write_header,
    .write_packet      = mov_write_packet,
    .write_trailer     = mov_write_trailer,
    .flags             = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE,
    .codec_tag         = (const AVCodecTag* const []){ codec_ipod_tags, 0 },
    .priv_class        = &ipod_muxer_class,
};
#endif
#if CONFIG_ISMV_MUXER
MOV_CLASS(ismv)
/* Microsoft Smooth Streaming fragmented MP4 */
AVOutputFormat ff_ismv_muxer = {
    .name              = "ismv",
    .long_name         = NULL_IF_CONFIG_SMALL("ISMV/ISMA (Smooth Streaming)"),
    .mime_type         = "application/mp4",
    .extensions        = "ismv,isma",
    .priv_data_size    = sizeof(MOVMuxContext),
    .audio_codec       = AV_CODEC_ID_AAC,
    .video_codec       = AV_CODEC_ID_H264,
    .write_header      = mov_write_header,
    .write_packet      = mov_write_packet,
    .write_trailer     = mov_write_trailer,
    .flags             = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE,
    .codec_tag         = (const AVCodecTag* const []){ ff_mp4_obj_type, 0 },
    .priv_class        = &ismv_muxer_class,
};
#endif
#if CONFIG_F4V_MUXER
MOV_CLASS(f4v)
/* Adobe Flash Video F4V (no negative-timestamp flag, unlike the others) */
AVOutputFormat ff_f4v_muxer = {
    .name              = "f4v",
    .long_name         = NULL_IF_CONFIG_SMALL("F4V Adobe Flash Video"),
    .mime_type         = "application/f4v",
    .extensions        = "f4v",
    .priv_data_size    = sizeof(MOVMuxContext),
    .audio_codec       = AV_CODEC_ID_AAC,
    .video_codec       = AV_CODEC_ID_H264,
    .write_header      = mov_write_header,
    .write_packet      = mov_write_packet,
    .write_trailer     = mov_write_trailer,
    .flags             = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH,
    .codec_tag         = (const AVCodecTag* const []){ codec_f4v_tags, 0 },
    .priv_class        = &f4v_muxer_class,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/movenc.h
0,0 → 1,196
/*
* MOV, 3GP, MP4 muxer
* Copyright (c) 2003 Thomas Raivio
* Copyright (c) 2004 Gildas Bazin <gbazin at videolan dot org>
* Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_MOVENC_H
#define AVFORMAT_MOVENC_H
 
#include "avformat.h"
 
#define MOV_FRAG_INFO_ALLOC_INCREMENT 64
#define MOV_INDEX_CLUSTER_SIZE 1024
#define MOV_TIMESCALE 1000
 
#define RTP_MAX_PACKET_SIZE 1450
 
#define MODE_MP4 0x01
#define MODE_MOV 0x02
#define MODE_3GP 0x04
#define MODE_PSP 0x08 // example working PSP command line:
// ffmpeg -i testinput.avi -f psp -r 14.985 -s 320x240 -b 768 -ar 24000 -ab 32 M4V00001.MP4
#define MODE_3G2 0x10
#define MODE_IPOD 0x20
#define MODE_ISM 0x40
#define MODE_F4V 0x80
 
/* One entry of a track's sample table; later serialized into the
 * stts/ctts/stsc/stsz/stco atoms. */
typedef struct MOVIentry {
    uint64_t     pos;                 // byte offset of the sample in the output
    int64_t      dts;
    unsigned int size;
    unsigned int samples_in_chunk;
    unsigned int chunkNum;            ///< Chunk number if the current entry is a chunk start otherwise 0
    unsigned int entries;
    int          cts;                 // composition time offset (pts - dts)
#define MOV_SYNC_SAMPLE         0x0001
#define MOV_PARTIAL_SYNC_SAMPLE 0x0002
    uint32_t     flags;
} MOVIentry;

/* One queued media sample used as reference data when building RTP hint
 * constructors (see movenchint.c). */
typedef struct HintSample {
    uint8_t *data;
    int      size;
    int      sample_number;
    int      offset;        // next position within data to search for matches
    int      own_data;      // nonzero if data is a private copy we must free
} HintSample;

/* Growable FIFO of HintSamples. */
typedef struct HintSampleQueue {
    int         size;       // allocated capacity
    int         len;        // number of valid entries
    HintSample *samples;
} HintSampleQueue;

/* Per-fragment bookkeeping used for the mfra/tfra index and ismv tfrf boxes. */
typedef struct MOVFragmentInfo {
    int64_t offset;         // byte offset of the fragment in the file
    int64_t time;
    int64_t duration;
    int64_t tfrf_offset;    // position of the tfrf box to patch later
} MOVFragmentInfo;

/* Muxer-side state for a single output track. */
typedef struct MOVTrack {
    int         mode;                  // MODE_* container flavour
    int         entry;                 // number of used cluster entries
    unsigned    timescale;
    uint64_t    time;
    int64_t     track_duration;
    int         last_sample_is_subtitle_end;
    long        sample_count;
    long        sample_size;
    long        chunkCount;
    int         has_keyframes;
#define MOV_TRACK_CTTS         0x0001
#define MOV_TRACK_STPS         0x0002
#define MOV_TRACK_ENABLED      0x0004
    uint32_t    flags;
#define MOV_TIMECODE_FLAG_DROPFRAME     0x0001
#define MOV_TIMECODE_FLAG_24HOURSMAX    0x0002
#define MOV_TIMECODE_FLAG_ALLOWNEGATIVE 0x0004
    uint32_t    timecode_flags;
    int         language;
    int         track_id;
    int         tag;                   ///< stsd fourcc
    AVStream        *st;
    AVCodecContext  *enc;

    int         vos_len;               // extradata (global header) size
    uint8_t     *vos_data;             // extradata (global header) bytes
    MOVIentry   *cluster;              // sample table entries
    unsigned    cluster_capacity;
    int         audio_vbr;
    int         height;                ///< active picture (w/o VBI) height for D-10/IMX
    uint32_t    tref_tag;              // track-reference type fourcc
    int         tref_id;               ///< trackID of the referenced track
    int64_t     start_dts;

    int         hint_track;            ///< the track that hints this track, -1 if no hint track is set
    int         src_track;             ///< the track that this hint (or tmcd) track describes
    AVFormatContext *rtp_ctx;          ///< the format context for the hinting rtp muxer
    uint32_t    prev_rtp_ts;
    int64_t     cur_rtp_ts_unwrapped;  // 64-bit unwrapped RTP timestamp
    uint32_t    max_packet_size;

    int64_t     default_duration;      // fragment defaults (tfhd)
    uint32_t    default_sample_flags;
    uint32_t    default_size;

    HintSampleQueue sample_queue;      // pending media samples for hinting

    AVIOContext *mdat_buf;             // per-track buffer for fragmented output
    int64_t     data_offset;
    int64_t     frag_start;
    int64_t     tfrf_offset;

    int         nb_frag_info;
    MOVFragmentInfo *frag_info;
    unsigned    frag_info_capacity;

    struct {
        int64_t struct_offset;         // file position of the dvc1 struct to patch
        int     first_packet_seq;
        int     first_packet_entry;
        int     packet_seq;
        int     packet_entry;
        int     slices;
    } vc1_info;
} MOVTrack;

/* Global muxer state (AVFormatContext.priv_data). */
typedef struct MOVMuxContext {
    const AVClass *av_class;
    int     mode;                      // MODE_* container flavour
    int64_t time;
    int     nb_streams;
    int     nb_meta_tmcd;              ///< number of new created tmcd track based on metadata (aka not data copy)
    int     chapter_track;             ///< qt chapter track number
    int64_t mdat_pos;                  // position of the mdat size field to patch
    uint64_t mdat_size;
    MOVTrack *tracks;

    int     flags;                     // FF_MOV_FLAG_* bitmask
    int     rtp_flags;

    int     iods_skip;
    int     iods_video_profile;
    int     iods_audio_profile;

    int     fragments;                 // number of moof fragments written so far
    int     max_fragment_duration;
    int     min_fragment_duration;
    int     max_fragment_size;
    int     ism_lookahead;
    AVIOContext *mdat_buf;

    int     use_editlist;
    int     video_track_timescale;

    int     reserved_moov_size;        ///< 0 for disabled, -1 for automatic, size otherwise
    int64_t reserved_moov_pos;
} MOVMuxContext;
 
/* Bits for MOVMuxContext.flags, set via the "movflags" option. */
#define FF_MOV_FLAG_RTP_HINT 1
#define FF_MOV_FLAG_FRAGMENT 2
#define FF_MOV_FLAG_EMPTY_MOOV 4
#define FF_MOV_FLAG_FRAG_KEYFRAME 8
#define FF_MOV_FLAG_SEPARATE_MOOF 16
#define FF_MOV_FLAG_FRAG_CUSTOM 32
#define FF_MOV_FLAG_ISML 64
#define FF_MOV_FLAG_FASTSTART 128
#define FF_MOV_FLAG_OMIT_TFHD_OFFSET 256

/* Write one packet into the mov sample tables / output (movenc.c). */
int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt);

/* RTP hinting entry points, implemented in movenchint.c. */
int ff_mov_init_hinting(AVFormatContext *s, int index, int src_index);
int ff_mov_add_hinted_packet(AVFormatContext *s, AVPacket *pkt,
                             int track_index, int sample,
                             uint8_t *sample_data, int sample_size);
void ff_mov_close_hinting(MOVTrack *track);
 
#endif /* AVFORMAT_MOVENC_H */
/contrib/sdk/sources/ffmpeg/libavformat/movenchint.c
0,0 → 1,474
/*
* MOV, 3GP, MP4 muxer RTP hinting
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "movenc.h"
#include "libavutil/intreadwrite.h"
#include "internal.h"
#include "rtpenc_chain.h"
#include "avio_internal.h"
#include "rtp.h"
 
/**
 * Set up track "index" as an RTP hint track describing the media track
 * at src_index: tag it 'rtp ', open a chained RTP muxer for the source
 * stream, and link source and hint tracks to each other.
 *
 * On failure only a warning is logged and a nonzero code is returned;
 * the muxer continues without hinting for this stream.
 */
int ff_mov_init_hinting(AVFormatContext *s, int index, int src_index)
{
    MOVMuxContext *mov  = s->priv_data;
    MOVTrack *track     = &mov->tracks[index];
    MOVTrack *src_track = &mov->tracks[src_index];
    AVStream *src_st    = s->streams[src_index];
    int ret = AVERROR(ENOMEM);

    track->tag = MKTAG('r','t','p',' ');
    track->src_track = src_index;

    /* The hint track carries opaque data, not real media. */
    track->enc = avcodec_alloc_context3(NULL);
    if (!track->enc)
        goto fail;
    track->enc->codec_type = AVMEDIA_TYPE_DATA;
    track->enc->codec_tag  = track->tag;

    ret = ff_rtp_chain_mux_open(&track->rtp_ctx, s, src_st, NULL,
                                RTP_MAX_PACKET_SIZE, src_index);
    if (ret < 0)
        goto fail;

    /* Copy the RTP AVStream timebase back to the hint AVStream */
    track->timescale = track->rtp_ctx->streams[0]->time_base.den;

    /* Mark the hinted track that packets written to it should be
     * sent to this track for hinting. */
    src_track->hint_track = index;
    return 0;
fail:
    av_log(s, AV_LOG_WARNING,
           "Unable to initialize hinting of stream %d\n", src_index);
    av_freep(&track->enc);
    /* Set a default timescale, to avoid crashes in av_dump_format */
    track->timescale = 90000;
    return ret;
}
 
/**
 * Drop the first sample from the queue, freeing its data if owned.
 */
static void sample_queue_pop(HintSampleQueue *queue)
{
    HintSample *head;

    if (queue->len <= 0)
        return;
    head = &queue->samples[0];
    if (head->own_data)
        av_free(head->data);
    queue->len -= 1;
    memmove(head, head + 1, queue->len * sizeof(*head));
}
 
/**
 * Release every owned sample buffer and the queue storage itself,
 * leaving the queue empty and reusable.
 */
static void sample_queue_free(HintSampleQueue *queue)
{
    int n;

    for (n = queue->len - 1; n >= 0; n--)
        if (queue->samples[n].own_data)
            av_free(queue->samples[n].data);
    av_freep(&queue->samples);
    queue->size = 0;
    queue->len  = 0;
}
 
/**
 * Append a reference to sample data to the queue. The bytes are NOT
 * copied; sample_queue_retain must be called before pkt->data is
 * reused or freed. Samples of 14 bytes or less are skipped, since
 * immediate constructors describe them more efficiently.
 */
static void sample_queue_push(HintSampleQueue *queue, uint8_t *data, int size,
                              int sample)
{
    HintSample *slot;

    if (size <= 14)
        return;
    /* Grow the backing array in increments of 10 entries. */
    if (!queue->samples || queue->len >= queue->size) {
        HintSample *grown = av_realloc_array(queue->samples, queue->size + 10,
                                             sizeof(HintSample));
        if (!grown)
            return;
        queue->samples = grown;
        queue->size   += 10;
    }
    slot = &queue->samples[queue->len++];
    slot->data          = data;
    slot->size          = size;
    slot->sample_number = sample;
    slot->offset        = 0;
    slot->own_data      = 0;
}
 
/**
 * Turn every borrowed data reference in the queue into a private copy.
 * Entries whose copy cannot be allocated are removed from the queue.
 */
static void sample_queue_retain(HintSampleQueue *queue)
{
    int idx = 0;

    while (idx < queue->len) {
        HintSample *entry = &queue->samples[idx];
        uint8_t *copy;

        if (entry->own_data) {
            idx++;
            continue;
        }
        copy = av_malloc(entry->size);
        if (!copy) {
            /* Allocation failed: drop this entry and retry same index. */
            memmove(queue->samples + idx, queue->samples + idx + 1,
                    (queue->len - idx - 1) * sizeof(HintSample));
            queue->len--;
            continue;
        }
        memcpy(copy, entry->data, entry->size);
        entry->data     = copy;
        entry->own_data = 1;
        idx++;
    }
}
 
/**
 * Scan haystack for data matching needle[n_pos ->]. A candidate run must
 * exceed 8 matching bytes; it is then extended backwards past n_pos as
 * far as bytes keep matching (within both arrays), and accepted only if
 * the total run exceeds 14 bytes.
 *
 * @param haystack buffer that may contain parts of needle
 * @param h_len length of the haystack buffer
 * @param needle buffer containing source data that have been used to
 *               construct haystack
 * @param n_pos start position in needle used for looking for matches
 * @param n_len length of the needle buffer
 * @param match_h_offset_ptr offset of the first matching byte within haystack
 * @param match_n_offset_ptr offset of the first matching byte within needle
 * @param match_len_ptr length of the matched segment
 * @return 0 if a match was found, < 0 if no match was found
 */
static int match_segments(const uint8_t *haystack, int h_len,
                          const uint8_t *needle, int n_pos, int n_len,
                          int *match_h_offset_ptr, int *match_n_offset_ptr,
                          int *match_len_ptr)
{
    int start;

    for (start = 0; start < h_len; start++) {
        int len = 0;
        int h_first, n_first;

        /* Length of the run of equal bytes beginning at
         * needle[n_pos] / haystack[start]. */
        while (start + len < h_len && n_pos + len < n_len &&
               needle[n_pos + len] == haystack[start + len])
            len++;
        if (len <= 8)
            continue;

        /* Grow the run backwards while earlier bytes also match. */
        h_first = start;
        n_first = n_pos;
        while (n_first > 0 && h_first > 0 &&
               needle[n_first - 1] == haystack[h_first - 1]) {
            n_first--;
            h_first--;
            len++;
        }
        if (len <= 14)
            continue;

        *match_h_offset_ptr = h_first;
        *match_n_offset_ptr = n_first;
        *match_len_ptr      = len;
        return 0;
    }
    return -1;
}
 
/**
 * Look for segments in samples in the sample queue matching the data
 * in ptr. Samples not matching are removed from the queue. If a match
 * is found, the next time it will look for matches starting from the
 * end of the previous matched segment.
 *
 * Note: only the queue head (samples[0]) is ever examined; older
 * samples must be exhausted or dropped before newer ones are tried.
 *
 * @param data data to find matches for in the sample queue
 * @param len length of the data buffer
 * @param queue samples used for looking for matching segments
 * @param pos the offset in data of the matched segment
 * @param match_sample the number of the sample that contained the match
 * @param match_offset the offset of the matched segment within the sample
 * @param match_len the length of the matched segment
 * @return 0 if a match was found, < 0 if no match was found
 */
static int find_sample_match(const uint8_t *data, int len,
                             HintSampleQueue *queue, int *pos,
                             int *match_sample, int *match_offset,
                             int *match_len)
{
    while (queue->len > 0) {
        HintSample *sample = &queue->samples[0];
        /* If looking for matches in a new sample, skip the first 5 bytes,
         * since they often may be modified/removed in the output packet. */
        if (sample->offset == 0 && sample->size > 5)
            sample->offset = 5;

        if (match_segments(data, len, sample->data, sample->offset,
                           sample->size, pos, match_offset, match_len) == 0) {
            *match_sample = sample->sample_number;
            /* Next time, look for matches at this offset, with a little
             * margin to this match. */
            sample->offset = *match_offset + *match_len + 5;
            if (sample->offset + 10 >= sample->size)
                sample_queue_pop(queue); /* Not enough useful data left */
            return 0;
        }

        if (sample->offset < 10 && sample->size > 20) {
            /* No match found from the start of the sample,
             * try from the middle of the sample instead. */
            sample->offset = sample->size/2;
        } else {
            /* No match for this sample, remove it */
            sample_queue_pop(queue);
        }
    }
    return -1;
}
 
/**
 * Emit "immediate" RTP constructors carrying the payload bytes inline:
 * up to 14 bytes per constructor, zero-padded to a fixed 16-byte entry.
 */
static void output_immediate(const uint8_t *data, int size,
                             AVIOContext *out, int *entries)
{
    while (size > 0) {
        int chunk = size > 14 ? 14 : size;
        int pad   = 14 - chunk;

        avio_w8(out, 1);     /* immediate constructor */
        avio_w8(out, chunk); /* amount of valid data */
        avio_write(out, data, chunk);
        data += chunk;
        size -= chunk;

        /* Pad the constructor entry out to its fixed size. */
        while (pad-- > 0)
            avio_w8(out, 0);

        ++*entries;
    }
}
 
/**
 * Emit one "sample" RTP constructor that references match_len bytes at
 * match_offset inside the hinted media sample match_sample.
 */
static void output_match(AVIOContext *out, int match_sample,
                         int match_offset, int match_len, int *entries)
{
    avio_w8(out, 2); /* sample constructor */
    avio_w8(out, 0); /* track reference */
    avio_wb16(out, match_len);
    avio_wb32(out, match_sample);
    avio_wb32(out, match_offset);
    avio_wb16(out, 1); /* bytes per block */
    avio_wb16(out, 1); /* samples per block */
    (*entries)++;
}
 
/**
 * Describe an RTP payload as a sequence of constructors: sample
 * references wherever a queued media sample matches, immediate data
 * for everything in between and for the trailing remainder.
 */
static void describe_payload(const uint8_t *data, int size,
                             AVIOContext *out, int *entries,
                             HintSampleQueue *queue)
{
    for (;;) {
        int match_sample, match_offset, match_len, pos;

        if (size <= 0 ||
            find_sample_match(data, size, queue, &pos, &match_sample,
                              &match_offset, &match_len) < 0)
            break;
        /* Bytes before the match go out as immediate data. */
        output_immediate(data, pos, out, entries);
        data += pos;
        size -= pos;
        output_match(out, match_sample, match_offset, match_len, entries);
        data += match_len;
        size -= match_len;
    }
    /* Whatever is left has no match anywhere: emit it inline. */
    output_immediate(data, size, out, entries);
}
 
/**
 * Write an RTP hint (that may contain one or more RTP packets)
 * for the packets in data. data contains one or more packets with a
 * BE32 size header.
 *
 * @param out buffer where the hints are written
 * @param data buffer containing RTP packets
 * @param size the size of the data buffer
 * @param trk the MOVTrack for the hint track
 * @param dts pointer where the timestamp for the written RTP hint is stored
 * @return the number of RTP packets in the written hint
 */
static int write_hint_packets(AVIOContext *out, const uint8_t *data,
                              int size, MOVTrack *trk, int64_t *dts)
{
    int64_t curpos;
    int64_t count_pos, entries_pos;
    int count = 0, entries;

    /* Remember where the packet count goes; it is patched in at the end. */
    count_pos = avio_tell(out);
    /* RTPsample header */
    avio_wb16(out, 0); /* packet count */
    avio_wb16(out, 0); /* reserved */

    while (size > 4) {
        uint32_t packet_len = AV_RB32(data);
        uint16_t seq;
        uint32_t ts;
        int32_t ts_diff;

        data += 4;
        size -= 4;
        /* A valid RTP packet is at least a 12-byte header. */
        if (packet_len > size || packet_len <= 12)
            break;
        if (RTP_PT_IS_RTCP(data[1])) {
            /* RTCP packet, just skip */
            data += packet_len;
            size -= packet_len;
            continue;
        }

        if (packet_len > trk->max_packet_size)
            trk->max_packet_size = packet_len;

        seq = AV_RB16(&data[2]);
        ts  = AV_RB32(&data[4]);

        /* NOTE(review): treats a stored prev_rtp_ts of 0 as "unset";
         * a genuine RTP timestamp of 0 is indistinguishable here. */
        if (trk->prev_rtp_ts == 0)
            trk->prev_rtp_ts = ts;
        /* Unwrap the 32-bit RTP timestamp that wraps around often
         * into a not (as often) wrapping 64-bit timestamp. */
        ts_diff = ts - trk->prev_rtp_ts;
        if (ts_diff > 0) {
            trk->cur_rtp_ts_unwrapped += ts_diff;
            trk->prev_rtp_ts = ts;
            ts_diff = 0;
        }
        if (*dts == AV_NOPTS_VALUE)
            *dts = trk->cur_rtp_ts_unwrapped;

        count++;
        /* RTPpacket header */
        avio_wb32(out, 0);             /* relative_time */
        avio_write(out, data, 2);      /* RTP header */
        avio_wb16(out, seq);           /* RTPsequenceseed */
        avio_wb16(out, ts_diff ? 4 : 0); /* reserved + flags (extra_flag) */
        entries_pos = avio_tell(out);
        avio_wb16(out, 0);             /* entry count */
        if (ts_diff) { /* if extra_flag is set */
            avio_wb32(out, 16); /* extra_information_length */
            avio_wb32(out, 12); /* rtpoffsetTLV box */
            avio_write(out, "rtpo", 4);
            avio_wb32(out, ts_diff);
        }

        /* Skip past the 12-byte RTP header to the payload. */
        data += 12;
        size -= 12;
        packet_len -= 12;

        entries = 0;
        /* Write one or more constructors describing the payload data */
        describe_payload(data, packet_len, out, &entries, &trk->sample_queue);
        data += packet_len;
        size -= packet_len;

        /* Patch the constructor entry count for this packet. */
        curpos = avio_tell(out);
        avio_seek(out, entries_pos, SEEK_SET);
        avio_wb16(out, entries);
        avio_seek(out, curpos, SEEK_SET);
    }

    /* Patch the final packet count at the top of the hint sample. */
    curpos = avio_tell(out);
    avio_seek(out, count_pos, SEEK_SET);
    avio_wb16(out, count);
    avio_seek(out, curpos, SEEK_SET);
    return count;
}
 
/**
 * Feed one media packet through the chained RTP muxer of the hint track
 * at track_index, convert the resulting RTP packets into a hint sample,
 * and write that sample into the hint track.
 *
 * @param sample the media sample number the packet corresponds to
 * @param sample_data optional replacement payload to queue for matching
 *        (used when the stored sample differs from pkt->data); otherwise
 *        pkt->data itself is queued
 * @return 0 on success or if nothing was written, negative on error
 */
int ff_mov_add_hinted_packet(AVFormatContext *s, AVPacket *pkt,
                             int track_index, int sample,
                             uint8_t *sample_data, int sample_size)
{
    MOVMuxContext *mov = s->priv_data;
    MOVTrack *trk = &mov->tracks[track_index];
    AVFormatContext *rtp_ctx = trk->rtp_ctx;
    uint8_t *buf = NULL;
    int size;
    AVIOContext *hintbuf = NULL;
    AVPacket hint_pkt;
    int ret = 0, count;

    if (!rtp_ctx)
        return AVERROR(ENOENT);
    if (!rtp_ctx->pb)
        return AVERROR(ENOMEM);

    /* Queue the media bytes so the hint constructors can reference them. */
    if (sample_data)
        sample_queue_push(&trk->sample_queue, sample_data, sample_size, sample);
    else
        sample_queue_push(&trk->sample_queue, pkt->data, pkt->size, sample);

    /* Feed the packet to the RTP muxer */
    ff_write_chained(rtp_ctx, 0, pkt, s);

    /* Fetch the output from the RTP muxer, open a new output buffer
     * for next time. */
    size = avio_close_dyn_buf(rtp_ctx->pb, &buf);
    if ((ret = ffio_open_dyn_packet_buf(&rtp_ctx->pb,
                                        RTP_MAX_PACKET_SIZE)) < 0)
        goto done;

    if (size <= 0)
        goto done;

    /* Open a buffer for writing the hint */
    if ((ret = avio_open_dyn_buf(&hintbuf)) < 0)
        goto done;
    av_init_packet(&hint_pkt);
    count = write_hint_packets(hintbuf, buf, size, trk, &hint_pkt.dts);
    av_freep(&buf);

    /* Write the hint data into the hint track */
    hint_pkt.size = size = avio_close_dyn_buf(hintbuf, &buf);
    hint_pkt.data = buf;
    hint_pkt.pts  = hint_pkt.dts;
    hint_pkt.stream_index = track_index;
    if (pkt->flags & AV_PKT_FLAG_KEY)
        hint_pkt.flags |= AV_PKT_FLAG_KEY;
    if (count > 0)
        ff_mov_write_packet(s, &hint_pkt);
done:
    av_free(buf);
    /* Copy the queued sample data now that pkt->data may be recycled. */
    sample_queue_retain(&trk->sample_queue);
    return ret;
}
 
/**
 * Tear down a hint track: free its codec context and sample queue, then
 * finalize and free the chained RTP muxer if one was opened.
 */
void ff_mov_close_hinting(MOVTrack *track)
{
    AVFormatContext *rtp_ctx = track->rtp_ctx;

    av_freep(&track->enc);
    sample_queue_free(&track->sample_queue);
    if (!rtp_ctx)
        return;
    if (rtp_ctx->pb) {
        uint8_t *dyn_buf = NULL;
        av_write_trailer(rtp_ctx);
        avio_close_dyn_buf(rtp_ctx->pb, &dyn_buf);
        av_free(dyn_buf);
    }
    avformat_free_context(rtp_ctx);
}
/contrib/sdk/sources/ffmpeg/libavformat/mp3dec.c
0,0 → 1,366
/*
* MP3 demuxer
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/opt.h"
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "avformat.h"
#include "internal.h"
#include "id3v2.h"
#include "id3v1.h"
#include "libavcodec/mpegaudiodecheader.h"
 
#define XING_FLAG_FRAMES 0x01
#define XING_FLAG_SIZE 0x02
#define XING_FLAG_TOC 0x04
 
#define XING_TOC_COUNT 100
 
/* MP3 demuxer private state (AVFormatContext.priv_data). */
typedef struct {
    AVClass *class;             // required first member for AVOptions
    int64_t filesize;           // file size — presumably probed elsewhere; set outside this chunk
    int64_t header_filesize;    // stream size as stated by the Xing/VBRI tag
    int xing_toc;               // 1 once a Xing TOC was parsed into the seek index
    int start_pad;              // encoder delay in samples, from the LAME tag
    int end_pad;                // encoder padding in samples, from the LAME tag
    int usetoc;                 // user option: build a seek index from the Xing TOC
    int is_cbr;                 // 1 if an 'Info' (CBR) tag was found instead of 'Xing'
} MP3DecContext;
 
/* mp3 read */
 
/**
 * Probe for MP3 content: skip leading zero bytes, then count how many
 * consecutive valid MPEG-audio frame headers can be chained from each
 * candidate start position, and score on the best/first run found.
 */
static int mp3_read_probe(AVProbeData *p)
{
    int max_frames, first_frames = 0;
    int fsize, frames, sample_rate;
    uint32_t header;
    const uint8_t *buf, *buf0, *buf2, *end;
    /* NOTE(review): avctx is passed uninitialized; this relies on
     * avpriv_mpa_decode_header only writing to it — confirm against the
     * lavc implementation. */
    AVCodecContext avctx;

    buf0 = p->buf;
    end = p->buf + p->buf_size - sizeof(uint32_t);
    /* Skip leading zero padding (common before ID3/first frame). */
    while(buf0 < end && !*buf0)
        buf0++;

    max_frames = 0;
    buf = buf0;

    for(; buf < end; buf= buf2+1) {
        buf2 = buf;

        /* Chain as many back-to-back valid frames as possible from here. */
        for(frames = 0; buf2 < end; frames++) {
            header = AV_RB32(buf2);
            fsize = avpriv_mpa_decode_header(&avctx, header, &sample_rate, &sample_rate, &sample_rate, &sample_rate);
            if(fsize < 0)
                break;
            buf2 += fsize;
        }
        max_frames = FFMAX(max_frames, frames);
        if(buf == buf0)
            first_frames= frames;
    }
    // keep this in sync with ac3 probe, both need to avoid
    // issues with MPEG-files!
    if (first_frames>=4) return AVPROBE_SCORE_EXTENSION + 1;
    else if(max_frames>200)return AVPROBE_SCORE_EXTENSION;
    else if(max_frames>=4) return AVPROBE_SCORE_EXTENSION / 2;
    else if(ff_id3v2_match(buf0, ID3v2_DEFAULT_MAGIC) && 2*ff_id3v2_tag_len(buf0) >= p->buf_size)
        return p->buf_size < PROBE_BUF_MAX ? AVPROBE_SCORE_EXTENSION / 4 : AVPROBE_SCORE_EXTENSION - 2;
    else if(max_frames>=1) return 1;
    else return 0;
//mpegps_mp3_unrecognized_format.mpg has max_frames=3
}
 
/**
 * Consume the 100-byte Xing TOC from the input and, when enabled and a
 * duration is known, convert it into seek index entries for stream 0.
 */
static void read_xing_toc(AVFormatContext *s, int64_t filesize, int64_t duration)
{
    MP3DecContext *mp3 = s->priv_data;
    int want_index = mp3->usetoc && duration > 0;
    int i;

    if (!filesize &&
        !(filesize = avio_size(s->pb))) {
        av_log(s, AV_LOG_WARNING, "Cannot determine file size, skipping TOC table.\n");
        want_index = 0;
    }

    /* TOC byte b maps position i/100 of the duration to byte offset
     * b/256 of the file. The 100 bytes must always be consumed, even
     * when no index is being built. */
    for (i = 0; i < XING_TOC_COUNT; i++) {
        uint8_t entry = avio_r8(s->pb);
        if (!want_index)
            continue;
        av_add_index_entry(s->streams[0],
                           av_rescale(entry, filesize, 256),
                           av_rescale(i, duration, XING_TOC_COUNT),
                           0, 0, AVINDEX_KEYFRAME);
    }
    if (want_index)
        mp3->xing_toc = 1;
}
 
/**
 * Try to find Xing/Info/VBRI tags and compute duration from info therein
 *
 * Parses the first audio frame at @p base for a Xing/Info tag (frame
 * count, stream size, seek TOC, LAME delay/padding) or a VBRI tag, sets
 * the stream duration and bit rate accordingly and positions the input
 * past the tag frame.
 *
 * @return 0 if a usable tag was found, -1 otherwise
 */
static int mp3_parse_vbr_tags(AVFormatContext *s, AVStream *st, int64_t base)
{
    MP3DecContext *mp3 = s->priv_data;
    uint32_t v, spf;
    unsigned frames = 0; /* Total number of frames in file */
    unsigned size = 0; /* Total number of bytes in the stream */
    static const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}};
    MPADecodeHeader c;
    int vbrtag_size = 0;
    int is_cbr;

    v = avio_rb32(s->pb);
    if (ff_mpa_check_header(v) < 0)
        return -1;

    /* Bail out on a decode failure: c would otherwise be read
     * uninitialized below (c.layer / c.lsf / ...). */
    if (avpriv_mpegaudio_decode_header(&c, v) != 0)
        return -1;
    vbrtag_size = c.frame_size;
    if (c.layer != 3)
        return -1;

    spf = c.lsf ? 576 : 1152; /* Samples per frame, layer 3 */

    /* Check for Xing / Info tag */
    avio_skip(s->pb, xing_offtbl[c.lsf == 1][c.nb_channels == 1]);
    v = avio_rb32(s->pb);
    is_cbr = v == MKBETAG('I', 'n', 'f', 'o');
    if (v == MKBETAG('X', 'i', 'n', 'g') || is_cbr) {
        v = avio_rb32(s->pb); /* flags word */
        if (v & XING_FLAG_FRAMES)
            frames = avio_rb32(s->pb);
        if (v & XING_FLAG_SIZE)
            size = avio_rb32(s->pb);
        if (v & XING_FLAG_TOC)
            read_xing_toc(s, size, av_rescale_q(frames, (AVRational){spf, c.sample_rate},
                                    st->time_base));
        if (v & 8) /* VBR quality indicator present; skip it */
            avio_skip(s->pb, 4);

        /* LAME/Lavf extension: encoder delay and padding */
        v = avio_rb32(s->pb);
        if (v == MKBETAG('L', 'A', 'M', 'E') || v == MKBETAG('L', 'a', 'v', 'f')) {
            avio_skip(s->pb, 21 - 4);
            v = avio_rb24(s->pb);
            mp3->start_pad = v >> 12;
            mp3->end_pad   = v & 4095;
            /* 528 + 1 extra samples of decoder delay */
            st->skip_samples = mp3->start_pad + 528 + 1;
            av_log(s, AV_LOG_DEBUG, "pad %d %d\n", mp3->start_pad, mp3->end_pad);
        }
    }

    /* Check for VBRI tag (always 32 bytes after end of mpegaudio header) */
    avio_seek(s->pb, base + 4 + 32, SEEK_SET);
    v = avio_rb32(s->pb);
    if (v == MKBETAG('V', 'B', 'R', 'I')) {
        /* Check tag version */
        if (avio_rb16(s->pb) == 1) {
            /* skip delay and quality */
            avio_skip(s->pb, 4);
            size   = avio_rb32(s->pb);
            frames = avio_rb32(s->pb);
        }
    }

    if (!frames && !size)
        return -1;

    /* Skip the vbr tag frame */
    avio_seek(s->pb, base + vbrtag_size, SEEK_SET);

    if (frames)
        st->duration = av_rescale_q(frames, (AVRational){spf, c.sample_rate},
                                    st->time_base);
    if (size && frames && !is_cbr)
        st->codec->bit_rate = av_rescale(size, 8 * c.sample_rate, frames * (int64_t)spf);

    mp3->is_cbr = is_cbr;
    mp3->header_filesize = size;

    return 0;
}
 
/* Create the single MP3 audio stream and probe for ID3v1 and VBR-tag
 * metadata; codec parameters themselves come later from the parser. */
static int mp3_read_header(AVFormatContext *s)
{
    MP3DecContext *mp3 = s->priv_data;
    int64_t start_pos;
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = AV_CODEC_ID_MP3;
    st->need_parsing      = AVSTREAM_PARSE_FULL_RAW;
    st->start_time        = 0;

    /* 14112000 is the lcm of all mp3 sample rates */
    avpriv_set_pts_info(st, 64, 1, 14112000);

    s->pb->maxsize = -1;
    start_pos = avio_tell(s->pb);

    if (!av_dict_get(s->metadata, "", NULL, AV_DICT_IGNORE_SUFFIX))
        ff_id3v1_read(s);

    if (s->pb->seekable)
        mp3->filesize = avio_size(s->pb);

    /* when no VBR tag is found, rewind so parsing starts at the first frame */
    if (mp3_parse_vbr_tags(s, st, start_pos) < 0)
        avio_seek(s->pb, start_pos, SEEK_SET);

    /* the parameters will be extracted from the compressed bitstream */
    return 0;
}
 
#define MP3_PACKET_SIZE 1024
 
/* Read up to MP3_PACKET_SIZE bytes of raw stream into a packet,
 * trimming any trailing ID3v1 tag from the final packet. */
static int mp3_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MP3DecContext *mp3 = s->priv_data;
    int64_t pos = avio_tell(s->pb);
    int size = MP3_PACKET_SIZE;
    int ret;

    /* never read into a trailing ID3v1 tag at the end of the file */
    if (mp3->filesize > ID3v1_TAG_SIZE && pos < mp3->filesize)
        size = FFMIN(size, mp3->filesize - pos);

    ret = av_get_packet(s->pb, pkt, size);
    if (ret <= 0)
        return ret < 0 ? ret : AVERROR_EOF;

    pkt->flags &= ~AV_PKT_FLAG_CORRUPT;
    pkt->stream_index = 0;

    /* an ID3v1 tag may still land at the packet tail; strip it */
    if (ret >= ID3v1_TAG_SIZE &&
        !memcmp(&pkt->data[ret - ID3v1_TAG_SIZE], "TAG", 3))
        ret -= ID3v1_TAG_SIZE;

    /* note: we need to modify the packet size here to handle the last
       packet */
    pkt->size = ret;
    return ret;
}
 
/* Verify that a valid MPEG audio frame header starts at pos.
 * Returns the frame size on success, a negative value otherwise. */
static int check(AVFormatContext *s, int64_t pos)
{
    MPADecodeHeader hdr;
    uint32_t header;
    int64_t seek_ret = avio_seek(s->pb, pos, SEEK_SET);

    if (seek_ret < 0)
        return seek_ret;

    header = avio_rb32(s->pb);
    if (ff_mpa_check_header(header) < 0)
        return -1;
    if (avpriv_mpegaudio_decode_header(&hdr, header) == 1)
        return -1;

    return hdr.frame_size;
}
 
/* Seek handler: CBR files are seeked by linear interpolation between
 * timestamp and byte position; VBR files use the Xing TOC index; in all
 * other cases -1 is returned so the generic seek code takes over.
 * The chosen position is then refined by scanning for a run of valid
 * frame headers. */
static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
                    int flags)
{
    MP3DecContext *mp3 = s->priv_data;
    AVIndexEntry *ie, ie1;
    AVStream *st = s->streams[0];
    int64_t ret  = av_index_search_timestamp(st, timestamp, flags);
    int i, j;
    int dir = (flags&AVSEEK_FLAG_BACKWARD) ? -1 : 1;

    if (mp3->is_cbr && st->duration > 0 && mp3->header_filesize > s->data_offset) {
        /* CBR: interpolate the byte position from the timestamp */
        int64_t filesize = avio_size(s->pb);
        int64_t duration;
        if (filesize <= s->data_offset)
            filesize = mp3->header_filesize;
        filesize -= s->data_offset;
        duration = av_rescale(st->duration, filesize, mp3->header_filesize - s->data_offset);
        ie = &ie1; /* build a synthetic index entry on the stack */
        timestamp = av_clip64(timestamp, 0, duration);
        ie->timestamp = timestamp;
        ie->pos       = av_rescale(timestamp, filesize, duration) + s->data_offset;
    } else if (mp3->xing_toc) {
        /* VBR with TOC: use the entry found in the index */
        if (ret < 0)
            return ret;

        ie = &st->index_entries[ret];
    } else {
        /* no way to seek precisely; restore decoder padding at stream start */
        st->skip_samples = timestamp <= 0 ? mp3->start_pad + 528 + 1 : 0;

        return -1;
    }

    ret = avio_seek(s->pb, ie->pos, SEEK_SET);
    if (ret < 0)
        return ret;

#define MIN_VALID 3
    /* refine: slide the position (by dir) until MIN_VALID consecutive
     * valid frame headers are found, giving up after 4096 tries */
    for(i=0; i<4096; i++) {
        int64_t pos = ie->pos + i*dir;
        for(j=0; j<MIN_VALID; j++) {
            ret = check(s, pos);
            if(ret < 0)
                break;
            pos += ret;
        }
        if(j==MIN_VALID)
            break;
    }
    if(j!=MIN_VALID)
        i=0; /* no valid run found: fall back to the interpolated position */

    ret = avio_seek(s->pb, ie->pos + i*dir, SEEK_SET);
    if (ret < 0)
        return ret;
    ff_update_cur_dts(s, st, ie->timestamp);
    st->skip_samples = ie->timestamp <= 0 ? mp3->start_pad + 528 + 1 : 0;
    return 0;
}
 
/* usetoc: -1 = auto, 0 = never use the Xing TOC for seeking, 1 = always */
static const AVOption options[] = {
    { "usetoc", "use table of contents", offsetof(MP3DecContext, usetoc), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM},
    { NULL },
};

static const AVClass demuxer_class = {
    .class_name = "mp3",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEMUXER,
};

/* MP2/MP3 raw-stream demuxer registration */
AVInputFormat ff_mp3_demuxer = {
    .name           = "mp3",
    .long_name      = NULL_IF_CONFIG_SMALL("MP2/3 (MPEG audio layer 2/3)"),
    .read_probe     = mp3_read_probe,
    .read_header    = mp3_read_header,
    .read_packet    = mp3_read_packet,
    .read_seek      = mp3_seek,
    .priv_data_size = sizeof(MP3DecContext),
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "mp2,mp3,m2a,mpa", /* XXX: use probe */
    .priv_class     = &demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/mp3enc.c
0,0 → 1,518
/*
* MP3 muxer
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "avio_internal.h"
#include "id3v1.h"
#include "id3v2.h"
#include "rawenc.h"
#include "libavutil/avstring.h"
#include "libavcodec/mpegaudio.h"
#include "libavcodec/mpegaudiodata.h"
#include "libavcodec/mpegaudiodecheader.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/avassert.h"
 
/* Copy the metadata value for key into buf (truncated to buf_size).
 * Returns 1 when the tag existed, 0 otherwise. */
static int id3v1_set_string(AVFormatContext *s, const char *key,
                            uint8_t *buf, int buf_size)
{
    AVDictionaryEntry *tag = av_dict_get(s->metadata, key, NULL, 0);

    if (tag)
        av_strlcpy(buf, tag->value, buf_size);

    return tag != NULL;
}
 
/* Build a 128-byte ID3v1 tag in buf from the context metadata.
 * Returns the number of fields that were actually populated. */
static int id3v1_create_tag(AVFormatContext *s, uint8_t *buf)
{
    AVDictionaryEntry *tag;
    int i, count = 0;

    memset(buf, 0, ID3v1_TAG_SIZE); /* fail safe */
    buf[0] = 'T';
    buf[1] = 'A';
    buf[2] = 'G';
    /* we knowingly overspecify each tag length by one byte to compensate for the mandatory null byte added by av_strlcpy */
    count += id3v1_set_string(s, "TIT2",    buf +  3, 30 + 1);       //title
    count += id3v1_set_string(s, "TPE1",    buf + 33, 30 + 1);       //author|artist
    count += id3v1_set_string(s, "TALB",    buf + 63, 30 + 1);       //album
    count += id3v1_set_string(s, "TDRL",    buf + 93,  4 + 1);       //date
    count += id3v1_set_string(s, "comment", buf + 97, 30 + 1);
    if ((tag = av_dict_get(s->metadata, "TRCK", NULL, 0))) { //track
        buf[125] = 0; /* zero byte 125 marks ID3v1.1 with a track number */
        buf[126] = atoi(tag->value);
        count++;
    }
    buf[127] = 0xFF; /* default to unknown genre */
    if ((tag = av_dict_get(s->metadata, "TCON", NULL, 0))) { //genre
        /* map the textual genre to its ID3v1 genre index */
        for(i = 0; i <= ID3v1_GENRE_MAX; i++) {
            if (!av_strcasecmp(tag->value, ff_id3v1_genre_str[i])) {
                buf[127] = i;
                count++;
                break;
            }
        }
    }
    return count;
}
 
#define XING_NUM_BAGS 400
#define XING_TOC_SIZE 100
// maximum size of the xing frame: offset/Xing/flags/frames/size/TOC
#define XING_MAX_SIZE (32 + 4 + 4 + 4 + 4 + XING_TOC_SIZE)

/* Muxer state shared between header, packet and trailer writing. */
typedef struct MP3Context {
    const AVClass *class;
    ID3v2EncContext id3;   /* in-progress ID3v2 header state */
    int id3v2_version;     /* option: 3 or 4 */
    int write_id3v1;       /* option: append an ID3v1 tag in the trailer */

    /* xing header */
    int64_t xing_offset;   /* file offset of the Xing tag, 0 if none was written */
    int32_t frames;        /* audio frames written so far */
    int32_t size;          /* stream bytes written so far */
    uint32_t want;         /* frames per bag before the next seek point is stored */
    uint32_t seen;         /* frames seen since the last stored seek point */
    uint32_t pos;          /* next free entry in bag[] */
    uint64_t bag[XING_NUM_BAGS]; /* cumulative byte offsets used to build the TOC */
    int initial_bitrate;        /* bit rate of the first frame */
    int has_variable_bitrate;   /* set once any frame differs from the first */

    /* index of the audio stream */
    int audio_stream_idx;
    /* number of attached pictures we still need to write */
    int pics_to_write;

    /* audio packets are queued here until we get all the attached pictures */
    AVPacketList *queue, *queue_end;
} MP3Context;

/* byte offset from the frame header to the Xing tag, indexed by [lsf][mono] */
static const uint8_t xing_offtbl[2][2] = {{32, 17}, {17, 9}};
 
/*
* Write an empty XING header and initialize respective data.
*/
/*
 * Write an empty XING header and initialize respective data.
 *
 * A placeholder frame (with zeroed frame/size/TOC fields) is emitted at the
 * current position; mp3_update_xing() fills in the real values in the
 * trailer. Returns 0 on success, -1 when no suitable frame could be built;
 * does nothing on non-seekable output.
 */
static int mp3_write_xing(AVFormatContext *s)
{
    MP3Context  *mp3 = s->priv_data;
    AVCodecContext *codec = s->streams[mp3->audio_stream_idx]->codec;
    int bitrate_idx;
    int best_bitrate_idx = -1;
    int best_bitrate_error= INT_MAX;
    int xing_offset;
    int32_t header, mask;
    MPADecodeHeader c;
    int srate_idx, ver = 0, i, channels;
    int needed;
    const char *vendor = (codec->flags & CODEC_FLAG_BITEXACT) ? "Lavf" : LIBAVFORMAT_IDENT;

    if (!s->pb->seekable)
        return 0;

    /* map the sample rate to an MPEG version + frequency index */
    for (i = 0; i < FF_ARRAY_ELEMS(avpriv_mpa_freq_tab); i++) {
        const uint16_t base_freq = avpriv_mpa_freq_tab[i];

        if      (codec->sample_rate == base_freq)     ver = 0x3; // MPEG 1
        else if (codec->sample_rate == base_freq / 2) ver = 0x2; // MPEG 2
        else if (codec->sample_rate == base_freq / 4) ver = 0x0; // MPEG 2.5
        else continue;

        srate_idx = i;
        break;
    }
    if (i == FF_ARRAY_ELEMS(avpriv_mpa_freq_tab)) {
        av_log(s, AV_LOG_WARNING, "Unsupported sample rate, not writing Xing header.\n");
        return -1;
    }

    switch (codec->channels) {
    case 1: channels = MPA_MONO;                                          break;
    case 2: channels = MPA_STEREO;                                        break;
    default: av_log(s, AV_LOG_WARNING, "Unsupported number of channels, "
                    "not writing Xing header.\n");
             return -1;
    }

    /* dummy MPEG audio header */
    header  = 0xffU << 24; // sync
    header |= (0x7 << 5 | ver << 3 | 0x1 << 1 | 0x1) << 16; // sync/audio-version/layer 3/no crc*/
    header |= (srate_idx << 2) << 8;
    header |= channels << 6;

    /* pick the bitrate index whose nominal rate is closest to the stream's */
    for (bitrate_idx=1; bitrate_idx<15; bitrate_idx++) {
        int error;
        avpriv_mpegaudio_decode_header(&c, header | (bitrate_idx << (4+8)));
        error= FFABS(c.bit_rate - codec->bit_rate);
        if(error < best_bitrate_error){
            best_bitrate_error= error;
            best_bitrate_idx  = bitrate_idx;
        }
    }
    av_assert0(best_bitrate_idx >= 0);

    /* increase the bitrate until the frame is large enough for the tag */
    for (bitrate_idx= best_bitrate_idx;; bitrate_idx++) {
        if (15 == bitrate_idx)
            return -1;
        mask = bitrate_idx << (4+8);
        header |= mask;
        avpriv_mpegaudio_decode_header(&c, header);
        xing_offset=xing_offtbl[c.lsf == 1][c.nb_channels == 1];
        needed = 4              // header
               + xing_offset
               + 4              // xing tag
               + 4              // frames/size/toc flags
               + 4              // frames
               + 4              // size
               + XING_TOC_SIZE  // toc
               + 24
               ;

        if (needed <= c.frame_size)
            break;
        header &= ~mask;
    }

    avio_wb32(s->pb, header);

    ffio_fill(s->pb, 0, xing_offset);
    mp3->xing_offset = avio_tell(s->pb);
    ffio_wfourcc(s->pb, "Xing");
    avio_wb32(s->pb, 0x01 | 0x02 | 0x04);  // frames / size / TOC

    mp3->size = c.frame_size;
    mp3->want=1;
    mp3->seen=0;
    mp3->pos=0;

    avio_wb32(s->pb, 0);  // frames
    avio_wb32(s->pb, 0);  // size

    // toc (placeholder: evenly spaced, rewritten in the trailer)
    for (i = 0; i < XING_TOC_SIZE; ++i)
        avio_w8(s->pb, (uint8_t)(255 * i / XING_TOC_SIZE));

    /* vendor string, zero-padded to 21 bytes, then delay/padding field */
    for (i = 0; i < strlen(vendor); ++i)
        avio_w8(s->pb, vendor[i]);
    for (; i < 21; ++i)
        avio_w8(s->pb, 0);
    avio_wb24(s->pb, FFMAX(codec->delay - 528 - 1, 0)<<12);

    ffio_fill(s->pb, 0, c.frame_size - needed);

    return 0;
}
 
/*
 * Account one audio frame in the Xing statistics: frame count, byte total
 * and the "bag" table of seek points. Mirrors lame's "VbrTag.c".
 */
static void mp3_xing_add_frame(MP3Context *mp3, AVPacket *pkt)
{
    mp3->frames++;
    mp3->seen++;
    mp3->size += pkt->size;

    if (mp3->seen != mp3->want)
        return;

    mp3->bag[mp3->pos++] = mp3->size;

    if (mp3->pos == XING_NUM_BAGS) {
        int j;
        /* table full: keep every second bag ... */
        for (j = 1; j < XING_NUM_BAGS; j += 2)
            mp3->bag[j >> 1] = mp3->bag[j];
        /* ... and from now on record a point every 2x as many frames */
        mp3->want *= 2;
        mp3->pos = XING_NUM_BAGS / 2;
    }

    mp3->seen = 0;
}
 
/* Write one audio packet, updating CBR/VBR detection and Xing statistics.
 * Packets that do not start with a valid MPEG audio header are written
 * unchanged with a warning. */
static int mp3_write_audio_packet(AVFormatContext *s, AVPacket *pkt)
{
    MP3Context  *mp3 = s->priv_data;

    if (pkt->data && pkt->size >= 4) {
        MPADecodeHeader c;
        int av_unused base;
        uint32_t head = AV_RB32(pkt->data);

        if (ff_mpa_check_header(head) < 0) {
            av_log(s, AV_LOG_WARNING, "Audio packet of size %d (starting with %08X...) "
                   "is invalid, writing it anyway.\n", pkt->size, head);
            return ff_raw_write_packet(s, pkt);
        }
        avpriv_mpegaudio_decode_header(&c, head);

        /* flag the stream as VBR as soon as any frame's rate differs */
        if (!mp3->initial_bitrate)
            mp3->initial_bitrate = c.bit_rate;
        if ((c.bit_rate == 0) || (mp3->initial_bitrate != c.bit_rate))
            mp3->has_variable_bitrate = 1;

#ifdef FILTER_VBR_HEADERS
        /* filter out XING and INFO headers. */
        base = 4 + xing_offtbl[c.lsf == 1][c.nb_channels == 1];

        if (base + 4 <= pkt->size) {
            uint32_t v = AV_RB32(pkt->data + base);

            if (MKBETAG('X','i','n','g') == v || MKBETAG('I','n','f','o') == v)
                return 0;
        }

        /* filter out VBRI headers. */
        base = 4 + 32;

        if (base + 4 <= pkt->size && MKBETAG('V','B','R','I') == AV_RB32(pkt->data + base))
            return 0;
#endif

        if (mp3->xing_offset)
            mp3_xing_add_frame(mp3, pkt);
    }

    return ff_raw_write_packet(s, pkt);
}
 
/* Finish the ID3v2 header, emit the Xing placeholder frame, then drain
 * all queued audio packets. Returns the first write error (if any),
 * but always frees every queued packet. */
static int mp3_queue_flush(AVFormatContext *s)
{
    MP3Context *mp3 = s->priv_data;
    AVPacketList *entry;
    int ret = 0, keep_writing = 1;

    ff_id3v2_finish(&mp3->id3, s->pb);
    mp3_write_xing(s);

    while ((entry = mp3->queue)) {
        /* stop writing after the first failure, keep freeing packets */
        if (keep_writing && (ret = mp3_write_audio_packet(s, &entry->pkt)) < 0)
            keep_writing = 0;
        av_free_packet(&entry->pkt);
        mp3->queue = entry->next;
        av_freep(&entry);
    }
    mp3->queue_end = NULL;
    return ret;
}
 
/* Rewrite the Xing frame with the final frame/byte counts and a TOC built
 * from the bag table, then return the file position to the end. */
static void mp3_update_xing(AVFormatContext *s)
{
    MP3Context *mp3 = s->priv_data;
    int i;

    /* CBR files carry an "Info" tag instead of "Xing" */
    if (!mp3->has_variable_bitrate) {
        avio_seek(s->pb, mp3->xing_offset, SEEK_SET);
        ffio_wfourcc(s->pb, "Info");
    }

    avio_seek(s->pb, mp3->xing_offset + 8, SEEK_SET);
    avio_wb32(s->pb, mp3->frames);
    avio_wb32(s->pb, mp3->size);

    /* the first TOC entry is zero by definition */
    avio_w8(s->pb, 0);
    for (i = 1; i < XING_TOC_SIZE; i++) {
        int bag_idx    = i * mp3->pos / XING_TOC_SIZE;
        int seek_point = 256LL * mp3->bag[bag_idx] / mp3->size;
        avio_w8(s->pb, FFMIN(seek_point, 255));
    }

    avio_seek(s->pb, 0, SEEK_END);
}
 
/* Flush any still-queued audio, append the optional ID3v1 tag and
 * finalize the Xing frame. */
static int mp3_write_trailer(struct AVFormatContext *s)
{
    MP3Context *mp3 = s->priv_data;
    uint8_t id3v1_buf[ID3v1_TAG_SIZE];

    if (mp3->pics_to_write) {
        av_log(s, AV_LOG_WARNING, "No packets were sent for some of the "
               "attached pictures.\n");
        mp3_queue_flush(s);
    }

    /* write the id3v1 tag */
    if (mp3->write_id3v1 && id3v1_create_tag(s, id3v1_buf) > 0)
        avio_write(s->pb, id3v1_buf, ID3v1_TAG_SIZE);

    if (mp3->xing_offset)
        mp3_update_xing(s);

    return 0;
}
 
/* Report whether the given codec can be attached as an APIC picture. */
static int query_codec(enum AVCodecID id, int std_compliance)
{
    const CodecMime *mime;

    for (mime = ff_id3v2_mime_tags; mime->id != AV_CODEC_ID_NONE; mime++)
        if (mime->id == id)
            return MKTAG('A', 'P', 'I', 'C');

    return -1;
}
 
#if CONFIG_MP2_MUXER
/* Raw MP2 muxer: packets are written through unchanged, no tags written. */
AVOutputFormat ff_mp2_muxer = {
    .name              = "mp2",
    .long_name         = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
    .mime_type         = "audio/x-mpeg",
    .extensions        = "mp2,m2a,mpa",
    .audio_codec       = AV_CODEC_ID_MP2,
    .video_codec       = AV_CODEC_ID_NONE,
    .write_packet      = ff_raw_write_packet,
    .flags             = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_MP3_MUXER
 
/* Muxer options: ID3v2 version selection and optional ID3v1 trailer tag. */
static const AVOption options[] = {
    { "id3v2_version", "Select ID3v2 version to write. Currently 3 and 4 are supported.",
      offsetof(MP3Context, id3v2_version), AV_OPT_TYPE_INT, {.i64 = 4}, 3, 4, AV_OPT_FLAG_ENCODING_PARAM},
    { "write_id3v1", "Enable ID3v1 writing. ID3v1 tags are written in UTF-8 which may not be supported by most software.",
      offsetof(MP3Context, write_id3v1), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM},
    { NULL },
};

static const AVClass mp3_muxer_class = {
    .class_name     = "MP3 muxer",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
};
 
/* Dispatch a packet: audio goes out directly (or is queued while attached
 * pictures are still pending); picture packets are embedded as ID3v2 APIC
 * frames, and the audio queue is flushed once the last picture arrives. */
static int mp3_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    MP3Context *mp3 = s->priv_data;

    if (pkt->stream_index == mp3->audio_stream_idx) {
        if (mp3->pics_to_write) {
            /* buffer audio packets until we get all the pictures */
            AVPacketList *pktl = av_mallocz(sizeof(*pktl));
            if (!pktl)
                return AVERROR(ENOMEM);

            pktl->pkt     = *pkt;
            pktl->pkt.buf = av_buffer_ref(pkt->buf);
            if (!pktl->pkt.buf) {
                av_freep(&pktl);
                return AVERROR(ENOMEM);
            }

            if (mp3->queue_end)
                mp3->queue_end->next = pktl;
            else
                mp3->queue = pktl;
            mp3->queue_end = pktl;
        } else
            return mp3_write_audio_packet(s, pkt);
    } else {
        int ret;

        /* warn only once for each stream */
        if (s->streams[pkt->stream_index]->nb_frames == 1) {
            av_log(s, AV_LOG_WARNING, "Got more than one picture in stream %d,"
                   " ignoring.\n", pkt->stream_index);
        }
        /* ignore extra pictures and pictures arriving after the flush */
        if (!mp3->pics_to_write || s->streams[pkt->stream_index]->nb_frames >= 1)
            return 0;

        if ((ret = ff_id3v2_write_apic(s, &mp3->id3, pkt)) < 0)
            return ret;
        mp3->pics_to_write--;

        /* flush the buffered audio packets */
        if (!mp3->pics_to_write &&
            (ret = mp3_queue_flush(s)) < 0)
            return ret;
    }

    return 0;
}
 
/**
 * Write an ID3v2 header at beginning of stream; the Xing placeholder is
 * only emitted once there are no attached pictures left to embed.
 */
static int mp3_write_header(struct AVFormatContext *s)
{
    MP3Context *mp3 = s->priv_data;
    int i, ret;

    /* exactly one MP3 audio stream; any number of picture (video) streams */
    mp3->audio_stream_idx = -1;
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        switch (st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (mp3->audio_stream_idx >= 0 || st->codec->codec_id != AV_CODEC_ID_MP3) {
                av_log(s, AV_LOG_ERROR, "Invalid audio stream. Exactly one MP3 "
                       "audio stream is required.\n");
                return AVERROR(EINVAL);
            }
            mp3->audio_stream_idx = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Only audio streams and pictures are allowed in MP3.\n");
            return AVERROR(EINVAL);
        }
    }
    if (mp3->audio_stream_idx < 0) {
        av_log(s, AV_LOG_ERROR, "No audio stream present.\n");
        return AVERROR(EINVAL);
    }
    mp3->pics_to_write = s->nb_streams - 1;

    ff_id3v2_start(&mp3->id3, s->pb, mp3->id3v2_version, ID3v2_DEFAULT_MAGIC);
    ret = ff_id3v2_write_metadata(s, &mp3->id3);
    if (ret < 0)
        return ret;

    if (!mp3->pics_to_write) {
        ff_id3v2_finish(&mp3->id3, s->pb);
        mp3_write_xing(s);
    }

    return 0;
}
 
/* MP3 muxer: raw MPEG audio with ID3v2 header (incl. attached pictures),
 * Xing/Info VBR frame and optional ID3v1 trailer. */
AVOutputFormat ff_mp3_muxer = {
    .name              = "mp3",
    .long_name         = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
    .mime_type         = "audio/x-mpeg",
    .extensions        = "mp3",
    .priv_data_size    = sizeof(MP3Context),
    .audio_codec       = AV_CODEC_ID_MP3,
    .video_codec       = AV_CODEC_ID_PNG,
    .write_header      = mp3_write_header,
    .write_packet      = mp3_write_packet,
    .write_trailer     = mp3_write_trailer,
    .query_codec       = query_codec,
    .flags             = AVFMT_NOTIMESTAMPS,
    .priv_class        = &mp3_muxer_class,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/mpc.c
0,0 → 1,237
/*
* Musepack demuxer
* Copyright (c) 2006 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavcodec/get_bits.h"
#include "avformat.h"
#include "internal.h"
#include "apetag.h"
#include "id3v1.h"
#include "libavutil/dict.h"
 
#define MPC_FRAMESIZE  1152 /* samples per Musepack frame */
#define DELAY_FRAMES   32   /* decoder delay, in frames */

/* sample rates selectable by the 2-bit field in the SV7 header */
static const int mpc_rate[4] = { 44100, 48000, 37800, 32000 };
typedef struct {
    int64_t pos;    /* byte offset of the frame in the file */
    int size, skip; /* frame size in bytes; bit offset into its first word */
}MPCFrame;

typedef struct {
    int ver;                      /* stream version byte (0x07 or 0x17 accepted) */
    uint32_t curframe, lastframe; /* next frame to read / last frame delivered */
    uint32_t fcount;              /* total frame count from the container header */
    MPCFrame *frames;             /* seek table, filled lazily while reading */
    int curbits;                  /* current bit offset within the stream word */
    int frames_noted;             /* entries already recorded in frames[] */
} MPCContext;
 
/* Detect SV7 Musepack: "MP+" magic followed by version byte 0x07 or 0x17. */
static int mpc_probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;

    if (b[0] == 'M' && b[1] == 'P' && b[2] == '+' &&
        (b[3] == 0x17 || b[3] == 0x07))
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/* Parse the SV7 header: magic, version, frame count and the 16-byte
 * extradata block (which carries the sample-rate index); creates the
 * audio stream and reads trailing APE/ID3v1 tags when seekable. */
static int mpc_read_header(AVFormatContext *s)
{
    MPCContext *c = s->priv_data;
    AVStream *st;

    if(avio_rl24(s->pb) != MKTAG('M', 'P', '+', 0)){
        av_log(s, AV_LOG_ERROR, "Not a Musepack file\n");
        return AVERROR_INVALIDDATA;
    }
    c->ver = avio_r8(s->pb);
    if(c->ver != 0x07 && c->ver != 0x17){
        av_log(s, AV_LOG_ERROR, "Can demux Musepack SV7, got version %02X\n", c->ver);
        return AVERROR_INVALIDDATA;
    }
    c->fcount = avio_rl32(s->pb);
    /* guard the seek-table allocation below against overflow */
    if((int64_t)c->fcount * sizeof(MPCFrame) >= UINT_MAX){
        av_log(s, AV_LOG_ERROR, "Too many frames, seeking is not possible\n");
        return AVERROR_INVALIDDATA;
    }
    if(c->fcount){
        c->frames = av_malloc(c->fcount * sizeof(MPCFrame));
        if(!c->frames){
            av_log(s, AV_LOG_ERROR, "Cannot allocate seektable\n");
            return AVERROR(ENOMEM);
        }
    }else{
        av_log(s, AV_LOG_WARNING, "Container reports no frames\n");
    }
    c->curframe = 0;
    c->lastframe = -1; /* forces a position resync on the first packet read */
    c->curbits = 8;
    c->frames_noted = 0;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_MUSEPACK7;
    st->codec->channels = 2;
    st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    st->codec->bits_per_coded_sample = 16;

    if (ff_alloc_extradata(st->codec, 16))
        return AVERROR(ENOMEM);
    avio_read(s->pb, st->codec->extradata, 16);
    /* low 2 bits of extradata byte 2 select the sample rate */
    st->codec->sample_rate = mpc_rate[st->codec->extradata[2] & 3];
    avpriv_set_pts_info(st, 32, MPC_FRAMESIZE, st->codec->sample_rate);
    /* scan for seekpoints */
    st->start_time = 0;
    st->duration = c->fcount;

    /* try to read APE tags */
    if (s->pb->seekable) {
        int64_t pos = avio_tell(s->pb);
        ff_ape_parse_tag(s);
        if (!av_dict_get(s->metadata, "", NULL, AV_DICT_IGNORE_SUFFIX))
            ff_id3v1_read(s);
        avio_seek(s->pb, pos, SEEK_SET);
    }

    return 0;
}
 
/* Read one SV7 frame. Frames are not byte-aligned: each frame starts with
 * a 20-bit size field at bit offset c->curbits within the current 32-bit
 * word, so the packet carries 4 bytes of side data (current bit offset and
 * a last-frame flag) in front of the raw words. The seek table is extended
 * as new frames are encountered. */
static int mpc_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MPCContext *c = s->priv_data;
    int ret, size, size2, curbits, cur = c->curframe;
    unsigned tmp;
    int64_t pos;

    if (c->curframe >= c->fcount && c->fcount)
        return AVERROR_EOF;

    /* after a seek, restore position and bit offset from the seek table */
    if(c->curframe != c->lastframe + 1){
        avio_seek(s->pb, c->frames[c->curframe].pos, SEEK_SET);
        c->curbits = c->frames[c->curframe].skip;
    }
    c->lastframe = c->curframe;
    c->curframe++;
    curbits = c->curbits;
    pos = avio_tell(s->pb);
    /* extract the 20-bit frame size straddling up to two 32-bit words */
    tmp = avio_rl32(s->pb);
    if(curbits <= 12){
        size2 = (tmp >> (12 - curbits)) & 0xFFFFF;
    }else{
        size2 = (tmp << (curbits - 12) | avio_rl32(s->pb) >> (44 - curbits)) & 0xFFFFF;
    }
    curbits += 20;
    avio_seek(s->pb, pos, SEEK_SET);

    /* bytes needed to cover the frame bits, rounded up to whole words */
    size = ((size2 + curbits + 31) & ~31) >> 3;
    if(cur == c->frames_noted && c->fcount){
        c->frames[cur].pos = pos;
        c->frames[cur].size = size;
        c->frames[cur].skip = curbits - 20;
        av_add_index_entry(s->streams[0], cur, cur, size, 0, AVINDEX_KEYFRAME);
        c->frames_noted++;
    }
    c->curbits = (curbits + size2) & 0x1F;

    if ((ret = av_new_packet(pkt, size)) < 0)
        return ret;

    /* 4-byte side header consumed by the decoder */
    pkt->data[0] = curbits;
    pkt->data[1] = (c->curframe > c->fcount) && c->fcount; /* last-frame flag */
    pkt->data[2] = 0;
    pkt->data[3] = 0;

    pkt->stream_index = 0;
    pkt->pts = cur;
    ret = avio_read(s->pb, pkt->data + 4, size);
    /* when the next frame starts mid-word, re-read the last word next time */
    if(c->curbits)
        avio_seek(s->pb, -4, SEEK_CUR);
    if(ret < size){
        av_free_packet(pkt);
        return ret < 0 ? ret : AVERROR(EIO);
    }
    pkt->size = ret + 4;

    return 0;
}
 
/* Release the lazily-built frame seek table. */
static int mpc_read_close(AVFormatContext *s)
{
    MPCContext *c = s->priv_data;

    av_freep(&c->frames);
    return 0;
}
 
/**
 * Seek to the given position
 * If position is unknown but is within the limits of file
 * then packets are skipped unless desired position is reached
 *
 * Also this function makes use of the fact that timestamp == frameno
 */
static int mpc_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *st = s->streams[stream_index];
    MPCContext *c = s->priv_data;
    AVPacket pkt1, *pkt = &pkt1;
    int ret;
    /* aim DELAY_FRAMES earlier so the decoder has settled at the target */
    int index = av_index_search_timestamp(st, FFMAX(timestamp - DELAY_FRAMES, 0), flags);
    uint32_t lastframe;

    /* if found, seek there */
    if (index >= 0 && st->index_entries[st->nb_index_entries-1].timestamp >= timestamp - DELAY_FRAMES){
        c->curframe = st->index_entries[index].pos;
        return 0;
    }
    /* if timestamp is out of bounds, return error */
    if(timestamp < 0 || timestamp >= c->fcount)
        return -1;
    timestamp -= DELAY_FRAMES;
    /* seek to the furthest known position and read packets until
       we reach desired position */
    lastframe = c->curframe;
    if(c->frames_noted) c->curframe = c->frames_noted - 1;
    while(c->curframe < timestamp){
        ret = av_read_frame(s, pkt);
        if (ret < 0){
            /* reading failed: restore the previous position */
            c->curframe = lastframe;
            return ret;
        }
        av_free_packet(pkt);
    }
    return 0;
}
 
 
/* Musepack SV7 demuxer registration */
AVInputFormat ff_mpc_demuxer = {
    .name           = "mpc",
    .long_name      = NULL_IF_CONFIG_SMALL("Musepack"),
    .priv_data_size = sizeof(MPCContext),
    .read_probe     = mpc_probe,
    .read_header    = mpc_read_header,
    .read_packet    = mpc_read_packet,
    .read_close     = mpc_read_close,
    .read_seek      = mpc_read_seek,
    .extensions     = "mpc",
};
/contrib/sdk/sources/ffmpeg/libavformat/mpc8.c
0,0 → 1,317
/*
* Musepack SV8 demuxer
* Copyright (c) 2007 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/get_bits.h"
#include "libavcodec/unary.h"
#include "apetag.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
 
/// Two-byte MPC tag
#define MKMPCTAG(a, b) (a | (b << 8))

#define TAG_MPCK MKTAG('M','P','C','K')

/// Reserved MPC tags
enum MPCPacketTags{
    TAG_STREAMHDR   = MKMPCTAG('S','H'), ///< stream header
    TAG_STREAMEND   = MKMPCTAG('S','E'), ///< end of stream marker

    TAG_AUDIOPACKET = MKMPCTAG('A','P'), ///< audio frame data

    TAG_SEEKTBLOFF  = MKMPCTAG('S','O'), ///< offset to the seek table
    TAG_SEEKTABLE   = MKMPCTAG('S','T'), ///< the seek table itself

    TAG_REPLAYGAIN  = MKMPCTAG('R','G'), ///< replay gain values
    TAG_ENCINFO     = MKMPCTAG('E','I'), ///< encoder information
};

/* sample rates selectable by the 3-bit field in the stream header */
static const int mpc8_rate[8] = { 44100, 48000, 37800, 32000, -1, -1, -1, -1 };

typedef struct {
    int ver;            /* stream version from the SH chunk (must be 8) */
    int64_t header_pos; /* file offset of the "MPCK" magic */
    int64_t samples;    /* total sample count from the stream header */

    int64_t apetag_start; /* offset of a trailing APE tag, 0 if none */
} MPCContext;
 
/**
 * Decode a variable-length value from a byte stream (7 payload bits per
 * byte, MSB set = more bytes follow) and advance the stream pointer.
 * Returns the decoded value minus the number of bytes consumed, or -1
 * when the field spans more than 10 bytes.
 */
static inline int64_t bs_get_v(const uint8_t **bs)
{
    int64_t value = 0;
    int nbytes = 0;
    int byte;

    do {
        byte = *(*bs)++;
        value = (value << 7) | (byte & 0x7F);
        if (++nbytes > 10)
            return -1;
    } while (byte & 0x80);

    return value - nbytes;
}
 
/* Probe for SV8: "MPCK" magic followed by a chain of well-formed chunks;
 * a valid SH (stream header) chunk gives the maximum score. */
static int mpc8_probe(AVProbeData *p)
{
    const uint8_t *bs = p->buf + 4;
    /* NOTE(review): bs_end = bs + buf_size points 4 bytes past the buffer
     * end, and the loop condition reads "bs_end + 3" — reads rely on the
     * zero padding appended to probe buffers; confirm against
     * AVPROBE_PADDING_SIZE guarantees */
    const uint8_t *bs_end = bs + p->buf_size;
    int64_t size;

    if (p->buf_size < 16)
        return 0;
    if (AV_RL32(p->buf) != TAG_MPCK)
        return 0;
    while (bs < bs_end + 3) {
        int header_found = (bs[0] == 'S' && bs[1] == 'H');
        /* chunk tags are always two uppercase letters */
        if (bs[0] < 'A' || bs[0] > 'Z' || bs[1] < 'A' || bs[1] > 'Z')
            return 0;
        bs += 2;
        size = bs_get_v(&bs); /* chunk size including tag and size field */
        if (size < 2)
            return 0;
        if (bs + size - 2 >= bs_end)
            return AVPROBE_SCORE_EXTENSION - 1; // seems to be valid MPC but no header yet
        if (header_found) {
            if (size < 11 || size > 28)
                return 0;
            if (!AV_RL32(bs)) //zero CRC is invalid
                return 0;
            return AVPROBE_SCORE_MAX;
        } else {
            bs += size - 2;
        }
    }
    return 0;
}
 
/* Read a variable-length value from the bitstream: while a continuation
 * bit is set (and 64 bits are not exceeded), accumulate 7-bit groups;
 * one final 7-bit group always follows. */
static inline int64_t gb_get_v(GetBitContext *gb)
{
    int64_t value = 0;
    int shifted = 0;

    while (get_bits1(gb) && shifted < 64 - 7) {
        value = (value << 7) | get_bits(gb, 7);
        shifted += 7;
    }
    value = (value << 7) | get_bits(gb, 7);

    return value;
}
 
/* Read a chunk tag and its payload size; the size stored in the file
 * includes the tag and size fields themselves, so subtract them. */
static void mpc8_get_chunk_header(AVIOContext *pb, int *tag, int64_t *size)
{
    int64_t start = avio_tell(pb);

    *tag  = avio_rl16(pb);
    *size = ffio_read_varlen(pb) - (avio_tell(pb) - start);
}
 
/* Parse the ST (seek table) chunk at file offset off and convert it into
 * index entries on the first stream. The first two entries are absolute
 * positions; the rest are delta-coded against a linear prediction.
 * Errors are logged and the table is simply not used.
 *
 * Fix: the temporary bitstream buffer was leaked on the "table too big"
 * early return; all exits now go through a single cleanup path. */
static void mpc8_parse_seektable(AVFormatContext *s, int64_t off)
{
    MPCContext *c = s->priv_data;
    int tag;
    int64_t size, pos, ppos[2];
    uint8_t *buf;
    int i, t, seekd;
    GetBitContext gb;

    if (s->nb_streams == 0) {
        av_log(s, AV_LOG_ERROR, "No stream added before parsing seek table\n");
        return;
    }

    avio_seek(s->pb, off, SEEK_SET);
    mpc8_get_chunk_header(s->pb, &tag, &size);
    if (tag != TAG_SEEKTABLE) {
        av_log(s, AV_LOG_ERROR, "No seek table at given position\n");
        return;
    }
    if (size > INT_MAX/10 || size <= 0) {
        av_log(s, AV_LOG_ERROR, "Bad seek table size\n");
        return;
    }
    if (!(buf = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE)))
        return;
    avio_read(s->pb, buf, size);
    init_get_bits(&gb, buf, size * 8);
    size = gb_get_v(&gb); /* number of table entries */
    if (size > UINT_MAX/4 || size > c->samples/1152) {
        av_log(s, AV_LOG_ERROR, "Seek table is too big\n");
        goto end; /* previously returned here, leaking buf */
    }
    seekd = get_bits(&gb, 4); /* log2 of frames per seek point */
    /* first two entries: absolute offsets relative to the stream header */
    for (i = 0; i < 2; i++) {
        pos = gb_get_v(&gb) + c->header_pos;
        ppos[1 - i] = pos;
        av_add_index_entry(s->streams[0], pos, i, 0, 0, AVINDEX_KEYFRAME);
    }
    /* remaining entries: zigzag-coded deltas from the linear prediction
     * 2*prev - prev2 */
    for (; i < size; i++) {
        t = get_unary(&gb, 1, 33) << 12;
        t += get_bits(&gb, 12);
        if (t & 1)
            t = -(t & ~1);
        pos = (t >> 1) + ppos[0]*2 - ppos[1];
        av_add_index_entry(s->streams[0], pos, i << seekd, 0, 0, AVINDEX_KEYFRAME);
        ppos[1] = ppos[0];
        ppos[0] = pos;
    }
end:
    av_free(buf);
}
 
/* Handle a non-audio chunk: follow an SO (seek-table offset) chunk to the
 * actual table, then resume after it; skip every other chunk type. */
static void mpc8_handle_chunk(AVFormatContext *s, int tag, int64_t chunk_pos, int64_t size)
{
    AVIOContext *pb = s->pb;
    int64_t next_chunk, table_off;

    if (tag == TAG_SEEKTBLOFF) {
        next_chunk = avio_tell(pb) + size;
        table_off  = ffio_read_varlen(pb);
        mpc8_parse_seektable(s, chunk_pos + table_off);
        avio_seek(pb, next_chunk, SEEK_SET);
    } else {
        avio_skip(pb, size);
    }
}
 
/* Parse the SV8 container: scan chunks until the SH (stream header) chunk,
 * read version/sample count/extradata from it, create the audio stream and
 * locate any trailing APE tag. */
static int mpc8_read_header(AVFormatContext *s)
{
    MPCContext *c = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    int tag = 0;
    int64_t size, pos;

    c->header_pos = avio_tell(pb);
    if(avio_rl32(pb) != TAG_MPCK){
        av_log(s, AV_LOG_ERROR, "Not a Musepack8 file\n");
        return AVERROR_INVALIDDATA;
    }

    /* skip/handle chunks until the stream header is reached */
    while(!url_feof(pb)){
        pos = avio_tell(pb);
        mpc8_get_chunk_header(pb, &tag, &size);
        if(tag == TAG_STREAMHDR)
            break;
        mpc8_handle_chunk(s, tag, pos, size);
    }
    if(tag != TAG_STREAMHDR){
        av_log(s, AV_LOG_ERROR, "Stream header not found\n");
        return AVERROR_INVALIDDATA;
    }
    pos = avio_tell(pb);
    avio_skip(pb, 4); //CRC
    c->ver = avio_r8(pb);
    if(c->ver != 8){
        av_log(s, AV_LOG_ERROR, "Unknown stream version %d\n", c->ver);
        return AVERROR_PATCHWELCOME;
    }
    c->samples = ffio_read_varlen(pb);
    ffio_read_varlen(pb); //silence samples at the beginning

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_MUSEPACK8;
    st->codec->bits_per_coded_sample = 16;

    if (ff_alloc_extradata(st->codec, 2))
        return AVERROR(ENOMEM);
    avio_read(pb, st->codec->extradata, st->codec->extradata_size);

    /* extradata byte 0: sample-rate index (high 3 bits);
     * byte 1: channel count (high nibble) and block-size power (low 2 bits) */
    st->codec->channels = (st->codec->extradata[1] >> 4) + 1;
    st->codec->sample_rate = mpc8_rate[st->codec->extradata[0] >> 5];
    avpriv_set_pts_info(st, 32, 1152 << (st->codec->extradata[1]&3)*2, st->codec->sample_rate);
    st->start_time = 0;
    st->duration = c->samples / (1152 << (st->codec->extradata[1]&3)*2);
    /* skip whatever remains of the stream header chunk */
    size -= avio_tell(pb) - pos;
    if (size > 0)
        avio_skip(pb, size);

    if (pb->seekable) {
        int64_t pos = avio_tell(s->pb);
        c->apetag_start = ff_ape_parse_tag(s);
        avio_seek(s->pb, pos, SEEK_SET);
    }

    return 0;
}
 
/* Walk chunks until the next AP (audio) chunk and return it as a packet.
 * SE ends the stream; other chunk types are handled/skipped in place.
 *
 * Fix: a failing av_get_packet() was always reported as AVERROR(ENOMEM);
 * the actual (negative) error code is now propagated to the caller. */
static int mpc8_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MPCContext *c = s->priv_data;
    int tag, ret;
    int64_t pos, size;

    while(!url_feof(s->pb)){
        pos = avio_tell(s->pb);

        /* don't return bogus packets with the ape tag data */
        if (c->apetag_start && pos >= c->apetag_start)
            return AVERROR_EOF;

        mpc8_get_chunk_header(s->pb, &tag, &size);
        if (size < 0)
            return -1;
        if(tag == TAG_AUDIOPACKET){
            ret = av_get_packet(s->pb, pkt, size);
            if (ret < 0)
                return ret;
            pkt->stream_index = 0;
            pkt->duration     = 1;
            return 0;
        }
        if(tag == TAG_STREAMEND)
            return AVERROR(EIO);
        mpc8_handle_chunk(s, tag, pos, size);
    }
    return AVERROR_EOF;
}
 
/* Seek to the indexed entry nearest to the requested timestamp and
 * update the current dts accordingly.  Returns 0 on success, -1 when no
 * suitable index entry exists or the underlying seek fails. */
static int mpc8_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *st = s->streams[stream_index];
    int entry = av_index_search_timestamp(st, timestamp, flags);

    if (entry < 0)
        return -1;
    if (avio_seek(s->pb, st->index_entries[entry].pos, SEEK_SET) < 0)
        return -1;

    ff_update_cur_dts(s, st, st->index_entries[entry].timestamp);
    return 0;
}
 
 
/* Demuxer definition for Musepack SV8 streams. */
AVInputFormat ff_mpc8_demuxer = {
    .name           = "mpc8",
    .long_name      = NULL_IF_CONFIG_SMALL("Musepack SV8"),
    .priv_data_size = sizeof(MPCContext),
    .read_probe     = mpc8_probe,
    .read_header    = mpc8_read_header,
    .read_packet    = mpc8_read_packet,
    .read_seek      = mpc8_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/mpeg.c
0,0 → 1,949
/*
* MPEG1/2 demuxer
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "mpeg.h"
 
#if CONFIG_VOBSUB_DEMUXER
# include "subtitles.h"
# include "libavutil/bprint.h"
#endif
 
#undef NDEBUG
#include <assert.h>
#include "libavutil/avassert.h"
 
/*********************************************/
/* demux code */
 
#define MAX_SYNC_SIZE 100000
 
/* Heuristic validity check of a PES header candidate.  p points at the
 * stream-id byte following a 00 00 01 start-code prefix; end bounds the
 * stuffing scan.  Accepts either a plausible MPEG-2 PES header ('10'
 * marker with consistent PTS/DTS flags) or a plausible MPEG-1 one.
 * NOTE(review): the fixed-offset reads (p[4], p[6], post-stuffing bytes)
 * can touch a few bytes past `end`; this presumably relies on the padded
 * probe buffer — confirm with callers. */
static int check_pes(const uint8_t *p, const uint8_t *end)
{
    int mpeg1_ok;
    int mpeg2_ok = (p[3] & 0xC0) == 0x80 &&
                   (p[4] & 0xC0) != 0x40 &&
                   ((p[4] & 0xC0) == 0x00 ||
                    (p[4] & 0xC0) >> 2 == (p[6] & 0xF0));

    /* MPEG-1 path: skip stuffing bytes, then the optional STD buffer field */
    p += 3;
    while (p < end && *p == 0xFF)
        p++;
    if ((*p & 0xC0) == 0x40)
        p += 2;

    switch (*p & 0xF0) {
    case 0x20: /* PTS only: marker bits of the 5-byte timestamp must be set */
        mpeg1_ok = p[0] & p[2] & p[4] & 1;
        break;
    case 0x30: /* PTS + DTS: marker bits of both timestamps must be set */
        mpeg1_ok = p[0] & p[2] & p[4] & p[5] & p[7] & p[9] & 1;
        break;
    default:   /* no timestamp: byte must be the '0000 1111' filler */
        mpeg1_ok = *p == 0x0F;
        break;
    }

    return mpeg1_ok || mpeg2_ok;
}
 
/* Check that the byte after a pack start code looks like the beginning of
 * a pack header: '01xx' (MPEG-2) or '0010' (MPEG-1) in the top nibble. */
static int check_pack_header(const uint8_t *buf)
{
    int marker = buf[1];

    if ((marker & 0xC0) == 0x40)
        return 1;
    return (marker & 0xF0) == 0x20;
}
 
/* Probe for an MPEG program stream (or a raw PES stream) by scanning the
 * buffer for start codes and counting how many are followed by plausible
 * pack/system/PES headers versus implausible ones. */
static int mpegps_probe(AVProbeData *p)
{
    uint32_t code = -1;
    int sys = 0, pspack = 0, priv1 = 0, vid = 0, audio = 0, invalid = 0;
    int i;
    int score = 0;

    for (i = 0; i < p->buf_size; i++) {
        code = (code << 8) + p->buf[i];
        if ((code & 0xffffff00) == 0x100) {
            /* NOTE(review): len/pes peek a few bytes past i; presumably
             * safe because probe buffers are padded — confirm callers. */
            int len = p->buf[i + 1] << 8 | p->buf[i + 2];
            int pes = check_pes(p->buf + i, p->buf + p->buf_size);
            int pack = check_pack_header(p->buf + i);

            if (code == SYSTEM_HEADER_START_CODE)
                sys++;
            else if (code == PACK_START_CODE && pack)
                pspack++;
            else if ((code & 0xf0) == VIDEO_ID && pes)
                vid++;
            // skip pes payload to avoid start code emulation for private
            // and audio streams
            else if ((code & 0xe0) == AUDIO_ID && pes) { audio++; i += len; }
            else if (code == PRIVATE_STREAM_1 && pes)  { priv1++; i += len; }
            else if (code == 0x1fd && pes) vid++; // VC1

            else if ((code & 0xf0) == VIDEO_ID && !pes) invalid++;
            else if ((code & 0xe0) == AUDIO_ID && !pes) invalid++;
            else if (code == PRIVATE_STREAM_1 && !pes)  invalid++;
        }
    }

    if (vid + audio > invalid + 1) /* invalid VDR files and short PES streams */
        score = AVPROBE_SCORE_EXTENSION / 2;

    /* system headers roughly matched by pack headers -> program stream */
    if (sys > invalid && sys * 9 <= pspack * 10)
        return (audio > 12 || vid > 3 || pspack > 2) ? AVPROBE_SCORE_EXTENSION + 2 : AVPROBE_SCORE_EXTENSION / 2; // 1 more than .mpg
    if (pspack > invalid && (priv1 + vid + audio) * 10 >= pspack * 9)
        return pspack > 2 ? AVPROBE_SCORE_EXTENSION + 2 : AVPROBE_SCORE_EXTENSION / 2; // 1 more than .mpg
    /* exactly one payload kind, no pack/system headers -> raw PES stream */
    if ((!!vid ^ !!audio) && (audio > 4 || vid > 1) && !sys && !pspack && p->buf_size > 2048 && vid + audio > invalid) /* PES stream */
        return (audio > 12 || vid > 3 + 2 * invalid) ? AVPROBE_SCORE_EXTENSION + 2 : AVPROBE_SCORE_EXTENSION / 2;

    // 02-Penguin.flac has sys:0 priv1:0 pspack:0 vid:0 audio:1
    // mp3_misidentified_2.mp3 has sys:0 priv1:0 pspack:0 vid:0 audio:6
    // Have\ Yourself\ a\ Merry\ Little\ Christmas.mp3 0 0 0 5 0 1 len:21618
    return score;
}
 
 
/* Demuxer state shared by the MPEG-PS demuxer and (when enabled) the
 * VobSub demuxer that wraps it. */
typedef struct MpegDemuxContext {
    int32_t header_state;            // rolling state for start-code search
    unsigned char psm_es_type[256];  // stream id -> stream type, filled from the PSM
    int sofdec;                      // >0: Sofdec signature seen; <0: checked, not Sofdec
    int dvd;                         // non-zero once DVD PCI/DSI private-stream-2 packs detected
    int imkh_cctv;                   // file began with the "IMKH" signature
#if CONFIG_VOBSUB_DEMUXER
    AVFormatContext *sub_ctx;        // inner MPEG-PS context reading the .sub file
    FFDemuxSubtitlesQueue q[32];     // one queue of pre-parsed packets per .idx stream
#endif
} MpegDemuxContext;
 
/* Initialize the MPEG-PS demuxer: detect the optional "IMKH" (CCTV) and
 * "Sofdec" signatures at the start of the file, otherwise rewind.
 * Streams themselves are created lazily while reading packets. */
static int mpegps_read_header(AVFormatContext *s)
{
    MpegDemuxContext *m = s->priv_data;
    char magic[7];
    int64_t start_pos = avio_tell(s->pb);

    m->header_state = 0xff;
    s->ctx_flags   |= AVFMTCTX_NOHEADER;

    avio_get_str(s->pb, 6, magic, sizeof(magic));
    if (!memcmp(magic, "IMKH", 4))
        m->imkh_cctv = 1;
    else if (!memcmp(magic, "Sofdec", 6))
        m->sofdec = 1;
    else
        avio_seek(s->pb, start_pos, SEEK_SET);

    /* no need to do more */
    return 0;
}
 
/* Read a 5-byte PES timestamp from pb and decode it.  If c >= 0 it is the
 * first timestamp byte, already consumed by the caller; otherwise the
 * first byte is read from pb as well. */
static int64_t get_pts(AVIOContext *pb, int c)
{
    uint8_t ts[5];

    if (c < 0)
        c = avio_r8(pb);
    ts[0] = c;
    avio_read(pb, ts + 1, 4);

    return ff_parse_pes_pts(ts);
}
 
/* Scan forward for the next 00 00 01 xx start code, reading at most
 * *size_ptr bytes.  The running 3-byte window is kept in *header_state so
 * a search can resume across calls.  On success returns the full start
 * code (0x1xx) with *size_ptr reduced by the bytes consumed; returns -1
 * when the budget is exhausted or EOF is hit. */
static int find_next_start_code(AVIOContext *pb, int *size_ptr,
                                int32_t *header_state)
{
    unsigned int state = *header_state;
    int remaining = *size_ptr;
    int val = -1;

    while (remaining > 0) {
        unsigned int byte;
        int have_prefix;

        if (url_feof(pb))
            break;
        byte = avio_r8(pb);
        remaining--;
        /* the previous three bytes were 00 00 01 -> this byte completes
         * the start code */
        have_prefix = state == 0x000001;
        state = ((state << 8) | byte) & 0xffffff;
        if (have_prefix) {
            val = state;
            break;
        }
    }

    *header_state = state;
    *size_ptr     = remaining;
    return val;
}
 
/**
 * Extract stream types from a program stream map
 * (ISO/IEC 13818-1 'MPEG-2 Systems', table 2-35) and record the
 * stream-id -> stream-type mapping in m->psm_es_type.
 *
 * @return number of bytes occupied by the PSM in the bitstream
 */
static long mpegps_psm_parse(MpegDemuxContext *m, AVIOContext *pb)
{
    int psm_len, info_len, map_len;

    psm_len = avio_rb16(pb);
    avio_r8(pb);
    avio_r8(pb);
    info_len = avio_rb16(pb);

    /* skip program_stream_info */
    avio_skip(pb, info_len);

    map_len = avio_rb16(pb);
    /* each elementary-stream entry occupies at least 4 bytes */
    while (map_len >= 4) {
        unsigned char stream_type  = avio_r8(pb);
        unsigned char es_id        = avio_r8(pb);
        uint16_t      es_info_len  = avio_rb16(pb);

        /* remember mapping from stream id to stream type */
        m->psm_es_type[es_id] = stream_type;
        /* skip the per-stream descriptors */
        avio_skip(pb, es_info_len);
        map_len -= 4 + es_info_len;
    }
    avio_rb32(pb); /* crc32 */
    return 2 + psm_len;
}
 
/* Read the next PES header.  Return its payload length (bytes left for the
 * caller to read), its position in ppos (if not NULL), and its start code,
 * pts and dts.  Pack/system headers, padding, the PSM and unknown ids are
 * skipped transparently; on a malformed header the parser resyncs from the
 * last start code (error_redo). */
static int mpegps_read_pes_header(AVFormatContext *s,
                                  int64_t *ppos, int *pstart_code,
                                  int64_t *ppts, int64_t *pdts)
{
    MpegDemuxContext *m = s->priv_data;
    int len, size, startcode, c, flags, header_len;
    int pes_ext, ext2_len, id_ext, skip;
    int64_t pts, dts;
    int64_t last_sync = avio_tell(s->pb);

error_redo:
    /* rewind to the last sync point before searching again */
    avio_seek(s->pb, last_sync, SEEK_SET);
redo:
    /* next start code (should be immediately after) */
    m->header_state = 0xff;
    size = MAX_SYNC_SIZE;
    startcode = find_next_start_code(s->pb, &size, &m->header_state);
    last_sync = avio_tell(s->pb);
    if (startcode < 0) {
        if (url_feof(s->pb))
            return AVERROR_EOF;
        // FIXME we should remember header_state
        return AVERROR(EAGAIN);
    }

    if (startcode == PACK_START_CODE)
        goto redo;
    if (startcode == SYSTEM_HEADER_START_CODE)
        goto redo;
    if (startcode == PADDING_STREAM) {
        avio_skip(s->pb, avio_rb16(s->pb));
        goto redo;
    }
    if (startcode == PRIVATE_STREAM_2) {
        if (!m->sofdec) {
            /* Need to detect whether this from a DVD or a 'Sofdec' stream */
            int len = avio_rb16(s->pb);
            int bytesread = 0;
            uint8_t *ps2buf = av_malloc(len);

            if (ps2buf) {
                bytesread = avio_read(s->pb, ps2buf, len);

                if (bytesread != len) {
                    avio_skip(s->pb, len - bytesread);
                } else {
                    uint8_t *p = 0;
                    if (len >= 6)
                        p = memchr(ps2buf, 'S', len - 5);

                    if (p)
                        m->sofdec = !memcmp(p + 1, "ofdec", 5);

                    /* map 0 to -1 so "checked, not Sofdec" is remembered */
                    m->sofdec -= !m->sofdec;

                    if (m->sofdec < 0) {
                        if (len == 980 && ps2buf[0] == 0) {
                            /* PCI structure? Sanity-check the BCD time fields. */
                            uint32_t startpts = AV_RB32(ps2buf + 0x0d);
                            uint32_t endpts   = AV_RB32(ps2buf + 0x11);
                            uint8_t hours = ((ps2buf[0x19] >> 4) * 10) + (ps2buf[0x19] & 0x0f);
                            uint8_t mins  = ((ps2buf[0x1a] >> 4) * 10) + (ps2buf[0x1a] & 0x0f);
                            uint8_t secs  = ((ps2buf[0x1b] >> 4) * 10) + (ps2buf[0x1b] & 0x0f);

                            m->dvd = (hours <= 23 &&
                                      mins  <= 59 &&
                                      secs  <= 59 &&
                                      (ps2buf[0x19] & 0x0f) < 10 &&
                                      (ps2buf[0x1a] & 0x0f) < 10 &&
                                      (ps2buf[0x1b] & 0x0f) < 10 &&
                                      endpts >= startpts);
                        } else if (len == 1018 && ps2buf[0] == 1) {
                            /* DSI structure? */
                            uint8_t hours = ((ps2buf[0x1d] >> 4) * 10) + (ps2buf[0x1d] & 0x0f);
                            uint8_t mins  = ((ps2buf[0x1e] >> 4) * 10) + (ps2buf[0x1e] & 0x0f);
                            uint8_t secs  = ((ps2buf[0x1f] >> 4) * 10) + (ps2buf[0x1f] & 0x0f);

                            m->dvd = (hours <= 23 &&
                                      mins  <= 59 &&
                                      secs  <= 59 &&
                                      (ps2buf[0x1d] & 0x0f) < 10 &&
                                      (ps2buf[0x1e] & 0x0f) < 10 &&
                                      (ps2buf[0x1f] & 0x0f) < 10);
                        }
                    }
                }

                av_free(ps2buf);

                /* If this isn't a DVD packet or no memory
                 * could be allocated, just ignore it.
                 * If we did, move back to the start of the
                 * packet (plus 'length' field) */
                if (!m->dvd || avio_skip(s->pb, -(len + 2)) < 0) {
                    /* Skip back failed.
                     * This packet will be lost but that can't be helped
                     * if we can't skip back
                     */
                    goto redo;
                }
            } else {
                /* No memory */
                avio_skip(s->pb, len);
                goto redo;
            }
        } else if (!m->dvd) {
            int len = avio_rb16(s->pb);
            avio_skip(s->pb, len);
            goto redo;
        }
    }
    if (startcode == PROGRAM_STREAM_MAP) {
        mpegps_psm_parse(m, s->pb);
        goto redo;
    }

    /* find matching stream */
    if (!((startcode >= 0x1c0 && startcode <= 0x1df) ||
          (startcode >= 0x1e0 && startcode <= 0x1ef) ||
          (startcode == 0x1bd) ||
          (startcode == PRIVATE_STREAM_2) ||
          (startcode == 0x1fd)))
        goto redo;
    if (ppos) {
        *ppos = avio_tell(s->pb) - 4;
    }
    len = avio_rb16(s->pb);
    pts =
    dts = AV_NOPTS_VALUE;
    if (startcode != PRIVATE_STREAM_2) {
        /* stuffing */
        for (;;) {
            if (len < 1)
                goto error_redo;
            c = avio_r8(s->pb);
            len--;
            /* XXX: for mpeg1, should test only bit 7 */
            if (c != 0xff)
                break;
        }
        if ((c & 0xc0) == 0x40) {
            /* buffer scale & size */
            avio_r8(s->pb);
            c = avio_r8(s->pb);
            len -= 2;
        }
        if ((c & 0xe0) == 0x20) {
            /* MPEG-1 style PTS, optionally followed by a DTS */
            dts = pts = get_pts(s->pb, c);
            len -= 4;
            if (c & 0x10) {
                dts = get_pts(s->pb, -1);
                len -= 5;
            }
        } else if ((c & 0xc0) == 0x80) {
            /* mpeg 2 PES */
            flags      = avio_r8(s->pb);
            header_len = avio_r8(s->pb);
            len -= 2;
            if (header_len > len)
                goto error_redo;
            len -= header_len;
            if (flags & 0x80) {
                dts = pts = get_pts(s->pb, -1);
                header_len -= 5;
                if (flags & 0x40) {
                    dts = get_pts(s->pb, -1);
                    header_len -= 5;
                }
            }
            if (flags & 0x3f && header_len == 0) {
                flags &= 0xC0;
                av_log(s, AV_LOG_WARNING, "Further flags set but no bytes left\n");
            }
            if (flags & 0x01) { /* PES extension */
                pes_ext = avio_r8(s->pb);
                header_len--;
                /* Skip PES private data, program packet sequence counter and P-STD buffer */
                skip  = (pes_ext >> 4) & 0xb;
                skip += skip & 0x9;
                if (pes_ext & 0x40 || skip > header_len) {
                    av_log(s, AV_LOG_WARNING, "pes_ext %X is invalid\n", pes_ext);
                    pes_ext = skip = 0;
                }
                avio_skip(s->pb, skip);
                header_len -= skip;

                if (pes_ext & 0x01) { /* PES extension 2 */
                    ext2_len = avio_r8(s->pb);
                    header_len--;
                    if ((ext2_len & 0x7f) > 0) {
                        id_ext = avio_r8(s->pb);
                        /* extended stream id replaces the low byte */
                        if ((id_ext & 0x80) == 0)
                            startcode = ((startcode & 0xff) << 8) | id_ext;
                        header_len--;
                    }
                }
            }
            if (header_len < 0)
                goto error_redo;
            avio_skip(s->pb, header_len);
        } else if (c != 0xf)
            goto redo;
    }

    if (startcode == PRIVATE_STREAM_1) {
        /* the actual substream id is the first payload byte */
        startcode = avio_r8(s->pb);
        len--;
    }
    if (len < 0)
        goto error_redo;
    if (dts != AV_NOPTS_VALUE && ppos) {
        int i;
        for (i = 0; i < s->nb_streams; i++) {
            if (startcode == s->streams[i]->id &&
                s->pb->seekable /* index useless on streams anyway */) {
                ff_reduce_index(s, i);
                av_add_index_entry(s->streams[i], *ppos, dts, 0, 0, AVINDEX_KEYFRAME /* FIXME keyframe? */);
            }
        }
    }

    *pstart_code = startcode;
    *ppts        = pts;
    *pdts        = dts;
    return len;
}
 
/* Demux one PES packet: parse the next PES header, map its stream id to a
 * codec (creating the AVStream lazily on first sight, via the PSM entry or
 * the conventional DVD/EVOB start-code ranges), then read the payload. */
static int mpegps_read_packet(AVFormatContext *s,
                              AVPacket *pkt)
{
    MpegDemuxContext *m = s->priv_data;
    AVStream *st;
    int len, startcode, i, es_type, ret;
    int lpcm_header_len = -1; // Init to suppress warning
    int request_probe = 0;
    enum AVCodecID codec_id = AV_CODEC_ID_NONE;
    enum AVMediaType type;
    int64_t pts, dts, dummy_pos; // dummy_pos is needed for the index building to work

redo:
    len = mpegps_read_pes_header(s, &dummy_pos, &startcode, &pts, &dts);
    if (len < 0)
        return len;

    if (startcode >= 0x80 && startcode <= 0xcf) {
        if (len < 4)
            goto skip;

        /* audio: skip header */
        avio_r8(s->pb);
        lpcm_header_len = avio_rb16(s->pb);
        len -= 3;
        if (startcode >= 0xb0 && startcode <= 0xbf) {
            /* MLP/TrueHD audio has a 4-byte header */
            avio_r8(s->pb);
            len--;
        }
    }

    /* now find stream */
    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->id == startcode)
            goto found;
    }

    /* unknown stream id: derive codec from the PSM, if one was parsed */
    es_type = m->psm_es_type[startcode & 0xff];
    if (es_type == STREAM_TYPE_VIDEO_MPEG1) {
        codec_id = AV_CODEC_ID_MPEG2VIDEO;
        type     = AVMEDIA_TYPE_VIDEO;
    } else if (es_type == STREAM_TYPE_VIDEO_MPEG2) {
        codec_id = AV_CODEC_ID_MPEG2VIDEO;
        type     = AVMEDIA_TYPE_VIDEO;
    } else if (es_type == STREAM_TYPE_AUDIO_MPEG1 ||
               es_type == STREAM_TYPE_AUDIO_MPEG2) {
        codec_id = AV_CODEC_ID_MP3;
        type     = AVMEDIA_TYPE_AUDIO;
    } else if (es_type == STREAM_TYPE_AUDIO_AAC) {
        codec_id = AV_CODEC_ID_AAC;
        type     = AVMEDIA_TYPE_AUDIO;
    } else if (es_type == STREAM_TYPE_VIDEO_MPEG4) {
        codec_id = AV_CODEC_ID_MPEG4;
        type     = AVMEDIA_TYPE_VIDEO;
    } else if (es_type == STREAM_TYPE_VIDEO_H264) {
        codec_id = AV_CODEC_ID_H264;
        type     = AVMEDIA_TYPE_VIDEO;
    } else if (es_type == STREAM_TYPE_AUDIO_AC3) {
        codec_id = AV_CODEC_ID_AC3;
        type     = AVMEDIA_TYPE_AUDIO;
    } else if (m->imkh_cctv && es_type == 0x91) {
        codec_id = AV_CODEC_ID_PCM_MULAW;
        type     = AVMEDIA_TYPE_AUDIO;
    } else if (startcode >= 0x1e0 && startcode <= 0x1ef) {
        /* video stream: peek 8 bytes to tell CAVS from MPEG video */
        static const unsigned char avs_seqh[4] = { 0, 0, 1, 0xb0 };
        unsigned char buf[8];
        avio_read(s->pb, buf, 8);
        avio_seek(s->pb, -8, SEEK_CUR);
        if (!memcmp(buf, avs_seqh, 4) && (buf[6] != 0 || buf[7] != 1))
            codec_id = AV_CODEC_ID_CAVS;
        else
            request_probe = 1;
        type = AVMEDIA_TYPE_VIDEO;
    } else if (startcode == PRIVATE_STREAM_2) {
        type     = AVMEDIA_TYPE_DATA;
        codec_id = AV_CODEC_ID_DVD_NAV;
    } else if (startcode >= 0x1c0 && startcode <= 0x1df) {
        type     = AVMEDIA_TYPE_AUDIO;
        codec_id = m->sofdec > 0 ? AV_CODEC_ID_ADPCM_ADX : AV_CODEC_ID_MP2;
    } else if (startcode >= 0x80 && startcode <= 0x87) {
        type     = AVMEDIA_TYPE_AUDIO;
        codec_id = AV_CODEC_ID_AC3;
    } else if ((startcode >= 0x88 && startcode <= 0x8f) ||
               (startcode >= 0x98 && startcode <= 0x9f)) {
        /* 0x90 - 0x97 is reserved for SDDS in DVD specs */
        type     = AVMEDIA_TYPE_AUDIO;
        codec_id = AV_CODEC_ID_DTS;
    } else if (startcode >= 0xa0 && startcode <= 0xaf) {
        type = AVMEDIA_TYPE_AUDIO;
        if (lpcm_header_len == 6) {
            codec_id = AV_CODEC_ID_MLP;
        } else {
            codec_id = AV_CODEC_ID_PCM_DVD;
        }
    } else if (startcode >= 0xb0 && startcode <= 0xbf) {
        type     = AVMEDIA_TYPE_AUDIO;
        codec_id = AV_CODEC_ID_TRUEHD;
    } else if (startcode >= 0xc0 && startcode <= 0xcf) {
        /* Used for both AC-3 and E-AC-3 in EVOB files */
        type     = AVMEDIA_TYPE_AUDIO;
        codec_id = AV_CODEC_ID_AC3;
    } else if (startcode >= 0x20 && startcode <= 0x3f) {
        type     = AVMEDIA_TYPE_SUBTITLE;
        codec_id = AV_CODEC_ID_DVD_SUBTITLE;
    } else if (startcode >= 0xfd55 && startcode <= 0xfd5f) {
        type     = AVMEDIA_TYPE_VIDEO;
        codec_id = AV_CODEC_ID_VC1;
    } else {
    skip:
        /* skip packet */
        avio_skip(s->pb, len);
        goto redo;
    }
    /* no stream found: add a new stream */
    st = avformat_new_stream(s, NULL);
    if (!st)
        goto skip;
    st->id = startcode;
    st->codec->codec_type = type;
    st->codec->codec_id   = codec_id;
    if (st->codec->codec_id == AV_CODEC_ID_PCM_MULAW) {
        st->codec->channels       = 1;
        st->codec->channel_layout = AV_CH_LAYOUT_MONO;
        st->codec->sample_rate    = 8000;
    }
    st->request_probe = request_probe;
    st->need_parsing  = AVSTREAM_PARSE_FULL;
found:
    if (st->discard >= AVDISCARD_ALL)
        goto skip;
    if (startcode >= 0xa0 && startcode <= 0xaf) {
        if (lpcm_header_len == 6 && st->codec->codec_id == AV_CODEC_ID_MLP) {
            /* drop the 6-byte MLP access-unit header */
            if (len < 6)
                goto skip;
            avio_skip(s->pb, 6);
            len -= 6;
        }
    }
    ret = av_get_packet(s->pb, pkt, len);
    pkt->pts          = pts;
    pkt->dts          = dts;
    pkt->pos          = dummy_pos;
    pkt->stream_index = st->index;
    av_dlog(s, "%d: pts=%0.3f dts=%0.3f size=%d\n",
            pkt->stream_index, pkt->pts / 90000.0, pkt->dts / 90000.0,
            pkt->size);

    return (ret < 0) ? ret : 0;
}
 
/* Timestamp reader used by the generic seeking code: starting at *ppos,
 * scan PES headers until one belonging to stream_index carries a DTS.
 * Returns that DTS (and its packet position in *ppos), or AV_NOPTS_VALUE. */
static int64_t mpegps_read_dts(AVFormatContext *s, int stream_index,
                               int64_t *ppos, int64_t pos_limit)
{
    int len, startcode;
    int64_t pos = *ppos;
    int64_t pts, dts;

    if (avio_seek(s->pb, pos, SEEK_SET) < 0)
        return AV_NOPTS_VALUE;

    for (;;) {
        len = mpegps_read_pes_header(s, &pos, &startcode, &pts, &dts);
        if (len < 0) {
            av_dlog(s, "none (ret=%d)\n", len);
            return AV_NOPTS_VALUE;
        }
        if (startcode == s->streams[stream_index]->id &&
            dts != AV_NOPTS_VALUE)
            break;
        /* not ours or no DTS: skip the payload and keep scanning */
        avio_skip(s->pb, len);
    }

    av_dlog(s, "pos=0x%"PRIx64" dts=0x%"PRIx64" %0.3f\n",
            pos, dts, dts / 90000.0);
    *ppos = pos;
    return dts;
}
 
/* Demuxer definition for MPEG program streams. */
AVInputFormat ff_mpegps_demuxer = {
    .name           = "mpeg",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-PS (MPEG-2 Program Stream)"),
    .priv_data_size = sizeof(MpegDemuxContext),
    .read_probe     = mpegps_probe,
    .read_header    = mpegps_read_header,
    .read_packet    = mpegps_read_packet,
    .read_timestamp = mpegps_read_dts,
    .flags          = AVFMT_SHOW_IDS | AVFMT_TS_DISCONT,
};
 
#if CONFIG_VOBSUB_DEMUXER
 
#define REF_STRING "# VobSub index file,"

/* A VobSub .idx file always starts with a fixed signature comment. */
static int vobsub_probe(AVProbeData *p)
{
    return strncmp(p->buf, REF_STRING, sizeof(REF_STRING) - 1)
           ? 0 : AVPROBE_SCORE_MAX;
}
 
/* Parse a VobSub .idx file: open the sibling .sub file through the
 * MPEG-PS demuxer, create one subtitle stream per "id:" line, queue one
 * pre-parsed (position + pts) packet per "timestamp:" line, and store the
 * leading non-directive lines as every stream's extradata. */
static int vobsub_read_header(AVFormatContext *s)
{
    int i, ret = 0, header_parsed = 0, langidx = 0;
    MpegDemuxContext *vobsub = s->priv_data;
    char *sub_name = NULL;
    size_t fname_len;
    char *ext, *header_str;
    AVBPrint header;
    int64_t delay = 0;
    AVStream *st = NULL;

    sub_name = av_strdup(s->filename);
    if (!sub_name) {
        /* fix: av_strdup() can fail; the old code passed the NULL result
         * straight to strlen() (undefined behavior) */
        ret = AVERROR(ENOMEM);
        goto end;
    }
    fname_len = strlen(sub_name);
    if (fname_len < 4 || sub_name[fname_len - 4] != '.') {
        av_log(s, AV_LOG_ERROR, "The input index filename is too short "
               "to guess the associated .SUB file\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    /* fix: compute the extension pointer only after validating the length;
     * previously it was formed first, which is out-of-bounds pointer
     * arithmetic for names shorter than 3 characters */
    ext = sub_name - 3 + fname_len;
    /* replace the extension, preserving its case */
    memcpy(ext, !strncmp(ext, "IDX", 3) ? "SUB" : "sub", 3);
    av_log(s, AV_LOG_VERBOSE, "IDX/SUB: %s -> %s\n", s->filename, sub_name);
    ret = avformat_open_input(&vobsub->sub_ctx, sub_name, &ff_mpegps_demuxer, NULL);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "Unable to open %s as MPEG subtitles\n", sub_name);
        goto end;
    }

    av_bprint_init(&header, 0, AV_BPRINT_SIZE_UNLIMITED);
    while (!url_feof(s->pb)) {
        char line[2048];
        int len = ff_get_line(s->pb, line, sizeof(line));

        if (!len)
            break;

        line[strcspn(line, "\r\n")] = 0;

        if (!strncmp(line, "id:", 3)) {
            /* "id: <lang>, index: <n>" starts a new subtitle stream */
            int n, stream_id = 0;
            char id[64] = {0};

            n = sscanf(line, "id: %63[^,], index: %u", id, &stream_id);
            if (n != 2) {
                av_log(s, AV_LOG_WARNING, "Unable to parse index line '%s', "
                       "assuming 'id: und, index: 0'\n", line);
                strcpy(id, "und");
                stream_id = 0;
            }

            if (stream_id >= FF_ARRAY_ELEMS(vobsub->q)) {
                av_log(s, AV_LOG_ERROR, "Maximum number of subtitles streams reached\n");
                ret = AVERROR(EINVAL);
                goto end;
            }

            st = avformat_new_stream(s, NULL);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto end;
            }
            st->id = stream_id;
            st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
            st->codec->codec_id   = AV_CODEC_ID_DVD_SUBTITLE;
            avpriv_set_pts_info(st, 64, 1, 1000);
            av_dict_set(&st->metadata, "language", id, 0);
            av_log(s, AV_LOG_DEBUG, "IDX stream[%d] id=%s\n", stream_id, id);
            header_parsed = 1;

        } else if (st && !strncmp(line, "timestamp:", 10)) {
            /* queue an empty packet holding the pts and .sub file offset */
            AVPacket *sub;
            int hh, mm, ss, ms;
            int64_t pos, timestamp;
            const char *p = line + 10;

            if (!s->nb_streams) {
                av_log(s, AV_LOG_ERROR, "Timestamp declared before any stream\n");
                ret = AVERROR_INVALIDDATA;
                goto end;
            }

            if (sscanf(p, "%02d:%02d:%02d:%03d, filepos: %"SCNx64,
                       &hh, &mm, &ss, &ms, &pos) != 5) {
                av_log(s, AV_LOG_ERROR, "Unable to parse timestamp line '%s', "
                       "abort parsing\n", line);
                break;
            }
            timestamp = (hh*3600LL + mm*60LL + ss) * 1000LL + ms + delay;
            timestamp = av_rescale_q(timestamp, (AVRational){1,1000}, st->time_base);

            sub = ff_subtitles_queue_insert(&vobsub->q[s->nb_streams - 1], "", 0, 0);
            if (!sub) {
                ret = AVERROR(ENOMEM);
                goto end;
            }
            sub->pos = pos;
            sub->pts = timestamp;
            sub->stream_index = s->nb_streams - 1;

        } else if (st && !strncmp(line, "alt:", 4)) {
            /* human-readable stream title */
            const char *p = line + 4;

            while (*p == ' ')
                p++;
            av_dict_set(&st->metadata, "title", p, 0);
            av_log(s, AV_LOG_DEBUG, "IDX stream[%d] name=%s\n", st->id, p);
            header_parsed = 1;

        } else if (!strncmp(line, "delay:", 6)) {
            /* signed global offset applied to subsequent timestamps */
            int sign = 1, hh = 0, mm = 0, ss = 0, ms = 0;
            const char *p = line + 6;

            while (*p == ' ')
                p++;
            if (*p == '-' || *p == '+') {
                sign = *p == '-' ? -1 : 1;
                p++;
            }
            sscanf(p, "%d:%d:%d:%d", &hh, &mm, &ss, &ms);
            delay = ((hh*3600LL + mm*60LL + ss) * 1000LL + ms) * sign;

        } else if (!strncmp(line, "langidx:", 8)) {
            const char *p = line + 8;

            if (sscanf(p, "%d", &langidx) != 1)
                av_log(s, AV_LOG_ERROR, "Invalid langidx specified\n");

        } else if (!header_parsed) {
            /* everything before the first directive becomes shared extradata */
            if (line[0] && line[0] != '#')
                av_bprintf(&header, "%s\n", line);
        }
    }

    if (langidx < s->nb_streams)
        s->streams[langidx]->disposition |= AV_DISPOSITION_DEFAULT;

    for (i = 0; i < s->nb_streams; i++) {
        vobsub->q[i].sort = SUB_SORT_POS_TS;
        ff_subtitles_queue_finalize(&vobsub->q[i]);
    }

    if (!av_bprint_is_complete(&header)) {
        av_bprint_finalize(&header, NULL);
        ret = AVERROR(ENOMEM);
        goto end;
    }
    av_bprint_finalize(&header, &header_str);
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *sub_st = s->streams[i];
        sub_st->codec->extradata      = av_strdup(header_str);
        sub_st->codec->extradata_size = header.len;
    }
    av_free(header_str);

end:
    av_free(sub_name);
    return ret;
}
 
#define FAIL(r) do { ret = r; goto fail; } while (0)

/* Return the next subtitle in global timestamp order: pick the stream
 * queue whose next entry has the smallest pts, then reassemble its SPU
 * payload from one or more PES packets read out of the .sub file. */
static int vobsub_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MpegDemuxContext *vobsub = s->priv_data;
    FFDemuxSubtitlesQueue *q;
    AVIOContext *pb = vobsub->sub_ctx->pb;
    int ret, psize, total_read = 0, i;
    AVPacket idx_pkt;

    /* select the stream whose next queued subtitle is the earliest */
    int64_t min_ts = INT64_MAX;
    int sid = 0;
    for (i = 0; i < s->nb_streams; i++) {
        FFDemuxSubtitlesQueue *tmpq = &vobsub->q[i];
        int64_t ts = tmpq->subs[tmpq->current_sub_idx].pts;
        if (ts < min_ts) {
            min_ts = ts;
            sid = i;
        }
    }
    q = &vobsub->q[sid];
    ret = ff_subtitles_queue_read_packet(q, &idx_pkt);
    if (ret < 0)
        return ret;

    /* compute maximum packet size using the next packet position. This is
     * useful when the len in the header is non-sense */
    if (q->current_sub_idx < q->nb_subs) {
        psize = q->subs[q->current_sub_idx].pos - idx_pkt.pos;
    } else {
        int64_t fsize = avio_size(pb);
        psize = fsize < 0 ? 0xffff : fsize - idx_pkt.pos;
    }

    avio_seek(pb, idx_pkt.pos, SEEK_SET);

    av_init_packet(pkt);
    pkt->size = 0;
    pkt->data = NULL;

    do {
        int n, to_read, startcode;
        int64_t pts, dts;
        int64_t old_pos = avio_tell(pb), new_pos;
        int pkt_size;

        ret = mpegps_read_pes_header(vobsub->sub_ctx, NULL, &startcode, &pts, &dts);
        if (ret < 0) {
            if (pkt->size) // raise packet even if incomplete
                break;
            FAIL(ret);
        }
        to_read  = ret & 0xffff;
        new_pos  = avio_tell(pb);
        /* full chunk size = payload + header bytes just consumed */
        pkt_size = ret + (new_pos - old_pos);

        /* this prevents reads above the current packet */
        if (total_read + pkt_size > psize)
            break;
        total_read += pkt_size;

        /* the current chunk doesn't match the stream index (unlikely) */
        if ((startcode & 0x1f) != idx_pkt.stream_index)
            break;

        ret = av_grow_packet(pkt, to_read);
        if (ret < 0)
            FAIL(ret);

        n = avio_read(pb, pkt->data + (pkt->size - to_read), to_read);
        if (n < to_read)
            pkt->size -= to_read - n; /* short read: shrink back */
    } while (total_read < psize);

    pkt->pts = pkt->dts = idx_pkt.pts;
    pkt->pos          = idx_pkt.pos;
    pkt->stream_index = idx_pkt.stream_index;

    av_free_packet(&idx_pkt);
    return 0;

fail:
    av_free_packet(pkt);
    av_free_packet(&idx_pkt);
    return ret;
}
 
/* Seek within the queued subtitle packets.  For stream_index == -1 with
 * multiple streams, timestamps arrive in AV_TIME_BASE units and are
 * rescaled to the shared subtitle time base (identical for every stream
 * of a .idx/.sub pair), mirroring avformat_seek_file(); every queue is
 * then repositioned. */
static int vobsub_read_seek(AVFormatContext *s, int stream_index,
                            int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    MpegDemuxContext *vobsub = s->priv_data;
    AVRational tb;
    int i, ret = 0;

    /* explicit stream, or "any stream" with only one stream: direct seek */
    if (stream_index != -1 || s->nb_streams == 1) {
        if (stream_index == -1) // only 1 stream
            stream_index = 0;
        return ff_subtitles_queue_seek(&vobsub->q[stream_index], s, stream_index,
                                       min_ts, ts, max_ts, flags);
    }

    tb     = s->streams[0]->time_base;
    ts     = av_rescale_q(ts, AV_TIME_BASE_Q, tb);
    min_ts = av_rescale_rnd(min_ts, tb.den,
                            tb.num * (int64_t)AV_TIME_BASE,
                            AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
    max_ts = av_rescale_rnd(max_ts, tb.den,
                            tb.num * (int64_t)AV_TIME_BASE,
                            AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
    for (i = 0; i < s->nb_streams; i++) {
        int err = ff_subtitles_queue_seek(&vobsub->q[i], s, stream_index,
                                          min_ts, ts, max_ts, flags);
        if (err < 0)
            ret = err;
    }
    return ret;
}
 
/* Release every subtitle queue, then close the inner MPEG-PS context. */
static int vobsub_read_close(AVFormatContext *s)
{
    MpegDemuxContext *vobsub = s->priv_data;
    int i;

    for (i = 0; i < s->nb_streams; i++)
        ff_subtitles_queue_clean(&vobsub->q[i]);
    if (vobsub->sub_ctx)
        avformat_close_input(&vobsub->sub_ctx);

    return 0;
}
 
/* Demuxer definition for VobSub (.idx + .sub) subtitle pairs. */
AVInputFormat ff_vobsub_demuxer = {
    .name           = "vobsub",
    .long_name      = NULL_IF_CONFIG_SMALL("VobSub subtitle format"),
    .priv_data_size = sizeof(MpegDemuxContext),
    .read_probe     = vobsub_probe,
    .read_header    = vobsub_read_header,
    .read_packet    = vobsub_read_packet,
    .read_seek2     = vobsub_read_seek,
    .read_close     = vobsub_read_close,
    .flags          = AVFMT_SHOW_IDS,
    .extensions     = "idx",
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/mpeg.h
0,0 → 1,73
/*
* MPEG1/2 muxer and demuxer common defines
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_MPEG_H
#define AVFORMAT_MPEG_H
 
#include <stdint.h>
#include "libavutil/intreadwrite.h"
 
/* pack-layer start codes */
#define PACK_START_CODE             ((unsigned int)0x000001ba)
#define SYSTEM_HEADER_START_CODE    ((unsigned int)0x000001bb)
#define SEQUENCE_END_CODE           ((unsigned int)0x000001b7)
#define PACKET_START_CODE_MASK      ((unsigned int)0xffffff00)
#define PACKET_START_CODE_PREFIX    ((unsigned int)0x00000100)
#define ISO_11172_END_CODE          ((unsigned int)0x000001b9)

/* mpeg2 */
#define PROGRAM_STREAM_MAP 0x1bc
#define PRIVATE_STREAM_1   0x1bd
#define PADDING_STREAM     0x1be
#define PRIVATE_STREAM_2   0x1bf

/* base stream ids; the low bits select the substream */
#define AUDIO_ID 0xc0
#define VIDEO_ID 0xe0
#define AC3_ID   0x80
#define DTS_ID   0x88
#define LPCM_ID  0xa0
#define SUB_ID   0x20

/* stream_type values carried in the program stream map */
#define STREAM_TYPE_VIDEO_MPEG1     0x01
#define STREAM_TYPE_VIDEO_MPEG2     0x02
#define STREAM_TYPE_AUDIO_MPEG1     0x03
#define STREAM_TYPE_AUDIO_MPEG2     0x04
#define STREAM_TYPE_PRIVATE_SECTION 0x05
#define STREAM_TYPE_PRIVATE_DATA    0x06
#define STREAM_TYPE_AUDIO_AAC       0x0f
#define STREAM_TYPE_VIDEO_MPEG4     0x10
#define STREAM_TYPE_VIDEO_H264      0x1b
#define STREAM_TYPE_VIDEO_CAVS      0x42

#define STREAM_TYPE_AUDIO_AC3       0x81
#define STREAM_TYPE_AUDIO_DTS       0x8a

/* LPCM sample rates, indexed by the 2-bit frequency code
 * (presumably from the LPCM packet header — confirm against muxer use) */
static const int lpcm_freq_tab[4] = { 48000, 96000, 44100, 32000 };
 
/**
 * Parse an MPEG-PES five-byte timestamp into a 33-bit value:
 * 3 bits from byte 0, then two 15-bit big-endian halves, each field
 * followed by a marker bit that is simply skipped.
 */
static inline int64_t ff_parse_pes_pts(const uint8_t *buf) {
    unsigned mid = (((unsigned)buf[1] << 8 | buf[2]) >> 1) & 0x7fff;
    unsigned low = (((unsigned)buf[3] << 8 | buf[4]) >> 1) & 0x7fff;
    return (int64_t)(buf[0] & 0x0e) << 29 | (int64_t)mid << 15 | low;
}
 
#endif /* AVFORMAT_MPEG_H */
/contrib/sdk/sources/ffmpeg/libavformat/mpegenc.c
0,0 → 1,1252
/*
* MPEG1/2 muxer
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/attributes.h"
#include "libavutil/fifo.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavcodec/put_bits.h"
#include "avformat.h"
#include "internal.h"
#include "mpeg.h"
 
#define MAX_PAYLOAD_SIZE 4096
 
#undef NDEBUG
#include <assert.h>
 
/* Bookkeeping entry for one buffered input packet awaiting multiplexing. */
typedef struct PacketDesc {
    int64_t pts;
    int64_t dts;
    int size;                  // total payload size in bytes
    int unwritten_size;        // bytes of this packet not yet written out
    int flags;
    struct PacketDesc *next;   // singly-linked list of pending packets
} PacketDesc;
 
/* Per-stream muxer state. */
typedef struct {
    AVFifoBuffer *fifo;            // buffered payload bytes awaiting packetization
    uint8_t id;                    // MPEG stream id (0xc0.., 0xe0.., 0xbd, ...)
    int max_buffer_size;           /* in bytes */
    int buffer_index;
    PacketDesc *predecode_packet;
    PacketDesc *premux_packet;
    PacketDesc **next_packet;      // tail pointer for appending to the packet list
    int packet_number;
    uint8_t lpcm_header[3];        // header bytes for LPCM packets
    int lpcm_align;                // LPCM chunk alignment in bytes
    int bytes_to_iframe;           // NOTE(review): presumably DVD VOBU/I-frame alignment state — confirm in full muxer source
    int align_iframe;
    int64_t vobu_start_pts;
} StreamInfo;
 
/* Global state of the MPEG-PS muxer and its VCD/SVCD/DVD variants. */
typedef struct {
    const AVClass *class;
    int packet_size;          /* required packet size */
    int packet_number;
    int pack_header_freq;     /* frequency (in packets^-1) at which we send pack headers */
    int system_header_freq;
    int system_header_size;
    int user_mux_rate;        /* bitrate in units of bits/s */
    int mux_rate;             /* bitrate in units of 50 bytes/s */
    /* stream info */
    int audio_bound;          // number of audio streams, for the system header
    int video_bound;          // number of video streams, for the system header
    int is_mpeg2;
    int is_vcd;
    int is_svcd;
    int is_dvd;
    int64_t last_scr;         /* current system clock */

    double vcd_padding_bitrate; // FIXME floats
    int64_t vcd_padding_bytes_written;

    int preload;
} MpegMuxContext;
 
extern AVOutputFormat ff_mpeg1vcd_muxer;
extern AVOutputFormat ff_mpeg2dvd_muxer;
extern AVOutputFormat ff_mpeg2svcd_muxer;
extern AVOutputFormat ff_mpeg2vob_muxer;
 
/* Write an MPEG-1/2 pack header (SCR + mux rate) into buf.
 * Returns the number of bytes written. */
static int put_pack_header(AVFormatContext *ctx,
                           uint8_t *buf, int64_t timestamp)
{
    MpegMuxContext *s = ctx->priv_data;
    PutBitContext pb;

    init_put_bits(&pb, buf, 128);

    put_bits32(&pb, PACK_START_CODE);
    if (s->is_mpeg2) {
        put_bits(&pb, 2, 0x1);                /* '01' marker (MPEG-2 pack) */
    } else {
        put_bits(&pb, 4, 0x2);                /* '0010' marker (MPEG-1 pack) */
    }
    /* 33-bit system clock reference, split 3/15/15 with marker bits */
    put_bits(&pb, 3,  (uint32_t)((timestamp >> 30) & 0x07));
    put_bits(&pb, 1, 1);
    put_bits(&pb, 15, (uint32_t)((timestamp >> 15) & 0x7fff));
    put_bits(&pb, 1, 1);
    put_bits(&pb, 15, (uint32_t)((timestamp) & 0x7fff));
    put_bits(&pb, 1, 1);
    if (s->is_mpeg2) {
        /* clock extension */
        put_bits(&pb, 9, 0);
    }
    put_bits(&pb, 1, 1);
    put_bits(&pb, 22, s->mux_rate);           /* in units of 50 bytes/s */
    put_bits(&pb, 1, 1);
    if (s->is_mpeg2) {
        put_bits(&pb, 1, 1);
        put_bits(&pb, 5, 0x1f);               /* reserved */
        put_bits(&pb, 3, 0);                  /* stuffing length */
    }
    flush_put_bits(&pb);
    return put_bits_ptr(&pb) - pb.buf;
}
 
/*
 * Write an MPEG-PS system header into buf and return its length in bytes.
 * When s->is_vcd and only_for_stream_id is non-zero, the P-STD buffer bound
 * entries are restricted to that one stream, as the VCD standard requires
 * (see p. IV-7). DVD mode emits a fixed set of four stream_bound entries.
 */
static int put_system_header(AVFormatContext *ctx, uint8_t *buf,int only_for_stream_id)
{
    MpegMuxContext *s = ctx->priv_data;
    int size, i, private_stream_coded, id;
    PutBitContext pb;

    init_put_bits(&pb, buf, 128);

    /* start code, header_length placeholder (patched below), marker bit */
    put_bits32(&pb, SYSTEM_HEADER_START_CODE);
    put_bits(&pb, 16, 0);
    put_bits(&pb, 1, 1);

    put_bits(&pb, 22, s->mux_rate); /* maximum bit rate of the multiplexed stream */
    put_bits(&pb, 1, 1); /* marker */
    if (s->is_vcd && only_for_stream_id==VIDEO_ID) {
        /* This header applies only to the video stream (see VCD standard p. IV-7)*/
        put_bits(&pb, 6, 0);
    } else
        put_bits(&pb, 6, s->audio_bound);

    if (s->is_vcd) {
        /* see VCD standard, p. IV-7*/
        put_bits(&pb, 1, 0);
        put_bits(&pb, 1, 1);
    } else {
        put_bits(&pb, 1, 0); /* variable bitrate*/
        put_bits(&pb, 1, 0); /* non constrainted bit stream */
    }

    if (s->is_vcd || s->is_dvd) {
        /* see VCD standard p IV-7 */
        put_bits(&pb, 1, 1); /* audio locked */
        put_bits(&pb, 1, 1); /* video locked */
    } else {
        put_bits(&pb, 1, 0); /* audio locked */
        put_bits(&pb, 1, 0); /* video locked */
    }

    put_bits(&pb, 1, 1); /* marker */

    if (s->is_vcd && (only_for_stream_id & 0xe0) == AUDIO_ID) {
        /* This header applies only to the audio stream (see VCD standard p. IV-7)*/
        put_bits(&pb, 5, 0);
    } else
        put_bits(&pb, 5, s->video_bound);

    if (s->is_dvd) {
        put_bits(&pb, 1, 0); /* packet_rate_restriction_flag */
        put_bits(&pb, 7, 0x7f); /* reserved byte */
    } else
        put_bits(&pb, 8, 0xff); /* reserved byte */

    /* DVD-Video Stream_bound entries
       id (0xB9) video, maximum P-STD for stream 0xE0. (P-STD_buffer_bound_scale = 1)
       id (0xB8) audio, maximum P-STD for any MPEG audio (0xC0 to 0xC7) streams. If there are none set to 4096 (32x128). (P-STD_buffer_bound_scale = 0)
       id (0xBD) private stream 1 (audio other than MPEG and subpictures). (P-STD_buffer_bound_scale = 1)
       id (0xBF) private stream 2, NAV packs, set to 2x1024. */
    if (s->is_dvd) {

        int P_STD_max_video = 0;
        int P_STD_max_mpeg_audio = 0;
        int P_STD_max_mpeg_PS1 = 0;

        /* find the largest decoder buffer requirement in each category */
        for(i=0;i<ctx->nb_streams;i++) {
            StreamInfo *stream = ctx->streams[i]->priv_data;

            id = stream->id;
            if (id == 0xbd && stream->max_buffer_size > P_STD_max_mpeg_PS1) {
                P_STD_max_mpeg_PS1 = stream->max_buffer_size;
            } else if (id >= 0xc0 && id <= 0xc7 && stream->max_buffer_size > P_STD_max_mpeg_audio) {
                P_STD_max_mpeg_audio = stream->max_buffer_size;
            } else if (id == 0xe0 && stream->max_buffer_size > P_STD_max_video) {
                P_STD_max_video = stream->max_buffer_size;
            }
        }

        /* video */
        put_bits(&pb, 8, 0xb9); /* stream ID */
        put_bits(&pb, 2, 3);
        put_bits(&pb, 1, 1);    /* buffer_bound_scale: units of 1024 bytes */
        put_bits(&pb, 13, P_STD_max_video / 1024);

        /* audio */
        if (P_STD_max_mpeg_audio == 0)
            P_STD_max_mpeg_audio = 4096;
        put_bits(&pb, 8, 0xb8); /* stream ID */
        put_bits(&pb, 2, 3);
        put_bits(&pb, 1, 0);    /* buffer_bound_scale: units of 128 bytes */
        put_bits(&pb, 13, P_STD_max_mpeg_audio / 128);

        /* private stream 1 */
        put_bits(&pb, 8, 0xbd); /* stream ID */
        put_bits(&pb, 2, 3);
        put_bits(&pb, 1, 0);
        put_bits(&pb, 13, P_STD_max_mpeg_PS1 / 128);

        /* private stream 2 */
        put_bits(&pb, 8, 0xbf); /* stream ID */
        put_bits(&pb, 2, 3);
        put_bits(&pb, 1, 1);
        put_bits(&pb, 13, 2);
    }
    else {
        /* audio stream info */
        private_stream_coded = 0;
        for(i=0;i<ctx->nb_streams;i++) {
            StreamInfo *stream = ctx->streams[i]->priv_data;


            /* For VCDs, only include the stream info for the stream
               that the pack which contains this system belongs to.
               (see VCD standard p. IV-7) */
            if ( !s->is_vcd || stream->id==only_for_stream_id
                || only_for_stream_id==0) {

                id = stream->id;
                if (id < 0xc0) {
                    /* special case for private streams (AC-3 uses that) */
                    if (private_stream_coded)
                        continue;
                    private_stream_coded = 1;
                    id = 0xbd;
                }
                put_bits(&pb, 8, id); /* stream ID */
                put_bits(&pb, 2, 3);
                if (id < 0xe0) {
                    /* audio */
                    put_bits(&pb, 1, 0);
                    put_bits(&pb, 13, stream->max_buffer_size / 128);
                } else {
                    /* video */
                    put_bits(&pb, 1, 1);
                    put_bits(&pb, 13, stream->max_buffer_size / 1024);
                }
            }
        }
    }

    flush_put_bits(&pb);
    size = put_bits_ptr(&pb) - pb.buf;
    /* patch packet size */
    AV_WB16(buf + 4, size - 6);

    return size;
}
 
/* Compute the size in bytes that put_system_header() will emit for ctx. */
static int get_system_header_size(AVFormatContext *ctx)
{
    MpegMuxContext *s = ctx->priv_data;
    int size = 12;          /* start code, length, rate and bound fields */
    int have_private = 0;
    int i;

    /* DVD-Video system headers are 18 bytes fixed length. */
    if (s->is_dvd)
        return 18;

    /* one 3-byte P-STD entry per stream; all private streams share one */
    for (i = 0; i < ctx->nb_streams; i++) {
        StreamInfo *stream = ctx->streams[i]->priv_data;

        if (stream->id < 0xc0) {
            if (have_private)
                continue;
            have_private = 1;
        }
        size += 3;
    }
    return size;
}
 
/*
 * Initialize the MPEG program stream muxer.
 * Determines the target flavor (VCD/SVCD/DVD/generic MPEG-1/2) from the
 * selected output format, assigns an MPEG stream id and a decoder buffer
 * size to every input stream, and derives the mux rate and the pack /
 * system header repetition frequencies.
 * Returns 0 on success, a negative value on failure.
 */
static av_cold int mpeg_mux_init(AVFormatContext *ctx)
{
    MpegMuxContext *s = ctx->priv_data;
    int bitrate, i, mpa_id, mpv_id, mps_id, ac3_id, dts_id, lpcm_id, j;
    AVStream *st;
    StreamInfo *stream;
    int audio_bitrate;
    int video_bitrate;

    s->packet_number = 0;
    s->is_vcd   = (CONFIG_MPEG1VCD_MUXER  && ctx->oformat == &ff_mpeg1vcd_muxer);
    s->is_svcd  = (CONFIG_MPEG2SVCD_MUXER && ctx->oformat == &ff_mpeg2svcd_muxer);
    s->is_mpeg2 = ((CONFIG_MPEG2VOB_MUXER  && ctx->oformat == &ff_mpeg2vob_muxer) ||
                   (CONFIG_MPEG2DVD_MUXER  && ctx->oformat == &ff_mpeg2dvd_muxer) ||
                   (CONFIG_MPEG2SVCD_MUXER && ctx->oformat == &ff_mpeg2svcd_muxer));
    s->is_dvd   = (CONFIG_MPEG2DVD_MUXER  && ctx->oformat == &ff_mpeg2dvd_muxer);

    if (ctx->packet_size) {
        if (ctx->packet_size < 20 || ctx->packet_size > (1 << 23) + 10) {
            av_log(ctx, AV_LOG_ERROR, "Invalid packet size %d\n",
                   ctx->packet_size);
            goto fail;
        }
        s->packet_size = ctx->packet_size;
    } else
        s->packet_size = 2048;
    if (ctx->max_delay < 0) /* Not set by the caller */
        ctx->max_delay = 0.7*AV_TIME_BASE;

    s->vcd_padding_bytes_written = 0;
    s->vcd_padding_bitrate = 0;

    s->audio_bound = 0;
    s->video_bound = 0;
    /* next free stream id within each id range */
    mpa_id  = AUDIO_ID;
    ac3_id  = AC3_ID;
    dts_id  = DTS_ID;
    mpv_id  = VIDEO_ID;
    mps_id  = SUB_ID;
    lpcm_id = LPCM_ID;
    for (i = 0; i < ctx->nb_streams; i++) {
        st = ctx->streams[i];
        stream = av_mallocz(sizeof(StreamInfo));
        if (!stream)
            goto fail;
        st->priv_data = stream;

        avpriv_set_pts_info(st, 64, 1, 90000);

        switch (st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (st->codec->codec_id == AV_CODEC_ID_AC3) {
                stream->id = ac3_id++;
            } else if (st->codec->codec_id == AV_CODEC_ID_DTS) {
                stream->id = dts_id++;
            } else if (st->codec->codec_id == AV_CODEC_ID_PCM_S16BE) {
                stream->id = lpcm_id++;
                /* LPCM supports only the sample rates in lpcm_freq_tab */
                for (j = 0; j < 4; j++) {
                    if (lpcm_freq_tab[j] == st->codec->sample_rate)
                        break;
                }
                if (j == 4)
                    goto fail;
                if (st->codec->channels > 8)
                    return -1;
                stream->lpcm_header[0] = 0x0c;
                stream->lpcm_header[1] = (st->codec->channels - 1) | (j << 4);
                stream->lpcm_header[2] = 0x80;
                stream->lpcm_align = st->codec->channels * 2;
            } else {
                stream->id = mpa_id++;
            }

            /* This value HAS to be used for VCD (see VCD standard, p. IV-7).
               Right now it is also used for everything else. */
            stream->max_buffer_size = 4 * 1024;
            s->audio_bound++;
            break;
        case AVMEDIA_TYPE_VIDEO:
            stream->id = mpv_id++;
            if (st->codec->rc_buffer_size)
                stream->max_buffer_size = 6*1024 + st->codec->rc_buffer_size/8;
            else {
                av_log(ctx, AV_LOG_WARNING, "VBV buffer size not set, muxing may fail\n");
                stream->max_buffer_size = 230*1024; //FIXME this is probably too small as default
            }
            /* the 13-bit P-STD field in units of 1024 caps the buffer bound */
            if (stream->max_buffer_size > 1024 * 8191) {
                av_log(ctx, AV_LOG_WARNING, "buffer size %d, too large\n", stream->max_buffer_size);
                stream->max_buffer_size = 1024 * 8191;
            }
            s->video_bound++;
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            stream->id = mps_id++;
            stream->max_buffer_size = 16 * 1024;
            break;
        default:
            return -1;
        }
        stream->fifo = av_fifo_alloc(16);
        if (!stream->fifo)
            goto fail;
    }
    /* sum the per-stream bitrates to derive the overall mux rate */
    bitrate = 0;
    audio_bitrate = 0;
    video_bitrate = 0;
    for (i = 0; i < ctx->nb_streams; i++) {
        int codec_rate;
        st = ctx->streams[i];
        stream = (StreamInfo*) st->priv_data;

        if (st->codec->rc_max_rate || stream->id == VIDEO_ID)
            codec_rate = st->codec->rc_max_rate;
        else
            codec_rate = st->codec->bit_rate;

        if (!codec_rate)
            codec_rate = (1<<21)*8*50/ctx->nb_streams;

        bitrate += codec_rate;

        if ((stream->id & 0xe0) == AUDIO_ID)
            audio_bitrate += codec_rate;
        else if (stream->id == VIDEO_ID)
            video_bitrate += codec_rate;
    }

    if (s->user_mux_rate) {
        s->mux_rate = (s->user_mux_rate + (8 * 50) - 1) / (8 * 50);
    } else {
        /* we increase slightly the bitrate to take into account the
           headers. XXX: compute it exactly */
        bitrate += bitrate / 20;
        bitrate += 10000;
        s->mux_rate = (bitrate + (8 * 50) - 1) / (8 * 50);
        if (s->mux_rate >= (1<<22)) {
            av_log(ctx, AV_LOG_WARNING, "mux rate %d is too large\n", s->mux_rate);
            s->mux_rate = (1<<22) - 1;
        }
    }

    if (s->is_vcd) {
        double overhead_rate;

        /* The VCD standard mandates that the mux_rate field is 3528
           (see standard p. IV-6).
           The value is actually "wrong", i.e. if you calculate
           it using the normal formula and the 75 sectors per second transfer
           rate you get a different value because the real pack size is 2324,
           not 2352. But the standard explicitly specifies that the mux_rate
           field in the header must have this value. */
        // s->mux_rate = 2352 * 75 / 50;   /* = 3528 */

        /* The VCD standard states that the muxed stream must be
           exactly 75 packs / second (the data rate of a single speed cdrom).
           Since the video bitrate (probably 1150000 bits/sec) will be below
           the theoretical maximum we have to add some padding packets
           to make up for the lower data rate.
           (cf. VCD standard p. IV-6 ) */

        /* Add the header overhead to the data rate.
           2279 data bytes per audio pack, 2294 data bytes per video pack */
        overhead_rate  = ((audio_bitrate / 8.0) / 2279) * (2324 - 2279);
        overhead_rate += ((video_bitrate / 8.0) / 2294) * (2324 - 2294);
        overhead_rate *= 8;

        /* Add padding so that the full bitrate is 2324*75 bytes/sec */
        s->vcd_padding_bitrate = 2324 * 75 * 8 - (bitrate + overhead_rate);
    }

    if (s->is_vcd || s->is_mpeg2)
        /* every packet */
        s->pack_header_freq = 1;
    else
        /* every 2 seconds */
        s->pack_header_freq = 2 * bitrate / s->packet_size / 8;

    /* the above seems to make pack_header_freq zero sometimes */
    if (s->pack_header_freq == 0)
        s->pack_header_freq = 1;

    if (s->is_mpeg2)
        /* every 200 packets. Need to look at the spec. */
        s->system_header_freq = s->pack_header_freq * 40;
    else if (s->is_vcd)
        /* the standard mandates that there are only two system headers
           in the whole file: one in the first packet of each stream.
           (see standard p. IV-7 and IV-8) */
        s->system_header_freq = 0x7fffffff;
    else
        s->system_header_freq = s->pack_header_freq * 5;

    for (i = 0; i < ctx->nb_streams; i++) {
        stream = ctx->streams[i]->priv_data;
        stream->packet_number = 0;
    }
    s->system_header_size = get_system_header_size(ctx);
    s->last_scr = AV_NOPTS_VALUE;
    return 0;
fail:
    for (i = 0; i < ctx->nb_streams; i++) {
        /* fix: av_freep() instead of av_free() so st->priv_data does not
           keep pointing at freed memory, which later generic stream
           cleanup could otherwise free again */
        av_freep(&ctx->streams[i]->priv_data);
    }
    return AVERROR(ENOMEM);
}
 
/* Write a 33-bit PTS/DTS in the 5-byte MPEG format: a 4-bit id nibble
 * followed by three timestamp fields, each terminated by a marker bit. */
static inline void put_timestamp(AVIOContext *pb, int id, int64_t timestamp)
{
    int first_byte;

    first_byte = (id << 4) | (((timestamp >> 30) & 0x07) << 1) | 1;
    avio_w8(pb, first_byte);
    avio_wb16(pb, (uint16_t)((((timestamp >> 15) & 0x7fff) << 1) | 1));
    avio_wb16(pb, (uint16_t)(((timestamp & 0x7fff) << 1) | 1));
}
 
 
/* Return how many padding bytes must be inserted at time pts to keep the
   multiplexed VCD stream at its constant target bitrate. */
static int get_vcd_padding_size(AVFormatContext *ctx, int64_t pts)
{
    MpegMuxContext *s = ctx->priv_data;
    int64_t wanted;
    int missing;

    if (s->vcd_padding_bitrate <= 0 || pts == AV_NOPTS_VALUE)
        return 0;

    /* total padding that should have been emitted by time pts */
    wanted  = (int64_t)((s->vcd_padding_bitrate * (pts / 90000.0)) / 8.0); //FIXME this is wrong
    missing = (int)(wanted - s->vcd_padding_bytes_written);

    /* might be negative if we have already padded to a later timestamp,
       which can occur when another stream has advanced further */
    return missing > 0 ? missing : 0;
}
 
 
/* Write an MPEG padding packet of exactly packet_bytes bytes, header
   included. */
static void put_padding_packet(AVFormatContext *ctx, AVIOContext *pb,int packet_bytes)
{
    MpegMuxContext *s = ctx->priv_data;
    int fill;

    avio_wb32(pb, PADDING_STREAM);
    avio_wb16(pb, packet_bytes - 6);

    fill = packet_bytes - 6;
    if (!s->is_mpeg2) {
        /* MPEG-1 padding packets start with a 0x0f byte */
        avio_w8(pb, 0x0f);
        fill--;
    }

    while (fill-- > 0)
        avio_w8(pb, 0xff);
}
 
/* Count how many access units start within the next len payload bytes of
   the stream's premux queue. */
static int get_nb_frames(AVFormatContext *ctx, StreamInfo *stream, int len){
    PacketDesc *desc;
    int frames = 0;

    for (desc = stream->premux_packet; len > 0; desc = desc->next) {
        /* a packet whose unwritten size still equals its full size has not
           been started yet, so its first byte falls inside this range */
        if (desc->size == desc->unwritten_size)
            frames++;
        len -= desc->unwritten_size;
    }

    return frames;
}
 
/* flush the packet on stream stream_index:
   write one complete PS pack for that stream — pack header, optional system
   header (and, for DVD, NAV PCI/DSI packets), the PES header, then payload
   bytes taken from the stream FIFO, plus whatever stuffing/padding is needed
   to reach the fixed pack size. Returns the number of payload bytes
   consumed from the FIFO. */
static int flush_packet(AVFormatContext *ctx, int stream_index,
                        int64_t pts, int64_t dts, int64_t scr, int trailer_size)
{
    MpegMuxContext *s = ctx->priv_data;
    StreamInfo *stream = ctx->streams[stream_index]->priv_data;
    uint8_t *buf_ptr;
    int size, payload_size, startcode, id, stuffing_size, i, header_len;
    int packet_size;
    uint8_t buffer[128];
    int zero_trail_bytes = 0;
    int pad_packet_bytes = 0;
    int pes_flags;
    int general_pack = 0; /*"general" pack without data specific to one stream?*/
    int nb_frames;

    id = stream->id;

    av_dlog(ctx, "packet ID=%2x PTS=%0.3f\n", id, pts / 90000.0);

    buf_ptr = buffer;

    if ((s->packet_number % s->pack_header_freq) == 0 || s->last_scr != scr) {
        /* output pack and systems header if needed */
        size = put_pack_header(ctx, buf_ptr, scr);
        buf_ptr += size;
        s->last_scr= scr;

        if (s->is_vcd) {
            /* there is exactly one system header for each stream in a VCD MPEG,
               One in the very first video packet and one in the very first
               audio packet (see VCD standard p. IV-7 and IV-8).*/

            if (stream->packet_number==0) {
                size = put_system_header(ctx, buf_ptr, id);
                buf_ptr += size;
            }
        } else if (s->is_dvd) {
            if (stream->align_iframe || s->packet_number == 0){
                /* space left in this pack after headers and timestamps */
                int PES_bytes_to_fill = s->packet_size - size - 10;

                if (pts != AV_NOPTS_VALUE) {
                    if (dts != pts)
                        PES_bytes_to_fill -= 5 + 5;
                    else
                        PES_bytes_to_fill -= 5;
                }

                if (stream->bytes_to_iframe == 0 || s->packet_number == 0) {
                    /* emit the two DVD NAV packets (substream 0 = PCI,
                       substream 1 = DSI) with zeroed payloads, then restart
                       the pack with a fresh pack header */
                    size = put_system_header(ctx, buf_ptr, 0);
                    buf_ptr += size;
                    size = buf_ptr - buffer;
                    avio_write(ctx->pb, buffer, size);

                    avio_wb32(ctx->pb, PRIVATE_STREAM_2);
                    avio_wb16(ctx->pb, 0x03d4); // length
                    avio_w8(ctx->pb, 0x00); // substream ID, 00=PCI
                    for (i = 0; i < 979; i++)
                        avio_w8(ctx->pb, 0x00);

                    avio_wb32(ctx->pb, PRIVATE_STREAM_2);
                    avio_wb16(ctx->pb, 0x03fa); // length
                    avio_w8(ctx->pb, 0x01); // substream ID, 01=DSI
                    for (i = 0; i < 1017; i++)
                        avio_w8(ctx->pb, 0x00);

                    memset(buffer, 0, 128);
                    buf_ptr = buffer;
                    s->packet_number++;
                    stream->align_iframe = 0;
                    scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet
                    size = put_pack_header(ctx, buf_ptr, scr);
                    s->last_scr= scr;
                    buf_ptr += size;
                    /* GOP Start */
                } else if (stream->bytes_to_iframe < PES_bytes_to_fill) {
                    /* pad so the upcoming I-frame begins at a pack boundary */
                    pad_packet_bytes = PES_bytes_to_fill - stream->bytes_to_iframe;
                }
            }
        } else {
            if ((s->packet_number % s->system_header_freq) == 0) {
                size = put_system_header(ctx, buf_ptr, 0);
                buf_ptr += size;
            }
        }
    }
    size = buf_ptr - buffer;
    avio_write(ctx->pb, buffer, size);

    /* bytes remaining in this pack for the PES packet */
    packet_size = s->packet_size - size;

    if (s->is_vcd && (id & 0xe0) == AUDIO_ID)
        /* The VCD standard demands that 20 zero bytes follow
           each audio pack (see standard p. IV-8).*/
        zero_trail_bytes += 20;

    if ((s->is_vcd && stream->packet_number==0)
        || (s->is_svcd && s->packet_number==0)) {
        /* for VCD the first pack of each stream contains only the pack header,
           the system header and lots of padding (see VCD standard p. IV-6).
           In the case of an audio pack, 20 zero bytes are also added at
           the end.*/
        /* For SVCD we fill the very first pack to increase compatibility with
           some DVD players. Not mandated by the standard.*/
        if (s->is_svcd)
            general_pack = 1; /* the system header refers to both streams and no stream data*/
        pad_packet_bytes = packet_size - zero_trail_bytes;
    }

    packet_size -= pad_packet_bytes + zero_trail_bytes;

    if (packet_size > 0) {

        /* packet header size */
        packet_size -= 6;

        /* packet header */
        if (s->is_mpeg2) {
            header_len = 3;
            if (stream->packet_number==0)
                header_len += 3; /* PES extension */
            header_len += 1; /* obligatory stuffing byte */
        } else {
            header_len = 0;
        }
        if (pts != AV_NOPTS_VALUE) {
            if (dts != pts)
                header_len += 5 + 5;
            else
                header_len += 5;
        } else {
            if (!s->is_mpeg2)
                header_len++;
        }

        payload_size = packet_size - header_len;
        if (id < 0xc0) {
            /* private stream 1: substream id byte, plus extra header bytes
               for AC-3/DTS (>= 0x40) and again for LPCM (>= 0xa0) */
            startcode = PRIVATE_STREAM_1;
            payload_size -= 1;
            if (id >= 0x40) {
                payload_size -= 3;
                if (id >= 0xa0)
                    payload_size -= 3;
            }
        } else {
            startcode = 0x100 + id;
        }

        stuffing_size = payload_size - av_fifo_size(stream->fifo);

        // first byte does not fit -> reset pts/dts + stuffing
        if(payload_size <= trailer_size && pts != AV_NOPTS_VALUE){
            int timestamp_len=0;
            if(dts != pts)
                timestamp_len += 5;
            if(pts != AV_NOPTS_VALUE)
                timestamp_len += s->is_mpeg2 ? 5 : 4;
            pts=dts= AV_NOPTS_VALUE;
            header_len -= timestamp_len;
            if (s->is_dvd && stream->align_iframe) {
                pad_packet_bytes += timestamp_len;
                packet_size -= timestamp_len;
            } else {
                payload_size += timestamp_len;
            }
            stuffing_size += timestamp_len;
            if(payload_size > trailer_size)
                stuffing_size += payload_size - trailer_size;
        }

        if (pad_packet_bytes > 0 && pad_packet_bytes <= 7) { // can't use padding, so use stuffing
            packet_size += pad_packet_bytes;
            payload_size += pad_packet_bytes; // undo the previous adjustment
            if (stuffing_size < 0) {
                stuffing_size = pad_packet_bytes;
            } else {
                stuffing_size += pad_packet_bytes;
            }
            pad_packet_bytes = 0;
        }

        if (stuffing_size < 0)
            stuffing_size = 0;

        if (startcode == PRIVATE_STREAM_1 && id >= 0xa0) {
            /* keep the LPCM payload a whole number of sample frames */
            if (payload_size < av_fifo_size(stream->fifo))
                stuffing_size += payload_size % stream->lpcm_align;
        }

        if (stuffing_size > 16) { /*<=16 for MPEG-1, <=32 for MPEG-2*/
            pad_packet_bytes += stuffing_size;
            packet_size -= stuffing_size;
            payload_size -= stuffing_size;
            stuffing_size = 0;
        }

        nb_frames= get_nb_frames(ctx, stream, payload_size - stuffing_size);

        avio_wb32(ctx->pb, startcode);

        avio_wb16(ctx->pb, packet_size);

        /* MPEG-1 stuffing bytes precede the timestamp field */
        if (!s->is_mpeg2)
            for(i=0;i<stuffing_size;i++)
                avio_w8(ctx->pb, 0xff);

        if (s->is_mpeg2) {
            avio_w8(ctx->pb, 0x80); /* mpeg2 id */

            pes_flags=0;

            if (pts != AV_NOPTS_VALUE) {
                pes_flags |= 0x80;
                if (dts != pts)
                    pes_flags |= 0x40;
            }

            /* Both the MPEG-2 and the SVCD standards demand that the
               P-STD_buffer_size field be included in the first packet of
               every stream. (see SVCD standard p. 26 V.2.3.1 and V.2.3.2
               and MPEG-2 standard 2.7.7) */
            if (stream->packet_number == 0)
                pes_flags |= 0x01;

            avio_w8(ctx->pb, pes_flags); /* flags */
            avio_w8(ctx->pb, header_len - 3 + stuffing_size);

            if (pes_flags & 0x80)  /*write pts*/
                put_timestamp(ctx->pb, (pes_flags & 0x40) ? 0x03 : 0x02, pts);
            if (pes_flags & 0x40)  /*write dts*/
                put_timestamp(ctx->pb, 0x01, dts);

            if (pes_flags & 0x01) {  /*write pes extension*/
                avio_w8(ctx->pb, 0x10); /* flags */

                /* P-STD buffer info */
                if ((id & 0xe0) == AUDIO_ID)
                    avio_wb16(ctx->pb, 0x4000 | stream->max_buffer_size/ 128);
                else
                    avio_wb16(ctx->pb, 0x6000 | stream->max_buffer_size/1024);
            }

        } else {
            if (pts != AV_NOPTS_VALUE) {
                if (dts != pts) {
                    put_timestamp(ctx->pb, 0x03, pts);
                    put_timestamp(ctx->pb, 0x01, dts);
                } else {
                    put_timestamp(ctx->pb, 0x02, pts);
                }
            } else {
                avio_w8(ctx->pb, 0x0f);
            }
        }

        if (s->is_mpeg2) {
            /* special stuffing byte that is always written
               to prevent accidental generation of start codes. */
            avio_w8(ctx->pb, 0xff);

            for(i=0;i<stuffing_size;i++)
                avio_w8(ctx->pb, 0xff);
        }

        if (startcode == PRIVATE_STREAM_1) {
            avio_w8(ctx->pb, id);
            if (id >= 0xa0) {
                /* LPCM (XXX: check nb_frames) */
                avio_w8(ctx->pb, 7);
                avio_wb16(ctx->pb, 4); /* skip 3 header bytes */
                avio_w8(ctx->pb, stream->lpcm_header[0]);
                avio_w8(ctx->pb, stream->lpcm_header[1]);
                avio_w8(ctx->pb, stream->lpcm_header[2]);
            } else if (id >= 0x40) {
                /* AC-3 */
                avio_w8(ctx->pb, nb_frames);
                avio_wb16(ctx->pb, trailer_size+1);
            }
        }

        /* output data */
        assert(payload_size - stuffing_size <= av_fifo_size(stream->fifo));
        av_fifo_generic_read(stream->fifo, ctx->pb, payload_size - stuffing_size, (void*)avio_write);
        stream->bytes_to_iframe -= payload_size - stuffing_size;
    }else{
        payload_size=
        stuffing_size= 0;
    }

    if (pad_packet_bytes > 0)
        put_padding_packet(ctx,ctx->pb, pad_packet_bytes);

    for(i=0;i<zero_trail_bytes;i++)
        avio_w8(ctx->pb, 0x00);

    avio_flush(ctx->pb);

    s->packet_number++;

    /* only increase the stream packet number if this pack actually contains
       something that is specific to this stream! I.e. a dedicated header
       or some data.*/
    if (!general_pack)
        stream->packet_number++;

    return payload_size - stuffing_size;
}
 
static void put_vcd_padding_sector(AVFormatContext *ctx)
{
    /* There are two ways to do this padding: writing a sector/pack
       of 0 values, or writing an MPEG padding pack. Both seem to
       work with most decoders, BUT the VCD standard only allows a 0-sector
       (see standard p. IV-4, IV-5).
       So a 0-sector it is...*/

    MpegMuxContext *s = ctx->priv_data;
    int left = s->packet_size;

    while (left-- > 0)
        avio_w8(ctx->pb, 0);

    s->vcd_padding_bytes_written += s->packet_size;

    avio_flush(ctx->pb);

    /* increasing the packet number is correct. The SCR of the following packs
       is calculated from the packet_number and it has to include the padding
       sector (it represents the sector index, not the MPEG pack index)
       (see VCD standard p. IV-6)*/
    s->packet_number++;
}
 
/* Drop from every stream's predecode queue the packets whose DTS the SCR
   has passed, shrinking the modelled decoder buffer accordingly. Logs and
   stops a stream's sweep on a modelled buffer underflow. Always returns 0. */
static int remove_decoded_packets(AVFormatContext *ctx, int64_t scr){
    int i;

    for (i = 0; i < ctx->nb_streams; i++) {
        StreamInfo *stream = ctx->streams[i]->priv_data;

        for (;;) {
            PacketDesc *desc = stream->predecode_packet;

            if (!desc || scr <= desc->dts) //FIXME > vs >=
                break;
            if (stream->buffer_index < desc->size ||
                stream->predecode_packet == stream->premux_packet) {
                av_log(ctx, AV_LOG_ERROR,
                       "buffer underflow st=%d bufi=%d size=%d\n",
                       i, stream->buffer_index, desc->size);
                break;
            }
            stream->buffer_index -= desc->size;
            stream->predecode_packet = desc->next;
            av_freep(&desc);
        }
    }

    return 0;
}
 
/* Pick the stream whose (scaled) decoder buffer has the most free room,
   subject to the buffer-space and max_delay constraints, and mux one pack
   for it. If no stream is currently eligible, advance the SCR past the
   earliest pending DTS and retry. Returns 1 if a pack was written, 0 if
   nothing (more) can be done now, a negative value on error. */
static int output_packet(AVFormatContext *ctx, int flush){
    MpegMuxContext *s = ctx->priv_data;
    AVStream *st;
    StreamInfo *stream;
    int i, avail_space=0, es_size, trailer_size;
    int best_i= -1;
    int best_score= INT_MIN;
    int ignore_constraints=0;
    int64_t scr= s->last_scr;
    PacketDesc *timestamp_packet;
    const int64_t max_delay= av_rescale(ctx->max_delay, 90000, AV_TIME_BASE);

retry:
    for(i=0; i<ctx->nb_streams; i++){
        AVStream *st = ctx->streams[i];
        StreamInfo *stream = st->priv_data;
        const int avail_data= av_fifo_size(stream->fifo);
        const int space= stream->max_buffer_size - stream->buffer_index;
        int rel_space= 1024LL*space / stream->max_buffer_size;
        PacketDesc *next_pkt= stream->premux_packet;

        /* for subtitle, a single PES packet must be generated,
           so we flush after every single subtitle packet */
        if(s->packet_size > avail_data && !flush
           && st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)
            return 0;
        if(avail_data==0)
            continue;
        av_assert0(avail_data>0);

        if(space < s->packet_size && !ignore_constraints)
            continue;

        /* don't mux data whose DTS is more than max_delay ahead of the SCR */
        if(next_pkt && next_pkt->dts - scr > max_delay)
            continue;
        /* strongly prefer a stream whose modelled buffer is about to
           underflow (next packet to decode is not fully buffered yet) */
        if (   stream->predecode_packet
            && stream->predecode_packet->size > stream->buffer_index)
            rel_space += 1<<28;
        if(rel_space > best_score){
            best_score= rel_space;
            best_i = i;
            avail_space= space;
        }
    }

    if(best_i < 0){
        int64_t best_dts= INT64_MAX;

        /* no stream eligible: find the earliest pending DTS so the SCR
           can be bumped past it */
        for(i=0; i<ctx->nb_streams; i++){
            AVStream *st = ctx->streams[i];
            StreamInfo *stream = st->priv_data;
            PacketDesc *pkt_desc= stream->predecode_packet;
            if(pkt_desc && pkt_desc->dts < best_dts)
                best_dts= pkt_desc->dts;
        }

        av_dlog(ctx, "bumping scr, scr:%f, dts:%f\n",
                scr / 90000.0, best_dts / 90000.0);
        if(best_dts == INT64_MAX)
            return 0;

        if(scr >= best_dts+1 && !ignore_constraints){
            av_log(ctx, AV_LOG_ERROR, "packet too large, ignoring buffer limits to mux it\n");
            ignore_constraints= 1;
        }
        scr= FFMAX(best_dts+1, scr);
        if(remove_decoded_packets(ctx, scr) < 0)
            return -1;
        goto retry;
    }

    assert(best_i >= 0);

    st = ctx->streams[best_i];
    stream = st->priv_data;

    assert(av_fifo_size(stream->fifo) > 0);

    assert(avail_space >= s->packet_size || ignore_constraints);

    /* pick the packet whose timestamps belong to the first access unit
       starting inside this pack; trailer_size is what remains of a
       partially written packet */
    timestamp_packet= stream->premux_packet;
    if(timestamp_packet->unwritten_size == timestamp_packet->size){
        trailer_size= 0;
    }else{
        trailer_size= timestamp_packet->unwritten_size;
        timestamp_packet= timestamp_packet->next;
    }

    if(timestamp_packet){
        av_dlog(ctx, "dts:%f pts:%f scr:%f stream:%d\n",
                timestamp_packet->dts / 90000.0,
                timestamp_packet->pts / 90000.0,
                scr / 90000.0, best_i);
        es_size= flush_packet(ctx, best_i, timestamp_packet->pts, timestamp_packet->dts, scr, trailer_size);
    }else{
        assert(av_fifo_size(stream->fifo) == trailer_size);
        es_size= flush_packet(ctx, best_i, AV_NOPTS_VALUE, AV_NOPTS_VALUE, scr, trailer_size);
    }

    if (s->is_vcd) {
        /* Write one or more padding sectors, if necessary, to reach
           the constant overall bitrate.*/
        int vcd_pad_bytes;

        while((vcd_pad_bytes = get_vcd_padding_size(ctx,stream->premux_packet->pts) ) >= s->packet_size){ //FIXME pts cannot be correct here
            put_vcd_padding_sector(ctx);
            s->last_scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet
        }
    }

    stream->buffer_index += es_size;
    s->last_scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet

    /* consume the written bytes from the premux queue */
    while(stream->premux_packet && stream->premux_packet->unwritten_size <= es_size){
        es_size -= stream->premux_packet->unwritten_size;
        stream->premux_packet= stream->premux_packet->next;
    }
    if(es_size)
        stream->premux_packet->unwritten_size -= es_size;

    if(remove_decoded_packets(ctx, s->last_scr) < 0)
        return -1;

    return 1;
}
 
/*
 * Buffer one input packet for muxing: shift its timestamps by the preload
 * delay, queue a PacketDesc describing it, append the payload to the
 * stream's FIFO, then write out as many PS packs as the buffering model
 * currently allows.
 * Returns 0 on success, a negative value on failure.
 */
static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
{
    MpegMuxContext *s = ctx->priv_data;
    int stream_index = pkt->stream_index;
    int size = pkt->size;
    uint8_t *buf = pkt->data;
    AVStream *st = ctx->streams[stream_index];
    StreamInfo *stream = st->priv_data;
    int64_t pts, dts;
    PacketDesc *pkt_desc;
    int preload;
    const int is_iframe = st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (pkt->flags & AV_PKT_FLAG_KEY);

    preload = av_rescale(s->preload, 90000, AV_TIME_BASE);

    pts = pkt->pts;
    dts = pkt->dts;

    /* derive the initial SCR from the very first packet's DTS */
    if (s->last_scr == AV_NOPTS_VALUE) {
        if (dts == AV_NOPTS_VALUE || (dts < preload && ctx->avoid_negative_ts) || s->is_dvd) {
            if (dts != AV_NOPTS_VALUE)
                s->preload += av_rescale(-dts, AV_TIME_BASE, 90000);
            s->last_scr = 0;
        } else {
            s->last_scr = dts - preload;
            s->preload = 0;
        }
        preload = av_rescale(s->preload, 90000, AV_TIME_BASE);
        av_log(ctx, AV_LOG_DEBUG, "First SCR: %"PRId64" First DTS: %"PRId64"\n", s->last_scr, dts + preload);
    }

    if (dts != AV_NOPTS_VALUE) dts += preload;
    if (pts != AV_NOPTS_VALUE) pts += preload;

    av_dlog(ctx, "dts:%f pts:%f flags:%d stream:%d nopts:%d\n",
            dts / 90000.0, pts / 90000.0, pkt->flags,
            pkt->stream_index, pts != AV_NOPTS_VALUE);
    if (!stream->premux_packet)
        stream->next_packet = &stream->premux_packet;
    /* fix: check the allocation before linking and dereferencing it; the
       original code dereferenced a possibly NULL pointer on out of memory */
    pkt_desc = av_mallocz(sizeof(PacketDesc));
    if (!pkt_desc)
        return AVERROR(ENOMEM);
    *stream->next_packet = pkt_desc;
    pkt_desc->pts = pts;
    pkt_desc->dts = dts;
    pkt_desc->unwritten_size =
    pkt_desc->size = size;
    if (!stream->predecode_packet)
        stream->predecode_packet = pkt_desc;
    stream->next_packet = &pkt_desc->next;

    if (av_fifo_realloc2(stream->fifo, av_fifo_size(stream->fifo) + size) < 0)
        return -1;

    if (s->is_dvd) {
        // min VOBU length 0.4 seconds (mpucoder)
        if (is_iframe && (s->packet_number == 0 || (pts - stream->vobu_start_pts >= 36000))) {
            stream->bytes_to_iframe = av_fifo_size(stream->fifo);
            stream->align_iframe = 1;
            stream->vobu_start_pts = pts;
        }
    }

    av_fifo_generic_write(stream->fifo, buf, size, NULL);

    /* mux out packs until the buffering model says stop (ret == 0) */
    for (;;) {
        int ret = output_packet(ctx, 0);
        if (ret <= 0)
            return ret;
    }
}
 
/* Flush all remaining buffered data at end of muxing and free the
   per-stream FIFOs. */
static int mpeg_mux_end(AVFormatContext *ctx)
{
    // MpegMuxContext *s = ctx->priv_data;
    StreamInfo *stream;
    int i, ret;

    do {
        ret = output_packet(ctx, 1);
        if (ret < 0)
            return ret;
    } while (ret != 0);

    /* End header according to MPEG1 systems standard. We do not write
       it as it is usually not needed by decoders and because it
       complicates MPEG stream concatenation. */
    //avio_wb32(ctx->pb, ISO_11172_END_CODE);
    //avio_flush(ctx->pb);

    for (i = 0; i < ctx->nb_streams; i++) {
        stream = ctx->streams[i]->priv_data;

        assert(av_fifo_size(stream->fifo) == 0);
        av_fifo_free(stream->fifo);
    }
    return 0;
}
 
#define OFFSET(x) offsetof(MpegMuxContext, x)
#define E AV_OPT_FLAG_ENCODING_PARAM
/* User-settable options shared by all PS muxer flavors. */
static const AVOption options[] = {
    { "muxrate", NULL, OFFSET(user_mux_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, ((1<<22) - 1) * (8 * 50), E },
    { "preload", "Initial demux-decode delay in microseconds.", OFFSET(preload), AV_OPT_TYPE_INT, {.i64 = 500000}, 0, INT_MAX, E},
    { NULL },
};

/* Declare the AVClass for one muxer flavor; all flavors share options[]. */
#define MPEGENC_CLASS(flavor)\
static const AVClass flavor ## _class = {\
    .class_name = #flavor " muxer",\
    .item_name = av_default_item_name,\
    .version = LIBAVUTIL_VERSION_INT,\
    .option = options,\
};
 
/* Generic MPEG-1 Systems / program stream muxer. */
#if CONFIG_MPEG1SYSTEM_MUXER
MPEGENC_CLASS(mpeg)
AVOutputFormat ff_mpeg1system_muxer = {
    .name              = "mpeg",
    .long_name         = NULL_IF_CONFIG_SMALL("MPEG-1 Systems / MPEG program stream"),
    .mime_type         = "video/mpeg",
    .extensions        = "mpg,mpeg",
    .priv_data_size    = sizeof(MpegMuxContext),
    .audio_codec       = AV_CODEC_ID_MP2,
    .video_codec       = AV_CODEC_ID_MPEG1VIDEO,
    .write_header      = mpeg_mux_init,
    .write_packet      = mpeg_mux_write_packet,
    .write_trailer     = mpeg_mux_end,
    .priv_class        = &mpeg_class,
};
#endif
/* Video CD variant (sets is_vcd in mpeg_mux_init). */
#if CONFIG_MPEG1VCD_MUXER
MPEGENC_CLASS(vcd)
AVOutputFormat ff_mpeg1vcd_muxer = {
    .name              = "vcd",
    .long_name         = NULL_IF_CONFIG_SMALL("MPEG-1 Systems / MPEG program stream (VCD)"),
    .mime_type         = "video/mpeg",
    .priv_data_size    = sizeof(MpegMuxContext),
    .audio_codec       = AV_CODEC_ID_MP2,
    .video_codec       = AV_CODEC_ID_MPEG1VIDEO,
    .write_header      = mpeg_mux_init,
    .write_packet      = mpeg_mux_write_packet,
    .write_trailer     = mpeg_mux_end,
    .priv_class        = &vcd_class,
};
#endif
/* MPEG-2 program stream (VOB) variant. */
#if CONFIG_MPEG2VOB_MUXER
MPEGENC_CLASS(vob)
AVOutputFormat ff_mpeg2vob_muxer = {
    .name              = "vob",
    .long_name         = NULL_IF_CONFIG_SMALL("MPEG-2 PS (VOB)"),
    .mime_type         = "video/mpeg",
    .extensions        = "vob",
    .priv_data_size    = sizeof(MpegMuxContext),
    .audio_codec       = AV_CODEC_ID_MP2,
    .video_codec       = AV_CODEC_ID_MPEG2VIDEO,
    .write_header      = mpeg_mux_init,
    .write_packet      = mpeg_mux_write_packet,
    .write_trailer     = mpeg_mux_end,
    .priv_class        = &vob_class,
};
#endif

/* Same as mpeg2vob_mux except that the pack size is 2324 */
#if CONFIG_MPEG2SVCD_MUXER
MPEGENC_CLASS(svcd)
AVOutputFormat ff_mpeg2svcd_muxer = {
    .name              = "svcd",
    .long_name         = NULL_IF_CONFIG_SMALL("MPEG-2 PS (SVCD)"),
    .mime_type         = "video/mpeg",
    .extensions        = "vob",
    .priv_data_size    = sizeof(MpegMuxContext),
    .audio_codec       = AV_CODEC_ID_MP2,
    .video_codec       = AV_CODEC_ID_MPEG2VIDEO,
    .write_header      = mpeg_mux_init,
    .write_packet      = mpeg_mux_write_packet,
    .write_trailer     = mpeg_mux_end,
    .priv_class        = &svcd_class,
};
#endif

/* Same as mpeg2vob_mux except the 'is_dvd' flag is set to produce NAV pkts */
#if CONFIG_MPEG2DVD_MUXER
MPEGENC_CLASS(dvd)
AVOutputFormat ff_mpeg2dvd_muxer = {
    .name              = "dvd",
    .long_name         = NULL_IF_CONFIG_SMALL("MPEG-2 PS (DVD VOB)"),
    .mime_type         = "video/mpeg",
    .extensions        = "dvd",
    .priv_data_size    = sizeof(MpegMuxContext),
    .audio_codec       = AV_CODEC_ID_MP2,
    .video_codec       = AV_CODEC_ID_MPEG2VIDEO,
    .write_header      = mpeg_mux_init,
    .write_packet      = mpeg_mux_write_packet,
    .write_trailer     = mpeg_mux_end,
    .priv_class        = &dvd_class,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/mpegts.c
0,0 → 1,2456
/*
* MPEG2 transport stream (aka DVB) demuxer
* Copyright (c) 2002-2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/buffer.h"
#include "libavutil/crc.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/get_bits.h"
#include "libavcodec/mathops.h"
#include "avformat.h"
#include "mpegts.h"
#include "internal.h"
#include "avio_internal.h"
#include "seek.h"
#include "mpeg.h"
#include "isom.h"
 
/* maximum size in which we look for synchronisation if
   synchronisation is lost */
#define MAX_RESYNC_SIZE 65536

#define MAX_PES_PAYLOAD 200*1024

#define MAX_MP4_DESCR_COUNT 16

/* A filter attached to a PID routes its TS packets either to a PES
   payload callback or to a PSI section reassembler. */
enum MpegTSFilterType {
    MPEGTS_PES,
    MPEGTS_SECTION,
};

typedef struct MpegTSFilter MpegTSFilter;

/* Invoked with each chunk of PES data arriving on a PID. */
typedef int PESCallback(MpegTSFilter *f, const uint8_t *buf, int len, int is_start, int64_t pos, int64_t cur_pcr);

typedef struct MpegTSPESFilter {
    PESCallback *pes_cb;
    void *opaque;   /* user data handed back to pes_cb */
} MpegTSPESFilter;

/* Invoked once a complete PSI section has been reassembled. */
typedef void SectionCallback(MpegTSFilter *f, const uint8_t *buf, int len);

typedef void SetServiceCallback(void *opaque, int ret);

/* Reassembly state for one PSI section. */
typedef struct MpegTSSectionFilter {
    int section_index;
    int section_h_size;
    uint8_t *section_buf;
    unsigned int check_crc:1;
    unsigned int end_of_section_reached:1;
    SectionCallback *section_cb;
    void *opaque;   /* user data handed back to section_cb */
} MpegTSSectionFilter;

/* Per-PID demux state: either a PES filter or a section filter. */
struct MpegTSFilter {
    int pid;
    int es_id;
    int last_cc; /* last cc code (-1 if first packet) */
    enum MpegTSFilterType type;
    union {
        MpegTSPESFilter pes_filter;
        MpegTSSectionFilter section_filter;
    } u;
};

#define MAX_PIDS_PER_PROGRAM 64
struct Program {
    unsigned int id; //program id/service id
    unsigned int nb_pids;
    unsigned int pids[MAX_PIDS_PER_PROGRAM];
};
 
/* Demuxer context shared by the mpegts and mpegtsraw demuxers. */
struct MpegTSContext {
    const AVClass *class;
    /* user data */
    AVFormatContext *stream;
    /** raw packet size, including FEC if present */
    int raw_packet_size;

    /* NOTE(review): appears to be packet-size detection statistics used
       while probing raw_packet_size — confirm against the probe code */
    int size_stat[3];
    int size_stat_count;
#define SIZE_STAT_THRESHOLD 10

    int64_t pos47_full;

    /** if true, all pids are analyzed to find streams */
    int auto_guess;

    /** compute exact PCR for each transport stream packet */
    int mpeg2ts_compute_pcr;

    /** fix dvb teletext pts */
    int fix_teletext_pts;

    int64_t cur_pcr;    /**< used to estimate the exact PCR */
    int pcr_incr;       /**< used to estimate the exact PCR */

    /* data needed to handle file based ts */
    /** stop parsing loop */
    int stop_parse;
    /** packet containing Audio/Video data */
    AVPacket *pkt;
    /** to detect seek */
    int64_t last_pos;

    /******************************************/
    /* private mpegts data */
    /* scan context */
    /** structure to keep track of Program->pids mapping */
    unsigned int nb_prg;
    struct Program *prg;

    int8_t crc_validity[NB_PID_MAX];

    /** filters for various streams specified by PMT + for the PAT and PMT */
    MpegTSFilter *pids[NB_PID_MAX];
    int current_pid;
};
 
/* AVOption table for the "mpegtsraw" demuxer variant. */
static const AVOption mpegtsraw_options[] = {
    {"compute_pcr", "Compute exact PCR for each transport stream packet.", offsetof(MpegTSContext, mpeg2ts_compute_pcr), AV_OPT_TYPE_INT,
     {.i64 = 0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass mpegtsraw_class = {
    .class_name = "mpegtsraw demuxer",
    .item_name  = av_default_item_name,
    .option     = mpegtsraw_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* AVOption table for the regular "mpegts" demuxer. */
static const AVOption mpegts_options[] = {
    {"fix_teletext_pts", "Try to fix pts values of dvb teletext streams.", offsetof(MpegTSContext, fix_teletext_pts), AV_OPT_TYPE_INT,
     {.i64 = 1}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass mpegts_class = {
    .class_name = "mpegts demuxer",
    .item_name  = av_default_item_name,
    .option     = mpegts_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* TS stream handling */

/* States of the PES reassembly state machine in mpegts_push_data(). */
enum MpegTSState {
    MPEGTS_HEADER = 0,     /* gathering the first PES_START_SIZE bytes */
    MPEGTS_PESHEADER,      /* gathering the fixed PES_HEADER_SIZE bytes */
    MPEGTS_PESHEADER_FILL, /* gathering the variable-length header remainder */
    MPEGTS_PAYLOAD,        /* accumulating payload bytes */
    MPEGTS_SKIP,           /* discarding until the next payload unit start */
};

/* enough for PES header + length */
#define PES_START_SIZE  6
#define PES_HEADER_SIZE 9
#define MAX_PES_HEADER_SIZE (9 + 255)

/* Per-pid PES reassembly context; also stored as the AVStream's priv_data. */
typedef struct PESContext {
    int pid;
    int pcr_pid; /**< if -1 then all packets containing PCR are considered */
    int stream_type;
    MpegTSContext *ts;
    AVFormatContext *stream;
    AVStream *st;
    AVStream *sub_st; /**< stream for the embedded AC3 stream in HDMV TrueHD */
    enum MpegTSState state;
    /* used to get the format */
    int data_index;            /* bytes gathered in the current state */
    int flags; /**< copied to the AVPacket flags */
    int total_size;            /* expected payload size, MAX_PES_PAYLOAD if unbounded */
    int pes_header_size;
    int extended_stream_id;
    int64_t pts, dts;
    int64_t ts_packet_pos; /**< position of first TS packet of this PES packet */
    uint8_t header[MAX_PES_HEADER_SIZE];
    AVBufferRef *buffer;       /* payload buffer, handed off to the AVPacket */
    SLConfigDescr sl;          /* MPEG-4 SL config, used when stream_type == 0x12 */
    int64_t last_pcr;
} PESContext;
 
extern AVInputFormat ff_mpegts_demuxer;
 
/* Drop the stream list of the exported AVProgram with the given id. */
static void clear_avprogram(MpegTSContext *ts, unsigned int programid)
{
    int i;

    for (i = 0; i < ts->stream->nb_programs; i++) {
        AVProgram *cand = ts->stream->programs[i];
        if (cand->id == programid) {
            cand->nb_stream_indexes = 0;
            return;
        }
    }
}
 
/* Clear a program: first its exported AVProgram, then the internal pid map. */
static void clear_program(MpegTSContext *ts, unsigned int programid)
{
    int i;

    clear_avprogram(ts, programid);
    for (i = 0; i < ts->nb_prg; i++) {
        if (ts->prg[i].id == programid)
            ts->prg[i].nb_pids = 0;
    }
}
 
/* Release the whole internal Program array and reset the counter. */
static void clear_programs(MpegTSContext *ts)
{
    av_freep(&ts->prg);
    ts->nb_prg = 0;
}
 
/* Append a new (empty) Program entry for programid to ts->prg. */
static void add_pat_entry(MpegTSContext *ts, unsigned int programid)
{
    struct Program *entry;

    /* grow the table by one; on failure av_reallocp_array() freed it, so
     * the count must be reset to stay consistent */
    if (av_reallocp_array(&ts->prg, ts->nb_prg + 1, sizeof(*ts->prg)) < 0) {
        ts->nb_prg = 0;
        return;
    }
    entry          = &ts->prg[ts->nb_prg++];
    entry->id      = programid;
    entry->nb_pids = 0;
}
 
/* Record that pid belongs to the program with id programid (no-op if the
 * program is unknown or already holds MAX_PIDS_PER_PROGRAM pids). */
static void add_pid_to_pmt(MpegTSContext *ts, unsigned int programid, unsigned int pid)
{
    int i;

    for (i = 0; i < ts->nb_prg; i++) {
        struct Program *prog = &ts->prg[i];
        if (prog->id != programid)
            continue;
        if (prog->nb_pids < MAX_PIDS_PER_PROGRAM)
            prog->pids[prog->nb_pids++] = pid;
        return;
    }
}
 
/* Store the PCR pid on the exported AVProgram with the matching id. */
static void set_pcr_pid(AVFormatContext *s, unsigned int programid, unsigned int pid)
{
    int i;

    for (i = 0; i < s->nb_programs; i++) {
        if (s->programs[i]->id == programid) {
            s->programs[i]->pcr_pid = pid;
            return;
        }
    }
}
 
/**
 * @brief decide whether packets on the given pid should be dropped, based
 *        on the caller's program discard selection
 * @param ts  TS context
 * @param pid pid to test
 * @return 1 if the pid is only comprised in programs that have
 *         .discard=AVDISCARD_ALL, 0 otherwise
 */
static int discard_pid(MpegTSContext *ts, unsigned int pid)
{
    int i, j, k;
    int used = 0, discarded = 0;

    /* fast path: if no program is set to AVDISCARD_ALL, nothing can ever
     * be discarded */
    for (k = 0; k < ts->stream->nb_programs; k++)
        if (ts->stream->programs[k]->discard == AVDISCARD_ALL)
            break;
    if (k == ts->stream->nb_programs)
        return 0;

    /* count, over all programs that contain this pid, how many keep it and
     * how many discard it */
    for (i = 0; i < ts->nb_prg; i++) {
        struct Program *prog = &ts->prg[i];
        for (j = 0; j < prog->nb_pids; j++) {
            if (prog->pids[j] != pid)
                continue;
            for (k = 0; k < ts->stream->nb_programs; k++) {
                if (ts->stream->programs[k]->id == prog->id) {
                    if (ts->stream->programs[k]->discard == AVDISCARD_ALL)
                        discarded++;
                    else
                        used++;
                }
            }
        }
    }

    /* discard only when every referencing program discards it */
    return !used && discarded;
}
 
/**
 * Assemble PSI sections out of TS packet payloads and call "section_cb"
 * once a complete section (at most 4096 bytes) has been gathered.
 */
static void write_section_data(AVFormatContext *s, MpegTSFilter *tss1,
                               const uint8_t *buf, int buf_size, int is_start)
{
    MpegTSContext *ts = s->priv_data;
    MpegTSSectionFilter *tss = &tss1->u.section_filter;
    int len;

    if (is_start) {
        /* payload_unit_start: a new section begins, reset the assembly */
        memcpy(tss->section_buf, buf, buf_size);
        tss->section_index = buf_size;
        tss->section_h_size = -1;         /* total length not yet known */
        tss->end_of_section_reached = 0;
    } else {
        if (tss->end_of_section_reached)
            return;
        /* append, clamped to the 4096-byte section buffer */
        len = 4096 - tss->section_index;
        if (buf_size < len)
            len = buf_size;
        memcpy(tss->section_buf + tss->section_index, buf, len);
        tss->section_index += len;
    }

    /* compute section length once the 12-bit length field is available */
    if (tss->section_h_size == -1 && tss->section_index >= 3) {
        len = (AV_RB16(tss->section_buf + 1) & 0xfff) + 3;
        if (len > 4096)
            return;
        tss->section_h_size = len;
    }

    if (tss->section_h_size != -1 && tss->section_index >= tss->section_h_size) {
        int crc_valid = 1;
        tss->end_of_section_reached = 1;

        if (tss->check_crc){
            crc_valid = !av_crc(av_crc_get_table(AV_CRC_32_IEEE), -1, tss->section_buf, tss->section_h_size);
            if (crc_valid){
                /* good CRC: restore this pid's trust counter */
                ts->crc_validity[ tss1->pid ] = 100;
            }else if(ts->crc_validity[ tss1->pid ] > -10){
                ts->crc_validity[ tss1->pid ]--;
            }else
                /* pid failed CRC repeatedly: deliver the section anyway,
                 * crc_valid == 2 marks it as "unchecked" */
                crc_valid = 2;
        }
        if (crc_valid)
            tss->section_cb(tss1, tss->section_buf, tss->section_h_size);
    }
}
 
/* Create and register a section filter for the given pid.
 * Returns the filter, or NULL if the pid is invalid, already filtered, or
 * on allocation failure. */
static MpegTSFilter *mpegts_open_section_filter(MpegTSContext *ts, unsigned int pid,
                                                SectionCallback *section_cb, void *opaque,
                                                int check_crc)

{
    MpegTSFilter *filter;
    MpegTSSectionFilter *sec;

    av_dlog(ts->stream, "Filter: pid=0x%x\n", pid);

    if (pid >= NB_PID_MAX || ts->pids[pid])
        return NULL;
    filter = av_mallocz(sizeof(MpegTSFilter));
    if (!filter)
        return NULL;
    filter->type    = MPEGTS_SECTION;
    filter->pid     = pid;
    filter->es_id   = -1;
    filter->last_cc = -1;
    sec = &filter->u.section_filter;
    sec->section_cb  = section_cb;
    sec->opaque      = opaque;
    sec->section_buf = av_malloc(MAX_SECTION_SIZE);
    sec->check_crc   = check_crc;
    if (!sec->section_buf) {
        /* BUG FIX: the filter used to be registered in ts->pids[] before
         * this allocation, so failing here left a dangling pointer in the
         * pid table after av_free(). Register only once fully built. */
        av_free(filter);
        return NULL;
    }
    ts->pids[pid] = filter;
    return filter;
}
 
/* Create and register a PES filter for the given pid.
 * Returns the filter, or NULL if the pid is invalid, already filtered, or
 * on allocation failure. */
static MpegTSFilter *mpegts_open_pes_filter(MpegTSContext *ts, unsigned int pid,
                                            PESCallback *pes_cb,
                                            void *opaque)
{
    MpegTSFilter *f;
    MpegTSPESFilter *pes;

    if (pid >= NB_PID_MAX || ts->pids[pid])
        return NULL;
    f = av_mallocz(sizeof(*f));
    if (!f)
        return NULL;
    ts->pids[pid] = f;
    f->type    = MPEGTS_PES;
    f->pid     = pid;
    f->es_id   = -1;
    f->last_cc = -1;
    pes = &f->u.pes_filter;
    pes->pes_cb = pes_cb;
    pes->opaque = opaque;
    return f;
}
 
/* Tear down a filter and unregister it from the pid table. */
static void mpegts_close_filter(MpegTSContext *ts, MpegTSFilter *filter)
{
    int pid;

    pid = filter->pid;
    if (filter->type == MPEGTS_SECTION)
        av_freep(&filter->u.section_filter.section_buf);
    else if (filter->type == MPEGTS_PES) {
        PESContext *pes = filter->u.pes_filter.opaque;
        av_buffer_unref(&pes->buffer);
        /* referenced private data will be freed later in
         * avformat_close_input (pes is the AVStream's priv_data) */
        if (!((PESContext *)filter->u.pes_filter.opaque)->st) {
            av_freep(&filter->u.pes_filter.opaque);
        }
    }

    av_free(filter);
    ts->pids[pid] = NULL;
}
 
/* Count, for every phase offset modulo packet_size, how many plausible TS
 * sync bytes fall on that phase; return the best count and optionally the
 * winning phase via *index. */
static int analyze(const uint8_t *buf, int size, int packet_size, int *index)
{
    int stat[TS_MAX_PACKET_SIZE];
    int i;
    int phase = 0;
    int best  = 0;

    memset(stat, 0, packet_size * sizeof(int));

    for (i = 0; i < size - 3; i++) {
        /* 0x47 sync byte, transport_error_indicator clear, and the byte
         * three positions on must not look like another sync byte */
        if (buf[i] == 0x47 && !(buf[i + 1] & 0x80) && buf[i + 3] != 0x47) {
            stat[phase]++;
            if (stat[phase] > best) {
                best = stat[phase];
                if (index)
                    *index = phase;
            }
        }
        if (++phase == packet_size)
            phase = 0;
    }

    return best;
}
 
/* Autodetect the raw packet size (188 / DVH-S / FEC) by scoring each
 * candidate with analyze(). Needs at least 5 FEC-sized packets of data;
 * returns -1 when the input is too short or no size clearly wins. */
static int get_packet_size(const uint8_t *buf, int size)
{
    int score, fec_score, dvhs_score;

    if (size < (TS_FEC_PACKET_SIZE * 5 + 1))
        return -1;

    score      = analyze(buf, size, TS_PACKET_SIZE, NULL);
    dvhs_score = analyze(buf, size, TS_DVHS_PACKET_SIZE, NULL);
    fec_score  = analyze(buf, size, TS_FEC_PACKET_SIZE, NULL);
    av_dlog(NULL, "score: %d, dvhs_score: %d, fec_score: %d \n",
            score, dvhs_score, fec_score);

    if (score > fec_score && score > dvhs_score)
        return TS_PACKET_SIZE;
    if (dvhs_score > score && dvhs_score > fec_score)
        return TS_DVHS_PACKET_SIZE;
    if (score < fec_score && dvhs_score < fec_score)
        return TS_FEC_PACKET_SIZE;
    return -1; /* ambiguous */
}
 
/* Common header fields shared by all PSI sections; filled in by
 * parse_section_header(). */
typedef struct SectionHeader {
    uint8_t tid;        /* table id */
    uint16_t id;        /* table id extension */
    uint8_t version;
    uint8_t sec_num;
    uint8_t last_sec_num;
} SectionHeader;
 
/* Read one byte at *pp and advance; returns -1 past p_end. */
static inline int get8(const uint8_t **pp, const uint8_t *p_end)
{
    const uint8_t *cur = *pp;

    if (cur >= p_end)
        return -1;
    *pp = cur + 1;
    return cur[0];
}
 
/* Read a big-endian 16-bit value at *pp and advance; returns -1 when fewer
 * than two bytes remain. */
static inline int get16(const uint8_t **pp, const uint8_t *p_end)
{
    const uint8_t *cur = *pp;

    if (cur + 1 >= p_end)
        return -1;
    *pp = cur + 2;
    return (cur[0] << 8) | cur[1]; /* big-endian, same as AV_RB16() */
}
 
/* Read a DVB string preceded by its 8-bit length; returns a freshly
 * allocated NUL-terminated copy (caller frees), or NULL on truncated input
 * or allocation failure. *pp is advanced only on success. */
static char *getstr8(const uint8_t **pp, const uint8_t *p_end)
{
    const uint8_t *cur = *pp;
    char *str;
    int len = get8(&cur, p_end);

    if (len < 0 || cur + len > p_end)
        return NULL;
    str = av_malloc(len + 1);
    if (!str)
        return NULL;
    memcpy(str, cur, len);
    str[len] = '\0';
    *pp = cur + len;
    return str;
}
 
/* Parse the common section header into *h, advancing *pp past it.
 * Returns 0 on success, -1 if the buffer is too short. */
static int parse_section_header(SectionHeader *h,
                                const uint8_t **pp, const uint8_t *p_end)
{
    int val;

    val = get8(pp, p_end);
    if (val < 0)
        return -1;
    h->tid = val;
    *pp += 2;          /* skip the 16-bit section_length field */
    val = get16(pp, p_end);
    if (val < 0)
        return -1;
    h->id = val;
    val = get8(pp, p_end);
    if (val < 0)
        return -1;
    h->version = (val >> 1) & 0x1f;
    val = get8(pp, p_end);
    if (val < 0)
        return -1;
    h->sec_num = val;
    val = get8(pp, p_end);
    if (val < 0)
        return -1;
    h->last_sec_num = val;
    return 0;
}
 
/* Mapping table entry: MPEG-TS stream_type (or descriptor tag / fourcc)
 * to codec type and id; tables are 0-terminated. */
typedef struct {
    uint32_t stream_type;
    enum AVMediaType codec_type;
    enum AVCodecID codec_id;
} StreamType;

/* ISO/IEC 13818-1 stream_type values. */
static const StreamType ISO_types[] = {
    { 0x01, AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_MPEG2VIDEO },
    { 0x02, AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_MPEG2VIDEO },
    { 0x03, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_MP3 },
    { 0x04, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_MP3 },
    { 0x0f, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_AAC },
    { 0x10, AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_MPEG4 },
    /* Makito encoder sets stream type 0x11 for AAC,
     * so auto-detect LOAS/LATM instead of hardcoding it. */
#if !CONFIG_LOAS_DEMUXER
    { 0x11, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_AAC_LATM }, /* LATM syntax */
#endif
    { 0x1b, AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_H264 },
    { 0x24, AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_HEVC },
    { 0x42, AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_CAVS },
    { 0xd1, AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_DIRAC },
    { 0xea, AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_VC1 },
    { 0 },
};

/* Blu-ray / HDMV private stream_type values. */
static const StreamType HDMV_types[] = {
    { 0x80, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_PCM_BLURAY },
    { 0x81, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_AC3 },
    { 0x82, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_DTS },
    { 0x83, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_TRUEHD },
    { 0x84, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_EAC3 },
    { 0x85, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_DTS }, /* DTS HD */
    { 0x86, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_DTS }, /* DTS HD MASTER*/
    { 0xa1, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_EAC3 }, /* E-AC3 Secondary Audio */
    { 0xa2, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_DTS },  /* DTS Express Secondary Audio */
    { 0x90, AVMEDIA_TYPE_SUBTITLE, AV_CODEC_ID_HDMV_PGS_SUBTITLE },
    { 0 },
};

/* ATSC ? */
static const StreamType MISC_types[] = {
    { 0x81, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_AC3 },
    { 0x8a, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_DTS },
    { 0 },
};

/* Keyed by the fourcc from the registration descriptor. */
static const StreamType REGD_types[] = {
    { MKTAG('d','r','a','c'), AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_DIRAC },
    { MKTAG('A','C','-','3'), AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_AC3 },
    { MKTAG('B','S','S','D'), AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_S302M },
    { MKTAG('D','T','S','1'), AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_DTS },
    { MKTAG('D','T','S','2'), AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_DTS },
    { MKTAG('D','T','S','3'), AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_DTS },
    { MKTAG('H','E','V','C'), AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_HEVC },
    { MKTAG('K','L','V','A'), AVMEDIA_TYPE_DATA,  AV_CODEC_ID_SMPTE_KLV },
    { MKTAG('V','C','-','1'), AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_VC1 },
    { 0 },
};

/* descriptor present — keyed by the DVB descriptor tag */
static const StreamType DESC_types[] = {
    { 0x6a, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_AC3 },  /* AC-3 descriptor */
    { 0x7a, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_EAC3 }, /* E-AC-3 descriptor */
    { 0x7b, AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_DTS },
    { 0x56, AVMEDIA_TYPE_SUBTITLE, AV_CODEC_ID_DVB_TELETEXT },
    { 0x59, AVMEDIA_TYPE_SUBTITLE, AV_CODEC_ID_DVB_SUBTITLE }, /* subtitling descriptor */
    { 0 },
};
 
/* Look up stream_type in one of the StreamType tables above and set the
 * stream's codec type/id on a match; no-op if the codec is already open. */
static void mpegts_find_stream_type(AVStream *st,
                                    uint32_t stream_type, const StreamType *types)
{
    /* codec parameters must not be touched once the decoder is open */
    if (avcodec_is_open(st->codec)) {
        av_log(NULL, AV_LOG_DEBUG, "cannot set stream info, codec is open\n");
        return;
    }

    while (types->stream_type) {
        if (types->stream_type == stream_type) {
            st->codec->codec_type = types->codec_type;
            st->codec->codec_id   = types->codec_id;
            st->request_probe     = 0;
            return;
        }
        types++;
    }
}
 
/* Initialize an AVStream for a PES stream: set the 90 kHz timebase, derive
 * the codec type/id from stream_type (plus the program registration
 * descriptor), and create the extra AC3 substream for HDMV TrueHD tracks.
 * Returns 0, or AVERROR(ENOMEM). */
static int mpegts_set_stream_info(AVStream *st, PESContext *pes,
                                  uint32_t stream_type, uint32_t prog_reg_desc)
{
    int old_codec_type= st->codec->codec_type;
    int old_codec_id  = st->codec->codec_id;

    if (avcodec_is_open(st->codec)) {
        av_log(pes->stream, AV_LOG_DEBUG, "cannot set stream info, codec is open\n");
        return 0;
    }

    avpriv_set_pts_info(st, 33, 1, 90000);
    st->priv_data = pes;
    st->codec->codec_type = AVMEDIA_TYPE_DATA;
    st->codec->codec_id   = AV_CODEC_ID_NONE;
    st->need_parsing = AVSTREAM_PARSE_FULL;
    pes->st = st;
    pes->stream_type = stream_type;

    av_log(pes->stream, AV_LOG_DEBUG,
           "stream=%d stream_type=%x pid=%x prog_reg_desc=%.4s\n",
           st->index, pes->stream_type, pes->pid, (char*)&prog_reg_desc);

    st->codec->codec_tag = pes->stream_type;

    mpegts_find_stream_type(st, pes->stream_type, ISO_types);
    if ((prog_reg_desc == AV_RL32("HDMV") ||
         prog_reg_desc == AV_RL32("HDPR")) &&
        st->codec->codec_id == AV_CODEC_ID_NONE) {
        mpegts_find_stream_type(st, pes->stream_type, HDMV_types);
        if (pes->stream_type == 0x83) {
            // HDMV TrueHD streams also contain an AC3 coded version of the
            // audio track - add a second stream for this
            AVStream *sub_st;
            // priv_data cannot be shared between streams
            PESContext *sub_pes = av_malloc(sizeof(*sub_pes));
            if (!sub_pes)
                return AVERROR(ENOMEM);
            memcpy(sub_pes, pes, sizeof(*sub_pes));

            sub_st = avformat_new_stream(pes->stream, NULL);
            if (!sub_st) {
                av_free(sub_pes);
                return AVERROR(ENOMEM);
            }

            sub_st->id = pes->pid;
            avpriv_set_pts_info(sub_st, 33, 1, 90000);
            sub_st->priv_data = sub_pes;
            sub_st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            sub_st->codec->codec_id   = AV_CODEC_ID_AC3;
            sub_st->need_parsing = AVSTREAM_PARSE_FULL;
            sub_pes->sub_st = pes->sub_st = sub_st;
        }
    }
    if (st->codec->codec_id == AV_CODEC_ID_NONE)
        mpegts_find_stream_type(st, pes->stream_type, MISC_types);
    if (st->codec->codec_id == AV_CODEC_ID_NONE){
        /* nothing matched: restore whatever was previously set */
        st->codec->codec_id  = old_codec_id;
        st->codec->codec_type= old_codec_type;
    }

    return 0;
}
 
/* Hand the assembled PES payload over as an AVPacket (transferring buffer
 * ownership) and reset the PES context for the next packet. */
static void new_pes_packet(PESContext *pes, AVPacket *pkt)
{
    av_init_packet(pkt);

    pkt->buf  = pes->buffer;
    pkt->data = pes->buffer->data;
    pkt->size = pes->data_index;

    /* a size mismatch against the header's PES_packet_length means data
     * was lost somewhere */
    if(pes->total_size != MAX_PES_PAYLOAD &&
       pes->pes_header_size + pes->data_index != pes->total_size + PES_START_SIZE) {
        av_log(pes->stream, AV_LOG_WARNING, "PES packet size mismatch\n");
        pes->flags |= AV_PKT_FLAG_CORRUPT;
    }
    memset(pkt->data+pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    // Separate out the AC3 substream from an HDMV combined TrueHD/AC3 PID
    if (pes->sub_st && pes->stream_type == 0x83 && pes->extended_stream_id == 0x76)
        pkt->stream_index = pes->sub_st->index;
    else
        pkt->stream_index = pes->st->index;
    pkt->pts = pes->pts;
    pkt->dts = pes->dts;
    /* store position of first TS packet of this PES packet */
    pkt->pos = pes->ts_packet_pos;
    pkt->flags = pes->flags;

    /* reset pts values */
    pes->pts = AV_NOPTS_VALUE;
    pes->dts = AV_NOPTS_VALUE;
    pes->buffer = NULL;    /* ownership moved into pkt->buf */
    pes->data_index = 0;
    pes->flags = 0;
}
 
/* Read a timestamp of up to 64 bits from gb, or return AV_NOPTS_VALUE when
 * the bitstream is too short. */
static uint64_t get_ts64(GetBitContext *gb, int bits)
{
    return get_bits_left(gb) < bits ? AV_NOPTS_VALUE : get_bits64(gb, bits);
}
 
/* Parse an MPEG-4 SL packet header according to the stream's
 * SLConfigDescriptor, updating pes->pts/dts when timestamps are present.
 * Returns the number of header bytes consumed (rounded up to bytes). */
static int read_sl_header(PESContext *pes, SLConfigDescr *sl, const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    int au_start_flag = 0, au_end_flag = 0, ocr_flag = 0, idle_flag = 0;
    int padding_flag = 0, padding_bits = 0, inst_bitrate_flag = 0;
    int dts_flag = -1, cts_flag = -1;
    int64_t dts = AV_NOPTS_VALUE, cts = AV_NOPTS_VALUE;

    init_get_bits(&gb, buf, buf_size*8);

    if (sl->use_au_start)
        au_start_flag = get_bits1(&gb);
    if (sl->use_au_end)
        au_end_flag = get_bits1(&gb);
    if (!sl->use_au_start && !sl->use_au_end)
        /* neither flag configured: every packet is a whole access unit */
        au_start_flag = au_end_flag = 1;
    if (sl->ocr_len > 0)
        ocr_flag = get_bits1(&gb);
    if (sl->use_idle)
        idle_flag = get_bits1(&gb);
    if (sl->use_padding)
        padding_flag = get_bits1(&gb);
    if (padding_flag)
        padding_bits = get_bits(&gb, 3);

    /* idle and pure-padding packets carry no further header fields */
    if (!idle_flag && (!padding_flag || padding_bits != 0)) {
        if (sl->packet_seq_num_len)
            skip_bits_long(&gb, sl->packet_seq_num_len);
        if (sl->degr_prior_len)
            if (get_bits1(&gb))
                skip_bits(&gb, sl->degr_prior_len);
        if (ocr_flag)
            skip_bits_long(&gb, sl->ocr_len);
        if (au_start_flag) {
            if (sl->use_rand_acc_pt)
                get_bits1(&gb);
            if (sl->au_seq_num_len > 0)
                skip_bits_long(&gb, sl->au_seq_num_len);
            if (sl->use_timestamps) {
                dts_flag = get_bits1(&gb);
                cts_flag = get_bits1(&gb);
            }
        }
        if (sl->inst_bitrate_len)
            inst_bitrate_flag = get_bits1(&gb);
        if (dts_flag == 1)
            dts = get_ts64(&gb, sl->timestamp_len);
        if (cts_flag == 1)
            cts = get_ts64(&gb, sl->timestamp_len);
        if (sl->au_len > 0)
            skip_bits_long(&gb, sl->au_len);
        if (inst_bitrate_flag)
            skip_bits_long(&gb, sl->inst_bitrate_len);
    }

    if (dts != AV_NOPTS_VALUE)
        pes->dts = dts;
    if (cts != AV_NOPTS_VALUE)
        pes->pts = cts;

    /* the SL timestamp resolution replaces the default 90 kHz timebase */
    if (sl->timestamp_len && sl->timestamp_res)
        avpriv_set_pts_info(pes->st, sl->timestamp_len, 1, sl->timestamp_res);

    return (get_bits_count(&gb) + 7) >> 3;
}
 
/* PES callback: reassemble PES packets from TS payload chunks via the
 * MpegTSState machine, emitting a complete AVPacket into ts->pkt (and
 * setting ts->stop_parse) when one is ready.
 * NOTE(review): despite the old comment, this returns 0 on success and a
 * negative error code on failure — completion is signalled via ts->stop_parse. */
static int mpegts_push_data(MpegTSFilter *filter,
                            const uint8_t *buf, int buf_size, int is_start,
                            int64_t pos, int64_t pcr)
{
    PESContext *pes = filter->u.pes_filter.opaque;
    MpegTSContext *ts = pes->ts;
    const uint8_t *p;
    int len, code;

    if(!ts->pkt)
        return 0;

    if (pcr != -1)
        pes->last_pcr = pcr;

    if (is_start) {
        if (pes->state == MPEGTS_PAYLOAD && pes->data_index > 0) {
            /* flush the unfinished (unbounded-size) previous packet */
            new_pes_packet(pes, ts->pkt);
            ts->stop_parse = 1;
        }
        pes->state = MPEGTS_HEADER;
        pes->data_index = 0;
        pes->ts_packet_pos = pos;
    }
    p = buf;
    while (buf_size > 0) {
        switch(pes->state) {
        case MPEGTS_HEADER:
            len = PES_START_SIZE - pes->data_index;
            if (len > buf_size)
                len = buf_size;
            memcpy(pes->header + pes->data_index, p, len);
            pes->data_index += len;
            p += len;
            buf_size -= len;
            if (pes->data_index == PES_START_SIZE) {
                /* we got all the PES or section header. We can now
                   decide */
                if (pes->header[0] == 0x00 && pes->header[1] == 0x00 &&
                    pes->header[2] == 0x01) {
                    /* it must be an mpeg2 PES stream */
                    code = pes->header[3] | 0x100;
                    av_dlog(pes->stream, "pid=%x pes_code=%#x\n", pes->pid, code);

                    if ((pes->st && pes->st->discard == AVDISCARD_ALL &&
                         (!pes->sub_st || pes->sub_st->discard == AVDISCARD_ALL)) ||
                        code == 0x1be) /* padding_stream */
                        goto skip;

                    /* stream not present in PMT */
                    if (!pes->st) {
                        pes->st = avformat_new_stream(ts->stream, NULL);
                        if (!pes->st)
                            return AVERROR(ENOMEM);
                        pes->st->id = pes->pid;
                        mpegts_set_stream_info(pes->st, pes, 0, 0);
                    }

                    pes->total_size = AV_RB16(pes->header + 4);
                    /* NOTE: a zero total size means the PES size is
                       unbounded */
                    if (!pes->total_size)
                        pes->total_size = MAX_PES_PAYLOAD;

                    /* allocate pes buffer */
                    pes->buffer = av_buffer_alloc(pes->total_size +
                                                  FF_INPUT_BUFFER_PADDING_SIZE);
                    if (!pes->buffer)
                        return AVERROR(ENOMEM);

                    /* these stream ids carry no optional PES header */
                    if (code != 0x1bc && code != 0x1bf && /* program_stream_map, private_stream_2 */
                        code != 0x1f0 && code != 0x1f1 && /* ECM, EMM */
                        code != 0x1ff && code != 0x1f2 && /* program_stream_directory, DSMCC_stream */
                        code != 0x1f8) {                  /* ITU-T Rec. H.222.1 type E stream */
                        pes->state = MPEGTS_PESHEADER;
                        if (pes->st->codec->codec_id == AV_CODEC_ID_NONE && !pes->st->request_probe) {
                            av_dlog(pes->stream, "pid=%x stream_type=%x probing\n",
                                    pes->pid, pes->stream_type);
                            pes->st->request_probe= 1;
                        }
                    } else {
                        pes->state = MPEGTS_PAYLOAD;
                        pes->data_index = 0;
                    }
                } else {
                    /* otherwise, it should be a table */
                    /* skip packet */
                skip:
                    pes->state = MPEGTS_SKIP;
                    continue;
                }
            }
            break;
            /**********************************************/
            /* PES packing parsing */
        case MPEGTS_PESHEADER:
            len = PES_HEADER_SIZE - pes->data_index;
            if (len < 0)
                return -1;
            if (len > buf_size)
                len = buf_size;
            memcpy(pes->header + pes->data_index, p, len);
            pes->data_index += len;
            p += len;
            buf_size -= len;
            if (pes->data_index == PES_HEADER_SIZE) {
                /* PES_header_data_length is the last byte of the fixed part */
                pes->pes_header_size = pes->header[8] + 9;
                pes->state = MPEGTS_PESHEADER_FILL;
            }
            break;
        case MPEGTS_PESHEADER_FILL:
            len = pes->pes_header_size - pes->data_index;
            if (len < 0)
                return -1;
            if (len > buf_size)
                len = buf_size;
            memcpy(pes->header + pes->data_index, p, len);
            pes->data_index += len;
            p += len;
            buf_size -= len;
            if (pes->data_index == pes->pes_header_size) {
                const uint8_t *r;
                unsigned int flags, pes_ext, skip;

                flags = pes->header[7];
                r = pes->header + 9;
                pes->pts = AV_NOPTS_VALUE;
                pes->dts = AV_NOPTS_VALUE;
                if ((flags & 0xc0) == 0x80) {
                    /* PTS only */
                    pes->dts = pes->pts = ff_parse_pes_pts(r);
                    r += 5;
                } else if ((flags & 0xc0) == 0xc0) {
                    /* PTS and DTS */
                    pes->pts = ff_parse_pes_pts(r);
                    r += 5;
                    pes->dts = ff_parse_pes_pts(r);
                    r += 5;
                }
                pes->extended_stream_id = -1;
                if (flags & 0x01) { /* PES extension */
                    pes_ext = *r++;
                    /* Skip PES private data, program packet sequence counter and P-STD buffer */
                    skip = (pes_ext >> 4) & 0xb;
                    skip += skip & 0x9;
                    r += skip;
                    if ((pes_ext & 0x41) == 0x01 &&
                        (r + 2) <= (pes->header + pes->pes_header_size)) {
                        /* PES extension 2 */
                        if ((r[0] & 0x7f) > 0 && (r[1] & 0x80) == 0)
                            pes->extended_stream_id = r[1];
                    }
                }

                /* we got the full header. We parse it and get the payload */
                pes->state = MPEGTS_PAYLOAD;
                pes->data_index = 0;
                if (pes->stream_type == 0x12 && buf_size > 0) {
                    /* MPEG-4 SL-packetized stream: strip the SL header */
                    int sl_header_bytes = read_sl_header(pes, &pes->sl, p, buf_size);
                    pes->pes_header_size += sl_header_bytes;
                    p += sl_header_bytes;
                    buf_size -= sl_header_bytes;
                }
                if (pes->ts->fix_teletext_pts && pes->st->codec->codec_id == AV_CODEC_ID_DVB_TELETEXT) {
                    AVProgram *p = NULL;
                    while ((p = av_find_program_from_stream(pes->stream, p, pes->st->index))) {
                        if (p->pcr_pid != -1 && p->discard != AVDISCARD_ALL) {
                            MpegTSFilter *f = pes->ts->pids[p->pcr_pid];
                            if (f && f->type == MPEGTS_PES) {
                                PESContext *pcrpes = f->u.pes_filter.opaque;
                                if (pcrpes && pcrpes->last_pcr != -1 && pcrpes->st && pcrpes->st->discard != AVDISCARD_ALL) {
                                    // teletext packets do not always have correct timestamps,
                                    // the standard says they should be handled after 40.6 ms at most,
                                    // and the pcr error to this packet should be no more than 100 ms.
                                    // TODO: we should interpolate the PCR, not just use the last one
                                    int64_t pcr = pcrpes->last_pcr / 300;
                                    pes->st->pts_wrap_reference = pcrpes->st->pts_wrap_reference;
                                    pes->st->pts_wrap_behavior = pcrpes->st->pts_wrap_behavior;
                                    if (pes->dts == AV_NOPTS_VALUE || pes->dts < pcr) {
                                        pes->pts = pes->dts = pcr;
                                    } else if (pes->dts > pcr + 3654 + 9000) {
                                        pes->pts = pes->dts = pcr + 3654 + 9000;
                                    }
                                    break;
                                }
                            }
                        }
                    }
                }
            }
            break;
        case MPEGTS_PAYLOAD:
            if (buf_size > 0 && pes->buffer) {
                if (pes->data_index > 0 && pes->data_index+buf_size > pes->total_size) {
                    /* buffer would overflow: emit what we have and restart
                     * with an unbounded-size buffer */
                    new_pes_packet(pes, ts->pkt);
                    pes->total_size = MAX_PES_PAYLOAD;
                    pes->buffer = av_buffer_alloc(pes->total_size + FF_INPUT_BUFFER_PADDING_SIZE);
                    if (!pes->buffer)
                        return AVERROR(ENOMEM);
                    ts->stop_parse = 1;
                } else if (pes->data_index == 0 && buf_size > pes->total_size) {
                    // pes packet size is < ts size packet and pes data is padded with 0xff
                    // not sure if this is legal in ts but see issue #2392
                    buf_size = pes->total_size;
                }
                memcpy(pes->buffer->data + pes->data_index, p, buf_size);
                pes->data_index += buf_size;
            }
            buf_size = 0;
            /* emit complete packets with known packet size
             * decreases demuxer delay for infrequent packets like subtitles from
             * a couple of seconds to milliseconds for properly muxed files.
             * total_size is the number of bytes following pes_packet_length
             * in the pes header, i.e. not counting the first PES_START_SIZE bytes */
            if (!ts->stop_parse && pes->total_size < MAX_PES_PAYLOAD &&
                pes->pes_header_size + pes->data_index == pes->total_size + PES_START_SIZE) {
                ts->stop_parse = 1;
                new_pes_packet(pes, ts->pkt);
            }
            break;
        case MPEGTS_SKIP:
            buf_size = 0;
            break;
        }
    }

    return 0;
}
 
/* Allocate a PES context for pid and attach it to a new PES filter.
 * Returns the context, or NULL on failure. */
static PESContext *add_pes_stream(MpegTSContext *ts, int pid, int pcr_pid)
{
    PESContext *pes;
    MpegTSFilter *tss;

    /* if no pid found, then add a pid context */
    pes = av_mallocz(sizeof(*pes));
    if (!pes)
        return NULL;
    pes->ts       = ts;
    pes->stream   = ts->stream;
    pes->pid      = pid;
    pes->pcr_pid  = pcr_pid;
    pes->state    = MPEGTS_SKIP;
    pes->pts      = AV_NOPTS_VALUE;
    pes->dts      = AV_NOPTS_VALUE;
    pes->last_pcr = -1;
    tss = mpegts_open_pes_filter(ts, pid, mpegts_push_data, pes);
    if (!tss) {
        av_free(pes);
        return NULL;
    }
    return pes;
}
 
#define MAX_LEVEL 4
/* State for recursively parsing MP4 object descriptors out of a buffer. */
typedef struct {
    AVFormatContext *s;
    AVIOContext pb;          /* byte reader over the descriptor buffer */
    Mp4Descr *descr;         /* caller-provided output array */
    Mp4Descr *active_descr;  /* descriptor currently being filled */
    int descr_count;
    int max_descr_count;
    int level;               /* recursion depth, capped at MAX_LEVEL */
} MP4DescrParseContext;
 
/* Wire up a descriptor parse context over buf/size.
 * Returns 0, AVERROR_INVALIDDATA for absurd sizes, or an ffio error. */
static int init_MP4DescrParseContext(
    MP4DescrParseContext *d, AVFormatContext *s, const uint8_t *buf,
    unsigned size, Mp4Descr *descr, int max_descr_count)
{
    int ret;

    if (size > (1 << 30))
        return AVERROR_INVALIDDATA;

    ret = ffio_init_context(&d->pb, (unsigned char *)buf, size, 0,
                            NULL, NULL, NULL, NULL);
    if (ret < 0)
        return ret;

    d->s               = s;
    d->level           = 0;
    d->descr_count     = 0;
    d->descr           = descr;
    d->active_descr    = NULL;
    d->max_descr_count = max_descr_count;
    return 0;
}
 
/* Advance *off to the reader's current position, shrinking *len by the
 * number of bytes consumed since the last call. */
static void update_offsets(AVIOContext *pb, int64_t *off, int *len)
{
    int64_t pos = avio_tell(pb);

    *len -= pos - *off;
    *off  = pos;
}
 
static int parse_mp4_descr(MP4DescrParseContext *d, int64_t off, int len,
int target_tag);
 
/* Parse descriptors back-to-back until the [off, off+len) region is
 * exhausted; stops with -1 on the first malformed descriptor. */
static int parse_mp4_descr_arr(MP4DescrParseContext *d, int64_t off, int len)
{
    while (len > 0) {
        if (parse_mp4_descr(d, off, len, 0) < 0)
            return -1;
        update_offsets(&d->pb, &off, &len);
    }
    return 0;
}
 
/* InitialObjectDescriptor: skip the fixed leading fields, then parse the
 * embedded descriptor array. */
static int parse_MP4IODescrTag(MP4DescrParseContext *d, int64_t off, int len)
{
    avio_rb16(&d->pb); // ID
    /* five single-byte fields are skipped unread here */
    avio_r8(&d->pb);
    avio_r8(&d->pb);
    avio_r8(&d->pb);
    avio_r8(&d->pb);
    avio_r8(&d->pb);
    update_offsets(&d->pb, &off, &len);
    return parse_mp4_descr_arr(d, off, len);
}
 
/* ObjectDescriptor: parse the embedded ES_Descriptor array unless the
 * URL_Flag is set (in which case there is nothing inline to parse). */
static int parse_MP4ODescrTag(MP4DescrParseContext *d, int64_t off, int len)
{
    int id_flags;

    if (len < 2)
        return 0;
    id_flags = avio_rb16(&d->pb);
    if (id_flags & 0x0020)   /* URL_Flag */
        return 0;
    update_offsets(&d->pb, &off, &len);
    return parse_mp4_descr_arr(d, off, len); /* ES_Descriptor[] */
}
 
/* ES_Descriptor: claim a new Mp4Descr slot, then parse the mandatory
 * DecoderConfigDescriptor and the optional SLConfigDescriptor into it. */
static int parse_MP4ESDescrTag(MP4DescrParseContext *d, int64_t off, int len)
{
    int es_id = 0;
    if (d->descr_count >= d->max_descr_count)
        return -1;
    ff_mp4_parse_es_descr(&d->pb, &es_id);
    d->active_descr = d->descr + (d->descr_count++);

    d->active_descr->es_id = es_id;
    update_offsets(&d->pb, &off, &len);
    parse_mp4_descr(d, off, len, MP4DecConfigDescrTag);
    update_offsets(&d->pb, &off, &len);
    if (len > 0)
        parse_mp4_descr(d, off, len, MP4SLDescrTag);
    d->active_descr = NULL;   /* children may only attach to an open ES */
    return 0;
}
 
/* DecoderConfigDescriptor: keep a private copy of the raw payload on the
 * active descriptor (freed by the callers of mp4_read_od/mp4_read_iods). */
static int parse_MP4DecConfigDescrTag(MP4DescrParseContext *d, int64_t off, int len)
{
    Mp4Descr *descr = d->active_descr;
    if (!descr)
        return -1;    /* DecConfig outside any ES_Descriptor */
    d->active_descr->dec_config_descr = av_malloc(len);
    if (!descr->dec_config_descr)
        return AVERROR(ENOMEM);
    descr->dec_config_descr_len = len;
    avio_read(&d->pb, descr->dec_config_descr, len);
    return 0;
}
 
/* SLConfigDescriptor: fill descr->sl with the flag/length layout later
 * consumed by read_sl_header(). Predefined layouts are not implemented. */
static int parse_MP4SLDescrTag(MP4DescrParseContext *d, int64_t off, int len)
{
    Mp4Descr *descr = d->active_descr;
    int predefined;
    if (!descr)
        return -1;

    predefined = avio_r8(&d->pb);
    if (!predefined) {
        int lengths;
        int flags = avio_r8(&d->pb);
        descr->sl.use_au_start       = !!(flags & 0x80);
        descr->sl.use_au_end         = !!(flags & 0x40);
        descr->sl.use_rand_acc_pt    = !!(flags & 0x20);
        descr->sl.use_padding        = !!(flags & 0x08);
        descr->sl.use_timestamps     = !!(flags & 0x04);
        descr->sl.use_idle           = !!(flags & 0x02);
        descr->sl.timestamp_res      = avio_rb32(&d->pb);
        avio_rb32(&d->pb);                      /* 32-bit field skipped unread */
        descr->sl.timestamp_len      = avio_r8(&d->pb);
        descr->sl.ocr_len            = avio_r8(&d->pb);
        descr->sl.au_len             = avio_r8(&d->pb);
        descr->sl.inst_bitrate_len   = avio_r8(&d->pb);
        lengths = avio_rb16(&d->pb);
        descr->sl.degr_prior_len     = lengths >> 12;
        descr->sl.au_seq_num_len     = (lengths >> 7) & 0x1f;
        descr->sl.packet_seq_num_len = (lengths >> 2) & 0x1f;
    } else {
        avpriv_report_missing_feature(d->s, "Predefined SLConfigDescriptor");
    }
    return 0;
}
 
/* Parse one descriptor at the current position; when target_tag is non-zero
 * the descriptor must carry exactly that tag. Recurses into container
 * descriptors, bounded by MAX_LEVEL, and always resyncs the reader to the
 * end of the descriptor before returning. */
static int parse_mp4_descr(MP4DescrParseContext *d, int64_t off, int len,
                           int target_tag) {
    int tag;
    int len1 = ff_mp4_read_descr(d->s, &d->pb, &tag);
    update_offsets(&d->pb, &off, &len);
    if (len < 0 || len1 > len || len1 <= 0) {
        av_log(d->s, AV_LOG_ERROR, "Tag %x length violation new length %d bytes remaining %d\n", tag, len1, len);
        return -1;
    }

    if (d->level++ >= MAX_LEVEL) {
        av_log(d->s, AV_LOG_ERROR, "Maximum MP4 descriptor level exceeded\n");
        goto done;
    }

    if (target_tag && tag != target_tag) {
        av_log(d->s, AV_LOG_ERROR, "Found tag %x expected %x\n", tag, target_tag);
        goto done;
    }

    switch (tag) {
    case MP4IODescrTag:
        parse_MP4IODescrTag(d, off, len1);
        break;
    case MP4ODescrTag:
        parse_MP4ODescrTag(d, off, len1);
        break;
    case MP4ESDescrTag:
        parse_MP4ESDescrTag(d, off, len1);
        break;
    case MP4DecConfigDescrTag:
        parse_MP4DecConfigDescrTag(d, off, len1);
        break;
    case MP4SLDescrTag:
        parse_MP4SLDescrTag(d, off, len1);
        break;
    }

done:
    d->level--;
    /* skip to the end of this descriptor whether or not it was handled */
    avio_seek(&d->pb, off + len1, SEEK_SET);
    return 0;
}
 
/* Parse an IOD buffer (single MP4IODescrTag at the top level) into the
 * caller-provided descr array; *descr_count receives the number filled. */
static int mp4_read_iods(AVFormatContext *s, const uint8_t *buf, unsigned size,
                         Mp4Descr *descr, int *descr_count, int max_descr_count)
{
    MP4DescrParseContext ctx;

    if (init_MP4DescrParseContext(&ctx, s, buf, size, descr, max_descr_count) < 0)
        return -1;

    parse_mp4_descr(&ctx, avio_tell(&ctx.pb), size, MP4IODescrTag);

    *descr_count = ctx.descr_count;
    return 0;
}
 
/* Parse a buffer of concatenated object descriptors into the caller-provided
 * descr array; *descr_count receives the number filled. */
static int mp4_read_od(AVFormatContext *s, const uint8_t *buf, unsigned size,
                       Mp4Descr *descr, int *descr_count, int max_descr_count)
{
    MP4DescrParseContext ctx;

    if (init_MP4DescrParseContext(&ctx, s, buf, size, descr, max_descr_count) < 0)
        return -1;

    parse_mp4_descr_arr(&ctx, avio_tell(&ctx.pb), size);

    *descr_count = ctx.descr_count;
    return 0;
}
 
/* Section callback for the MPEG-4 SL descriptor table: parse the object
 * descriptors and apply each decoder config to the PES stream whose filter
 * carries the matching es_id. */
static void m4sl_cb(MpegTSFilter *filter, const uint8_t *section, int section_len)
{
    MpegTSContext *ts = filter->u.section_filter.opaque;
    SectionHeader h;
    const uint8_t *p, *p_end;
    AVIOContext pb;
    Mp4Descr mp4_descr[MAX_MP4_DESCR_COUNT] = {{ 0 }};
    int mp4_descr_count = 0;
    int i, pid;
    AVFormatContext *s = ts->stream;

    p_end = section + section_len - 4;   /* drop the trailing 4 bytes */
    p = section;
    if (parse_section_header(&h, &p, p_end) < 0)
        return;
    if (h.tid != M4OD_TID)
        return;

    mp4_read_od(s, p, (unsigned)(p_end - p), mp4_descr, &mp4_descr_count, MAX_MP4_DESCR_COUNT);

    /* match every parsed descriptor against the PES filters by es_id */
    for (pid = 0; pid < NB_PID_MAX; pid++) {
        if (!ts->pids[pid])
            continue;
        for (i = 0; i < mp4_descr_count; i++) {
            PESContext *pes;
            AVStream *st;
            if (ts->pids[pid]->es_id != mp4_descr[i].es_id)
                continue;
            if (!(ts->pids[pid] && ts->pids[pid]->type == MPEGTS_PES)) {
                av_log(s, AV_LOG_ERROR, "pid %x is not PES\n", pid);
                continue;
            }
            pes = ts->pids[pid]->u.pes_filter.opaque;
            st = pes->st;
            if (!st) {
                continue;
            }

            pes->sl = mp4_descr[i].sl;

            ffio_init_context(&pb, mp4_descr[i].dec_config_descr,
                              mp4_descr[i].dec_config_descr_len, 0, NULL, NULL, NULL, NULL);
            ff_mp4_read_dec_config_descr(s, st, &pb);
            /* with out-of-band extradata available, no parser is needed */
            if (st->codec->codec_id == AV_CODEC_ID_AAC &&
                st->codec->extradata_size > 0)
                st->need_parsing = 0;
            if (st->codec->codec_id == AV_CODEC_ID_H264 &&
                st->codec->extradata_size > 0)
                st->need_parsing = 0;

            /* derive the media type from the codec id range */
            if (st->codec->codec_id <= AV_CODEC_ID_NONE) {
            } else if (st->codec->codec_id < AV_CODEC_ID_FIRST_AUDIO) {
                st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            } else if (st->codec->codec_id < AV_CODEC_ID_FIRST_SUBTITLE) {
                st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            } else if (st->codec->codec_id < AV_CODEC_ID_FIRST_UNKNOWN) {
                st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
            }
        }
    }
    for (i = 0; i < mp4_descr_count; i++)
        av_free(mp4_descr[i].dec_config_descr);
}
 
/**
 * Parse one MPEG-2 descriptor from *pp and apply it to stream st.
 * On return *pp is advanced to the end of the descriptor; returns 0, or a
 * negative value when the descriptor header is truncated (which stops the
 * caller's descriptor loop).
 */
int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type,
                              const uint8_t **pp, const uint8_t *desc_list_end,
                              Mp4Descr *mp4_descr, int mp4_descr_count, int pid,
                              MpegTSContext *ts)
{
    const uint8_t *desc_end;
    int desc_len, desc_tag, desc_es_id;
    char language[252];
    int i;

    desc_tag = get8(pp, desc_list_end);
    if (desc_tag < 0)
        return -1;
    desc_len = get8(pp, desc_list_end);
    if (desc_len < 0)
        return -1;
    desc_end = *pp + desc_len;
    if (desc_end > desc_list_end)
        return -1;

    av_dlog(fc, "tag: 0x%02x len=%d\n", desc_tag, desc_len);

    /* for private streams the descriptor tag itself selects the codec */
    if (st->codec->codec_id == AV_CODEC_ID_NONE &&
        stream_type == STREAM_TYPE_PRIVATE_DATA)
        mpegts_find_stream_type(st, desc_tag, DESC_types);

    switch (desc_tag) {
    case 0x1E: /* SL descriptor */
        desc_es_id = get16(pp, desc_end);
        if (ts && ts->pids[pid])
            ts->pids[pid]->es_id = desc_es_id;
        /* look up a decoder config previously collected from the IOD */
        for (i = 0; i < mp4_descr_count; i++)
            if (mp4_descr[i].dec_config_descr_len &&
                mp4_descr[i].es_id == desc_es_id) {
                AVIOContext pb;
                ffio_init_context(&pb, mp4_descr[i].dec_config_descr,
                                  mp4_descr[i].dec_config_descr_len, 0, NULL, NULL, NULL, NULL);
                ff_mp4_read_dec_config_descr(fc, st, &pb);
                if (st->codec->codec_id == AV_CODEC_ID_AAC &&
                    st->codec->extradata_size > 0)
                    st->need_parsing = 0;
                if (st->codec->codec_id == AV_CODEC_ID_MPEG4SYSTEMS)
                    mpegts_open_section_filter(ts, pid, m4sl_cb, ts, 1);
            }
        break;
    case 0x1F: /* FMC descriptor */
        get16(pp, desc_end);
        if (mp4_descr_count > 0 && (st->codec->codec_id == AV_CODEC_ID_AAC_LATM || st->request_probe > 0) &&
            mp4_descr->dec_config_descr_len && mp4_descr->es_id == pid) {
            AVIOContext pb;
            ffio_init_context(&pb, mp4_descr->dec_config_descr,
                              mp4_descr->dec_config_descr_len, 0, NULL, NULL, NULL, NULL);
            ff_mp4_read_dec_config_descr(fc, st, &pb);
            if (st->codec->codec_id == AV_CODEC_ID_AAC &&
                st->codec->extradata_size > 0) {
                /* extradata present: no further probing or parsing needed */
                st->request_probe = st->need_parsing = 0;
                st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            }
        }
        break;
    case 0x56: /* DVB teletext descriptor */
        language[0] = get8(pp, desc_end);
        language[1] = get8(pp, desc_end);
        language[2] = get8(pp, desc_end);
        language[3] = 0;
        av_dict_set(&st->metadata, "language", language, 0);
        break;
    case 0x59: /* subtitling descriptor */
        language[0] = get8(pp, desc_end);
        language[1] = get8(pp, desc_end);
        language[2] = get8(pp, desc_end);
        language[3] = 0;
        /* hearing impaired subtitles detection */
        switch (get8(pp, desc_end)) {
        case 0x20: /* DVB subtitles (for the hard of hearing) with no monitor aspect ratio criticality */
        case 0x21: /* DVB subtitles (for the hard of hearing) for display on 4:3 aspect ratio monitor */
        case 0x22: /* DVB subtitles (for the hard of hearing) for display on 16:9 aspect ratio monitor */
        case 0x23: /* DVB subtitles (for the hard of hearing) for display on 2.21:1 aspect ratio monitor */
        case 0x24: /* DVB subtitles (for the hard of hearing) for display on a high definition monitor */
        case 0x25: /* DVB subtitles (for the hard of hearing) with plano-stereoscopic disparity for display on a high definition monitor */
            st->disposition |= AV_DISPOSITION_HEARING_IMPAIRED;
            break;
        }
        /* the next 4 descriptor bytes are stored verbatim as extradata for
         * the subtitle decoder; differing ids on re-parse are only flagged */
        if (st->codec->extradata) {
            if (st->codec->extradata_size == 4 && memcmp(st->codec->extradata, *pp, 4))
                avpriv_request_sample(fc, "DVB sub with multiple IDs");
        } else {
            if (!ff_alloc_extradata(st->codec, 4)) {
                memcpy(st->codec->extradata, *pp, 4);
            }
        }
        *pp += 4;
        av_dict_set(&st->metadata, "language", language, 0);
        break;
    case 0x0a: /* ISO 639 language descriptor */
        /* may carry several 4-byte entries; languages are comma-joined */
        for (i = 0; i + 4 <= desc_len; i += 4) {
            language[i + 0] = get8(pp, desc_end);
            language[i + 1] = get8(pp, desc_end);
            language[i + 2] = get8(pp, desc_end);
            language[i + 3] = ',';
            switch (get8(pp, desc_end)) {
            case 0x01: st->disposition |= AV_DISPOSITION_CLEAN_EFFECTS; break;
            case 0x02: st->disposition |= AV_DISPOSITION_HEARING_IMPAIRED; break;
            case 0x03: st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED; break;
            }
        }
        if (i) {
            /* overwrite the trailing comma with the terminator */
            language[i - 1] = 0;
            av_dict_set(&st->metadata, "language", language, 0);
        }
        break;
    case 0x05: /* registration descriptor */
        st->codec->codec_tag = bytestream_get_le32(pp);
        av_dlog(fc, "reg_desc=%.4s\n", (char *)&st->codec->codec_tag);
        if (st->codec->codec_id == AV_CODEC_ID_NONE)
            mpegts_find_stream_type(st, st->codec->codec_tag, REGD_types);
        break;
    case 0x52: /* stream identifier descriptor */
        st->stream_identifier = 1 + get8(pp, desc_end);
        break;
    default:
        break;
    }
    /* always consume the whole descriptor, even if partially understood */
    *pp = desc_end;
    return 0;
}
 
/* Section callback for a Program Map Table section: registers the PCR pid,
 * parses the program-level descriptors (IOD, registration), then creates or
 * reuses one AVStream per listed elementary stream and feeds its ES
 * descriptor loop through ff_parse_mpeg2_descriptor(). */
static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len)
{
    MpegTSContext *ts = filter->u.section_filter.opaque;
    SectionHeader h1, *h = &h1;
    PESContext *pes;
    AVStream *st;
    const uint8_t *p, *p_end, *desc_list_end;
    int program_info_length, pcr_pid, pid, stream_type;
    int desc_list_len;
    uint32_t prog_reg_desc = 0; /* registration descriptor */

    Mp4Descr mp4_descr[MAX_MP4_DESCR_COUNT] = {{ 0 }};
    int mp4_descr_count = 0;
    int i;

    av_dlog(ts->stream, "PMT: len %i\n", section_len);
    hex_dump_debug(ts->stream, section, section_len);

    /* last 4 bytes of the section are the CRC */
    p_end = section + section_len - 4;
    p = section;
    if (parse_section_header(h, &p, p_end) < 0)
        return;

    av_dlog(ts->stream, "sid=0x%x sec_num=%d/%d\n",
            h->id, h->sec_num, h->last_sec_num);

    if (h->tid != PMT_TID)
        return;

    /* a new PMT supersedes the program's previous pid list */
    clear_program(ts, h->id);
    pcr_pid = get16(&p, p_end);
    if (pcr_pid < 0)
        return;
    pcr_pid &= 0x1fff;
    add_pid_to_pmt(ts, h->id, pcr_pid);
    set_pcr_pid(ts->stream, h->id, pcr_pid);

    av_dlog(ts->stream, "pcr_pid=0x%x\n", pcr_pid);

    program_info_length = get16(&p, p_end);
    if (program_info_length < 0)
        return;
    program_info_length &= 0xfff;
    /* program-level descriptor loop */
    while (program_info_length >= 2) {
        uint8_t tag, len;
        tag = get8(&p, p_end);
        len = get8(&p, p_end);

        av_dlog(ts->stream, "program tag: 0x%02x len=%d\n", tag, len);

        if (len > program_info_length - 2)
            //something else is broken, exit the program_descriptors_loop
            break;
        program_info_length -= len + 2;
        if (tag == 0x1d) { // IOD descriptor
            get8(&p, p_end); // scope
            get8(&p, p_end); // label
            len -= 2;
            /* collect MP4 descriptors used later for SL/FMC streams */
            mp4_read_iods(ts->stream, p, len, mp4_descr + mp4_descr_count,
                          &mp4_descr_count, MAX_MP4_DESCR_COUNT);
        } else if (tag == 0x05 && len >= 4) { // registration descriptor
            prog_reg_desc = bytestream_get_le32(&p);
            len -= 4;
        }
        p += len;
    }
    p += program_info_length;
    if (p >= p_end)
        goto out;

    // stop parsing after pmt, we found header
    if (!ts->stream->nb_streams)
        ts->stop_parse = 2;

    /* elementary stream loop */
    for (;;) {
        st = 0;
        pes = NULL;
        stream_type = get8(&p, p_end);
        if (stream_type < 0)
            break;
        pid = get16(&p, p_end);
        if (pid < 0)
            break;
        pid &= 0x1fff;
        if (pid == ts->current_pid)
            break;

        /* now create stream */
        if (ts->pids[pid] && ts->pids[pid]->type == MPEGTS_PES) {
            /* a PES filter already exists: reuse / lazily create its stream */
            pes = ts->pids[pid]->u.pes_filter.opaque;
            if (!pes->st) {
                pes->st = avformat_new_stream(pes->stream, NULL);
                if (!pes->st)
                    goto out;
                pes->st->id = pes->pid;
            }
            st = pes->st;
        } else if (stream_type != 0x13) {
            if (ts->pids[pid]) mpegts_close_filter(ts, ts->pids[pid]); //wrongly added sdt filter probably
            pes = add_pes_stream(ts, pid, pcr_pid);
            if (pes) {
                st = avformat_new_stream(pes->stream, NULL);
                if (!st)
                    goto out;
                st->id = pes->pid;
            }
        } else {
            /* stream_type 0x13: section-carried stream, no PES filter */
            int idx = ff_find_stream_index(ts->stream, pid);
            if (idx >= 0) {
                st = ts->stream->streams[idx];
            } else {
                st = avformat_new_stream(ts->stream, NULL);
                if (!st)
                    goto out;
                st->id = pid;
                st->codec->codec_type = AVMEDIA_TYPE_DATA;
            }
        }

        if (!st)
            goto out;

        /* only set the codec info once per PES context */
        if (pes && !pes->stream_type)
            mpegts_set_stream_info(st, pes, stream_type, prog_reg_desc);

        add_pid_to_pmt(ts, h->id, pid);

        ff_program_add_stream_index(ts->stream, h->id, st->index);

        desc_list_len = get16(&p, p_end);
        if (desc_list_len < 0)
            break;
        desc_list_len &= 0xfff;
        desc_list_end = p + desc_list_len;
        if (desc_list_end > p_end)
            break;
        /* per-ES descriptor loop */
        for (;;) {
            if (ff_parse_mpeg2_descriptor(ts->stream, st, stream_type, &p, desc_list_end,
                                          mp4_descr, mp4_descr_count, pid, ts) < 0)
                break;

            /* Blu-ray (HDMV) TrueHD streams carry a secondary AC-3 core */
            if (pes && prog_reg_desc == AV_RL32("HDMV") && stream_type == 0x83 && pes->sub_st) {
                ff_program_add_stream_index(ts->stream, h->id, pes->sub_st->index);
                pes->sub_st->codec->codec_tag = st->codec->codec_tag;
            }
        }
        p = desc_list_end;
    }

 out:
    for (i = 0; i < mp4_descr_count; i++)
        av_free(mp4_descr[i].dec_config_descr);
}
 
/* Section callback for the Program Association Table: records every
 * (service id, PMT pid) pair, opens a PMT section filter per program, and
 * drops AVPrograms that disappeared from the new PAT.
 *
 * Fix: av_new_program() can return NULL on allocation failure; the original
 * dereferenced the result unconditionally. */
static void pat_cb(MpegTSFilter *filter, const uint8_t *section, int section_len)
{
    MpegTSContext *ts = filter->u.section_filter.opaque;
    SectionHeader h1, *h = &h1;
    const uint8_t *p, *p_end;
    int sid, pmt_pid;
    AVProgram *program;

    av_dlog(ts->stream, "PAT:\n");
    hex_dump_debug(ts->stream, section, section_len);

    /* last 4 bytes of the section are the CRC */
    p_end = section + section_len - 4;
    p = section;
    if (parse_section_header(h, &p, p_end) < 0)
        return;
    if (h->tid != PAT_TID)
        return;

    ts->stream->ts_id = h->id;

    clear_programs(ts);
    for (;;) {
        sid = get16(&p, p_end);
        if (sid < 0)
            break;
        pmt_pid = get16(&p, p_end);
        if (pmt_pid < 0)
            break;
        pmt_pid &= 0x1fff;

        if (pmt_pid == ts->current_pid)
            break;

        av_dlog(ts->stream, "sid=0x%x pid=0x%x\n", sid, pmt_pid);

        if (sid == 0x0000) {
            /* NIT info */
        } else {
            MpegTSFilter *fil = ts->pids[pmt_pid];
            program = av_new_program(ts->stream, sid);
            /* may fail on OOM; still track the pid so the PMT is parsed */
            if (program) {
                program->program_num = sid;
                program->pmt_pid = pmt_pid;
            }
            /* close any filter wrongly installed on this pid (e.g. an SDT
             * filter) before (re)opening the PMT section filter */
            if (fil)
                if (fil->type != MPEGTS_SECTION
                    || fil->pid != pmt_pid
                    || fil->u.section_filter.section_cb != pmt_cb)
                    mpegts_close_filter(ts, ts->pids[pmt_pid]);

            if (!ts->pids[pmt_pid])
                mpegts_open_section_filter(ts, pmt_pid, pmt_cb, ts, 1);
            add_pat_entry(ts, sid);
            add_pid_to_pmt(ts, sid, 0); //add pat pid to program
            add_pid_to_pmt(ts, sid, pmt_pid);
        }
    }

    /* whole PAT consumed: remove AVPrograms no longer referenced by it */
    if (sid < 0) {
        int i, j;
        for (j = 0; j < ts->stream->nb_programs; j++) {
            for (i = 0; i < ts->nb_prg; i++)
                if (ts->prg[i].id == ts->stream->programs[j]->id)
                    break;
            if (i == ts->nb_prg)
                clear_avprogram(ts, ts->stream->programs[j]->id);
        }
    }
}
 
/* Section callback for the Service Description Table: extracts the
 * service_name / service_provider strings of each service into the matching
 * AVProgram's metadata.
 *
 * Fix: the descriptor length returned by get8() was not checked for
 * failure (-1); on truncated input desc_end became p - 1, which passed the
 * bounds test and let the inner loop step backwards. */
static void sdt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len)
{
    MpegTSContext *ts = filter->u.section_filter.opaque;
    SectionHeader h1, *h = &h1;
    const uint8_t *p, *p_end, *desc_list_end, *desc_end;
    int onid, val, sid, desc_list_len, desc_tag, desc_len, service_type;
    char *name, *provider_name;

    av_dlog(ts->stream, "SDT:\n");
    hex_dump_debug(ts->stream, section, section_len);

    /* last 4 bytes of the section are the CRC */
    p_end = section + section_len - 4;
    p = section;
    if (parse_section_header(h, &p, p_end) < 0)
        return;
    if (h->tid != SDT_TID)
        return;
    onid = get16(&p, p_end);
    if (onid < 0)
        return;
    val = get8(&p, p_end);
    if (val < 0)
        return;
    /* per-service loop */
    for (;;) {
        sid = get16(&p, p_end);
        if (sid < 0)
            break;
        val = get8(&p, p_end);
        if (val < 0)
            break;
        desc_list_len = get16(&p, p_end);
        if (desc_list_len < 0)
            break;
        desc_list_len &= 0xfff;
        desc_list_end = p + desc_list_len;
        if (desc_list_end > p_end)
            break;
        /* per-descriptor loop */
        for (;;) {
            desc_tag = get8(&p, desc_list_end);
            if (desc_tag < 0)
                break;
            desc_len = get8(&p, desc_list_end);
            if (desc_len < 0)
                break;
            desc_end = p + desc_len;
            if (desc_end > desc_list_end)
                break;

            av_dlog(ts->stream, "tag: 0x%02x len=%d\n",
                    desc_tag, desc_len);

            switch (desc_tag) {
            case 0x48: /* service descriptor */
                service_type = get8(&p, p_end);
                if (service_type < 0)
                    break;
                provider_name = getstr8(&p, p_end);
                if (!provider_name)
                    break;
                name = getstr8(&p, p_end);
                if (name) {
                    AVProgram *program = av_new_program(ts->stream, sid);
                    if (program) {
                        av_dict_set(&program->metadata, "service_name", name, 0);
                        av_dict_set(&program->metadata, "service_provider", provider_name, 0);
                    }
                }
                av_free(name);
                av_free(provider_name);
                break;
            default:
                break;
            }
            p = desc_end;
        }
        p = desc_list_end;
    }
}
 
static int parse_pcr(int64_t *ppcr_high, int *ppcr_low,
const uint8_t *packet);
 
/* Handle one TS packet: locate the filter registered for its pid, perform a
 * continuity-counter check, skip the adaptation field, then hand the payload
 * to either the section reassembler or the PES callback.
 * Returns 0, or the (negative) error from the PES callback. */
static int handle_packet(MpegTSContext *ts, const uint8_t *packet)
{
    AVFormatContext *s = ts->stream;
    MpegTSFilter *tss;
    int len, pid, cc, expected_cc, cc_ok, afc, is_start, is_discontinuity,
        has_adaptation, has_payload;
    const uint8_t *p, *p_end;
    int64_t pos;

    /* pid = low 13 bits of bytes 1-2 */
    pid = AV_RB16(packet + 1) & 0x1fff;
    if (pid && discard_pid(ts, pid))
        return 0;
    is_start = packet[1] & 0x40; /* payload_unit_start_indicator */
    tss = ts->pids[pid];
    if (ts->auto_guess && tss == NULL && is_start) {
        /* no PSI seen for this pid: create a PES filter on the fly */
        add_pes_stream(ts, pid, -1);
        tss = ts->pids[pid];
    }
    if (!tss)
        return 0;
    ts->current_pid = pid;

    afc = (packet[3] >> 4) & 3; /* adaptation_field_control */
    if (afc == 0) /* reserved value */
        return 0;
    has_adaptation = afc & 2;
    has_payload = afc & 1;
    is_discontinuity = has_adaptation
        && packet[4] != 0 /* with length > 0 */
        && (packet[5] & 0x80); /* and discontinuity indicated */

    /* continuity check (currently not used) */
    cc = (packet[3] & 0xf);
    /* cc only advances on packets that carry payload */
    expected_cc = has_payload ? (tss->last_cc + 1) & 0x0f : tss->last_cc;
    cc_ok = pid == 0x1FFF // null packet PID
            || is_discontinuity
            || tss->last_cc < 0
            || expected_cc == cc;

    tss->last_cc = cc;
    if (!cc_ok) {
        av_log(ts->stream, AV_LOG_DEBUG,
               "Continuity check failed for pid %d expected %d got %d\n",
               pid, expected_cc, cc);
        /* mark the packet as corrupt rather than dropping it */
        if (tss->type == MPEGTS_PES) {
            PESContext *pc = tss->u.pes_filter.opaque;
            pc->flags |= AV_PKT_FLAG_CORRUPT;
        }
    }

    if (!has_payload)
        return 0;
    p = packet + 4;
    if (has_adaptation) {
        /* skip adaptation field */
        p += p[0] + 1;
    }
    /* if past the end of packet, ignore */
    p_end = packet + TS_PACKET_SIZE;
    if (p >= p_end)
        return 0;

    /* remember where this packet started, for packet-size re-analysis */
    pos = avio_tell(ts->stream->pb);
    if (pos >= 0) {
        av_assert0(pos >= TS_PACKET_SIZE);
        ts->pos47_full = pos - TS_PACKET_SIZE;
    }

    if (tss->type == MPEGTS_SECTION) {
        if (is_start) {
            /* pointer field present */
            len = *p++;
            if (p + len > p_end)
                return 0;
            if (len && cc_ok) {
                /* write remaining section bytes */
                write_section_data(s, tss,
                                   p, len, 0);
                /* check whether filter has been closed */
                if (!ts->pids[pid])
                    return 0;
            }
            p += len;
            if (p < p_end) {
                write_section_data(s, tss,
                                   p, p_end - p, 1);
            }
        } else {
            if (cc_ok) {
                write_section_data(s, tss,
                                   p, p_end - p, 0);
            }
        }
    } else {
        int ret;
        int64_t pcr = -1;
        int64_t pcr_h;
        int pcr_l;
        if (parse_pcr(&pcr_h, &pcr_l, packet) == 0)
            pcr = pcr_h * 300 + pcr_l;
        // Note: The position here points actually behind the current packet.
        if ((ret = tss->u.pes_filter.pes_cb(tss, p, p_end - p, is_start,
                                            pos - ts->raw_packet_size, pcr)) < 0)
            return ret;
    }

    return 0;
}
 
/* Track the distance between consecutive sync bytes and, once enough
 * samples agree on one of the known raw packet sizes (188/192/204),
 * switch the demuxer over to it. */
static void reanalyze(MpegTSContext *ts) {
    AVIOContext *pb = ts->stream->pb;
    int64_t delta = avio_tell(pb);
    if (delta < 0)
        return;
    delta -= ts->pos47_full;

    if (delta == TS_PACKET_SIZE)
        ts->size_stat[0]++;
    else if (delta == TS_DVHS_PACKET_SIZE)
        ts->size_stat[1]++;
    else if (delta == TS_FEC_PACKET_SIZE)
        ts->size_stat[2]++;

    if (++ts->size_stat_count > SIZE_STAT_THRESHOLD) {
        int detected = 0;
        if (ts->size_stat[0] > SIZE_STAT_THRESHOLD)
            detected = TS_PACKET_SIZE;
        else if (ts->size_stat[1] > SIZE_STAT_THRESHOLD)
            detected = TS_DVHS_PACKET_SIZE;
        else if (ts->size_stat[2] > SIZE_STAT_THRESHOLD)
            detected = TS_FEC_PACKET_SIZE;
        if (detected && detected != ts->raw_packet_size) {
            av_log(ts->stream, AV_LOG_WARNING, "changing packet size to %d\n", detected);
            ts->raw_packet_size = detected;
        }
        /* restart the statistics window */
        ts->size_stat_count = 0;
        memset(ts->size_stat, 0, sizeof(ts->size_stat));
    }
}
 
/* XXX: try to find a better synchro over several packets (use
get_packet_size() ?) */
/* Scan forward byte-by-byte for the 0x47 sync byte, position the stream on
 * it and re-estimate the packet size. Returns 0 on success, -1 on EOF or
 * when MAX_RESYNC_SIZE bytes were scanned without finding sync. */
static int mpegts_resync(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    int i;

    for (i = 0; i < MAX_RESYNC_SIZE; i++) {
        int byte = avio_r8(pb);
        if (url_feof(pb))
            return -1;
        if (byte == 0x47) {
            /* step back onto the sync byte itself */
            avio_seek(pb, -1, SEEK_CUR);
            reanalyze(s->priv_data);
            return 0;
        }
    }
    av_log(s, AV_LOG_ERROR, "max resync size reached, could not find sync byte\n");
    /* no sync found */
    return -1;
}
 
/* return -1 if error or EOF. Return 0 if OK. */
/* Read one 188-byte TS packet, resynchronizing on the 0x47 sync byte as
 * needed. *data points at the packet (possibly inside the AVIO buffer
 * rather than buf). Returns 0 on success, a negative AVERROR otherwise. */
static int read_packet(AVFormatContext *s, uint8_t *buf, int raw_packet_size, const uint8_t **data)
{
    AVIOContext *pb = s->pb;

    for (;;) {
        int len = ffio_read_indirect(pb, buf, TS_PACKET_SIZE, data);
        if (len != TS_PACKET_SIZE)
            return len < 0 ? len : AVERROR_EOF;
        /* sync byte present: packet accepted */
        if ((*data)[0] == 0x47)
            return 0;
        /* lost sync: rewind at most one raw packet and scan for 0x47 */
        {
            uint64_t pos = avio_tell(pb);
            avio_seek(pb, -FFMIN(raw_packet_size, pos), SEEK_CUR);
        }
        if (mpegts_resync(s) < 0)
            return AVERROR(EAGAIN);
    }
}
 
/* Skip whatever trailing bytes (FEC / timestamp) follow the 188-byte
 * payload in oversized raw packet formats (192/204 bytes). */
static void finished_reading_packet(AVFormatContext *s, int raw_packet_size)
{
    int trailer = raw_packet_size - TS_PACKET_SIZE;
    if (trailer > 0)
        avio_skip(s->pb, trailer);
}
 
/* Demultiplex up to nb_packets TS packets (0 = unbounded) from the input,
 * dispatching each one to handle_packet(). Flushes all PES reassembly state
 * first if a seek is detected. Returns 0 on a normal stop, AVERROR(EAGAIN)
 * when the packet budget is exhausted or a callback requested a hard stop,
 * or the error from the packet reader / PES callback.
 *
 * Fix: the stop condition mixed && and || without parentheses; the
 * intended grouping is now explicit (behavior unchanged, && binds tighter). */
static int handle_packets(MpegTSContext *ts, int nb_packets)
{
    AVFormatContext *s = ts->stream;
    uint8_t packet[TS_PACKET_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    const uint8_t *data;
    int packet_num, ret = 0;

    if (avio_tell(s->pb) != ts->last_pos) {
        int i;
        av_dlog(ts->stream, "Skipping after seek\n");
        /* seek detected, flush pes buffer */
        for (i = 0; i < NB_PID_MAX; i++) {
            if (ts->pids[i]) {
                if (ts->pids[i]->type == MPEGTS_PES) {
                    PESContext *pes = ts->pids[i]->u.pes_filter.opaque;
                    av_buffer_unref(&pes->buffer);
                    pes->data_index = 0;
                    pes->state = MPEGTS_SKIP; /* skip until pes header */
                    pes->last_pcr = -1;
                }
                /* continuity counters are meaningless after a seek */
                ts->pids[i]->last_cc = -1;
            }
        }
    }

    ts->stop_parse = 0;
    packet_num = 0;
    /* zeroed padding lets downstream parsers over-read safely */
    memset(packet + TS_PACKET_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    for (;;) {
        packet_num++;
        if ((nb_packets != 0 && packet_num >= nb_packets) ||
            ts->stop_parse > 1) {
            ret = AVERROR(EAGAIN);
            break;
        }
        if (ts->stop_parse > 0)
            break;

        ret = read_packet(s, packet, ts->raw_packet_size, &data);
        if (ret != 0)
            break;
        ret = handle_packet(ts, data);
        finished_reading_packet(s, ts->raw_packet_size);
        if (ret != 0)
            break;
    }
    ts->last_pos = avio_tell(s->pb);
    return ret;
}
 
/* Probe function: score the buffer in 100-packet blocks, trying all three
 * known raw packet sizes (188/192/204) per block and keeping the best score.
 * Returns a probe score or -1 when the buffer is too small / not TS. */
static int mpegts_probe(AVProbeData *p)
{
    const int size = p->buf_size;
    int maxscore = 0;
    int sumscore = 0;
    int i;
    /* conservative: use the largest packet size for the count estimate */
    int check_count = size / TS_FEC_PACKET_SIZE;
#define CHECK_COUNT 10
#define CHECK_BLOCK 100

    if (check_count < CHECK_COUNT)
        return -1;

    for (i = 0; i < check_count; i += CHECK_BLOCK) {
        int left = FFMIN(check_count - i, CHECK_BLOCK);
        /* score the same region under each packet-size hypothesis */
        int score      = analyze(p->buf + TS_PACKET_SIZE      * i, TS_PACKET_SIZE      * left, TS_PACKET_SIZE     , NULL);
        int dvhs_score = analyze(p->buf + TS_DVHS_PACKET_SIZE * i, TS_DVHS_PACKET_SIZE * left, TS_DVHS_PACKET_SIZE, NULL);
        int fec_score  = analyze(p->buf + TS_FEC_PACKET_SIZE  * i, TS_FEC_PACKET_SIZE  * left, TS_FEC_PACKET_SIZE , NULL);
        score = FFMAX3(score, dvhs_score, fec_score);
        sumscore += score;
        maxscore = FFMAX(maxscore, score);
    }

    /* normalize both statistics to the CHECK_COUNT scale */
    sumscore = sumscore * CHECK_COUNT / check_count;
    maxscore = maxscore * CHECK_COUNT / CHECK_BLOCK;

    av_dlog(0, "TS score: %d %d\n", sumscore, maxscore);

    if      (sumscore > 6) return AVPROBE_SCORE_MAX     + sumscore - CHECK_COUNT;
    else if (maxscore > 6) return AVPROBE_SCORE_MAX / 2 + sumscore - CHECK_COUNT;
    else                   return -1;
}
 
/* return the 90kHz PCR and the extension for the 27MHz PCR. return
(-1) if not available */
/* Extract the PCR from a TS packet's adaptation field.
 * On success *ppcr_high receives the 33-bit 90 kHz base and *ppcr_low the
 * 9-bit 27 MHz extension, and 0 is returned; -1 means no PCR is present. */
static int parse_pcr(int64_t *ppcr_high, int *ppcr_low,
                     const uint8_t *packet)
{
    const uint8_t *af;
    int af_len, af_flags;
    unsigned int base32;

    /* adaptation_field_control must indicate an adaptation field (2 or 3) */
    if (((packet[3] >> 4) & 3) <= 1)
        return -1;
    af = packet + 4;
    af_len = *af++;
    if (!af_len)
        return -1;
    af_flags = *af++;
    af_len--;
    if (!(af_flags & 0x10)) /* PCR_flag not set */
        return -1;
    if (af_len < 6)         /* need the full 48-bit PCR field */
        return -1;
    /* 33-bit base: 32 bits big-endian plus the top bit of the 5th byte */
    base32 = ((unsigned)af[0] << 24) | (af[1] << 16) | (af[2] << 8) | af[3];
    *ppcr_high = ((int64_t)base32 << 1) | (af[4] >> 7);
    /* 9-bit extension: low bit of the 5th byte plus the 6th byte */
    *ppcr_low = ((af[4] & 1) << 8) | af[5];
    return 0;
}
 
/* Rewind the input to pos. Seeking is attempted even on non-seekable input
 * because the probe buffer is usually large enough to absorb it; a failure
 * is only an error when the stream claims to be seekable. */
static void seek_back(AVFormatContext *s, AVIOContext *pb, int64_t pos) {
    if (avio_seek(pb, pos, SEEK_SET) >= 0)
        return;
    av_log(s, pb->seekable ? AV_LOG_ERROR : AV_LOG_INFO, "Unable to seek back to the start\n");
}
 
/* Demuxer header reader, shared by the "mpegts" and "mpegtsraw" demuxers.
 * Detects the raw packet size, then either scans the PSI tables (normal
 * demux) or estimates the bitrate from two PCRs (raw mode). */
static int mpegts_read_header(AVFormatContext *s)
{
    MpegTSContext *ts = s->priv_data;
    AVIOContext *pb = s->pb;
    uint8_t buf[8 * 1024] = {0};
    int len;
    int64_t pos;

    ffio_ensure_seekback(pb, s->probesize);

    /* read the first 8192 bytes to get packet size */
    pos = avio_tell(pb);
    len = avio_read(pb, buf, sizeof(buf));
    ts->raw_packet_size = get_packet_size(buf, len);
    if (ts->raw_packet_size <= 0) {
        av_log(s, AV_LOG_WARNING, "Could not detect TS packet size, defaulting to non-FEC/DVHS\n");
        ts->raw_packet_size = TS_PACKET_SIZE;
    }
    ts->stream = s;
    ts->auto_guess = 0;

    if (s->iformat == &ff_mpegts_demuxer) {
        /* normal demux */

        /* first do a scan to get all the services */
        seek_back(s, pb, pos);

        mpegts_open_section_filter(ts, SDT_PID, sdt_cb, ts, 1);

        mpegts_open_section_filter(ts, PAT_PID, pat_cb, ts, 1);

        handle_packets(ts, s->probesize / ts->raw_packet_size);
        /* if could not find service, enable auto_guess */

        ts->auto_guess = 1;

        av_dlog(ts->stream, "tuning done\n");

        /* streams may still appear after the header scan */
        s->ctx_flags |= AVFMTCTX_NOHEADER;
    } else {
        AVStream *st;
        int pcr_pid, pid, nb_packets, nb_pcrs, ret, pcr_l;
        int64_t pcrs[2], pcr_h;
        int packet_count[2];
        uint8_t packet[TS_PACKET_SIZE];
        const uint8_t *data;

        /* only read packets */

        st = avformat_new_stream(s, NULL);
        if (!st)
            goto fail;
        avpriv_set_pts_info(st, 60, 1, 27000000);
        st->codec->codec_type = AVMEDIA_TYPE_DATA;
        st->codec->codec_id = AV_CODEC_ID_MPEG2TS;

        /* we iterate until we find two PCRs to estimate the bitrate */
        pcr_pid = -1;
        nb_pcrs = 0;
        nb_packets = 0;
        for (;;) {
            ret = read_packet(s, packet, ts->raw_packet_size, &data);
            if (ret < 0)
                goto fail;
            pid = AV_RB16(data + 1) & 0x1fff;
            /* lock onto the first pid seen to carry a PCR */
            if ((pcr_pid == -1 || pcr_pid == pid) &&
                parse_pcr(&pcr_h, &pcr_l, data) == 0) {
                finished_reading_packet(s, ts->raw_packet_size);
                pcr_pid = pid;
                packet_count[nb_pcrs] = nb_packets;
                pcrs[nb_pcrs] = pcr_h * 300 + pcr_l;
                nb_pcrs++;
                if (nb_pcrs >= 2)
                    break;
            } else {
                finished_reading_packet(s, ts->raw_packet_size);
            }
            nb_packets++;
        }

        /* NOTE1: the bitrate is computed without the FEC */
        /* NOTE2: it is only the bitrate of the start of the stream */
        ts->pcr_incr = (pcrs[1] - pcrs[0]) / (packet_count[1] - packet_count[0]);
        ts->cur_pcr = pcrs[0] - ts->pcr_incr * packet_count[0];
        s->bit_rate = (TS_PACKET_SIZE * 8) * 27e6 / ts->pcr_incr;
        st->codec->bit_rate = s->bit_rate;
        st->start_time = ts->cur_pcr;
        av_dlog(ts->stream, "start=%0.3f pcr=%0.3f incr=%d\n",
                st->start_time / 1000000.0, pcrs[0] / 27e6, ts->pcr_incr);
    }

    seek_back(s, pb, pos);
    return 0;
 fail:
    return -1;
}
 
#define MAX_PACKET_READAHEAD ((128 * 1024) / 188)
 
/* Raw-mode packet reader: returns one whole TS packet per AVPacket and,
 * when mpeg2ts_compute_pcr is set, interpolates a PCR-based pts/duration
 * for each packet. */
static int mpegts_raw_read_packet(AVFormatContext *s,
                                  AVPacket *pkt)
{
    MpegTSContext *ts = s->priv_data;
    int ret, i;
    int64_t pcr_h, next_pcr_h, pos;
    int pcr_l, next_pcr_l;
    uint8_t pcr_buf[12];
    const uint8_t *data;

    if (av_new_packet(pkt, TS_PACKET_SIZE) < 0)
        return AVERROR(ENOMEM);
    pkt->pos = avio_tell(s->pb);
    ret = read_packet(s, pkt->data, ts->raw_packet_size, &data);
    if (ret < 0) {
        av_free_packet(pkt);
        return ret;
    }
    /* read_packet may point data into the AVIO buffer instead of pkt->data */
    if (data != pkt->data)
        memcpy(pkt->data, data, ts->raw_packet_size);
    finished_reading_packet(s, ts->raw_packet_size);
    if (ts->mpeg2ts_compute_pcr) {
        /* compute exact PCR for each packet */
        if (parse_pcr(&pcr_h, &pcr_l, pkt->data) == 0) {
            /* we read the next PCR (XXX: optimize it by using a bigger buffer */
            pos = avio_tell(s->pb);
            for (i = 0; i < MAX_PACKET_READAHEAD; i++) {
                avio_seek(s->pb, pos + i * ts->raw_packet_size, SEEK_SET);
                /* NOTE(review): short read not checked here; parse_pcr on a
                 * stale/partial pcr_buf would just fail the match */
                avio_read(s->pb, pcr_buf, 12);
                if (parse_pcr(&next_pcr_h, &next_pcr_l, pcr_buf) == 0) {
                    /* XXX: not precise enough */
                    ts->pcr_incr = ((next_pcr_h - pcr_h) * 300 + (next_pcr_l - pcr_l)) /
                        (i + 1);
                    break;
                }
            }
            avio_seek(s->pb, pos, SEEK_SET);
            /* no next PCR found: we use previous increment */
            ts->cur_pcr = pcr_h * 300 + pcr_l;
        }
        pkt->pts = ts->cur_pcr;
        pkt->duration = ts->pcr_incr;
        ts->cur_pcr += ts->pcr_incr;
    }
    pkt->stream_index = 0;
    return 0;
}
 
/* Normal-mode packet reader: pumps handle_packets() until a PES callback has
 * emitted a packet into ts->pkt; on error, flushes any partially assembled
 * PES payload as a final packet. */
static int mpegts_read_packet(AVFormatContext *s,
                              AVPacket *pkt)
{
    MpegTSContext *ts = s->priv_data;
    int ret, i;

    /* size == -1 marks "no packet produced yet" */
    pkt->size = -1;
    ts->pkt = pkt;
    ret = handle_packets(ts, 0);
    if (ret < 0) {
        av_free_packet(ts->pkt);
        /* flush pes data left */
        for (i = 0; i < NB_PID_MAX; i++) {
            if (ts->pids[i] && ts->pids[i]->type == MPEGTS_PES) {
                PESContext *pes = ts->pids[i]->u.pes_filter.opaque;
                if (pes->state == MPEGTS_PAYLOAD && pes->data_index > 0) {
                    new_pes_packet(pes, pkt);
                    pes->state = MPEGTS_SKIP;
                    ret = 0;
                    break;
                }
            }
        }
    }

    /* success reported but nothing was emitted: ask the caller to retry */
    if (!ret && pkt->size < 0)
        ret = AVERROR(EINTR);
    return ret;
}
 
/* Release everything owned by the context: program tables and every
 * remaining section/PES filter. */
static void mpegts_free(MpegTSContext *ts)
{
    int pid;

    clear_programs(ts);

    for (pid = 0; pid < NB_PID_MAX; pid++) {
        if (ts->pids[pid])
            mpegts_close_filter(ts, ts->pids[pid]);
    }
}
 
/* Demuxer close callback: free all filters and program state.
 * The context itself is owned and freed by lavf. */
static int mpegts_read_close(AVFormatContext *s)
{
    mpegts_free((MpegTSContext *)s->priv_data);
    return 0;
}
 
/* Seek helper: starting at *ppos (rounded up to a packet boundary), scan
 * forward for a packet on the stream's PCR pid that carries a PCR.
 * On success stores the packet position in *ppos and returns the PCR base;
 * returns AV_NOPTS_VALUE on I/O error or when pos_limit is reached. */
static av_unused int64_t mpegts_get_pcr(AVFormatContext *s, int stream_index,
                                        int64_t *ppos, int64_t pos_limit)
{
    MpegTSContext *ts = s->priv_data;
    int64_t pos, timestamp;
    uint8_t buf[TS_PACKET_SIZE];
    int pcr_l, pcr_pid = ((PESContext *)s->streams[stream_index]->priv_data)->pcr_pid;
    /* phase of the sync byte within a raw packet */
    int pos47 = ts->pos47_full % ts->raw_packet_size;
    /* round *ppos up to the next packet boundary, preserving the phase */
    pos = ((*ppos + ts->raw_packet_size - 1 - pos47) / ts->raw_packet_size) * ts->raw_packet_size + pos47;
    while (pos < pos_limit) {
        if (avio_seek(s->pb, pos, SEEK_SET) < 0)
            return AV_NOPTS_VALUE;
        if (avio_read(s->pb, buf, TS_PACKET_SIZE) != TS_PACKET_SIZE)
            return AV_NOPTS_VALUE;
        if (buf[0] != 0x47) {
            /* lost sync: rewind one packet and resynchronize */
            avio_seek(s->pb, -TS_PACKET_SIZE, SEEK_CUR);
            if (mpegts_resync(s) < 0)
                return AV_NOPTS_VALUE;
            pos = avio_tell(s->pb);
            continue;
        }
        if ((pcr_pid < 0 || (AV_RB16(buf + 1) & 0x1fff) == pcr_pid) &&
            parse_pcr(&timestamp, &pcr_l, buf) == 0) {
            *ppos = pos;
            return timestamp;
        }
        pos += ts->raw_packet_size;
    }

    return AV_NOPTS_VALUE;
}
 
/* read_timestamp callback: seek to *ppos (rounded to a packet boundary) and
 * read frames until one on stream_index with a valid dts and position is
 * found; also feeds the seek index along the way.
 * Returns the dts and updates *ppos, or AV_NOPTS_VALUE on failure. */
static int64_t mpegts_get_dts(AVFormatContext *s, int stream_index,
                              int64_t *ppos, int64_t pos_limit)
{
    MpegTSContext *ts = s->priv_data;
    int64_t pos;
    /* phase of the sync byte within a raw packet */
    int pos47 = ts->pos47_full % ts->raw_packet_size;
    pos = ((*ppos + ts->raw_packet_size - 1 - pos47) / ts->raw_packet_size) * ts->raw_packet_size + pos47;
    ff_read_frame_flush(s);
    if (avio_seek(s->pb, pos, SEEK_SET) < 0)
        return AV_NOPTS_VALUE;
    while (pos < pos_limit) {
        int ret;
        AVPacket pkt;
        av_init_packet(&pkt);
        ret = av_read_frame(s, &pkt);
        if (ret < 0)
            return AV_NOPTS_VALUE;
        /* only the payload is freed; the pkt fields below remain readable */
        av_free_packet(&pkt);
        if (pkt.dts != AV_NOPTS_VALUE && pkt.pos >= 0) {
            ff_reduce_index(s, pkt.stream_index);
            av_add_index_entry(s->streams[pkt.stream_index], pkt.pos, pkt.dts, 0, 0, AVINDEX_KEYFRAME /* FIXME keyframe? */);
            if (pkt.stream_index == stream_index && pkt.pos >= *ppos) {
                *ppos = pkt.pos;
                return pkt.dts;
            }
        }
        pos = pkt.pos;
    }

    return AV_NOPTS_VALUE;
}
 
/**************************************************************/
/* parsing functions - called from other demuxers such as RTP */
 
/* Create a stand-alone TS parser context (no AVIOContext attached),
 * currently used by the RTP demuxer. Returns NULL on allocation failure. */
MpegTSContext *ff_mpegts_parse_open(AVFormatContext *s)
{
    MpegTSContext *ts = av_mallocz(sizeof(*ts));

    if (!ts)
        return NULL;
    /* no stream case, currently used by RTP */
    ts->raw_packet_size = TS_PACKET_SIZE;
    ts->stream = s;
    ts->auto_guess = 1;
    mpegts_open_section_filter(ts, SDT_PID, sdt_cb, ts, 1);
    mpegts_open_section_filter(ts, PAT_PID, pat_cb, ts, 1);

    return ts;
}
 
/* return the consumed length if a packet was output, or -1 if no
packet is output */
/* Feed a buffer of TS data to the parser. Non-sync bytes are skipped one at
 * a time. Returns the number of bytes consumed once a packet has been
 * emitted into pkt, or -1 when the buffer ran out without emitting one. */
int ff_mpegts_parse_packet(MpegTSContext *ts, AVPacket *pkt,
                           const uint8_t *buf, int len)
{
    const int total = len;

    ts->pkt = pkt;
    while (len >= TS_PACKET_SIZE) {
        ts->stop_parse = 0;
        if (buf[0] != 0x47) {
            /* not on a sync byte: slide forward by one */
            buf++;
            len--;
            continue;
        }
        handle_packet(ts, buf);
        buf += TS_PACKET_SIZE;
        len -= TS_PACKET_SIZE;
        if (ts->stop_parse == 1)
            return total - len;
    }
    return -1;
}
 
/* Free a parser context created by ff_mpegts_parse_open(), including the
 * context structure itself. */
void ff_mpegts_parse_close(MpegTSContext *ts)
{
    mpegts_free(ts);
    av_free(ts);
}
 
/* Standard MPEG-TS demuxer: full PSI (PAT/PMT/SDT) parsing and PES
 * reassembly into per-stream AVPackets. */
AVInputFormat ff_mpegts_demuxer = {
    .name           = "mpegts",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-TS (MPEG-2 Transport Stream)"),
    .priv_data_size = sizeof(MpegTSContext),
    .read_probe     = mpegts_probe,
    .read_header    = mpegts_read_header,
    .read_packet    = mpegts_read_packet,
    .read_close     = mpegts_read_close,
    .read_timestamp = mpegts_get_dts,
    .flags          = AVFMT_SHOW_IDS | AVFMT_TS_DISCONT,
    .priv_class     = &mpegts_class,
};
 
/* Raw MPEG-TS demuxer: emits whole TS packets without PSI parsing.
 * No read_probe — this variant must be selected explicitly. */
AVInputFormat ff_mpegtsraw_demuxer = {
    .name           = "mpegtsraw",
    .long_name      = NULL_IF_CONFIG_SMALL("raw MPEG-TS (MPEG-2 Transport Stream)"),
    .priv_data_size = sizeof(MpegTSContext),
    .read_header    = mpegts_read_header,
    .read_packet    = mpegts_raw_read_packet,
    .read_close     = mpegts_read_close,
    .read_timestamp = mpegts_get_dts,
    .flags          = AVFMT_SHOW_IDS | AVFMT_TS_DISCONT,
    .priv_class     = &mpegtsraw_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/mpegts.h
0,0 → 1,107
/*
* MPEG2 transport stream defines
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_MPEGTS_H
#define AVFORMAT_MPEGTS_H
 
#include "avformat.h"
 
#define TS_FEC_PACKET_SIZE 204
#define TS_DVHS_PACKET_SIZE 192
#define TS_PACKET_SIZE 188
#define TS_MAX_PACKET_SIZE 204
 
#define NB_PID_MAX 8192
#define MAX_SECTION_SIZE 4096
 
/* pids */
#define PAT_PID 0x0000
#define SDT_PID 0x0011
 
/* table ids */
#define PAT_TID 0x00
#define PMT_TID 0x02
#define M4OD_TID 0x05
#define SDT_TID 0x42
 
#define STREAM_TYPE_VIDEO_MPEG1 0x01
#define STREAM_TYPE_VIDEO_MPEG2 0x02
#define STREAM_TYPE_AUDIO_MPEG1 0x03
#define STREAM_TYPE_AUDIO_MPEG2 0x04
#define STREAM_TYPE_PRIVATE_SECTION 0x05
#define STREAM_TYPE_PRIVATE_DATA 0x06
#define STREAM_TYPE_AUDIO_AAC 0x0f
#define STREAM_TYPE_AUDIO_AAC_LATM 0x11
#define STREAM_TYPE_VIDEO_MPEG4 0x10
#define STREAM_TYPE_VIDEO_H264 0x1b
#define STREAM_TYPE_VIDEO_CAVS 0x42
#define STREAM_TYPE_VIDEO_VC1 0xea
#define STREAM_TYPE_VIDEO_DIRAC 0xd1
 
#define STREAM_TYPE_AUDIO_AC3 0x81
#define STREAM_TYPE_AUDIO_DTS 0x8a
 
typedef struct MpegTSContext MpegTSContext;
 
MpegTSContext *ff_mpegts_parse_open(AVFormatContext *s);
int ff_mpegts_parse_packet(MpegTSContext *ts, AVPacket *pkt,
const uint8_t *buf, int len);
void ff_mpegts_parse_close(MpegTSContext *ts);
 
/* MPEG-4 sync-layer (SL) packet header configuration, filled from an
 * SLConfigDescriptor. The use_* fields are flags; the *_len / *_res fields
 * are presumably the bit widths / resolution of the corresponding SL header
 * fields — confirm against the descriptor parser. */
typedef struct SLConfigDescr {
    int use_au_start;
    int use_au_end;
    int use_rand_acc_pt;
    int use_padding;
    int use_timestamps;
    int use_idle;
    int timestamp_res;
    int timestamp_len;
    int ocr_len;
    int au_len;
    int inst_bitrate_len;
    int degr_prior_len;
    int au_seq_num_len;
    int packet_seq_num_len;
} SLConfigDescr;
 
/* Per-elementary-stream data collected from MPEG-4 object descriptors
 * (IOD / M4OD sections). */
typedef struct Mp4Descr {
    int es_id;                  /* elementary stream id this entry applies to */
    int dec_config_descr_len;
    uint8_t *dec_config_descr;  /* heap-owned copy; caller frees with av_free() */
    SLConfigDescr sl;
} Mp4Descr;
 
/**
* Parse an MPEG-2 descriptor
* @param[in] fc Format context (used for logging only)
* @param st Stream
* @param stream_type STREAM_TYPE_xxx
* @param pp Descriptor buffer pointer
* @param desc_list_end End of buffer
* @return <0 to stop processing
*/
int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type,
const uint8_t **pp, const uint8_t *desc_list_end,
Mp4Descr *mp4_descr, int mp4_descr_count, int pid,
MpegTSContext *ts);
 
#endif /* AVFORMAT_MPEGTS_H */
/contrib/sdk/sources/ffmpeg/libavformat/mpegtsenc.c
0,0 → 1,1285
/*
* MPEG2 transport stream (aka DVB) muxer
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/bswap.h"
#include "libavutil/crc.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "libavcodec/internal.h"
#include "avformat.h"
#include "internal.h"
#include "mpegts.h"
 
#define PCR_TIME_BASE 27000000
 
/* write DVB SI sections */
 
/*********************************************/
/* mpegts section writer */
 
/* State for writing one SI table onto one pid. */
typedef struct MpegTSSection {
    int pid;
    int cc;      /* continuity counter state for this pid */
    void (*write_packet)(struct MpegTSSection *s, const uint8_t *packet);
    void *opaque;  /* passed through to write_packet */
} MpegTSSection;
 
/* One service (program) being muxed: its PMT writer state, identification
 * and PCR scheduling counters. */
typedef struct MpegTSService {
    MpegTSSection pmt; /* MPEG2 pmt table context */
    int sid;           /* service ID */
    char *name;
    char *provider_name;
    int pcr_pid;
    int pcr_packet_count;   /* packets since the last PCR was emitted */
    int pcr_packet_period;
} MpegTSService;
 
/* Muxer private context (AVFormatContext.priv_data). */
typedef struct MpegTSWrite {
    const AVClass *av_class;
    MpegTSSection pat;          /* MPEG2 pat table */
    MpegTSSection sdt;          /* MPEG2 sdt table context */
    MpegTSService **services;   /* DVB services (a single one is created) */
    int sdt_packet_count;       /* packets since the SDT was last sent */
    int sdt_packet_period;      /* resend the SDT every this many packets */
    int pat_packet_count;       /* packets since the PAT/PMTs were last sent */
    int pat_packet_period;      /* resend PAT/PMTs every this many packets */
    int nb_services;
    int onid;                   /* original network id, written into the SDT */
    int tsid;                   /* transport stream id (PAT/SDT) */
    int64_t first_pcr;          /* PCR offset applied in CBR mode */
    int mux_rate; ///< set to 1 when VBR
    int pes_payload_size;       /* min PES payload, rounded to whole TS packets */

    /* AVOption-backed settings */
    int transport_stream_id;
    int original_network_id;
    int service_id;

    int pmt_start_pid;          /* first PID used for PMTs */
    int start_pid;              /* first PID used for elementary streams */
    int m2ts_mode;              /* -1 = auto-detect from the file extension */

    int reemit_pat_pmt; // backward compatibility

#define MPEGTS_FLAG_REEMIT_PAT_PMT 0x01
#define MPEGTS_FLAG_AAC_LATM       0x02
    int flags;                  /* MPEGTS_FLAG_* bitmask */
    int copyts;                 /* >= 1: don't offset dts/pts by the mux delay */
    int tables_version;         /* version number written into PAT/PMT/SDT */
} MpegTSWrite;
 
/* a PES packet header is generated every DEFAULT_PES_HEADER_FREQ packets */
#define DEFAULT_PES_HEADER_FREQ 16
#define DEFAULT_PES_PAYLOAD_SIZE ((DEFAULT_PES_HEADER_FREQ - 1) * 184 + 170)
 
/* AVOptions exposed by the muxer; offsets point into MpegTSWrite. */
static const AVOption options[] = {
    { "mpegts_transport_stream_id", "Set transport_stream_id field.",
      offsetof(MpegTSWrite, transport_stream_id), AV_OPT_TYPE_INT, {.i64 = 0x0001 }, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_original_network_id", "Set original_network_id field.",
      offsetof(MpegTSWrite, original_network_id), AV_OPT_TYPE_INT, {.i64 = 0x0001 }, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_service_id", "Set service_id field.",
      offsetof(MpegTSWrite, service_id), AV_OPT_TYPE_INT, {.i64 = 0x0001 }, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_pmt_start_pid", "Set the first pid of the PMT.",
      offsetof(MpegTSWrite, pmt_start_pid), AV_OPT_TYPE_INT, {.i64 = 0x1000 }, 0x0010, 0x1f00, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_start_pid", "Set the first pid.",
      offsetof(MpegTSWrite, start_pid), AV_OPT_TYPE_INT, {.i64 = 0x0100 }, 0x0100, 0x0f00, AV_OPT_FLAG_ENCODING_PARAM},
    {"mpegts_m2ts_mode", "Enable m2ts mode.",
     offsetof(MpegTSWrite, m2ts_mode), AV_OPT_TYPE_INT, {.i64 = -1 },
     -1,1, AV_OPT_FLAG_ENCODING_PARAM},  /* -1 = auto-detect from extension */
    { "muxrate", NULL, offsetof(MpegTSWrite, mux_rate), AV_OPT_TYPE_INT, {.i64 = 1}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
    { "pes_payload_size", "Minimum PES packet payload in bytes",
      offsetof(MpegTSWrite, pes_payload_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT_PES_PAYLOAD_SIZE}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_flags", "MPEG-TS muxing flags", offsetof(MpegTSWrite, flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, INT_MAX,
      AV_OPT_FLAG_ENCODING_PARAM, "mpegts_flags" },
    { "resend_headers", "Reemit PAT/PMT before writing the next packet",
      0, AV_OPT_TYPE_CONST, {.i64 = MPEGTS_FLAG_REEMIT_PAT_PMT}, 0, INT_MAX,
      AV_OPT_FLAG_ENCODING_PARAM, "mpegts_flags"},
    { "latm", "Use LATM packetization for AAC",
      0, AV_OPT_TYPE_CONST, {.i64 = MPEGTS_FLAG_AAC_LATM}, 0, INT_MAX,
      AV_OPT_FLAG_ENCODING_PARAM, "mpegts_flags"},
    // backward compatibility: duplicate of the "mpegts_flags" const above,
    // kept as a standalone int option (deprecated, see write_packet_internal)
    { "resend_headers", "Reemit PAT/PMT before writing the next packet",
      offsetof(MpegTSWrite, reemit_pat_pmt), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_copyts", "don't offset dts/pts",
      offsetof(MpegTSWrite, copyts), AV_OPT_TYPE_INT, {.i64=-1}, -1, 1, AV_OPT_FLAG_ENCODING_PARAM},
    { "tables_version", "set PAT, PMT and SDT version",
      offsetof(MpegTSWrite, tables_version), AV_OPT_TYPE_INT, {.i64=0}, 0, 31, AV_OPT_FLAG_ENCODING_PARAM},
    { NULL },
};
 
/* AVClass exposing options[] on the muxer's private context. */
static const AVClass mpegts_muxer_class = {
    .class_name = "MPEGTS muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Emit one complete PSI section as a series of 188-byte TS packets on
 * the section's PID, appending the CRC32 first.
 * NOTE: the caller must leave 4 spare bytes at the end of buf for the CRC32. */
static void mpegts_write_section(MpegTSSection *s, uint8_t *buf, int len)
{
    unsigned char ts_pkt[TS_PACKET_SIZE];
    const unsigned char *src = buf;
    int remaining = len;
    unsigned int crc;

    /* big-endian CRC32 over everything that precedes the CRC field */
    crc = av_bswap32(av_crc(av_crc_get_table(AV_CRC_32_IEEE), -1, buf, len - 4));
    buf[len - 4] = (crc >> 24) & 0xff;
    buf[len - 3] = (crc >> 16) & 0xff;
    buf[len - 2] = (crc >> 8) & 0xff;
    buf[len - 1] = (crc) & 0xff;

    /* split the section across as many TS packets as needed */
    while (remaining > 0) {
        int first = (src == buf);
        unsigned char *q = ts_pkt;
        int hdr_byte, chunk, pad;

        *q++ = 0x47;                    /* sync byte */
        hdr_byte = s->pid >> 8;
        if (first)
            hdr_byte |= 0x40;           /* payload_unit_start_indicator */
        *q++ = hdr_byte;
        *q++ = s->pid;
        s->cc = (s->cc + 1) & 0xf;
        *q++ = 0x10 | s->cc;            /* payload only, no adaptation field */
        if (first)
            *q++ = 0;                   /* pointer_field: section starts here */

        chunk = TS_PACKET_SIZE - (q - ts_pkt);
        if (chunk > remaining)
            chunk = remaining;
        memcpy(q, src, chunk);
        q += chunk;

        /* pad the remainder of the packet with stuffing bytes */
        pad = TS_PACKET_SIZE - (q - ts_pkt);
        if (pad > 0)
            memset(q, 0xff, pad);

        s->write_packet(s, ts_pkt);

        src       += chunk;
        remaining -= chunk;
    }
}
 
/* Store val as a 16-bit big-endian value and advance the write pointer. */
static inline void put16(uint8_t **q_ptr, int val)
{
    uint8_t *dst = *q_ptr;

    *dst++ = (val >> 8) & 0xff;
    *dst++ = val & 0xff;
    *q_ptr = dst;
}
 
/* Wrap a table body in a PSI section header (table_id, length, version,
 * section numbers) and send it through mpegts_write_section().
 * Returns 0 on success or AVERROR_INVALIDDATA if the section would
 * exceed the maximum section size. */
static int mpegts_write_section1(MpegTSSection *s, int tid, int id,
                                 int version, int sec_num, int last_sec_num,
                                 uint8_t *buf, int len)
{
    uint8_t section[1024];
    uint8_t *q = section;
    /* reserved_future_use field must be set to 1 for SDT */
    unsigned int flags = (tid == SDT_TID) ? 0xf000 : 0xb000;
    unsigned int tot_len = 3 + 5 + len + 4;

    /* refuse sections that do not fit */
    if (tot_len > sizeof(section))
        return AVERROR_INVALIDDATA;

    *q++ = tid;
    put16(&q, flags | (len + 5 + 4)); /* 5 byte header + 4 byte CRC */
    put16(&q, id);
    *q++ = 0xc1 | (version << 1);     /* current_next_indicator = 1 */
    *q++ = sec_num;
    *q++ = last_sec_num;
    memcpy(q, buf, len);

    mpegts_write_section(s, section, tot_len);
    return 0;
}
 
/*********************************************/
/* mpegts writer */
 
#define DEFAULT_PROVIDER_NAME "FFmpeg"
#define DEFAULT_SERVICE_NAME "Service01"
 
/* we retransmit the SI info at this rate */
#define SDT_RETRANS_TIME 500
#define PAT_RETRANS_TIME 100
#define PCR_RETRANS_TIME 20
 
/* Per-AVStream muxer state (AVStream.priv_data). */
typedef struct MpegTSWriteStream {
    struct MpegTSService *service;  /* service this stream belongs to */
    int pid; /* stream associated pid */
    int cc;                         /* 4-bit continuity counter */
    int payload_size;               /* bytes currently buffered in payload */
    int first_pts_check; ///< first pts check needed
    int prev_payload_key;           /* last written PES was a key frame */
    int64_t payload_pts;            /* pts of the buffered payload */
    int64_t payload_dts;            /* dts of the buffered payload */
    int payload_flags;              /* AV_PKT_FLAG_* of the buffered payload */
    uint8_t *payload;               /* PES payload accumulation buffer */
    AVFormatContext *amux;          /* nested ADTS/LATM muxer used for raw AAC */
} MpegTSWriteStream;
 
/* Build and send the Program Association Table: one (service id,
 * PMT PID) entry per service. */
static void mpegts_write_pat(AVFormatContext *s)
{
    MpegTSWrite *ts = s->priv_data;
    uint8_t data[1012];
    uint8_t *q = data;
    int i;

    for (i = 0; i < ts->nb_services; i++) {
        MpegTSService *service = ts->services[i];

        put16(&q, service->sid);
        put16(&q, 0xe000 | service->pmt.pid); /* reserved bits + PMT PID */
    }
    mpegts_write_section1(&ts->pat, PAT_TID, ts->tsid, ts->tables_version,
                          0, 0, data, q - data);
}
 
/**
 * Build and send the Program Map Table for one service: the PCR PID,
 * (empty) program info, and one entry per muxer stream carrying the
 * stream_type, elementary PID and optional ES descriptors (language,
 * DVB subtitling, registration descriptors, ...).
 */
static void mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
{
    MpegTSWrite *ts = s->priv_data;
    uint8_t data[1012], *q, *desc_length_ptr, *program_info_length_ptr;
    int val, stream_type, i;

    q = data;
    put16(&q, 0xe000 | service->pcr_pid);

    program_info_length_ptr = q;
    q += 2; /* patched after */

    /* put program info here */

    val = 0xf000 | (q - program_info_length_ptr - 2);
    program_info_length_ptr[0] = val >> 8;
    program_info_length_ptr[1] = val;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        MpegTSWriteStream *ts_st = st->priv_data;
        AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
        /* map the codec id to an MPEG-TS stream_type */
        switch (st->codec->codec_id) {
        case AV_CODEC_ID_MPEG1VIDEO:
        case AV_CODEC_ID_MPEG2VIDEO:
            stream_type = STREAM_TYPE_VIDEO_MPEG2;
            break;
        case AV_CODEC_ID_MPEG4:
            stream_type = STREAM_TYPE_VIDEO_MPEG4;
            break;
        case AV_CODEC_ID_H264:
            stream_type = STREAM_TYPE_VIDEO_H264;
            break;
        case AV_CODEC_ID_CAVS:
            stream_type = STREAM_TYPE_VIDEO_CAVS;
            break;
        case AV_CODEC_ID_DIRAC:
            stream_type = STREAM_TYPE_VIDEO_DIRAC;
            break;
        case AV_CODEC_ID_MP2:
        case AV_CODEC_ID_MP3:
            stream_type = STREAM_TYPE_AUDIO_MPEG1;
            break;
        case AV_CODEC_ID_AAC:
            stream_type = (ts->flags & MPEGTS_FLAG_AAC_LATM) ? STREAM_TYPE_AUDIO_AAC_LATM : STREAM_TYPE_AUDIO_AAC;
            break;
        case AV_CODEC_ID_AAC_LATM:
            stream_type = STREAM_TYPE_AUDIO_AAC_LATM;
            break;
        case AV_CODEC_ID_AC3:
            stream_type = STREAM_TYPE_AUDIO_AC3;
            break;
        default:
            stream_type = STREAM_TYPE_PRIVATE_DATA;
            break;
        }
        *q++ = stream_type;
        put16(&q, 0xe000 | ts_st->pid);
        desc_length_ptr = q;
        q += 2; /* patched after */

        /* write optional descriptors here */
        switch (st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (st->codec->codec_id == AV_CODEC_ID_EAC3) {
                *q++ = 0x7a; // EAC3 descriptor see A038 DVB SI
                *q++ = 1;    // 1 byte, all flags sets to 0
                *q++ = 0;    // omit all fields...
            }
            if (st->codec->codec_id == AV_CODEC_ID_S302M) {
                *q++ = 0x05; /* MPEG-2 registration descriptor*/
                *q++ = 4;
                *q++ = 'B';
                *q++ = 'S';
                *q++ = 'S';
                *q++ = 'D';
            }

            if (lang) {
                char *p;
                char *next = lang->value;
                uint8_t *len_ptr;

                *q++ = 0x0a; /* ISO 639 language descriptor */
                len_ptr = q++;
                *len_ptr = 0;

                /* "language" metadata may be a comma-separated list of
                 * 3-letter codes; each valid code adds 4 descriptor bytes
                 * (code + audio_type), up to the descriptor size limit */
                for (p = lang->value; next && *len_ptr < 255 / 4 * 4; p = next + 1) {
                    next = strchr(p, ',');
                    if (strlen(p) != 3 && (!next || next != p + 3))
                        continue; /* not a 3-letter code */

                    *q++ = *p++;
                    *q++ = *p++;
                    *q++ = *p++;

                    /* audio_type derived from the stream disposition */
                    if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
                        *q++ = 0x01;
                    else if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
                        *q++ = 0x02;
                    else if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
                        *q++ = 0x03;
                    else
                        *q++ = 0; /* undefined type */

                    *len_ptr += 4;
                }

                if (*len_ptr == 0)
                    q -= 2; /* no language codes were written */
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            {
                /* DVB subtitling descriptor; default language "eng" */
                const char *language;
                language = lang && strlen(lang->value) == 3 ? lang->value : "eng";
                *q++ = 0x59;
                *q++ = 8;
                *q++ = language[0];
                *q++ = language[1];
                *q++ = language[2];
                *q++ = 0x10; /* normal subtitles (0x20 = if hearing pb) */
                if (st->codec->extradata_size == 4) {
                    /* composition/ancillary page ids come from extradata */
                    memcpy(q, st->codec->extradata, 4);
                    q += 4;
                } else {
                    put16(&q, 1); /* page id */
                    put16(&q, 1); /* ancillary page id */
                }
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (stream_type == STREAM_TYPE_VIDEO_DIRAC) {
                *q++ = 0x05; /*MPEG-2 registration descriptor*/
                *q++ = 4;
                *q++ = 'd';
                *q++ = 'r';
                *q++ = 'a';
                *q++ = 'c';
            }
            break;
        case AVMEDIA_TYPE_DATA:
            if (st->codec->codec_id == AV_CODEC_ID_SMPTE_KLV) {
                *q++ = 0x05; /* MPEG-2 registration descriptor */
                *q++ = 4;
                *q++ = 'K';
                *q++ = 'L';
                *q++ = 'V';
                *q++ = 'A';
            }
            break;
        }

        /* patch the ES_info_length for this stream */
        val = 0xf000 | (q - desc_length_ptr - 2);
        desc_length_ptr[0] = val >> 8;
        desc_length_ptr[1] = val;
    }
    mpegts_write_section1(&service->pmt, PMT_TID, service->sid, ts->tables_version, 0, 0,
                          data, q - data);
}
 
/**
 * Append a DVB SI string (one length byte followed by the raw bytes) to
 * the buffer and advance the write pointer.
 *
 * NOTE: str == NULL is accepted for an empty string.
 *
 * @param q_ptr in/out: current write position, advanced past the data
 * @param str   string to append; truncated to 255 bytes because the
 *              length field is a single byte (previously a longer string
 *              wrapped the length byte while still copying everything)
 */
static void putstr8(uint8_t **q_ptr, const char *str)
{
    uint8_t *q = *q_ptr;
    size_t len = str ? strlen(str) : 0;

    if (len > 255)
        len = 255;
    *q++ = (uint8_t)len;
    if (len) {
        /* only dereference str when there is data: memcpy with a NULL
         * source is undefined behavior even for a zero size */
        memcpy(q, str, len);
        q += len;
    }
    *q_ptr = q;
}
 
/**
 * Build and send the Service Description Table: one entry per service,
 * each with a single 0x48 service descriptor carrying the provider and
 * service names.
 */
static void mpegts_write_sdt(AVFormatContext *s)
{
    MpegTSWrite *ts = s->priv_data;
    MpegTSService *service;
    uint8_t data[1012], *q, *desc_list_len_ptr, *desc_len_ptr;
    int i, running_status, free_ca_mode, val;

    q = data;
    put16(&q, ts->onid);
    *q++ = 0xff;            /* reserved_future_use */
    for (i = 0; i < ts->nb_services; i++) {
        service = ts->services[i];
        put16(&q, service->sid);
        *q++ = 0xfc | 0x00; /* currently no EIT info */
        desc_list_len_ptr = q;
        q += 2;             /* patched below */
        running_status = 4; /* running */
        free_ca_mode = 0;   /* not scrambled */

        /* write only one descriptor for the service name and provider */
        *q++ = 0x48;        /* service_descriptor */
        desc_len_ptr = q;
        q++;
        *q++ = 0x01;        /* digital television service */
        putstr8(&q, service->provider_name);
        putstr8(&q, service->name);
        desc_len_ptr[0] = q - desc_len_ptr - 1;

        /* fill descriptor length */
        val = (running_status << 13) | (free_ca_mode << 12) |
            (q - desc_list_len_ptr - 2);
        desc_list_len_ptr[0] = val >> 8;
        desc_list_len_ptr[1] = val;
    }
    mpegts_write_section1(&ts->sdt, SDT_TID, ts->tsid, ts->tables_version, 0, 0,
                          data, q - data);
}
 
/**
 * Allocate a new DVB service, assign it the next PMT PID and append it
 * to ts->services.
 *
 * @return the new service, or NULL if the service struct allocation fails.
 *
 * NOTE(review): av_strdup() failures are not checked here, leaving the
 * name fields NULL; putstr8() accepts NULL and writes an empty string,
 * but confirm that is the intended degradation.
 */
static MpegTSService *mpegts_add_service(MpegTSWrite *ts,
                                         int sid,
                                         const char *provider_name,
                                         const char *name)
{
    MpegTSService *service;

    service = av_mallocz(sizeof(MpegTSService));
    if (!service)
        return NULL;
    service->pmt.pid = ts->pmt_start_pid + ts->nb_services;
    service->sid = sid;
    service->provider_name = av_strdup(provider_name);
    service->name = av_strdup(name);
    service->pcr_pid = 0x1fff; /* unassigned until a PCR stream is picked */
    dynarray_add(&ts->services, &ts->nb_services, service);
    return service;
}
 
/* Compute the 27 MHz PCR for the current output position, assuming a
 * constant mux rate; the +11 byte offset accounts for the position of
 * the last byte of the PCR base inside the TS packet being written. */
static int64_t get_pcr(const MpegTSWrite *ts, AVIOContext *pb)
{
    int64_t pos = avio_tell(pb) + 11;

    return ts->first_pcr + av_rescale(pos, 8 * PCR_TIME_BASE, ts->mux_rate);
}
 
/* In m2ts mode, prefix each 188-byte TS packet with the 4-byte
 * TP_extra_header derived from the current PCR; no-op otherwise. */
static void mpegts_prefix_m2ts_header(AVFormatContext *s)
{
    MpegTSWrite *ts = s->priv_data;
    if (ts->m2ts_mode) {
        int64_t pcr = get_pcr(s->priv_data, s->pb);
        /* NOTE(review): '% 0x3fffffff' looks like it was meant to keep the
         * low 30 bits (a mask would be '& 0x3fffffff') — confirm against
         * the BDAV/m2ts spec before changing */
        uint32_t tp_extra_header = pcr % 0x3fffffff;
        /* reread the native-endian value as big endian for output */
        tp_extra_header = AV_RB32(&tp_extra_header);
        avio_write(s->pb, (unsigned char *) &tp_extra_header,
                   sizeof(tp_extra_header));
    }
}
 
/* MpegTSSection output callback: write one 188-byte section packet to
 * the muxer output, prefixing the m2ts timestamp header when enabled. */
static void section_write_packet(MpegTSSection *s, const uint8_t *packet)
{
    AVFormatContext *fmt = s->opaque;

    mpegts_prefix_m2ts_header(fmt);
    avio_write(fmt->pb, packet, TS_PACKET_SIZE);
}
 
/**
 * AVOutputFormat.write_header: create the single DVB service, set up the
 * PAT/SDT section writers, assign a PID and per-stream state to every
 * AVStream, pick the PCR stream and compute the SI/PCR retransmission
 * periods.
 *
 * Fixes vs. the previous version: the results of mpegts_add_service()
 * and avformat_new_stream() are now checked (both were dereferenced
 * unchecked), and the VBR branch no longer dereferences a NULL pcr_st
 * when the muxer has zero streams.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mpegts_write_header(AVFormatContext *s)
{
    MpegTSWrite *ts = s->priv_data;
    MpegTSWriteStream *ts_st;
    MpegTSService *service;
    AVStream *st, *pcr_st = NULL;
    AVDictionaryEntry *title, *provider;
    int i, j;
    const char *service_name;
    const char *provider_name;
    int *pids;
    int ret;

    if (s->max_delay < 0) /* not set by the caller */
        s->max_delay = 0;

    /* round up to a whole number of TS packets */
    ts->pes_payload_size = (ts->pes_payload_size + 14 + 183) / 184 * 184 - 14;

    ts->tsid = ts->transport_stream_id;
    ts->onid = ts->original_network_id;

    /* allocate a single DVB service, named from the metadata if present */
    title = av_dict_get(s->metadata, "service_name", NULL, 0);
    if (!title)
        title = av_dict_get(s->metadata, "title", NULL, 0);
    service_name  = title ? title->value : DEFAULT_SERVICE_NAME;
    provider      = av_dict_get(s->metadata, "service_provider", NULL, 0);
    provider_name = provider ? provider->value : DEFAULT_PROVIDER_NAME;
    service = mpegts_add_service(ts, ts->service_id, provider_name, service_name);
    if (!service)
        return AVERROR(ENOMEM);
    service->pmt.write_packet = section_write_packet;
    service->pmt.opaque = s;
    /* CC counters start at 15 so they wrap to 0 on the first packet */
    service->pmt.cc = 15;

    ts->pat.pid = PAT_PID;
    ts->pat.cc = 15; // Initialize at 15 so that it wraps and be equal to 0 for the first packet we write
    ts->pat.write_packet = section_write_packet;
    ts->pat.opaque = s;

    ts->sdt.pid = SDT_PID;
    ts->sdt.cc = 15;
    ts->sdt.write_packet = section_write_packet;
    ts->sdt.opaque = s;

    /* scratch array used only to detect duplicate PIDs */
    pids = av_malloc(s->nb_streams * sizeof(*pids));
    if (!pids)
        return AVERROR(ENOMEM);

    /* assign pids to each stream */
    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        avpriv_set_pts_info(st, 33, 1, 90000);
        ts_st = av_mallocz(sizeof(MpegTSWriteStream));
        if (!ts_st) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        st->priv_data = ts_st;
        ts_st->payload = av_mallocz(ts->pes_payload_size);
        if (!ts_st->payload) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ts_st->service = service;
        /* MPEG pid values < 16 are reserved. Applications which set st->id in
         * this range are assigned a calculated pid. */
        if (st->id < 16) {
            ts_st->pid = ts->start_pid + i;
        } else if (st->id < 0x1FFF) {
            ts_st->pid = st->id;
        } else {
            av_log(s, AV_LOG_ERROR, "Invalid stream id %d, must be less than 8191\n", st->id);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (ts_st->pid == service->pmt.pid) {
            av_log(s, AV_LOG_ERROR, "Duplicate stream id %d\n", ts_st->pid);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        for (j = 0; j < i; j++)
            if (pids[j] == ts_st->pid) {
                av_log(s, AV_LOG_ERROR, "Duplicate stream id %d\n", ts_st->pid);
                ret = AVERROR(EINVAL);
                goto fail;
            }
        pids[i] = ts_st->pid;
        ts_st->payload_pts = AV_NOPTS_VALUE;
        ts_st->payload_dts = AV_NOPTS_VALUE;
        ts_st->first_pts_check = 1;
        ts_st->cc = 15;
        /* update PCR pid by using the first video stream */
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
            service->pcr_pid == 0x1fff) {
            service->pcr_pid = ts_st->pid;
            pcr_st = st;
        }
        if (st->codec->codec_id == AV_CODEC_ID_AAC &&
            st->codec->extradata_size > 0) {
            /* raw AAC (has extradata): repacketize to ADTS or LATM through
             * a nested muxer */
            AVStream *ast;
            ts_st->amux = avformat_alloc_context();
            if (!ts_st->amux) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            ts_st->amux->oformat =
                av_guess_format((ts->flags & MPEGTS_FLAG_AAC_LATM) ? "latm" : "adts",
                                NULL, NULL);
            if (!ts_st->amux->oformat) {
                ret = AVERROR(EINVAL);
                goto fail;
            }
            ast = avformat_new_stream(ts_st->amux, NULL);
            if (!ast) {
                /* was dereferenced unchecked before */
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            ret = avcodec_copy_context(ast->codec, st->codec);
            if (ret != 0)
                goto fail;
            ret = avformat_write_header(ts_st->amux, NULL);
            if (ret < 0)
                goto fail;
        }
    }

    av_free(pids);

    /* if no video stream, use the first stream as PCR */
    if (service->pcr_pid == 0x1fff && s->nb_streams > 0) {
        pcr_st = s->streams[0];
        ts_st = pcr_st->priv_data;
        service->pcr_pid = ts_st->pid;
    }

    if (ts->mux_rate > 1) {
        /* CBR: SI/PCR retransmission periods derived from the mux rate */
        service->pcr_packet_period = (ts->mux_rate * PCR_RETRANS_TIME) /
            (TS_PACKET_SIZE * 8 * 1000);
        ts->sdt_packet_period      = (ts->mux_rate * SDT_RETRANS_TIME) /
            (TS_PACKET_SIZE * 8 * 1000);
        ts->pat_packet_period      = (ts->mux_rate * PAT_RETRANS_TIME) /
            (TS_PACKET_SIZE * 8 * 1000);

        if (ts->copyts < 1)
            ts->first_pcr = av_rescale(s->max_delay, PCR_TIME_BASE, AV_TIME_BASE);
    } else {
        /* Arbitrary values, PAT/PMT will also be written on video key frames */
        ts->sdt_packet_period = 200;
        ts->pat_packet_period = 40;
        if (!pcr_st) {
            /* no streams at all: nothing to derive the PCR period from
             * (previously this path dereferenced a NULL pointer) */
            service->pcr_packet_period = 1;
        } else if (pcr_st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (!pcr_st->codec->frame_size) {
                av_log(s, AV_LOG_WARNING, "frame size not set\n");
                service->pcr_packet_period =
                    pcr_st->codec->sample_rate / (10 * 512);
            } else {
                service->pcr_packet_period =
                    pcr_st->codec->sample_rate / (10 * pcr_st->codec->frame_size);
            }
        } else {
            // max delta PCR 0.1s
            service->pcr_packet_period =
                pcr_st->codec->time_base.den / (10 * pcr_st->codec->time_base.num);
        }
        if (!service->pcr_packet_period)
            service->pcr_packet_period = 1;
    }

    // output a PCR as soon as possible
    service->pcr_packet_count = service->pcr_packet_period;
    ts->pat_packet_count      = ts->pat_packet_period - 1;
    ts->sdt_packet_count      = ts->sdt_packet_period - 1;

    if (ts->mux_rate == 1)
        av_log(s, AV_LOG_VERBOSE, "muxrate VBR, ");
    else
        av_log(s, AV_LOG_VERBOSE, "muxrate %d, ", ts->mux_rate);
    av_log(s, AV_LOG_VERBOSE, "pcr every %d pkts, "
           "sdt every %d, pat/pmt every %d pkts\n",
           service->pcr_packet_period,
           ts->sdt_packet_period, ts->pat_packet_period);

    if (ts->m2ts_mode == -1) {
        /* auto-detect m2ts mode from the output file extension */
        if (av_match_ext(s->filename, "m2ts")) {
            ts->m2ts_mode = 1;
        } else {
            ts->m2ts_mode = 0;
        }
    }

    avio_flush(s->pb);

    return 0;

fail:
    av_free(pids);
    for (i = 0; i < s->nb_streams; i++) {
        MpegTSWriteStream *ts_st;
        st = s->streams[i];
        ts_st = st->priv_data;
        if (ts_st) {
            av_freep(&ts_st->payload);
            if (ts_st->amux) {
                avformat_free_context(ts_st->amux);
                ts_st->amux = NULL;
            }
        }
        av_freep(&st->priv_data);
    }
    return ret;
}
 
/* Send the SDT, PAT and all PMT tables regularly; force_pat additionally
 * forces an immediate PAT/PMT retransmission (used on video key frames). */
static void retransmit_si_info(AVFormatContext *s, int force_pat)
{
    MpegTSWrite *ts = s->priv_data;
    int i;

    ts->sdt_packet_count++;
    if (ts->sdt_packet_count == ts->sdt_packet_period) {
        ts->sdt_packet_count = 0;
        mpegts_write_sdt(s);
    }

    ts->pat_packet_count++;
    if (force_pat || ts->pat_packet_count == ts->pat_packet_period) {
        ts->pat_packet_count = 0;
        mpegts_write_pat(s);
        for (i = 0; i < ts->nb_services; i++)
            mpegts_write_pmt(s, ts->services[i]);
    }
}
 
/* Encode a 27 MHz PCR into the 6-byte adaptation-field layout:
 * 33-bit base (90 kHz), 6 reserved bits set to 1, 9-bit extension.
 * Returns the number of bytes written (always 6). */
static int write_pcr_bits(uint8_t *buf, int64_t pcr)
{
    int64_t base = pcr / 300;   /* 90 kHz part */
    int64_t ext  = pcr % 300;   /* 27 MHz remainder */

    buf[0] = base >> 25;
    buf[1] = base >> 17;
    buf[2] = base >> 9;
    buf[3] = base >> 1;
    buf[4] = (base << 7) | (ext >> 8) | 0x7e;
    buf[5] = ext;

    return 6;
}
 
/* Write a single null (PID 0x1fff) transport stream packet; used as
 * padding to sustain the constant bitrate in CBR mode. */
static void mpegts_insert_null_packet(AVFormatContext *s)
{
    uint8_t buf[TS_PACKET_SIZE];

    buf[0] = 0x47;  /* sync byte */
    buf[1] = 0x1f;  /* PID 0x1fff, no payload_unit_start */
    buf[2] = 0xff;
    buf[3] = 0x10;  /* payload only, CC = 0 */
    memset(buf + 4, 0xff, TS_PACKET_SIZE - 4);

    mpegts_prefix_m2ts_header(s);
    avio_write(s->pb, buf, TS_PACKET_SIZE);
}
 
/* Write a single transport stream packet carrying only a PCR in its
 * adaptation field, with no payload. */
static void mpegts_insert_pcr_only(AVFormatContext *s, AVStream *st)
{
    MpegTSWrite *ts = s->priv_data;
    MpegTSWriteStream *ts_st = st->priv_data;
    uint8_t buf[TS_PACKET_SIZE];
    uint8_t *q = buf;

    *q++ = 0x47;
    *q++ = ts_st->pid >> 8;
    *q++ = ts_st->pid;
    /* adaptation field only, no payload; the continuity counter is not
     * incremented for such packets (ISO 13818-1 section 2.4.3.3) */
    *q++ = 0x20 | ts_st->cc;
    *q++ = TS_PACKET_SIZE - 5; /* adaptation_field_length */
    *q++ = 0x10;               /* adaptation flags: PCR present */

    /* PCR coded into 6 bytes */
    q += write_pcr_bits(q, get_pcr(ts, s->pb));

    /* fill the rest of the adaptation field with stuffing bytes */
    memset(q, 0xFF, TS_PACKET_SIZE - (q - buf));

    mpegts_prefix_m2ts_header(s);
    avio_write(s->pb, buf, TS_PACKET_SIZE);
}
 
/* Write a 5-byte PES timestamp: 4-bit prefix, then the 33-bit value
 * split 3/15/15 with a marker bit after each group. */
static void write_pts(uint8_t *q, int fourbits, int64_t pts)
{
    int tmp;

    tmp  = (fourbits << 4) | (((pts >> 30) & 0x07) << 1) | 1;
    q[0] = tmp;
    tmp  = (((pts >> 15) & 0x7fff) << 1) | 1;
    q[1] = tmp >> 8;
    q[2] = tmp;
    tmp  = ((pts & 0x7fff) << 1) | 1;
    q[3] = tmp >> 8;
    q[4] = tmp;
}
 
/* Set a flag bit in the adaptation field of an MPEG-TS packet, creating
 * a minimal (flags-only) adaptation field if the packet has none yet. */
static void set_af_flag(uint8_t *pkt, int flag)
{
    av_assert0(flag); /* expect at least one flag to set */

    if (!(pkt[3] & 0x20)) {
        /* no AF yet: announce one with a 1-byte body holding the flags */
        pkt[3] |= 0x20;
        pkt[4] = 1;
        pkt[5] = 0;
    }
    pkt[5] |= flag;
}
 
/* Extend the adaptation field by size bytes (adjusts only the length
 * byte; the caller is responsible for filling the added bytes). */
static void extend_af(uint8_t *pkt, int size)
{
    // expect already existing adaptation field
    av_assert0(pkt[3] & 0x20);
    pkt[4] += size;
}
 
/* Return a pointer to the first payload byte of an MPEG-TS packet,
 * skipping the 4-byte header and any adaptation field. */
static uint8_t *get_ts_payload_start(uint8_t *pkt)
{
    if (!(pkt[3] & 0x20))
        return pkt + 4;          /* no adaptation field */
    return pkt + 5 + pkt[4];     /* skip the length byte + field body */
}
 
/* Add a pes header to the front of payload, and segment into an integer number of
 * ts packets. The final ts packet is padded using an over-sized adaptation header
 * to exactly fill the last ts packet.
 * NOTE: 'payload' contains a complete PES payload.
 */
static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
                             const uint8_t *payload, int payload_size,
                             int64_t pts, int64_t dts, int key)
{
    MpegTSWriteStream *ts_st = st->priv_data;
    MpegTSWrite *ts = s->priv_data;
    uint8_t buf[TS_PACKET_SIZE];
    uint8_t *q;
    int val, is_start, len, header_len, write_pcr, private_code, flags;
    int afc_len, stuffing_len;
    int64_t pcr = -1; /* avoid warning */
    int64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE);
    /* re-send PAT/PMT at the start of a video key frame */
    int force_pat = st->codec->codec_type == AVMEDIA_TYPE_VIDEO && key && !ts_st->prev_payload_key;

    is_start = 1;
    while (payload_size > 0) {
        retransmit_si_info(s, force_pat);
        force_pat = 0;

        /* decide whether this packet must carry a PCR */
        write_pcr = 0;
        if (ts_st->pid == ts_st->service->pcr_pid) {
            if (ts->mux_rate > 1 || is_start) // VBR pcr period is based on frames
                ts_st->service->pcr_packet_count++;
            if (ts_st->service->pcr_packet_count >=
                ts_st->service->pcr_packet_period) {
                ts_st->service->pcr_packet_count = 0;
                write_pcr = 1;
            }
        }

        /* CBR: if the frame is ahead of the PCR clock, pad the stream
         * with PCR-only or null packets to hold the constant rate */
        if (ts->mux_rate > 1 && dts != AV_NOPTS_VALUE &&
            (dts - get_pcr(ts, s->pb) / 300) > delay) {
            /* pcr insert gets priority over null packet insert */
            if (write_pcr)
                mpegts_insert_pcr_only(s, st);
            else
                mpegts_insert_null_packet(s);
            continue; /* recalculate write_pcr and possibly retransmit si_info */
        }

        /* prepare packet header */
        q = buf;
        *q++ = 0x47;
        val = (ts_st->pid >> 8);
        if (is_start)
            val |= 0x40;  /* payload_unit_start_indicator */
        *q++ = val;
        *q++ = ts_st->pid;
        ts_st->cc = (ts_st->cc + 1) & 0xf;
        *q++ = 0x10 | ts_st->cc; // payload indicator + CC
        if (key && is_start && pts != AV_NOPTS_VALUE) {
            // set Random Access for key frames
            if (ts_st->pid == ts_st->service->pcr_pid)
                write_pcr = 1;
            set_af_flag(buf, 0x40);
            q = get_ts_payload_start(buf);
        }
        if (write_pcr) {
            set_af_flag(buf, 0x10); /* PCR_flag */
            q = get_ts_payload_start(buf);
            // add 11, pcr references the last byte of program clock reference base
            if (ts->mux_rate > 1)
                pcr = get_pcr(ts, s->pb);
            else
                pcr = (dts - delay) * 300;
            if (dts != AV_NOPTS_VALUE && dts < pcr / 300)
                av_log(s, AV_LOG_WARNING, "dts < pcr, TS is invalid\n");
            extend_af(buf, write_pcr_bits(q, pcr));
            q = get_ts_payload_start(buf);
        }
        if (is_start) {
            int pes_extension = 0;
            /* write PES header */
            *q++ = 0x00;
            *q++ = 0x00;
            *q++ = 0x01;
            /* pick the PES stream_id from the codec type/id */
            private_code = 0;
            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                if (st->codec->codec_id == AV_CODEC_ID_DIRAC) {
                    *q++ = 0xfd; /* extended_stream_id */
                } else
                    *q++ = 0xe0; /* video stream 0 */
            } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                       (st->codec->codec_id == AV_CODEC_ID_MP2 ||
                        st->codec->codec_id == AV_CODEC_ID_MP3 ||
                        st->codec->codec_id == AV_CODEC_ID_AAC)) {
                *q++ = 0xc0; /* audio stream 0 */
            } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                       st->codec->codec_id == AV_CODEC_ID_AC3 &&
                       ts->m2ts_mode) {
                *q++ = 0xfd;
            } else {
                *q++ = 0xbd; /* private_stream_1 */
                if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                    private_code = 0x20;
                }
            }
            /* accumulate the PES header flags and optional-field length */
            header_len = 0;
            flags = 0;
            if (pts != AV_NOPTS_VALUE) {
                header_len += 5;
                flags |= 0x80; /* PTS present */
            }
            if (dts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE && dts != pts) {
                header_len += 5;
                flags |= 0x40; /* DTS present */
            }
            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                st->codec->codec_id == AV_CODEC_ID_DIRAC) {
                /* set PES_extension_flag */
                pes_extension = 1;
                flags |= 0x01;

                /*
                 * One byte for PES2 extension flag +
                 * one byte for extension length +
                 * one byte for extension id
                 */
                header_len += 3;
            }
            /* for Blu-ray AC3 Audio the PES Extension flag should be as follow
             * otherwise it will not play sound on blu-ray
             */
            if (ts->m2ts_mode &&
                st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                st->codec->codec_id == AV_CODEC_ID_AC3) {
                /* set PES_extension_flag */
                pes_extension = 1;
                flags |= 0x01;
                header_len += 3;
            }
            len = payload_size + header_len + 3;
            if (private_code != 0)
                len++;
            if (len > 0xffff)
                len = 0; /* unbounded PES packet length */
            *q++ = len >> 8;
            *q++ = len;
            val = 0x80;
            /* data alignment indicator is required for subtitle and data streams */
            if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA)
                val |= 0x04;
            *q++ = val;
            *q++ = flags;
            *q++ = header_len;
            if (pts != AV_NOPTS_VALUE) {
                write_pts(q, flags >> 6, pts);
                q += 5;
            }
            if (dts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE && dts != pts) {
                write_pts(q, 1, dts);
                q += 5;
            }
            if (pes_extension && st->codec->codec_id == AV_CODEC_ID_DIRAC) {
                flags = 0x01; /* set PES_extension_flag_2 */
                *q++ = flags;
                *q++ = 0x80 | 0x01; /* marker bit + extension length */
                /*
                 * Set the stream id extension flag bit to 0 and
                 * write the extended stream id
                 */
                *q++ = 0x00 | 0x60;
            }
            /* For Blu-ray AC3 Audio Setting extended flags */
            if (ts->m2ts_mode &&
                pes_extension &&
                st->codec->codec_id == AV_CODEC_ID_AC3) {
                flags = 0x01; /* set PES_extension_flag_2 */
                *q++ = flags;
                *q++ = 0x80 | 0x01; /* marker bit + extension length */
                *q++ = 0x00 | 0x71; /* for AC3 Audio (specifically on blue-rays) */
            }

            if (private_code != 0)
                *q++ = private_code;
            is_start = 0;
        }
        /* header size */
        header_len = q - buf;
        /* data len */
        len = TS_PACKET_SIZE - header_len;
        if (len > payload_size)
            len = payload_size;
        stuffing_len = TS_PACKET_SIZE - header_len - len;
        if (stuffing_len > 0) {
            /* add stuffing with AFC */
            if (buf[3] & 0x20) {
                /* stuffing already present: increase its size */
                afc_len = buf[4] + 1;
                memmove(buf + 4 + afc_len + stuffing_len,
                        buf + 4 + afc_len,
                        header_len - (4 + afc_len));
                buf[4] += stuffing_len;
                memset(buf + 4 + afc_len, 0xff, stuffing_len);
            } else {
                /* add stuffing */
                memmove(buf + 4 + stuffing_len, buf + 4, header_len - 4);
                buf[3] |= 0x20;
                buf[4] = stuffing_len - 1;
                if (stuffing_len >= 2) {
                    buf[5] = 0x00;
                    memset(buf + 6, 0xff, stuffing_len - 2);
                }
            }
        }
        memcpy(buf + TS_PACKET_SIZE - len, payload, len);
        payload += len;
        payload_size -= len;
        mpegts_prefix_m2ts_header(s);
        avio_write(s->pb, buf, TS_PACKET_SIZE);
    }
    avio_flush(s->pb);
    ts_st->prev_payload_key = key;
}
 
/**
 * Buffer or emit one packet: audio is accumulated into a per-stream PES
 * payload up to pes_payload_size, while video/subtitle/data packets are
 * written as one PES packet each. Also inserts H.264 AUD NAL units and
 * repacks raw AAC through the nested ADTS/LATM muxer.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mpegts_write_packet_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st = s->streams[pkt->stream_index];
    int size = pkt->size;
    uint8_t *buf = pkt->data;
    uint8_t *data = NULL;   /* temporary buffer owned by this function */
    MpegTSWrite *ts = s->priv_data;
    MpegTSWriteStream *ts_st = st->priv_data;
    const int64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE) * 2;
    int64_t dts = pkt->dts, pts = pkt->pts;

    /* map the deprecated resend_headers int option onto the flag */
    if (ts->reemit_pat_pmt) {
        av_log(s, AV_LOG_WARNING, "resend_headers option is deprecated, use -mpegts_flags resend_headers\n");
        ts->reemit_pat_pmt = 0;
        ts->flags |= MPEGTS_FLAG_REEMIT_PAT_PMT;
    }

    if (ts->flags & MPEGTS_FLAG_REEMIT_PAT_PMT) {
        /* make the counters expire so the next packet resends PAT/SDT */
        ts->pat_packet_count = ts->pat_packet_period - 1;
        ts->sdt_packet_count = ts->sdt_packet_period - 1;
        ts->flags &= ~MPEGTS_FLAG_REEMIT_PAT_PMT;
    }

    /* shift timestamps by the muxing delay unless copyts is enabled */
    if (ts->copyts < 1) {
        if (pts != AV_NOPTS_VALUE)
            pts += delay;
        if (dts != AV_NOPTS_VALUE)
            dts += delay;
    }

    if (ts_st->first_pts_check && pts == AV_NOPTS_VALUE) {
        av_log(s, AV_LOG_ERROR, "first pts value must be set\n");
        return AVERROR_INVALIDDATA;
    }
    ts_st->first_pts_check = 0;

    if (st->codec->codec_id == AV_CODEC_ID_H264) {
        const uint8_t *p = buf, *buf_end = p + size;
        uint32_t state = -1;

        /* Annex-B input is required; MP4-style length prefixes are rejected */
        if (pkt->size < 5 || AV_RB32(pkt->data) != 0x0000001) {
            if (!st->nb_frames) {
                av_log(s, AV_LOG_ERROR, "H.264 bitstream malformed, "
                       "no startcode found, use the h264_mp4toannexb bitstream filter (-bsf h264_mp4toannexb)\n");
                return AVERROR(EINVAL);
            }
            av_log(s, AV_LOG_WARNING, "H.264 bitstream error, startcode missing\n");
        }

        /* check whether an access unit delimiter NAL is already present
         * before the first slice/IDR NAL */
        do {
            p = avpriv_find_start_code(p, buf_end, &state);
            av_dlog(s, "nal %d\n", state & 0x1f);
        } while (p < buf_end && (state & 0x1f) != 9 &&
                 (state & 0x1f) != 5 && (state & 0x1f) != 1);

        if ((state & 0x1f) != 9) { // AUD NAL
            /* prepend an AUD NAL unit */
            data = av_malloc(pkt->size + 6);
            if (!data)
                return AVERROR(ENOMEM);
            memcpy(data + 6, pkt->data, pkt->size);
            AV_WB32(data, 0x00000001);
            data[4] = 0x09;
            data[5] = 0xf0; // any slice type (0xe) + rbsp stop one bit
            buf = data;
            size = pkt->size + 6;
        }
    } else if (st->codec->codec_id == AV_CODEC_ID_AAC) {
        if (pkt->size < 2) {
            av_log(s, AV_LOG_ERROR, "AAC packet too short\n");
            return AVERROR_INVALIDDATA;
        }
        if ((AV_RB16(pkt->data) & 0xfff0) != 0xfff0) {
            /* no ADTS syncword: repack raw AAC through the nested muxer */
            int ret;
            AVPacket pkt2;

            if (!ts_st->amux) {
                av_log(s, AV_LOG_ERROR, "AAC bitstream not in ADTS format "
                       "and extradata missing\n");
                return AVERROR_INVALIDDATA;
            }

            av_init_packet(&pkt2);
            pkt2.data = pkt->data;
            pkt2.size = pkt->size;
            ret = avio_open_dyn_buf(&ts_st->amux->pb);
            if (ret < 0)
                return AVERROR(ENOMEM);

            ret = av_write_frame(ts_st->amux, &pkt2);
            if (ret < 0) {
                avio_close_dyn_buf(ts_st->amux->pb, &data);
                ts_st->amux->pb = NULL;
                av_free(data);
                return ret;
            }
            size = avio_close_dyn_buf(ts_st->amux->pb, &data);
            ts_st->amux->pb = NULL;
            buf = data;
        }
    }

    if (pkt->dts != AV_NOPTS_VALUE) {
        int i;
        /* flush other streams whose buffered payload has become too old
         * relative to this packet's dts */
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st2 = s->streams[i];
            MpegTSWriteStream *ts_st2 = st2->priv_data;
            if (ts_st2->payload_size
                && (ts_st2->payload_dts == AV_NOPTS_VALUE || dts - ts_st2->payload_dts > delay / 2)) {
                mpegts_write_pes(s, st2, ts_st2->payload, ts_st2->payload_size,
                                 ts_st2->payload_pts, ts_st2->payload_dts,
                                 ts_st2->payload_flags & AV_PKT_FLAG_KEY);
                ts_st2->payload_size = 0;
            }
        }
    }

    /* flush this stream's buffer if the new data would overflow it */
    if (ts_st->payload_size && ts_st->payload_size + size > ts->pes_payload_size) {
        mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_size,
                         ts_st->payload_pts, ts_st->payload_dts,
                         ts_st->payload_flags & AV_PKT_FLAG_KEY);
        ts_st->payload_size = 0;
    }

    if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO || size > ts->pes_payload_size) {
        av_assert0(!ts_st->payload_size);
        // for video and subtitle, write a single pes packet
        mpegts_write_pes(s, st, buf, size, pts, dts, pkt->flags & AV_PKT_FLAG_KEY);
        av_free(data);
        return 0;
    }

    /* buffer audio: the accumulated PES inherits the timestamps and
     * flags of the first buffered packet */
    if (!ts_st->payload_size) {
        ts_st->payload_pts = pts;
        ts_st->payload_dts = dts;
        ts_st->payload_flags = pkt->flags;
    }

    memcpy(ts_st->payload + ts_st->payload_size, buf, size);
    ts_st->payload_size += size;

    av_free(data);

    return 0;
}
 
/* Write out every stream's buffered PES payload and flush the output. */
static void mpegts_write_flush(AVFormatContext *s)
{
    int i;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        MpegTSWriteStream *ts_st = st->priv_data;

        if (ts_st->payload_size <= 0)
            continue;
        mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_size,
                         ts_st->payload_pts, ts_st->payload_dts,
                         ts_st->payload_flags & AV_PKT_FLAG_KEY);
        ts_st->payload_size = 0;
    }
    avio_flush(s->pb);
}
 
/* AVOutputFormat.write_packet entry point: a NULL packet requests a
 * flush (the muxer advertises AVFMT_ALLOW_FLUSH) and returns 1 to
 * signal that all buffered data has been written. */
static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    if (pkt)
        return mpegts_write_packet_internal(s, pkt);
    mpegts_write_flush(s);
    return 1;
}
 
/* AVOutputFormat.write_trailer: flush buffered payloads and free all
 * per-stream and per-service state. */
static int mpegts_write_end(AVFormatContext *s)
{
    MpegTSWrite *ts = s->priv_data;
    int i;

    mpegts_write_flush(s);

    for (i = 0; i < s->nb_streams; i++) {
        MpegTSWriteStream *ts_st = s->streams[i]->priv_data;

        av_freep(&ts_st->payload);
        if (ts_st->amux) {
            avformat_free_context(ts_st->amux);
            ts_st->amux = NULL;
        }
    }

    for (i = 0; i < ts->nb_services; i++) {
        MpegTSService *service = ts->services[i];

        av_freep(&service->provider_name);
        av_freep(&service->name);
        av_free(service);
    }
    av_free(ts->services);

    return 0;
}
 
/* Muxer registration; AVFMT_ALLOW_FLUSH enables flushing buffered PES
 * payloads with a NULL packet (see mpegts_write_packet). */
AVOutputFormat ff_mpegts_muxer = {
    .name           = "mpegts",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-TS (MPEG-2 Transport Stream)"),
    .mime_type      = "video/x-mpegts",
    .extensions     = "ts,m2t,m2ts,mts",
    .priv_data_size = sizeof(MpegTSWrite),
    .audio_codec    = AV_CODEC_ID_MP2,
    .video_codec    = AV_CODEC_ID_MPEG2VIDEO,
    .write_header   = mpegts_write_header,
    .write_packet   = mpegts_write_packet,
    .write_trailer  = mpegts_write_end,
    .flags          = AVFMT_ALLOW_FLUSH,
    .priv_class     = &mpegts_muxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/mpegvideodec.c
0,0 → 1,72
/*
* RAW MPEG video demuxer
* Copyright (c) 2002-2003 Fabrice Bellard
* Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rawdec.h"
 
#define SEQ_START_CODE 0x000001b3
#define GOP_START_CODE 0x000001b8
#define PICTURE_START_CODE 0x00000100
#define SLICE_START_CODE 0x00000101
#define PACK_START_CODE 0x000001ba
#define VIDEO_ID 0x000001e0
#define AUDIO_ID 0x000001c0
 
/* Probe for raw MPEG-1/2 video: scan the buffer for start codes and
 * count sequence headers, pictures, (in-order vs. out-of-order) slices,
 * pack headers and PES markers; a plausible ratio of those counts with
 * no system-layer codes means raw MPEG video. */
static int mpegvideo_probe(AVProbeData *p)
{
    uint32_t code = -1;
    uint32_t last = 0;
    int pic = 0, seq = 0, slice = 0, pspack = 0;
    int vpes = 0, apes = 0, res = 0, sicle = 0;
    int i;

    for (i = 0; i < p->buf_size; i++) {
        code = (code << 8) + p->buf[i];
        if ((code & 0xffffff00) != 0x100)
            continue;                       /* not at a start code */

        switch (code) {
        case SEQ_START_CODE:     seq++;    break;
        case PICTURE_START_CODE: pic++;    break;
        case PACK_START_CODE:    pspack++; break;
        case 0x1b6:              res++;    break; /* reserved: bad sign */
        }

        if (code >= SLICE_START_CODE && code <= 0x1af) {
            /* slices inside a picture must appear in ascending order;
             * out-of-order ones ("sicle") count against the format */
            if (last >= SLICE_START_CODE && last <= 0x1af) {
                if (code >= last) slice++;
                else              sicle++;
            } else {
                if (code == SLICE_START_CODE) slice++;
                else                          sicle++;
            }
        }
        if      ((code & 0x1f0) == VIDEO_ID) vpes++;
        else if ((code & 0x1e0) == AUDIO_ID) apes++;
        last = code;
    }

    if (seq && seq * 9 <= pic * 10 && pic * 9 <= slice * 10 &&
        !pspack && !apes && !res && slice > sicle) {
        if (vpes)
            return AVPROBE_SCORE_EXTENSION / 4;
        return pic > 1 ? AVPROBE_SCORE_EXTENSION + 1  /* +1 for .mpg */
                       : AVPROBE_SCORE_EXTENSION / 2;
    }
    return 0;
}
 
FF_DEF_RAWVIDEO_DEMUXER(mpegvideo, "raw MPEG video", mpegvideo_probe, NULL, AV_CODEC_ID_MPEG1VIDEO)
/contrib/sdk/sources/ffmpeg/libavformat/mpjpeg.c
0,0 → 1,68
/*
* Multipart JPEG format
* Copyright (c) 2000, 2001, 2002, 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
 
/* Multipart JPEG */
 
#define BOUNDARY_TAG "ffserver"
 
/**
 * Emit the opening multipart boundary line ("--ffserver\r\n") so the
 * first JPEG frame starts a MIME part, then flush so clients see it
 * immediately.
 *
 * Replaces the old snprintf-into-uint8_t-buffer pattern: passing a
 * uint8_t array to snprintf()/strlen() mixes pointer signedness, and
 * avio_printf() does the same job without an intermediate buffer.
 *
 * @return 0 always
 */
static int mpjpeg_write_header(AVFormatContext *s)
{
    avio_printf(s->pb, "--%s\r\n", BOUNDARY_TAG);
    avio_flush(s->pb);
    return 0;
}
 
/**
 * Write one JPEG frame as a MIME part: Content-type and Content-length
 * headers, the raw JPEG payload, then the boundary for the next part.
 *
 * Uses avio_printf() instead of snprintf() into a uint8_t buffer — the
 * old code passed unsigned buffers to snprintf()/strlen() (pointer-sign
 * mismatch) for strings that never needed local buffering.
 *
 * @param pkt packet holding one complete JPEG image
 * @return 0 always
 */
static int mpjpeg_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    avio_printf(s->pb, "Content-type: image/jpeg\r\n");
    avio_printf(s->pb, "Content-length: %d\r\n\r\n", pkt->size);
    avio_write(s->pb, pkt->data, pkt->size);
    avio_printf(s->pb, "\r\n--%s\r\n", BOUNDARY_TAG);
    return 0;
}
 
/* Multipart JPEG has no trailer; a closing boundary is not required. */
static int mpjpeg_write_trailer(AVFormatContext *s)
{
    return 0;
}
 
/* Muxer registration for MIME multipart JPEG (the format ffserver/MJPEG
 * HTTP streams use); the boundary tag is baked into the MIME type. */
AVOutputFormat ff_mpjpeg_muxer = {
    .name              = "mpjpeg",
    .long_name         = NULL_IF_CONFIG_SMALL("MIME multipart JPEG"),
    .mime_type         = "multipart/x-mixed-replace;boundary=" BOUNDARY_TAG,
    .extensions        = "mjpg",
    .audio_codec       = AV_CODEC_ID_NONE,
    .video_codec       = AV_CODEC_ID_MJPEG,
    .write_header      = mpjpeg_write_header,
    .write_packet      = mpjpeg_write_packet,
    .write_trailer     = mpjpeg_write_trailer,
};
/contrib/sdk/sources/ffmpeg/libavformat/mpl2dec.c
0,0 → 1,146
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* MPL2 subtitles format demuxer
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
 
typedef struct {
FFDemuxSubtitlesQueue q;
} MPL2Context;
 
/* MPL2 probe: the first two lines must both match "[start][end]text"
 * or "[start][]text"; two hits give maximum confidence. */
static int mpl2_probe(AVProbeData *p)
{
    int64_t start, end;
    char c;
    const unsigned char *ptr     = p->buf;
    const unsigned char *ptr_end = ptr + p->buf_size;
    int line;

    for (line = 0; line < 2; line++) {
        if (sscanf(ptr, "[%"SCNd64"][%"SCNd64"]%c", &start, &end, &c) != 3 &&
            sscanf(ptr, "[%"SCNd64"][]%c",          &start, &c)       != 2)
            return 0;
        ptr += ff_subtitles_next_line(ptr);
        if (ptr >= ptr_end)
            return 0;
    }
    return AVPROBE_SCORE_MAX;
}
 
/* Parse the "[start][end]" / "[start][]" prefix of an MPL2 cue line.
 * On success *line is advanced to the cue text (the %c consumed one
 * character too many, hence the "- 1").
 * Returns 0 on success, -1 if the line has no timestamp prefix;
 * *duration is -1 for the open-ended "[start][]" form. */
static int read_ts(char **line, int64_t *pts_start, int *duration)
{
    int64_t end;
    int len;
    char c;

    /* open-ended cue: "[start][]" */
    if (sscanf(*line, "[%"SCNd64"][]%c%n", pts_start, &c, &len) >= 2) {
        *duration = -1;
        *line    += len - 1;
        return 0;
    }
    /* bounded cue: "[start][end]" */
    if (sscanf(*line, "[%"SCNd64"][%"SCNd64"]%c%n",
               pts_start, &end, &c, &len) >= 3) {
        *duration = end - *pts_start;
        *line    += len - 1;
        return 0;
    }
    return -1;
}
 
/* Parse the entire MPL2 file at open time: every timestamped line is
 * turned into a packet and inserted into the demuxer's subtitle queue,
 * which read_packet later drains in sorted order. */
static int mpl2_read_header(AVFormatContext *s)
{
    MPL2Context *mpl2 = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    int res = 0;

    if (!st)
        return AVERROR(ENOMEM);
    /* MPL2 timestamps are expressed in tenths of a second */
    avpriv_set_pts_info(st, 64, 1, 10);
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_MPL2;

    while (!url_feof(s->pb)) {
        char line[4096];
        char *p = line;
        /* record the file offset before reading the line so pkt->pos
         * points at the start of the cue */
        const int64_t pos = avio_tell(s->pb);
        int len = ff_get_line(s->pb, line, sizeof(line));
        int64_t pts_start;
        int duration;

        if (!len)
            break;

        line[strcspn(line, "\r\n")] = 0;

        /* read_ts() advances p past the "[..][..]" prefix on success;
         * lines without a timestamp prefix are silently skipped */
        if (!read_ts(&p, &pts_start, &duration)) {
            AVPacket *sub;

            sub = ff_subtitles_queue_insert(&mpl2->q, p, strlen(p), 0);
            if (!sub)
                return AVERROR(ENOMEM);
            sub->pos = pos;
            sub->pts = pts_start;
            sub->duration = duration;   /* -1 means "until next cue" */
        }
    }

    /* sort the queue by timestamp so packets come out in pts order */
    ff_subtitles_queue_finalize(&mpl2->q);
    return res;
}
 
/* Pop the next queued subtitle event (queue was filled in read_header). */
static int mpl2_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MPL2Context *ctx = s->priv_data;

    return ff_subtitles_queue_read_packet(&ctx->q, pkt);
}
 
/* Delegate seeking to the generic sorted-subtitle-queue helper. */
static int mpl2_read_seek(AVFormatContext *s, int stream_index,
                          int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    MPL2Context *ctx = s->priv_data;

    return ff_subtitles_queue_seek(&ctx->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* Free all packets still sitting in the subtitle queue. */
static int mpl2_read_close(AVFormatContext *s)
{
    MPL2Context *ctx = s->priv_data;

    ff_subtitles_queue_clean(&ctx->q);
    return 0;
}
 
/* Demuxer registration for MPL2 subtitle files (.txt / .mpl2). */
AVInputFormat ff_mpl2_demuxer = {
    .name           = "mpl2",
    .long_name      = NULL_IF_CONFIG_SMALL("MPL2 subtitles"),
    .priv_data_size = sizeof(MPL2Context),
    .read_probe     = mpl2_probe,
    .read_header    = mpl2_read_header,
    .read_packet    = mpl2_read_packet,
    .read_seek2     = mpl2_read_seek,
    .read_close     = mpl2_read_close,
    .extensions     = "txt,mpl2",
};
/contrib/sdk/sources/ffmpeg/libavformat/mpsubdec.c
0,0 → 1,144
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* MPlayer subtitles format demuxer
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
 
typedef struct {
FFDemuxSubtitlesQueue q;
} MPSubContext;
 
/**
 * Probe for MPlayer subtitle files by looking for a "FORMAT=" line.
 * "FORMAT=TIME" (time-based cues) scores higher than a bare "FORMAT="
 * (frame-based cues, weaker evidence).
 *
 * Fix: guard each memcmp() with the number of bytes actually remaining
 * in the probe buffer — the old code could compare up to 11 bytes past
 * buf_size when a candidate line started near the end of the buffer,
 * relying solely on the probe-padding convention.
 *
 * @return an AVPROBE_SCORE_* value, or 0 if the buffer does not match
 */
static int mpsub_probe(AVProbeData *p)
{
    const char *ptr     = p->buf;
    const char *ptr_end = p->buf + p->buf_size;

    while (ptr < ptr_end) {
        int inc;
        int left = ptr_end - ptr;

        if (left >= 11 && !memcmp(ptr, "FORMAT=TIME", 11))
            return AVPROBE_SCORE_EXTENSION;
        if (left >= 7 && !memcmp(ptr, "FORMAT=", 7))
            return AVPROBE_SCORE_EXTENSION / 3;
        inc = ff_subtitles_next_line(ptr);
        if (!inc)
            break;
        ptr += inc;
    }
    return 0;
}
 
/**
 * Parse a whole MPlayer subtitle file at open time. Timing is either
 * time-based (default, 1/100 s units) or frame-based when a
 * "FORMAT=<fps>" line is seen; cue lines are "<delay> <duration>"
 * pairs followed by the subtitle text chunk, accumulated into the
 * demuxer's packet queue.
 *
 * Fix: the avformat_new_stream() failure path used a bare
 * `return AVERROR(ENOMEM)`, leaking the AVBPrint work buffer; it now
 * routes through the common `end:` cleanup like the other error path.
 *
 * @return 0 on success, negative AVERROR code on failure
 */
static int mpsub_read_header(AVFormatContext *s)
{
    MPSubContext *mpsub = s->priv_data;
    AVStream *st;
    AVBPrint buf;
    AVRational pts_info = (AVRational){ 100, 1 }; // ts based by default
    int res = 0;
    float multiplier = 100.0;
    float current_pts = 0;

    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);

    while (!url_feof(s->pb)) {
        char line[1024];
        float start, duration;
        int fps, len = ff_get_line(s->pb, line, sizeof(line));

        if (!len)
            break;

        line[strcspn(line, "\r\n")] = 0;

        if (sscanf(line, "FORMAT=%d", &fps) == 1 && fps > 3 && fps < 100) {
            /* frame based timing: timestamps are frame counts */
            pts_info = (AVRational){ fps, 1 };
            multiplier = 1.0;
        } else if (sscanf(line, "%f %f", &start, &duration) == 2) {
            AVPacket *sub;
            const int64_t pos = avio_tell(s->pb);

            ff_subtitles_read_chunk(s->pb, &buf);
            if (buf.len) {
                sub = ff_subtitles_queue_insert(&mpsub->q, buf.str, buf.len, 0);
                if (!sub) {
                    res = AVERROR(ENOMEM);
                    goto end;
                }
                /* "start" is a delay relative to the end of the
                 * previous cue, hence the running current_pts */
                sub->pts = (int64_t)(current_pts + start*multiplier);
                sub->duration = (int)(duration * multiplier);
                current_pts += (start + duration) * multiplier;
                sub->pos = pos;
            }
        }
    }

    st = avformat_new_stream(s, NULL);
    if (!st) {
        /* was a bare return: that leaked the AVBPrint buffer */
        res = AVERROR(ENOMEM);
        goto end;
    }
    avpriv_set_pts_info(st, 64, pts_info.den, pts_info.num);
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_TEXT;

    ff_subtitles_queue_finalize(&mpsub->q);

end:
    av_bprint_finalize(&buf, NULL);
    return res;
}
 
/* Pop the next queued subtitle event (queue was filled in read_header). */
static int mpsub_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MPSubContext *ctx = s->priv_data;

    return ff_subtitles_queue_read_packet(&ctx->q, pkt);
}
 
/* Delegate seeking to the generic sorted-subtitle-queue helper. */
static int mpsub_read_seek(AVFormatContext *s, int stream_index,
                           int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    MPSubContext *ctx = s->priv_data;

    return ff_subtitles_queue_seek(&ctx->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* Free all packets still sitting in the subtitle queue. */
static int mpsub_read_close(AVFormatContext *s)
{
    MPSubContext *ctx = s->priv_data;

    ff_subtitles_queue_clean(&ctx->q);
    return 0;
}
 
/* Demuxer registration for MPlayer subtitle files (.sub). */
AVInputFormat ff_mpsub_demuxer = {
    .name           = "mpsub",
    .long_name      = NULL_IF_CONFIG_SMALL("MPlayer subtitles"),
    .priv_data_size = sizeof(MPSubContext),
    .read_probe     = mpsub_probe,
    .read_header    = mpsub_read_header,
    .read_packet    = mpsub_read_packet,
    .read_seek2     = mpsub_read_seek,
    .read_close     = mpsub_read_close,
    .extensions     = "sub",
};
/contrib/sdk/sources/ffmpeg/libavformat/msnwc_tcp.c
0,0 → 1,140
/*
* Copyright (C) 2008 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "internal.h"
 
#define HEADER_SIZE 24
 
/*
* Header structure:
* uint16_t ss; // struct size
* uint16_t width; // frame width
* uint16_t height; // frame height
* uint16_t ff; // keyframe + some other info(???)
* uint32_t size; // size of data
* uint32_t fourcc; // ML20
* uint32_t u3; // ?
* uint32_t ts; // time
*/
 
/* Slide a window over the probe buffer looking for a frame header:
 * struct size == HEADER_SIZE, one of the two known webcam resolutions,
 * and the ML20 fourcc. Score depends on where the header was found. */
static int msnwc_tcp_probe(AVProbeData *p)
{
    int i;

    for (i = 0; i + HEADER_SIZE <= p->buf_size; i++) {
        const uint8_t *hdr = p->buf + i;
        uint16_t w, h;

        if (bytestream_get_le16(&hdr) != HEADER_SIZE)
            continue;
        w = bytestream_get_le16(&hdr);
        h = bytestream_get_le16(&hdr);
        /* only 320x240 and 160x120 streams exist */
        if (!(w == 320 && h == 240) && !(w == 160 && h == 120))
            continue;
        hdr += 2;                               /* keyframe flags */
        hdr += 4;                               /* data size */
        if (bytestream_get_le32(&hdr) != MKTAG('M', 'L', '2', '0'))
            continue;

        if (!i)                                 /* header right at start */
            return AVPROBE_SCORE_MAX;
        /* i < 14: preceded by SwitchBoard connection info;
         * larger offsets mean we landed mid-stream */
        return i < 14 ? AVPROBE_SCORE_MAX / 2 : AVPROBE_SCORE_MAX / 3;
    }

    return -1;
}
 
/* Create the single Mimic video stream (ms timebase) and position the
 * byte stream on the first frame header. */
static int msnwc_tcp_read_header(AVFormatContext *ctx)
{
    AVIOContext *pb = ctx->pb;
    AVStream *st = avformat_new_stream(ctx, NULL);
    AVCodecContext *codec;

    if (!st)
        return AVERROR(ENOMEM);

    codec             = st->codec;
    codec->codec_type = AVMEDIA_TYPE_VIDEO;
    codec->codec_id   = AV_CODEC_ID_MIMIC;
    codec->codec_tag  = MKTAG('M', 'L', '2', '0');

    avpriv_set_pts_info(st, 32, 1, 1000);

    /* Some files start with "connected\r\n\r\n"; scan byte by byte until
     * the first byte of the struct-size field (HEADER_SIZE) shows up. */
    for (;;) {
        if (avio_r8(pb) == HEADER_SIZE)
            break;
        if (url_feof(pb)) {
            av_log(ctx, AV_LOG_ERROR, "Could not find valid start.\n");
            return -1;
        }
    }

    return 0;
}
 
/* Read one frame: the header fields must be consumed in exact stream
 * order (see the header layout comment above), then the payload, then
 * one byte of the NEXT header is pre-read to mirror read_header's
 * positioning. Returns HEADER_SIZE + payload size, or -1 on error. */
static int msnwc_tcp_read_packet(AVFormatContext *ctx, AVPacket *pkt)
{
    AVIOContext *pb = ctx->pb;
    uint16_t keyframe;
    uint32_t size, timestamp;

    avio_skip(pb, 1); /* one byte has been read ahead */
    avio_skip(pb, 2); /* width */
    avio_skip(pb, 2); /* height */
    keyframe = avio_rl16(pb);
    size = avio_rl32(pb);
    avio_skip(pb, 4); /* fourcc */
    avio_skip(pb, 4); /* u3 (unknown) */
    timestamp = avio_rl32(pb);

    if(!size || av_get_packet(pb, pkt, size) != size)
        return -1;

    avio_skip(pb, 1); /* Read ahead one byte of struct size like read_header */

    pkt->pts = timestamp;
    pkt->dts = timestamp;
    pkt->stream_index = 0;

    /* Some aMsn generated videos (or was it Mercury Messenger?) don't set
     * this bit and rely on the codec to get keyframe information */
    if(keyframe&1)
        pkt->flags |= AV_PKT_FLAG_KEY;

    return HEADER_SIZE + size;
}
 
/* Demuxer registration for MSN webcam TCP streams (no priv data needed). */
AVInputFormat ff_msnwc_tcp_demuxer = {
    .name        = "msnwctcp",
    .long_name   = NULL_IF_CONFIG_SMALL("MSN TCP Webcam stream"),
    .read_probe  = msnwc_tcp_probe,
    .read_header = msnwc_tcp_read_header,
    .read_packet = msnwc_tcp_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/mtv.c
0,0 → 1,203
/*
* mtv demuxer
* Copyright (c) 2006 Reynaldo H. Verdejo Pinochet
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* MTV demuxer.
*/
 
#include "libavutil/bswap.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define MTV_ASUBCHUNK_DATA_SIZE 500
#define MTV_HEADER_SIZE 512
#define MTV_AUDIO_PADDING_SIZE 12
#define AUDIO_SAMPLING_RATE 44100
 
/* Per-file state parsed from the fixed 512-byte MTV header; the derived
 * full_segment_size/video_fps values drive audio/video interleaving in
 * mtv_read_packet(). */
typedef struct MTVDemuxContext {

    unsigned int file_size;         ///< filesize, not always right
    unsigned int segments;          ///< number of 512 byte segments
    unsigned int audio_identifier;  ///< 'MP3' on all files I have seen
    unsigned int audio_br;          ///< bitrate of audio channel (mp3)
    unsigned int img_colorfmt;      ///< frame colorfmt rgb 565/555
    unsigned int img_bpp;           ///< frame bits per pixel
    unsigned int img_width;
    unsigned int img_height;
    unsigned int img_segment_size;  ///< size of image segment
    unsigned int video_fps;         ///< derived: (audio_br/4)/audio_subsegments
    unsigned int full_segment_size; ///< derived: audio subchunks + one image

} MTVDemuxContext;
 
/* Probe: 'AMV' magic plus sanity checks on the header's bpp, width,
 * height and image-size fields (byte offsets 51..57). */
static int mtv_probe(AVProbeData *p)
{
    /* Magic is 'AMV' */
    if (*p->buf != 'A' || *(p->buf + 1) != 'M' || *(p->buf + 2) != 'V')
        return 0;

    /* Check for nonzero in bpp and (width|height) header fields.
     * NB: '|' binds tighter than '&&', so this parses as
     * bpp && (width | height), which is exactly the intended test. */
    if(p->buf_size < 57 || !(p->buf[51] && AV_RL16(&p->buf[52]) | AV_RL16(&p->buf[54])))
        return 0;

    /* If width or height are 0 then imagesize header field should not */
    if(!AV_RL16(&p->buf[52]) || !AV_RL16(&p->buf[54]))
    {
        /* zero dimensions are recoverable in read_header only if the
         * image segment size (offset 56) is present */
        if(!!AV_RL16(&p->buf[56]))
            return AVPROBE_SCORE_EXTENSION;
        else
            return 0;
    }

    if(p->buf[51] != 16)
        return AVPROBE_SCORE_EXTENSION / 2; // But we are going to assume 16bpp anyway ..

    return AVPROBE_SCORE_MAX;
}
 
/* Parse the fixed MTV header, reconstruct missing width/height from the
 * segment size if possible, and create the two streams: stream 0 is raw
 * RGB565 video, stream 1 is MP3 audio. */
static int mtv_read_header(AVFormatContext *s)
{
    MTVDemuxContext *mtv = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    unsigned int audio_subsegments;

    avio_skip(pb, 3);                       /* 'AMV' magic */
    mtv->file_size         = avio_rl32(pb);
    mtv->segments          = avio_rl32(pb);
    avio_skip(pb, 32);
    mtv->audio_identifier  = avio_rl24(pb);
    mtv->audio_br          = avio_rl16(pb);
    mtv->img_colorfmt      = avio_rl24(pb);
    mtv->img_bpp           = avio_r8(pb);
    mtv->img_width         = avio_rl16(pb);
    mtv->img_height        = avio_rl16(pb);
    mtv->img_segment_size  = avio_rl16(pb);

    /* Calculate width and height if missing from header:
     * segment_size = width * height * bytes_per_pixel */

    if(mtv->img_bpp>>3){
        if(!mtv->img_width && mtv->img_height)
            mtv->img_width=mtv->img_segment_size / (mtv->img_bpp>>3)
                            / mtv->img_height;

        if(!mtv->img_height && mtv->img_width)
            mtv->img_height=mtv->img_segment_size / (mtv->img_bpp>>3)
                            / mtv->img_width;
    }
    if(!mtv->img_height || !mtv->img_width || !mtv->img_segment_size){
        av_log(s, AV_LOG_ERROR, "width or height or segment_size is invalid and I cannot calculate them from other information\n");
        return AVERROR(EINVAL);
    }

    avio_skip(pb, 4);
    audio_subsegments = avio_rl16(pb);

    if (audio_subsegments == 0) {
        avpriv_request_sample(s, "MTV files without audio");
        return AVERROR_PATCHWELCOME;
    }

    /* one "full segment" = all audio sub-chunks (each padded) plus one
     * video frame; fps falls out of the audio rate per segment */
    mtv->full_segment_size =
        audio_subsegments * (MTV_AUDIO_PADDING_SIZE + MTV_ASUBCHUNK_DATA_SIZE) +
        mtv->img_segment_size;
    mtv->video_fps = (mtv->audio_br / 4) / audio_subsegments;

    // FIXME Add sanity check here

    // all systems go! init decoders

    // video - raw rgb565

    st = avformat_new_stream(s, NULL);
    if(!st)
        return AVERROR(ENOMEM);

    avpriv_set_pts_info(st, 64, 1, mtv->video_fps);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
    st->codec->pix_fmt = AV_PIX_FMT_RGB565BE;
    st->codec->width = mtv->img_width;
    st->codec->height = mtv->img_height;
    st->codec->sample_rate = mtv->video_fps; /* NOTE(review): sample_rate on a
                                              * video stream looks odd — presumably
                                              * consumed downstream as fps; confirm */
    /* tells the rawvideo decoder the frame is stored bottom-up.
     * NOTE(review): av_strdup does not add the usual extradata padding —
     * verify the rawvideo decoder never reads past extradata_size. */
    st->codec->extradata = av_strdup("BottomUp");
    st->codec->extradata_size = 9;

    // audio - mp3

    st = avformat_new_stream(s, NULL);
    if(!st)
        return AVERROR(ENOMEM);

    avpriv_set_pts_info(st, 64, 1, AUDIO_SAMPLING_RATE);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_MP3;
    st->codec->bit_rate = mtv->audio_br;
    st->need_parsing = AVSTREAM_PARSE_FULL;

    // Jump over header

    if(avio_seek(pb, MTV_HEADER_SIZE, SEEK_SET) != MTV_HEADER_SIZE)
        return AVERROR(EIO);

    return 0;

}
 
/* Emit the next chunk: the position within the current full segment
 * decides whether it is an audio sub-chunk (padded MP3 data, stream 1)
 * or the video image segment (stream 0). */
static int mtv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MTVDemuxContext *mtv = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret;

    if ((avio_tell(pb) - s->data_offset + mtv->img_segment_size) % mtv->full_segment_size) {
        /* audio sub-chunk: drop its padding, then read the payload */
        avio_skip(pb, MTV_AUDIO_PADDING_SIZE);

        ret = av_get_packet(pb, pkt, MTV_ASUBCHUNK_DATA_SIZE);
        if (ret < 0)
            return ret;

        /* report the position of the padded chunk start */
        pkt->pos         -= MTV_AUDIO_PADDING_SIZE;
        pkt->stream_index = 1;
        return ret;
    }

    /* video segment */
    ret = av_get_packet(pb, pkt, mtv->img_segment_size);
    if (ret < 0)
        return ret;

    pkt->stream_index = 0;
    return ret;
}
 
/* Demuxer registration for the MTV container. */
AVInputFormat ff_mtv_demuxer = {
    .name        = "mtv",
    .long_name   = NULL_IF_CONFIG_SMALL("MTV"),
    .priv_data_size = sizeof(MTVDemuxContext),
    .read_probe  = mtv_probe,
    .read_header = mtv_read_header,
    .read_packet = mtv_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/mux.c
0,0 → 1,871
/*
* muxing functions for use within FFmpeg
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "libavcodec/internal.h"
#include "libavcodec/bytestream.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "metadata.h"
#include "id3v2.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#include "riff.h"
#include "audiointerleave.h"
#include "url.h"
#include <stdarg.h>
#if CONFIG_NETWORK
#include "network.h"
#endif
 
#undef NDEBUG
#include <assert.h>
 
/**
* @file
* muxing functions for use within libavformat
*/
 
/* fraction handling */
 
/**
 * Initialize f = val + (num / den) + 0.5.
 *
 * 'num' is normalized so that it is such as 0 <= num < den.
 *
 * @param f   fractional number
 * @param val integer value
 * @param num must be >= 0
 * @param den must be >= 1
 */
static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
{
    num += den >> 1;      /* fold in the +0.5 rounding term */
    if (num >= den) {     /* carry whole units into the integer part */
        val += num / den;
        num %= den;
    }
    f->val = val;
    f->num = num;
    f->den = den;
}
 
/**
 * Fractional addition to f: f = f + (incr / f->den).
 *
 * @param f    fractional number
 * @param incr increment, can be positive or negative
 */
static void frac_add(AVFrac *f, int64_t incr)
{
    int64_t num = f->num + incr;
    const int64_t den = f->den;

    if (num < 0) {
        /* borrow from val; C division truncates toward zero, so a second
         * adjustment may be needed to keep 0 <= num < den */
        f->val += num / den;
        num    %= den;
        if (num < 0) {
            num += den;
            f->val--;
        }
    } else if (num >= den) {
        /* carry whole units into val */
        f->val += num / den;
        num    %= den;
    }
    f->num = num;
}
 
/* Pick a timebase for a stream: start from the sample rate (audio) or
 * the codec timebase (video), then strip small prime factors from the
 * numerator and scale up the denominator until the requested precision
 * (den/num ratio) is reached, capped at 2^24. */
AVRational ff_choose_timebase(AVFormatContext *s, AVStream *st, int min_precission)
{
    AVRational q;
    int j;

    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        q = (AVRational){1, st->codec->sample_rate};
    } else {
        q = st->codec->time_base;
    }
    /* j walks 2, 3, 5, 7, 9, 11, 13: divide out those factors while the
     * precision target is not yet met */
    for (j=2; j<14; j+= 1+(j>2))
        while (q.den / q.num < min_precission && q.num % j == 0)
            q.num /= j;
    while (q.den / q.num < min_precission && q.den < (1<<24))
        q.den <<= 1;

    return q;
}
 
/* Allocate an output AVFormatContext, resolving the output format from
 * (in priority order) the explicit oformat, the format name, or the
 * filename extension. On failure *avctx is NULL and a negative AVERROR
 * is returned; on success the caller owns the context. */
int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
                                   const char *format, const char *filename)
{
    AVFormatContext *s = avformat_alloc_context();
    int ret = 0;

    *avctx = NULL;
    if (!s)
        goto nomem;

    if (!oformat) {
        if (format) {
            /* explicit format name takes precedence over the filename */
            oformat = av_guess_format(format, NULL, NULL);
            if (!oformat) {
                av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
                ret = AVERROR(EINVAL);
                goto error;
            }
        } else {
            oformat = av_guess_format(NULL, filename, NULL);
            if (!oformat) {
                ret = AVERROR(EINVAL);
                av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
                       filename);
                goto error;
            }
        }
    }

    s->oformat = oformat;
    /* eagerly allocate muxer private data so option setting works before
     * avformat_write_header() */
    if (s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data)
            goto nomem;
        if (s->oformat->priv_class) {
            *(const AVClass**)s->priv_data= s->oformat->priv_class;
            av_opt_set_defaults(s->priv_data);
        }
    } else
        s->priv_data = NULL;

    if (filename)
        av_strlcpy(s->filename, filename, sizeof(s->filename));
    *avctx = s;
    return 0;
nomem:
    av_log(s, AV_LOG_ERROR, "Out of memory\n");
    ret = AVERROR(ENOMEM);
error:
    avformat_free_context(s);
    return ret;
}
 
#if FF_API_ALLOC_OUTPUT_CONTEXT
/* Deprecated wrapper kept for ABI compatibility: same as
 * avformat_alloc_output_context2() but loses the error code. */
AVFormatContext *avformat_alloc_output_context(const char *format,
                                               AVOutputFormat *oformat, const char *filename)
{
    AVFormatContext *avctx;
    int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
    return ret < 0 ? NULL : avctx;
}
#endif
 
/* Validate the stream's codec_tag against the muxer's codec_tag tables.
 * Returns 1 when the (tag, id) pair is acceptable, 0 when it must be
 * rejected (or replaced by the caller). */
static int validate_codec_tag(AVFormatContext *s, AVStream *st)
{
    const AVCodecTag *avctag;
    int n;
    enum AVCodecID id = AV_CODEC_ID_NONE;
    unsigned int tag  = 0;

    /**
     * Check that tag + id is in the table
     * If neither is in the table -> OK
     * If tag is in the table with another id -> FAIL
     * If id is in the table with another tag -> FAIL unless strict < normal
     */
    for (n = 0; s->oformat->codec_tag[n]; n++) {
        avctag = s->oformat->codec_tag[n];
        while (avctag->id != AV_CODEC_ID_NONE) {
            if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
                id = avctag->id;
                if (id == st->codec->codec_id)
                    return 1;   /* exact (tag, id) match */
            }
            if (avctag->id == st->codec->codec_id)
                tag = avctag->tag;
            avctag++;
        }
    }
    if (id != AV_CODEC_ID_NONE)
        return 0;               /* tag is known but maps to another codec */
    if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
        return 0;               /* codec has a canonical tag; enforce it */
    return 1;
}
 
 
/**
 * Apply options and run all pre-write sanity checks: at least one stream
 * (unless AVFMT_NOSTREAMS), audio sample rate set, video timebase and
 * dimensions set, stream/codec aspect ratios consistent, and codec tags
 * valid for the chosen container.
 *
 * Fix: the codec-level aspect-ratio guard tested
 * codec->sample_aspect_ratio.den twice; the first test is now .num, so a
 * codec SAR with a zero numerator no longer slips past the mismatch
 * check (matches the intent of the symmetric stream-level tests above it).
 *
 * @param options muxer options; consumed entries are removed, leftovers
 *                returned to the caller
 * @return 0 on success, negative AVERROR on failure
 */
static int init_muxer(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0, i;
    AVStream *st;
    AVDictionary *tmp = NULL;
    AVCodecContext *codec = NULL;
    AVOutputFormat *of = s->oformat;

    if (options)
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;
    if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
        (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
        goto fail;

    // some sanity checks
    if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) {
        av_log(s, AV_LOG_ERROR, "no streams\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    for (i = 0; i < s->nb_streams; i++) {
        st    = s->streams[i];
        codec = st->codec;

        switch (codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (codec->sample_rate <= 0) {
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if (!codec->block_align)
                codec->block_align = codec->channels *
                                     av_get_bits_per_sample(codec->codec_id) >> 3;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (codec->time_base.num <= 0 ||
                codec->time_base.den <= 0) { //FIXME audio too?
                av_log(s, AV_LOG_ERROR, "time base not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }

            if ((codec->width <= 0 || codec->height <= 0) &&
                !(of->flags & AVFMT_NODIMENSIONS)) {
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            /* only flag a SAR mismatch when it exceeds a small relative
             * tolerance and both SARs are fully specified */
            if (av_cmp_q(st->sample_aspect_ratio, codec->sample_aspect_ratio)
                && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
            ) {
                if (st->sample_aspect_ratio.num != 0 &&
                    st->sample_aspect_ratio.den != 0 &&
                    codec->sample_aspect_ratio.num != 0 &&   /* was: .den tested twice */
                    codec->sample_aspect_ratio.den != 0) {
                    av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
                                            "(%d/%d) and encoder layer (%d/%d)\n",
                           st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
                           codec->sample_aspect_ratio.num,
                           codec->sample_aspect_ratio.den);
                    ret = AVERROR(EINVAL);
                    goto fail;
                }
            }
            break;
        }

        if (of->codec_tag) {
            if (   codec->codec_tag
                && codec->codec_id == AV_CODEC_ID_RAWVIDEO
                && (   av_codec_get_tag(of->codec_tag, codec->codec_id) == 0
                    || av_codec_get_tag(of->codec_tag, codec->codec_id) == MKTAG('r', 'a', 'w', ' '))
                && !validate_codec_tag(s, st)) {
                // the current rawvideo encoding system ends up setting
                // the wrong codec_tag for avi/mov, we override it here
                codec->codec_tag = 0;
            }
            if (codec->codec_tag) {
                if (!validate_codec_tag(s, st)) {
                    char tagbuf[32], tagbuf2[32];
                    av_get_codec_tag_string(tagbuf, sizeof(tagbuf), codec->codec_tag);
                    av_get_codec_tag_string(tagbuf2, sizeof(tagbuf2), av_codec_get_tag(s->oformat->codec_tag, codec->codec_id));
                    av_log(s, AV_LOG_ERROR,
                           "Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
                           tagbuf, codec->codec_tag, codec->codec_id, tagbuf2);
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
            } else
                codec->codec_tag = av_codec_get_tag(of->codec_tag, codec->codec_id);
        }

        if (of->flags & AVFMT_GLOBALHEADER &&
            !(codec->flags & CODEC_FLAG_GLOBAL_HEADER))
            av_log(s, AV_LOG_WARNING,
                   "Codec for stream %d does not use global headers "
                   "but container format requires global headers\n", i);
    }

    /* priv_data may still be missing when the context was not created by
     * avformat_alloc_output_context2() */
    if (!s->priv_data && of->priv_data_size > 0) {
        s->priv_data = av_mallocz(of->priv_data_size);
        if (!s->priv_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (of->priv_class) {
            *(const AVClass **)s->priv_data = of->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
                goto fail;
        }
    }

    /* set muxer identification string */
    if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
        av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
    }

    if (options) {
        av_dict_free(options);
        *options = tmp;
    }

    return 0;

fail:
    av_dict_free(&tmp);
    return ret;
}
 
/* Give every stream a fractional PTS counter whose denominator encodes
 * one second in the stream timebase (audio: sample rate, video: codec
 * tick rate); streams of other types keep no counter. */
static int init_pts(AVFormatContext *s)
{
    int i;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        int64_t den;

        switch (st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            den = (int64_t)st->time_base.num * st->codec->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            den = (int64_t)st->time_base.num * st->codec->time_base.den;
            break;
        default:
            den = AV_NOPTS_VALUE;
            break;
        }

        if (den == AV_NOPTS_VALUE)
            continue;           /* no timing info for this stream type */
        if (den <= 0)
            return AVERROR_INVALIDDATA;
        frac_init(&st->pts, 0, 0, den);
    }

    return 0;
}
 
/**
 * Run pre-write checks, call the muxer's write_header callback, and
 * initialize per-stream PTS generation.
 *
 * Fix: the init_muxer() result was tested as a bare assignment
 * (`if (ret = ...)`); now an explicit `< 0` comparison — equivalent,
 * since init_muxer() returns 0 or a negative AVERROR, but unambiguous
 * and free of the -Wparentheses hazard.
 *
 * @param options muxer options; consumed entries are removed
 * @return 0 on success, negative AVERROR on failure
 */
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
{
    int ret;

    if ((ret = init_muxer(s, options)) < 0)
        return ret;

    if (s->oformat->write_header) {
        ret = s->oformat->write_header(s);
        /* surface I/O-layer errors the muxer did not itself report */
        if (ret >= 0 && s->pb && s->pb->error < 0)
            ret = s->pb->error;
        if (ret < 0)
            return ret;
    }

    if ((ret = init_pts(s)) < 0)
        return ret;

    /* resolve "auto" (-1): shift timestamps unless the container can
     * store negative timestamps or has none at all */
    if (s->avoid_negative_ts < 0) {
        if (s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS)) {
            s->avoid_negative_ts = 0;
        } else
            s->avoid_negative_ts = 1;
    }

    return 0;
}
 
//FIXME merge with compute_pkt_fields
/* Fill in / validate packet timing before muxing: derive a duration if
 * missing, synthesize pts/dts when encoders did not provide them,
 * reorder the dts through the B-frame delay buffer, and enforce
 * monotonically increasing dts per stream. */
static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    /* delay > 0 when the codec can emit B-frames, i.e. pts != dts */
    int delay = FFMAX(st->codec->has_b_frames, st->codec->max_b_frames > 0);
    int num, den, frame_size, i;

    av_dlog(s, "compute_pkt_fields2: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
            av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);

    /* duration field */
    if (pkt->duration == 0) {
        ff_compute_frame_duration(&num, &den, st, NULL, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
        }
    }

    /* without B-frames dts and pts coincide */
    if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0)
        pkt->pts = pkt->dts;

    //XXX/FIXME this is a temporary hack until all encoders output pts
    if ((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay) {
        static int warned;
        if (!warned) {
            av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
            warned = 1;
        }
        pkt->dts =
//        pkt->pts= st->cur_dts;
            pkt->pts = st->pts.val;
    }

    //calculate dts from pts: run the pts through a small sorting buffer
    //of size delay+1; the minimum that falls out is the dts
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
        st->pts_buffer[0] = pkt->pts;
        for (i = 1; i < delay + 1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            st->pts_buffer[i] = pkt->pts + (i - delay - 1) * pkt->duration;
        for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);

        pkt->dts = st->pts_buffer[0];
    }

    /* strict formats require strictly increasing dts; others allow equal */
    if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
        ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
          st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
        av_log(s, AV_LOG_ERROR,
               "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
               st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
        return AVERROR(EINVAL);
    }
    if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) {
        av_log(s, AV_LOG_ERROR, "pts (%s) < dts (%s) in stream %d\n",
               av_ts2str(pkt->pts), av_ts2str(pkt->dts), st->index);
        return AVERROR(EINVAL);
    }

    av_dlog(s, "av_write_frame: pts2:%s dts2:%s\n",
            av_ts2str(pkt->pts), av_ts2str(pkt->dts));
    st->cur_dts = pkt->dts;
    st->pts.val = pkt->dts;

    /* update pts: advance the per-stream fractional clock for the next packet */
    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 1);

        /* HACK/FIXME, we skip the initial 0 size packets as they are most
         * likely equal to the encoder delay, but it would be better if we
         * had the real timestamps from the encoder */
        if (frame_size >= 0 && (pkt->size || st->pts.num != st->pts.den >> 1 || st->pts.val)) {
            frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
        break;
    default:
        break;
    }
    return 0;
}
 
/**
 * Make timestamps non negative, move side data from payload to internal struct, call muxer, and restore
 * sidedata.
 *
 * FIXME: this function should NEVER get undefined pts/dts beside when the
 * AVFMT_NOTIMESTAMPS is set.
 * Those additional safety checks should be dropped once the correct checks
 * are set in the callers.
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, did_split;

    if (s->avoid_negative_ts > 0) {
        AVStream *st = s->streams[pkt->stream_index];
        int64_t offset = st->mux_ts_offset;

        /* first negative dts seen fixes the global shift for the file */
        if (pkt->dts < 0 && pkt->dts != AV_NOPTS_VALUE && !s->offset) {
            s->offset = -pkt->dts;
            s->offset_timebase = st->time_base;
        }

        /* lazily convert the global offset into this stream's timebase */
        if (s->offset && !offset) {
            offset = st->mux_ts_offset =
                av_rescale_q_rnd(s->offset,
                                 s->offset_timebase,
                                 st->time_base,
                                 AV_ROUND_UP);
        }

        if (pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;

        av_assert2(pkt->dts == AV_NOPTS_VALUE || pkt->dts >= 0);
    }

    /* side data is split out so muxers see a clean payload, and merged
     * back afterwards because the caller still owns the packet */
    did_split = av_packet_split_side_data(pkt);
    ret = s->oformat->write_packet(s, pkt);

    if (s->flush_packets && s->pb && ret >= 0 && s->flags & AVFMT_FLAG_FLUSH_PACKETS)
        avio_flush(s->pb);

    if (did_split)
        av_packet_merge_side_data(pkt);

    return ret;
}
 
/**
 * Write a packet to the muxer without interleaving.
 * A NULL packet is a flush request, honoured only when the muxer
 * advertises AVFMT_ALLOW_FLUSH (returns 1 otherwise).
 */
int av_write_frame(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    if (!pkt) {
        /* Flush request: only meaningful for muxers that support it. */
        if (!(s->oformat->flags & AVFMT_ALLOW_FLUSH))
            return 1;
        ret = s->oformat->write_packet(s, NULL);
        if (s->flush_packets && s->pb && s->pb->error >= 0)
            avio_flush(s->pb);
        /* Surface a pending I/O error if the muxer itself succeeded. */
        if (ret >= 0 && s->pb && s->pb->error < 0)
            ret = s->pb->error;
        return ret;
    }

    ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
    if (ret < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
        return ret;

    ret = write_packet(s, pkt);
    if (ret >= 0 && s->pb && s->pb->error < 0)
        ret = s->pb->error;
    if (ret >= 0)
        s->streams[pkt->stream_index]->nb_frames++;

    return ret;
}
 
/* Packet flag private to the interleaving code: marks the first packet of a
 * new chunk when chunked interleaving (max_chunk_size/duration) is active. */
#define CHUNK_START 0x1000

/**
 * Insert a copy of @p pkt into the muxer's interleaving buffer, keeping the
 * buffer ordered according to @p compare. Ownership of the payload moves to
 * the queued copy (the caller's buf/destruct references are cleared).
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
                             int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
{
    AVPacketList **next_point, *this_pktl;
    AVStream *st = s->streams[pkt->stream_index];
    int chunked  = s->max_chunk_size || s->max_chunk_duration;

    this_pktl = av_mallocz(sizeof(AVPacketList));
    if (!this_pktl)
        return AVERROR(ENOMEM);
    this_pktl->pkt = *pkt;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
    pkt->destruct = NULL;           // do not free original but only the copy
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    pkt->buf = NULL;
    av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-allocated memory
    /* NOTE: src == dst is deliberate — copying the side data onto itself
     * reallocates it so the queued copy owns its own side-data buffers. */
    av_copy_packet_side_data(&this_pktl->pkt, &this_pktl->pkt); // copy side data

    /* Packets of one stream are already in order, so start the insertion
     * search after the last queued packet of this stream (if any). */
    if (s->streams[pkt->stream_index]->last_in_packet_buffer) {
        next_point = &(st->last_in_packet_buffer->next);
    } else {
        next_point = &s->packet_buffer;
    }

    if (chunked) {
        uint64_t max = av_rescale_q_rnd(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base, AV_ROUND_UP);
        st->interleaver_chunk_size     += pkt->size;
        st->interleaver_chunk_duration += pkt->duration;
        if (   (s->max_chunk_size && st->interleaver_chunk_size > s->max_chunk_size)
            || (max && st->interleaver_chunk_duration > max)) {
            st->interleaver_chunk_size = 0;
            this_pktl->pkt.flags |= CHUNK_START;
            if (max && st->interleaver_chunk_duration > max) {
                /* Re-align the next chunk boundary near a multiple of max,
                 * biased by half a chunk for video to keep A/V staggered. */
                int64_t syncoffset = (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) * max / 2;
                int64_t syncto     = av_rescale(pkt->dts + syncoffset, 1, max) * max - syncoffset;

                st->interleaver_chunk_duration += (pkt->dts - syncto) / 8 - max;
            } else
                st->interleaver_chunk_duration = 0;
        }
    }
    if (*next_point) {
        /* Inside a chunk, packets stay in stream order; only a CHUNK_START
         * packet may be interleaved against the other streams. */
        if (chunked && !(this_pktl->pkt.flags & CHUNK_START))
            goto next_non_null;

        if (compare(s, &s->packet_buffer_end->pkt, pkt)) {
            /* Linear search for the first queued packet that must come
             * after the new one (skipping mid-chunk packets). */
            while (   *next_point
                   && ((chunked && !((*next_point)->pkt.flags & CHUNK_START))
                       || !compare(s, &(*next_point)->pkt, pkt)))
                next_point = &(*next_point)->next;
            if (*next_point)
                goto next_non_null;
        } else {
            /* New packet sorts after the current tail: append. */
            next_point = &(s->packet_buffer_end->next);
        }
    }
    av_assert1(!*next_point);

    s->packet_buffer_end = this_pktl;
next_non_null:

    this_pktl->next = *next_point;

    s->streams[pkt->stream_index]->last_in_packet_buffer =
        *next_point = this_pktl;
    return 0;
}
 
/**
 * Interleaving comparator: decide whether @p pkt must be output after
 * @p next, comparing dts values in each stream's own timebase.
 * When s->audio_preload is set and exactly one of the two packets is audio,
 * the audio timestamp is biased earlier by the preload amount.
 * @return 1 if pkt sorts after next, 0 otherwise
 *         (ties broken by stream index so the ordering is total)
 */
static int interleave_compare_dts(AVFormatContext *s, AVPacket *next,
                                  AVPacket *pkt)
{
    AVStream *st  = s->streams[pkt->stream_index];
    AVStream *st2 = s->streams[next->stream_index];
    int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
                             st->time_base);
    if (s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))) {
        /* Bias the audio side by the preload, in AV_TIME_BASE units. */
        int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
        int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
        if (ts == ts2) {
            /* Equal after (rounded) rescaling: redo the comparison exactly
             * via the cross-multiplied difference of both timestamps. */
            ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
               -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
            ts2=0;
        }
        comp= (ts>ts2) - (ts<ts2);
    }

    if (comp == 0)
        return pkt->stream_index < next->stream_index;
    return comp > 0;
}
 
/**
 * Interleave packets strictly by dts: queue incoming packets and release the
 * head of the buffer once every stream has data queued, or when flushing.
 * @param out   filled with the next packet to mux, if any
 * @param pkt   new packet to queue, or NULL
 * @param flush if nonzero, drain the buffer even if some streams are starved
 * @return 1 if *out holds a packet, 0 if nothing can be output yet, <0 on error
 */
int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
                                 AVPacket *pkt, int flush)
{
    AVPacketList *pktl;
    int stream_count = 0, noninterleaved_count = 0;
    int64_t delta_dts_max = 0;
    int i, ret;

    if (pkt) {
        ret = ff_interleave_add_packet(s, pkt, interleave_compare_dts);
        if (ret < 0)
            return ret;
    }

    /* Count streams with buffered data; empty subtitle streams are counted
     * separately since they may legitimately stay silent for a long time. */
    for (i = 0; i < s->nb_streams; i++) {
        if (s->streams[i]->last_in_packet_buffer) {
            ++stream_count;
        } else if (s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
            ++noninterleaved_count;
        }
    }

    if (s->nb_streams == stream_count) {
        flush = 1;
    } else if (!flush) {
        /* Measure how far the buffered streams run ahead of the queue head;
         * if only subtitle streams are missing and the spread exceeds 20 s,
         * flush anyway to bound memory usage. */
        for (i=0; i < s->nb_streams; i++) {
            if (s->streams[i]->last_in_packet_buffer) {
                int64_t delta_dts =
                    av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
                                 s->streams[i]->time_base,
                                 AV_TIME_BASE_Q) -
                    av_rescale_q(s->packet_buffer->pkt.dts,
                                 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
                                 AV_TIME_BASE_Q);
                delta_dts_max= FFMAX(delta_dts_max, delta_dts);
            }
        }
        if (s->nb_streams == stream_count+noninterleaved_count &&
            delta_dts_max > 20*AV_TIME_BASE) {
            av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
            flush = 1;
        }
    }
    if (stream_count && flush) {
        AVStream *st;
        /* Pop the head of the buffer (lowest dts) and hand it out. */
        pktl = s->packet_buffer;
        *out = pktl->pkt;
        st   = s->streams[out->stream_index];

        s->packet_buffer = pktl->next;
        if (!s->packet_buffer)
            s->packet_buffer_end = NULL;

        if (st->last_in_packet_buffer == pktl)
            st->last_in_packet_buffer = NULL;
        av_freep(&pktl);

        return 1;
    } else {
        av_init_packet(out);
        return 0;
    }
}
 
/**
 * Interleave an AVPacket correctly so it can be muxed.
 * @param out the interleaved packet will be output here
 * @param in the input packet; consumed (freed) when the muxer provides its
 *           own interleaving callback
 * @param flush 1 if no further packets are available as input and all
 *              remaining packets should be output
 * @return 1 if a packet was output, 0 if no packet could be output,
 *         < 0 if an error occurred
 */
static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush)
{
    int ret;

    /* Default path: generic dts-based interleaving. */
    if (!s->oformat->interleave_packet)
        return ff_interleave_packet_per_dts(s, out, in, flush);

    /* Muxer-specific interleaving takes ownership of the input packet. */
    ret = s->oformat->interleave_packet(s, out, in, flush);
    if (in)
        av_free_packet(in);
    return ret;
}
 
/**
 * Write a packet through the interleaving queue. A NULL packet drains the
 * queue. Returns 0 on success, a negative AVERROR otherwise.
 */
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
{
    int ret, flush = 0;

    if (pkt) {
        AVStream *st = s->streams[pkt->stream_index];

        //FIXME/XXX/HACK drop zero sized packets
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size == 0)
            return 0;

        av_dlog(s, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
                pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
        if ((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
            return ret;

        /* dts-based interleaving cannot work without a valid dts. */
        if (pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
            return AVERROR(EINVAL);
    } else {
        /* NULL packet: flush everything still held by the interleaver. */
        av_dlog(s, "av_interleaved_write_frame FLUSH\n");
        flush = 1;
    }

    /* Feed the packet to the interleaver once (pkt is consumed on the first
     * iteration), then keep writing whatever it is ready to emit. */
    for (;; ) {
        AVPacket opkt;
        int ret = interleave_packet(s, &opkt, pkt, flush);
        if (ret <= 0) //FIXME cleanup needed for ret<0 ?
            return ret;

        ret = write_packet(s, &opkt);
        if (ret >= 0)
            s->streams[opkt.stream_index]->nb_frames++;

        av_free_packet(&opkt);
        pkt = NULL;

        if (ret < 0)
            return ret;
        if(s->pb && s->pb->error)
            return s->pb->error;
    }
}
 
/**
 * Drain the interleaving queue, write the muxer trailer and free per-stream
 * muxer state. Must be called exactly once after the last packet.
 */
int av_write_trailer(AVFormatContext *s)
{
    int ret, i;

    /* Flush everything still buffered in the interleaver. */
    for (;; ) {
        AVPacket pkt;
        ret = interleave_packet(s, &pkt, NULL, 1);
        if (ret < 0) //FIXME cleanup needed for ret<0 ?
            goto fail;
        if (!ret)
            break;

        ret = write_packet(s, &pkt);
        if (ret >= 0)
            s->streams[pkt.stream_index]->nb_frames++;

        av_free_packet(&pkt);

        if (ret < 0)
            goto fail;
        if(s->pb && s->pb->error)
            goto fail;
    }

    if (s->oformat->write_trailer)
        ret = s->oformat->write_trailer(s);

fail:
    /* Always flush, surface any pending I/O error and release per-stream
     * private data, regardless of success. */
    if (s->pb)
        avio_flush(s->pb);
    if (ret == 0)
        ret = s->pb ? s->pb->error : 0;
    for (i = 0; i < s->nb_streams; i++) {
        av_freep(&s->streams[i]->priv_data);
        av_freep(&s->streams[i]->index_entries);
    }
    if (s->oformat->priv_class)
        av_opt_free(s->priv_data);
    av_freep(&s->priv_data);
    return ret;
}
 
/**
 * Query the muxer for its current output dts and the matching wallclock
 * time; only available when the muxer implements get_output_timestamp.
 */
int av_get_output_timestamp(struct AVFormatContext *s, int stream,
                            int64_t *dts, int64_t *wall)
{
    if (s->oformat && s->oformat->get_output_timestamp) {
        s->oformat->get_output_timestamp(s, stream, dts, wall);
        return 0;
    }
    return AVERROR(ENOSYS);
}
 
/**
 * Forward a packet from a source demuxer context to a chained muxer,
 * rescaling pts/dts/duration from the source stream's timebase into the
 * destination stream's timebase. The caller's packet is left untouched.
 */
int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
                     AVFormatContext *src)
{
    AVPacket local_pkt = *pkt;
    AVRational src_tb  = src->streams[pkt->stream_index]->time_base;
    AVRational dst_tb  = dst->streams[dst_stream]->time_base;

    local_pkt.stream_index = dst_stream;

    if (pkt->pts != AV_NOPTS_VALUE)
        local_pkt.pts = av_rescale_q(pkt->pts, src_tb, dst_tb);
    if (pkt->dts != AV_NOPTS_VALUE)
        local_pkt.dts = av_rescale_q(pkt->dts, src_tb, dst_tb);
    if (pkt->duration)
        local_pkt.duration = av_rescale_q(pkt->duration, src_tb, dst_tb);

    return av_write_frame(dst, &local_pkt);
}
/contrib/sdk/sources/ffmpeg/libavformat/mvdec.c
0,0 → 1,441
/*
* Silicon Graphics Movie demuxer
* Copyright (c) 2012 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Silicon Graphics Movie demuxer
*/
 
#include "libavutil/eval.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/rational.h"
#include "avformat.h"
#include "internal.h"
 
/** Demuxer state for the Silicon Graphics Movie (.mv) format. */
typedef struct {
    int nb_video_tracks;  /**< video track count declared in the header */
    int nb_audio_tracks;  /**< audio track count declared in the header */

    int eof_count;      /**< number of streams that have finished */
    int stream_index;   /**< current stream index */
    int frame[2];       /**< frame nb for current stream */
} MvContext;
 
#define AUDIO_FORMAT_SIGNED 401
 
/** Probe for the "MOVI" magic followed by a 16-bit version below 3. */
static int mv_probe(AVProbeData *p)
{
    uint32_t tag     = AV_RB32(p->buf);
    int      version = AV_RB16(p->buf + 4);

    if (tag != MKBETAG('M', 'O', 'V', 'I') || version >= 3)
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/**
 * Read a size-byte string field, returning a freshly allocated, always
 * NUL-terminated copy (NULL on allocation failure). Any bytes after an
 * embedded NUL are consumed so the stream stays positioned after the field.
 */
static char *var_read_string(AVIOContext *pb, int size)
{
    int consumed;
    char *str = av_malloc(size + 1);

    if (!str)
        return NULL;

    consumed = avio_get_str(pb, size, str, size + 1);
    if (consumed < size)
        avio_skip(pb, size - consumed);

    return str;
}
 
/** Read a size-byte field and parse it as a decimal integer (0 on failure). */
static int var_read_int(AVIOContext *pb, int size)
{
    int v = 0;
    char *s = var_read_string(pb, size);

    if (s && sscanf(s, "%d", &v) != 1)
        v = 0;
    av_free(s);
    return v;
}
 
/**
 * Read a size-byte field, parse it as a floating-point number and return it
 * as the nearest rational ({0,0} if the field could not be read).
 */
static AVRational var_read_float(AVIOContext *pb, int size)
{
    AVRational result;
    char *s = var_read_string(pb, size);

    if (!s)
        return (AVRational){0, 0};

    result = av_d2q(av_strtod(s, NULL), INT_MAX);
    av_free(s);
    return result;
}
 
/** Read a size-byte field and store it under @p tag in the file metadata. */
static void var_read_metadata(AVFormatContext *avctx, const char *tag, int size)
{
    char *value = var_read_string(avctx->pb, size);

    if (!value)
        return;
    /* AV_DICT_DONT_STRDUP_VAL: the dictionary takes ownership of value. */
    av_dict_set(&avctx->metadata, tag, value, AV_DICT_DONT_STRDUP_VAL);
}
 
/**
 * Validate and apply a channel count; channel layout is derived as mono for
 * 1 channel and stereo otherwise.
 * @return 0 on success, AVERROR_INVALIDDATA on a non-positive count
 */
static int set_channels(AVFormatContext *avctx, AVStream *st, int channels)
{
    if (channels <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Channel count %d invalid\n", channels);
        return AVERROR_INVALIDDATA;
    }
    st->codec->channels       = channels;
    st->codec->channel_layout = channels == 1 ? AV_CH_LAYOUT_MONO
                                              : AV_CH_LAYOUT_STEREO;
    return 0;
}
 
/**
 * Parse global variable
 * @return < 0 if unknown
 */
static int parse_global_var(AVFormatContext *avctx, AVStream *st, const char *name, int size)
{
    MvContext *mv   = avctx->priv_data;
    AVIOContext *pb = avctx->pb;

    if (!strcmp(name, "__NUM_I_TRACKS"))
        mv->nb_video_tracks = var_read_int(pb, size);
    else if (!strcmp(name, "__NUM_A_TRACKS"))
        mv->nb_audio_tracks = var_read_int(pb, size);
    else if (!strcmp(name, "COMMENT") || !strcmp(name, "TITLE"))
        var_read_metadata(avctx, name, size);
    else if (!strcmp(name, "LOOP_MODE") || !strcmp(name, "NUM_LOOPS") ||
             !strcmp(name, "OPTIMIZED"))
        avio_skip(pb, size); /* recognized but deliberately ignored */
    else
        return -1;

    return 0;
}
 
/**
 * Parse audio variable
 * @return < 0 if unknown
 */
static int parse_audio_var(AVFormatContext *avctx, AVStream *st, const char *name, int size)
{
    AVIOContext *pb = avctx->pb;

    if (!strcmp(name, "__DIR_COUNT"))
        st->nb_frames = var_read_int(pb, size);
    else if (!strcmp(name, "AUDIO_FORMAT"))
        /* raw format value; translated to a real codec id by the caller */
        st->codec->codec_id = var_read_int(pb, size);
    else if (!strcmp(name, "COMPRESSION"))
        /* raw compression value, likewise resolved by the caller */
        st->codec->codec_tag = var_read_int(pb, size);
    else if (!strcmp(name, "DEFAULT_VOL"))
        var_read_metadata(avctx, name, size);
    else if (!strcmp(name, "NUM_CHANNELS"))
        return set_channels(avctx, st, var_read_int(pb, size));
    else if (!strcmp(name, "SAMPLE_RATE")) {
        st->codec->sample_rate = var_read_int(pb, size);
        avpriv_set_pts_info(st, 33, 1, st->codec->sample_rate);
    } else if (!strcmp(name, "SAMPLE_WIDTH"))
        /* stored in bytes, converted to bits */
        st->codec->bits_per_coded_sample = var_read_int(pb, size) * 8;
    else
        return -1;

    return 0;
}
 
/**
 * Parse video variable
 * @return < 0 if unknown, AVERROR code on allocation/read failure
 */
static int parse_video_var(AVFormatContext *avctx, AVStream *st, const char *name, int size)
{
    AVIOContext *pb = avctx->pb;
    if (!strcmp(name, "__DIR_COUNT")) {
        st->nb_frames = st->duration = var_read_int(pb, size);
    } else if (!strcmp(name, "COMPRESSION")) {
        char * str = var_read_string(pb, size);
        if (!str)
            return AVERROR_INVALIDDATA;
        if (!strcmp(str, "1")) {
            st->codec->codec_id = AV_CODEC_ID_MVC1;
        } else if (!strcmp(str, "2")) {
            st->codec->pix_fmt = AV_PIX_FMT_ABGR;
            st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
        } else if (!strcmp(str, "3")) {
            st->codec->codec_id = AV_CODEC_ID_SGIRLE;
        } else if (!strcmp(str, "10")) {
            st->codec->codec_id = AV_CODEC_ID_MJPEG;
        } else if (!strcmp(str, "MVC2")) {
            st->codec->codec_id = AV_CODEC_ID_MVC2;
        } else {
            avpriv_request_sample(avctx, "video compression %s", str);
        }
        av_free(str);
    } else if (!strcmp(name, "FPS")) {
        /* NOTE(review): fps.num/den of 0 would reach avpriv_set_pts_info
         * unchecked — presumably rejected there; verify against callers. */
        AVRational fps = var_read_float(pb, size);
        avpriv_set_pts_info(st, 64, fps.den, fps.num);
    } else if (!strcmp(name, "HEIGHT")) {
        st->codec->height = var_read_int(pb, size);
    } else if (!strcmp(name, "PIXEL_ASPECT")) {
        st->sample_aspect_ratio = var_read_float(pb, size);
        av_reduce(&st->sample_aspect_ratio.num, &st->sample_aspect_ratio.den,
                  st->sample_aspect_ratio.num, st->sample_aspect_ratio.den, INT_MAX);
    } else if (!strcmp(name, "WIDTH")) {
        st->codec->width = var_read_int(pb, size);
    } else if (!strcmp(name, "ORIENTATION")) {
        if (var_read_int(pb, size) == 1101) {
            /* Fix: check the allocation before publishing extradata_size,
             * otherwise a failed av_strdup() leaves extradata == NULL with
             * a nonzero size, inviting a NULL dereference downstream. */
            st->codec->extradata = av_strdup("BottomUp");
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            st->codec->extradata_size = 9;
        }
    } else if (!strcmp(name, "Q_SPATIAL") || !strcmp(name, "Q_TEMPORAL")) {
        var_read_metadata(avctx, name, size);
    } else if (!strcmp(name, "INTERLACING") || !strcmp(name, "PACKING")) {
        avio_skip(pb, size); // ignore
    } else
        return -1;
    return 0;
}
 
/**
 * Read a name/size variable table and dispatch each entry to @p parse;
 * entries the parser does not recognize are reported and skipped.
 */
static void read_table(AVFormatContext *avctx, AVStream *st, int (*parse)(AVFormatContext *avctx, AVStream *st, const char *name, int size))
{
    int count, i;
    AVIOContext *pb = avctx->pb;
    avio_skip(pb, 4);
    count = avio_rb32(pb);
    avio_skip(pb, 4);
    for (i = 0; i < count; i++) {
        char name[17];
        int size;
        /* Fix: stop on truncated input instead of handing the parser an
         * uninitialized (though NUL-terminated) name buffer. */
        if (avio_read(pb, name, 16) != 16)
            break;
        name[sizeof(name) - 1] = 0;
        size = avio_rb32(pb);
        if (parse(avctx, st, name, size) < 0) {
            avpriv_request_sample(avctx, "variable %s", name);
            avio_skip(pb, size);
        }
    }
}
 
/**
 * Build the seek index for one stream from the on-disk directory: audio
 * timestamps advance by sample count (16-bit samples assumed), video
 * timestamps by one per frame.
 */
static void read_index(AVIOContext *pb, AVStream *st)
{
    uint64_t timestamp = 0;
    int i;

    for (i = 0; i < st->nb_frames; i++) {
        uint32_t pos  = avio_rb32(pb);
        uint32_t size = avio_rb32(pb);

        avio_skip(pb, 8);
        av_add_index_entry(st, pos, timestamp, size, 0, AVINDEX_KEYFRAME);
        timestamp += st->codec->codec_type == AVMEDIA_TYPE_AUDIO
                     ? size / (st->codec->channels * 2)
                     : 1;
    }
}
 
/**
 * Read the SGI Movie header, create the audio/video streams and build the
 * per-stream packet index. Two layouts are handled: container version 2
 * (fixed binary layout) and the variable-table based "MOVIE 3" layout.
 */
static int mv_read_header(AVFormatContext *avctx)
{
    MvContext *mv = avctx->priv_data;
    AVIOContext *pb = avctx->pb;
    AVStream *ast = NULL, *vst = NULL; //initialization to suppress warning
    int version, i;

    avio_skip(pb, 4);   /* "MOVI" magic, already validated by mv_probe() */

    version = avio_rb16(pb);
    if (version == 2) {
        uint64_t timestamp;
        int v;
        avio_skip(pb, 22);

        /* allocate audio track first to prevent unnecessary seeking
           (audio packet always precede video packet for a given frame) */
        ast = avformat_new_stream(avctx, NULL);
        if (!ast)
            return AVERROR(ENOMEM);

        vst = avformat_new_stream(avctx, NULL);
        if (!vst)
            return AVERROR(ENOMEM);
        vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        avpriv_set_pts_info(vst, 64, 1, 15);   /* fixed 15 fps for version 2 */
        vst->nb_frames = avio_rb32(pb);
        v = avio_rb32(pb);
        switch (v) {
        case 1:
            vst->codec->codec_id = AV_CODEC_ID_MVC1;
            break;
        case 2:
            vst->codec->pix_fmt = AV_PIX_FMT_ARGB;
            vst->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
            break;
        default:
            avpriv_request_sample(avctx, "video compression %i", v);
            break;
        }
        vst->codec->codec_tag = 0;
        vst->codec->width = avio_rb32(pb);
        vst->codec->height = avio_rb32(pb);
        avio_skip(pb, 12);

        ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        ast->nb_frames = vst->nb_frames;
        ast->codec->sample_rate = avio_rb32(pb);
        avpriv_set_pts_info(ast, 33, 1, ast->codec->sample_rate);
        if (set_channels(avctx, ast, avio_rb32(pb)) < 0)
            return AVERROR_INVALIDDATA;

        v = avio_rb32(pb);
        if (v == AUDIO_FORMAT_SIGNED) {
            ast->codec->codec_id = AV_CODEC_ID_PCM_S16BE;
        } else {
            avpriv_request_sample(avctx, "audio compression (format %i)", v);
        }

        avio_skip(pb, 12);
        var_read_metadata(avctx, "title", 0x80);
        var_read_metadata(avctx, "comment", 0x100);
        avio_skip(pb, 0x80);

        /* Each directory entry covers one frame: the audio chunk followed
         * by the video chunk at pos + asize. Audio timestamps advance by
         * the number of 16-bit samples in the chunk. */
        timestamp = 0;
        for (i = 0; i < vst->nb_frames; i++) {
            uint32_t pos = avio_rb32(pb);
            uint32_t asize = avio_rb32(pb);
            uint32_t vsize = avio_rb32(pb);
            avio_skip(pb, 8);
            av_add_index_entry(ast, pos, timestamp, asize, 0, AVINDEX_KEYFRAME);
            av_add_index_entry(vst, pos + asize, i, vsize, 0, AVINDEX_KEYFRAME);
            timestamp += asize / (ast->codec->channels * 2);
        }
    } else if (!version && avio_rb16(pb) == 3) {
        /* "MOVIE 3": stream parameters come from name/value tables. */
        avio_skip(pb, 4);

        read_table(avctx, NULL, parse_global_var);

        if (mv->nb_audio_tracks > 1) {
            avpriv_request_sample(avctx, "multiple audio streams support");
            return AVERROR_PATCHWELCOME;
        } else if (mv->nb_audio_tracks) {
            ast = avformat_new_stream(avctx, NULL);
            if (!ast)
                return AVERROR(ENOMEM);
            ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            /* temporarily store compression value in codec_tag; format value in codec_id */
            read_table(avctx, ast, parse_audio_var);
            if (ast->codec->codec_tag == 100 && ast->codec->codec_id == AUDIO_FORMAT_SIGNED && ast->codec->bits_per_coded_sample == 16) {
                ast->codec->codec_id = AV_CODEC_ID_PCM_S16BE;
            } else {
                avpriv_request_sample(avctx, "audio compression %i (format %i, width %i)",
                                      ast->codec->codec_tag, ast->codec->codec_id, ast->codec->bits_per_coded_sample);
                ast->codec->codec_id = AV_CODEC_ID_NONE;
            }
            ast->codec->codec_tag = 0;
            if (ast->codec->channels <= 0) {
                av_log(avctx, AV_LOG_ERROR, "No valid channel count found\n");
                return AVERROR_INVALIDDATA;
            }
        }

        if (mv->nb_video_tracks > 1) {
            avpriv_request_sample(avctx, "multiple video streams support");
            return AVERROR_PATCHWELCOME;
        } else if (mv->nb_video_tracks) {
            vst = avformat_new_stream(avctx, NULL);
            if (!vst)
                return AVERROR(ENOMEM);
            vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            read_table(avctx, vst, parse_video_var);
        }

        /* Index tables follow the variable tables, audio first. */
        if (mv->nb_audio_tracks)
            read_index(pb, ast);

        if (mv->nb_video_tracks)
            read_index(pb, vst);
    } else {
        avpriv_request_sample(avctx, "version %i", version);
        return AVERROR_PATCHWELCOME;
    }

    return 0;
}
 
/**
 * Demux one packet, cycling round-robin over the streams; each packet's
 * position and size come from the index built in mv_read_header().
 */
static int mv_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    MvContext *mv = avctx->priv_data;
    AVIOContext *pb = avctx->pb;
    AVStream *st = avctx->streams[mv->stream_index];
    const AVIndexEntry *index;
    int frame = mv->frame[mv->stream_index];
    int ret;
    uint64_t pos;

    if (frame < st->nb_index_entries) {
        index = &st->index_entries[frame];
        pos = avio_tell(pb);
        /* Position the IO context on the packet; seeking backwards is only
         * possible when the underlying protocol supports it. */
        if (index->pos > pos)
            avio_skip(pb, index->pos - pos);
        else if (index->pos < pos) {
            if (!pb->seekable)
                return AVERROR(EIO);
            ret = avio_seek(pb, index->pos, SEEK_SET);
            if (ret < 0)
                return ret;
        }
        ret = av_get_packet(pb, pkt, index->size);
        if (ret < 0)
            return ret;

        pkt->stream_index = mv->stream_index;
        pkt->pts = index->timestamp;
        pkt->flags |= AV_PKT_FLAG_KEY;

        mv->frame[mv->stream_index]++;
        mv->eof_count = 0;
    } else {
        /* This stream is exhausted; report EOF once every stream is dry. */
        mv->eof_count++;
        if (mv->eof_count >= avctx->nb_streams)
            return AVERROR_EOF;
    }

    /* Advance the round-robin cursor. */
    mv->stream_index++;
    if (mv->stream_index >= avctx->nb_streams)
        mv->stream_index = 0;

    return 0;
}
 
/**
 * Seek by timestamp: look the target up in the given stream's index and
 * reposition every stream's frame cursor to that entry (all streams share
 * the same frame numbering).
 */
static int mv_read_seek(AVFormatContext *avctx, int stream_index, int64_t timestamp, int flags)
{
    MvContext *mv = avctx->priv_data;
    AVStream *st  = avctx->streams[stream_index];
    int i, frame;

    /* frame- and byte-based seeking are not supported */
    if (flags & (AVSEEK_FLAG_FRAME | AVSEEK_FLAG_BYTE))
        return AVERROR(ENOSYS);

    if (!avctx->pb->seekable)
        return AVERROR(EIO);

    frame = av_index_search_timestamp(st, timestamp, flags);
    if (frame < 0)
        return -1;

    for (i = 0; i < avctx->nb_streams; i++)
        mv->frame[i] = frame;

    return 0;
}
 
/** Demuxer registration for the Silicon Graphics Movie (.mv) format. */
AVInputFormat ff_mv_demuxer = {
    .name           = "mv",
    .long_name      = NULL_IF_CONFIG_SMALL("Silicon Graphics Movie"),
    .priv_data_size = sizeof(MvContext),
    .read_probe     = mv_probe,
    .read_header    = mv_read_header,
    .read_packet    = mv_read_packet,
    .read_seek      = mv_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/mvi.c
0,0 → 1,143
/*
* Motion Pixels MVI Demuxer
* Copyright (c) 2008 Gregory Montoir (cyx@users.sourceforge.net)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "avformat.h"
#include "internal.h"
 
#define MVI_FRAC_BITS 10
 
#define MVI_AUDIO_STREAM_INDEX 0
#define MVI_VIDEO_STREAM_INDEX 1
 
/** Demuxer state for Motion Pixels MVI files. */
typedef struct MviDemuxContext {
    unsigned int (*get_int)(AVIOContext *); /**< reader for the per-frame video size field (avio_rl16 or avio_rl24) */
    uint32_t audio_data_size;    /**< total audio payload bytes in the file */
    uint64_t audio_size_counter; /**< audio scheduling accumulator, fixed point (MVI_FRAC_BITS) */
    uint64_t audio_frame_size;   /**< audio bytes per video frame, fixed point (MVI_FRAC_BITS) */
    int audio_size_left;         /**< audio bytes not yet demuxed */
    int video_frame_size;        /**< size of the pending video frame; 0 means a size field must be read next */
} MviDemuxContext;
 
/**
 * Read the MVI file header, create the audio (index 0) and video (index 1)
 * streams, and precompute the fixed-point audio chunking parameters used by
 * read_packet().
 */
static int read_header(AVFormatContext *s)
{
    MviDemuxContext *mvi = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *ast, *vst;
    unsigned int version, frames_count, msecs_per_frame, player_version;

    ast = avformat_new_stream(s, NULL);
    if (!ast)
        return AVERROR(ENOMEM);

    vst = avformat_new_stream(s, NULL);
    if (!vst)
        return AVERROR(ENOMEM);

    /* two bytes of codec extradata come straight from the header */
    if (ff_alloc_extradata(vst->codec, 2))
        return AVERROR(ENOMEM);

    version = avio_r8(pb);
    vst->codec->extradata[0] = avio_r8(pb);
    vst->codec->extradata[1] = avio_r8(pb);
    frames_count = avio_rl32(pb);
    msecs_per_frame = avio_rl32(pb);
    vst->codec->width = avio_rl16(pb);
    vst->codec->height = avio_rl16(pb);
    avio_r8(pb);                      /* unknown/reserved byte */
    ast->codec->sample_rate = avio_rl16(pb);
    mvi->audio_data_size = avio_rl32(pb);
    avio_r8(pb);                      /* unknown/reserved byte */
    player_version = avio_rl32(pb);
    avio_rl16(pb);                    /* unknown/reserved field */
    avio_r8(pb);                      /* unknown/reserved byte */

    if (frames_count == 0 || mvi->audio_data_size == 0)
        return AVERROR_INVALIDDATA;

    if (version != 7 || player_version > 213) {
        av_log(s, AV_LOG_ERROR, "unhandled version (%d,%d)\n", version, player_version);
        return AVERROR_INVALIDDATA;
    }

    avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
    ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    ast->codec->codec_id = AV_CODEC_ID_PCM_U8;
    ast->codec->channels = 1;
    ast->codec->channel_layout = AV_CH_LAYOUT_MONO;
    ast->codec->bits_per_coded_sample = 8;
    ast->codec->bit_rate = ast->codec->sample_rate * 8;

    avpriv_set_pts_info(vst, 64, msecs_per_frame, 1000000);
    vst->avg_frame_rate = av_inv_q(vst->time_base);
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id = AV_CODEC_ID_MOTIONPIXELS;

    /* small frames store the per-frame video size as 16 bits, larger as 24 */
    mvi->get_int = (vst->codec->width * vst->codec->height < (1 << 16)) ? avio_rl16 : avio_rl24;

    /* average audio bytes per video frame, as a MVI_FRAC_BITS fixed point */
    mvi->audio_frame_size = ((uint64_t)mvi->audio_data_size << MVI_FRAC_BITS) / frames_count;
    /* note: by C precedence this is 1 << (MVI_FRAC_BITS - 1), i.e. it
     * rejects budgets of half a byte per frame or less */
    if (mvi->audio_frame_size <= 1 << MVI_FRAC_BITS - 1) {
        av_log(s, AV_LOG_ERROR, "Invalid audio_data_size (%d) or frames_count (%d)\n",
               mvi->audio_data_size, frames_count);
        return AVERROR_INVALIDDATA;
    }

    mvi->audio_size_counter = (ast->codec->sample_rate * 830 / mvi->audio_frame_size - 1) * mvi->audio_frame_size;
    mvi->audio_size_left = mvi->audio_data_size;

    return 0;
}
 
/**
 * Demux one packet, alternating audio and video: when no video frame is
 * pending, read the next video size field and emit an audio chunk whose
 * size follows the fixed-point per-frame budget; otherwise emit the
 * pending video frame.
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, count;
    MviDemuxContext *mvi = s->priv_data;
    AVIOContext *pb = s->pb;

    if (mvi->video_frame_size == 0) {
        mvi->video_frame_size = (mvi->get_int)(pb);
        if (mvi->audio_size_left == 0)
            return AVERROR(EIO);
        /* round the fixed-point budget (+512 ≈ half a unit) to whole bytes */
        count = (mvi->audio_size_counter + mvi->audio_frame_size + 512) >> MVI_FRAC_BITS;
        if (count > mvi->audio_size_left)
            count = mvi->audio_size_left;
        if ((ret = av_get_packet(pb, pkt, count)) < 0)
            return ret;
        pkt->stream_index = MVI_AUDIO_STREAM_INDEX;
        mvi->audio_size_left -= count;
        /* carry the fractional remainder over to the next audio chunk */
        mvi->audio_size_counter += mvi->audio_frame_size - (count << MVI_FRAC_BITS);
    } else {
        if ((ret = av_get_packet(pb, pkt, mvi->video_frame_size)) < 0)
            return ret;
        pkt->stream_index = MVI_VIDEO_STREAM_INDEX;
        mvi->video_frame_size = 0;   /* next call reads a new size field */
    }
    return 0;
}
 
/** Demuxer registration for Motion Pixels MVI (no probe; matched by extension). */
AVInputFormat ff_mvi_demuxer = {
    .name           = "mvi",
    .long_name      = NULL_IF_CONFIG_SMALL("Motion Pixels MVI"),
    .priv_data_size = sizeof(MviDemuxContext),
    .read_header    = read_header,
    .read_packet    = read_packet,
    .extensions     = "mvi",
};
/contrib/sdk/sources/ffmpeg/libavformat/mxf.c
0,0 → 1,137
/*
* MXF
* Copyright (c) 2006 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/common.h"
#include "mxf.h"
 
/**
 * SMPTE RP224 http://www.smpte-ra.org/mdd/index.html
 * Data-definition UL → media type map; the second field is the number of
 * leading UL bytes that must match. The all-zero entry terminates the table
 * and doubles as the AVMEDIA_TYPE_DATA fallback.
 */
const MXFCodecUL ff_mxf_data_definition_uls[] = {
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x01,0x03,0x02,0x02,0x01,0x00,0x00,0x00 }, 13, AVMEDIA_TYPE_VIDEO },
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x01,0x03,0x02,0x02,0x02,0x00,0x00,0x00 }, 13, AVMEDIA_TYPE_AUDIO },
    { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },  0, AVMEDIA_TYPE_DATA },
};
 
/**
 * Essence-coding UL → codec id map; the second field is the number of
 * leading UL bytes that must match. Terminated by the all-zero entry.
 */
const MXFCodecUL ff_mxf_codec_uls[] = {
    /* PictureEssenceCoding */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x01,0x11,0x00 }, 14, AV_CODEC_ID_MPEG2VIDEO }, /* MP@ML Long GoP */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x01 }, 14, AV_CODEC_ID_MPEG2VIDEO }, /* D-10 50Mbps PAL */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x03,0x03,0x00 }, 14, AV_CODEC_ID_MPEG2VIDEO }, /* MP@HL Long GoP */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x04,0x02,0x00 }, 14, AV_CODEC_ID_MPEG2VIDEO }, /* 422P@HL I-Frame */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x20,0x02,0x03 }, 14, AV_CODEC_ID_MPEG4 }, /* XDCAM proxy_pal030926.mxf */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x01,0x02,0x00 }, 13, AV_CODEC_ID_DVVIDEO }, /* DV25 IEC PAL */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x07,0x04,0x01,0x02,0x02,0x03,0x01,0x01,0x00 }, 14, AV_CODEC_ID_JPEG2000 }, /* JPEG2000 Codestream */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x01,0x7F,0x00,0x00,0x00 }, 13, AV_CODEC_ID_RAWVIDEO }, /* Uncompressed */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x01,0x01,0x02,0x01,0x00 }, 15, AV_CODEC_ID_RAWVIDEO }, /* Uncompressed 422 8-bit */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x71,0x00,0x00,0x00 }, 13, AV_CODEC_ID_DNXHD }, /* SMPTE VC-3/DNxHD */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x03,0x02,0x00,0x00 }, 14, AV_CODEC_ID_DNXHD }, /* SMPTE VC-3/DNxHD */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x01,0x32,0x00,0x00 }, 14, AV_CODEC_ID_H264 }, /* H.264/MPEG-4 AVC Intra */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x01,0x01,0x02,0x02,0x01 }, 16, AV_CODEC_ID_V210 }, /* V210 */
    /* SoundEssenceCompression */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 }, 13, AV_CODEC_ID_PCM_S16LE }, /* Uncompressed */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x7F,0x00,0x00,0x00 }, 13, AV_CODEC_ID_PCM_S16LE },
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x07,0x04,0x02,0x02,0x01,0x7E,0x00,0x00,0x00 }, 13, AV_CODEC_ID_PCM_S16BE }, /* From Omneon MXF file */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x04,0x04,0x02,0x02,0x02,0x03,0x01,0x01,0x00 }, 15, AV_CODEC_ID_PCM_ALAW }, /* XDCAM Proxy C0023S01.mxf */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x01,0x00 }, 15, AV_CODEC_ID_AC3 },
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x05,0x00 }, 15, AV_CODEC_ID_MP2 }, /* MP2 or MP3 */
    //{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x1C,0x00 }, 15, AV_CODEC_ID_DOLBY_E }, /* Dolby-E */
    { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },  0, AV_CODEC_ID_NONE },
};
 
/** UL → pixel format map for uncompressed picture essence; zero-terminated. */
const MXFCodecUL ff_mxf_pixel_format_uls[] = {
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x01,0x01,0x02,0x01,0x01 }, 16, AV_PIX_FMT_UYVY422 },
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x01,0x01,0x02,0x01,0x02 }, 16, AV_PIX_FMT_YUYV422 },
    { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },  0, AV_PIX_FMT_NONE },
};
 
/* Pixel-layout descriptors: pairs of (component letter, bit depth) as
 * encoded in the RGBAPictureEssenceDescriptor PixelLayout item. */
static const struct {
    enum AVPixelFormat pix_fmt;
    const char data[16];
} ff_mxf_pixel_layouts[] = {
    /**
     * See SMPTE 377M E.2.46
     *
     * Note: Only RGB, palette based and "abnormal" YUV pixel formats like 4:2:2:4 go here.
     *       For regular YUV, use CDCIPictureEssenceDescriptor.
     *
     * Note: Do not use these for encoding descriptors for little-endian formats until we
     *       get samples or official word from SMPTE on how/if those can be encoded.
     */
    {AV_PIX_FMT_ABGR,    {'A', 8,  'B', 8,  'G', 8, 'R', 8                 }},
    {AV_PIX_FMT_ARGB,    {'A', 8,  'R', 8,  'G', 8, 'B', 8                 }},
    {AV_PIX_FMT_BGR24,   {'B', 8,  'G', 8,  'R', 8                         }},
    {AV_PIX_FMT_BGRA,    {'B', 8,  'G', 8,  'R', 8, 'A', 8                 }},
    {AV_PIX_FMT_RGB24,   {'R', 8,  'G', 8,  'B', 8                         }},
    {AV_PIX_FMT_RGB444BE,{'F', 4,  'R', 4,  'G', 4, 'B', 4                 }},
    {AV_PIX_FMT_RGB48BE, {'R', 8,  'r', 8,  'G', 8, 'g', 8, 'B', 8, 'b', 8 }},
    {AV_PIX_FMT_RGB48BE, {'R', 16, 'G', 16, 'B', 16                        }},
    {AV_PIX_FMT_RGB48LE, {'r', 8,  'R', 8,  'g', 8, 'G', 8, 'b', 8, 'B', 8 }},
    {AV_PIX_FMT_RGB555BE,{'F', 1,  'R', 5,  'G', 5, 'B', 5                 }},
    {AV_PIX_FMT_RGB565BE,{'R', 5,  'G', 6,  'B', 5                         }},
    {AV_PIX_FMT_RGBA,    {'R', 8,  'G', 8,  'B', 8, 'A', 8                 }},
    {AV_PIX_FMT_PAL8,    {'P', 8                                           }},
};

static const int num_pixel_layouts = FF_ARRAY_ELEMS(ff_mxf_pixel_layouts);
 
/**
 * Translate a 16-byte SMPTE 377M E.2.46 PixelLayout value into an
 * AVPixelFormat by exact comparison against the known layout table.
 *
 * @param pixel_layout 16-byte layout blob as read from the descriptor
 * @param pix_fmt      set to the matching pixel format on success,
 *                     left untouched otherwise
 * @return 0 on a match, -1 when the layout is unknown
 */
int ff_mxf_decode_pixel_layout(const char pixel_layout[16], enum AVPixelFormat *pix_fmt)
{
    int idx;

    for (idx = 0; idx < num_pixel_layouts; idx++) {
        if (memcmp(pixel_layout, ff_mxf_pixel_layouts[idx].data, 16))
            continue;
        *pix_fmt = ff_mxf_pixel_layouts[idx].pix_fmt;
        return 0;
    }

    return -1;
}
 
/* Audio samples per video frame for common frame rates (48 kHz audio).
 * Non-integer rates (NTSC) use a repeating 5-entry cadence; the list is
 * zero-terminated within the 6-slot array. */
static const MXFSamplesPerFrame mxf_samples_per_frames[] = {
    { { 1001, 24000 }, { 2002, 0,    0,    0,    0,    0 } }, // FILM 23.976
    { { 1,    24},     { 2000, 0,    0,    0,    0,    0 } }, // FILM 24
    { { 1001, 30000 }, { 1602, 1601, 1602, 1601, 1602, 0 } }, // NTSC 29.97
    { { 1001, 60000 }, { 801,  801,  801,  801,  800,  0 } }, // NTSC 59.94
    { { 1,    25 },    { 1920, 0,    0,    0,    0,    0 } }, // PAL 25
    { { 1,    50 },    { 960,  0,    0,    0,    0,    0 } }, // PAL 50
};
 
/**
 * Look up the audio-samples-per-frame cadence for a given time base.
 * First tries an exact rational match, then falls back to a fuzzy match
 * (within 1e-4) for approximate codec time bases such as 1/29.97.
 *
 * @param s         log context (only used for the fuzzy-match warning)
 * @param time_base frame duration as a rational
 * @return matching entry, or NULL if no container time base is close enough
 */
const MXFSamplesPerFrame *ff_mxf_get_samples_per_frame(AVFormatContext *s, AVRational time_base)
{
    const int nb_entries = FF_ARRAY_ELEMS(mxf_samples_per_frames);
    int k;

    /* pass 1: exact rational equality */
    for (k = 0; k < nb_entries; k++) {
        if (!av_cmp_q(mxf_samples_per_frames[k].time_base, time_base))
            return &mxf_samples_per_frames[k];
    }

    /* pass 2: closest container time base for approximate inputs */
    for (k = 0; k < nb_entries; k++) {
        const MXFSamplesPerFrame *spf = &mxf_samples_per_frames[k];

        if (fabs(av_q2d(spf->time_base) - av_q2d(time_base)) < 0.0001) {
            av_log(s, AV_LOG_WARNING, "%d/%d input time base matched %d/%d container time base\n",
                   time_base.num, time_base.den,
                   spf->time_base.num, spf->time_base.den);
            return spf;
        }
    }

    return NULL;
}
/contrib/sdk/sources/ffmpeg/libavformat/mxf.h
0,0 → 1,85
/*
* MXF
* Copyright (c) 2006 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_MXF_H
#define AVFORMAT_MXF_H
 
#include "avformat.h"
#include "libavcodec/avcodec.h"
#include <stdint.h>
 
/* A SMPTE Universal Label / unique identifier: 16 raw bytes. */
typedef uint8_t UID[16];

/* Metadata set types recognized by the demuxer/muxer; used to tag parsed
 * sets and to filter strong-reference resolution. */
enum MXFMetadataSetType {
    AnyType,            /* wildcard for lookups, never assigned to a set */
    MaterialPackage,
    SourcePackage,
    SourceClip,
    TimecodeComponent,
    Sequence,
    MultipleDescriptor,
    Descriptor,
    Track,
    CryptoContext,
    Preface,
    Identification,
    ContentStorage,
    SubDescriptor,
    IndexTableSegment,
    EssenceContainerData,
    TypeBottom,// add metadata type before this
};

/* FrameLayout values from the picture essence descriptor (SMPTE 377M). */
enum MXFFrameLayout {
    FullFrame = 0,
    SeparateFields,
    OneField,
    MixedFields,
    SegmentedFrame,
};

/* One KLV triplet: 16-byte key, file offset of the key, payload length. */
typedef struct KLVPacket {
    UID key;
    int64_t offset;
    uint64_t length;
} KLVPacket;

/* Maps a UL (compared over the first matching_len bytes, version byte
 * ignored) to a codec/pixel-format id. Tables end with a zero UID. */
typedef struct MXFCodecUL {
    UID uid;
    unsigned matching_len;
    int id;
} MXFCodecUL;

/* Audio sample cadence for one container time base; samples_per_frame
 * is a zero-terminated repeating pattern (see mxf_samples_per_frames). */
typedef struct {
    struct AVRational time_base;
    int samples_per_frame[6];
} MXFSamplesPerFrame;

extern const MXFCodecUL ff_mxf_data_definition_uls[];
extern const MXFCodecUL ff_mxf_codec_uls[];
extern const MXFCodecUL ff_mxf_pixel_format_uls[];

int ff_mxf_decode_pixel_layout(const char pixel_layout[16], enum AVPixelFormat *pix_fmt);
const MXFSamplesPerFrame *ff_mxf_get_samples_per_frame(AVFormatContext *s, AVRational time_base);

/* Debug helper: dump a 16-byte key with a label. */
#define PRINT_KEY(pc, s, x) av_dlog(pc, "%s %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", s, \
                             (x)[0], (x)[1], (x)[2], (x)[3], (x)[4], (x)[5], (x)[6], (x)[7], (x)[8], (x)[9], (x)[10], (x)[11], (x)[12], (x)[13], (x)[14], (x)[15])
 
#endif /* AVFORMAT_MXF_H */
/contrib/sdk/sources/ffmpeg/libavformat/mxfdec.c
0,0 → 1,2544
/*
* MXF demuxer.
* Copyright (c) 2006 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/*
* References
* SMPTE 336M KLV Data Encoding Protocol Using Key-Length-Value
* SMPTE 377M MXF File Format Specifications
* SMPTE 378M Operational Pattern 1a
* SMPTE 379M MXF Generic Container
* SMPTE 381M Mapping MPEG Streams into the MXF Generic Container
* SMPTE 382M Mapping AES3 and Broadcast Wave Audio into the MXF Generic Container
* SMPTE 383M Mapping DV-DIF Data to the MXF Generic Container
*
* Principle
* Search for Track numbers which will identify essence element KLV packets.
* Search for SourcePackage which define tracks which contains Track numbers.
* Material Package contains tracks with reference to SourcePackage tracks.
* Search for Descriptors (Picture, Sound) which contains codec info and parameters.
* Assign Descriptors to correct Tracks.
*
* Metadata reading functions read Local Tags, get InstanceUID(0x3C0A) then add MetaDataSet to MXFContext.
* Metadata parsing resolves Strong References to objects.
*
* Simple demuxer, only OP1A supported and some files might not work at all.
* Only tracks with associated descriptors will be decoded. "Highly Desirable" SMPTE 377M D.1
*/
 
#include "libavutil/aes.h"
#include "libavutil/avassert.h"
#include "libavutil/mathematics.h"
#include "libavcodec/bytestream.h"
#include "libavutil/timecode.h"
#include "avformat.h"
#include "internal.h"
#include "mxf.h"
 
/* Partition pack kinds (header/body/footer), from byte 13 of the pack key. */
typedef enum {
    Header,
    BodyPartition,
    Footer
} MXFPartitionType;

/* Operational patterns; value 0 means "not yet determined". */
typedef enum {
    OP1a = 1,
    OP1b,
    OP1c,
    OP2a,
    OP2b,
    OP2c,
    OP3a,
    OP3b,
    OP3c,
    OPAtom,
    OPSONYOpt,  /* FATE sample, violates the spec in places */
} MXFOP;

/* One parsed partition pack (SMPTE 377M 6.2). */
typedef struct {
    int closed;                     /* closed (vs open) partition flag */
    int complete;                   /* complete (vs incomplete) flag */
    MXFPartitionType type;
    uint64_t previous_partition;    /* offset of previous partition, 0 if none */
    int index_sid;
    int body_sid;
    int64_t this_partition;
    int64_t essence_offset;         ///< absolute offset of essence
    int64_t essence_length;
    int32_t kag_size;               /* KLV alignment grid size */
    int64_t header_byte_count;
    int64_t index_byte_count;
    int pack_length;                /* total byte length of the pack itself */
} MXFPartition;

/* Cryptographic context set (SMPTE 429-6 style encrypted essence). */
typedef struct {
    UID uid;
    enum MXFMetadataSetType type;
    UID source_container_ul;
} MXFCryptoContext;

/* SourceClip structural component: references a track in another package. */
typedef struct {
    UID uid;
    enum MXFMetadataSetType type;
    UID source_package_uid;         /* last 16 bytes of the UMID */
    UID data_definition_ul;
    int64_t duration;
    int64_t start_position;
    int source_track_id;
} MXFStructuralComponent;

/* Sequence: ordered list of structural components on a track. */
typedef struct {
    UID uid;
    enum MXFMetadataSetType type;
    UID data_definition_ul;         /* picture/sound/data classification */
    UID *structural_components_refs;
    int structural_components_count;
    int64_t duration;
} MXFSequence;

/* Timecode component: start timecode and rate for a timecode track. */
typedef struct {
    UID uid;
    enum MXFMetadataSetType type;
    int drop_frame;
    int start_frame;
    struct AVRational rate;
    AVTimecode tc;
} MXFTimecodeComponent;

/* One track of a package; doubles as AVStream priv_data. */
typedef struct {
    UID uid;
    enum MXFMetadataSetType type;
    MXFSequence *sequence; /* mandatory, and only one */
    UID sequence_ref;
    int track_id;
    uint8_t track_number[4];        /* matched against essence element keys */
    AVRational edit_rate;
    int intra_only;
    uint64_t sample_count;
    int64_t original_duration; ///< duration before multiplying st->duration by SampleRate/EditRate
} MXFTrack;

/* Essence descriptor (picture or sound); fields are filled from local tags. */
typedef struct {
    UID uid;
    enum MXFMetadataSetType type;
    UID essence_container_ul;
    UID essence_codec_ul;
    AVRational sample_rate;
    AVRational aspect_ratio;
    int width;
    int height; /* Field height, not frame height */
    int frame_layout; /* See MXFFrameLayout enum */
    int channels;
    int bits_per_sample;
    int field_dominance;
    unsigned int component_depth;
    unsigned int horiz_subsampling;
    unsigned int vert_subsampling;
    UID *sub_descriptors_refs;
    int sub_descriptors_count;
    int linked_track_id;
    uint8_t *extradata;
    int extradata_size;
    enum AVPixelFormat pix_fmt;
} MXFDescriptor;

/* One index table segment as stored in the file (SMPTE 377M 10). */
typedef struct {
    UID uid;
    enum MXFMetadataSetType type;
    int edit_unit_byte_count;       /* nonzero for CBR indexes */
    int index_sid;
    int body_sid;
    AVRational index_edit_rate;
    uint64_t index_start_position;
    uint64_t index_duration;
    int8_t *temporal_offset_entries;
    int *flag_entries;
    uint64_t *stream_offset_entries;
    int nb_index_entries;
} MXFIndexTableSegment;

/* Material or source package with its tracks and (single) descriptor. */
typedef struct {
    UID uid;
    enum MXFMetadataSetType type;
    UID package_uid;                /* last 16 bytes of the UMID */
    UID *tracks_refs;
    int tracks_count;
    MXFDescriptor *descriptor; /* only one */
    UID descriptor_ref;
} MXFPackage;

/* Common header shared by every parsed metadata set; concrete set structs
 * begin with these same two members so they can be downcast. */
typedef struct {
    UID uid;
    enum MXFMetadataSetType type;
} MXFMetadataSet;

/* decoded index table */
typedef struct {
    int index_sid;
    int body_sid;
    int nb_ptses;               /* number of PTSes or total duration of index */
    int64_t first_dts;          /* DTS = EditUnit + first_dts */
    int64_t *ptses;             /* maps EditUnit -> PTS */
    int nb_segments;
    MXFIndexTableSegment **segments;    /* sorted by IndexStartPosition */
    AVIndexEntry *fake_index;   /* used for calling ff_index_search_timestamp() */
} MXFIndexTable;

/* Demuxer state (AVFormatContext.priv_data). */
typedef struct {
    MXFPartition *partitions;       /* sorted by offset */
    unsigned partitions_count;
    MXFOP op;
    UID *packages_refs;
    int packages_count;
    MXFMetadataSet **metadata_sets;
    int metadata_sets_count;
    AVFormatContext *fc;
    struct AVAES *aesc;             /* AES context for encrypted essence */
    uint8_t *local_tags;            /* primer pack: local tag -> UL table */
    int local_tags_count;
    uint64_t last_partition;
    uint64_t footer_partition;
    KLVPacket current_klv_data;
    int current_klv_index;
    int run_in;                     /* bytes of run-in before the header partition */
    MXFPartition *current_partition;
    int parsing_backward;           /* set while following PreviousPartition links */
    int64_t last_forward_tell;
    int last_forward_partition;
    int current_edit_unit;
    int nb_index_tables;
    MXFIndexTable *index_tables;
    int edit_units_per_packet;      ///< how many edit units to read at a time (PCM, OPAtom)
} MXFContext;

enum MXFWrappingScheme {
    Frame,
    Clip,
};

/* NOTE: klv_offset is not set (-1) for local keys */
typedef int MXFMetadataReadFunc(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset);

/* Dispatch entry: KLV key -> parser, context allocation size, set type. */
typedef struct {
    const UID key;
    MXFMetadataReadFunc *read;
    int ctx_size;
    enum MXFMetadataSetType type;
} MXFMetadataReadTableEntry;

static int mxf_read_close(AVFormatContext *s);
 
/* partial keys to match (prefix comparison via IS_KLV_KEY) */
static const uint8_t mxf_header_partition_pack_key[]       = { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02 };
static const uint8_t mxf_essence_element_key[]             = { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01 };
static const uint8_t mxf_avid_essence_element_key[]        = { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0e,0x04,0x03,0x01 };
static const uint8_t mxf_system_item_key[]                 = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x03,0x01,0x04 };
static const uint8_t mxf_klv_key[]                         = { 0x06,0x0e,0x2b,0x34 };
/* complete keys to match */
static const uint8_t mxf_crypto_source_container_ul[]      = { 0x06,0x0e,0x2b,0x34,0x01,0x01,0x01,0x09,0x06,0x01,0x01,0x02,0x02,0x00,0x00,0x00 };
static const uint8_t mxf_encrypted_triplet_key[]           = { 0x06,0x0e,0x2b,0x34,0x02,0x04,0x01,0x07,0x0d,0x01,0x03,0x01,0x02,0x7e,0x01,0x00 };
static const uint8_t mxf_encrypted_essence_container[]     = { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x07,0x0d,0x01,0x03,0x01,0x02,0x0b,0x01,0x00 };
static const uint8_t mxf_random_index_pack_key[]           = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x11,0x01,0x00 };
static const uint8_t mxf_sony_mpeg4_extradata[]            = { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0e,0x06,0x06,0x02,0x02,0x01,0x00,0x00 };

/* Compare x against y over y's full (static) length. */
#define IS_KLV_KEY(x, y) (!memcmp(x, y, sizeof(y)))
 
/**
 * Decode a BER-coded length (SMPTE 336M / ASN.1 style) from pb.
 *
 * Short form: the byte itself is the length (high bit clear).
 * Long form: the low 7 bits give the number of following length octets.
 *
 * @return the decoded length, or AVERROR_INVALIDDATA when the long form
 *         announces more than the 8 octets SMPTE 379M 5.3.4 allows
 */
static int64_t klv_decode_ber_length(AVIOContext *pb)
{
    uint64_t len;
    int num_octets;

    len = avio_r8(pb);
    if (!(len & 0x80)) /* short form */
        return len;

    num_octets = len & 0x7f;
    /* SMPTE 379M 5.3.4 guarantee that bytes_num must not exceed 8 bytes */
    if (num_octets > 8)
        return AVERROR_INVALIDDATA;

    len = 0;
    while (num_octets-- > 0)
        len = (len << 8) | avio_r8(pb);
    return len;
}
 
/**
 * Scan forward until the first `size` bytes of `key` have been consumed
 * in order, resynchronizing on every occurrence of key[0].
 *
 * @return 1 when the prefix was found, 0 on EOF
 */
static int mxf_read_sync(AVIOContext *pb, const uint8_t *key, unsigned size)
{
    int matched = 0;

    while (matched < size && !url_feof(pb)) {
        int c = avio_r8(pb);

        /* key[0] always restarts the match — same precedence as the
         * original scanner, so repeated lead bytes behave identically */
        if (c == key[0])
            matched = 1;
        else if (c == key[matched])
            matched++;
        else
            matched = 0;
    }
    return matched == size;
}
 
/**
 * Read one KLV triplet header: resync on the 4-byte KLV prefix, then read
 * the remaining 12 key bytes and the BER-coded length.
 *
 * @param klv receives key, file offset of the key, and payload length
 * @return 0 on success, a negative AVERROR on sync failure or bad length
 */
static int klv_read_packet(KLVPacket *klv, AVIOContext *pb)
{
    int64_t length;

    if (!mxf_read_sync(pb, mxf_klv_key, 4))
        return AVERROR_INVALIDDATA;
    klv->offset = avio_tell(pb) - 4;
    memcpy(klv->key, mxf_klv_key, 4);
    avio_read(pb, klv->key + 4, 12);
    length = klv_decode_ber_length(pb);
    /* klv_decode_ber_length() signals failure with a negative value
     * (AVERROR_INVALIDDATA); the old "klv->length == -1" test on the
     * unsigned field only caught UINT64_MAX, so check the sign before
     * storing into the unsigned length. */
    if (length < 0)
        return AVERROR_INVALIDDATA;
    klv->length = length;
    return 0;
}
 
/**
 * Map an essence element KLV key to an AVStream index by comparing the
 * 4-byte track number embedded in the key (SMPTE 379M 7.3).
 *
 * @return stream index, or -1 when no track matches (except that a file
 *         with a single stream always maps to stream 0, for OPAtom files
 *         that store track number 0)
 */
static int mxf_get_stream_index(AVFormatContext *s, KLVPacket *klv)
{
    const uint8_t *track_num = klv->key + sizeof(mxf_essence_element_key);
    int idx;

    for (idx = 0; idx < s->nb_streams; idx++) {
        MXFTrack *track = s->streams[idx]->priv_data;

        /* SMPTE 379M 7.3 */
        if (!memcmp(track_num, track->track_number, sizeof(track->track_number)))
            return idx;
    }

    /* return 0 if only one stream, for OP Atom files with 0 as track number */
    return s->nb_streams == 1 ? 0 : -1;
}
 
/* XXX: use AVBitStreamFilter */
static int mxf_get_d10_aes3_packet(AVIOContext *pb, AVStream *st, AVPacket *pkt, int64_t length)
{
const uint8_t *buf_ptr, *end_ptr;
uint8_t *data_ptr;
int i;
 
if (length > 61444) /* worst case PAL 1920 samples 8 channels */
return AVERROR_INVALIDDATA;
length = av_get_packet(pb, pkt, length);
if (length < 0)
return length;
data_ptr = pkt->data;
end_ptr = pkt->data + length;
buf_ptr = pkt->data + 4; /* skip SMPTE 331M header */
for (; buf_ptr + st->codec->channels*4 <= end_ptr; ) {
for (i = 0; i < st->codec->channels; i++) {
uint32_t sample = bytestream_get_le32(&buf_ptr);
if (st->codec->bits_per_coded_sample == 24)
bytestream_put_le24(&data_ptr, (sample >> 4) & 0xffffff);
else
bytestream_put_le16(&data_ptr, (sample >> 12) & 0xffff);
}
buf_ptr += 32 - st->codec->channels*4; // always 8 channels stored SMPTE 331M
}
av_shrink_packet(pkt, data_ptr - pkt->data);
return 0;
}
 
/* Decrypt one encrypted KLV triplet (SMPTE 429-6 style) into pkt.
 * The triplet wraps the original essence key, sizes and AES-CBC payload;
 * the leading plaintext_size bytes of the payload are stored unencrypted.
 * Reads the whole triplet from s->pb; on success pkt holds the decrypted
 * essence and pkt->stream_index is set. */
static int mxf_decrypt_triplet(AVFormatContext *s, AVPacket *pkt, KLVPacket *klv)
{
    /* known plaintext block used to verify the decryption key */
    static const uint8_t checkv[16] = {0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b};
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t end = avio_tell(pb) + klv->length;
    int64_t size;
    uint64_t orig_size;
    uint64_t plaintext_size;
    uint8_t ivec[16];
    uint8_t tmpbuf[16];
    int index;

    /* lazily set up AES-128 from the user-supplied decryption key */
    if (!mxf->aesc && s->key && s->keylen == 16) {
        mxf->aesc = av_aes_alloc();
        if (!mxf->aesc)
            return AVERROR(ENOMEM);
        av_aes_init(mxf->aesc, s->key, 128, 1);
    }
    // crypto context
    avio_skip(pb, klv_decode_ber_length(pb));
    // plaintext offset
    klv_decode_ber_length(pb);
    plaintext_size = avio_rb64(pb);
    // source klv key
    klv_decode_ber_length(pb);
    avio_read(pb, klv->key, 16);
    if (!IS_KLV_KEY(klv, mxf_essence_element_key))
        return AVERROR_INVALIDDATA;
    index = mxf_get_stream_index(s, klv);
    if (index < 0)
        return AVERROR_INVALIDDATA;
    // source size
    klv_decode_ber_length(pb);
    orig_size = avio_rb64(pb);
    if (orig_size < plaintext_size)
        return AVERROR_INVALIDDATA;
    // enc. code
    size = klv_decode_ber_length(pb);
    /* payload must hold IV (16) + check value (16) plus the essence */
    if (size < 32 || size - 32 < orig_size)
        return AVERROR_INVALIDDATA;
    avio_read(pb, ivec, 16);
    avio_read(pb, tmpbuf, 16);
    if (mxf->aesc)
        av_aes_crypt(mxf->aesc, tmpbuf, tmpbuf, 1, ivec, 1);
    /* decrypted check value must equal the known plaintext; warn only,
     * the caller may still want the (garbled) data */
    if (memcmp(tmpbuf, checkv, 16))
        av_log(s, AV_LOG_ERROR, "probably incorrect decryption key\n");
    size -= 32;
    size = av_get_packet(pb, pkt, size);
    if (size < 0)
        return size;
    else if (size < plaintext_size)
        return AVERROR_INVALIDDATA;
    /* only the tail after the plaintext run is encrypted */
    size -= plaintext_size;
    if (mxf->aesc)
        av_aes_crypt(mxf->aesc, &pkt->data[plaintext_size],
                     &pkt->data[plaintext_size], size >> 4, ivec, 1);
    av_shrink_packet(pkt, orig_size);
    pkt->stream_index = index;
    avio_skip(pb, end - avio_tell(pb));
    return 0;
}
 
/**
 * Parse the primer pack: a batch of (local tag, UL) mappings used to
 * resolve 2-byte local tags in subsequent metadata sets.
 *
 * Stores the raw table in mxf->local_tags (item_num entries of 18 bytes).
 *
 * @return 0 on success, AVERROR_PATCHWELCOME for unexpected item sizes,
 *         AVERROR_INVALIDDATA for bogus counts or truncated tables
 */
static int mxf_read_primer_pack(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFContext *mxf = arg;
    int item_num = avio_rb32(pb);
    int item_len = avio_rb32(pb);

    /* each item is a 2-byte local tag followed by a 16-byte UL */
    if (item_len != 18) {
        avpriv_request_sample(pb, "Primer pack item length %d", item_len);
        return AVERROR_PATCHWELCOME;
    }
    /* avio_rb32() can wrap to a negative int; reject that along with
     * absurdly large counts before allocating */
    if (item_num < 0 || item_num > 65536) {
        av_log(mxf->fc, AV_LOG_ERROR, "item_num %d is too large\n", item_num);
        return AVERROR_INVALIDDATA;
    }
    mxf->local_tags = av_calloc(item_num, item_len);
    if (!mxf->local_tags)
        return AVERROR(ENOMEM);
    mxf->local_tags_count = item_num;
    /* a short read would leave zeroed garbage mappings behind — fail
     * instead of silently keeping a truncated table */
    if (avio_read(pb, mxf->local_tags, item_num * item_len) != item_num * item_len) {
        av_freep(&mxf->local_tags);
        mxf->local_tags_count = 0;
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
 
/* Parse one partition pack (SMPTE 377M 6.2) and append it to
 * mxf->partitions, keeping the array sorted by file offset (insertion
 * point differs depending on whether we are parsing forward or following
 * PreviousPartition links backward). Also derives the operational
 * pattern and sanity-checks KAGSize and PreviousPartition. */
static int mxf_read_partition_pack(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFContext *mxf = arg;
    MXFPartition *partition, *tmp_part;
    UID op;
    uint64_t footer_partition;
    uint32_t nb_essence_containers;

    tmp_part = av_realloc_array(mxf->partitions, mxf->partitions_count + 1, sizeof(*mxf->partitions));
    if (!tmp_part)
        return AVERROR(ENOMEM);
    mxf->partitions = tmp_part;

    if (mxf->parsing_backward) {
        /* insert the new partition pack in the middle
         * this makes the entries in mxf->partitions sorted by offset */
        memmove(&mxf->partitions[mxf->last_forward_partition+1],
                &mxf->partitions[mxf->last_forward_partition],
                (mxf->partitions_count - mxf->last_forward_partition)*sizeof(*mxf->partitions));
        partition = mxf->current_partition = &mxf->partitions[mxf->last_forward_partition];
    } else {
        mxf->last_forward_partition++;
        partition = mxf->current_partition = &mxf->partitions[mxf->partitions_count];
    }

    memset(partition, 0, sizeof(*partition));
    mxf->partitions_count++;
    partition->pack_length = avio_tell(pb) - klv_offset + size;

    /* byte 13 of the pack key encodes the partition kind */
    switch(uid[13]) {
    case 2:
        partition->type = Header;
        break;
    case 3:
        partition->type = BodyPartition;
        break;
    case 4:
        partition->type = Footer;
        break;
    default:
        av_log(mxf->fc, AV_LOG_ERROR, "unknown partition type %i\n", uid[13]);
        return AVERROR_INVALIDDATA;
    }

    /* consider both footers to be closed (there is only Footer and CompleteFooter) */
    partition->closed = partition->type == Footer || !(uid[14] & 1);
    partition->complete = uid[14] > 2;
    avio_skip(pb, 4);                       /* MajorVersion + MinorVersion */
    partition->kag_size = avio_rb32(pb);
    partition->this_partition = avio_rb64(pb);
    partition->previous_partition = avio_rb64(pb);
    footer_partition = avio_rb64(pb);
    partition->header_byte_count = avio_rb64(pb);
    partition->index_byte_count = avio_rb64(pb);
    partition->index_sid = avio_rb32(pb);
    avio_skip(pb, 8);                       /* BodyOffset */
    partition->body_sid = avio_rb32(pb);
    avio_read(pb, op, sizeof(UID));
    nb_essence_containers = avio_rb32(pb);

    /* some files don't have FooterPartition set in every partition */
    if (footer_partition) {
        if (mxf->footer_partition && mxf->footer_partition != footer_partition) {
            av_log(mxf->fc, AV_LOG_ERROR,
                   "inconsistent FooterPartition value: %"PRIu64" != %"PRIu64"\n",
                   mxf->footer_partition, footer_partition);
        } else {
            mxf->footer_partition = footer_partition;
        }
    }

    av_dlog(mxf->fc,
            "PartitionPack: ThisPartition = 0x%"PRIX64
            ", PreviousPartition = 0x%"PRIX64", "
            "FooterPartition = 0x%"PRIX64", IndexSID = %i, BodySID = %i\n",
            partition->this_partition,
            partition->previous_partition, footer_partition,
            partition->index_sid, partition->body_sid);

    /* sanity check PreviousPartition if set */
    if (partition->previous_partition &&
        mxf->run_in + partition->previous_partition >= klv_offset) {
        av_log(mxf->fc, AV_LOG_ERROR,
               "PreviousPartition points to this partition or forward\n");
        return AVERROR_INVALIDDATA;
    }

    /* bytes 12/13 of the OP label select the operational pattern */
    if      (op[12] == 1 && op[13] == 1) mxf->op = OP1a;
    else if (op[12] == 1 && op[13] == 2) mxf->op = OP1b;
    else if (op[12] == 1 && op[13] == 3) mxf->op = OP1c;
    else if (op[12] == 2 && op[13] == 1) mxf->op = OP2a;
    else if (op[12] == 2 && op[13] == 2) mxf->op = OP2b;
    else if (op[12] == 2 && op[13] == 3) mxf->op = OP2c;
    else if (op[12] == 3 && op[13] == 1) mxf->op = OP3a;
    else if (op[12] == 3 && op[13] == 2) mxf->op = OP3b;
    else if (op[12] == 3 && op[13] == 3) mxf->op = OP3c;
    else if (op[12] == 64 && op[13] == 1) mxf->op = OPSONYOpt;
    else if (op[12] == 0x10) {
        /* SMPTE 390m: "There shall be exactly one essence container"
         * The following block deals with files that violate this, namely:
         * 2011_DCPTEST_24FPS.V.mxf - two ECs, OP1a
         * abcdefghiv016f56415e.mxf - zero ECs, OPAtom, output by Avid AirSpeed */
        if (nb_essence_containers != 1) {
            MXFOP op = nb_essence_containers ? OP1a : OPAtom;

            /* only nag once */
            if (!mxf->op)
                av_log(mxf->fc, AV_LOG_WARNING, "\"OPAtom\" with %u ECs - assuming %s\n",
                       nb_essence_containers, op == OP1a ? "OP1a" : "OPAtom");

            mxf->op = op;
        } else
            mxf->op = OPAtom;
    } else {
        av_log(mxf->fc, AV_LOG_ERROR, "unknown operational pattern: %02xh %02xh - guessing OP1a\n", op[12], op[13]);
        mxf->op = OP1a;
    }

    /* guard against nonsense KAG sizes; OPSONYOpt files use 512 in practice */
    if (partition->kag_size <= 0 || partition->kag_size > (1 << 20)) {
        av_log(mxf->fc, AV_LOG_WARNING, "invalid KAGSize %i - guessing ", partition->kag_size);

        if (mxf->op == OPSONYOpt)
            partition->kag_size = 512;
        else
            partition->kag_size = 1;

        av_log(mxf->fc, AV_LOG_WARNING, "%i\n", partition->kag_size);
    }

    return 0;
}
 
/**
 * Append one parsed metadata set pointer to mxf->metadata_sets,
 * growing the array by one slot.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int mxf_add_metadata_set(MXFContext *mxf, void *metadata_set)
{
    MXFMetadataSet **sets = av_realloc_array(mxf->metadata_sets,
                                             mxf->metadata_sets_count + 1,
                                             sizeof(*mxf->metadata_sets));
    if (!sets)
        return AVERROR(ENOMEM);

    sets[mxf->metadata_sets_count++] = metadata_set;
    mxf->metadata_sets = sets;
    return 0;
}
 
/**
 * Local-tag parser for a CryptographicContext set: only the 16-byte
 * source essence container UL is of interest; other tags are ignored.
 */
static int mxf_read_cryptographic_context(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFCryptoContext *ctx = arg;

    if (size != 16)
        return AVERROR_INVALIDDATA;

    if (IS_KLV_KEY(uid, mxf_crypto_source_container_ul))
        avio_read(pb, ctx->source_container_ul, 16);

    return 0;
}
 
/**
 * Local-tag parser for the ContentStorage set: reads the Packages batch
 * (tag 0x1901) into mxf->packages_refs; all other tags are skipped by
 * the caller.
 */
static int mxf_read_content_storage(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFContext *mxf = arg;

    if (tag == 0x1901) {
        mxf->packages_count = avio_rb32(pb);
        mxf->packages_refs  = av_calloc(mxf->packages_count, sizeof(UID));
        if (!mxf->packages_refs)
            return AVERROR(ENOMEM);
        avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */
        avio_read(pb, (uint8_t *)mxf->packages_refs,
                  mxf->packages_count * sizeof(UID));
    }
    return 0;
}
 
/**
 * Local-tag parser for a SourceClip structural component: duration,
 * start position, the referenced source package (last 16 bytes of its
 * UMID) and the referenced track id.
 */
static int mxf_read_source_clip(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFStructuralComponent *clip = arg;

    if (tag == 0x0202) {
        clip->duration = avio_rb64(pb);
    } else if (tag == 0x1201) {
        clip->start_position = avio_rb64(pb);
    } else if (tag == 0x1101) {
        /* UMID, only get last 16 bytes */
        avio_skip(pb, 16);
        avio_read(pb, clip->source_package_uid, 16);
    } else if (tag == 0x1102) {
        clip->source_track_id = avio_rb32(pb);
    }
    return 0;
}
 
/**
 * Local-tag parser for a MaterialPackage set: reads the Tracks batch
 * (tag 0x4403) into package->tracks_refs.
 */
static int mxf_read_material_package(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFPackage *package = arg;

    if (tag == 0x4403) {
        package->tracks_count = avio_rb32(pb);
        package->tracks_refs  = av_calloc(package->tracks_count, sizeof(UID));
        if (!package->tracks_refs)
            return AVERROR(ENOMEM);
        avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */
        avio_read(pb, (uint8_t *)package->tracks_refs,
                  package->tracks_count * sizeof(UID));
    }
    return 0;
}
 
/**
 * Local-tag parser for a TimecodeComponent: start frame, rounded rate
 * (stored as rate/1) and the drop-frame flag.
 */
static int mxf_read_timecode_component(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFTimecodeComponent *tc = arg;

    if (tag == 0x1501) {
        tc->start_frame = avio_rb64(pb);
    } else if (tag == 0x1502) {
        tc->rate = (AVRational){avio_rb16(pb), 1};
    } else if (tag == 0x1503) {
        tc->drop_frame = avio_r8(pb);
    }
    return 0;
}
 
/**
 * Local-tag parser for a Track set: track id, 4-byte track number
 * (later matched against essence element keys), edit rate and the
 * strong reference to the track's Sequence.
 */
static int mxf_read_track(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFTrack *trk = arg;

    if (tag == 0x4801) {
        trk->track_id = avio_rb32(pb);
    } else if (tag == 0x4803) {
        avio_read(pb, trk->sequence_ref, 16);
    } else if (tag == 0x4804) {
        avio_read(pb, trk->track_number, 4);
    } else if (tag == 0x4B01) {
        trk->edit_rate.num = avio_rb32(pb);
        trk->edit_rate.den = avio_rb32(pb);
    }
    return 0;
}
 
/**
 * Local-tag parser for a Sequence set: duration, data definition UL
 * (picture/sound/data) and the batch of structural component references.
 */
static int mxf_read_sequence(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFSequence *seq = arg;

    if (tag == 0x0201) {
        avio_read(pb, seq->data_definition_ul, 16);
    } else if (tag == 0x0202) {
        seq->duration = avio_rb64(pb);
    } else if (tag == 0x1001) {
        seq->structural_components_count = avio_rb32(pb);
        seq->structural_components_refs  = av_calloc(seq->structural_components_count, sizeof(UID));
        if (!seq->structural_components_refs)
            return AVERROR(ENOMEM);
        avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */
        avio_read(pb, (uint8_t *)seq->structural_components_refs,
                  seq->structural_components_count * sizeof(UID));
    }
    return 0;
}
 
/**
 * Local-tag parser for a SourcePackage set: tracks batch, package UMID
 * (only the trailing 16 bytes are kept) and the strong reference to the
 * package's essence descriptor.
 */
static int mxf_read_source_package(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFPackage *package = arg;

    if (tag == 0x4401) {
        /* UMID, only get last 16 bytes */
        avio_skip(pb, 16);
        avio_read(pb, package->package_uid, 16);
    } else if (tag == 0x4403) {
        package->tracks_count = avio_rb32(pb);
        package->tracks_refs  = av_calloc(package->tracks_count, sizeof(UID));
        if (!package->tracks_refs)
            return AVERROR(ENOMEM);
        avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */
        avio_read(pb, (uint8_t *)package->tracks_refs,
                  package->tracks_count * sizeof(UID));
    } else if (tag == 0x4701) {
        avio_read(pb, package->descriptor_ref, 16);
    }
    return 0;
}
 
/**
 * Parse an IndexEntryArray batch (tag 0x3F0A) into the segment's
 * per-entry arrays (temporal offset, flags, stream offset).
 *
 * The batch header gives the entry count and per-entry length; only the
 * first 11 bytes of each entry are used, the rest is skipped.
 *
 * @return 0 on success, AVERROR(ENOMEM) or AVERROR_INVALIDDATA
 */
static int mxf_read_index_entry_array(AVIOContext *pb, MXFIndexTableSegment *segment)
{
    int i, length;

    segment->nb_index_entries = avio_rb32(pb);

    length = avio_rb32(pb);
    /* TemporalOffset(1) + KeyFrameOffset(1) + Flags(1) + StreamOffset(8)
     * = 11 bytes minimum; a smaller advertised length would make the
     * avio_skip() below seek backwards and loop over garbage. */
    if (length < 11)
        return AVERROR_INVALIDDATA;

    if (!(segment->temporal_offset_entries=av_calloc(segment->nb_index_entries, sizeof(*segment->temporal_offset_entries))) ||
        !(segment->flag_entries          = av_calloc(segment->nb_index_entries, sizeof(*segment->flag_entries))) ||
        !(segment->stream_offset_entries = av_calloc(segment->nb_index_entries, sizeof(*segment->stream_offset_entries))))
        return AVERROR(ENOMEM);

    for (i = 0; i < segment->nb_index_entries; i++) {
        segment->temporal_offset_entries[i] = avio_r8(pb);
        avio_r8(pb);                                    /* KeyFrameOffset */
        segment->flag_entries[i] = avio_r8(pb);
        segment->stream_offset_entries[i] = avio_rb64(pb);
        avio_skip(pb, length - 11);                     /* ignore slice/PosTable data */
    }
    return 0;
}
 
/* Local-tag parser for an IndexTableSegment set: edit-unit byte count
 * (CBR indexes), SIDs, edit rate, start position/duration, and the
 * VBR IndexEntryArray (delegated to mxf_read_index_entry_array()). */
static int mxf_read_index_table_segment(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFIndexTableSegment *segment = arg;
    switch(tag) {
    case 0x3F05:
        segment->edit_unit_byte_count = avio_rb32(pb);
        av_dlog(NULL, "EditUnitByteCount %d\n", segment->edit_unit_byte_count);
        break;
    case 0x3F06:
        segment->index_sid = avio_rb32(pb);
        av_dlog(NULL, "IndexSID %d\n", segment->index_sid);
        break;
    case 0x3F07:
        segment->body_sid = avio_rb32(pb);
        av_dlog(NULL, "BodySID %d\n", segment->body_sid);
        break;
    case 0x3F0A:
        av_dlog(NULL, "IndexEntryArray found\n");
        return mxf_read_index_entry_array(pb, segment);
    case 0x3F0B:
        segment->index_edit_rate.num = avio_rb32(pb);
        segment->index_edit_rate.den = avio_rb32(pb);
        av_dlog(NULL, "IndexEditRate %d/%d\n", segment->index_edit_rate.num,
                segment->index_edit_rate.den);
        break;
    case 0x3F0C:
        segment->index_start_position = avio_rb64(pb);
        av_dlog(NULL, "IndexStartPosition %"PRId64"\n", segment->index_start_position);
        break;
    case 0x3F0D:
        segment->index_duration = avio_rb64(pb);
        av_dlog(NULL, "IndexDuration %"PRId64"\n", segment->index_duration);
        break;
    }
    return 0;
}
 
/**
 * Read the PixelLayout item (code/value byte pairs, zero-code terminated
 * per SMPTE 377M E.2.46) and resolve it to descriptor->pix_fmt.
 * At most 8 pairs are stored; extra pairs on malformed files are not read.
 */
static void mxf_read_pixel_layout(AVIOContext *pb, MXFDescriptor *descriptor)
{
    char layout[16] = {0}; /* not for printing, may end up not terminated on purpose */
    int pos = 0;

    for (;;) {
        int code  = avio_r8(pb);
        int value = avio_r8(pb);

        av_dlog(NULL, "pixel layout: code %#x\n", code);

        if (pos > 14)
            break; /* don't read byte by byte on sneaky files filled with lots of non-zeroes */
        layout[pos++] = code;
        layout[pos++] = value;

        if (code == 0) /* SMPTE 377M E.2.46 */
            break;
    }

    ff_mxf_decode_pixel_layout(layout, &descriptor->pix_fmt);
}
 
/* Local-tag parser for picture/sound essence descriptors (generic,
 * CDCI, RGBA, wave...): fills the MXFDescriptor fields used later to
 * set up codec parameters.
 *
 * NOTE(review): pix_fmt is reset to AV_PIX_FMT_NONE on every local-tag
 * invocation, which looks like it would clobber a value parsed from an
 * earlier 0x3401 tag if any other tag follows it — confirm tag ordering
 * assumptions before relying on pix_fmt here. */
static int mxf_read_generic_descriptor(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFDescriptor *descriptor = arg;
    descriptor->pix_fmt = AV_PIX_FMT_NONE;
    switch(tag) {
    case 0x3F01: /* SubDescriptorUIDs batch */
        descriptor->sub_descriptors_count = avio_rb32(pb);
        descriptor->sub_descriptors_refs = av_calloc(descriptor->sub_descriptors_count, sizeof(UID));
        if (!descriptor->sub_descriptors_refs)
            return AVERROR(ENOMEM);
        avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */
        avio_read(pb, (uint8_t *)descriptor->sub_descriptors_refs, descriptor->sub_descriptors_count * sizeof(UID));
        break;
    case 0x3004:
        avio_read(pb, descriptor->essence_container_ul, 16);
        break;
    case 0x3006:
        descriptor->linked_track_id = avio_rb32(pb);
        break;
    case 0x3201: /* PictureEssenceCoding */
        avio_read(pb, descriptor->essence_codec_ul, 16);
        break;
    case 0x3203:
        descriptor->width = avio_rb32(pb);
        break;
    case 0x3202: /* StoredHeight: field height, not frame height */
        descriptor->height = avio_rb32(pb);
        break;
    case 0x320C:
        descriptor->frame_layout = avio_r8(pb);
        break;
    case 0x320E:
        descriptor->aspect_ratio.num = avio_rb32(pb);
        descriptor->aspect_ratio.den = avio_rb32(pb);
        break;
    case 0x3212:
        descriptor->field_dominance = avio_r8(pb);
        break;
    case 0x3301:
        descriptor->component_depth = avio_rb32(pb);
        break;
    case 0x3302:
        descriptor->horiz_subsampling = avio_rb32(pb);
        break;
    case 0x3308:
        descriptor->vert_subsampling = avio_rb32(pb);
        break;
    case 0x3D03: /* AudioSamplingRate */
        descriptor->sample_rate.num = avio_rb32(pb);
        descriptor->sample_rate.den = avio_rb32(pb);
        break;
    case 0x3D06: /* SoundEssenceCompression */
        avio_read(pb, descriptor->essence_codec_ul, 16);
        break;
    case 0x3D07:
        descriptor->channels = avio_rb32(pb);
        break;
    case 0x3D01:
        descriptor->bits_per_sample = avio_rb32(pb);
        break;
    case 0x3401:
        mxf_read_pixel_layout(pb, descriptor);
        break;
    default:
        /* Private uid used by SONY C0023S01.mxf */
        if (IS_KLV_KEY(uid, mxf_sony_mpeg4_extradata)) {
            if (descriptor->extradata)
                av_log(NULL, AV_LOG_WARNING, "Duplicate sony_mpeg4_extradata\n");
            av_free(descriptor->extradata);
            descriptor->extradata_size = 0;
            descriptor->extradata = av_malloc(size);
            if (!descriptor->extradata)
                return AVERROR(ENOMEM);
            descriptor->extradata_size = size;
            avio_read(pb, descriptor->extradata, size);
        }
        break;
    }
    return 0;
}
 
/*
* Match an uid independently of the version byte and up to len common bytes
* Returns: boolean
*/
/*
 * Match an uid independently of the version byte and up to len common bytes
 * Returns: boolean
 */
static int mxf_match_uid(const UID key, const UID uid, int len)
{
    int pos;

    for (pos = 0; pos < len; pos++) {
        if (pos == 7)
            continue; /* byte 7 is the version byte, not significant */
        if (key[pos] != uid[pos])
            return 0;
    }
    return 1;
}
 
/**
 * Walk a codec-UL table (terminated by an all-zero UID) looking for a
 * version-insensitive match of *uid over each entry's matching_len bytes.
 *
 * @return the matching entry, or the zero sentinel when nothing matched
 *         (so the caller can always dereference ->id safely)
 */
static const MXFCodecUL *mxf_get_codec_ul(const MXFCodecUL *uls, UID *uid)
{
    const MXFCodecUL *cur = uls;

    while (cur->uid[0] && !mxf_match_uid(cur->uid, *uid, cur->matching_len))
        cur++;

    return cur;
}
 
/**
 * Resolve a strong reference (a 16-byte InstanceUID) to the parsed
 * metadata set it points at.
 *
 * @param strong_ref UID to look up; NULL resolves to NULL
 * @param type       required set type, or AnyType for no filtering
 * @return the matching set, or NULL when unresolved
 */
static void *mxf_resolve_strong_ref(MXFContext *mxf, UID *strong_ref, enum MXFMetadataSetType type)
{
    int n;

    if (!strong_ref)
        return NULL;

    for (n = 0; n < mxf->metadata_sets_count; n++) {
        MXFMetadataSet *set = mxf->metadata_sets[n];

        if (memcmp(*strong_ref, set->uid, 16))
            continue;
        if (type != AnyType && set->type != type)
            continue;
        return set;
    }

    return NULL;
}
 
/* Essence container ULs mapped to codec ids; matched over the first 14
 * bytes (version byte ignored), zero-UID terminated. */
static const MXFCodecUL mxf_picture_essence_container_uls[] = {
    // video essence container uls
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0x60,0x01 }, 14, AV_CODEC_ID_MPEG2VIDEO }, /* MPEG-ES Frame wrapped */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x02,0x41,0x01 }, 14,    AV_CODEC_ID_DVVIDEO }, /* DV 625 25mbps */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x05,0x00,0x00 }, 14,   AV_CODEC_ID_RAWVIDEO }, /* Uncompressed Picture */
    { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },  0,      AV_CODEC_ID_NONE },
};

/* EC ULs for intra-only formats */
static const MXFCodecUL mxf_intra_only_essence_container_uls[] = {
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x00,0x00 }, 14, AV_CODEC_ID_MPEG2VIDEO }, /* MXF-GC SMPTE D-10 Mappings */
    { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },  0,       AV_CODEC_ID_NONE },
};

/* intra-only PictureEssenceCoding ULs, where no corresponding EC UL exists */
static const MXFCodecUL mxf_intra_only_picture_essence_coding_uls[] = {
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x01,0x32,0x00,0x00 }, 14,       AV_CODEC_ID_H264 }, /* H.264/MPEG-4 AVC Intra Profiles */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x07,0x04,0x01,0x02,0x02,0x03,0x01,0x01,0x00 }, 14,   AV_CODEC_ID_JPEG2000 }, /* JPEG2000 Codestream */
    { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },  0,       AV_CODEC_ID_NONE },
};

static const MXFCodecUL mxf_sound_essence_container_uls[] = {
    // sound essence container uls
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x06,0x01,0x00 }, 14, AV_CODEC_ID_PCM_S16LE }, /* BWF Frame wrapped */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0x40,0x01 }, 14,       AV_CODEC_ID_MP2 }, /* MPEG-ES Frame wrapped, 0x40 ??? stream id */
    { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x01,0x01 }, 14, AV_CODEC_ID_PCM_S16LE }, /* D-10 Mapping 50Mbps PAL Extended Template */
    { { 0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0xFF,0x4B,0x46,0x41,0x41,0x00,0x0D,0x4D,0x4F }, 14, AV_CODEC_ID_PCM_S16LE }, /* 0001GL00.MXF.A1.mxf_opatom.mxf */
    { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },  0,      AV_CODEC_ID_NONE },
};
 
/**
 * Collects all IndexTableSegment metadata sets into a newly allocated array,
 * sorted by {BodySID, IndexSID, IndexStartPosition} with duplicates removed
 * (selection sort; segment count is expected to be small).
 *
 * On success the caller owns *sorted_segments (free with av_free); the
 * pointed-to segments remain owned by mxf->metadata_sets.
 *
 * @return 0 on success, AVERROR_INVALIDDATA when no segments exist,
 *         AVERROR(ENOMEM) on allocation failure
 */
static int mxf_get_sorted_table_segments(MXFContext *mxf, int *nb_sorted_segments, MXFIndexTableSegment ***sorted_segments)
{
    int i, j, nb_segments = 0;
    MXFIndexTableSegment **unsorted_segments;
    int last_body_sid = -1, last_index_sid = -1, last_index_start = -1;

    /* count number of segments, allocate arrays and copy unsorted segments */
    for (i = 0; i < mxf->metadata_sets_count; i++)
        if (mxf->metadata_sets[i]->type == IndexTableSegment)
            nb_segments++;

    if (!nb_segments)
        return AVERROR_INVALIDDATA;

    if (!(unsorted_segments = av_calloc(nb_segments, sizeof(*unsorted_segments))) ||
        !(*sorted_segments = av_calloc(nb_segments, sizeof(**sorted_segments)))) {
        av_freep(sorted_segments);
        av_free(unsorted_segments);
        return AVERROR(ENOMEM);
    }

    for (i = j = 0; i < mxf->metadata_sets_count; i++)
        if (mxf->metadata_sets[i]->type == IndexTableSegment)
            unsorted_segments[j++] = (MXFIndexTableSegment*)mxf->metadata_sets[i];

    *nb_sorted_segments = 0;

    /* sort segments by {BodySID, IndexSID, IndexStartPosition}, remove duplicates while we're at it */
    for (i = 0; i < nb_segments; i++) {
        int best = -1, best_body_sid = -1, best_index_sid = -1, best_index_start = -1;
        uint64_t best_index_duration = 0;

        for (j = 0; j < nb_segments; j++) {
            MXFIndexTableSegment *s = unsorted_segments[j];

            /* Require larger BodySID, IndexSID or IndexStartPosition than the previous entry. This removes duplicates.
             * We want the smallest values for the keys than what we currently have, unless this is the first such entry this time around.
             * If we come across an entry with the same IndexStartPosition but larger IndexDuration, then we'll prefer it over the one we currently have.
             */
            if ((i == 0 || s->body_sid > last_body_sid || s->index_sid > last_index_sid || s->index_start_position > last_index_start) &&
                (best == -1 || s->body_sid < best_body_sid || s->index_sid < best_index_sid || s->index_start_position < best_index_start ||
                (s->index_start_position == best_index_start && s->index_duration > best_index_duration))) {
                best = j;
                best_body_sid = s->body_sid;
                best_index_sid = s->index_sid;
                best_index_start = s->index_start_position;
                best_index_duration = s->index_duration;
            }
        }

        /* no suitable entry found -> we're done */
        if (best == -1)
            break;

        (*sorted_segments)[(*nb_sorted_segments)++] = unsorted_segments[best];
        last_body_sid = best_body_sid;
        last_index_sid = best_index_sid;
        last_index_start = best_index_start;
    }

    av_free(unsorted_segments);

    return 0;
}
 
/**
 * Computes the absolute file offset of the given essence container offset
 * by walking the partitions belonging to body_sid and skipping whole
 * essence runs until the requested offset falls inside one.
 *
 * @return 0 on success (result in *offset_out), AVERROR_INVALIDDATA when
 *         the offset lies beyond all known partitions (partial file)
 */
static int mxf_absolute_bodysid_offset(MXFContext *mxf, int body_sid, int64_t offset, int64_t *offset_out)
{
    int64_t remaining = offset; /* essence bytes still to skip */
    int i;

    for (i = 0; i < mxf->partitions_count; i++) {
        MXFPartition *part = &mxf->partitions[i];

        if (part->body_sid != body_sid)
            continue;

        /* a zero essence_length means the run's size is unknown - assume
         * the target lies within it */
        if (remaining < part->essence_length || !part->essence_length) {
            *offset_out = part->essence_offset + remaining;
            return 0;
        }

        remaining -= part->essence_length;
    }

    av_log(mxf->fc, AV_LOG_ERROR,
           "failed to find absolute offset of %"PRIX64" in BodySID %i - partial file?\n",
           offset, body_sid);

    return AVERROR_INVALIDDATA;
}
 
/**
 * Returns the end position of the essence container with given BodySID, or zero if unknown
 * (a partition with zero essence_length makes the total unknowable).
 */
static int64_t mxf_essence_container_end(MXFContext *mxf, int body_sid)
{
    int64_t end = 0;
    int i;

    for (i = 0; i < mxf->partitions_count; i++) {
        MXFPartition *part = &mxf->partitions[i];

        if (part->body_sid != body_sid)
            continue;

        if (!part->essence_length)
            return 0;

        /* partitions are visited in file order; keep the last end seen */
        end = part->essence_offset + part->essence_length;
    }

    return end;
}
 
/**
 * EditUnit -> absolute file offset.
 * Walks the index table's segments accumulating stream offsets until the
 * segment containing edit_unit is found, then maps the stream offset to an
 * absolute offset via mxf_absolute_bodysid_offset().
 *
 * @param edit_unit     requested edit unit; clamped up to each segment's start
 * @param edit_unit_out if non-NULL, receives the (possibly clamped) edit unit
 * @param offset_out    receives the absolute file offset on success
 * @param nag           if non-zero, log an error when no segment matches
 * @return 0 on success, AVERROR_INVALIDDATA on failure
 */
static int mxf_edit_unit_absolute_offset(MXFContext *mxf, MXFIndexTable *index_table, int64_t edit_unit, int64_t *edit_unit_out, int64_t *offset_out, int nag)
{
    int i;
    int64_t offset_temp = 0;

    for (i = 0; i < index_table->nb_segments; i++) {
        MXFIndexTableSegment *s = index_table->segments[i];

        edit_unit = FFMAX(edit_unit, s->index_start_position); /* clamp if trying to seek before start */

        if (edit_unit < s->index_start_position + s->index_duration) {
            int64_t index = edit_unit - s->index_start_position;

            /* CBR segments give a fixed byte count per edit unit ... */
            if (s->edit_unit_byte_count)
                offset_temp += s->edit_unit_byte_count * index;
            else if (s->nb_index_entries) {
                /* ... VBR segments carry explicit per-unit stream offsets */
                if (s->nb_index_entries == 2 * s->index_duration + 1)
                    index *= 2; /* Avid index */

                if (index < 0 || index >= s->nb_index_entries) {
                    av_log(mxf->fc, AV_LOG_ERROR, "IndexSID %i segment at %"PRId64" IndexEntryArray too small\n",
                           index_table->index_sid, s->index_start_position);
                    return AVERROR_INVALIDDATA;
                }

                offset_temp = s->stream_offset_entries[index];
            } else {
                av_log(mxf->fc, AV_LOG_ERROR, "IndexSID %i segment at %"PRId64" missing EditUnitByteCount and IndexEntryArray\n",
                       index_table->index_sid, s->index_start_position);
                return AVERROR_INVALIDDATA;
            }

            if (edit_unit_out)
                *edit_unit_out = edit_unit;

            return mxf_absolute_bodysid_offset(mxf, index_table->body_sid, offset_temp, offset_out);
        } else {
            /* EditUnitByteCount == 0 for VBR indexes, which is fine since they use explicit StreamOffsets */
            offset_temp += s->edit_unit_byte_count * s->index_duration;
        }
    }

    if (nag)
        av_log(mxf->fc, AV_LOG_ERROR, "failed to map EditUnit %"PRId64" in IndexSID %i to an offset\n", edit_unit, index_table->index_sid);

    return AVERROR_INVALIDDATA;
}
 
/**
 * Computes PTSes from the index table's TemporalOffsets and fills in a fake
 * AVIndexEntry array (one entry per edit unit, keyframe flag taken from the
 * segment flag entries). Leaves nb_ptses == 0 when any segment lacks an
 * IndexEntryArray, in which case no reordering information is available.
 *
 * @return 0 on success (also on the no-TemporalOffsets case),
 *         AVERROR(ENOMEM) on allocation failure
 */
static int mxf_compute_ptses_fake_index(MXFContext *mxf, MXFIndexTable *index_table)
{
    int i, j, x;
    int8_t max_temporal_offset = -128;

    /* first compute how many entries we have */
    for (i = 0; i < index_table->nb_segments; i++) {
        MXFIndexTableSegment *s = index_table->segments[i];

        if (!s->nb_index_entries) {
            index_table->nb_ptses = 0;
            return 0; /* no TemporalOffsets */
        }

        index_table->nb_ptses += s->index_duration;
    }

    /* paranoid check */
    if (index_table->nb_ptses <= 0)
        return 0;

    if (!(index_table->ptses = av_calloc(index_table->nb_ptses, sizeof(int64_t))) ||
        !(index_table->fake_index = av_calloc(index_table->nb_ptses, sizeof(AVIndexEntry)))) {
        av_freep(&index_table->ptses);
        return AVERROR(ENOMEM);
    }

    /* we may have a few bad TemporalOffsets
     * make sure the corresponding PTSes don't have the bogus value 0 */
    for (x = 0; x < index_table->nb_ptses; x++)
        index_table->ptses[x] = AV_NOPTS_VALUE;

    /**
     * We have this:
     *
     * x  TemporalOffset
     * 0:  0
     * 1:  1
     * 2:  1
     * 3: -2
     * 4:  1
     * 5:  1
     * 6: -2
     *
     * We want to transform it into this:
     *
     * x  DTS PTS
     * 0: -1   0
     * 1:  0   3
     * 2:  1   1
     * 3:  2   2
     * 4:  3   6
     * 5:  4   4
     * 6:  5   5
     *
     * We do this by bucket sorting x by x+TemporalOffset[x] into mxf->ptses,
     * then setting mxf->first_dts = -max(TemporalOffset[x]).
     * The latter makes DTS <= PTS.
     */
    for (i = x = 0; i < index_table->nb_segments; i++) {
        MXFIndexTableSegment *s = index_table->segments[i];
        int index_delta = 1;
        int n = s->nb_index_entries;

        if (s->nb_index_entries == 2 * s->index_duration + 1) {
            index_delta = 2; /* Avid index */
            /* ignore the last entry - it's the size of the essence container */
            n--;
        }

        for (j = 0; j < n; j += index_delta, x++) {
            int offset = s->temporal_offset_entries[j] / index_delta;
            int index = x + offset;

            if (x >= index_table->nb_ptses) {
                av_log(mxf->fc, AV_LOG_ERROR,
                       "x >= nb_ptses - IndexEntryCount %i < IndexDuration %"PRId64"?\n",
                       s->nb_index_entries, s->index_duration);
                break;
            }

            index_table->fake_index[x].timestamp = x;
            index_table->fake_index[x].flags = !(s->flag_entries[j] & 0x30) ? AVINDEX_KEYFRAME : 0;

            /* out-of-bounds TemporalOffsets are skipped; the target PTS slot
             * keeps AV_NOPTS_VALUE from the initialization above */
            if (index < 0 || index >= index_table->nb_ptses) {
                av_log(mxf->fc, AV_LOG_ERROR,
                       "index entry %i + TemporalOffset %i = %i, which is out of bounds\n",
                       x, offset, index);
                continue;
            }

            index_table->ptses[index] = x;
            max_temporal_offset = FFMAX(max_temporal_offset, offset);
        }
    }

    index_table->first_dts = -max_temporal_offset;

    return 0;
}
 
/**
 * Sorts and collects index table segments into index tables.
 * Also computes PTSes if possible.
 *
 * One MXFIndexTable is created per unique IndexSID; each table takes
 * (non-owning) pointers to its segments from the sorted array.
 * A broken/empty index is tolerated and reported as success (return 0)
 * with a warning, so files without usable indexes still demux.
 */
static int mxf_compute_index_tables(MXFContext *mxf)
{
    int i, j, k, ret, nb_sorted_segments;
    MXFIndexTableSegment **sorted_segments = NULL;

    if ((ret = mxf_get_sorted_table_segments(mxf, &nb_sorted_segments, &sorted_segments)) ||
        nb_sorted_segments <= 0) {
        av_log(mxf->fc, AV_LOG_WARNING, "broken or empty index\n");
        return 0;
    }

    /* sanity check and count unique BodySIDs/IndexSIDs */
    for (i = 0; i < nb_sorted_segments; i++) {
        if (i == 0 || sorted_segments[i-1]->index_sid != sorted_segments[i]->index_sid)
            mxf->nb_index_tables++;
        else if (sorted_segments[i-1]->body_sid != sorted_segments[i]->body_sid) {
            /* same IndexSID but different BodySID is inconsistent */
            av_log(mxf->fc, AV_LOG_ERROR, "found inconsistent BodySID\n");
            ret = AVERROR_INVALIDDATA;
            goto finish_decoding_index;
        }
    }

    if (!(mxf->index_tables = av_calloc(mxf->nb_index_tables, sizeof(MXFIndexTable)))) {
        av_log(mxf->fc, AV_LOG_ERROR, "failed to allocate index tables\n");
        ret = AVERROR(ENOMEM);
        goto finish_decoding_index;
    }

    /* distribute sorted segments to index tables */
    for (i = j = 0; i < nb_sorted_segments; i++) {
        if (i != 0 && sorted_segments[i-1]->index_sid != sorted_segments[i]->index_sid) {
            /* next IndexSID */
            j++;
        }

        mxf->index_tables[j].nb_segments++;
    }

    /* i tracks the start of each table's run within sorted_segments */
    for (i = j = 0; j < mxf->nb_index_tables; i += mxf->index_tables[j++].nb_segments) {
        MXFIndexTable *t = &mxf->index_tables[j];

        if (!(t->segments = av_calloc(t->nb_segments, sizeof(MXFIndexTableSegment*)))) {
            av_log(mxf->fc, AV_LOG_ERROR, "failed to allocate IndexTableSegment pointer array\n");
            ret = AVERROR(ENOMEM);
            goto finish_decoding_index;
        }

        if (sorted_segments[i]->index_start_position)
            av_log(mxf->fc, AV_LOG_WARNING, "IndexSID %i starts at EditUnit %"PRId64" - seeking may not work as expected\n",
                   sorted_segments[i]->index_sid, sorted_segments[i]->index_start_position);

        memcpy(t->segments, &sorted_segments[i], t->nb_segments * sizeof(MXFIndexTableSegment*));
        t->index_sid = sorted_segments[i]->index_sid;
        t->body_sid = sorted_segments[i]->body_sid;

        if ((ret = mxf_compute_ptses_fake_index(mxf, t)) < 0)
            goto finish_decoding_index;

        /* fix zero IndexDurations */
        for (k = 0; k < t->nb_segments; k++) {
            if (t->segments[k]->index_duration)
                continue;

            if (t->nb_segments > 1)
                av_log(mxf->fc, AV_LOG_WARNING, "IndexSID %i segment %i has zero IndexDuration and there's more than one segment\n",
                       t->index_sid, k);

            if (mxf->fc->nb_streams <= 0) {
                av_log(mxf->fc, AV_LOG_WARNING, "no streams?\n");
                break;
            }

            /* assume the first stream's duration is reasonable
             * leave index_duration = 0 on further segments in case we have any (unlikely)
             */
            t->segments[k]->index_duration = mxf->fc->streams[0]->duration;
            break;
        }
    }

    ret = 0;
finish_decoding_index:
    av_free(sorted_segments);
    return ret;
}
 
/* Returns 1 if the descriptor identifies an intra-only format, matching
 * either its essence container UL or its picture essence coding UL against
 * the intra-only tables; 0 otherwise. */
static int mxf_is_intra_only(MXFDescriptor *descriptor)
{
    if (mxf_get_codec_ul(mxf_intra_only_essence_container_uls,
                         &descriptor->essence_container_ul)->id != AV_CODEC_ID_NONE)
        return 1;

    return mxf_get_codec_ul(mxf_intra_only_picture_essence_coding_uls,
                            &descriptor->essence_codec_ul)->id != AV_CODEC_ID_NONE;
}
 
/* Format tc as a timecode string and store it under key in *pm. Always
 * returns 0. */
static int mxf_add_timecode_metadata(AVDictionary **pm, const char *key, AVTimecode *tc)
{
    char tc_str[AV_TIMECODE_STR_SIZE];

    av_dict_set(pm, key, av_timecode_make_string(tc, tc_str, 0), 0);
    return 0;
}
 
/**
 * Builds AVStreams from the parsed structural metadata: resolves the material
 * package, follows each material track to its source package/track/descriptor,
 * and fills in codec parameters, time base, duration and timecode metadata.
 *
 * @return 0 on success, AVERROR_INVALIDDATA when references cannot be
 *         resolved, AVERROR(ENOMEM) on allocation failure
 */
static int mxf_parse_structural_metadata(MXFContext *mxf)
{
    MXFPackage *material_package = NULL;
    MXFPackage *temp_package = NULL;
    int i, j, k, ret;

    av_dlog(mxf->fc, "metadata sets count %d\n", mxf->metadata_sets_count);
    /* TODO: handle multiple material packages (OP3x) */
    for (i = 0; i < mxf->packages_count; i++) {
        material_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[i], MaterialPackage);
        if (material_package) break;
    }
    if (!material_package) {
        av_log(mxf->fc, AV_LOG_ERROR, "no material package found\n");
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < material_package->tracks_count; i++) {
        MXFPackage *source_package = NULL;
        MXFTrack *material_track = NULL;
        MXFTrack *source_track = NULL;
        MXFTrack *temp_track = NULL;
        MXFDescriptor *descriptor = NULL;
        MXFStructuralComponent *component = NULL;
        MXFTimecodeComponent *mxf_tc = NULL;
        UID *essence_container_ul = NULL;
        const MXFCodecUL *codec_ul = NULL;
        const MXFCodecUL *container_ul = NULL;
        const MXFCodecUL *pix_fmt_ul = NULL;
        AVStream *st;
        AVTimecode tc;
        int flags;

        if (!(material_track = mxf_resolve_strong_ref(mxf, &material_package->tracks_refs[i], Track))) {
            av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track strong ref\n");
            continue;
        }

        /* timecode directly referenced by the track's sequence ref */
        if ((component = mxf_resolve_strong_ref(mxf, &material_track->sequence_ref, TimecodeComponent))) {
            mxf_tc = (MXFTimecodeComponent*)component;
            flags = mxf_tc->drop_frame == 1 ? AV_TIMECODE_FLAG_DROPFRAME : 0;
            if (av_timecode_init(&tc, mxf_tc->rate, flags, mxf_tc->start_frame, mxf->fc) == 0) {
                mxf_add_timecode_metadata(&mxf->fc->metadata, "timecode", &tc);
            }
        }

        if (!(material_track->sequence = mxf_resolve_strong_ref(mxf, &material_track->sequence_ref, Sequence))) {
            av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track sequence strong ref\n");
            continue;
        }

        /* first timecode component found in the sequence wins */
        for (j = 0; j < material_track->sequence->structural_components_count; j++) {
            component = mxf_resolve_strong_ref(mxf, &material_track->sequence->structural_components_refs[j], TimecodeComponent);
            if (!component)
                continue;

            mxf_tc = (MXFTimecodeComponent*)component;
            flags = mxf_tc->drop_frame == 1 ? AV_TIMECODE_FLAG_DROPFRAME : 0;
            if (av_timecode_init(&tc, mxf_tc->rate, flags, mxf_tc->start_frame, mxf->fc) == 0) {
                mxf_add_timecode_metadata(&mxf->fc->metadata, "timecode", &tc);
                break;
            }
        }

        /* TODO: handle multiple source clips */
        for (j = 0; j < material_track->sequence->structural_components_count; j++) {
            component = mxf_resolve_strong_ref(mxf, &material_track->sequence->structural_components_refs[j], SourceClip);
            if (!component)
                continue;

            /* find the source package this clip references by package UID */
            for (k = 0; k < mxf->packages_count; k++) {
                temp_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[k], SourcePackage);
                if (!temp_package)
                    continue;
                if (!memcmp(temp_package->package_uid, component->source_package_uid, 16)) {
                    source_package = temp_package;
                    break;
                }
            }
            if (!source_package) {
                av_dlog(mxf->fc, "material track %d: no corresponding source package found\n", material_track->track_id);
                break;
            }
            /* find the matching track inside the source package */
            for (k = 0; k < source_package->tracks_count; k++) {
                if (!(temp_track = mxf_resolve_strong_ref(mxf, &source_package->tracks_refs[k], Track))) {
                    av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track strong ref\n");
                    ret = AVERROR_INVALIDDATA;
                    goto fail_and_free;
                }
                if (temp_track->track_id == component->source_track_id) {
                    source_track = temp_track;
                    break;
                }
            }
            if (!source_track) {
                av_log(mxf->fc, AV_LOG_ERROR, "material track %d: no corresponding source track found\n", material_track->track_id);
                break;
            }
        }
        if (!source_track || !component)
            continue;

        if (!(source_track->sequence = mxf_resolve_strong_ref(mxf, &source_track->sequence_ref, Sequence))) {
            av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track sequence strong ref\n");
            ret = AVERROR_INVALIDDATA;
            goto fail_and_free;
        }

        /* 0001GL00.MXF.A1.mxf_opatom.mxf has the same SourcePackageID as 0001GL.MXF.V1.mxf_opatom.mxf
         * This would result in both files appearing to have two streams. Work around this by sanity checking DataDefinition */
        if (memcmp(material_track->sequence->data_definition_ul, source_track->sequence->data_definition_ul, 16)) {
            av_log(mxf->fc, AV_LOG_ERROR, "material track %d: DataDefinition mismatch\n", material_track->track_id);
            continue;
        }

        st = avformat_new_stream(mxf->fc, NULL);
        if (!st) {
            av_log(mxf->fc, AV_LOG_ERROR, "could not allocate stream\n");
            ret = AVERROR(ENOMEM);
            goto fail_and_free;
        }
        st->id = source_track->track_id;
        st->priv_data = source_track;
        source_track->original_duration = st->duration = component->duration;
        if (st->duration == -1)
            st->duration = AV_NOPTS_VALUE;
        st->start_time = component->start_position;
        if (material_track->edit_rate.num <= 0 || material_track->edit_rate.den <= 0) {
            av_log(mxf->fc, AV_LOG_WARNING,
                   "invalid edit rate (%d/%d) found on stream #%d, defaulting to 25/1\n",
                   material_track->edit_rate.num, material_track->edit_rate.den, st->index);
            material_track->edit_rate = (AVRational){25, 1};
        }
        avpriv_set_pts_info(st, 64, material_track->edit_rate.den, material_track->edit_rate.num);

        /* ensure SourceTrack EditRate == MaterialTrack EditRate since only the former is accessible via st->priv_data */
        source_track->edit_rate = material_track->edit_rate;

        PRINT_KEY(mxf->fc, "data definition ul", source_track->sequence->data_definition_ul);
        codec_ul = mxf_get_codec_ul(ff_mxf_data_definition_uls, &source_track->sequence->data_definition_ul);
        st->codec->codec_type = codec_ul->id;

        source_package->descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor_ref, AnyType);
        if (source_package->descriptor) {
            if (source_package->descriptor->type == MultipleDescriptor) {
                /* pick the sub-descriptor linked to this track id */
                for (j = 0; j < source_package->descriptor->sub_descriptors_count; j++) {
                    MXFDescriptor *sub_descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor->sub_descriptors_refs[j], Descriptor);

                    if (!sub_descriptor) {
                        av_log(mxf->fc, AV_LOG_ERROR, "could not resolve sub descriptor strong ref\n");
                        continue;
                    }
                    if (sub_descriptor->linked_track_id == source_track->track_id) {
                        descriptor = sub_descriptor;
                        break;
                    }
                }
            } else if (source_package->descriptor->type == Descriptor)
                descriptor = source_package->descriptor;
        }
        if (!descriptor) {
            av_log(mxf->fc, AV_LOG_INFO, "source track %d: stream %d, no descriptor found\n", source_track->track_id, st->index);
            continue;
        }
        PRINT_KEY(mxf->fc, "essence codec ul", descriptor->essence_codec_ul);
        PRINT_KEY(mxf->fc, "essence container ul", descriptor->essence_container_ul);
        essence_container_ul = &descriptor->essence_container_ul;
        /* HACK: replacing the original key with mxf_encrypted_essence_container
         * is not allowed according to s429-6, try to find correct information anyway */
        if (IS_KLV_KEY(essence_container_ul, mxf_encrypted_essence_container)) {
            av_log(mxf->fc, AV_LOG_INFO, "broken encrypted mxf file\n");
            for (k = 0; k < mxf->metadata_sets_count; k++) {
                MXFMetadataSet *metadata = mxf->metadata_sets[k];
                if (metadata->type == CryptoContext) {
                    essence_container_ul = &((MXFCryptoContext *)metadata)->source_container_ul;
                    break;
                }
            }
        }

        /* TODO: drop PictureEssenceCoding and SoundEssenceCompression, only check EssenceContainer */
        codec_ul = mxf_get_codec_ul(ff_mxf_codec_uls, &descriptor->essence_codec_ul);
        st->codec->codec_id = (enum AVCodecID)codec_ul->id;
        av_log(mxf->fc, AV_LOG_VERBOSE, "%s: Universal Label: ",
               avcodec_get_name(st->codec->codec_id));
        /* dump the UL as dotted hex groups */
        for (k = 0; k < 16; k++) {
            av_log(mxf->fc, AV_LOG_VERBOSE, "%.2x",
                   descriptor->essence_codec_ul[k]);
            if (!(k+1 & 19) || k == 5)
                av_log(mxf->fc, AV_LOG_VERBOSE, ".");
        }
        av_log(mxf->fc, AV_LOG_VERBOSE, "\n");
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            source_track->intra_only = mxf_is_intra_only(descriptor);
            container_ul = mxf_get_codec_ul(mxf_picture_essence_container_uls, essence_container_ul);
            if (st->codec->codec_id == AV_CODEC_ID_NONE)
                st->codec->codec_id = container_ul->id;
            st->codec->width = descriptor->width;
            st->codec->height = descriptor->height; /* Field height, not frame height */
            switch (descriptor->frame_layout) {
                case SegmentedFrame:
                    /* This one is a weird layout I don't fully understand. */
                    av_log(mxf->fc, AV_LOG_INFO, "SegmentedFrame layout isn't currently supported\n");
                    break;
                case FullFrame:
                    st->codec->field_order = AV_FIELD_PROGRESSIVE;
                    break;
                case OneField:
                    /* Every other line is stored and needs to be duplicated. */
                    av_log(mxf->fc, AV_LOG_INFO, "OneField frame layout isn't currently supported\n");
                    break; /* The correct thing to do here is fall through, but by breaking we might be
                              able to decode some streams at half the vertical resolution, rather than not at all.
                              It's also for compatibility with the old behavior. */
                case MixedFields:
                    break;
                case SeparateFields:
                    st->codec->height *= 2; /* Turn field height into frame height. */
                    break;
                default:
                    av_log(mxf->fc, AV_LOG_INFO, "Unknown frame layout type: %d\n", descriptor->frame_layout);
            }
            if (st->codec->codec_id == AV_CODEC_ID_RAWVIDEO) {
                st->codec->pix_fmt = descriptor->pix_fmt;
                if (st->codec->pix_fmt == AV_PIX_FMT_NONE) {
                    pix_fmt_ul = mxf_get_codec_ul(ff_mxf_pixel_format_uls,
                                                  &descriptor->essence_codec_ul);
                    st->codec->pix_fmt = (enum AVPixelFormat)pix_fmt_ul->id;
                    if (st->codec->pix_fmt == AV_PIX_FMT_NONE) {
                        /* support files created before RP224v10 by defaulting to UYVY422
                           if subsampling is 4:2:2 and component depth is 8-bit */
                        if (descriptor->horiz_subsampling == 2 &&
                            descriptor->vert_subsampling == 1 &&
                            descriptor->component_depth == 8) {
                            st->codec->pix_fmt = AV_PIX_FMT_UYVY422;
                        }
                    }
                }
            }
            st->need_parsing = AVSTREAM_PARSE_HEADERS;
        } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            container_ul = mxf_get_codec_ul(mxf_sound_essence_container_uls, essence_container_ul);
            /* Only overwrite existing codec ID if it is unset or A-law, which is the default according to SMPTE RP 224. */
            if (st->codec->codec_id == AV_CODEC_ID_NONE || (st->codec->codec_id == AV_CODEC_ID_PCM_ALAW && (enum AVCodecID)container_ul->id != AV_CODEC_ID_NONE))
                st->codec->codec_id = (enum AVCodecID)container_ul->id;
            st->codec->channels = descriptor->channels;
            st->codec->bits_per_coded_sample = descriptor->bits_per_sample;

            if (descriptor->sample_rate.den > 0) {
                st->codec->sample_rate = descriptor->sample_rate.num / descriptor->sample_rate.den;
                avpriv_set_pts_info(st, 64, descriptor->sample_rate.den, descriptor->sample_rate.num);
            } else {
                av_log(mxf->fc, AV_LOG_WARNING, "invalid sample rate (%d/%d) "
                       "found for stream #%d, time base forced to 1/48000\n",
                       descriptor->sample_rate.num, descriptor->sample_rate.den,
                       st->index);
                avpriv_set_pts_info(st, 64, 1, 48000);
            }

            /* if duration is set, rescale it from EditRate to SampleRate */
            if (st->duration != AV_NOPTS_VALUE)
                st->duration = av_rescale_q(st->duration, av_inv_q(material_track->edit_rate), st->time_base);

            /* TODO: implement AV_CODEC_ID_RAWAUDIO */
            if (st->codec->codec_id == AV_CODEC_ID_PCM_S16LE) {
                /* widen the PCM codec id according to the declared bit depth */
                if (descriptor->bits_per_sample > 16 && descriptor->bits_per_sample <= 24)
                    st->codec->codec_id = AV_CODEC_ID_PCM_S24LE;
                else if (descriptor->bits_per_sample == 32)
                    st->codec->codec_id = AV_CODEC_ID_PCM_S32LE;
            } else if (st->codec->codec_id == AV_CODEC_ID_PCM_S16BE) {
                if (descriptor->bits_per_sample > 16 && descriptor->bits_per_sample <= 24)
                    st->codec->codec_id = AV_CODEC_ID_PCM_S24BE;
                else if (descriptor->bits_per_sample == 32)
                    st->codec->codec_id = AV_CODEC_ID_PCM_S32BE;
            } else if (st->codec->codec_id == AV_CODEC_ID_MP2) {
                st->need_parsing = AVSTREAM_PARSE_FULL;
            }
        }
        if (descriptor->extradata) {
            if (!ff_alloc_extradata(st->codec, descriptor->extradata_size)) {
                memcpy(st->codec->extradata, descriptor->extradata, descriptor->extradata_size);
            }
        } else if(st->codec->codec_id == AV_CODEC_ID_H264) {
            ff_generate_avci_extradata(st);
        }
        if (st->codec->codec_type != AVMEDIA_TYPE_DATA && (*essence_container_ul)[15] > 0x01) {
            /* TODO: decode timestamps */
            st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
        }
    }

    ret = 0;
fail_and_free:
    return ret;
}
 
/**
 * Reads a UTF-16BE string of `size` bytes from pb into a newly allocated,
 * NUL-terminated buffer stored in *str. On failure *str is freed/NULLed.
 *
 * @return the (non-negative) result of avio_get_str16be on success,
 *         AVERROR(EINVAL) for invalid sizes, AVERROR(ENOMEM) on allocation
 *         failure, or a negative avio error code
 */
static int mxf_read_utf16_string(AVIOContext *pb, int size, char** str)
{
    int ret;
    size_t buf_size;

    /* `size + size/2 + 1` below is computed in int; reject sizes that
     * would make it overflow (undefined behavior) */
    if (size < 0 || size > INT_MAX / 2)
        return AVERROR(EINVAL);

    /* worst-case UTF-8 expansion: each 2-byte UTF-16 unit can become
     * 3 UTF-8 bytes, i.e. size * 3/2, plus the NUL terminator */
    buf_size = size + size/2 + 1;
    *str = av_malloc(buf_size);
    if (!*str)
        return AVERROR(ENOMEM);

    if ((ret = avio_get_str16be(pb, size, *str, buf_size)) < 0) {
        av_freep(str);
        return ret;
    }

    return ret;
}
 
/* Format a 16-byte UID as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" into a
 * newly allocated string stored in *str.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
static int mxf_uid_to_str(UID uid, char **str)
{
    /* 32 hex digits + 4 dashes + NUL; av_mallocz guarantees termination */
    char *out = av_mallocz(sizeof(UID) * 2 + 4 + 1);
    int i;

    if (!out)
        return AVERROR(ENOMEM);
    *str = out;

    for (i = 0; i < sizeof(UID); i++) {
        snprintf(out, 2 + 1, "%.2x", uid[i]);
        out += 2;
        /* dash after bytes 3, 5, 7 and 9 (UUID-style grouping) */
        if (i == 3 || i == 5 || i == 7 || i == 9)
            *out++ = '-';
    }
    return 0;
}
 
/* Convert a 64-bit MXF timestamp (year in the top 16 bits, then one byte
 * each for month, day, hour, minute, second; the lowest byte is not read
 * here) to a "YYYY-MM-DD HH:MM:SS" string stored in *str.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
static int mxf_timestamp_to_str(uint64_t timestamp, char **str)
{
    struct tm time = {0};

    time.tm_year = (timestamp >> 48) - 1900;
    time.tm_mon  = ((timestamp >> 40) & 0xFF) - 1;
    time.tm_mday = (timestamp >> 32) & 0xFF;
    time.tm_hour = (timestamp >> 24) & 0xFF;
    time.tm_min  = (timestamp >> 16) & 0xFF;
    time.tm_sec  = (timestamp >>  8) & 0xFF;

    /* clamp month/day so strftime always gets something representable */
    if (time.tm_mon < 0)
        time.tm_mon = 0;
    if (time.tm_mday < 1)
        time.tm_mday = 1;

    *str = av_mallocz(32);
    if (!*str)
        return AVERROR(ENOMEM);
    strftime(*str, 32, "%Y-%m-%d %H:%M:%S", &time);

    return 0;
}
 
/* Helpers for mxf_read_identification_metadata below: each reads one local
 * tag's payload from pb and stores it in s->metadata. They rely on the
 * caller's in-scope `ret`, `size` and `s`, and return from the caller on
 * error. The dictionary takes ownership of `str` (AV_DICT_DONT_STRDUP_VAL). */

/* read a UTF-16 string of `size` bytes and store it under `name` */
#define SET_STR_METADATA(pb, name, str) do { \
    if ((ret = mxf_read_utf16_string(pb, size, &str)) < 0) \
        return ret; \
    av_dict_set(&s->metadata, name, str, AV_DICT_DONT_STRDUP_VAL); \
} while (0)

/* read a 16-byte UID into `var` and store its dashed-hex form under `name` */
#define SET_UID_METADATA(pb, name, var, str) do { \
    avio_read(pb, var, 16); \
    if ((ret = mxf_uid_to_str(var, &str)) < 0) \
        return ret; \
    av_dict_set(&s->metadata, name, str, AV_DICT_DONT_STRDUP_VAL); \
} while (0)

/* read a 64-bit timestamp into `var` and store its string form under `name` */
#define SET_TS_METADATA(pb, name, var, str) do { \
    var = avio_rb64(pb); \
    if ((ret = mxf_timestamp_to_str(var, &str)) < 0) \
        return ret; \
    av_dict_set(&s->metadata, name, str, AV_DICT_DONT_STRDUP_VAL); \
} while (0)
 
/* Parses one local tag of an Identification metadata set and exposes its
 * value as demuxer metadata. `_uid` and `klv_offset` are unused; the
 * signature matches the read-table callback shape. Unknown tags are
 * silently ignored. Returns 0, or a negative error from the SET_* macros. */
static int mxf_read_identification_metadata(void *arg, AVIOContext *pb, int tag, int size, UID _uid, int64_t klv_offset)
{
    MXFContext *mxf = arg;
    AVFormatContext *s = mxf->fc;
    int ret;
    UID uid = { 0 };
    char *str = NULL;
    uint64_t ts;
    switch (tag) {
    case 0x3C01:
        SET_STR_METADATA(pb, "company_name", str);
        break;
    case 0x3C02:
        SET_STR_METADATA(pb, "product_name", str);
        break;
    case 0x3C04:
        SET_STR_METADATA(pb, "product_version", str);
        break;
    case 0x3C05:
        SET_UID_METADATA(pb, "product_uid", uid, str);
        break;
    case 0x3C06:
        SET_TS_METADATA(pb, "modification_date", ts, str);
        break;
    case 0x3C08:
        SET_STR_METADATA(pb, "application_platform", str);
        break;
    case 0x3C09:
        SET_UID_METADATA(pb, "generation_uid", uid, str);
        break;
    case 0x3C0A:
        SET_UID_METADATA(pb, "uid", uid, str);
        break;
    }
    return 0;
}
 
/* Dispatch table mapping metadata KLV keys to their read functions, plus the
 * size and type of the metadata set to allocate (entries listing only a key
 * and function leave ctx_size/type zero-initialized). Terminated by an
 * all-zero key. */
static const MXFMetadataReadTableEntry mxf_metadata_read_table[] = {
    { { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x05,0x01,0x00 }, mxf_read_primer_pack },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02,0x01,0x00 }, mxf_read_partition_pack },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02,0x02,0x00 }, mxf_read_partition_pack },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02,0x03,0x00 }, mxf_read_partition_pack },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02,0x04,0x00 }, mxf_read_partition_pack },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x03,0x01,0x00 }, mxf_read_partition_pack },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x03,0x02,0x00 }, mxf_read_partition_pack },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x03,0x03,0x00 }, mxf_read_partition_pack },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x03,0x04,0x00 }, mxf_read_partition_pack },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x04,0x02,0x00 }, mxf_read_partition_pack },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x04,0x04,0x00 }, mxf_read_partition_pack },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0D,0x01,0x01,0x01,0x01,0x01,0x30,0x00 }, mxf_read_identification_metadata },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x18,0x00 }, mxf_read_content_storage, 0, AnyType },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x37,0x00 }, mxf_read_source_package, sizeof(MXFPackage), SourcePackage },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x36,0x00 }, mxf_read_material_package, sizeof(MXFPackage), MaterialPackage },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x0F,0x00 }, mxf_read_sequence, sizeof(MXFSequence), Sequence },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x11,0x00 }, mxf_read_source_clip, sizeof(MXFStructuralComponent), SourceClip },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x44,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), MultipleDescriptor },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x42,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* Generic Sound */
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x28,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* CDCI */
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x29,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* RGBA */
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x51,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* MPEG 2 Video */
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x48,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* Wave */
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x47,0x00 }, mxf_read_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* AES3 */
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3A,0x00 }, mxf_read_track, sizeof(MXFTrack), Track }, /* Static Track */
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3B,0x00 }, mxf_read_track, sizeof(MXFTrack), Track }, /* Generic Track */
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x14,0x00 }, mxf_read_timecode_component, sizeof(MXFTimecodeComponent), TimecodeComponent },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x04,0x01,0x02,0x02,0x00,0x00 }, mxf_read_cryptographic_context, sizeof(MXFCryptoContext), CryptoContext },
    { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x10,0x01,0x00 }, mxf_read_index_table_segment, sizeof(MXFIndexTableSegment), IndexTableSegment },
    { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, NULL, 0, AnyType },
};
 
/**
 * Parses the local tags of one metadata KLV and feeds each to read_child.
 * When ctx_size is non-zero a metadata set of that size is allocated, tagged
 * with `type` and registered via mxf_add_metadata_set (which then owns it);
 * otherwise the tags are parsed directly into the MXFContext.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure, or a negative
 *         error from read_child / on a malformed local set
 */
static int mxf_read_local_tags(MXFContext *mxf, KLVPacket *klv, MXFMetadataReadFunc *read_child, int ctx_size, enum MXFMetadataSetType type)
{
    AVIOContext *pb = mxf->fc->pb;
    MXFMetadataSet *ctx = ctx_size ? av_mallocz(ctx_size) : mxf;
    uint64_t klv_end = avio_tell(pb) + klv->length;

    if (!ctx)
        return AVERROR(ENOMEM);
    while (avio_tell(pb) + 4 < klv_end && !url_feof(pb)) {
        int ret;
        int tag = avio_rb16(pb);
        int size = avio_rb16(pb); /* KLV specified by 0x53 */
        uint64_t next = avio_tell(pb) + size;
        UID uid = {0};

        av_dlog(mxf->fc, "local tag %#04x size %d\n", tag, size);
        if (!size) { /* ignore empty tag, needed for some files with empty UMID tag */
            av_log(mxf->fc, AV_LOG_ERROR, "local tag %#04x with 0 size\n", tag);
            continue;
        }
        if (tag > 0x7FFF) { /* dynamic tag */
            /* resolve the dynamic tag to its UID via the primer's local tag table */
            int i;
            for (i = 0; i < mxf->local_tags_count; i++) {
                int local_tag = AV_RB16(mxf->local_tags+i*18);
                if (local_tag == tag) {
                    memcpy(uid, mxf->local_tags+i*18+2, 16);
                    av_dlog(mxf->fc, "local tag %#04x\n", local_tag);
                    PRINT_KEY(mxf->fc, "uid", uid);
                }
            }
        }
        if (ctx_size && tag == 0x3C0A)
            avio_read(pb, ctx->uid, 16);
        else if ((ret = read_child(ctx, pb, tag, size, uid, -1)) < 0) {
            /* don't leak the partially-filled metadata set on child error */
            if (ctx_size)
                av_free(ctx);
            return ret;
        }

        /* Accept the 64k local set limit being exceeded (Avid). Don't accept
         * it extending past the end of the KLV though (zzuf5.mxf). */
        if (avio_tell(pb) > klv_end) {
            if (ctx_size)
                av_free(ctx);

            av_log(mxf->fc, AV_LOG_ERROR,
                   "local tag %#04x extends past end of local set @ %#"PRIx64"\n",
                   tag, klv->offset);
            return AVERROR_INVALIDDATA;
        } else if (avio_tell(pb) <= next)   /* only seek forward, else this can loop for a long time */
            avio_seek(pb, next, SEEK_SET);
    }
    if (ctx_size) ctx->type = type;
    return ctx_size ? mxf_add_metadata_set(mxf, ctx) : 0;
}
 
/**
 * Seeks to the previous partition, if possible
 * @return <= 0 if we should stop parsing, > 0 if we should keep going
 */
static int mxf_seek_to_previous_partition(MXFContext *mxf)
{
    AVIOContext *pb = mxf->fc->pb;
    uint64_t prev_pos;

    /* no current partition -> all partitions already parsed */
    if (!mxf->current_partition)
        return 0;

    prev_pos = mxf->run_in + mxf->current_partition->previous_partition;

    /* never seek back to (or before) a position we already passed going forward */
    if (prev_pos <= mxf->last_forward_tell)
        return 0; /* we've parsed all partitions */

    /* seek to previous partition */
    avio_seek(pb, prev_pos, SEEK_SET);
    mxf->current_partition = NULL;

    av_dlog(mxf->fc, "seeking to previous partition\n");

    return 1;
}
 
/**
 * Called when essence is encountered
 * @return <= 0 if we should stop parsing, > 0 if we should keep going
 */
static int mxf_parse_handle_essence(MXFContext *mxf)
{
    AVIOContext *pb = mxf->fc->pb;
    uint64_t target;
    int64_t seek_ret;

    /* while parsing backward, essence means we should hop to the
     * partition before the current one */
    if (mxf->parsing_backward)
        return mxf_seek_to_previous_partition(mxf);

    if (!mxf->footer_partition && !mxf->last_partition) {
        av_dlog(mxf->fc, "can't find last partition\n");
        return 0;
    }

    /* prefer the footer partition offset; fall back to the RIP's last partition */
    target = mxf->footer_partition ? mxf->footer_partition : mxf->last_partition;

    av_dlog(mxf->fc, "seeking to last partition\n");

    /* remember where we were so we don't end up seeking further back than this */
    mxf->last_forward_tell = avio_tell(pb);

    if (!pb->seekable) {
        av_log(mxf->fc, AV_LOG_INFO, "file is not seekable - not parsing last partition\n");
        return -1;
    }

    /* seek to last partition and parse backward */
    seek_ret = avio_seek(pb, mxf->run_in + target, SEEK_SET);
    if (seek_ret < 0) {
        av_log(mxf->fc, AV_LOG_ERROR, "failed to seek to last partition @ 0x%"PRIx64" (%"PRId64") - partial file?\n",
               mxf->run_in + target, seek_ret);
        return seek_ret;
    }

    mxf->current_partition = NULL;
    mxf->parsing_backward = 1;

    return 1;
}
 
/**
 * Called when the next partition or EOF is encountered
 * @return <= 0 if we should stop parsing, > 0 if we should keep going
 */
static int mxf_parse_handle_partition_or_eof(MXFContext *mxf)
{
    /* forward parsing just continues; backward parsing hops to the
     * previous partition (or stops when there is none left) */
    if (mxf->parsing_backward)
        return mxf_seek_to_previous_partition(mxf);
    return 1;
}
 
/**
 * Figures out the proper offset and length of the essence container in each partition
 */
static void mxf_compute_essence_containers(MXFContext *mxf)
{
    int i;

    /* everything is already correct */
    if (mxf->op == OPAtom)
        return;

    /* the last partition's essence length can't be computed from a
     * following partition (and isn't needed), so exclude it from the loop */
    for (i = 0; i < mxf->partitions_count - 1; i++) {
        MXFPartition *cur  = &mxf->partitions[i];
        MXFPartition *succ = &mxf->partitions[i + 1];

        if (!cur->body_sid)
            continue; /* BodySID == 0 -> no essence */

        /* essence container spans to the next partition */
        cur->essence_length = succ->this_partition - cur->essence_offset;

        if (cur->essence_length < 0) {
            /* next ThisPartition < essence_offset */
            cur->essence_length = 0;
            av_log(mxf->fc, AV_LOG_ERROR,
                   "partition %i: bad ThisPartition = %"PRIX64"\n",
                   i + 1, succ->this_partition);
        }
    }
}
 
/* Rounds position up to the next multiple of kag_size (identity when
 * already aligned). */
static int64_t round_to_kag(int64_t position, int kag_size)
{
    /* TODO: account for run-in? the spec isn't clear whether KAG should account for it */
    /* NOTE: kag_size may be any integer between 1 - 2^10 */
    int64_t rem = position % kag_size;
    return rem ? position - rem + kag_size : position;
}
 
static int is_pcm(enum AVCodecID codec_id)
{
    /* we only care about "normal" PCM codecs until we get samples */
    if (codec_id < AV_CODEC_ID_PCM_S16LE)
        return 0;
    return codec_id < AV_CODEC_ID_PCM_S24DAUD;
}
 
/**
 * Deal with the case where for some audio atoms EditUnitByteCount is
 * very small (2, 4..). In those cases we should read more than one
 * sample per call to mxf_read_packet().
 */
static void mxf_handle_small_eubc(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    AVCodecContext *codec;

    /* assuming non-OPAtom == frame wrapped
     * no sane writer would wrap 2 byte PCM packets with 20 byte headers.. */
    if (mxf->op != OPAtom)
        return;

    /* expect PCM with exactly one index table segment and a small (< 32) EUBC */
    if (s->nb_streams != 1)
        return;
    codec = s->streams[0]->codec;
    if (codec->codec_type != AVMEDIA_TYPE_AUDIO || !is_pcm(codec->codec_id))
        return;
    if (mxf->nb_index_tables != 1 || mxf->index_tables[0].nb_segments != 1)
        return;
    if (mxf->index_tables[0].segments[0]->edit_unit_byte_count >= 32)
        return;

    /* arbitrarily default to 48 kHz PAL audio frame size */
    /* TODO: We could compute this from the ratio between the audio
     *       and video edit rates for 48 kHz NTSC we could use the
     *       1802-1802-1802-1802-1801 pattern. */
    mxf->edit_units_per_packet = 1920;
}
 
/**
 * Looks for a Random Index Pack (RIP) at the end of the file and, if a
 * valid one is found, records the offset of the last partition in
 * mxf->last_partition. Always leaves the read position at mxf->run_in.
 */
static void mxf_read_random_index_pack(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    uint32_t length;
    int64_t file_size;
    KLVPacket klv;

    /* the RIP lives at the very end of the file; nothing to do if we can't seek */
    if (!s->pb->seekable)
        return;

    /* the last 4 bytes of the file hold the total byte length of the RIP KLV */
    file_size = avio_size(s->pb);
    avio_seek(s->pb, file_size - 4, SEEK_SET);
    length = avio_rb32(s->pb);
    /* sanity check: must cover at least one entry, and fit within the file */
    if (length <= 32 || length >= FFMIN(file_size, INT_MAX))
        goto end;
    avio_seek(s->pb, file_size - length, SEEK_SET);
    /* verify the KLV at that position really is a RIP of the advertised size
     * (20 = 16-byte key + assumed 4-byte BER length; TODO confirm) */
    if (klv_read_packet(&klv, s->pb) < 0 ||
        !IS_KLV_KEY(klv.key, mxf_random_index_pack_key) ||
        klv.length != length - 20)
        goto end;

    /* skip to the final 12 bytes of the pack value and read the 8-byte
     * offset there — presumably the last partition's byte offset followed
     * by the repeated overall length; verify against SMPTE 377M */
    avio_skip(s->pb, klv.length - 12);
    mxf->last_partition = avio_rb64(s->pb);

end:
    avio_seek(s->pb, mxf->run_in, SEEK_SET);
}
 
/**
 * Reads the MXF header: locates the header partition pack, walks the
 * partitions (forward, then backward from the footer/last partition once
 * essence is hit), parses all header metadata sets, builds the index
 * tables and streams, and finally seeks to the first essence KLV.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mxf_read_header(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    KLVPacket klv;
    int64_t essence_offset = 0;
    int ret;

    /* INT64_MAX so the first backward seek is never rejected by
     * mxf_seek_to_previous_partition() */
    mxf->last_forward_tell = INT64_MAX;
    mxf->edit_units_per_packet = 1;

    /* scan past any run-in for the header partition pack key */
    if (!mxf_read_sync(s->pb, mxf_header_partition_pack_key, 14)) {
        av_log(s, AV_LOG_ERROR, "could not find header partition pack key\n");
        return AVERROR_INVALIDDATA;
    }
    /* rewind over the matched key; run_in is the number of bytes before it */
    avio_seek(s->pb, -14, SEEK_CUR);
    mxf->fc = s;
    mxf->run_in = avio_tell(s->pb);

    /* may populate mxf->last_partition; restores the position afterwards */
    mxf_read_random_index_pack(s);

    /* main KLV parsing loop */
    while (!url_feof(s->pb)) {
        const MXFMetadataReadTableEntry *metadata;

        if (klv_read_packet(&klv, s->pb) < 0) {
            /* EOF - seek to previous partition or stop */
            if(mxf_parse_handle_partition_or_eof(mxf) <= 0)
                break;
            else
                continue;
        }

        PRINT_KEY(s, "read header", klv.key);
        av_dlog(s, "size %"PRIu64" offset %#"PRIx64"\n", klv.length, klv.offset);
        /* essence (or encrypted/system item) encountered */
        if (IS_KLV_KEY(klv.key, mxf_encrypted_triplet_key) ||
            IS_KLV_KEY(klv.key, mxf_essence_element_key) ||
            IS_KLV_KEY(klv.key, mxf_avid_essence_element_key) ||
            IS_KLV_KEY(klv.key, mxf_system_item_key)) {

            if (!mxf->current_partition) {
                av_log(mxf->fc, AV_LOG_ERROR, "found essence prior to first PartitionPack\n");
                return AVERROR_INVALIDDATA;
            }

            if (!mxf->current_partition->essence_offset) {
                /* for OP1a we compute essence_offset
                 * for OPAtom we point essence_offset after the KL (usually op1a_essence_offset + 20 or 25)
                 * TODO: for OP1a we could eliminate this entire if statement, always stopping parsing at op1a_essence_offset
                 *       for OPAtom we still need the actual essence_offset though (the KL's length can vary)
                 */
                int64_t op1a_essence_offset =
                    round_to_kag(mxf->current_partition->this_partition +
                                 mxf->current_partition->pack_length,       mxf->current_partition->kag_size) +
                    round_to_kag(mxf->current_partition->header_byte_count, mxf->current_partition->kag_size) +
                    round_to_kag(mxf->current_partition->index_byte_count,  mxf->current_partition->kag_size);

                if (mxf->op == OPAtom) {
                    /* point essence_offset to the actual data
                     * OPAtom has all the essence in one big KLV
                     */
                    mxf->current_partition->essence_offset = avio_tell(s->pb);
                    mxf->current_partition->essence_length = klv.length;
                } else {
                    /* NOTE: op1a_essence_offset may be less than to klv.offset (C0023S01.mxf) */
                    mxf->current_partition->essence_offset = op1a_essence_offset;
                }
            }

            /* remember where the very first essence KLV was, to seek back to later */
            if (!essence_offset)
                essence_offset = klv.offset;

            /* seek to footer, previous partition or stop */
            if (mxf_parse_handle_essence(mxf) <= 0)
                break;
            continue;
        } else if (!memcmp(klv.key, mxf_header_partition_pack_key, 13) &&
                   klv.key[13] >= 2 && klv.key[13] <= 4 && mxf->current_partition) {
            /* next partition pack - keep going, seek to previous partition or stop */
            if(mxf_parse_handle_partition_or_eof(mxf) <= 0)
                break;
            else if (mxf->parsing_backward)
                continue;
            /* we're still parsing forward. proceed to parsing this partition pack */
        }

        /* dispatch the KLV to the matching metadata reader */
        for (metadata = mxf_metadata_read_table; metadata->read; metadata++) {
            if (IS_KLV_KEY(klv.key, metadata->key)) {
                int res;
                if (klv.key[5] == 0x53) {
                    /* byte 5 == 0x53 -> local set with 2-byte tag/2-byte length items */
                    res = mxf_read_local_tags(mxf, &klv, metadata->read, metadata->ctx_size, metadata->type);
                } else {
                    uint64_t next = avio_tell(s->pb) + klv.length;
                    res = metadata->read(mxf, s->pb, 0, klv.length, klv.key, klv.offset);

                    /* only seek forward, else this can loop for a long time */
                    if (avio_tell(s->pb) > next) {
                        av_log(s, AV_LOG_ERROR, "read past end of KLV @ %#"PRIx64"\n",
                               klv.offset);
                        return AVERROR_INVALIDDATA;
                    }

                    avio_seek(s->pb, next, SEEK_SET);
                }
                if (res < 0) {
                    av_log(s, AV_LOG_ERROR, "error reading header metadata\n");
                    return res;
                }
                break;
            }
        }
        /* unrecognized KLV - skip its value */
        if (!metadata->read)
            avio_skip(s->pb, klv.length);
    }
    /* FIXME avoid seek */
    if (!essence_offset) {
        av_log(s, AV_LOG_ERROR, "no essence\n");
        return AVERROR_INVALIDDATA;
    }
    avio_seek(s->pb, essence_offset, SEEK_SET);

    mxf_compute_essence_containers(mxf);

    /* we need to do this before computing the index tables
     * to be able to fill in zero IndexDurations with st->duration */
    if ((ret = mxf_parse_structural_metadata(mxf)) < 0)
        goto fail;

    if ((ret = mxf_compute_index_tables(mxf)) < 0)
        goto fail;

    if (mxf->nb_index_tables > 1) {
        /* TODO: look up which IndexSID to use via EssenceContainerData */
        av_log(mxf->fc, AV_LOG_INFO, "got %i index tables - only the first one (IndexSID %i) will be used\n",
               mxf->nb_index_tables, mxf->index_tables[0].index_sid);
    } else if (mxf->nb_index_tables == 0 && mxf->op == OPAtom) {
        /* OPAtom demuxing (mxf_read_packet) requires an index table */
        av_log(mxf->fc, AV_LOG_ERROR, "cannot demux OPAtom without an index\n");
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    mxf_handle_small_eubc(s);

    return 0;
fail:
    mxf_read_close(s);

    return ret;
}
 
/**
 * Sets mxf->current_edit_unit based on what offset we're currently at.
 * Advances current_edit_unit until the NEXT edit unit starts beyond
 * current_offset, using the first index table for offset lookups.
 * @return next_ofs if OK, <0 on error
 */
static int64_t mxf_set_current_edit_unit(MXFContext *mxf, int64_t current_offset)
{
    int64_t last_ofs = -1, next_ofs = -1;
    MXFIndexTable *t = &mxf->index_tables[0];

    /* this is called from the OP1a demuxing logic, which means there
     * may be no index tables */
    if (mxf->nb_index_tables <= 0)
        return -1;

    /* find mxf->current_edit_unit so that the next edit unit starts ahead of current_offset */
    while (mxf->current_edit_unit >= 0) {
        if (mxf_edit_unit_absolute_offset(mxf, t, mxf->current_edit_unit + 1, NULL, &next_ofs, 0) < 0)
            return -1;

        /* offsets must be strictly increasing; otherwise bail out */
        if (next_ofs <= last_ofs) {
            /* large next_ofs didn't change or current_edit_unit wrapped
             * around this fixes the infinite loop on zzuf3.mxf */
            av_log(mxf->fc, AV_LOG_ERROR,
                   "next_ofs didn't change. not deriving packet timestamps\n");
            return -1;
        }

        if (next_ofs > current_offset)
            break;

        last_ofs = next_ofs;
        mxf->current_edit_unit++;
    }

    /* not checking mxf->current_edit_unit >= t->nb_ptses here since CBR files may lack IndexEntryArrays */
    if (mxf->current_edit_unit < 0)
        return -1;

    return next_ofs;
}
 
/**
 * Computes the cumulative audio sample count at the current edit unit for
 * the given stream, used to resynchronize audio PTS after a seek.
 *
 * For 48 kHz audio a known samples-per-frame pattern is looked up (e.g.
 * the NTSC 1602/1601 cadence); otherwise the count is derived from the
 * ratio of sample rate to edit rate, with a warning if that ratio is not
 * exact.
 *
 * @param sample_count output: samples elapsed before mxf->current_edit_unit
 * @return 0 (always succeeds)
 */
static int mxf_compute_sample_count(MXFContext *mxf, int stream_index, uint64_t *sample_count)
{
    int i, total = 0, size = 0;
    AVStream *st = mxf->fc->streams[stream_index];
    MXFTrack *track = st->priv_data;
    AVRational time_base = av_inv_q(track->edit_rate);       /* seconds per edit unit */
    AVRational sample_rate = av_inv_q(st->time_base);        /* samples per second */
    const MXFSamplesPerFrame *spf = NULL;

    if ((sample_rate.num / sample_rate.den) == 48000)
        spf = ff_mxf_get_samples_per_frame(mxf->fc, time_base);
    if (!spf) {
        /* no known cadence: sample_count = edit_units * sample_rate / edit_rate */
        int remainder = (sample_rate.num * time_base.num) % (time_base.den * sample_rate.den);
        *sample_count = av_q2d(av_mul_q((AVRational){mxf->current_edit_unit, 1},
                                        av_mul_q(sample_rate, time_base)));
        if (remainder)
            av_log(mxf->fc, AV_LOG_WARNING,
                   "seeking detected on stream #%d with time base (%d/%d) and sample rate (%d/%d), audio pts won't be accurate.\n",
                   stream_index, time_base.num, time_base.den, sample_rate.num, sample_rate.den);
        return 0;
    }

    /* sum one full cadence cycle; size = pattern length, total = samples per cycle */
    while (spf->samples_per_frame[size]) {
        total += spf->samples_per_frame[size];
        size++;
    }

    av_assert2(size);

    /* whole cycles, then the partial cycle up to the current edit unit */
    *sample_count = (mxf->current_edit_unit / size) * (uint64_t)total;
    for (i = 0; i < mxf->current_edit_unit % size; i++) {
        *sample_count += spf->samples_per_frame[i];
    }

    return 0;
}
 
/**
 * Stamps pkt->pts with the track's running sample count, then advances the
 * count by the number of whole PCM frames contained in the packet.
 * @return 0 on success, AVERROR(EINVAL) if the sample stride is unusable
 */
static int mxf_set_audio_pts(MXFContext *mxf, AVCodecContext *codec, AVPacket *pkt)
{
    MXFTrack *track = mxf->fc->streams[pkt->stream_index]->priv_data;
    int64_t bits_per_sample = av_get_bits_per_sample(codec->codec_id);
    int64_t frame_bits = codec->channels * bits_per_sample;

    pkt->pts = track->sample_count;

    /* reject layouts for which a whole-byte per-frame stride can't be formed */
    if (codec->channels <= 0 || bits_per_sample <= 0 || frame_bits < 8)
        return AVERROR(EINVAL);

    track->sample_count += pkt->size / (frame_bits / 8);
    return 0;
}
 
/**
 * OP1a (frame-wrapped) packet reader: scans KLVs from the current position,
 * decrypts encrypted triplets, maps essence element KLVs to streams, and
 * derives timestamps from the index table (video) or running sample count
 * (audio). Non-essence KLVs are skipped.
 *
 * @return 0 on success, AVERROR_EOF at end of file, <0 on error
 */
static int mxf_read_packet_old(AVFormatContext *s, AVPacket *pkt)
{
    KLVPacket klv;
    MXFContext *mxf = s->priv_data;

    while (klv_read_packet(&klv, s->pb) == 0) {
        int ret;
        PRINT_KEY(s, "read packet", klv.key);
        av_dlog(s, "size %"PRIu64" offset %#"PRIx64"\n", klv.length, klv.offset);
        if (IS_KLV_KEY(klv.key, mxf_encrypted_triplet_key)) {
            ret = mxf_decrypt_triplet(s, pkt, &klv);
            if (ret < 0) {
                av_log(s, AV_LOG_ERROR, "invalid encoded triplet\n");
                return AVERROR_INVALIDDATA;
            }
            return 0;
        }
        if (IS_KLV_KEY(klv.key, mxf_essence_element_key) ||
            IS_KLV_KEY(klv.key, mxf_avid_essence_element_key)) {
            int index = mxf_get_stream_index(s, &klv);
            int64_t next_ofs, next_klv;
            AVStream *st;
            MXFTrack *track;
            AVCodecContext *codec;

            if (index < 0) {
                av_log(s, AV_LOG_ERROR, "error getting stream index %d\n", AV_RB32(klv.key+12));
                goto skip;
            }

            st = s->streams[index];
            track = st->priv_data;

            if (s->streams[index]->discard == AVDISCARD_ALL)
                goto skip;

            next_klv = avio_tell(s->pb) + klv.length;
            /* advance the edit-unit counter to match this KLV's offset */
            next_ofs = mxf_set_current_edit_unit(mxf, klv.offset);

            if (next_ofs >= 0 && next_klv > next_ofs) {
                /* if this check is hit then it's possible OPAtom was treated as OP1a
                 * truncate the packet since it's probably very large (>2 GiB is common) */
                avpriv_request_sample(s,
                                      "OPAtom misinterpreted as OP1a?"
                                      "KLV for edit unit %i extending into "
                                      "next edit unit",
                                      mxf->current_edit_unit);
                klv.length = next_ofs - avio_tell(s->pb);
            }

            /* check for 8 channels AES3 element */
            if (klv.key[12] == 0x06 && klv.key[13] == 0x01 && klv.key[14] == 0x10) {
                if (mxf_get_d10_aes3_packet(s->pb, s->streams[index], pkt, klv.length) < 0) {
                    av_log(s, AV_LOG_ERROR, "error reading D-10 aes3 frame\n");
                    return AVERROR_INVALIDDATA;
                }
            } else {
                ret = av_get_packet(s->pb, pkt, klv.length);
                if (ret < 0)
                    return ret;
            }
            pkt->stream_index = index;
            pkt->pos = klv.offset;

            codec = s->streams[index]->codec;
            if (codec->codec_type == AVMEDIA_TYPE_VIDEO && next_ofs >= 0) {
                /* mxf->current_edit_unit good - see if we have an index table to derive timestamps from */
                MXFIndexTable *t = &mxf->index_tables[0];

                if (mxf->nb_index_tables >= 1 && mxf->current_edit_unit < t->nb_ptses) {
                    pkt->dts = mxf->current_edit_unit + t->first_dts;
                    pkt->pts = t->ptses[mxf->current_edit_unit];
                } else if (track->intra_only) {
                    /* intra-only -> PTS = EditUnit.
                     * let utils.c figure out DTS since it can be < PTS if low_delay = 0 (Sony IMX30) */
                    pkt->pts = mxf->current_edit_unit;
                }
            } else if (codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                int ret = mxf_set_audio_pts(mxf, codec, pkt);
                if (ret < 0)
                    return ret;
            }

            /* seek for truncated packets */
            avio_seek(s->pb, next_klv, SEEK_SET);

            return 0;
        } else
        skip:
            avio_skip(s->pb, klv.length);
    }
    return url_feof(s->pb) ? AVERROR_EOF : -1;
}
 
/**
 * Packet reader entry point. OP1a files are delegated to
 * mxf_read_packet_old(); OPAtom (clip-wrapped, single stream) files are
 * demuxed here by slicing the single essence KLV into edit-unit-sized
 * packets via the index table.
 *
 * @return 0 on success, AVERROR_EOF past the stream duration, <0 on error
 */
static int mxf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MXFContext *mxf = s->priv_data;
    int ret, size;
    int64_t ret64, pos, next_pos;
    AVStream *st;
    MXFIndexTable *t;
    int edit_units;

    if (mxf->op != OPAtom)
        return mxf_read_packet_old(s, pkt);

    /* OPAtom - clip wrapped demuxing */
    /* NOTE: mxf_read_header() makes sure nb_index_tables > 0 for OPAtom */
    st = s->streams[0];
    t = &mxf->index_tables[0];

    if (mxf->current_edit_unit >= st->duration)
        return AVERROR_EOF;

    /* read several edit units at once for small-EUBC audio (see mxf_handle_small_eubc) */
    edit_units = FFMIN(mxf->edit_units_per_packet, st->duration - mxf->current_edit_unit);

    if ((ret = mxf_edit_unit_absolute_offset(mxf, t, mxf->current_edit_unit, NULL, &pos, 1)) < 0)
        return ret;

    /* compute size by finding the next edit unit or the end of the essence container
     * not pretty, but it works */
    if ((ret = mxf_edit_unit_absolute_offset(mxf, t, mxf->current_edit_unit + edit_units, NULL, &next_pos, 0)) < 0 &&
        (next_pos = mxf_essence_container_end(mxf, t->body_sid)) <= 0) {
        av_log(s, AV_LOG_ERROR, "unable to compute the size of the last packet\n");
        return AVERROR_INVALIDDATA;
    }

    if ((size = next_pos - pos) <= 0) {
        av_log(s, AV_LOG_ERROR, "bad size: %i\n", size);
        return AVERROR_INVALIDDATA;
    }

    if ((ret64 = avio_seek(s->pb, pos, SEEK_SET)) < 0)
        return ret64;

    if ((size = av_get_packet(s->pb, pkt, size)) < 0)
        return size;

    pkt->stream_index = 0;

    /* derive timestamps: video from the index table, audio from the sample count */
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && t->ptses &&
        mxf->current_edit_unit >= 0 && mxf->current_edit_unit < t->nb_ptses) {
        pkt->dts = mxf->current_edit_unit + t->first_dts;
        pkt->pts = t->ptses[mxf->current_edit_unit];
    } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        int ret = mxf_set_audio_pts(mxf, st->codec, pkt);
        if (ret < 0)
            return ret;
    }

    mxf->current_edit_unit += edit_units;

    return 0;
}
 
/**
 * Frees all demuxer state: metadata sets (with their type-specific
 * sub-allocations), partitions, local tag table, AES context and index
 * tables. Stream priv_data pointers alias metadata sets, so they are
 * NULLed rather than freed.
 */
static int mxf_read_close(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    MXFIndexTableSegment *seg;
    int i;

    av_freep(&mxf->packages_refs);

    /* priv_data points into mxf->metadata_sets; clear to avoid double free */
    for (i = 0; i < s->nb_streams; i++)
        s->streams[i]->priv_data = NULL;

    for (i = 0; i < mxf->metadata_sets_count; i++) {
        /* free the per-type owned arrays before the set itself */
        switch (mxf->metadata_sets[i]->type) {
        case Descriptor:
            av_freep(&((MXFDescriptor *)mxf->metadata_sets[i])->extradata);
            break;
        case MultipleDescriptor:
            av_freep(&((MXFDescriptor *)mxf->metadata_sets[i])->sub_descriptors_refs);
            break;
        case Sequence:
            av_freep(&((MXFSequence *)mxf->metadata_sets[i])->structural_components_refs);
            break;
        case SourcePackage:
        case MaterialPackage:
            av_freep(&((MXFPackage *)mxf->metadata_sets[i])->tracks_refs);
            break;
        case IndexTableSegment:
            seg = (MXFIndexTableSegment *)mxf->metadata_sets[i];
            av_freep(&seg->temporal_offset_entries);
            av_freep(&seg->flag_entries);
            av_freep(&seg->stream_offset_entries);
            break;
        default:
            break;
        }
        av_freep(&mxf->metadata_sets[i]);
    }
    av_freep(&mxf->partitions);
    av_freep(&mxf->metadata_sets);
    av_freep(&mxf->aesc);
    av_freep(&mxf->local_tags);

    if (mxf->index_tables) {
        for (i = 0; i < mxf->nb_index_tables; i++) {
            av_freep(&mxf->index_tables[i].segments);
            av_freep(&mxf->index_tables[i].ptses);
            av_freep(&mxf->index_tables[i].fake_index);
        }
    }
    av_freep(&mxf->index_tables);

    return 0;
}
 
static int mxf_probe(AVProbeData *p) {
    const uint8_t *scan = p->buf;
    const uint8_t *limit;

    if (p->buf_size < sizeof(mxf_header_partition_pack_key))
        return 0;

    /* Must skip Run-In Sequence and search for MXF header partition pack key SMPTE 377M 5.5 */
    limit = p->buf + p->buf_size - sizeof(mxf_header_partition_pack_key);
    while (scan < limit) {
        if (IS_KLV_KEY(scan, mxf_header_partition_pack_key))
            return AVPROBE_SCORE_MAX;
        scan++;
    }
    return 0;
}
 
/* rudimentary byte seek */
/* XXX: use MXF Index */
/**
 * Seek handler. With an index table, resolves sample_time to a file offset
 * via mxf_edit_unit_absolute_offset(); without one, falls back to a crude
 * bitrate-based byte seek. Afterwards the audio sample counters of all
 * audio streams are recomputed so their PTS stay consistent.
 */
static int mxf_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
{
    AVStream *st = s->streams[stream_index];
    int64_t seconds;
    MXFContext* mxf = s->priv_data;
    int64_t seekpos;
    int i, ret;
    int64_t ret64;
    MXFIndexTable *t;
    MXFTrack *source_track = st->priv_data;

    /* if audio then truncate sample_time to EditRate */
    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        sample_time = av_rescale_q(sample_time, st->time_base, av_inv_q(source_track->edit_rate));

    if (mxf->nb_index_tables <= 0) {
        /* no index: approximate the byte position from the overall bitrate */
        if (!s->bit_rate)
            return AVERROR_INVALIDDATA;
        if (sample_time < 0)
            sample_time = 0;
        seconds = av_rescale(sample_time, st->time_base.num, st->time_base.den);

        if ((ret64 = avio_seek(s->pb, (s->bit_rate * seconds) >> 3, SEEK_SET)) < 0)
            return ret64;
        ff_update_cur_dts(s, st, sample_time);
        mxf->current_edit_unit = sample_time;
    } else {
        t = &mxf->index_tables[0];

        /* clamp above zero, else ff_index_search_timestamp() returns negative
         * this also means we allow seeking before the start */
        sample_time = FFMAX(sample_time, 0);

        if (t->fake_index) {
            /* behave as if we have a proper index */
            if ((sample_time = ff_index_search_timestamp(t->fake_index, t->nb_ptses, sample_time, flags)) < 0)
                return sample_time;
        } else {
            /* no IndexEntryArray (one or more CBR segments)
             * make sure we don't seek past the end */
            sample_time = FFMIN(sample_time, source_track->original_duration - 1);
        }

        /* was `<< 0` (left shift), which treated ANY nonzero return as an
         * error instead of only negative ones; fixed to `< 0` */
        if ((ret = mxf_edit_unit_absolute_offset(mxf, t, sample_time, &sample_time, &seekpos, 1)) < 0)
            return ret;

        ff_update_cur_dts(s, st, sample_time);
        mxf->current_edit_unit = sample_time;
        avio_seek(s->pb, seekpos, SEEK_SET);
    }

    // Update all tracks sample count
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *cur_st = s->streams[i];
        MXFTrack *cur_track = cur_st->priv_data;
        uint64_t current_sample_count = 0;
        if (cur_st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            ret = mxf_compute_sample_count(mxf, i, &current_sample_count);
            if (ret < 0)
                return ret;

            cur_track->sample_count = current_sample_count;
        }
    }
    return 0;
}
 
/* demuxer registration: MXF probing, header parsing, packet reading,
 * cleanup and rudimentary seeking as implemented above */
AVInputFormat ff_mxf_demuxer = {
    .name           = "mxf",
    .long_name      = NULL_IF_CONFIG_SMALL("MXF (Material eXchange Format)"),
    .priv_data_size = sizeof(MXFContext),
    .read_probe     = mxf_probe,
    .read_header    = mxf_read_header,
    .read_packet    = mxf_read_packet,
    .read_close     = mxf_read_close,
    .read_seek      = mxf_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/mxfenc.c
0,0 → 1,2185
/*
* MXF muxer
* Copyright (c) 2008 GUCAS, Zhentan Feng <spyfeng at gmail dot com>
* Copyright (c) 2008 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/*
* References
* SMPTE 336M KLV Data Encoding Protocol Using Key-Length-Value
* SMPTE 377M MXF File Format Specifications
* SMPTE 379M MXF Generic Container
* SMPTE 381M Mapping MPEG Streams into the MXF Generic Container
* SMPTE RP210: SMPTE Metadata Dictionary
* SMPTE RP224: Registry of SMPTE Universal Labels
*/
 
#include <inttypes.h>
#include <math.h>
#include <time.h>
 
#include "libavutil/opt.h"
#include "libavutil/random_seed.h"
#include "libavutil/timecode.h"
#include "libavutil/avassert.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/dnxhddata.h"
#include "audiointerleave.h"
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "mxf.h"
#include "config.h"
 
extern AVOutputFormat ff_mxf_d10_muxer;
 
#define EDIT_UNITS_PER_BODY 250
#define KAG_SIZE 512
 
/* mapping between a 2-byte local tag and its full 16-byte UL,
 * as written into the primer pack */
typedef struct {
    int local_tag;
    UID uid;
} MXFLocalTagPair;
 
/* one entry of the index table being built by the muxer */
typedef struct {
    uint8_t flags;           // index entry flags — presumably SMPTE 377M edit-unit flags; verify against writer
    uint64_t offset;         // byte offset of the edit unit
    unsigned slice_offset;   ///< offset of audio slice
    uint16_t temporal_ref;   // temporal reference of the frame — TODO confirm semantics against writer
} MXFIndexEntry;
 
/* per-stream muxer state (stored in AVStream.priv_data by the muxer) */
typedef struct {
    AudioInterleaveContext aic;      // audio interleaving state
    UID track_essence_element_key;   // essence element key written for this track
    int index;                       ///< index in mxf_essence_container_uls table
    const UID *codec_ul;             // codec UL for this stream — presumably picked from the container table; verify
    int order;                       ///< interleaving order if dts are equal
    int interlaced;                  ///< whether picture is interlaced
    int field_dominance;             ///< tff=1, bff=2
    int component_depth;             // bit depth per component — TODO confirm where it is set
    int temporal_reordering;         // nonzero if frames are temporally reordered (B-frames) — verify
    AVRational aspect_ratio;         ///< display aspect ratio
    int closed_gop;                  ///< gop is closed, used in mpeg-2 frame parsing
    int video_bit_rate;              // video bitrate — presumably used for descriptor fields; verify
} MXFStreamContext;
 
/* one row of the essence container table: the container UL, the essence
 * element UL, the codec UL, and the descriptor writer for that essence */
typedef struct {
    UID container_ul;
    UID element_ul;
    UID codec_ul;
    void (*write_desc)(AVFormatContext *, AVStream *);
} MXFContainerEssenceEntry;
 
/* maps a codec ID to its first index in mxf_essence_container_uls;
 * terminated by AV_CODEC_ID_NONE */
static const struct {
    enum AVCodecID id;
    int index;
} mxf_essence_mappings[] = {
    { AV_CODEC_ID_MPEG2VIDEO, 0 },
    { AV_CODEC_ID_PCM_S24LE,  1 },
    { AV_CODEC_ID_PCM_S16LE,  1 },
    { AV_CODEC_ID_DVVIDEO,   15 },
    { AV_CODEC_ID_DNXHD,     24 },
    { AV_CODEC_ID_NONE }
} ;
 
static void mxf_write_wav_desc(AVFormatContext *s, AVStream *st);
static void mxf_write_aes3_desc(AVFormatContext *s, AVStream *st);
static void mxf_write_mpegvideo_desc(AVFormatContext *s, AVStream *st);
static void mxf_write_cdci_desc(AVFormatContext *s, AVStream *st);
static void mxf_write_generic_sound_desc(AVFormatContext *s, AVStream *st);
 
static const MXFContainerEssenceEntry mxf_essence_container_uls[] = {
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0x60,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x00,0x00,0x00 },
mxf_write_mpegvideo_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x06,0x03,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x16,0x01,0x03,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_aes3_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x06,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x16,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_wav_desc },
// D-10 625/50 PAL 50mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x01,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x01 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x01,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
// D-10 525/60 NTSC 50mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x02,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x02 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x02,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
// D-10 625/50 PAL 40mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x03,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x03 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x03,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
// D-10 525/60 NTSC 40mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x04,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x04 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x04,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
// D-10 625/50 PAL 30mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x05,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x05 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x05,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
// D-10 525/60 NTSC 30mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x06,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x06 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x06,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
// DV Unknown
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x02,0x7F,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x18,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x00,0x00,0x00 },
mxf_write_cdci_desc },
// DV25 525/60
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x02,0x40,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x18,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x01,0x00 },
mxf_write_cdci_desc },
// DV25 625/50
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x02,0x41,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x18,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x02,0x00 },
mxf_write_cdci_desc },
// DV50 525/60
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x02,0x50,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x18,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x03,0x00 },
mxf_write_cdci_desc },
// DV50 625/50
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x02,0x51,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x18,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x04,0x00 },
mxf_write_cdci_desc },
// DV100 1080/60
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x02,0x60,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x18,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x05,0x00 },
mxf_write_cdci_desc },
// DV100 1080/50
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x02,0x61,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x18,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x06,0x00 },
mxf_write_cdci_desc },
// DV100 720/60
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x02,0x62,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x18,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x07,0x00 },
mxf_write_cdci_desc },
// DV100 720/50
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x02,0x63,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x18,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x08,0x00 },
mxf_write_cdci_desc },
// DNxHD 1080p 10bit high
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x01,0x00,0x00 },
mxf_write_cdci_desc },
// DNxHD 1080p 8bit medium
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x03,0x00,0x00 },
mxf_write_cdci_desc },
// DNxHD 1080p 8bit high
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x04,0x00,0x00 },
mxf_write_cdci_desc },
// DNxHD 1080i 10bit high
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x07,0x00,0x00 },
mxf_write_cdci_desc },
// DNxHD 1080i 8bit medium
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x08,0x00,0x00 },
mxf_write_cdci_desc },
// DNxHD 1080i 8bit high
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x09,0x00,0x00 },
mxf_write_cdci_desc },
// DNxHD 720p 10bit
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 },
{ 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x10,0x00,0x00 },
mxf_write_cdci_desc },
// DNxHD 720p 8bit high
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 },
{ 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x11,0x00,0x00 },
mxf_write_cdci_desc },
// DNxHD 720p 8bit medium
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 },
{ 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x12,0x00,0x00 },
mxf_write_cdci_desc },
// DNxHD 720p 8bit low
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 },
{ 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x13,0x00,0x00 },
mxf_write_cdci_desc },
{ { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },
{ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },
{ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },
NULL },
};
 
/** Muxer state kept in AVFormatContext.priv_data for the whole file. */
typedef struct MXFContext {
    AVClass *av_class;
    int64_t footer_partition_offset;  ///< file offset where the footer partition was written
    int essence_container_count;      ///< number of distinct essence containers in use
    AVRational time_base;             ///< edit rate shared by all tracks
    int header_written;               ///< nonzero once the header partition has been emitted
    MXFIndexEntry *index_entries;     ///< per-edit-unit index data collected while muxing
    unsigned edit_units_count;        ///< number of valid entries in index_entries
    uint64_t timestamp;               ///< timestamp, as year(16),month(8),day(8),hour(8),minutes(8),msec/4(8)
    uint8_t slice_count;              ///< index slice count minus 1 (1 if no audio, 0 otherwise)
    int last_indexed_edit_unit;       ///< last edit unit already covered by an index segment
    uint64_t *body_partition_offset;  ///< file offsets of the body partitions written so far
    unsigned body_partitions_count;   ///< number of entries in body_partition_offset
    int last_key_index;               ///< index of last key frame
    uint64_t duration;                ///< total duration in edit units
    AVTimecode tc;                    ///< timecode context
    AVStream *timecode_track;         ///< synthetic stream used to emit the timecode track
    int timecode_base;                ///< rounded time code base (25 or 30)
    int edit_unit_byte_count;         ///< fixed edit unit byte count
    uint64_t body_offset;             ///< running byte offset inside the essence body
    uint32_t instance_number;         ///< 24-bit value embedded in every UMID (see mxf_write_umid)
    uint8_t umid[16];                 ///< unique material identifier
} MXFContext;
 
/* 12-byte prefix for locally generated UUIDs; mxf_write_uuid appends type+value. */
static const uint8_t uuid_base[] = { 0xAD,0xAB,0x44,0x24,0x2f,0x25,0x4d,0xc7,0x92,0xff,0x29,0xbd };
/* 13-byte SMPTE UMID label prefix; mxf_write_umid appends instance number and material bytes. */
static const uint8_t umid_ul[] = { 0x06,0x0A,0x2B,0x34,0x01,0x01,0x01,0x05,0x01,0x01,0x0D,0x00,0x13 };

/**
 * complete key for operation pattern, partitions, and primer pack
 */
static const uint8_t op1a_ul[] = { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x01,0x09,0x00 };
static const uint8_t footer_partition_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x04,0x04,0x00 }; // ClosedComplete
static const uint8_t primer_pack_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x05,0x01,0x00 };
static const uint8_t index_table_segment_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x10,0x01,0x00 };
static const uint8_t random_index_pack_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x11,0x01,0x00 };
static const uint8_t header_open_partition_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x02,0x01,0x00 }; // OpenIncomplete
static const uint8_t header_closed_partition_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x02,0x04,0x00 }; // ClosedComplete
static const uint8_t klv_fill_key[] = { 0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x03,0x01,0x02,0x10,0x01,0x00,0x00,0x00 };
static const uint8_t body_partition_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x03,0x04,0x00 }; // ClosedComplete

/**
 * partial key for header metadata; mxf_write_metadata_key appends the 3-byte set id
 */
static const uint8_t header_metadata_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0D,0x01,0x01,0x01,0x01 };
/* essence container UL used when a file mixes more than one container type */
static const uint8_t multiple_desc_ul[] = { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x0D,0x01,0x03,0x01,0x02,0x7F,0x01,0x00 };
 
/**
 * SMPTE RP210 http://www.smpte-ra.org/mdd/index.html
 * Every (2-byte local tag, 16-byte UL) pair that may appear in the header
 * metadata; the whole table is written verbatim by mxf_write_primer_pack.
 */
static const MXFLocalTagPair mxf_local_tag_batch[] = {
    // preface set
    { 0x3C0A, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x01,0x01,0x15,0x02,0x00,0x00,0x00,0x00}}, /* Instance UID */
    { 0x3B02, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x10,0x02,0x04,0x00,0x00}}, /* Last Modified Date */
    { 0x3B05, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x03,0x01,0x02,0x01,0x05,0x00,0x00,0x00}}, /* Version */
    { 0x3B06, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x06,0x04,0x00,0x00}}, /* Identifications reference */
    { 0x3B03, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x02,0x01,0x00,0x00}}, /* Content Storage reference */
    { 0x3B09, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x01,0x02,0x02,0x03,0x00,0x00,0x00,0x00}}, /* Operational Pattern UL */
    { 0x3B0A, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x01,0x02,0x02,0x10,0x02,0x01,0x00,0x00}}, /* Essence Containers UL batch */
    { 0x3B0B, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x01,0x02,0x02,0x10,0x02,0x02,0x00,0x00}}, /* DM Schemes UL batch */
    // Identification
    { 0x3C09, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x20,0x07,0x01,0x01,0x00,0x00,0x00}}, /* This Generation UID */
    { 0x3C01, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x20,0x07,0x01,0x02,0x01,0x00,0x00}}, /* Company Name */
    { 0x3C02, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x20,0x07,0x01,0x03,0x01,0x00,0x00}}, /* Product Name */
    { 0x3C04, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x20,0x07,0x01,0x05,0x01,0x00,0x00}}, /* Version String */
    { 0x3C05, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x20,0x07,0x01,0x07,0x00,0x00,0x00}}, /* Product ID */
    { 0x3C06, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x10,0x02,0x03,0x00,0x00}}, /* Modification Date */
    // Content Storage
    { 0x1901, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x05,0x01,0x00,0x00}}, /* Package strong reference batch */
    { 0x1902, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x05,0x02,0x00,0x00}}, /* Package strong reference batch */
    // Essence Container Data
    { 0x2701, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x06,0x01,0x00,0x00,0x00}}, /* Linked Package UID */
    { 0x3F07, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x01,0x03,0x04,0x04,0x00,0x00,0x00,0x00}}, /* BodySID */
    // Package
    { 0x4401, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x01,0x01,0x15,0x10,0x00,0x00,0x00,0x00}}, /* Package UID */
    { 0x4405, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x10,0x01,0x03,0x00,0x00}}, /* Package Creation Date */
    { 0x4404, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x10,0x02,0x05,0x00,0x00}}, /* Package Modified Date */
    { 0x4403, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x06,0x05,0x00,0x00}}, /* Tracks Strong reference array */
    { 0x4701, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x02,0x03,0x00,0x00}}, /* Descriptor */
    // Track
    { 0x4801, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x01,0x07,0x01,0x01,0x00,0x00,0x00,0x00}}, /* Track ID */
    { 0x4804, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x01,0x04,0x01,0x03,0x00,0x00,0x00,0x00}}, /* Track Number */
    { 0x4B01, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x30,0x04,0x05,0x00,0x00,0x00,0x00}}, /* Edit Rate */
    { 0x4B02, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x03,0x01,0x03,0x00,0x00}}, /* Origin */
    { 0x4803, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x02,0x04,0x00,0x00}}, /* Sequence reference */
    // Sequence
    { 0x0201, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x07,0x01,0x00,0x00,0x00,0x00,0x00}}, /* Data Definition UL */
    { 0x0202, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x02,0x01,0x01,0x03,0x00,0x00}}, /* Duration */
    { 0x1001, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x06,0x09,0x00,0x00}}, /* Structural Components reference array */
    // Source Clip
    { 0x1201, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x03,0x01,0x04,0x00,0x00}}, /* Start position */
    { 0x1101, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x03,0x01,0x00,0x00,0x00}}, /* SourcePackageID */
    { 0x1102, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x03,0x02,0x00,0x00,0x00}}, /* SourceTrackID */
    // Timecode Component
    { 0x1501, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x03,0x01,0x05,0x00,0x00}}, /* Start Time Code */
    { 0x1502, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x04,0x01,0x01,0x02,0x06,0x00,0x00}}, /* Rounded Time Code Base */
    { 0x1503, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x04,0x01,0x01,0x05,0x00,0x00,0x00}}, /* Drop Frame */
    // File Descriptor
    { 0x3F01, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x06,0x01,0x01,0x04,0x06,0x0B,0x00,0x00}}, /* Sub Descriptors reference array */
    { 0x3006, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x06,0x01,0x01,0x03,0x05,0x00,0x00,0x00}}, /* Linked Track ID */
    { 0x3001, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x06,0x01,0x01,0x00,0x00,0x00,0x00}}, /* SampleRate */
    { 0x3004, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x01,0x02,0x00,0x00}}, /* Essence Container */
    // Generic Picture Essence Descriptor
    { 0x320C, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x03,0x01,0x04,0x00,0x00,0x00}}, /* Frame Layout */
    { 0x320D, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x01,0x03,0x02,0x05,0x00,0x00,0x00}}, /* Video Line Map */
    { 0x3203, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x05,0x02,0x02,0x00,0x00,0x00}}, /* Stored Width */
    { 0x3202, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x05,0x02,0x01,0x00,0x00,0x00}}, /* Stored Height */
    { 0x3209, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x05,0x01,0x0C,0x00,0x00,0x00}}, /* Display Width */
    { 0x3208, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x05,0x01,0x0B,0x00,0x00,0x00}}, /* Display Height */
    { 0x320E, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x01,0x01,0x01,0x00,0x00,0x00}}, /* Aspect Ratio */
    { 0x3201, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x01,0x06,0x01,0x00,0x00,0x00,0x00}}, /* Picture Essence Coding */
    { 0x3212, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x01,0x03,0x01,0x06,0x00,0x00,0x00}}, /* Field Dominance (Opt) */
    // CDCI Picture Essence Descriptor
    { 0x3301, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x01,0x05,0x03,0x0A,0x00,0x00,0x00}}, /* Component Depth */
    { 0x3302, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x05,0x01,0x05,0x00,0x00,0x00}}, /* Horizontal Subsampling */
    // Generic Sound Essence Descriptor
    { 0x3D02, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x04,0x02,0x03,0x01,0x04,0x00,0x00,0x00}}, /* Locked/Unlocked */
    { 0x3D03, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x02,0x03,0x01,0x01,0x01,0x00,0x00}}, /* Audio sampling rate */
    { 0x3D07, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x02,0x01,0x01,0x04,0x00,0x00,0x00}}, /* ChannelCount */
    { 0x3D01, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x04,0x02,0x03,0x03,0x04,0x00,0x00,0x00}}, /* Quantization bits */
    { 0x3D06, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x02,0x04,0x02,0x00,0x00,0x00,0x00}}, /* Sound Essence Compression */
    // Index Table Segment
    { 0x3F0B, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x05,0x30,0x04,0x06,0x00,0x00,0x00,0x00}}, /* Index Edit Rate */
    { 0x3F0C, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x07,0x02,0x01,0x03,0x01,0x0A,0x00,0x00}}, /* Index Start Position */
    { 0x3F0D, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x07,0x02,0x02,0x01,0x01,0x02,0x00,0x00}}, /* Index Duration */
    { 0x3F05, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x04,0x06,0x02,0x01,0x00,0x00,0x00,0x00}}, /* Edit Unit Byte Count */
    { 0x3F06, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x01,0x03,0x04,0x05,0x00,0x00,0x00,0x00}}, /* IndexSID */
    { 0x3F08, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x04,0x04,0x04,0x01,0x01,0x00,0x00,0x00}}, /* Slice Count */
    { 0x3F09, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x04,0x04,0x01,0x06,0x00,0x00,0x00}}, /* Delta Entry Array */
    { 0x3F0A, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x04,0x04,0x02,0x05,0x00,0x00,0x00}}, /* Index Entry Array */
    // MPEG video Descriptor
    { 0x8000, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x01,0x06,0x02,0x01,0x0B,0x00,0x00}}, /* BitRate */
    { 0x8007, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x01,0x06,0x02,0x01,0x0A,0x00,0x00}}, /* ProfileAndLevel */
    // Wave Audio Essence Descriptor
    { 0x3D09, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x02,0x03,0x03,0x05,0x00,0x00,0x00}}, /* Average Bytes Per Second */
    { 0x3D0A, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x02,0x03,0x02,0x01,0x00,0x00,0x00}}, /* Block Align */
};
 
/**
 * Emit a deterministic 16-byte UUID: the shared 12-byte uuid_base prefix
 * followed by the metadata set type and a caller-chosen value, both
 * big-endian 16-bit. The same (type, value) pair always yields the same
 * UUID, which is how the strong references between sets are resolved.
 */
static void mxf_write_uuid(AVIOContext *pb, enum MXFMetadataSetType type, int value)
{
    avio_write(pb, uuid_base, 12);
    avio_wb16(pb, type);
    avio_wb16(pb, value);
}
 
/**
 * Emit a 32-byte UMID: 13-byte SMPTE label, 3-byte instance number,
 * 15 bytes of the per-file material identifier, and a final type byte
 * (distinguishes e.g. material vs. source package UMIDs at the call sites).
 */
static void mxf_write_umid(AVFormatContext *s, int type)
{
    MXFContext *mxf = s->priv_data;
    avio_write(s->pb, umid_ul, 13);
    avio_wb24(s->pb, mxf->instance_number);
    avio_write(s->pb, mxf->umid, 15);
    avio_w8(s->pb, type);
}
 
/**
 * Emit the 8-byte header of a reference batch/array: element count
 * followed by the element size (always 16, the size of a UUID).
 */
static void mxf_write_refs_count(AVIOContext *pb, int ref_count)
{
    avio_wb32(pb, ref_count);
    avio_wb32(pb, 16);
}
 
/**
 * Number of bytes klv_encode_ber_length() will emit for @p len:
 * one byte for the short form (< 128), otherwise one length-of-length
 * byte plus the minimal count of big-endian payload bytes.
 */
static int klv_ber_length(uint64_t len)
{
    return (len < 128) ? 1 : (av_log2(len) >> 3) + 2;
}
 
/**
 * Write a BER-encoded KLV length. Values below 128 use the one-byte
 * short form; larger values use the long form: a 0x80|n marker byte
 * followed by the n big-endian bytes of the length.
 *
 * @return 1 for the short form, 0 for the long form (as in the
 *         original implementation; note this is not the byte count
 *         in the long-form case)
 */
static int klv_encode_ber_length(AVIOContext *pb, uint64_t len)
{
    int nbytes, i;

    if (len < 128) {
        // short form: the length itself fits in a single byte
        avio_w8(pb, len);
        return 1;
    }

    // minimal number of bytes needed to represent len
    nbytes = (av_log2(len) >> 3) + 1;

    // long form: marker byte then big-endian length bytes
    avio_w8(pb, 0x80 + nbytes);
    for (i = nbytes - 1; i >= 0; i--)
        avio_w8(pb, (len >> (8 * i)) & 0xff);

    return 0;
}
 
/**
 * Write a fixed 4-byte BER length (0x83 marker + 24-bit value).
 * Used where the length must have a known size so it can be
 * back-patched or computed ahead of the payload.
 */
static void klv_encode_ber4_length(AVIOContext *pb, int len)
{
    avio_w8(pb, 0x80 + 3);
    avio_wb24(pb, len);
}
 
/**
 * Look up the essence container UL table index for a codec id.
 *
 * @return index into mxf_essence_container_uls, or -1 if the codec
 *         has no mapping (table is terminated by an entry with id 0)
 */
static int mxf_get_essence_container_ul_index(enum AVCodecID id)
{
    int i = 0;

    while (mxf_essence_mappings[i].id) {
        if (mxf_essence_mappings[i].id == id)
            return mxf_essence_mappings[i].index;
        i++;
    }
    return -1;
}
 
/**
 * Write the primer pack: the table mapping every 2-byte local tag used
 * in the header metadata to its full 16-byte UL. The whole static
 * mxf_local_tag_batch table is emitted verbatim.
 */
static void mxf_write_primer_pack(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    const int tag_count = FF_ARRAY_ELEMS(mxf_local_tag_batch);
    int i;

    avio_write(pb, primer_pack_key, 16);
    // payload: 8-byte batch header + 18 bytes per (tag, UL) pair
    klv_encode_ber_length(pb, tag_count * 18 + 8);

    avio_wb32(pb, tag_count); // number of local tags
    avio_wb32(pb, 18);        // item size, always 18 according to the specs

    for (i = 0; i < tag_count; i++) {
        avio_wb16(pb, mxf_local_tag_batch[i].local_tag);
        avio_write(pb, mxf_local_tag_batch[i].uid, 16);
    }
}
 
/**
 * Write the 4-byte header of a header-metadata local tag:
 * 16-bit tag id followed by the 16-bit length of the value that the
 * caller emits next. Note the length field is only 16 bits wide.
 */
static void mxf_write_local_tag(AVIOContext *pb, int size, int tag)
{
    avio_wb16(pb, tag);
    avio_wb16(pb, size);
}
 
/**
 * Write a full 16-byte header-metadata set key: the shared 13-byte
 * prefix plus the 24-bit set identifier (e.g. 0x012f00 for Preface).
 */
static void mxf_write_metadata_key(AVIOContext *pb, unsigned int value)
{
    avio_write(pb, header_metadata_key, 13);
    avio_wb24(pb, value);
}
 
/**
 * Release the per-stream private contexts allocated by the muxer.
 */
static void mxf_free(AVFormatContext *s)
{
    int n;

    for (n = 0; n < s->nb_streams; n++)
        av_freep(&s->streams[n]->priv_data);
}
 
/**
 * Find the data-definition UL for a codec type.
 *
 * @return the matching entry, or the zero-UL terminator entry if the
 *         type is unknown (callers thus always get a valid pointer)
 */
static const MXFCodecUL *mxf_get_data_definition_ul(int type)
{
    const MXFCodecUL *ul;

    for (ul = ff_mxf_data_definition_uls; ul->uid[0]; ul++) {
        if (ul->id == type)
            break;
    }
    return ul;
}
 
//one EC -> one descriptor. N ECs -> MultipleDescriptor + N descriptors
// NOTE: the argument is evaluated up to twice; pass only side-effect-free
// expressions. Parenthesized to be safe against low-precedence arguments.
#define DESCRIPTOR_COUNT(essence_container_count) \
    ((essence_container_count) > 1 ? (essence_container_count) + 1 : (essence_container_count))
 
/**
 * Write the batch of essence container ULs referenced by the preface:
 * one UL per container in use, plus the MultipleDescriptor UL when more
 * than one container type is present.
 */
static void mxf_write_essence_container_refs(AVFormatContext *s)
{
    MXFContext *c = s->priv_data;
    AVIOContext *pb = s->pb;
    int i;

    mxf_write_refs_count(pb, DESCRIPTOR_COUNT(c->essence_container_count));
    av_log(s,AV_LOG_DEBUG, "essence container count:%d\n", c->essence_container_count);
    // NOTE(review): indexes streams by container count — assumes
    // essence_container_count <= nb_streams; presumably guaranteed by the
    // code that sets essence_container_count (not visible here).
    for (i = 0; i < c->essence_container_count; i++) {
        MXFStreamContext *sc = s->streams[i]->priv_data;
        avio_write(pb, mxf_essence_container_uls[sc->index].container_ul, 16);
    }

    if (c->essence_container_count > 1)
        avio_write(pb, multiple_desc_ul, 16);
}
 
/**
 * Write the Preface metadata set. The BER length (130 + 16 bytes per
 * descriptor) is hand-computed to match exactly the bytes emitted below;
 * any change to the tag list must update it.
 */
static void mxf_write_preface(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;

    mxf_write_metadata_key(pb, 0x012f00);
    PRINT_KEY(s, "preface key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 130 + 16LL * DESCRIPTOR_COUNT(mxf->essence_container_count));

    // write preface set uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, Preface, 0);
    PRINT_KEY(s, "preface uid", pb->buf_ptr - 16);

    // last modified date
    mxf_write_local_tag(pb, 8, 0x3B02);
    avio_wb64(pb, mxf->timestamp);

    // write version
    mxf_write_local_tag(pb, 2, 0x3B05);
    avio_wb16(pb, 258); // v1.2

    // write identification_refs
    mxf_write_local_tag(pb, 16 + 8, 0x3B06);
    mxf_write_refs_count(pb, 1);
    mxf_write_uuid(pb, Identification, 0);

    // write content_storage_refs
    mxf_write_local_tag(pb, 16, 0x3B03);
    mxf_write_uuid(pb, ContentStorage, 0);

    // operational pattern (OP1a)
    mxf_write_local_tag(pb, 16, 0x3B09);
    avio_write(pb, op1a_ul, 16);

    // write essence_container_refs
    mxf_write_local_tag(pb, 8 + 16LL * DESCRIPTOR_COUNT(mxf->essence_container_count), 0x3B0A);
    mxf_write_essence_container_refs(s);

    // write dm_scheme_refs (empty batch: no descriptive metadata schemes)
    mxf_write_local_tag(pb, 8, 0x3B0B);
    avio_wb64(pb, 0);
}
 
/*
 * Write a local tag containing an ascii string as utf-16.
 * Each input byte expands to two output bytes, and the KLV local tag
 * length field is only 16 bits wide, so the string is clamped to
 * UINT16_MAX/2 characters to keep the written length from overflowing.
 */
static void mxf_write_local_tag_utf16(AVIOContext *pb, int tag, const char *value)
{
    size_t len = strlen(value);
    int i, size;

    if (len > UINT16_MAX / 2)
        len = UINT16_MAX / 2; // truncate rather than emit a corrupt length
    size = (int)len;

    mxf_write_local_tag(pb, size * 2, tag);
    for (i = 0; i < size; i++)
        avio_wb16(pb, value[i]); // ascii -> UTF-16BE code unit
}
 
/**
 * Write the Identification metadata set (who produced the file).
 * The BER length (84 + UTF-16 string payloads) is hand-computed to match
 * the bytes emitted below. Version string becomes "0.0.0" in bitexact
 * mode so reference files stay stable across releases.
 */
static void mxf_write_identification(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    const char *company = "FFmpeg";
    const char *product = "OP1a Muxer";
    const char *version;
    int length;

    mxf_write_metadata_key(pb, 0x013000);
    PRINT_KEY(s, "identification key", pb->buf_ptr - 16);

    version = s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT ?
        "0.0.0" : AV_STRINGIFY(LIBAVFORMAT_VERSION);
    length = 84 + (strlen(company)+strlen(product)+strlen(version))*2; // utf-16
    klv_encode_ber_length(pb, length);

    // write uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, Identification, 0);
    PRINT_KEY(s, "identification uid", pb->buf_ptr - 16);

    // write generation uid
    mxf_write_local_tag(pb, 16, 0x3C09);
    mxf_write_uuid(pb, Identification, 1);

    mxf_write_local_tag_utf16(pb, 0x3C01, company); // Company Name
    mxf_write_local_tag_utf16(pb, 0x3C02, product); // Product Name
    mxf_write_local_tag_utf16(pb, 0x3C04, version); // Version String

    // write product uid
    mxf_write_local_tag(pb, 16, 0x3C05);
    mxf_write_uuid(pb, Identification, 2);

    // modification date
    mxf_write_local_tag(pb, 8, 0x3C06);
    avio_wb64(pb, mxf->timestamp);
}
 
/**
 * Write the ContentStorage set: references to the material and source
 * packages plus the essence container data. Fixed 92-byte payload.
 */
static void mxf_write_content_storage(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;

    mxf_write_metadata_key(pb, 0x011800);
    PRINT_KEY(s, "content storage key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 92);

    // write uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, ContentStorage, 0);
    PRINT_KEY(s, "content storage uid", pb->buf_ptr - 16);

    // write package reference (material + source package)
    mxf_write_local_tag(pb, 16 * 2 + 8, 0x1901);
    mxf_write_refs_count(pb, 2);
    mxf_write_uuid(pb, MaterialPackage, 0);
    mxf_write_uuid(pb, SourcePackage, 0);

    // write essence container data
    mxf_write_local_tag(pb, 8 + 16, 0x1902);
    mxf_write_refs_count(pb, 1);
    mxf_write_uuid(pb, EssenceContainerData, 0);
}
 
/**
 * Write a Track set for stream @p st inside a material or source package.
 * The UUID namespace is offset by TypeBottom for source-package tracks so
 * the two packages get distinct but deterministic identifiers.
 */
static void mxf_write_track(AVFormatContext *s, AVStream *st, enum MXFMetadataSetType type)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    MXFStreamContext *sc = st->priv_data;

    mxf_write_metadata_key(pb, 0x013b00);
    PRINT_KEY(s, "track key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 80);

    // write track uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, type == MaterialPackage ? Track : Track + TypeBottom, st->index);
    PRINT_KEY(s, "track uid", pb->buf_ptr - 16);

    // write track id (+2: presumably ids 0/1 are reserved for the
    // timecode track — confirm against mxf_write_package)
    mxf_write_local_tag(pb, 4, 0x4801);
    avio_wb32(pb, st->index+2);

    // write track number
    mxf_write_local_tag(pb, 4, 0x4804);
    if (type == MaterialPackage)
        avio_wb32(pb, 0); // track number of material package is 0
    else
        avio_write(pb, sc->track_essence_element_key + 12, 4); // last 4 key bytes

    // edit rate, written as a rational (den then num of the time base)
    mxf_write_local_tag(pb, 8, 0x4B01);
    avio_wb32(pb, mxf->time_base.den);
    avio_wb32(pb, mxf->time_base.num);

    // write origin
    mxf_write_local_tag(pb, 8, 0x4B02);
    avio_wb64(pb, 0);

    // write sequence refs
    mxf_write_local_tag(pb, 16, 0x4803);
    mxf_write_uuid(pb, type == MaterialPackage ? Sequence: Sequence + TypeBottom, st->index);
}
 
/* data definition UL for an SMPTE 12M timecode track */
static const uint8_t smpte_12m_timecode_track_data_ul[] = { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x01,0x03,0x02,0x01,0x01,0x00,0x00,0x00 };
 
/**
 * Write the fields shared by Sequence and structural-component sets:
 * the data definition UL (timecode UL for the synthetic timecode track,
 * otherwise looked up from the codec type) and the duration.
 */
static void mxf_write_common_fields(AVFormatContext *s, AVStream *st)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;

    // find data define uls
    mxf_write_local_tag(pb, 16, 0x0201);
    if (st == mxf->timecode_track)
        avio_write(pb, smpte_12m_timecode_track_data_ul, 16);
    else {
        const MXFCodecUL *data_def_ul = mxf_get_data_definition_ul(st->codec->codec_type);
        avio_write(pb, data_def_ul->uid, 16);
    }

    // write duration
    mxf_write_local_tag(pb, 8, 0x0202);
    avio_wb64(pb, mxf->duration);
}
 
/**
 * Write the Sequence set for a track. Each sequence references exactly
 * one structural component: a TimecodeComponent for the timecode track,
 * a SourceClip otherwise (shifted by TypeBottom for source packages).
 */
static void mxf_write_sequence(AVFormatContext *s, AVStream *st, enum MXFMetadataSetType type)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    enum MXFMetadataSetType component;

    mxf_write_metadata_key(pb, 0x010f00);
    PRINT_KEY(s, "sequence key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 80);

    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, type == MaterialPackage ? Sequence: Sequence + TypeBottom, st->index);

    PRINT_KEY(s, "sequence uid", pb->buf_ptr - 16);
    mxf_write_common_fields(s, st);

    // write structural component
    mxf_write_local_tag(pb, 16 + 8, 0x1001);
    mxf_write_refs_count(pb, 1);
    if (st == mxf->timecode_track)
        component = TimecodeComponent;
    else
        component = SourceClip;
    if (type == SourcePackage)
        component += TypeBottom;
    mxf_write_uuid(pb, component, st->index);
}
 
/**
 * Write the TimecodeComponent set: start timecode, rounded timecode base
 * (25 or 30) and the drop-frame flag, taken from the AVTimecode context.
 */
static void mxf_write_timecode_component(AVFormatContext *s, AVStream *st, enum MXFMetadataSetType type)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;

    mxf_write_metadata_key(pb, 0x011400);
    klv_encode_ber_length(pb, 75);

    // UID
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, type == MaterialPackage ? TimecodeComponent :
                   TimecodeComponent + TypeBottom, st->index);

    mxf_write_common_fields(s, st);

    // Start Time Code
    mxf_write_local_tag(pb, 8, 0x1501);
    avio_wb64(pb, mxf->tc.start);

    // Rounded Time Code Base
    mxf_write_local_tag(pb, 2, 0x1502);
    avio_wb16(pb, mxf->timecode_base);

    // Drop Frame
    mxf_write_local_tag(pb, 1, 0x1503);
    avio_w8(pb, !!(mxf->tc.flags & AV_TIMECODE_FLAG_DROPFRAME));
}
 
/**
 * Write a SourceClip (structural component) set. In the material package
 * it references the source package via UMID + track id; in the source
 * package the reference is zeroed, marking the end of the source chain.
 *
 * Only change vs. original: fixed the "sturctural" typo in the debug key
 * message; all emitted bytes are identical.
 */
static void mxf_write_structural_component(AVFormatContext *s, AVStream *st, enum MXFMetadataSetType type)
{
    AVIOContext *pb = s->pb;
    int i;

    mxf_write_metadata_key(pb, 0x011100);
    PRINT_KEY(s, "structural component key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 108);

    // write uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, type == MaterialPackage ? SourceClip: SourceClip + TypeBottom, st->index);

    PRINT_KEY(s, "structural component uid", pb->buf_ptr - 16);
    mxf_write_common_fields(s, st);

    // write start_position
    mxf_write_local_tag(pb, 8, 0x1201);
    avio_wb64(pb, 0);

    // write source package uid, end of the reference
    mxf_write_local_tag(pb, 32, 0x1101);
    if (type == SourcePackage) {
        for (i = 0; i < 4; i++)
            avio_wb64(pb, 0); // 32 zero bytes: no further source
    } else
        mxf_write_umid(s, 1);

    // write source track id
    mxf_write_local_tag(pb, 4, 0x1102);
    if (type == SourcePackage)
        avio_wb32(pb, 0);
    else
        avio_wb32(pb, st->index+2); // matches the id written in mxf_write_track
}
 
/**
 * Write the MultipleDescriptor set, used when the file carries more than
 * one stream: shared sample rate, the (possibly multiplexed) essence
 * container UL, and one SubDescriptor reference per stream.
 */
static void mxf_write_multi_descriptor(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    const uint8_t *ul;
    int i;

    mxf_write_metadata_key(pb, 0x014400);
    PRINT_KEY(s, "multiple descriptor key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 64 + 16LL * s->nb_streams);

    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, MultipleDescriptor, 0);
    PRINT_KEY(s, "multi_desc uid", pb->buf_ptr - 16);

    // write sample rate (den then num of the time base)
    mxf_write_local_tag(pb, 8, 0x3001);
    avio_wb32(pb, mxf->time_base.den);
    avio_wb32(pb, mxf->time_base.num);

    // write essence container ul
    mxf_write_local_tag(pb, 16, 0x3004);
    if (mxf->essence_container_count > 1)
        ul = multiple_desc_ul;
    else {
        MXFStreamContext *sc = s->streams[0]->priv_data;
        ul = mxf_essence_container_uls[sc->index].container_ul;
    }
    avio_write(pb, ul, 16);

    // write sub descriptor refs
    mxf_write_local_tag(pb, s->nb_streams * 16 + 8, 0x3F01);
    mxf_write_refs_count(pb, s->nb_streams);
    for (i = 0; i < s->nb_streams; i++)
        mxf_write_uuid(pb, SubDescriptor, i);
}
 
/**
 * Write the fields common to all essence descriptors: set key, total
 * BER4 length (@p size is the subclass payload; 20+8+12+20 covers the
 * fields written here), instance UID, linked track id, sample rate and
 * the essence container UL.
 */
static void mxf_write_generic_desc(AVFormatContext *s, AVStream *st, const UID key, unsigned size)
{
    MXFContext *mxf = s->priv_data;
    MXFStreamContext *sc = st->priv_data;
    AVIOContext *pb = s->pb;

    avio_write(pb, key, 16);
    klv_encode_ber4_length(pb, size+20+8+12+20);

    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, SubDescriptor, st->index);

    // linked track id, matching the id written in mxf_write_track
    mxf_write_local_tag(pb, 4, 0x3006);
    avio_wb32(pb, st->index+2);

    mxf_write_local_tag(pb, 8, 0x3001);
    avio_wb32(pb, mxf->time_base.den);
    avio_wb32(pb, mxf->time_base.num);

    mxf_write_local_tag(pb, 16, 0x3004);
    avio_write(pb, mxf_essence_container_uls[sc->index].container_ul, 16);
}
 
/* full set keys for the concrete essence descriptor types written below */
static const UID mxf_mpegvideo_descriptor_key = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x51,0x00 };
static const UID mxf_wav_descriptor_key       = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x48,0x00 };
static const UID mxf_aes3_descriptor_key      = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x47,0x00 };
static const UID mxf_cdci_descriptor_key      = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0D,0x01,0x01,0x01,0x01,0x01,0x28,0x00 };
static const UID mxf_generic_sound_descriptor_key = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0D,0x01,0x01,0x01,0x01,0x01,0x42,0x00 };
 
/**
 * Write the fields shared by CDCI-derived picture descriptors.
 * desc_size is hand-computed to match exactly the bytes emitted below
 * (per-field: 4-byte tag header + value); keep them in sync.
 */
static void mxf_write_cdci_common(AVFormatContext *s, AVStream *st, const UID key, unsigned size)
{
    MXFStreamContext *sc = st->priv_data;
    AVIOContext *pb = s->pb;
    int stored_height = (st->codec->height+15)/16*16; // rounded up to MB alignment
    int display_height;
    int f1, f2;
    unsigned desc_size = size+8+8+8+8+8+8+5+16+sc->interlaced*4+12+20;
    if (sc->interlaced && sc->field_dominance)
        desc_size += 5; // optional Field Dominance field

    mxf_write_generic_desc(s, st, key, desc_size);

    mxf_write_local_tag(pb, 4, 0x3203);
    avio_wb32(pb, st->codec->width);

    // stored height is per field when interlaced
    mxf_write_local_tag(pb, 4, 0x3202);
    avio_wb32(pb, stored_height>>sc->interlaced);

    mxf_write_local_tag(pb, 4, 0x3209);
    avio_wb32(pb, st->codec->width);

    if (st->codec->height == 608) // PAL + VBI
        display_height = 576;
    else if (st->codec->height == 512) // NTSC + VBI
        display_height = 486;
    else
        display_height = st->codec->height;

    mxf_write_local_tag(pb, 4, 0x3208);
    avio_wb32(pb, display_height>>sc->interlaced);

    // component depth
    mxf_write_local_tag(pb, 4, 0x3301);
    avio_wb32(pb, sc->component_depth);

    // horizontal subsampling (2 = 4:2:2 / 4:2:0 chroma)
    mxf_write_local_tag(pb, 4, 0x3302);
    avio_wb32(pb, 2);

    // frame layout
    mxf_write_local_tag(pb, 1, 0x320C);
    avio_w8(pb, sc->interlaced);

    // video line map: first active line of each field per SMPTE conventions
    switch (st->codec->height) {
    case  576: f1 = 23; f2 = st->codec->codec_id == AV_CODEC_ID_DVVIDEO ? 335 : 336; break;
    case  608: f1 =  7; f2 = 320; break;
    case  480: f1 = 20; f2 = st->codec->codec_id == AV_CODEC_ID_DVVIDEO ? 285 : 283; break;
    case  512: f1 =  7; f2 = 270; break;
    case  720: f1 = 26; f2 =   0; break; // progressive
    case 1080: f1 = 21; f2 = 584; break;
    default:   f1 =  0; f2 =   0; break;
    }

    if (!sc->interlaced) {
        f2  = 0;
        f1 *= 2; // single full-frame entry
    }

    // one entry for progressive, two for interlaced
    mxf_write_local_tag(pb, 12+sc->interlaced*4, 0x320D);
    avio_wb32(pb, sc->interlaced ? 2 : 1);
    avio_wb32(pb, 4); // element size
    avio_wb32(pb, f1);
    if (sc->interlaced)
        avio_wb32(pb, f2);

    mxf_write_local_tag(pb, 8, 0x320E);
    avio_wb32(pb, sc->aspect_ratio.num);
    avio_wb32(pb, sc->aspect_ratio.den);

    mxf_write_local_tag(pb, 16, 0x3201);
    avio_write(pb, *sc->codec_ul, 16);

    // optional field dominance, only meaningful for interlaced content
    if (sc->interlaced && sc->field_dominance) {
        mxf_write_local_tag(pb, 1, 0x3212);
        avio_w8(pb, sc->field_dominance);
    }

}
 
/** Write a plain CDCI picture descriptor (no codec-specific extras). */
static void mxf_write_cdci_desc(AVFormatContext *s, AVStream *st)
{
    mxf_write_cdci_common(s, st, mxf_cdci_descriptor_key, 0);
}
 
/**
 * Write an MPEG video descriptor: the CDCI fields plus bit rate and the
 * MPEG-2 profile-and-level byte (extra payload 8+5 bytes).
 */
static void mxf_write_mpegvideo_desc(AVFormatContext *s, AVStream *st)
{
    AVIOContext *pb = s->pb;
    MXFStreamContext *sc = st->priv_data;
    int profile_and_level = (st->codec->profile<<4) | st->codec->level;

    mxf_write_cdci_common(s, st, mxf_mpegvideo_descriptor_key, 8+5);

    // bit rate
    mxf_write_local_tag(pb, 4, 0x8000);
    avio_wb32(pb, sc->video_bit_rate);

    // profile and level
    mxf_write_local_tag(pb, 1, 0x8007);
    if (!st->codec->profile)
        profile_and_level |= 0x80; // escape bit: profile not signalled
    avio_w8(pb, profile_and_level);
}
 
/**
 * Write the fields shared by all sound descriptors: audio-locked flag,
 * sampling rate (as a rational), channel count and quantization bits
 * (extra payload 5+12+8+8 bytes on top of the generic descriptor).
 */
static void mxf_write_generic_sound_common(AVFormatContext *s, AVStream *st, const UID key, unsigned size)
{
    AVIOContext *pb = s->pb;

    mxf_write_generic_desc(s, st, key, size+5+12+8+8);

    // audio locked to video
    mxf_write_local_tag(pb, 1, 0x3D02);
    avio_w8(pb, 1);

    // write audio sampling rate as rate/1
    mxf_write_local_tag(pb, 8, 0x3D03);
    avio_wb32(pb, st->codec->sample_rate);
    avio_wb32(pb, 1);

    mxf_write_local_tag(pb, 4, 0x3D07);
    avio_wb32(pb, st->codec->channels);

    mxf_write_local_tag(pb, 4, 0x3D01);
    avio_wb32(pb, av_get_bits_per_sample(st->codec->codec_id));
}
 
/**
 * Write the WAVE-specific sound descriptor fields: block align and
 * average bytes per second (extra payload 6+8 bytes).
 */
static void mxf_write_wav_common(AVFormatContext *s, AVStream *st, const UID key, unsigned size)
{
    AVIOContext *pb = s->pb;

    mxf_write_generic_sound_common(s, st, key, size+6+8);

    mxf_write_local_tag(pb, 2, 0x3D0A);
    avio_wb16(pb, st->codec->block_align);

    // avg bytes per sec
    mxf_write_local_tag(pb, 4, 0x3D09);
    avio_wb32(pb, st->codec->block_align*st->codec->sample_rate);
}
 
/** Write a WAVE audio essence descriptor. */
static void mxf_write_wav_desc(AVFormatContext *s, AVStream *st)
{
    mxf_write_wav_common(s, st, mxf_wav_descriptor_key, 0);
}
 
/** Write an AES3 audio essence descriptor (same layout as WAVE). */
static void mxf_write_aes3_desc(AVFormatContext *s, AVStream *st)
{
    mxf_write_wav_common(s, st, mxf_aes3_descriptor_key, 0);
}
 
/** Write a generic sound essence descriptor (no container-specific extras). */
static void mxf_write_generic_sound_desc(AVFormatContext *s, AVStream *st)
{
    mxf_write_generic_sound_common(s, st, mxf_generic_sound_descriptor_key, 0);
}
 
/* Write a Material or Source package metadata set, followed by its timecode
 * track and one track/sequence/component chain per stream. The Source
 * package additionally carries the essence descriptor reference(s).
 * NOTE: the BER lengths (92/112 + 16*track_count) are hand-counted against
 * the items written below — keep them in sync with any layout change. */
static void mxf_write_package(AVFormatContext *s, enum MXFMetadataSetType type)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int i, track_count = s->nb_streams+1; // +1 for the timecode track

    if (type == MaterialPackage) {
        mxf_write_metadata_key(pb, 0x013600);
        PRINT_KEY(s, "Material Package key", pb->buf_ptr - 16);
        klv_encode_ber_length(pb, 92 + 16*track_count);
    } else {
        mxf_write_metadata_key(pb, 0x013700);
        PRINT_KEY(s, "Source Package key", pb->buf_ptr - 16);
        klv_encode_ber_length(pb, 112 + 16*track_count); // 20 bytes length for descriptor reference
    }

    // write uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, type, 0);
    av_log(s,AV_LOG_DEBUG, "package type:%d\n", type);
    PRINT_KEY(s, "package uid", pb->buf_ptr - 16);

    // write package umid
    mxf_write_local_tag(pb, 32, 0x4401);
    mxf_write_umid(s, type == SourcePackage);
    PRINT_KEY(s, "package umid second part", pb->buf_ptr - 16);

    // package creation date
    mxf_write_local_tag(pb, 8, 0x4405);
    avio_wb64(pb, mxf->timestamp);

    // package modified date
    mxf_write_local_tag(pb, 8, 0x4404);
    avio_wb64(pb, mxf->timestamp);

    // write track refs (strong references to all track sets of this package)
    mxf_write_local_tag(pb, track_count*16 + 8, 0x4403);
    mxf_write_refs_count(pb, track_count);
    mxf_write_uuid(pb, type == MaterialPackage ? Track :
                   Track + TypeBottom, -1); // timecode track
    for (i = 0; i < s->nb_streams; i++)
        mxf_write_uuid(pb, type == MaterialPackage ? Track : Track + TypeBottom, i);

    // write multiple descriptor reference
    if (type == SourcePackage) {
        mxf_write_local_tag(pb, 16, 0x4701);
        if (s->nb_streams > 1) {
            mxf_write_uuid(pb, MultipleDescriptor, 0);
            mxf_write_multi_descriptor(s);
        } else
            mxf_write_uuid(pb, SubDescriptor, 0);
    }

    // write timecode track
    mxf_write_track(s, mxf->timecode_track, type);
    mxf_write_sequence(s, mxf->timecode_track, type);
    mxf_write_timecode_component(s, mxf->timecode_track, type);

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        mxf_write_track(s, st, type);
        mxf_write_sequence(s, st, type);
        mxf_write_structural_component(s, st, type);

        if (type == SourcePackage) {
            MXFStreamContext *sc = st->priv_data;
            mxf_essence_container_uls[sc->index].write_desc(s, st);
        }
    }
}
 
/* Write the Essence Container Data set linking the essence (BodySID 1,
 * IndexSID 2) to the source package UMID. Always returns 0. */
static int mxf_write_essence_container_data(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;

    mxf_write_metadata_key(pb, 0x012300);
    klv_encode_ber_length(pb, 72); // fixed size: the four items below

    mxf_write_local_tag(pb, 16, 0x3C0A); // Instance UID
    mxf_write_uuid(pb, EssenceContainerData, 0);

    mxf_write_local_tag(pb, 32, 0x2701); // Linked Package UID
    mxf_write_umid(s, 1);

    mxf_write_local_tag(pb, 4, 0x3F07); // BodySID
    avio_wb32(pb, 1);

    mxf_write_local_tag(pb, 4, 0x3F06); // IndexSID
    avio_wb32(pb, 2);

    return 0;
}
 
/* Write the complete header metadata in the order required by the
 * structural model: preface, identification, content storage, the two
 * packages, then the essence container data. Always returns 0. */
static int mxf_write_header_metadata_sets(AVFormatContext *s)
{
    mxf_write_preface(s);
    mxf_write_identification(s);
    mxf_write_content_storage(s);
    mxf_write_package(s, MaterialPackage);
    mxf_write_package(s, SourcePackage);
    mxf_write_essence_container_data(s);
    return 0;
}
 
/**
 * Compute how many fill bytes are needed to pad from the given stream
 * position to a KAG (KLV Alignment Grid) boundary.
 *
 * A KLV fill item needs at least 20 bytes (16-byte key + 4-byte BER
 * length), so when the gap to the next boundary is smaller than that,
 * padding continues through to the boundary after it.
 *
 * @param size current byte position in the output
 * @return number of fill bytes required (0 when already aligned)
 */
static unsigned klv_fill_size(uint64_t size)
{
    unsigned bytes_to_boundary = KAG_SIZE - (size & (KAG_SIZE-1));

    if (bytes_to_boundary < 20) // smallest fill item possible
        return bytes_to_boundary + KAG_SIZE;
    // masking maps the "exactly aligned" case (KAG_SIZE) to zero
    return bytes_to_boundary & (KAG_SIZE-1);
}
 
/* Write an Index Table Segment. In CBR mode (edit_unit_byte_count set) a
 * short fixed-size segment is written; in VBR mode a full delta entry array
 * and per-edit-unit index entry array (with temporal offsets and key frame
 * offsets) is emitted and the in-memory index is then reset for the next
 * body partition. */
static void mxf_write_index_table_segment(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int i, j, temporal_reordering = 0;
    int key_index = mxf->last_key_index;

    av_log(s, AV_LOG_DEBUG, "edit units count %d\n", mxf->edit_units_count);

    if (!mxf->edit_units_count && !mxf->edit_unit_byte_count)
        return; // nothing to index yet

    avio_write(pb, index_table_segment_key, 16);

    if (mxf->edit_unit_byte_count) {
        klv_encode_ber_length(pb, 80); // CBR: fixed-size segment
    } else {
        // VBR: header items + delta entry array + index entry array
        klv_encode_ber_length(pb, 85 + 12+(s->nb_streams+1LL)*6 +
                              12+mxf->edit_units_count*(11+mxf->slice_count*4LL));
    }

    // instance id
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, IndexTableSegment, 0);

    // index edit rate
    mxf_write_local_tag(pb, 8, 0x3F0B);
    avio_wb32(pb, mxf->time_base.den);
    avio_wb32(pb, mxf->time_base.num);

    // index start position
    mxf_write_local_tag(pb, 8, 0x3F0C);
    avio_wb64(pb, mxf->last_indexed_edit_unit);

    // index duration
    mxf_write_local_tag(pb, 8, 0x3F0D);
    if (mxf->edit_unit_byte_count)
        avio_wb64(pb, 0); // index table covers whole container
    else
        avio_wb64(pb, mxf->edit_units_count);

    // edit unit byte count
    mxf_write_local_tag(pb, 4, 0x3F05);
    avio_wb32(pb, mxf->edit_unit_byte_count);

    // index sid
    mxf_write_local_tag(pb, 4, 0x3F06);
    avio_wb32(pb, 2);

    // body sid
    mxf_write_local_tag(pb, 4, 0x3F07);
    avio_wb32(pb, 1);

    if (!mxf->edit_unit_byte_count) {
        // real slice count - 1
        mxf_write_local_tag(pb, 1, 0x3F08);
        avio_w8(pb, mxf->slice_count);

        // delta entry array: one 6-byte entry per element of the edit unit
        mxf_write_local_tag(pb, 8 + (s->nb_streams+1)*6, 0x3F09);
        avio_wb32(pb, s->nb_streams+1); // num of entries
        avio_wb32(pb, 6); // size of one entry
        // write system item delta entry
        avio_w8(pb, 0);
        avio_w8(pb, 0); // slice entry
        avio_wb32(pb, 0); // element delta
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            MXFStreamContext *sc = st->priv_data;
            avio_w8(pb, sc->temporal_reordering);
            if (sc->temporal_reordering)
                temporal_reordering = 1;
            if (i == 0) { // video track
                avio_w8(pb, 0); // slice number
                avio_wb32(pb, KAG_SIZE); // system item size including klv fill
            } else { // audio track
                unsigned audio_frame_size = sc->aic.samples[0]*sc->aic.sample_size;
                audio_frame_size += klv_fill_size(audio_frame_size);
                avio_w8(pb, 1);
                avio_wb32(pb, (i-1)*audio_frame_size); // element delta
            }
        }

        // index entry array: 11 bytes + 4 per slice for every edit unit
        mxf_write_local_tag(pb, 8 + mxf->edit_units_count*(11+mxf->slice_count*4), 0x3F0A);
        avio_wb32(pb, mxf->edit_units_count); // num of entries
        avio_wb32(pb, 11+mxf->slice_count*4); // size of one entry

        for (i = 0; i < mxf->edit_units_count; i++) {
            int temporal_offset = 0;

            if (!(mxf->index_entries[i].flags & 0x33)) { // I frame
                mxf->last_key_index = key_index;
                key_index = i;
            }

            if (temporal_reordering) {
                // displayed position differs from coded position: find the
                // coded picture carrying this display number within the GOP
                int pic_num_in_gop = i - key_index;
                if (pic_num_in_gop != mxf->index_entries[i].temporal_ref) {
                    for (j = key_index; j < mxf->edit_units_count; j++) {
                        if (pic_num_in_gop == mxf->index_entries[j].temporal_ref)
                            break;
                    }
                    if (j == mxf->edit_units_count)
                        av_log(s, AV_LOG_WARNING, "missing frames\n");
                    temporal_offset = j - key_index - pic_num_in_gop;
                }
            }
            avio_w8(pb, temporal_offset);

            if ((mxf->index_entries[i].flags & 0x30) == 0x30) { // back and forward prediction
                avio_w8(pb, mxf->last_key_index - i);
            } else {
                avio_w8(pb, key_index - i); // key frame offset
                if ((mxf->index_entries[i].flags & 0x20) == 0x20) // only forward
                    mxf->last_key_index = key_index;
            }

            if (!(mxf->index_entries[i].flags & 0x33) && // I frame
                mxf->index_entries[i].flags & 0x40 && !temporal_offset)
                mxf->index_entries[i].flags |= 0x80; // random access
            avio_w8(pb, mxf->index_entries[i].flags);
            // stream offset
            avio_wb64(pb, mxf->index_entries[i].offset);
            if (s->nb_streams > 1)
                avio_wb32(pb, mxf->index_entries[i].slice_offset);
        }

        // carry the last key frame position (relative) into the next segment
        mxf->last_key_index = key_index - mxf->edit_units_count;
        mxf->last_indexed_edit_unit += mxf->edit_units_count;
        mxf->edit_units_count = 0;
    }
}
 
/* Emit a KLV fill item padding the output up to the next KAG boundary;
 * does nothing when the stream is already aligned. */
static void mxf_write_klv_fill(AVFormatContext *s)
{
    unsigned pad = klv_fill_size(avio_tell(s->pb));
    if (pad) {
        avio_write(s->pb, klv_fill_key, 16);
        pad -= 16 + 4; // key + 4-byte BER length are part of the fill size
        klv_encode_ber4_length(s->pb, pad);
        ffio_fill(s->pb, 0, pad);
        av_assert1(!(avio_tell(s->pb) & (KAG_SIZE-1)));
    }
}
 
/* Write a partition pack (header/body/footer depending on 'key'), recording
 * body partition offsets for the RIP, pre-computing the byte count of the
 * following index table segment, and — when write_metadata is set — writing
 * the primer pack plus header metadata and back-patching HeaderByteCount.
 * Returns 0 on success or a negative error code on allocation failure. */
static int mxf_write_partition(AVFormatContext *s, int bodysid,
                               int indexsid,
                               const uint8_t *key, int write_metadata)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t header_byte_count_offset;
    unsigned index_byte_count = 0;
    uint64_t partition_offset = avio_tell(pb);
    int err;

    // size of the index table segment that will follow this partition;
    // must mirror the lengths used in mxf_write_index_table_segment()
    if (!mxf->edit_unit_byte_count && mxf->edit_units_count)
        index_byte_count = 85 + 12+(s->nb_streams+1)*6 +
                           12+mxf->edit_units_count*(11+mxf->slice_count*4);
    else if (mxf->edit_unit_byte_count && indexsid)
        index_byte_count = 80;

    if (index_byte_count) {
        // add encoded ber length
        index_byte_count += 16 + klv_ber_length(index_byte_count);
        index_byte_count += klv_fill_size(index_byte_count);
    }

    if (!memcmp(key, body_partition_key, 16)) {
        // remember this body partition offset for the random index pack
        if ((err = av_reallocp_array(&mxf->body_partition_offset, mxf->body_partitions_count + 1,
                                     sizeof(*mxf->body_partition_offset))) < 0) {
            mxf->body_partitions_count = 0;
            return err;
        }
        mxf->body_partition_offset[mxf->body_partitions_count++] = partition_offset;
    }

    // write klv
    avio_write(pb, key, 16);
    klv_encode_ber_length(pb, 88 + 16LL * DESCRIPTOR_COUNT(mxf->essence_container_count));

    // write partition value
    avio_wb16(pb, 1); // majorVersion
    avio_wb16(pb, 2); // minorVersion
    avio_wb32(pb, KAG_SIZE); // KAGSize

    avio_wb64(pb, partition_offset); // ThisPartition

    if (!memcmp(key, body_partition_key, 16) && mxf->body_partitions_count > 1)
        avio_wb64(pb, mxf->body_partition_offset[mxf->body_partitions_count-2]); // PreviousPartition
    else if (!memcmp(key, footer_partition_key, 16) && mxf->body_partitions_count)
        avio_wb64(pb, mxf->body_partition_offset[mxf->body_partitions_count-1]); // PreviousPartition
    else
        avio_wb64(pb, 0); // header partition has no predecessor

    avio_wb64(pb, mxf->footer_partition_offset); // footerPartition

    // set offset
    header_byte_count_offset = avio_tell(pb);
    avio_wb64(pb, 0); // headerByteCount, update later

    // indexTable
    avio_wb64(pb, index_byte_count); // indexByteCount
    avio_wb32(pb, index_byte_count ? indexsid : 0); // indexSID

    // BodyOffset
    if (bodysid && mxf->edit_units_count && mxf->body_partitions_count) {
        avio_wb64(pb, mxf->body_offset);
    } else
        avio_wb64(pb, 0);

    avio_wb32(pb, bodysid); // bodySID

    // operational pattern
    avio_write(pb, op1a_ul, 16);

    // essence container
    mxf_write_essence_container_refs(s);

    if (write_metadata) {
        // mark the start of the headermetadata and calculate metadata size
        int64_t pos, start;
        unsigned header_byte_count;

        mxf_write_klv_fill(s);
        start = avio_tell(s->pb);
        mxf_write_primer_pack(s);
        mxf_write_header_metadata_sets(s);
        pos = avio_tell(s->pb);
        // headerByteCount includes the trailing fill up to the KAG boundary
        header_byte_count = pos - start + klv_fill_size(pos);

        // update header_byte_count
        avio_seek(pb, header_byte_count_offset, SEEK_SET);
        avio_wb64(pb, header_byte_count);
        avio_seek(pb, pos, SEEK_SET);
    }

    avio_flush(pb);

    return 0;
}
 
/**
 * Inspect the first DNxHD frame to determine the essence container index
 * (and component depth for 10-bit variants) from the compression ID, and
 * precompute the constant edit unit byte count for CBR muxing.
 *
 * @return 1 on success (or if the header was already written), -1 when the
 *         frame is too short or the compression ID is unsupported.
 */
static int mxf_parse_dnxhd_frame(AVFormatContext *s, AVStream *st,
                                 AVPacket *pkt)
{
    // compression-ID -> essence container index; depth10 marks 10-bit CIDs
    static const struct {
        int cid;
        int index;
        int depth10;
    } cid_map[] = {
        { 1235, 24, 1 },
        { 1237, 25, 0 },
        { 1238, 26, 0 },
        { 1241, 27, 1 },
        { 1242, 28, 0 },
        { 1243, 29, 0 },
        { 1250, 30, 1 },
        { 1251, 31, 0 },
        { 1252, 32, 0 },
        { 1253, 33, 0 },
    };
    MXFContext *mxf = s->priv_data;
    MXFStreamContext *sc = st->priv_data;
    uint8_t *cid_bytes;
    int k, cid, frame_size;

    if (mxf->header_written)
        return 1; // parameters already fixed by the first frame

    if (pkt->size < 43)
        return -1;

    // compression ID is stored big-endian at offset 0x28 of the frame header
    cid_bytes = pkt->data + 0x28;
    cid = cid_bytes[0] << 24 | cid_bytes[1] << 16 | cid_bytes[2] << 8 | cid_bytes[3];

    frame_size = avpriv_dnxhd_get_frame_size(cid);
    if (frame_size < 0)
        return -1;

    for (k = 0; k < FF_ARRAY_ELEMS(cid_map); k++)
        if (cid_map[k].cid == cid)
            break;
    if (k == FF_ARRAY_ELEMS(cid_map))
        return -1;

    sc->index = cid_map[k].index;
    if (cid_map[k].depth10)
        sc->component_depth = 10;

    sc->codec_ul = &mxf_essence_container_uls[sc->index].codec_ul;
    sc->aspect_ratio = (AVRational){ 16, 9 };

    // CBR essence: fixed edit unit = system item + one KAG-padded KLV
    // per stream
    mxf->edit_unit_byte_count = KAG_SIZE;
    for (k = 0; k < s->nb_streams; k++) {
        AVStream *stream = s->streams[k];
        MXFStreamContext *track = stream->priv_data;
        if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            mxf->edit_unit_byte_count += 16 + 4 + track->aic.samples[0]*track->aic.sample_size;
            mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count);
        } else if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            mxf->edit_unit_byte_count += 16 + 4 + frame_size;
            mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count);
        }
    }

    return 1;
}
 
/* Inspect the first DV frame: read the VS/VSC packs from the header DIF
 * sequence to determine aspect ratio, interlacing, the DV profile (25/50/
 * 100 Mbit, PAL/NTSC) and hence the essence container index and frame size;
 * precompute the constant edit unit byte count for CBR muxing.
 * Returns 1 on success (or once the header has been written), -1 on a
 * too-short frame. */
static int mxf_parse_dv_frame(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    MXFContext *mxf = s->priv_data;
    MXFStreamContext *sc = st->priv_data;
    uint8_t *vs_pack, *vsc_pack;
    int i, ul_index, frame_size, stype, pal;

    if (mxf->header_written)
        return 1;

    // Check for minimal frame size
    if (pkt->size < 120000)
        return -1;

    // VS and VSC packs live in the 6th DIF block of the first sequence
    vs_pack  = pkt->data + 80*5 + 48;
    vsc_pack = pkt->data + 80*5 + 53;
    stype    = vs_pack[3] & 0x1f;       // signal type: DV variant
    pal      = (vs_pack[3] >> 5) & 0x1; // 50/60 system flag

    if ((vs_pack[2] & 0x07) == 0x02)
        sc->aspect_ratio = (AVRational){ 16, 9 };
    else
        sc->aspect_ratio = (AVRational){ 4, 3 };

    sc->interlaced = (vsc_pack[3] >> 4) & 0x01;
    // TODO: fix dv encoder to set proper FF/FS value in VSC pack
    // and set field dominance accordingly
    // av_log(s, AV_LOG_DEBUG, "DV vsc pack ff/ss = %x\n", vsc_pack[2] >> 6);

    switch (stype) {
    case 0x18: // DV100 720p
        ul_index = 6 + pal;
        frame_size = pal ? 288000 : 240000;
        if (sc->interlaced) {
            av_log(s, AV_LOG_ERROR, "source marked as interlaced but codec profile is progressive\n");
            sc->interlaced = 0;
        }
        break;
    case 0x14: // DV100 1080i
        ul_index = 4 + pal;
        frame_size = pal ? 576000 : 480000;
        break;
    case 0x04: // DV50
        ul_index = 2 + pal;
        frame_size = pal ? 288000 : 240000;
        break;
    default: // DV25
        ul_index = 0 + pal;
        frame_size = pal ? 144000 : 120000;
    }

    sc->index = ul_index + 16; // DV entries start at index 16 in the UL table
    sc->codec_ul = &mxf_essence_container_uls[sc->index].codec_ul;

    // CBR essence: fixed edit unit = system item + one KAG-padded KLV
    // per stream
    mxf->edit_unit_byte_count = KAG_SIZE;
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        MXFStreamContext *sc = st->priv_data;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            mxf->edit_unit_byte_count += 16 + 4 + sc->aic.samples[0]*sc->aic.sample_size;
            mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count);
        } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            mxf->edit_unit_byte_count += 16 + 4 + frame_size;
            mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count);
        }
    }

    return 1;
}
 
/* MPEG-2 picture coding ULs, ordered in (profile/level, I-frame|long-GOP)
 * pairs; indexed by mxf_get_mpeg2_codec_ul() as base + long_gop. */
static const UID mxf_mpeg2_codec_uls[] = {
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x01,0x10,0x00 }, // MP-ML I-Frame
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x01,0x11,0x00 }, // MP-ML Long GOP
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x02,0x02,0x00 }, // 422P-ML I-Frame
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x02,0x03,0x00 }, // 422P-ML Long GOP
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x03,0x02,0x00 }, // MP-HL I-Frame
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x03,0x03,0x00 }, // MP-HL Long GOP
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x04,0x02,0x00 }, // 422P-HL I-Frame
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x04,0x03,0x00 }, // 422P-HL Long GOP
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x05,0x02,0x00 }, // MP@H-14 I-Frame
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x05,0x03,0x00 }, // MP@H-14 Long GOP
};
 
/**
 * Select the MPEG-2 picture coding UL matching the encoder's profile,
 * level, and GOP structure.
 *
 * The numeric profile/level values are the MPEG-2 escape codes as stored
 * in the sequence extension (profile 4 = Main, 0 = 4:2:2; level 8 = Main,
 * 4 = High, 6 = High-1440, 5/2 = the 4:2:2 profile levels).
 *
 * @return pointer into mxf_mpeg2_codec_uls, or NULL for an unsupported
 *         profile/level combination.
 */
static const UID *mxf_get_mpeg2_codec_ul(AVCodecContext *avctx)
{
    int long_gop = avctx->gop_size > 1 || avctx->has_b_frames;
    int base = -1;

    if (avctx->profile == 4) {        // Main profile
        switch (avctx->level) {
        case 8: base = 0; break;      // Main level
        case 4: base = 4; break;      // High level
        case 6: base = 8; break;      // High-14 level
        }
    } else if (avctx->profile == 0) { // 4:2:2 profile
        switch (avctx->level) {
        case 5: base = 2; break;      // Main level
        case 2: base = 6; break;      // High level
        }
    }

    return base < 0 ? NULL : &mxf_mpeg2_codec_uls[base + long_gop];
}
 
/* Scan an MPEG-2 packet for start codes to extract profile/level, aspect
 * ratio, interlacing, GOP structure and picture type, filling the index
 * entry flags (0x40 seq header, 0x80 random access, 0x22 P, 0x13/0x33 B)
 * and temporal reference used by the index table.
 * Returns nonzero when a codec UL could be determined (always for D-10). */
static int mxf_parse_mpeg2_frame(AVFormatContext *s, AVStream *st,
                                 AVPacket *pkt, MXFIndexEntry *e)
{
    MXFStreamContext *sc = st->priv_data;
    uint32_t c = -1; // rolling 32-bit window over the bitstream
    int i;

    for(i = 0; i < pkt->size - 4; i++) {
        c = (c<<8) + pkt->data[i];
        if (c == 0x1b5) { // extension start code
            if ((pkt->data[i+1] & 0xf0) == 0x10) { // seq ext
                st->codec->profile = pkt->data[i+1] & 0x07;
                st->codec->level   = pkt->data[i+2] >> 4;
            } else if (i + 5 < pkt->size && (pkt->data[i+1] & 0xf0) == 0x80) { // pict coding ext
                sc->interlaced = !(pkt->data[i+5] & 0x80); // progressive frame
                if (sc->interlaced)
                    sc->field_dominance = 1 + !(pkt->data[i+4] & 0x80); // top field first
                break;
            }
        } else if (c == 0x1b8) { // gop
            if (pkt->data[i+4]>>6 & 0x01) { // closed
                sc->closed_gop = 1;
                if (e->flags & 0x40) // sequence header present
                    e->flags |= 0x80; // random access
            }
        } else if (c == 0x1b3) { // seq
            e->flags |= 0x40;
            switch ((pkt->data[i+4]>>4) & 0xf) {
            case 2:  sc->aspect_ratio = (AVRational){  4,  3}; break;
            case 3:  sc->aspect_ratio = (AVRational){ 16,  9}; break;
            case 4:  sc->aspect_ratio = (AVRational){221,100}; break;
            default: // fall back to the coded frame dimensions
                av_reduce(&sc->aspect_ratio.num, &sc->aspect_ratio.den,
                          st->codec->width, st->codec->height, 1024*1024);
            }
        } else if (c == 0x100) { // pic
            int pict_type = (pkt->data[i+2]>>3) & 0x07;
            e->temporal_ref = (pkt->data[i+1]<<2) | (pkt->data[i+2]>>6);
            if (pict_type == 2) { // P frame
                e->flags |= 0x22;
                sc->closed_gop = 0; // reset closed gop, don't matter anymore
            } else if (pict_type == 3) { // B frame
                if (sc->closed_gop)
                    e->flags |= 0x13; // only backward prediction
                else
                    e->flags |= 0x33;
                sc->temporal_reordering = -1;
            } else if (!pict_type) {
                av_log(s, AV_LOG_ERROR, "error parsing mpeg2 frame\n");
                return 0;
            }
        }
    }
    if (s->oformat != &ff_mxf_d10_muxer)
        sc->codec_ul = mxf_get_mpeg2_codec_ul(st->codec);
    return !!sc->codec_ul;
}
 
/**
 * Pack a Unix timestamp into the MXF binary timestamp layout:
 * year(16) | month(8) | day(8) | hour(8) | minute(8) | second(8) | 0(8),
 * all interpreted in UTC.
 *
 * @param timestamp seconds since the epoch
 * @return packed 64-bit MXF timestamp, or 0 if the time cannot be broken down
 */
static uint64_t mxf_parse_timestamp(time_t timestamp)
{
    const struct tm *tc = gmtime(&timestamp);
    uint64_t packed;

    if (!tc)
        return 0;

    packed  = (uint64_t)(tc->tm_year + 1900) << 48;
    packed |= (uint64_t)(tc->tm_mon  + 1)    << 40;
    packed |= (uint64_t) tc->tm_mday         << 32;
    packed |= (uint64_t) tc->tm_hour         << 24;
    packed |= (uint64_t) tc->tm_min          << 16;
    packed |= (uint64_t) tc->tm_sec          <<  8;
    return packed;
}
 
/* Derive the 16-byte package UMID material and the instance number from a
 * single random seed (skipped in bitexact mode so output is reproducible). */
static void mxf_gen_umid(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    uint32_t seed = av_get_random_seed();
    // fixed high bits make the UMID recognizably FFmpeg-generated
    uint64_t umid = seed + 0x5294713400000000LL;

    AV_WB64(mxf->umid  , umid);
    AV_WB64(mxf->umid+8, umid>>8);

    mxf->instance_number = seed & 0xFFFFFF;
}
 
/* Validate the stream layout (exactly one video stream, first), set up the
 * per-stream MXF contexts (essence container index, codec UL, element key,
 * timecode, D-10 CBR sizing), generate UMIDs and the creation timestamp,
 * and initialize audio interleaving.
 * Returns 0 on success, a negative AVERROR on failure. */
static int mxf_write_header(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    int i, ret;
    uint8_t present[FF_ARRAY_ELEMS(mxf_essence_container_uls)] = {0};
    const MXFSamplesPerFrame *spf = NULL;
    AVDictionaryEntry *t;
    int64_t timestamp = 0;
    AVDictionaryEntry *tcr = av_dict_get(s->metadata, "timecode", NULL, 0);

    if (!s->nb_streams)
        return -1;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        MXFStreamContext *sc = av_mallocz(sizeof(*sc));
        if (!sc)
            return AVERROR(ENOMEM);
        st->priv_data = sc; // freed by lavf together with the stream

        if ((i == 0) ^ (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)) {
            av_log(s, AV_LOG_ERROR, "there must be exactly one video stream and it must be the first one\n");
            return -1;
        }

        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            AVRational rate, tbc = st->codec->time_base;
            // Default component depth to 8
            sc->component_depth = 8;
            mxf->timecode_base = (tbc.den + tbc.num/2) / tbc.num;
            spf = ff_mxf_get_samples_per_frame(s, tbc);
            if (!spf) {
                av_log(s, AV_LOG_ERROR, "Unsupported video frame rate %d/%d\n",
                       tbc.den, tbc.num);
                return AVERROR(EINVAL);
            }
            mxf->time_base = spf->time_base;
            rate = av_inv_q(mxf->time_base);
            avpriv_set_pts_info(st, 64, mxf->time_base.num, mxf->time_base.den);
            // per-stream timecode metadata overrides the container-level one
            if (!tcr)
                tcr = av_dict_get(st->metadata, "timecode", NULL, 0);
            if (tcr)
                ret = av_timecode_init_from_string(&mxf->tc, rate, tcr->value, s);
            else
                ret = av_timecode_init(&mxf->tc, rate, 0, 0, s);
            if (ret < 0)
                return ret;
            sc->video_bit_rate = st->codec->bit_rate ? st->codec->bit_rate : st->codec->rc_max_rate;
            if (s->oformat == &ff_mxf_d10_muxer) {
                // D-10 defines exactly three bit rates, each in a 25 fps
                // and a 29.97 fps flavor; pick the essence container index
                if (sc->video_bit_rate == 50000000) {
                    if (mxf->time_base.den == 25) sc->index = 3;
                    else                          sc->index = 5;
                } else if (sc->video_bit_rate == 40000000) {
                    if (mxf->time_base.den == 25) sc->index = 7;
                    else                          sc->index = 9;
                } else if (sc->video_bit_rate == 30000000) {
                    if (mxf->time_base.den == 25) sc->index = 11;
                    else                          sc->index = 13;
                } else {
                    av_log(s, AV_LOG_ERROR, "error MXF D-10 only support 30/40/50 mbit/s\n");
                    return -1;
                }

                // D-10 is CBR: precompute the constant edit unit size
                mxf->edit_unit_byte_count = KAG_SIZE; // system element
                mxf->edit_unit_byte_count += 16 + 4 + (uint64_t)sc->video_bit_rate *
                    mxf->time_base.num / (8*mxf->time_base.den);
                mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count);
                mxf->edit_unit_byte_count += 16 + 4 + 4 + spf->samples_per_frame[0]*8*4;
                mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count);
            }
        } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->sample_rate != 48000) {
                av_log(s, AV_LOG_ERROR, "only 48khz is implemented\n");
                return -1;
            }
            avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
            if (s->oformat == &ff_mxf_d10_muxer) {
                if (st->index != 1) {
                    av_log(s, AV_LOG_ERROR, "MXF D-10 only support one audio track\n");
                    return -1;
                }
                // NOTE(review): this only warns and does not reject other
                // codecs — confirm whether falling through is intentional
                if (st->codec->codec_id != AV_CODEC_ID_PCM_S16LE &&
                    st->codec->codec_id != AV_CODEC_ID_PCM_S24LE) {
                    av_log(s, AV_LOG_ERROR, "MXF D-10 only support 16 or 24 bits le audio\n");
                }
                sc->index = ((MXFStreamContext*)s->streams[0]->priv_data)->index + 1;
            } else
                mxf->slice_count = 1;
        }

        if (!sc->index) {
            sc->index = mxf_get_essence_container_ul_index(st->codec->codec_id);
            if (sc->index == -1) {
                av_log(s, AV_LOG_ERROR, "track %d: could not find essence container ul, "
                       "codec not currently supported in container\n", i);
                return -1;
            }
        }

        sc->codec_ul = &mxf_essence_container_uls[sc->index].codec_ul;

        memcpy(sc->track_essence_element_key, mxf_essence_container_uls[sc->index].element_ul, 15);
        sc->track_essence_element_key[15] = present[sc->index];
        PRINT_KEY(s, "track essence element key", sc->track_essence_element_key);

        if (!present[sc->index])
            mxf->essence_container_count++;
        present[sc->index]++;
    }

    if (s->oformat == &ff_mxf_d10_muxer) {
        mxf->essence_container_count = 1;
    }

    if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT))
        mxf_gen_umid(s);

    for (i = 0; i < s->nb_streams; i++) {
        MXFStreamContext *sc = s->streams[i]->priv_data;
        // update element count
        sc->track_essence_element_key[13] = present[sc->index];
        if (!memcmp(sc->track_essence_element_key, mxf_essence_container_uls[15].element_ul, 13)) // DV
            sc->order = (0x15 << 24) | AV_RB32(sc->track_essence_element_key+13);
        else
            sc->order = AV_RB32(sc->track_essence_element_key+12);
    }

    // parenthesized assignment-in-condition (avoids -Wparentheses)
    if ((t = av_dict_get(s->metadata, "creation_time", NULL, 0)))
        timestamp = ff_iso8601_to_unix_time(t->value);
    if (timestamp)
        mxf->timestamp = mxf_parse_timestamp(timestamp);
    mxf->duration = -1;

    mxf->timecode_track = av_mallocz(sizeof(*mxf->timecode_track));
    if (!mxf->timecode_track)
        return AVERROR(ENOMEM);
    mxf->timecode_track->priv_data = av_mallocz(sizeof(MXFStreamContext));
    if (!mxf->timecode_track->priv_data)
        return AVERROR(ENOMEM);
    mxf->timecode_track->index = -1;

    if (!spf)
        spf = ff_mxf_get_samples_per_frame(s, (AVRational){ 1, 25 });

    if (ff_audio_interleave_init(s, spf->samples_per_frame, mxf->time_base) < 0)
        return -1;

    return 0;
}
 
/* Keys for the per-edit-unit system item written by mxf_write_system_item(). */
static const uint8_t system_metadata_pack_key[]        = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x03,0x01,0x04,0x01,0x01,0x00 };
static const uint8_t system_metadata_package_set_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x43,0x01,0x01,0x0D,0x01,0x03,0x01,0x04,0x01,0x02,0x01 };
 
/* Write the system item preceding each content package: the system metadata
 * pack (content package rate/type, container UL, SMPTE 12M timecode for the
 * current edit unit) and the system metadata package set carrying the UMID. */
static void mxf_write_system_item(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned frame;
    uint32_t time_code;

    // absolute edit unit number of the frame being written
    frame = mxf->last_indexed_edit_unit + mxf->edit_units_count;

    // write system metadata pack
    avio_write(pb, system_metadata_pack_key, 16);
    klv_encode_ber4_length(pb, 57);
    avio_w8(pb, 0x5c); // UL, user date/time stamp, picture and sound item present
    avio_w8(pb, 0x04); // content package rate
    avio_w8(pb, 0x00); // content package type
    avio_wb16(pb, 0x00); // channel handle
    avio_wb16(pb, (mxf->tc.start + frame) & 0xFFFF); // continuity count, supposed to overflow
    if (mxf->essence_container_count > 1)
        avio_write(pb, multiple_desc_ul, 16);
    else {
        MXFStreamContext *sc = s->streams[0]->priv_data;
        avio_write(pb, mxf_essence_container_uls[sc->index].container_ul, 16);
    }
    avio_w8(pb, 0);
    avio_wb64(pb, 0);
    avio_wb64(pb, 0); // creation date/time stamp

    avio_w8(pb, 0x81); // SMPTE 12M time code
    time_code = av_timecode_get_smpte_from_framenum(&mxf->tc, frame);
    avio_wb32(pb, time_code);
    avio_wb32(pb, 0); // binary group data
    avio_wb64(pb, 0);

    // write system metadata package set
    avio_write(pb, system_metadata_package_set_key, 16);
    klv_encode_ber4_length(pb, 35);
    avio_w8(pb, 0x83); // UMID
    avio_wb16(pb, 0x20);
    mxf_write_umid(s, 1);
}
 
/* Write a D-10 video packet (length + data, the key was already written by
 * the caller), padding with a KLV fill item to keep the stream CBR at the
 * nominal frame size derived from the bit rate. */
static void mxf_write_d10_video_packet(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    MXFStreamContext *sc = st->priv_data;
    // nominal CBR frame size in bytes: bit_rate * frame_duration / 8
    int packet_size = (uint64_t)sc->video_bit_rate*mxf->time_base.num /
        (8*mxf->time_base.den); // frame size
    int pad;

    packet_size += 16 + 4; // account for key + 4-byte BER length
    packet_size += klv_fill_size(packet_size);

    klv_encode_ber4_length(pb, pkt->size);
    avio_write(pb, pkt->data, pkt->size);

    // ensure CBR muxing by padding to correct video frame size
    pad = packet_size - pkt->size - 16 - 4;
    if (pad > 20) {
        avio_write(s->pb, klv_fill_key, 16);
        pad -= 16 + 4;
        klv_encode_ber4_length(s->pb, pad);
        ffio_fill(s->pb, 0, pad);
        av_assert1(!(avio_tell(s->pb) & (KAG_SIZE-1)));
    } else {
        // gap too small for a fill item: pad raw (breaks KLV structure)
        av_log(s, AV_LOG_WARNING, "cannot fill d-10 video packet\n");
        ffio_fill(s->pb, 0, pad);
    }
}
 
/* Write a D-10 (SMPTE 331M) AES3 audio element: a 4-byte element header
 * followed by 8 channels of 32-bit AES3 subframes per sample period; input
 * PCM (S16LE/S24LE) is shifted into the AES3 audio-data field and unused
 * channels are filled with empty subframes. */
static void mxf_write_d10_audio_packet(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int frame_size = pkt->size / st->codec->block_align; // samples per frame
    uint8_t *samples = pkt->data;
    uint8_t *end = pkt->data + pkt->size;
    int i;

    klv_encode_ber4_length(pb, 4 + frame_size*4*8);

    // element header: 5-sequence count (0 for 25fps), sample count, channel mask
    avio_w8(pb, (frame_size == 1920 ? 0 : (mxf->edit_units_count-1) % 5 + 1));
    avio_wl16(pb, frame_size);
    avio_w8(pb, (1<<st->codec->channels)-1);

    while (samples < end) {
        for (i = 0; i < st->codec->channels; i++) {
            uint32_t sample;
            if (st->codec->codec_id == AV_CODEC_ID_PCM_S24LE) {
                sample = AV_RL24(samples)<< 4;
                samples += 3;
            } else {
                sample = AV_RL16(samples)<<12;
                samples += 2;
            }
            avio_wl32(pb, sample | i); // low nibble carries the channel id
        }
        for (; i < 8; i++)
            avio_wl32(pb, i); // empty subframes for unused channels
    }
}
 
/* Write one packet: grow the VBR index as needed, parse the first video
 * frame to finalize stream parameters, lazily write the header partition,
 * start a new body partition + index segment at GOP boundaries, emit the
 * system item before each video frame, then write the essence KLV itself.
 * Returns 0 on success or a negative error code. */
static int mxf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[pkt->stream_index];
    MXFStreamContext *sc = st->priv_data;
    MXFIndexEntry ie = {0};
    int err;

    // VBR mode: grow the index entry array in EDIT_UNITS_PER_BODY chunks
    if (!mxf->edit_unit_byte_count && !(mxf->edit_units_count % EDIT_UNITS_PER_BODY)) {
        if ((err = av_reallocp_array(&mxf->index_entries, mxf->edit_units_count
                                     + EDIT_UNITS_PER_BODY, sizeof(*mxf->index_entries))) < 0) {
            mxf->edit_units_count = 0;
            av_log(s, AV_LOG_ERROR, "could not allocate index entries\n");
            return err;
        }
    }

    // codec-specific first-frame parsing (fills ie and stream parameters)
    if (st->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (!mxf_parse_mpeg2_frame(s, st, pkt, &ie)) {
            av_log(s, AV_LOG_ERROR, "could not get mpeg2 profile and level\n");
            return -1;
        }
    } else if (st->codec->codec_id == AV_CODEC_ID_DNXHD) {
        if (!mxf_parse_dnxhd_frame(s, st, pkt)) {
            av_log(s, AV_LOG_ERROR, "could not get dnxhd profile\n");
            return -1;
        }
    } else if (st->codec->codec_id == AV_CODEC_ID_DVVIDEO) {
        if (!mxf_parse_dv_frame(s, st, pkt)) {
            av_log(s, AV_LOG_ERROR, "could not get dv profile\n");
            return -1;
        }
    }

    // header partition is written once, after the first frame was parsed
    if (!mxf->header_written) {
        if (mxf->edit_unit_byte_count) {
            if ((err = mxf_write_partition(s, 1, 2, header_open_partition_key, 1)) < 0)
                return err;
            mxf_write_klv_fill(s);
            mxf_write_index_table_segment(s);
        } else {
            if ((err = mxf_write_partition(s, 0, 0, header_open_partition_key, 1)) < 0)
                return err;
        }
        mxf->header_written = 1;
    }

    if (st->index == 0) { // video stream drives partitioning
        // VBR: open a new body partition at an I frame / GOP start
        if (!mxf->edit_unit_byte_count &&
            (!mxf->edit_units_count || mxf->edit_units_count > EDIT_UNITS_PER_BODY) &&
            !(ie.flags & 0x33)) { // I frame, Gop start
            mxf_write_klv_fill(s);
            if ((err = mxf_write_partition(s, 1, 2, body_partition_key, 0)) < 0)
                return err;
            mxf_write_klv_fill(s);
            mxf_write_index_table_segment(s);
        }

        mxf_write_klv_fill(s);
        mxf_write_system_item(s);

        if (!mxf->edit_unit_byte_count) {
            mxf->index_entries[mxf->edit_units_count].offset = mxf->body_offset;
            mxf->index_entries[mxf->edit_units_count].flags = ie.flags;
            mxf->index_entries[mxf->edit_units_count].temporal_ref = ie.temporal_ref;
            mxf->body_offset += KAG_SIZE; // size of system element
        }
        mxf->edit_units_count++;
    } else if (!mxf->edit_unit_byte_count && st->index == 1) {
        // first audio stream marks the slice offset of the current edit unit
        mxf->index_entries[mxf->edit_units_count-1].slice_offset =
            mxf->body_offset - mxf->index_entries[mxf->edit_units_count-1].offset;
    }

    mxf_write_klv_fill(s);
    avio_write(pb, sc->track_essence_element_key, 16); // write key
    if (s->oformat == &ff_mxf_d10_muxer) {
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            mxf_write_d10_video_packet(s, st, pkt);
        else
            mxf_write_d10_audio_packet(s, st, pkt);
    } else {
        klv_encode_ber4_length(pb, pkt->size); // write length
        avio_write(pb, pkt->data, pkt->size);
        mxf->body_offset += 16+4+pkt->size + klv_fill_size(16+4+pkt->size);
    }

    avio_flush(pb);

    return 0;
}
 
/* Write the Random Index Pack: one (BodySID, offset) pair for the header
 * partition, each body partition, and the footer, terminated by the total
 * pack length so readers can locate it from the end of the file. */
static void mxf_write_random_index_pack(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    uint64_t pos = avio_tell(pb);
    int i;

    avio_write(pb, random_index_pack_key, 16);
    klv_encode_ber_length(pb, 28 + 12LL*mxf->body_partitions_count);

    if (mxf->edit_unit_byte_count) // CBR: essence lives in the header partition
        avio_wb32(pb, 1); // BodySID of header partition
    else
        avio_wb32(pb, 0);
    avio_wb64(pb, 0); // offset of header partition

    for (i = 0; i < mxf->body_partitions_count; i++) {
        avio_wb32(pb, 1); // BodySID
        avio_wb64(pb, mxf->body_partition_offset[i]);
    }

    avio_wb32(pb, 0); // BodySID of footer partition
    avio_wb64(pb, mxf->footer_partition_offset);

    // overall RIP length including this field, counted from the pack key
    avio_wb32(pb, avio_tell(pb) - pos + 4);
}
 
/* Finalize the file: write the footer partition (with the final index in
 * VBR mode), the random index pack, rewrite the header partition as closed
 * when the output is seekable, and free all muxer state.
 * Returns 0 on success or a negative error code. */
static int mxf_write_footer(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    AVIOContext *pb = s->pb;
    int err;

    mxf->duration = mxf->last_indexed_edit_unit + mxf->edit_units_count;

    mxf_write_klv_fill(s);
    mxf->footer_partition_offset = avio_tell(pb);
    if (mxf->edit_unit_byte_count) { // no need to repeat index
        if ((err = mxf_write_partition(s, 0, 0, footer_partition_key, 0)) < 0)
            return err;
    } else {
        if ((err = mxf_write_partition(s, 0, 2, footer_partition_key, 0)) < 0)
            return err;
        mxf_write_klv_fill(s);
        mxf_write_index_table_segment(s);
    }

    mxf_write_klv_fill(s);
    mxf_write_random_index_pack(s);

    if (s->pb->seekable) {
        // rewrite the header partition as "closed" now that duration and
        // index information are known
        avio_seek(pb, 0, SEEK_SET);
        if (mxf->edit_unit_byte_count) {
            if ((err = mxf_write_partition(s, 1, 2, header_closed_partition_key, 1)) < 0)
                return err;
            mxf_write_klv_fill(s);
            mxf_write_index_table_segment(s);
        } else {
            if ((err = mxf_write_partition(s, 0, 0, header_closed_partition_key, 1)) < 0)
                return err;
        }
    }

    ff_audio_interleave_close(s);

    av_freep(&mxf->index_entries);
    av_freep(&mxf->body_partition_offset);
    av_freep(&mxf->timecode_track->priv_data);
    av_freep(&mxf->timecode_track);

    mxf_free(s);

    return 0;
}
 
/* Pop the next packet for muxing once a complete edit unit is buffered
 * (every stream has contributed) or on flush. On flush with missing
 * streams, packets beyond the last complete edit unit are discarded so
 * that only whole edit units are ever written.
 * Returns 1 when *out holds a packet, 0 otherwise. */
static int mxf_interleave_get_packet(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush)
{
    int i, stream_count = 0;

    // count streams that currently have at least one buffered packet
    for (i = 0; i < s->nb_streams; i++)
        stream_count += !!s->streams[i]->last_in_packet_buffer;

    if (stream_count && (s->nb_streams == stream_count || flush)) {
        AVPacketList *pktl = s->packet_buffer;
        if (s->nb_streams != stream_count) {
            AVPacketList *last = NULL;
            // find last packet in edit unit
            while (pktl) {
                if (!stream_count || pktl->pkt.stream_index == 0)
                    break;
                last = pktl;
                pktl = pktl->next;
                stream_count--;
            }
            // purge packet queue (drop the incomplete trailing edit unit)
            while (pktl) {
                AVPacketList *next = pktl->next;

                if(s->streams[pktl->pkt.stream_index]->last_in_packet_buffer == pktl)
                    s->streams[pktl->pkt.stream_index]->last_in_packet_buffer= NULL;
                av_free_packet(&pktl->pkt);
                av_freep(&pktl);
                pktl = next;
            }
            if (last)
                last->next = NULL;
            else {
                s->packet_buffer = NULL;
                s->packet_buffer_end= NULL;
                goto out;
            }
            pktl = s->packet_buffer;
        }

        // hand the head packet to the caller and unlink it from the queue
        *out = pktl->pkt;
        av_dlog(s, "out st:%d dts:%"PRId64"\n", (*out).stream_index, (*out).dts);
        s->packet_buffer = pktl->next;
        if(s->streams[pktl->pkt.stream_index]->last_in_packet_buffer == pktl)
            s->streams[pktl->pkt.stream_index]->last_in_packet_buffer= NULL;
        if(!s->packet_buffer)
            s->packet_buffer_end= NULL;
        av_freep(&pktl);
        return 1;
    } else {
    out:
        av_init_packet(out);
        return 0;
    }
}
 
/**
 * Packet ordering predicate for interleaving: order primarily by dts;
 * on a tie, the stream with the smaller essence element order key wins.
 *
 * @return nonzero when 'next' must be muxed after 'pkt'
 */
static int mxf_compare_timestamps(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
{
    MXFStreamContext *sc_pkt  = s->streams[pkt ->stream_index]->priv_data;
    MXFStreamContext *sc_next = s->streams[next->stream_index]->priv_data;

    if (next->dts != pkt->dts)
        return next->dts > pkt->dts;
    return sc_pkt->order < sc_next->order;
}
 
/* Delegate interleaving to the generic audio-rechunking helper, plugging
 * in the MXF-specific dequeue and comparison callbacks. */
static int mxf_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush)
{
    return ff_audio_rechunk_interleave(s, out, pkt, flush,
                                       mxf_interleave_get_packet,
                                       mxf_compare_timestamps);
}
 
/* Generic MXF (OP1a) muxer registration. Defaults to MPEG-2 video with
 * 16-bit little-endian PCM audio; timestamps are regenerated internally
 * (AVFMT_NOTIMESTAMPS). */
AVOutputFormat ff_mxf_muxer = {
    .name              = "mxf",
    .long_name         = NULL_IF_CONFIG_SMALL("MXF (Material eXchange Format)"),
    .mime_type         = "application/mxf",
    .extensions        = "mxf",
    .priv_data_size    = sizeof(MXFContext),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_MPEG2VIDEO,
    .write_header      = mxf_write_header,
    .write_packet      = mxf_write_packet,
    .write_trailer     = mxf_write_footer,
    .flags             = AVFMT_NOTIMESTAMPS,
    .interleave_packet = mxf_interleave,
};
 
/* MXF D-10 (IMX) mapping muxer. Shares the implementation with the
 * generic MXF muxer; no .extensions entry — presumably because the
 * plain "mxf" muxer already claims the extension (NOTE(review): confirm
 * this omission is intentional). */
AVOutputFormat ff_mxf_d10_muxer = {
    .name              = "mxf_d10",
    .long_name         = NULL_IF_CONFIG_SMALL("MXF (Material eXchange Format) D-10 Mapping"),
    .mime_type         = "application/mxf",
    .priv_data_size    = sizeof(MXFContext),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_MPEG2VIDEO,
    .write_header      = mxf_write_header,
    .write_packet      = mxf_write_packet,
    .write_trailer     = mxf_write_footer,
    .flags             = AVFMT_NOTIMESTAMPS,
    .interleave_packet = mxf_interleave,
};
/contrib/sdk/sources/ffmpeg/libavformat/mxg.c
0,0 → 1,265
/*
* MxPEG clip file demuxer
* Copyright (c) 2010 Anatoly Nenashev
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/mjpeg.h"
#include "avformat.h"
#include "internal.h"
#include "avio.h"
 
#define DEFAULT_PACKET_SIZE 1024
#define OVERREAD_SIZE 3
 
/* Demuxer state: a growable read-ahead cache over the MxPEG byte stream. */
typedef struct MXGContext {
    uint8_t *buffer;          /* start of the internal cache */
    uint8_t *buffer_ptr;      /* current parse position inside the cache */
    uint8_t *soi_ptr;         /* position of the last JPEG SOI marker, or NULL */
    unsigned int buffer_size; /* allocated size of 'buffer' (for av_fast_realloc) */
    int64_t dts;              /* timestamp for the video frame being assembled */
    unsigned int cache_size;  /* valid bytes remaining from buffer_ptr onwards */
} MXGContext;
 
/**
 * Set up the two fixed MxPEG streams and reset the demuxer cache.
 *
 * Stream 0 is MxPEG video (dimensions etc. are parsed later from the
 * bitstream); stream 1 is mono 8 kHz A-law audio. Both use microsecond
 * timestamps.
 *
 * @return 0 on success, AVERROR(ENOMEM) if a stream cannot be created
 */
static int mxg_read_header(AVFormatContext *s)
{
    MXGContext *mxg = s->priv_data;
    AVStream *vst, *ast;

    /* video parameters will be extracted from the compressed bitstream */
    vst = avformat_new_stream(s, NULL);
    if (!vst)
        return AVERROR(ENOMEM);
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = AV_CODEC_ID_MXPEG;
    avpriv_set_pts_info(vst, 64, 1, 1000000);

    ast = avformat_new_stream(s, NULL);
    if (!ast)
        return AVERROR(ENOMEM);
    ast->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
    ast->codec->codec_id              = AV_CODEC_ID_PCM_ALAW;
    ast->codec->channels              = 1;
    ast->codec->channel_layout        = AV_CH_LAYOUT_MONO;
    ast->codec->sample_rate           = 8000;
    ast->codec->bits_per_coded_sample = 8;
    ast->codec->block_align           = 1;
    avpriv_set_pts_info(ast, 64, 1, 1000000);

    /* start with an empty cache and no timestamp */
    mxg->buffer     = NULL;
    mxg->buffer_ptr = NULL;
    mxg->soi_ptr    = NULL;
    mxg->buffer_size = 0;
    mxg->cache_size  = 0;
    mxg->dts         = AV_NOPTS_VALUE;

    return 0;
}
 
/**
 * Find the first JPEG marker prefix byte (0xff) in [p, end).
 *
 * @return pointer to the first 0xff byte, or 'end' if none is found
 */
static uint8_t* mxg_find_startmarker(uint8_t *p, uint8_t *end)
{
    while (p < end && *p != 0xff)
        p++;
    return p;
}
 
/**
 * Grow the internal cache so it can hold 'cache_size' bytes past the
 * current parse position, then top it up from the input.
 *
 * buffer_ptr and soi_ptr are rebased after the reallocation since
 * av_fast_realloc may move the buffer.
 *
 * @return number of bytes newly read (>= 0), or a negative AVERROR code
 */
static int mxg_update_cache(AVFormatContext *s, unsigned int cache_size)
{
    MXGContext *mxg = s->priv_data;
    unsigned int current_pos = mxg->buffer_ptr - mxg->buffer;
    unsigned int soi_pos;
    uint8_t *buffer;
    int ret;

    /* reallocate internal buffer */
    /* unsigned overflow guard: current_pos + cache_size must not wrap */
    if (current_pos > current_pos + cache_size)
        return AVERROR(ENOMEM);
    soi_pos = mxg->soi_ptr - mxg->buffer;
    buffer = av_fast_realloc(mxg->buffer, &mxg->buffer_size,
                             current_pos + cache_size +
                             FF_INPUT_BUFFER_PADDING_SIZE);
    if (!buffer)
        return AVERROR(ENOMEM);
    mxg->buffer = buffer;
    mxg->buffer_ptr = mxg->buffer + current_pos;
    /* soi_ptr may be NULL when no SOI has been seen yet */
    if (mxg->soi_ptr) mxg->soi_ptr = mxg->buffer + soi_pos;

    /* get data */
    /* only fetch the bytes not already cached */
    ret = avio_read(s->pb, mxg->buffer_ptr + mxg->cache_size,
                    cache_size - mxg->cache_size);
    if (ret < 0)
        return ret;

    mxg->cache_size += ret;

    return ret;
}
 
/**
 * Extract the next video frame (SOI..EOI span) or audio chunk (APP13
 * segment) from the MxPEG stream.
 *
 * Returned packets point directly into the demuxer's internal cache
 * (pkt->buf/destruct cleared), so their data is only valid until the
 * next read call.
 *
 * @return pkt->size on success, AVERROR_EOF at end of stream,
 *         or another negative AVERROR code
 */
static int mxg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;
    unsigned int size;
    uint8_t *startmarker_ptr, *end, *search_end, marker;
    MXGContext *mxg = s->priv_data;

    while (!url_feof(s->pb) && !s->pb->error){
        if (mxg->cache_size <= OVERREAD_SIZE) {
            /* update internal buffer */
            ret = mxg_update_cache(s, DEFAULT_PACKET_SIZE + OVERREAD_SIZE);
            if (ret < 0)
                return ret;
        }
        end = mxg->buffer_ptr + mxg->cache_size;

        /* find start marker - 0xff */
        if (mxg->cache_size > OVERREAD_SIZE) {
            /* keep OVERREAD_SIZE bytes in reserve so the marker byte and
             * its length field can be read without refilling */
            search_end = end - OVERREAD_SIZE;
            startmarker_ptr = mxg_find_startmarker(mxg->buffer_ptr, search_end);
        } else {
            /* near EOF: only a trailing EOI is still acceptable */
            search_end = end;
            startmarker_ptr = mxg_find_startmarker(mxg->buffer_ptr, search_end);
            if (startmarker_ptr >= search_end - 1 ||
                *(startmarker_ptr + 1) != EOI) break;
        }

        if (startmarker_ptr != search_end) { /* start marker found */
            marker = *(startmarker_ptr + 1);
            mxg->buffer_ptr = startmarker_ptr + 2;
            mxg->cache_size = end - mxg->buffer_ptr;

            if (marker == SOI) {
                mxg->soi_ptr = startmarker_ptr;
            } else if (marker == EOI) {
                if (!mxg->soi_ptr) {
                    av_log(s, AV_LOG_WARNING, "Found EOI before SOI, skipping\n");
                    continue;
                }

                /* complete frame: emit everything from SOI through EOI */
                pkt->pts = pkt->dts = mxg->dts;
                pkt->stream_index = 0;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
                pkt->destruct = NULL;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
                pkt->buf  = NULL;
                pkt->size = mxg->buffer_ptr - mxg->soi_ptr;
                pkt->data = mxg->soi_ptr;

                /* compact the cache when the consumed frame no longer
                 * overlaps the remaining bytes */
                if (mxg->soi_ptr - mxg->buffer > mxg->cache_size) {
                    if (mxg->cache_size > 0) {
                        memcpy(mxg->buffer, mxg->buffer_ptr, mxg->cache_size);
                    }

                    mxg->buffer_ptr = mxg->buffer;
                }
                mxg->soi_ptr = 0;

                return pkt->size;
            } else if ( (SOF0 <= marker && marker <= SOF15) ||
                        (SOS <= marker && marker <= COM) ) {
                /* all other markers that start marker segment also contain
                   length value (see specification for JPEG Annex B.1) */
                size = AV_RB16(mxg->buffer_ptr);
                if (size < 2)
                    return AVERROR(EINVAL);

                if (mxg->cache_size < size) {
                    /* segment extends past the cache: fetch the rest */
                    ret = mxg_update_cache(s, size);
                    if (ret < 0)
                        return ret;
                    startmarker_ptr = mxg->buffer_ptr - 2;
                    mxg->cache_size = 0;
                } else {
                    mxg->cache_size -= size;
                }

                mxg->buffer_ptr += size;

                if (marker == APP13 && size >= 16) { /* audio data */
                    /* time (GMT) of first sample in usec since 1970, little-endian */
                    pkt->pts = pkt->dts = AV_RL64(startmarker_ptr + 8);
                    pkt->stream_index = 1;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
                    pkt->destruct = NULL;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
                    pkt->buf  = NULL;
                    pkt->size = size - 14;
                    pkt->data = startmarker_ptr + 16;

                    if (startmarker_ptr - mxg->buffer > mxg->cache_size) {
                        if (mxg->cache_size > 0) {
                            memcpy(mxg->buffer, mxg->buffer_ptr, mxg->cache_size);
                        }
                        mxg->buffer_ptr = mxg->buffer;
                    }

                    return pkt->size;
                } else if (marker == COM && size >= 18 &&
                           !strncmp(startmarker_ptr + 4, "MXF", 3)) {
                    /* time (GMT) of video frame in usec since 1970, little-endian */
                    mxg->dts = AV_RL64(startmarker_ptr + 12);
                }
            }
        } else {
            /* start marker not found */
            /* skip scanned bytes but keep the reserve for the next pass */
            mxg->buffer_ptr = search_end;
            mxg->cache_size = OVERREAD_SIZE;
        }
    }

    return AVERROR_EOF;
}
 
/* Release the demuxer's internal read cache. */
static int mxg_close(struct AVFormatContext *s)
{
    MXGContext *ctx = s->priv_data;

    av_freep(&ctx->buffer);
    return 0;
}
 
/* MxPEG clip demuxer registration; no read_probe, selection relies on
 * the .mxg extension. */
AVInputFormat ff_mxg_demuxer = {
    .name           = "mxg",
    .long_name      = NULL_IF_CONFIG_SMALL("MxPEG clip"),
    .priv_data_size = sizeof(MXGContext),
    .read_header    = mxg_read_header,
    .read_packet    = mxg_read_packet,
    .read_close     = mxg_close,
    .extensions     = "mxg",
};
/contrib/sdk/sources/ffmpeg/libavformat/ncdec.c
0,0 → 1,101
/*
* NC camera feed demuxer
* Copyright (c) 2009 Nicolas Martin (martinic at iro dot umontreal dot ca)
* Edouard Auvinet
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define NC_VIDEO_FLAG 0x1A5
 
/**
 * Probe for the NC camera feed format.
 *
 * Full score requires two consecutive frame start codes; a quarter score
 * is returned when the second code would lie beyond the probe buffer.
 */
static int nc_probe(AVProbeData *probe_packet)
{
    int frame_size;

    if (AV_RB32(probe_packet->buf) != NC_VIDEO_FLAG)
        return 0;

    frame_size = AV_RL16(probe_packet->buf + 5);

    /* cannot verify the second start code: partial confidence only */
    if (frame_size + 20 > probe_packet->buf_size)
        return AVPROBE_SCORE_MAX/4;

    return AV_RB32(probe_packet->buf + 16 + frame_size) == NC_VIDEO_FLAG
           ? AVPROBE_SCORE_MAX : 0;
}
 
/**
 * Create the single MPEG-4 video stream for an NC feed.
 *
 * Full parsing is requested because the stream carries raw elementary
 * data; the time base is 1/100 seconds.
 */
static int nc_read_header(AVFormatContext *s)
{
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    stream->codec->codec_id   = AV_CODEC_ID_MPEG4;
    stream->need_parsing      = AVSTREAM_PARSE_FULL;
    avpriv_set_pts_info(stream, 64, 1, 100);

    return 0;
}
 
/**
 * Read one NC frame: resync on the 32-bit start code, skip the fixed
 * header, then return the payload as a packet on stream 0.
 *
 * @return payload size on success, AVERROR(EAGAIN) for empty frames,
 *         AVERROR(EIO) on EOF/short read
 */
static int nc_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    uint32_t state = -1;
    int size, ret;

    /* shift bytes in one at a time until the start code lines up;
     * state starts != NC_VIDEO_FLAG, so at least one byte is read */
    do {
        if (url_feof(s->pb))
            return AVERROR(EIO);
        state = (state << 8) + avio_r8(s->pb);
    } while (state != NC_VIDEO_FLAG);

    avio_r8(s->pb);           /* unknown header byte */
    size = avio_rl16(s->pb);  /* payload length */
    avio_skip(s->pb, 9);      /* remainder of the fixed header */

    if (!size) {
        av_log(s, AV_LOG_DEBUG, "Next packet size is zero\n");
        return AVERROR(EAGAIN);
    }

    ret = av_get_packet(s->pb, pkt, size);
    if (ret != size) {
        if (ret > 0)
            av_free_packet(pkt);
        return AVERROR(EIO);
    }

    pkt->stream_index = 0;
    return size;
}
 
/* NC camera feed demuxer registration (".v" files). */
AVInputFormat ff_nc_demuxer = {
    .name        = "nc",
    .long_name   = NULL_IF_CONFIG_SMALL("NC camera feed"),
    .read_probe  = nc_probe,
    .read_header = nc_read_header,
    .read_packet = nc_read_packet,
    .extensions  = "v",
};
/contrib/sdk/sources/ffmpeg/libavformat/network.c
0,0 → 1,384
/*
* Copyright (c) 2007 The FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <fcntl.h>
#include "network.h"
#include "url.h"
#include "libavcodec/internal.h"
#include "libavutil/avutil.h"
#include "libavutil/mem.h"
#include "libavutil/time.h"
 
#if HAVE_THREADS
#if HAVE_PTHREADS
#include <pthread.h>
#elif HAVE_OS2THREADS
#include "compat/os2threads.h"
#else
#include "compat/w32pthreads.h"
#endif
#endif
 
#if CONFIG_OPENSSL
#include <openssl/ssl.h>
static int openssl_init;
#if HAVE_THREADS
#include <openssl/crypto.h>
pthread_mutex_t *openssl_mutexes;
static void openssl_lock(int mode, int type, const char *file, int line)
{
if (mode & CRYPTO_LOCK)
pthread_mutex_lock(&openssl_mutexes[type]);
else
pthread_mutex_unlock(&openssl_mutexes[type]);
}
#if !defined(WIN32) && OPENSSL_VERSION_NUMBER < 0x10000000
/* Thread-id callback for OpenSSL < 1.0.0 on non-Windows systems; maps
 * the pthread handle to the unsigned long OpenSSL expects. */
static unsigned long openssl_thread_id(void)
{
    return (intptr_t) pthread_self();
}
#endif
#endif
#endif
#if CONFIG_GNUTLS
#include <gnutls/gnutls.h>
#if HAVE_THREADS && GNUTLS_VERSION_NUMBER <= 0x020b00
#include <gcrypt.h>
#include <errno.h>
GCRY_THREAD_OPTION_PTHREAD_IMPL;
#endif
#endif
 
/**
 * Initialize the configured TLS backend(s) once, under the global
 * avformat lock.
 *
 * For OpenSSL a reference count guards one-time library init and, when
 * threading is available and no callbacks are installed yet, registers
 * mutex-based locking callbacks. For GnuTLS, old versions route gcrypt
 * through pthread-aware callbacks before gnutls_global_init().
 */
void ff_tls_init(void)
{
    avpriv_lock_avformat();
#if CONFIG_OPENSSL
    if (!openssl_init) {
        SSL_library_init();
        SSL_load_error_strings();
#if HAVE_THREADS
        /* only install our locking if nobody else did */
        if (!CRYPTO_get_locking_callback()) {
            int i;
            openssl_mutexes = av_malloc(sizeof(pthread_mutex_t) * CRYPTO_num_locks());
            for (i = 0; i < CRYPTO_num_locks(); i++)
                pthread_mutex_init(&openssl_mutexes[i], NULL);
            CRYPTO_set_locking_callback(openssl_lock);
#if !defined(WIN32) && OPENSSL_VERSION_NUMBER < 0x10000000
            CRYPTO_set_id_callback(openssl_thread_id);
#endif
        }
#endif
    }
    openssl_init++;
#endif
#if CONFIG_GNUTLS
    /* NOTE(review): include guard above uses <= 0x020b00 while this check
     * uses < 0x020b00 — harmless (extra include only), but worth confirming */
#if HAVE_THREADS && GNUTLS_VERSION_NUMBER < 0x020b00
    if (gcry_control(GCRYCTL_ANY_INITIALIZATION_P) == 0)
        gcry_control(GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread);
#endif
    gnutls_global_init();
#endif
    avpriv_unlock_avformat();
}
 
/**
 * Undo one ff_tls_init() reference, under the global avformat lock.
 *
 * When the OpenSSL refcount drops to zero and our locking callback is
 * still installed, it is removed and its mutexes destroyed. GnuTLS is
 * deinitialized unconditionally (gnutls_global_deinit is itself
 * reference-counted).
 */
void ff_tls_deinit(void)
{
    avpriv_lock_avformat();
#if CONFIG_OPENSSL
    openssl_init--;
    if (!openssl_init) {
#if HAVE_THREADS
        /* tear down only the callback we installed ourselves */
        if (CRYPTO_get_locking_callback() == openssl_lock) {
            int i;
            CRYPTO_set_locking_callback(NULL);
            for (i = 0; i < CRYPTO_num_locks(); i++)
                pthread_mutex_destroy(&openssl_mutexes[i]);
            av_free(openssl_mutexes);
        }
#endif
    }
#endif
#if CONFIG_GNUTLS
    gnutls_global_deinit();
#endif
    avpriv_unlock_avformat();
}
 
int ff_network_inited_globally;
 
/**
 * Per-use network initialization (WinSock startup on Windows, no-op
 * elsewhere). Warns when called without a prior global
 * avformat_network_init().
 *
 * @return 1 on success, 0 if WSAStartup failed
 */
int ff_network_init(void)
{
#if HAVE_WINSOCK2_H
    WSADATA wsaData;
#endif

    if (!ff_network_inited_globally)
        av_log(NULL, AV_LOG_WARNING, "Using network protocols without global "
                                     "network initialization. Please use "
                                     "avformat_network_init(), this will "
                                     "become mandatory later.\n");
#if HAVE_WINSOCK2_H
    if (WSAStartup(MAKEWORD(1,1), &wsaData))
        return 0;
#endif
    return 1;
}
 
/**
 * Wait up to 100 ms for fd to become writable (write != 0) or readable.
 *
 * @return 0 when the requested event (or an error/hangup condition) is
 *         pending, AVERROR(EAGAIN) on timeout, or a network error code
 */
int ff_network_wait_fd(int fd, int write)
{
    int ev = write ? POLLOUT : POLLIN;
    struct pollfd p;
    int ret;

    p.fd      = fd;
    p.events  = ev;
    p.revents = 0;

    ret = poll(&p, 1, 100);
    if (ret < 0)
        return ff_neterrno();
    /* POLLERR/POLLHUP also count as "ready": the subsequent I/O call
     * will surface the actual error */
    return (p.revents & (ev | POLLERR | POLLHUP)) ? 0 : AVERROR(EAGAIN);
}
 
/**
 * Like ff_network_wait_fd(), but loops until the fd is ready, the
 * interrupt callback fires, or 'timeout' microseconds elapse.
 * timeout <= 0 means wait indefinitely.
 *
 * @return 0 on readiness, AVERROR_EXIT on interrupt,
 *         AVERROR(ETIMEDOUT) on timeout, or another error code
 */
int ff_network_wait_fd_timeout(int fd, int write, int64_t timeout, AVIOInterruptCB *int_cb)
{
    int64_t wait_start = 0;

    for (;;) {
        int ret;

        if (ff_check_interrupt(int_cb))
            return AVERROR_EXIT;
        ret = ff_network_wait_fd(fd, write);
        if (ret != AVERROR(EAGAIN))
            return ret;
        if (timeout > 0) {
            /* start the clock lazily on the first EAGAIN */
            if (!wait_start)
                wait_start = av_gettime();
            else if (av_gettime() - wait_start > timeout)
                return AVERROR(ETIMEDOUT);
        }
    }
}
 
/* Counterpart of ff_network_init(): WinSock cleanup on Windows, no-op
 * elsewhere. */
void ff_network_close(void)
{
#if HAVE_WINSOCK2_H
    WSACleanup();
#endif
}
 
#if HAVE_WINSOCK2_H
/**
 * Map the last WinSock error to the corresponding AVERROR(errno) code.
 * Unrecognized WSA codes are returned negated as-is.
 */
int ff_neterrno(void)
{
    int err = WSAGetLastError();
    switch (err) {
    case WSAEWOULDBLOCK:
        return AVERROR(EAGAIN);
    case WSAEINTR:
        return AVERROR(EINTR);
    case WSAEPROTONOSUPPORT:
        return AVERROR(EPROTONOSUPPORT);
    case WSAETIMEDOUT:
        return AVERROR(ETIMEDOUT);
    case WSAECONNREFUSED:
        return AVERROR(ECONNREFUSED);
    case WSAEINPROGRESS:
        return AVERROR(EINPROGRESS);
    }
    return -err;
}
#endif
 
/**
 * Check whether a socket address denotes an IPv4 or IPv6 multicast
 * group.
 *
 * @return nonzero for a multicast address, 0 otherwise (including
 *         unsupported address families)
 */
int ff_is_multicast_address(struct sockaddr *addr)
{
    switch (addr->sa_family) {
    case AF_INET:
        return IN_MULTICAST(ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
#if HAVE_STRUCT_SOCKADDR_IN6
    case AF_INET6:
        return IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6 *)addr)->sin6_addr);
#endif
    default:
        return 0;
    }
}
 
/**
 * poll() in POLLING_TIME slices so the interrupt callback can be checked
 * between slices. timeout <= 0 polls forever.
 *
 * @return poll()'s positive result on readiness, AVERROR_EXIT on
 *         interrupt, AVERROR(ETIMEDOUT) on expiry, AVERROR(errno) on
 *         poll failure
 */
static int ff_poll_interrupt(struct pollfd *p, nfds_t nfds, int timeout,
                             AVIOInterruptCB *cb)
{
    int slices_left = timeout / POLLING_TIME;
    int ret = 0;

    do {
        if (ff_check_interrupt(cb))
            return AVERROR_EXIT;
        ret = poll(p, nfds, POLLING_TIME);
        if (ret != 0)
            break;
    } while (timeout <= 0 || slices_left-- > 0);

    if (ret == 0)
        return AVERROR(ETIMEDOUT);
    return ret < 0 ? AVERROR(errno) : ret;
}
 
/**
 * socket() wrapper that marks the descriptor close-on-exec.
 *
 * Tries the atomic SOCK_CLOEXEC flag first where available; on EINVAL
 * (older kernels that reject the flag) falls back to a plain socket()
 * followed by fcntl(FD_CLOEXEC).
 *
 * @return socket descriptor, or -1 on failure (errno set by socket())
 */
int ff_socket(int af, int type, int proto)
{
    int fd;

#ifdef SOCK_CLOEXEC
    fd = socket(af, type | SOCK_CLOEXEC, proto);
    if (fd == -1 && errno == EINVAL)
#endif
    {
        fd = socket(af, type, proto);
#if HAVE_FCNTL
        if (fd != -1) {
            /* best effort: failure to set CLOEXEC is not fatal */
            if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1)
                av_log(NULL, AV_LOG_DEBUG, "Failed to set close on exec\n");
        }
#endif
    }
    return fd;
}
 
/**
 * Bind 'fd', listen, and wait (interruptibly, up to 'timeout' ms) for
 * one incoming connection.
 *
 * On success the listening socket 'fd' is CLOSED and ownership of the
 * accepted, non-blocking connection socket is returned to the caller.
 * On failure 'fd' is left open for the caller to close.
 *
 * @return accepted socket descriptor (>= 0), or a negative AVERROR code
 */
int ff_listen_bind(int fd, const struct sockaddr *addr,
                   socklen_t addrlen, int timeout, URLContext *h)
{
    int ret;
    int reuse = 1;
    struct pollfd lp = { fd, POLLIN, 0 };
    /* non-fatal: continue even if the address cannot be marked reusable */
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse))) {
        av_log(NULL, AV_LOG_WARNING, "setsockopt(SO_REUSEADDR) failed\n");
    }
    ret = bind(fd, addr, addrlen);
    if (ret)
        return ff_neterrno();

    ret = listen(fd, 1);
    if (ret)
        return ff_neterrno();

    /* wait for a pending connection, honoring the interrupt callback */
    ret = ff_poll_interrupt(&lp, 1, timeout, &h->interrupt_callback);
    if (ret < 0)
        return ret;

    ret = accept(fd, NULL, NULL);
    if (ret < 0)
        return ff_neterrno();

    /* single-connection server: the listener is no longer needed */
    closesocket(fd);

    ff_socket_nonblock(ret, 1);
    return ret;
}
 
/**
 * Non-blocking connect with interruptible timeout.
 *
 * The socket is switched to non-blocking mode; EINPROGRESS/EAGAIN is
 * resolved by polling for writability and then reading SO_ERROR to get
 * the real connect result. 'will_try_next' only affects the severity
 * and wording of the failure log.
 *
 * @return 0 on success, or a negative AVERROR code
 */
int ff_listen_connect(int fd, const struct sockaddr *addr,
                      socklen_t addrlen, int timeout, URLContext *h,
                      int will_try_next)
{
    struct pollfd p = {fd, POLLOUT, 0};
    int ret;
    socklen_t optlen;

    ff_socket_nonblock(fd, 1);

    while ((ret = connect(fd, addr, addrlen))) {
        ret = ff_neterrno();
        switch (ret) {
        case AVERROR(EINTR):
            /* interrupted: re-check the abort callback and retry */
            if (ff_check_interrupt(&h->interrupt_callback))
                return AVERROR_EXIT;
            continue;
        case AVERROR(EINPROGRESS):
        case AVERROR(EAGAIN):
            /* connection attempt in flight: poll for completion */
            ret = ff_poll_interrupt(&p, 1, timeout, &h->interrupt_callback);
            if (ret < 0)
                return ret;
            optlen = sizeof(ret);
            if (getsockopt (fd, SOL_SOCKET, SO_ERROR, &ret, &optlen))
                ret = AVUNERROR(ff_neterrno());
            if (ret != 0) {
                char errbuf[100];
                ret = AVERROR(ret);
                av_strerror(ret, errbuf, sizeof(errbuf));
                if (will_try_next)
                    av_log(h, AV_LOG_WARNING,
                           "Connection to %s failed (%s), trying next address\n",
                           h->filename, errbuf);
                else
                    av_log(h, AV_LOG_ERROR, "Connection to %s failed: %s\n",
                           h->filename, errbuf);
            }
            /* fallthrough: ret is 0 on success or the final error */
        default:
            return ret;
        }
    }
    return ret;
}
 
/**
 * Match a hostname against one no_proxy pattern.
 *
 * "*" matches anything; an optional leading "*." or "." is stripped and
 * the remainder must equal the hostname or be a domain suffix of it
 * (on a '.' label boundary).
 *
 * @return 1 on match, 0 otherwise
 */
static int match_host_pattern(const char *pattern, const char *hostname)
{
    int plen, hlen;

    if (!strcmp(pattern, "*"))
        return 1;
    /* tolerate "*.domain" and ".domain" forms */
    if (*pattern == '*')
        pattern++;
    if (*pattern == '.')
        pattern++;
    plen = strlen(pattern);
    hlen = strlen(hostname);
    if (plen > hlen)
        return 0;
    if (strcmp(pattern, hostname + (hlen - plen)))
        return 0;
    /* exact match, or a proper domain suffix (not a mid-label substring) */
    return hlen == plen || hostname[hlen - plen - 1] == '.';
}
 
/**
 * Check whether 'hostname' matches any pattern in a space- or
 * comma-separated no_proxy list.
 *
 * @return 1 if the proxy should be bypassed for this host, 0 otherwise
 *         (also on NULL arguments or allocation failure)
 */
int ff_http_match_no_proxy(const char *no_proxy, const char *hostname)
{
    char *dup, *cur;
    int found = 0;

    if (!no_proxy || !hostname)
        return 0;
    /* work on a private copy: tokens are NUL-terminated in place */
    dup = av_strdup(no_proxy);
    if (!dup)
        return 0;
    cur = dup;
    while (cur) {
        char *tok_end, *next = NULL;

        cur    += strspn(cur, " ,");
        tok_end = cur + strcspn(cur, " ,");
        if (*tok_end) {
            next = tok_end + 1;
            *tok_end = '\0';
        }
        if (match_host_pattern(cur, hostname)) {
            found = 1;
            break;
        }
        cur = next;
    }
    av_free(dup);
    return found;
}
/contrib/sdk/sources/ffmpeg/libavformat/network.h
0,0 → 1,267
/*
* Copyright (c) 2007 The FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_NETWORK_H
#define AVFORMAT_NETWORK_H
 
#include <errno.h>
#include <stdint.h>
 
#include "config.h"
#include "libavutil/error.h"
#include "os_support.h"
#include "avio.h"
#include "url.h"
 
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
 
#if HAVE_WINSOCK2_H
#include <winsock2.h>
#include <ws2tcpip.h>
 
#ifndef EPROTONOSUPPORT
#define EPROTONOSUPPORT WSAEPROTONOSUPPORT
#endif
#ifndef ETIMEDOUT
#define ETIMEDOUT WSAETIMEDOUT
#endif
#ifndef ECONNREFUSED
#define ECONNREFUSED WSAECONNREFUSED
#endif
#ifndef EINPROGRESS
#define EINPROGRESS WSAEINPROGRESS
#endif
 
#define getsockopt(a, b, c, d, e) getsockopt(a, b, c, (char*) d, e)
#define setsockopt(a, b, c, d, e) setsockopt(a, b, c, (const char*) d, e)
 
int ff_neterrno(void);
#else
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
 
#define ff_neterrno() AVERROR(errno)
#endif
 
#if HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
 
#if HAVE_POLL_H
#include <poll.h>
#endif
 
int ff_socket_nonblock(int socket, int enable);
 
extern int ff_network_inited_globally;
int ff_network_init(void);
void ff_network_close(void);
 
void ff_tls_init(void);
void ff_tls_deinit(void);
 
int ff_network_wait_fd(int fd, int write);
 
/**
* This works similarly to ff_network_wait_fd, but waits up to 'timeout' microseconds
* Uses ff_network_wait_fd in a loop
*
* @fd Socket descriptor
* @write Set 1 to wait for socket able to be read, 0 to be written
* @timeout Timeout interval, in microseconds. Actual precision is 100000 mcs, due to ff_network_wait_fd usage
* @param int_cb Interrupt callback, is checked before each ff_network_wait_fd call
* @return 0 if data can be read/written, AVERROR(ETIMEDOUT) if timeout expired, or negative error code
*/
int ff_network_wait_fd_timeout(int fd, int write, int64_t timeout, AVIOInterruptCB *int_cb);
 
int ff_inet_aton (const char * str, struct in_addr * add);
 
#if !HAVE_STRUCT_SOCKADDR_STORAGE
/* Minimal stand-in for the system struct sockaddr_storage: large enough
 * for any address family and 64-bit aligned via ss_align. */
struct sockaddr_storage {
#if HAVE_STRUCT_SOCKADDR_SA_LEN
    uint8_t ss_len;
    uint8_t ss_family;
#else
    uint16_t ss_family;
#endif
    char ss_pad1[6];    /* pad to the ss_align field */
    int64_t ss_align;   /* forces 8-byte alignment of the struct */
    char ss_pad2[112];  /* pad to 128 bytes total */
};
#endif
 
#if !HAVE_STRUCT_ADDRINFO
/* Minimal stand-in for the system struct addrinfo, used with the
 * ff_getaddrinfo() fallback below. */
struct addrinfo {
    int ai_flags;              /* AI_* input flags */
    int ai_family;             /* address family (AF_*) */
    int ai_socktype;           /* socket type (SOCK_*) */
    int ai_protocol;           /* protocol number */
    int ai_addrlen;            /* length of ai_addr */
    struct sockaddr *ai_addr;  /* resolved address */
    char *ai_canonname;        /* canonical hostname, if requested */
    struct addrinfo *ai_next;  /* next result in the linked list */
};
#endif
 
/* getaddrinfo constants */
#ifndef EAI_AGAIN
#define EAI_AGAIN 2
#endif
#ifndef EAI_BADFLAGS
#define EAI_BADFLAGS 3
#endif
#ifndef EAI_FAIL
#define EAI_FAIL 4
#endif
#ifndef EAI_FAMILY
#define EAI_FAMILY 5
#endif
#ifndef EAI_MEMORY
#define EAI_MEMORY 6
#endif
#ifndef EAI_NODATA
#define EAI_NODATA 7
#endif
#ifndef EAI_NONAME
#define EAI_NONAME 8
#endif
#ifndef EAI_SERVICE
#define EAI_SERVICE 9
#endif
#ifndef EAI_SOCKTYPE
#define EAI_SOCKTYPE 10
#endif
 
#ifndef AI_PASSIVE
#define AI_PASSIVE 1
#endif
 
#ifndef AI_CANONNAME
#define AI_CANONNAME 2
#endif
 
#ifndef AI_NUMERICHOST
#define AI_NUMERICHOST 4
#endif
 
#ifndef NI_NOFQDN
#define NI_NOFQDN 1
#endif
 
#ifndef NI_NUMERICHOST
#define NI_NUMERICHOST 2
#endif
 
#ifndef NI_NAMERQD
#define NI_NAMERQD 4
#endif
 
#ifndef NI_NUMERICSERV
#define NI_NUMERICSERV 8
#endif
 
#ifndef NI_DGRAM
#define NI_DGRAM 16
#endif
 
#if !HAVE_GETADDRINFO
int ff_getaddrinfo(const char *node, const char *service,
const struct addrinfo *hints, struct addrinfo **res);
void ff_freeaddrinfo(struct addrinfo *res);
int ff_getnameinfo(const struct sockaddr *sa, int salen,
char *host, int hostlen,
char *serv, int servlen, int flags);
#define getaddrinfo ff_getaddrinfo
#define freeaddrinfo ff_freeaddrinfo
#define getnameinfo ff_getnameinfo
#endif
#if !HAVE_GETADDRINFO || HAVE_WINSOCK2_H
const char *ff_gai_strerror(int ecode);
#undef gai_strerror
#define gai_strerror ff_gai_strerror
#endif
 
#ifndef INADDR_LOOPBACK
#define INADDR_LOOPBACK 0x7f000001
#endif
 
#ifndef INET_ADDRSTRLEN
#define INET_ADDRSTRLEN 16
#endif
 
#ifndef INET6_ADDRSTRLEN
#define INET6_ADDRSTRLEN INET_ADDRSTRLEN
#endif
 
#ifndef IN_MULTICAST
#define IN_MULTICAST(a) ((((uint32_t)(a)) & 0xf0000000) == 0xe0000000)
#endif
#ifndef IN6_IS_ADDR_MULTICAST
#define IN6_IS_ADDR_MULTICAST(a) (((uint8_t *) (a))[0] == 0xff)
#endif
 
int ff_is_multicast_address(struct sockaddr *addr);
 
#define POLLING_TIME 100 /// Time in milliseconds between interrupt check
 
/**
* Bind to a file descriptor and poll for a connection.
*
* @param fd First argument of bind().
* @param addr Second argument of bind().
* @param addrlen Third argument of bind().
* @param timeout Polling timeout in milliseconds.
* @param h URLContext providing interrupt check
* callback and logging context.
* @return A non-blocking file descriptor on success
* or an AVERROR on failure.
*/
int ff_listen_bind(int fd, const struct sockaddr *addr,
socklen_t addrlen, int timeout,
URLContext *h);
 
/**
* Connect to a file descriptor and poll for result.
*
* @param fd First argument of connect(),
* will be set as non-blocking.
* @param addr Second argument of connect().
* @param addrlen Third argument of connect().
* @param timeout Polling timeout in milliseconds.
* @param h URLContext providing interrupt check
* callback and logging context.
* @param will_try_next Whether the caller will try to connect to another
* address for the same host name, affecting the form of
* logged errors.
* @return 0 on success, AVERROR on failure.
*/
int ff_listen_connect(int fd, const struct sockaddr *addr,
socklen_t addrlen, int timeout,
URLContext *h, int will_try_next);
 
int ff_http_match_no_proxy(const char *no_proxy, const char *hostname);
 
int ff_socket(int domain, int type, int protocol);
 
#endif /* AVFORMAT_NETWORK_H */
/contrib/sdk/sources/ffmpeg/libavformat/nistspheredec.c
0,0 → 1,128
/*
* NIST Sphere demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "pcm.h"
 
/* NIST SPHERE files start with the 8-byte magic "NIST_1A\n". */
static int nist_probe(AVProbeData *p)
{
    return AV_RL64(p->buf) == AV_RL64("NIST_1A\x0a")
           ? AVPROBE_SCORE_MAX : 0;
}
 
/**
 * Parse the textual NIST SPHERE header.
 *
 * Line 1 is the magic, line 2 the total header size in bytes; the
 * remaining "key type value" lines are read until "end_head", at which
 * point the codec parameters are finalized and the file position is
 * advanced to the start of the audio data. Unknown keys are stored as
 * metadata.
 *
 * @return 0 on success, a negative AVERROR code on malformed headers
 */
static int nist_read_header(AVFormatContext *s)
{
    char buffer[32], coding[32] = "pcm", format[32] = "01";
    int bps = 0, be = 0;
    int32_t header_size;
    AVStream *st;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

    /* skip the magic line, then read the declared header size */
    ff_get_line(s->pb, buffer, sizeof(buffer));
    ff_get_line(s->pb, buffer, sizeof(buffer));
    sscanf(buffer, "%"SCNd32, &header_size);
    if (header_size <= 0)
        return AVERROR_INVALIDDATA;

    while (!url_feof(s->pb)) {
        ff_get_line(s->pb, buffer, sizeof(buffer));

        /* never read key/value lines past the declared header area */
        if (avio_tell(s->pb) >= header_size)
            return AVERROR_INVALIDDATA;

        if (!memcmp(buffer, "end_head", 8)) {
            /* derive bits per sample from the byte count if not given */
            if (!st->codec->bits_per_coded_sample)
                st->codec->bits_per_coded_sample = bps << 3;

            if (!av_strcasecmp(coding, "pcm")) {
                st->codec->codec_id = ff_get_pcm_codec_id(st->codec->bits_per_coded_sample,
                                                          0, be, 0xFFFF);
            } else if (!av_strcasecmp(coding, "alaw")) {
                st->codec->codec_id = AV_CODEC_ID_PCM_ALAW;
            } else if (!av_strcasecmp(coding, "ulaw") ||
                       !av_strcasecmp(coding, "mu-law")) {
                st->codec->codec_id = AV_CODEC_ID_PCM_MULAW;
            } else {
                avpriv_request_sample(s, "coding %s", coding);
            }

            avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

            st->codec->block_align = st->codec->bits_per_coded_sample * st->codec->channels / 8;

            if (avio_tell(s->pb) > header_size)
                return AVERROR_INVALIDDATA;

            /* position at the first audio sample */
            avio_skip(s->pb, header_size - avio_tell(s->pb));

            return 0;
        } else if (!memcmp(buffer, "channel_count", 13)) {
            sscanf(buffer, "%*s %*s %"SCNd32, &st->codec->channels);
        } else if (!memcmp(buffer, "sample_byte_format", 18)) {
            sscanf(buffer, "%*s %*s %31s", format);

            /* "01" = little endian, "10" = big endian, "1" = single byte */
            if (!av_strcasecmp(format, "01")) {
                be = 0;
            } else if (!av_strcasecmp(format, "10")) {
                be = 1;
            } else if (av_strcasecmp(format, "1")) {
                avpriv_request_sample(s, "sample byte format %s", format);
                return AVERROR_PATCHWELCOME;
            }
        } else if (!memcmp(buffer, "sample_coding", 13)) {
            sscanf(buffer, "%*s %*s %31s", coding);
        } else if (!memcmp(buffer, "sample_count", 12)) {
            sscanf(buffer, "%*s %*s %"SCNd64, &st->duration);
        } else if (!memcmp(buffer, "sample_n_bytes", 14)) {
            sscanf(buffer, "%*s %*s %"SCNd32, &bps);
        } else if (!memcmp(buffer, "sample_rate", 11)) {
            sscanf(buffer, "%*s %*s %"SCNd32, &st->codec->sample_rate);
        } else if (!memcmp(buffer, "sample_sig_bits", 15)) {
            sscanf(buffer, "%*s %*s %"SCNd32, &st->codec->bits_per_coded_sample);
        } else {
            /* unrecognized field: preserve it as metadata */
            char key[32], value[32];
            sscanf(buffer, "%31s %*s %31s", key, value);
            av_dict_set(&s->metadata, key, value, AV_DICT_APPEND);
        }
    }

    return AVERROR_EOF;
}
 
/* NIST SPHERE demuxer registration; payload is raw PCM, so the generic
 * PCM read/seek helpers are used. */
AVInputFormat ff_nistsphere_demuxer = {
    .name        = "nistsphere",
    .long_name   = NULL_IF_CONFIG_SMALL("NIST SPeech HEader REsources"),
    .read_probe  = nist_probe,
    .read_header = nist_read_header,
    .read_packet = ff_pcm_read_packet,
    .read_seek   = ff_pcm_read_seek,
    .extensions  = "nist,sph",
    .flags       = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/noproxy-test.c
0,0 → 1,43
/*
* Copyright (c) 2013 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "network.h"
 
/* Run one no_proxy match and print a human-readable verdict. */
static void test(const char *pattern, const char *host)
{
    int matched = ff_http_match_no_proxy(pattern, host);

    printf("The pattern \"%s\" %s the hostname %s\n",
           pattern ? pattern : "(null)",
           matched ? "matches" : "does not match",
           host);
}
 
/* Exercise ff_http_match_no_proxy over a fixed table of pattern/host
 * pairs; output order matches the reference test vectors. */
int main(void)
{
    static const struct { const char *pattern, *host; } cases[] = {
        { NULL,                        "domain.com"      },
        { "example.com domain.com",    "domain.com"      },
        { "example.com other.com",     "domain.com"      },
        { "example.com,domain.com",    "domain.com"      },
        { "example.com,domain.com",    "otherdomain.com" },
        { "example.com, *.domain.com", "sub.domain.com"  },
        { "example.com, *.domain.com", "domain.com"      },
        { "example.com, .domain.com",  "domain.com"      },
        { "*",                         "domain.com"      },
    };
    size_t i;

    for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
        test(cases[i].pattern, cases[i].host);
    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/nsvdec.c
0,0 → 1,776
/*
* NSV demuxer
* Copyright (c) 2004 The FFmpeg Project
*
* first version by Francois Revol <revol@free.fr>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/attributes.h"
#include "libavutil/mathematics.h"
#include "avformat.h"
#include "internal.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
 
/* max bytes to crawl for trying to resync
* stupid streaming servers don't start at chunk boundaries...
*/
#define NSV_MAX_RESYNC (500*1024)
#define NSV_MAX_RESYNC_TRIES 300
 
/*
* References:
* (1) http://www.multimedia.cx/nsv-format.txt
* seems someone came to the same conclusions as me, and updated it:
* (2) http://www.stud.ktu.lt/~vitslav/nsv/nsv-format.txt
* http://www.stud.ktu.lt/~vitslav/nsv/
* official docs
* (3) http://ultravox.aol.com/NSVFormat.rtf
* Sample files:
* (S1) http://www.nullsoft.com/nsv/samples/
* http://www.nullsoft.com/nsv/samples/faster.nsv
* http://streamripper.sourceforge.net/openbb/read.php?TID=492&page=4
*/
 
/*
* notes on the header (Francois Revol):
*
* It is followed by strings, then a table, but nothing tells
* where the table begins according to (1). After checking faster.nsv,
* I believe NVSf[16-19] gives the size of the strings data
* (that is the offset of the data table after the header).
* After checking all samples from (S1) all confirms this.
*
* Then, about NSVf[12-15], faster.nsv has 179700. When viewing it in VLC,
* I noticed there was about 1 NVSs chunk/s, so I ran
* strings faster.nsv | grep NSVs | wc -l
* which gave me 180. That leads me to think that NSVf[12-15] might be the
* file length in milliseconds.
* Let's try that:
* for f in *.nsv; do HTIME="$(od -t x4 "$f" | head -1 | sed 's/.* //')"; echo "'$f' $((0x$HTIME))s = $((0x$HTIME/1000/60)):$((0x$HTIME/1000%60))"; done
* except for nsvtrailer (which doesn't have an NSVf header), it reports the correct time.
*
* nsvtrailer.nsv (S1) does not have any NSVf header, only NSVs chunks,
* so the header seems to not be mandatory. (for streaming).
*
* index slice duration check (except nsvtrailer.nsv):
* for f in [^n]*.nsv; do DUR="$(ffmpeg -i "$f" 2>/dev/null | grep 'NSVf duration' | cut -d ' ' -f 4)"; IC="$(ffmpeg -i "$f" 2>/dev/null | grep 'INDEX ENTRIES' | cut -d ' ' -f 2)"; echo "duration $DUR, slice time $(($DUR/$IC))"; done
*/
 
/*
* TODO:
* - handle timestamps !!!
* - use index
* - mime-type in probe()
* - seek
*/
 
#if 0
struct NSVf_header {
uint32_t chunk_tag; /* 'NSVf' */
uint32_t chunk_size;
uint32_t file_size; /* max 4GB ??? no one learns anything it seems :^) */
uint32_t file_length; //unknown1; /* what about MSB of file_size ? */
uint32_t info_strings_size; /* size of the info strings */ //unknown2;
uint32_t table_entries;
uint32_t table_entries_used; /* the left ones should be -1 */
};
 
struct NSVs_header {
uint32_t chunk_tag; /* 'NSVs' */
uint32_t v4cc; /* or 'NONE' */
uint32_t a4cc; /* or 'NONE' */
uint16_t vwidth; /* av_assert(vwidth%16==0) */
uint16_t vheight; /* av_assert(vheight%16==0) */
uint8_t framerate; /* value = (framerate&0x80)?frtable[frameratex0x7f]:framerate */
uint16_t unknown;
};
 
struct nsv_avchunk_header {
uint8_t vchunk_size_lsb;
uint16_t vchunk_size_msb; /* value = (vchunk_size_msb << 4) | (vchunk_size_lsb >> 4) */
uint16_t achunk_size;
};
 
struct nsv_pcm_header {
uint8_t bits_per_sample;
uint8_t channel_count;
uint16_t sample_rate;
};
#endif
 
/* variation from avi.h */
/*typedef struct CodecTag {
int id;
unsigned int tag;
} CodecTag;*/
 
/* tags */
 
#define T_NSVF MKTAG('N', 'S', 'V', 'f') /* file header */
#define T_NSVS MKTAG('N', 'S', 'V', 's') /* chunk header */
#define T_TOC2 MKTAG('T', 'O', 'C', '2') /* extra index marker */
#define T_NONE MKTAG('N', 'O', 'N', 'E') /* null a/v 4CC */
#define T_SUBT MKTAG('S', 'U', 'B', 'T') /* subtitle aux data */
#define T_ASYN MKTAG('A', 'S', 'Y', 'N') /* async a/v aux marker */
#define T_KEYF MKTAG('K', 'E', 'Y', 'F') /* video keyframe aux marker (addition) */
 
#define TB_NSVF MKBETAG('N', 'S', 'V', 'f')
#define TB_NSVS MKBETAG('N', 'S', 'V', 's')
 
/* hardcoded stream indexes */
#define NSV_ST_VIDEO 0
#define NSV_ST_AUDIO 1
#define NSV_ST_SUBT 2
 
enum NSVStatus {
NSV_UNSYNC,
NSV_FOUND_NSVF,
NSV_HAS_READ_NSVF,
NSV_FOUND_NSVS,
NSV_HAS_READ_NSVS,
NSV_FOUND_BEEF,
NSV_GOT_VIDEO,
NSV_GOT_AUDIO,
};
 
typedef struct NSVStream {
int frame_offset; /* current frame (video) or byte (audio) counter
(used to compute the pts) */
int scale;
int rate;
int sample_size; /* audio only data */
int start;
 
int new_frame_offset; /* temporary storage (used during seek) */
int cum_len; /* temporary storage (used during seek) */
} NSVStream;
 
typedef struct {
int base_offset;
int NSVf_end;
uint32_t *nsvs_file_offset;
int index_entries;
enum NSVStatus state;
AVPacket ahead[2]; /* [v, a] if .data is !NULL there is something */
/* cached */
int64_t duration;
uint32_t vtag, atag;
uint16_t vwidth, vheight;
int16_t avsync;
AVRational framerate;
uint32_t *nsvs_timestamps;
//DVDemuxContext* dv_demux;
} NSVContext;
 
static const AVCodecTag nsv_codec_video_tags[] = {
{ AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', ' ') },
{ AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') },
{ AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') },
{ AV_CODEC_ID_VP5, MKTAG('V', 'P', '5', ' ') },
{ AV_CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') },
{ AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', ' ') },
{ AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') },
{ AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') },
{ AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') },
{ AV_CODEC_ID_VP8, MKTAG('V', 'P', '8', '0') },
/*
{ AV_CODEC_ID_VP4, MKTAG('V', 'P', '4', ' ') },
{ AV_CODEC_ID_VP4, MKTAG('V', 'P', '4', '0') },
*/
{ AV_CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') }, /* cf sample xvid decoder from nsv_codec_sdk.zip */
{ AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', '3') },
{ AV_CODEC_ID_NONE, 0 },
};
 
static const AVCodecTag nsv_codec_audio_tags[] = {
{ AV_CODEC_ID_MP3, MKTAG('M', 'P', '3', ' ') },
{ AV_CODEC_ID_AAC, MKTAG('A', 'A', 'C', ' ') },
{ AV_CODEC_ID_AAC, MKTAG('A', 'A', 'C', 'P') },
{ AV_CODEC_ID_AAC, MKTAG('V', 'L', 'B', ' ') },
{ AV_CODEC_ID_SPEEX, MKTAG('S', 'P', 'X', ' ') },
{ AV_CODEC_ID_PCM_U16LE, MKTAG('P', 'C', 'M', ' ') },
{ AV_CODEC_ID_NONE, 0 },
};
 
//static int nsv_load_index(AVFormatContext *s);
static int nsv_read_chunk(AVFormatContext *s, int fill_header);
 
#define print_tag(str, tag, size) \
av_dlog(NULL, "%s: tag=%c%c%c%c\n", \
str, tag & 0xff, \
(tag >> 8) & 0xff, \
(tag >> 16) & 0xff, \
(tag >> 24) & 0xff);
 
/* try to find something we recognize, and set the state accordingly */
/* Crawl the byte stream (up to NSV_MAX_RESYNC bytes) looking for a known
 * signature, maintaining a rolling 32-bit big-endian byte window.
 * Sets nsv->state to the signature found (BEEF sync marker, NSVf file
 * header or NSVs chunk header) and returns 0, or -1 on EOF / sync lost. */
static int nsv_resync(AVFormatContext *s)
{
    NSVContext *nsv = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t window = 0; /* last 4 bytes read, big-endian packed */
    int pos;

    av_dlog(s, "%s(), offset = %"PRId64", state = %d\n", __FUNCTION__, avio_tell(pb), nsv->state);

    for (pos = 0; pos < NSV_MAX_RESYNC; pos++) {
        if (url_feof(pb)) {
            av_dlog(s, "NSV EOF\n");
            nsv->state = NSV_UNSYNC;
            return -1;
        }
        window = (window << 8) | avio_r8(pb);
        if (pos < 8)
            av_dlog(s, "NSV resync: [%d] = %02x\n", pos, window & 0x0FF);

        if ((window & 0x0000ffff) == 0xefbe) { /* BEEF */
            av_dlog(s, "NSV resynced on BEEF after %d bytes\n", pos+1);
            nsv->state = NSV_FOUND_BEEF;
            return 0;
        }
        /* we read as big-endian, thus the MK*BE* */
        if (window == TB_NSVF) { /* NSVf */
            av_dlog(s, "NSV resynced on NSVf after %d bytes\n", pos+1);
            nsv->state = NSV_FOUND_NSVF;
            return 0;
        }
        if (window == MKBETAG('N', 'S', 'V', 's')) { /* NSVs */
            av_dlog(s, "NSV resynced on NSVs after %d bytes\n", pos+1);
            nsv->state = NSV_FOUND_NSVS;
            return 0;
        }
    }
    av_dlog(s, "NSV sync lost\n");
    return -1;
}
 
/**
 * Parse the optional NSVf file header: total duration, "key=value" info
 * strings (exported into s->metadata) and the seek-index table.
 * On success leaves nsv->state at NSV_HAS_READ_NSVF and positions the
 * stream right after the NSVf chunk; returns < 0 on error.
 */
static int nsv_parse_NSVf_header(AVFormatContext *s)
{
    NSVContext *nsv = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int av_unused file_size;
    unsigned int size;
    int64_t duration;
    int strings_size;
    int table_entries;
    int table_entries_used;

    av_dlog(s, "%s()\n", __FUNCTION__);

    nsv->state = NSV_UNSYNC; /* in case we fail */

    /* chunk size; the fixed fields alone take 28 bytes */
    size = avio_rl32(pb);
    if (size < 28)
        return -1;
    nsv->NSVf_end = size;

    //s->file_size = (uint32_t)avio_rl32(pb);
    file_size = (uint32_t)avio_rl32(pb); /* read but only logged */
    av_dlog(s, "NSV NSVf chunk_size %u\n", size);
    av_dlog(s, "NSV NSVf file_size %u\n", file_size);

    nsv->duration = duration = avio_rl32(pb); /* in ms */
    av_dlog(s, "NSV NSVf duration %"PRId64" ms\n", duration);
    // XXX: store it in AVStreams

    strings_size       = avio_rl32(pb);
    table_entries      = avio_rl32(pb);
    table_entries_used = avio_rl32(pb);
    av_dlog(s, "NSV NSVf info-strings size: %d, table entries: %d, bis %d\n",
            strings_size, table_entries, table_entries_used);
    if (url_feof(pb))
        return -1;

    av_dlog(s, "NSV got header; filepos %"PRId64"\n", avio_tell(pb));

    /* the info block is a space-separated list of token=<q>value<q> pairs,
     * where <q> is an arbitrary quote character; parsed destructively in
     * a NUL-padded copy */
    if (strings_size > 0) {
        char *strings; /* last byte will be '\0' to play safe with str*() */
        char *p, *endp;
        char *token, *value;
        char quote;

        p = strings = av_mallocz((size_t)strings_size + 1);
        if (!p)
            return AVERROR(ENOMEM);
        endp = strings + strings_size;
        avio_read(pb, strings, strings_size);
        while (p < endp) {
            while (*p == ' ')
                p++; /* strip out spaces */
            if (p >= endp - 2)
                break;
            token = p;
            p = strchr(p, '=');
            if (!p || p >= endp - 2)
                break;
            *p++ = '\0';
            quote = *p++; /* opening quote character, whatever it is */
            value = p;
            p = strchr(p, quote);
            if (!p || p >= endp)
                break;
            *p++ = '\0';
            av_dlog(s, "NSV NSVf INFO: %s='%s'\n", token, value);
            av_dict_set(&s->metadata, token, value, 0);
        }
        av_free(strings);
    }
    if (url_feof(pb))
        return -1;

    av_dlog(s, "NSV got infos; filepos %"PRId64"\n", avio_tell(pb));

    if (table_entries_used > 0) {
        int i;
        nsv->index_entries = table_entries_used;
        /* guard the multiplication below against overflow */
        if ((unsigned)table_entries_used >= UINT_MAX / sizeof(uint32_t))
            return -1;
        nsv->nsvs_file_offset = av_malloc((unsigned)table_entries_used * sizeof(uint32_t));
        if (!nsv->nsvs_file_offset)
            return AVERROR(ENOMEM);

        /* stored offsets are relative to the end of the NSVf chunk */
        for (i = 0; i < table_entries_used; i++)
            nsv->nsvs_file_offset[i] = avio_rl32(pb) + size;

        /* optional TOC2 extension: one timestamp per index entry */
        if (table_entries > table_entries_used &&
            avio_rl32(pb) == MKTAG('T','O','C','2')) {
            nsv->nsvs_timestamps = av_malloc((unsigned)table_entries_used*sizeof(uint32_t));
            if (!nsv->nsvs_timestamps)
                return AVERROR(ENOMEM);
            for (i = 0; i < table_entries_used; i++) {
                nsv->nsvs_timestamps[i] = avio_rl32(pb);
            }
        }
    }

    av_dlog(s, "NSV got index; filepos %"PRId64"\n", avio_tell(pb));

    avio_seek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */

    if (url_feof(pb))
        return -1;
    nsv->state = NSV_HAS_READ_NSVF;
    return 0;
}
 
/**
 * Parse an NSVs chunk header (codec fourccs, frame size, framerate code,
 * a/v sync offset).  On the first NSVs chunk the AVStreams are created
 * and the NSVf seek index (if any) is published; on later chunks the
 * parameters are only sanity-checked against the first chunk's.
 *
 * Fix: nsv->vheight used to be assigned from (and compared against)
 * vwidth — a copy-paste typo; it now uses vheight.
 */
static int nsv_parse_NSVs_header(AVFormatContext *s)
{
    NSVContext *nsv = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t vtag, atag;
    uint16_t vwidth, vheight;
    AVRational framerate;
    int i;
    AVStream *st;
    NSVStream *nst;
    av_dlog(s, "%s()\n", __FUNCTION__);

    vtag    = avio_rl32(pb);
    atag    = avio_rl32(pb);
    vwidth  = avio_rl16(pb);
    vheight = avio_rl16(pb);
    i       = avio_r8(pb); /* framerate code */

    av_dlog(s, "NSV NSVs framerate code %2x\n", i);
    if (i & 0x80) { /* odd way of giving native framerates from docs */
        int t = (i & 0x7F) >> 2;
        if (t < 16) framerate = (AVRational){1, t + 1};
        else        framerate = (AVRational){t - 15, 1};

        if (i & 1) { /* NTSC-style 1000/1001 correction */
            framerate.num *= 1000;
            framerate.den *= 1001;
        }

        if      ((i & 3) == 3) framerate.num *= 24;
        else if ((i & 3) == 2) framerate.num *= 25;
        else                   framerate.num *= 30;
    } else
        framerate = (AVRational){i, 1};

    nsv->avsync    = avio_rl16(pb);
    nsv->framerate = framerate;

    print_tag("NSV NSVs vtag", vtag, 0);
    print_tag("NSV NSVs atag", atag, 0);
    av_dlog(s, "NSV NSVs vsize %dx%d\n", vwidth, vheight);

    /* XXX change to ap != NULL ? */
    if (s->nb_streams == 0) { /* streams not yet published, let's do that */
        nsv->vtag    = vtag;
        nsv->atag    = atag;
        nsv->vwidth  = vwidth;
        nsv->vheight = vheight; /* was mistakenly assigned vwidth */
        if (vtag != T_NONE) {
            int i;
            st = avformat_new_stream(s, NULL);
            if (!st)
                goto fail;

            st->id = NSV_ST_VIDEO;
            nst = av_mallocz(sizeof(NSVStream));
            if (!nst)
                goto fail;
            st->priv_data = nst;
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_tag  = vtag;
            st->codec->codec_id   = ff_codec_get_id(nsv_codec_video_tags, vtag);
            st->codec->width      = vwidth;
            st->codec->height     = vheight;
            st->codec->bits_per_coded_sample = 24; /* depth XXX */

            avpriv_set_pts_info(st, 64, framerate.den, framerate.num);
            st->start_time = 0;
            st->duration   = av_rescale(nsv->duration, framerate.num, 1000 * framerate.den);

            /* publish the seek index read from the NSVf header, if any */
            for (i = 0; i < nsv->index_entries; i++) {
                if (nsv->nsvs_timestamps) {
                    av_add_index_entry(st, nsv->nsvs_file_offset[i], nsv->nsvs_timestamps[i],
                                       0, 0, AVINDEX_KEYFRAME);
                } else {
                    /* no TOC2 timestamps: spread entries evenly over the duration */
                    int64_t ts = av_rescale(i * nsv->duration / nsv->index_entries,
                                            framerate.num, 1000 * framerate.den);
                    av_add_index_entry(st, nsv->nsvs_file_offset[i], ts, 0, 0, AVINDEX_KEYFRAME);
                }
            }
        }
        if (atag != T_NONE) {
            st = avformat_new_stream(s, NULL);
            if (!st)
                goto fail;

            st->id = NSV_ST_AUDIO;
            nst = av_mallocz(sizeof(NSVStream));
            if (!nst)
                goto fail;
            st->priv_data = nst;
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_tag  = atag;
            st->codec->codec_id   = ff_codec_get_id(nsv_codec_audio_tags, atag);

            st->need_parsing = AVSTREAM_PARSE_FULL; /* for PCM we will read a chunk later and put correct info */

            /* set timebase to common denominator of ms and framerate */
            avpriv_set_pts_info(st, 64, 1, framerate.num * 1000);
            st->start_time = 0;
            st->duration   = (int64_t)nsv->duration * framerate.num;
        }
    } else {
        /* fixed: vheight used to be compared against vwidth */
        if (nsv->vtag != vtag || nsv->atag != atag ||
            nsv->vwidth != vwidth || nsv->vheight != vheight) {
            av_dlog(s, "NSV NSVs header values differ from the first one!!!\n");
            //return -1;
        }
    }

    nsv->state = NSV_HAS_READ_NSVS;
    return 0;
fail:
    /* XXX */
    nsv->state = NSV_UNSYNC;
    return -1;
}
 
/**
 * Read the NSV header(s): resync until the optional NSVf file header and
 * the first mandatory NSVs chunk header (which creates the AVStreams)
 * have been parsed, then pre-read the first data chunk to refine the
 * stream parameters.  Returns 0 on success, a negative value on failure.
 *
 * Fix: on failure the index tables allocated by nsv_parse_NSVf_header()
 * used to leak, since read_close() is not invoked when the header parser
 * fails; they are now freed on every error path.
 */
static int nsv_read_header(AVFormatContext *s)
{
    NSVContext *nsv = s->priv_data;
    int i, err;

    av_dlog(s, "%s()\n", __FUNCTION__);
    av_dlog(s, "filename '%s'\n", s->filename);

    nsv->state = NSV_UNSYNC;
    nsv->ahead[0].data = nsv->ahead[1].data = NULL;

    for (i = 0; i < NSV_MAX_RESYNC_TRIES; i++) {
        if (nsv_resync(s) < 0) {
            err = -1;
            goto fail;
        }
        if (nsv->state == NSV_FOUND_NSVF) {
            err = nsv_parse_NSVf_header(s);
            if (err < 0)
                goto fail;
        }
        /* we need the first NSVs also... */
        if (nsv->state == NSV_FOUND_NSVS) {
            err = nsv_parse_NSVs_header(s);
            if (err < 0)
                goto fail;
            break; /* we just want the first one */
        }
    }
    if (s->nb_streams < 1) { /* no luck so far */
        err = -1;
        goto fail;
    }

    /* now read the first chunk, so we can attempt to decode more info */
    err = nsv_read_chunk(s, 1);
    if (err < 0)
        goto fail;

    av_dlog(s, "parsed header\n");
    return 0;

fail:
    /* free the NSVf index tables; read_close() will not be called for a
     * demuxer whose read_header() failed, so they would leak otherwise */
    av_freep(&nsv->nsvs_file_offset);
    av_freep(&nsv->nsvs_timestamps);
    return err;
}
 
/**
 * Read one NSV data chunk and stash its video and/or audio payload into
 * the two-slot read-ahead cache nsv->ahead[] (consumed by
 * nsv_read_packet()).  When fill_header is non-zero, the per-chunk raw
 * PCM parameters are also written into the audio stream's codec context.
 * Returns 0 on success (also when the cache was already full), < 0 on error.
 */
static int nsv_read_chunk(AVFormatContext *s, int fill_header)
{
    NSVContext *nsv = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st[2] = {NULL, NULL};
    NSVStream *nst;
    AVPacket *pkt;
    int i, err = 0;
    uint8_t auxcount; /* number of aux metadata, also 4 bits of vsize */
    uint32_t vsize;
    uint16_t asize;
    uint16_t auxsize;

    av_dlog(s, "%s(%d)\n", __FUNCTION__, fill_header);

    if (nsv->ahead[0].data || nsv->ahead[1].data)
        return 0; //-1; /* hey! eat what you've in your plate first! */

null_chunk_retry:
    if (url_feof(pb))
        return -1;

    /* resync until we sit on an NSVs header or a BEEF sync marker */
    for (i = 0; i < NSV_MAX_RESYNC_TRIES && nsv->state < NSV_FOUND_NSVS && !err; i++)
        err = nsv_resync(s);
    if (err < 0)
        return err;
    if (nsv->state == NSV_FOUND_NSVS)
        err = nsv_parse_NSVs_header(s);
    if (err < 0)
        return err;
    if (nsv->state != NSV_HAS_READ_NSVS && nsv->state != NSV_FOUND_BEEF)
        return -1;

    /* chunk payload sizes: the video size is 20 bits, split between the
     * high nibble of the first byte and a following 16-bit value */
    auxcount = avio_r8(pb);
    vsize = avio_rl16(pb);
    asize = avio_rl16(pb);
    vsize = (vsize << 4) | (auxcount >> 4);
    auxcount &= 0x0f;
    av_dlog(s, "NSV CHUNK %d aux, %u bytes video, %d bytes audio\n", auxcount, vsize, asize);
    /* skip aux stuff */
    for (i = 0; i < auxcount; i++) {
        uint32_t av_unused auxtag;
        auxsize = avio_rl16(pb);
        auxtag = avio_rl32(pb);
        av_dlog(s, "NSV aux data: '%c%c%c%c', %d bytes\n",
                (auxtag & 0x0ff),
                ((auxtag >> 8) & 0x0ff),
                ((auxtag >> 16) & 0x0ff),
                ((auxtag >> 24) & 0x0ff),
                auxsize);
        avio_skip(pb, auxsize);
        /* aux blocks are counted inside vsize, so deduct them */
        vsize -= auxsize + sizeof(uint16_t) + sizeof(uint32_t); /* that's becoming braindead */
    }

    if (url_feof(pb))
        return -1;
    if (!vsize && !asize) {
        /* empty chunk: resync and try the next one */
        nsv->state = NSV_UNSYNC;
        goto null_chunk_retry;
    }

    /* map back streams to v,a */
    if (s->nb_streams > 0)
        st[s->streams[0]->id] = s->streams[0];
    if (s->nb_streams > 1)
        st[s->streams[1]->id] = s->streams[1];

    if (vsize && st[NSV_ST_VIDEO]) {
        nst = st[NSV_ST_VIDEO]->priv_data;
        pkt = &nsv->ahead[NSV_ST_VIDEO];
        av_get_packet(pb, pkt, vsize);
        pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO;
        pkt->dts = nst->frame_offset;
        pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
        for (i = 0; i < FFMIN(8, vsize); i++)
            av_dlog(s, "NSV video: [%d] = %02x\n", i, pkt->data[i]);
    }
    /* the video frame counter advances once per chunk, even for chunks
     * that carry no video payload */
    if(st[NSV_ST_VIDEO])
        ((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset++;

    if (asize && st[NSV_ST_AUDIO]) {
        nst = st[NSV_ST_AUDIO]->priv_data;
        pkt = &nsv->ahead[NSV_ST_AUDIO];
        /* read raw audio specific header on the first audio chunk... */
        /* on ALL audio chunks ?? seems so! */
        if (asize && st[NSV_ST_AUDIO]->codec->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) {
            uint8_t bps;
            uint8_t channels;
            uint16_t samplerate;
            bps = avio_r8(pb);
            channels = avio_r8(pb);
            samplerate = avio_rl16(pb);
            asize-=4; /* the 4-byte PCM header is counted inside asize */
            av_dlog(s, "NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate);
            if (fill_header) {
                st[NSV_ST_AUDIO]->need_parsing = AVSTREAM_PARSE_NONE; /* we know everything */
                if (bps != 16) {
                    av_dlog(s, "NSV AUDIO bit/sample != 16 (%d)!!!\n", bps);
                }
                if(channels)
                    bps /= channels; // ???
                else
                    av_log(s, AV_LOG_WARNING, "Channels is 0\n");
                if (bps == 8)
                    st[NSV_ST_AUDIO]->codec->codec_id = AV_CODEC_ID_PCM_U8;
                samplerate /= 4;/* UGH ??? XXX */
                channels = 1;
                st[NSV_ST_AUDIO]->codec->channels = channels;
                st[NSV_ST_AUDIO]->codec->sample_rate = samplerate;
                av_dlog(s, "NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate);
            }
        }
        av_get_packet(pb, pkt, asize);
        pkt->stream_index = st[NSV_ST_AUDIO]->index;//NSV_ST_AUDIO;
        pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
        if( nsv->state == NSV_HAS_READ_NSVS && st[NSV_ST_VIDEO] ) {
            /* on a nsvs frame we have new information on a/v sync */
            /* dts derived from the current video frame number plus the
             * avsync offset, expressed in the audio timebase of
             * 1/(framerate.num*1000) set up by nsv_parse_NSVs_header() */
            pkt->dts = (((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset-1);
            pkt->dts *= (int64_t)1000 * nsv->framerate.den;
            pkt->dts += (int64_t)nsv->avsync * nsv->framerate.num;
            av_dlog(s, "NSV AUDIO: sync:%d, dts:%"PRId64, nsv->avsync, pkt->dts);
        }
        nst->frame_offset++;
    }

    nsv->state = NSV_UNSYNC;
    return 0;
}
 
 
/* Hand one packet to the caller, refilling the two-slot read-ahead cache
 * from the next chunk when both slots are empty.  Returns the packet
 * size, or a negative value when nothing could be read. */
static int nsv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    NSVContext *nsv = s->priv_data;
    int slot, ret = 0;

    av_dlog(s, "%s()\n", __FUNCTION__);

    /* in case we don't already have something to eat ... */
    if (!nsv->ahead[0].data && !nsv->ahead[1].data)
        ret = nsv_read_chunk(s, 0);
    if (ret < 0)
        return ret;

    /* now pick one of the plates */
    for (slot = 0; slot < 2; slot++) {
        if (!nsv->ahead[slot].data)
            continue;
        av_dlog(s, "%s: using cached packet[%d]\n", __FUNCTION__, slot);
        /* avoid the cost of new_packet + memcpy(->data) */
        memcpy(pkt, &nsv->ahead[slot], sizeof(AVPacket));
        nsv->ahead[slot].data = NULL; /* we ate that one */
        return pkt->size;
    }

    /* this restaurant is not approvisionned :^] */
    return -1;
}
 
/* Seek by looking up TIMESTAMP in the stream's index (built from the
 * NSVf table), repositioning the byte stream and forcing a resync. */
static int nsv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    NSVContext *nsv = s->priv_data;
    AVStream   *st  = s->streams[stream_index];
    NSVStream  *nst = st->priv_data;
    int idx = av_index_search_timestamp(st, timestamp, flags);

    if (idx < 0)
        return -1;
    if (avio_seek(s->pb, st->index_entries[idx].pos, SEEK_SET) < 0)
        return -1;

    /* restart the frame counter from the index entry's timestamp */
    nst->frame_offset = st->index_entries[idx].timestamp;
    nsv->state = NSV_UNSYNC;
    return 0;
}
 
/**
 * Free everything the demuxer allocated: the seek-index tables read from
 * the NSVf header and any packets still sitting in the read-ahead cache.
 * Per-stream NSVStream contexts live in st->priv_data and are released
 * by the generic libavformat stream teardown.
 *
 * The old `#if 0` cleanup block (which referenced the long-removed
 * st->codec->palctrl field) was dead code and has been dropped.
 */
static int nsv_read_close(AVFormatContext *s)
{
    NSVContext *nsv = s->priv_data;

    av_freep(&nsv->nsvs_file_offset);
    av_freep(&nsv->nsvs_timestamps);
    if (nsv->ahead[0].data)
        av_free_packet(&nsv->ahead[0]);
    if (nsv->ahead[1].data)
        av_free_packet(&nsv->ahead[1]);

    return 0;
}
 
/* Content probe: accept immediately when the buffer starts with an NSVf
 * or NSVs signature; otherwise scan for an embedded NSVs chunk (streaming
 * servers rarely start on a chunk boundary) and, failing that, fall back
 * on the file extension. */
static int nsv_probe(AVProbeData *p)
{
    int pos, score = 0;

    av_dlog(NULL, "nsv_probe(), buf_size %d\n", p->buf_size);
    /* check file header */
    /* streamed files might not have any header */
    if (p->buf[0] == 'N' && p->buf[1] == 'S' &&
        p->buf[2] == 'V' && (p->buf[3] == 'f' || p->buf[3] == 's'))
        return AVPROBE_SCORE_MAX;
    /* XXX: do streamed files always start at chunk boundary ?? */
    /* or do we need to search NSVs in the byte stream ? */
    /* seems the servers don't bother starting clean chunks... */
    /* sometimes even the first header is at 9KB or something :^) */
    for (pos = 1; pos < p->buf_size - 3; pos++) {
        int vsize, asize, offset;

        if (AV_RL32(p->buf + pos) != AV_RL32("NSVs"))
            continue;
        /* Get the chunk size and check if at the end we are getting 0xBEEF */
        vsize  = AV_RL24(p->buf + pos + 19) >> 4;
        asize  = AV_RL16(p->buf + pos + 22);
        offset = pos + 23 + asize + vsize + 1;
        if (offset <= p->buf_size - 2 && AV_RL16(p->buf + offset) == 0xBEEF)
            return 4 * AVPROBE_SCORE_MAX / 5;
        score = AVPROBE_SCORE_MAX / 5;
    }
    /* so we'll have more luck on extension... */
    if (av_match_ext(p->filename, "nsv"))
        return AVPROBE_SCORE_EXTENSION;
    /* FIXME: add mime-type check */
    return score;
}
 
/* Demuxer registration entry for Nullsoft Streaming Video (.nsv). */
AVInputFormat ff_nsv_demuxer = {
    .name           = "nsv",
    .long_name      = NULL_IF_CONFIG_SMALL("Nullsoft Streaming Video"),
    .priv_data_size = sizeof(NSVContext),
    .read_probe     = nsv_probe,
    .read_header    = nsv_read_header,
    .read_packet    = nsv_read_packet,
    .read_close     = nsv_read_close,
    .read_seek      = nsv_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/nullenc.c
0,0 → 1,36
/*
* RAW null muxer
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
 
/* Discard one packet; the null muxer always reports success. */
static int null_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    return 0;
}
 
/* Registration entry for the "null" output format: accepts rawvideo and
 * native-endian PCM and discards every packet (AVFMT_NOFILE: no output
 * file is ever opened). */
AVOutputFormat ff_null_muxer = {
    .name         = "null",
    .long_name    = NULL_IF_CONFIG_SMALL("raw null video"),
    .audio_codec  = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
    .video_codec  = AV_CODEC_ID_RAWVIDEO,
    .write_packet = null_write_packet,
    .flags        = AVFMT_NOFILE | AVFMT_NOTIMESTAMPS | AVFMT_RAWPICTURE,
};
/contrib/sdk/sources/ffmpeg/libavformat/nut.c
0,0 → 1,289
/*
* nut
* Copyright (c) 2004-2007 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/mathematics.h"
#include "libavutil/tree.h"
#include "nut.h"
#include "riff.h"
#include "internal.h"
 
/* Subtitle codec <-> fourcc mapping used by the NUT (de)muxer. */
const AVCodecTag ff_nut_subtitle_tags[] = {
    { AV_CODEC_ID_TEXT,         MKTAG('U', 'T', 'F', '8') },
    { AV_CODEC_ID_SSA,          MKTAG('S', 'S', 'A', 0  ) },
    { AV_CODEC_ID_DVD_SUBTITLE, MKTAG('D', 'V', 'D', 'S') },
    { AV_CODEC_ID_DVB_SUBTITLE, MKTAG('D', 'V', 'B', 'S') },
    { AV_CODEC_ID_DVB_TELETEXT, MKTAG('D', 'V', 'B', 'T') },
    { AV_CODEC_ID_NONE,         0                         }
};
 
/* Data-stream codec <-> fourcc mapping used by the NUT (de)muxer. */
const AVCodecTag ff_nut_data_tags[] = {
    { AV_CODEC_ID_TEXT, MKTAG('U', 'T', 'F', '8') },
    { AV_CODEC_ID_NONE, 0 }
};
 
/* Video codec <-> fourcc mapping for NUT.  Nearly all entries are
 * rawvideo tags that encode the pixel layout in the fourcc itself; pairs
 * with byte-reversed tags distinguish the little- and big-endian variant
 * of the same layout.
 * NOTE(review): a few rows are exact duplicates ('422P', '440P', '444P'
 * and the ('Y','1',0,16) pair) — harmless for tag lookup, but worth
 * confirming they are intentional. */
const AVCodecTag ff_nut_video_tags[] = {
    { AV_CODEC_ID_VP9,      MKTAG('V', 'P', '9', '0') },
    /* packed RGB/BGR formats */
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', 15 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', 'G', 'R', 15 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', 16 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', 'G', 'R', 16 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(15 , 'B', 'G', 'R') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(15 , 'R', 'G', 'B') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(16 , 'B', 'G', 'R') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(16 , 'R', 'G', 'B') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', 12 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', 'G', 'R', 12 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(12 , 'B', 'G', 'R') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(12 , 'R', 'G', 'B') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', 'A') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', 0  ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', 'G', 'R', 'A') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', 'G', 'R', 0  ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('A', 'B', 'G', 'R') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG( 0 , 'B', 'G', 'R') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('A', 'R', 'G', 'B') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG( 0 , 'R', 'G', 'B') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', 24 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', 'G', 'R', 24 ) },
    /* planar YUV formats */
    { AV_CODEC_ID_RAWVIDEO, MKTAG('4', '1', '1', 'P') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('4', '2', '2', 'P') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('4', '2', '2', 'P') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('4', '4', '0', 'P') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('4', '4', '0', 'P') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('4', '4', '4', 'P') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('4', '4', '4', 'P') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', '1', 'W', '0') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', '0', 'W', '1') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', 'G', 'R', 8  ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', 8  ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', 'G', 'R', 4  ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', 4  ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', '4', 'B', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', '4', 'B', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', 'G', 'R', 48 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', 48 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(48 , 'B', 'G', 'R') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(48 , 'R', 'G', 'B') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('B', 'R', 'A', 64 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'B', 'A', 64 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(64 , 'B', 'R', 'A') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(64 , 'R', 'B', 'A') },
    /* high-bit-depth YUV ('Y','3',subsampling,bits and reversed twins) */
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 11 , 10 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(10 , 11 , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 10 , 10 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(10 , 10 , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 0  , 10 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(10 , 0  , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 11 , 12 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(12 , 11 , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 10 , 12 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(12 , 10 , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 0  , 12 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(12 , 0  , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 11 , 14 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(14 , 11 , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 10 , 14 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(14 , 10 , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 0  , 14 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(14 , 0  , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '1', 0  , 16 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(16 , 0  , '1', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 11 , 16 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(16 , 11 , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 10 , 16 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(16 , 10 , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '3', 0  , 16 ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(16 , 0  , '3', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 11 , 8  ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 10 , 8  ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 0  , 8  ) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '2', 0  , 8  ) },

    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '1', 0, 9) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(9, 0, '1', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 11, 9) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(9, 11, '4', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 10, 9) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(9, 10, '4', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 0, 9) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(9, 0, '4', 'Y') },

    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '1', 0, 10) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(10, 0, '1', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 11, 10) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(10, 11, '4', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 10, 10) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(10, 10, '4', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 0, 10) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(10, 0, '4', 'Y') },

    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '1', 0, 16) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(16, 0, '1', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 11, 16) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(16, 11, '4', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 10, 16) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(16, 10, '4', 'Y') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', 0, 16) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(16, 0, '4', 'Y') },

    /* planar GBR variants */
    { AV_CODEC_ID_RAWVIDEO, MKTAG('G', '3', 0, 8) },

    { AV_CODEC_ID_RAWVIDEO, MKTAG('G', '3', 0, 9) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG( 9, 0, '3', 'G') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('G', '3', 0, 10) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(10, 0, '3', 'G') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('G', '3', 0, 12) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(12, 0, '3', 'G') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('G', '3', 0, 14) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(14, 0, '3', 'G') },
    { AV_CODEC_ID_RAWVIDEO, MKTAG('G', '3', 0, 16) },
    { AV_CODEC_ID_RAWVIDEO, MKTAG(16, 0, '3', 'G') },

    { AV_CODEC_ID_NONE, 0 }
};
 
/* Additional audio fourccs recognized alongside the native NUT tags. */
static const AVCodecTag nut_audio_extra_tags[] = {
    { AV_CODEC_ID_PCM_ALAW,  MKTAG('A', 'L', 'A', 'W') },
    { AV_CODEC_ID_PCM_MULAW, MKTAG('U', 'L', 'A', 'W') },
    { AV_CODEC_ID_MP3,       MKTAG('M', 'P', '3', ' ') },
    { AV_CODEC_ID_NONE,      0 }
};
 
/* Native NUT PCM audio tags.  The fourcc spells out the sample format
 * (P=PCM, S/U/F=signed/unsigned/float, D=interleaved, P suffix=planar,
 * plus the bit depth); the byte-reversed twin of each tag is the
 * big-endian variant. */
const AVCodecTag ff_nut_audio_tags[] = {
    { AV_CODEC_ID_PCM_F32BE,        MKTAG(32 , 'D', 'F', 'P') },
    { AV_CODEC_ID_PCM_F32LE,        MKTAG('P', 'F', 'D', 32 ) },
    { AV_CODEC_ID_PCM_F64BE,        MKTAG(64 , 'D', 'F', 'P') },
    { AV_CODEC_ID_PCM_F64LE,        MKTAG('P', 'F', 'D', 64 ) },
    { AV_CODEC_ID_PCM_S16BE,        MKTAG(16 , 'D', 'S', 'P') },
    { AV_CODEC_ID_PCM_S16LE,        MKTAG('P', 'S', 'D', 16 ) },
    { AV_CODEC_ID_PCM_S24BE,        MKTAG(24 , 'D', 'S', 'P') },
    { AV_CODEC_ID_PCM_S24LE,        MKTAG('P', 'S', 'D', 24 ) },
    { AV_CODEC_ID_PCM_S32BE,        MKTAG(32 , 'D', 'S', 'P') },
    { AV_CODEC_ID_PCM_S32LE,        MKTAG('P', 'S', 'D', 32 ) },
    { AV_CODEC_ID_PCM_S8,           MKTAG('P', 'S', 'D', 8  ) },
    { AV_CODEC_ID_PCM_U16BE,        MKTAG(16 , 'D', 'U', 'P') },
    { AV_CODEC_ID_PCM_U16LE,        MKTAG('P', 'U', 'D', 16 ) },
    { AV_CODEC_ID_PCM_U24BE,        MKTAG(24 , 'D', 'U', 'P') },
    { AV_CODEC_ID_PCM_U24LE,        MKTAG('P', 'U', 'D', 24 ) },
    { AV_CODEC_ID_PCM_U32BE,        MKTAG(32 , 'D', 'U', 'P') },
    { AV_CODEC_ID_PCM_U32LE,        MKTAG('P', 'U', 'D', 32 ) },
    { AV_CODEC_ID_PCM_U8,           MKTAG('P', 'U', 'D', 8  ) },
    { AV_CODEC_ID_PCM_S8_PLANAR,    MKTAG('P', 'S', 'P', 8  ) },
    { AV_CODEC_ID_PCM_S16BE_PLANAR, MKTAG(16 , 'P', 'S', 'P') },
    { AV_CODEC_ID_PCM_S16LE_PLANAR, MKTAG('P', 'S', 'P', 16 ) },
    { AV_CODEC_ID_PCM_S24LE_PLANAR, MKTAG('P', 'S', 'P', 24 ) },
    { AV_CODEC_ID_PCM_S32LE_PLANAR, MKTAG('P', 'S', 'P', 32 ) },
    { AV_CODEC_ID_NONE,             0 }
};
 
/* NULL-terminated list of every tag table consulted when mapping NUT
 * fourccs to codec IDs (native NUT tables plus BMP/WAV compatibility). */
const AVCodecTag * const ff_nut_codec_tags[] = {
    ff_nut_video_tags, ff_nut_audio_tags, ff_nut_subtitle_tags,
    ff_codec_bmp_tags, ff_codec_wav_tags, nut_audio_extra_tags, ff_nut_data_tags, 0
};
 
/* Reset every stream's last_pts to VAL (given in TIME_BASE units),
 * rescaled into each stream's own time base, rounding down. */
void ff_nut_reset_ts(NUTContext *nut, AVRational time_base, int64_t val)
{
    int i;

    for (i = 0; i < nut->avf->nb_streams; i++) {
        StreamContext *stream = &nut->stream[i];

        stream->last_pts = av_rescale_rnd(val,
                                          time_base.num * (int64_t)stream->time_base->den,
                                          time_base.den * (int64_t)stream->time_base->num,
                                          AV_ROUND_DOWN);
    }
}
 
/* Expand a timestamp stored with only its low msb_pts_shift bits ("lsb")
 * back to a full 64-bit value, picking the candidate closest to the
 * stream's last full pts (within +/- half the wrap range). */
int64_t ff_lsb2full(StreamContext *stream, int64_t lsb)
{
    const int64_t mask  = (1ULL << stream->msb_pts_shift) - 1;
    const int64_t delta = stream->last_pts - mask / 2;

    return ((lsb - delta) & mask) + delta;
}
 
/* Ordering predicate for the syncpoint tree, comparing by file position.
 * The shift-and-subtract form collapses the 64-bit signed difference to
 * a correctly-signed int without overflowing the int return type:
 * (x >> 32) is >= 0 for non-negative x and <= -1 for negative x, so the
 * result is 0 only when the positions are equal. */
int ff_nut_sp_pos_cmp(const Syncpoint *a, const Syncpoint *b)
{
    return ((a->pos - b->pos) >> 32) - ((b->pos - a->pos) >> 32);
}
 
/* Ordering predicate for syncpoints by timestamp; same overflow-safe
 * shift-and-subtract sign extraction as ff_nut_sp_pos_cmp(). */
int ff_nut_sp_pts_cmp(const Syncpoint *a, const Syncpoint *b)
{
    return ((a->ts - b->ts) >> 32) - ((b->ts - a->ts) >> 32);
}
 
/**
 * Insert a syncpoint (file position, back pointer, timestamp) into the
 * demuxer's position-ordered search tree.
 * Returns 0 on success or AVERROR(ENOMEM).
 */
int ff_nut_add_sp(NUTContext *nut, int64_t pos, int64_t back_ptr, int64_t ts)
{
    Syncpoint *sp = av_mallocz(sizeof(Syncpoint));
    struct AVTreeNode *node = av_tree_node_alloc();

    if (!sp || !node) {
        av_freep(&sp);
        av_freep(&node);
        return AVERROR(ENOMEM);
    }

    nut->sp_count++;

    sp->pos      = pos;
    sp->back_ptr = back_ptr;
    sp->ts       = ts;
    av_tree_insert(&nut->syncpoints, sp, (void *) ff_nut_sp_pos_cmp, &node);
    /* NOTE(review): when av_tree_insert() leaves *node non-NULL, the node
     * was not consumed (an element comparing equal was already in the
     * tree — cf. libavutil/tree.h), so both allocations are released.
     * sp_count was still incremented above; verify that is intended. */
    if (node) {
        av_free(sp);
        av_free(node);
    }

    return 0;
}
 
/* av_tree_enumerate() callback: frees one stored Syncpoint element. */
static int enu_free(void *opaque, void *elem)
{
    av_free(elem);
    return 0;
}
 
/* Free every stored Syncpoint, then destroy the tree nodes themselves. */
void ff_nut_free_sp(NUTContext *nut)
{
    av_tree_enumerate(nut->syncpoints, NULL, NULL, enu_free);
    av_tree_destroy(nut->syncpoints);
}
 
/* Mapping between textual disposition names (as stored in NUT info
 * packets) and AV_DISPOSITION_* bits; terminated by an empty string. */
const Dispositions ff_nut_dispositions[] = {
    { "default",  AV_DISPOSITION_DEFAULT  },
    { "dub",      AV_DISPOSITION_DUB      },
    { "original", AV_DISPOSITION_ORIGINAL },
    { "comment",  AV_DISPOSITION_COMMENT  },
    { "lyrics",   AV_DISPOSITION_LYRICS   },
    { "karaoke",  AV_DISPOSITION_KARAOKE  },
    { "",         0                       }
};
 
/* Translation table between NUT metadata key names and FFmpeg's generic
 * metadata keys; terminated by a zeroed entry. */
const AVMetadataConv ff_nut_metadata_conv[] = {
    { "Author",         "artist"      },
    { "X-CreationTime", "date"        },
    { "CreationTime",   "date"        },
    { "SourceFilename", "filename"    },
    { "X-Language",     "language"    },
    { "X-Disposition",  "disposition" },
    { "X-Replaces",     "replaces"    },
    { "X-Depends",      "depends"     },
    { "X-Uses",         "uses"        },
    { "X-UsesFont",     "usesfont"    },
    { 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/nut.h
0,0 → 1,132
/*
* "NUT" Container Format (de)muxer
* Copyright (c) 2006 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_NUT_H
#define AVFORMAT_NUT_H
 
#include "avformat.h"
#include "internal.h"
#include "metadata.h"
 
/* 64-bit packet startcodes: the top 16 bits are 'N' plus a class letter
 * (M/S/K/X/I), so a byte scan for 'N' cheaply finds candidates; the low
 * 48 bits are fixed pseudo-random values from the NUT spec. */
#define MAIN_STARTCODE      (0x7A561F5F04ADULL + (((uint64_t)('N'<<8) + 'M')<<48))
#define STREAM_STARTCODE    (0x11405BF2F9DBULL + (((uint64_t)('N'<<8) + 'S')<<48))
#define SYNCPOINT_STARTCODE (0xE4ADEECA4569ULL + (((uint64_t)('N'<<8) + 'K')<<48))
#define INDEX_STARTCODE     (0xDD672F23E64EULL + (((uint64_t)('N'<<8) + 'X')<<48))
#define INFO_STARTCODE      (0xAB68B596BA78ULL + (((uint64_t)('N'<<8) + 'I')<<48))

/* File magic written/expected at the very start of a NUT file. */
#define ID_STRING "nut/multimedia container\0"

/* Maximum distance between syncpoints/checksummed frames. */
#define MAX_DISTANCE (1024*32-1)

#define NUT_VERSION 3
 
typedef enum{
FLAG_KEY = 1, ///<if set, frame is keyframe
FLAG_EOR = 2, ///<if set, stream has no relevance on presentation. (EOR)
FLAG_CODED_PTS = 8, ///<if set, coded_pts is in the frame header
FLAG_STREAM_ID = 16, ///<if set, stream_id is coded in the frame header
FLAG_SIZE_MSB = 32, ///<if set, data_size_msb is at frame header, otherwise data_size_msb is 0
FLAG_CHECKSUM = 64, ///<if set, the frame header contains a checksum
FLAG_RESERVED = 128, ///<if set, reserved_count is coded in the frame header
FLAG_HEADER_IDX =1024, ///<If set, header_idx is coded in the frame header.
FLAG_MATCH_TIME =2048, ///<If set, match_time_delta is coded in the frame header
FLAG_CODED =4096, ///<if set, coded_flags are stored in the frame header
FLAG_INVALID =8192, ///<if set, frame_code is invalid
} Flag;
 
/* One syncpoint: a seekable position in the file with its back pointer
 * and timestamp; kept in NUTContext.syncpoints (an AVTree). */
typedef struct Syncpoint {
    uint64_t pos;       ///< byte offset of the syncpoint in the file
    uint64_t back_ptr;  ///< position of the earliest frame needed to decode from here
//    uint64_t global_key_pts;
    int64_t  ts;        ///< timestamp of the syncpoint
} Syncpoint;
 
/* Decoded entry of the 256-slot frame-code table from the main header;
 * each frame's first byte indexes into this table. */
typedef struct FrameCode {
    uint16_t flags;          ///< combination of Flag bits
    uint8_t  stream_id;      ///< default stream for this code
    uint16_t size_mul;       ///< multiplier for data_size_msb
    uint16_t size_lsb;       ///< low part of the frame size
    int16_t  pts_delta;      ///< implicit pts increment for this code
    uint8_t  reserved_count; ///< number of reserved varints after the header
    uint8_t  header_idx;     ///< default elision header index
} FrameCode;
 
/* Per-stream (de)muxing state. */
typedef struct StreamContext {
    int last_flags;           ///< flags of the most recent frame of this stream
    int skip_until_key_frame; ///< set after a seek until the next keyframe
    int64_t last_pts;         ///< pts of the most recent frame (basis for lsb pts coding)
    int time_base_id;         ///< index into NUTContext.time_base
    AVRational *time_base;    ///< resolved pointer into NUTContext.time_base
    int msb_pts_shift;        ///< number of pts low bits coded per frame
    int max_pts_distance;     ///< max pts jump allowed without a checksum
    int decode_delay;         //FIXME duplicate of has_b_frames
    int64_t *keyframe_pts;    ///< per-syncpoint keyframe pts table (muxer)
} StreamContext;
 
/* Per-chapter state: the time base its start/length are expressed in. */
typedef struct ChapterContext {
    AVRational *time_base;
} ChapterContext;
 
/* Top-level (de)muxer state, stored in AVFormatContext.priv_data. */
typedef struct NUTContext {
    AVFormatContext *avf;       ///< back pointer to the owning format context
//    int written_packet_size;
//    int64_t packet_start;
    FrameCode frame_code[256];  ///< decoded frame-code table
    uint8_t header_len[128];    ///< lengths of the elision headers (index 0 is empty)
    const uint8_t *header[128]; ///< elision header payloads
    uint64_t next_startcode;    ///< stores the next startcode if it has already been parsed but the stream is not seekable
    StreamContext *stream;      ///< one entry per stream
    ChapterContext *chapter;    ///< one entry per chapter
    unsigned int max_distance;  ///< max bytes between syncpoints/checksums
    unsigned int time_base_count; ///< number of entries in time_base
    int64_t last_syncpoint_pos; ///< file position of the last parsed syncpoint
    int header_count;           ///< number of elision headers (incl. the empty one)
    AVRational *time_base;      ///< time-base table from the main header
    struct AVTreeNode *syncpoints; ///< tree of Syncpoint, ordered by position
    int sp_count;               ///< number of syncpoints in the tree
    int64_t max_pts;            ///< highest pts seen (muxer, for the index)
    AVRational *max_pts_tb;     ///< time base of max_pts
} NUTContext;
 
extern const AVCodecTag ff_nut_subtitle_tags[];
extern const AVCodecTag ff_nut_video_tags[];
extern const AVCodecTag ff_nut_audio_tags[];
extern const AVCodecTag ff_nut_data_tags[];
 
extern const AVCodecTag * const ff_nut_codec_tags[];
 
/* Name <-> AV_DISPOSITION_* bit pair; see ff_nut_dispositions[]. */
typedef struct Dispositions {
    char str[9];  ///< disposition name (max 8 chars + NUL)
    int flag;     ///< corresponding AV_DISPOSITION_* bit
} Dispositions;
 
void ff_nut_reset_ts(NUTContext *nut, AVRational time_base, int64_t val);
int64_t ff_lsb2full(StreamContext *stream, int64_t lsb);
int ff_nut_sp_pos_cmp(const Syncpoint *a, const Syncpoint *b);
int ff_nut_sp_pts_cmp(const Syncpoint *a, const Syncpoint *b);
int ff_nut_add_sp(NUTContext *nut, int64_t pos, int64_t back_ptr, int64_t ts);
void ff_nut_free_sp(NUTContext *nut);
 
extern const Dispositions ff_nut_dispositions[];
 
extern const AVMetadataConv ff_nut_metadata_conv[];
 
#endif /* AVFORMAT_NUT_H */
/contrib/sdk/sources/ffmpeg/libavformat/nutdec.c
0,0 → 1,1067
/*
* "NUT" Container Format demuxer
* Copyright (c) 2004-2006 Michael Niedermayer
* Copyright (c) 2003 Alex Beregszaszi
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "libavutil/avassert.h"
#include "libavutil/bswap.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/tree.h"
#include "avio_internal.h"
#include "nut.h"
#include "riff.h"
 
#define NUT_MAX_STREAMS 256 /* arbitrary sanity check value */
 
static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index,
int64_t *pos_arg, int64_t pos_limit);
 
/**
 * Read a length-prefixed string into `string` (at most maxlen-1 chars,
 * always NUL-terminated when maxlen > 0); excess input bytes are drained.
 * @return 0 on success, -1 if the stored string was truncated
 */
static int get_str(AVIOContext *bc, char *string, unsigned int maxlen)
{
    unsigned int len = ffio_read_varlen(bc);

    if (len && maxlen)
        avio_read(bc, string, FFMIN(len, maxlen));

    /* Drain whatever did not fit so the stream stays in sync. */
    for (; len > maxlen; len--)
        avio_r8(bc);

    if (maxlen)
        string[FFMIN(len, maxlen - 1)] = 0;

    return len == maxlen ? -1 : 0;
}
 
/**
 * Read a signed varint: zigzag-decoded from the unsigned varint
 * (odd -> negative, even -> positive).
 */
static int64_t get_s(AVIOContext *bc)
{
    int64_t v = ffio_read_varlen(bc) + 1;

    return (v & 1) ? -(v >> 1) : (v >> 1);
}
 
/**
 * Read a codec tag: a varint length (2 or 4) followed by that many
 * little-endian bytes. Other lengths are rejected.
 * @return the tag, or (uint64_t)-1 on unsupported length
 */
static uint64_t get_fourcc(AVIOContext *bc)
{
    unsigned int len = ffio_read_varlen(bc);

    switch (len) {
    case 2:
        return avio_rl16(bc);
    case 4:
        return avio_rl32(bc);
    default:
        av_log(NULL, AV_LOG_ERROR, "Unsupported fourcc length %d\n", len);
        return -1;
    }
}
 
#ifdef TRACE
/* Debug wrappers: when TRACE is defined, the macros below redirect
 * ffio_read_varlen/get_s/get_fourcc through these logging variants so
 * every value read is printed with its call site. */
static inline uint64_t get_v_trace(AVIOContext *bc, const char *file,
                                   const char *func, int line)
{
    uint64_t v = ffio_read_varlen(bc);

    av_log(NULL, AV_LOG_DEBUG, "get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n",
           v, v, file, func, line);
    return v;
}

static inline int64_t get_s_trace(AVIOContext *bc, const char *file,
                                  const char *func, int line)
{
    int64_t v = get_s(bc);

    av_log(NULL, AV_LOG_DEBUG, "get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n",
           v, v, file, func, line);
    return v;
}

static inline uint64_t get_4cc_trace(AVIOContext *bc, char *file,
                                     char *func, int line)
{
    uint64_t v = get_fourcc(bc);

    av_log(NULL, AV_LOG_DEBUG, "get_fourcc %5"PRId64" / %"PRIX64" in %s %s:%d\n",
           v, v, file, func, line);
    return v;
}
/* NOTE: these redefine the plain readers for all following code. */
#define ffio_read_varlen(bc) get_v_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define get_s(bc) get_s_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define get_fourcc(bc) get_4cc_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif
 
/**
 * Read a packet header that follows a startcode: the payload size varint
 * and, for large packets, a header checksum.
 * Seeds the AVIOContext CRC with the startcode so the checksum covers it,
 * then re-arms checksumming for the payload if requested.
 * @param calculate_checksum nonzero to keep checksumming the payload
 * @return payload size, or -1 if the header checksum of a large packet fails
 */
static int get_packetheader(NUTContext *nut, AVIOContext *bc,
                            int calculate_checksum, uint64_t startcode)
{
    int64_t size;
//    start = avio_tell(bc) - 8;

    /* Fold the 8 startcode bytes (big-endian order) into the CRC seed. */
    startcode = av_be2ne64(startcode);
    startcode = ff_crc04C11DB7_update(0, (uint8_t*) &startcode, 8);

    ffio_init_checksum(bc, ff_crc04C11DB7_update, startcode);
    size = ffio_read_varlen(bc);
    /* Only packets larger than 4096 bytes carry a header checksum. */
    if (size > 4096)
        avio_rb32(bc);
    if (ffio_get_checksum(bc) && size > 4096)
        return -1;

    ffio_init_checksum(bc, calculate_checksum ? ff_crc04C11DB7_update : NULL, 0);

    return size;
}
 
/**
 * Scan forward (optionally from `pos`) for any known NUT startcode.
 * @return the startcode found, or 0 on EOF
 */
static uint64_t find_any_startcode(AVIOContext *bc, int64_t pos)
{
    uint64_t state = 0;

    if (pos >= 0)
        /* Note, this may fail if the stream is not seekable, but that should
         * not matter, as in this case we simply start where we currently are */
        avio_seek(bc, pos, SEEK_SET);

    while (!url_feof(bc)) {
        state = (state << 8) | avio_r8(bc);
        /* All startcodes begin with 'N' in the top byte — cheap filter. */
        if ((state >> 56) != 'N')
            continue;
        if (state == MAIN_STARTCODE      ||
            state == STREAM_STARTCODE    ||
            state == SYNCPOINT_STARTCODE ||
            state == INFO_STARTCODE      ||
            state == INDEX_STARTCODE)
            return state;
    }

    return 0;
}
 
/**
 * Find the given startcode.
 * @param code the startcode
 * @param pos the start position of the search, or -1 if the current position
 * @return the position of the startcode or -1 if not found
 */
static int64_t find_startcode(AVIOContext *bc, uint64_t code, int64_t pos)
{
    while (1) {
        uint64_t found = find_any_startcode(bc, pos);

        if (!found)
            return -1;
        if (found == code)
            return avio_tell(bc) - 8;
        /* Keep scanning from the current position on later iterations. */
        pos = -1;
    }
}
 
/**
 * Probe callback: scores AVPROBE_SCORE_MAX if the main startcode appears
 * anywhere in the probe buffer.
 */
static int nut_probe(AVProbeData *p)
{
    uint64_t window = 0;
    int i;

    for (i = 0; i < p->buf_size; i++) {
        window = window << 8 | p->buf[i];
        if (window == MAIN_STARTCODE)
            return AVPROBE_SCORE_MAX;
    }
    return 0;
}
 
/* Read a varint into `tmp`, validate it with the `check` expression and
 * store it in `dst`; on failure logs and returns AVERROR_INVALIDDATA from
 * the enclosing function (requires `tmp`, `bc` and `s` in scope). */
#define GET_V(dst, check)                                                     \
    do {                                                                      \
        tmp = ffio_read_varlen(bc);                                           \
        if (!(check)) {                                                       \
            av_log(s, AV_LOG_ERROR, "Error " #dst " is (%"PRId64")\n", tmp);  \
            return AVERROR_INVALIDDATA;                                       \
        }                                                                     \
        dst = tmp;                                                            \
    } while (0)
 
/**
 * Skip forward to absolute position `pos` (e.g. past reserved header
 * bytes). If we are already beyond it, rewind and report an error.
 * @return 0 on success, AVERROR_INVALIDDATA if already past `pos`
 */
static int skip_reserved(AVIOContext *bc, int64_t pos)
{
    pos -= avio_tell(bc);

    if (pos < 0) {
        avio_seek(bc, pos, SEEK_CUR);
        return AVERROR_INVALIDDATA;
    }

    while (pos--)
        avio_r8(bc);
    return 0;
}
 
/**
 * Parse the NUT main header: format version, stream count, max_distance,
 * the time-base table, the 256-entry frame-code table and the optional
 * elision headers. Allocates nut->time_base and nut->stream and creates
 * one AVStream per declared stream.
 * @return 0 on success, a negative AVERROR code on failure
 */
static int decode_main_header(NUTContext *nut)
{
    AVFormatContext *s = nut->avf;
    AVIOContext *bc = s->pb;
    uint64_t tmp, end;
    unsigned int stream_count;
    int i, j, count;
    int tmp_stream, tmp_mul, tmp_pts, tmp_size, tmp_res, tmp_head_idx;

    end = get_packetheader(nut, bc, 1, MAIN_STARTCODE);
    end += avio_tell(bc);

    tmp = ffio_read_varlen(bc);
    /* Bug fix: the original test was (tmp < 2 && tmp > NUT_VERSION),
     * which is unsatisfiable, so unsupported versions were never
     * rejected. Reject anything outside [2, NUT_VERSION]. */
    if (tmp < 2 || tmp > NUT_VERSION) {
        av_log(s, AV_LOG_ERROR, "Version %"PRId64" not supported.\n",
               tmp);
        return AVERROR(ENOSYS);
    }

    GET_V(stream_count, tmp > 0 && tmp <= NUT_MAX_STREAMS);

    nut->max_distance = ffio_read_varlen(bc);
    /* Clamp: anything bigger is almost certainly a corrupt field. */
    if (nut->max_distance > 65536) {
        av_log(s, AV_LOG_DEBUG, "max_distance %d\n", nut->max_distance);
        nut->max_distance = 65536;
    }

    GET_V(nut->time_base_count, tmp > 0 && tmp < INT_MAX / sizeof(AVRational));
    nut->time_base = av_malloc(nut->time_base_count * sizeof(AVRational));
    if (!nut->time_base)
        return AVERROR(ENOMEM);

    for (i = 0; i < nut->time_base_count; i++) {
        GET_V(nut->time_base[i].num, tmp > 0 && tmp < (1ULL << 31));
        GET_V(nut->time_base[i].den, tmp > 0 && tmp < (1ULL << 31));
        /* The spec requires reduced fractions. */
        if (av_gcd(nut->time_base[i].num, nut->time_base[i].den) != 1) {
            av_log(s, AV_LOG_ERROR, "time base invalid\n");
            return AVERROR_INVALIDDATA;
        }
    }

    /* Decode the run-length-coded 256-entry frame-code table; fields not
     * re-sent keep their previous value. */
    tmp_pts      = 0;
    tmp_mul      = 1;
    tmp_stream   = 0;
    tmp_head_idx = 0;
    for (i = 0; i < 256;) {
        int tmp_flags  = ffio_read_varlen(bc);
        int tmp_fields = ffio_read_varlen(bc);

        if (tmp_fields > 0)
            tmp_pts = get_s(bc);
        if (tmp_fields > 1)
            tmp_mul = ffio_read_varlen(bc);
        if (tmp_fields > 2)
            tmp_stream = ffio_read_varlen(bc);
        if (tmp_fields > 3)
            tmp_size = ffio_read_varlen(bc);
        else
            tmp_size = 0;
        if (tmp_fields > 4)
            tmp_res = ffio_read_varlen(bc);
        else
            tmp_res = 0;
        if (tmp_fields > 5)
            count = ffio_read_varlen(bc);
        else
            count = tmp_mul - tmp_size;
        if (tmp_fields > 6)
            get_s(bc);                  /* match_time_delta, unused here */
        if (tmp_fields > 7)
            tmp_head_idx = ffio_read_varlen(bc);

        /* Skip any fields added by future versions. */
        while (tmp_fields-- > 8)
            ffio_read_varlen(bc);

        if (count == 0 || i + count > 256) {
            av_log(s, AV_LOG_ERROR, "illegal count %d at %d\n", count, i);
            return AVERROR_INVALIDDATA;
        }
        if (tmp_stream >= stream_count) {
            av_log(s, AV_LOG_ERROR, "illegal stream number\n");
            return AVERROR_INVALIDDATA;
        }

        for (j = 0; j < count; j++, i++) {
            /* Code 'N' is reserved (it starts startcodes) and must stay
             * invalid; it does not consume a table entry. */
            if (i == 'N') {
                nut->frame_code[i].flags = FLAG_INVALID;
                j--;
                continue;
            }
            nut->frame_code[i].flags          = tmp_flags;
            nut->frame_code[i].pts_delta      = tmp_pts;
            nut->frame_code[i].stream_id      = tmp_stream;
            nut->frame_code[i].size_mul       = tmp_mul;
            nut->frame_code[i].size_lsb       = tmp_size + j;
            nut->frame_code[i].reserved_count = tmp_res;
            nut->frame_code[i].header_idx     = tmp_head_idx;
        }
    }
    av_assert0(nut->frame_code['N'].flags == FLAG_INVALID);

    /* Optional elision headers (payload prefixes shared by many frames);
     * their total size is capped at 1024 bytes. */
    if (end > avio_tell(bc) + 4) {
        int rem = 1024;
        GET_V(nut->header_count, tmp < 128U);
        nut->header_count++;
        for (i = 1; i < nut->header_count; i++) {
            uint8_t *hdr;
            GET_V(nut->header_len[i], tmp > 0 && tmp < 256);
            rem -= nut->header_len[i];
            if (rem < 0) {
                av_log(s, AV_LOG_ERROR, "invalid elision header\n");
                return AVERROR_INVALIDDATA;
            }
            hdr = av_malloc(nut->header_len[i]);
            if (!hdr)
                return AVERROR(ENOMEM);
            avio_read(bc, hdr, nut->header_len[i]);
            nut->header[i] = hdr;
        }
        av_assert0(nut->header_len[0] == 0);
    }

    if (skip_reserved(bc, end) || ffio_get_checksum(bc)) {
        av_log(s, AV_LOG_ERROR, "main header checksum mismatch\n");
        return AVERROR_INVALIDDATA;
    }

    nut->stream = av_calloc(stream_count, sizeof(StreamContext));
    if (!nut->stream)
        return AVERROR(ENOMEM);
    for (i = 0; i < stream_count; i++)
        avformat_new_stream(s, NULL);

    return 0;
}
 
/**
 * Parse one stream header packet: codec class/tag, time base, pts coding
 * parameters, extradata and class-specific fields (dimensions / sample
 * rate). Initializes the matching AVStream and StreamContext.
 * @return 0 on success, a negative AVERROR code on failure
 */
static int decode_stream_header(NUTContext *nut)
{
    AVFormatContext *s = nut->avf;
    AVIOContext *bc = s->pb;
    StreamContext *stc;
    int class, stream_id;
    uint64_t tmp, end;
    AVStream *st;

    end = get_packetheader(nut, bc, 1, STREAM_STARTCODE);
    end += avio_tell(bc);

    /* Valid only if this stream id has not been seen yet (time_base of a
     * parsed stream is non-NULL). */
    GET_V(stream_id, tmp < s->nb_streams && !nut->stream[tmp].time_base);
    stc = &nut->stream[stream_id];
    st = s->streams[stream_id];
    if (!st)
        return AVERROR(ENOMEM);

    class = ffio_read_varlen(bc);
    tmp = get_fourcc(bc);
    st->codec->codec_tag = tmp;
    switch (class) {
    case 0:
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = av_codec_get_id((const AVCodecTag * const []) {
                                                  ff_nut_video_tags,
                                                  ff_codec_bmp_tags,
                                                  0
                                              },
                                              tmp);
        break;
    case 1:
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = av_codec_get_id((const AVCodecTag * const []) {
                                                  ff_nut_audio_tags,
                                                  ff_codec_wav_tags,
                                                  0
                                              },
                                              tmp);
        break;
    case 2:
        st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
        st->codec->codec_id = ff_codec_get_id(ff_nut_subtitle_tags, tmp);
        break;
    case 3:
        st->codec->codec_type = AVMEDIA_TYPE_DATA;
        st->codec->codec_id = ff_codec_get_id(ff_nut_data_tags, tmp);
        break;
    default:
        av_log(s, AV_LOG_ERROR, "unknown stream class (%d)\n", class);
        return AVERROR(ENOSYS);
    }
    /* Data streams (class 3) are allowed to have an unknown tag. */
    if (class < 3 && st->codec->codec_id == AV_CODEC_ID_NONE)
        av_log(s, AV_LOG_ERROR,
               "Unknown codec tag '0x%04x' for stream number %d\n",
               (unsigned int) tmp, stream_id);

    GET_V(stc->time_base_id, tmp < nut->time_base_count);
    GET_V(stc->msb_pts_shift, tmp < 16);
    stc->max_pts_distance = ffio_read_varlen(bc);
    GET_V(stc->decode_delay, tmp < 1000); // sanity limit, raise this if Moore's law is true
    st->codec->has_b_frames = stc->decode_delay;
    ffio_read_varlen(bc); // stream flags

    GET_V(st->codec->extradata_size, tmp < (1 << 30));
    if (st->codec->extradata_size) {
        if (ff_alloc_extradata(st->codec, st->codec->extradata_size))
            return AVERROR(ENOMEM);
        avio_read(bc, st->codec->extradata, st->codec->extradata_size);
    }

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        GET_V(st->codec->width,  tmp > 0);
        GET_V(st->codec->height, tmp > 0);
        st->sample_aspect_ratio.num = ffio_read_varlen(bc);
        st->sample_aspect_ratio.den = ffio_read_varlen(bc);
        /* num and den must both be zero or both be nonzero. */
        if ((!st->sample_aspect_ratio.num) != (!st->sample_aspect_ratio.den)) {
            av_log(s, AV_LOG_ERROR, "invalid aspect ratio %d/%d\n",
                   st->sample_aspect_ratio.num, st->sample_aspect_ratio.den);
            return AVERROR_INVALIDDATA;
        }
        ffio_read_varlen(bc); /* csp type */
    } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        GET_V(st->codec->sample_rate, tmp > 0);
        ffio_read_varlen(bc); // samplerate_den
        GET_V(st->codec->channels, tmp > 0);
    }
    if (skip_reserved(bc, end) || ffio_get_checksum(bc)) {
        av_log(s, AV_LOG_ERROR,
               "stream header %d checksum mismatch\n", stream_id);
        return AVERROR_INVALIDDATA;
    }
    stc->time_base = &nut->time_base[stc->time_base_id];
    avpriv_set_pts_info(s->streams[stream_id], 63, stc->time_base->num,
                        stc->time_base->den);
    return 0;
}
 
/**
 * Map a textual disposition name to its AV_DISPOSITION_* bit and OR it
 * into one stream's disposition (or all streams when stream_id == -1).
 * Unknown names are logged and ignored (flag stays 0).
 */
static void set_disposition_bits(AVFormatContext *avf, char *value,
                                 int stream_id)
{
    int i, flag = 0;

    for (i = 0; ff_nut_dispositions[i].flag; ++i)
        if (!strcmp(value, ff_nut_dispositions[i].str))
            flag = ff_nut_dispositions[i].flag;

    if (!flag)
        av_log(avf, AV_LOG_INFO, "unknown disposition type '%s'\n", value);

    for (i = 0; i < avf->nb_streams; ++i)
        if (stream_id == -1 || stream_id == i)
            avf->streams[i]->disposition |= flag;
}
 
/**
 * Parse an info packet: chapter definitions and key/value metadata for
 * the whole file, a single stream, or a chapter.
 * @return 0 on success, a negative AVERROR code on failure
 */
static int decode_info_header(NUTContext *nut)
{
    AVFormatContext *s = nut->avf;
    AVIOContext *bc = s->pb;
    uint64_t tmp, chapter_start, chapter_len;
    unsigned int stream_id_plus1, count;
    int chapter_id, i;
    int64_t value, end;
    char name[256], str_value[1024], type_str[256];
    const char *type;
    AVChapter *chapter = NULL;
    AVStream *st = NULL;
    AVDictionary **metadata = NULL;

    end = get_packetheader(nut, bc, 1, INFO_STARTCODE);
    end += avio_tell(bc);

    GET_V(stream_id_plus1, tmp <= s->nb_streams);
    chapter_id    = get_s(bc);
    chapter_start = ffio_read_varlen(bc);
    chapter_len   = ffio_read_varlen(bc);
    count         = ffio_read_varlen(bc);

    /* Select the dictionary the key/value pairs below attach to. */
    if (chapter_id && !stream_id_plus1) {
        int64_t start = chapter_start / nut->time_base_count;
        chapter = avpriv_new_chapter(s, chapter_id,
                                     nut->time_base[chapter_start %
                                                    nut->time_base_count],
                                     start, start + chapter_len, NULL);
        /* Bug fix: avpriv_new_chapter() may return NULL on allocation
         * failure; the original dereferenced it unconditionally. */
        if (!chapter) {
            av_log(s, AV_LOG_ERROR, "Could not create chapter.\n");
            return AVERROR(ENOMEM);
        }
        metadata = &chapter->metadata;
    } else if (stream_id_plus1) {
        st = s->streams[stream_id_plus1 - 1];
        metadata = &st->metadata;
    } else
        metadata = &s->metadata;

    for (i = 0; i < count; i++) {
        get_str(bc, name, sizeof(name));
        value = get_s(bc);
        /* Negative sentinel values select the value's type; otherwise it
         * is a plain integer ("v"). */
        if (value == -1) {
            type = "UTF-8";
            get_str(bc, str_value, sizeof(str_value));
        } else if (value == -2) {
            get_str(bc, type_str, sizeof(type_str));
            type = type_str;
            get_str(bc, str_value, sizeof(str_value));
        } else if (value == -3) {
            type = "s";
            value = get_s(bc);
        } else if (value == -4) {
            type = "t";
            value = ffio_read_varlen(bc);
        } else if (value < -4) {
            type = "r";
            get_s(bc);
        } else {
            type = "v";
        }

        if (stream_id_plus1 > s->nb_streams) {
            av_log(s, AV_LOG_ERROR, "invalid stream id for info packet\n");
            continue;
        }

        if (!strcmp(type, "UTF-8")) {
            if (chapter_id == 0 && !strcmp(name, "Disposition")) {
                set_disposition_bits(s, str_value, stream_id_plus1 - 1);
                continue;
            }

            if (stream_id_plus1 && !strcmp(name, "r_frame_rate")) {
                sscanf(str_value, "%d/%d", &st->r_frame_rate.num, &st->r_frame_rate.den);
                /* Discard absurd frame rates. */
                if (st->r_frame_rate.num >= 1000LL*st->r_frame_rate.den)
                    st->r_frame_rate.num = st->r_frame_rate.den = 0;
                continue;
            }

            /* Dependency keys are structural, not user metadata. */
            if (metadata && av_strcasecmp(name, "Uses") &&
                av_strcasecmp(name, "Depends") && av_strcasecmp(name, "Replaces"))
                av_dict_set(metadata, name, str_value, 0);
        }
    }

    if (skip_reserved(bc, end) || ffio_get_checksum(bc)) {
        av_log(s, AV_LOG_ERROR, "info header checksum mismatch\n");
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
 
/**
 * Parse a syncpoint packet: global timestamp and back pointer.
 * Resets every stream's last_pts to the syncpoint time and records the
 * syncpoint in the seek tree.
 * @param ts       output: syncpoint time in AV_TIME_BASE units
 * @param back_ptr output: file position of the earliest frame needed to
 *                 decode from this syncpoint
 * @return 0 on success, a negative AVERROR code on failure
 */
static int decode_syncpoint(NUTContext *nut, int64_t *ts, int64_t *back_ptr)
{
    AVFormatContext *s = nut->avf;
    AVIOContext *bc = s->pb;
    int64_t end;
    uint64_t tmp;
    int ret;

    /* We are called right after the 8-byte startcode was consumed. */
    nut->last_syncpoint_pos = avio_tell(bc) - 8;

    end = get_packetheader(nut, bc, 1, SYNCPOINT_STARTCODE);
    end += avio_tell(bc);

    /* tmp packs timestamp * time_base_count + time_base_id. */
    tmp = ffio_read_varlen(bc);
    /* back_ptr is stored as a distance in 16-byte units. */
    *back_ptr = nut->last_syncpoint_pos - 16 * ffio_read_varlen(bc);
    if (*back_ptr < 0)
        return AVERROR_INVALIDDATA;

    ff_nut_reset_ts(nut, nut->time_base[tmp % nut->time_base_count],
                    tmp / nut->time_base_count);

    if (skip_reserved(bc, end) || ffio_get_checksum(bc)) {
        av_log(s, AV_LOG_ERROR, "sync point checksum mismatch\n");
        return AVERROR_INVALIDDATA;
    }

    *ts = tmp / nut->time_base_count *
          av_q2d(nut->time_base[tmp % nut->time_base_count]) * AV_TIME_BASE;

    if ((ret = ff_nut_add_sp(nut, nut->last_syncpoint_pos, *back_ptr, *ts)) < 0)
        return ret;

    return 0;
}
 
//FIXME calculate exactly, this is just a good approximation.
/* Estimate the total duration by binary-searching for the last timestamp
 * in the file. The filesize parameter is currently unused. */
static int64_t find_duration(NUTContext *nut, int64_t filesize)
{
    AVFormatContext *s = nut->avf;
    int64_t duration = 0;

    ff_find_last_ts(s, -1, &duration, NULL, nut_read_timestamp);

    if (duration > 0)
        s->duration_estimation_method = AVFMT_DURATION_FROM_PTS;

    return duration;
}
 
/**
 * Locate the index at the end of the file and decode it: the syncpoint
 * position list plus, per stream, a run-length-coded keyframe bitmap and
 * the keyframe pts deltas. Populates the AVStream index entries; if no
 * index is present, falls back to estimating the duration.
 * @return 0 on success, a negative error code otherwise
 */
static int find_and_decode_index(NUTContext *nut)
{
    AVFormatContext *s = nut->avf;
    AVIOContext *bc = s->pb;
    uint64_t tmp, end;
    int i, j, syncpoint_count;
    int64_t filesize = avio_size(bc);
    int64_t *syncpoints;
    uint64_t max_pts;
    int8_t *has_keyframe;
    int ret = AVERROR_INVALIDDATA;

    if (filesize <= 0)
        return -1;

    /* The last 8 bytes of the file hold the index size; jump back by it. */
    avio_seek(bc, filesize - 12, SEEK_SET);
    avio_seek(bc, filesize - avio_rb64(bc), SEEK_SET);
    if (avio_rb64(bc) != INDEX_STARTCODE) {
        av_log(s, AV_LOG_ERROR, "no index at the end\n");

        if (s->duration <= 0)
            s->duration = find_duration(nut, filesize);
        return ret;
    }

    end = get_packetheader(nut, bc, 1, INDEX_STARTCODE);
    end += avio_tell(bc);

    /* max_pts packs timestamp * time_base_count + time_base_id. */
    max_pts = ffio_read_varlen(bc);
    s->duration = av_rescale_q(max_pts / nut->time_base_count,
                               nut->time_base[max_pts % nut->time_base_count],
                               AV_TIME_BASE_Q);
    s->duration_estimation_method = AVFMT_DURATION_FROM_PTS;

    GET_V(syncpoint_count, tmp < INT_MAX / 8 && tmp > 0);
    syncpoints   = av_malloc_array(syncpoint_count, sizeof(int64_t));
    has_keyframe = av_malloc_array(syncpoint_count + 1, sizeof(int8_t));
    if (!syncpoints || !has_keyframe) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    /* Syncpoint positions are delta-coded. */
    for (i = 0; i < syncpoint_count; i++) {
        syncpoints[i] = ffio_read_varlen(bc);
        if (syncpoints[i] <= 0)
            goto fail;
        if (i)
            syncpoints[i] += syncpoints[i - 1];
    }

    for (i = 0; i < s->nb_streams; i++) {
        int64_t last_pts = -1;
        for (j = 0; j < syncpoint_count;) {
            /* Each varint is either a run (type bit 1: x copies of `flag`
             * then one !flag) or an explicit little-endian bitmap
             * terminated by its top 1-bit (type bit 0). */
            uint64_t x = ffio_read_varlen(bc);
            int type = x & 1;
            int n = j;
            x >>= 1;
            if (type) {
                int flag = x & 1;
                x >>= 1;
                if (n + x >= syncpoint_count + 1) {
                    av_log(s, AV_LOG_ERROR, "index overflow A %d + %"PRIu64" >= %d\n", n, x, syncpoint_count + 1);
                    goto fail;
                }
                while (x--)
                    has_keyframe[n++] = flag;
                has_keyframe[n++] = !flag;
            } else {
                while (x != 1) {
                    if (n >= syncpoint_count + 1) {
                        av_log(s, AV_LOG_ERROR, "index overflow B\n");
                        goto fail;
                    }
                    has_keyframe[n++] = x & 1;
                    x >>= 1;
                }
            }
            if (has_keyframe[0]) {
                av_log(s, AV_LOG_ERROR, "keyframe before first syncpoint in index\n");
                goto fail;
            }
            av_assert0(n <= syncpoint_count + 1);
            for (; j < n && j < syncpoint_count; j++) {
                if (has_keyframe[j]) {
                    /* A == 0 escapes to an explicit (A, B) pair where B is
                     * an EOR marker distance; otherwise A is the pts delta. */
                    uint64_t B, A = ffio_read_varlen(bc);
                    if (!A) {
                        A = ffio_read_varlen(bc);
                        B = ffio_read_varlen(bc);
                        // eor_pts[j][i] = last_pts + A + B
                    } else
                        B = 0;
                    av_add_index_entry(s->streams[i], 16 * syncpoints[j - 1],
                                       last_pts + A, 0, 0, AVINDEX_KEYFRAME);
                    last_pts += A + B;
                }
            }
        }
    }

    if (skip_reserved(bc, end) || ffio_get_checksum(bc)) {
        av_log(s, AV_LOG_ERROR, "index checksum mismatch\n");
        goto fail;
    }
    ret = 0;

fail:
    av_free(syncpoints);
    av_free(has_keyframe);
    return ret;
}
 
/**
 * Demuxer read_header callback: locate and parse the main header, all
 * stream headers and any info packets preceding the first syncpoint,
 * then (if seekable) try to read the index from the end of the file.
 * @return 0 on success, a negative AVERROR code on failure
 */
static int nut_read_header(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;
    int64_t pos;
    int initialized_stream_count;

    nut->avf = s;

    /* main header */
    pos = 0;
    do {
        /* +1 so a failed search (-1) becomes 0 and the check below fires;
         * the next search resumes one byte past the last candidate. */
        pos = find_startcode(bc, MAIN_STARTCODE, pos) + 1;
        if (pos < 0 + 1) {
            av_log(s, AV_LOG_ERROR, "No main startcode found.\n");
            return AVERROR_INVALIDDATA;
        }
    } while (decode_main_header(nut) < 0);

    /* stream headers */
    pos = 0;
    for (initialized_stream_count = 0; initialized_stream_count < s->nb_streams;) {
        pos = find_startcode(bc, STREAM_STARTCODE, pos) + 1;
        if (pos < 0 + 1) {
            av_log(s, AV_LOG_ERROR, "Not all stream headers found.\n");
            return AVERROR_INVALIDDATA;
        }
        if (decode_stream_header(nut) >= 0)
            initialized_stream_count++;
    }

    /* info headers */
    pos = 0;
    for (;;) {
        uint64_t startcode = find_any_startcode(bc, pos);
        pos = avio_tell(bc);

        if (startcode == 0) {
            av_log(s, AV_LOG_ERROR, "EOF before video frames\n");
            return AVERROR_INVALIDDATA;
        } else if (startcode == SYNCPOINT_STARTCODE) {
            /* First syncpoint reached: stash it for nut_read_packet(). */
            nut->next_startcode = startcode;
            break;
        } else if (startcode != INFO_STARTCODE) {
            continue;
        }

        decode_info_header(nut);
    }

    s->data_offset = pos - 8;

    if (bc->seekable) {
        /* Best effort: a missing/broken index is not fatal. */
        int64_t orig_pos = avio_tell(bc);
        find_and_decode_index(nut);
        avio_seek(bc, orig_pos, SEEK_SET);
    }
    av_assert0(nut->next_startcode == SYNCPOINT_STARTCODE);

    ff_metadata_conv_ctx(s, NULL, ff_nut_metadata_conv);

    return 0;
}
 
/**
 * Decode a frame header, starting from the table defaults selected by
 * frame_code and applying any fields coded in the stream.
 * @param pts        output: the frame's pts
 * @param stream_id  output: the frame's stream index
 * @param header_idx output: elision header index prepended to the payload
 * @return payload size (excluding the elision header) on success,
 *         a negative AVERROR code on failure
 */
static int decode_frame_header(NUTContext *nut, int64_t *pts, int *stream_id,
                               uint8_t *header_idx, int frame_code)
{
    AVFormatContext *s = nut->avf;
    AVIOContext *bc = s->pb;
    StreamContext *stc;
    int size, flags, size_mul, pts_delta, i, reserved_count;
    uint64_t tmp;

    /* A frame must start within max_distance of the last syncpoint. */
    if (avio_tell(bc) > nut->last_syncpoint_pos + nut->max_distance) {
        av_log(s, AV_LOG_ERROR,
               "Last frame must have been damaged %"PRId64" > %"PRId64" + %d\n",
               avio_tell(bc), nut->last_syncpoint_pos, nut->max_distance);
        return AVERROR_INVALIDDATA;
    }

    /* Start from the per-code defaults set up in the main header. */
    flags          = nut->frame_code[frame_code].flags;
    size_mul       = nut->frame_code[frame_code].size_mul;
    size           = nut->frame_code[frame_code].size_lsb;
    *stream_id     = nut->frame_code[frame_code].stream_id;
    pts_delta      = nut->frame_code[frame_code].pts_delta;
    reserved_count = nut->frame_code[frame_code].reserved_count;
    *header_idx    = nut->frame_code[frame_code].header_idx;

    if (flags & FLAG_INVALID)
        return AVERROR_INVALIDDATA;
    /* FLAG_CODED means the stream XORs extra flag bits in. */
    if (flags & FLAG_CODED)
        flags ^= ffio_read_varlen(bc);
    if (flags & FLAG_STREAM_ID) {
        GET_V(*stream_id, tmp < s->nb_streams);
    }
    stc = &nut->stream[*stream_id];
    if (flags & FLAG_CODED_PTS) {
        int coded_pts = ffio_read_varlen(bc);
        // FIXME check last_pts validity?
        /* Small values are lsb-coded relative to last_pts; larger values
         * carry the full pts with an offset. */
        if (coded_pts < (1 << stc->msb_pts_shift)) {
            *pts = ff_lsb2full(stc, coded_pts);
        } else
            *pts = coded_pts - (1LL << stc->msb_pts_shift);
    } else
        *pts = stc->last_pts + pts_delta;
    if (flags & FLAG_SIZE_MSB)
        size += size_mul * ffio_read_varlen(bc);
    if (flags & FLAG_MATCH_TIME)
        get_s(bc);
    if (flags & FLAG_HEADER_IDX)
        *header_idx = ffio_read_varlen(bc);
    if (flags & FLAG_RESERVED)
        reserved_count = ffio_read_varlen(bc);
    for (i = 0; i < reserved_count; i++)
        ffio_read_varlen(bc);

    if (*header_idx >= (unsigned)nut->header_count) {
        av_log(s, AV_LOG_ERROR, "header_idx invalid\n");
        return AVERROR_INVALIDDATA;
    }
    /* Elision headers only apply to small frames. */
    if (size > 4096)
        *header_idx = 0;
    size -= nut->header_len[*header_idx];

    if (flags & FLAG_CHECKSUM) {
        avio_rb32(bc); // FIXME check this
    } else if (size > 2 * nut->max_distance || FFABS(stc->last_pts - *pts) >
               stc->max_pts_distance) {
        /* Large frames / big pts jumps must carry a checksum. */
        av_log(s, AV_LOG_ERROR, "frame size > 2max_distance and no checksum\n");
        return AVERROR_INVALIDDATA;
    }

    stc->last_pts   = *pts;
    stc->last_flags = flags;

    return size;
}
 
/**
 * Read one frame into pkt, prepending its elision header.
 * @return 0 on success, 1 if the frame was skipped due to discard
 *         settings, a negative AVERROR code on failure
 */
static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code)
{
    AVFormatContext *s = nut->avf;
    AVIOContext *bc = s->pb;
    int size, stream_id, discard, ret;
    int64_t pts, last_IP_pts;
    StreamContext *stc;
    uint8_t header_idx;

    size = decode_frame_header(nut, &pts, &stream_id, &header_idx, frame_code);
    if (size < 0)
        return size;

    stc = &nut->stream[stream_id];

    if (stc->last_flags & FLAG_KEY)
        stc->skip_until_key_frame = 0;

    discard     = s->streams[stream_id]->discard;
    last_IP_pts = s->streams[stream_id]->last_IP_pts;
    /* Honor the caller's discard settings and post-seek keyframe skip. */
    if ((discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY)) ||
        (discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE &&
         last_IP_pts > pts) ||
        discard >= AVDISCARD_ALL ||
        stc->skip_until_key_frame) {
        avio_skip(bc, size);
        return 1;
    }

    if (av_new_packet(pkt, size + nut->header_len[header_idx]) < 0)
        return AVERROR(ENOMEM);
    memcpy(pkt->data, nut->header[header_idx], nut->header_len[header_idx]);
    pkt->pos = avio_tell(bc); // FIXME
    /* Bug fix: the original ignored the avio_read() result, so a
     * truncated file yielded a packet padded with uninitialized bytes. */
    ret = avio_read(bc, pkt->data + nut->header_len[header_idx], size);
    if (ret != size) {
        av_free_packet(pkt);
        return ret < 0 ? ret : AVERROR_INVALIDDATA;
    }

    pkt->stream_index = stream_id;
    if (stc->last_flags & FLAG_KEY)
        pkt->flags |= AV_PKT_FLAG_KEY;
    pkt->pts = pts;

    return 0;
}
 
/**
 * Demuxer read_packet callback: dispatch on the next byte — either a
 * frame code or the start of a startcode ('N') — and resynchronize on
 * any error by scanning for the next startcode.
 * @return 0 with pkt filled, or a negative AVERROR code
 */
static int nut_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;
    int i, frame_code = 0, ret, skip;
    int64_t ts, back_ptr;

    for (;;) {
        int64_t pos  = avio_tell(bc);
        uint64_t tmp = nut->next_startcode;
        nut->next_startcode = 0;

        if (tmp) {
            /* A startcode was already consumed by a previous call. */
            pos -= 8;
        } else {
            frame_code = avio_r8(bc);
            if (url_feof(bc))
                return AVERROR_EOF;
            /* 'N' begins a startcode: read the remaining 7 bytes. */
            if (frame_code == 'N') {
                tmp = frame_code;
                for (i = 1; i < 8; i++)
                    tmp = (tmp << 8) + avio_r8(bc);
            }
        }
        switch (tmp) {
        case MAIN_STARTCODE:
        case STREAM_STARTCODE:
        case INDEX_STARTCODE:
            /* Headers were parsed in nut_read_header(); skip repeats. */
            skip = get_packetheader(nut, bc, 0, tmp);
            avio_skip(bc, skip);
            break;
        case INFO_STARTCODE:
            if (decode_info_header(nut) < 0)
                goto resync;
            break;
        case SYNCPOINT_STARTCODE:
            if (decode_syncpoint(nut, &ts, &back_ptr) < 0)
                goto resync;
            frame_code = avio_r8(bc);
            /* fall through: a frame follows the syncpoint */
        case 0:
            ret = decode_frame(nut, pkt, frame_code);
            if (ret == 0)
                return 0;
            else if (ret == 1) // OK but discard packet
                break;
            /* fall through: resync on frame errors */
        default:
resync:
            av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", pos);
            tmp = find_any_startcode(bc, nut->last_syncpoint_pos + 1);
            if (tmp == 0)
                return AVERROR_INVALIDDATA;
            av_log(s, AV_LOG_DEBUG, "sync\n");
            nut->next_startcode = tmp;
        }
    }
}
 
/**
 * ff_gen_search() callback: find the next syncpoint at/after *pos_arg and
 * return its timestamp (stream_index == -1) or its back pointer
 * (stream_index == -2); *pos_arg is updated to the syncpoint position.
 */
static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index,
                                  int64_t *pos_arg, int64_t pos_limit)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;
    int64_t pos = *pos_arg;
    int64_t pts, back_ptr;

    av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n",
           stream_index, *pos_arg, pos_limit);

    /* Scan forward until a syncpoint parses cleanly. */
    for (;;) {
        pos = find_startcode(bc, SYNCPOINT_STARTCODE, pos) + 1;
        if (pos < 1) {
            av_log(s, AV_LOG_ERROR, "read_timestamp failed.\n");
            return AV_NOPTS_VALUE;
        }
        if (decode_syncpoint(nut, &pts, &back_ptr) >= 0)
            break;
    }
    *pos_arg = pos - 1;
    av_assert0(nut->last_syncpoint_pos == *pos_arg);

    av_log(s, AV_LOG_DEBUG, "return %"PRId64" %"PRId64"\n", pts, back_ptr);
    if (stream_index == -2)
        return back_ptr;
    av_assert0(stream_index == -1);
    return pts;
}
 
/**
 * Demuxer read_seek callback. Uses the index when present; otherwise
 * binary-searches via syncpoints (using the in-memory syncpoint tree as
 * search bounds) and positions the stream at the syncpoint whose back
 * pointer covers the target, marking all streams to skip to a keyframe.
 * @return 0 on success, -1 if the target cannot be found in the index
 */
static int read_seek(AVFormatContext *s, int stream_index,
                     int64_t pts, int flags)
{
    NUTContext *nut    = s->priv_data;
    AVStream *st       = s->streams[stream_index];
    Syncpoint dummy    = { .ts = pts * av_q2d(st->time_base) * AV_TIME_BASE };
    Syncpoint nopts_sp = { .ts = AV_NOPTS_VALUE, .back_ptr = AV_NOPTS_VALUE };
    Syncpoint *sp, *next_node[2] = { &nopts_sp, &nopts_sp };
    int64_t pos, pos2, ts;
    int i;

    if (st->index_entries) {
        /* Index path: direct lookup, retrying with the opposite
         * direction before giving up. */
        int index = av_index_search_timestamp(st, pts, flags);
        if (index < 0)
            index = av_index_search_timestamp(st, pts, flags ^ AVSEEK_FLAG_BACKWARD);
        if (index < 0)
            return -1;

        pos2 = st->index_entries[index].pos;
        ts   = st->index_entries[index].timestamp;
    } else {
        /* No index: bound the binary search with the nearest known
         * syncpoints, then refine by timestamp. */
        av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pts_cmp,
                     (void **) next_node);
        av_log(s, AV_LOG_DEBUG, "%"PRIu64"-%"PRIu64" %"PRId64"-%"PRId64"\n",
               next_node[0]->pos, next_node[1]->pos, next_node[0]->ts,
               next_node[1]->ts);
        pos = ff_gen_search(s, -1, dummy.ts, next_node[0]->pos,
                            next_node[1]->pos, next_node[1]->pos,
                            next_node[0]->ts, next_node[1]->ts,
                            AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp);

        if (!(flags & AVSEEK_FLAG_BACKWARD)) {
            /* Forward seek: search again by position/back_ptr (-2 mode)
             * to find the earliest syncpoint covering the target. */
            dummy.pos    = pos + 16;
            next_node[1] = &nopts_sp;
            av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,
                         (void **) next_node);
            pos2 = ff_gen_search(s, -2, dummy.pos, next_node[0]->pos,
                                 next_node[1]->pos, next_node[1]->pos,
                                 next_node[0]->back_ptr, next_node[1]->back_ptr,
                                 flags, &ts, nut_read_timestamp);
            if (pos2 >= 0)
                pos = pos2;
            // FIXME dir but I think it does not matter
        }
        dummy.pos = pos;
        sp = av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,
                          NULL);

        av_assert0(sp);
        pos2 = sp->back_ptr - 15;
    }
    av_log(NULL, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos2);
    pos = find_startcode(s->pb, SYNCPOINT_STARTCODE, pos2);
    avio_seek(s->pb, pos, SEEK_SET);
    av_log(NULL, AV_LOG_DEBUG, "SP: %"PRId64"\n", pos);
    if (pos2 > pos || pos2 + 15 < pos)
        av_log(NULL, AV_LOG_ERROR, "no syncpoint at backptr pos\n");
    /* Drop frames until each stream hits a keyframe. */
    for (i = 0; i < s->nb_streams; i++)
        nut->stream[i].skip_until_key_frame = 1;

    return 0;
}
 
/* Free all demuxer-private state attached to the NUT context. */
static int nut_read_close(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int idx = 1;

    /* slots 1..header_count-1 hold the elision headers read from the
     * file; slot 0 is intentionally left alone */
    while (idx < nut->header_count) {
        av_freep(&nut->header[idx]);
        idx++;
    }
    ff_nut_free_sp(nut);
    av_freep(&nut->stream);
    av_freep(&nut->time_base);

    return 0;
}
 
/* Demuxer registration: seeking is done by pts (AVFMT_SEEK_TO_PTS). */
AVInputFormat ff_nut_demuxer = {
    .name           = "nut",
    .long_name      = NULL_IF_CONFIG_SMALL("NUT"),
    .flags          = AVFMT_SEEK_TO_PTS,
    .priv_data_size = sizeof(NUTContext),
    .read_probe     = nut_probe,
    .read_header    = nut_read_header,
    .read_packet    = nut_read_packet,
    .read_close     = nut_read_close,
    .read_seek      = read_seek,
    .extensions     = "nut",
    .codec_tag      = ff_nut_codec_tags,
};
/contrib/sdk/sources/ffmpeg/libavformat/nutenc.c
0,0 → 1,1024
/*
* nut muxer
* Copyright (c) 2004-2007 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/tree.h"
#include "libavutil/dict.h"
#include "libavutil/avassert.h"
#include "libavcodec/mpegaudiodata.h"
#include "nut.h"
#include "internal.h"
#include "avio_internal.h"
#include "riff.h"
 
/**
 * Synthesize the header bytes a frame of this codec is expected to start
 * with, for header elision.
 *
 * @param c         codec whose bitstream start is predicted
 * @param size      frame size in bytes (used to derive the MP2/MP3 header)
 * @param key_frame nonzero for keyframes (affects MPEG-4 prediction)
 * @param out       receives the predicted bytes
 * @return number of predictable bytes written to out, 0 if none,
 *         negative if the prediction failed
 */
static int find_expected_header(AVCodecContext *c, int size, int key_frame,
                                uint8_t out[64])
{
    int sample_rate = c->sample_rate;

    if (size > 4096)
        return 0;

    /* all handled video codecs start with the 00 00 01 startcode prefix */
    AV_WB24(out, 1);

    if (c->codec_id == AV_CODEC_ID_MPEG4) {
        if (key_frame) {
            return 3;
        } else {
            out[3] = 0xB6; /* VOP startcode for non-key frames */
            return 4;
        }
    } else if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
               c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        return 3;
    } else if (c->codec_id == AV_CODEC_ID_H264) {
        return 3;
    } else if (c->codec_id == AV_CODEC_ID_MP3 ||
               c->codec_id == AV_CODEC_ID_MP2) {
        int lsf, mpeg25, sample_rate_index, bitrate_index, frame_size;
        int layer           = c->codec_id == AV_CODEC_ID_MP3 ? 3 : 2;
        unsigned int header = 0xFFF00000;

        /* classify MPEG-1 / MPEG-2 (lsf) / MPEG-2.5 by sample rate */
        lsf           = sample_rate < (24000 + 32000) / 2;
        mpeg25        = sample_rate < (12000 + 16000) / 2;
        sample_rate <<= lsf + mpeg25;
        if      (sample_rate < (32000 + 44100) / 2) sample_rate_index = 2;
        else if (sample_rate < (44100 + 48000) / 2) sample_rate_index = 0;
        else                                        sample_rate_index = 1;

        sample_rate = avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25);

        /* find the bitrate index whose frame size matches this frame */
        for (bitrate_index = 2; bitrate_index < 30; bitrate_index++) {
            frame_size =
                avpriv_mpa_bitrate_tab[lsf][layer - 1][bitrate_index >> 1];
            frame_size = (frame_size * 144000) / (sample_rate << lsf) +
                         (bitrate_index & 1);

            if (frame_size == size)
                break;
        }

        header |= (!lsf)      << 19;
        header |= (4 - layer) << 17;
        header |= 1           << 16; //no crc
        AV_WB32(out, header);
        if (size <= 0)
            return 2; //we guess there is no crc, if there is one the user clearly does not care about overhead
        if (bitrate_index == 30)
            return -1; //something is wrong ...

        header |= (bitrate_index >> 1) << 12;
        header |= sample_rate_index    << 10;
        header |= (bitrate_index & 1)  << 9;

        return 2; //FIXME actually put the needed ones in build_elision_headers()
        //return 3; //we guess that the private bit is not set
        //FIXME the above assumptions should be checked, if these turn out false too often something should be done
    }
    return 0;
}
 
/* Return the elision-header slot matching the header predicted for this
 * codec/size/frame type, or 0 (no elision) when none matches. */
static int find_header_idx(AVFormatContext *s, AVCodecContext *c, int size, int frame_type)
{
    NUTContext *nut = s->priv_data;
    uint8_t expected[64];
    int expected_len = find_expected_header(c, size, frame_type, expected);
    int idx;

    for (idx = 1; idx < nut->header_count; idx++)
        if (nut->header_len[idx] == expected_len &&
            memcmp(expected, nut->header[idx], expected_len) == 0)
            return idx;

    return 0;
}
 
/**
 * Install a fixed set of common frame headers (MPEG startcodes, MP2/MP3
 * sync words) as elision headers so they can be omitted from frames.
 */
static void build_elision_headers(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int i;
    //FIXME this is lame
    //FIXME write a 2pass mode to find the maximal headers
    /* each entry: { length, header bytes... } */
    static const uint8_t headers[][5] = {
        { 3, 0x00, 0x00, 0x01 },
        { 4, 0x00, 0x00, 0x01, 0xB6},
        { 2, 0xFF, 0xFA }, //mp3+crc
        { 2, 0xFF, 0xFB }, //mp3
        { 2, 0xFF, 0xFC }, //mp2+crc
        { 2, 0xFF, 0xFD }, //mp2
    };

    /* slot 0 stays empty (no elision); slots 1..6 point into the static
     * table above, so they must never be freed */
    nut->header_count = 7;
    for (i = 1; i < nut->header_count; i++) {
        nut->header_len[i] = headers[i - 1][0];
        nut->header[i]     = &headers[i - 1][1];
    }
}
 
/**
 * Populate the 256-entry frame_code table that maps the first byte of a
 * frame to its coding parameters (flags, stream, size multiplier, pts
 * delta, elision header). Codes 1..254 are partitioned evenly between
 * streams; 0, 255 and 'N' are reserved as invalid.
 */
static void build_frame_code(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int key_frame, index, pred, stream_id;
    int start = 1;
    int end   = 254;
    int keyframe_0_esc = s->nb_streams > 2;
    int pred_table[10];
    FrameCode *ft;

    /* code 1: fully coded escape (all fields explicit in the frame) */
    ft            = &nut->frame_code[start];
    ft->flags     = FLAG_CODED;
    ft->size_mul  = 1;
    ft->pts_delta = 1;
    start++;

    if (keyframe_0_esc) {
        /* keyframe = 0 escape */
        FrameCode *ft = &nut->frame_code[start];
        ft->flags    = FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;
        ft->size_mul = 1;
        start++;
    }

    for (stream_id = 0; stream_id < s->nb_streams; stream_id++) {
        /* this stream owns codes [start2, end2) */
        int start2 = start + (end - start) * stream_id / s->nb_streams;
        int end2   = start + (end - start) * (stream_id + 1) / s->nb_streams;
        AVCodecContext *codec = s->streams[stream_id]->codec;
        int is_audio   = codec->codec_type == AVMEDIA_TYPE_AUDIO;
        int intra_only = /*codec->intra_only || */ is_audio;
        int pred_count;
        int frame_size = 0;

        /* frame duration in stream time-base units, used as pts step */
        if (codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            frame_size = av_get_audio_frame_duration(codec, 0);
            if (codec->codec_id == AV_CODEC_ID_VORBIS && !frame_size)
                frame_size = 64;
        } else {
            AVRational f = av_div_q(codec->time_base, *nut->stream[stream_id].time_base);
            if (f.den == 1 && f.num>0)
                frame_size = f.num;
        }
        if (!frame_size)
            frame_size = 1;

        /* generic per-stream codes with explicit size and pts */
        for (key_frame = 0; key_frame < 2; key_frame++) {
            if (!intra_only || !keyframe_0_esc || key_frame != 0) {
                FrameCode *ft = &nut->frame_code[start2];
                ft->flags     = FLAG_KEY * key_frame;
                ft->flags    |= FLAG_SIZE_MSB | FLAG_CODED_PTS;
                ft->stream_id = stream_id;
                ft->size_mul  = 1;
                if (is_audio)
                    ft->header_idx = find_header_idx(s, codec, -1, key_frame);
                start2++;
            }
        }

        key_frame = intra_only;
#if 1
        if (is_audio) {
            /* dedicated codes for the typical CBR audio frame size
             * (+0/+1 byte, pts delta 0 or one frame) */
            int frame_bytes = codec->frame_size * (int64_t)codec->bit_rate /
                              (8 * codec->sample_rate);
            int pts;
            for (pts = 0; pts < 2; pts++) {
                for (pred = 0; pred < 2; pred++) {
                    FrameCode *ft  = &nut->frame_code[start2];
                    ft->flags      = FLAG_KEY * key_frame;
                    ft->stream_id  = stream_id;
                    ft->size_mul   = frame_bytes + 2;
                    ft->size_lsb   = frame_bytes + pred;
                    ft->pts_delta  = pts * frame_size;
                    ft->header_idx = find_header_idx(s, codec, frame_bytes + pred, key_frame);
                    start2++;
                }
            }
        } else {
            /* one keyframe code with explicit size, pts step of a frame */
            FrameCode *ft = &nut->frame_code[start2];
            ft->flags     = FLAG_KEY | FLAG_SIZE_MSB;
            ft->stream_id = stream_id;
            ft->size_mul  = 1;
            ft->pts_delta = frame_size;
            start2++;
        }
#endif

        /* candidate pts deltas (in frames) depending on codec structure */
        if (codec->has_b_frames) {
            pred_count    = 5;
            pred_table[0] = -2;
            pred_table[1] = -1;
            pred_table[2] = 1;
            pred_table[3] = 3;
            pred_table[4] = 4;
        } else if (codec->codec_id == AV_CODEC_ID_VORBIS) {
            pred_count    = 3;
            pred_table[0] = 2;
            pred_table[1] = 9;
            pred_table[2] = 16;
        } else {
            pred_count    = 1;
            pred_table[0] = 1;
        }

        /* spread the remaining codes over the pts-delta candidates,
         * encoding the size LSB directly in the code value */
        for (pred = 0; pred < pred_count; pred++) {
            int start3 = start2 + (end2 - start2) * pred / pred_count;
            int end3   = start2 + (end2 - start2) * (pred + 1) / pred_count;

            pred_table[pred] *= frame_size;

            for (index = start3; index < end3; index++) {
                FrameCode *ft = &nut->frame_code[index];
                ft->flags     = FLAG_KEY * key_frame;
                ft->flags    |= FLAG_SIZE_MSB;
                ft->stream_id = stream_id;
                //FIXME use single byte size and pred from last
                ft->size_mul  = end3 - start3;
                ft->size_lsb  = index - start3;
                ft->pts_delta = pred_table[pred];
                if (is_audio)
                    ft->header_idx = find_header_idx(s, codec, -1, key_frame);
            }
        }
    }
    /* shift everything past 'N' up by one and reserve 0, 255 and 'N' */
    memmove(&nut->frame_code['N' + 1], &nut->frame_code['N'], sizeof(FrameCode) * (255 - 'N'));
    nut->frame_code[0].flags   =
    nut->frame_code[255].flags =
    nut->frame_code['N'].flags = FLAG_INVALID;
}
 
/* Write a universal timestamp: the value is multiplexed with the index
 * of its time base (an element of the nut->time_base array). */
static void put_tt(NUTContext *nut, AVRational *time_base, AVIOContext *bc, uint64_t val)
{
    uint64_t tb_index = time_base - nut->time_base;

    ff_put_v(bc, val * nut->time_base_count + tb_index);
}
/**
* Store a string as vb.
*/
/**
 * Store a string as vb: variable-length byte count followed by the raw
 * bytes, no terminator.
 */
static void put_str(AVIOContext *bc, const char *string)
{
    int n = strlen(string);

    ff_put_v(bc, n);
    avio_write(bc, string, n);
}
 
/* Write a signed value with zigzag mapping: positives map to odd codes
 * (2v-1), zero/negatives to even codes (-2v). */
static void put_s(AVIOContext *bc, int64_t val)
{
    if (val > 0)
        ff_put_v(bc, 2 * val - 1);
    else
        ff_put_v(bc, -2 * val);
}
 
#ifdef TRACE
/* Debug tracing: log every written value together with its call site,
 * then forward to the real writer. The macros below reroute all later
 * ff_put_v()/put_s() calls in this file through these wrappers. */
static inline void ff_put_v_trace(AVIOContext *bc, uint64_t v, const char *file,
                                  const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "ff_put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    ff_put_v(bc, v);
}

static inline void put_s_trace(AVIOContext *bc, int64_t v, const char *file, const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_s(bc, v);
}
#define ff_put_v(bc, v) ff_put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define put_s(bc, v)    put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif
 
//FIXME remove calculate_checksum
/**
 * Emit the contents of a dynamic buffer as one NUT packet: startcode,
 * forward pointer, header checksum (only for packets larger than 4096
 * bytes), payload, and optional payload checksum. Closes and frees the
 * dynamic buffer.
 */
static void put_packet(NUTContext *nut, AVIOContext *bc, AVIOContext *dyn_bc,
                       int calculate_checksum, uint64_t startcode)
{
    uint8_t *dyn_buf = NULL;
    int dyn_size     = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    /* forward pointer includes the payload CRC when one is written */
    int forw_ptr     = dyn_size + 4 * calculate_checksum;

    if (forw_ptr > 4096)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_wb64(bc, startcode);
    ff_put_v(bc, forw_ptr);
    if (forw_ptr > 4096)
        avio_wl32(bc, ffio_get_checksum(bc));

    if (calculate_checksum)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_write(bc, dyn_buf, dyn_size);
    if (calculate_checksum)
        avio_wl32(bc, ffio_get_checksum(bc));

    av_free(dyn_buf);
}
 
/**
 * Serialize the NUT main header: version, stream/time-base counts, the
 * frame_code table (run-length compressed: consecutive codes differing
 * only by size_lsb are folded into one record), and the elision headers.
 */
static void write_mainheader(NUTContext *nut, AVIOContext *bc)
{
    int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields,
        tmp_head_idx;
    int64_t tmp_match;

    ff_put_v(bc, NUT_VERSION);
    ff_put_v(bc, nut->avf->nb_streams);
    ff_put_v(bc, nut->max_distance);
    ff_put_v(bc, nut->time_base_count);

    for (i = 0; i < nut->time_base_count; i++) {
        ff_put_v(bc, nut->time_base[i].num);
        ff_put_v(bc, nut->time_base[i].den);
    }

    /* defaults assumed by the reader before the first record */
    tmp_pts      = 0;
    tmp_mul      = 1;
    tmp_stream   = 0;
    tmp_match    = 1 - (1LL << 62);
    tmp_head_idx = 0;
    for (i = 0; i < 256; ) {
        /* tmp_fields = number of fields that must be written explicitly
         * because they differ from the previous record's values */
        tmp_fields = 0;
        tmp_size   = 0;
        // tmp_res=0;
        if (tmp_pts      != nut->frame_code[i].pts_delta ) tmp_fields = 1;
        if (tmp_mul      != nut->frame_code[i].size_mul  ) tmp_fields = 2;
        if (tmp_stream   != nut->frame_code[i].stream_id ) tmp_fields = 3;
        if (tmp_size     != nut->frame_code[i].size_lsb  ) tmp_fields = 4;
        // if (tmp_res != nut->frame_code[i].res        ) tmp_fields=5;
        if (tmp_head_idx != nut->frame_code[i].header_idx) tmp_fields = 8;

        tmp_pts      = nut->frame_code[i].pts_delta;
        tmp_flags    = nut->frame_code[i].flags;
        tmp_stream   = nut->frame_code[i].stream_id;
        tmp_mul      = nut->frame_code[i].size_mul;
        tmp_size     = nut->frame_code[i].size_lsb;
        // tmp_res   = nut->frame_code[i].res;
        tmp_head_idx = nut->frame_code[i].header_idx;

        /* count how many consecutive codes share these parameters with
         * size_lsb incremented by one each step ('N' is skipped) */
        for (j = 0; i < 256; j++, i++) {
            if (i == 'N') {
                j--;
                continue;
            }
            if (nut->frame_code[i].pts_delta  != tmp_pts      ||
                nut->frame_code[i].flags      != tmp_flags    ||
                nut->frame_code[i].stream_id  != tmp_stream   ||
                nut->frame_code[i].size_mul   != tmp_mul      ||
                nut->frame_code[i].size_lsb   != tmp_size + j ||
                // nut->frame_code[i].res     != tmp_res     ||
                nut->frame_code[i].header_idx != tmp_head_idx)
                break;
        }
        /* a non-default run length forces the count field to be coded */
        if (j != tmp_mul - tmp_size)
            tmp_fields = 6;

        ff_put_v(bc, tmp_flags);
        ff_put_v(bc, tmp_fields);
        if (tmp_fields > 0) put_s(bc, tmp_pts);
        if (tmp_fields > 1) ff_put_v(bc, tmp_mul);
        if (tmp_fields > 2) ff_put_v(bc, tmp_stream);
        if (tmp_fields > 3) ff_put_v(bc, tmp_size);
        if (tmp_fields > 4) ff_put_v(bc, 0 /*tmp_res*/);
        if (tmp_fields > 5) ff_put_v(bc, j);
        if (tmp_fields > 6) ff_put_v(bc, tmp_match);
        if (tmp_fields > 7) ff_put_v(bc, tmp_head_idx);
    }
    /* elision headers, slot 0 excluded */
    ff_put_v(bc, nut->header_count - 1);
    for (i = 1; i < nut->header_count; i++) {
        ff_put_v(bc, nut->header_len[i]);
        avio_write(bc, nut->header[i], nut->header_len[i]);
    }
}
 
/**
 * Serialize one stream header: class, fourcc, time base index, pts
 * parameters, extradata and codec-type-specific fields.
 *
 * @return 0 on success, AVERROR(EINVAL) if the stream has no codec tag
 */
static int write_streamheader(AVFormatContext *avctx, AVIOContext *bc,
                              AVStream *st, int i)
{
    NUTContext *nut = avctx->priv_data;
    AVCodecContext *codec = st->codec;

    ff_put_v(bc, i);
    /* stream class: 0 video, 1 audio, 2 subtitle, 3 other */
    switch (codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:    ff_put_v(bc, 0); break;
    case AVMEDIA_TYPE_AUDIO:    ff_put_v(bc, 1); break;
    case AVMEDIA_TYPE_SUBTITLE: ff_put_v(bc, 2); break;
    default:                    ff_put_v(bc, 3); break;
    }
    ff_put_v(bc, 4); /* fourcc length */
    if (codec->codec_tag) {
        avio_wl32(bc, codec->codec_tag);
    } else {
        av_log(avctx, AV_LOG_ERROR, "No codec tag defined for stream %d\n", i);
        return AVERROR(EINVAL);
    }

    /* index of this stream's time base in the shared table */
    ff_put_v(bc, nut->stream[i].time_base - nut->time_base);
    ff_put_v(bc, nut->stream[i].msb_pts_shift);
    ff_put_v(bc, nut->stream[i].max_pts_distance);
    ff_put_v(bc, codec->has_b_frames);
    avio_w8(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */

    ff_put_v(bc, codec->extradata_size);
    avio_write(bc, codec->extradata, codec->extradata_size);

    switch (codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        ff_put_v(bc, codec->sample_rate);
        ff_put_v(bc, 1); /* sample rate denominator */
        ff_put_v(bc, codec->channels);
        break;
    case AVMEDIA_TYPE_VIDEO:
        ff_put_v(bc, codec->width);
        ff_put_v(bc, codec->height);

        /* invalid aspect ratios are stored as 0/0 */
        if (st->sample_aspect_ratio.num <= 0 ||
            st->sample_aspect_ratio.den <= 0) {
            ff_put_v(bc, 0);
            ff_put_v(bc, 0);
        } else {
            ff_put_v(bc, st->sample_aspect_ratio.num);
            ff_put_v(bc, st->sample_aspect_ratio.den);
        }
        ff_put_v(bc, 0); /* csp type -- unknown */
        break;
    default:
        break;
    }
    return 0;
}
 
/* Append one info item (name, type -1 == UTF-8 string, value).
 * Always returns 1 so callers can accumulate the item count. */
static int add_info(AVIOContext *bc, const char *type, const char *value)
{
    put_str(bc, type);
    put_s(bc, -1);
    put_str(bc, value);
    return 1;
}
 
/**
 * Write the global (container-level) info packet body: all entries from
 * the format context's metadata dictionary.
 */
static int write_globalinfo(NUTContext *nut, AVIOContext *bc)
{
    AVFormatContext *s   = nut->avf;
    AVDictionaryEntry *t = NULL;
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    int count = 0, dyn_size;
    /* items go to a scratch buffer first so the count can precede them */
    int ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, 0); //stream_if_plus1
    ff_put_v(bc, 0); //chapter_id
    ff_put_v(bc, 0); //timestamp_start
    ff_put_v(bc, 0); //length

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_free(dyn_buf);
    return 0;
}
 
/**
 * Write a per-stream info packet body: stream metadata, dispositions
 * and, for video streams, the original frame rate.
 *
 * @return number of info items written (0 tells the caller to drop the
 *         packet), or a negative AVERROR code.
 */
static int write_streaminfo(NUTContext *nut, AVIOContext *bc, int stream_id) {
    AVFormatContext *s = nut->avf;
    AVStream *st       = s->streams[stream_id];
    AVDictionaryEntry *t = NULL;
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    int count = 0, dyn_size, i;
    int ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    while ((t = av_dict_get(st->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);
    for (i = 0; ff_nut_dispositions[i].flag; ++i) {
        if (st->disposition & ff_nut_dispositions[i].flag)
            count += add_info(dyn_bc, "Disposition", ff_nut_dispositions[i].str);
    }
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* char, not uint8_t: snprintf() takes char *, and the implicit
         * pointer conversion between differently-signed types is invalid */
        char buf[256];
        snprintf(buf, sizeof(buf), "%d/%d", st->codec->time_base.den, st->codec->time_base.num);
        count += add_info(dyn_bc, "r_frame_rate", buf);
    }
    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);

    if (count) {
        ff_put_v(bc, stream_id + 1); //stream_id_plus1
        ff_put_v(bc, 0);             //chapter_id
        ff_put_v(bc, 0);             //timestamp_start
        ff_put_v(bc, 0);             //length

        ff_put_v(bc, count);

        avio_write(bc, dyn_buf, dyn_size);
    }

    av_free(dyn_buf);
    return count;
}
 
/**
 * Write the info packet body for chapter @p id: chapter id/start/length
 * followed by the chapter's metadata items.
 */
static int write_chapter(NUTContext *nut, AVIOContext *bc, int id)
{
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf     = NULL;
    AVDictionaryEntry *t = NULL;
    AVChapter *ch        = nut->avf->chapters[id];
    int ret, dyn_size, count = 0;

    /* metadata items are buffered so the count can be written first */
    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    ff_put_v(bc, 0);                                        // stream_id_plus1
    put_s(bc, id + 1);                                      // chapter_id
    put_tt(nut, nut->chapter[id].time_base, bc, ch->start); // chapter_start
    ff_put_v(bc, ch->end - ch->start);                      // chapter_len

    while ((t = av_dict_get(ch->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_freep(&dyn_buf);
    return 0;
}
 
/**
 * Write the seek index: max pts, syncpoint position deltas, and per
 * stream the run-length coded keyframe presence plus keyframe pts
 * deltas. Ends with the backward size field used to locate the index
 * from the end of the file.
 */
static int write_index(NUTContext *nut, AVIOContext *bc) {
    int i;
    Syncpoint dummy = { .pos = 0 };
    Syncpoint *next_node[2] = { NULL };
    int64_t startpos = avio_tell(bc);
    int64_t payload_size;

    put_tt(nut, nut->max_pts_tb, bc, nut->max_pts);

    ff_put_v(bc, nut->sp_count);

    /* syncpoint positions, delta coded in units of 16 bytes */
    for (i = 0; i < nut->sp_count; i++) {
        av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp, (void**)next_node);
        ff_put_v(bc, (next_node[1]->pos >> 4) - (dummy.pos>>4));
        dummy.pos = next_node[1]->pos;
    }

    for (i = 0; i < nut->avf->nb_streams; i++) {
        StreamContext *nus = &nut->stream[i];
        int64_t last_pts = -1;
        int j, k;
        for (j = 0; j < nut->sp_count; j++) {
            /* run-length code runs of syncpoints with/without keyframes */
            int flag = (nus->keyframe_pts[j] != AV_NOPTS_VALUE) ^ (j+1 == nut->sp_count);
            int n    = 0;
            for (; j < nut->sp_count && (nus->keyframe_pts[j] != AV_NOPTS_VALUE) == flag; j++)
                n++;

            ff_put_v(bc, 1 + 2 * flag + 4 * n);
            /* then the pts deltas of the keyframes inside that run */
            for (k = j - n; k <= j && k < nut->sp_count; k++) {
                if (nus->keyframe_pts[k] == AV_NOPTS_VALUE)
                    continue;
                av_assert0(nus->keyframe_pts[k] > last_pts);
                ff_put_v(bc, nus->keyframe_pts[k] - last_pts);
                last_pts = nus->keyframe_pts[k];
            }
        }
    }

    /* payload so far + 8 (startcode) + 4 (trailing CRC) */
    payload_size = avio_tell(bc) - startpos + 8 + 4;

    /* size of the whole index packet, counted from its own end */
    avio_wb64(bc, 8 + payload_size + av_log2(payload_size) / 7 + 1 + 4 * (payload_size > 4096));

    return 0;
}
 
/**
 * Write the complete header group: main header, one header per stream,
 * global info, per-stream info (only when non-empty) and chapters.
 * Increments header_count so the caller can schedule header repetition.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int write_headers(AVFormatContext *avctx, AVIOContext *bc)
{
    NUTContext *nut = avctx->priv_data;
    AVIOContext *dyn_bc;
    int i, ret;

    ff_metadata_conv_ctx(avctx, ff_nut_metadata_conv, NULL);

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;
    write_mainheader(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, MAIN_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_streamheader(avctx, dyn_bc, nut->avf->streams[i], i);
        if (ret < 0) {
            /* close and free the dynamic buffer; previously it leaked
             * on this error path */
            uint8_t *buf;
            avio_close_dyn_buf(dyn_bc, &buf);
            av_free(buf);
            return ret;
        }
        put_packet(nut, bc, dyn_bc, 1, STREAM_STARTCODE);
    }

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;
    write_globalinfo(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_streaminfo(nut, dyn_bc, i);
        if (ret > 0)
            put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
        else {
            /* empty info (ret == 0) or error (ret < 0): discard buffer */
            uint8_t *buf;
            avio_close_dyn_buf(dyn_bc, &buf);
            av_free(buf);
            if (ret < 0)
                return ret; /* previously leaked the buffer before returning */
        }
    }

    for (i = 0; i < nut->avf->nb_chapters; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_chapter(nut, dyn_bc, i);
        if (ret < 0) {
            uint8_t *buf;
            avio_close_dyn_buf(dyn_bc, &buf);
            av_freep(&buf);
            return ret;
        }
        put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
    }

    /* forces a fresh syncpoint before the next frame */
    nut->last_syncpoint_pos = INT_MIN;
    nut->header_count++;
    return 0;
}
 
/**
 * Muxer init: allocate per-stream/chapter state, build the shared time
 * base table, choose pts parameters, build frame codes and elision
 * headers, then write the file magic and the first header group.
 */
static int nut_write_header(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;
    int i, j, ret;

    nut->avf = s;

    nut->stream   = av_calloc(s->nb_streams,  sizeof(*nut->stream ));
    nut->chapter  = av_calloc(s->nb_chapters, sizeof(*nut->chapter));
    nut->time_base= av_calloc(s->nb_streams +
                              s->nb_chapters, sizeof(*nut->time_base));
    if (!nut->stream || !nut->chapter || !nut->time_base) {
        av_freep(&nut->stream);
        av_freep(&nut->chapter);
        av_freep(&nut->time_base);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        int ssize;
        AVRational time_base;
        ff_parse_specific_params(st->codec, &time_base.den, &ssize, &time_base.num);

        /* audio: tick per sample; otherwise a generic timebase */
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->codec->sample_rate) {
            time_base = (AVRational) {1, st->codec->sample_rate};
        } else {
            time_base = ff_choose_timebase(s, st, 48000);
        }

        avpriv_set_pts_info(st, 64, time_base.num, time_base.den);

        /* deduplicate the time base into the shared table */
        for (j = 0; j < nut->time_base_count; j++)
            if (!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))) {
                break;
            }
        nut->time_base[j]        = time_base;
        nut->stream[i].time_base = &nut->time_base[j];
        if (j == nut->time_base_count)
            nut->time_base_count++;

        /* coarse timebases get a small msb shift, fine ones a large one */
        if (INT64_C(1000) * time_base.num >= time_base.den)
            nut->stream[i].msb_pts_shift = 7;
        else
            nut->stream[i].msb_pts_shift = 14;
        nut->stream[i].max_pts_distance =
            FFMAX(time_base.den, time_base.num) / time_base.num;
    }

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *ch = s->chapters[i];

        /* chapters share the same deduplicated time base table */
        for (j = 0; j < nut->time_base_count; j++)
            if (!memcmp(&ch->time_base, &nut->time_base[j], sizeof(AVRational)))
                break;

        nut->time_base[j]         = ch->time_base;
        nut->chapter[i].time_base = &nut->time_base[j];
        if (j == nut->time_base_count)
            nut->time_base_count++;
    }

    nut->max_distance = MAX_DISTANCE;
    build_elision_headers(s);
    build_frame_code(s);
    av_assert0(nut->frame_code['N'].flags == FLAG_INVALID);

    /* file magic followed by a NUL byte */
    avio_write(bc, ID_STRING, strlen(ID_STRING));
    avio_w8(bc, 0);

    if ((ret = write_headers(s, bc)) < 0)
        return ret;

    if (s->avoid_negative_ts < 0)
        s->avoid_negative_ts = 1;

    avio_flush(bc);

    return 0;
}
 
/* Compute which frame flags this packet requires beyond what frame code
 * fc encodes implicitly; FLAG_CODED is carried over from fc itself. */
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc,
                            AVPacket *pkt)
{
    int needed = fc->flags & FLAG_CODED;

    if (pkt->flags & AV_PKT_FLAG_KEY)
        needed |= FLAG_KEY;
    if (pkt->stream_index != fc->stream_id)
        needed |= FLAG_STREAM_ID;
    if (pkt->size / fc->size_mul)
        needed |= FLAG_SIZE_MSB;
    if (pkt->pts - nus->last_pts != fc->pts_delta)
        needed |= FLAG_CODED_PTS;
    /* large frames or large pts jumps must carry a checksum */
    if (pkt->size > 2 * nut->max_distance ||
        FFABS(pkt->pts - nus->last_pts) > nus->max_pts_distance)
        needed |= FLAG_CHECKSUM;
    /* the elided header must actually be a prefix of the packet */
    if (pkt->size < nut->header_len[fc->header_idx] ||
        (pkt->size > 4096 && fc->header_idx)        ||
        memcmp(pkt->data, nut->header[fc->header_idx],
               nut->header_len[fc->header_idx]))
        needed |= FLAG_HEADER_IDX;

    return needed;
}
 
/* Pick the longest elision header that is a prefix of the packet, or 0
 * when none matches (or the packet is too large to elide). */
static int find_best_header_idx(NUTContext *nut, AVPacket *pkt)
{
    int idx, winner = 0, winner_len = 0;

    if (pkt->size > 4096)
        return 0;

    for (idx = 1; idx < nut->header_count; idx++) {
        int len = nut->header_len[idx];

        if (len > winner_len && pkt->size >= len &&
            !memcmp(pkt->data, nut->header[idx], len)) {
            winner     = idx;
            winner_len = len;
        }
    }
    return winner;
}
 
/**
 * Write one packet: emit a syncpoint when due, pick the cheapest frame
 * code for this packet, then write the frame header and payload (with
 * the elided header prefix stripped), and update index/seek state.
 */
static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    NUTContext *nut    = s->priv_data;
    StreamContext *nus = &nut->stream[pkt->stream_index];
    AVIOContext *bc    = s->pb, *dyn_bc;
    FrameCode *fc;
    int64_t coded_pts;
    int best_length, frame_code, flags, needed_flags, i, header_idx;
    int best_header_idx;
    int key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
    int store_sp  = 0;
    int ret;

    if (pkt->pts < 0) {
        av_log(s, AV_LOG_ERROR,
               "Negative pts not supported stream %d, pts %"PRId64"\n",
               pkt->stream_index, pkt->pts);
        return AVERROR(EINVAL);
    }

    /* repeat the headers at exponentially growing file offsets */
    if (1LL << (20 + 3 * nut->header_count) <= avio_tell(bc))
        write_headers(s, bc);

    /* syncpoint on the first keyframe after non-key frames ... */
    if (key_frame && !(nus->last_flags & FLAG_KEY))
        store_sp = 1;

    /* ... or when max_distance would be exceeded */
    if (pkt->size + 30 /*FIXME check*/ + avio_tell(bc) >= nut->last_syncpoint_pos + nut->max_distance)
        store_sp = 1;

//FIXME: Ensure store_sp is 1 in the first place.

    if (store_sp) {
        Syncpoint *sp, dummy = { .pos = INT64_MAX };

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        /* find the earliest position any stream may need to reach this
         * dts: that becomes the back pointer */
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            int64_t dts_tb = av_rescale_rnd(pkt->dts,
                nus->time_base->num * (int64_t)nut->stream[i].time_base->den,
                nus->time_base->den * (int64_t)nut->stream[i].time_base->num,
                AV_ROUND_DOWN);
            int index = av_index_search_timestamp(st, dts_tb,
                                                  AVSEEK_FLAG_BACKWARD);
            if (index >= 0)
                dummy.pos = FFMIN(dummy.pos, st->index_entries[index].pos);
        }
        if (dummy.pos == INT64_MAX)
            dummy.pos = 0;
        sp = av_tree_find(nut->syncpoints, &dummy, (void *)ff_nut_sp_pos_cmp,
                          NULL);

        nut->last_syncpoint_pos = avio_tell(bc);
        ret                     = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        put_tt(nut, nus->time_base, dyn_bc, pkt->dts);
        ff_put_v(dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos) >> 4 : 0);
        put_packet(nut, bc, dyn_bc, 1, SYNCPOINT_STARTCODE);

        if ((ret = ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0 /*unused*/, pkt->dts)) < 0)
            return ret;

        /* grow the per-stream keyframe pts arrays with the syncpoints */
        if ((1ll << 60) % nut->sp_count == 0)
            for (i = 0; i < s->nb_streams; i++) {
                int j;
                StreamContext *nus = &nut->stream[i];
                av_reallocp_array(&nus->keyframe_pts, 2*nut->sp_count, sizeof(*nus->keyframe_pts));
                if (!nus->keyframe_pts)
                    return AVERROR(ENOMEM);
                for (j = nut->sp_count == 1 ? 0 : nut->sp_count; j < 2*nut->sp_count; j++)
                    nus->keyframe_pts[j] = AV_NOPTS_VALUE;
            }
    }
    av_assert0(nus->last_pts != AV_NOPTS_VALUE);

    /* lsb-coded pts; bump by one msb period if it would not round-trip */
    coded_pts = pkt->pts & ((1 << nus->msb_pts_shift) - 1);
    if (ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts = pkt->pts + (1 << nus->msb_pts_shift);

    best_header_idx = find_best_header_idx(nut, pkt);

    /* exhaustively cost every usable frame code and keep the cheapest */
    best_length = INT_MAX;
    frame_code  = -1;
    for (i = 0; i < 256; i++) {
        int length    = 0;
        FrameCode *fc = &nut->frame_code[i];
        int flags     = fc->flags;

        if (flags & FLAG_INVALID)
            continue;
        needed_flags = get_needed_flags(nut, nus, fc, pkt);

        if (flags & FLAG_CODED) {
            length++;
            flags = needed_flags;
        }

        if ((flags & needed_flags) != needed_flags)
            continue;

        if ((flags ^ needed_flags) & FLAG_KEY)
            continue;

        if (flags & FLAG_STREAM_ID)
            length += ff_get_v_length(pkt->stream_index);

        if (pkt->size % fc->size_mul != fc->size_lsb)
            continue;
        if (flags & FLAG_SIZE_MSB)
            length += ff_get_v_length(pkt->size / fc->size_mul);

        if (flags & FLAG_CHECKSUM)
            length += 4;

        if (flags & FLAG_CODED_PTS)
            length += ff_get_v_length(coded_pts);

        if (   (flags & FLAG_CODED)
            && nut->header_len[best_header_idx] > nut->header_len[fc->header_idx] + 1) {
            flags |= FLAG_HEADER_IDX;
        }

        if (flags & FLAG_HEADER_IDX) {
            length += 1 - nut->header_len[best_header_idx];
        } else {
            length -= nut->header_len[fc->header_idx];
        }

        /* weight byte cost highest, then prefer implicit pts/checksum */
        length *= 4;
        length += !(flags & FLAG_CODED_PTS);
        length += !(flags & FLAG_CHECKSUM);

        if (length < best_length) {
            best_length = length;
            frame_code  = i;
        }
    }
    av_assert0(frame_code != -1);
    fc           = &nut->frame_code[frame_code];
    flags        = fc->flags;
    needed_flags = get_needed_flags(nut, nus, fc, pkt);
    header_idx   = fc->header_idx;

    ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_w8(bc, frame_code);
    if (flags & FLAG_CODED) {
        ff_put_v(bc, (flags ^ needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;
    }
    if (flags & FLAG_STREAM_ID)  ff_put_v(bc, pkt->stream_index);
    if (flags & FLAG_CODED_PTS)  ff_put_v(bc, coded_pts);
    if (flags & FLAG_SIZE_MSB )  ff_put_v(bc, pkt->size / fc->size_mul);
    if (flags & FLAG_HEADER_IDX) ff_put_v(bc, header_idx = best_header_idx);

    if (flags & FLAG_CHECKSUM)   avio_wl32(bc, ffio_get_checksum(bc));
    else                         ffio_get_checksum(bc);

    /* payload without the elided header prefix */
    avio_write(bc, pkt->data + nut->header_len[header_idx], pkt->size - nut->header_len[header_idx]);
    nus->last_flags = flags;
    nus->last_pts   = pkt->pts;

    //FIXME just store one per syncpoint
    if (flags & FLAG_KEY) {
        av_add_index_entry(
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
            pkt->pts,
            0,
            0,
            AVINDEX_KEYFRAME);
        if (nus->keyframe_pts && nus->keyframe_pts[nut->sp_count] == AV_NOPTS_VALUE)
            nus->keyframe_pts[nut->sp_count] = pkt->pts;
    }

    /* track the largest pts seen, needed by write_index() */
    if (!nut->max_pts_tb || av_compare_ts(nut->max_pts, *nut->max_pts_tb, pkt->pts, *nus->time_base) < 0) {
        nut->max_pts    = pkt->pts;
        nut->max_pts_tb = nus->time_base;
    }

    return 0;
}
 
/**
 * Finalize the file: repeat headers until at least three copies exist,
 * append the seek index (when any syncpoints were written) and free all
 * muxer-private state.
 */
static int nut_write_trailer(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb, *dyn_bc;
    int i, ret;

    while (nut->header_count < 3)
        write_headers(s, bc);

    /* only open the dynamic buffer when an index will be written;
     * previously it was opened unconditionally and leaked when
     * sp_count was 0 */
    if (nut->sp_count) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret >= 0) {
            write_index(nut, dyn_bc);
            put_packet(nut, bc, dyn_bc, 1, INDEX_STARTCODE);
        }
    }

    ff_nut_free_sp(nut);
    for (i = 0; i < s->nb_streams; i++)
        av_freep(&nut->stream[i].keyframe_pts);

    av_freep(&nut->stream);
    av_freep(&nut->chapter);
    av_freep(&nut->time_base);

    return 0;
}
 
/* Muxer registration; default audio codec depends on what encoders were
 * compiled in (Vorbis, then LAME MP3, then MP2). */
AVOutputFormat ff_nut_muxer = {
    .name           = "nut",
    .long_name      = NULL_IF_CONFIG_SMALL("NUT"),
    .mime_type      = "video/x-nut",
    .extensions     = "nut",
    .priv_data_size = sizeof(NUTContext),
    .audio_codec    = CONFIG_LIBVORBIS ? AV_CODEC_ID_VORBIS :
                      CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_MP2,
    .video_codec    = AV_CODEC_ID_MPEG4,
    .write_header   = nut_write_header,
    .write_packet   = nut_write_packet,
    .write_trailer  = nut_write_trailer,
    .flags          = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
    .codec_tag      = ff_nut_codec_tags,
};
/contrib/sdk/sources/ffmpeg/libavformat/nuv.c
0,0 → 1,390
/*
* NuppelVideo demuxer.
* Copyright (c) 2006 Reimar Doeffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "avformat.h"
#include "internal.h"
#include "riff.h"
 
/* Audio fourcc -> codec id mapping specific to NuppelVideo files. */
static const AVCodecTag nuv_audio_tags[] = {
    { AV_CODEC_ID_PCM_S16LE, MKTAG('R', 'A', 'W', 'A') },
    { AV_CODEC_ID_MP3,       MKTAG('L', 'A', 'M', 'E') },
    { AV_CODEC_ID_NONE,      0 },
};
 
typedef struct {
    int v_id;        // video stream index, -1 if the file has none
    int a_id;        // audio stream index, -1 if the file has none
    int rtjpg_video; // nonzero when the video stream is RTjpeg/NUV coded
} NUVContext;
 
/* Frame type byte at the start of each NUV chunk. */
typedef enum {
    NUV_VIDEO     = 'V',
    NUV_EXTRADATA = 'D',
    NUV_AUDIO     = 'A',
    NUV_SEEKP     = 'R', // seek point marker, fixed-size payload
    NUV_MYTHEXT   = 'X'  // MythTV extension block with codec parameters
} nuv_frametype;
 
/* Probe: both magic strings are 11 characters and are compared over 12
 * bytes, i.e. including their NUL terminator. */
static int nuv_probe(AVProbeData *p)
{
    static const char nuppel[12] = "NuppelVideo";
    static const char mythtv[12] = "MythTVVideo";

    if (!memcmp(p->buf, nuppel, 12) || !memcmp(p->buf, mythtv, 12))
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
/// little macro to sanitize packet size (mask to 24 bits); the argument
/// is parenthesized so lower-precedence expressions expand correctly
#define PKTSIZE(s) ((s) & 0xffffff)
 
/**
 * @brief read until we found all data needed for decoding
 * @param vst video stream of which to change parameters
 * @param ast video stream of which to change parameters
 * @param myth set if this is a MythTVVideo format file
 * @return 0 or AVERROR code
 */
static int get_codec_data(AVIOContext *pb, AVStream *vst,
                          AVStream *ast, int myth)
{
    nuv_frametype frametype;

    if (!vst && !myth)
        return 1; // no codec data needed
    /* scan chunks until extradata (and, for MythTV, the 'X' block) found */
    while (!url_feof(pb)) {
        int size, subtype;

        frametype = avio_r8(pb);
        switch (frametype) {
        case NUV_EXTRADATA:
            subtype = avio_r8(pb);
            avio_skip(pb, 6);
            size = PKTSIZE(avio_rl32(pb));
            /* subtype 'R' carries RTjpeg video extradata */
            if (vst && subtype == 'R') {
                if (vst->codec->extradata) {
                    av_freep(&vst->codec->extradata);
                    vst->codec->extradata_size = 0;
                }
                if (ff_alloc_extradata(vst->codec, size))
                    return AVERROR(ENOMEM);
                avio_read(pb, vst->codec->extradata, size);
                size = 0;
                if (!myth)
                    return 0; /* plain NuppelVideo: nothing more needed */
            }
            break;
        case NUV_MYTHEXT:
            avio_skip(pb, 7);
            size = PKTSIZE(avio_rl32(pb));
            if (size != 128 * 4) /* expected fixed extension size */
                break;
            avio_rl32(pb); // version
            if (vst) {
                vst->codec->codec_tag = avio_rl32(pb);
                vst->codec->codec_id =
                    ff_codec_get_id(ff_codec_bmp_tags, vst->codec->codec_tag);
                if (vst->codec->codec_tag == MKTAG('R', 'J', 'P', 'G'))
                    vst->codec->codec_id = AV_CODEC_ID_NUV;
            } else
                avio_skip(pb, 4);

            if (ast) {
                int id;

                ast->codec->codec_tag             = avio_rl32(pb);
                ast->codec->sample_rate           = avio_rl32(pb);
                ast->codec->bits_per_coded_sample = avio_rl32(pb);
                ast->codec->channels              = avio_rl32(pb);
                ast->codec->channel_layout        = 0;

                /* resolve the codec id: WAV tags first, then NUV tags,
                 * then fall back to a matching raw PCM codec */
                id = ff_wav_codec_get_id(ast->codec->codec_tag,
                                         ast->codec->bits_per_coded_sample);
                if (id == AV_CODEC_ID_NONE) {
                    id = ff_codec_get_id(nuv_audio_tags, ast->codec->codec_tag);
                    if (id == AV_CODEC_ID_PCM_S16LE)
                        id = ff_get_pcm_codec_id(ast->codec->bits_per_coded_sample,
                                                 0, 0, ~1);
                }
                ast->codec->codec_id = id;

                ast->need_parsing = AVSTREAM_PARSE_FULL;
            } else
                avio_skip(pb, 4 * 4);

            size -= 6 * 4; /* fields consumed above */
            avio_skip(pb, size);
            return 0;
        case NUV_SEEKP:
            size = 11; /* fixed-size seek point payload */
            break;
        default:
            avio_skip(pb, 7);
            size = PKTSIZE(avio_rl32(pb));
            break;
        }
        avio_skip(pb, size);
    }

    return 0;
}
 
/* Parse the NuppelVideo/MythTV file header and create the announced
 * video/audio streams. Returns 0 on success or a negative AVERROR. */
static int nuv_header(AVFormatContext *s)
{
    NUVContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    char id_string[12];
    double aspect, fps;
    int is_mythtv, width, height, v_packs, a_packs, ret;
    AVStream *vst = NULL, *ast = NULL;

    avio_read(pb, id_string, 12);
    is_mythtv = !memcmp(id_string, "MythTVVideo", 12);
    avio_skip(pb, 5);       // version string
    avio_skip(pb, 3);       // padding
    width  = avio_rl32(pb);
    height = avio_rl32(pb);
    avio_rl32(pb);          // unused, "desiredwidth"
    avio_rl32(pb);          // unused, "desiredheight"
    avio_r8(pb);            // 'P' == progressive, 'I' == interlaced
    avio_skip(pb, 3);       // padding
    aspect = av_int2double(avio_rl64(pb));
    /* An aspect of exactly 1.0 is treated as "unset" and replaced with
     * the conventional 4:3 default. */
    if (aspect > 0.9999 && aspect < 1.0001)
        aspect = 4.0 / 3.0;
    fps = av_int2double(avio_rl64(pb));

    // number of packets per stream type, -1 means unknown, e.g. streaming
    v_packs = avio_rl32(pb);
    a_packs = avio_rl32(pb);
    avio_rl32(pb); // text

    avio_rl32(pb); // keyframe distance (?)

    if (v_packs) {
        vst = avformat_new_stream(s, NULL);
        if (!vst)
            return AVERROR(ENOMEM);
        ctx->v_id = vst->index;

        vst->codec->codec_type            = AVMEDIA_TYPE_VIDEO;
        vst->codec->codec_id              = AV_CODEC_ID_NUV;
        vst->codec->width                 = width;
        vst->codec->height                = height;
        vst->codec->bits_per_coded_sample = 10;
        vst->sample_aspect_ratio          = av_d2q(aspect * height / width,
                                                   10000);
#if FF_API_R_FRAME_RATE
        vst->r_frame_rate =
#endif
        vst->avg_frame_rate = av_d2q(fps, 60000);
        avpriv_set_pts_info(vst, 32, 1, 1000);
    } else
        ctx->v_id = -1;

    if (a_packs) {
        ast = avformat_new_stream(s, NULL);
        if (!ast)
            return AVERROR(ENOMEM);
        ctx->a_id = ast->index;

        /* Audio defaults (stereo 16-bit 44.1 kHz PCM); may be overridden
         * by the MythTV extension block in get_codec_data(). */
        ast->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id              = AV_CODEC_ID_PCM_S16LE;
        ast->codec->channels              = 2;
        ast->codec->channel_layout        = AV_CH_LAYOUT_STEREO;
        ast->codec->sample_rate           = 44100;
        ast->codec->bit_rate              = 2 * 2 * 44100 * 8;
        ast->codec->block_align           = 2 * 2;
        ast->codec->bits_per_coded_sample = 16;
        avpriv_set_pts_info(ast, 32, 1, 1000);
    } else
        ctx->a_id = -1;

    if ((ret = get_codec_data(pb, vst, ast, is_mythtv)) < 0)
        return ret;

    /* Only RTjpeg/NUV video needs the frame header copied into packets. */
    ctx->rtjpg_video = vst && vst->codec->codec_id == AV_CODEC_ID_NUV;

    return 0;
}
 
#define HDRSIZE 12
 
/* Read the next audio or video packet. For RTjpeg video the 12-byte
 * frame header is prepended to the payload, as the decoder needs it. */
static int nuv_packet(AVFormatContext *s, AVPacket *pkt)
{
    NUVContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    uint8_t hdr[HDRSIZE];
    nuv_frametype frametype;
    int ret, size;

    while (!url_feof(pb)) {
        int copyhdrsize = ctx->rtjpg_video ? HDRSIZE : 0;
        uint64_t pos    = avio_tell(pb);

        ret = avio_read(pb, hdr, HDRSIZE);
        if (ret < HDRSIZE)
            return ret < 0 ? ret : AVERROR(EIO);

        frametype = hdr[0];
        size      = PKTSIZE(AV_RL32(&hdr[8]));

        switch (frametype) {
        case NUV_EXTRADATA:
            if (!ctx->rtjpg_video) {
                avio_skip(pb, size);
                break;
            }
            /* fall through: for RTjpeg, extradata frames are delivered
             * to the decoder like video frames */
        case NUV_VIDEO:
            if (ctx->v_id < 0) {
                av_log(s, AV_LOG_ERROR, "Video packet in file without video stream!\n");
                avio_skip(pb, size);
                break;
            }
            ret = av_new_packet(pkt, copyhdrsize + size);
            if (ret < 0)
                return ret;

            pkt->pos          = pos;
            /* hdr[2] == 0 marks a keyframe in the NUV frame header. */
            pkt->flags       |= hdr[2] == 0 ? AV_PKT_FLAG_KEY : 0;
            pkt->pts          = AV_RL32(&hdr[4]);
            pkt->stream_index = ctx->v_id;
            memcpy(pkt->data, hdr, copyhdrsize);
            ret = avio_read(pb, pkt->data + copyhdrsize, size);
            if (ret < 0) {
                av_free_packet(pkt);
                return ret;
            }
            /* Truncated read near EOF: keep what we got. */
            if (ret < size)
                av_shrink_packet(pkt, copyhdrsize + ret);
            return 0;
        case NUV_AUDIO:
            if (ctx->a_id < 0) {
                av_log(s, AV_LOG_ERROR, "Audio packet in file without audio stream!\n");
                avio_skip(pb, size);
                break;
            }
            ret           = av_get_packet(pb, pkt, size);
            pkt->flags   |= AV_PKT_FLAG_KEY;
            pkt->pos      = pos;
            pkt->pts      = AV_RL32(&hdr[4]);
            pkt->stream_index = ctx->a_id;
            if (ret < 0)
                return ret;
            return 0;
        case NUV_SEEKP:
            // contains no data, size value is invalid
            break;
        default:
            avio_skip(pb, size);
            break;
        }
    }

    return AVERROR(EIO);
}
 
/**
* \brief looks for the string RTjjjjjjjjjj in the stream to resync reading
* \return 1 if the syncword is found 0 otherwise.
*/
/* Scan the byte stream for the 12-byte "RTjjjjjjjjjj" sync marker,
 * stopping at pos_limit. Returns 1 when the marker is found, else 0. */
static int nuv_resync(AVFormatContext *s, int64_t pos_limit)
{
    AVIOContext *pb = s->pb;
    uint32_t window = 0;

    while (!url_feof(pb) && avio_tell(pb) < pos_limit) {
        /* Slide a 4-byte window one byte at a time. */
        window = (window << 8) | avio_r8(pb);
        if (window != MKBETAG('R', 'T', 'j', 'j'))
            continue;
        /* Candidate found: the next eight bytes must all be 'j'. */
        window = avio_rb32(pb);
        if (window != MKBETAG('j', 'j', 'j', 'j'))
            continue;
        window = avio_rb32(pb);
        if (window == MKBETAG('j', 'j', 'j', 'j'))
            return 1;
    }
    return 0;
}
 
/**
* \brief attempts to read a timestamp from stream at the given stream position
* \return timestamp if successful and AV_NOPTS_VALUE if failure
*/
static int64_t nuv_read_dts(AVFormatContext *s, int stream_index,
                            int64_t *ppos, int64_t pos_limit)
{
    NUVContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    uint8_t hdr[HDRSIZE];
    nuv_frametype frametype;
    int size, key, idx;
    int64_t pos, dts;

    if (avio_seek(pb, *ppos, SEEK_SET) < 0)
        return AV_NOPTS_VALUE;

    /* *ppos may point into the middle of a frame; realign on the
     * RTjjjjjjjjjj marker first. */
    if (!nuv_resync(s, pos_limit))
        return AV_NOPTS_VALUE;

    while (!url_feof(pb) && avio_tell(pb) < pos_limit) {
        if (avio_read(pb, hdr, HDRSIZE) < HDRSIZE)
            return AV_NOPTS_VALUE;
        frametype = hdr[0];
        size = PKTSIZE(AV_RL32(&hdr[8]));
        switch (frametype) {
        case NUV_SEEKP:
            /* no payload; the size field is invalid, so do not skip */
            break;
        case NUV_AUDIO:
        case NUV_VIDEO:
            if (frametype == NUV_VIDEO) {
                idx = ctx->v_id;
                key = hdr[2] == 0; /* hdr[2] == 0 marks a keyframe */
            } else {
                idx = ctx->a_id;
                key = 1;
            }
            if (stream_index == idx) {

                pos = avio_tell(s->pb) - HDRSIZE;
                dts = AV_RL32(&hdr[4]);

                // TODO - add general support in av_gen_search, so it adds positions after reading timestamps
                av_add_index_entry(s->streams[stream_index], pos, dts, size + HDRSIZE, 0,
                                   key ? AVINDEX_KEYFRAME : 0);

                *ppos = pos;
                return dts;
            }
            /* fall through: not the requested stream — skip its payload */
        default:
            avio_skip(pb, size);
            break;
        }
    }
    return AV_NOPTS_VALUE;
}
 
 
/* NuppelVideo/MythTV demuxer registration. */
AVInputFormat ff_nuv_demuxer = {
    .name           = "nuv",
    .long_name      = NULL_IF_CONFIG_SMALL("NuppelVideo"),
    .priv_data_size = sizeof(NUVContext),
    .read_probe     = nuv_probe,
    .read_header    = nuv_header,
    .read_packet    = nuv_packet,
    .read_timestamp = nuv_read_dts,
    .flags          = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/oggdec.c
0,0 → 1,872
/*
* Ogg bitstream support
* Luca Barbato <lu_zero@gentoo.org>
* Based on tcvp implementation
*/
 
/*
Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
 
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
 
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
 
#include <stdio.h>
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "oggdec.h"
#include "avformat.h"
#include "internal.h"
#include "vorbiscomment.h"
 
#define MAX_PAGE_SIZE 65307
#define DECODER_BUFFER_SIZE MAX_PAGE_SIZE
 
/* NULL-terminated table of all codec handlers, probed in order against
 * the first payload bytes of a new logical stream (see ogg_find_codec). */
static const struct ogg_codec * const ogg_codecs[] = {
    &ff_skeleton_codec,
    &ff_dirac_codec,
    &ff_speex_codec,
    &ff_vorbis_codec,
    &ff_theora_codec,
    &ff_flac_codec,
    &ff_celt_codec,
    &ff_opus_codec,
    &ff_old_dirac_codec,
    &ff_old_flac_codec,
    &ff_ogm_video_codec,
    &ff_ogm_audio_codec,
    &ff_ogm_text_codec,
    &ff_ogm_old_codec,
    NULL
};
 
static int64_t ogg_calc_pts(AVFormatContext *s, int idx, int64_t *dts);
static int ogg_new_stream(AVFormatContext *s, uint32_t serial);
 
//FIXME We could avoid some structure duplication
/**
 * Snapshot the demuxer state (stream table, buffers, file position) onto
 * the state stack so it can be rolled back later by ogg_restore().
 * The saved state keeps ownership of the old per-stream buffers; each
 * live stream gets a fresh copy to keep parsing into.
 *
 * @return 0 on success, AVERROR(ENOMEM) if any allocation fails
 */
static int ogg_save(AVFormatContext *s)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_state *ost =
        av_malloc(sizeof(*ost) + (ogg->nstreams - 1) * sizeof(*ogg->streams));
    int i;
    int ret = 0;

    /* Bug fix: the allocation results were previously used unchecked,
     * dereferencing NULL on OOM. */
    if (!ost)
        return AVERROR(ENOMEM);

    ost->pos      = avio_tell(s->pb);
    ost->curidx   = ogg->curidx;
    ost->next     = ogg->state;
    ost->nstreams = ogg->nstreams;
    memcpy(ost->streams, ogg->streams, ogg->nstreams * sizeof(*ogg->streams));

    for (i = 0; i < ogg->nstreams; i++) {
        struct ogg_stream *os = ogg->streams + i;
        /* The old buffer pointer is kept alive inside ost->streams[i]
         * and is either freed or reinstated by ogg_restore(). */
        os->buf = av_mallocz(os->bufsize + FF_INPUT_BUFFER_PADDING_SIZE);
        if (os->buf)
            memcpy(os->buf, ost->streams[i].buf, os->bufpos);
        else
            ret = AVERROR(ENOMEM);
    }

    ogg->state = ost;

    return ret;
}
 
/* Pop the most recent state saved by ogg_save(). With discard set, the
 * saved state is simply dropped; otherwise the stream table, read
 * position and buffers are rolled back to the snapshot. */
static int ogg_restore(AVFormatContext *s, int discard)
{
    struct ogg *ogg = s->priv_data;
    AVIOContext *bc = s->pb;
    struct ogg_state *ost = ogg->state;
    int i, err;

    if (!ost)
        return 0;

    ogg->state = ost->next;

    if (!discard) {

        /* Free the scratch buffers allocated by ogg_save(); the snapshot
         * still holds the original buffer pointers. */
        for (i = 0; i < ogg->nstreams; i++)
            av_freep(&ogg->streams[i].buf);

        avio_seek(bc, ost->pos, SEEK_SET);
        ogg->page_pos = -1;
        ogg->curidx   = ost->curidx;
        ogg->nstreams = ost->nstreams;
        if ((err = av_reallocp_array(&ogg->streams, ogg->nstreams,
                                     sizeof(*ogg->streams))) < 0) {
            ogg->nstreams = 0;
            return err;
        } else
            memcpy(ogg->streams, ost->streams,
                   ost->nstreams * sizeof(*ogg->streams));
    }

    av_free(ost);

    return 0;
}
 
/* Reset all per-stream parsing state after a seek or I/O reposition,
 * discarding any partially assembled packets. Always returns 0. */
static int ogg_reset(AVFormatContext *s)
{
    struct ogg *ogg = s->priv_data;
    int64_t start_pos = avio_tell(s->pb);
    int n;

    for (n = 0; n < ogg->nstreams; n++) {
        struct ogg_stream *stream = ogg->streams + n;

        stream->bufpos     = 0;
        stream->pstart     = 0;
        stream->psize      = 0;
        stream->granule    = -1;
        stream->lastpts    = AV_NOPTS_VALUE;
        stream->lastdts    = AV_NOPTS_VALUE;
        stream->sync_pos   = -1;
        stream->page_pos   = 0;
        stream->nsegs      = 0;
        stream->segp       = 0;
        stream->incomplete = 0;
        stream->got_data   = 0;
        /* Rewinding to (or before) the data start: the first timestamp
         * is known to be zero. */
        if (start_pos <= s->data_offset)
            stream->lastpts = 0;
    }

    ogg->page_pos = -1;
    ogg->curidx   = -1;

    return 0;
}
 
static const struct ogg_codec *ogg_find_codec(uint8_t *buf, int size)
{
int i;
 
for (i = 0; ogg_codecs[i]; i++)
if (size >= ogg_codecs[i]->magicsize &&
!memcmp(buf, ogg_codecs[i]->magic, ogg_codecs[i]->magicsize))
return ogg_codecs[i];
 
return NULL;
}
 
/**
* Replace the current stream with a new one. This is a typical webradio
* situation where a new audio stream spawns (identified with a new serial) and
* must replace the previous one (track switch).
*/
static int ogg_replace_stream(AVFormatContext *s, uint32_t serial, int nsegs)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os;
    const struct ogg_codec *codec;
    int i = 0;

    if (s->pb->seekable) {
        uint8_t magic[8];
        int64_t pos = avio_tell(s->pb);
        /* Peek past the segment table at the first payload bytes to
         * identify the codec of the new chained stream, then rewind. */
        avio_skip(s->pb, nsegs);
        avio_read(s->pb, magic, sizeof(magic));
        avio_seek(s->pb, pos, SEEK_SET);
        codec = ogg_find_codec(magic, sizeof(magic));
        if (!codec) {
            av_log(s, AV_LOG_ERROR, "Cannot identify new stream\n");
            return AVERROR_INVALIDDATA;
        }
        /* Prefer reusing an existing stream of the same codec type. */
        for (i = 0; i < ogg->nstreams; i++) {
            if (ogg->streams[i].codec == codec)
                break;
        }
        if (i >= ogg->nstreams)
            return ogg_new_stream(s, serial);
    } else if (ogg->nstreams != 1) {
        /* Non-seekable input: we cannot probe ahead, so only the trivial
         * single-stream replacement is supported. */
        avpriv_report_missing_feature(s, "Changing stream parameters in multistream ogg");
        return AVERROR_PATCHWELCOME;
    }

    os = &ogg->streams[i];

    os->serial = serial;
    return i;

#if 0
    buf     = os->buf;
    bufsize = os->bufsize;
    codec   = os->codec;

    if (!ogg->state || ogg->state->streams[i].private != os->private)
        av_freep(&ogg->streams[i].private);

    /* Set Ogg stream settings similar to what is done in ogg_new_stream(). We
     * also re-use the ogg_stream allocated buffer */
    memset(os, 0, sizeof(*os));
    os->serial  = serial;
    os->bufsize = bufsize;
    os->buf     = buf;
    os->header  = -1;
    os->codec   = codec;

    return i;
#endif
}
 
/* Append a new logical stream with the given serial number to the
 * stream table and create its AVStream. Returns the new stream index
 * or a negative AVERROR code. */
static int ogg_new_stream(AVFormatContext *s, uint32_t serial)
{
    struct ogg *ogg = s->priv_data;
    int idx         = ogg->nstreams;
    AVStream *st;
    struct ogg_stream *os;
    size_t size;

    /* ogg_save()/ogg_restore() snapshot the stream table by value;
     * growing it in between would desynchronize the snapshot. */
    if (ogg->state) {
        av_log(s, AV_LOG_ERROR, "New streams are not supposed to be added "
               "in between Ogg context save/restore operations.\n");
        return AVERROR_BUG;
    }

    /* Allocate and init a new Ogg Stream */
    if (av_size_mult(ogg->nstreams + 1, sizeof(*ogg->streams), &size) < 0 ||
        !(os = av_realloc(ogg->streams, size)))
        return AVERROR(ENOMEM);
    ogg->streams = os;
    os           = ogg->streams + idx;
    memset(os, 0, sizeof(*os));
    os->serial        = serial;
    os->bufsize       = DECODER_BUFFER_SIZE;
    os->buf           = av_malloc(os->bufsize + FF_INPUT_BUFFER_PADDING_SIZE);
    os->header        = -1;
    os->start_granule = OGG_NOGRANULE_VALUE;
    if (!os->buf)
        return AVERROR(ENOMEM);

    /* Create the associated AVStream */
    st = avformat_new_stream(s, NULL);
    if (!st) {
        av_freep(&os->buf);
        return AVERROR(ENOMEM);
    }
    st->id = idx;
    avpriv_set_pts_info(st, 64, 1, 1000000);

    ogg->nstreams++;
    return idx;
}
 
static int ogg_new_buf(struct ogg *ogg, int idx)
{
struct ogg_stream *os = ogg->streams + idx;
uint8_t *nb = av_malloc(os->bufsize + FF_INPUT_BUFFER_PADDING_SIZE);
int size = os->bufpos - os->pstart;
 
if (os->buf) {
memcpy(nb, os->buf + os->pstart, size);
av_free(os->buf);
}
 
os->buf = nb;
os->bufpos = size;
os->pstart = 0;
 
return 0;
}
 
/* Return 1 if any logical stream has already produced non-header data,
 * 0 otherwise. Used to distinguish chained streams from initial ones. */
static int data_packets_seen(const struct ogg *ogg)
{
    int n = ogg->nstreams;

    while (n--)
        if (ogg->streams[n].got_data)
            return 1;

    return 0;
}
 
/* Read and parse one physical Ogg page: locate the capture pattern,
 * decode the header, route the payload to the owning logical stream's
 * buffer (growing it if needed). On success *sid (if non-NULL) receives
 * the stream index. Returns 0 or a negative AVERROR code. */
static int ogg_read_page(AVFormatContext *s, int *sid)
{
    AVIOContext *bc = s->pb;
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os;
    int ret, i = 0;
    int flags, nsegs;
    uint64_t gp;
    uint32_t serial;
    int size, idx;
    uint8_t sync[4];
    int sp = 0;

    ret = avio_read(bc, sync, 4);
    if (ret < 4)
        return ret < 0 ? ret : AVERROR_EOF;

    /* Hunt for "OggS" using a 4-byte rolling window over the stream. */
    do {
        int c;

        if (sync[sp & 3] == 'O' &&
            sync[(sp + 1) & 3] == 'g' &&
            sync[(sp + 2) & 3] == 'g' && sync[(sp + 3) & 3] == 'S')
            break;

        /* First mismatch on a seekable stream: retry scanning right
         * after the last known page start instead of from here. */
        if (!i && bc->seekable && ogg->page_pos > 0) {
            memset(sync, 0, 4);
            avio_seek(bc, ogg->page_pos + 4, SEEK_SET);
            ogg->page_pos = -1;
        }

        c = avio_r8(bc);

        if (url_feof(bc))
            return AVERROR_EOF;

        sync[sp++ & 3] = c;
    } while (i++ < MAX_PAGE_SIZE);

    if (i >= MAX_PAGE_SIZE) {
        av_log(s, AV_LOG_INFO, "cannot find sync word\n");
        return AVERROR_INVALIDDATA;
    }

    if (avio_r8(bc) != 0) {      /* version */
        av_log (s, AV_LOG_ERROR, "ogg page, unsupported version\n");
        return AVERROR_INVALIDDATA;
    }

    flags  = avio_r8(bc);
    gp     = avio_rl64(bc);
    serial = avio_rl32(bc);
    avio_skip(bc, 8); /* seq, crc */
    nsegs  = avio_r8(bc);

    idx = ogg_find_stream(ogg, serial);
    if (idx < 0) {
        /* Unknown serial: either a chained-stream replacement (data was
         * already seen) or a genuinely new logical stream. */
        if (data_packets_seen(ogg))
            idx = ogg_replace_stream(s, serial, nsegs);
        else
            idx = ogg_new_stream(s, serial);

        if (idx < 0) {
            av_log(s, AV_LOG_ERROR, "failed to create or replace stream\n");
            return idx;
        }
    }

    os = ogg->streams + idx;
    /* 27 = fixed page header size up to (and including) the segment count. */
    ogg->page_pos =
    os->page_pos  = avio_tell(bc) - 27;

    if (os->psize > 0)
        ogg_new_buf(ogg, idx);

    ret = avio_read(bc, os->segments, nsegs);
    if (ret < nsegs)
        return ret < 0 ? ret : AVERROR_EOF;

    os->nsegs = nsegs;
    os->segp  = 0;

    size = 0;
    for (i = 0; i < nsegs; i++)
        size += os->segments[i];

    if (!(flags & OGG_FLAG_BOS))
        os->got_data = 1;

    if (flags & OGG_FLAG_CONT || os->incomplete) {
        if (!os->psize) {
            // If this is the very first segment we started
            // playback in the middle of a continuation packet.
            // Discard it since we missed the start of it.
            while (os->segp < os->nsegs) {
                int seg = os->segments[os->segp++];
                os->pstart += seg;
                if (seg < 255)
                    break;
            }
            os->sync_pos = os->page_pos;
        }
    } else {
        os->psize    = 0;
        os->sync_pos = os->page_pos;
    }

    /* Grow the stream buffer (doubling) if the page payload won't fit. */
    if (os->bufsize - os->bufpos < size) {
        uint8_t *nb = av_malloc((os->bufsize *= 2) + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!nb)
            return AVERROR(ENOMEM);
        memcpy(nb, os->buf, os->bufpos);
        av_free(os->buf);
        os->buf = nb;
    }

    ret = avio_read(bc, os->buf + os->bufpos, size);
    if (ret < size)
        return ret < 0 ? ret : AVERROR_EOF;

    os->bufpos += size;
    os->granule = gp;
    os->flags   = flags;

    memset(os->buf + os->bufpos, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    if (sid)
        *sid = idx;

    return 0;
}
 
/**
* @brief find the next Ogg packet
* @param *sid is set to the stream for the packet or -1 if there is
* no matching stream, in that case assume all other return
* values to be uninitialized.
* @return negative value on error or EOF.
*/
static int ogg_packet(AVFormatContext *s, int *sid, int *dstart, int *dsize,
                      int64_t *fpos)
{
    struct ogg *ogg = s->priv_data;
    int idx, i, ret;
    struct ogg_stream *os;
    int complete = 0;
    int segp     = 0, psize = 0;

    av_dlog(s, "ogg_packet: curidx=%i\n", ogg->curidx);
    if (sid)
        *sid = -1;

    /* Keep reading pages until one logical stream has assembled a
     * complete packet (a segment shorter than 255 terminates it). */
    do {
        idx = ogg->curidx;

        while (idx < 0) {
            ret = ogg_read_page(s, &idx);
            if (ret < 0)
                return ret;
        }

        os = ogg->streams + idx;

        av_dlog(s, "ogg_packet: idx=%d pstart=%d psize=%d segp=%d nsegs=%d\n",
                idx, os->pstart, os->psize, os->segp, os->nsegs);

        if (!os->codec) {
            /* header == -1 means this stream has not been probed yet. */
            if (os->header < 0) {
                os->codec = ogg_find_codec(os->buf, os->bufpos);
                if (!os->codec) {
                    av_log(s, AV_LOG_WARNING, "Codec not found\n");
                    os->header = 0;
                    return 0;
                }
            } else {
                return 0;
            }
        }

        /* Remember segment pointer / packet size so header parsing can
         * be rolled back below. */
        segp  = os->segp;
        psize = os->psize;

        while (os->segp < os->nsegs) {
            int ss     = os->segments[os->segp++];
            os->psize += ss;
            if (ss < 255) {
                complete = 1;
                break;
            }
        }

        if (!complete && os->segp == os->nsegs) {
            ogg->curidx    = -1;
            // Do not set incomplete for empty packets.
            // Together with the code in ogg_read_page
            // that discards all continuation of empty packets
            // we would get an infinite loop.
            os->incomplete = !!os->psize;
        }
    } while (!complete);


    if (os->granule == -1)
        av_log(s, AV_LOG_WARNING,
               "Page at %"PRId64" is missing granule\n",
               os->page_pos);

    ogg->curidx    = idx;
    os->incomplete = 0;

    if (os->header) {
        os->header = os->codec->header(s, idx);
        if (!os->header) {
            /* Not a header after all: rewind so the packet is re-read
             * as data below / by the caller. */
            os->segp  = segp;
            os->psize = psize;

            // We have reached the first non-header packet in this stream.
            // Unfortunately more header packets may still follow for others,
            // but if we continue with header parsing we may lose data packets.
            ogg->headers = 1;

            // Update the header state for all streams and
            // compute the data_offset.
            if (!s->data_offset)
                s->data_offset = os->sync_pos;

            for (i = 0; i < ogg->nstreams; i++) {
                struct ogg_stream *cur_os = ogg->streams + i;

                // if we have a partial non-header packet, its start is
                // obviously at or after the data start
                if (cur_os->incomplete)
                    s->data_offset = FFMIN(s->data_offset, cur_os->sync_pos);
            }
        } else {
            os->nb_header++;
            os->pstart += os->psize;
            os->psize   = 0;
        }
    } else {
        /* Data packet: hand position/size back to the caller and advance
         * the consume pointer. */
        os->pflags    = 0;
        os->pduration = 0;
        if (os->codec && os->codec->packet)
            os->codec->packet(s, idx);
        if (sid)
            *sid = idx;
        if (dstart)
            *dstart = os->pstart;
        if (dsize)
            *dsize = os->psize;
        if (fpos)
            *fpos = os->sync_pos;
        os->pstart  += os->psize;
        os->psize    = 0;
        if (os->pstart == os->bufpos)
            os->bufpos = os->pstart = 0;
        os->sync_pos = os->page_pos;
    }

    // determine whether there are more complete packets in this page
    // if not, the page's granule will apply to this packet
    os->page_end = 1;
    for (i = os->segp; i < os->nsegs; i++)
        if (os->segments[i] < 255) {
            os->page_end = 0;
            break;
        }

    if (os->segp == os->nsegs)
        ogg->curidx = -1;

    return 0;
}
 
/* Determine per-stream durations: read granules near the end of the
 * file for end times, then scan from the data start for start times,
 * subtracting one from the other. Best-effort; always returns 0. */
static int ogg_get_length(AVFormatContext *s)
{
    struct ogg *ogg = s->priv_data;
    int i;
    int64_t size, end;
    int streams_left = 0;

    if (!s->pb->seekable)
        return 0;

    // already set
    if (s->duration != AV_NOPTS_VALUE)
        return 0;

    size = avio_size(s->pb);
    if (size < 0)
        return 0;
    /* A page is at most MAX_PAGE_SIZE bytes, so the last page must start
     * within that distance of EOF. */
    end = size > MAX_PAGE_SIZE ? size - MAX_PAGE_SIZE : 0;

    ogg_save(s);
    avio_seek(s->pb, end, SEEK_SET);
    ogg->page_pos = -1;

    while (!ogg_read_page(s, &i)) {
        if (ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0 &&
            ogg->streams[i].codec) {
            s->streams[i]->duration =
                ogg_gptopts(s, i, ogg->streams[i].granule, NULL);
            if (s->streams[i]->start_time != AV_NOPTS_VALUE) {
                s->streams[i]->duration -= s->streams[i]->start_time;
                streams_left -= (ogg->streams[i].got_start == -1);
                ogg->streams[i].got_start = 1;
            } else if (!ogg->streams[i].got_start) {
                /* got_start == -1 marks "duration known, start pending". */
                ogg->streams[i].got_start = -1;
                streams_left++;
            }
        }
    }

    ogg_restore(s, 0);

    /* Second pass: read from the data start to learn the missing start
     * times and adjust the durations computed above. */
    ogg_save (s);
    avio_seek (s->pb, s->data_offset, SEEK_SET);
    ogg_reset(s);
    while (streams_left > 0 && !ogg_packet(s, &i, NULL, NULL, NULL)) {
        int64_t pts;
        if (i < 0) continue;
        pts = ogg_calc_pts(s, i, NULL);
        if (pts != AV_NOPTS_VALUE && s->streams[i]->start_time == AV_NOPTS_VALUE && !ogg->streams[i].got_start) {
            s->streams[i]->duration -= pts;
            ogg->streams[i].got_start = 1;
            streams_left--;
        } else if (s->streams[i]->start_time != AV_NOPTS_VALUE && !ogg->streams[i].got_start) {
            ogg->streams[i].got_start = 1;
            streams_left--;
        }
    }
    ogg_restore (s, 0);

    return 0;
}
 
/* Release everything owned by the demuxer: per-stream buffers,
 * codec-private state (letting the codec clean up first), and the
 * stream table itself. Always returns 0. */
static int ogg_read_close(AVFormatContext *s)
{
    struct ogg *ogg = s->priv_data;
    int idx;

    for (idx = 0; idx < ogg->nstreams; idx++) {
        struct ogg_stream *os = ogg->streams + idx;

        av_freep(&os->buf);
        if (os->codec && os->codec->cleanup)
            os->codec->cleanup(s, idx);
        av_freep(&os->private);
    }

    av_freep(&ogg->streams);
    return 0;
}
 
/* Demuxer entry point: parse all codec headers from the start of the
 * file, sanity-check the per-stream header state, then compute stream
 * durations via a granule scan from the end. */
static int ogg_read_header(AVFormatContext *s)
{
    struct ogg *ogg = s->priv_data;
    int ret, i;

    ogg->curidx = -1;

    //linear headers seek from start
    do {
        ret = ogg_packet(s, NULL, NULL, NULL, NULL);
        if (ret < 0) {
            ogg_read_close(s);
            return ret;
        }
    } while (!ogg->headers);
    av_dlog(s, "found headers\n");

    for (i = 0; i < ogg->nstreams; i++) {
        struct ogg_stream *os = ogg->streams + i;

        if (ogg->streams[i].header < 0) {
            av_log(s, AV_LOG_ERROR, "Header parsing failed for stream %d\n", i);
            ogg->streams[i].codec = NULL;
        } else if (os->codec && os->nb_header < os->codec->nb_header) {
            /* Fewer headers than the codec requires: tolerated unless
             * strict error recognition was requested. */
            av_log(s, AV_LOG_WARNING,
                   "Headers mismatch for stream %d: "
                   "expected %d received %d.\n",
                   i, os->codec->nb_header, os->nb_header);
            if (s->error_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
        if (os->start_granule != OGG_NOGRANULE_VALUE)
            os->lastpts = s->streams[i]->start_time =
                ogg_gptopts(s, i, os->start_granule, NULL);
    }

    //linear granulepos seek from end
    ogg_get_length(s);

    return 0;
}
 
/* Compute pts (and optionally dts) for the current packet of stream
 * idx. A page's granule describes its LAST packet for most codecs, so
 * the converted timestamp is stashed in lastpts/lastdts and consumed by
 * the following call; codecs with granule_is_start apply it directly. */
static int64_t ogg_calc_pts(AVFormatContext *s, int idx, int64_t *dts)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    int64_t pts = AV_NOPTS_VALUE;

    if (dts)
        *dts = AV_NOPTS_VALUE;

    /* Consume a timestamp deferred from the previous page, if any. */
    if (os->lastpts != AV_NOPTS_VALUE) {
        pts         = os->lastpts;
        os->lastpts = AV_NOPTS_VALUE;
    }
    if (os->lastdts != AV_NOPTS_VALUE) {
        if (dts)
            *dts = os->lastdts;
        os->lastdts = AV_NOPTS_VALUE;
    }
    if (os->page_end) {
        if (os->granule != -1LL) {
            if (os->codec && os->codec->granule_is_start)
                pts = ogg_gptopts(s, idx, os->granule, dts);
            else
                os->lastpts = ogg_gptopts(s, idx, os->granule, &os->lastdts);
            os->granule = -1LL;
        }
    }
    return pts;
}
 
/* For Theora, cross-check the packet's AV_PKT_FLAG_KEY flag against the
 * keyframe bit in the packet data (bit 0x40 clear == keyframe) and flip
 * the flag, with a warning, when the container got it wrong. */
static void ogg_validate_keyframe(AVFormatContext *s, int idx, int pstart, int psize)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;

    if (!psize || s->streams[idx]->codec->codec_id != AV_CODEC_ID_THEORA)
        return;

    {
        int flagged_key = (os->pflags & AV_PKT_FLAG_KEY) != 0;
        int marked_key  = (os->buf[pstart] & 0x40) == 0;

        if (flagged_key != marked_key) {
            os->pflags ^= AV_PKT_FLAG_KEY;
            av_log(s, AV_LOG_WARNING, "Broken file, %skeyframe not correctly marked.\n",
                   (os->pflags & AV_PKT_FLAG_KEY) ? "" : "non-");
        }
    }
}
 
/**
 * Read the next data packet into pkt, skipping non-keyframes while a
 * keyframe seek is pending.
 *
 * @return payload size on success, negative AVERROR code on failure
 */
static int ogg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    struct ogg *ogg;
    struct ogg_stream *os;
    int idx, ret;
    int pstart, psize;
    int64_t fpos, pts, dts;

    if (s->io_repositioned) {
        ogg_reset(s);
        s->io_repositioned = 0;
    }

    //Get an ogg packet
retry:
    do {
        ret = ogg_packet(s, &idx, &pstart, &psize, &fpos);
        if (ret < 0)
            return ret;
    } while (idx < 0 || !s->streams[idx]);

    ogg = s->priv_data;
    os  = ogg->streams + idx;

    // pflags might not be set until after this
    pts = ogg_calc_pts(s, idx, &dts);
    ogg_validate_keyframe(s, idx, pstart, psize);

    if (os->keyframe_seek && !(os->pflags & AV_PKT_FLAG_KEY))
        goto retry;
    os->keyframe_seek = 0;

    //Alloc a pkt
    ret = av_new_packet(pkt, psize);
    if (ret < 0)
        return ret;
    pkt->stream_index = idx;
    memcpy(pkt->data, os->buf + pstart, psize);

    pkt->pts      = pts;
    pkt->dts      = dts;
    pkt->flags    = os->pflags;
    pkt->duration = os->pduration;
    pkt->pos      = fpos;

    if (os->end_trimming) {
        uint8_t *side_data = av_packet_new_side_data(pkt,
                                                     AV_PKT_DATA_SKIP_SAMPLES,
                                                     10);
        if (!side_data) {
            /* Bug fix: the AVPacket struct belongs to the caller — free
             * only its payload. The previous av_free(pkt) here released
             * caller-owned memory. */
            av_free_packet(pkt);
            return AVERROR(ENOMEM);
        }
        AV_WL32(side_data + 4, os->end_trimming);
    }

    return psize;
}
 
/* read_timestamp callback for binary seeking: scan forward from
 * *pos_arg (up to pos_limit) for a packet of stream_index with a known
 * pts. During a keyframe seek, the position of the last keyframe is
 * returned instead of a non-keyframe position. */
static int64_t ogg_read_timestamp(AVFormatContext *s, int stream_index,
                                  int64_t *pos_arg, int64_t pos_limit)
{
    struct ogg *ogg = s->priv_data;
    AVIOContext *bc = s->pb;
    int64_t pts     = AV_NOPTS_VALUE;
    int64_t keypos  = -1;
    int i;
    int pstart, psize;
    avio_seek(bc, *pos_arg, SEEK_SET);
    ogg_reset(s);

    while (   avio_tell(bc) <= pos_limit
           && !ogg_packet(s, &i, &pstart, &psize, pos_arg)) {
        if (i == stream_index) {
            struct ogg_stream *os = ogg->streams + stream_index;
            pts = ogg_calc_pts(s, i, NULL);
            ogg_validate_keyframe(s, i, pstart, psize);
            if (os->pflags & AV_PKT_FLAG_KEY) {
                keypos = *pos_arg;
            } else if (os->keyframe_seek) {
                // if we had a previous keyframe but no pts for it,
                // return that keyframe with this pts value.
                if (keypos >= 0)
                    *pos_arg = keypos;
                else
                    pts = AV_NOPTS_VALUE;
            }
        }
        if (pts != AV_NOPTS_VALUE)
            break;
    }
    ogg_reset(s);
    return pts;
}
 
/* Seek to timestamp on stream_index, requesting keyframe-only matching
 * for video unless AVSEEK_FLAG_ANY is given. Returns the result of the
 * binary search (negative AVERROR on failure). */
static int ogg_read_seek(AVFormatContext *s, int stream_index,
                         int64_t timestamp, int flags)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + stream_index;
    int want_keyframes;
    int ret;

    av_assert0(stream_index < ogg->nstreams);
    /* Ensure everything is reset even when seeking via the generated
     * index. */
    ogg_reset(s);

    /* Try seeking to a keyframe first. If this fails (very possible),
     * av_seek_frame will fall back to ignoring keyframes. */
    want_keyframes =
        s->streams[stream_index]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        !(flags & AVSEEK_FLAG_ANY);
    if (want_keyframes)
        os->keyframe_seek = 1;

    ret = ff_seek_frame_binary(s, stream_index, timestamp, flags);
    /* The stream table may have been reallocated during the seek. */
    os = ogg->streams + stream_index;
    if (ret < 0)
        os->keyframe_seek = 0;
    return ret;
}
 
/* Probe for an Ogg file: "OggS" capture pattern, version byte 0, and a
 * flags byte with only the three defined bits possibly set. */
static int ogg_probe(AVProbeData *p)
{
    static const uint8_t magic[5] = { 'O', 'g', 'g', 'S', 0 };

    if (memcmp(p->buf, magic, sizeof(magic)) == 0 && p->buf[5] <= 0x7)
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/* Ogg demuxer registration. */
AVInputFormat ff_ogg_demuxer = {
    .name           = "ogg",
    .long_name      = NULL_IF_CONFIG_SMALL("Ogg"),
    .priv_data_size = sizeof(struct ogg),
    .read_probe     = ogg_probe,
    .read_header    = ogg_read_header,
    .read_packet    = ogg_read_packet,
    .read_close     = ogg_read_close,
    .read_seek      = ogg_read_seek,
    .read_timestamp = ogg_read_timestamp,
    .extensions     = "ogg",
    .flags          = AVFMT_GENERIC_INDEX | AVFMT_TS_DISCONT | AVFMT_NOBINSEARCH,
};
/contrib/sdk/sources/ffmpeg/libavformat/oggdec.h
0,0 → 1,161
/**
Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
 
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
 
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
**/
 
#ifndef AVFORMAT_OGGDEC_H
#define AVFORMAT_OGGDEC_H
 
#include "avformat.h"
#include "metadata.h"
 
/* Per-codec handler: identification magic plus parsing callbacks. */
struct ogg_codec {
    const int8_t *magic;     ///< signature matched against the stream's first bytes
    uint8_t magicsize;       ///< length of magic in bytes
    const int8_t *name;      ///< human-readable codec name
    /**
     * Attempt to process a packet as a header
     * @return 1 if the packet was a valid header,
     *         0 if the packet was not a header (was a data packet)
     *         -1 if an error occurred or for unsupported stream
     */
    int (*header)(AVFormatContext *, int);
    int (*packet)(AVFormatContext *, int);
    /**
     * Translate a granule into a timestamp.
     * Will set dts if non-null and known.
     * @return pts
     */
    uint64_t (*gptopts)(AVFormatContext *, int, uint64_t, int64_t *dts);
    /**
     * 1 if granule is the start time of the associated packet.
     * 0 if granule is the end time of the associated packet.
     */
    int granule_is_start;
    /**
     * Number of expected headers
     */
    int nb_header;
    void (*cleanup)(AVFormatContext *s, int idx);
};
 
/* State of one logical Ogg bitstream. */
struct ogg_stream {
    uint8_t *buf;            ///< page payload accumulation buffer
    unsigned int bufsize;    ///< allocated size of buf (excluding padding)
    unsigned int bufpos;     ///< end of valid data in buf
    unsigned int pstart;     ///< offset in buf of the current packet
    unsigned int psize;      ///< size of the current packet
    unsigned int pflags;     ///< AV_PKT_FLAG_* for the current packet
    unsigned int pduration;  ///< duration of the current packet
    uint32_t serial;         ///< bitstream serial number from the page header
    uint64_t granule;        ///< granule position of the current page
    uint64_t start_granule;  ///< granule of the stream start, or OGG_NOGRANULE_VALUE
    int64_t lastpts;         ///< pts deferred from the page granule for the next packet
    int64_t lastdts;         ///< dts deferred from the page granule for the next packet
    int64_t sync_pos;   ///< file offset of the first page needed to reconstruct the current packet
    int64_t page_pos;   ///< file offset of the current page
    int flags;               ///< OGG_FLAG_* of the current page
    const struct ogg_codec *codec;  ///< handler, NULL until identified
    int header;              ///< header-parsing state; -1 before the codec is probed — TODO(review) confirm remaining state values
    int nsegs, segp;         ///< segment count and current segment index of the page
    uint8_t segments[255];   ///< segment table of the current page
    int incomplete; ///< whether we're expecting a continuation in the next page
    int page_end;   ///< current packet is the last one completed in the page
    int keyframe_seek;       ///< drop packets until a keyframe after seeking
    int got_start;           ///< start-time bookkeeping used by ogg_get_length()
    int got_data;   ///< 1 if the stream got some data (non-initial packets), 0 otherwise
    int nb_header;  ///< set to the number of parsed headers
    int end_trimming; ///< number of trailing samples to drop, exported as AV_PKT_DATA_SKIP_SAMPLES side data
    void *private;           ///< codec-private state
};
 
/* Snapshot of demuxer state, stacked by ogg_save()/ogg_restore().
 * streams[] is over-allocated to hold nstreams entries. */
struct ogg_state {
    uint64_t pos;            ///< file position at save time
    int curidx;              ///< saved current-stream index
    struct ogg_state *next;  ///< next (older) snapshot on the stack
    int nstreams;            ///< number of entries in streams[]
    struct ogg_stream streams[1];
};
 
/* Demuxer private context. */
struct ogg {
    struct ogg_stream *streams; ///< table of logical streams
    int nstreams;               ///< number of logical streams
    int headers;                ///< set once the first non-header packet is seen
    int curidx;                 ///< stream currently assembling a packet, -1 if none
    int64_t page_pos;           ///< file offset of the current page
    struct ogg_state *state;    ///< stack of saved states (see ogg_save)
};
 
#define OGG_FLAG_CONT 1
#define OGG_FLAG_BOS 2
#define OGG_FLAG_EOS 4
 
#define OGG_NOGRANULE_VALUE (-1ull)
 
extern const struct ogg_codec ff_celt_codec;
extern const struct ogg_codec ff_dirac_codec;
extern const struct ogg_codec ff_flac_codec;
extern const struct ogg_codec ff_ogm_audio_codec;
extern const struct ogg_codec ff_ogm_old_codec;
extern const struct ogg_codec ff_ogm_text_codec;
extern const struct ogg_codec ff_ogm_video_codec;
extern const struct ogg_codec ff_old_dirac_codec;
extern const struct ogg_codec ff_old_flac_codec;
extern const struct ogg_codec ff_opus_codec;
extern const struct ogg_codec ff_skeleton_codec;
extern const struct ogg_codec ff_speex_codec;
extern const struct ogg_codec ff_theora_codec;
extern const struct ogg_codec ff_vorbis_codec;
 
int ff_vorbis_comment(AVFormatContext *ms, AVDictionary **m, const uint8_t *buf, int size);
 
/* Look up the logical stream with the given serial number.
 * Returns its index, or -1 when no stream matches. */
static inline int
ogg_find_stream (struct ogg * ogg, int serial)
{
    int n;

    for (n = 0; n < ogg->nstreams; n++) {
        if (ogg->streams[n].serial == serial)
            return n;
    }

    return -1;
}
 
/* Convert a granule position of stream i to a pts (setting *dts when
 * requested). Codecs without a converter use the granule directly. */
static inline uint64_t
ogg_gptopts (AVFormatContext * s, int i, uint64_t gp, int64_t *dts)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + i;

    if (os->codec && os->codec->gptopts)
        return os->codec->gptopts(s, i, gp, dts);

    if (dts)
        *dts = gp;
    return gp;
}
 
#endif /* AVFORMAT_OGGDEC_H */
/contrib/sdk/sources/ffmpeg/libavformat/oggenc.c
0,0 → 1,638
/*
* Ogg muxer
* Copyright (c) 2007 Baptiste Coudurier <baptiste dot coudurier at free dot fr>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/crc.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/random_seed.h"
#include "libavcodec/xiph.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/flac.h"
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "vorbiscomment.h"
 
#define MAX_PAGE_SIZE 65025
 
/* One Ogg page being assembled before it is written out. */
typedef struct {
    int64_t start_granule;    ///< granule at the start of the page
    int64_t granule;          ///< granule position written in the page header
    int stream_index;         ///< owning AVStream index
    uint8_t flags;            ///< Ogg page header flags
    uint8_t segments_count;   ///< number of entries used in segments[]
    uint8_t segments[255];    ///< lacing (segment) table
    uint8_t data[MAX_PAGE_SIZE]; ///< page payload
    uint16_t size;            ///< bytes used in data[]
} OGGPage;
 
/* Per-stream muxer state. */
typedef struct {
    unsigned page_counter;   ///< sequence number of the next page to write
    uint8_t *header[3];      ///< up to three codec header packets
    int header_len[3];       ///< lengths of header[]
    /** for theora granule */
    int kfgshift;
    int64_t last_kf_pts;     ///< pts of the last keyframe (theora granule)
    int vrev;                ///< theora version revision — TODO(review) confirm
    int eos;                 ///< end-of-stream reached
    unsigned page_count;     ///< number of page buffered
    OGGPage page;            ///< current page
    unsigned serial_num;     ///< serial number
    int64_t last_granule;    ///< last packet granule
} OGGStreamContext;
 
/* Singly-linked list node of buffered pages awaiting interleaved output. */
typedef struct OGGPageList {
    OGGPage page;
    struct OGGPageList *next;
} OGGPageList;
 
typedef struct {
const AVClass *class;
OGGPageList *page_list;
int pref_size; ///< preferred page size (0 => fill all segments)
int64_t pref_duration; ///< preferred page duration (0 => fill all segments)
} OGGContext;
 
#define OFFSET(x) offsetof(OGGContext, x)
#define PARAM AV_OPT_FLAG_ENCODING_PARAM

/* User options; "oggpagesize" and "pagesize" both map to pref_size
 * (ogg_write_header warns that the option is deprecated when set). */
static const AVOption options[] = {
    { "oggpagesize", "Set preferred Ogg page size.",
      offsetof(OGGContext, pref_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, MAX_PAGE_SIZE, AV_OPT_FLAG_ENCODING_PARAM},
    { "pagesize", "preferred page size in bytes (deprecated)",
      OFFSET(pref_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, MAX_PAGE_SIZE, PARAM },
    { "page_duration", "preferred page duration, in microseconds",
      OFFSET(pref_duration), AV_OPT_TYPE_INT64, { .i64 = 1000000 }, 0, INT64_MAX, PARAM },
    { NULL },
};

static const AVClass ogg_muxer_class = {
    .class_name = "Ogg muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
 
/* Patch the CRC field of the page just serialized into pb, then restore
 * the write position so further output continues at the end. */
static void ogg_update_checksum(AVFormatContext *s, AVIOContext *pb, int64_t crc_offset)
{
    int64_t end_pos    = avio_tell(pb);
    uint32_t page_crc  = ffio_get_checksum(pb);

    avio_seek(pb, crc_offset, SEEK_SET);
    avio_wb32(pb, page_crc);
    avio_seek(pb, end_pos, SEEK_SET);
}
 
/* Serialize one buffered page to the output.
 * The page is first rendered into a dynamic buffer with a running CRC so the
 * checksum field (written as 0 at crc_offset) can be patched afterwards, then
 * the whole page is emitted to s->pb in one write.
 * extra_flags is OR-ed into the page flags (used to set eos on the last page).
 * Returns 0 on success or a negative AVERROR code. */
static int ogg_write_page(AVFormatContext *s, OGGPage *page, int extra_flags)
{
    OGGStreamContext *oggstream = s->streams[page->stream_index]->priv_data;
    AVIOContext *pb;
    int64_t crc_offset;
    int ret, size;
    uint8_t *buf;

    ret = avio_open_dyn_buf(&pb);
    if (ret < 0)
        return ret;
    /* Ogg uses the CRC-32 polynomial 0x04C11DB7 with initial value 0. */
    ffio_init_checksum(pb, ff_crc04C11DB7_update, 0);
    ffio_wfourcc(pb, "OggS");
    avio_w8(pb, 0);                       /* stream structure version */
    avio_w8(pb, page->flags | extra_flags);
    avio_wl64(pb, page->granule);
    avio_wl32(pb, oggstream->serial_num);
    avio_wl32(pb, oggstream->page_counter++);
    crc_offset = avio_tell(pb);
    avio_wl32(pb, 0); // crc
    avio_w8(pb, page->segments_count);
    avio_write(pb, page->segments, page->segments_count);
    avio_write(pb, page->data, page->size);

    ogg_update_checksum(s, pb, crc_offset);
    avio_flush(pb);

    size = avio_close_dyn_buf(pb, &buf);
    if (size < 0)
        return size;

    avio_write(s->pb, buf, size);
    avio_flush(s->pb);
    av_free(buf);
    oggstream->page_count--;              /* one fewer page buffered for this stream */
    return 0;
}
 
/* Return nonzero if this granule marks a keyframe: for theora-style granules
 * a keyframe has all of the low kfgshift (pframe-count) bits clear.
 * Streams without a kfgshift never report a key granule. */
static int ogg_key_granule(OGGStreamContext *oggstream, int64_t granule)
{
    int64_t pframe_bits;

    if (!oggstream->kfgshift)
        return 0;
    pframe_bits = granule & ((1 << oggstream->kfgshift) - 1);
    return pframe_bits == 0;
}
 
/* Convert a granule position to a timestamp in stream time base units.
 * Theora granules are split fields: (last keyframe time << kfgshift) | frames
 * since that keyframe, so the timestamp is the sum of both parts; other
 * codecs use the granule directly. */
static int64_t ogg_granule_to_timestamp(OGGStreamContext *oggstream, int64_t granule)
{
    int shift = oggstream->kfgshift;

    if (!shift)
        return granule;
    return (granule >> shift) + (granule & ((1 << shift) - 1));
}
 
/* Compare two buffered pages by presentation time.
 * Both granules are converted to AV_TIME_BASE units through their own
 * stream's time base so pages of different streams can be interleaved.
 * Returns nonzero if 'next' ends later than 'page'; pages with granule -1
 * (no finished packet yet) always compare equal. */
static int ogg_compare_granule(AVFormatContext *s, OGGPage *next, OGGPage *page)
{
    AVStream *st2 = s->streams[next->stream_index];
    AVStream *st  = s->streams[page->stream_index];
    int64_t next_granule, cur_granule;

    if (next->granule == -1 || page->granule == -1)
        return 0;

    next_granule = av_rescale_q(ogg_granule_to_timestamp(st2->priv_data, next->granule),
                                st2->time_base, AV_TIME_BASE_Q);
    cur_granule  = av_rescale_q(ogg_granule_to_timestamp(st->priv_data, page->granule),
                                st ->time_base, AV_TIME_BASE_Q);
    return next_granule > cur_granule;
}
 
/* Begin a fresh, empty current page for the stream.
 * granule = -1 means no packet has finished on this page yet. */
static int ogg_reset_cur_page(OGGStreamContext *oggstream)
{
    OGGPage *cur = &oggstream->page;

    cur->granule        = -1;
    cur->flags          = 0;
    cur->segments_count = 0;
    cur->size           = 0;
    return 0;
}
 
/* Move the stream's current page into the global sorted page list and start
 * a new empty page. Pages are kept ordered by granule time (see
 * ogg_compare_granule) so ogg_write_pages can interleave streams correctly.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
static int ogg_buffer_page(AVFormatContext *s, OGGStreamContext *oggstream)
{
    OGGContext *ogg = s->priv_data;
    OGGPageList **p = &ogg->page_list;
    OGGPageList *l = av_mallocz(sizeof(*l));

    if (!l)
        return AVERROR(ENOMEM);
    l->page = oggstream->page;            /* struct copy of the finished page */

    /* The next page starts where this one ended. */
    oggstream->page.start_granule = oggstream->page.granule;
    oggstream->page_count++;
    ogg_reset_cur_page(oggstream);

    /* Insertion sort: stop before the first page that ends later than ours. */
    while (*p) {
        if (ogg_compare_granule(s, &(*p)->page, &l->page))
            break;
        p = &(*p)->next;
    }
    l->next = *p;
    *p = l;

    return 0;
}
 
/* Append one packet's payload to the stream's current page(s), splitting it
 * into 255-byte Ogg segments and finishing pages as they fill up or exceed
 * the preferred size/duration. 'granule' is the packet's granule position;
 * 'header' is nonzero for codec header packets, which must never trigger
 * flushing. Returns 0 (page buffering errors are not propagated here). */
static int ogg_buffer_data(AVFormatContext *s, AVStream *st,
                           uint8_t *data, unsigned size, int64_t granule,
                           int header)
{
    OGGStreamContext *oggstream = st->priv_data;
    OGGContext *ogg = s->priv_data;
    int total_segments = size / 255 + 1;  /* a packet always ends with a <255 lacing value */
    uint8_t *p = data;
    int i, segments, len, flush = 0;

    // Handles VFR by flushing page because this frame needs to have a timestamp
    // For theora, keyframes also need to have a timestamp to correctly mark
    // them as such, otherwise seeking will not work correctly at the very
    // least with old libogg versions.
    // Do not try to flush header packets though, that will create broken files.
    if (st->codec->codec_id == AV_CODEC_ID_THEORA && !header &&
        (ogg_granule_to_timestamp(oggstream, granule) >
         ogg_granule_to_timestamp(oggstream, oggstream->last_granule) + 1 ||
         ogg_key_granule(oggstream, granule))) {
        if (oggstream->page.granule != -1)
            ogg_buffer_page(s, oggstream);
        flush = 1;
    }

    // avoid a continued page
    if (!header && oggstream->page.size > 0 &&
        MAX_PAGE_SIZE - oggstream->page.size < size) {
        ogg_buffer_page(s, oggstream);
    }

    for (i = 0; i < total_segments; ) {
        OGGPage *page = &oggstream->page;

        /* How many lacing values still fit on this page. */
        segments = FFMIN(total_segments - i, 255 - page->segments_count);

        if (i && !page->segments_count)
            page->flags |= 1; // continued packet

        /* All segments but the last are full (255); the last lacing value
         * encodes the remainder and terminates the packet. */
        memset(page->segments+page->segments_count, 255, segments - 1);
        page->segments_count += segments - 1;

        len = FFMIN(size, segments*255);
        page->segments[page->segments_count++] = len - (segments-1)*255;
        memcpy(page->data+page->size, p, len);
        p    += len;
        size -= len;
        i    += segments;
        page->size += len;

        /* Only a packet that ends on this page contributes its granule. */
        if (i == total_segments)
            page->granule = granule;

        if (!header) {
            /* NOTE: this 'st' intentionally shadows the parameter; the page
             * may belong to the stream the page was opened for. */
            AVStream *st = s->streams[page->stream_index];

            int64_t start = av_rescale_q(page->start_granule, st->time_base,
                                         AV_TIME_BASE_Q);
            int64_t next  = av_rescale_q(page->granule, st->time_base,
                                         AV_TIME_BASE_Q);

            /* Finish the page when the lacing table is full or the user's
             * preferred size/duration is reached. */
            if (page->segments_count == 255 ||
                (ogg->pref_size > 0 && page->size >= ogg->pref_size) ||
                (ogg->pref_duration > 0 && next - start >= ogg->pref_duration)) {
                ogg_buffer_page(s, oggstream);
            }
        }
    }

    if (flush && oggstream->page.granule != -1)
        ogg_buffer_page(s, oggstream);

    return 0;
}
 
/* Build a VorbisComment packet from metadata dict 'm'.
 * 'offset' bytes are reserved (zeroed) at the start for the caller to fill
 * with a codec-specific packet header; 'framing_bit' appends the Vorbis
 * framing byte (1) when nonzero. On success returns a newly allocated buffer
 * (caller frees) and stores its total length in *header_len; returns NULL
 * on allocation failure. */
static uint8_t *ogg_write_vorbiscomment(int offset, int bitexact,
                                        int *header_len, AVDictionary **m, int framing_bit)
{
    /* bitexact mode uses a fixed vendor string for reproducible output */
    const char *vendor = bitexact ? "ffmpeg" : LIBAVFORMAT_IDENT;
    int size;
    uint8_t *p, *p0;
    unsigned int count;

    ff_metadata_conv(m, ff_vorbiscomment_metadata_conv, NULL);

    size = offset + ff_vorbiscomment_length(*m, vendor, &count) + framing_bit;
    p = av_mallocz(size);
    if (!p)
        return NULL;
    p0 = p;

    p += offset;                          /* leave room for the packet header */
    ff_vorbiscomment_write(&p, m, vendor, count);
    if (framing_bit)
        bytestream_put_byte(&p, 1);

    *header_len = size;
    return p0;
}
 
/* Build the two Ogg-FLAC header packets: the mapping header containing
 * STREAMINFO, and a VorbisComment packet wrapped in a FLAC metadata block.
 * Fills oggstream->header[0..1]/header_len[0..1]; returns 0 on success,
 * -1 on invalid extradata or AVERROR(ENOMEM). */
static int ogg_build_flac_headers(AVCodecContext *avctx,
                                  OGGStreamContext *oggstream, int bitexact,
                                  AVDictionary **m)
{
    enum FLACExtradataFormat format;
    uint8_t *streaminfo;
    uint8_t *p;

    if (!avpriv_flac_is_extradata_valid(avctx, &format, &streaminfo))
        return -1;

    // first packet: STREAMINFO
    oggstream->header_len[0] = 51;
    oggstream->header[0] = av_mallocz(51); // per ogg flac specs
    p = oggstream->header[0];
    if (!p)
        return AVERROR(ENOMEM);
    bytestream_put_byte(&p, 0x7F);                /* Ogg-FLAC packet type */
    bytestream_put_buffer(&p, "FLAC", 4);
    bytestream_put_byte(&p, 1); // major version
    bytestream_put_byte(&p, 0); // minor version
    bytestream_put_be16(&p, 1); // headers packets without this one
    bytestream_put_buffer(&p, "fLaC", 4);
    bytestream_put_byte(&p, 0x00); // streaminfo
    bytestream_put_be24(&p, 34);                  /* STREAMINFO block length */
    bytestream_put_buffer(&p, streaminfo, FLAC_STREAMINFO_SIZE);

    // second packet: VorbisComment
    p = ogg_write_vorbiscomment(4, bitexact, &oggstream->header_len[1], m, 0);
    if (!p)
        return AVERROR(ENOMEM);
    oggstream->header[1] = p;
    bytestream_put_byte(&p, 0x84); // last metadata block and vorbis comment
    bytestream_put_be24(&p, oggstream->header_len[1] - 4);

    return 0;
}
 
#define SPEEX_HEADER_SIZE 80

/* Build the two Ogg-Speex header packets: a copy of the 80-byte Speex header
 * from extradata (with extra_headers forced to 0) and a VorbisComment packet.
 * Returns 0 on success, -1 on short extradata or AVERROR(ENOMEM). */
static int ogg_build_speex_headers(AVCodecContext *avctx,
                                   OGGStreamContext *oggstream, int bitexact,
                                   AVDictionary **m)
{
    uint8_t *p;

    if (avctx->extradata_size < SPEEX_HEADER_SIZE)
        return -1;

    // first packet: Speex header
    p = av_mallocz(SPEEX_HEADER_SIZE);
    if (!p)
        return AVERROR(ENOMEM);
    oggstream->header[0] = p;
    oggstream->header_len[0] = SPEEX_HEADER_SIZE;
    bytestream_put_buffer(&p, avctx->extradata, SPEEX_HEADER_SIZE);
    AV_WL32(&oggstream->header[0][68], 0); // set extra_headers to 0

    // second packet: VorbisComment
    p = ogg_write_vorbiscomment(0, bitexact, &oggstream->header_len[1], m, 0);
    if (!p)
        return AVERROR(ENOMEM);
    oggstream->header[1] = p;

    return 0;
}
 
#define OPUS_HEADER_SIZE 19

/* Build the two Ogg-Opus header packets: the OpusHead (copied verbatim from
 * extradata) and an OpusTags packet (VorbisComment with "OpusTags" magic).
 * Returns 0 on success, -1 on short extradata or AVERROR(ENOMEM). */
static int ogg_build_opus_headers(AVCodecContext *avctx,
                                  OGGStreamContext *oggstream, int bitexact,
                                  AVDictionary **m)
{
    uint8_t *p;

    if (avctx->extradata_size < OPUS_HEADER_SIZE)
        return -1;

    /* first packet: Opus header */
    p = av_mallocz(avctx->extradata_size);
    if (!p)
        return AVERROR(ENOMEM);
    oggstream->header[0] = p;
    oggstream->header_len[0] = avctx->extradata_size;
    bytestream_put_buffer(&p, avctx->extradata, avctx->extradata_size);

    /* second packet: VorbisComment prefixed with the 8-byte OpusTags magic */
    p = ogg_write_vorbiscomment(8, bitexact, &oggstream->header_len[1], m, 0);
    if (!p)
        return AVERROR(ENOMEM);
    oggstream->header[1] = p;
    bytestream_put_buffer(&p, "OpusTags", 8);

    return 0;
}
 
/* Write the Ogg stream headers.
 * For each stream: set the time base, validate the codec, allocate the
 * per-stream context, pick a unique serial number, and build the codec
 * header packets. Then buffer all first header packets (bos pages) followed
 * by the remaining header packets, per the Ogg multiplexing rules.
 * Returns 0 on success or a negative error / -1 on unsupported input.
 *
 * Fix vs. original: the av_mallocz() of the per-stream context was used
 * without a NULL check, dereferencing NULL on allocation failure. */
static int ogg_write_header(AVFormatContext *s)
{
    OGGContext *ogg = s->priv_data;
    OGGStreamContext *oggstream = NULL;
    int i, j;

    if (ogg->pref_size)
        av_log(s, AV_LOG_WARNING, "The pagesize option is deprecated\n");

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        unsigned serial_num = i;

        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->codec_id == AV_CODEC_ID_OPUS)
                /* Opus requires a fixed 48kHz clock */
                avpriv_set_pts_info(st, 64, 1, 48000);
            else
                avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
        } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            avpriv_set_pts_info(st, 64, st->codec->time_base.num, st->codec->time_base.den);
        if (st->codec->codec_id != AV_CODEC_ID_VORBIS &&
            st->codec->codec_id != AV_CODEC_ID_THEORA &&
            st->codec->codec_id != AV_CODEC_ID_SPEEX  &&
            st->codec->codec_id != AV_CODEC_ID_FLAC   &&
            st->codec->codec_id != AV_CODEC_ID_OPUS) {
            av_log(s, AV_LOG_ERROR, "Unsupported codec id in stream %d\n", i);
            return -1;
        }

        if (!st->codec->extradata || !st->codec->extradata_size) {
            av_log(s, AV_LOG_ERROR, "No extradata present\n");
            return -1;
        }
        oggstream = av_mallocz(sizeof(*oggstream));
        if (!oggstream)
            return AVERROR(ENOMEM);
        oggstream->page.stream_index = i;

        /* Pick a random serial number unique among the streams written so
         * far; in bitexact mode the stream index is used for reproducibility. */
        if (!(st->codec->flags & CODEC_FLAG_BITEXACT))
            do {
                serial_num = av_get_random_seed();
                for (j = 0; j < i; j++) {
                    OGGStreamContext *sc = s->streams[j]->priv_data;
                    if (serial_num == sc->serial_num)
                        break;
                }
            } while (j < i);
        oggstream->serial_num = serial_num;

        av_dict_copy(&st->metadata, s->metadata, AV_DICT_DONT_OVERWRITE);

        st->priv_data = oggstream;
        if (st->codec->codec_id == AV_CODEC_ID_FLAC) {
            int err = ogg_build_flac_headers(st->codec, oggstream,
                                             st->codec->flags & CODEC_FLAG_BITEXACT,
                                             &st->metadata);
            if (err) {
                av_log(s, AV_LOG_ERROR, "Error writing FLAC headers\n");
                av_freep(&st->priv_data);
                return err;
            }
        } else if (st->codec->codec_id == AV_CODEC_ID_SPEEX) {
            int err = ogg_build_speex_headers(st->codec, oggstream,
                                              st->codec->flags & CODEC_FLAG_BITEXACT,
                                              &st->metadata);
            if (err) {
                av_log(s, AV_LOG_ERROR, "Error writing Speex headers\n");
                av_freep(&st->priv_data);
                return err;
            }
        } else if (st->codec->codec_id == AV_CODEC_ID_OPUS) {
            int err = ogg_build_opus_headers(st->codec, oggstream,
                                             st->codec->flags & CODEC_FLAG_BITEXACT,
                                             &st->metadata);
            if (err) {
                av_log(s, AV_LOG_ERROR, "Error writing Opus headers\n");
                av_freep(&st->priv_data);
                return err;
            }
        } else {
            /* Vorbis/Theora: split the three Xiph headers out of extradata
             * and rebuild the comment header from our metadata. */
            uint8_t *p;
            const char *cstr = st->codec->codec_id == AV_CODEC_ID_VORBIS ? "vorbis" : "theora";
            int header_type = st->codec->codec_id == AV_CODEC_ID_VORBIS ? 3 : 0x81;
            int framing_bit = st->codec->codec_id == AV_CODEC_ID_VORBIS ? 1 : 0;

            if (avpriv_split_xiph_headers(st->codec->extradata, st->codec->extradata_size,
                                          st->codec->codec_id == AV_CODEC_ID_VORBIS ? 30 : 42,
                                          oggstream->header, oggstream->header_len) < 0) {
                av_log(s, AV_LOG_ERROR, "Extradata corrupted\n");
                av_freep(&st->priv_data);
                return -1;
            }

            p = ogg_write_vorbiscomment(7, st->codec->flags & CODEC_FLAG_BITEXACT,
                                        &oggstream->header_len[1], &st->metadata,
                                        framing_bit);
            oggstream->header[1] = p;
            if (!p)
                return AVERROR(ENOMEM);

            bytestream_put_byte(&p, header_type);
            bytestream_put_buffer(&p, cstr, 6);

            if (st->codec->codec_id == AV_CODEC_ID_THEORA) {
                /** KFGSHIFT is the width of the less significant section of the granule position
                    The less significant section is the frame count since the last keyframe */
                oggstream->kfgshift = ((oggstream->header[0][40]&3)<<3)|(oggstream->header[0][41]>>5);
                oggstream->vrev = oggstream->header[0][9];
                av_log(s, AV_LOG_DEBUG, "theora kfgshift %d, vrev %d\n",
                       oggstream->kfgshift, oggstream->vrev);
            }
        }
    }

    /* All bos (first header) pages must precede any other page. */
    for (j = 0; j < s->nb_streams; j++) {
        OGGStreamContext *oggstream = s->streams[j]->priv_data;
        ogg_buffer_data(s, s->streams[j], oggstream->header[0],
                        oggstream->header_len[0], 0, 1);
        oggstream->page.flags |= 2; // bos
        ogg_buffer_page(s, oggstream);
    }
    /* Then the remaining header packets of each stream. */
    for (j = 0; j < s->nb_streams; j++) {
        AVStream *st = s->streams[j];
        OGGStreamContext *oggstream = st->priv_data;
        for (i = 1; i < 3; i++) {
            if (oggstream->header_len[i])
                ogg_buffer_data(s, st, oggstream->header[i],
                                oggstream->header_len[i], 0, 1);
        }
        ogg_buffer_page(s, oggstream);
    }

    oggstream->page.start_granule = AV_NOPTS_VALUE;

    return 0;
}
 
/* Write out buffered pages in sorted order.
 * Without 'flush', stop as soon as a stream would be left with fewer than
 * two buffered pages (so its last page can still receive the eos flag or be
 * reordered). With 'flush', write everything and mark each stream's final
 * page with the eos flag. */
static void ogg_write_pages(AVFormatContext *s, int flush)
{
    OGGContext *ogg = s->priv_data;
    OGGPageList *next, *p;

    if (!ogg->page_list)
        return;

    for (p = ogg->page_list; p; ) {
        OGGStreamContext *oggstream =
            s->streams[p->page.stream_index]->priv_data;
        if (oggstream->page_count < 2 && !flush)
            break;
        ogg_write_page(s, &p->page,
                       flush && oggstream->page_count == 1 ? 4 : 0); // eos
        next = p->next;
        av_freep(&p);
        p = next;
    }
    ogg->page_list = p;                   /* whatever was not written stays queued */
}
 
/* Mux one packet: compute its granule position per the codec's mapping,
 * buffer its payload into pages and write out any completed pages.
 * Returns 0 on success or a negative AVERROR code. */
static int ogg_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st = s->streams[pkt->stream_index];
    OGGStreamContext *oggstream = st->priv_data;
    int ret;
    int64_t granule;

    if (st->codec->codec_id == AV_CODEC_ID_THEORA) {
        /* Old theora bitstreams (vrev < 1) use start-time granules,
         * newer ones use end-time (pts + duration). */
        int64_t pts = oggstream->vrev < 1 ? pkt->pts : pkt->pts + pkt->duration;
        int pframe_count;
        if (pkt->flags & AV_PKT_FLAG_KEY)
            oggstream->last_kf_pts = pts;
        pframe_count = pts - oggstream->last_kf_pts;
        // prevent frame count from overflow if key frame flag is not set
        if (pframe_count >= (1<<oggstream->kfgshift)) {
            oggstream->last_kf_pts += pframe_count;
            pframe_count = 0;
        }
        granule = (oggstream->last_kf_pts<<oggstream->kfgshift) | pframe_count;
    } else if (st->codec->codec_id == AV_CODEC_ID_OPUS)
        /* Opus granules include the encoder pre-skip delay. */
        granule = pkt->pts + pkt->duration + av_rescale_q(st->codec->delay, (AVRational){ 1, st->codec->sample_rate }, st->time_base);
    else
        granule = pkt->pts + pkt->duration;

    if (oggstream->page.start_granule == AV_NOPTS_VALUE)
        oggstream->page.start_granule = pkt->pts;

    ret = ogg_buffer_data(s, st, pkt->data, pkt->size, granule, 0);
    if (ret < 0)
        return ret;

    ogg_write_pages(s, 0);

    oggstream->last_granule = granule;

    return 0;
}
 
/* Finalize the file: buffer any partially filled pages, flush all queued
 * pages (marking eos), then free the per-stream header buffers and contexts.
 * header[0] is owned by the stream context only for FLAC/Speex/Opus; for
 * Vorbis/Theora it points into codec extradata and must not be freed. */
static int ogg_write_trailer(AVFormatContext *s)
{
    int i;

    /* flush current page if needed */
    for (i = 0; i < s->nb_streams; i++) {
        OGGStreamContext *oggstream = s->streams[i]->priv_data;

        if (oggstream->page.size > 0)
            ogg_buffer_page(s, oggstream);
    }

    ogg_write_pages(s, 1);

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        OGGStreamContext *oggstream = st->priv_data;
        if (st->codec->codec_id == AV_CODEC_ID_FLAC ||
            st->codec->codec_id == AV_CODEC_ID_SPEEX ||
            st->codec->codec_id == AV_CODEC_ID_OPUS) {
            av_freep(&oggstream->header[0]);
        }
        av_freep(&oggstream->header[1]);  /* VorbisComment is always ours */
        av_freep(&st->priv_data);
    }
    return 0;
}
 
/* Muxer registration: default codecs are FLAC (audio) and Theora (video). */
AVOutputFormat ff_ogg_muxer = {
    .name              = "ogg",
    .long_name         = NULL_IF_CONFIG_SMALL("Ogg"),
    .mime_type         = "application/ogg",
    .extensions        = "ogg,ogv,spx,opus",
    .priv_data_size    = sizeof(OGGContext),
    .audio_codec       = AV_CODEC_ID_FLAC,
    .video_codec       = AV_CODEC_ID_THEORA,
    .write_header      = ogg_write_header,
    .write_packet      = ogg_write_packet,
    .write_trailer     = ogg_write_trailer,
    .flags             = AVFMT_TS_NEGATIVE,
    .priv_class        = &ogg_muxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/oggparsecelt.c
0,0 → 1,90
/*
* Xiph CELT parser for Ogg
* Copyright (c) 2011 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <string.h>
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "oggdec.h"
 
/* Per-stream CELT demuxer state (stored in ogg_stream.private). */
struct oggcelt_private {
    int extra_headers_left;   /* remaining header packets (comment + extras) */
};
 
/* Parse a CELT header packet.
 * The 60-byte main header (magic "CELT    ") sets up the codec parameters
 * and records how many extra header packets (vorbiscomment etc.) follow;
 * those are then consumed one by one. Returns 1 for a header packet,
 * 0 for a data packet, or a negative AVERROR code.
 *
 * Fix vs. original: the freshly allocated priv leaked when
 * ff_alloc_extradata() failed. */
static int celt_header(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    struct oggcelt_private *priv = os->private;
    uint8_t *p = os->buf + os->pstart;

    if (os->psize == 60 &&
        !memcmp(p, ff_celt_codec.magic, ff_celt_codec.magicsize)) {
        /* Main header */

        uint32_t version, sample_rate, nb_channels, frame_size;
        uint32_t overlap, extra_headers;

        priv = av_malloc(sizeof(struct oggcelt_private));
        if (!priv)
            return AVERROR(ENOMEM);
        if (ff_alloc_extradata(st->codec, 2 * sizeof(uint32_t)) < 0) {
            av_free(priv);  /* not yet owned by os->private; free to avoid leak */
            return AVERROR(ENOMEM);
        }
        version          = AV_RL32(p + 28);
        /* unused header size field skipped */
        sample_rate      = AV_RL32(p + 36);
        nb_channels      = AV_RL32(p + 40);
        frame_size       = AV_RL32(p + 44);
        overlap          = AV_RL32(p + 48);
        /* unused bytes per packet field skipped */
        extra_headers    = AV_RL32(p + 56);
        st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id    = AV_CODEC_ID_CELT;
        st->codec->sample_rate = sample_rate;
        st->codec->channels    = nb_channels;
        st->codec->frame_size  = frame_size;
        if (sample_rate)
            avpriv_set_pts_info(st, 64, 1, sample_rate);
        priv->extra_headers_left = 1 + extra_headers;
        av_free(os->private);             /* replace any previous state */
        os->private = priv;
        AV_WL32(st->codec->extradata + 0, overlap);
        AV_WL32(st->codec->extradata + 4, version);
        return 1;
    } else if (priv && priv->extra_headers_left) {
        /* Extra headers (vorbiscomment) */

        ff_vorbis_comment(s, &st->metadata, p, os->psize);
        priv->extra_headers_left--;
        return 1;
    } else {
        return 0;
    }
}
 
/* CELT mapping: magic is "CELT" followed by four spaces (8 bytes matched). */
const struct ogg_codec ff_celt_codec = {
    .magic     = "CELT    ",
    .magicsize = 8,
    .header    = celt_header,
    .nb_header = 2,
};
/contrib/sdk/sources/ffmpeg/libavformat/oggparsedirac.c
0,0 → 1,117
/*
* Copyright (C) 2008 David Conrad
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/get_bits.h"
#include "libavcodec/dirac.h"
#include "avformat.h"
#include "internal.h"
#include "oggdec.h"
 
/* Parse the Dirac sequence header (after the 13-byte parse-unit prefix)
 * and set up the video stream. Returns 1 when the header was consumed,
 * 0 if already parsed, -1 on parse failure. */
static int dirac_header(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    dirac_source_params source;
    GetBitContext gb;

    // already parsed the header
    if (st->codec->codec_id == AV_CODEC_ID_DIRAC)
        return 0;

    init_get_bits(&gb, os->buf + os->pstart + 13, (os->psize - 13) * 8);
    if (avpriv_dirac_parse_sequence_header(st->codec, &gb, &source) < 0)
        return -1;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_DIRAC;
    // dirac in ogg always stores timestamps as though the video were interlaced
    avpriv_set_pts_info(st, 64, st->codec->time_base.num, 2*st->codec->time_base.den);
    return 1;
}
 
// various undocument things: granule is signed (only for dirac!)
/* Unpack the Dirac granule position: the high 32 bits (signed) hold the dts,
 * bits 9..21 hold the pts-dts delta, and the distance-to-last-keyframe is
 * split across bits 0..7 and 22..29. A distance of 0 marks a keyframe.
 * Returns the pts; *dts_out receives the dts when non-NULL. */
static uint64_t dirac_gptopts(AVFormatContext *s, int idx, uint64_t granule,
                              int64_t *dts_out)
{
    int64_t gp = granule;
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;

    unsigned dist  = ((gp >> 14) & 0xff00) | (gp & 0xff);
    int64_t  dts   = (gp >> 31);
    int64_t  pts   = dts + ((gp >> 9) & 0x1fff);

    if (!dist)
        os->pflags |= AV_PKT_FLAG_KEY;

    if (dts_out)
        *dts_out = dts;

    return pts;
}
 
/* Parse the old (pre-standard) Dirac-in-Ogg header packet.
 * Header packets begin with 'K'; the frame rate is stored big-endian
 * with the numerator at offset 12 and the denominator at offset 8.
 * Returns 1 for a header packet, 0 otherwise. */
static int old_dirac_header(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    uint8_t *hdr = os->buf + os->pstart;

    if (hdr[0] != 'K')
        return 0;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_DIRAC;
    avpriv_set_pts_info(st, 64, AV_RB32(hdr + 12), AV_RB32(hdr + 8));
    return 1;
}
 
/* Unpack an old-style Dirac granule: (keyframe number << 30) | frames since
 * that keyframe. A zero pframe count marks a keyframe packet.
 * Returns the timestamp as the sum of both fields. */
static uint64_t old_dirac_gptopts(AVFormatContext *s, int idx, uint64_t gp,
                                  int64_t *dts)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    uint64_t iframe, pframe;

    iframe = gp >> 30;
    pframe = gp & 0x3fffffff;
    if (pframe == 0)
        os->pflags |= AV_PKT_FLAG_KEY;

    return iframe + pframe;
}
 
/* Standard Dirac-in-Ogg mapping. */
const struct ogg_codec ff_dirac_codec = {
    .magic            = "BBCD\0",
    .magicsize        = 5,
    .header           = dirac_header,
    .gptopts          = dirac_gptopts,
    .granule_is_start = 1,
    .nb_header        = 1,
};

/* Legacy pre-standard Dirac-in-Ogg mapping. */
const struct ogg_codec ff_old_dirac_codec = {
    .magic            = "KW-DIRAC",
    .magicsize        = 8,
    .header           = old_dirac_header,
    .gptopts          = old_dirac_gptopts,
    .granule_is_start = 1,
    .nb_header        = 1,
};
/contrib/sdk/sources/ffmpeg/libavformat/oggparseflac.c
0,0 → 1,120
/*
* Copyright (C) 2005 Matthieu CASTET
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdlib.h>
#include "libavcodec/get_bits.h"
#include "libavcodec/flac.h"
#include "avformat.h"
#include "internal.h"
#include "oggdec.h"
 
#define OGG_FLAC_METADATA_TYPE_STREAMINFO 0x7F

/* Parse an Ogg-FLAC header packet: the STREAMINFO mapping header configures
 * the stream (and is copied into extradata); VORBIS_COMMENT blocks feed the
 * metadata dictionary. Packets starting with 0xff are audio frames, not
 * headers. Returns 1 for a header packet, 0 for data, negative on error. */
static int
flac_header (AVFormatContext * s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    GetBitContext gb;
    FLACStreaminfo si;
    int mdt;

    if (os->buf[os->pstart] == 0xff)
        return 0;

    init_get_bits(&gb, os->buf + os->pstart, os->psize*8);
    skip_bits1(&gb); /* metadata_last */
    mdt = get_bits(&gb, 7);

    if (mdt == OGG_FLAC_METADATA_TYPE_STREAMINFO) {
        /* STREAMINFO starts after 0x7F"FLAC" + version/count + "fLaC" + block header */
        uint8_t *streaminfo_start = os->buf + os->pstart + 5 + 4 + 4 + 4;
        skip_bits_long(&gb, 4*8); /* "FLAC" */
        if(get_bits(&gb, 8) != 1) /* unsupported major version */
            return -1;
        skip_bits_long(&gb, 8 + 16); /* minor version + header count */
        skip_bits_long(&gb, 4*8); /* "fLaC" */

        /* METADATA_BLOCK_HEADER */
        if (get_bits_long(&gb, 32) != FLAC_STREAMINFO_SIZE)
            return -1;

        avpriv_flac_parse_streaminfo(st->codec, &si, streaminfo_start);

        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = AV_CODEC_ID_FLAC;
        st->need_parsing = AVSTREAM_PARSE_HEADERS;

        if (ff_alloc_extradata(st->codec, FLAC_STREAMINFO_SIZE) < 0)
            return AVERROR(ENOMEM);
        memcpy(st->codec->extradata, streaminfo_start, st->codec->extradata_size);

        avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    } else if (mdt == FLAC_METADATA_TYPE_VORBIS_COMMENT) {
        ff_vorbis_comment (s, &st->metadata, os->buf + os->pstart + 4, os->psize - 4);
    }

    return 1;
}
 
/* Handle raw "fLaC" streams (no Ogg-FLAC mapping header): run the FLAC
 * parser over the packet to extract the sample rate. Returns 0 once the
 * sample rate is known (header phase done), 1 to keep consuming packets
 * as headers, -1 if the parser cannot be created. */
static int
old_flac_header (AVFormatContext * s, int idx)
{
    struct ogg *ogg = s->priv_data;
    AVStream *st = s->streams[idx];
    struct ogg_stream *os = ogg->streams + idx;
    AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_FLAC);
    int size;
    uint8_t *data;

    if (!parser)
        return -1;

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_FLAC;

    parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
    av_parser_parse2(parser, st->codec,
                     &data, &size, os->buf + os->pstart, os->psize,
                     AV_NOPTS_VALUE, AV_NOPTS_VALUE, -1);

    av_parser_close(parser);

    if (st->codec->sample_rate) {
        avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
        return 0;
    }

    return 1;
}
 
/* Ogg-FLAC mapping: magic is 0x7F followed by "FLAC". */
const struct ogg_codec ff_flac_codec = {
    .magic     = "\177FLAC",
    .magicsize = 5,
    .header    = flac_header,
    .nb_header = 2,
};

/* Raw FLAC stream wrapped in Ogg (no mapping header). */
const struct ogg_codec ff_old_flac_codec = {
    .magic     = "fLaC",
    .magicsize = 4,
    .header    = old_flac_header,
    .nb_header = 0,
};
/contrib/sdk/sources/ffmpeg/libavformat/oggparseogm.c
0,0 → 1,203
/**
Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
 
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
 
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
**/
 
#include <stdlib.h>
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/get_bits.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "internal.h"
#include "oggdec.h"
#include "riff.h"
 
/* Parse an OGM stream header (packet type 1) or comment packet (type 3).
 * The type-1 header identifies video ('v'), text ('t') or audio streams,
 * reads the common size/time fields, then the per-type parameters and any
 * trailing codec extradata. Returns 1 for header/comment packets, 0 for
 * data packets.
 *
 * Fix vs. original: the return value of ff_alloc_extradata() was ignored,
 * so on allocation failure bytestream2_get_buffer() would write through a
 * NULL extradata pointer. */
static int
ogm_header(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    GetByteContext p;
    uint64_t time_unit;
    uint64_t spu;
    uint32_t size;

    bytestream2_init(&p, os->buf + os->pstart, os->psize);
    if (!(bytestream2_peek_byte(&p) & 1))
        return 0;                         /* even first byte => data packet */

    if (bytestream2_peek_byte(&p) == 1) {
        bytestream2_skip(&p, 1);

        if (bytestream2_peek_byte(&p) == 'v'){
            int tag;
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            bytestream2_skip(&p, 8);
            tag = bytestream2_get_le32(&p);
            st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag);
            st->codec->codec_tag = tag;
        } else if (bytestream2_peek_byte(&p) == 't') {
            st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
            st->codec->codec_id = AV_CODEC_ID_TEXT;
            bytestream2_skip(&p, 12);
        } else {
            /* audio: codec id is a 4-digit hex WAV tag stored as ASCII */
            uint8_t acid[5] = { 0 };
            int cid;
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            bytestream2_skip(&p, 8);
            bytestream2_get_buffer(&p, acid, 4);
            acid[4] = 0;
            cid = strtol(acid, NULL, 16);
            st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, cid);
            // our parser completely breaks AAC in Ogg
            if (st->codec->codec_id != AV_CODEC_ID_AAC)
                st->need_parsing = AVSTREAM_PARSE_FULL;
        }

        size        = bytestream2_get_le32(&p);
        size        = FFMIN(size, os->psize);
        time_unit   = bytestream2_get_le64(&p);
        spu         = bytestream2_get_le64(&p);
        bytestream2_skip(&p, 4);    /* default_len */
        bytestream2_skip(&p, 8);    /* buffersize + bits_per_sample */

        if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
            st->codec->width  = bytestream2_get_le32(&p);
            st->codec->height = bytestream2_get_le32(&p);
            avpriv_set_pts_info(st, 64, time_unit, spu * 10000000);
        } else {
            st->codec->channels = bytestream2_get_le16(&p);
            bytestream2_skip(&p, 2); /* block_align */
            st->codec->bit_rate = bytestream2_get_le32(&p) * 8;
            st->codec->sample_rate = time_unit ? spu * 10000000 / time_unit : 0;
            avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
            if (size >= 56 && st->codec->codec_id == AV_CODEC_ID_AAC) {
                bytestream2_skip(&p, 4);
                size -= 4;
            }
            if (size > 52) {
                /* everything past the fixed 52-byte header is extradata */
                av_assert0(FF_INPUT_BUFFER_PADDING_SIZE <= 52);
                size -= 52;
                if (ff_alloc_extradata(st->codec, size) < 0)
                    return AVERROR(ENOMEM);
                bytestream2_get_buffer(&p, st->codec->extradata, st->codec->extradata_size);
            }
        }
    } else if (bytestream2_peek_byte(&p) == 3) {
        bytestream2_skip(&p, 7);
        if (bytestream2_get_bytes_left(&p) > 1)
            ff_vorbis_comment(s, &st->metadata, p.buffer, bytestream2_get_bytes_left(&p) - 1);
    }

    return 1;
}
 
/* Parse the old "Direct Show Samples embedded in Ogg" header. The media
 * type GUID fragment at offset 96 selects video (0x05589f80) or audio
 * (0x05589f81) and fixed offsets within the header give the parameters.
 * Returns 1 for header packets, 0 for data packets. */
static int
ogm_dshow_header(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    uint8_t *p = os->buf + os->pstart;
    uint32_t t;

    if(!(*p & 1))
        return 0;                   /* data packet */
    if(*p != 1)
        return 1;                   /* non-type-1 header: consume, ignore */

    t = AV_RL32(p + 96);

    if(t == 0x05589f80){
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, AV_RL32(p + 68));
        avpriv_set_pts_info(st, 64, AV_RL64(p + 164), 10000000);
        st->codec->width = AV_RL32(p + 176);
        st->codec->height = AV_RL32(p + 180);
    } else if(t == 0x05589f81){
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, AV_RL16(p + 124));
        st->codec->channels = AV_RL16(p + 126);
        st->codec->sample_rate = AV_RL32(p + 128);
        st->codec->bit_rate = AV_RL32(p + 132) * 8;
    }

    return 1;
}
 
/* Strip the OGM per-packet header: flag byte (bit 3 = keyframe) followed by
 * 0-3 little-endian length bytes whose count is encoded in bits 1 and 6-7 of
 * the flag byte; those bytes give the packet duration. Always returns 0. */
static int
ogm_packet(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    uint8_t *p = os->buf + os->pstart;
    int lb;

    if(*p & 8)
        os->pflags |= AV_PKT_FLAG_KEY;

    /* number of length bytes that follow the flag byte */
    lb = ((*p & 2) << 1) | ((*p >> 6) & 3);
    os->pstart += lb + 1;
    os->psize -= lb + 1;

    /* assemble the little-endian duration from the length bytes */
    while (lb--)
        os->pduration += p[lb+1] << (lb*8);

    return 0;
}
 
/* OGM mappings: the magic byte 0x01 is followed by the stream-type name. */
const struct ogg_codec ff_ogm_video_codec = {
    .magic            = "\001video",
    .magicsize        = 6,
    .header           = ogm_header,
    .packet           = ogm_packet,
    .granule_is_start = 1,
    .nb_header        = 2,
};

const struct ogg_codec ff_ogm_audio_codec = {
    .magic            = "\001audio",
    .magicsize        = 6,
    .header           = ogm_header,
    .packet           = ogm_packet,
    .granule_is_start = 1,
    .nb_header        = 2,
};

const struct ogg_codec ff_ogm_text_codec = {
    .magic            = "\001text",
    .magicsize        = 5,
    .header           = ogm_header,
    .packet           = ogm_packet,
    .granule_is_start = 1,
    .nb_header        = 2,
};

/* Legacy DirectShow-in-Ogg mapping with its own header layout. */
const struct ogg_codec ff_ogm_old_codec = {
    .magic            = "\001Direct Show Samples embedded in Ogg",
    .magicsize        = 35,
    .header           = ogm_dshow_header,
    .packet           = ogm_packet,
    .granule_is_start = 1,
    .nb_header        = 1,
};
/contrib/sdk/sources/ffmpeg/libavformat/oggparseopus.c
0,0 → 1,140
/*
* Opus parser for Ogg
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <string.h>
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "oggdec.h"
 
/* Per-stream Opus demuxer state (stored in ogg_stream.private). */
struct oggopus_private {
    int need_comments;    /* nonzero while the OpusTags packet is pending */
    unsigned pre_skip;    /* samples to drop at stream start (from OpusHead) */
    int64_t cur_dts;      /* running dts in 48kHz units */
};

#define OPUS_HEAD_SIZE 19
 
/*
 * Parse the two Opus header packets: "OpusHead" on the BOS page, then
 * "OpusTags". Returns 1 while consuming a header packet, 0 once data
 * packets start, or a negative AVERROR.
 */
static int opus_header(AVFormatContext *avf, int idx)
{
    struct ogg *ogg = avf->priv_data;
    struct ogg_stream *os = &ogg->streams[idx];
    AVStream *st = avf->streams[idx];
    struct oggopus_private *priv = os->private;
    uint8_t *packet = os->buf + os->pstart;

    /* lazily allocate the per-stream private context */
    if (!priv) {
        priv = os->private = av_mallocz(sizeof(*priv));
        if (!priv)
            return AVERROR(ENOMEM);
    }

    if (os->flags & OGG_FLAG_BOS) {
        /* require the full 19-byte ID header and a version byte whose
         * upper nibble is 0 (compatible version) */
        if (os->psize < OPUS_HEAD_SIZE || (AV_RL8(packet + 8) & 0xF0) != 0)
            return AVERROR_INVALIDDATA;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id   = AV_CODEC_ID_OPUS;
        st->codec->channels   = AV_RL8 (packet + 9);
        priv->pre_skip        = AV_RL16(packet + 10);
        /*orig_sample_rate    = AV_RL32(packet + 12);*/
        /*gain                = AV_RL16(packet + 16);*/
        /*channel_map         = AV_RL8 (packet + 18);*/

        /* the whole ID header becomes the decoder extradata */
        if (ff_alloc_extradata(st->codec, os->psize))
            return AVERROR(ENOMEM);

        memcpy(st->codec->extradata, packet, os->psize);

        /* Opus timestamps are always expressed at 48 kHz */
        st->codec->sample_rate = 48000;
        avpriv_set_pts_info(st, 64, 1, 48000);
        priv->need_comments = 1;
        return 1;
    }

    if (priv->need_comments) {
        /* the second header packet must be the "OpusTags" Vorbis comment */
        if (os->psize < 8 || memcmp(packet, "OpusTags", 8))
            return AVERROR_INVALIDDATA;
        ff_vorbis_comment(avf, &st->metadata, packet + 8, os->psize - 8);
        priv->need_comments--;
        return 1;
    }

    return 0;
}
 
/*
 * Compute the duration of an Opus packet from its TOC byte, fix up the
 * initial timestamps using pre_skip, and apply end trimming on the EOS page.
 */
static int opus_packet(AVFormatContext *avf, int idx)
{
    struct ogg *ogg = avf->priv_data;
    struct ogg_stream *os = &ogg->streams[idx];
    AVStream *st = avf->streams[idx];
    struct oggopus_private *priv = os->private;
    uint8_t *packet = os->buf + os->pstart;
    unsigned toc, toc_config, toc_count, frame_size, nb_frames = 1;

    if (!os->psize)
        return AVERROR_INVALIDDATA;

    /* TOC byte: config in the top 5 bits, frame-count code in the low 2 */
    toc        = *packet;
    toc_config = toc >> 3;
    toc_count  = toc & 3;
    /* per-frame duration in 48 kHz samples for SILK / hybrid / CELT configs */
    frame_size = toc_config < 12 ? FFMAX(480, 960 * (toc_config & 3)) :
                 toc_config < 16 ? 480 << (toc_config & 1) :
                                   120 << (toc_config & 3);
    if (toc_count == 3) {
        /* code 3: explicit frame count in the second byte */
        if (os->psize < 2)
            return AVERROR_INVALIDDATA;
        nb_frames = packet[1] & 0x3F;
    } else if (toc_count) {
        /* codes 1 and 2: exactly two frames */
        nb_frames = 2;
    }

    os->pduration = frame_size * nb_frames;
    if (os->lastpts != AV_NOPTS_VALUE) {
        if (st->start_time == AV_NOPTS_VALUE)
            st->start_time = os->lastpts;
        /* shift timestamps back by the encoder's pre-skip */
        priv->cur_dts = os->lastdts = os->lastpts -= priv->pre_skip;
    }

    priv->cur_dts += os->pduration;
    if ((os->flags & OGG_FLAG_EOS)) {
        /* trim the final packet so the stream ends exactly at the granule */
        int64_t skip = priv->cur_dts - os->granule + priv->pre_skip;
        skip = FFMIN(skip, os->pduration);
        if (skip > 0) {
            os->pduration = skip < os->pduration ? os->pduration - skip : 1;
            os->end_trimming = skip;
            av_log(avf, AV_LOG_DEBUG,
                   "Last packet was truncated to %d due to end trimming.\n",
                   os->pduration);
        }
    }

    return 0;
}
 
/* Ogg Opus streams: identified by the "OpusHead" magic; one header packet
 * handled here plus the OpusTags packet consumed by opus_header(). */
const struct ogg_codec ff_opus_codec = {
    .name      = "Opus",
    .magic     = "OpusHead",
    .magicsize = 8,
    .header    = opus_header,
    .packet    = opus_packet,
    .nb_header = 1,
};
/contrib/sdk/sources/ffmpeg/libavformat/oggparseskeleton.c
0,0 → 1,102
/*
* Copyright (C) 2010 David Conrad
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "internal.h"
#include "oggdec.h"
 
/*
 * Parse Ogg Skeleton "fishead" (global info) and "fisbone" (per-stream info)
 * packets. Returns 1 when the packet was consumed, -1 on malformed input.
 */
static int skeleton_header(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    uint8_t *buf = os->buf + os->pstart;
    int version_major, version_minor;
    int64_t start_num, start_den;
    uint64_t start_granule;
    int target_idx, start_time;

    strcpy(st->codec->codec_name, "skeleton");
    st->codec->codec_type = AVMEDIA_TYPE_DATA;

    /* the skeleton logical stream ends with an empty EOS packet */
    if ((os->flags & OGG_FLAG_EOS) && os->psize == 0)
        return 1;

    if (os->psize < 8)
        return -1;

    if (!strncmp(buf, "fishead", 8)) {
        if (os->psize < 64)
            return -1;

        version_major = AV_RL16(buf+8);
        version_minor = AV_RL16(buf+10);

        if (version_major != 3 && version_major != 4) {
            av_log(s, AV_LOG_WARNING, "Unknown skeleton version %d.%d\n",
                   version_major, version_minor);
            return -1;
        }

        // This is the overall start time. We use it for the start time of
        // of the skeleton stream since if left unset lavf assumes 0,
        // which we don't want since skeleton is timeless
        // FIXME: the real meaning of this field is "start playback at
        // this time which can be in the middle of a packet
        start_num = AV_RL64(buf+12);
        start_den = AV_RL64(buf+20);

        if (start_den > 0 && start_num > 0) {
            int base_den;
            /* reduce the 64-bit fraction so it fits a 32-bit timebase */
            av_reduce(&start_time, &base_den, start_num, start_den, INT_MAX);
            avpriv_set_pts_info(st, 64, 1, base_den);
            os->lastpts =
            st->start_time = start_time;
        }
    } else if (!strncmp(buf, "fisbone", 8)) {
        if (os->psize < 52)
            return -1;

        /* look up the stream this fisbone describes by its serial number */
        target_idx = ogg_find_stream(ogg, AV_RL32(buf+12));
        start_granule = AV_RL64(buf+36);
        if (target_idx < 0) {
            av_log(s, AV_LOG_WARNING, "Serial number in fisbone doesn't match any stream\n");
            return 1;
        }
        os = ogg->streams + target_idx;
        if (os->start_granule != OGG_NOGRANULE_VALUE) {
            av_log(s, AV_LOG_WARNING, "Multiple fisbone for the same stream\n");
            return 1;
        }
        if (start_granule != OGG_NOGRANULE_VALUE) {
            os->start_granule = start_granule;
        }
    }

    return 1;
}
 
/* Ogg Skeleton metadata stream: header-only, produces no data packets
 * (nb_header = 0 since every packet is handled by skeleton_header). */
const struct ogg_codec ff_skeleton_codec = {
    .magic     = "fishead",
    .magicsize = 8,
    .header    = skeleton_header,
    .nb_header = 0,
};
/contrib/sdk/sources/ffmpeg/libavformat/oggparsespeex.c
0,0 → 1,138
/*
Copyright (C) 2008 Reimar Döffinger
 
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
 
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
**/
 
#include <stdlib.h>
#include "libavutil/bswap.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavcodec/get_bits.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "internal.h"
#include "oggdec.h"
 
/* Per-stream state for the Ogg Speex parser. */
struct speex_params {
    int packet_size;            /* nominal packet duration in samples */
    int final_packet_duration;  /* duration of the (possibly trimmed) last packet */
    int seq;                    /* number of header packets parsed so far */
};
 
/*
 * Parse the two Speex header packets: the Speex stream header followed by a
 * Vorbis comment packet. Returns 1 for a consumed header packet, 0 once both
 * headers are done, or a negative AVERROR on error.
 */
static int speex_header(AVFormatContext *s, int idx) {
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    struct speex_params *spxp = os->private;
    AVStream *st = s->streams[idx];
    uint8_t *p = os->buf + os->pstart;

    if (!spxp) {
        spxp = av_mallocz(sizeof(*spxp));
        /* fix: the allocation result was dereferenced below (spxp->seq)
         * without a NULL check */
        if (!spxp)
            return AVERROR(ENOMEM);
        os->private = spxp;
    }

    /* both headers already consumed: this is a data packet */
    if (spxp->seq > 1)
        return 0;

    if (spxp->seq == 0) {
        int frames_per_packet;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id   = AV_CODEC_ID_SPEEX;

        if (os->psize < 68) {
            av_log(s, AV_LOG_ERROR, "speex packet too small\n");
            return AVERROR_INVALIDDATA;
        }

        st->codec->sample_rate = AV_RL32(p + 36);
        st->codec->channels    = AV_RL32(p + 48);
        if (st->codec->channels < 1 || st->codec->channels > 2) {
            av_log(s, AV_LOG_ERROR, "invalid channel count. Speex must be mono or stereo.\n");
            return AVERROR_INVALIDDATA;
        }
        st->codec->channel_layout = st->codec->channels == 1 ? AV_CH_LAYOUT_MONO :
                                                               AV_CH_LAYOUT_STEREO;

        /* per-packet sample count; a packet may pack several frames */
        spxp->packet_size = AV_RL32(p + 56);
        frames_per_packet = AV_RL32(p + 64);
        if (frames_per_packet)
            spxp->packet_size *= frames_per_packet;

        /* keep the raw Speex header as decoder extradata */
        if (ff_alloc_extradata(st->codec, os->psize) < 0)
            return AVERROR(ENOMEM);
        memcpy(st->codec->extradata, p, st->codec->extradata_size);

        avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    } else
        /* second header packet: Vorbis comment metadata */
        ff_vorbis_comment(s, &st->metadata, p, os->psize);

    spxp->seq++;
    return 1;
}
 
/* Count the packets that terminate on the current page: each lacing value
 * below 255 marks the end of a packet. */
static int ogg_page_packets(struct ogg_stream *os)
{
    int count = 0, seg = 0;

    while (seg < os->nsegs) {
        if (os->segments[seg] < 255)
            count++;
        seg++;
    }
    return count;
}
 
/*
 * Set the duration of each Speex packet. Derives initial timestamps from the
 * first page's granule and shortens the very last packet using the stream's
 * final granule position.
 */
static int speex_packet(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    struct speex_params *spxp = os->private;
    int packet_size = spxp->packet_size;

    if (os->flags & OGG_FLAG_EOS && os->lastpts != AV_NOPTS_VALUE &&
        os->granule > 0) {
        /* first packet of final page. we have to calculate the final packet
           duration here because it is the only place we know the next-to-last
           granule position. */
        spxp->final_packet_duration = os->granule - os->lastpts -
                                      packet_size * (ogg_page_packets(os) - 1);
    }

    if (!os->lastpts && os->granule > 0)
        /* first packet */
        os->lastpts = os->lastdts = os->granule - packet_size *
                                    ogg_page_packets(os);
    if (os->flags & OGG_FLAG_EOS && os->segp == os->nsegs &&
        spxp->final_packet_duration)
        /* final packet */
        os->pduration = spxp->final_packet_duration;
    else
        os->pduration = packet_size;

    return 0;
}
 
/* Ogg Speex streams: "Speex   " magic, two header packets
 * (stream header + Vorbis comment). */
const struct ogg_codec ff_speex_codec = {
    .magic     = "Speex   ",
    .magicsize = 8,
    .header    = speex_header,
    .packet    = speex_packet,
    .nb_header = 2,
};
/contrib/sdk/sources/ffmpeg/libavformat/oggparsetheora.c
0,0 → 1,209
/**
Copyright (C) 2005 Matthieu CASTET, Alex Beregszaszi
 
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
 
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
**/
 
#include <stdlib.h>
#include "libavutil/bswap.h"
#include "libavcodec/get_bits.h"
#include "avformat.h"
#include "internal.h"
#include "oggdec.h"
 
/* Per-stream state for the Ogg Theora parser. */
struct theora_params {
    int gpshift;       /* low bits of the granule holding the frames-since-keyframe delta */
    int gpmask;        /* mask extracting that delta */
    unsigned version;  /* Theora bitstream version, e.g. 0x030200 */
};
 
/**
 * Parse the three Theora header packets (identification 0x80, comment 0x81,
 * setup 0x82) and append each to the stream extradata prefixed with a 16-bit
 * big-endian size. Returns 1 for a consumed header packet, 0 for a data
 * packet, negative on error.
 */
static int
theora_header (AVFormatContext * s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    struct theora_params *thp = os->private;
    int cds = st->codec->extradata_size + os->psize + 2, err;
    uint8_t *cdp;

    /* header packets have the top bit of the first byte set */
    if (!(os->buf[os->pstart] & 0x80))
        return 0;

    if (!thp) {
        thp = av_mallocz(sizeof(*thp));
        /* fix: the allocation result was dereferenced below (thp->version)
         * without a NULL check */
        if (!thp)
            return AVERROR(ENOMEM);
        os->private = thp;
    }

    switch (os->buf[os->pstart]) {
    case 0x80: {
        GetBitContext gb;
        int width, height;
        AVRational timebase;

        init_get_bits(&gb, os->buf + os->pstart, os->psize * 8);

        skip_bits_long(&gb, 7 * 8); /* 0x80"theora" */

        thp->version = get_bits_long(&gb, 24);
        if (thp->version < 0x030100)
        {
            av_log(s, AV_LOG_ERROR,
                   "Too old or unsupported Theora (%x)\n", thp->version);
            return -1;
        }

        /* coded dimensions are stored in units of 16-pixel macroblocks */
        width  = get_bits(&gb, 16) << 4;
        height = get_bits(&gb, 16) << 4;
        avcodec_set_dimensions(st->codec, width, height);

        if (thp->version >= 0x030400)
            skip_bits(&gb, 100);

        if (thp->version >= 0x030200) {
            /* exact display size; accept it only when it crops the coded
             * size by less than one macroblock */
            width  = get_bits_long(&gb, 24);
            height = get_bits_long(&gb, 24);
            if (   width  <= st->codec->width  && width  > st->codec->width  - 16
                && height <= st->codec->height && height > st->codec->height - 16)
                avcodec_set_dimensions(st->codec, width, height);

            skip_bits(&gb, 16);
        }
        timebase.den = get_bits_long(&gb, 32);
        timebase.num = get_bits_long(&gb, 32);
        if (!(timebase.num > 0 && timebase.den > 0)) {
            av_log(s, AV_LOG_WARNING, "Invalid time base in theora stream, assuming 25 FPS\n");
            timebase.num = 1;
            timebase.den = 25;
        }
        avpriv_set_pts_info(st, 64, timebase.num, timebase.den);

        st->sample_aspect_ratio.num = get_bits_long(&gb, 24);
        st->sample_aspect_ratio.den = get_bits_long(&gb, 24);

        if (thp->version >= 0x030200)
            skip_bits_long(&gb, 38);
        /* NOTE(review): 0x304000 looks like a typo for 0x030400, but it is
         * kept unchanged to preserve behavior — verify against the spec. */
        if (thp->version >= 0x304000)
            skip_bits(&gb, 2);

        /* granule position split: high bits = keyframe number,
         * low gpshift bits = frames since that keyframe */
        thp->gpshift = get_bits(&gb, 5);
        thp->gpmask  = (1 << thp->gpshift) - 1;

        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = AV_CODEC_ID_THEORA;
        st->need_parsing      = AVSTREAM_PARSE_HEADERS;
    }
    break;
    case 0x81:
        ff_vorbis_comment(s, &st->metadata, os->buf + os->pstart + 7, os->psize - 7);
        /* fall through — comment and setup headers share the version check */
    case 0x82:
        if (!thp->version)
            return -1;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "Unknown header type %X\n", os->buf[os->pstart]);
        return -1;
    }

    /* append this header packet to extradata with a 2-byte size prefix */
    if ((err = av_reallocp(&st->codec->extradata,
                           cds + FF_INPUT_BUFFER_PADDING_SIZE)) < 0) {
        st->codec->extradata_size = 0;
        return err;
    }
    cdp    = st->codec->extradata + st->codec->extradata_size;
    *cdp++ = os->psize >> 8;
    *cdp++ = os->psize & 0xff;
    memcpy (cdp, os->buf + os->pstart, os->psize);
    st->codec->extradata_size = cds;

    return 1;
}
 
/* Convert a Theora granule position into a pts (frame index); also flags
 * keyframe packets and optionally reports the dts. */
static uint64_t
theora_gptopts(AVFormatContext *ctx, int idx, uint64_t gp, int64_t *dts)
{
    struct ogg *ogg           = ctx->priv_data;
    struct ogg_stream *stream = ogg->streams + idx;
    struct theora_params *thp = stream->private;
    uint64_t keyframe_no, delta, pts;

    if (!thp)
        return AV_NOPTS_VALUE;

    keyframe_no = gp >> thp->gpshift;
    delta       = gp & thp->gpmask;

    /* streams older than 3.2.1 count keyframes from 0 instead of 1 */
    if (thp->version < 0x030201)
        keyframe_no++;

    /* a zero delta means this granule marks a keyframe */
    if (!delta)
        stream->pflags |= AV_PKT_FLAG_KEY;

    pts = keyframe_no + delta;
    if (dts)
        *dts = pts;

    return pts;
}
 
/*
 * Assign per-packet durations (always 1 frame) and derive the stream's
 * initial timestamp from the first page's granule position.
 */
static int theora_packet(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    int duration;

    /* first packet handling
       here we parse the duration of each packet in the first page and compare
       the total duration to the page granule to find the encoder delay and
       set the first timestamp */

    if ((!os->lastpts || os->lastpts == AV_NOPTS_VALUE) && !(os->flags & OGG_FLAG_EOS)) {
        int seg;

        /* count the packets remaining on this page (one frame each) */
        duration = 1;
        for (seg = os->segp; seg < os->nsegs; seg++) {
            if (os->segments[seg] < 255)
                duration ++;
        }

        os->lastpts = os->lastdts = theora_gptopts(s, idx, os->granule, NULL) - duration;
        if (s->streams[idx]->start_time == AV_NOPTS_VALUE) {
            s->streams[idx]->start_time = os->lastpts;
            if (s->streams[idx]->duration)
                s->streams[idx]->duration -= s->streams[idx]->start_time;
        }
    }

    /* parse packet duration */
    if (os->psize > 0) {
        os->pduration = 1;
    }

    return 0;
}
 
/* Ogg Theora streams: "\200theora" magic, three header packets
 * (identification, comment, setup). */
const struct ogg_codec ff_theora_codec = {
    .magic     = "\200theora",
    .magicsize = 7,
    .header    = theora_header,
    .packet    = theora_packet,
    .gptopts   = theora_gptopts,
    .nb_header = 3,
};
/contrib/sdk/sources/ffmpeg/libavformat/oggparsevorbis.c
0,0 → 1,426
/*
* Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#include <stdlib.h>
 
#include "libavutil/avstring.h"
#include "libavutil/base64.h"
#include "libavutil/bswap.h"
#include "libavutil/dict.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/get_bits.h"
#include "libavcodec/vorbis_parser.h"
#include "avformat.h"
#include "flac_picture.h"
#include "internal.h"
#include "oggdec.h"
#include "vorbiscomment.h"
 
/*
 * Handle OGM-style chapter comment tags: "CHAPTERxxx" carries a start time,
 * "CHAPTERxxxNAME" a title. Returns 1 when the tag was consumed (taking
 * ownership of key and val); 0 when it is not a chapter tag, in which case
 * the caller keeps ownership of both strings.
 */
static int ogm_chapter(AVFormatContext *as, uint8_t *key, uint8_t *val)
{
    int i, cnum, h, m, s, ms, keylen = strlen(key);
    AVChapter *chapter = NULL;

    if (keylen < 9 || sscanf(key, "CHAPTER%03d", &cnum) != 1)
        return 0;

    if (keylen <= 10) {
        /* "CHAPTERxxx": value is the start time as HH:MM:SS.mmm */
        if (sscanf(val, "%02d:%02d:%02d.%03d", &h, &m, &s, &ms) < 4)
            return 0;

        avpriv_new_chapter(as, cnum, (AVRational) { 1, 1000 },
                           ms + 1000 * (s + 60 * (m + 60 * h)),
                           AV_NOPTS_VALUE, NULL);
        av_free(val);
    } else if (!strcmp(key + keylen - 4, "NAME")) {
        /* "CHAPTERxxxNAME": attach the title to the matching chapter id */
        for (i = 0; i < as->nb_chapters; i++)
            if (as->chapters[i]->id == cnum) {
                chapter = as->chapters[i];
                break;
            }
        if (!chapter)
            return 0;

        /* the dictionary takes ownership of val */
        av_dict_set(&chapter->metadata, "title", val, AV_DICT_DONT_STRDUP_VAL);
    } else
        return 0;

    av_free(key);
    return 1;
}
 
/*
 * Parse a Vorbis comment block into the metadata dictionary. Handles
 * base64-encoded FLAC cover art (METADATA_BLOCK_PICTURE) and OGM chapter
 * tags specially. Returns 0 on success or a negative AVERROR.
 */
int ff_vorbis_comment(AVFormatContext *as, AVDictionary **m,
                      const uint8_t *buf, int size)
{
    const uint8_t *p = buf;
    const uint8_t *end = buf + size;
    unsigned n, j;
    int s;

    /* must have vendor_length and user_comment_list_length */
    if (size < 8)
        return AVERROR_INVALIDDATA;

    s = bytestream_get_le32(&p);

    if (end - p - 4 < s || s < 0)
        return AVERROR_INVALIDDATA;

    /* skip the vendor string */
    p += s;

    /* number of user comments */
    n = bytestream_get_le32(&p);

    while (end - p >= 4 && n > 0) {
        const char *t, *v;
        int tl, vl;

        s = bytestream_get_le32(&p);

        if (end - p < s || s < 0)
            break;

        t = p;
        p += s;
        n--;

        /* each entry is "TAG=value"; skip entries without '=' */
        v = memchr(t, '=', s);
        if (!v)
            continue;

        tl = v - t;
        vl = s - tl - 1;
        v++;

        if (tl && vl) {
            char *tt, *ct;

            tt = av_malloc(tl + 1);
            ct = av_malloc(vl + 1);
            if (!tt || !ct) {
                av_freep(&tt);
                av_freep(&ct);
                return AVERROR(ENOMEM);
            }

            /* dictionary keys are stored upper-cased */
            for (j = 0; j < tl; j++)
                tt[j] = av_toupper(t[j]);
            tt[tl] = 0;

            memcpy(ct, v, vl);
            ct[vl] = 0;

            /* The format in which the pictures are stored is the FLAC format.
             * Xiph says: "The binary FLAC picture structure is base64 encoded
             * and placed within a VorbisComment with the tag name
             * 'METADATA_BLOCK_PICTURE'. This is the preferred and
             * recommended way of embedding cover art within VorbisComments."
             */
            if (!strcmp(tt, "METADATA_BLOCK_PICTURE")) {
                int ret;
                char *pict = av_malloc(vl);

                if (!pict) {
                    av_log(as, AV_LOG_WARNING, "out-of-memory error. Skipping cover art block.\n");
                    av_freep(&tt);
                    av_freep(&ct);
                    continue;
                }
                if ((ret = av_base64_decode(pict, ct, vl)) > 0)
                    ret = ff_flac_parse_picture(as, pict, ret);
                av_freep(&tt);
                av_freep(&ct);
                av_freep(&pict);
                if (ret < 0) {
                    av_log(as, AV_LOG_WARNING, "Failed to parse cover art block.\n");
                    continue;
                }
            } else if (!ogm_chapter(as, tt, ct))
                /* ogm_chapter() took ownership when it returned 1; otherwise
                 * hand both strings over to the dictionary */
                av_dict_set(m, tt, ct,
                            AV_DICT_DONT_STRDUP_KEY |
                            AV_DICT_DONT_STRDUP_VAL);
        }
    }

    if (p != end)
        av_log(as, AV_LOG_INFO,
               "%ti bytes of comment header remain\n", end - p);
    if (n > 0)
        av_log(as, AV_LOG_INFO,
               "truncated comment header, %i comments not found\n", n);

    ff_metadata_conv(m, NULL, ff_vorbiscomment_metadata_conv);

    return 0;
}
 
/*
* Parse the vorbis header
*
* Vorbis Identification header from Vorbis_I_spec.html#vorbis-spec-codec
* [vorbis_version] = read 32 bits as unsigned integer | Not used
* [audio_channels] = read 8 bit integer as unsigned | Used
* [audio_sample_rate] = read 32 bits as unsigned integer | Used
* [bitrate_maximum] = read 32 bits as signed integer | Not used yet
* [bitrate_nominal] = read 32 bits as signed integer | Not used yet
* [bitrate_minimum] = read 32 bits as signed integer | Used as bitrate
* [blocksize_0] = read 4 bits as unsigned integer | Not Used
* [blocksize_1] = read 4 bits as unsigned integer | Not Used
* [framing_flag] = read one bit | Not Used
*/
 
/* Per-stream state for the Ogg Vorbis parser. */
struct oggvorbis_private {
    unsigned int len[3];       /* sizes of the three cached header packets */
    unsigned char *packet[3];  /* id / comment / setup header packets */
    VorbisParseContext vp;     /* frame-duration parser state */
    int64_t final_pts;         /* pts of the first packet on the final page */
    int final_duration;        /* summed packet durations on the final page */
};
 
/*
 * Assemble the three cached Vorbis header packets into the Xiph extradata
 * layout: a count byte (2), lacing values for the first two packet sizes,
 * then the packets back to back. Frees the cached packets and returns the
 * total size, or a negative AVERROR.
 */
static int fixup_vorbis_headers(AVFormatContext *as,
                                struct oggvorbis_private *priv,
                                uint8_t **buf)
{
    int i, offset, len, err;
    int buf_len;
    unsigned char *ptr;

    len = priv->len[0] + priv->len[1] + priv->len[2];
    /* worst-case lacing overhead plus some slack */
    buf_len = len + len / 255 + 64;
    ptr = *buf = av_realloc(NULL, buf_len);
    if (!ptr)
        return AVERROR(ENOMEM);
    memset(*buf, '\0', buf_len);

    ptr[0] = 2; /* number of packets minus one */
    offset = 1;
    offset += av_xiphlacing(&ptr[offset], priv->len[0]);
    offset += av_xiphlacing(&ptr[offset], priv->len[1]);
    for (i = 0; i < 3; i++) {
        memcpy(&ptr[offset], priv->packet[i], priv->len[i]);
        offset += priv->len[i];
        av_freep(&priv->packet[i]);
    }
    /* shrink to the actual size plus the padding decoders require */
    if ((err = av_reallocp(buf, offset + FF_INPUT_BUFFER_PADDING_SIZE)) < 0)
        return err;
    return offset;
}
 
/* Release the three cached Vorbis header packets, if any were stored. */
static void vorbis_cleanup(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    struct oggvorbis_private *priv = os->private;

    if (!os->private)
        return;
    av_freep(&priv->packet[0]);
    av_freep(&priv->packet[1]);
    av_freep(&priv->packet[2]);
}
 
/*
 * Parse the three Vorbis header packets (id 1, comment 3, setup 5), cache
 * them, and build the decoder extradata once all three are seen. Returns 1
 * for a consumed header packet, 0 for a data packet, negative on error.
 */
static int vorbis_header(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    AVStream *st = s->streams[idx];
    struct ogg_stream *os = ogg->streams + idx;
    struct oggvorbis_private *priv;
    int pkt_type = os->buf[os->pstart];

    if (!os->private) {
        os->private = av_mallocz(sizeof(struct oggvorbis_private));
        if (!os->private)
            return AVERROR(ENOMEM);
    }

    /* header packet types are odd (1/3/5); even means audio data */
    if (!(pkt_type & 1))
        return 0;

    if (os->psize < 1 || pkt_type > 5)
        return AVERROR_INVALIDDATA;

    priv = os->private;

    /* reject duplicate or out-of-order header packets */
    if (priv->packet[pkt_type >> 1])
        return AVERROR_INVALIDDATA;
    if (pkt_type > 1 && !priv->packet[0] || pkt_type > 3 && !priv->packet[1])
        return AVERROR_INVALIDDATA;

    priv->len[pkt_type >> 1]    = os->psize;
    priv->packet[pkt_type >> 1] = av_mallocz(os->psize);
    if (!priv->packet[pkt_type >> 1])
        return AVERROR(ENOMEM);
    memcpy(priv->packet[pkt_type >> 1], os->buf + os->pstart, os->psize);
    if (os->buf[os->pstart] == 1) {
        const uint8_t *p = os->buf + os->pstart + 7; /* skip "\001vorbis" tag */
        unsigned blocksize, bs0, bs1;
        int srate;
        int channels;

        if (os->psize != 30)
            return AVERROR_INVALIDDATA;

        if (bytestream_get_le32(&p) != 0) /* vorbis_version */
            return AVERROR_INVALIDDATA;

        channels = bytestream_get_byte(&p);
        if (st->codec->channels && channels != st->codec->channels) {
            av_log(s, AV_LOG_ERROR, "Channel change is not supported\n");
            return AVERROR_PATCHWELCOME;
        }
        st->codec->channels = channels;
        srate = bytestream_get_le32(&p);
        p += 4; // skip maximum bitrate
        st->codec->bit_rate = bytestream_get_le32(&p); // nominal bitrate
        p += 4; // skip minimum bitrate

        /* the two block-size exponents share one byte */
        blocksize = bytestream_get_byte(&p);
        bs0 = blocksize & 15;
        bs1 = blocksize >> 4;

        if (bs0 > bs1)
            return AVERROR_INVALIDDATA;
        if (bs0 < 6 || bs1 > 13)
            return AVERROR_INVALIDDATA;

        if (bytestream_get_byte(&p) != 1) /* framing_flag */
            return AVERROR_INVALIDDATA;

        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id   = AV_CODEC_ID_VORBIS;

        if (srate > 0) {
            st->codec->sample_rate = srate;
            avpriv_set_pts_info(st, 64, 1, srate);
        }
    } else if (os->buf[os->pstart] == 3) {
        if (os->psize > 8 &&
            ff_vorbis_comment(s, &st->metadata, os->buf + os->pstart + 7,
                              os->psize - 8) >= 0) {
            // drop all metadata we parsed and which is not required by libvorbis
            unsigned new_len = 7 + 4 + AV_RL32(priv->packet[1] + 7) + 4 + 1;
            if (new_len >= 16 && new_len < os->psize) {
                AV_WL32(priv->packet[1] + new_len - 5, 0);
                priv->packet[1][new_len - 1] = 1;
                priv->len[1] = new_len;
            }
        }
    } else {
        /* setup header: all three packets present, build the extradata */
        int ret = fixup_vorbis_headers(s, priv, &st->codec->extradata);
        if (ret < 0) {
            st->codec->extradata_size = 0;
            return ret;
        }
        st->codec->extradata_size = ret;
        if ((ret = avpriv_vorbis_parse_extradata(st->codec, &priv->vp))) {
            av_freep(&st->codec->extradata);
            st->codec->extradata_size = 0;
            return ret;
        }
    }

    return 1;
}
 
/*
 * Compute per-packet durations using the Vorbis frame parser, derive the
 * stream start time from the first page, and trim the final packet so the
 * stream ends exactly at the last granule position.
 */
static int vorbis_packet(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    struct oggvorbis_private *priv = os->private;
    int duration;

    /* first packet handling
     * here we parse the duration of each packet in the first page and compare
     * the total duration to the page granule to find the encoder delay and
     * set the first timestamp */
    if ((!os->lastpts || os->lastpts == AV_NOPTS_VALUE) && !(os->flags & OGG_FLAG_EOS)) {
        int seg, d;
        uint8_t *last_pkt = os->buf + os->pstart;
        uint8_t *next_pkt = last_pkt;

        avpriv_vorbis_parse_reset(&priv->vp);
        duration = 0;
        seg = os->segp;
        d = avpriv_vorbis_parse_frame(&priv->vp, last_pkt, 1);
        if (d < 0) {
            os->pflags |= AV_PKT_FLAG_CORRUPT;
            return 0;
        }
        duration += d;
        last_pkt = next_pkt = next_pkt + os->psize;
        /* walk the remaining packets on this page via the lacing values */
        for (; seg < os->nsegs; seg++) {
            if (os->segments[seg] < 255) {
                int d = avpriv_vorbis_parse_frame(&priv->vp, last_pkt, 1);
                if (d < 0) {
                    duration = os->granule;
                    break;
                }
                duration += d;
                last_pkt = next_pkt + os->segments[seg];
            }
            next_pkt += os->segments[seg];
        }
        os->lastpts =
        os->lastdts = os->granule - duration;
        if (s->streams[idx]->start_time == AV_NOPTS_VALUE) {
            s->streams[idx]->start_time = FFMAX(os->lastpts, 0);
            if (s->streams[idx]->duration)
                s->streams[idx]->duration -= s->streams[idx]->start_time;
        }
        priv->final_pts = AV_NOPTS_VALUE;
        avpriv_vorbis_parse_reset(&priv->vp);
    }

    /* parse packet duration */
    if (os->psize > 0) {
        duration = avpriv_vorbis_parse_frame(&priv->vp, os->buf + os->pstart, 1);
        if (duration < 0) {
            os->pflags |= AV_PKT_FLAG_CORRUPT;
            return 0;
        }
        os->pduration = duration;
    }

    /* final packet handling
     * here we save the pts of the first packet in the final page, sum up all
     * packet durations in the final page except for the last one, and compare
     * to the page granule to find the duration of the final packet */
    if (os->flags & OGG_FLAG_EOS) {
        if (os->lastpts != AV_NOPTS_VALUE) {
            priv->final_pts = os->lastpts;
            priv->final_duration = 0;
        }
        if (os->segp == os->nsegs)
            os->pduration = os->granule - priv->final_pts - priv->final_duration;
        priv->final_duration += os->pduration;
    }

    return 0;
}
 
/* Ogg Vorbis streams: "\001vorbis" magic, three header packets
 * (identification, comment, setup). */
const struct ogg_codec ff_vorbis_codec = {
    .magic     = "\001vorbis",
    .magicsize = 7,
    .header    = vorbis_header,
    .packet    = vorbis_packet,
    .cleanup   = vorbis_cleanup,
    .nb_header = 3,
};
/contrib/sdk/sources/ffmpeg/libavformat/oma.c
0,0 → 1,48
/*
* Sony OpenMG (OMA) common data
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "internal.h"
#include "oma.h"
#include "libavcodec/avcodec.h"
#include "libavutil/channel_layout.h"
 
/* Sample-rate table indexed by the EA3 header field; values are in units of
 * 100 Hz (441 -> 44100). Remaining entries are zero-initialized (invalid). */
const uint16_t ff_oma_srate_tab[8] = { 320, 441, 480, 882, 960, 0 };
 
/* Mapping of OMA codec ids (see oma.h) to FFmpeg codec ids;
 * zero-terminated. */
const AVCodecTag ff_oma_codec_tags[] = {
    { AV_CODEC_ID_ATRAC3,    OMA_CODECID_ATRAC3  },
    { AV_CODEC_ID_ATRAC3P,   OMA_CODECID_ATRAC3P },
    { AV_CODEC_ID_MP3,       OMA_CODECID_MP3     },
    { AV_CODEC_ID_PCM_S16BE, OMA_CODECID_LPCM    },
    { 0 },
};
 
/** map ATRAC-X channel id to internal channel layout */
const uint64_t ff_oma_chid_to_native_layout[7] = {
    AV_CH_LAYOUT_MONO,
    AV_CH_LAYOUT_STEREO,
    AV_CH_LAYOUT_SURROUND,
    AV_CH_LAYOUT_4POINT0,
    AV_CH_LAYOUT_5POINT1_BACK,
    AV_CH_LAYOUT_6POINT1_BACK,
    AV_CH_LAYOUT_7POINT1
};

/** map ATRAC-X channel id to total number of channels
 *  (parallel to ff_oma_chid_to_native_layout above) */
const int ff_oma_chid_to_num_channels[7] = {1, 2, 3, 4, 6, 7, 8};
/contrib/sdk/sources/ffmpeg/libavformat/oma.h
0,0 → 1,47
/*
* Sony OpenMG (OMA) common data
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_OMA_H
#define AVFORMAT_OMA_H

#include <stdint.h>

#include "internal.h"

#define EA3_HEADER_SIZE 96      /* size of the "EA3" format header */
#define ID3v2_EA3_MAGIC "ea3"   /* magic of the leading ID3v2-style header */
#define OMA_ENC_HEADER_SIZE 16  /* size of the encryption header prefix */

/* Codec ids as stored in the EA3 header. */
enum {
    OMA_CODECID_ATRAC3  = 0,
    OMA_CODECID_ATRAC3P = 1,
    OMA_CODECID_MP3     = 3,
    OMA_CODECID_LPCM    = 4,
    OMA_CODECID_WMA     = 5,
};

/* Sample rates in units of 100 Hz (441 -> 44100); trailing entries zero. */
extern const uint16_t ff_oma_srate_tab[8];

/* OMA codec id <-> FFmpeg codec id mapping, zero-terminated. */
extern const AVCodecTag ff_oma_codec_tags[];

/* ATRAC-X channel id lookup tables (layout and channel count). */
extern const uint64_t ff_oma_chid_to_native_layout[7];
extern const int ff_oma_chid_to_num_channels[7];

#endif /* AVFORMAT_OMA_H */
/contrib/sdk/sources/ffmpeg/libavformat/omadec.c
0,0 → 1,506
/*
* Sony OpenMG (OMA) demuxer
*
* Copyright (c) 2008, 2013 Maxim Poliakovski
* 2008 Benjamin Larsson
* 2011 David Goldwich
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* This is a demuxer for Sony OpenMG Music files
*
* Known file extensions: ".oma", "aa3"
* The format of such files consists of three parts:
* - "ea3" header carrying overall info and metadata. Except for starting with
* "ea" instead of "ID", it's an ID3v2 header.
* - "EA3" header is a Sony-specific header containing information about
* the OpenMG file: codec type (usually ATRAC, can also be MP3 or WMA),
* codec specific info (packet size, sample rate, channels and so on)
* and DRM related info (file encryption, content id).
* - Sound data organized in packets follow the EA3 header
* (can be encrypted using the Sony DRM!).
*
*/
 
#include "libavutil/channel_layout.h"
#include "avformat.h"
#include "internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/des.h"
#include "oma.h"
#include "pcm.h"
#include "id3v2.h"
 
 
/* Fixed DES key material ("leaf" keys) tried when probing encrypted
 * headers — presumably consumed by rprobe()/nprobe(); verify at call site. */
static const uint64_t leaf_table[] = {
    0xd79e8283acea4620, 0x7a9762f445afd0d8,
    0x354d60a60b8c79f1, 0x584e1cde00b07aee,
    0x1573cd93da7df623, 0x47f98d79620dd535
};
 
/* Demuxer state, mostly DRM/decryption material for OpenMG files. */
typedef struct OMAContext {
    uint64_t content_start;  /* file offset where the audio data begins — TODO confirm */
    int encrypted;           /* nonzero for DRM-protected content */
    uint16_t k_size;         /* sizes of the encryption header sections */
    uint16_t e_size;
    uint16_t i_size;
    uint16_t s_size;
    uint32_t rid;            /* recording id, matched against the header in nprobe() */
    uint8_t r_val[24];       /* root key; first 8 bytes repeated at offset 16 (see kset) */
    uint8_t n_val[24];       /* network key, same layout as r_val */
    uint8_t m_val[8];        /* intermediate DES values derived while probing */
    uint8_t s_val[8];
    uint8_t sm_val[8];       /* computed MAC compared against the header */
    uint8_t e_val[8];
    uint8_t iv[8];           /* initialization vector for payload decryption */
    struct AVDES av_des;     /* DES context */
} OMAContext;
 
/* Log up to the first 16 bytes of a value as a hex string at the given
 * log level. */
static void hex_log(AVFormatContext *s, int level,
                    const char *name, const uint8_t *value, int len)
{
    char hexbuf[33]; /* 16 bytes -> 32 hex chars + NUL */
    int n = FFMIN(len, 16);

    if (av_log_get_level() < level)
        return;

    ff_data_to_hex(hexbuf, value, n, 1);
    hexbuf[2 * n] = '\0';
    av_log(s, level, "%s: %s\n", name, hexbuf);
}
 
/* Install new root (r_val) and/or network (n_val) key material into the
 * context. Each 24-byte slot holds up to 16 key bytes followed by a copy of
 * the first 8 bytes, so the third DES round can reuse them. Returns -1 if
 * neither key was supplied, 0 otherwise. */
static int kset(AVFormatContext *s, const uint8_t *r_val, const uint8_t *n_val,
                int len)
{
    OMAContext *oc = s->priv_data;
    const uint8_t *src[2];
    uint8_t *dst[2];
    int i;

    if (!r_val && !n_val)
        return -1;

    len = FFMIN(len, 16);

    src[0] = r_val;
    dst[0] = oc->r_val;
    src[1] = n_val;
    dst[1] = oc->n_val;

    for (i = 0; i < 2; i++) {
        if (!src[i])
            continue;
        /* skip the copy when the caller passed our own buffer back in */
        if (src[i] != dst[i]) {
            memset(dst[i], 0, 24);
            memcpy(dst[i], src[i], len);
        }
        /* use first 64 bits in the third round again */
        memcpy(dst[i] + 16, src[i], 8);
    }

    return 0;
}
 
/* Minimum header size rprobe() needs: the m_val seed is the 8-byte block at
 * enc_header[48], so at least 48 + 1 bytes must be present.  Parenthesized
 * so the macro expands safely inside any surrounding expression (the
 * original `48 + 1` was an unparenthesized expression macro). */
#define OMA_RPROBE_M_VAL (48 + 1)

/**
 * Probe a candidate root key: derive m_val, s_val and the CBC-MAC sm_val
 * from r_val via DES, then compare the MAC against the one stored in the
 * encryption header.
 *
 * @param enc_header raw GEOB encryption header
 * @param size       number of valid bytes in enc_header
 * @param r_val      24-byte candidate root key (16 bytes + 8 replicated)
 * @return 0 if the MAC matches (r_val works), -1 otherwise
 */
static int rprobe(AVFormatContext *s, uint8_t *enc_header, unsigned size,
                  const uint8_t *r_val)
{
    OMAContext *oc = s->priv_data;
    unsigned int pos;
    struct AVDES av_des;

    if (!enc_header || !r_val ||
        size < OMA_ENC_HEADER_SIZE + oc->k_size + oc->e_size + oc->i_size ||
        size < OMA_RPROBE_M_VAL)
        return -1;

    /* m_val: decrypt the block at offset 48 with the candidate key */
    av_des_init(&av_des, r_val, 192, 1);
    av_des_crypt(&av_des, oc->m_val, &enc_header[48], 1, NULL, 1);

    /* s_val: encrypt an all-zero block keyed with m_val */
    av_des_init(&av_des, oc->m_val, 64, 0);
    av_des_crypt(&av_des, oc->s_val, NULL, 1, NULL, 0);

    /* sm_val: CBC-MAC over the i_size bytes after the key/enc sections */
    pos = OMA_ENC_HEADER_SIZE + oc->k_size + oc->e_size;
    av_des_init(&av_des, oc->s_val, 64, 0);
    av_des_mac(&av_des, oc->sm_val, &enc_header[pos], (oc->i_size >> 3));

    pos += oc->i_size;

    return memcmp(&enc_header[pos], oc->sm_val, 8) ? -1 : 0;
}
 
/**
 * Probe with the key-ring ("n") key: walk the encrypted key ring in the
 * header, decrypting 16-byte candidate root keys with n_val until one of
 * them passes rprobe()'s MAC check.
 *
 * @param enc_header raw GEOB encryption header
 * @param size       number of valid bytes in enc_header
 * @param n_val      24-byte key-ring key
 * @return 0 when a working r_val has been installed in the context, -1 otherwise
 */
static int nprobe(AVFormatContext *s, uint8_t *enc_header, unsigned size,
                  const uint8_t *n_val)
{
    OMAContext *oc = s->priv_data;
    uint64_t pos;
    uint32_t taglen, datalen;
    struct AVDES av_des;

    if (!enc_header || !n_val ||
        size < OMA_ENC_HEADER_SIZE + oc->k_size + 4)
        return -1;

    pos = OMA_ENC_HEADER_SIZE + oc->k_size;
    if (!memcmp(&enc_header[pos], "EKB ", 4))
        pos += 32;  /* skip the EKB preamble when present */

    if (size < pos + 44)
        return -1;

    if (AV_RB32(&enc_header[pos]) != oc->rid)
        av_log(s, AV_LOG_DEBUG, "Mismatching RID\n");

    taglen  = AV_RB32(&enc_header[pos + 32]);
    datalen = AV_RB32(&enc_header[pos + 36]) >> 4;  /* length in 16-byte units */

    pos += 44L + taglen;

    /* 64-bit pos and the shifted 64-bit datalen keep this overflow-free */
    if (pos + (((uint64_t)datalen) << 4) > size)
        return -1;

    av_des_init(&av_des, n_val, 192, 1);
    while (datalen-- > 0) {
        /* each 16-byte unit decrypts to one candidate root key */
        av_des_crypt(&av_des, oc->r_val, &enc_header[pos], 2, NULL, 1);
        kset(s, oc->r_val, NULL, 16);
        if (!rprobe(s, enc_header, size, oc->r_val))
            return 0;
        pos += 16;
    }

    return -1;
}
 
/**
 * Parse the OMG_LSI/OMG_BKLSI GEOB frame and recover the DES session key.
 *
 * Key candidates are tried in order: the user-supplied cryptokey (via
 * kset()), then the built-in leaf_table entries; each is validated with
 * rprobe()/nprobe() against the header's CBC-MAC.  On success the packet
 * decryption context oc->av_des is keyed with the derived e_val.
 *
 * @param em     ID3v2 extra-metadata chain searched for the GEOB frame
 * @param header raw EA3 header buffer; the CBC IV is read from offset 0x58
 * @return 0 on success, AVERROR_INVALIDDATA on malformed header or no valid key
 */
static int decrypt_init(AVFormatContext *s, ID3v2ExtraMeta *em, uint8_t *header)
{
    OMAContext *oc = s->priv_data;
    ID3v2ExtraMetaGEOB *geob = NULL;
    uint8_t *gdata;

    oc->encrypted = 1;
    av_log(s, AV_LOG_INFO, "File is encrypted\n");

    /* find GEOB metadata */
    while (em) {
        if (!strcmp(em->tag, "GEOB") &&
            (geob = em->data) &&
            (!strcmp(geob->description, "OMG_LSI") ||
             !strcmp(geob->description, "OMG_BKLSI"))) {
            break;
        }
        em = em->next;
    }
    if (!em) {
        av_log(s, AV_LOG_ERROR, "No encryption header found\n");
        return AVERROR_INVALIDDATA;
    }

    if (geob->datasize < 64) {
        av_log(s, AV_LOG_ERROR,
               "Invalid GEOB data size: %u\n", geob->datasize);
        return AVERROR_INVALIDDATA;
    }

    gdata = geob->data;

    if (AV_RB16(gdata) != 1)
        av_log(s, AV_LOG_WARNING, "Unknown version in encryption header\n");

    /* section sizes of the variable-length part of the encryption header */
    oc->k_size = AV_RB16(&gdata[2]);
    oc->e_size = AV_RB16(&gdata[4]);
    oc->i_size = AV_RB16(&gdata[6]);
    oc->s_size = AV_RB16(&gdata[8]);

    if (memcmp(&gdata[OMA_ENC_HEADER_SIZE], "KEYRING     ", 12)) {
        av_log(s, AV_LOG_ERROR, "Invalid encryption header\n");
        return AVERROR_INVALIDDATA;
    }
    /* make sure every offset used below stays inside the GEOB payload */
    if ( OMA_ENC_HEADER_SIZE + oc->k_size + oc->e_size + oc->i_size + 8 > geob->datasize
      || OMA_ENC_HEADER_SIZE + 48 > geob->datasize
    ) {
        av_log(s, AV_LOG_ERROR, "Too little GEOB data\n");
        return AVERROR_INVALIDDATA;
    }
    oc->rid = AV_RB32(&gdata[OMA_ENC_HEADER_SIZE + 28]);
    av_log(s, AV_LOG_DEBUG, "RID: %.8x\n", oc->rid);

    memcpy(oc->iv, &header[0x58], 8);
    hex_log(s, AV_LOG_DEBUG, "IV", oc->iv, 8);

    hex_log(s, AV_LOG_DEBUG, "CBC-MAC",
            &gdata[OMA_ENC_HEADER_SIZE + oc->k_size + oc->e_size + oc->i_size],
            8);

    if (s->keylen > 0) {
        kset(s, s->key, s->key, s->keylen);
    }
    /* no key installed yet, or the user key fails both probes;
     * note precedence: A || B && C parses as A || (B && C) */
    if (!memcmp(oc->r_val, (const uint8_t[8]){0}, 8) ||
        rprobe(s, gdata, geob->datasize, oc->r_val) < 0 &&
        nprobe(s, gdata, geob->datasize, oc->n_val) < 0) {
        int i;
        for (i = 0; i < FF_ARRAY_ELEMS(leaf_table); i += 2) {
            uint8_t buf[16];
            AV_WL64(buf, leaf_table[i]);
            AV_WL64(&buf[8], leaf_table[i + 1]);
            kset(s, buf, buf, 16);
            if (!rprobe(s, gdata, geob->datasize, oc->r_val) ||
                !nprobe(s, gdata, geob->datasize, oc->n_val))
                break;
        }
        if (i >= FF_ARRAY_ELEMS(leaf_table)) {
            av_log(s, AV_LOG_ERROR, "Invalid key\n");
            return AVERROR_INVALIDDATA;
        }
    }

    /* e_val: decrypt the session key block keyed with m_val */
    av_des_init(&oc->av_des, oc->m_val, 64, 0);
    av_des_crypt(&oc->av_des, oc->e_val,
                 &gdata[OMA_ENC_HEADER_SIZE + 40], 1, NULL, 0);
    hex_log(s, AV_LOG_DEBUG, "EK", oc->e_val, 8);

    /* key the packet-decryption context with e_val */
    av_des_init(&oc->av_des, oc->e_val, 64, 1);

    return 0;
}
 
/**
 * Read the "ea3" ID3v2 tag and the binary EA3 header, initialize decryption
 * for DRM'd files, and create the single audio stream with codec parameters
 * decoded from the 24-bit codec_params field.
 *
 * @return 0 on success, a negative AVERROR on malformed headers or
 *         unsupported codecs
 */
static int oma_read_header(AVFormatContext *s)
{
    int ret, framesize, jsflag, samplerate;
    uint32_t codec_params, channel_id;
    int16_t eid;
    uint8_t buf[EA3_HEADER_SIZE];
    uint8_t *edata;
    AVStream *st;
    ID3v2ExtraMeta *extra_meta = NULL;
    OMAContext *oc = s->priv_data;

    /* the metadata tag uses "ea3" in place of the usual "ID3" magic */
    ff_id3v2_read(s, ID3v2_EA3_MAGIC, &extra_meta);
    ret = avio_read(s->pb, buf, EA3_HEADER_SIZE);
    if (ret < EA3_HEADER_SIZE)
        return -1;

    if (memcmp(buf, ((const uint8_t[]){'E', 'A', '3'}), 3) ||
        buf[4] != 0 || buf[5] != EA3_HEADER_SIZE) {
        av_log(s, AV_LOG_ERROR, "Couldn't find the EA3 header !\n");
        return AVERROR_INVALIDDATA;
    }

    oc->content_start = avio_tell(s->pb);

    /* encrypted file: any eid other than the two "clear" markers */
    eid = AV_RB16(&buf[6]);
    if (eid != -1 && eid != -128 && decrypt_init(s, extra_meta, buf) < 0) {
        ff_id3v2_free_extra_meta(&extra_meta);
        return -1;
    }

    ff_id3v2_free_extra_meta(&extra_meta);

    codec_params = AV_RB24(&buf[33]);

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->start_time = 0;
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_tag = buf[32];
    st->codec->codec_id = ff_codec_get_id(ff_oma_codec_tags,
                                          st->codec->codec_tag);

    switch (buf[32]) {
    case OMA_CODECID_ATRAC3:
        /* sample rate index lives in bits 13..15 of codec_params */
        samplerate = ff_oma_srate_tab[(codec_params >> 13) & 7] * 100;
        if (!samplerate) {
            av_log(s, AV_LOG_ERROR, "Unsupported sample rate\n");
            return AVERROR_INVALIDDATA;
        }
        if (samplerate != 44100)
            avpriv_request_sample(s, "Sample rate %d", samplerate);

        framesize = (codec_params & 0x3FF) * 8;

        /* get stereo coding mode, 1 for joint-stereo */
        jsflag = (codec_params >> 17) & 1;

        st->codec->channels = 2;
        st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
        st->codec->sample_rate = samplerate;
        st->codec->bit_rate = st->codec->sample_rate * framesize * 8 / 1024;

        /* fake the ATRAC3 extradata
         * (wav format, makes stream copy to wav work) */
        if (ff_alloc_extradata(st->codec, 14))
            return AVERROR(ENOMEM);

        edata = st->codec->extradata;
        AV_WL16(&edata[0], 1);             // always 1
        AV_WL32(&edata[2], samplerate);    // samples rate
        AV_WL16(&edata[6], jsflag);        // coding mode
        AV_WL16(&edata[8], jsflag);        // coding mode
        AV_WL16(&edata[10], 1);            // always 1
        // AV_WL16(&edata[12], 0);         // always 0

        avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
        break;
    case OMA_CODECID_ATRAC3P:
        channel_id = (codec_params >> 10) & 7;
        if (!channel_id) {
            /* NOTE(review): %d with a uint32_t argument; harmless for the
             * 0..7 range here but %u would be the matching specifier */
            av_log(s, AV_LOG_ERROR,
                   "Invalid ATRAC-X channel id: %d\n", channel_id);
            return AVERROR_INVALIDDATA;
        }
        st->codec->channel_layout = ff_oma_chid_to_native_layout[channel_id - 1];
        st->codec->channels = ff_oma_chid_to_num_channels[channel_id - 1];
        framesize = ((codec_params & 0x3FF) * 8) + 8;
        samplerate = ff_oma_srate_tab[(codec_params >> 13) & 7] * 100;
        if (!samplerate) {
            av_log(s, AV_LOG_ERROR, "Unsupported sample rate\n");
            return AVERROR_INVALIDDATA;
        }
        st->codec->sample_rate = samplerate;
        st->codec->bit_rate = samplerate * framesize * 8 / 2048;
        avpriv_set_pts_info(st, 64, 1, samplerate);
        /* parameters are parsed, but no decoder exists for this codec here */
        av_log(s, AV_LOG_ERROR, "Unsupported codec ATRAC3+!\n");
        break;
    case OMA_CODECID_MP3:
        st->need_parsing = AVSTREAM_PARSE_FULL_RAW;
        framesize = 1024;
        break;
    case OMA_CODECID_LPCM:
        /* PCM 44.1 kHz 16 bit stereo big-endian */
        st->codec->channels = 2;
        st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
        st->codec->sample_rate = 44100;
        framesize = 1024;
        /* bit rate = sample rate x PCM block align (= 4) x 8 */
        st->codec->bit_rate = st->codec->sample_rate * 32;
        st->codec->bits_per_coded_sample =
            av_get_bits_per_sample(st->codec->codec_id);
        avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
        break;
    default:
        av_log(s, AV_LOG_ERROR, "Unsupported codec %d!\n", buf[32]);
        return AVERROR(ENOSYS);
    }

    /* oma_read_packet() reads exactly block_align bytes per packet */
    st->codec->block_align = framesize;

    return 0;
}
 
 
/**
 * Read one fixed-size packet (block_align bytes, set by oma_read_header())
 * and, for encrypted files, decrypt it in place with DES in CBC mode.
 *
 * @return bytes read, AVERROR_EOF at end of file, or a negative error from
 *         av_get_packet()
 */
static int oma_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    OMAContext *oc = s->priv_data;
    int packet_size = s->streams[0]->codec->block_align;
    int ret = av_get_packet(s->pb, pkt, packet_size);

    /* any short read (including errors) flags the packet as corrupt */
    if (ret < packet_size)
        pkt->flags |= AV_PKT_FLAG_CORRUPT;

    if (ret < 0)
        return ret;
    if (!ret)
        return AVERROR_EOF;

    pkt->stream_index = 0;

    if (oc->encrypted) {
        /* previous unencrypted block saved in IV for
         * the next packet (CBC mode) */
        if (ret == packet_size)
            av_des_crypt(&oc->av_des, pkt->data, pkt->data,
                         (packet_size >> 3), oc->iv, 1);
        else
            /* partial block: chaining state is unusable, reset it */
            memset(oc->iv, 0, 8);
    }

    return ret;
}
 
/**
 * Probe for the OpenMG layout: an ID3v2.3 tag carrying the "ea3" magic,
 * followed by the Sony "EA3" binary header.
 */
static int oma_read_probe(AVProbeData *p)
{
    const uint8_t *buf;
    unsigned tag_len = 0;

    buf = p->buf;

    if (p->buf_size < ID3v2_HEADER_SIZE ||
        !ff_id3v2_match(buf, ID3v2_EA3_MAGIC) ||
        buf[3] != 3 || // version must be 3
        buf[4]) // flags byte zero
        return 0;

    tag_len = ff_id3v2_tag_len(buf);

    /* This check cannot overflow as tag_len has at most 28 bits */
    if (p->buf_size < tag_len + 5)
        /* EA3 header comes late, might be outside of the probe buffer */
        return AVPROBE_SCORE_EXTENSION;

    buf += tag_len;

    /* NOTE(review): buf[5] can sit one byte past the tag_len + 5 bytes
     * guaranteed above; presumably covered by AVProbeData's zero padding —
     * confirm, or tighten the size check to tag_len + 6 */
    if (!memcmp(buf, "EA3", 3) && !buf[4] && buf[5] == EA3_HEADER_SIZE)
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
 
/**
 * Seek using the generic fixed-block-size PCM helper, then — for encrypted
 * files — re-prime the DES-CBC IV from the 8 ciphertext bytes immediately
 * preceding the new position.
 */
static int oma_read_seek(struct AVFormatContext *s,
                         int stream_index, int64_t timestamp, int flags)
{
    OMAContext *oc = s->priv_data;
    int err = ff_pcm_read_seek(s, stream_index, timestamp, flags);

    if (!oc->encrypted)
        return err;

    /* readjust IV for CBC */
    if (err || avio_tell(s->pb) < oc->content_start)
        goto wipe;
    if ((err = avio_seek(s->pb, -8, SEEK_CUR)) < 0)
        goto wipe;
    if ((err = avio_read(s->pb, oc->iv, 8)) < 8) {
        if (err >= 0)
            err = AVERROR_EOF;
        goto wipe;
    }

    return 0;
wipe:
    /* previous block unavailable: zero the IV so state stays deterministic */
    memset(oc->iv, 0, 8);
    return err;
}
 
/* Demuxer registration.  Seeking goes through the generic index plus the
 * custom oma_read_seek() for IV maintenance; codec ids are resolved via the
 * shared ff_oma_codec_tags table. */
AVInputFormat ff_oma_demuxer = {
    .name           = "oma",
    .long_name      = NULL_IF_CONFIG_SMALL("Sony OpenMG audio"),
    .priv_data_size = sizeof(OMAContext),
    .read_probe     = oma_read_probe,
    .read_header    = oma_read_header,
    .read_packet    = oma_read_packet,
    .read_seek      = oma_read_seek,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "oma,omg,aa3",
    .codec_tag      = (const AVCodecTag* const []){ff_oma_codec_tags, 0},
};
/contrib/sdk/sources/ffmpeg/libavformat/omaenc.c
0,0 → 1,106
/*
* Sony OpenMG (OMA) muxer
*
* Copyright (c) 2011 Michael Karcher
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "avio_internal.h"
#include "id3v2.h"
#include "internal.h"
#include "oma.h"
#include "rawenc.h"
 
/**
 * Write the OpenMG file header: an ID3v2.3 tag with the "ea3" magic,
 * followed by the fixed-size binary EA3 header describing the codec.
 * Only unencrypted ATRAC3/ATRAC3+ output is supported.
 *
 * @return 0 on success, AVERROR(EINVAL) for unsupported codec parameters
 */
static av_cold int oma_write_header(AVFormatContext *s)
{
    int i;
    AVCodecContext *format;
    int srate_index;
    int isjointstereo;

    format = s->streams[0]->codec;
    /* check for support of the format first */

    for (srate_index = 0; ; srate_index++) {
        /* table is zero-terminated; hitting the terminator means no match */
        if (ff_oma_srate_tab[srate_index] == 0) {
            av_log(s, AV_LOG_ERROR, "Sample rate %d not supported in OpenMG audio\n",
                   format->sample_rate);
            return AVERROR(EINVAL);
        }

        if (ff_oma_srate_tab[srate_index] * 100 == format->sample_rate)
            break;
    }

    /* Metadata; OpenMG does not support ID3v2.4 */
    ff_id3v2_write_simple(s, 3, ID3v2_EA3_MAGIC);

    ffio_wfourcc(s->pb, "EA3\0");
    /* header size split over two bytes, 7 bits each */
    avio_w8(s->pb, EA3_HEADER_SIZE >> 7);
    avio_w8(s->pb, EA3_HEADER_SIZE & 0x7F);
    avio_wl16(s->pb, 0xFFFF); /* not encrypted */
    for (i = 0; i < 6; i++)
        avio_wl32(s->pb, 0); /* Padding + DRM id */

    switch(format->codec_tag) {
    case OMA_CODECID_ATRAC3:
        if (format->channels != 2) {
            av_log(s, AV_LOG_ERROR, "ATRAC3 in OMA is only supported with 2 channels\n");
            return AVERROR(EINVAL);
        }
        /* the joint-stereo flag sits at different extradata offsets
         * depending on which container the stream came from */
        if (format->extradata_size == 14) /* WAV format extradata */
            isjointstereo = format->extradata[6] != 0;
        else if(format->extradata_size == 10) /* RM format extradata */
            isjointstereo = format->extradata[8] == 0x12;
        else {
            av_log(s, AV_LOG_ERROR, "ATRAC3: Unsupported extradata size\n");
            return AVERROR(EINVAL);
        }
        /* codec_params layout mirrors what oma_read_header() parses */
        avio_wb32(s->pb, (OMA_CODECID_ATRAC3 << 24) |
                         (isjointstereo << 17) |
                         (srate_index << 13) |
                         (format->block_align/8));
        break;
    case OMA_CODECID_ATRAC3P:
        avio_wb32(s->pb, (OMA_CODECID_ATRAC3P << 24) |
                         (srate_index << 13) |
                         (format->channels << 10) |
                         (format->block_align/8 - 1));
        break;
    default:
        av_log(s, AV_LOG_ERROR, "unsupported codec tag %d for write\n",
               format->codec_tag);
        return AVERROR(EINVAL);
    }
    /* zero-fill the rest of the fixed-size EA3 header */
    for (i = 0; i < (EA3_HEADER_SIZE - 36)/4; i++)
        avio_wl32(s->pb, 0); /* Padding */

    return 0;
}
 
/* Muxer registration: raw packet passthrough after the header written by
 * oma_write_header(). */
AVOutputFormat ff_oma_muxer = {
    .name         = "oma",
    .long_name    = NULL_IF_CONFIG_SMALL("Sony OpenMG audio"),
    .mime_type    = "audio/x-oma",
    .extensions   = "oma",
    .audio_codec  = AV_CODEC_ID_ATRAC3,
    .write_header = oma_write_header,
    .write_packet = ff_raw_write_packet,
    .codec_tag    = (const AVCodecTag* const []){ff_oma_codec_tags, 0},
};
/contrib/sdk/sources/ffmpeg/libavformat/options.c
0,0 → 1,123
/*
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "avio_internal.h"
#include "libavutil/opt.h"
 
/**
* @file
* Options definition for AVFormatContext.
*/
 
#include "options_table.h"
 
/* AVClass item_name callback: name of the attached (de)muxer, or "NULL"
 * when neither is set. */
static const char *format_to_name(void *ptr)
{
    AVFormatContext *ctx = ptr;

    if (ctx->iformat)
        return ctx->iformat->name;
    if (ctx->oformat)
        return ctx->oformat->name;
    return "NULL";
}
 
/* AVClass child_next callback: first yields the (de)muxer private context
 * (when one exists and has an AVClass), then the AVIOContext. */
static void *format_child_next(void *obj, void *prev)
{
    AVFormatContext *s = obj;

    if (!prev && s->priv_data) {
        int priv_has_class = (s->iformat && s->iformat->priv_class) ||
                             (s->oformat && s->oformat->priv_class);
        if (priv_has_class)
            return s->priv_data;
    }

    if (s->pb && s->pb->av_class && prev != s->pb)
        return s->pb;

    return NULL;
}
 
/* AVClass child_class_next callback.
 *
 * Enumeration order: the AVIO/URL class first (prev == NULL), then the
 * private classes of all input formats, then those of all output formats.
 * Given a non-NULL prev, the first two loops locate prev's position in the
 * demuxer list (or, failing that, the muxer list); the remaining loops
 * resume from that position and return the next non-NULL priv_class,
 * falling through from demuxers into muxers.
 *
 * NOTE(review): the last two while loops use a bare assignment as the loop
 * condition (single parentheses); behavior is correct but compilers warn. */
static const AVClass *format_child_class_next(const AVClass *prev)
{
    AVInputFormat  *ifmt = NULL;
    AVOutputFormat *ofmt = NULL;

    if (!prev)
        return &ffio_url_class;

    /* find prev among the demuxer private classes */
    while ((ifmt = av_iformat_next(ifmt)))
        if (ifmt->priv_class == prev)
            break;

    /* not a demuxer class: look among the muxer private classes */
    if (!ifmt)
        while ((ofmt = av_oformat_next(ofmt)))
            if (ofmt->priv_class == prev)
                break;
    /* continue the demuxer enumeration from prev's position */
    if (!ofmt)
        while (ifmt = av_iformat_next(ifmt))
            if (ifmt->priv_class)
                return ifmt->priv_class;

    /* demuxers exhausted (or prev was a muxer class): continue with muxers */
    while (ofmt = av_oformat_next(ofmt))
        if (ofmt->priv_class)
            return ofmt->priv_class;

    return NULL;
}
 
/* AVClass get_category callback: a context with an input format attached is
 * a demuxer, anything else is reported as a muxer. */
static AVClassCategory get_category(void *ptr)
{
    AVFormatContext *ctx = ptr;
    return ctx->iformat ? AV_CLASS_CATEGORY_DEMUXER
                        : AV_CLASS_CATEGORY_MUXER;
}
 
/* AVClass describing AVFormatContext for the AVOption API and av_log().
 * NOTE(review): .category is fixed at MUXER, but get_category() overrides
 * it per-instance and reports DEMUXER whenever iformat is set. */
static const AVClass av_format_context_class = {
    .class_name       = "AVFormatContext",
    .item_name        = format_to_name,
    .option           = avformat_options,
    .version          = LIBAVUTIL_VERSION_INT,
    .child_next       = format_child_next,
    .child_class_next = format_child_class_next,
    .category         = AV_CLASS_CATEGORY_MUXER,
    .get_category     = get_category,
};
 
/* Reset *s to a pristine state: zero everything, install the AVClass, then
 * apply the AVOption defaults declared in options_table.h. */
static void avformat_get_context_defaults(AVFormatContext *s)
{
    memset(s, 0, sizeof(AVFormatContext));

    s->av_class = &av_format_context_class;

    av_opt_set_defaults(s);
}
 
/* Allocate an AVFormatContext initialized with the registered option
 * defaults; returns NULL on allocation failure. */
AVFormatContext *avformat_alloc_context(void)
{
    AVFormatContext *ctx = av_malloc(sizeof(AVFormatContext));

    if (ctx)
        avformat_get_context_defaults(ctx);

    return ctx;
}
 
/* Accessor: report how the context's duration field was determined. */
enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx)
{
    return ctx->duration_estimation_method;
}
 
/* Public accessor for the AVFormatContext AVClass (option introspection
 * without allocating a context). */
const AVClass *avformat_get_class(void)
{
    return &av_format_context_class;
}
/contrib/sdk/sources/ffmpeg/libavformat/options_table.h
0,0 → 1,88
/*
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_OPTIONS_TABLE_H
#define AVFORMAT_OPTIONS_TABLE_H
 
#include <limits.h>
 
#include "libavutil/opt.h"
#include "avformat.h"
 
#define OFFSET(x) offsetof(AVFormatContext,x)
#define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
//these names are too long to be readable
#define E AV_OPT_FLAG_ENCODING_PARAM
#define D AV_OPT_FLAG_DECODING_PARAM

/* Option table backing the AVFormatContext AVClass (see options.c).
 * Each non-CONST entry maps an option name to a context field via OFFSET();
 * AV_OPT_TYPE_CONST entries are named values belonging to the flags option
 * whose unit string (the final field, e.g. "fflags") they share. */
static const AVOption avformat_options[] = {
{"avioflags", NULL, OFFSET(avio_flags), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT }, INT_MIN, INT_MAX, D|E, "avioflags"},
{"direct", "reduce buffering", 0, AV_OPT_TYPE_CONST, {.i64 = AVIO_FLAG_DIRECT }, INT_MIN, INT_MAX, D|E, "avioflags"},
{"probesize", "set probing size", OFFSET(probesize), AV_OPT_TYPE_INT, {.i64 = 5000000 }, 32, INT_MAX, D},
{"packetsize", "set packet size", OFFSET(packet_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, E},
{"fflags", NULL, OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = AVFMT_FLAG_FLUSH_PACKETS }, INT_MIN, INT_MAX, D|E, "fflags"},
{"flush_packets", "reduce the latency by flushing out packets immediately", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_FLUSH_PACKETS }, INT_MIN, INT_MAX, D, "fflags"},
{"ignidx", "ignore index", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_IGNIDX }, INT_MIN, INT_MAX, D, "fflags"},
{"genpts", "generate pts", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_GENPTS }, INT_MIN, INT_MAX, D, "fflags"},
{"nofillin", "do not fill in missing values that can be exactly calculated", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_NOFILLIN }, INT_MIN, INT_MAX, D, "fflags"},
{"noparse", "disable AVParsers, this needs nofillin too", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_NOPARSE }, INT_MIN, INT_MAX, D, "fflags"},
{"igndts", "ignore dts", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_IGNDTS }, INT_MIN, INT_MAX, D, "fflags"},
{"discardcorrupt", "discard corrupted frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_DISCARD_CORRUPT }, INT_MIN, INT_MAX, D, "fflags"},
{"sortdts", "try to interleave outputted packets by dts", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_SORT_DTS }, INT_MIN, INT_MAX, D, "fflags"},
{"keepside", "don't merge side data", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_KEEP_SIDE_DATA }, INT_MIN, INT_MAX, D, "fflags"},
{"latm", "enable RTP MP4A-LATM payload", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_MP4A_LATM }, INT_MIN, INT_MAX, E, "fflags"},
{"nobuffer", "reduce the latency introduced by optional buffering", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_NOBUFFER }, 0, INT_MAX, D, "fflags"},
{"seek2any", "allow seeking to non-keyframes on demuxer level when supported", OFFSET(seek2any), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, D},
{"analyzeduration", "specify how many microseconds are analyzed to probe the input", OFFSET(max_analyze_duration), AV_OPT_TYPE_INT, {.i64 = 5*AV_TIME_BASE }, 0, INT_MAX, D},
{"cryptokey", "decryption key", OFFSET(key), AV_OPT_TYPE_BINARY, {.dbl = 0}, 0, 0, D},
{"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), AV_OPT_TYPE_INT, {.i64 = 1<<20 }, 0, INT_MAX, D},
{"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), AV_OPT_TYPE_INT, {.i64 = 3041280 }, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
{"fdebug", "print specific debug info", OFFSET(debug), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT }, 0, INT_MAX, E|D, "fdebug"},
{"ts", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_FDEBUG_TS }, INT_MIN, INT_MAX, E|D, "fdebug"},
{"max_delay", "maximum muxing or demuxing delay in microseconds", OFFSET(max_delay), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, INT_MAX, E|D},
{"fpsprobesize", "number of frames used to probe fps", OFFSET(fps_probe_size), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX-1, D},
{"audio_preload", "microseconds by which audio packets should be interleaved earlier", OFFSET(audio_preload), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX-1, E},
{"chunk_duration", "microseconds for each chunk", OFFSET(max_chunk_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX-1, E},
{"chunk_size", "size in bytes for each chunk", OFFSET(max_chunk_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX-1, E},
/* this is a crutch for avconv, since it cannot deal with identically named options in different contexts.
 * to be removed when avconv is fixed */
{"f_err_detect", "set error detection flags (deprecated; use err_detect, save via avconv)", OFFSET(error_recognition), AV_OPT_TYPE_FLAGS, {.i64 = AV_EF_CRCCHECK }, INT_MIN, INT_MAX, D, "err_detect"},
{"err_detect", "set error detection flags", OFFSET(error_recognition), AV_OPT_TYPE_FLAGS, {.i64 = AV_EF_CRCCHECK }, INT_MIN, INT_MAX, D, "err_detect"},
{"crccheck", "verify embedded CRCs", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CRCCHECK }, INT_MIN, INT_MAX, D, "err_detect"},
{"bitstream", "detect bitstream specification deviations", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_BITSTREAM }, INT_MIN, INT_MAX, D, "err_detect"},
{"buffer", "detect improper bitstream length", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_BUFFER }, INT_MIN, INT_MAX, D, "err_detect"},
{"explode", "abort decoding on minor error detection", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_EXPLODE }, INT_MIN, INT_MAX, D, "err_detect"},
{"careful", "consider things that violate the spec, are fast to check and have not been seen in the wild as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CAREFUL }, INT_MIN, INT_MAX, D, "err_detect"},
{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT }, INT_MIN, INT_MAX, D, "err_detect"},
{"aggressive", "consider things that a sane encoder shouldn't do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE }, INT_MIN, INT_MAX, D, "err_detect"},
{"use_wallclock_as_timestamps", "use wallclock as timestamps", OFFSET(use_wallclock_as_timestamps), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX-1, D},
{"avoid_negative_ts", "shift timestamps to make them non-negative. 1 enables, 0 disables, default of -1 enables when required by target format.", OFFSET(avoid_negative_ts), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, E},
{"skip_initial_bytes", "set number of bytes to skip before reading header and frames", OFFSET(skip_initial_bytes), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX-1, D},
{"correct_ts_overflow", "correct single timestamp overflows", OFFSET(correct_ts_overflow), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, D},
{"flush_packets", "enable flushing of the I/O context after each packet", OFFSET(flush_packets), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, E},
{NULL},
};

#undef E
#undef D
#undef DEFAULT
#undef OFFSET

#endif /* AVFORMAT_OPTIONS_TABLE_H */
/contrib/sdk/sources/ffmpeg/libavformat/os_support.c
0,0 → 1,340
/*
* various OS-feature replacement utilities
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
* copyright (c) 2002 Francois Revol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/* needed by inet_aton() */
#define _SVID_SOURCE
 
#include "config.h"
#include "avformat.h"
#include "os_support.h"
 
#if CONFIG_NETWORK
#include <fcntl.h>
#if !HAVE_POLL_H
#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#if HAVE_WINSOCK2_H
#include <winsock2.h>
#elif HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#endif
 
#include "network.h"
 
#if !HAVE_INET_ATON
#include <stdlib.h>
 
/**
 * Fallback inet_aton(): parse a strict dotted-quad "a.b.c.d" IPv4 address.
 * Rejects a zero first octet and any octet above 255.
 *
 * @param str dotted-quad string to parse
 * @param add receives the address in network byte order on success
 * @return 1 on success, 0 on parse failure (matching inet_aton semantics)
 */
int ff_inet_aton(const char *str, struct in_addr *add)
{
    unsigned int add1 = 0, add2 = 0, add3 = 0, add4 = 0;

    /* %u matches the unsigned int arguments; the original %d with
     * unsigned int * is undefined behavior per C99 7.19.6.2 */
    if (sscanf(str, "%u.%u.%u.%u", &add1, &add2, &add3, &add4) != 4)
        return 0;

    /* first octet must be non-zero; all octets must fit in one byte */
    if (!add1 || (add1 | add2 | add3 | add4) > 255)
        return 0;

    add->s_addr = htonl((add1 << 24) + (add2 << 16) + (add3 << 8) + add4);

    return 1;
}
#else
/* System provides inet_aton(): thin passthrough wrapper. */
int ff_inet_aton(const char *str, struct in_addr *add)
{
    return inet_aton(str, add);
}
#endif /* !HAVE_INET_ATON */
 
#if !HAVE_GETADDRINFO
/**
 * Minimal getaddrinfo() replacement: IPv4 only, numeric services only
 * (the string is converted with atoi(), not looked up via getservbyname).
 * On Windows it first tries the real getaddrinfo from ws2_32.dll.
 *
 * @return 0 on success (with *res allocated — free with ff_freeaddrinfo()),
 *         EAI_FAIL on any failure
 */
int ff_getaddrinfo(const char *node, const char *service,
                   const struct addrinfo *hints, struct addrinfo **res)
{
    struct hostent *h = NULL;
    struct addrinfo *ai;
    struct sockaddr_in *sin;

#if HAVE_WINSOCK2_H
    /* prefer the system implementation when ws2_32.dll exports it */
    int (WSAAPI *win_getaddrinfo)(const char *node, const char *service,
                                  const struct addrinfo *hints,
                                  struct addrinfo **res);
    HMODULE ws2mod = GetModuleHandle("ws2_32.dll");
    win_getaddrinfo = GetProcAddress(ws2mod, "getaddrinfo");
    if (win_getaddrinfo)
        return win_getaddrinfo(node, service, hints, res);
#endif

    *res = NULL;
    sin = av_mallocz(sizeof(struct sockaddr_in));
    if (!sin)
        return EAI_FAIL;
    sin->sin_family = AF_INET;

    if (node) {
        /* numeric address first; fall back to a DNS lookup unless the
         * caller demanded AI_NUMERICHOST */
        if (!ff_inet_aton(node, &sin->sin_addr)) {
            if (hints && (hints->ai_flags & AI_NUMERICHOST)) {
                av_free(sin);
                return EAI_FAIL;
            }
            h = gethostbyname(node);
            if (!h) {
                av_free(sin);
                return EAI_FAIL;
            }
            memcpy(&sin->sin_addr, h->h_addr_list[0], sizeof(struct in_addr));
        }
    } else {
        /* no node: wildcard for servers (AI_PASSIVE), loopback otherwise */
        if (hints && (hints->ai_flags & AI_PASSIVE))
            sin->sin_addr.s_addr = INADDR_ANY;
        else
            sin->sin_addr.s_addr = INADDR_LOOPBACK;
    }

    /* Note: getaddrinfo allows service to be a string, which
     * should be looked up using getservbyname. */
    if (service)
        sin->sin_port = htons(atoi(service));

    ai = av_mallocz(sizeof(struct addrinfo));
    if (!ai) {
        av_free(sin);
        return EAI_FAIL;
    }

    *res = ai;
    ai->ai_family = AF_INET;
    ai->ai_socktype = hints ? hints->ai_socktype : 0;
    switch (ai->ai_socktype) {
    case SOCK_STREAM:
        ai->ai_protocol = IPPROTO_TCP;
        break;
    case SOCK_DGRAM:
        ai->ai_protocol = IPPROTO_UDP;
        break;
    default:
        ai->ai_protocol = 0;
        break;
    }

    /* ai takes ownership of sin; both are released by ff_freeaddrinfo() */
    ai->ai_addr = (struct sockaddr *)sin;
    ai->ai_addrlen = sizeof(struct sockaddr_in);
    if (hints && (hints->ai_flags & AI_CANONNAME))
        ai->ai_canonname = h ? av_strdup(h->h_name) : NULL;

    ai->ai_next = NULL;
    return 0;
}
 
/**
 * Release a result list from ff_getaddrinfo().  Frees the single-entry
 * chain this fallback builds (canonical name, address, then the entry);
 * on Windows it defers to ws2_32.dll's freeaddrinfo when available.
 */
void ff_freeaddrinfo(struct addrinfo *res)
{
#if HAVE_WINSOCK2_H
    void (WSAAPI *win_freeaddrinfo)(struct addrinfo *res);
    HMODULE ws2mod = GetModuleHandle("ws2_32.dll");
    win_freeaddrinfo = (void (WSAAPI *)(struct addrinfo *res))
                       GetProcAddress(ws2mod, "freeaddrinfo");
    if (win_freeaddrinfo) {
        win_freeaddrinfo(res);
        return;
    }
#endif

    av_free(res->ai_canonname);
    av_free(res->ai_addr);
    av_free(res);
}
 
/**
 * Minimal getnameinfo() replacement: AF_INET only.  Resolves the host via
 * reverse DNS unless NI_NUMERICHOST is set, and the service name via
 * getservbyport unless NI_NUMERICSERV is set, falling back to numeric
 * renditions in both cases.  On Windows it first tries ws2_32.dll.
 *
 * @return 0 on success, or an EAI_* error code
 */
int ff_getnameinfo(const struct sockaddr *sa, int salen,
                   char *host, int hostlen,
                   char *serv, int servlen, int flags)
{
    const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;

#if HAVE_WINSOCK2_H
    int (WSAAPI *win_getnameinfo)(const struct sockaddr *sa, socklen_t salen,
                                  char *host, DWORD hostlen,
                                  char *serv, DWORD servlen, int flags);
    HMODULE ws2mod = GetModuleHandle("ws2_32.dll");
    win_getnameinfo = GetProcAddress(ws2mod, "getnameinfo");
    if (win_getnameinfo)
        return win_getnameinfo(sa, salen, host, hostlen, serv, servlen, flags);
#endif

    if (sa->sa_family != AF_INET)
        return EAI_FAMILY;
    if (!host && !serv)
        return EAI_NONAME;

    if (host && hostlen > 0) {
        struct hostent *ent = NULL;
        uint32_t a;
        if (!(flags & NI_NUMERICHOST))
            ent = gethostbyaddr((const char *)&sin->sin_addr,
                                sizeof(sin->sin_addr), AF_INET);

        if (ent) {
            snprintf(host, hostlen, "%s", ent->h_name);
        } else if (flags & NI_NAMERQD) {
            /* caller required a resolved name and the lookup failed */
            return EAI_NONAME;
        } else {
            /* fall back to the dotted-quad representation */
            a = ntohl(sin->sin_addr.s_addr);
            snprintf(host, hostlen, "%d.%d.%d.%d",
                     ((a >> 24) & 0xff), ((a >> 16) & 0xff),
                     ((a >>  8) & 0xff), ( a        & 0xff));
        }
    }

    if (serv && servlen > 0) {
        struct servent *ent = NULL;
#if HAVE_GETSERVBYPORT
        if (!(flags & NI_NUMERICSERV))
            ent = getservbyport(sin->sin_port, flags & NI_DGRAM ? "udp" : "tcp");
#endif

        if (ent)
            snprintf(serv, servlen, "%s", ent->s_name);
        else
            snprintf(serv, servlen, "%d", ntohs(sin->sin_port));
    }

    return 0;
}
#endif /* !HAVE_GETADDRINFO */
 
#if !HAVE_GETADDRINFO || HAVE_WINSOCK2_H
/**
 * gai_strerror() replacement: map an EAI_* error code to a human-readable
 * message; unknown codes yield "Unknown error".
 */
const char *ff_gai_strerror(int ecode)
{
    static const struct {
        int         code;
        const char *text;
    } tab[] = {
        { EAI_AGAIN,    "Temporary failure in name resolution"             },
        { EAI_BADFLAGS, "Invalid flags for ai_flags"                       },
        { EAI_FAIL,     "A non-recoverable error occurred"                 },
        { EAI_FAMILY,   "The address family was not recognized or the address "
                        "length was invalid for the specified family"      },
        { EAI_MEMORY,   "Memory allocation failure"                        },
#if EAI_NODATA != EAI_NONAME
        { EAI_NODATA,   "No address associated with hostname"              },
#endif
        { EAI_NONAME,   "The name does not resolve for the supplied parameters" },
        { EAI_SERVICE,  "servname not supported for ai_socktype"           },
        { EAI_SOCKTYPE, "ai_socktype not supported"                        },
    };
    size_t i;

    for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
        if (tab[i].code == ecode)
            return tab[i].text;

    return "Unknown error";
}
#endif /* !HAVE_GETADDRINFO || HAVE_WINSOCK2_H */
 
/**
 * Enable or disable non-blocking mode on a socket, via ioctlsocket() on
 * Winsock and O_NONBLOCK/fcntl() elsewhere.
 * @return the underlying call's result (0 on success, non-zero/-1 on failure)
 */
int ff_socket_nonblock(int socket, int enable)
{
#if HAVE_WINSOCK2_H
    u_long param = enable;
    return ioctlsocket(socket, FIONBIO, &param);
#else
    if (enable)
        return fcntl(socket, F_SETFL, fcntl(socket, F_GETFL) | O_NONBLOCK);
    else
        return fcntl(socket, F_SETFL, fcntl(socket, F_GETFL) & ~O_NONBLOCK);
#endif
}
 
#if !HAVE_POLL_H
/**
 * poll(2) emulation on top of select(2) for platforms without <poll.h>.
 * Only POLLIN, POLLOUT and POLLERR are mapped.  Negative fds are skipped
 * when building the sets, as poll() requires.
 *
 * @param timeout milliseconds, negative for an infinite wait
 * @return number of ready descriptors as reported by select(), 0 when there
 *         was nothing to poll, or -1 with errno set on error
 */
int ff_poll(struct pollfd *fds, nfds_t numfds, int timeout)
{
    fd_set read_set;
    fd_set write_set;
    fd_set exception_set;
    nfds_t i;
    int n;
    int rc;

#if HAVE_WINSOCK2_H
    /* Winsock fd_set stores sockets by value but is still bounded */
    if (numfds >= FD_SETSIZE) {
        errno = EINVAL;
        return -1;
    }
#endif

    FD_ZERO(&read_set);
    FD_ZERO(&write_set);
    FD_ZERO(&exception_set);

    n = 0;
    for (i = 0; i < numfds; i++) {
        if (fds[i].fd < 0)
            continue;
#if !HAVE_WINSOCK2_H
        /* BSD-style fd_set cannot represent descriptors >= FD_SETSIZE */
        if (fds[i].fd >= FD_SETSIZE) {
            errno = EINVAL;
            return -1;
        }
#endif

        if (fds[i].events & POLLIN)
            FD_SET(fds[i].fd, &read_set);
        if (fds[i].events & POLLOUT)
            FD_SET(fds[i].fd, &write_set);
        if (fds[i].events & POLLERR)
            FD_SET(fds[i].fd, &exception_set);

        /* select() needs the highest descriptor plus one */
        if (fds[i].fd >= n)
            n = fds[i].fd + 1;
    }

    if (n == 0)
        /* Hey!? Nothing to poll, in fact!!! */
        return 0;

    if (timeout < 0) {
        rc = select(n, &read_set, &write_set, &exception_set, NULL);
    } else {
        struct timeval tv;
        tv.tv_sec = timeout / 1000;
        tv.tv_usec = 1000 * (timeout % 1000);
        rc = select(n, &read_set, &write_set, &exception_set, &tv);
    }

    if (rc < 0)
        return rc;

    /* translate the result sets back into revents bits.
     * NOTE(review): unlike the setup loop, this one does not skip negative
     * fds — FD_ISSET on a negative fd looks unsafe; confirm callers never
     * pass them, or mirror the skip here */
    for (i = 0; i < numfds; i++) {
        fds[i].revents = 0;

        if (FD_ISSET(fds[i].fd, &read_set))
            fds[i].revents |= POLLIN;
        if (FD_ISSET(fds[i].fd, &write_set))
            fds[i].revents |= POLLOUT;
        if (FD_ISSET(fds[i].fd, &exception_set))
            fds[i].revents |= POLLERR;
    }

    return rc;
}
#endif /* CONFIG_NETWORK */
/contrib/sdk/sources/ffmpeg/libavformat/os_support.h
0,0 → 1,131
/*
* various OS-feature replacement utilities
* copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_OS_SUPPORT_H
#define AVFORMAT_OS_SUPPORT_H
 
/**
* @file
* miscellaneous OS support macros and functions.
*/
 
#include "config.h"
 
#include <sys/stat.h>
 
#if defined(_WIN32) && !defined(__MINGW32CE__)
# include <fcntl.h>
# ifdef lseek
# undef lseek
# endif
# define lseek(f,p,w) _lseeki64((f), (p), (w))
# ifdef stat
# undef stat
# endif
# define stat _stati64
# ifdef fstat
# undef fstat
# endif
# define fstat(f,s) _fstati64((f), (s))
#endif /* defined(__MINGW32__) && !defined(__MINGW32CE__) */
 
#ifdef _WIN32
#if HAVE_DIRECT_H
#include <direct.h>
#elif HAVE_IO_H
#include <io.h>
#endif
#define mkdir(a, b) _mkdir(a)
#endif
 
/* Return 1 when @path looks like a DOS-style path (drive letter followed
 * by ':'); always 0 on platforms without DOS path support. */
static inline int is_dos_path(const char *path)
{
#if HAVE_DOS_PATHS
    return path[0] && path[1] == ':';
#else
    return 0;
#endif
}
 
#if defined(__OS2__) || defined(__Plan9__)
#define SHUT_RD 0
#define SHUT_WR 1
#define SHUT_RDWR 2
#endif
 
#if defined(_WIN32)
#define SHUT_RD SD_RECEIVE
#define SHUT_WR SD_SEND
#define SHUT_RDWR SD_BOTH
 
#ifndef S_IRUSR
#define S_IRUSR S_IREAD
#endif
#ifndef S_IWUSR
#define S_IWUSR S_IWRITE
#endif
#endif
 
#if CONFIG_NETWORK
#if !HAVE_SOCKLEN_T
typedef int socklen_t;
#endif
 
/* most of the time closing a socket is just closing an fd */
#if !HAVE_CLOSESOCKET
#define closesocket close
#endif
 
#if !HAVE_POLL_H
typedef unsigned long nfds_t;
 
#if HAVE_WINSOCK2_H
#include <winsock2.h>
#endif
#if !HAVE_STRUCT_POLLFD
struct pollfd {
int fd;
short events; /* events to look for */
short revents; /* events that occurred */
};
 
/* events & revents */
#define POLLIN 0x0001 /* any readable data available */
#define POLLOUT 0x0002 /* file descriptor is writeable */
#define POLLRDNORM POLLIN
#define POLLWRNORM POLLOUT
#define POLLRDBAND 0x0008 /* priority readable data */
#define POLLWRBAND 0x0010 /* priority data can be written */
#define POLLPRI 0x0020 /* high priority readable data */
 
/* revents only */
#define POLLERR 0x0004 /* errors pending */
#define POLLHUP 0x0080 /* disconnected */
#define POLLNVAL 0x1000 /* invalid file descriptor */
#endif
 
 
int ff_poll(struct pollfd *fds, nfds_t numfds, int timeout);
#define poll ff_poll
#endif /* HAVE_POLL_H */
#endif /* CONFIG_NETWORK */
 
#endif /* AVFORMAT_OS_SUPPORT_H */
/contrib/sdk/sources/ffmpeg/libavformat/paf.c
0,0 → 1,262
/*
* Packed Animation File demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavcodec/paf.h"
#include "avformat.h"
#include "internal.h"
 
#define MAGIC "Packed Animation File V1.0\n(c) 1992-96 Amazing Studio\x0a\x1a"
 
typedef struct {
uint32_t buffer_size;
uint32_t frame_blks;
uint32_t nb_frames;
uint32_t start_offset;
uint32_t preload_count;
uint32_t max_video_blks;
uint32_t max_audio_blks;
 
uint32_t current_frame;
uint32_t current_frame_count;
uint32_t current_frame_block;
 
uint32_t *blocks_count_table;
uint32_t *frames_offset_table;
uint32_t *blocks_offset_table;
 
uint8_t *video_frame;
int video_size;
 
uint8_t *audio_frame;
uint8_t *temp_audio_frame;
int audio_size;
 
int got_audio;
} PAFDemuxContext;
 
/* Probe: the buffer must begin with the complete PAF magic string. */
static int read_probe(AVProbeData *p)
{
    const size_t magic_len = strlen(MAGIC);

    if (p->buf_size >= magic_len && !memcmp(p->buf, MAGIC, magic_len))
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
/* Free every table and reassembly buffer owned by the demuxer context;
 * also used as the failure-cleanup path of read_header(). */
static int read_close(AVFormatContext *s)
{
    PAFDemuxContext *p = s->priv_data;

    av_freep(&p->blocks_count_table);
    av_freep(&p->frames_offset_table);
    av_freep(&p->blocks_offset_table);
    av_freep(&p->video_frame);
    av_freep(&p->audio_frame);
    av_freep(&p->temp_audio_frame);

    return 0;
}
 
/**
 * Load @count little-endian 32-bit entries into @table, then skip the
 * padding that rounds the on-disk table up to a multiple of 512 entries.
 */
static void read_table(AVFormatContext *s, uint32_t *table, uint32_t count)
{
    uint32_t idx;

    for (idx = 0; idx < count; idx++)
        table[idx] = avio_rl32(s->pb);

    avio_skip(s->pb, 4 * (FFALIGN(count, 512) - count));
}
 
/**
 * Parse the PAF header: create one video stream (PAF video, 1/10 s time
 * base) and one audio stream (stereo, 22050 Hz), validate the block
 * geometry fields, allocate the reassembly buffers and load the three
 * preload tables that drive demuxing.
 * Returns 0 on success or a negative AVERROR code.
 */
static int read_header(AVFormatContext *s)
{
    PAFDemuxContext *p = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *ast, *vst;
    int ret = 0;

    /* skip the 132-byte magic/copyright banner */
    avio_skip(pb, 132);

    vst = avformat_new_stream(s, 0);
    if (!vst)
        return AVERROR(ENOMEM);

    vst->start_time = 0;
    vst->nb_frames  =
    vst->duration   =
    p->nb_frames    = avio_rl32(pb);
    avio_skip(pb, 4);
    vst->codec->width  = avio_rl32(pb);
    vst->codec->height = avio_rl32(pb);
    avio_skip(pb, 4);
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_tag  = 0;
    vst->codec->codec_id   = AV_CODEC_ID_PAF_VIDEO;
    avpriv_set_pts_info(vst, 64, 1, 10);   /* one tick per frame at 10 fps */

    ast = avformat_new_stream(s, 0);
    if (!ast)
        return AVERROR(ENOMEM);

    ast->start_time = 0;
    ast->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    ast->codec->codec_tag      = 0;
    ast->codec->codec_id       = AV_CODEC_ID_PAF_AUDIO;
    ast->codec->channels       = 2;
    ast->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    ast->codec->sample_rate    = 22050;
    avpriv_set_pts_info(ast, 64, 1, 22050);

    p->buffer_size    = avio_rl32(pb);
    p->preload_count  = avio_rl32(pb);
    p->frame_blks     = avio_rl32(pb);
    p->start_offset   = avio_rl32(pb);
    p->max_video_blks = avio_rl32(pb);
    p->max_audio_blks = avio_rl32(pb);
    /* Sanity-limit every field so the size products computed below
     * cannot overflow and the bookkeeping stays usable. */
    if (p->buffer_size    < 175 ||
        p->max_audio_blks < 2   ||
        p->max_video_blks < 1   ||
        p->frame_blks     < 1   ||
        p->nb_frames      < 1   ||
        p->preload_count  < 1   ||
        p->buffer_size    > 2048 ||
        p->max_video_blks > 2048 ||
        p->max_audio_blks > 2048 ||
        p->nb_frames      > INT_MAX / sizeof(uint32_t) ||
        p->frame_blks     > INT_MAX / sizeof(uint32_t))
        return AVERROR_INVALIDDATA;

    p->blocks_count_table  = av_mallocz(p->nb_frames  * sizeof(uint32_t));
    p->frames_offset_table = av_mallocz(p->nb_frames  * sizeof(uint32_t));
    p->blocks_offset_table = av_mallocz(p->frame_blks * sizeof(uint32_t));

    /* Reassembly buffers: whole multiples of the on-disk block size. */
    p->video_size  = p->max_video_blks * p->buffer_size;
    p->video_frame = av_mallocz(p->video_size);

    p->audio_size       = p->max_audio_blks * p->buffer_size;
    p->audio_frame      = av_mallocz(p->audio_size);
    p->temp_audio_frame = av_mallocz(p->audio_size);

    if (!p->blocks_count_table  ||
        !p->frames_offset_table ||
        !p->blocks_offset_table ||
        !p->video_frame         ||
        !p->audio_frame         ||
        !p->temp_audio_frame) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* The three preload tables start at offset buffer_size. */
    avio_seek(pb, p->buffer_size, SEEK_SET);

    read_table(s, p->blocks_count_table,  p->nb_frames);
    read_table(s, p->frames_offset_table, p->nb_frames);
    read_table(s, p->blocks_offset_table, p->frame_blks);

    p->got_audio           = 0;
    p->current_frame       = 0;
    p->current_frame_block = 0;

    avio_seek(pb, p->start_offset, SEEK_SET);

    return 0;

fail:
    read_close(s);

    return ret;
}
 
/**
 * Demux one packet. Disk blocks are copied into fixed reassembly
 * buffers (bit 31 of each block offset selects audio vs. video); a
 * completed audio frame is snapshotted and emitted on the following
 * call, a video packet is emitted once its frame's blocks are loaded.
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    PAFDemuxContext *p = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t count, offset;
    int size, i;

    if (p->current_frame >= p->nb_frames)
        return AVERROR_EOF;

    if (url_feof(pb))
        return AVERROR_EOF;

    /* An audio frame snapshotted on the previous call is pending. */
    if (p->got_audio) {
        if (av_new_packet(pkt, p->audio_size) < 0)
            return AVERROR(ENOMEM);

        memcpy(pkt->data, p->temp_audio_frame, p->audio_size);
        pkt->duration = PAF_SOUND_SAMPLES * (p->audio_size / PAF_SOUND_FRAME_SIZE);
        pkt->flags |= AV_PKT_FLAG_KEY;
        pkt->stream_index = 1;
        p->got_audio = 0;
        return pkt->size;
    }

    /* Number of blocks to load for the current frame; the first frame
     * uses the preload count from the header. */
    count = (p->current_frame == 0) ? p->preload_count : p->blocks_count_table[p->current_frame - 1];
    for (i = 0; i < count; i++) {
        if (p->current_frame_block >= p->frame_blks)
            return AVERROR_INVALIDDATA;

        /* Bit 31 selects the audio buffer; the low bits are the
         * destination offset inside the selected buffer. */
        offset = p->blocks_offset_table[p->current_frame_block] & ~(1U << 31);
        if (p->blocks_offset_table[p->current_frame_block] & (1U << 31)) {
            if (offset > p->audio_size - p->buffer_size)
                return AVERROR_INVALIDDATA;

            /* NOTE(review): avio_read() return value is unchecked here
             * and below — a truncated file leaves stale buffer data. */
            avio_read(pb, p->audio_frame + offset, p->buffer_size);
            /* Filling the second-to-last audio block completes a frame. */
            if (offset == (p->max_audio_blks - 2) * p->buffer_size) {
                memcpy(p->temp_audio_frame, p->audio_frame, p->audio_size);
                p->got_audio = 1;
            }
        } else {
            if (offset > p->video_size - p->buffer_size)
                return AVERROR_INVALIDDATA;

            avio_read(pb, p->video_frame + offset, p->buffer_size);
        }
        p->current_frame_block++;
    }

    if (p->frames_offset_table[p->current_frame] >= p->video_size)
        return AVERROR_INVALIDDATA;

    size = p->video_size - p->frames_offset_table[p->current_frame];

    if (av_new_packet(pkt, size) < 0)
        return AVERROR(ENOMEM);

    pkt->stream_index = 0;
    pkt->duration = 1;
    memcpy(pkt->data, p->video_frame + p->frames_offset_table[p->current_frame], size);
    /* bit 0x20 of the first payload byte marks a keyframe */
    if (pkt->data[0] & 0x20)
        pkt->flags |= AV_PKT_FLAG_KEY;
    p->current_frame++;

    return pkt->size;
}
 
/* Demuxer registration: probing is purely magic-string based, so no
 * extension list is provided. */
AVInputFormat ff_paf_demuxer = {
    .name           = "paf",
    .long_name      = NULL_IF_CONFIG_SMALL("Amazing Studio Packed Animation File"),
    .priv_data_size = sizeof(PAFDemuxContext),
    .read_probe     = read_probe,
    .read_header    = read_header,
    .read_packet    = read_packet,
    .read_close     = read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/pcm.c
0,0 → 1,76
/*
* PCM common functions
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/mathematics.h"
#include "avformat.h"
#include "pcm.h"
 
#define RAW_SAMPLES 1024
 
/**
 * Read one packet of raw PCM: RAW_SAMPLES blocks of block_align bytes.
 * Returns the number of bytes read or a negative AVERROR code.
 */
int ff_pcm_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;
    int size = RAW_SAMPLES * s->streams[0]->codec->block_align;

    if (size <= 0)
        return AVERROR(EINVAL);

    ret = av_get_packet(s->pb, pkt, size);

    /* Raw PCM has no framing, so a short read is not corruption; pin
     * the packet to the single stream and report the read result. */
    pkt->flags &= ~AV_PKT_FLAG_CORRUPT;
    pkt->stream_index = 0;

    return ret;
}
 
/**
 * Seek in headerless PCM by converting the timestamp directly into a
 * byte offset rounded to a whole block (one sample frame across all
 * channels). Always operates on stream 0 regardless of stream_index.
 * Returns 0 on success, a negative error from avio_seek(), or -1 when
 * the stream parameters are unusable.
 */
int ff_pcm_read_seek(AVFormatContext *s,
                     int stream_index, int64_t timestamp, int flags)
{
    AVStream *st;
    int block_align, byte_rate;
    int64_t pos, ret;

    st = s->streams[0];

    /* Derive block size and data rate from the codec parameters when
     * the container did not provide them explicitly. */
    block_align = st->codec->block_align ? st->codec->block_align :
        (av_get_bits_per_sample(st->codec->codec_id) * st->codec->channels) >> 3;
    byte_rate = st->codec->bit_rate ? st->codec->bit_rate >> 3 :
        block_align * st->codec->sample_rate;

    if (block_align <= 0 || byte_rate <= 0)
        return -1;
    if (timestamp < 0) timestamp = 0;

    /* compute the position by aligning it to block_align; round in the
     * direction requested by AVSEEK_FLAG_BACKWARD */
    pos = av_rescale_rnd(timestamp * byte_rate,
                         st->time_base.num,
                         st->time_base.den * (int64_t)block_align,
                         (flags & AVSEEK_FLAG_BACKWARD) ? AV_ROUND_DOWN : AV_ROUND_UP);
    pos *= block_align;

    /* recompute exact position so cur_dts matches the rounded offset */
    st->cur_dts = av_rescale(pos, st->time_base.den, byte_rate * (int64_t)st->time_base.num);
    if ((ret = avio_seek(s->pb, pos + s->data_offset, SEEK_SET)) < 0)
        return ret;
    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/pcm.h
0,0 → 1,31
/*
* PCM common functions
* Copyright (C) 2007 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_PCM_H
#define AVFORMAT_PCM_H
 
#include "avformat.h"
 
int ff_pcm_read_packet(AVFormatContext *s, AVPacket *pkt);
int ff_pcm_read_seek(AVFormatContext *s,
int stream_index, int64_t timestamp, int flags);
 
#endif /* AVFORMAT_PCM_H */
/contrib/sdk/sources/ffmpeg/libavformat/pcmdec.c
0,0 → 1,146
/*
* RAW PCM demuxers
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "pcm.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/avassert.h"
 
typedef struct PCMAudioDemuxerContext {
AVClass *class;
int sample_rate;
int channels;
} PCMAudioDemuxerContext;
 
/**
 * Initialize a single raw PCM audio stream. Raw PCM carries no header,
 * so all parameters come from the demuxer's private options and the
 * format's fixed raw_codec_id.
 */
static int pcm_read_header(AVFormatContext *s)
{
    PCMAudioDemuxerContext *ctx = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id    = s->iformat->raw_codec_id;
    st->codec->sample_rate = ctx->sample_rate;
    st->codec->channels    = ctx->channels;

    /* Every raw PCM codec has a fixed, known sample size. */
    st->codec->bits_per_coded_sample =
        av_get_bits_per_sample(st->codec->codec_id);
    av_assert0(st->codec->bits_per_coded_sample > 0);

    st->codec->block_align =
        st->codec->bits_per_coded_sample * st->codec->channels / 8;

    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    return 0;
}
 
/* User-settable options: raw PCM has no header, so sample rate and
 * channel count must be supplied (defaults: 44100 Hz, 1 channel). */
static const AVOption pcm_options[] = {
    { "sample_rate", "", offsetof(PCMAudioDemuxerContext, sample_rate), AV_OPT_TYPE_INT, {.i64 = 44100}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "channels",    "", offsetof(PCMAudioDemuxerContext, channels),    AV_OPT_TYPE_INT, {.i64 = 1}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};
 
/*
 * Declare one raw PCM demuxer per sample format. Each PCMDEF expansion
 * emits an AVClass (exposing pcm_options) plus an AVInputFormat wired
 * to the shared pcm_read_header / ff_pcm_read_packet / ff_pcm_read_seek
 * implementations; only name, codec id and extensions differ.
 */
#define PCMDEF(name_, long_name_, ext, codec)               \
static const AVClass name_ ## _demuxer_class = {            \
    .class_name = #name_ " demuxer",                        \
    .item_name  = av_default_item_name,                     \
    .option     = pcm_options,                              \
    .version    = LIBAVUTIL_VERSION_INT,                    \
};                                                          \
AVInputFormat ff_pcm_ ## name_ ## _demuxer = {              \
    .name           = #name_,                               \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),     \
    .priv_data_size = sizeof(PCMAudioDemuxerContext),       \
    .read_header    = pcm_read_header,                      \
    .read_packet    = ff_pcm_read_packet,                   \
    .read_seek      = ff_pcm_read_seek,                     \
    .flags          = AVFMT_GENERIC_INDEX,                  \
    .extensions     = ext,                                  \
    .raw_codec_id   = codec,                                \
    .priv_class     = &name_ ## _demuxer_class,             \
};

/* AV_NE() ties the bare "sw"/"uw" extensions to the native-endian
 * variant only, so both endiannesses never claim the same extension. */
PCMDEF(f64be, "PCM 64-bit floating-point big-endian",
       NULL, AV_CODEC_ID_PCM_F64BE)

PCMDEF(f64le, "PCM 64-bit floating-point little-endian",
       NULL, AV_CODEC_ID_PCM_F64LE)

PCMDEF(f32be, "PCM 32-bit floating-point big-endian",
       NULL, AV_CODEC_ID_PCM_F32BE)

PCMDEF(f32le, "PCM 32-bit floating-point little-endian",
       NULL, AV_CODEC_ID_PCM_F32LE)

PCMDEF(s32be, "PCM signed 32-bit big-endian",
       NULL, AV_CODEC_ID_PCM_S32BE)

PCMDEF(s32le, "PCM signed 32-bit little-endian",
       NULL, AV_CODEC_ID_PCM_S32LE)

PCMDEF(s24be, "PCM signed 24-bit big-endian",
       NULL, AV_CODEC_ID_PCM_S24BE)

PCMDEF(s24le, "PCM signed 24-bit little-endian",
       NULL, AV_CODEC_ID_PCM_S24LE)

PCMDEF(s16be, "PCM signed 16-bit big-endian",
       AV_NE("sw", NULL), AV_CODEC_ID_PCM_S16BE)

PCMDEF(s16le, "PCM signed 16-bit little-endian",
       AV_NE(NULL, "sw"), AV_CODEC_ID_PCM_S16LE)

PCMDEF(s8, "PCM signed 8-bit",
       "sb", AV_CODEC_ID_PCM_S8)

PCMDEF(u32be, "PCM unsigned 32-bit big-endian",
       NULL, AV_CODEC_ID_PCM_U32BE)

PCMDEF(u32le, "PCM unsigned 32-bit little-endian",
       NULL, AV_CODEC_ID_PCM_U32LE)

PCMDEF(u24be, "PCM unsigned 24-bit big-endian",
       NULL, AV_CODEC_ID_PCM_U24BE)

PCMDEF(u24le, "PCM unsigned 24-bit little-endian",
       NULL, AV_CODEC_ID_PCM_U24LE)

PCMDEF(u16be, "PCM unsigned 16-bit big-endian",
       AV_NE("uw", NULL), AV_CODEC_ID_PCM_U16BE)

PCMDEF(u16le, "PCM unsigned 16-bit little-endian",
       AV_NE(NULL, "uw"), AV_CODEC_ID_PCM_U16LE)

PCMDEF(u8, "PCM unsigned 8-bit",
       "ub", AV_CODEC_ID_PCM_U8)

PCMDEF(alaw, "PCM A-law",
       "al", AV_CODEC_ID_PCM_ALAW)

PCMDEF(mulaw, "PCM mu-law",
       "ul", AV_CODEC_ID_PCM_MULAW)
/contrib/sdk/sources/ffmpeg/libavformat/pcmenc.c
0,0 → 1,94
/*
* RAW PCM muxers
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rawenc.h"
 
/*
 * Declare one raw PCM muxer per sample format. Each expansion emits an
 * AVOutputFormat that writes packets verbatim via ff_raw_write_packet;
 * only name, codec id and default extensions differ.
 */
#define PCMDEF(name_, long_name_, ext, codec)               \
AVOutputFormat ff_pcm_ ## name_ ## _muxer = {               \
    .name         = #name_,                                 \
    .long_name    = NULL_IF_CONFIG_SMALL(long_name_),       \
    .extensions   = ext,                                    \
    .audio_codec  = codec,                                  \
    .video_codec  = AV_CODEC_ID_NONE,                       \
    .write_packet = ff_raw_write_packet,                    \
    .flags        = AVFMT_NOTIMESTAMPS,                     \
};

/* AV_NE() ties the bare "sw"/"uw" extensions to the native-endian
 * variant only. */
PCMDEF(f64be, "PCM 64-bit floating-point big-endian",
       NULL, AV_CODEC_ID_PCM_F64BE)

PCMDEF(f64le, "PCM 64-bit floating-point little-endian",
       NULL, AV_CODEC_ID_PCM_F64LE)

PCMDEF(f32be, "PCM 32-bit floating-point big-endian",
       NULL, AV_CODEC_ID_PCM_F32BE)

PCMDEF(f32le, "PCM 32-bit floating-point little-endian",
       NULL, AV_CODEC_ID_PCM_F32LE)

PCMDEF(s32be, "PCM signed 32-bit big-endian",
       NULL, AV_CODEC_ID_PCM_S32BE)

PCMDEF(s32le, "PCM signed 32-bit little-endian",
       NULL, AV_CODEC_ID_PCM_S32LE)

PCMDEF(s24be, "PCM signed 24-bit big-endian",
       NULL, AV_CODEC_ID_PCM_S24BE)

PCMDEF(s24le, "PCM signed 24-bit little-endian",
       NULL, AV_CODEC_ID_PCM_S24LE)

PCMDEF(s16be, "PCM signed 16-bit big-endian",
       AV_NE("sw", NULL), AV_CODEC_ID_PCM_S16BE)

PCMDEF(s16le, "PCM signed 16-bit little-endian",
       AV_NE(NULL, "sw"), AV_CODEC_ID_PCM_S16LE)

PCMDEF(s8, "PCM signed 8-bit",
       "sb", AV_CODEC_ID_PCM_S8)

PCMDEF(u32be, "PCM unsigned 32-bit big-endian",
       NULL, AV_CODEC_ID_PCM_U32BE)

PCMDEF(u32le, "PCM unsigned 32-bit little-endian",
       NULL, AV_CODEC_ID_PCM_U32LE)

PCMDEF(u24be, "PCM unsigned 24-bit big-endian",
       NULL, AV_CODEC_ID_PCM_U24BE)

PCMDEF(u24le, "PCM unsigned 24-bit little-endian",
       NULL, AV_CODEC_ID_PCM_U24LE)

PCMDEF(u16be, "PCM unsigned 16-bit big-endian",
       AV_NE("uw", NULL), AV_CODEC_ID_PCM_U16BE)

PCMDEF(u16le, "PCM unsigned 16-bit little-endian",
       AV_NE(NULL, "uw"), AV_CODEC_ID_PCM_U16LE)

PCMDEF(u8, "PCM unsigned 8-bit",
       "ub", AV_CODEC_ID_PCM_U8)

PCMDEF(alaw, "PCM A-law",
       "al", AV_CODEC_ID_PCM_ALAW)

PCMDEF(mulaw, "PCM mu-law",
       "ul", AV_CODEC_ID_PCM_MULAW)
/contrib/sdk/sources/ffmpeg/libavformat/pjsdec.c
0,0 → 1,137
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* PJS (Phoenix Japanimation Society) subtitles format demuxer
*
* @see http://subs.com.ru/page.php?al=pjs
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
 
typedef struct {
FFDemuxSubtitlesQueue q;
} PJSContext;
 
/**
 * Probe for PJS: the buffer must start with "start,end," followed by
 * more data, and the first line must contain a pair of double quotes
 * (the quoted subtitle text) before its line terminator.
 */
static int pjs_probe(AVProbeData *p)
{
    char c;
    int64_t start, end;
    const unsigned char *ptr = p->buf;

    if (sscanf(ptr, "%"SCNd64",%"SCNd64",%c", &start, &end, &c) == 3) {
        size_t q1pos = strcspn(ptr, "\"");                         /* opening quote */
        size_t q2pos = q1pos + strcspn(ptr + q1pos + 1, "\"") + 1; /* closing quote */
        if (strcspn(ptr, "\r\n") > q2pos)  /* both quotes precede the line end */
            return AVPROBE_SCORE_MAX;
    }
    return 0;
}
 
/**
 * Parse "start,end" from *line. On success: advance *line past the
 * opening double quote, store end-start into *duration and return the
 * start timestamp; otherwise return AV_NOPTS_VALUE.
 */
static int64_t read_ts(char **line, int *duration)
{
    int64_t start, end;

    if (sscanf(*line, "%"SCNd64",%"SCNd64, &start, &end) != 2)
        return AV_NOPTS_VALUE;

    *line    += strcspn(*line, "\"") + 1;
    *duration = end - start;
    return start;
}
 
/**
 * Parse the whole PJS file up front: each line has the form
 * start,end,"text" with timestamps in 1/10 s units (matching the
 * stream time base set below); decoded cues are queued and later
 * served by pjs_read_packet().
 */
static int pjs_read_header(AVFormatContext *s)
{
    PJSContext *pjs = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    int res = 0;

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 10);   /* time base: tenths of a second */
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_PJS;

    while (!url_feof(s->pb)) {
        char line[4096];
        char *p = line;
        const int64_t pos = avio_tell(s->pb);
        int len = ff_get_line(s->pb, line, sizeof(line));
        int64_t pts_start;
        int duration;

        if (!len)
            break;

        line[strcspn(line, "\r\n")] = 0;   /* strip the line terminator */

        pts_start = read_ts(&p, &duration);
        if (pts_start != AV_NOPTS_VALUE) {
            AVPacket *sub;

            p[strcspn(p, "\"")] = 0;   /* cut the text at the closing quote */
            sub = ff_subtitles_queue_insert(&pjs->q, p, strlen(p), 0);
            if (!sub)
                return AVERROR(ENOMEM);
            sub->pos = pos;
            sub->pts = pts_start;
            sub->duration = duration;
        }
    }

    ff_subtitles_queue_finalize(&pjs->q);
    return res;
}
 
/* Pop the next queued subtitle packet (every cue was parsed and queued
 * in pjs_read_header()). */
static int pjs_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    PJSContext *pjs = s->priv_data;
    return ff_subtitles_queue_read_packet(&pjs->q, pkt);
}
 
/* Seek within the pre-parsed cue queue via the generic subtitles
 * helper; no I/O repositioning is needed. */
static int pjs_read_seek(AVFormatContext *s, int stream_index,
                         int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    PJSContext *pjs = s->priv_data;
    return ff_subtitles_queue_seek(&pjs->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* Release every packet still held in the subtitle queue. */
static int pjs_read_close(AVFormatContext *s)
{
    PJSContext *pjs = s->priv_data;
    ff_subtitles_queue_clean(&pjs->q);
    return 0;
}
 
/* Demuxer registration; matched by content probe or .pjs extension. */
AVInputFormat ff_pjs_demuxer = {
    .name           = "pjs",
    .long_name      = NULL_IF_CONFIG_SMALL("PJS (Phoenix Japanimation Society) subtitles"),
    .priv_data_size = sizeof(PJSContext),
    .read_probe     = pjs_probe,
    .read_header    = pjs_read_header,
    .read_packet    = pjs_read_packet,
    .read_seek2     = pjs_read_seek,
    .read_close     = pjs_read_close,
    .extensions     = "pjs",
};
/contrib/sdk/sources/ffmpeg/libavformat/pmpdec.c
0,0 → 1,197
/*
* PMP demuxer.
* Copyright (c) 2011 Reimar Döffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
typedef struct {
int cur_stream;
int num_streams;
int audio_packets;
int current_packet;
uint32_t *packet_sizes;
int packet_sizes_alloc;
} PMPContext;
 
/* A PMP file starts with the tag "pmpm" followed by LE32 version 1. */
static int pmp_probe(AVProbeData *p) {
    const uint8_t *b = p->buf;

    if (AV_RN32(b) != AV_RN32("pmpm") || AV_RL32(b + 4) != 1)
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/**
 * Parse the fixed PMP header: one video stream (MPEG-4 or H.264), the
 * per-frame index, and num_streams-1 audio streams that all share the
 * same codec/rate/channel parameters.
 * Returns 0 on success or a negative AVERROR code.
 */
static int pmp_header(AVFormatContext *s)
{
    PMPContext *pmp = s->priv_data;
    AVIOContext *pb = s->pb;
    int tb_num, tb_den;
    uint32_t index_cnt;
    int audio_codec_id = AV_CODEC_ID_NONE;
    int srate, channels;
    unsigned i;
    uint64_t pos;
    int64_t fsize = avio_size(pb);

    AVStream *vst = avformat_new_stream(s, NULL);
    if (!vst)
        return AVERROR(ENOMEM);
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    avio_skip(pb, 8);   /* "pmpm" magic + version, validated by pmp_probe() */
    switch (avio_rl32(pb)) {
    case 0:
        vst->codec->codec_id = AV_CODEC_ID_MPEG4;
        break;
    case 1:
        vst->codec->codec_id = AV_CODEC_ID_H264;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "Unsupported video format\n");
        break;
    }
    index_cnt          = avio_rl32(pb);
    vst->codec->width  = avio_rl32(pb);
    vst->codec->height = avio_rl32(pb);

    tb_num = avio_rl32(pb);
    tb_den = avio_rl32(pb);
    avpriv_set_pts_info(vst, 32, tb_num, tb_den);
    vst->nb_frames = index_cnt;   /* one index entry per video frame */
    vst->duration  = index_cnt;

    switch (avio_rl32(pb)) {
    case 0:
        audio_codec_id = AV_CODEC_ID_MP3;
        break;
    case 1:
        av_log(s, AV_LOG_ERROR, "AAC not yet correctly supported\n");
        audio_codec_id = AV_CODEC_ID_AAC;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "Unsupported audio format\n");
        break;
    }
    pmp->num_streams = avio_rl16(pb) + 1;   /* field stores num_streams - 1 */
    avio_skip(pb, 10);
    srate    = avio_rl32(pb);
    channels = avio_rl32(pb) + 1;           /* field stores channels - 1 */
    /* Packet data starts immediately after the index (one LE32/frame). */
    pos = avio_tell(pb) + 4LL * index_cnt;
    for (i = 0; i < index_cnt; i++) {
        uint32_t size = avio_rl32(pb);
        int flags = size & 1 ? AVINDEX_KEYFRAME : 0;   /* LSB = keyframe flag */
        if (url_feof(pb)) {
            av_log(s, AV_LOG_FATAL, "Encountered EOF while reading index.\n");
            return AVERROR_INVALIDDATA;
        }
        size >>= 1;
        /* minimum per-chunk bookkeeping read later in pmp_packet() */
        if (size < 9 + 4 * pmp->num_streams) {
            av_log(s, AV_LOG_ERROR, "Packet too small\n");
            return AVERROR_INVALIDDATA;
        }
        av_add_index_entry(vst, pos, i, size, 0, flags);
        pos += size;
        /* NOTE(review): only the first entry is bounds-checked against
         * the file size; later entries may still point past EOF. */
        if (fsize > 0 && i == 0 && pos > fsize) {
            av_log(s, AV_LOG_ERROR, "File ends before first packet\n");
            return AVERROR_INVALIDDATA;
        }
    }
    for (i = 1; i < pmp->num_streams; i++) {
        AVStream *ast = avformat_new_stream(s, NULL);
        if (!ast)
            return AVERROR(ENOMEM);
        ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id    = audio_codec_id;
        ast->codec->channels    = channels;
        ast->codec->sample_rate = srate;
        avpriv_set_pts_info(ast, 32, 1, srate);
    }
    return 0;
}
 
/**
 * Read the next packet. Packets are interleaved round-robin: a chunk
 * starts with one video packet (stream 0) whose header also carries
 * the per-packet size table, followed by audio_packets packets for
 * each audio stream in turn.
 */
static int pmp_packet(AVFormatContext *s, AVPacket *pkt)
{
    PMPContext *pmp = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = 0;
    int i;

    if (url_feof(pb))
        return AVERROR_EOF;
    if (pmp->cur_stream == 0) {
        int num_packets;
        /* per-chunk header: audio packets per stream, then 8 skipped
         * bytes, then one LE32 size per packet in the chunk */
        pmp->audio_packets = avio_r8(pb);
        if (!pmp->audio_packets) {
            avpriv_request_sample(s, "0 audio packets");
            return AVERROR_PATCHWELCOME;
        }
        num_packets = (pmp->num_streams - 1) * pmp->audio_packets + 1;
        avio_skip(pb, 8);
        pmp->current_packet = 0;
        av_fast_malloc(&pmp->packet_sizes,
                       &pmp->packet_sizes_alloc,
                       num_packets * sizeof(*pmp->packet_sizes));
        if (!pmp->packet_sizes_alloc) {
            av_log(s, AV_LOG_ERROR, "Cannot (re)allocate packet buffer\n");
            return AVERROR(ENOMEM);
        }
        for (i = 0; i < num_packets; i++)
            pmp->packet_sizes[i] = avio_rl32(pb);
    }
    ret = av_get_packet(pb, pkt, pmp->packet_sizes[pmp->current_packet]);
    if (ret >= 0) {
        ret = 0;
        // FIXME: this is a hack that should be removed once
        // compute_pkt_fields() can handle timestamps properly
        if (pmp->cur_stream == 0)
            pkt->dts = s->streams[0]->cur_dts++;
        pkt->stream_index = pmp->cur_stream;
    }
    /* Advance the round-robin; each stream keeps the slot for
     * audio_packets consecutive packets. */
    if (pmp->current_packet % pmp->audio_packets == 0)
        pmp->cur_stream = (pmp->cur_stream + 1) % pmp->num_streams;
    pmp->current_packet++;
    return ret;
}
 
/* Reset the round-robin to the video stream, then report failure so
 * the generic index-based seek code performs the repositioning. */
static int pmp_seek(AVFormatContext *s, int stream_index, int64_t ts, int flags)
{
    PMPContext *pmp = s->priv_data;
    pmp->cur_stream = 0;
    // fall back on default seek now
    return -1;
}
 
/* Free the per-chunk packet size table. */
static int pmp_close(AVFormatContext *s)
{
    PMPContext *pmp = s->priv_data;
    av_freep(&pmp->packet_sizes);
    return 0;
}
 
/* Demuxer registration: probing is magic-based ("pmpm" + version 1). */
AVInputFormat ff_pmp_demuxer = {
    .name           = "pmp",
    .long_name      = NULL_IF_CONFIG_SMALL("Playstation Portable PMP"),
    .priv_data_size = sizeof(PMPContext),
    .read_probe     = pmp_probe,
    .read_header    = pmp_header,
    .read_packet    = pmp_packet,
    .read_seek      = pmp_seek,
    .read_close     = pmp_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/psxstr.c
0,0 → 1,320
/*
* Sony Playstation (PSX) STR File Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* PSX STR file demuxer
* by Mike Melanson (melanson@pcisys.net)
* This module handles streams that have been ripped from Sony Playstation
* CD games. This demuxer can handle either raw STR files (which are just
* concatenations of raw compact disc sectors) or STR files with 0x2C-byte
* RIFF headers, followed by CD sectors.
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define RIFF_TAG MKTAG('R', 'I', 'F', 'F')
#define CDXA_TAG MKTAG('C', 'D', 'X', 'A')
 
#define RAW_CD_SECTOR_SIZE 2352
#define RAW_CD_SECTOR_DATA_SIZE 2304
#define VIDEO_DATA_CHUNK_SIZE 0x7E0
#define VIDEO_DATA_HEADER_SIZE 0x38
#define RIFF_HEADER_SIZE 0x2C
 
#define CDXA_TYPE_MASK 0x0E
#define CDXA_TYPE_DATA 0x08
#define CDXA_TYPE_AUDIO 0x04
#define CDXA_TYPE_VIDEO 0x02
 
#define STR_MAGIC (0x80010160)
 
typedef struct StrChannel {
/* video parameters */
int video_stream_index;
AVPacket tmp_pkt;
 
/* audio parameters */
int audio_stream_index;
} StrChannel;
 
typedef struct StrDemuxContext {
 
/* a STR file can contain up to 32 channels of data */
StrChannel channels[32];
} StrDemuxContext;
 
static const uint8_t sync_header[12] = {0x00,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00};
 
/**
 * Probe for a PSX STR stream: optionally skip a RIFF/CDXA header, then
 * require every whole 2352-byte raw CD sector in the buffer to carry
 * the sync pattern, a channel < 32 and a plausible CDXA subheader,
 * counting the audio/video sectors seen.
 */
static int str_probe(AVProbeData *p)
{
    const uint8_t *sector= p->buf;
    const uint8_t *end= sector + p->buf_size;
    int aud=0, vid=0;

    if (p->buf_size < RAW_CD_SECTOR_SIZE)
        return 0;

    if ((AV_RL32(&p->buf[0]) == RIFF_TAG) &&
        (AV_RL32(&p->buf[8]) == CDXA_TAG)) {

        /* RIFF header seen; skip 0x2C bytes */
        sector += RIFF_HEADER_SIZE;
    }

    while (end - sector >= RAW_CD_SECTOR_SIZE) {
        /* look for CD sync header (00, 0xFF x 10, 00) */
        if (memcmp(sector,sync_header,sizeof(sync_header)))
            return 0;

        /* byte 0x11 is the channel number; STR supports at most 32 */
        if (sector[0x11] >= 32)
            return 0;

        switch (sector[0x12] & CDXA_TYPE_MASK) {
        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO: {
                int current_sector = AV_RL16(&sector[0x1C]);
                int sector_count   = AV_RL16(&sector[0x1E]);
                int frame_size     = AV_RL32(&sector[0x24]);

                /* sector index, count and frame size must be consistent */
                if(!(   frame_size>=0
                     && current_sector < sector_count
                     && sector_count*VIDEO_DATA_CHUNK_SIZE >=frame_size)){
                    return 0;
                }

                /*st->codec->width  = AV_RL16(&sector[0x28]);
                st->codec->height = AV_RL16(&sector[0x2A]);*/

//                if (current_sector == sector_count-1) {
                    vid++;
//                }

            }
            break;
        case CDXA_TYPE_AUDIO:
            /* reserved bits in the audio coding byte must be clear */
            if(sector[0x13]&0x2A)
                return 0;
            aud++;
            break;
        default:
            if(sector[0x12] & CDXA_TYPE_MASK)
                return 0;
        }
        sector += RAW_CD_SECTOR_SIZE;
    }
    /* MPEG files (like those ripped from VCDs) can also look like this;
     * only return half certainty */
    if(vid+aud > 3)  return AVPROBE_SCORE_EXTENSION;
    else if(vid+aud) return 1;
    else             return 0;
}
 
/**
 * Initialize the demuxer: skip an optional RIFF header and mark all 32
 * channels as having no streams yet (streams are created in read_packet).
 */
static int str_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    StrDemuxContext *str = s->priv_data;
    unsigned char header[RAW_CD_SECTOR_SIZE];
    int data_start = 0;
    int channel;

    /* peek at the first 0x2C bytes to detect an optional RIFF header */
    if (avio_read(pb, header, RIFF_HEADER_SIZE) != RIFF_HEADER_SIZE)
        return AVERROR(EIO);
    if (AV_RL32(&header[0]) == RIFF_TAG)
        data_start = RIFF_HEADER_SIZE;

    avio_seek(pb, data_start, SEEK_SET);

    for (channel = 0; channel < 32; channel++) {
        str->channels[channel].video_stream_index = -1;
        str->channels[channel].audio_stream_index = -1;
    }

    /* streams appear lazily as their sectors are encountered */
    s->ctx_flags |= AVFMTCTX_NOHEADER;

    return 0;
}
 
/**
 * Read the next audio or video packet.
 *
 * Video frames span several sectors; their chunks are accumulated in the
 * channel's tmp_pkt, and a packet is returned only when the final sector
 * (current_sector == sector_count-1) arrives. Audio sectors are returned
 * directly. Streams are created lazily on the first sector seen for a
 * channel.
 */
static int str_read_packet(AVFormatContext *s,
                           AVPacket *ret_pkt)
{
    AVIOContext *pb = s->pb;
    StrDemuxContext *str = s->priv_data;
    unsigned char sector[RAW_CD_SECTOR_SIZE];
    int channel;
    AVPacket *pkt;
    AVStream *st;

    while (1) {

        if (avio_read(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
            return AVERROR(EIO);

        channel = sector[0x11];  /* CD-XA channel number */
        if (channel >= 32)
            return AVERROR_INVALIDDATA;

        switch (sector[0x12] & CDXA_TYPE_MASK) {

        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO:
        {

            int current_sector = AV_RL16(&sector[0x1C]);
            int sector_count   = AV_RL16(&sector[0x1E]);
            int frame_size     = AV_RL32(&sector[0x24]);

            /* validate the frame layout before writing into tmp_pkt */
            if(!( frame_size>=0
               && current_sector < sector_count
               && sector_count*VIDEO_DATA_CHUNK_SIZE >=frame_size)){
                av_log(s, AV_LOG_ERROR, "Invalid parameters %d %d %d\n", current_sector, sector_count, frame_size);
                break;
            }

            if(str->channels[channel].video_stream_index < 0){
                /* allocate a new AVStream */
                st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                avpriv_set_pts_info(st, 64, 1, 15);  /* 1/15 s time base */

                str->channels[channel].video_stream_index = st->index;

                st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                st->codec->codec_id   = AV_CODEC_ID_MDEC;
                st->codec->codec_tag  = 0;  /* no fourcc */
                st->codec->width      = AV_RL16(&sector[0x28]);
                st->codec->height     = AV_RL16(&sector[0x2A]);
            }

            /* if this is the first sector of the frame, allocate a pkt */
            pkt = &str->channels[channel].tmp_pkt;

            if(pkt->size != sector_count*VIDEO_DATA_CHUNK_SIZE){
                if(pkt->data)
                    av_log(s, AV_LOG_ERROR, "missmatching sector_count\n");
                av_free_packet(pkt);
                if (av_new_packet(pkt, sector_count*VIDEO_DATA_CHUNK_SIZE))
                    return AVERROR(EIO);

                pkt->pos = avio_tell(pb) - RAW_CD_SECTOR_SIZE;
                pkt->stream_index =
                    str->channels[channel].video_stream_index;
            }

            /* copy this sector's payload into its slot within the frame */
            memcpy(pkt->data + current_sector*VIDEO_DATA_CHUNK_SIZE,
                   sector + VIDEO_DATA_HEADER_SIZE,
                   VIDEO_DATA_CHUNK_SIZE);

            if (current_sector == sector_count-1) {
                /* frame complete: hand the buffer to the caller and
                 * reset tmp_pkt so a fresh one is allocated next frame */
                pkt->size = frame_size;
                *ret_pkt  = *pkt;
                pkt->data = NULL;
                pkt->size = -1;
                pkt->buf  = NULL;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
                pkt->destruct = NULL;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
                return 0;
            }

        }
        break;

        case CDXA_TYPE_AUDIO:
            if(str->channels[channel].audio_stream_index < 0){
                int fmt = sector[0x13];  /* coding-info byte */
                /* allocate a new AVStream */
                st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);

                str->channels[channel].audio_stream_index = st->index;

                st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id   = AV_CODEC_ID_ADPCM_XA;
                st->codec->codec_tag  = 0;  /* no fourcc */
                /* bit 0: stereo flag; bit 2: half sample rate */
                if (fmt & 1) {
                    st->codec->channels       = 2;
                    st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
                } else {
                    st->codec->channels       = 1;
                    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
                }
                st->codec->sample_rate = (fmt&4)?18900:37800;
            //    st->codec->bit_rate = 0; //FIXME;
                st->codec->block_align = 128;

                avpriv_set_pts_info(st, 64, 18 * 224 / st->codec->channels,
                                    st->codec->sample_rate);
                st->start_time = 0;
            }
            pkt = ret_pkt;
            if (av_new_packet(pkt, 2304))
                return AVERROR(EIO);
            /* audio payload: 2304 bytes following the 24-byte sector header */
            memcpy(pkt->data,sector+24,2304);

            pkt->stream_index =
                str->channels[channel].audio_stream_index;
            pkt->duration = 1;
            return 0;
        default:
            av_log(s, AV_LOG_WARNING, "Unknown sector type %02X\n", sector[0x12]);
            /* drop the sector and move on */
            break;
        }

        if (url_feof(pb))
            return AVERROR(EIO);
    }
}
 
/**
 * Release any partially assembled video frames still held per channel.
 */
static int str_read_close(AVFormatContext *s)
{
    StrDemuxContext *str = s->priv_data;
    int channel;

    for (channel = 0; channel < 32; channel++) {
        AVPacket *pkt = &str->channels[channel].tmp_pkt;
        if (pkt->data)
            av_free_packet(pkt);
    }

    return 0;
}
 
/* Demuxer registration for Sony Playstation STR files. */
AVInputFormat ff_str_demuxer = {
    .name           = "psxstr",
    .long_name      = NULL_IF_CONFIG_SMALL("Sony Playstation STR"),
    .priv_data_size = sizeof(StrDemuxContext),
    .read_probe     = str_probe,
    .read_header    = str_read_header,
    .read_packet    = str_read_packet,
    .read_close     = str_read_close,
    .flags          = AVFMT_NO_BYTE_SEEK,
};
/contrib/sdk/sources/ffmpeg/libavformat/pva.c
0,0 → 1,225
/*
* TechnoTrend PVA (.pva) demuxer
* Copyright (c) 2007, 2008 Ivo van Poorten
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "mpeg.h"
 
#define PVA_MAX_PAYLOAD_LENGTH 0x17f8
#define PVA_VIDEO_PAYLOAD 0x01
#define PVA_AUDIO_PAYLOAD 0x02
#define PVA_MAGIC (('A' << 8) + 'V')
 
typedef struct {
    int continue_pes;  /* PES payload bytes still expected from previous PVA packets */
} PVAContext;
 
/**
 * Validate a PVA packet header at p.
 * @return total packet size (header + payload) on success, -1 otherwise
 */
static int pva_check(const uint8_t *p) {
    int length = AV_RB16(p + 6);
    int invalid = AV_RB16(p) != PVA_MAGIC ||
                  !p[2] || p[2] > 2 ||
                  p[4] != 0x55 ||
                  (p[5] & 0xe0) ||
                  length > PVA_MAX_PAYLOAD_LENGTH;
    return invalid ? -1 : length + 8;
}
 
/**
 * Probe for PVA content: one valid packet header gives quarter score,
 * two consecutive valid headers give high confidence.
 */
static int pva_probe(AVProbeData * pd) {
    const unsigned char *buf = pd->buf;
    int first = pva_check(buf);

    if (first < 0)
        return 0;

    if (pd->buf_size >= first + 8 && pva_check(buf + first) >= 0)
        return AVPROBE_SCORE_EXTENSION;

    return AVPROBE_SCORE_MAX / 4;
}
 
/**
 * Create the two fixed PVA streams (MPEG-2 video, MP2 audio); codec
 * parameters are extracted later from the compressed bitstream.
 */
static int pva_read_header(AVFormatContext *s) {
    AVStream *st;

    /* stream 0: MPEG-2 video */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_MPEG2VIDEO;
    st->need_parsing      = AVSTREAM_PARSE_FULL;
    avpriv_set_pts_info(st, 32, 1, 90000);
    av_add_index_entry(st, 0, 0, 0, 0, AVINDEX_KEYFRAME);

    /* stream 1: MP2 audio */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = AV_CODEC_ID_MP2;
    st->need_parsing      = AVSTREAM_PARSE_FULL;
    avpriv_set_pts_info(st, 33, 1, 90000);
    av_add_index_entry(st, 0, 0, 0, 0, AVINDEX_KEYFRAME);

    return 0;
}
 
/* During timestamp scanning (read_packet == 0) parse errors are expected
 * while resyncing, so logging is suppressed. */
#define pva_log if (read_packet) av_log

/**
 * Parse one PVA packet header at the current position.
 *
 * On success *pts, *len and *strid describe the payload that follows at
 * the current file position. Audio payloads get their PES header stripped
 * here; pvactx->continue_pes tracks how many PES payload bytes are still
 * pending across PVA packets.
 */
static int read_part_of_packet(AVFormatContext *s, int64_t *pts,
                               int *len, int *strid, int read_packet) {
    AVIOContext *pb = s->pb;
    PVAContext *pvactx = s->priv_data;
    int syncword, streamid, reserved, flags, length, pts_flag;
    int64_t pva_pts = AV_NOPTS_VALUE, startpos;

recover:
    startpos = avio_tell(pb);

    /* 8-byte PVA packet header */
    syncword = avio_rb16(pb);
    streamid = avio_r8(pb);
    avio_r8(pb);               /* counter not used */
    reserved = avio_r8(pb);
    flags    = avio_r8(pb);
    length   = avio_rb16(pb);

    pts_flag = flags & 0x10;

    if (syncword != PVA_MAGIC) {
        pva_log(s, AV_LOG_ERROR, "invalid syncword\n");
        return AVERROR(EIO);
    }
    if (streamid != PVA_VIDEO_PAYLOAD && streamid != PVA_AUDIO_PAYLOAD) {
        pva_log(s, AV_LOG_ERROR, "invalid streamid\n");
        return AVERROR(EIO);
    }
    if (reserved != 0x55) {
        pva_log(s, AV_LOG_WARNING, "expected reserved byte to be 0x55\n");
    }
    if (length > PVA_MAX_PAYLOAD_LENGTH) {
        pva_log(s, AV_LOG_ERROR, "invalid payload length %u\n", length);
        return AVERROR(EIO);
    }

    if (streamid == PVA_VIDEO_PAYLOAD && pts_flag) {
        /* video PTS is carried directly in the PVA header extension */
        pva_pts = avio_rb32(pb);
        length -= 4;
    } else if (streamid == PVA_AUDIO_PAYLOAD) {
        /* PVA Audio Packets either start with a signaled PES packet or
         * are a continuation of the previous PES packet. New PES packets
         * always start at the beginning of a PVA Packet, never somewhere in
         * the middle. */
        if (!pvactx->continue_pes) {
            int pes_signal, pes_header_data_length, pes_packet_length,
                pes_flags;
            unsigned char pes_header_data[256];

            pes_signal             = avio_rb24(pb);
            avio_r8(pb);
            pes_packet_length      = avio_rb16(pb);
            pes_flags              = avio_rb16(pb);
            pes_header_data_length = avio_r8(pb);

            if (pes_signal != 1) {
                pva_log(s, AV_LOG_WARNING, "expected signaled PES packet, "
                        "trying to recover\n");
                avio_skip(pb, length - 9);
                if (!read_packet)
                    return AVERROR(EIO);
                goto recover;
            }

            avio_read(pb, pes_header_data, pes_header_data_length);
            /* 9 = PES start code + length + flags bytes consumed above */
            length -= 9 + pes_header_data_length;

            pes_packet_length -= 3 + pes_header_data_length;

            pvactx->continue_pes = pes_packet_length;

            /* extract the PTS from the PES header when present */
            if (pes_flags & 0x80 && (pes_header_data[0] & 0xf0) == 0x20)
                pva_pts = ff_parse_pes_pts(pes_header_data);
        }

        pvactx->continue_pes -= length;

        if (pvactx->continue_pes < 0) {
            pva_log(s, AV_LOG_WARNING, "audio data corruption\n");
            pvactx->continue_pes = 0;
        }
    }

    /* record PTS positions so the generic code can binary-search seeks */
    if (pva_pts != AV_NOPTS_VALUE)
        av_add_index_entry(s->streams[streamid-1], startpos, pva_pts, 0, 0, AVINDEX_KEYFRAME);

    *pts = pva_pts;
    *len = length;
    *strid = streamid;
    return 0;
}
 
/**
 * Read one PVA payload as a packet; stream index 0 = video, 1 = audio.
 */
static int pva_read_packet(AVFormatContext *s, AVPacket *pkt) {
    AVIOContext *pb = s->pb;
    int64_t pts;
    int bytes, length, streamid;

    if (read_part_of_packet(s, &pts, &length, &streamid, 1) < 0)
        return AVERROR(EIO);
    bytes = av_get_packet(pb, pkt, length);
    if (bytes <= 0)
        return AVERROR(EIO);

    pkt->stream_index = streamid - 1;
    pkt->pts          = pts;

    return bytes;
}
 
/**
 * Scan forward from *pos for the next packet of stream_index that carries
 * a PTS; used by the generic seeking code. Updates *pos to the packet
 * start and returns its PTS, or AV_NOPTS_VALUE if none found in range.
 */
static int64_t pva_read_timestamp(struct AVFormatContext *s, int stream_index,
                                  int64_t *pos, int64_t pos_limit) {
    AVIOContext *pb = s->pb;
    PVAContext *pvactx = s->priv_data;
    int length, streamid;
    int64_t res = AV_NOPTS_VALUE;

    /* never scan further than a few maximum-size packets */
    pos_limit = FFMIN(*pos+PVA_MAX_PAYLOAD_LENGTH*8, (uint64_t)*pos+pos_limit);

    while (*pos < pos_limit) {
        res = AV_NOPTS_VALUE;
        avio_seek(pb, *pos, SEEK_SET);

        pvactx->continue_pes = 0;
        if (read_part_of_packet(s, &res, &length, &streamid, 0)) {
            /* not a packet boundary: resync one byte at a time */
            (*pos)++;
            continue;
        }
        if (streamid - 1 != stream_index || res == AV_NOPTS_VALUE) {
            /* wrong stream or no PTS: jump over the whole payload */
            *pos = avio_tell(pb) + length;
            continue;
        }
        break;
    }

    /* scanning may have left PES state mid-packet; reset it */
    pvactx->continue_pes = 0;
    return res;
}
 
/* Demuxer registration for TechnoTrend PVA files. */
AVInputFormat ff_pva_demuxer = {
    .name           = "pva",
    .long_name      = NULL_IF_CONFIG_SMALL("TechnoTrend PVA"),
    .priv_data_size = sizeof(PVAContext),
    .read_probe     = pva_probe,
    .read_header    = pva_read_header,
    .read_packet    = pva_read_packet,
    .read_timestamp = pva_read_timestamp,
};
/contrib/sdk/sources/ffmpeg/libavformat/pvfdec.c
0,0 → 1,75
/*
* PVF demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <limits.h>

#include "avformat.h"
#include "internal.h"
#include "pcm.h"
 
/**
 * Probe for PVF content by its 5-byte text magic.
 */
static int pvf_probe(AVProbeData *p)
{
    return memcmp(p->buf, "PVF1\n", 5) ? 0 : AVPROBE_SCORE_MAX;
}
 
/**
 * Parse the PVF text header: the "PVF1\n" magic followed by one line of
 * "<channels> <sample_rate> <bits_per_sample>", then raw PCM data.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on malformed or out-of-range
 *         header values, AVERROR(ENOMEM) on allocation failure
 */
static int pvf_read_header(AVFormatContext *s)
{
    char buffer[32];
    AVStream *st;
    int bps, channels, sample_rate;

    avio_skip(s->pb, 5); /* "PVF1\n" magic, already validated by probe */
    ff_get_line(s->pb, buffer, sizeof(buffer));
    if (sscanf(buffer, "%d %d %d",
               &channels,
               &sample_rate,
               &bps) != 3)
        return AVERROR_INVALIDDATA;

    if (channels <= 0 || bps <= 0 || sample_rate <= 0)
        return AVERROR_INVALIDDATA;

    /* bps and channels come straight from the file; reject values whose
     * product would overflow the signed int block_align computation (UB) */
    if (bps > INT_MAX / channels)
        return AVERROR_INVALIDDATA;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
    st->codec->channels              = channels;
    st->codec->sample_rate           = sample_rate;
    st->codec->codec_id              = ff_get_pcm_codec_id(bps, 0, 1, 0xFFFF);
    st->codec->bits_per_coded_sample = bps;
    st->codec->block_align           = bps * st->codec->channels / 8;

    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

    return 0;
}
 
/* Demuxer registration for Portable Voice Format files. */
AVInputFormat ff_pvf_demuxer = {
    .name           = "pvf",
    .long_name      = NULL_IF_CONFIG_SMALL("PVF (Portable Voice Format)"),
    .read_probe     = pvf_probe,
    .read_header    = pvf_read_header,
    .read_packet    = ff_pcm_read_packet,
    .read_seek      = ff_pcm_read_seek,
    .extensions     = "pvf",
    .flags          = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/qcp.c
0,0 → 1,196
/*
* QCP format (.qcp) demuxer
* Copyright (c) 2009 Kenan Gillet
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* QCP format (.qcp) demuxer
* @author Kenan Gillet
* @see RFC 3625: "The QCP File Format and Media Types for Speech Data"
* http://tools.ietf.org/html/rfc3625
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
 
typedef struct {
    uint32_t data_size;                     ///< bytes remaining in the current "data" chunk

#define QCP_MAX_MODE 4
    int16_t rates_per_mode[QCP_MAX_MODE+1]; ///< contains the packet size corresponding
                                            ///< to each mode, -1 if no size.
} QCPContext;
 
/**
 * Last 15 out of 16 bytes of QCELP-13K GUID, as stored in the file;
 * the first byte of the GUID can be either 0x41 or 0x42.
 * Compared byte-wise against the codec GUID field of the QLCM header.
 */
static const uint8_t guid_qcelp_13k_part[15] = {
    0x6d, 0x7f, 0x5e, 0x15, 0xb1, 0xd0, 0x11, 0xba,
    0x91, 0x00, 0x80, 0x5f, 0xb4, 0xb9, 0x7e
};

/**
 * EVRC GUID as stored in the file (compared byte-wise, all 16 bytes)
 */
static const uint8_t guid_evrc[16] = {
    0x8d, 0xd4, 0x89, 0xe6, 0x76, 0x90, 0xb5, 0x46,
    0x91, 0xef, 0x73, 0x6a, 0x51, 0x00, 0xce, 0xb4
};

/**
 * SMV GUID as stored in the file (compared byte-wise, all 16 bytes)
 */
static const uint8_t guid_smv[16] = {
    0x75, 0x2b, 0x7c, 0x8d, 0x97, 0xa7, 0x49, 0xed,
    0x98, 0x5e, 0xd5, 0x3c, 0x8c, 0xc7, 0x5f, 0x84
};
 
/**
 * Check whether a GUID identifies the QCELP-13K codec.
 * @param guid contains at least 16 bytes
 * @return 1 if the guid is a qcelp_13k guid, 0 otherwise
 */
static int is_qcelp_13k_guid(const uint8_t *guid) {
    /* the first byte may be either of two values */
    if (guid[0] != 0x41 && guid[0] != 0x42)
        return 0;
    return !memcmp(guid + 1, guid_qcelp_13k_part, sizeof(guid_qcelp_13k_part));
}
 
/**
 * Probe for QCP content: a RIFF container whose form type is "QLCM"
 * immediately followed by an "fmt " chunk.
 */
static int qcp_probe(AVProbeData *pd)
{
    if (AV_RL32(pd->buf) != AV_RL32("RIFF"))
        return 0;
    if (AV_RL64(pd->buf + 8) != AV_RL64("QLCMfmt "))
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/**
 * Parse the QLCM header chunk: codec GUID, fixed/variable packet size,
 * sample rate, and the mode -> packet-size table used by read_packet.
 */
static int qcp_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    QCPContext *c = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    uint8_t buf[16];
    int i, nb_rates;

    if (!st)
        return AVERROR(ENOMEM);

    avio_rb32(pb);                    // "RIFF"
    avio_skip(pb, 4 + 8 + 4 + 1 + 1); // filesize + "QLCMfmt " + chunk-size + major-version + minor-version

    st->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    st->codec->channels       = 1;
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    /* the 16-byte codec GUID selects the codec */
    avio_read(pb, buf, 16);
    if (is_qcelp_13k_guid(buf)) {
        st->codec->codec_id = AV_CODEC_ID_QCELP;
    } else if (!memcmp(buf, guid_evrc, 16)) {
        st->codec->codec_id = AV_CODEC_ID_EVRC;
    } else if (!memcmp(buf, guid_smv, 16)) {
        st->codec->codec_id = AV_CODEC_ID_SMV;
    } else {
        av_log(s, AV_LOG_ERROR, "Unknown codec GUID.\n");
        return AVERROR_INVALIDDATA;
    }
    avio_skip(pb, 2 + 80); // codec-version + codec-name
    st->codec->bit_rate = avio_rl16(pb);

    /* nonzero packet_size means fixed-rate; variable-rate files clear it
     * later when a "vrat" chunk is seen */
    s->packet_size = avio_rl16(pb);
    avio_skip(pb, 2); // block-size
    st->codec->sample_rate = avio_rl16(pb);
    avio_skip(pb, 2); // sample-size

    /* rate-map-table: per-mode packet sizes; -1 marks unused modes */
    memset(c->rates_per_mode, -1, sizeof(c->rates_per_mode));
    nb_rates = avio_rl32(pb);
    nb_rates = FFMIN(nb_rates, 8);
    for (i=0; i<nb_rates; i++) {
        int size = avio_r8(pb);
        int mode = avio_r8(pb);
        if (mode > QCP_MAX_MODE) {
            av_log(s, AV_LOG_WARNING, "Unknown entry %d=>%d in rate-map-table.\n ", mode, size);
        } else
            c->rates_per_mode[mode] = size;
    }
    avio_skip(pb, 16 - 2*nb_rates + 20); // empty entries of rate-map-table + reserved

    return 0;
}
 
/**
 * Return one speech frame. Inside a "data" chunk, the first byte of each
 * frame is the rate mode; for variable-rate files it selects the frame
 * size from the rate-map-table parsed in qcp_read_header().
 */
static int qcp_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    QCPContext *c = s->priv_data;
    unsigned int chunk_size, tag;

    while(!url_feof(pb)) {
        if (c->data_size) {
            int pkt_size, ret, mode = avio_r8(pb);

            if (s->packet_size) {
                /* fixed-rate: constant frame size (mode byte included) */
                pkt_size = s->packet_size - 1;
            } else if (mode > QCP_MAX_MODE || (pkt_size = c->rates_per_mode[mode]) < 0) {
                /* unknown mode: drop one byte and resync */
                c->data_size--;
                continue;
            }

            if (c->data_size <= pkt_size) {
                av_log(s, AV_LOG_WARNING, "Data chunk is too small.\n");
                pkt_size = c->data_size - 1;
            }

            if ((ret = av_get_packet(pb, pkt, pkt_size)) >= 0) {
                if (pkt_size != ret)
                    av_log(s, AV_LOG_ERROR, "Packet size is too small.\n");

                /* consumed: mode byte + payload */
                c->data_size -= pkt_size + 1;
            }
            return ret;
        }

        /* chunks are 16-bit aligned; skip (and check) the padding byte */
        if (avio_tell(pb) & 1 && avio_r8(pb))
            av_log(s, AV_LOG_WARNING, "Padding should be 0.\n");

        tag        = avio_rl32(pb);
        chunk_size = avio_rl32(pb);
        switch (tag) {
        case MKTAG('v', 'r', 'a', 't'):
            /* variable-rate chunk: disables the fixed packet size */
            if (avio_rl32(pb)) // var-rate-flag
                s->packet_size = 0;
            avio_skip(pb, 4); // size-in-packets
            break;
        case MKTAG('d', 'a', 't', 'a'):
            c->data_size = chunk_size;
            break;

        default:
            avio_skip(pb, chunk_size);
        }
    }
    return AVERROR_EOF;
}
 
/* Demuxer registration for QCP speech files (RFC 3625). */
AVInputFormat ff_qcp_demuxer = {
    .name           = "qcp",
    .long_name      = NULL_IF_CONFIG_SMALL("QCP"),
    .priv_data_size = sizeof(QCPContext),
    .read_probe     = qcp_probe,
    .read_header    = qcp_read_header,
    .read_packet    = qcp_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/qtpalette.h
0,0 → 1,313
/*
* Default Palettes for Quicktime Files
* Automatically generated from a utility derived from XAnim:
* http://xanim.va.pubnix.com/home.html
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_QTPALETTE_H
#define AVFORMAT_QTPALETTE_H
 
#include <inttypes.h>
 
/* Default 4-entry (2 bpp) QuickTime palette, stored as R, G, B triplets. */
static const uint8_t ff_qt_default_palette_4[4 * 3] = {
    0x93, 0x65, 0x5E,
    0xFF, 0xFF, 0xFF,
    0xDF, 0xD0, 0xAB,
    0x00, 0x00, 0x00
};
 
/* Default 16-entry (4 bpp) QuickTime palette, stored as R, G, B triplets. */
static const uint8_t ff_qt_default_palette_16[16 * 3] = {
    0xFF, 0xFB, 0xFF,
    0xEF, 0xD9, 0xBB,
    0xE8, 0xC9, 0xB1,
    0x93, 0x65, 0x5E,
    0xFC, 0xDE, 0xE8,
    0x9D, 0x88, 0x91,
    0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF,
    0x47, 0x48, 0x37,
    0x7A, 0x5E, 0x55,
    0xDF, 0xD0, 0xAB,
    0xFF, 0xFB, 0xF9,
    0xE8, 0xCA, 0xC5,
    0x8A, 0x7C, 0x77,
    0x00, 0x00, 0x00
};
 
/* Default 256-entry (8 bpp) QuickTime palette, stored as R, G, B triplets:
 * the 6x6x6 web-safe color cube (entries 0-214) followed by red, green,
 * blue and gray ramps (215-254) and black (255). */
static const uint8_t ff_qt_default_palette_256[256 * 3] = {
    /*   0, 0x00 */  0xFF, 0xFF, 0xFF,
    /*   1, 0x01 */  0xFF, 0xFF, 0xCC,
    /*   2, 0x02 */  0xFF, 0xFF, 0x99,
    /*   3, 0x03 */  0xFF, 0xFF, 0x66,
    /*   4, 0x04 */  0xFF, 0xFF, 0x33,
    /*   5, 0x05 */  0xFF, 0xFF, 0x00,
    /*   6, 0x06 */  0xFF, 0xCC, 0xFF,
    /*   7, 0x07 */  0xFF, 0xCC, 0xCC,
    /*   8, 0x08 */  0xFF, 0xCC, 0x99,
    /*   9, 0x09 */  0xFF, 0xCC, 0x66,
    /*  10, 0x0A */  0xFF, 0xCC, 0x33,
    /*  11, 0x0B */  0xFF, 0xCC, 0x00,
    /*  12, 0x0C */  0xFF, 0x99, 0xFF,
    /*  13, 0x0D */  0xFF, 0x99, 0xCC,
    /*  14, 0x0E */  0xFF, 0x99, 0x99,
    /*  15, 0x0F */  0xFF, 0x99, 0x66,
    /*  16, 0x10 */  0xFF, 0x99, 0x33,
    /*  17, 0x11 */  0xFF, 0x99, 0x00,
    /*  18, 0x12 */  0xFF, 0x66, 0xFF,
    /*  19, 0x13 */  0xFF, 0x66, 0xCC,
    /*  20, 0x14 */  0xFF, 0x66, 0x99,
    /*  21, 0x15 */  0xFF, 0x66, 0x66,
    /*  22, 0x16 */  0xFF, 0x66, 0x33,
    /*  23, 0x17 */  0xFF, 0x66, 0x00,
    /*  24, 0x18 */  0xFF, 0x33, 0xFF,
    /*  25, 0x19 */  0xFF, 0x33, 0xCC,
    /*  26, 0x1A */  0xFF, 0x33, 0x99,
    /*  27, 0x1B */  0xFF, 0x33, 0x66,
    /*  28, 0x1C */  0xFF, 0x33, 0x33,
    /*  29, 0x1D */  0xFF, 0x33, 0x00,
    /*  30, 0x1E */  0xFF, 0x00, 0xFF,
    /*  31, 0x1F */  0xFF, 0x00, 0xCC,
    /*  32, 0x20 */  0xFF, 0x00, 0x99,
    /*  33, 0x21 */  0xFF, 0x00, 0x66,
    /*  34, 0x22 */  0xFF, 0x00, 0x33,
    /*  35, 0x23 */  0xFF, 0x00, 0x00,
    /*  36, 0x24 */  0xCC, 0xFF, 0xFF,
    /*  37, 0x25 */  0xCC, 0xFF, 0xCC,
    /*  38, 0x26 */  0xCC, 0xFF, 0x99,
    /*  39, 0x27 */  0xCC, 0xFF, 0x66,
    /*  40, 0x28 */  0xCC, 0xFF, 0x33,
    /*  41, 0x29 */  0xCC, 0xFF, 0x00,
    /*  42, 0x2A */  0xCC, 0xCC, 0xFF,
    /*  43, 0x2B */  0xCC, 0xCC, 0xCC,
    /*  44, 0x2C */  0xCC, 0xCC, 0x99,
    /*  45, 0x2D */  0xCC, 0xCC, 0x66,
    /*  46, 0x2E */  0xCC, 0xCC, 0x33,
    /*  47, 0x2F */  0xCC, 0xCC, 0x00,
    /*  48, 0x30 */  0xCC, 0x99, 0xFF,
    /*  49, 0x31 */  0xCC, 0x99, 0xCC,
    /*  50, 0x32 */  0xCC, 0x99, 0x99,
    /*  51, 0x33 */  0xCC, 0x99, 0x66,
    /*  52, 0x34 */  0xCC, 0x99, 0x33,
    /*  53, 0x35 */  0xCC, 0x99, 0x00,
    /*  54, 0x36 */  0xCC, 0x66, 0xFF,
    /*  55, 0x37 */  0xCC, 0x66, 0xCC,
    /*  56, 0x38 */  0xCC, 0x66, 0x99,
    /*  57, 0x39 */  0xCC, 0x66, 0x66,
    /*  58, 0x3A */  0xCC, 0x66, 0x33,
    /*  59, 0x3B */  0xCC, 0x66, 0x00,
    /*  60, 0x3C */  0xCC, 0x33, 0xFF,
    /*  61, 0x3D */  0xCC, 0x33, 0xCC,
    /*  62, 0x3E */  0xCC, 0x33, 0x99,
    /*  63, 0x3F */  0xCC, 0x33, 0x66,
    /*  64, 0x40 */  0xCC, 0x33, 0x33,
    /*  65, 0x41 */  0xCC, 0x33, 0x00,
    /*  66, 0x42 */  0xCC, 0x00, 0xFF,
    /*  67, 0x43 */  0xCC, 0x00, 0xCC,
    /*  68, 0x44 */  0xCC, 0x00, 0x99,
    /*  69, 0x45 */  0xCC, 0x00, 0x66,
    /*  70, 0x46 */  0xCC, 0x00, 0x33,
    /*  71, 0x47 */  0xCC, 0x00, 0x00,
    /*  72, 0x48 */  0x99, 0xFF, 0xFF,
    /*  73, 0x49 */  0x99, 0xFF, 0xCC,
    /*  74, 0x4A */  0x99, 0xFF, 0x99,
    /*  75, 0x4B */  0x99, 0xFF, 0x66,
    /*  76, 0x4C */  0x99, 0xFF, 0x33,
    /*  77, 0x4D */  0x99, 0xFF, 0x00,
    /*  78, 0x4E */  0x99, 0xCC, 0xFF,
    /*  79, 0x4F */  0x99, 0xCC, 0xCC,
    /*  80, 0x50 */  0x99, 0xCC, 0x99,
    /*  81, 0x51 */  0x99, 0xCC, 0x66,
    /*  82, 0x52 */  0x99, 0xCC, 0x33,
    /*  83, 0x53 */  0x99, 0xCC, 0x00,
    /*  84, 0x54 */  0x99, 0x99, 0xFF,
    /*  85, 0x55 */  0x99, 0x99, 0xCC,
    /*  86, 0x56 */  0x99, 0x99, 0x99,
    /*  87, 0x57 */  0x99, 0x99, 0x66,
    /*  88, 0x58 */  0x99, 0x99, 0x33,
    /*  89, 0x59 */  0x99, 0x99, 0x00,
    /*  90, 0x5A */  0x99, 0x66, 0xFF,
    /*  91, 0x5B */  0x99, 0x66, 0xCC,
    /*  92, 0x5C */  0x99, 0x66, 0x99,
    /*  93, 0x5D */  0x99, 0x66, 0x66,
    /*  94, 0x5E */  0x99, 0x66, 0x33,
    /*  95, 0x5F */  0x99, 0x66, 0x00,
    /*  96, 0x60 */  0x99, 0x33, 0xFF,
    /*  97, 0x61 */  0x99, 0x33, 0xCC,
    /*  98, 0x62 */  0x99, 0x33, 0x99,
    /*  99, 0x63 */  0x99, 0x33, 0x66,
    /* 100, 0x64 */  0x99, 0x33, 0x33,
    /* 101, 0x65 */  0x99, 0x33, 0x00,
    /* 102, 0x66 */  0x99, 0x00, 0xFF,
    /* 103, 0x67 */  0x99, 0x00, 0xCC,
    /* 104, 0x68 */  0x99, 0x00, 0x99,
    /* 105, 0x69 */  0x99, 0x00, 0x66,
    /* 106, 0x6A */  0x99, 0x00, 0x33,
    /* 107, 0x6B */  0x99, 0x00, 0x00,
    /* 108, 0x6C */  0x66, 0xFF, 0xFF,
    /* 109, 0x6D */  0x66, 0xFF, 0xCC,
    /* 110, 0x6E */  0x66, 0xFF, 0x99,
    /* 111, 0x6F */  0x66, 0xFF, 0x66,
    /* 112, 0x70 */  0x66, 0xFF, 0x33,
    /* 113, 0x71 */  0x66, 0xFF, 0x00,
    /* 114, 0x72 */  0x66, 0xCC, 0xFF,
    /* 115, 0x73 */  0x66, 0xCC, 0xCC,
    /* 116, 0x74 */  0x66, 0xCC, 0x99,
    /* 117, 0x75 */  0x66, 0xCC, 0x66,
    /* 118, 0x76 */  0x66, 0xCC, 0x33,
    /* 119, 0x77 */  0x66, 0xCC, 0x00,
    /* 120, 0x78 */  0x66, 0x99, 0xFF,
    /* 121, 0x79 */  0x66, 0x99, 0xCC,
    /* 122, 0x7A */  0x66, 0x99, 0x99,
    /* 123, 0x7B */  0x66, 0x99, 0x66,
    /* 124, 0x7C */  0x66, 0x99, 0x33,
    /* 125, 0x7D */  0x66, 0x99, 0x00,
    /* 126, 0x7E */  0x66, 0x66, 0xFF,
    /* 127, 0x7F */  0x66, 0x66, 0xCC,
    /* 128, 0x80 */  0x66, 0x66, 0x99,
    /* 129, 0x81 */  0x66, 0x66, 0x66,
    /* 130, 0x82 */  0x66, 0x66, 0x33,
    /* 131, 0x83 */  0x66, 0x66, 0x00,
    /* 132, 0x84 */  0x66, 0x33, 0xFF,
    /* 133, 0x85 */  0x66, 0x33, 0xCC,
    /* 134, 0x86 */  0x66, 0x33, 0x99,
    /* 135, 0x87 */  0x66, 0x33, 0x66,
    /* 136, 0x88 */  0x66, 0x33, 0x33,
    /* 137, 0x89 */  0x66, 0x33, 0x00,
    /* 138, 0x8A */  0x66, 0x00, 0xFF,
    /* 139, 0x8B */  0x66, 0x00, 0xCC,
    /* 140, 0x8C */  0x66, 0x00, 0x99,
    /* 141, 0x8D */  0x66, 0x00, 0x66,
    /* 142, 0x8E */  0x66, 0x00, 0x33,
    /* 143, 0x8F */  0x66, 0x00, 0x00,
    /* 144, 0x90 */  0x33, 0xFF, 0xFF,
    /* 145, 0x91 */  0x33, 0xFF, 0xCC,
    /* 146, 0x92 */  0x33, 0xFF, 0x99,
    /* 147, 0x93 */  0x33, 0xFF, 0x66,
    /* 148, 0x94 */  0x33, 0xFF, 0x33,
    /* 149, 0x95 */  0x33, 0xFF, 0x00,
    /* 150, 0x96 */  0x33, 0xCC, 0xFF,
    /* 151, 0x97 */  0x33, 0xCC, 0xCC,
    /* 152, 0x98 */  0x33, 0xCC, 0x99,
    /* 153, 0x99 */  0x33, 0xCC, 0x66,
    /* 154, 0x9A */  0x33, 0xCC, 0x33,
    /* 155, 0x9B */  0x33, 0xCC, 0x00,
    /* 156, 0x9C */  0x33, 0x99, 0xFF,
    /* 157, 0x9D */  0x33, 0x99, 0xCC,
    /* 158, 0x9E */  0x33, 0x99, 0x99,
    /* 159, 0x9F */  0x33, 0x99, 0x66,
    /* 160, 0xA0 */  0x33, 0x99, 0x33,
    /* 161, 0xA1 */  0x33, 0x99, 0x00,
    /* 162, 0xA2 */  0x33, 0x66, 0xFF,
    /* 163, 0xA3 */  0x33, 0x66, 0xCC,
    /* 164, 0xA4 */  0x33, 0x66, 0x99,
    /* 165, 0xA5 */  0x33, 0x66, 0x66,
    /* 166, 0xA6 */  0x33, 0x66, 0x33,
    /* 167, 0xA7 */  0x33, 0x66, 0x00,
    /* 168, 0xA8 */  0x33, 0x33, 0xFF,
    /* 169, 0xA9 */  0x33, 0x33, 0xCC,
    /* 170, 0xAA */  0x33, 0x33, 0x99,
    /* 171, 0xAB */  0x33, 0x33, 0x66,
    /* 172, 0xAC */  0x33, 0x33, 0x33,
    /* 173, 0xAD */  0x33, 0x33, 0x00,
    /* 174, 0xAE */  0x33, 0x00, 0xFF,
    /* 175, 0xAF */  0x33, 0x00, 0xCC,
    /* 176, 0xB0 */  0x33, 0x00, 0x99,
    /* 177, 0xB1 */  0x33, 0x00, 0x66,
    /* 178, 0xB2 */  0x33, 0x00, 0x33,
    /* 179, 0xB3 */  0x33, 0x00, 0x00,
    /* 180, 0xB4 */  0x00, 0xFF, 0xFF,
    /* 181, 0xB5 */  0x00, 0xFF, 0xCC,
    /* 182, 0xB6 */  0x00, 0xFF, 0x99,
    /* 183, 0xB7 */  0x00, 0xFF, 0x66,
    /* 184, 0xB8 */  0x00, 0xFF, 0x33,
    /* 185, 0xB9 */  0x00, 0xFF, 0x00,
    /* 186, 0xBA */  0x00, 0xCC, 0xFF,
    /* 187, 0xBB */  0x00, 0xCC, 0xCC,
    /* 188, 0xBC */  0x00, 0xCC, 0x99,
    /* 189, 0xBD */  0x00, 0xCC, 0x66,
    /* 190, 0xBE */  0x00, 0xCC, 0x33,
    /* 191, 0xBF */  0x00, 0xCC, 0x00,
    /* 192, 0xC0 */  0x00, 0x99, 0xFF,
    /* 193, 0xC1 */  0x00, 0x99, 0xCC,
    /* 194, 0xC2 */  0x00, 0x99, 0x99,
    /* 195, 0xC3 */  0x00, 0x99, 0x66,
    /* 196, 0xC4 */  0x00, 0x99, 0x33,
    /* 197, 0xC5 */  0x00, 0x99, 0x00,
    /* 198, 0xC6 */  0x00, 0x66, 0xFF,
    /* 199, 0xC7 */  0x00, 0x66, 0xCC,
    /* 200, 0xC8 */  0x00, 0x66, 0x99,
    /* 201, 0xC9 */  0x00, 0x66, 0x66,
    /* 202, 0xCA */  0x00, 0x66, 0x33,
    /* 203, 0xCB */  0x00, 0x66, 0x00,
    /* 204, 0xCC */  0x00, 0x33, 0xFF,
    /* 205, 0xCD */  0x00, 0x33, 0xCC,
    /* 206, 0xCE */  0x00, 0x33, 0x99,
    /* 207, 0xCF */  0x00, 0x33, 0x66,
    /* 208, 0xD0 */  0x00, 0x33, 0x33,
    /* 209, 0xD1 */  0x00, 0x33, 0x00,
    /* 210, 0xD2 */  0x00, 0x00, 0xFF,
    /* 211, 0xD3 */  0x00, 0x00, 0xCC,
    /* 212, 0xD4 */  0x00, 0x00, 0x99,
    /* 213, 0xD5 */  0x00, 0x00, 0x66,
    /* 214, 0xD6 */  0x00, 0x00, 0x33,
    /* 215, 0xD7 */  0xEE, 0x00, 0x00,
    /* 216, 0xD8 */  0xDD, 0x00, 0x00,
    /* 217, 0xD9 */  0xBB, 0x00, 0x00,
    /* 218, 0xDA */  0xAA, 0x00, 0x00,
    /* 219, 0xDB */  0x88, 0x00, 0x00,
    /* 220, 0xDC */  0x77, 0x00, 0x00,
    /* 221, 0xDD */  0x55, 0x00, 0x00,
    /* 222, 0xDE */  0x44, 0x00, 0x00,
    /* 223, 0xDF */  0x22, 0x00, 0x00,
    /* 224, 0xE0 */  0x11, 0x00, 0x00,
    /* 225, 0xE1 */  0x00, 0xEE, 0x00,
    /* 226, 0xE2 */  0x00, 0xDD, 0x00,
    /* 227, 0xE3 */  0x00, 0xBB, 0x00,
    /* 228, 0xE4 */  0x00, 0xAA, 0x00,
    /* 229, 0xE5 */  0x00, 0x88, 0x00,
    /* 230, 0xE6 */  0x00, 0x77, 0x00,
    /* 231, 0xE7 */  0x00, 0x55, 0x00,
    /* 232, 0xE8 */  0x00, 0x44, 0x00,
    /* 233, 0xE9 */  0x00, 0x22, 0x00,
    /* 234, 0xEA */  0x00, 0x11, 0x00,
    /* 235, 0xEB */  0x00, 0x00, 0xEE,
    /* 236, 0xEC */  0x00, 0x00, 0xDD,
    /* 237, 0xED */  0x00, 0x00, 0xBB,
    /* 238, 0xEE */  0x00, 0x00, 0xAA,
    /* 239, 0xEF */  0x00, 0x00, 0x88,
    /* 240, 0xF0 */  0x00, 0x00, 0x77,
    /* 241, 0xF1 */  0x00, 0x00, 0x55,
    /* 242, 0xF2 */  0x00, 0x00, 0x44,
    /* 243, 0xF3 */  0x00, 0x00, 0x22,
    /* 244, 0xF4 */  0x00, 0x00, 0x11,
    /* 245, 0xF5 */  0xEE, 0xEE, 0xEE,
    /* 246, 0xF6 */  0xDD, 0xDD, 0xDD,
    /* 247, 0xF7 */  0xBB, 0xBB, 0xBB,
    /* 248, 0xF8 */  0xAA, 0xAA, 0xAA,
    /* 249, 0xF9 */  0x88, 0x88, 0x88,
    /* 250, 0xFA */  0x77, 0x77, 0x77,
    /* 251, 0xFB */  0x55, 0x55, 0x55,
    /* 252, 0xFC */  0x44, 0x44, 0x44,
    /* 253, 0xFD */  0x22, 0x22, 0x22,
    /* 254, 0xFE */  0x11, 0x11, 0x11,
    /* 255, 0xFF */  0x00, 0x00, 0x00
};
 
#endif /* AVFORMAT_QTPALETTE_H */
/contrib/sdk/sources/ffmpeg/libavformat/r3d.c
0,0 → 1,408
/*
* R3D REDCODE demuxer
* Copyright (c) 2008 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "avformat.h"
#include "internal.h"
 
/* Demuxer state: video frame index loaded from the 'rdvo' atom. */
typedef struct {
    unsigned video_offsets_count;  /* number of valid entries in video_offsets */
    unsigned *video_offsets;       /* file offsets of video chunks */
    unsigned rdvo_offset;          /* file offset of the 'rdvo' index atom */
} R3DContext;

/* Generic R3D atom header: 32-bit big-endian size, then a 32-bit tag. */
typedef struct {
    unsigned size;
    uint32_t tag;
    uint64_t offset;               /* position of the atom in the file */
} Atom;
 
/**
 * Read the next atom header at the current file position.
 * @return the atom size in bytes (>= 8) on success, -1 on invalid size
 */
static int read_atom(AVFormatContext *s, Atom *atom)
{
    atom->offset = avio_tell(s->pb);
    atom->size = avio_rb32(s->pb);
    /* the size field counts the 8-byte header itself */
    if (atom->size < 8)
        return -1;
    atom->tag = avio_rl32(s->pb);
    av_dlog(s, "atom %u %.4s offset %#"PRIx64"\n",
            atom->size, (char*)&atom->tag, atom->offset);
    return atom->size;
}
 
/**
 * Parse the 'red1' header atom: create the video stream (and an audio
 * stream if the header announces audio channels), reading resolution,
 * time scale, frame rate and the original clip filename.
 */
static int r3d_read_red1(AVFormatContext *s)
{
    AVStream *st = avformat_new_stream(s, NULL);
    char filename[258];
    int tmp;
    int av_unused tmp2;
    AVRational framerate;

    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_JPEG2000;

    tmp  = avio_r8(s->pb); // major version
    tmp2 = avio_r8(s->pb); // minor version
    av_dlog(s, "version %d.%d\n", tmp, tmp2);

    tmp = avio_rb16(s->pb); // unknown
    av_dlog(s, "unknown1 %d\n", tmp);

    tmp = avio_rb32(s->pb);
    avpriv_set_pts_info(st, 32, 1, tmp); /* tmp is the time scale */

    tmp = avio_rb32(s->pb); // filenum
    av_dlog(s, "filenum %d\n", tmp);

    avio_skip(s->pb, 32); // unknown

    st->codec->width  = avio_rb32(s->pb);
    st->codec->height = avio_rb32(s->pb);

    tmp = avio_rb16(s->pb); // unknown
    av_dlog(s, "unknown2 %d\n", tmp);

    framerate.num = avio_rb16(s->pb);
    framerate.den = avio_rb16(s->pb);
    /* only trust a frame rate with positive numerator and denominator */
    if (framerate.num > 0 && framerate.den > 0) {
#if FF_API_R_FRAME_RATE
        st->r_frame_rate =
#endif
        st->avg_frame_rate = framerate;
    }

    tmp = avio_r8(s->pb); // audio channels
    av_dlog(s, "audio channels %d\n", tmp);
    if (tmp > 0) {
        AVStream *ast = avformat_new_stream(s, NULL);
        if (!ast)
            return AVERROR(ENOMEM);
        ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id   = AV_CODEC_ID_PCM_S32BE;
        ast->codec->channels   = tmp;
        avpriv_set_pts_info(ast, 32, 1, st->time_base.den);
    }

    /* original clip filename; terminate defensively before use */
    avio_read(s->pb, filename, 257);
    filename[sizeof(filename)-1] = 0;
    av_dict_set(&st->metadata, "filename", filename, 0);

    av_dlog(s, "filename %s\n", filename);
    av_dlog(s, "resolution %dx%d\n", st->codec->width, st->codec->height);
    av_dlog(s, "timescale %d\n", st->time_base.den);
    av_dlog(s, "frame rate %d/%d\n",
            framerate.num, framerate.den);

    return 0;
}
 
/**
 * Parse the 'rdvo' index atom: a table of 32-bit video chunk offsets,
 * terminated early by a zero entry. Also derives the stream duration
 * from the frame count and the average frame rate.
 */
static int r3d_read_rdvo(AVFormatContext *s, Atom *atom)
{
    R3DContext *r3d = s->priv_data;
    AVStream *st = s->streams[0];
    int i;

    r3d->video_offsets_count = (atom->size - 8) / 4;
    /* NOTE(review): allocates atom->size bytes, 8 more than the table
     * needs — harmless over-allocation */
    r3d->video_offsets = av_malloc(atom->size);
    if (!r3d->video_offsets)
        return AVERROR(ENOMEM);

    for (i = 0; i < r3d->video_offsets_count; i++) {
        r3d->video_offsets[i] = avio_rb32(s->pb);
        if (!r3d->video_offsets[i]) {
            /* a zero offset marks the end of the table */
            r3d->video_offsets_count = i;
            break;
        }
        av_dlog(s, "video offset %d: %#x\n", i, r3d->video_offsets[i]);
    }

    if (st->avg_frame_rate.num)
        st->duration = av_rescale_q(r3d->video_offsets_count,
                                    av_inv_q(st->avg_frame_rate),
                                    st->time_base);
    av_dlog(s, "duration %"PRId64"\n", st->duration);

    return 0;
}
 
/**
 * Parse the 'REOS'/'REOB'/'REOF' end-of-file atom: record the position of
 * the 'RDVO' video offset table and skip the other index offsets/counters.
 */
static void r3d_read_reos(AVFormatContext *s)
{
    R3DContext *r3d = s->priv_data;
    int av_unused tmp;

    r3d->rdvo_offset = avio_rb32(s->pb);
    avio_rb32(s->pb); // rdvs offset
    avio_rb32(s->pb); // rdao offset
    avio_rb32(s->pb); // rdas offset

    tmp = avio_rb32(s->pb);
    av_dlog(s, "num video chunks %d\n", tmp);

    tmp = avio_rb32(s->pb);
    av_dlog(s, "num audio chunks %d\n", tmp);

    avio_skip(s->pb, 6*4); // remaining unknown/reserved fields
}
 
/**
 * Read the R3D file header: parse the leading 'RED1' atom and, when the
 * input is seekable, look for the trailing end atom to load the 'RDVO'
 * seek index, then rewind to the start of the packet data.
 */
static int r3d_read_header(AVFormatContext *s)
{
    R3DContext *r3d = s->priv_data;
    Atom atom;
    int ret;

    if (read_atom(s, &atom) < 0) {
        av_log(s, AV_LOG_ERROR, "error reading atom\n");
        return -1;
    }
    if (atom.tag == MKTAG('R','E','D','1')) {
        if ((ret = r3d_read_red1(s)) < 0) {
            av_log(s, AV_LOG_ERROR, "error parsing 'red1' atom\n");
            return ret;
        }
    } else {
        av_log(s, AV_LOG_ERROR, "could not find 'red1' atom\n");
        return -1;
    }

    s->data_offset = avio_tell(s->pb);
    av_dlog(s, "data offset %#"PRIx64"\n", s->data_offset);
    if (!s->pb->seekable)
        return 0; // index cannot be loaded from a non-seekable input
    // find REOB/REOF/REOS to load index
    // (end atom is expected 48 payload bytes + 8-byte header before EOF)
    avio_seek(s->pb, avio_size(s->pb)-48-8, SEEK_SET);
    if (read_atom(s, &atom) < 0)
        av_log(s, AV_LOG_ERROR, "error reading end atom\n");
    // NOTE(review): if that read fails, atom.tag may be stale ('RED1'),
    // so the check below falls through to 'out' — verify intended.

    if (atom.tag != MKTAG('R','E','O','B') &&
        atom.tag != MKTAG('R','E','O','F') &&
        atom.tag != MKTAG('R','E','O','S'))
        goto out;

    r3d_read_reos(s);

    if (r3d->rdvo_offset) {
        avio_seek(s->pb, r3d->rdvo_offset, SEEK_SET);
        if (read_atom(s, &atom) < 0)
            av_log(s, AV_LOG_ERROR, "error reading 'rdvo' atom\n");
        if (atom.tag == MKTAG('R','D','V','O')) {
            if (r3d_read_rdvo(s, &atom) < 0)
                av_log(s, AV_LOG_ERROR, "error parsing 'rdvo' atom\n");
        }
    }

 out:
    // Rewind so packet reading starts right after the header.
    avio_seek(s->pb, s->data_offset, SEEK_SET);
    return 0;
}
 
/**
 * Parse a 'REDV' (video) chunk into pkt.  The chunk header carries the
 * dts, frame number and optionally extra fields; the remainder of the
 * atom is the frame payload.
 */
static int r3d_read_redv(AVFormatContext *s, AVPacket *pkt, Atom *atom)
{
    AVStream *st = s->streams[0];
    int tmp;
    int av_unused tmp2;
    uint64_t pos = avio_tell(s->pb);
    unsigned dts;
    int ret;

    dts = avio_rb32(s->pb);

    tmp = avio_rb32(s->pb);
    av_dlog(s, "frame num %d\n", tmp);

    tmp  = avio_r8(s->pb); // major version
    tmp2 = avio_r8(s->pb); // minor version
    av_dlog(s, "version %d.%d\n", tmp, tmp2);

    tmp = avio_rb16(s->pb); // unknown
    av_dlog(s, "unknown %d\n", tmp);

    if (tmp > 4) { // presumably an extended header — extra fields follow
        tmp = avio_rb16(s->pb); // unknown
        av_dlog(s, "unknown %d\n", tmp);

        tmp = avio_rb16(s->pb); // unknown
        av_dlog(s, "unknown %d\n", tmp);

        tmp = avio_rb32(s->pb);
        av_dlog(s, "width %d\n", tmp);
        tmp = avio_rb32(s->pb);
        av_dlog(s, "height %d\n", tmp);

        tmp = avio_rb32(s->pb);
        av_dlog(s, "metadata len %d\n", tmp);
    }
    // Payload size = atom size minus 8-byte atom header minus the chunk
    // header bytes consumed so far; negative means a corrupt atom.
    tmp = atom->size - 8 - (avio_tell(s->pb) - pos);
    if (tmp < 0)
        return -1;
    ret = av_get_packet(s->pb, pkt, tmp);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "error reading video packet\n");
        return -1;
    }

    pkt->stream_index = 0;
    pkt->dts = dts;
    // One frame lasts time_base.den * (1/avg_frame_rate) ticks.
    if (st->avg_frame_rate.num)
        pkt->duration = (uint64_t)st->time_base.den*
                        st->avg_frame_rate.den/st->avg_frame_rate.num;
    av_dlog(s, "pkt dts %"PRId64" duration %d\n", pkt->dts, pkt->duration);

    return 0;
}
 
/**
 * Parse a 'REDA' (audio) chunk into pkt.  The chunk header carries the
 * dts, sample rate, sample count and packet number; the rest of the atom
 * is the audio payload.
 */
static int r3d_read_reda(AVFormatContext *s, AVPacket *pkt, Atom *atom)
{
    AVStream *st = s->streams[1];
    int av_unused tmp, tmp2;
    int samples, size;
    uint64_t pos = avio_tell(s->pb);
    unsigned dts;
    int ret;

    dts = avio_rb32(s->pb);

    // The sample rate is carried per-packet rather than in the file header.
    st->codec->sample_rate = avio_rb32(s->pb);
    if (st->codec->sample_rate <= 0) {
        av_log(s, AV_LOG_ERROR, "Bad sample rate\n");
        return AVERROR_INVALIDDATA;
    }

    samples = avio_rb32(s->pb);

    tmp = avio_rb32(s->pb);
    av_dlog(s, "packet num %d\n", tmp);

    tmp = avio_rb16(s->pb); // unknown
    av_dlog(s, "unknown %d\n", tmp);

    tmp  = avio_r8(s->pb); // major version
    tmp2 = avio_r8(s->pb); // minor version
    av_dlog(s, "version %d.%d\n", tmp, tmp2);

    tmp = avio_rb32(s->pb); // unknown
    av_dlog(s, "unknown %d\n", tmp);

    // Payload size = atom size minus 8-byte header minus consumed bytes.
    size = atom->size - 8 - (avio_tell(s->pb) - pos);
    if (size < 0)
        return -1;
    ret = av_get_packet(s->pb, pkt, size);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "error reading audio packet\n");
        return ret;
    }

    pkt->stream_index = 1;
    pkt->dts = dts;
    // Duration in stream time-base units: samples * timescale / sample_rate.
    if (st->codec->sample_rate)
        pkt->duration = av_rescale(samples, st->time_base.den, st->codec->sample_rate);
    av_dlog(s, "pkt dts %"PRId64" duration %d samples %d sample rate %d\n",
            pkt->dts, pkt->duration, samples, st->codec->sample_rate);

    return 0;
}
 
/**
 * Read the next packet: walk atoms, dispatching 'REDV' to the video
 * parser and 'REDA' to the audio parser; anything else (or data for a
 * discarded stream) is skipped.
 */
static int r3d_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    Atom atom;
    int err = 0;

    while (!err) {
        if (read_atom(s, &atom) < 0) {
            err = -1;
            break;
        }
        switch (atom.tag) {
        case MKTAG('R','E','D','V'):
            if (s->streams[0]->discard == AVDISCARD_ALL)
                goto skip;
            if (!(err = r3d_read_redv(s, pkt, &atom)))
                return 0;
            break;
        case MKTAG('R','E','D','A'):
            if (s->nb_streams < 2)
                return -1; // audio chunk but no audio stream was declared
            if (s->streams[1]->discard == AVDISCARD_ALL)
                goto skip;
            if (!(err = r3d_read_reda(s, pkt, &atom)))
                return 0;
            break;
        default:
        skip:
            // skip atom payload (size includes the 8-byte header)
            avio_skip(s->pb, atom.size-8);
        }
    }
    return err;
}
 
/* Probe: an R3D file carries the 'RED1' fourcc at byte offset 4. */
static int r3d_probe(AVProbeData *p)
{
    return AV_RL32(p->buf + 4) == MKTAG('R','E','D','1') ? AVPROBE_SCORE_MAX
                                                         : 0;
}
 
/**
 * Seek to the video frame nearest sample_time using the 'RDVO' offset
 * table loaded at header time.
 * @return 0 on success, -1 if no frame rate is known or the target frame
 *         has no recorded offset.
 */
static int r3d_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
{
    AVStream *st = s->streams[0]; // video stream
    R3DContext *r3d = s->priv_data;
    int frame_num;

    if (!st->avg_frame_rate.num)
        return -1;

    // Convert the timestamp from the stream time base to a frame index.
    frame_num = av_rescale_q(sample_time, st->time_base,
                             av_inv_q(st->avg_frame_rate));
    av_dlog(s, "seek frame num %d timestamp %"PRId64"\n",
            frame_num, sample_time);

    if (frame_num >= 0 && frame_num < r3d->video_offsets_count) {
        // BUGFIX: seek to the recorded file offset of that frame; the old
        // code seeked to video_offsets_count (the table size) instead.
        if (avio_seek(s->pb, r3d->video_offsets[frame_num], SEEK_SET) < 0)
            return -1;
    } else {
        av_log(s, AV_LOG_ERROR, "could not seek to frame %d\n", frame_num);
        return -1;
    }

    return 0;
}
 
/* Demuxer close hook: release the 'RDVO' video offset table. */
static int r3d_close(AVFormatContext *s)
{
    R3DContext *ctx = s->priv_data;

    av_freep(&ctx->video_offsets);
    return 0;
}
 
/* REDCODE R3D demuxer registration. */
AVInputFormat ff_r3d_demuxer = {
    .name           = "r3d",
    .long_name      = NULL_IF_CONFIG_SMALL("REDCODE R3D"),
    .priv_data_size = sizeof(R3DContext),
    .read_probe     = r3d_probe,
    .read_header    = r3d_read_header,
    .read_packet    = r3d_read_packet,
    .read_close     = r3d_close,
    .read_seek      = r3d_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/rawdec.c
0,0 → 1,177
/*
* RAW demuxers
* Copyright (c) 2001 Fabrice Bellard
* Copyright (c) 2005 Alex Beregszaszi
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
#include "rawdec.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avassert.h"
 
#define RAW_PACKET_SIZE 1024

/* Read up to RAW_PACKET_SIZE bytes of the input as one packet; a short
 * read is allowed and the packet is shrunk to the bytes obtained. */
int ff_raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt)
{
    int nread;

    if (av_new_packet(pkt, RAW_PACKET_SIZE) < 0)
        return AVERROR(ENOMEM);

    pkt->pos          = avio_tell(s->pb);
    pkt->stream_index = 0;

    nread = ffio_read_partial(s->pb, pkt->data, RAW_PACKET_SIZE);
    if (nread < 0) {
        av_free_packet(pkt);
        return nread;
    }

    av_shrink_packet(pkt, nread);
    return nread;
}
 
/* Shared header reader for raw audio elementary streams: create one audio
 * stream with the demuxer's raw codec id and let the parser extract the
 * remaining parameters from the compressed bitstream. */
int ff_raw_audio_read_header(AVFormatContext *s)
{
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    stream->codec->codec_id   = s->iformat->raw_codec_id;
    stream->need_parsing      = AVSTREAM_PARSE_FULL_RAW;
    stream->start_time        = 0;

    return 0;
}
 
/* MPEG-1/H.263 input */
/* Shared header reader for raw video elementary streams: create one
 * parsed video stream; the time base comes from the framerate option. */
int ff_raw_video_read_header(AVFormatContext *s)
{
    FFRawVideoDemuxerContext *priv = s->priv_data;
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    stream->codec->codec_id   = s->iformat->raw_codec_id;
    stream->need_parsing      = AVSTREAM_PARSE_FULL_RAW;

    stream->codec->time_base = av_inv_q(priv->framerate);
    avpriv_set_pts_info(stream, 64, 1, 1200000);

    return 0;
}
 
/* Header reader for the generic raw data demuxer: one data stream with
 * the demuxer's raw codec id, no parsing. */
static int ff_raw_data_read_header(AVFormatContext *s)
{
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type = AVMEDIA_TYPE_DATA;
    stream->codec->codec_id   = s->iformat->raw_codec_id;
    stream->start_time        = 0;
    return 0;
}
 
/* Note: Do not forget to add new entries to the Makefile as well. */

#define OFFSET(x) offsetof(FFRawVideoDemuxerContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Private options shared by the raw video demuxers (frame rate only). */
const AVOption ff_rawvideo_options[] = {
    { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, DEC},
    { NULL },
};
 
#if CONFIG_DATA_DEMUXER
/* Generic raw data demuxer: one data stream, fixed-size partial reads. */
AVInputFormat ff_data_demuxer = {
    .name         = "data",
    .long_name    = NULL_IF_CONFIG_SMALL("raw data"),
    .read_header  = ff_raw_data_read_header,
    .read_packet  = ff_raw_read_partial_packet,
    .raw_codec_id = AV_CODEC_ID_NONE,
};
#endif
 
#if CONFIG_LATM_DEMUXER
/* Raw LOAS/LATM AAC demuxer (.latm). */
AVInputFormat ff_latm_demuxer = {
    .name         = "latm",
    .long_name    = NULL_IF_CONFIG_SMALL("raw LOAS/LATM"),
    .read_header  = ff_raw_audio_read_header,
    .read_packet  = ff_raw_read_partial_packet,
    .flags        = AVFMT_GENERIC_INDEX,
    .extensions   = "latm",
    .raw_codec_id = AV_CODEC_ID_AAC_LATM,
};
#endif
 
#if CONFIG_MJPEG_DEMUXER
/* Raw MJPEG video demuxer (.mjpg/.mjpeg/.mpo), built from the macro. */
FF_DEF_RAWVIDEO_DEMUXER(mjpeg, "raw MJPEG video", NULL, "mjpg,mjpeg,mpo", AV_CODEC_ID_MJPEG)
#endif
 
#if CONFIG_MLP_DEMUXER
/* Raw MLP demuxer (.mlp). */
AVInputFormat ff_mlp_demuxer = {
    .name         = "mlp",
    .long_name    = NULL_IF_CONFIG_SMALL("raw MLP"),
    .read_header  = ff_raw_audio_read_header,
    .read_packet  = ff_raw_read_partial_packet,
    .flags        = AVFMT_GENERIC_INDEX,
    .extensions   = "mlp",
    .raw_codec_id = AV_CODEC_ID_MLP,
};
#endif
 
#if CONFIG_TRUEHD_DEMUXER
/* Raw TrueHD demuxer (.thd). */
AVInputFormat ff_truehd_demuxer = {
    .name         = "truehd",
    .long_name    = NULL_IF_CONFIG_SMALL("raw TrueHD"),
    .read_header  = ff_raw_audio_read_header,
    .read_packet  = ff_raw_read_partial_packet,
    .flags        = AVFMT_GENERIC_INDEX,
    .extensions   = "thd",
    .raw_codec_id = AV_CODEC_ID_TRUEHD,
};
#endif
 
#if CONFIG_SHORTEN_DEMUXER
/* Raw Shorten demuxer (.shn); seeking in the byte stream is disabled. */
AVInputFormat ff_shorten_demuxer = {
    .name         = "shn",
    .long_name    = NULL_IF_CONFIG_SMALL("raw Shorten"),
    .read_header  = ff_raw_audio_read_header,
    .read_packet  = ff_raw_read_partial_packet,
    .flags        = AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK,
    .extensions   = "shn",
    .raw_codec_id = AV_CODEC_ID_SHORTEN,
};
#endif
 
#if CONFIG_VC1_DEMUXER
/* Raw VC-1 video demuxer (.vc1), built from the macro. */
FF_DEF_RAWVIDEO_DEMUXER(vc1, "raw VC-1", NULL, "vc1", AV_CODEC_ID_VC1)
#endif
/contrib/sdk/sources/ffmpeg/libavformat/rawdec.h
0,0 → 1,67
/*
* RAW demuxers
* Copyright (C) 2007 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RAWDEC_H
#define AVFORMAT_RAWDEC_H
 
#include "avformat.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
 
/** Private option storage shared by the raw video demuxers. */
typedef struct FFRawVideoDemuxerContext {
    const AVClass *class; /**< Class for private options. */
    char *video_size;     /**< String describing video size, set by a private option. */
    char *pixel_format;   /**< Set by a private option. */
    AVRational framerate; /**< AVRational describing framerate, set by a private option. */
} FFRawVideoDemuxerContext;
 
extern const AVOption ff_rawvideo_options[];

/** Read up to a fixed-size chunk of input as one packet (may be short). */
int ff_raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt);

/** Create a single fully-parsed raw audio stream. */
int ff_raw_audio_read_header(AVFormatContext *s);

/** Create a single fully-parsed raw video stream; time base from options. */
int ff_raw_video_read_header(AVFormatContext *s);
 
/** Declare the AVClass used for a raw video demuxer's private options. */
#define FF_RAWVIDEO_DEMUXER_CLASS(name)\
static const AVClass name ## _demuxer_class = {\
    .class_name = #name " demuxer",\
    .item_name  = av_default_item_name,\
    .option     = ff_rawvideo_options,\
    .version    = LIBAVUTIL_VERSION_INT,\
};
 
/** Define a complete raw video AVInputFormat along with its options class. */
#define FF_DEF_RAWVIDEO_DEMUXER(shortname, longname, probe, ext, id)\
FF_RAWVIDEO_DEMUXER_CLASS(shortname)\
AVInputFormat ff_ ## shortname ## _demuxer = {\
    .name           = #shortname,\
    .long_name      = NULL_IF_CONFIG_SMALL(longname),\
    .read_probe     = probe,\
    .read_header    = ff_raw_video_read_header,\
    .read_packet    = ff_raw_read_partial_packet,\
    .extensions     = ext,\
    .flags          = AVFMT_GENERIC_INDEX,\
    .raw_codec_id   = id,\
    .priv_data_size = sizeof(FFRawVideoDemuxerContext),\
    .priv_class     = &shortname ## _demuxer_class,\
};
 
#endif /* AVFORMAT_RAWDEC_H */
/contrib/sdk/sources/ffmpeg/libavformat/rawenc.c
0,0 → 1,290
/*
* RAW muxers
* Copyright (c) 2001 Fabrice Bellard
* Copyright (c) 2005 Alex Beregszaszi
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rawenc.h"
 
/* Write the packet payload verbatim to the output; raw muxers add no framing. */
int ff_raw_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *out = s->pb;

    avio_write(out, pkt->data, pkt->size);
    return 0;
}
 
/* Note: Do not forget to add new entries to the Makefile as well. */
 
#if CONFIG_AC3_MUXER
/* Raw AC-3 elementary-stream muxer (.ac3). */
AVOutputFormat ff_ac3_muxer = {
    .name         = "ac3",
    .long_name    = NULL_IF_CONFIG_SMALL("raw AC-3"),
    .mime_type    = "audio/x-ac3",
    .extensions   = "ac3",
    .audio_codec  = AV_CODEC_ID_AC3,
    .video_codec  = AV_CODEC_ID_NONE,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_ADX_MUXER
/* CRI ADX muxer (.adx). */
AVOutputFormat ff_adx_muxer = {
    .name         = "adx",
    .long_name    = NULL_IF_CONFIG_SMALL("CRI ADX"),
    .extensions   = "adx",
    .audio_codec  = AV_CODEC_ID_ADPCM_ADX,
    .video_codec  = AV_CODEC_ID_NONE,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_CAVSVIDEO_MUXER
/* Raw Chinese AVS video muxer (.cavs). */
AVOutputFormat ff_cavsvideo_muxer = {
    .name         = "cavsvideo",
    .long_name    = NULL_IF_CONFIG_SMALL("raw Chinese AVS (Audio Video Standard) video"),
    .extensions   = "cavs",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_CAVS,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_DATA_MUXER
/* Generic raw data muxer: packets pass through unchanged. */
AVOutputFormat ff_data_muxer = {
    .name         = "data",
    .long_name    = NULL_IF_CONFIG_SMALL("raw data"),
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_DIRAC_MUXER
/* Raw Dirac muxer (.drc). */
AVOutputFormat ff_dirac_muxer = {
    .name         = "dirac",
    .long_name    = NULL_IF_CONFIG_SMALL("raw Dirac"),
    .extensions   = "drc",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_DIRAC,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_DNXHD_MUXER
/* Raw DNxHD (SMPTE VC-3) muxer (.dnxhd). */
AVOutputFormat ff_dnxhd_muxer = {
    .name         = "dnxhd",
    .long_name    = NULL_IF_CONFIG_SMALL("raw DNxHD (SMPTE VC-3)"),
    .extensions   = "dnxhd",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_DNXHD,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_DTS_MUXER
/* Raw DTS muxer (.dts). */
AVOutputFormat ff_dts_muxer = {
    .name         = "dts",
    .long_name    = NULL_IF_CONFIG_SMALL("raw DTS"),
    .mime_type    = "audio/x-dca",
    .extensions   = "dts",
    .audio_codec  = AV_CODEC_ID_DTS,
    .video_codec  = AV_CODEC_ID_NONE,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_EAC3_MUXER
/* Raw E-AC-3 muxer (.eac3). */
AVOutputFormat ff_eac3_muxer = {
    .name         = "eac3",
    .long_name    = NULL_IF_CONFIG_SMALL("raw E-AC-3"),
    .mime_type    = "audio/x-eac3",
    .extensions   = "eac3",
    .audio_codec  = AV_CODEC_ID_EAC3,
    .video_codec  = AV_CODEC_ID_NONE,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_G722_MUXER
/* Raw G.722 ADPCM muxer (.g722). */
AVOutputFormat ff_g722_muxer = {
    .name         = "g722",
    .long_name    = NULL_IF_CONFIG_SMALL("raw G.722"),
    .mime_type    = "audio/G722",
    .extensions   = "g722",
    .audio_codec  = AV_CODEC_ID_ADPCM_G722,
    .video_codec  = AV_CODEC_ID_NONE,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_G723_1_MUXER
/* Raw G.723.1 muxer (.tco/.rco). */
AVOutputFormat ff_g723_1_muxer = {
    .name         = "g723_1",
    .long_name    = NULL_IF_CONFIG_SMALL("raw G.723.1"),
    .mime_type    = "audio/g723",
    .extensions   = "tco,rco",
    .audio_codec  = AV_CODEC_ID_G723_1,
    .video_codec  = AV_CODEC_ID_NONE,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_H261_MUXER
/* Raw H.261 muxer (.h261). */
AVOutputFormat ff_h261_muxer = {
    .name         = "h261",
    .long_name    = NULL_IF_CONFIG_SMALL("raw H.261"),
    .mime_type    = "video/x-h261",
    .extensions   = "h261",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_H261,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_H263_MUXER
/* Raw H.263 muxer (.h263). */
AVOutputFormat ff_h263_muxer = {
    .name         = "h263",
    .long_name    = NULL_IF_CONFIG_SMALL("raw H.263"),
    .mime_type    = "video/x-h263",
    .extensions   = "h263",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_H263,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_H264_MUXER
/* Raw H.264 Annex-B muxer (.h264). */
AVOutputFormat ff_h264_muxer = {
    .name         = "h264",
    .long_name    = NULL_IF_CONFIG_SMALL("raw H.264 video"),
    .extensions   = "h264",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_H264,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_M4V_MUXER
/* Raw MPEG-4 video muxer (.m4v). */
AVOutputFormat ff_m4v_muxer = {
    .name         = "m4v",
    .long_name    = NULL_IF_CONFIG_SMALL("raw MPEG-4 video"),
    .extensions   = "m4v",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_MPEG4,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_MJPEG_MUXER
/* Raw MJPEG video muxer (.mjpg/.mjpeg). */
AVOutputFormat ff_mjpeg_muxer = {
    .name         = "mjpeg",
    .long_name    = NULL_IF_CONFIG_SMALL("raw MJPEG video"),
    .mime_type    = "video/x-mjpeg",
    .extensions   = "mjpg,mjpeg",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_MJPEG,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_MLP_MUXER
/* Raw MLP muxer (.mlp). */
AVOutputFormat ff_mlp_muxer = {
    .name         = "mlp",
    .long_name    = NULL_IF_CONFIG_SMALL("raw MLP"),
    .extensions   = "mlp",
    .audio_codec  = AV_CODEC_ID_MLP,
    .video_codec  = AV_CODEC_ID_NONE,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_MPEG1VIDEO_MUXER
/* Raw MPEG-1 video muxer (.mpg/.mpeg/.m1v). */
AVOutputFormat ff_mpeg1video_muxer = {
    .name         = "mpeg1video",
    .long_name    = NULL_IF_CONFIG_SMALL("raw MPEG-1 video"),
    .mime_type    = "video/x-mpeg",
    .extensions   = "mpg,mpeg,m1v",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_MPEG1VIDEO,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_MPEG2VIDEO_MUXER
/* Raw MPEG-2 video muxer (.m2v). */
AVOutputFormat ff_mpeg2video_muxer = {
    .name         = "mpeg2video",
    .long_name    = NULL_IF_CONFIG_SMALL("raw MPEG-2 video"),
    .extensions   = "m2v",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_MPEG2VIDEO,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_RAWVIDEO_MUXER
/* Raw (headerless) video muxer (.yuv/.rgb). */
AVOutputFormat ff_rawvideo_muxer = {
    .name         = "rawvideo",
    .long_name    = NULL_IF_CONFIG_SMALL("raw video"),
    .extensions   = "yuv,rgb",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_RAWVIDEO,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_TRUEHD_MUXER
/* Raw TrueHD muxer (.thd). */
AVOutputFormat ff_truehd_muxer = {
    .name         = "truehd",
    .long_name    = NULL_IF_CONFIG_SMALL("raw TrueHD"),
    .extensions   = "thd",
    .audio_codec  = AV_CODEC_ID_TRUEHD,
    .video_codec  = AV_CODEC_ID_NONE,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
 
#if CONFIG_VC1_MUXER
/* Raw VC-1 video muxer (.vc1). */
AVOutputFormat ff_vc1_muxer = {
    .name         = "vc1",
    .long_name    = NULL_IF_CONFIG_SMALL("raw VC-1 video"),
    .extensions   = "vc1",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_VC1,
    .write_packet = ff_raw_write_packet,
    .flags        = AVFMT_NOTIMESTAMPS,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/rawenc.h
0,0 → 1,29
/*
* RAW muxers
* Copyright (C) 2007 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RAWENC_H
#define AVFORMAT_RAWENC_H

#include "avformat.h"

/**
 * Write pkt's payload to the output unchanged; shared by all raw muxers.
 * @return 0 (always succeeds).
 */
int ff_raw_write_packet(AVFormatContext *s, AVPacket *pkt);

#endif /* AVFORMAT_RAWENC_H */
/contrib/sdk/sources/ffmpeg/libavformat/rawvideodec.c
0,0 → 1,115
/*
* RAW video demuxer
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "internal.h"
#include "avformat.h"
 
/** Private options of the rawvideo demuxer. */
typedef struct RawVideoDemuxerContext {
    const AVClass *class; /**< Class for private options. */
    int width, height;    /**< Integers describing video size, set by a private option. */
    char *pixel_format;   /**< Set by a private option. */
    AVRational framerate; /**< AVRational describing framerate, set by a private option. */
} RawVideoDemuxerContext;
 
 
/* Set up the single raw-video stream from the private options: frame
 * size, pixel format and frame rate.  The bit rate is derived from the
 * per-frame picture size scaled by the configured time base. */
static int rawvideo_read_header(AVFormatContext *ctx)
{
    RawVideoDemuxerContext *priv = ctx->priv_data;
    enum AVPixelFormat fmt;
    AVStream *stream = avformat_new_stream(ctx, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    stream->codec->codec_id   = ctx->iformat->raw_codec_id;

    fmt = av_get_pix_fmt(priv->pixel_format);
    if (fmt == AV_PIX_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR, "No such pixel format: %s.\n",
               priv->pixel_format);
        return AVERROR(EINVAL);
    }

    avpriv_set_pts_info(stream, 64, priv->framerate.den, priv->framerate.num);

    stream->codec->width    = priv->width;
    stream->codec->height   = priv->height;
    stream->codec->pix_fmt  = fmt;
    stream->codec->bit_rate = av_rescale_q(avpicture_get_size(fmt, priv->width, priv->height),
                                           (AVRational){8,1}, stream->time_base);

    return 0;
}
 
 
/**
 * Read one raw video frame.  Each frame occupies exactly
 * avpicture_get_size() bytes, so the timestamp is the byte position
 * divided by the frame size.
 */
static int rawvideo_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int packet_size, ret, width, height;
    AVStream *st = s->streams[0];

    width  = st->codec->width;
    height = st->codec->height;

    packet_size = avpicture_get_size(st->codec->pix_fmt, width, height);
    /* BUGFIX: a zero frame size (e.g. 0x0 dimensions) previously slipped
     * past the `< 0` check and caused a division by zero below; reject it
     * together with avpicture_get_size() errors. */
    if (packet_size <= 0)
        return -1;

    ret = av_get_packet(s->pb, pkt, packet_size);
    pkt->pts = pkt->dts = pkt->pos / packet_size;

    pkt->stream_index = 0;
    if (ret < 0)
        return ret;
    return 0;
}
 
#define OFFSET(x) offsetof(RawVideoDemuxerContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Private options: frame size (fills width and height), pixel format, frame rate. */
static const AVOption rawvideo_options[] = {
    { "video_size", "set frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
    { "pixel_format", "set pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = "yuv420p"}, 0, 0, DEC },
    { "framerate", "set frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, DEC },
    { NULL },
};
 
/* AVClass exposing the private options above. */
static const AVClass rawvideo_demuxer_class = {
    .class_name = "rawvideo demuxer",
    .item_name  = av_default_item_name,
    .option     = rawvideo_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Headerless raw video demuxer registration (.yuv/.cif/.qcif/.rgb). */
AVInputFormat ff_rawvideo_demuxer = {
    .name           = "rawvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("raw video"),
    .priv_data_size = sizeof(RawVideoDemuxerContext),
    .read_header    = rawvideo_read_header,
    .read_packet    = rawvideo_read_packet,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "yuv,cif,qcif,rgb",
    .raw_codec_id   = AV_CODEC_ID_RAWVIDEO,
    .priv_class     = &rawvideo_demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/rdt.c
0,0 → 1,573
/*
* Realmedia RTSP protocol (RDT) support.
* Copyright (c) 2007 Ronald S. Bultje
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief Realmedia RTSP protocol (RDT) support
* @author Ronald S. Bultje <rbultje@ronald.bitfreak.net>
*/
 
#include "avformat.h"
#include "libavutil/avstring.h"
#include "rtpdec.h"
#include "rdt.h"
#include "libavutil/base64.h"
#include "libavutil/md5.h"
#include "rm.h"
#include "internal.h"
#include "avio_internal.h"
#include "libavcodec/get_bits.h"
 
/** State for demuxing one RDT stream-set within an RTSP session. */
struct RDTDemuxContext {
    AVFormatContext *ic; /**< the containing (RTSP) demux context */
    /** Each RDT stream-set (represented by one RTSPStream) can contain
     * multiple streams (of the same content, but with possibly different
     * codecs/bitrates). Each such stream is represented by one AVStream
     * in the AVFormatContext, and this variable points to the offset in
     * that array such that the first is the first stream of this set. */
    AVStream **streams;
    int n_streams; /**< streams with identical content in this set */
    void *dynamic_protocol_context; /**< payload-specific parser state, owned by the caller */
    DynamicPayloadPacketHandlerProc parse_packet; /**< payload-specific packet parser */
    uint32_t prev_timestamp; /**< previously seen timestamp (initialized to -1) */
    int prev_set_id, prev_stream_id; /**< previously seen ids (initialized to -1) */
};
 
/**
 * Open an RDT parse context for the set of streams starting at
 * ic->streams[first_stream_of_set_idx]; consecutive AVStreams sharing the
 * first stream's id form one set of alternates for the same content.
 * priv_data/handler supply the payload-specific parser; priv_data remains
 * owned by the caller (ff_rdt_parse_close() does not free it).
 * @return the new context, or NULL on allocation failure.
 */
RDTDemuxContext *
ff_rdt_parse_open(AVFormatContext *ic, int first_stream_of_set_idx,
                  void *priv_data, RTPDynamicProtocolHandler *handler)
{
    RDTDemuxContext *s = av_mallocz(sizeof(RDTDemuxContext));
    if (!s)
        return NULL;

    s->ic = ic;
    s->streams = &ic->streams[first_stream_of_set_idx];
    // Count consecutive streams whose id matches the first stream's id.
    do {
        s->n_streams++;
    } while (first_stream_of_set_idx + s->n_streams < ic->nb_streams &&
             s->streams[s->n_streams]->id == s->streams[0]->id);
    s->prev_set_id    = -1;
    s->prev_stream_id = -1;
    s->prev_timestamp = -1;
    s->parse_packet = handler ? handler->parse_packet : NULL;
    s->dynamic_protocol_context = priv_data;

    return s;
}
 
/**
 * Free a context allocated by ff_rdt_parse_open().  The caller-provided
 * dynamic_protocol_context is intentionally not freed here.
 */
void
ff_rdt_parse_close(RDTDemuxContext *s)
{
    av_free(s);
}
 
/** Payload-specific state for the RealMedia-over-RDT depacketizer. */
struct PayloadContext {
    AVFormatContext *rmctx;      /**< helper RM demuxer context */
    int nb_rmst;                 /**< number of entries in rmst */
    RMStream **rmst;             /**< per-stream RM demuxer state, indexed by AVStream index */
    uint8_t *mlti_data;          /**< raw MLTI chunk, parsed by rdt_load_mdpr() */
    unsigned int mlti_data_size; /**< size of mlti_data in bytes */
    char buffer[RTP_MAX_PACKET_LENGTH + FF_INPUT_BUFFER_PADDING_SIZE];
    int audio_pkt_cnt; /**< remaining audio packets in rmdec */
};
 
/**
 * Compute the RealChallenge2 response and checksum for a RealChallenge1
 * string: the challenge is XOR-obfuscated into a fixed 64-byte buffer,
 * MD5-summed and hex-encoded, then a fixed 8-character tail is appended.
 * @param response  receives 40 hex chars + NUL
 * @param chksum    receives 8 chars + NUL (every 4th char of response)
 * @param challenge NUL-terminated challenge text
 */
void
ff_rdt_calc_response_and_checksum(char response[41], char chksum[9],
                                  const char *challenge)
{
    int ch_len = strlen (challenge), i;
    unsigned char zres[16],
        buf[64] = { 0xa1, 0xe9, 0x14, 0x9d, 0x0e, 0x6b, 0x3b, 0x59 };
#define XOR_TABLE_SIZE 37
    static const unsigned char xor_table[XOR_TABLE_SIZE] = {
        0x05, 0x18, 0x74, 0xd0, 0x0d, 0x09, 0x02, 0x53,
        0xc0, 0x01, 0x05, 0x05, 0x67, 0x03, 0x19, 0x70,
        0x08, 0x27, 0x66, 0x10, 0x10, 0x72, 0x08, 0x09,
        0x63, 0x11, 0x03, 0x71, 0x08, 0x08, 0x70, 0x02,
        0x10, 0x57, 0x05, 0x18, 0x54 };

    /* some (length) checks */
    if (ch_len == 40) /* what a hack... */
        ch_len = 32;
    else if (ch_len > 56)
        ch_len = 56;
    memcpy(buf + 8, challenge, ch_len);

    /* xor challenge bytewise with xor_table */
    for (i = 0; i < XOR_TABLE_SIZE; i++)
        buf[8 + i] ^= xor_table[i];

    av_md5_sum(zres, buf, 64);
    ff_data_to_hex(response, zres, 16, 1);

    /* add tail */
    strcpy (response + 32, "01d0a8e3");

    /* calculate checksum: every 4th character of the hex response */
    for (i = 0; i < 8; i++)
        chksum[i] = response[i * 4];
    chksum[8] = 0;
}
 
/**
 * Locate the MDPR chunk for rule_nr inside the MLTI data (if present) and
 * feed it to the RM demuxer to initialize stream st's codec parameters.
 * @return 0 on success, -1 on missing/malformed data.
 */
static int
rdt_load_mdpr (PayloadContext *rdt, AVStream *st, int rule_nr)
{
    AVIOContext pb;
    int size;
    uint32_t tag;

    /**
     * Layout of the MLTI chunk:
     * 4: MLTI
     * 2: number of streams
     * Then for each stream ([number_of_streams] times):
     * 2: mdpr index
     * 2: number of mdpr chunks
     * Then for each mdpr chunk ([number_of_mdpr_chunks] times):
     * 4: size
     * [size]: data
     * we skip MDPR chunks until we reach the one of the stream
     * we're interested in, and forward that ([size]+[data]) to
     * the RM demuxer to parse the stream-specific header data.
     */
    if (!rdt->mlti_data)
        return -1;
    ffio_init_context(&pb, rdt->mlti_data, rdt->mlti_data_size, 0,
                      NULL, NULL, NULL, NULL);
    tag = avio_rl32(&pb);
    if (tag == MKTAG('M', 'L', 'T', 'I')) {
        int num, chunk_nr;

        /* read index of MDPR chunk numbers */
        num = avio_rb16(&pb);
        if (rule_nr < 0 || rule_nr >= num)
            return -1;
        avio_skip(&pb, rule_nr * 2);
        chunk_nr = avio_rb16(&pb);
        avio_skip(&pb, (num - 1 - rule_nr) * 2);

        /* read MDPR chunks */
        num = avio_rb16(&pb);
        if (chunk_nr >= num)
            return -1;
        while (chunk_nr--)
            avio_skip(&pb, avio_rb32(&pb));
        size = avio_rb32(&pb);
    } else {
        // No MLTI wrapper: the whole buffer is a single MDPR chunk.
        size = rdt->mlti_data_size;
        avio_seek(&pb, 0, SEEK_SET);
    }
    if (ff_rm_read_mdpr_codecdata(rdt->rmctx, &pb, st, rdt->rmst[st->index], size, NULL) < 0)
        return -1;

    return 0;
}
 
/**
* Actual data handling.
*/
 
/**
 * Parse an RDT packet header and report its fields.
 * Leading stream-status packets (second byte 0xFF) are skipped first.
 * See the layout comment below for the bit-level format.
 *
 * @return number of bytes consumed (status packets + header), or -1 on
 *         malformed input
 */
int
ff_rdt_parse_header(const uint8_t *buf, int len,
                    int *pset_id, int *pseq_no, int *pstream_id,
                    int *pis_keyframe, uint32_t *ptimestamp)
{
    GetBitContext gb;
    int consumed = 0, set_id, seq_no, stream_id, is_keyframe,
        len_included, need_reliable;
    uint32_t timestamp;

    /* skip status packets */
    while (len >= 5 && buf[1] == 0xFF /* status packet */) {
        int pkt_len;

        if (!(buf[0] & 0x80))
            return -1; /* not followed by a data packet */

        pkt_len = AV_RB16(buf+3);
        /* a zero-length status packet would make no progress here and
         * hang this loop forever on malformed/hostile input */
        if (!pkt_len)
            return -1;
        buf      += pkt_len;
        len      -= pkt_len;
        consumed += pkt_len;
    }
    if (len < 16)
        return -1;
    /**
     * Layout of the header (in bits):
     * 1:  len_included
     *     Flag indicating whether this header includes a length field;
     *     this can be used to concatenate multiple RDT packets in a
     *     single UDP/TCP data frame and is used to precede RDT data
     *     by stream status packets
     * 1:  need_reliable
     *     Flag indicating whether this header includes a "reliable
     *     sequence number"; these are apparently sequence numbers of
     *     data packets alone. For data packets, this flag is always
     *     set, according to the Real documentation [1]
     * 5:  set_id
     *     ID of a set of streams of identical content, possibly with
     *     different codecs or bitrates
     * 1:  is_reliable
     *     Flag set for certain streams deemed less tolerable for packet
     *     loss
     * 16: seq_no
     *     Packet sequence number; if >=0xFF00, this is a non-data packet
     *     containing stream status info, the second byte indicates the
     *     type of status packet (see wireshark docs / source code [2])
     * if (len_included) {
     *     16: packet_len
     * } else {
     *     packet_len = remainder of UDP/TCP frame
     * }
     * 1:  is_back_to_back
     *     Back-to-Back flag; used for timing, set for one in every 10
     *     packets, according to the Real documentation [1]
     * 1:  is_slow_data
     *     Slow-data flag; currently unused, according to Real docs [1]
     * 5:  stream_id
     *     ID of the stream within this particular set of streams
     * 1:  is_no_keyframe
     *     Non-keyframe flag (unset if packet belongs to a keyframe)
     * 32: timestamp (PTS)
     * if (set_id == 0x1F) {
     *     16: set_id (extended set-of-streams ID; see set_id)
     * }
     * if (need_reliable) {
     *     16: reliable_seq_no
     *         Reliable sequence number (see need_reliable)
     * }
     * if (stream_id == 0x1F) {
     *     16: stream_id (extended stream ID; see stream_id)
     * }
     * [1] https://protocol.helixcommunity.org/files/2005/devdocs/RDT_Feature_Level_20.txt
     * [2] http://www.wireshark.org/docs/dfref/r/rdt.html and
     *     http://anonsvn.wireshark.org/viewvc/trunk/epan/dissectors/packet-rdt.c
     */
    init_get_bits(&gb, buf, len << 3);
    len_included  = get_bits1(&gb);
    need_reliable = get_bits1(&gb);
    set_id        = get_bits(&gb, 5);
    skip_bits(&gb, 1);
    seq_no        = get_bits(&gb, 16);
    if (len_included)
        skip_bits(&gb, 16);
    skip_bits(&gb, 2);
    stream_id     = get_bits(&gb, 5);
    is_keyframe   = !get_bits1(&gb);
    timestamp     = get_bits_long(&gb, 32);
    if (set_id == 0x1f)
        set_id    = get_bits(&gb, 16);
    if (need_reliable)
        skip_bits(&gb, 16);
    if (stream_id == 0x1f)
        stream_id = get_bits(&gb, 16);

    if (pset_id)      *pset_id      = set_id;
    if (pseq_no)      *pseq_no      = seq_no;
    if (pstream_id)   *pstream_id   = stream_id;
    if (pis_keyframe) *pis_keyframe = is_keyframe;
    if (ptimestamp)   *ptimestamp   = timestamp;

    return consumed + (get_bits_count(&gb) >> 3);
}
 
/**
 * Parse one RDT data chunk and fill @p pkt with the next media packet.
 * When called with buf == NULL (audio_pkt_cnt > 0), it returns the next
 * packet cached by the RM parser from the previous chunk.
 *
 * @return >0 if more cached packets remain after this one, 0 if this was
 *         the last one, negative on error.
 */
static int
rdt_parse_packet (AVFormatContext *ctx, PayloadContext *rdt, AVStream *st,
                  AVPacket *pkt, uint32_t *timestamp,
                  const uint8_t *buf, int len, uint16_t rtp_seq, int flags)
{
    int seq = 1, res;
    AVIOContext pb;

    if (rdt->audio_pkt_cnt == 0) {
        int pos;

        /* fresh chunk: hand the raw payload to the RM packet parser */
        ffio_init_context(&pb, buf, len, 0, NULL, NULL, NULL, NULL);
        flags = (flags & RTP_FLAG_KEY) ? 2 : 0;
        res = ff_rm_parse_packet (rdt->rmctx, &pb, st, rdt->rmst[st->index], len, pkt,
                                  &seq, flags, *timestamp);
        pos = avio_tell(&pb);
        if (res < 0)
            return res;
        if (res > 0) {
            /* multiple audio frames in one chunk; for AAC, stash the
             * remainder so the cache retrieval below can read from it */
            if (st->codec->codec_id == AV_CODEC_ID_AAC) {
                memcpy (rdt->buffer, buf + pos, len - pos);
                rdt->rmctx->pb = avio_alloc_context (rdt->buffer, len - pos, 0,
                                                     NULL, NULL, NULL, NULL);
            }
            goto get_cache; /* falls into the cache-draining branch below */
        }
    } else {
get_cache:
        /* drain one packet from the RM parser's cache */
        rdt->audio_pkt_cnt =
            ff_rm_retrieve_cache (rdt->rmctx, rdt->rmctx->pb,
                                  st, rdt->rmst[st->index], pkt);
        /* cache exhausted: release the temporary AAC byte context */
        if (rdt->audio_pkt_cnt == 0 &&
            st->codec->codec_id == AV_CODEC_ID_AAC)
            av_freep(&rdt->rmctx->pb);
    }
    pkt->stream_index = st->index;
    pkt->pts = *timestamp;

    return rdt->audio_pkt_cnt > 0;
}
 
/**
 * Demux one RDT packet: parse and strip the RDT header, then hand the
 * payload to the registered per-stream parse_packet callback.
 *
 * @param s      RDT demux context
 * @param pkt    output packet
 * @param bufptr pointer to the raw packet data; NULL requests the next
 *               packet cached by a previous call
 * @param len    length of *bufptr
 * @return the payload handler's return value (>0 if more packets are
 *         cached), or negative on error
 */
int
ff_rdt_parse_packet(RDTDemuxContext *s, AVPacket *pkt,
                    uint8_t **bufptr, int len)
{
    uint8_t *buf = bufptr ? *bufptr : NULL;
    int seq_no, flags = 0, stream_id, set_id, is_keyframe;
    uint32_t timestamp;
    int rv= 0;

    if (!s->parse_packet)
        return -1;

    if (!buf && s->prev_stream_id != -1) {
        /* return the next packets, if any */
        timestamp= 0; ///< Should not be used if buf is NULL, but should be set to the timestamp of the packet returned....
        rv= s->parse_packet(s->ic, s->dynamic_protocol_context,
                            s->streams[s->prev_stream_id],
                            pkt, &timestamp, NULL, 0, 0, flags);
        return rv;
    }

    if (len < 12)
        return -1;
    rv = ff_rdt_parse_header(buf, len, &set_id, &seq_no, &stream_id, &is_keyframe, &timestamp);
    if (rv < 0)
        return rv;
    /* a keyframe in a new set/timestamp/stream starts a new key unit */
    if (is_keyframe &&
        (set_id != s->prev_set_id || timestamp != s->prev_timestamp ||
         stream_id != s->prev_stream_id)) {
        flags |= RTP_FLAG_KEY;
        s->prev_set_id = set_id;
        s->prev_timestamp = timestamp;
    }
    s->prev_stream_id = stream_id;
    buf += rv; /* skip past the RDT header */
    len -= rv;

    /* out-of-range stream: reset state so the NULL-buf path isn't taken */
    if (s->prev_stream_id >= s->n_streams) {
        s->prev_stream_id = -1;
        return -1;
    }

    rv = s->parse_packet(s->ic, s->dynamic_protocol_context,
                         s->streams[s->prev_stream_id],
                         pkt, &timestamp, buf, len, 0, flags);

    return rv;
}
 
/**
 * Append the subscription string for one rule of one stream to @p cmd.
 * Each logical rule maps to a pair of wire rules (2n and 2n+1).
 */
void
ff_rdt_subscribe_rule (char *cmd, int size,
                       int stream_nr, int rule_nr)
{
    int base_rule = rule_nr * 2;

    av_strlcatf(cmd, size, "stream=%d;rule=%d,stream=%d;rule=%d",
                stream_nr, base_rule, stream_nr, base_rule + 1);
}
 
static unsigned char *
rdt_parse_b64buf (unsigned int *target_len, const char *p)
{
unsigned char *target;
int len = strlen(p);
if (*p == '\"') {
p++;
len -= 2; /* skip embracing " at start/end */
}
*target_len = len * 3 / 4;
target = av_mallocz(*target_len + FF_INPUT_BUFFER_PADDING_SIZE);
av_base64_decode(target, p, *target_len);
return target;
}
 
/**
 * Handle one RDT-specific SDP attribute line for the stream at st_index.
 * Recognizes OpaqueData (base64 MLTI codec data), StartTime, and
 * ASMRuleBook (triggers codec-data loading for the whole stream set).
 *
 * @return 0 on success or an AVERROR code on allocation failure
 */
static int
rdt_parse_sdp_line (AVFormatContext *s, int st_index,
                    PayloadContext *rdt, const char *line)
{
    AVStream *stream = s->streams[st_index];
    const char *p = line;

    if (av_strstart(p, "OpaqueData:buffer;", &p)) {
        /* base64-encoded MLTI chunk with per-rule codec setup data */
        rdt->mlti_data = rdt_parse_b64buf(&rdt->mlti_data_size, p);
    } else if (av_strstart(p, "StartTime:integer;", &p))
        stream->first_dts = atoi(p);
    else if (av_strstart(p, "ASMRuleBook:string;", &p)) {
        int n, first = -1;

        /* visit every stream sharing this stream's id (same content in
         * the set) and load its MDPR codec data */
        for (n = 0; n < s->nb_streams; n++)
            if (s->streams[n]->id == stream->id) {
                int count = s->streams[n]->index + 1, err;
                if (first == -1) first = n;
                /* grow rmst[] so it can be indexed by stream index,
                 * zeroing the newly added slots */
                if (rdt->nb_rmst < count) {
                    if ((err = av_reallocp(&rdt->rmst,
                                           count * sizeof(*rdt->rmst))) < 0) {
                        rdt->nb_rmst = 0;
                        return err;
                    }
                    memset(rdt->rmst + rdt->nb_rmst, 0,
                           (count - rdt->nb_rmst) * sizeof(*rdt->rmst));
                    rdt->nb_rmst = count;
                }
                rdt->rmst[s->streams[n]->index] = ff_rm_alloc_rmstream();
                /* the n-th member of the set uses rule pair (2n, 2n+1) */
                rdt_load_mdpr(rdt, s->streams[n], (n - first) * 2);
            }
    }

    return 0;
}
 
/**
 * Scan one ASM rule (the text between p and end) for an average-bandwidth
 * statement and store its value as the stream's bit rate.
 *
 * @param st  stream to receive the bit rate
 * @param p   start of the rule text
 * @param end one past the last character of the rule
 */
static void
real_parse_asm_rule(AVStream *st, const char *p, const char *end)
{
    do {
        /* can be either averagebandwidth= or AverageBandwidth= */
        if (sscanf(p, " %*1[Aa]verage%*1[Bb]andwidth=%d", &st->codec->bit_rate) == 1)
            break;
        /* advance to the next comma-separated statement; clamp to end
         * when there is no further comma inside this rule */
        if (!(p = strchr(p, ',')) || p > end)
            p = end;
        p++;
    } while (p < end);
}
 
/**
 * Create a new AVStream that mirrors the identity (id, media type, start
 * time) of an existing one, for an additional rule of the same content.
 *
 * @return the new stream, or NULL on allocation failure
 */
static AVStream *
add_dstream(AVFormatContext *s, AVStream *orig_st)
{
    AVStream *st = avformat_new_stream(s, NULL);

    if (st) {
        st->id                 = orig_st->id;
        st->codec->codec_type  = orig_st->codec->codec_type;
        st->first_dts          = orig_st->first_dts;
    }
    return st;
}
 
/**
 * Walk an ASMRuleBook string and create one AVStream per rule.
 *
 * Rules are ';'-separated (the last rule is also terminated by ';').
 * Every rule appears twice — once for when the RTSP header marker is set
 * and once for when it is not — so only the first of each pair is read.
 * A rule may start with a '#'-prefixed condition (typically min/max
 * bitrate bounds for multi-bitrate streams); conditions are always
 * merged into a single bracketed statement, never spread across several.
 * The first rule reuses @p orig_st; each further rule gets a duplicate
 * stream via add_dstream().
 */
static void
real_parse_asm_rulebook(AVFormatContext *s, AVStream *orig_st,
                        const char *p)
{
    const char *end;
    int rules_seen = 0, second_of_pair = 0;
    AVStream *st;

    if (*p == '\"')
        p++;
    for (;;) {
        end = strchr(p, ';');
        if (!end)
            break;
        if (!second_of_pair && end != p) {
            st = rules_seen > 0 ? add_dstream(s, orig_st) : orig_st;
            if (!st)
                break;
            real_parse_asm_rule(st, p, end);
            rules_seen++;
        }
        p = end + 1;
        second_of_pair ^= 1;
    }
}
 
/**
 * Handle a Real-specific SDP "a=" line; only ASMRuleBook is acted upon.
 */
void
ff_real_parse_sdp_a_line (AVFormatContext *s, int stream_index,
                          const char *line)
{
    const char *value;

    if (!av_strstart(line, "ASMRuleBook:string;", &value))
        return;
    real_parse_asm_rulebook(s, s->streams[stream_index], value);
}
 
/**
 * Allocate the RDT payload context and its embedded RM demuxer instance.
 *
 * @return new context, or NULL on allocation / demuxer-open failure
 */
static PayloadContext *
rdt_new_context (void)
{
    PayloadContext *rdt = av_mallocz(sizeof(PayloadContext));
    int ret;

    /* check the allocation before dereferencing rdt->rmctx below */
    if (!rdt)
        return NULL;

    ret = avformat_open_input(&rdt->rmctx, "", &ff_rdt_demuxer, NULL);
    if (ret < 0) {
        av_free(rdt);
        return NULL;
    }

    return rdt;
}
 
/**
 * Free the RDT payload context: per-stream RM parser state, the embedded
 * RM demuxer, the MLTI data, and the context itself.
 */
static void
rdt_free_context (PayloadContext *rdt)
{
    int n;

    for (n = 0; n < rdt->nb_rmst; n++) {
        if (!rdt->rmst[n])
            continue;
        ff_rm_free_rmstream(rdt->rmst[n]);
        av_freep(&rdt->rmst[n]);
    }
    if (rdt->rmctx)
        avformat_close_input(&rdt->rmctx);
    av_freep(&rdt->mlti_data);
    av_freep(&rdt->rmst);
    av_free(rdt);
}
 
/* Declare a static RTP dynamic-protocol handler for one RDT variant:
 * n = identifier suffix, s = SDP encoding name, t = media type.
 * All variants share the same parse/alloc/free callbacks above. */
#define RDT_HANDLER(n, s, t) \
static RTPDynamicProtocolHandler rdt_ ## n ## _handler = { \
    .enc_name         = s, \
    .codec_type       = t, \
    .codec_id         = AV_CODEC_ID_NONE, \
    .parse_sdp_a_line = rdt_parse_sdp_line, \
    .alloc            = rdt_new_context, \
    .free             = rdt_free_context, \
    .parse_packet     = rdt_parse_packet \
}

RDT_HANDLER(live_video, "x-pn-multirate-realvideo-live", AVMEDIA_TYPE_VIDEO);
RDT_HANDLER(live_audio, "x-pn-multirate-realaudio-live", AVMEDIA_TYPE_AUDIO);
RDT_HANDLER(video,      "x-pn-realvideo",                AVMEDIA_TYPE_VIDEO);
RDT_HANDLER(audio,      "x-pn-realaudio",                AVMEDIA_TYPE_AUDIO);
 
void av_register_rdt_dynamic_payload_handlers(void)
{
ff_register_dynamic_payload_handler(&rdt_video_handler);
ff_register_dynamic_payload_handler(&rdt_audio_handler);
ff_register_dynamic_payload_handler(&rdt_live_video_handler);
ff_register_dynamic_payload_handler(&rdt_live_audio_handler);
}
/contrib/sdk/sources/ffmpeg/libavformat/rdt.h
0,0 → 1,112
/*
* Realmedia RTSP (RDT) definitions
* Copyright (c) 2007 Ronald S. Bultje <rbultje@ronald.bitfreak.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RDT_H
#define AVFORMAT_RDT_H
 
#include <stdint.h>
#include "avformat.h"
#include "rtpdec.h"
 
typedef struct RDTDemuxContext RDTDemuxContext;
 
/**
* Allocate and init the RDT parsing context.
* @param ic the containing RTSP demuxer context
* @param first_stream_of_set_idx index to the first AVStream in the RTSP
* demuxer context's ic->streams array that is part of this
* particular stream's set of streams (with identical content)
* @param priv_data private data of the payload data handler context
* @param handler pointer to the parse_packet() payload parsing function
* @return a newly allocated RDTDemuxContext. Free with ff_rdt_parse_close().
*/
RDTDemuxContext *ff_rdt_parse_open(AVFormatContext *ic,
int first_stream_of_set_idx,
void *priv_data,
RTPDynamicProtocolHandler *handler);
void ff_rdt_parse_close(RDTDemuxContext *s);
 
/**
* Calculate the response (RealChallenge2 in the RTSP header) to the
* challenge (RealChallenge1 in the RTSP header from the Real/Helix
* server), which is used as some sort of client validation.
*
* @param response pointer to response buffer, it should be at least 41 bytes
* (40 data + 1 zero) bytes long.
* @param chksum pointer to buffer containing a checksum of the response,
* it should be at least 9 (8 data + 1 zero) bytes long.
* @param challenge pointer to the RealChallenge1 value provided by the
* server.
*/
void ff_rdt_calc_response_and_checksum(char response[41], char chksum[9],
const char *challenge);
 
/**
* Register RDT-related dynamic payload handlers with our cache.
*/
void av_register_rdt_dynamic_payload_handlers(void);
 
/**
* Add subscription information to Subscribe parameter string.
*
* @param cmd string to write the subscription information into.
* @param size size of cmd.
* @param stream_nr stream number.
* @param rule_nr rule number to conform to.
*/
void ff_rdt_subscribe_rule(char *cmd, int size,
int stream_nr, int rule_nr);
 
/**
* Parse RDT-style packet header.
*
* @param buf input buffer
* @param len length of input buffer
* @param pset_id will be set to the set ID this packet belongs to
* @param pseq_no will be set to the sequence number of the packet
* @param pstream_id will be set to the stream ID this packet belongs to
* @param pis_keyframe will be set to whether this packet belongs to a keyframe
* @param ptimestamp will be set to the timestamp of the packet
* @return the amount of bytes consumed, or negative on error
*/
int ff_rdt_parse_header(const uint8_t *buf, int len,
int *pset_id, int *pseq_no, int *pstream_id,
int *pis_keyframe, uint32_t *ptimestamp);
 
/**
* Parse RDT-style packet data (header + media data).
* Usage similar to rtp_parse_packet().
*/
int ff_rdt_parse_packet(RDTDemuxContext *s, AVPacket *pkt,
uint8_t **buf, int len);
 
/**
* Parse a server-related SDP line.
*
* @param s the RTSP AVFormatContext
* @param stream_index the index of the first stream in the set represented
* by the SDP m= line (in s->streams)
* @param buf the SDP line
*/
void ff_real_parse_sdp_a_line(AVFormatContext *s, int stream_index,
const char *buf);
 
#endif /* AVFORMAT_RDT_H */
/contrib/sdk/sources/ffmpeg/libavformat/realtextdec.c
0,0 → 1,153
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* RealText subtitle demuxer
* @see http://service.real.com/help/library/guides/ProductionGuide/prodguide/htmfiles/realtext.htm
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/intreadwrite.h"
 
/* Demuxer private data: all subtitle events are parsed up front in
 * read_header and queued here for read_packet/seek. */
typedef struct {
    FFDemuxSubtitlesQueue q; /* queue of demuxed subtitle events */
} RealTextContext;
 
/* Probe: a RealText file starts with "<window" (case-insensitive),
 * optionally preceded by a UTF-8 BOM. */
static int realtext_probe(AVProbeData *p)
{
    const unsigned char *ptr = p->buf;

    if (AV_RB24(ptr) == 0xEFBBBF)
        ptr += 3; /* skip UTF-8 BOM */
    if (!av_strncasecmp(ptr, "<window", 7))
        return AVPROBE_SCORE_EXTENSION;
    return 0;
}
 
/* Parse a RealText timestamp into centiseconds (the 1/100 s stream
 * timebase). Formats are tried from most to least specific:
 * h:m:s.f, h:m:s, m:s.f, m:s, s.f, and finally plain seconds.
 * Note: the fractional part is taken as raw centisecond digits. */
static int read_ts(const char *s)
{
    int hh, mm, ss, ms;

    if (sscanf(s, "%u:%u:%u.%u", &hh, &mm, &ss, &ms) == 4)
        return (hh * 3600 + mm * 60 + ss) * 100 + ms;
    if (sscanf(s, "%u:%u:%u", &hh, &mm, &ss) == 3)
        return (hh * 3600 + mm * 60 + ss) * 100;
    if (sscanf(s, "%u:%u.%u", &mm, &ss, &ms) == 3)
        return (mm * 60 + ss) * 100 + ms;
    if (sscanf(s, "%u:%u", &mm, &ss) == 2)
        return (mm * 60 + ss) * 100;
    if (sscanf(s, "%u.%u", &ss, &ms) == 2)
        return ss * 100 + ms;
    return strtol(s, NULL, 10) * 100;
}
 
/**
 * Parse the whole RealText file: store the <window> tag as codec
 * extradata and queue every subsequent markup chunk as a subtitle event.
 * Timestamps use a 1/100 s timebase (see read_ts()).
 *
 * @return 0 on success or an AVERROR code
 */
static int realtext_read_header(AVFormatContext *s)
{
    RealTextContext *rt = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    AVBPrint buf;
    char c = 0; /* one-character lookahead kept by the SMIL chunk reader */
    int res = 0, duration = read_ts("60"); // default duration is 60 seconds

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 100);
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_REALTEXT;

    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);

    while (!url_feof(s->pb)) {
        AVPacket *sub;
        /* rewind by one byte if a lookahead character is buffered in c */
        const int64_t pos = avio_tell(s->pb) - (c != 0);
        int n = ff_smil_extract_next_chunk(s->pb, &buf, &c);

        if (n == 0)
            break;

        if (!av_strncasecmp(buf.str, "<window", 7)) {
            /* save header to extradata */
            const char *p = ff_smil_get_attr_ptr(buf.str, "duration");

            if (p)
                duration = read_ts(p);
            st->codec->extradata = av_strdup(buf.str);
            if (!st->codec->extradata) {
                res = AVERROR(ENOMEM);
                goto end;
            }
            st->codec->extradata_size = buf.len + 1;
        } else {
            /* if we just read a <time> tag, introduce a new event, otherwise merge
             * with the previous one */
            int merge = !av_strncasecmp(buf.str, "<time", 5) ? 0 : 1;
            sub = ff_subtitles_queue_insert(&rt->q, buf.str, buf.len, merge);
            if (!sub) {
                res = AVERROR(ENOMEM);
                goto end;
            }
            if (!merge) {
                /* a fresh event: take pts/duration from the <time> tag,
                 * falling back to the window-level default duration */
                const char *begin = ff_smil_get_attr_ptr(buf.str, "begin");
                const char *end   = ff_smil_get_attr_ptr(buf.str, "end");

                sub->pos      = pos;
                sub->pts      = begin ? read_ts(begin) : 0;
                sub->duration = end ? (read_ts(end) - sub->pts) : duration;
            }
        }
        av_bprint_clear(&buf);
    }
    ff_subtitles_queue_finalize(&rt->q);

end:
    av_bprint_finalize(&buf, NULL);
    return res;
}
 
/* Pop the next subtitle event queued by realtext_read_header(). */
static int realtext_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FFDemuxSubtitlesQueue *q = &((RealTextContext *)s->priv_data)->q;

    return ff_subtitles_queue_read_packet(q, pkt);
}
 
/* Seek within the pre-parsed subtitle queue. */
static int realtext_read_seek(AVFormatContext *s, int stream_index,
                              int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    FFDemuxSubtitlesQueue *q = &((RealTextContext *)s->priv_data)->q;

    return ff_subtitles_queue_seek(q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* Release all queued subtitle packets. */
static int realtext_read_close(AVFormatContext *s)
{
    ff_subtitles_queue_clean(&((RealTextContext *)s->priv_data)->q);
    return 0;
}
 
/* Demuxer registration for RealText (.rt) subtitles. */
AVInputFormat ff_realtext_demuxer = {
    .name           = "realtext",
    .long_name      = NULL_IF_CONFIG_SMALL("RealText subtitle format"),
    .priv_data_size = sizeof(RealTextContext),
    .read_probe     = realtext_probe,
    .read_header    = realtext_read_header,
    .read_packet    = realtext_read_packet,
    .read_seek2     = realtext_read_seek,
    .read_close     = realtext_read_close,
    .extensions     = "rt",
};
/contrib/sdk/sources/ffmpeg/libavformat/redspark.c
0,0 → 1,170
/*
* RedSpark demuxer
* Copyright (c) 2013 James Almer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/bytestream.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "avio.h"
#include "internal.h"
 
#define HEADER_SIZE 4096
 
/* Demuxer private data. */
typedef struct RedSparkContext {
    int samples_count; /* samples emitted so far; compared to st->duration for EOF */
} RedSparkContext;
 
/**
 * Probe: decrypt the first 8 header bytes and look for the "RedSpark"
 * magic. The cipher XORs each big-endian 32-bit word with a rolling key;
 * the initial key is derived from the first word XORed with 0x52656453
 * (ASCII "RedS", i.e. the expected plaintext of that word).
 */
static int redspark_probe(AVProbeData *p)
{
    uint32_t key, data;
    uint8_t header[8];

    /* Decrypt first 8 bytes of the header */
    data = AV_RB32(p->buf);
    data = data ^ (key = data ^ 0x52656453);
    AV_WB32(header, data);
    key = (key << 11) | (key >> 21); /* rotate left by 11 for the next word */

    data = AV_RB32(p->buf + 4) ^ (((key << 3) | (key >> 29)) + key);
    AV_WB32(header + 4, data);

    if (AV_RB64(header) == AV_RB64("RedSpark"))
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/**
 * Read and decrypt the 4096-byte RedSpark header, then set up a single
 * ADPCM_THP audio stream (sample rate, channel count, duration, and the
 * per-channel 32-byte ADPCM coefficient tables stored as extradata).
 *
 * @return 0 on success or an AVERROR code
 */
static int redspark_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    RedSparkContext *redspark = s->priv_data;
    AVCodecContext *codec;
    GetByteContext gbc;
    int i, coef_off, ret = 0;
    uint32_t key, data;
    uint8_t *header, *pbc;
    AVStream *st;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    codec = st->codec;

    header = av_malloc(HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!header)
        return AVERROR(ENOMEM);
    pbc = header;

    /* Decrypt header: same rolling-key XOR scheme as redspark_probe(),
     * applied to the full HEADER_SIZE bytes */
    data = avio_rb32(pb);
    data = data ^ (key = data ^ 0x52656453);
    bytestream_put_be32(&pbc, data);
    key = (key << 11) | (key >> 21);

    for (i = 4; i < HEADER_SIZE; i += 4) {
        data = avio_rb32(pb) ^ (key = ((key << 3) | (key >> 29)) + key);
        bytestream_put_be32(&pbc, data);
    }

    codec->codec_id = AV_CODEC_ID_ADPCM_THP;
    codec->codec_type = AVMEDIA_TYPE_AUDIO;

    /* parse the decrypted header fields starting at offset 0x3c */
    bytestream2_init(&gbc, header, HEADER_SIZE);
    bytestream2_seek(&gbc, 0x3c, SEEK_SET);
    codec->sample_rate = bytestream2_get_be32u(&gbc);
    if (codec->sample_rate <= 0 || codec->sample_rate > 96000) {
        av_log(s, AV_LOG_ERROR, "Invalid sample rate: %d\n", codec->sample_rate);
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    /* the stored count is in frames; each frame yields 14 samples */
    st->duration = bytestream2_get_be32u(&gbc) * 14;
    redspark->samples_count = 0;
    bytestream2_skipu(&gbc, 10);
    codec->channels = bytestream2_get_byteu(&gbc);
    if (!codec->channels) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    /* coefficient tables follow the per-channel info at 0x54 */
    coef_off = 0x54 + codec->channels * 8;
    if (bytestream2_get_byteu(&gbc)) // Loop flag
        coef_off += 16;

    /* each channel contributes 32 coefficient bytes + 14 skipped bytes */
    if (coef_off + codec->channels * (32 + 14) > HEADER_SIZE) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    if (ff_alloc_extradata(codec, 32 * codec->channels)) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* Get the ADPCM table */
    bytestream2_seek(&gbc, coef_off, SEEK_SET);
    for (i = 0; i < codec->channels; i++) {
        if (bytestream2_get_bufferu(&gbc, codec->extradata + i * 32, 32) != 32) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
        bytestream2_skipu(&gbc, 14);
    }

    avpriv_set_pts_info(st, 64, 1, codec->sample_rate);

fail:
    av_free(header);

    return ret;
}
 
/**
 * Read one ADPCM frame (8 bytes per channel, 14 samples) into a packet.
 *
 * @return packet size on success, AVERROR_EOF at end of stream, or a
 *         negative error code
 */
static int redspark_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[0]->codec;
    RedSparkContext *redspark = s->priv_data;
    uint32_t size = 8 * codec->channels;
    int ret;

    if (url_feof(s->pb) || redspark->samples_count == s->streams[0]->duration)
        return AVERROR_EOF;

    ret = av_get_packet(s->pb, pkt, size);
    /* propagate the real I/O error instead of folding it into the
     * unsigned comparison below (which would mask it as EIO) */
    if (ret < 0)
        return ret;
    if (ret != size) {
        /* short read: truncated file */
        av_free_packet(pkt);
        return AVERROR(EIO);
    }

    pkt->duration = 14;
    redspark->samples_count += pkt->duration;
    pkt->stream_index = 0;

    return ret;
}
 
/* Demuxer registration for RedSpark (.rsd) audio files. */
AVInputFormat ff_redspark_demuxer = {
    .name           = "redspark",
    .long_name      = NULL_IF_CONFIG_SMALL("RedSpark"),
    .priv_data_size = sizeof(RedSparkContext),
    .read_probe     = redspark_probe,
    .read_header    = redspark_read_header,
    .read_packet    = redspark_read_packet,
    .extensions     = "rsd",
};
/contrib/sdk/sources/ffmpeg/libavformat/riff.c
0,0 → 1,454
/*
* RIFF common functions and data
* Copyright (c) 2000 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/error.h"
#include "libavcodec/avcodec.h"
#include "avformat.h"
#include "riff.h"
 
/* Note: When encoding, the first matching tag is used, so order is
* important if multiple tags are possible for a given codec.
* Note also that this list is used for more than just riff, other
* files use it as well.
*/
const AVCodecTag ff_codec_bmp_tags[] = {
{ AV_CODEC_ID_H264, MKTAG('H', '2', '6', '4') },
{ AV_CODEC_ID_H264, MKTAG('h', '2', '6', '4') },
{ AV_CODEC_ID_H264, MKTAG('X', '2', '6', '4') },
{ AV_CODEC_ID_H264, MKTAG('x', '2', '6', '4') },
{ AV_CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') },
{ AV_CODEC_ID_H264, MKTAG('D', 'A', 'V', 'C') },
{ AV_CODEC_ID_H264, MKTAG('S', 'M', 'V', '2') },
{ AV_CODEC_ID_H264, MKTAG('V', 'S', 'S', 'H') },
{ AV_CODEC_ID_H264, MKTAG('Q', '2', '6', '4') }, /* QNAP surveillance system */
{ AV_CODEC_ID_H264, MKTAG('V', '2', '6', '4') },
{ AV_CODEC_ID_H264, MKTAG('G', 'A', 'V', 'C') }, /* GeoVision camera */
{ AV_CODEC_ID_H264, MKTAG('U', 'M', 'S', 'V') },
{ AV_CODEC_ID_H263, MKTAG('H', '2', '6', '3') },
{ AV_CODEC_ID_H263, MKTAG('X', '2', '6', '3') },
{ AV_CODEC_ID_H263, MKTAG('T', '2', '6', '3') },
{ AV_CODEC_ID_H263, MKTAG('L', '2', '6', '3') },
{ AV_CODEC_ID_H263, MKTAG('V', 'X', '1', 'K') },
{ AV_CODEC_ID_H263, MKTAG('Z', 'y', 'G', 'o') },
{ AV_CODEC_ID_H263, MKTAG('M', '2', '6', '3') },
{ AV_CODEC_ID_H263, MKTAG('l', 's', 'v', 'm') },
{ AV_CODEC_ID_H263P, MKTAG('H', '2', '6', '3') },
{ AV_CODEC_ID_H263I, MKTAG('I', '2', '6', '3') }, /* Intel H.263 */
{ AV_CODEC_ID_H261, MKTAG('H', '2', '6', '1') },
{ AV_CODEC_ID_H263, MKTAG('U', '2', '6', '3') },
{ AV_CODEC_ID_MPEG4, MKTAG('F', 'M', 'P', '4') },
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') },
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'X', '5', '0') },
{ AV_CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') },
{ AV_CODEC_ID_MPEG4, MKTAG('M', 'P', '4', 'S') },
{ AV_CODEC_ID_MPEG4, MKTAG('M', '4', 'S', '2') },
/* some broken AVIs use this */
{ AV_CODEC_ID_MPEG4, MKTAG( 4 , 0 , 0 , 0 ) },
/* some broken AVIs use this */
{ AV_CODEC_ID_MPEG4, MKTAG('Z', 'M', 'P', '4') },
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', '1') },
{ AV_CODEC_ID_MPEG4, MKTAG('B', 'L', 'Z', '0') },
{ AV_CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
{ AV_CODEC_ID_MPEG4, MKTAG('U', 'M', 'P', '4') },
{ AV_CODEC_ID_MPEG4, MKTAG('W', 'V', '1', 'F') },
{ AV_CODEC_ID_MPEG4, MKTAG('S', 'E', 'D', 'G') },
{ AV_CODEC_ID_MPEG4, MKTAG('R', 'M', 'P', '4') },
{ AV_CODEC_ID_MPEG4, MKTAG('3', 'I', 'V', '2') },
/* WaWv MPEG-4 Video Codec */
{ AV_CODEC_ID_MPEG4, MKTAG('W', 'A', 'W', 'V') },
{ AV_CODEC_ID_MPEG4, MKTAG('F', 'F', 'D', 'S') },
{ AV_CODEC_ID_MPEG4, MKTAG('F', 'V', 'F', 'W') },
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'C', 'O', 'D') },
{ AV_CODEC_ID_MPEG4, MKTAG('M', 'V', 'X', 'M') },
{ AV_CODEC_ID_MPEG4, MKTAG('P', 'M', '4', 'V') },
{ AV_CODEC_ID_MPEG4, MKTAG('S', 'M', 'P', '4') },
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'X', 'G', 'M') },
{ AV_CODEC_ID_MPEG4, MKTAG('V', 'I', 'D', 'M') },
{ AV_CODEC_ID_MPEG4, MKTAG('M', '4', 'T', '3') },
{ AV_CODEC_ID_MPEG4, MKTAG('G', 'E', 'O', 'X') },
/* flipped video */
{ AV_CODEC_ID_MPEG4, MKTAG('H', 'D', 'X', '4') },
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'M', '4', 'V') },
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'M', 'K', '2') },
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'I', 'G', 'I') },
{ AV_CODEC_ID_MPEG4, MKTAG('I', 'N', 'M', 'C') },
/* Ephv MPEG-4 */
{ AV_CODEC_ID_MPEG4, MKTAG('E', 'P', 'H', 'V') },
{ AV_CODEC_ID_MPEG4, MKTAG('E', 'M', '4', 'A') },
/* Divio MPEG-4 */
{ AV_CODEC_ID_MPEG4, MKTAG('M', '4', 'C', 'C') },
{ AV_CODEC_ID_MPEG4, MKTAG('S', 'N', '4', '0') },
{ AV_CODEC_ID_MPEG4, MKTAG('V', 'S', 'P', 'X') },
{ AV_CODEC_ID_MPEG4, MKTAG('U', 'L', 'D', 'X') },
{ AV_CODEC_ID_MPEG4, MKTAG('G', 'E', 'O', 'V') },
/* Samsung SHR-6040 */
{ AV_CODEC_ID_MPEG4, MKTAG('S', 'I', 'P', 'P') },
{ AV_CODEC_ID_MPEG4, MKTAG('S', 'M', '4', 'V') },
{ AV_CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'X') },
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'r', 'e', 'X') },
{ AV_CODEC_ID_MPEG4, MKTAG('Q', 'M', 'P', '4') }, /* QNAP Systems */
{ AV_CODEC_ID_MPEG4, MKTAG('P', 'L', 'V', '1') }, /* Pelco DVR MPEG-4 */
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') },
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '3') },
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', 'G', '3') },
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '5') },
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '6') },
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '4') },
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('D', 'V', 'X', '3') },
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('A', 'P', '4', '1') },
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '1') },
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '0') },
{ AV_CODEC_ID_MSMPEG4V2, MKTAG('M', 'P', '4', '2') },
{ AV_CODEC_ID_MSMPEG4V2, MKTAG('D', 'I', 'V', '2') },
{ AV_CODEC_ID_MSMPEG4V1, MKTAG('M', 'P', 'G', '4') },
{ AV_CODEC_ID_MSMPEG4V1, MKTAG('M', 'P', '4', '1') },
{ AV_CODEC_ID_WMV1, MKTAG('W', 'M', 'V', '1') },
{ AV_CODEC_ID_WMV2, MKTAG('W', 'M', 'V', '2') },
{ AV_CODEC_ID_WMV2, MKTAG('G', 'X', 'V', 'E') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'd') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'd') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '1') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'l') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', '2', '5') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', '0') },
/* Canopus DV */
{ AV_CODEC_ID_DVVIDEO, MKTAG('c', 'd', 'v', 'c') },
/* Canopus DV */
{ AV_CODEC_ID_DVVIDEO, MKTAG('C', 'D', 'V', 'H') },
/* Canopus DV */
{ AV_CODEC_ID_DVVIDEO, MKTAG('C', 'D', 'V', '5') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', ' ') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', 's') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '1') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'i', 's') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('p', 'd', 'v', 'c') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('S', 'L', '2', '5') },
{ AV_CODEC_ID_DVVIDEO, MKTAG('S', 'L', 'D', 'V') },
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '1') },
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '2') },
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', 'g', '2') },
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('M', 'P', 'E', 'G') },
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG('P', 'I', 'M', '1') },
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('P', 'I', 'M', '2') },
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG('V', 'C', 'R', '2') },
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG( 1 , 0 , 0 , 16) },
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG( 2 , 0 , 0 , 16) },
{ AV_CODEC_ID_MPEG4, MKTAG( 4 , 0 , 0 , 16) },
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('D', 'V', 'R', ' ') },
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('M', 'M', 'E', 'S') },
/* Lead MPEG-2 in AVI */
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('L', 'M', 'P', '2') },
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('s', 'l', 'i', 'f') },
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('E', 'M', '2', 'V') },
/* Matrox MPEG-2 intra-only */
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('M', '7', '0', '1') },
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', 'g', 'v') },
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG('B', 'W', '1', '0') },
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG('X', 'M', 'P', 'G') }, /* Xing MPEG intra only */
{ AV_CODEC_ID_MJPEG, MKTAG('M', 'J', 'P', 'G') },
{ AV_CODEC_ID_MJPEG, MKTAG('L', 'J', 'P', 'G') },
{ AV_CODEC_ID_MJPEG, MKTAG('d', 'm', 'b', '1') },
{ AV_CODEC_ID_MJPEG, MKTAG('m', 'j', 'p', 'a') },
{ AV_CODEC_ID_LJPEG, MKTAG('L', 'J', 'P', 'G') },
/* Pegasus lossless JPEG */
{ AV_CODEC_ID_MJPEG, MKTAG('J', 'P', 'G', 'L') },
/* JPEG-LS custom FOURCC for AVI - encoder */
{ AV_CODEC_ID_JPEGLS, MKTAG('M', 'J', 'L', 'S') },
{ AV_CODEC_ID_JPEGLS, MKTAG('M', 'J', 'P', 'G') },
/* JPEG-LS custom FOURCC for AVI - decoder */
{ AV_CODEC_ID_MJPEG, MKTAG('M', 'J', 'L', 'S') },
{ AV_CODEC_ID_MJPEG, MKTAG('j', 'p', 'e', 'g') },
{ AV_CODEC_ID_MJPEG, MKTAG('I', 'J', 'P', 'G') },
{ AV_CODEC_ID_AVRN, MKTAG('A', 'V', 'R', 'n') },
{ AV_CODEC_ID_MJPEG, MKTAG('A', 'C', 'D', 'V') },
{ AV_CODEC_ID_MJPEG, MKTAG('Q', 'I', 'V', 'G') },
/* SL M-JPEG */
{ AV_CODEC_ID_MJPEG, MKTAG('S', 'L', 'M', 'J') },
/* Creative Webcam JPEG */
{ AV_CODEC_ID_MJPEG, MKTAG('C', 'J', 'P', 'G') },
/* Intel JPEG Library Video Codec */
{ AV_CODEC_ID_MJPEG, MKTAG('I', 'J', 'L', 'V') },
/* Midvid JPEG Video Codec */
{ AV_CODEC_ID_MJPEG, MKTAG('M', 'V', 'J', 'P') },
{ AV_CODEC_ID_MJPEG, MKTAG('A', 'V', 'I', '1') },
{ AV_CODEC_ID_MJPEG, MKTAG('A', 'V', 'I', '2') },
{ AV_CODEC_ID_MJPEG, MKTAG('M', 'T', 'S', 'J') },
/* Paradigm Matrix M-JPEG Codec */
{ AV_CODEC_ID_MJPEG, MKTAG('Z', 'J', 'P', 'G') },
{ AV_CODEC_ID_MJPEG, MKTAG('M', 'M', 'J', 'P') },
{ AV_CODEC_ID_HUFFYUV, MKTAG('H', 'F', 'Y', 'U') },
{ AV_CODEC_ID_FFVHUFF, MKTAG('F', 'F', 'V', 'H') },
{ AV_CODEC_ID_CYUV, MKTAG('C', 'Y', 'U', 'V') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG( 0 , 0 , 0 , 0 ) },
{ AV_CODEC_ID_RAWVIDEO, MKTAG( 3 , 0 , 0 , 0 ) },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('I', '4', '2', '0') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', 'U', 'Y', '2') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', '2', '2') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('V', '4', '2', '2') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', 'U', 'N', 'V') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('U', 'Y', 'N', 'V') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('U', 'Y', 'N', 'Y') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('u', 'y', 'v', '1') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('2', 'V', 'u', '1') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('2', 'v', 'u', 'y') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('y', 'u', 'v', 's') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('y', 'u', 'v', '2') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('P', '4', '2', '2') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', '1', '2') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', '1', '6') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', '2', '4') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('U', 'Y', 'V', 'Y') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('V', 'Y', 'U', 'Y') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('I', 'Y', 'U', 'V') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '8', '0', '0') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '8', ' ', ' ') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('H', 'D', 'Y', 'C') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', 'U', '9') },
/* SoftLab-NSK VideoTizer */
{ AV_CODEC_ID_RAWVIDEO, MKTAG('V', 'D', 'T', 'Z') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', '1', '1') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('N', 'V', '1', '2') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('N', 'V', '2', '1') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', '1', 'B') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', '2', 'B') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', 'U', 'V', '9') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', 'U', '9') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('a', 'u', 'v', '2') },
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', 'Y', 'U') },
{ AV_CODEC_ID_FRWU, MKTAG('F', 'R', 'W', 'U') },
{ AV_CODEC_ID_R10K, MKTAG('R', '1', '0', 'k') },
{ AV_CODEC_ID_R210, MKTAG('r', '2', '1', '0') },
{ AV_CODEC_ID_V210, MKTAG('v', '2', '1', '0') },
{ AV_CODEC_ID_V308, MKTAG('v', '3', '0', '8') },
{ AV_CODEC_ID_V408, MKTAG('v', '4', '0', '8') },
{ AV_CODEC_ID_AYUV, MKTAG('A', 'Y', 'U', 'V') },
{ AV_CODEC_ID_V410, MKTAG('v', '4', '1', '0') },
{ AV_CODEC_ID_YUV4, MKTAG('y', 'u', 'v', '4') },
{ AV_CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '1') },
{ AV_CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '2') },
{ AV_CODEC_ID_INDEO4, MKTAG('I', 'V', '4', '1') },
{ AV_CODEC_ID_INDEO5, MKTAG('I', 'V', '5', '0') },
{ AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') },
{ AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') },
{ AV_CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') },
{ AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') },
{ AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') },
{ AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') },
{ AV_CODEC_ID_VP6A, MKTAG('V', 'P', '6', 'A') },
{ AV_CODEC_ID_VP6F, MKTAG('V', 'P', '6', 'F') },
{ AV_CODEC_ID_VP6F, MKTAG('F', 'L', 'V', '4') },
{ AV_CODEC_ID_VP8, MKTAG('V', 'P', '8', '0') },
{ AV_CODEC_ID_ASV1, MKTAG('A', 'S', 'V', '1') },
{ AV_CODEC_ID_ASV2, MKTAG('A', 'S', 'V', '2') },
{ AV_CODEC_ID_VCR1, MKTAG('V', 'C', 'R', '1') },
{ AV_CODEC_ID_FFV1, MKTAG('F', 'F', 'V', '1') },
{ AV_CODEC_ID_XAN_WC4, MKTAG('X', 'x', 'a', 'n') },
{ AV_CODEC_ID_MIMIC, MKTAG('L', 'M', '2', '0') },
{ AV_CODEC_ID_MSRLE, MKTAG('m', 'r', 'l', 'e') },
{ AV_CODEC_ID_MSRLE, MKTAG( 1 , 0 , 0 , 0 ) },
{ AV_CODEC_ID_MSRLE, MKTAG( 2 , 0 , 0 , 0 ) },
{ AV_CODEC_ID_MSVIDEO1, MKTAG('M', 'S', 'V', 'C') },
{ AV_CODEC_ID_MSVIDEO1, MKTAG('m', 's', 'v', 'c') },
{ AV_CODEC_ID_MSVIDEO1, MKTAG('C', 'R', 'A', 'M') },
{ AV_CODEC_ID_MSVIDEO1, MKTAG('c', 'r', 'a', 'm') },
{ AV_CODEC_ID_MSVIDEO1, MKTAG('W', 'H', 'A', 'M') },
{ AV_CODEC_ID_MSVIDEO1, MKTAG('w', 'h', 'a', 'm') },
{ AV_CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') },
{ AV_CODEC_ID_TRUEMOTION1, MKTAG('D', 'U', 'C', 'K') },
{ AV_CODEC_ID_TRUEMOTION1, MKTAG('P', 'V', 'E', 'Z') },
{ AV_CODEC_ID_MSZH, MKTAG('M', 'S', 'Z', 'H') },
{ AV_CODEC_ID_ZLIB, MKTAG('Z', 'L', 'I', 'B') },
{ AV_CODEC_ID_SNOW, MKTAG('S', 'N', 'O', 'W') },
{ AV_CODEC_ID_4XM, MKTAG('4', 'X', 'M', 'V') },
{ AV_CODEC_ID_FLV1, MKTAG('F', 'L', 'V', '1') },
{ AV_CODEC_ID_FLV1, MKTAG('S', '2', '6', '3') },
{ AV_CODEC_ID_FLASHSV, MKTAG('F', 'S', 'V', '1') },
{ AV_CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') },
{ AV_CODEC_ID_TSCC, MKTAG('t', 's', 'c', 'c') },
{ AV_CODEC_ID_ULTI, MKTAG('U', 'L', 'T', 'I') },
{ AV_CODEC_ID_VIXL, MKTAG('V', 'I', 'X', 'L') },
{ AV_CODEC_ID_QPEG, MKTAG('Q', 'P', 'E', 'G') },
{ AV_CODEC_ID_QPEG, MKTAG('Q', '1', '.', '0') },
{ AV_CODEC_ID_QPEG, MKTAG('Q', '1', '.', '1') },
{ AV_CODEC_ID_WMV3, MKTAG('W', 'M', 'V', '3') },
{ AV_CODEC_ID_WMV3IMAGE, MKTAG('W', 'M', 'V', 'P') },
{ AV_CODEC_ID_VC1, MKTAG('W', 'V', 'C', '1') },
{ AV_CODEC_ID_VC1, MKTAG('W', 'M', 'V', 'A') },
{ AV_CODEC_ID_VC1IMAGE, MKTAG('W', 'V', 'P', '2') },
{ AV_CODEC_ID_LOCO, MKTAG('L', 'O', 'C', 'O') },
{ AV_CODEC_ID_WNV1, MKTAG('W', 'N', 'V', '1') },
{ AV_CODEC_ID_WNV1, MKTAG('Y', 'U', 'V', '8') },
{ AV_CODEC_ID_AASC, MKTAG('A', 'A', 'S', '4') },
{ AV_CODEC_ID_AASC, MKTAG('A', 'A', 'S', 'C') },
{ AV_CODEC_ID_INDEO2, MKTAG('R', 'T', '2', '1') },
{ AV_CODEC_ID_FRAPS, MKTAG('F', 'P', 'S', '1') },
{ AV_CODEC_ID_THEORA, MKTAG('t', 'h', 'e', 'o') },
{ AV_CODEC_ID_TRUEMOTION2, MKTAG('T', 'M', '2', '0') },
{ AV_CODEC_ID_CSCD, MKTAG('C', 'S', 'C', 'D') },
{ AV_CODEC_ID_ZMBV, MKTAG('Z', 'M', 'B', 'V') },
{ AV_CODEC_ID_KMVC, MKTAG('K', 'M', 'V', 'C') },
{ AV_CODEC_ID_CAVS, MKTAG('C', 'A', 'V', 'S') },
{ AV_CODEC_ID_JPEG2000, MKTAG('m', 'j', 'p', '2') },
{ AV_CODEC_ID_JPEG2000, MKTAG('M', 'J', '2', 'C') },
{ AV_CODEC_ID_JPEG2000, MKTAG('L', 'J', '2', 'C') },
{ AV_CODEC_ID_JPEG2000, MKTAG('L', 'J', '2', 'K') },
{ AV_CODEC_ID_JPEG2000, MKTAG('I', 'P', 'J', '2') },
{ AV_CODEC_ID_VMNC, MKTAG('V', 'M', 'n', 'c') },
{ AV_CODEC_ID_TARGA, MKTAG('t', 'g', 'a', ' ') },
{ AV_CODEC_ID_PNG, MKTAG('M', 'P', 'N', 'G') },
{ AV_CODEC_ID_PNG, MKTAG('P', 'N', 'G', '1') },
{ AV_CODEC_ID_CLJR, MKTAG('C', 'L', 'J', 'R') },
{ AV_CODEC_ID_DIRAC, MKTAG('d', 'r', 'a', 'c') },
{ AV_CODEC_ID_RPZA, MKTAG('a', 'z', 'p', 'r') },
{ AV_CODEC_ID_RPZA, MKTAG('R', 'P', 'Z', 'A') },
{ AV_CODEC_ID_RPZA, MKTAG('r', 'p', 'z', 'a') },
{ AV_CODEC_ID_SP5X, MKTAG('S', 'P', '5', '4') },
{ AV_CODEC_ID_AURA, MKTAG('A', 'U', 'R', 'A') },
{ AV_CODEC_ID_AURA2, MKTAG('A', 'U', 'R', '2') },
{ AV_CODEC_ID_DPX, MKTAG('d', 'p', 'x', ' ') },
{ AV_CODEC_ID_KGV1, MKTAG('K', 'G', 'V', '1') },
{ AV_CODEC_ID_LAGARITH, MKTAG('L', 'A', 'G', 'S') },
{ AV_CODEC_ID_AMV, MKTAG('A', 'M', 'V', 'F') },
{ AV_CODEC_ID_UTVIDEO, MKTAG('U', 'L', 'R', 'A') },
{ AV_CODEC_ID_UTVIDEO, MKTAG('U', 'L', 'R', 'G') },
{ AV_CODEC_ID_UTVIDEO, MKTAG('U', 'L', 'Y', '0') },
{ AV_CODEC_ID_UTVIDEO, MKTAG('U', 'L', 'Y', '2') },
/* Ut Video version 13.0.1 BT.709 codecs */
{ AV_CODEC_ID_UTVIDEO, MKTAG('U', 'L', 'H', '0') },
{ AV_CODEC_ID_UTVIDEO, MKTAG('U', 'L', 'H', '2') },
{ AV_CODEC_ID_VBLE, MKTAG('V', 'B', 'L', 'E') },
{ AV_CODEC_ID_ESCAPE130, MKTAG('E', '1', '3', '0') },
{ AV_CODEC_ID_DXTORY, MKTAG('x', 't', 'o', 'r') },
{ AV_CODEC_ID_ZEROCODEC, MKTAG('Z', 'E', 'C', 'O') },
{ AV_CODEC_ID_Y41P, MKTAG('Y', '4', '1', 'P') },
{ AV_CODEC_ID_FLIC, MKTAG('A', 'F', 'L', 'C') },
{ AV_CODEC_ID_MSS1, MKTAG('M', 'S', 'S', '1') },
{ AV_CODEC_ID_MSA1, MKTAG('M', 'S', 'A', '1') },
{ AV_CODEC_ID_TSCC2, MKTAG('T', 'S', 'C', '2') },
{ AV_CODEC_ID_MTS2, MKTAG('M', 'T', 'S', '2') },
{ AV_CODEC_ID_CLLC, MKTAG('C', 'L', 'L', 'C') },
{ AV_CODEC_ID_MSS2, MKTAG('M', 'S', 'S', '2') },
{ AV_CODEC_ID_SVQ3, MKTAG('S', 'V', 'Q', '3') },
{ AV_CODEC_ID_012V, MKTAG('0', '1', '2', 'v') },
{ AV_CODEC_ID_012V, MKTAG('a', '1', '2', 'v') },
{ AV_CODEC_ID_G2M, MKTAG('G', '2', 'M', '2') },
{ AV_CODEC_ID_G2M, MKTAG('G', '2', 'M', '3') },
{ AV_CODEC_ID_G2M, MKTAG('G', '2', 'M', '4') },
{ AV_CODEC_ID_NONE, 0 }
};
 
/* Mapping between WAVE format tags (wFormatTag of WAVEFORMATEX) and FFmpeg
 * codec IDs.  Lookups scan this table linearly, so where several codec IDs
 * share one tag the ordering is significant: the first match wins for
 * tag -> id lookups, and later entries exist for id -> tag muxing only.
 * Terminated by the AV_CODEC_ID_NONE sentinel. */
const AVCodecTag ff_codec_wav_tags[] = {
    { AV_CODEC_ID_PCM_S16LE,       0x0001 },
    /* must come after s16le in this list */
    { AV_CODEC_ID_PCM_U8,          0x0001 },
    { AV_CODEC_ID_PCM_S24LE,       0x0001 },
    { AV_CODEC_ID_PCM_S32LE,       0x0001 },
    { AV_CODEC_ID_ADPCM_MS,        0x0002 },
    { AV_CODEC_ID_PCM_F32LE,       0x0003 },
    /* must come after f32le in this list */
    { AV_CODEC_ID_PCM_F64LE,       0x0003 },
    { AV_CODEC_ID_PCM_ALAW,        0x0006 },
    { AV_CODEC_ID_PCM_MULAW,       0x0007 },
    { AV_CODEC_ID_WMAVOICE,        0x000A },
    { AV_CODEC_ID_ADPCM_IMA_OKI,   0x0010 },
    { AV_CODEC_ID_ADPCM_IMA_WAV,   0x0011 },
    /* must come after adpcm_ima_wav in this list */
    { AV_CODEC_ID_PCM_ZORK,        0x0011 },
    { AV_CODEC_ID_ADPCM_IMA_OKI,   0x0017 },
    { AV_CODEC_ID_ADPCM_YAMAHA,    0x0020 },
    { AV_CODEC_ID_TRUESPEECH,      0x0022 },
    { AV_CODEC_ID_GSM_MS,          0x0031 },
    { AV_CODEC_ID_GSM_MS,          0x0032 },  /* msn audio */
    { AV_CODEC_ID_AMR_NB,          0x0038 },  /* rogue format number */
    { AV_CODEC_ID_G723_1,          0x0042 },
    { AV_CODEC_ID_ADPCM_G726,      0x0045 },
    { AV_CODEC_ID_MP2,             0x0050 },
    { AV_CODEC_ID_MP3,             0x0055 },
    { AV_CODEC_ID_AMR_NB,          0x0057 },
    { AV_CODEC_ID_AMR_WB,          0x0058 },
    /* rogue format number */
    { AV_CODEC_ID_ADPCM_IMA_DK4,   0x0061 },
    /* rogue format number */
    { AV_CODEC_ID_ADPCM_IMA_DK3,   0x0062 },
    { AV_CODEC_ID_ADPCM_G726,      0x0064 },
    { AV_CODEC_ID_ADPCM_IMA_WAV,   0x0069 },
    { AV_CODEC_ID_METASOUND,       0x0075 },
    { AV_CODEC_ID_AAC,             0x00ff },
    { AV_CODEC_ID_G723_1,          0x0111 },
    { AV_CODEC_ID_SIPR,            0x0130 },
    { AV_CODEC_ID_WMAV1,           0x0160 },
    { AV_CODEC_ID_WMAV2,           0x0161 },
    { AV_CODEC_ID_WMAPRO,          0x0162 },
    { AV_CODEC_ID_WMALOSSLESS,     0x0163 },
    { AV_CODEC_ID_ADPCM_CT,        0x0200 },
    { AV_CODEC_ID_ATRAC3,          0x0270 },
    { AV_CODEC_ID_ADPCM_G722,      0x028F },
    { AV_CODEC_ID_IMC,             0x0401 },
    { AV_CODEC_ID_IAC,             0x0402 },
    { AV_CODEC_ID_GSM_MS,          0x1500 },
    { AV_CODEC_ID_TRUESPEECH,      0x1501 },
    /* ADTS AAC */
    { AV_CODEC_ID_AAC,             0x1600 },
    { AV_CODEC_ID_AAC_LATM,        0x1602 },
    { AV_CODEC_ID_AC3,             0x2000 },
    { AV_CODEC_ID_DTS,             0x2001 },
    { AV_CODEC_ID_SONIC,           0x2048 },
    { AV_CODEC_ID_SONIC_LS,        0x2048 },
    { AV_CODEC_ID_PCM_MULAW,       0x6c75 },
    { AV_CODEC_ID_AAC,             0x706d },
    { AV_CODEC_ID_AAC,             0x4143 },
    { AV_CODEC_ID_XAN_DPCM,        0x594a },
    { AV_CODEC_ID_G723_1,          0xA100 },
    { AV_CODEC_ID_AAC,             0xA106 },
    { AV_CODEC_ID_SPEEX,           0xA109 },
    { AV_CODEC_ID_FLAC,            0xF1AC },
    { AV_CODEC_ID_ADPCM_SWF,       ('S' << 8) + 'F' },
    /* HACK/FIXME: Does Vorbis in WAV/AVI have an (in)official ID? */
    { AV_CODEC_ID_VORBIS,          ('V' << 8) + 'o' },
    { AV_CODEC_ID_NONE,            0 },
};
 
/* Translation table between RIFF INFO chunk FourCCs and FFmpeg's generic
 * metadata key names, used when reading/writing LIST/INFO chunks.
 * Note "IPRT" and "ITRK" both map to "track"; terminated by { 0 }. */
const AVMetadataConv ff_riff_info_conv[] = {
    { "IART", "artist"     },
    { "ICMT", "comment"    },
    { "ICOP", "copyright"  },
    { "ICRD", "date"       },
    { "IGNR", "genre"      },
    { "ILNG", "language"   },
    { "INAM", "title"      },
    { "IPRD", "album"      },
    { "IPRT", "track"      },
    { "ITRK", "track"      },
    { "ISFT", "encoder"    },
    { "ISMP", "timecode"   },
    { "ITCH", "encoded_by" },
    { 0 },
};
 
/**
 * Public accessor for the RIFF video FourCC table.
 * @return the AV_CODEC_ID_NONE-terminated video codec tag table
 */
const struct AVCodecTag *avformat_get_riff_video_tags(void)
{
    return ff_codec_bmp_tags;
}
 
/**
 * Public accessor for the RIFF/WAVE audio format tag table.
 * @return the AV_CODEC_ID_NONE-terminated audio codec tag table
 */
const struct AVCodecTag *avformat_get_riff_audio_tags(void)
{
    return ff_codec_wav_tags;
}
/contrib/sdk/sources/ffmpeg/libavformat/riff.h
0,0 → 1,98
/*
* RIFF common functions and data
* copyright (c) 2000 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* internal header for RIFF based (de)muxers
* do NOT include this in end user applications
*/
 
#ifndef AVFORMAT_RIFF_H
#define AVFORMAT_RIFF_H
 
#include "libavcodec/avcodec.h"
#include "avio.h"
#include "internal.h"
#include "metadata.h"
 
extern const AVMetadataConv ff_riff_info_conv[];
 
int64_t ff_start_tag(AVIOContext *pb, const char *tag);
void ff_end_tag(AVIOContext *pb, int64_t start);
 
/**
* Read BITMAPINFOHEADER structure and set AVStream codec width, height and
* bits_per_encoded_sample fields. Does not read extradata.
* @return codec tag
*/
int ff_get_bmp_header(AVIOContext *pb, AVStream *st, unsigned *esize);
 
void ff_put_bmp_header(AVIOContext *pb, AVCodecContext *enc, const AVCodecTag *tags, int for_asf);
int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc);
enum AVCodecID ff_wav_codec_get_id(unsigned int tag, int bps);
int ff_get_wav_header(AVIOContext *pb, AVCodecContext *codec, int size);
 
extern const AVCodecTag ff_codec_bmp_tags[]; // exposed through avformat_get_riff_video_tags()
extern const AVCodecTag ff_codec_wav_tags[];
 
void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssize, int *au_scale);
 
int ff_read_riff_info(AVFormatContext *s, int64_t size);
 
/**
* Write all recognized RIFF tags from s->metadata
*/
void ff_riff_write_info(AVFormatContext *s);
 
/**
* Write a single RIFF info tag
*/
void ff_riff_write_info_tag(AVIOContext *pb, const char *tag, const char *str);
 
typedef uint8_t ff_asf_guid[16];
 
typedef struct AVCodecGuid {
enum AVCodecID id;
ff_asf_guid guid;
} AVCodecGuid;
 
extern const AVCodecGuid ff_codec_wav_guids[];
 
#define FF_PRI_GUID \
"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
 
#define FF_ARG_GUID(g) \
g[0], g[1], g[2], g[3], g[4], g[5], g[6], g[7], \
g[8], g[9], g[10], g[11], g[12], g[13], g[14], g[15]
 
#define FF_MEDIASUBTYPE_BASE_GUID \
0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71
 
/**
 * Compare two 16-byte ASF GUIDs.
 * @return 0 when equal, non-zero otherwise (memcmp semantics)
 */
static av_always_inline int ff_guidcmp(const void *g1, const void *g2)
{
    return memcmp(g1, g2, sizeof(ff_asf_guid));
}
 
void ff_get_guid(AVIOContext *s, ff_asf_guid *g);
 
enum AVCodecID ff_codec_guid_get_id(const AVCodecGuid *guids, ff_asf_guid guid);
 
#endif /* AVFORMAT_RIFF_H */
/contrib/sdk/sources/ffmpeg/libavformat/riffdec.c
0,0 → 1,249
/*
* RIFF demuxing functions and data
* Copyright (c) 2000 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/dict.h"
#include "libavutil/error.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "avio_internal.h"
#include "riff.h"
 
/* GUID -> codec ID table for WAVEFORMATEXTENSIBLE SubFormat values that do
 * not follow the MEDIASUBTYPE base-GUID pattern (see parse_waveformatex()).
 * GUID bytes are stored in on-disk (little-endian mixed) order; the list is
 * terminated by the AV_CODEC_ID_NONE entry. */
const AVCodecGuid ff_codec_wav_guids[] = {
    { AV_CODEC_ID_AC3,     { 0x2C, 0x80, 0x6D, 0xE0, 0x46, 0xDB, 0xCF, 0x11, 0xB4, 0xD1, 0x00, 0x80, 0x5F, 0x6C, 0xBB, 0xEA } },
    { AV_CODEC_ID_ATRAC3P, { 0xBF, 0xAA, 0x23, 0xE9, 0x58, 0xCB, 0x71, 0x44, 0xA1, 0x19, 0xFF, 0xFA, 0x01, 0xE4, 0xCE, 0x62 } },
    { AV_CODEC_ID_EAC3,    { 0xAF, 0x87, 0xFB, 0xA7, 0x02, 0x2D, 0xFB, 0x42, 0xA4, 0xD4, 0x05, 0xCD, 0x93, 0x84, 0x3B, 0xDD } },
    { AV_CODEC_ID_MP2,     { 0x2B, 0x80, 0x6D, 0xE0, 0x46, 0xDB, 0xCF, 0x11, 0xB4, 0xD1, 0x00, 0x80, 0x5F, 0x6C, 0xBB, 0xEA } },
    { AV_CODEC_ID_NONE }
};
 
/**
 * Read a 16-byte GUID from the byte stream.
 * On a short read the GUID is cleared to all zeroes so callers never see
 * partially filled data.
 */
void ff_get_guid(AVIOContext *s, ff_asf_guid *g)
{
    int nread;

    av_assert0(sizeof(*g) == 16); /* compile-time truth, optimized out */

    nread = avio_read(s, *g, sizeof(*g));
    if (nread < (int)sizeof(*g))
        memset(*g, 0, sizeof(*g));
}
 
/**
 * Look up a codec ID by GUID in an AV_CODEC_ID_NONE-terminated table.
 * @return the matching codec ID, or AV_CODEC_ID_NONE when no entry matches
 */
enum AVCodecID ff_codec_guid_get_id(const AVCodecGuid *guids, ff_asf_guid guid)
{
    const AVCodecGuid *entry = guids;

    while (entry->id != AV_CODEC_ID_NONE) {
        if (!ff_guidcmp(entry->guid, guid))
            break;
        entry++;
    }
    /* The sentinel's id is AV_CODEC_ID_NONE, so falling off the table
     * naturally reports "not found". */
    return entry->id;
}
 
/* We could be given one of the three possible structures here:
* WAVEFORMAT, PCMWAVEFORMAT or WAVEFORMATEX. Each structure
* is an expansion of the previous one with the fields added
* at the bottom. PCMWAVEFORMAT adds 'WORD wBitsPerSample' and
* WAVEFORMATEX adds 'WORD cbSize' and basically makes itself
* an openended structure.
*/
 
/* Parse the WAVEFORMATEXTENSIBLE-specific tail of a fmt chunk:
 * wValidBitsPerSample, dwChannelMask and the SubFormat GUID.  Assumes the
 * common WAVEFORMATEX fields (including cbSize) have already been read. */
static void parse_waveformatex(AVIOContext *pb, AVCodecContext *c)
{
    ff_asf_guid subformat;
    int bps = avio_rl16(pb); /* wValidBitsPerSample */
    if (bps)
        c->bits_per_coded_sample = bps;

    c->channel_layout = avio_rl32(pb); /* dwChannelMask */

    ff_get_guid(pb, &subformat);
    /* GUIDs derived from a plain wFormatTag share the last 12 bytes of the
     * MEDIASUBTYPE base GUID; the first 4 bytes then carry the tag itself. */
    if (!memcmp(subformat + 4,
                (const uint8_t[]){ FF_MEDIASUBTYPE_BASE_GUID }, 12)) {
        c->codec_tag = AV_RL32(subformat);
        c->codec_id = ff_wav_codec_get_id(c->codec_tag,
                                          c->bits_per_coded_sample);
    } else {
        /* A codec-specific GUID: consult the dedicated GUID table. */
        c->codec_id = ff_codec_guid_get_id(ff_codec_wav_guids, subformat);
        if (!c->codec_id)
            av_log(c, AV_LOG_WARNING,
                   "unknown subformat:"FF_PRI_GUID"\n",
                   FF_ARG_GUID(subformat));
    }
}
 
/**
 * Parse a WAVEFORMAT / PCMWAVEFORMAT / WAVEFORMATEX(TENSIBLE) structure of
 * the given byte size from pb and fill the codec context accordingly
 * (channels, sample rate, bit rate, block align, bits per sample, codec
 * tag/id and extradata).
 * @return 0 on success, a negative AVERROR on invalid data or OOM
 */
int ff_get_wav_header(AVIOContext *pb, AVCodecContext *codec, int size)
{
    int id;

    id                 = avio_rl16(pb); /* wFormatTag */
    codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    codec->channels    = avio_rl16(pb);
    codec->sample_rate = avio_rl32(pb);
    codec->bit_rate    = avio_rl32(pb) * 8; /* nAvgBytesPerSec -> bits/s */
    codec->block_align = avio_rl16(pb);
    if (size == 14) {  /* We're dealing with plain vanilla WAVEFORMAT */
        codec->bits_per_coded_sample = 8;
    } else
        codec->bits_per_coded_sample = avio_rl16(pb);
    if (id == 0xFFFE) {
        /* WAVE_FORMAT_EXTENSIBLE: real tag comes from the SubFormat GUID */
        codec->codec_tag = 0;
    } else {
        codec->codec_tag = id;
        codec->codec_id  = ff_wav_codec_get_id(id,
                                               codec->bits_per_coded_sample);
    }
    if (size >= 18) {  /* We're obviously dealing with WAVEFORMATEX */
        int cbSize = avio_rl16(pb); /* cbSize */
        size  -= 18;
        cbSize = FFMIN(size, cbSize); /* never trust cbSize beyond the chunk */
        if (cbSize >= 22 && id == 0xfffe) { /* WAVEFORMATEXTENSIBLE */
            parse_waveformatex(pb, codec);
            cbSize -= 22;
            size   -= 22;
        }
        if (cbSize > 0) {
            /* remaining cbSize bytes are codec extradata */
            av_free(codec->extradata);
            if (ff_alloc_extradata(codec, cbSize))
                return AVERROR(ENOMEM);
            avio_read(pb, codec->extradata, codec->extradata_size);
            size -= cbSize;
        }

        /* It is possible for the chunk to contain garbage at the end */
        if (size > 0)
            avio_skip(pb, size);
    }
    if (codec->sample_rate <= 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Invalid sample rate: %d\n", codec->sample_rate);
        return AVERROR_INVALIDDATA;
    }
    if (codec->codec_id == AV_CODEC_ID_AAC_LATM) {
        /* Channels and sample_rate values are those prior to applying SBR
         * and/or PS. */
        codec->channels    = 0;
        codec->sample_rate = 0;
    }
    /* override bits_per_coded_sample for G.726 */
    if (codec->codec_id == AV_CODEC_ID_ADPCM_G726 && codec->sample_rate)
        codec->bits_per_coded_sample = codec->bit_rate / codec->sample_rate;

    return 0;
}
 
enum AVCodecID ff_wav_codec_get_id(unsigned int tag, int bps)
{
enum AVCodecID id;
id = ff_codec_get_id(ff_codec_wav_tags, tag);
if (id <= 0)
return id;
 
if (id == AV_CODEC_ID_PCM_S16LE)
id = ff_get_pcm_codec_id(bps, 0, 0, ~1);
else if (id == AV_CODEC_ID_PCM_F32LE)
id = ff_get_pcm_codec_id(bps, 1, 0, 0);
 
if (id == AV_CODEC_ID_ADPCM_IMA_WAV && bps == 8)
id = AV_CODEC_ID_PCM_ZORK;
return id;
}
 
/**
 * Read a BITMAPINFOHEADER from pb and set the stream's width, height and
 * bits_per_encoded_sample.  Extradata, palette and the trailing fields
 * (image size, DPI, palette counts) are consumed but discarded.
 * @param esize if non-NULL, receives the biSize field (header size)
 * @return the biCompression FourCC tag
 */
int ff_get_bmp_header(AVIOContext *pb, AVStream *st, unsigned *esize)
{
    int tag1;
    if(esize) *esize  = avio_rl32(pb); /* biSize */
    else                avio_rl32(pb);
    st->codec->width  = avio_rl32(pb);
    st->codec->height = (int32_t)avio_rl32(pb); /* may be negative: top-down */
    avio_rl16(pb); /* planes */
    st->codec->bits_per_coded_sample = avio_rl16(pb); /* depth */
    tag1 = avio_rl32(pb); /* biCompression: the video FourCC */
    avio_rl32(pb); /* ImageSize */
    avio_rl32(pb); /* XPelsPerMeter */
    avio_rl32(pb); /* YPelsPerMeter */
    avio_rl32(pb); /* ClrUsed */
    avio_rl32(pb); /* ClrImportant */
    return tag1;
}
 
/**
 * Parse a RIFF LIST/INFO payload of the given size and store the contained
 * tags into s->metadata (keys are the 4-char chunk codes; values take
 * ownership via AV_DICT_DONT_STRDUP_VAL).
 * @return 0 on success, AVERROR_EOF/AVERROR_INVALIDDATA/ENOMEM on failure
 */
int ff_read_riff_info(AVFormatContext *s, int64_t size)
{
    int64_t start, end, cur;
    AVIOContext *pb = s->pb;

    start = avio_tell(pb);
    end   = start + size;

    while ((cur = avio_tell(pb)) >= 0 &&
           cur <= end - 8 /* = tag + size */) {
        uint32_t chunk_code;
        int64_t chunk_size;
        char key[5] = { 0 };
        char *value;

        chunk_code = avio_rl32(pb);
        chunk_size = avio_rl32(pb);
        if (url_feof(pb)) {
            /* nonzero values read at EOF mean the subchunk was cut short */
            if (chunk_code || chunk_size) {
                av_log(s, AV_LOG_WARNING, "INFO subchunk truncated\n");
                return AVERROR_INVALIDDATA;
            }
            return AVERROR_EOF;
        }
        if (chunk_size > end ||
            end - chunk_size < cur ||
            chunk_size == UINT_MAX) {
            /* Size field looks bogus; some files are off by one byte.
             * Re-read the header one byte earlier and retry once. */
            avio_seek(pb, -9, SEEK_CUR);
            chunk_code = avio_rl32(pb);
            chunk_size = avio_rl32(pb);
            if (chunk_size > end || end - chunk_size < cur || chunk_size == UINT_MAX) {
                av_log(s, AV_LOG_WARNING, "too big INFO subchunk\n");
                return AVERROR_INVALIDDATA;
            }
        }

        /* chunks are padded to even size */
        chunk_size += (chunk_size & 1);

        if (!chunk_code) {
            /* zero code: skip filler, or bail out on a truncated file */
            if (chunk_size)
                avio_skip(pb, chunk_size);
            else if (pb->eof_reached) {
                av_log(s, AV_LOG_WARNING, "truncated file\n");
                return AVERROR_EOF;
            }
            continue;
        }

        /* +1 and av_mallocz guarantee NUL termination even on short reads */
        value = av_mallocz(chunk_size + 1);
        if (!value) {
            av_log(s, AV_LOG_ERROR,
                   "out of memory, unable to read INFO tag\n");
            return AVERROR(ENOMEM);
        }

        AV_WL32(key, chunk_code);

        if (avio_read(pb, value, chunk_size) != chunk_size) {
            av_log(s, AV_LOG_WARNING,
                   "premature end of file while reading INFO tag\n");
        }

        /* dictionary takes ownership of value, no free needed here */
        av_dict_set(&s->metadata, key, value, AV_DICT_DONT_STRDUP_VAL);
    }

    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/riffenc.c
0,0 → 1,312
/*
* RIFF muxing functions
* Copyright (c) 2000 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/dict.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "avio_internal.h"
#include "riff.h"
 
/**
 * Open a RIFF chunk: write the FourCC tag plus a placeholder size field.
 * @return the position just past the header; hand it to ff_end_tag() so
 *         the real size can be patched in once the chunk body is written
 */
int64_t ff_start_tag(AVIOContext *pb, const char *tag)
{
    ffio_wfourcc(pb, tag);
    avio_wl32(pb, 0); /* size placeholder, fixed up by ff_end_tag() */
    return avio_tell(pb);
}
 
/**
 * Close a RIFF chunk opened with ff_start_tag(): pad to even length,
 * seek back and write the actual chunk size, then return to the end.
 * @param start value returned by the matching ff_start_tag() call
 */
void ff_end_tag(AVIOContext *pb, int64_t start)
{
    int64_t pos;

    /* start is the offset right after the 8-byte header, so it is even
     * whenever chunks are properly aligned */
    av_assert0((start&1) == 0);

    pos = avio_tell(pb);
    if (pos & 1)
        avio_w8(pb, 0); /* RIFF pads odd-sized chunks with one zero byte */
    avio_seek(pb, start - 4, SEEK_SET);
    avio_wl32(pb, (uint32_t)(pos - start)); /* size excludes the padding */
    avio_seek(pb, FFALIGN(pos, 2), SEEK_SET);
}
 
/**
 * Write a WAVEFORMATEX (or WAVEFORMATEXTENSIBLE, when channel layout /
 * sample rate / bit depth require it) header for the given codec context.
 * Codec-specific extradata is appended per the MS ACM conventions.
 * @return the number of bytes written (even, padding included), or -1 when
 *         the codec tag is missing or does not fit in 16 bits
 */
int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
{
    int bps, blkalign, bytespersec, frame_size;
    int hdrsize = 18; /* WAVEFORMATEX base size */
    int waveformatextensible;
    uint8_t temp[256];
    uint8_t *riff_extradata       = temp;
    uint8_t *riff_extradata_start = temp;

    if (!enc->codec_tag || enc->codec_tag > 0xffff)
        return -1;

    /* We use the known constant frame size for the codec if known, otherwise
     * fall back on using AVCodecContext.frame_size, which is not as reliable
     * for indicating packet duration. */
    frame_size = av_get_audio_frame_duration(enc, 0);
    if (!frame_size)
        frame_size = enc->frame_size;

    /* parameters that plain WAVEFORMATEX cannot express */
    waveformatextensible = (enc->channels > 2 && enc->channel_layout) ||
                           enc->sample_rate > 48000 ||
                           av_get_bits_per_sample(enc->codec_id) > 16;

    if (waveformatextensible)
        avio_wl16(pb, 0xfffe); /* WAVE_FORMAT_EXTENSIBLE */
    else
        avio_wl16(pb, enc->codec_tag);

    avio_wl16(pb, enc->channels);
    avio_wl32(pb, enc->sample_rate);
    if (enc->codec_id == AV_CODEC_ID_ATRAC3 ||
        enc->codec_id == AV_CODEC_ID_G723_1 ||
        enc->codec_id == AV_CODEC_ID_MP2    ||
        enc->codec_id == AV_CODEC_ID_MP3    ||
        enc->codec_id == AV_CODEC_ID_GSM_MS) {
        bps = 0; /* these codecs conventionally report 0 bits per sample */
    } else {
        if (!(bps = av_get_bits_per_sample(enc->codec_id))) {
            if (enc->bits_per_coded_sample)
                bps = enc->bits_per_coded_sample;
            else
                bps = 16;  // default to 16
        }
    }
    if (bps != enc->bits_per_coded_sample && enc->bits_per_coded_sample) {
        av_log(enc, AV_LOG_WARNING,
               "requested bits_per_coded_sample (%d) "
               "and actually stored (%d) differ\n",
               enc->bits_per_coded_sample, bps);
    }

    if (enc->codec_id == AV_CODEC_ID_MP2 ||
        enc->codec_id == AV_CODEC_ID_MP3) {
        /* This is wrong, but it seems many demuxers do not work if this
         * is set correctly. */
        blkalign = frame_size;
        // blkalign = 144 * enc->bit_rate/enc->sample_rate;
    } else if (enc->codec_id == AV_CODEC_ID_AC3) {
        blkalign = 3840;                /* maximum bytes per frame */
    } else if (enc->codec_id == AV_CODEC_ID_AAC) {
        blkalign = 768 * enc->channels; /* maximum bytes per frame */
    } else if (enc->codec_id == AV_CODEC_ID_G723_1) {
        blkalign = 24;
    } else if (enc->block_align != 0) { /* specified by the codec */
        blkalign = enc->block_align;
    } else
        blkalign = bps * enc->channels / av_gcd(8, bps);
    if (enc->codec_id == AV_CODEC_ID_PCM_U8 ||
        enc->codec_id == AV_CODEC_ID_PCM_S24LE ||
        enc->codec_id == AV_CODEC_ID_PCM_S32LE ||
        enc->codec_id == AV_CODEC_ID_PCM_F32LE ||
        enc->codec_id == AV_CODEC_ID_PCM_F64LE ||
        enc->codec_id == AV_CODEC_ID_PCM_S16LE) {
        bytespersec = enc->sample_rate * blkalign;
    } else if (enc->codec_id == AV_CODEC_ID_G723_1) {
        bytespersec = 800;
    } else {
        bytespersec = enc->bit_rate / 8;
    }
    avio_wl32(pb, bytespersec); /* bytes per second */
    avio_wl16(pb, blkalign);    /* block align */
    avio_wl16(pb, bps);         /* bits per sample */
    /* Codec-specific extradata is first staged into temp (or pointed at
     * enc->extradata) and written after the cbSize field below. */
    if (enc->codec_id == AV_CODEC_ID_MP3) {
        hdrsize += 12; /* MPEGLAYER3WAVEFORMAT tail */
        bytestream_put_le16(&riff_extradata, 1);    /* wID */
        bytestream_put_le32(&riff_extradata, 2);    /* fdwFlags */
        bytestream_put_le16(&riff_extradata, 1152); /* nBlockSize */
        bytestream_put_le16(&riff_extradata, 1);    /* nFramesPerBlock */
        bytestream_put_le16(&riff_extradata, 1393); /* nCodecDelay */
    } else if (enc->codec_id == AV_CODEC_ID_MP2) {
        hdrsize += 22; /* MPEG1WAVEFORMAT tail */
        /* fwHeadLayer */
        bytestream_put_le16(&riff_extradata, 2);
        /* dwHeadBitrate */
        bytestream_put_le32(&riff_extradata, enc->bit_rate);
        /* fwHeadMode */
        bytestream_put_le16(&riff_extradata, enc->channels == 2 ? 1 : 8);
        /* fwHeadModeExt */
        bytestream_put_le16(&riff_extradata, 0);
        /* wHeadEmphasis */
        bytestream_put_le16(&riff_extradata, 1);
        /* fwHeadFlags */
        bytestream_put_le16(&riff_extradata, 16);
        /* dwPTSLow */
        bytestream_put_le32(&riff_extradata, 0);
        /* dwPTSHigh */
        bytestream_put_le32(&riff_extradata, 0);
    } else if (enc->codec_id == AV_CODEC_ID_G723_1) {
        hdrsize += 20;
        bytestream_put_le32(&riff_extradata, 0x9ace0002); /* extradata needed for msacm g723.1 codec */
        bytestream_put_le32(&riff_extradata, 0xaea2f732);
        bytestream_put_le16(&riff_extradata, 0xacde);
    } else if (enc->codec_id == AV_CODEC_ID_GSM_MS ||
               enc->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV) {
        hdrsize += 2;
        /* wSamplesPerBlock */
        bytestream_put_le16(&riff_extradata, frame_size);
    } else if (enc->extradata_size) {
        riff_extradata_start = enc->extradata;
        riff_extradata       = enc->extradata + enc->extradata_size;
        hdrsize             += enc->extradata_size;
    }
    /* write WAVEFORMATEXTENSIBLE extensions */
    if (waveformatextensible) {
        hdrsize += 22;
        /* 22 is WAVEFORMATEXTENSIBLE size */
        avio_wl16(pb, riff_extradata - riff_extradata_start + 22);
        /* ValidBitsPerSample || SamplesPerBlock || Reserved */
        avio_wl16(pb, bps);
        /* dwChannelMask */
        avio_wl32(pb, enc->channel_layout);
        /* GUID + next 3: the MEDIASUBTYPE base GUID with the tag in front */
        avio_wl32(pb, enc->codec_tag);
        avio_wl32(pb, 0x00100000);
        avio_wl32(pb, 0xAA000080);
        avio_wl32(pb, 0x719B3800);
    } else {
        avio_wl16(pb, riff_extradata - riff_extradata_start); /* cbSize */
    }
    avio_write(pb, riff_extradata_start, riff_extradata - riff_extradata_start);
    if (hdrsize & 1) {
        /* pad to even size, as RIFF requires */
        hdrsize++;
        avio_w8(pb, 0);
    }

    return hdrsize;
}
 
/* BITMAPINFOHEADER header */
/* BITMAPINFOHEADER header */
/* Write a BITMAPINFOHEADER (40 bytes) followed by the codec extradata.
 * @param for_asf when non-zero, skip the RIFF even-size padding byte
 *                (ASF does its own alignment) */
void ff_put_bmp_header(AVIOContext *pb, AVCodecContext *enc,
                       const AVCodecTag *tags, int for_asf)
{
    /* size */
    avio_wl32(pb, 40 + enc->extradata_size);
    avio_wl32(pb, enc->width);
    //We always store RGB TopDown
    avio_wl32(pb, enc->codec_tag ? enc->height : -enc->height);
    /* planes */
    avio_wl16(pb, 1);
    /* depth */
    avio_wl16(pb, enc->bits_per_coded_sample ? enc->bits_per_coded_sample : 24);
    /* compression type */
    avio_wl32(pb, enc->codec_tag);
    /* biSizeImage: uncompressed size estimate, rounded up to whole bytes */
    avio_wl32(pb, (enc->width * enc->height * (enc->bits_per_coded_sample ? enc->bits_per_coded_sample : 24)+7) / 8);
    avio_wl32(pb, 0); /* XPelsPerMeter */
    avio_wl32(pb, 0); /* YPelsPerMeter */
    avio_wl32(pb, 0); /* ClrUsed */
    avio_wl32(pb, 0); /* ClrImportant */

    avio_write(pb, enc->extradata, enc->extradata_size);

    if (!for_asf && enc->extradata_size & 1)
        avio_w8(pb, 0);
}
 
/**
 * Derive the RIFF/AVI per-stream timing triple from a codec context:
 * au_scale/au_rate form the (reduced) time base fraction and au_ssize is
 * the access-unit byte size (the stream's block_align).
 */
void ff_parse_specific_params(AVCodecContext *stream, int *au_rate,
                              int *au_ssize, int *au_scale)
{
    int gcd;
    int audio_frame_size;

    /* We use the known constant frame size for the codec if known, otherwise
     * fall back on using AVCodecContext.frame_size, which is not as reliable
     * for indicating packet duration. */
    audio_frame_size = av_get_audio_frame_duration(stream, 0);
    if (!audio_frame_size)
        audio_frame_size = stream->frame_size;

    *au_ssize = stream->block_align;
    if (audio_frame_size && stream->sample_rate) {
        /* audio with a known frame duration: samples-per-frame over rate */
        *au_scale = audio_frame_size;
        *au_rate  = stream->sample_rate;
    } else if (stream->codec_type == AVMEDIA_TYPE_VIDEO ||
               stream->codec_type == AVMEDIA_TYPE_DATA ||
               stream->codec_type == AVMEDIA_TYPE_SUBTITLE) {
        /* non-audio streams use the codec time base directly */
        *au_scale = stream->time_base.num;
        *au_rate  = stream->time_base.den;
    } else {
        /* fallback for audio without a frame size: bytes over byte rate */
        *au_scale = stream->block_align ? stream->block_align * 8 : 8;
        *au_rate  = stream->bit_rate ? stream->bit_rate :
                    8 * stream->sample_rate;
    }
    gcd        = av_gcd(*au_scale, *au_rate);
    *au_scale /= gcd;
    *au_rate  /= gcd;
}
 
/**
 * Write one RIFF INFO subchunk: FourCC tag, chunk size, NUL-terminated
 * string value, plus a padding byte when the payload size is odd.
 * Empty strings are skipped entirely.
 */
void ff_riff_write_info_tag(AVIOContext *pb, const char *tag, const char *str)
{
    int len = strlen(str);

    if (!len)
        return;

    len++;                      /* account for the trailing NUL */
    ffio_wfourcc(pb, tag);
    avio_wl32(pb, len);
    avio_put_str(pb, str);      /* writes str including the NUL: len bytes */
    if (len & 1)
        avio_w8(pb, 0);         /* chunks must have even total size */
}
 
/* FourCCs of all INFO tags the muxer will emit, in output order;
 * terminated by an empty string. */
static const char riff_tags[][5] = {
    "IARL", "IART", "ICMS", "ICMT", "ICOP", "ICRD", "ICRP", "IDIM", "IDPI",
    "IENG", "IGNR", "IKEY", "ILGT", "ILNG", "IMED", "INAM", "IPLT", "IPRD",
    "IPRT", "ITRK", "ISBJ", "ISFT", "ISHP", "ISMP", "ISRC", "ISRF", "ITCH",
    { 0 }
};
 
/* Return 1 when s->metadata contains at least one key from riff_tags
 * (case-sensitive match), 0 otherwise. */
static int riff_has_valid_tags(AVFormatContext *s)
{
    const char (*tag)[5];

    for (tag = riff_tags; **tag; tag++) {
        if (av_dict_get(s->metadata, *tag, NULL, AV_DICT_MATCH_CASE))
            return 1;
    }

    return 0;
}
 
/**
 * Write a LIST/INFO chunk containing every recognized metadata tag from
 * s->metadata.  Emits nothing when no recognized tag is present.
 */
void ff_riff_write_info(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    int i;
    int64_t list_pos;
    AVDictionaryEntry *t = NULL;

    /* map generic metadata keys back to their RIFF INFO FourCCs */
    ff_metadata_conv(&s->metadata, ff_riff_info_conv, NULL);

    /* writing empty LIST is not nice and may cause problems */
    if (!riff_has_valid_tags(s))
        return;

    list_pos = ff_start_tag(pb, "LIST");
    ffio_wfourcc(pb, "INFO");
    for (i = 0; *riff_tags[i]; i++)
        if ((t = av_dict_get(s->metadata, riff_tags[i],
                             NULL, AV_DICT_MATCH_CASE)))
            ff_riff_write_info_tag(s->pb, t->key, t->value);
    ff_end_tag(pb, list_pos);
}
/contrib/sdk/sources/ffmpeg/libavformat/rl2.c
0,0 → 1,298
/*
* RL2 Format Demuxer
* Copyright (c) 2008 Sascha Sommer (saschasommer@freenet.de)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* RL2 file demuxer
* @file
* @author Sascha Sommer (saschasommer@freenet.de)
* @see http://wiki.multimedia.cx/index.php?title=RL2
*
* extradata:
* 2 byte le initial drawing offset within 320x200 viewport
* 4 byte le number of used colors
* 256 * 3 bytes rgb palette
* optional background_frame
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "avformat.h"
#include "internal.h"
 
#define EXTRADATA1_SIZE (6 + 256 * 3) ///< video base, clr, palette
 
#define FORM_TAG MKBETAG('F', 'O', 'R', 'M')
#define RLV2_TAG MKBETAG('R', 'L', 'V', '2')
#define RLV3_TAG MKBETAG('R', 'L', 'V', '3')
 
/** Demuxer state: per-stream read positions into the sample index
 *  (slot 0 = video, slot 1 = audio). */
typedef struct Rl2DemuxContext {
    unsigned int index_pos[2];   ///< indexes in the sample tables
} Rl2DemuxContext;
 
 
/**
 * Check whether the probe buffer contains an RL2 file.
 * An RL2 file starts with a big-endian 'FORM' tag and carries either
 * an 'RLV2' or an 'RLV3' signature at byte offset 8.
 * @param p probe buffer
 * @return 0 when the probe buffer does not contain rl2 data, > 0 otherwise
 */
static int rl2_probe(AVProbeData *p)
{
    uint32_t container = AV_RB32(&p->buf[0]);
    uint32_t signature = AV_RB32(&p->buf[8]);

    if (container != FORM_TAG)
        return 0;

    if (signature != RLV2_TAG && signature != RLV3_TAG)
        return 0;

    return AVPROBE_SCORE_MAX;
}
 
/**
 * Read rl2 header data and set up the AVStreams.
 * Parses the fixed-size FORM header, creates the video stream (always
 * 320x200 RL2), optionally an audio stream, reads the per-frame
 * chunk size/offset/audio-size tables and builds the sample index.
 * @param s demuxer context
 * @return 0 on success, AVERROR otherwise
 */
static av_cold int rl2_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVStream *st;
    unsigned int frame_count;
    unsigned int audio_frame_counter = 0;
    unsigned int video_frame_counter = 0;
    unsigned int back_size;
    unsigned short sound_rate;
    unsigned short rate;
    unsigned short channels;
    unsigned short def_sound_size;
    unsigned int signature;
    unsigned int pts_den = 11025; /* video only case */
    unsigned int pts_num = 1103;
    unsigned int* chunk_offset = NULL;
    int* chunk_size = NULL;
    int* audio_size = NULL;
    int i;
    int ret = 0;

    avio_skip(pb,4); /* skip FORM tag */
    back_size = avio_rl32(pb); /**< get size of the background frame */
    signature = avio_rb32(pb);
    avio_skip(pb, 4); /* data size */
    frame_count = avio_rl32(pb);

    /* disallow back_sizes and frame_counts that may lead to overflows later */
    if(back_size > INT_MAX/2 || frame_count > INT_MAX / sizeof(uint32_t))
        return AVERROR_INVALIDDATA;

    avio_skip(pb, 2); /* encoding method */
    sound_rate = avio_rl16(pb);
    rate = avio_rl16(pb);
    channels = avio_rl16(pb);
    def_sound_size = avio_rl16(pb);

    /** setup video stream */
    st = avformat_new_stream(s, NULL);
    if(!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_RL2;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = 320;
    st->codec->height = 200;

    /** allocate and fill extradata (palette, base offset, optional
     *  background frame for RLV3 files) */
    st->codec->extradata_size = EXTRADATA1_SIZE;

    if(signature == RLV3_TAG && back_size > 0)
        st->codec->extradata_size += back_size;

    if(ff_alloc_extradata(st->codec, st->codec->extradata_size))
        return AVERROR(ENOMEM);

    if(avio_read(pb,st->codec->extradata,st->codec->extradata_size) !=
       st->codec->extradata_size)
        return AVERROR(EIO);

    /** setup audio stream if present */
    if(sound_rate){
        if (!channels || channels > 42) {
            av_log(s, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
            return AVERROR_INVALIDDATA;
        }

        /* audio present: time base derived from sound chunk size / rate */
        pts_num = def_sound_size;
        pts_den = rate;

        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = AV_CODEC_ID_PCM_U8;
        st->codec->codec_tag = 1;
        st->codec->channels = channels;
        st->codec->bits_per_coded_sample = 8;
        st->codec->sample_rate = rate;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels *
            st->codec->bits_per_coded_sample / 8;
        avpriv_set_pts_info(st,32,1,rate);
    }

    avpriv_set_pts_info(s->streams[0], 32, pts_num, pts_den);

    chunk_size   = av_malloc(frame_count * sizeof(uint32_t));
    audio_size   = av_malloc(frame_count * sizeof(uint32_t));
    chunk_offset = av_malloc(frame_count * sizeof(uint32_t));

    if(!chunk_size || !audio_size || !chunk_offset){
        av_free(chunk_size);
        av_free(audio_size);
        av_free(chunk_offset);
        return AVERROR(ENOMEM);
    }

    /** read offset and size tables */
    for(i=0; i < frame_count;i++)
        chunk_size[i] = avio_rl32(pb);
    for(i=0; i < frame_count;i++)
        chunk_offset[i] = avio_rl32(pb);
    for(i=0; i < frame_count;i++)
        audio_size[i] = avio_rl32(pb) & 0xFFFF;

    /** build the sample index; each chunk starts with its audio data,
     *  the video slice follows at chunk_offset + audio_size */
    for(i=0;i<frame_count;i++){
        if(chunk_size[i] < 0 || audio_size[i] > chunk_size[i]){
            ret = AVERROR_INVALIDDATA;
            break;
        }

        if(sound_rate && audio_size[i]){
            av_add_index_entry(s->streams[1], chunk_offset[i],
                audio_frame_counter,audio_size[i], 0, AVINDEX_KEYFRAME);
            audio_frame_counter += audio_size[i] / channels;
        }
        av_add_index_entry(s->streams[0], chunk_offset[i] + audio_size[i],
            video_frame_counter,chunk_size[i]-audio_size[i],0,AVINDEX_KEYFRAME);
        ++video_frame_counter;
    }


    av_free(chunk_size);
    av_free(audio_size);
    av_free(chunk_offset);

    return ret;
}
 
/**
 * Read a single audio or video packet.
 * Picks, across both streams, the pending index entry with the lowest
 * file position so packets are returned in storage order.
 * @param s demuxer context
 * @param pkt the packet to be filled
 * @return 0 on success, AVERROR otherwise
 */
static int rl2_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    Rl2DemuxContext *rl2 = s->priv_data;
    AVIOContext *pb = s->pb;
    AVIndexEntry *sample = NULL;
    int i;
    int ret = 0;
    int stream_id = -1;
    int64_t pos = INT64_MAX;

    /** check if there is a valid video or audio entry that can be used */
    for(i=0; i<s->nb_streams; i++){
        if(rl2->index_pos[i] < s->streams[i]->nb_index_entries
              && s->streams[i]->index_entries[ rl2->index_pos[i] ].pos < pos){
            sample = &s->streams[i]->index_entries[ rl2->index_pos[i] ];
            pos= sample->pos;
            stream_id= i;
        }
    }

    if(stream_id == -1)
        return AVERROR_EOF;

    ++rl2->index_pos[stream_id];

    /** position the stream (will probably be there anyway) */
    avio_seek(pb, sample->pos, SEEK_SET);

    /** fill the packet */
    ret = av_get_packet(pb, pkt, sample->size);
    if(ret != sample->size){
        av_free_packet(pkt);
        return AVERROR(EIO);
    }

    pkt->stream_index = stream_id;
    pkt->pts = sample->timestamp;

    return ret;
}
 
/**
 * Seek to a new timestamp.
 * After locating the index entry for the requested stream, the other
 * stream's read position is re-synchronized to the closest earlier
 * entry so playback stays aligned.
 * @param s demuxer context
 * @param stream_index index of the stream that should be seeked
 * @param timestamp wanted timestamp
 * @param flags direction and seeking mode
 * @return 0 on success, -1 otherwise
 */
static int rl2_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *st = s->streams[stream_index];
    Rl2DemuxContext *rl2 = s->priv_data;
    int i;
    int index = av_index_search_timestamp(st, timestamp, flags);
    if(index < 0)
        return -1;

    rl2->index_pos[stream_index] = index;
    timestamp = st->index_entries[index].timestamp;

    for(i=0; i < s->nb_streams; i++){
        AVStream *st2 = s->streams[i];
        /* rescale the target into each stream's own time base and seek
         * backward so no stream ends up ahead of the requested point */
        index = av_index_search_timestamp(st2,
                    av_rescale_q(timestamp, st->time_base, st2->time_base),
                    flags | AVSEEK_FLAG_BACKWARD);

        if(index < 0)
            index = 0;

        rl2->index_pos[i] = index;
    }

    return 0;
}
 
/** RL2 demuxer definition registered with libavformat. */
AVInputFormat ff_rl2_demuxer = {
    .name           = "rl2",
    .long_name      = NULL_IF_CONFIG_SMALL("RL2"),
    .priv_data_size = sizeof(Rl2DemuxContext),
    .read_probe     = rl2_probe,
    .read_header    = rl2_read_header,
    .read_packet    = rl2_read_packet,
    .read_seek      = rl2_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/rm.c
0,0 → 1,47
/*
* "Real" compatible muxer and demuxer common code.
* Copyright (c) 2009 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "rm.h"
 
/** Metadata tag names in the order they appear in a RealMedia CONT chunk. */
const char * const ff_rm_metadata[4] = {
    "title",
    "author",
    "copyright",
    "comment"
};
 
/** fourcc -> codec id mapping for RealMedia video and audio streams;
 *  terminated by an AV_CODEC_ID_NONE sentinel. */
const AVCodecTag ff_rm_codec_tags[] = {
    { AV_CODEC_ID_RV10,   MKTAG('R','V','1','0') },
    { AV_CODEC_ID_RV20,   MKTAG('R','V','2','0') },
    { AV_CODEC_ID_RV20,   MKTAG('R','V','T','R') },
    { AV_CODEC_ID_RV30,   MKTAG('R','V','3','0') },
    { AV_CODEC_ID_RV40,   MKTAG('R','V','4','0') },
    { AV_CODEC_ID_AC3,    MKTAG('d','n','e','t') },
    { AV_CODEC_ID_RA_144, MKTAG('l','p','c','J') },
    { AV_CODEC_ID_RA_288, MKTAG('2','8','_','8') },
    { AV_CODEC_ID_COOK,   MKTAG('c','o','o','k') },
    { AV_CODEC_ID_ATRAC3, MKTAG('a','t','r','c') },
    { AV_CODEC_ID_SIPR,   MKTAG('s','i','p','r') },
    { AV_CODEC_ID_AAC,    MKTAG('r','a','a','c') },
    { AV_CODEC_ID_AAC,    MKTAG('r','a','c','p') },
    { AV_CODEC_ID_RALF,   MKTAG('L','S','D',':') },
    { AV_CODEC_ID_NONE },
};
/contrib/sdk/sources/ffmpeg/libavformat/rm.h
0,0 → 1,95
/*
* "Real" compatible muxer and demuxer.
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RM_H
#define AVFORMAT_RM_H

#include "avformat.h"
#include "internal.h"

/** CONT-chunk metadata tag names (title/author/copyright/comment). */
extern const char * const ff_rm_metadata[4];
/** fourcc -> codec id table shared by the RM muxer and demuxer. */
extern const AVCodecTag ff_rm_codec_tags[];

/** Opaque per-stream demuxing state; the definition lives in rmdec.c. */
typedef struct RMStream RMStream;

RMStream *ff_rm_alloc_rmstream (void);
void      ff_rm_free_rmstream  (RMStream *rms);

/** input format for Realmedia-style RTSP streams */
extern AVInputFormat ff_rdt_demuxer;

/**
 * Read the MDPR chunk, which contains stream-specific codec initialization
 * parameters.
 *
 * @param s context containing RMContext and AVIOContext for stream reading
 * @param pb context to read the data from
 * @param st the stream that the MDPR chunk belongs to and where to store the
 *           parameters read from the chunk into
 * @param rst real-specific stream information
 * @param codec_data_size size of the MDPR chunk
 * @return 0 on success, errno codes on error
 */
int ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
                               AVStream *st, RMStream *rst,
                               int codec_data_size, const uint8_t *mime);

/**
 * Parse one rm-stream packet from the input bytestream.
 *
 * @param s context containing RMContext and AVIOContext for stream reading
 * @param pb context to read the data from
 * @param st stream to which the packet to be read belongs
 * @param rst Real-specific stream information
 * @param len packet length to read from the input
 * @param pkt packet location to store the parsed packet data
 * @param seq pointer to an integer containing the sequence number, may be
 *            updated
 * @param flags the packet flags
 * @param ts timestamp of the current packet
 * @return <0 on error, 0 if a packet was placed in the pkt pointer. A
 *         value >0 means that no data was placed in pkt, but that cached
 *         data is available by calling ff_rm_retrieve_cache().
 */
int ff_rm_parse_packet (AVFormatContext *s, AVIOContext *pb,
                        AVStream *st, RMStream *rst, int len,
                        AVPacket *pkt, int *seq, int flags, int64_t ts);

/**
 * Retrieve one cached packet from the rm-context. The real container can
 * store several packets (as interpreted by the codec) in a single container
 * packet, which means the demuxer holds some back when the first container
 * packet is parsed and returned. The result is that rm->audio_pkt_cnt is
 * a positive number, the amount of cached packets. Using this function, each
 * of those packets can be retrieved sequentially.
 *
 * @param s context containing RMContext and AVIOContext for stream reading
 * @param pb context to read the data from
 * @param st stream that this packet belongs to
 * @param rst Real-specific stream information
 * @param pkt location to store the packet data
 * @return the number of samples left for subsequent calls to this same
 *         function, or 0 if all samples have been retrieved.
 */
int ff_rm_retrieve_cache (AVFormatContext *s, AVIOContext *pb,
                          AVStream *st, RMStream *rst, AVPacket *pkt);

#endif /* AVFORMAT_RM_H */
/contrib/sdk/sources/ffmpeg/libavformat/rmdec.c
0,0 → 1,1064
/*
* "Real" compatible demuxer.
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "rmsipr.h"
#include "rm.h"
 
#define DEINT_ID_GENR MKTAG('g', 'e', 'n', 'r') ///< interleaving for Cooker/ATRAC
#define DEINT_ID_INT0 MKTAG('I', 'n', 't', '0') ///< no interleaving needed
#define DEINT_ID_INT4 MKTAG('I', 'n', 't', '4') ///< interleaving for 28.8
#define DEINT_ID_SIPR MKTAG('s', 'i', 'p', 'r') ///< interleaving for Sipro
#define DEINT_ID_VBRF MKTAG('v', 'b', 'r', 'f') ///< VBR case for AAC
#define DEINT_ID_VBRS MKTAG('v', 'b', 'r', 's') ///< VBR case for AAC
 
/** Per-stream demuxing state (opaque outside this file, see rm.h). */
struct RMStream {
    AVPacket pkt;      ///< place to store merged video frame / reordered audio data
    int videobufsize;  ///< current assembled frame size
    int videobufpos;   ///< position for the next slice in the video buffer
    int curpic_num;    ///< picture number of current frame
    int cur_slice, slices;
    int64_t pktpos;    ///< first slice position in file
    /// Audio descrambling matrix parameters
    int64_t audiotimestamp; ///< Audio packet timestamp
    int sub_packet_cnt; // Subpacket counter, used while reading
    int sub_packet_size, sub_packet_h, coded_framesize; ///< Descrambling parameters from container
    int audio_framesize; ///< Audio frame size from container
    int sub_packet_lengths[16]; ///< Length of each subpacket
    int32_t deint_id;  ///< deinterleaver used in audio stream
};
 
/** Demuxer-level state shared across all streams of the file. */
typedef struct {
    int nb_packets;       ///< number of packets announced in the DATA header
    int old_format;       ///< set when demuxing the pre-RMF ".ra" format
    int current_stream;   ///< stream id of a partially consumed chunk
    int remaining_len;    ///< bytes still unread in the current chunk
    int audio_stream_num; ///< Stream number for audio packets
    int audio_pkt_cnt;    ///< Output packet counter
} RMDemuxContext;
 
/**
 * Read len bytes from pb into buf, truncating to at most buf_size - 1
 * characters and NUL-terminating whenever buf_size > 0. All len bytes
 * are always consumed from the stream, even when truncated.
 */
static inline void get_strl(AVIOContext *pb, char *buf, int buf_size, int len)
{
    char *dst = buf;
    int n;

    for (n = 0; n < len; n++) {
        char c = avio_r8(pb);
        if (n < buf_size - 1)
            *dst++ = c;
    }
    if (buf_size > 0)
        *dst = '\0';
}
 
/** Read a Pascal-style string: an 8-bit length prefix followed by the bytes. */
static void get_str8(AVIOContext *pb, char *buf, int buf_size)
{
    get_strl(pb, buf, buf_size, avio_r8(pb));
}
 
/**
 * Read size bytes of codec extradata from pb into avctx.
 * @return 0 on success; -1 if size is implausibly large (>= 2^24);
 *         AVERROR(ENOMEM) on allocation failure; AVERROR(EIO) on short read
 */
static int rm_read_extradata(AVIOContext *pb, AVCodecContext *avctx, unsigned size)
{
    if (size >= 1<<24) /* sanity cap against corrupt headers */
        return -1;
    if (ff_alloc_extradata(avctx, size))
        return AVERROR(ENOMEM);
    avctx->extradata_size = avio_read(pb, avctx->extradata, size);
    if (avctx->extradata_size != size)
        return AVERROR(EIO);
    return 0;
}
 
/**
 * Read the four metadata strings (title, author, copyright, comment)
 * and store them in the context's metadata dictionary.
 * @param wide non-zero if each string has a 16-bit big-endian length
 *             prefix, zero for an 8-bit prefix
 */
static void rm_read_metadata(AVFormatContext *s, AVIOContext *pb, int wide)
{
    char buf[1024];
    int i;

    for (i=0; i<FF_ARRAY_ELEMS(ff_rm_metadata); i++) {
        int len = wide ? avio_rb16(pb) : avio_r8(pb);
        get_strl(pb, buf, sizeof(buf), len);
        av_dict_set(&s->metadata, ff_rm_metadata[i], buf, 0);
    }
}
 
RMStream *ff_rm_alloc_rmstream (void)
{
RMStream *rms = av_mallocz(sizeof(RMStream));
rms->curpic_num = -1;
return rms;
}
 
/** Release the packet buffer held by an RMStream (does not free rms itself). */
void ff_rm_free_rmstream (RMStream *rms)
{
    av_free_packet(&rms->pkt);
}
 
/**
 * Parse a RealAudio stream header (".ra" version 3, 4 or 5) and fill in
 * the codec parameters and descrambling/deinterleaving state.
 * @param read_all non-zero when parsing a standalone .ra file (header
 *                 carries trailing metadata); zero when embedded in an
 *                 MDPR chunk of an RMF file
 * @return 0 on success, a negative value on error
 */
static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb,
                                     AVStream *st, RMStream *ast, int read_all)
{
    char buf[256];
    uint32_t version;
    int ret;

    /* ra type header */
    version = avio_rb16(pb); /* version */
    if (version == 3) {
        /* version 3: always 14.4 (RA_144) mono at 8 kHz */
        unsigned bytes_per_minute;
        int header_size = avio_rb16(pb);
        int64_t startpos = avio_tell(pb);
        avio_skip(pb, 8);
        bytes_per_minute = avio_rb16(pb);
        avio_skip(pb, 4);
        rm_read_metadata(s, pb, 0);
        if ((startpos + header_size) >= avio_tell(pb) + 2) {
            // fourcc (should always be "lpcJ")
            avio_r8(pb);
            get_str8(pb, buf, sizeof(buf));
        }
        // Skip extra header crap (this should never happen)
        if ((startpos + header_size) > avio_tell(pb))
            avio_skip(pb, header_size + startpos - avio_tell(pb));
        if (bytes_per_minute)
            st->codec->bit_rate = 8LL * bytes_per_minute / 60;
        st->codec->sample_rate = 8000;
        st->codec->channels = 1;
        st->codec->channel_layout = AV_CH_LAYOUT_MONO;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = AV_CODEC_ID_RA_144;
        ast->deint_id = DEINT_ID_INT0;
    } else {
        int flavor, sub_packet_h, coded_framesize, sub_packet_size;
        int codecdata_length;
        unsigned bytes_per_minute;
        /* old version (4) */
        avio_skip(pb, 2); /* unused */
        avio_rb32(pb); /* .ra4 */
        avio_rb32(pb); /* data size */
        avio_rb16(pb); /* version2 */
        avio_rb32(pb); /* header size */
        flavor= avio_rb16(pb); /* add codec info / flavor */
        ast->coded_framesize = coded_framesize = avio_rb32(pb); /* coded frame size */
        avio_rb32(pb); /* ??? */
        bytes_per_minute = avio_rb32(pb);
        if (version == 4) {
            if (bytes_per_minute)
                st->codec->bit_rate = 8LL * bytes_per_minute / 60;
        }
        avio_rb32(pb); /* ??? */
        ast->sub_packet_h = sub_packet_h = avio_rb16(pb); /* 1 */
        st->codec->block_align= avio_rb16(pb); /* frame size */
        ast->sub_packet_size = sub_packet_size = avio_rb16(pb); /* sub packet size */
        avio_rb16(pb); /* ??? */
        if (version == 5) {
            avio_rb16(pb); avio_rb16(pb); avio_rb16(pb);
        }
        st->codec->sample_rate = avio_rb16(pb);
        avio_rb32(pb);
        st->codec->channels = avio_rb16(pb);
        if (version == 5) {
            /* version 5 stores interleaver id and fourcc directly */
            ast->deint_id = avio_rl32(pb);
            avio_read(pb, buf, 4);
            buf[4] = 0;
        } else {
            /* version 4 stores them as length-prefixed strings */
            get_str8(pb, buf, sizeof(buf)); /* desc */
            ast->deint_id = AV_RL32(buf);
            get_str8(pb, buf, sizeof(buf)); /* desc */
        }
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag  = AV_RL32(buf);
        st->codec->codec_id  = ff_codec_get_id(ff_rm_codec_tags,
                                               st->codec->codec_tag);

        switch (st->codec->codec_id) {
        case AV_CODEC_ID_AC3:
            st->need_parsing = AVSTREAM_PARSE_FULL;
            break;
        case AV_CODEC_ID_RA_288:
            st->codec->extradata_size= 0;
            ast->audio_framesize = st->codec->block_align;
            st->codec->block_align = coded_framesize;
            break;
        case AV_CODEC_ID_COOK:
            st->need_parsing = AVSTREAM_PARSE_HEADERS;
            /* fallthrough: COOK shares the extradata path below */
        case AV_CODEC_ID_ATRAC3:
        case AV_CODEC_ID_SIPR:
            if (read_all) {
                codecdata_length = 0;
            } else {
                avio_rb16(pb); avio_r8(pb);
                if (version == 5)
                    avio_r8(pb);
                codecdata_length = avio_rb32(pb);
                /* overflow check: length + padding must not wrap */
                if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                    av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                    return -1;
                }
            }

            ast->audio_framesize = st->codec->block_align;
            if (st->codec->codec_id == AV_CODEC_ID_SIPR) {
                if (flavor > 3) {
                    av_log(s, AV_LOG_ERROR, "bad SIPR file flavor %d\n",
                           flavor);
                    return -1;
                }
                st->codec->block_align = ff_sipr_subpk_size[flavor];
            } else {
                if(sub_packet_size <= 0){
                    av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n");
                    return -1;
                }
                st->codec->block_align = ast->sub_packet_size;
            }
            if ((ret = rm_read_extradata(pb, st->codec, codecdata_length)) < 0)
                return ret;

            break;
        case AV_CODEC_ID_AAC:
            avio_rb16(pb); avio_r8(pb);
            if (version == 5)
                avio_r8(pb);
            codecdata_length = avio_rb32(pb);
            /* overflow check: length + padding must not wrap */
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }
            if (codecdata_length >= 1) {
                avio_r8(pb);
                if ((ret = rm_read_extradata(pb, st->codec, codecdata_length - 1)) < 0)
                    return ret;
            }
            break;
        default:
            av_strlcpy(st->codec->codec_name, buf, sizeof(st->codec->codec_name));
        }
        /* interleaved modes buffer a whole matrix of sub-packets */
        if (ast->deint_id == DEINT_ID_INT4 ||
            ast->deint_id == DEINT_ID_GENR ||
            ast->deint_id == DEINT_ID_SIPR) {
            if (st->codec->block_align <= 0 ||
                ast->audio_framesize * sub_packet_h > (unsigned)INT_MAX ||
                ast->audio_framesize * sub_packet_h < st->codec->block_align)
                return AVERROR_INVALIDDATA;
            if (av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h) < 0)
                return AVERROR(ENOMEM);
        }
        /* validate per-interleaver parameter consistency */
        switch (ast->deint_id) {
        case DEINT_ID_INT4:
            if (ast->coded_framesize > ast->audio_framesize ||
                sub_packet_h <= 1 ||
                ast->coded_framesize * sub_packet_h > (2 + (sub_packet_h & 1)) * ast->audio_framesize)
                return AVERROR_INVALIDDATA;
            break;
        case DEINT_ID_GENR:
            if (ast->sub_packet_size <= 0 ||
                ast->sub_packet_size > ast->audio_framesize)
                return AVERROR_INVALIDDATA;
            break;
        case DEINT_ID_SIPR:
        case DEINT_ID_INT0:
        case DEINT_ID_VBRS:
        case DEINT_ID_VBRF:
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Unknown interleaver %X\n", ast->deint_id);
            return AVERROR_INVALIDDATA;
        }

        if (read_all) {
            avio_r8(pb);
            avio_r8(pb);
            avio_r8(pb);
            rm_read_metadata(s, pb, 0);
        }
    }
    return 0;
}
 
/**
 * Read the MDPR chunk, which contains stream-specific codec
 * initialization parameters; dispatches on the leading fourcc to the
 * audio (".ra"), RALF ("LSD:"), logical-fileinfo or video ("VIDO")
 * parsing paths. See rm.h for the full contract.
 */
int
ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
                           AVStream *st, RMStream *rst, int codec_data_size, const uint8_t *mime)
{
    unsigned int v;
    int size;
    int64_t codec_pos;
    int ret;

    avpriv_set_pts_info(st, 64, 1, 1000);
    codec_pos = avio_tell(pb);
    v = avio_rb32(pb);
    if (v == MKTAG(0xfd, 'a', 'r', '.')) {
        /* ra type header */
        if (rm_read_audio_stream_info(s, pb, st, rst, 0))
            return -1;
    } else if (v == MKBETAG('L', 'S', 'D', ':')) {
        /* RALF: the whole chunk, including the tag, is extradata */
        avio_seek(pb, -4, SEEK_CUR);
        if ((ret = rm_read_extradata(pb, st->codec, codec_data_size)) < 0)
            return ret;

        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag  = AV_RL32(st->codec->extradata);
        st->codec->codec_id   = ff_codec_get_id(ff_rm_codec_tags,
                                                st->codec->codec_tag);
    } else if(mime && !strcmp(mime, "logical-fileinfo")){
        /* metadata pseudo-stream: extract properties, drop the stream */
        int stream_count, rule_count, property_count, i;
        ff_free_stream(s, st);
        if (avio_rb16(pb) != 0) {
            av_log(s, AV_LOG_WARNING, "Unsupported version\n");
            goto skip;
        }
        stream_count = avio_rb16(pb);
        avio_skip(pb, 6*stream_count);
        rule_count = avio_rb16(pb);
        avio_skip(pb, 2*rule_count);
        property_count = avio_rb16(pb);
        for(i=0; i<property_count; i++){
            uint8_t name[128], val[128];
            avio_rb32(pb);
            if (avio_rb16(pb) != 0) {
                av_log(s, AV_LOG_WARNING, "Unsupported Name value property version\n");
                goto skip; //FIXME skip just this one
            }
            get_str8(pb, name, sizeof(name));
            switch(avio_rb32(pb)) {
            case 2: get_strl(pb, val, sizeof(val), avio_rb16(pb));
                av_dict_set(&s->metadata, name, val, 0);
                break;
            default: avio_skip(pb, avio_rb16(pb));
            }
        }
    } else {
        int fps;
        if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) {
        fail1:
            av_log(s, AV_LOG_WARNING, "Unsupported stream type %08x\n", v);
            goto skip;
        }
        st->codec->codec_tag = avio_rl32(pb);
        st->codec->codec_id  = ff_codec_get_id(ff_rm_codec_tags,
                                               st->codec->codec_tag);
        av_dlog(s, "%X %X\n", st->codec->codec_tag, MKTAG('R', 'V', '2', '0'));
        if (st->codec->codec_id == AV_CODEC_ID_NONE)
            goto fail1;
        st->codec->width  = avio_rb16(pb);
        st->codec->height = avio_rb16(pb);
        avio_skip(pb, 2); // looks like bits per sample
        avio_skip(pb, 4); // always zero?
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
        fps = avio_rb32(pb);

        /* the rest of the chunk is codec extradata */
        if ((ret = rm_read_extradata(pb, st->codec, codec_data_size - (avio_tell(pb) - codec_pos))) < 0)
            return ret;

        if (fps > 0) {
            /* fps is a 16.16 fixed-point value */
            av_reduce(&st->avg_frame_rate.den, &st->avg_frame_rate.num,
                      0x10000, fps, (1 << 30) - 1);
#if FF_API_R_FRAME_RATE
            st->r_frame_rate = st->avg_frame_rate;
#endif
        } else if (s->error_recognition & AV_EF_EXPLODE) {
            av_log(s, AV_LOG_ERROR, "Invalid framerate\n");
            return AVERROR_INVALIDDATA;
        }
    }

skip:
    /* skip codec info */
    size = avio_tell(pb) - codec_pos;
    avio_skip(pb, codec_data_size - size);

    return 0;
}
 
/** Parse the chained INDX chunks and add their entries to the matching
 * streams' seek indexes. This function assumes that the demuxer has
 * already seeked to the start of the INDX chunk, and will bail out if
 * not. Only forward (linear) chains of index chunks are supported.
 * @return 0 on success, -1 on a malformed or non-linear index */
static int rm_read_index(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    unsigned int size, n_pkts, str_id, next_off, n, pos, pts;
    AVStream *st;

    do {
        if (avio_rl32(pb) != MKTAG('I','N','D','X'))
            return -1;
        size = avio_rb32(pb);
        if (size < 20)
            return -1;
        avio_skip(pb, 2);
        n_pkts = avio_rb32(pb);
        str_id = avio_rb16(pb);
        next_off = avio_rb32(pb);
        /* find the stream this index chunk belongs to */
        for (n = 0; n < s->nb_streams; n++)
            if (s->streams[n]->id == str_id) {
                st = s->streams[n];
                break;
            }
        if (n == s->nb_streams) {
            av_log(s, AV_LOG_ERROR,
                   "Invalid stream index %d for index at pos %"PRId64"\n",
                   str_id, avio_tell(pb));
            goto skip;
        } else if ((avio_size(pb) - avio_tell(pb)) / 14 < n_pkts) {
            /* each index entry is 14 bytes; claimed count cannot fit */
            av_log(s, AV_LOG_ERROR,
                   "Nr. of packets in packet index for stream index %d "
                   "exceeds filesize (%"PRId64" at %"PRId64" = %"PRId64")\n",
                   str_id, avio_size(pb), avio_tell(pb),
                   (avio_size(pb) - avio_tell(pb)) / 14);
            goto skip;
        }

        for (n = 0; n < n_pkts; n++) {
            avio_skip(pb, 2);
            pts = avio_rb32(pb);
            pos = avio_rb32(pb);
            avio_skip(pb, 4); /* packet no. */

            av_add_index_entry(st, pos, pts, 0, 0, AVINDEX_KEYFRAME);
        }

 skip:
        if (next_off && avio_tell(pb) < next_off &&
            avio_seek(pb, next_off, SEEK_SET) < 0) {
            av_log(s, AV_LOG_ERROR,
                   "Non-linear index detected, not supported\n");
            return -1;
        }
    } while (next_off);

    return 0;
}
 
/**
 * Parse the very old standalone ".ra" (audio-only) header variant.
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rm_read_header_old(AVFormatContext *s)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;

    rm->old_format = 1;
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->priv_data = ff_rm_alloc_rmstream();
    /* Fix: the allocation was previously used unchecked; report OOM
     * instead of passing a possibly NULL stream context downstream. */
    if (!st->priv_data)
        return AVERROR(ENOMEM);
    return rm_read_audio_stream_info(s, s->pb, st, st->priv_data, 1);
}
 
/**
 * Parse an RMF file header: walk the top-level chunks (PROP, CONT,
 * MDPR, DATA), create one AVStream per MDPR chunk and, if present and
 * seekable, load the INDX seek index.
 * @return 0 on success, a negative value on error
 */
static int rm_read_header(AVFormatContext *s)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;
    AVIOContext *pb = s->pb;
    unsigned int tag;
    int tag_size;
    unsigned int start_time, duration;
    unsigned int data_off = 0, indx_off = 0;
    char buf[128], mime[128];
    int flags = 0;

    tag = avio_rl32(pb);
    if (tag == MKTAG('.', 'r', 'a', 0xfd)) {
        /* very old .ra format */
        return rm_read_header_old(s);
    } else if (tag != MKTAG('.', 'R', 'M', 'F')) {
        return AVERROR(EIO);
    }

    tag_size = avio_rb32(pb);
    avio_skip(pb, tag_size - 8);

    for(;;) {
        if (url_feof(pb))
            return -1;
        tag = avio_rl32(pb);
        tag_size = avio_rb32(pb);
        avio_rb16(pb);
        av_dlog(s, "tag=%c%c%c%c (%08x) size=%d\n",
                (tag      ) & 0xff,
                (tag >>  8) & 0xff,
                (tag >> 16) & 0xff,
                (tag >> 24) & 0xff,
                tag,
                tag_size);
        /* every chunk but DATA must be at least a full 10-byte header */
        if (tag_size < 10 && tag != MKTAG('D', 'A', 'T', 'A'))
            return -1;
        switch(tag) {
        case MKTAG('P', 'R', 'O', 'P'):
            /* file header */
            avio_rb32(pb); /* max bit rate */
            avio_rb32(pb); /* avg bit rate */
            avio_rb32(pb); /* max packet size */
            avio_rb32(pb); /* avg packet size */
            avio_rb32(pb); /* nb packets */
            duration = avio_rb32(pb); /* duration */
            s->duration = av_rescale(duration, AV_TIME_BASE, 1000);
            avio_rb32(pb); /* preroll */
            indx_off = avio_rb32(pb); /* index offset */
            data_off = avio_rb32(pb); /* data offset */
            avio_rb16(pb); /* nb streams */
            flags = avio_rb16(pb); /* flags */
            break;
        case MKTAG('C', 'O', 'N', 'T'):
            rm_read_metadata(s, pb, 1);
            break;
        case MKTAG('M', 'D', 'P', 'R'):
            st = avformat_new_stream(s, NULL);
            if (!st)
                return AVERROR(ENOMEM);
            st->id = avio_rb16(pb);
            avio_rb32(pb); /* max bit rate */
            st->codec->bit_rate = avio_rb32(pb); /* bit rate */
            avio_rb32(pb); /* max packet size */
            avio_rb32(pb); /* avg packet size */
            start_time = avio_rb32(pb); /* start time */
            avio_rb32(pb); /* preroll */
            duration = avio_rb32(pb); /* duration */
            st->start_time = start_time;
            st->duration = duration;
            if(duration>0)
                s->duration = AV_NOPTS_VALUE;
            get_str8(pb, buf, sizeof(buf)); /* desc */
            get_str8(pb, mime, sizeof(mime)); /* mimetype */
            st->codec->codec_type = AVMEDIA_TYPE_DATA;
            /* NOTE(review): allocation result is not checked here — confirm
             * against the behavior of ff_rm_alloc_rmstream on OOM */
            st->priv_data = ff_rm_alloc_rmstream();
            if (ff_rm_read_mdpr_codecdata(s, s->pb, st, st->priv_data,
                                          avio_rb32(pb), mime) < 0)
                return -1;
            break;
        case MKTAG('D', 'A', 'T', 'A'):
            goto header_end;
        default:
            /* unknown tag: skip it */
            avio_skip(pb, tag_size - 10);
            break;
        }
    }
 header_end:
    rm->nb_packets = avio_rb32(pb); /* number of packets */
    if (!rm->nb_packets && (flags & 4))
        rm->nb_packets = 3600 * 25;
    avio_rb32(pb); /* next data header */

    if (!data_off)
        data_off = avio_tell(pb) - 18;
    if (indx_off && pb->seekable && !(s->flags & AVFMT_FLAG_IGNIDX) &&
        avio_seek(pb, indx_off, SEEK_SET) >= 0) {
        rm_read_index(s);
        avio_seek(pb, data_off + 18, SEEK_SET);
    }

    return 0;
}
 
/**
 * Read a variable-length number from the bytestream.
 * The top data bit of the first 16-bit big-endian word selects the
 * form: when set (value >= 0x4000 after masking) the remaining 14 bits
 * are the number; otherwise a second 16-bit word follows and the two
 * halves combine into a 30-bit number. *len is decremented by the
 * number of bytes consumed (2 or 4).
 */
static int get_num(AVIOContext *pb, int *len)
{
    int hi, lo;

    hi = avio_rb16(pb) & 0x7FFF;
    *len -= 2;
    if (hi >= 0x4000)
        return hi - 0x4000;

    lo = avio_rb16(pb);
    *len -= 2;
    return (hi << 16) | lo;
}
 
/* multiple of 20 bytes for ra144 (ugly) */
#define RAW_PACKET_SIZE 1000
 
/**
 * Resynchronize to the next packet chunk belonging to a known stream.
 * Scans the bytestream one byte at a time (treating the rolling 32-bit
 * state as a candidate chunk size/tag), skips embedded INDX chunks and
 * packets for unknown stream ids, and fills in the packet header
 * fields of the located chunk.
 * @return the payload length of the located chunk, or -1 on EOF
 */
static int sync(AVFormatContext *s, int64_t *timestamp, int *flags, int *stream_index, int64_t *pos){
    RMDemuxContext *rm = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint32_t state=0xFFFFFFFF;

    while(!url_feof(pb)){
        int len, num, i;
        *pos= avio_tell(pb) - 3;
        if(rm->remaining_len > 0){
            /* continue a partially consumed chunk */
            num= rm->current_stream;
            len= rm->remaining_len;
            *timestamp = AV_NOPTS_VALUE;
            *flags= 0;
        }else{
            state= (state<<8) + avio_r8(pb);

            if(state == MKBETAG('I', 'N', 'D', 'X')){
                int n_pkts, expected_len;
                len = avio_rb32(pb);
                avio_skip(pb, 2);
                n_pkts = avio_rb32(pb);
                expected_len = 20 + n_pkts * 14;
                if (len == 20)
                    /* some files don't add index entries to chunk size... */
                    len = expected_len;
                else if (len != expected_len)
                    av_log(s, AV_LOG_WARNING,
                           "Index size %d (%d pkts) is wrong, should be %d.\n",
                           len, n_pkts, expected_len);
                len -= 14; // we already read part of the index header
                if(len<0)
                    continue;
                goto skip;
            } else if (state == MKBETAG('D','A','T','A')) {
                av_log(s, AV_LOG_WARNING,
                       "DATA tag in middle of chunk, file may be broken.\n");
            }

            /* a plausible chunk size is 13..65535 bytes */
            if(state > (unsigned)0xFFFF || state <= 12)
                continue;
            len=state - 12;
            state= 0xFFFFFFFF;

            num = avio_rb16(pb);
            *timestamp = avio_rb32(pb);
            avio_r8(pb); /* reserved */
            *flags = avio_r8(pb); /* flags */
        }
        /* match the stream id against our streams */
        for(i=0;i<s->nb_streams;i++) {
            st = s->streams[i];
            if (num == st->id)
                break;
        }
        if (i == s->nb_streams) {
skip:
            /* skip packet if unknown number */
            avio_skip(pb, len);
            rm->remaining_len = 0;
            continue;
        }
        *stream_index= i;

        return len;
    }
    return -1;
}
 
/**
 * Assemble a video frame from one or more slices.
 * The 2-bit type in the slice header selects: 0 = partial frame slice,
 * 1 = whole frame, 2 = last slice of a frame, 3 = one whole frame
 * packed inside a larger packet. Whole frames are returned directly;
 * slices are accumulated in vst->pkt until the frame completes.
 * @return 0 when a complete frame was stored in pkt, 1 when more
 *         slices are needed, negative on error
 */
static int rm_assemble_video_frame(AVFormatContext *s, AVIOContext *pb,
                                   RMDemuxContext *rm, RMStream *vst,
                                   AVPacket *pkt, int len, int *pseq,
                                   int64_t *timestamp)
{
    int hdr;
    int seq = 0, pic_num = 0, len2 = 0, pos = 0; //init to silence compiler warning
    int type;
    int ret;

    hdr = avio_r8(pb); len--;
    type = hdr >> 6;

    if(type != 3){  // not frame as a part of packet
        seq = avio_r8(pb); len--;
    }
    if(type != 1){  // not whole frame
        len2 = get_num(pb, &len);
        pos  = get_num(pb, &len);
        pic_num = avio_r8(pb); len--;
    }
    if(len<0) {
        av_log(s, AV_LOG_ERROR, "Insufficient data\n");
        return -1;
    }
    rm->remaining_len = len;
    if(type&1){     // frame, not slice
        if(type == 3){  // frame as a part of packet
            len= len2;
            *timestamp = pos;
        }
        if(rm->remaining_len < len) {
            av_log(s, AV_LOG_ERROR, "Insufficient remaining len\n");
            return -1;
        }
        rm->remaining_len -= len;
        if(av_new_packet(pkt, len + 9) < 0)
            return AVERROR(EIO);
        /* 9-byte prefix: slice count byte + one 8-byte slice descriptor */
        pkt->data[0] = 0;
        AV_WL32(pkt->data + 1, 1);
        AV_WL32(pkt->data + 5, 0);
        if ((ret = avio_read(pb, pkt->data + 9, len)) != len) {
            av_free_packet(pkt);
            av_log(s, AV_LOG_ERROR, "Failed to read %d bytes\n", len);
            return ret < 0 ? ret : AVERROR(EIO);
        }
        return 0;
    }
    //now we have to deal with single slice

    *pseq = seq;
    if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){
        /* first slice of a new picture: (re)allocate the assembly buffer */
        if (len2 > ffio_limit(pb, len2)) {
            av_log(s, AV_LOG_ERROR, "Impossibly sized packet\n");
            return AVERROR_INVALIDDATA;
        }
        vst->slices = ((hdr & 0x3F) << 1) + 1;
        vst->videobufsize = len2 + 8*vst->slices + 1;
        av_free_packet(&vst->pkt); //FIXME this should be output.
        if(av_new_packet(&vst->pkt, vst->videobufsize) < 0)
            return AVERROR(ENOMEM);
        memset(vst->pkt.data, 0, vst->pkt.size);
        vst->videobufpos = 8*vst->slices + 1;
        vst->cur_slice = 0;
        vst->curpic_num = pic_num;
        vst->pktpos = avio_tell(pb);
    }
    if(type == 2)
        len = FFMIN(len, pos);

    if(++vst->cur_slice > vst->slices) {
        av_log(s, AV_LOG_ERROR, "cur slice %d, too large\n", vst->cur_slice);
        return 1;
    }
    if(!vst->pkt.data)
        return AVERROR(ENOMEM);
    /* record this slice's descriptor (validity flag + offset) */
    AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1);
    AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice, vst->videobufpos - 8*vst->slices - 1);
    if(vst->videobufpos + len > vst->videobufsize) {
        av_log(s, AV_LOG_ERROR, "outside videobufsize\n");
        return 1;
    }
    if (avio_read(pb, vst->pkt.data + vst->videobufpos, len) != len)
        return AVERROR(EIO);
    vst->videobufpos += len;
    rm->remaining_len-= len;

    if (type == 2 || vst->videobufpos == vst->videobufsize) {
        /* frame complete: hand the assembled packet over to the caller */
        vst->pkt.data[0] = vst->cur_slice-1;
        *pkt= vst->pkt;
        vst->pkt.data= NULL;
        vst->pkt.size= 0;
        vst->pkt.buf = NULL;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
        vst->pkt.destruct = NULL;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        if(vst->slices != vst->cur_slice) //FIXME find out how to set slices correct from the begin
            memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices,
                vst->videobufpos - 1 - 8*vst->slices);
        pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices);
        pkt->pts = AV_NOPTS_VALUE;
        pkt->pos = vst->pktpos;
        vst->slices = 0;
        return 0;
    }

    return 1;
}
 
/**
 * Byte-swap an AC-3 payload in place.
 *
 * AC-3 data in RealMedia files is stored with the two bytes of each
 * 16-bit word reversed; undo that so downstream decoders see normal
 * byte order. Packets for any other codec are left untouched.
 */
static inline void
rm_ac3_swap_bytes (AVStream *st, AVPacket *pkt)
{
    uint8_t *p;
    int i;

    if (st->codec->codec_id != AV_CODEC_ID_AC3)
        return;

    p = pkt->data;
    /* Same loop bound as always used here: an odd-sized packet touches the
     * padding byte after the payload, which AVPacket guarantees exists. */
    for (i = 0; i < pkt->size; i += 2) {
        uint8_t tmp = p[0];
        p[0] = p[1];
        p[1] = tmp;
        p += 2;
    }
}
 
/**
 * Parse one RealMedia packet payload.
 *
 * For video, forwards to rm_assemble_video_frame() which reassembles
 * slices into whole frames. For audio, either deinterleaves subpackets
 * into the per-stream staging packet (GENR/INT4/SIPR), reads the VBR
 * subpacket length table (VBRF/VBRS), or reads the payload directly.
 *
 * @return for audio: the number of queued subpackets still to be fetched
 *         via ff_rm_retrieve_cache(); 0 when a packet was produced;
 *         -1 when only a partial frame/subpacket was consumed; <-1 on error.
 */
int
ff_rm_parse_packet (AVFormatContext *s, AVIOContext *pb,
                    AVStream *st, RMStream *ast, int len, AVPacket *pkt,
                    int *seq, int flags, int64_t timestamp)
{
    RMDemuxContext *rm = s->priv_data;
    int ret;

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        rm->current_stream= st->id;
        ret = rm_assemble_video_frame(s, pb, rm, ast, pkt, len, seq, &timestamp);
        if(ret)
            return ret < 0 ? ret : -1; //got partial frame or error
    } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        if ((ast->deint_id == DEINT_ID_GENR) ||
            (ast->deint_id == DEINT_ID_INT4) ||
            (ast->deint_id == DEINT_ID_SIPR)) {
            int x;
            int sps = ast->sub_packet_size;
            int cfs = ast->coded_framesize;
            int h = ast->sub_packet_h;   /* interleaver height (rows)    */
            int y = ast->sub_packet_cnt; /* current row in the interleave */
            int w = ast->audio_framesize;

            /* A keyframe packet restarts the interleave block. */
            if (flags & 2)
                y = ast->sub_packet_cnt = 0;
            /* Remember the timestamp of the first row; it stamps the
             * first reassembled packet handed out from the cache. */
            if (!y)
                ast->audiotimestamp = timestamp;

            /* Scatter this packet's data into the staging buffer at the
             * positions dictated by the scheme-specific interleave. */
            switch (ast->deint_id) {
                case DEINT_ID_INT4:
                    for (x = 0; x < h/2; x++)
                        avio_read(pb, ast->pkt.data+x*2*w+y*cfs, cfs);
                    break;
                case DEINT_ID_GENR:
                    for (x = 0; x < w/sps; x++)
                        avio_read(pb, ast->pkt.data+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), sps);
                    break;
                case DEINT_ID_SIPR:
                    avio_read(pb, ast->pkt.data + y * w, w);
                    break;
            }

            /* Not all rows collected yet: report "partial". */
            if (++(ast->sub_packet_cnt) < h)
                return -1;
            /* SIPR additionally needs its 4-bit block reordering once the
             * whole interleave block is present. */
            if (ast->deint_id == DEINT_ID_SIPR)
                ff_rm_reorder_sipr_data(ast->pkt.data, h, w);

            ast->sub_packet_cnt = 0;
            rm->audio_stream_num = st->index;
            rm->audio_pkt_cnt = h * w / st->codec->block_align;
        } else if ((ast->deint_id == DEINT_ID_VBRF) ||
                   (ast->deint_id == DEINT_ID_VBRS)) {
            int x;
            rm->audio_stream_num = st->index;
            /* High nibble of the 16-bit header is the subpacket count;
             * it is followed by one 16-bit length per subpacket. */
            ast->sub_packet_cnt = (avio_rb16(pb) & 0xf0) >> 4;
            if (ast->sub_packet_cnt) {
                for (x = 0; x < ast->sub_packet_cnt; x++)
                    ast->sub_packet_lengths[x] = avio_rb16(pb);
                rm->audio_pkt_cnt = ast->sub_packet_cnt;
                ast->audiotimestamp = timestamp;
            } else
                return -1;
        } else {
            av_get_packet(pb, pkt, len);
            rm_ac3_swap_bytes(st, pkt);
        }
    } else
        av_get_packet(pb, pkt, len);

    pkt->stream_index = st->index;

/* Dead debug code kept for reference (note: `timestamp` is not a pointer
 * here, so this block would not compile as-is). */
#if 0
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        if(st->codec->codec_id == AV_CODEC_ID_RV20){
            int seq= 128*(pkt->data[2]&0x7F) + (pkt->data[3]>>1);
            av_log(s, AV_LOG_DEBUG, "%d %"PRId64" %d\n", *timestamp, *timestamp*512LL/25, seq);

            seq |= (timestamp&~0x3FFF);
            if(seq - timestamp >  0x2000) seq -= 0x4000;
            if(seq - timestamp < -0x2000) seq += 0x4000;
        }
    }
#endif

    pkt->pts = timestamp;
    if (flags & 2)
        pkt->flags |= AV_PKT_FLAG_KEY;

    return st->codec->codec_type == AVMEDIA_TYPE_AUDIO ? rm->audio_pkt_cnt : 0;
}
 
/**
 * Hand out one audio packet previously queued by ff_rm_parse_packet().
 *
 * For VBRF/VBRS the payload is read from the input using the stored
 * subpacket length table; for the interleaved schemes one block_align
 * slice is copied out of the staging buffer. The cached timestamp is
 * attached to the first packet of a batch only.
 *
 * @return the number of queued packets remaining after this one.
 */
int
ff_rm_retrieve_cache (AVFormatContext *s, AVIOContext *pb,
                      AVStream *st, RMStream *ast, AVPacket *pkt)
{
    RMDemuxContext *rm = s->priv_data;

    av_assert0 (rm->audio_pkt_cnt > 0);

    if (ast->deint_id == DEINT_ID_VBRF ||
        ast->deint_id == DEINT_ID_VBRS)
        av_get_packet(pb, pkt, ast->sub_packet_lengths[ast->sub_packet_cnt - rm->audio_pkt_cnt]);
    else {
        if(av_new_packet(pkt, st->codec->block_align) < 0)
            return AVERROR(ENOMEM);
        memcpy(pkt->data, ast->pkt.data + st->codec->block_align * //FIXME avoid this
               (ast->sub_packet_h * ast->audio_framesize / st->codec->block_align - rm->audio_pkt_cnt),
               st->codec->block_align);
    }
    rm->audio_pkt_cnt--;
    /* Only the first packet of the batch carries the cached timestamp and
     * the keyframe flag; the rest are stamped AV_NOPTS_VALUE. */
    if ((pkt->pts = ast->audiotimestamp) != AV_NOPTS_VALUE) {
        ast->audiotimestamp = AV_NOPTS_VALUE;
        pkt->flags = AV_PKT_FLAG_KEY;
    } else
        pkt->flags = 0;
    pkt->stream_index = st->index;

    return rm->audio_pkt_cnt;
}
 
/**
 * Read the next demuxed packet.
 *
 * Queued audio subpackets are drained first; otherwise a new chunk is
 * read either in old-format mode (single stream, fixed-size reads) or by
 * resyncing on a packet header. Loops until a packet survives the
 * stream-discard filter.
 */
static int rm_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st = NULL; // init to silence compiler warning
    int i, len, res, seq = 1;
    int64_t timestamp, pos;
    int flags;

    for (;;) {
        if (rm->audio_pkt_cnt) {
            // If there are queued audio packet return them first
            st = s->streams[rm->audio_stream_num];
            res = ff_rm_retrieve_cache(s, s->pb, st, st->priv_data, pkt);
            if(res < 0)
                return res;
            flags = 0;
        } else {
            if (rm->old_format) {
                RMStream *ast;

                st = s->streams[0];
                ast = st->priv_data;
                timestamp = AV_NOPTS_VALUE;
                /* Old .ra files: read a raw chunk, or half an interleave
                 * block when the stream is interleaved. */
                len = !ast->audio_framesize ? RAW_PACKET_SIZE :
                    ast->coded_framesize * ast->sub_packet_h / 2;
                flags = (seq++ == 1) ? 2 : 0;
                pos = avio_tell(s->pb);
            } else {
                len=sync(s, &timestamp, &flags, &i, &pos);
                if (len > 0)
                    st = s->streams[i];
            }

            if(len<0 || url_feof(s->pb))
                return AVERROR(EIO);

            res = ff_rm_parse_packet (s, s->pb, st, st->priv_data, len, pkt,
                                      &seq, flags, timestamp);
            if (res < -1)
                return res;
            /* Index keyframes (flag bit 1) at the first slice of a frame. */
            if((flags&2) && (seq&0x7F) == 1)
                av_add_index_entry(st, pos, timestamp, 0, 0, AVINDEX_KEYFRAME);
            if (res)
                continue;
        }

        if(  (st->discard >= AVDISCARD_NONKEY && !(flags&2))
           || st->discard >= AVDISCARD_ALL){
            av_free_packet(pkt);
        } else
            break;
    }

    return 0;
}
 
/**
 * Free the per-stream RMStream state attached to each AVStream.
 */
static int rm_read_close(AVFormatContext *s)
{
    int n;

    for (n = 0; n < s->nb_streams; n++)
        ff_rm_free_rmstream(s->streams[n]->priv_data);

    return 0;
}
 
/**
 * Probe for RealMedia content: either a RealMedia container
 * (".RMF" followed by two zero bytes) or a raw RealAudio stream
 * (".ra" followed by 0xfd).
 */
static int rm_probe(AVProbeData *p)
{
    const unsigned char *b = p->buf;

    if (b[0] != '.')
        return 0;
    if (b[1] == 'R' && b[2] == 'M' && b[3] == 'F' &&
        b[4] == 0   && b[5] == 0)
        return AVPROBE_SCORE_MAX;
    if (b[1] == 'r' && b[2] == 'a' && b[3] == 0xfd)
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
/**
 * Scan forward from *ppos for the next keyframe on stream_index and
 * return its timestamp (used by the generic binary-search seek code).
 * Keyframes found on other streams along the way are indexed too.
 *
 * @return the keyframe's dts, or AV_NOPTS_VALUE on failure/old format.
 */
static int64_t rm_read_dts(AVFormatContext *s, int stream_index,
                           int64_t *ppos, int64_t pos_limit)
{
    RMDemuxContext *rm = s->priv_data;
    int64_t pos, dts;
    int stream_index2, flags, len, h;

    pos = *ppos;

    if(rm->old_format)
        return AV_NOPTS_VALUE;

    if (avio_seek(s->pb, pos, SEEK_SET) < 0)
        return AV_NOPTS_VALUE;

    rm->remaining_len=0;
    for(;;){
        int seq=1;
        AVStream *st;

        len=sync(s, &dts, &flags, &stream_index2, &pos);
        if(len<0)
            return AV_NOPTS_VALUE;

        st = s->streams[stream_index2];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* Peek at the slice header to recover the sequence number;
             * bit 6 of the first byte means "no sequence byte follows". */
            h= avio_r8(s->pb); len--;
            if(!(h & 0x40)){
                seq = avio_r8(s->pb); len--;
            }
        }

        /* Keyframe (flag bit 1) at the first slice of a frame. */
        if((flags&2) && (seq&0x7F) == 1){
            av_dlog(s, "%d %d-%d %"PRId64" %d\n",
                    flags, stream_index2, stream_index, dts, seq);
            av_add_index_entry(st, pos, dts, 0, 0, AVINDEX_KEYFRAME);
            if(stream_index2 == stream_index)
                break;
        }

        avio_skip(s->pb, len);
    }
    *ppos = pos;
    return dts;
}
 
/**
 * Seek to the given timestamp via the generic binary index search,
 * then drop any queued audio subpackets so demuxing restarts cleanly.
 */
static int rm_read_seek(AVFormatContext *s, int stream_index,
                        int64_t pts, int flags)
{
    RMDemuxContext *rm = s->priv_data;
    int ret = ff_seek_frame_binary(s, stream_index, pts, flags);

    if (ret < 0)
        return -1;
    rm->audio_pkt_cnt = 0;
    return 0;
}
 
 
/* Demuxer definition for RealMedia (.rm) / RealAudio (.ra) files. */
AVInputFormat ff_rm_demuxer = {
    .name           = "rm",
    .long_name      = NULL_IF_CONFIG_SMALL("RealMedia"),
    .priv_data_size = sizeof(RMDemuxContext),
    .read_probe     = rm_probe,
    .read_header    = rm_read_header,
    .read_packet    = rm_read_packet,
    .read_close     = rm_read_close,
    .read_timestamp = rm_read_dts,
    .read_seek      = rm_read_seek,
};
 
/* Skeleton demuxer used for RDT streams: only priv-data allocation and
 * cleanup are defined here. NOTE(review): reading is presumably driven
 * externally (RTSP/RDT glue) — confirm against the rdt code. */
AVInputFormat ff_rdt_demuxer = {
    .name           = "rdt",
    .long_name      = NULL_IF_CONFIG_SMALL("RDT demuxer"),
    .priv_data_size = sizeof(RMDemuxContext),
    .read_close     = rm_read_close,
    .flags          = AVFMT_NOFILE,
};
/contrib/sdk/sources/ffmpeg/libavformat/rmenc.c
0,0 → 1,478
/*
* "Real" compatible muxer.
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "avio_internal.h"
#include "rm.h"
#include "libavutil/dict.h"
 
/* Per-stream accounting collected while muxing; the totals are written
 * into the PROP/MDPR headers by rv10_write_header(). */
typedef struct {
    int nb_packets;        /* number of packets written so far */
    int packet_total_size; /* sum of all packet payload sizes */
    int packet_max_size;   /* largest single packet written */
    /* codec related output */
    int bit_rate;          /* stream bitrate, copied from the codec context */
    float frame_rate;      /* frames (audio: coded frames) per second */
    int nb_frames;    /* current frame number */
    int total_frames; /* total number of frames */
    int num;               /* stream number written into packet headers */
    AVCodecContext *enc;   /* codec context of the corresponding AVStream */
} StreamInfo;
 
/* Muxer state: one StreamInfo per stream (at most two) plus shortcut
 * pointers to the audio and video entries in streams[]. */
typedef struct {
    StreamInfo streams[2];
    StreamInfo *audio_stream, *video_stream;
    int data_pos; /* position of the data after the header */
} RMMuxContext;
 
/* Preroll / buffer duration written into the PROP and MDPR headers, in ms. */
#define BUFFER_DURATION 0
 
 
/**
 * Write a string preceded by its length as a 16-bit big-endian value.
 */
static void put_str(AVIOContext *s, const char *tag)
{
    size_t n = strlen(tag);

    avio_wb16(s, n);
    avio_write(s, (const unsigned char *)tag, n);
}
 
/**
 * Write a string preceded by its length as a single byte.
 */
static void put_str8(AVIOContext *s, const char *tag)
{
    size_t n = strlen(tag);

    avio_w8(s, n);
    avio_write(s, (const unsigned char *)tag, n);
}
 
/**
 * Write the complete RealMedia file header: .RMF signature, PROP chunk
 * with aggregate statistics, CONT metadata chunk, one MDPR chunk per
 * stream (including the embedded audio ".ra4" / video "VIDO" codec
 * headers) and the DATA chunk header.
 *
 * Called once from rm_write_header() with zero sizes, and again from
 * rm_write_trailer() with the real data_size to patch the header.
 *
 * NOTE(review): the data-offset patching below works through s->buf_ptr
 * and therefore assumes the whole header fits in the AVIOContext buffer
 * without an intervening flush — confirm buffer sizing if headers grow.
 *
 * @return 0 on success, -1 if an audio stream lacks a codec tag.
 */
static int rv10_write_header(AVFormatContext *ctx,
                             int data_size, int index_pos)
{
    RMMuxContext *rm = ctx->priv_data;
    AVIOContext *s = ctx->pb;
    StreamInfo *stream;
    unsigned char *data_offset_ptr, *start_ptr;
    const char *desc, *mimetype;
    int nb_packets, packet_total_size, packet_max_size, size, packet_avg_size, i;
    int bit_rate, v, duration, flags, data_pos;
    AVDictionaryEntry *tag;

    start_ptr = s->buf_ptr;

    ffio_wfourcc(s, ".RMF");
    avio_wb32(s,18); /* header size */
    avio_wb16(s,0);
    avio_wb32(s,0);
    avio_wb32(s,4 + ctx->nb_streams); /* num headers */

    ffio_wfourcc(s,"PROP");
    avio_wb32(s, 50);
    avio_wb16(s, 0);
    packet_max_size = 0;
    packet_total_size = 0;
    nb_packets = 0;
    bit_rate = 0;
    duration = 0;
    /* Aggregate per-stream statistics for the PROP chunk. */
    for(i=0;i<ctx->nb_streams;i++) {
        StreamInfo *stream = &rm->streams[i];
        bit_rate += stream->bit_rate;
        if (stream->packet_max_size > packet_max_size)
            packet_max_size = stream->packet_max_size;
        nb_packets += stream->nb_packets;
        packet_total_size += stream->packet_total_size;
        /* select maximum duration */
        v = (int) (1000.0 * (float)stream->total_frames / stream->frame_rate);
        if (v > duration)
            duration = v;
    }
    avio_wb32(s, bit_rate); /* max bit rate */
    avio_wb32(s, bit_rate); /* avg bit rate */
    avio_wb32(s, packet_max_size); /* max packet size */
    if (nb_packets > 0)
        packet_avg_size = packet_total_size / nb_packets;
    else
        packet_avg_size = 0;
    avio_wb32(s, packet_avg_size); /* avg packet size */
    avio_wb32(s, nb_packets); /* num packets */
    avio_wb32(s, duration); /* duration */
    avio_wb32(s, BUFFER_DURATION); /* preroll */
    avio_wb32(s, index_pos); /* index offset */
    /* compute the data offset */
    data_offset_ptr = s->buf_ptr;
    avio_wb32(s, 0); /* data offset : will be patched after */
    avio_wb16(s, ctx->nb_streams); /* num streams */
    flags = 1 | 2; /* save allowed & perfect play */
    if (!s->seekable)
        flags |= 4; /* live broadcast */
    avio_wb16(s, flags);

    /* comments */

    ffio_wfourcc(s,"CONT");
    size = 4 * 2 + 10;
    for(i=0; i<FF_ARRAY_ELEMS(ff_rm_metadata); i++) {
        tag = av_dict_get(ctx->metadata, ff_rm_metadata[i], NULL, 0);
        if(tag) size += strlen(tag->value);
    }
    avio_wb32(s,size);
    avio_wb16(s,0);
    for(i=0; i<FF_ARRAY_ELEMS(ff_rm_metadata); i++) {
        tag = av_dict_get(ctx->metadata, ff_rm_metadata[i], NULL, 0);
        put_str(s, tag ? tag->value : "");
    }

    /* One MDPR (media properties) chunk per stream. */
    for(i=0;i<ctx->nb_streams;i++) {
        int codec_data_size;

        stream = &rm->streams[i];

        if (stream->enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            desc = "The Audio Stream";
            mimetype = "audio/x-pn-realaudio";
            codec_data_size = 73;
        } else {
            desc = "The Video Stream";
            mimetype = "video/x-pn-realvideo";
            codec_data_size = 34;
        }

        ffio_wfourcc(s,"MDPR");
        size = 10 + 9 * 4 + strlen(desc) + strlen(mimetype) + codec_data_size;
        avio_wb32(s, size);
        avio_wb16(s, 0);

        avio_wb16(s, i); /* stream number */
        avio_wb32(s, stream->bit_rate); /* max bit rate */
        avio_wb32(s, stream->bit_rate); /* avg bit rate */
        avio_wb32(s, stream->packet_max_size); /* max packet size */
        if (stream->nb_packets > 0)
            packet_avg_size = stream->packet_total_size /
                stream->nb_packets;
        else
            packet_avg_size = 0;
        avio_wb32(s, packet_avg_size); /* avg packet size */
        avio_wb32(s, 0); /* start time */
        avio_wb32(s, BUFFER_DURATION); /* preroll */
        /* duration */
        if (!s->seekable || !stream->total_frames)
            avio_wb32(s, (int)(3600 * 1000));
        else
            avio_wb32(s, (int)(stream->total_frames * 1000 / stream->frame_rate));
        put_str8(s, desc);
        put_str8(s, mimetype);
        avio_wb32(s, codec_data_size);

        if (stream->enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            int coded_frame_size, fscode, sample_rate;
            sample_rate = stream->enc->sample_rate;
            coded_frame_size = (stream->enc->bit_rate *
                                stream->enc->frame_size) / (8 * sample_rate);
            /* audio codec info */
            avio_write(s, ".ra", 3);
            avio_w8(s, 0xfd);
            avio_wb32(s, 0x00040000); /* version */
            ffio_wfourcc(s, ".ra4");
            avio_wb32(s, 0x01b53530); /* stream length */
            avio_wb16(s, 4); /* unknown */
            avio_wb32(s, 0x39); /* header size */

            switch(sample_rate) {
            case 48000:
            case 24000:
            case 12000:
                fscode = 1;
                break;
            default:
            case 44100:
            case 22050:
            case 11025:
                fscode = 2;
                break;
            case 32000:
            case 16000:
            case 8000:
                fscode = 3;
            }
            avio_wb16(s, fscode); /* codec additional info, for AC-3, seems
                                     to be a frequency code */
            /* special hack to compensate rounding errors... */
            if (coded_frame_size == 557)
                coded_frame_size--;
            avio_wb32(s, coded_frame_size); /* frame length */
            avio_wb32(s, 0x51540); /* unknown */
            avio_wb32(s, stream->enc->bit_rate / 8 * 60); /* bytes per minute */
            avio_wb32(s, stream->enc->bit_rate / 8 * 60); /* bytes per minute */
            avio_wb16(s, 0x01);
            /* frame length : seems to be very important */
            avio_wb16(s, coded_frame_size);
            avio_wb32(s, 0); /* unknown */
            avio_wb16(s, stream->enc->sample_rate); /* sample rate */
            avio_wb32(s, 0x10); /* unknown */
            avio_wb16(s, stream->enc->channels);
            put_str8(s, "Int0"); /* codec name */
            if (stream->enc->codec_tag) {
                avio_w8(s, 4); /* tag length */
                avio_wl32(s, stream->enc->codec_tag);
            } else {
                av_log(ctx, AV_LOG_ERROR, "Invalid codec tag\n");
                return -1;
            }
            avio_wb16(s, 0); /* title length */
            avio_wb16(s, 0); /* author length */
            avio_wb16(s, 0); /* copyright length */
            avio_w8(s, 0); /* end of header */
        } else {
            /* video codec info */
            avio_wb32(s,34); /* size */
            ffio_wfourcc(s, "VIDO");
            if(stream->enc->codec_id == AV_CODEC_ID_RV10)
                ffio_wfourcc(s,"RV10");
            else
                ffio_wfourcc(s,"RV20");
            avio_wb16(s, stream->enc->width);
            avio_wb16(s, stream->enc->height);
            avio_wb16(s, (int) stream->frame_rate); /* frames per seconds ? */
            avio_wb32(s,0); /* unknown meaning */
            avio_wb16(s, (int) stream->frame_rate); /* unknown meaning */
            avio_wb32(s,0); /* unknown meaning */
            avio_wb16(s, 8); /* unknown meaning */
            /* Seems to be the codec version: only use basic H263. The next
               versions seem to add a differential DC coding as in
               MPEG... nothing new under the sun */
            if(stream->enc->codec_id == AV_CODEC_ID_RV10)
                avio_wb32(s,0x10000000);
            else
                avio_wb32(s,0x20103001);
            //avio_wb32(s,0x10003000);
        }
    }

    /* patch data offset field */
    data_pos = s->buf_ptr - start_ptr;
    rm->data_pos = data_pos;
    data_offset_ptr[0] = data_pos >> 24;
    data_offset_ptr[1] = data_pos >> 16;
    data_offset_ptr[2] = data_pos >> 8;
    data_offset_ptr[3] = data_pos;

    /* data stream */
    ffio_wfourcc(s, "DATA");
    avio_wb32(s,data_size + 10 + 8);
    avio_wb16(s,0);

    avio_wb32(s, nb_packets); /* number of packets */
    avio_wb32(s,0); /* next data header */
    return 0;
}
 
/**
 * Write the 12-byte RealMedia packet header and update the stream's
 * packet statistics. The timestamp is derived from the frame counter
 * and the nominal frame rate.
 */
static void write_packet_header(AVFormatContext *ctx, StreamInfo *stream,
                                int length, int key_frame)
{
    int timestamp;
    AVIOContext *s = ctx->pb;

    stream->nb_packets++;
    stream->packet_total_size += length;
    if (length > stream->packet_max_size)
        stream->packet_max_size = length;

    avio_wb16(s,0); /* version */
    avio_wb16(s,length + 12); /* total packet size including this header */
    avio_wb16(s, stream->num); /* stream number */
    timestamp = (1000 * (float)stream->nb_frames) / stream->frame_rate;
    avio_wb32(s, timestamp); /* timestamp */
    avio_w8(s, 0); /* reserved */
    avio_w8(s, key_frame ? 2 : 0); /* flags */
}
 
/**
 * Initialize the muxer: set up per-stream StreamInfo records (at most
 * one audio and one video stream) and emit a provisional file header,
 * which rm_write_trailer() rewrites with the real sizes when possible.
 */
static int rm_write_header(AVFormatContext *s)
{
    RMMuxContext *rm = s->priv_data;
    StreamInfo *stream;
    int n;
    AVCodecContext *codec;

    if (s->nb_streams > 2) {
        av_log(s, AV_LOG_ERROR, "At most 2 streams are currently supported for muxing in RM\n");
        return AVERROR_PATCHWELCOME;
    }

    for(n=0;n<s->nb_streams;n++) {
        s->streams[n]->id = n;
        codec = s->streams[n]->codec;
        stream = &rm->streams[n];
        memset(stream, 0, sizeof(StreamInfo));
        stream->num = n;
        stream->bit_rate = codec->bit_rate;
        stream->enc = codec;

        switch(codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            rm->audio_stream = stream;
            /* Audio "frame rate" = coded frames per second. */
            stream->frame_rate = (float)codec->sample_rate / (float)codec->frame_size;
            /* XXX: dummy values */
            stream->packet_max_size = 1024;
            stream->nb_packets = 0;
            stream->total_frames = stream->nb_packets;
            break;
        case AVMEDIA_TYPE_VIDEO:
            rm->video_stream = stream;
            stream->frame_rate = (float)codec->time_base.den / (float)codec->time_base.num;
            /* XXX: dummy values */
            stream->packet_max_size = 4096;
            stream->nb_packets = 0;
            stream->total_frames = stream->nb_packets;
            break;
        default:
            return -1;
        }
    }

    if (rv10_write_header(s, 0, 0))
        return AVERROR_INVALIDDATA;
    avio_flush(s->pb);
    return 0;
}
 
/**
 * Write one audio packet. AC-3 payloads are stored with each 16-bit
 * word byte-swapped, which requires a temporary buffer; every other
 * codec's data is written through unchanged.
 *
 * Fixes vs. the previous version: the temporary buffer is only
 * allocated when actually needed (AC-3), its allocation is checked
 * instead of being dereferenced blindly, and odd-sized AC-3 packets no
 * longer read one byte past the end of the input buffer.
 *
 * @return 0 on success, -1 on allocation failure.
 */
static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int flags)
{
    RMMuxContext *rm = s->priv_data;
    AVIOContext *pb = s->pb;
    StreamInfo *stream = rm->audio_stream;
    int i;

    write_packet_header(s, stream, size, !!(flags & AV_PKT_FLAG_KEY));

    if (stream->enc->codec_id == AV_CODEC_ID_AC3) {
        /* for AC-3, the words seem to be reversed */
        uint8_t *buf1 = av_malloc(size * sizeof(uint8_t));
        if (!buf1)
            return -1; /* was previously dereferenced without a check */
        for (i = 0; i + 1 < size; i += 2) {
            buf1[i]     = buf[i + 1];
            buf1[i + 1] = buf[i];
        }
        if (size & 1) /* don't read past the end on odd-sized packets */
            buf1[size - 1] = buf[size - 1];
        avio_write(pb, buf1, size);
        av_free(buf1);
    } else {
        avio_write(pb, buf, size);
    }
    stream->nb_frames++;
    return 0;
}
 
/**
 * Write one video frame as a single "full frame" slice packet:
 * the slice header encodes the frame type, a sequence number, the
 * frame size (16- or 32-bit form depending on magnitude) and a
 * rolling frame counter, followed by the coded data.
 */
static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size, int flags)
{
    RMMuxContext *rm = s->priv_data;
    AVIOContext *pb = s->pb;
    StreamInfo *stream = rm->video_stream;
    int key_frame = !!(flags & AV_PKT_FLAG_KEY);

    /* XXX: this is incorrect: should be a parameter */

    /* Well, I spent some time finding the meaning of these bits. I am
       not sure I understood everything, but it works !! */
#if 1
    /* Header is 7 bytes, plus 4 more when the 32-bit size form is used. */
    write_packet_header(s, stream, size + 7 + (size >= 0x4000)*4, key_frame);
    /* bit 7: '1' if final packet of a frame converted in several packets */
    avio_w8(pb, 0x81);
    /* bit 7: '1' if I frame. bits 6..0 : sequence number in current
       frame starting from 1 */
    if (key_frame) {
        avio_w8(pb, 0x81);
    } else {
        avio_w8(pb, 0x01);
    }
    if(size >= 0x4000){
        avio_wb32(pb, size); /* total frame size */
        avio_wb32(pb, size); /* offset from the start or the end */
    }else{
        avio_wb16(pb, 0x4000 | size); /* total frame size */
        avio_wb16(pb, 0x4000 | size); /* offset from the start or the end */
    }
#else
    /* full frame */
    write_packet_header(s, size + 6);
    avio_w8(pb, 0xc0);
    avio_wb16(pb, 0x4000 + size); /* total frame size */
    avio_wb16(pb, 0x4000 + packet_number * 126); /* position in stream */
#endif
    avio_w8(pb, stream->nb_frames & 0xff);

    avio_write(pb, buf, size);

    stream->nb_frames++;
    return 0;
}
 
/**
 * Dispatch a packet to the audio or video writer based on the codec
 * type of the stream it belongs to.
 */
static int rm_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[pkt->stream_index]->codec;

    if (codec->codec_type == AVMEDIA_TYPE_AUDIO)
        return rm_write_audio(s, pkt->data, pkt->size, pkt->flags);
    return rm_write_video(s, pkt->data, pkt->size, pkt->flags);
}
 
/**
 * Finish the file. On seekable output, append the (undocumented) end
 * header, then seek back to the start and rewrite the whole file header
 * with the real data size and per-stream frame totals. On non-seekable
 * output only the end header can be written.
 */
static int rm_write_trailer(AVFormatContext *s)
{
    RMMuxContext *rm = s->priv_data;
    int data_size, index_pos, i;
    AVIOContext *pb = s->pb;

    if (s->pb->seekable) {
        /* end of file: finish to write header */
        index_pos = avio_tell(pb);
        data_size = index_pos - rm->data_pos;

        /* FIXME: write index */

        /* undocumented end header */
        avio_wb32(pb, 0);
        avio_wb32(pb, 0);

        avio_seek(pb, 0, SEEK_SET);
        for(i=0;i<s->nb_streams;i++)
            rm->streams[i].total_frames = rm->streams[i].nb_frames;
        rv10_write_header(s, data_size, 0);
    } else {
        /* undocumented end header */
        avio_wb32(pb, 0);
        avio_wb32(pb, 0);
    }

    return 0;
}
 
 
/* Muxer definition for RealMedia output (RV10 video + AC-3 audio by
 * default; tags come from the shared ff_rm_codec_tags table). */
AVOutputFormat ff_rm_muxer = {
    .name              = "rm",
    .long_name         = NULL_IF_CONFIG_SMALL("RealMedia"),
    .mime_type         = "application/vnd.rn-realmedia",
    .extensions        = "rm,ra",
    .priv_data_size    = sizeof(RMMuxContext),
    .audio_codec       = AV_CODEC_ID_AC3,
    .video_codec       = AV_CODEC_ID_RV10,
    .write_header      = rm_write_header,
    .write_packet      = rm_write_packet,
    .write_trailer     = rm_write_trailer,
    .codec_tag         = (const AVCodecTag* const []){ ff_rm_codec_tags, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/rmsipr.c
0,0 → 1,61
/*
* tables and functions for demuxing SIPR audio muxed RealMedia style
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdint.h>
 
#include "rmsipr.h"
 
/* Subpacket size in bytes for each of the four SIPR modes. */
const unsigned char ff_sipr_subpk_size[4] = { 29, 19, 37, 20 };
 
/* Pairs of block indices (out of 96 equal-sized nibble blocks) that must
 * be exchanged to undo the interleaving of SIPR audio data. */
static const unsigned char sipr_swaps[38][2] = {
    {  0, 63 }, {  1, 22 }, {  2, 44 }, {  3, 90 },
    {  5, 81 }, {  7, 31 }, {  8, 86 }, {  9, 58 },
    { 10, 36 }, { 12, 68 }, { 13, 39 }, { 14, 73 },
    { 15, 53 }, { 16, 69 }, { 17, 57 }, { 19, 88 },
    { 20, 34 }, { 21, 71 }, { 24, 46 }, { 25, 94 },
    { 26, 54 }, { 28, 75 }, { 29, 50 }, { 32, 70 },
    { 33, 92 }, { 35, 74 }, { 38, 85 }, { 40, 56 },
    { 42, 87 }, { 43, 65 }, { 45, 59 }, { 48, 79 },
    { 49, 93 }, { 51, 89 }, { 55, 95 }, { 61, 76 },
    { 67, 83 }, { 77, 80 }
};

/* Read the 4-bit nibble at nibble-index idx (low nibble of a byte first). */
static int sipr_get_nibble(const uint8_t *buf, int idx)
{
    return (buf[idx >> 1] >> (4 * (idx & 1))) & 0xF;
}

/* Store val into the 4-bit nibble at nibble-index idx, leaving the other
 * nibble of the byte untouched. */
static void sipr_set_nibble(uint8_t *buf, int idx, int val)
{
    buf[idx >> 1] = (val << (4 * (idx & 1))) |
                    (buf[idx >> 1] & (0xF << (4 * !(idx & 1))));
}

/* This can be optimized, e.g. use memcpy() if data blocks are aligned. */
void ff_rm_reorder_sipr_data(uint8_t *buf, int sub_packet_h, int framesize)
{
    int pair, bs = sub_packet_h * framesize * 2 / 96; // nibbles per subpacket

    for (pair = 0; pair < 38; pair++) {
        int a = bs * sipr_swaps[pair][0];
        int b = bs * sipr_swaps[pair][1];
        int k;

        /* Exchange the two bs-nibble blocks, one nibble at a time. */
        for (k = 0; k < bs; k++, a++, b++) {
            int va = sipr_get_nibble(buf, a);
            int vb = sipr_get_nibble(buf, b);

            sipr_set_nibble(buf, b, va);
            sipr_set_nibble(buf, a, vb);
        }
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/rmsipr.h
0,0 → 1,35
/*
* tables and functions for demuxing SIPR audio muxed RealMedia style
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RMSIPR_H
#define AVFORMAT_RMSIPR_H
 
#include <stdint.h>
 
extern const unsigned char ff_sipr_subpk_size[4];
 
/**
 * Perform 4-bit block reordering for SIPR data.
 *
 * @param buf          SIPR data, reordered in place
 * @param sub_packet_h subpacket height (interleaver rows) of the stream
 * @param framesize    size of one coded frame in bytes
 */
void ff_rm_reorder_sipr_data(uint8_t *buf, int sub_packet_h, int framesize);
 
#endif /* AVFORMAT_RMSIPR_H */
/contrib/sdk/sources/ffmpeg/libavformat/rpl.c
0,0 → 1,355
/*
* ARMovie/RPL demuxer
* Copyright (c) 2007 Christian Ohm, 2008 Eli Friedman
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "avformat.h"
#include "internal.h"
#include <stdlib.h>
 
/* Magic bytes at the start of every ARMovie/RPL file. */
#define RPL_SIGNATURE "ARMovie\x0A"
#define RPL_SIGNATURE_SIZE 8

/** 256 is arbitrary, but should be big enough for any reasonable file. */
#define RPL_LINE_LENGTH 256
 
/**
 * Probe for the ARMovie signature at the start of the buffer.
 */
static int rpl_probe(AVProbeData *p)
{
    int matches = memcmp(p->buf, RPL_SIGNATURE, RPL_SIGNATURE_SIZE) == 0;

    return matches ? AVPROBE_SCORE_MAX : 0;
}
 
/* Demuxer state: chunk geometry from the header plus the current read
 * position inside the interleaved chunk stream. */
typedef struct RPLContext {
    // RPL header data
    int32_t frames_per_chunk;  // video frames stored in each chunk

    // Stream position data
    uint32_t chunk_number;  // index of the chunk currently being read
    uint32_t chunk_part;    // which stream's portion of the chunk is next
    uint32_t frame_in_part; // frame index within the current video portion
} RPLContext;
 
/**
 * Read one NUL- or newline-terminated header line into `line`.
 *
 * @return 0 when a full line ending in '\n' was read before EOF,
 *         -1 on NUL byte, buffer exhaustion, or end of file.
 */
static int read_line(AVIOContext * pb, char* line, int bufsize)
{
    int pos = 0;

    while (pos < bufsize - 1) {
        int ch = avio_r8(pb);

        if (ch == 0)
            break;
        if (ch == '\n') {
            line[pos] = '\0';
            return url_feof(pb) ? -1 : 0;
        }
        line[pos++] = ch;
    }
    line[pos] = '\0';
    return -1;
}
 
/**
 * Parse a non-negative decimal integer at the start of `line`.
 *
 * Sets *error to -1 if the value would exceed INT32_MAX (parsing still
 * continues, so *endptr always points past the digits). *endptr is set
 * to the first non-digit character.
 */
static int32_t read_int(const char* line, const char** endptr, int* error)
{
    unsigned long value = 0;

    while (*line >= '0' && *line <= '9') {
        if (value > (0x7FFFFFFF - 9) / 10)
            *error = -1;
        value = value * 10 + (*line - '0');
        line++;
    }
    *endptr = line;
    return value;
}
 
/**
 * Convenience wrapper: read one header line and parse its leading integer.
 */
static int32_t read_line_and_int(AVIOContext * pb, int* error)
{
    char buf[RPL_LINE_LENGTH];
    const char *end;

    *error |= read_line(pb, buf, sizeof(buf));
    return read_int(buf, &end, error);
}
 
/** Parsing for fps, which can be a fraction. Unfortunately,
* the spec for the header leaves out a lot of details,
* so this is mostly guessing.
*/
static AVRational read_fps(const char* line, int* error)
{
int64_t num, den = 1;
AVRational result;
num = read_int(line, &line, error);
if (*line == '.')
line++;
for (; *line>='0' && *line<='9'; line++) {
// Truncate any numerator too large to fit into an int64_t
if (num > (INT64_MAX - 9) / 10 || den > INT64_MAX / 10)
break;
num = 10 * num + *line - '0';
den *= 10;
}
if (!num)
*error = -1;
av_reduce(&result.num, &result.den, num, den, 0x7FFFFFFF);
return result;
}
 
/**
 * Parse the 21-line textual ARMovie/RPL header, create the video stream
 * (and an audio stream when an audio format is present), then read the
 * chunk catalog into the streams' index entries.
 *
 * All parse errors are accumulated in `error` and reported once at the
 * end as AVERROR(EIO).
 */
static int rpl_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    RPLContext *rpl = s->priv_data;
    AVStream *vst = NULL, *ast = NULL;
    int total_audio_size;
    int error = 0;

    uint32_t i;

    int32_t audio_format, chunk_catalog_offset, number_of_chunks;
    AVRational fps;

    char line[RPL_LINE_LENGTH];

    // The header for RPL/ARMovie files is 21 lines of text
    // containing the various header fields. The fields are always
    // in the same order, and other text besides the first
    // number usually isn't important.
    // (The spec says that there exists some significance
    // for the text in a few cases; samples needed.)
    error |= read_line(pb, line, sizeof(line));      // ARMovie
    error |= read_line(pb, line, sizeof(line));      // movie name
    av_dict_set(&s->metadata, "title"    , line, 0);
    error |= read_line(pb, line, sizeof(line));      // date/copyright
    av_dict_set(&s->metadata, "copyright", line, 0);
    error |= read_line(pb, line, sizeof(line));      // author and other
    av_dict_set(&s->metadata, "author"   , line, 0);

    // video headers
    vst = avformat_new_stream(s, NULL);
    if (!vst)
        return AVERROR(ENOMEM);
    vst->codec->codec_type      = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_tag       = read_line_and_int(pb, &error);  // video format
    vst->codec->width           = read_line_and_int(pb, &error);  // video width
    vst->codec->height          = read_line_and_int(pb, &error);  // video height
    vst->codec->bits_per_coded_sample = read_line_and_int(pb, &error);  // video bits per sample
    error |= read_line(pb, line, sizeof(line));                   // video frames per second
    fps = read_fps(line, &error);
    avpriv_set_pts_info(vst, 32, fps.den, fps.num);

    // Figure out the video codec
    switch (vst->codec->codec_tag) {
#if 0
        case 122:
            vst->codec->codec_id = AV_CODEC_ID_ESCAPE122;
            break;
#endif
        case 124:
            vst->codec->codec_id = AV_CODEC_ID_ESCAPE124;
            // The header is wrong here, at least sometimes
            vst->codec->bits_per_coded_sample = 16;
            break;
        case 130:
            vst->codec->codec_id = AV_CODEC_ID_ESCAPE130;
            break;
        default:
            avpriv_report_missing_feature(s, "Video format %i",
                                          vst->codec->codec_tag);
            vst->codec->codec_id = AV_CODEC_ID_NONE;
    }

    // Audio headers

    // ARMovie supports multiple audio tracks; I don't have any
    // samples, though. This code will ignore additional tracks.
    audio_format = read_line_and_int(pb, &error);  // audio format ID
    if (audio_format) {
        ast = avformat_new_stream(s, NULL);
        if (!ast)
            return AVERROR(ENOMEM);
        ast->codec->codec_type      = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_tag       = audio_format;
        ast->codec->sample_rate     = read_line_and_int(pb, &error);  // audio bitrate
        ast->codec->channels        = read_line_and_int(pb, &error);  // number of audio channels
        ast->codec->bits_per_coded_sample = read_line_and_int(pb, &error);  // audio bits per sample
        // At least one sample uses 0 for ADPCM, which is really 4 bits
        // per sample.
        if (ast->codec->bits_per_coded_sample == 0)
            ast->codec->bits_per_coded_sample = 4;

        ast->codec->bit_rate = ast->codec->sample_rate *
                               ast->codec->bits_per_coded_sample *
                               ast->codec->channels;

        ast->codec->codec_id = AV_CODEC_ID_NONE;
        switch (audio_format) {
            case 1:
                if (ast->codec->bits_per_coded_sample == 16) {
                    // 16-bit audio is always signed
                    ast->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
                    break;
                }
                // There are some other formats listed as legal per the spec;
                // samples needed.
                break;
            case 101:
                if (ast->codec->bits_per_coded_sample == 8) {
                    // The samples with this kind of audio that I have
                    // are all unsigned.
                    ast->codec->codec_id = AV_CODEC_ID_PCM_U8;
                    break;
                } else if (ast->codec->bits_per_coded_sample == 4) {
                    ast->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_EA_SEAD;
                    break;
                }
                break;
        }
        if (ast->codec->codec_id == AV_CODEC_ID_NONE)
            avpriv_request_sample(s, "Audio format %i", audio_format);
        avpriv_set_pts_info(ast, 32, 1, ast->codec->bit_rate);
    } else {
        // No audio: skip the three audio header lines.
        for (i = 0; i < 3; i++)
            error |= read_line(pb, line, sizeof(line));
    }

    rpl->frames_per_chunk = read_line_and_int(pb, &error);  // video frames per chunk
    if (rpl->frames_per_chunk > 1 && vst->codec->codec_tag != 124)
        av_log(s, AV_LOG_WARNING,
               "Don't know how to split frames for video format %i. "
               "Video stream will be broken!\n", vst->codec->codec_tag);

    number_of_chunks = read_line_and_int(pb, &error);  // number of chunks in the file
    // The number in the header is actually the index of the last chunk.
    number_of_chunks++;

    error |= read_line(pb, line, sizeof(line));  // "even" chunk size in bytes
    error |= read_line(pb, line, sizeof(line));  // "odd" chunk size in bytes
    chunk_catalog_offset =                       // offset of the "chunk catalog"
        read_line_and_int(pb, &error);           //   (file index)
    error |= read_line(pb, line, sizeof(line));  // offset to "helpful" sprite
    error |= read_line(pb, line, sizeof(line));  // size of "helpful" sprite
    error |= read_line(pb, line, sizeof(line));  // offset to key frame list

    // Read the index
    avio_seek(pb, chunk_catalog_offset, SEEK_SET);
    total_audio_size = 0;
    for (i = 0; !error && i < number_of_chunks; i++) {
        int64_t offset, video_size, audio_size;
        error |= read_line(pb, line, sizeof(line));
        // Catalog lines look like "offset , video_size ; audio_size".
        if (3 != sscanf(line, "%"SCNd64" , %"SCNd64" ; %"SCNd64,
                        &offset, &video_size, &audio_size))
            error = -1;
        av_add_index_entry(vst, offset, i * rpl->frames_per_chunk,
                           video_size, rpl->frames_per_chunk, 0);
        if (ast)
            av_add_index_entry(ast, offset + video_size, total_audio_size,
                               audio_size, audio_size * 8, 0);
        // Audio timestamps are measured in bits (see pts info above).
        total_audio_size += audio_size * 8;
    }

    if (error) return AVERROR(EIO);

    return 0;
}
 
/**
 * Read one packet from an RPL / ARMovie file.
 *
 * Each chunk interleaves one part per stream; rpl->chunk_part tracks which
 * stream of the current chunk is next, rpl->chunk_number which chunk, and
 * rpl->frame_in_part the sub-frame inside a multi-frame Escape 124 video
 * chunk.
 *
 * @return number of bytes read, or a negative AVERROR code
 */
static int rpl_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RPLContext *rpl = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream* stream;
    AVIndexEntry* index_entry;
    uint32_t ret;

    /* All parts of the current chunk consumed: advance to the next chunk. */
    if (rpl->chunk_part == s->nb_streams) {
        rpl->chunk_number++;
        rpl->chunk_part = 0;
    }

    stream = s->streams[rpl->chunk_part];

    if (rpl->chunk_number >= stream->nb_index_entries)
        return AVERROR_EOF;

    index_entry = &stream->index_entries[rpl->chunk_number];

    /* Only seek at the start of a chunk part; subsequent frames of a split
       Escape 124 chunk follow sequentially in the file. */
    if (rpl->frame_in_part == 0)
        if (avio_seek(pb, index_entry->pos, SEEK_SET) < 0)
            return AVERROR(EIO);

    if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        stream->codec->codec_tag == 124) {
        // We have to split Escape 124 frames because there are
        // multiple frames per chunk in Escape 124 samples.
        uint32_t frame_size;

        avio_skip(pb, 4); /* flags */
        frame_size = avio_rl32(pb);
        /* The 8-byte header (flags + size) is part of the frame payload:
           rewind so the decoder sees it too. */
        if (avio_seek(pb, -8, SEEK_CUR) < 0)
            return AVERROR(EIO);

        ret = av_get_packet(pb, pkt, frame_size);
        if (ret != frame_size) {
            av_free_packet(pkt);
            return AVERROR(EIO);
        }
        pkt->duration = 1;
        pkt->pts = index_entry->timestamp + rpl->frame_in_part;
        pkt->stream_index = rpl->chunk_part;

        rpl->frame_in_part++;
        if (rpl->frame_in_part == rpl->frames_per_chunk) {
            rpl->frame_in_part = 0;
            rpl->chunk_part++;
        }
    } else {
        ret = av_get_packet(pb, pkt, index_entry->size);
        if (ret != index_entry->size) {
            av_free_packet(pkt);
            return AVERROR(EIO);
        }

        if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            // frames_per_chunk should always be one here; the header
            // parsing will warn if it isn't.
            pkt->duration = rpl->frames_per_chunk;
        } else {
            // All the audio codecs supported in this container
            // (at least so far) are constant-bitrate.
            pkt->duration = ret * 8;
        }
        pkt->pts = index_entry->timestamp;
        pkt->stream_index = rpl->chunk_part;
        rpl->chunk_part++;
    }

    // None of the Escape formats have keyframes, and the ADPCM
    // format used doesn't have keyframes.
    if (rpl->chunk_number == 0 && rpl->frame_in_part == 0)
        pkt->flags |= AV_PKT_FLAG_KEY;

    return ret;
}
 
/* Demuxer registration for RPL / ARMovie files. */
AVInputFormat ff_rpl_demuxer = {
    .name           = "rpl",
    .long_name      = NULL_IF_CONFIG_SMALL("RPL / ARMovie"),
    .priv_data_size = sizeof(RPLContext),
    .read_probe     = rpl_probe,
    .read_header    = rpl_read_header,
    .read_packet    = rpl_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/rsd.c
0,0 → 1,168
/*
* RSD demuxer
* Copyright (c) 2013 James Almer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <limits.h>

#include "libavcodec/bytestream.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "avio.h"
#include "internal.h"
 
/* Mapping from the fourcc following "RSD?" in the header to codec IDs. */
static const AVCodecTag rsd_tags[] = {
    { AV_CODEC_ID_ADPCM_THP,     MKTAG('G','A','D','P') },
    { AV_CODEC_ID_ADPCM_IMA_RAD, MKTAG('R','A','D','P') },
    { AV_CODEC_ID_PCM_S16BE,     MKTAG('P','C','M','B') },
    { AV_CODEC_ID_PCM_S16LE,     MKTAG('P','C','M',' ') },
    { AV_CODEC_ID_NONE, 0 },
};

/* Format tags known to exist but not yet supported; encountering one
   produces a request-sample message instead of a generic error. */
static const uint32_t rsd_unsupported_tags[] = {
    MKTAG('O','G','G',' '),
    MKTAG('V','A','G',' '),
    MKTAG('W','A','D','P'),
    MKTAG('X','A','D','P'),
    MKTAG('X','M','A',' '),
};
 
/* Accept files starting with "RSD" followed by an ASCII version digit 2-6. */
static int rsd_probe(AVProbeData *p)
{
    int version_digit = p->buf[3] - '0';

    if (memcmp(p->buf, "RSD", 3))
        return 0;
    if (version_digit < 2 || version_digit > 6)
        return 0;

    return AVPROBE_SCORE_EXTENSION;
}
 
/**
 * Parse the RSD header and create the single audio stream.
 *
 * Header layout (all little-endian after the 4-byte magic "RSDn"):
 * codec fourcc, channel count, bit depth (skipped), sample rate, and an
 * unknown dword; codec-specific fields (data start offset, GADP coeff
 * table) follow depending on the tag.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rsd_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    int i, version, start = 0x800;
    AVCodecContext *codec;
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);

    avio_skip(pb, 3); // "RSD"
    version = avio_r8(pb) - '0';

    codec = st->codec;
    codec->codec_type = AVMEDIA_TYPE_AUDIO;
    codec->codec_tag  = avio_rl32(pb);
    codec->codec_id   = ff_codec_get_id(rsd_tags, codec->codec_tag);
    if (!codec->codec_id) {
        char tag_buf[5];

        av_get_codec_tag_string(tag_buf, sizeof(tag_buf), codec->codec_tag);
        for (i = 0; i < FF_ARRAY_ELEMS(rsd_unsupported_tags); i++) {
            if (codec->codec_tag == rsd_unsupported_tags[i]) {
                avpriv_request_sample(s, "Codec tag: %s", tag_buf);
                return AVERROR_PATCHWELCOME;
            }
        }
        av_log(s, AV_LOG_ERROR, "Unknown codec tag: %s\n", tag_buf);
        return AVERROR_INVALIDDATA;
    }

    codec->channels = avio_rl32(pb);
    /* The field is an unsigned 32-bit value stored into a signed int, so it
       can come out negative; also bound it so block_align = 20 * channels
       below cannot overflow. */
    if (codec->channels <= 0 || codec->channels > INT_MAX / 20)
        return AVERROR_INVALIDDATA;

    avio_skip(pb, 4); // Bit depth
    codec->sample_rate = avio_rl32(pb);
    /* Same signedness concern as the channel count. */
    if (codec->sample_rate <= 0)
        return AVERROR_INVALIDDATA;

    avio_skip(pb, 4); // Unknown

    switch (codec->codec_id) {
    case AV_CODEC_ID_ADPCM_IMA_RAD:
        /* 20 bytes per channel per IMA block. */
        codec->block_align = 20 * codec->channels;
        if (pb->seekable)
            st->duration = av_get_audio_frame_duration(codec, avio_size(pb) - start);
        break;
    case AV_CODEC_ID_ADPCM_THP:
        /* RSD3GADP is mono, so only alloc enough memory
           to store the coeff table for a single channel. */

        if (ff_alloc_extradata(codec, 32))
            return AVERROR(ENOMEM);

        start = avio_rl32(pb);

        if (avio_read(s->pb, codec->extradata, 32) != 32)
            return AVERROR_INVALIDDATA;

        /* The coeff table is stored little-endian in the file but the
           decoder expects big-endian: byte-swap the 16 coefficients. */
        for (i = 0; i < 16; i++)
            AV_WB16(codec->extradata + i * 2, AV_RL16(codec->extradata + i * 2));

        if (pb->seekable)
            st->duration = (avio_size(pb) - start) / 8 * 14;
        break;
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S16BE:
        if (version != 4)
            start = avio_rl32(pb);

        if (pb->seekable)
            st->duration = (avio_size(pb) - start) / 2 / codec->channels;
        break;
    }

    /* Skip the remaining header padding up to the start of the audio data. */
    avio_skip(pb, start - avio_tell(pb));

    avpriv_set_pts_info(st, 64, 1, codec->sample_rate);

    return 0;
}
 
/* Read one packet: a full IMA-RAD block for that codec, 1024 bytes otherwise. */
static int rsd_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[0]->codec;
    const int default_size = 1024;
    int req, ret;

    if (url_feof(s->pb))
        return AVERROR_EOF;

    req = (codec->codec_id == AV_CODEC_ID_ADPCM_IMA_RAD)
          ? codec->block_align : default_size;
    ret = av_get_packet(s->pb, pkt, req);

    if (ret != default_size) {
        if (ret < 0) {
            av_free_packet(pkt);
            return ret;
        }
        /* Short read (end of file): trim the packet to what was read. */
        av_shrink_packet(pkt, ret);
    }
    pkt->stream_index = 0;

    return ret;
}
 
/* Demuxer registration for GameCube RSD files. */
AVInputFormat ff_rsd_demuxer = {
    .name        = "rsd",
    .long_name   = NULL_IF_CONFIG_SMALL("GameCube RSD"),
    .read_probe  = rsd_probe,
    .read_header = rsd_read_header,
    .read_packet = rsd_read_packet,
    .extensions  = "rsd",
    .codec_tag   = (const AVCodecTag* const []){rsd_tags, 0},
};
/contrib/sdk/sources/ffmpeg/libavformat/rso.c
0,0 → 1,30
/*
* RSO format common data
* Copyright (c) 2010 Rafael Carre
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "rso.h"
 
/* Codec IDs and their on-disk identifiers shared by the RSO demuxer/muxer. */
const AVCodecTag ff_codec_rso_tags[] = {
    { AV_CODEC_ID_PCM_U8,        0x0100 },
    { AV_CODEC_ID_ADPCM_IMA_WAV, 0x0101 },
    { AV_CODEC_ID_NONE, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/rso.h
0,0 → 1,32
/*
* RSO format common data
* Copyright (c) 2010 Rafael Carre
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RSO_H
#define AVFORMAT_RSO_H
 
#include "internal.h"
 
#define RSO_HEADER_SIZE 8
 
/* The libavcodec codecs we support, and the IDs they have in the file */
extern const AVCodecTag ff_codec_rso_tags[];
 
#endif /* AVFORMAT_RSO_H */
/contrib/sdk/sources/ffmpeg/libavformat/rsodec.c
0,0 → 1,83
/*
* RSO demuxer
* Copyright (c) 2001 Fabrice Bellard (original AU code)
* Copyright (c) 2010 Rafael Carre
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "pcm.h"
#include "rso.h"
 
/**
 * Parse the 8-byte RSO header and set up the single mono audio stream.
 *
 * Header layout (big-endian 16-bit fields): codec ID, data size in bytes,
 * sample rate, play mode (ignored).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rso_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    int id, rate, bps;
    unsigned int size;
    enum AVCodecID codec;
    AVStream *st;

    id = avio_rb16(pb);
    size = avio_rb16(pb);
    rate = avio_rb16(pb);
    avio_rb16(pb); /* play mode ? (0x0000 = don't loop) */

    codec = ff_codec_get_id(ff_codec_rso_tags, id);

    if (codec == AV_CODEC_ID_ADPCM_IMA_WAV) {
        avpriv_report_missing_feature(s, "ADPCM in RSO");
        return AVERROR_PATCHWELCOME;
    }

    /* The duration below is derived from the codec's fixed bits-per-sample,
       so a codec without a known bps cannot be handled. */
    bps = av_get_bits_per_sample(codec);
    if (!bps) {
        avpriv_request_sample(s, "Unknown bits per sample");
        return AVERROR_PATCHWELCOME;
    }

    /* now we are ready: build format streams */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->duration = (size * 8) / bps;          /* samples = data bits / bps */
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_tag = id;
    st->codec->codec_id = codec;
    st->codec->channels = 1;
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    st->codec->sample_rate = rate;
    st->codec->block_align = 1;

    avpriv_set_pts_info(st, 64, 1, rate);

    return 0;
}
 
/* Demuxer registration for Lego Mindstorms RSO; packets/seeking use the
   generic raw-PCM helpers. */
AVInputFormat ff_rso_demuxer = {
    .name        = "rso",
    .long_name   = NULL_IF_CONFIG_SMALL("Lego Mindstorms RSO"),
    .extensions  = "rso",
    .read_header = rso_read_header,
    .read_packet = ff_pcm_read_packet,
    .read_seek   = ff_pcm_read_seek,
    .codec_tag   = (const AVCodecTag* const []){ff_codec_rso_tags, 0},
};
/contrib/sdk/sources/ffmpeg/libavformat/rsoenc.c
0,0 → 1,111
/*
* RSO muxer
* Copyright (c) 2001 Fabrice Bellard (original AU code)
* Copyright (c) 2010 Rafael Carre
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "riff.h"
#include "rso.h"
 
/**
 * Validate the stream parameters and emit the 8-byte RSO header.
 *
 * The data-size field is written as 0 here and patched in
 * rso_write_trailer() once the real size is known, which is why a seekable
 * output is required.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rso_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;

    if (!enc->codec_tag)
        return AVERROR_INVALIDDATA;

    if (enc->channels != 1) {
        av_log(s, AV_LOG_ERROR, "RSO only supports mono\n");
        return AVERROR_INVALIDDATA;
    }

    if (!s->pb->seekable) {
        av_log(s, AV_LOG_ERROR, "muxer does not support non seekable output\n");
        return AVERROR_INVALIDDATA;
    }

    /* XXX: find legal sample rates (if any) */
    if (enc->sample_rate >= 1u<<16) {
        /* the header stores the rate in a 16-bit field */
        av_log(s, AV_LOG_ERROR, "Sample rate must be < 65536\n");
        return AVERROR_INVALIDDATA;
    }

    if (enc->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV) {
        av_log(s, AV_LOG_ERROR, "ADPCM in RSO not implemented\n");
        return AVERROR_PATCHWELCOME;
    }

    /* format header */
    avio_wb16(pb, enc->codec_tag); /* codec ID */
    avio_wb16(pb, 0); /* data size, will be written at EOF */
    avio_wb16(pb, enc->sample_rate);
    avio_wb16(pb, 0x0000); /* play mode ? (0x0000 = don't loop) */

    avio_flush(pb);

    return 0;
}
 
/* RSO stores raw audio data: emit the packet payload verbatim. */
static int rso_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;

    avio_write(pb, pkt->data, pkt->size);

    return 0;
}
 
/* Patch the 16-bit data-size field (written as 0 in the header) with the
 * real payload size, clamping to 0xffff with a warning if it overflows. */
static int rso_write_trailer(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    int64_t pos;
    uint16_t data_size;

    pos = avio_tell(pb);
    if (pos < 0)
        return pos;

    if (pos - RSO_HEADER_SIZE > 0xffff) {
        av_log(s, AV_LOG_WARNING,
               "Output file is too big (%"PRId64" bytes >= 64kB)\n", pos);
        data_size = 0xffff;
    } else {
        data_size = pos - RSO_HEADER_SIZE;
    }

    /* update file size */
    avio_seek(pb, 2, SEEK_SET);
    avio_wb16(pb, data_size);
    avio_seek(pb, pos, SEEK_SET);

    return 0;
}
 
/* Muxer registration for Lego Mindstorms RSO. */
AVOutputFormat ff_rso_muxer = {
    .name          = "rso",
    .long_name     = NULL_IF_CONFIG_SMALL("Lego Mindstorms RSO"),
    .extensions    = "rso",
    .audio_codec   = AV_CODEC_ID_PCM_U8,
    .video_codec   = AV_CODEC_ID_NONE,
    .write_header  = rso_write_header,
    .write_packet  = rso_write_packet,
    .write_trailer = rso_write_trailer,
    .codec_tag     = (const AVCodecTag* const []){ff_codec_rso_tags, 0},
};
/contrib/sdk/sources/ffmpeg/libavformat/rtmp.h
0,0 → 1,71
/*
* RTMP definitions
* Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RTMP_H
#define AVFORMAT_RTMP_H
 
#include "avformat.h"
 
#define RTMP_DEFAULT_PORT 1935
#define RTMPS_DEFAULT_PORT 443
 
#define RTMP_HANDSHAKE_PACKET_SIZE 1536
 
#define HMAC_IPAD_VAL 0x36
#define HMAC_OPAD_VAL 0x5C
 
/**
* emulated Flash client version - 9.0.124.2 on Linux
* @{
*/
#define RTMP_CLIENT_PLATFORM "LNX"
#define RTMP_CLIENT_VER1 9
#define RTMP_CLIENT_VER2 0
#define RTMP_CLIENT_VER3 124
#define RTMP_CLIENT_VER4 2
/** @} */ //version defines
 
/**
* Calculate HMAC-SHA2 digest for RTMP handshake packets.
*
* @param src input buffer
* @param len input buffer length (should be 1536)
* @param gap offset in buffer where 32 bytes should not be taken into account
* when calculating digest (since it will be used to store that digest)
* @param key digest key
* @param keylen digest key length
* @param dst buffer where calculated digest will be stored (32 bytes)
*/
int ff_rtmp_calc_digest(const uint8_t *src, int len, int gap,
const uint8_t *key, int keylen, uint8_t *dst);
 
/**
* Calculate digest position for RTMP handshake packets.
*
* @param buf input buffer (should be 1536 bytes)
* @param off offset in buffer where to start calculating digest position
* @param mod_val value used for computing modulo
* @param add_val value added at the end (after computing modulo)
*/
int ff_rtmp_calc_digest_pos(const uint8_t *buf, int off, int mod_val,
int add_val);
 
#endif /* AVFORMAT_RTMP_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtmpcrypt.c
0,0 → 1,336
/*
* RTMPE network protocol
* Copyright (c) 2008-2009 Andrej Stepanchuk
* Copyright (c) 2009-2010 Howard Chu
* Copyright (c) 2012 Samuel Pitoiset
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* RTMPE protocol
*/
 
#include "libavutil/blowfish.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/rc4.h"
#include "libavutil/xtea.h"
 
#include "internal.h"
#include "rtmp.h"
#include "rtmpdh.h"
#include "rtmpcrypt.h"
#include "url.h"
 
/* protocol handler context */
typedef struct RTMPEContext {
    const AVClass *class;
    URLContext *stream;  ///< underlying transport (TCP, or ffrtmphttp when tunneling)
    FF_DH *dh;           ///< Diffie-Hellman context used during the handshake
    struct AVRC4 key_in;  ///< RC4 key used to decrypt incoming data
    struct AVRC4 key_out; ///< RC4 key used to encrypt outgoing data
    int handshaked;      ///< nonzero once the handshake is done and RC4 is active
    int tunneling;       ///< use a HTTP connection (RTMPTE)
} RTMPEContext;
 
/* Fixed XTEA keys used by rtmpe8_sig() for type-8 signature encryption;
   the key index is derived from the handshake digest. */
static const uint8_t rtmpe8_keys[16][16] = {
    { 0xbf, 0xf0, 0x34, 0xb2, 0x11, 0xd9, 0x08, 0x1f,
      0xcc, 0xdf, 0xb7, 0x95, 0x74, 0x8d, 0xe7, 0x32 },
    { 0x08, 0x6a, 0x5e, 0xb6, 0x17, 0x43, 0x09, 0x0e,
      0x6e, 0xf0, 0x5a, 0xb8, 0xfe, 0x5a, 0x39, 0xe2 },
    { 0x7b, 0x10, 0x95, 0x6f, 0x76, 0xce, 0x05, 0x21,
      0x23, 0x88, 0xa7, 0x3a, 0x44, 0x01, 0x49, 0xa1 },
    { 0xa9, 0x43, 0xf3, 0x17, 0xeb, 0xf1, 0x1b, 0xb2,
      0xa6, 0x91, 0xa5, 0xee, 0x17, 0xf3, 0x63, 0x39 },
    { 0x7a, 0x30, 0xe0, 0x0a, 0xb5, 0x29, 0xe2, 0x2c,
      0xa0, 0x87, 0xae, 0xa5, 0xc0, 0xcb, 0x79, 0xac },
    { 0xbd, 0xce, 0x0c, 0x23, 0x2f, 0xeb, 0xde, 0xff,
      0x1c, 0xfa, 0xae, 0x16, 0x11, 0x23, 0x23, 0x9d },
    { 0x55, 0xdd, 0x3f, 0x7b, 0x77, 0xe7, 0xe6, 0x2e,
      0x9b, 0xb8, 0xc4, 0x99, 0xc9, 0x48, 0x1e, 0xe4 },
    { 0x40, 0x7b, 0xb6, 0xb4, 0x71, 0xe8, 0x91, 0x36,
      0xa7, 0xae, 0xbf, 0x55, 0xca, 0x33, 0xb8, 0x39 },
    { 0xfc, 0xf6, 0xbd, 0xc3, 0xb6, 0x3c, 0x36, 0x97,
      0x7c, 0xe4, 0xf8, 0x25, 0x04, 0xd9, 0x59, 0xb2 },
    { 0x28, 0xe0, 0x91, 0xfd, 0x41, 0x95, 0x4c, 0x4c,
      0x7f, 0xb7, 0xdb, 0x00, 0xe3, 0xa0, 0x66, 0xf8 },
    { 0x57, 0x84, 0x5b, 0x76, 0x4f, 0x25, 0x1b, 0x03,
      0x46, 0xd4, 0x5b, 0xcd, 0xa2, 0xc3, 0x0d, 0x29 },
    { 0x0a, 0xcc, 0xee, 0xf8, 0xda, 0x55, 0xb5, 0x46,
      0x03, 0x47, 0x34, 0x52, 0x58, 0x63, 0x71, 0x3b },
    { 0xb8, 0x20, 0x75, 0xdc, 0xa7, 0x5f, 0x1f, 0xee,
      0xd8, 0x42, 0x68, 0xe8, 0xa7, 0x2a, 0x44, 0xcc },
    { 0x07, 0xcf, 0x6e, 0x9e, 0xa1, 0x6d, 0x7b, 0x25,
      0x9f, 0xa7, 0xae, 0x6c, 0xd9, 0x2f, 0x56, 0x29 },
    { 0xfe, 0xb1, 0xea, 0xe4, 0x8c, 0x8c, 0x3c, 0xe1,
      0x4e, 0x00, 0x64, 0xa7, 0x6a, 0x38, 0x7c, 0x2a },
    { 0x89, 0x3a, 0x94, 0x27, 0xcc, 0x30, 0x13, 0xa2,
      0xf1, 0x06, 0x38, 0x5b, 0xa8, 0x29, 0xf9, 0x27 }
};

/* Fixed Blowfish keys used by rtmpe9_sig() for type-9 signature encryption. */
static const uint8_t rtmpe9_keys[16][24] = {
    { 0x79, 0x34, 0x77, 0x4c, 0x67, 0xd1, 0x38, 0x3a, 0xdf, 0xb3, 0x56, 0xbe,
      0x8b, 0x7b, 0xd0, 0x24, 0x38, 0xe0, 0x73, 0x58, 0x41, 0x5d, 0x69, 0x67, },
    { 0x46, 0xf6, 0xb4, 0xcc, 0x01, 0x93, 0xe3, 0xa1, 0x9e, 0x7d, 0x3c, 0x65,
      0x55, 0x86, 0xfd, 0x09, 0x8f, 0xf7, 0xb3, 0xc4, 0x6f, 0x41, 0xca, 0x5c, },
    { 0x1a, 0xe7, 0xe2, 0xf3, 0xf9, 0x14, 0x79, 0x94, 0xc0, 0xd3, 0x97, 0x43,
      0x08, 0x7b, 0xb3, 0x84, 0x43, 0x2f, 0x9d, 0x84, 0x3f, 0x21, 0x01, 0x9b, },
    { 0xd3, 0xe3, 0x54, 0xb0, 0xf7, 0x1d, 0xf6, 0x2b, 0x5a, 0x43, 0x4d, 0x04,
      0x83, 0x64, 0x3e, 0x0d, 0x59, 0x2f, 0x61, 0xcb, 0xb1, 0x6a, 0x59, 0x0d, },
    { 0xc8, 0xc1, 0xe9, 0xb8, 0x16, 0x56, 0x99, 0x21, 0x7b, 0x5b, 0x36, 0xb7,
      0xb5, 0x9b, 0xdf, 0x06, 0x49, 0x2c, 0x97, 0xf5, 0x95, 0x48, 0x85, 0x7e, },
    { 0xeb, 0xe5, 0xe6, 0x2e, 0xa4, 0xba, 0xd4, 0x2c, 0xf2, 0x16, 0xe0, 0x8f,
      0x66, 0x23, 0xa9, 0x43, 0x41, 0xce, 0x38, 0x14, 0x84, 0x95, 0x00, 0x53, },
    { 0x66, 0xdb, 0x90, 0xf0, 0x3b, 0x4f, 0xf5, 0x6f, 0xe4, 0x9c, 0x20, 0x89,
      0x35, 0x5e, 0xd2, 0xb2, 0xc3, 0x9e, 0x9f, 0x7f, 0x63, 0xb2, 0x28, 0x81, },
    { 0xbb, 0x20, 0xac, 0xed, 0x2a, 0x04, 0x6a, 0x19, 0x94, 0x98, 0x9b, 0xc8,
      0xff, 0xcd, 0x93, 0xef, 0xc6, 0x0d, 0x56, 0xa7, 0xeb, 0x13, 0xd9, 0x30, },
    { 0xbc, 0xf2, 0x43, 0x82, 0x09, 0x40, 0x8a, 0x87, 0x25, 0x43, 0x6d, 0xe6,
      0xbb, 0xa4, 0xb9, 0x44, 0x58, 0x3f, 0x21, 0x7c, 0x99, 0xbb, 0x3f, 0x24, },
    { 0xec, 0x1a, 0xaa, 0xcd, 0xce, 0xbd, 0x53, 0x11, 0xd2, 0xfb, 0x83, 0xb6,
      0xc3, 0xba, 0xab, 0x4f, 0x62, 0x79, 0xe8, 0x65, 0xa9, 0x92, 0x28, 0x76, },
    { 0xc6, 0x0c, 0x30, 0x03, 0x91, 0x18, 0x2d, 0x7b, 0x79, 0xda, 0xe1, 0xd5,
      0x64, 0x77, 0x9a, 0x12, 0xc5, 0xb1, 0xd7, 0x91, 0x4f, 0x96, 0x4c, 0xa3, },
    { 0xd7, 0x7c, 0x2a, 0xbf, 0xa6, 0xe7, 0x85, 0x7c, 0x45, 0xad, 0xff, 0x12,
      0x94, 0xd8, 0xde, 0xa4, 0x5c, 0x3d, 0x79, 0xa4, 0x44, 0x02, 0x5d, 0x22, },
    { 0x16, 0x19, 0x0d, 0x81, 0x6a, 0x4c, 0xc7, 0xf8, 0xb8, 0xf9, 0x4e, 0xcd,
      0x2c, 0x9e, 0x90, 0x84, 0xb2, 0x08, 0x25, 0x60, 0xe1, 0x1e, 0xae, 0x18, },
    { 0xe9, 0x7c, 0x58, 0x26, 0x1b, 0x51, 0x9e, 0x49, 0x82, 0x60, 0x61, 0xfc,
      0xa0, 0xa0, 0x1b, 0xcd, 0xf5, 0x05, 0xd6, 0xa6, 0x6d, 0x07, 0x88, 0xa3, },
    { 0x2b, 0x97, 0x11, 0x8b, 0xd9, 0x4e, 0xd9, 0xdf, 0x20, 0xe3, 0x9c, 0x10,
      0xe6, 0xa1, 0x35, 0x21, 0x11, 0xf9, 0x13, 0x0d, 0x0b, 0x24, 0x65, 0xb2, },
    { 0x53, 0x6a, 0x4c, 0x54, 0xac, 0x8b, 0x9b, 0xb8, 0x97, 0x29, 0xfc, 0x60,
      0x2c, 0x5b, 0x3a, 0x85, 0x68, 0xb5, 0xaa, 0x6a, 0x44, 0xcd, 0x3f, 0xa7, },
};
 
/**
 * Create the 1024-bit DH context and write the client's 128-byte public key
 * into the handshake buffer at the position derived from the buffer contents.
 *
 * @param h   URLContext whose priv_data is an RTMPEContext
 * @param buf client handshake packet (1536 bytes)
 * @return 0 on success, a negative AVERROR/offset error otherwise
 */
int ff_rtmpe_gen_pub_key(URLContext *h, uint8_t *buf)
{
    RTMPEContext *rt = h->priv_data;
    int offset, ret;

    if (!(rt->dh = ff_dh_init(1024)))
        return AVERROR(ENOMEM);

    /* locate where the public key goes inside the handshake packet */
    offset = ff_rtmp_calc_digest_pos(buf, 768, 632, 8);
    if (offset < 0)
        return offset;

    /* generate a Diffie-Hellmann public key */
    if ((ret = ff_dh_generate_public_key(rt->dh)) < 0)
        return ret;

    /* write the public key into the handshake buffer */
    if ((ret = ff_dh_write_public_key(rt->dh, buf + offset, 128)) < 0)
        return ret;

    return 0;
}
 
/**
 * Derive the DH shared secret from the server's public key and initialize
 * the two RC4 session keys (one per direction) from HMAC digests of the
 * respective public keys.
 *
 * @param h          URLContext whose priv_data is an RTMPEContext
 * @param serverdata server handshake data (1536 bytes)
 * @param clientdata client handshake data (1536 bytes)
 * @param type       nonzero selects the alternate server key position
 * @return 0 on success, a negative error code otherwise
 */
int ff_rtmpe_compute_secret_key(URLContext *h, const uint8_t *serverdata,
                                const uint8_t *clientdata, int type)
{
    RTMPEContext *rt = h->priv_data;
    uint8_t secret_key[128], digest[32];
    int server_pos, client_pos;
    int ret;

    /* find the server's public key inside its handshake packet */
    if (type) {
        if ((server_pos = ff_rtmp_calc_digest_pos(serverdata, 1532, 632, 772)) < 0)
            return server_pos;
    } else {
        if ((server_pos = ff_rtmp_calc_digest_pos(serverdata, 768, 632, 8)) < 0)
            return server_pos;
    }

    if ((client_pos = ff_rtmp_calc_digest_pos(clientdata, 768, 632, 8)) < 0)
        return client_pos;

    /* compute the shared secret in order to compute RC4 keys */
    if ((ret = ff_dh_compute_shared_secret_key(rt->dh, serverdata + server_pos,
                                               128, secret_key)) < 0)
        return ret;

    /* set output key: HMAC of the server public key, keyed by the secret */
    if ((ret = ff_rtmp_calc_digest(serverdata + server_pos, 128, 0, secret_key,
                                   128, digest)) < 0)
        return ret;
    av_rc4_init(&rt->key_out, digest, 16 * 8, 1);

    /* set input key: HMAC of the client public key, keyed by the secret */
    if ((ret = ff_rtmp_calc_digest(clientdata + client_pos, 128, 0, secret_key,
                                   128, digest)) < 0)
        return ret;
    av_rc4_init(&rt->key_in, digest, 16 * 8, 1);

    return 0;
}
 
static void rtmpe8_sig(const uint8_t *in, uint8_t *out, int key_id)
{
struct AVXTEA ctx;
 
av_xtea_init(&ctx, rtmpe8_keys[key_id]);
av_xtea_crypt(&ctx, out, in, 1, NULL, 0);
}
 
static void rtmpe9_sig(const uint8_t *in, uint8_t *out, int key_id)
{
struct AVBlowfish ctx;
uint32_t xl, xr;
 
xl = AV_RL32(in);
xr = AV_RL32(in + 4);
 
av_blowfish_init(&ctx, rtmpe9_keys[key_id], 24);
av_blowfish_crypt_ecb(&ctx, &xl, &xr, 0);
 
AV_WL32(out, xl);
AV_WL32(out + 4, xr);
}
 
/**
 * Encrypt the 32-byte handshake signature in place, 8 bytes at a time,
 * choosing the fixed key from the digest byte at the same offset.
 *
 * NOTE(review): the key index uses digest[i] % 15, so key 15 of the 16-entry
 * tables is never selected — presumably matching the official client's
 * behavior; verify against a reference implementation before "fixing".
 *
 * @param type 8 selects XTEA, 9 selects Blowfish; anything else is a no-op
 */
void ff_rtmpe_encrypt_sig(URLContext *h, uint8_t *sig, const uint8_t *digest,
                          int type)
{
    int i;

    for (i = 0; i < 32; i += 8) {
        if (type == 8) {
            /* RTMPE type 8 uses XTEA on the signature */
            rtmpe8_sig(sig + i, sig + i, digest[i] % 15);
        } else if (type == 9) {
            /* RTMPE type 9 uses Blowfish on the signature */
            rtmpe9_sig(sig + i, sig + i, digest[i] % 15);
        }
    }
}
 
/* Advance both RC4 keystreams past the 1536-byte handshake packet and mark
 * the connection as handshaked so read/write start en/decrypting. */
int ff_rtmpe_update_keystream(URLContext *h)
{
    RTMPEContext *rt = h->priv_data;
    char discard[RTMP_HANDSHAKE_PACKET_SIZE];

    av_rc4_crypt(&rt->key_in,  discard, NULL, sizeof(discard), NULL, 1);
    av_rc4_crypt(&rt->key_out, discard, NULL, sizeof(discard), NULL, 1);

    rt->handshaked = 1;

    return 0;
}
 
/* Release the DH context and close the underlying transport. */
static int rtmpe_close(URLContext *h)
{
    RTMPEContext *ctx = h->priv_data;

    ff_dh_free(ctx->dh);
    ffurl_close(ctx->stream);

    return 0;
}
 
/**
 * Open the transport for an RTMPE/RTMPTE connection: plain TCP (default
 * port 1935) or an ffrtmphttp tunnel (default port 80) when tunneling is
 * enabled.  The cryptographic handshake itself is driven later by the RTMP
 * code through the ff_rtmpe_* helpers.
 *
 * @return 0 on success, a negative error code otherwise
 */
static int rtmpe_open(URLContext *h, const char *uri, int flags)
{
    RTMPEContext *rt = h->priv_data;
    char host[256], url[1024];
    int ret, port;

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port, NULL, 0, uri);

    if (rt->tunneling) {
        if (port < 0)
            port = 80;
        ff_url_join(url, sizeof(url), "ffrtmphttp", NULL, host, port, NULL);
    } else {
        if (port < 0)
            port = 1935;
        ff_url_join(url, sizeof(url), "tcp", NULL, host, port, NULL);
    }

    /* open the tcp or ffrtmphttp connection */
    if ((ret = ffurl_open(&rt->stream, url, AVIO_FLAG_READ_WRITE,
                          &h->interrupt_callback, NULL)) < 0) {
        rtmpe_close(h);
        return ret;
    }

    return 0;
}
 
/**
 * Read from the underlying transport, propagating this context's
 * non-blocking flag for the duration of the call, and RC4-decrypt the data
 * in place once the handshake has completed.
 *
 * @return number of bytes read, or a negative error code / AVERROR_EOF
 */
static int rtmpe_read(URLContext *h, uint8_t *buf, int size)
{
    RTMPEContext *rt = h->priv_data;
    int ret;

    rt->stream->flags |= h->flags & AVIO_FLAG_NONBLOCK;
    ret = ffurl_read(rt->stream, buf, size);
    rt->stream->flags &= ~AVIO_FLAG_NONBLOCK;

    if (ret < 0 && ret != AVERROR_EOF)
        return ret;

    if (rt->handshaked && ret > 0) {
        /* decrypt data received by the server */
        av_rc4_crypt(&rt->key_in, buf, buf, ret, NULL, 1);
    }

    return ret;
}
 
/**
 * RC4-encrypt the caller's buffer in place (once handshaked) and write it
 * to the underlying transport.
 *
 * NOTE(review): the in-place encryption writes through the const-qualified
 * buf parameter (av_rc4_crypt's dst discards const) — the URLProtocol write
 * contract here evidently permits mutating the buffer; confirm before
 * passing read-only memory.
 *
 * @return size on success, a negative error code otherwise
 */
static int rtmpe_write(URLContext *h, const uint8_t *buf, int size)
{
    RTMPEContext *rt = h->priv_data;
    int ret;

    if (rt->handshaked) {
        /* encrypt data to send to the server */
        av_rc4_crypt(&rt->key_out, buf, buf, size, NULL, 1);
    }

    if ((ret = ffurl_write(rt->stream, buf, size)) < 0)
        return ret;

    return size;
}
 
#define OFFSET(x) offsetof(RTMPEContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

/* Private options exposed by the ffrtmpcrypt protocol. */
static const AVOption ffrtmpcrypt_options[] = {
    {"ffrtmpcrypt_tunneling", "Use a HTTP tunneling connection (RTMPTE).", OFFSET(tunneling), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC},
    { NULL },
};

static const AVClass ffrtmpcrypt_class = {
    .class_name = "ffrtmpcrypt",
    .item_name  = av_default_item_name,
    .option     = ffrtmpcrypt_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Protocol handler registration for encrypted RTMP (RTMPE / RTMPTE). */
URLProtocol ff_ffrtmpcrypt_protocol = {
    .name            = "ffrtmpcrypt",
    .url_open        = rtmpe_open,
    .url_read        = rtmpe_read,
    .url_write       = rtmpe_write,
    .url_close       = rtmpe_close,
    .priv_data_size  = sizeof(RTMPEContext),
    .flags           = URL_PROTOCOL_FLAG_NETWORK,
    .priv_data_class = &ffrtmpcrypt_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtmpcrypt.h
0,0 → 1,69
/*
* RTMPE encryption utilities
* Copyright (c) 2012 Samuel Pitoiset
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RTMPCRYPT_H
#define AVFORMAT_RTMPCRYPT_H
 
#include <stdint.h>
 
#include "url.h"
 
/**
* Initialize the Diffie-Hellmann context and generate the public key.
*
* @param h an URLContext
* @param buf handshake data (1536 bytes)
* @return zero on success, negative value otherwise
*/
int ff_rtmpe_gen_pub_key(URLContext *h, uint8_t *buf);
 
/**
* Compute the shared secret key and initialize the RC4 encryption.
*
* @param h an URLContext
* @param serverdata server data (1536 bytes)
* @param clientdata client data (1536 bytes)
* @param type the position of the server digest
* @return zero on success, negative value otherwise
*/
int ff_rtmpe_compute_secret_key(URLContext *h, const uint8_t *serverdata,
const uint8_t *clientdata, int type);
 
/**
* Encrypt the signature.
*
* @param h an URLContext
* @param signature the signature to encrypt
* @param digest the digest used for finding the encryption key
* @param type type of encryption (8 for XTEA, 9 for Blowfish)
*/
void ff_rtmpe_encrypt_sig(URLContext *h, uint8_t *signature,
const uint8_t *digest, int type);
 
/**
* Update the keystream and set RC4 keys for encryption.
*
* @param h an URLContext
* @return zero on success, negative value otherwise
*/
int ff_rtmpe_update_keystream(URLContext *h);
 
#endif /* AVFORMAT_RTMPCRYPT_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtmpdh.c
0,0 → 1,339
/*
* RTMP Diffie-Hellmann utilities
* Copyright (c) 2009 Andrej Stepanchuk
* Copyright (c) 2009-2010 Howard Chu
* Copyright (c) 2012 Samuel Pitoiset
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* RTMP Diffie-Hellmann utilities
*/
 
#include "config.h"
#include "rtmpdh.h"
#include "libavutil/random_seed.h"
 
#define P1024 \
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" \
"29024E088A67CC74020BBEA63B139B22514A08798E3404DD" \
"EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" \
"E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" \
"EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381" \
"FFFFFFFFFFFFFFFF"
 
#define Q1024 \
"7FFFFFFFFFFFFFFFE487ED5110B4611A62633145C06E0E68" \
"948127044533E63A0105DF531D89CD9128A5043CC71A026E" \
"F7CA8CD9E69D218D98158536F92F8A1BA7F09AB6B6A8E122" \
"F242DABB312F3F637A262174D31BF6B585FFAE5B7A035BF6" \
"F71C35FDAD44CFD2D74F9208BE258FF324943328F67329C0" \
"FFFFFFFFFFFFFFFF"
 
#if CONFIG_NETTLE || CONFIG_GCRYPT
#if CONFIG_NETTLE
/* GMP/nettle backend: FFBigNum is a heap-allocated mpz_t (mpz_ptr). */
#define bn_new(bn)                  \
    do {                            \
        bn = av_malloc(sizeof(*bn)); \
        if (bn)                     \
            mpz_init2(bn, 1);       \
    } while (0)
/* NULL-safe: mpz_clear() must not be called on a NULL mpz_ptr, and a
 * field can legitimately still be NULL when ff_dh_free() runs on the
 * ff_dh_init() failure path. */
#define bn_free(bn)                 \
    do {                            \
        if (bn) {                   \
            mpz_clear(bn);          \
            av_free(bn);            \
        }                           \
    } while (0)
#define bn_set_word(bn, w)          mpz_set_ui(bn, w)
#define bn_cmp(a, b)                mpz_cmp(a, b)
#define bn_copy(to, from)           mpz_set(to, from)
#define bn_sub_word(bn, w)          mpz_sub_ui(bn, bn, w)
#define bn_cmp_1(bn)                mpz_cmp_ui(bn, 1)
#define bn_num_bytes(bn)            (mpz_sizeinbase(bn, 2) + 7) / 8
#define bn_bn2bin(bn, buf, len)     nettle_mpz_get_str_256(len, buf, bn)
#define bn_bin2bn(bn, buf, len)     \
    do {                            \
        bn_new(bn);                 \
        if (bn)                     \
            nettle_mpz_set_str_256_u(bn, len, buf); \
    } while (0)
/* ret receives nonzero on success, zero on parse/alloc failure */
#define bn_hex2bn(bn, buf, ret)     \
    do {                            \
        bn_new(bn);                 \
        if (bn)                     \
            ret = (mpz_set_str(bn, buf, 16) == 0); \
    } while (0)
#define bn_modexp(bn, y, q, p)      mpz_powm(bn, y, q, p)
/* NOTE: mpz_urandomb() takes a BIT count — callers must pass bits. */
#define bn_random(bn, num_bytes)    \
    do {                            \
        gmp_randstate_t rs;         \
        gmp_randinit_mt(rs);        \
        gmp_randseed_ui(rs, av_get_random_seed()); \
        mpz_urandomb(bn, rs, num_bytes); \
        gmp_randclear(rs);          \
    } while (0)
#elif CONFIG_GCRYPT
/* libgcrypt backend: gcry_mpi_t is already a pointer type.
 * gcry_mpi_release() and gcry_mpi_randomize(nbits) follow the same
 * conventions as the nettle wrappers above. */
#define bn_new(bn)                  bn = gcry_mpi_new(1)
#define bn_free(bn)                 gcry_mpi_release(bn)
#define bn_set_word(bn, w)          gcry_mpi_set_ui(bn, w)
#define bn_cmp(a, b)                gcry_mpi_cmp(a, b)
#define bn_copy(to, from)           gcry_mpi_set(to, from)
#define bn_sub_word(bn, w)          gcry_mpi_sub_ui(bn, bn, w)
#define bn_cmp_1(bn)                gcry_mpi_cmp_ui(bn, 1)
#define bn_num_bytes(bn)            (gcry_mpi_get_nbits(bn) + 7) / 8
#define bn_bn2bin(bn, buf, len)     gcry_mpi_print(GCRYMPI_FMT_USG, buf, len, NULL, bn)
#define bn_bin2bn(bn, buf, len)     gcry_mpi_scan(&bn, GCRYMPI_FMT_USG, buf, len, NULL)
#define bn_hex2bn(bn, buf, ret)     ret = (gcry_mpi_scan(&bn, GCRYMPI_FMT_HEX, buf, 0, 0) == 0)
#define bn_modexp(bn, y, q, p)      gcry_mpi_powm(bn, y, q, p)
#define bn_random(bn, num_bytes)    gcry_mpi_randomize(bn, num_bytes, GCRY_WEAK_RANDOM)
#endif
 
/* upper sanity bound on the modulus size handled here */
#define MAX_BYTES 18000

/* Allocate a zero-initialized FF_DH. Zeroing matters: ff_dh_init()'s
 * failure path calls ff_dh_free(), which frees every bignum field, so
 * fields that were never assigned must be NULL rather than heap garbage
 * (the previous av_malloc() left them uninitialized). */
#define dh_new()                    av_mallocz(sizeof(FF_DH))
 
/**
 * Generate the DH private/public key pair (nettle/gcrypt builds only;
 * with OpenSSL this name is a macro mapped onto DH_generate_key()).
 *
 * @param dh context with p and g already initialized
 * @return the new public key on success, NULL on failure
 */
static FFBigNum dh_generate_key(FF_DH *dh)
{
    int num_bytes;

    num_bytes = bn_num_bytes(dh->p) - 1;
    if (num_bytes <= 0 || num_bytes > MAX_BYTES)
        return NULL;

    bn_new(dh->priv_key);
    if (!dh->priv_key)
        return NULL;
    /* Both backends count BITS here (mpz_urandomb / gcry_mpi_randomize),
     * so convert the byte count: passing num_bytes directly produced a
     * private exponent of only num_bytes bits. */
    bn_random(dh->priv_key, 8 * num_bytes);

    bn_new(dh->pub_key);
    if (!dh->pub_key) {
        bn_free(dh->priv_key);
        return NULL;
    }

    /* pub_key = g^priv_key mod p */
    bn_modexp(dh->pub_key, dh->g, dh->priv_key, dh->p);

    return dh->pub_key;
}
 
/**
 * Compute the shared secret from our private key and the peer's public
 * value (nettle/gcrypt builds only; with OpenSSL this name is a macro
 * mapped onto DH_compute_key()).
 *
 * @param dh          context holding p and priv_key
 * @param pub_key_bn  peer's public value as a bignum
 * @param pub_key_len number of bytes to emit into secret_key
 * @param secret_key  output buffer, receives the big-endian secret
 * @return pub_key_len on success, -1 on failure
 */
static int dh_compute_key(FF_DH *dh, FFBigNum pub_key_bn,
                          uint32_t pub_key_len, uint8_t *secret_key)
{
    FFBigNum k;
    int num_bytes;

    num_bytes = bn_num_bytes(dh->p);
    if (num_bytes <= 0 || num_bytes > MAX_BYTES)
        return -1;

    bn_new(k);
    if (!k)
        return -1;

    /* k = pub_key_bn^priv_key mod p */
    bn_modexp(k, pub_key_bn, dh->priv_key, dh->p);
    bn_bn2bin(k, secret_key, pub_key_len);
    bn_free(k);

    /* return the length of the shared secret key like DH_compute_key */
    return pub_key_len;
}
 
/**
 * Free a Diffie-Hellman context and every bignum it owns
 * (nettle/gcrypt builds; the OpenSSL build uses DH_free() instead).
 *
 * @param dh context to free; NULL is tolerated as a no-op
 */
void ff_dh_free(FF_DH *dh)
{
    if (!dh)
        return;
    bn_free(dh->p);
    bn_free(dh->g);
    bn_free(dh->pub_key);
    bn_free(dh->priv_key);
    av_free(dh);
}
#elif CONFIG_OPENSSL
/* OpenSSL backend: thin aliases onto the BN_*/DH_* API. */
#define bn_new(bn)                  bn = BN_new()
#define bn_free(bn)                 BN_free(bn)
#define bn_set_word(bn, w)          BN_set_word(bn, w)
#define bn_cmp(a, b)                BN_cmp(a, b)
#define bn_copy(to, from)           BN_copy(to, from)
#define bn_sub_word(bn, w)          BN_sub_word(bn, w)
#define bn_cmp_1(bn)                BN_cmp(bn, BN_value_one())
#define bn_num_bytes(bn)            BN_num_bytes(bn)
#define bn_bn2bin(bn, buf, len)     BN_bn2bin(bn, buf)
#define bn_bin2bn(bn, buf, len)     bn = BN_bin2bn(buf, len, 0)
#define bn_hex2bn(bn, buf, ret)     ret = BN_hex2bn(&bn, buf)
/* CAUTION: this macro contains `return` statements, so it may only be
 * expanded inside functions returning int (AVERROR codes). */
#define bn_modexp(bn, y, q, p)               \
    do {                                     \
        BN_CTX *ctx = BN_CTX_new();          \
        if (!ctx)                            \
            return AVERROR(ENOMEM);          \
        if (!BN_mod_exp(bn, y, q, p, ctx)) { \
            BN_CTX_free(ctx);                \
            return AVERROR(EINVAL);          \
        }                                    \
        BN_CTX_free(ctx);                    \
    } while (0)

/* key generation/derivation are provided by OpenSSL directly */
#define dh_new()                             DH_new()
#define dh_generate_key(dh)                  DH_generate_key(dh)
#define dh_compute_key(dh, pub, len, secret) DH_compute_key(secret, pub, dh)

/* OpenSSL build: DH_free() releases the struct and all contained BNs. */
void ff_dh_free(FF_DH *dh)
{
    DH_free(dh);
}
#endif
 
/**
 * Check that a peer's DH public value y is usable for group (p, g).
 *
 * @param y public value to validate
 * @param p prime modulus
 * @param q Sophie Germain prime, (p - 1) / 2
 * @return 0 if valid, AVERROR(EINVAL) if not, AVERROR(ENOMEM) on OOM
 */
static int dh_is_valid_public_key(FFBigNum y, FFBigNum p, FFBigNum q)
{
    FFBigNum bn = NULL;
    int ret = AVERROR(EINVAL);

    bn_new(bn);
    if (!bn)
        return AVERROR(ENOMEM);

    /* reject the degenerate value y == 1 */
    bn_set_word(bn, 1);
    if (!bn_cmp(y, bn))
        goto fail;

    /* bn = p - 1; reject y == p - 1 (note: NOT p - 2 as a previous
     * comment claimed — only one is subtracted) */
    bn_copy(bn, p);
    bn_sub_word(bn, 1);
    if (!bn_cmp(y, bn))
        goto fail;

    /* Verify with Sophie-Germain prime
     *
     * This is a nice test to make sure the public key position is calculated
     * correctly. This test will fail in about 50% of the cases if applied to
     * random data.
     */
    /* y must fulfill y^q mod p = 1; this also rejects y == 0, since
     * 0^q mod p = 0 != 1 */
    bn_modexp(bn, y, q, p);

    if (bn_cmp_1(bn))
        goto fail;

    ret = 0;
fail:
    bn_free(bn);

    return ret;
}
 
/**
 * Initialize a Diffie-Hellman context with the fixed 1024-bit RTMP
 * group: p = P1024, g = 2.
 *
 * @param key_len desired key length, stored in dh->length
 * @return new context, or NULL on allocation/parse failure
 */
av_cold FF_DH *ff_dh_init(int key_len)
{
    FF_DH *dh;
    int ret;

    if (!(dh = dh_new()))
        return NULL;

    bn_new(dh->g);
    if (!dh->g)
        goto fail;

    /* bn_hex2bn allocates dh->p itself; ret is nonzero on success */
    bn_hex2bn(dh->p, P1024, ret);
    if (!ret)
        goto fail;

    bn_set_word(dh->g, 2);
    dh->length = key_len;

    return dh;

fail:
    /* NOTE(review): this relies on dh_new() handing back a
     * zero-initialized struct so ff_dh_free() only sees NULL or valid
     * bignums here — confirm for each crypto backend */
    ff_dh_free(dh);

    return NULL;
}
 
/**
 * Generate a key pair and validate the resulting public key against the
 * group's Sophie Germain prime.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
int ff_dh_generate_public_key(FF_DH *dh)
{
    int ret = 0;

    while (!ret) {
        FFBigNum q1 = NULL;

        if (!dh_generate_key(dh))
            return AVERROR(EINVAL);

        bn_hex2bn(q1, Q1024, ret);
        if (!ret)
            return AVERROR(ENOMEM);

        ret = dh_is_valid_public_key(dh->pub_key, dh->p, q1);
        bn_free(q1);

        if (!ret) {
            /* the public key is valid */
            break;
        }
        /* NOTE(review): on an invalid key ret is a negative AVERROR, so
         * the while (!ret) condition also terminates the loop and the
         * error is returned — this apparent retry loop never actually
         * retries. Confirm whether regeneration was intended. */
    }

    return ret;
}
 
/**
 * Serialize our public key, right-aligned and zero-padded, into a
 * caller-provided big-endian buffer.
 *
 * @param dh          context whose pub_key is written
 * @param pub_key     destination buffer
 * @param pub_key_len destination buffer size in bytes
 * @return 0 on success, AVERROR(EINVAL) if the key does not fit
 */
int ff_dh_write_public_key(FF_DH *dh, uint8_t *pub_key, int pub_key_len)
{
    int key_size = bn_num_bytes(dh->pub_key);

    if (key_size <= 0 || key_size > pub_key_len)
        return AVERROR(EINVAL);

    /* left-pad with zeros so the value occupies the buffer's tail */
    memset(pub_key, 0, pub_key_len);
    bn_bn2bin(dh->pub_key, pub_key + pub_key_len - key_size, key_size);

    return 0;
}
 
/**
 * Validate the peer's serialized public key and derive the shared
 * secret from it.
 *
 * @param dh          context holding p and our priv_key
 * @param pub_key     peer public key, big-endian bytes
 * @param pub_key_len length of pub_key and of the secret to produce
 * @param secret_key  output buffer for the shared secret
 * @return secret length (positive) on success, negative AVERROR on error
 */
int ff_dh_compute_shared_secret_key(FF_DH *dh, const uint8_t *pub_key,
                                    int pub_key_len, uint8_t *secret_key)
{
    FFBigNum q1 = NULL, pub_key_bn = NULL;
    int ret;

    /* convert the big-endian form of the public key into a bignum */
    bn_bin2bn(pub_key_bn, pub_key, pub_key_len);
    if (!pub_key_bn)
        return AVERROR(ENOMEM);

    /* convert the string containing a hexadecimal number into a bignum */
    bn_hex2bn(q1, Q1024, ret);
    if (!ret) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* when the public key is valid we have to compute the shared secret key */
    if ((ret = dh_is_valid_public_key(pub_key_bn, dh->p, q1)) < 0) {
        goto fail;
    } else if ((ret = dh_compute_key(dh, pub_key_bn, pub_key_len,
                                     secret_key)) < 0) {
        ret = AVERROR(EINVAL);
        goto fail;
    }

    /* success falls through into the shared cleanup below, returning
     * the positive length produced by dh_compute_key() */
fail:
    bn_free(pub_key_bn);
    bn_free(q1);

    return ret;
}
 
/contrib/sdk/sources/ffmpeg/libavformat/rtmpdh.h
0,0 → 1,102
/*
* RTMP Diffie-Hellmann utilities
* Copyright (c) 2012 Samuel Pitoiset
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RTMPDH_H
#define AVFORMAT_RTMPDH_H
 
#include "avformat.h"
#include "config.h"
 
#if CONFIG_NETTLE || CONFIG_GCRYPT
#if CONFIG_NETTLE
#include <gmp.h>
#include <nettle/bignum.h>

/* GMP/nettle backend: a bignum is a heap-allocated mpz_t */
typedef mpz_ptr FFBigNum;
#elif CONFIG_GCRYPT
#include <gcrypt.h>

/* libgcrypt backend: gcry_mpi_t is already a pointer type */
typedef gcry_mpi_t FFBigNum;
#endif

/* Minimal DH state, mirroring the OpenSSL DH fields rtmpdh.c uses. */
typedef struct FF_DH {
    FFBigNum p;        // prime modulus
    FFBigNum g;        // generator
    FFBigNum pub_key;  // g^priv_key mod p
    FFBigNum priv_key; // random private exponent
    long length;       // key length requested in ff_dh_init()
} FF_DH;

#elif CONFIG_OPENSSL
#include <openssl/bn.h>
#include <openssl/dh.h>

typedef BIGNUM *FFBigNum;
typedef DH FF_DH;
#endif
 
/**
* Initialize a Diffie-Hellmann context.
*
* @param key_len length of the key
* @return a new Diffie-Hellmann context on success, NULL otherwise
*/
FF_DH *ff_dh_init(int key_len);
 
/**
* Free a Diffie-Hellmann context.
*
* @param dh a Diffie-Hellmann context to free
*/
void ff_dh_free(FF_DH *dh);
 
/**
* Generate a public key.
*
* @param dh a Diffie-Hellmann context
* @return zero on success, negative value otherwise
*/
int ff_dh_generate_public_key(FF_DH *dh);
 
/**
* Write the public key into the given buffer.
*
* @param dh a Diffie-Hellmann context, containing the public key to write
* @param pub_key the buffer where the public key is written
* @param pub_key_len the length of the buffer
* @return zero on success, negative value otherwise
*/
int ff_dh_write_public_key(FF_DH *dh, uint8_t *pub_key, int pub_key_len);
 
/**
* Compute the shared secret key from the private FF_DH value and the
* other party's public value.
*
* @param dh a Diffie-Hellmann context, containing the private key
* @param pub_key the buffer containing the public key
* @param pub_key_len the length of the buffer
* @param secret_key the buffer where the secret key is written
* @return length of the shared secret key on success, negative value otherwise
*/
int ff_dh_compute_shared_secret_key(FF_DH *dh, const uint8_t *pub_key,
int pub_key_len, uint8_t *secret_key);
 
#endif /* AVFORMAT_RTMPDH_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtmphttp.c
0,0 → 1,277
/*
* RTMP HTTP network protocol
* Copyright (c) 2012 Samuel Pitoiset
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* RTMP HTTP protocol
*/
 
#include "libavutil/avstring.h"
#include "libavutil/intfloat.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "internal.h"
#include "http.h"
#include "rtmp.h"
 
#define RTMPT_DEFAULT_PORT  80
#define RTMPTS_DEFAULT_PORT RTMPS_DEFAULT_PORT

/* protocol handler context */
typedef struct RTMP_HTTPContext {
    const AVClass *class;
    URLContext    *stream;           ///< HTTP stream
    char          host[256];         ///< hostname of the server
    int           port;              ///< port to connect (default is 80)
    char          client_id[64];     ///< client ID used for all requests except the first one
    int           seq;               ///< sequence ID used for all requests
    uint8_t       *out_data;         ///< output buffer
    int           out_size;          ///< current output buffer size
    int           out_capacity;      ///< current output buffer capacity
    int           initialized;       ///< flag indicating when the http context is initialized
    int           finishing;         ///< flag indicating when the client closes the connection
    int           nb_bytes_read;     ///< number of bytes read since the last request
    int           tls;               ///< use Transport Layer Security (RTMPTS)
} RTMP_HTTPContext;
 
/**
 * Flush the buffered output as one HTTP POST "/<cmd>/<client_id>/<seq>"
 * request and read the one-byte polling-interval header of the reply.
 *
 * @param h   URL context
 * @param cmd RTMPT command name ("send", "idle", "close", ...)
 * @return >= 0 on success, a negative AVERROR code otherwise
 */
static int rtmp_http_send_cmd(URLContext *h, const char *cmd)
{
    RTMP_HTTPContext *rt = h->priv_data;
    char uri[2048];
    uint8_t c;
    int ret;

    /* NOTE(review): the scheme here is always "http", even when the
     * RTMPTS (https) stream is in use — presumably only path/host matter
     * for the already-open connection; verify against ff_http_do_new_request */
    ff_url_join(uri, sizeof(uri), "http", NULL, rt->host, rt->port,
                "/%s/%s/%d", cmd, rt->client_id, rt->seq++);

    /* attach the buffered bytes as the POST body */
    av_opt_set_bin(rt->stream->priv_data, "post_data", rt->out_data,
                   rt->out_size, 0);

    /* send a new request to the server */
    if ((ret = ff_http_do_new_request(rt->stream, uri)) < 0)
        return ret;

    /* re-init output buffer */
    rt->out_size = 0;

    /* read the first byte which contains the polling interval */
    if ((ret = ffurl_read(rt->stream, &c, 1)) < 0)
        return ret;

    /* re-init the number of bytes read */
    rt->nb_bytes_read = 0;

    return ret;
}
 
/**
 * Append data to the output buffer; it is only transmitted later, when
 * rtmp_http_send_cmd() posts the accumulated bytes.
 *
 * @return size on success, a negative AVERROR code on allocation failure
 */
static int rtmp_http_write(URLContext *h, const uint8_t *buf, int size)
{
    RTMP_HTTPContext *rt = h->priv_data;
    int needed = rt->out_size + size;

    /* grow the buffer (doubling policy) when the new data does not fit */
    if (needed > rt->out_capacity) {
        int err;
        rt->out_capacity = needed * 2;
        if ((err = av_reallocp(&rt->out_data, rt->out_capacity)) < 0) {
            /* av_reallocp freed the buffer; drop all buffered state */
            rt->out_size     = 0;
            rt->out_capacity = 0;
            return err;
        }
    }

    memcpy(rt->out_data + rt->out_size, buf, size);
    rt->out_size = needed;

    return size;
}
 
/**
 * Read tunneled RTMP data. When the current HTTP response is exhausted,
 * transparently issue the next POST ("send" if output is buffered,
 * otherwise "idle") to keep the tunnel polling.
 *
 * @return number of bytes read (> 0), or a negative AVERROR code
 */
static int rtmp_http_read(URLContext *h, uint8_t *buf, int size)
{
    RTMP_HTTPContext *rt = h->priv_data;
    int ret, off = 0;

    /* try to read at least 1 byte of data */
    do {
        ret = ffurl_read(rt->stream, buf + off, size);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;

        if (ret == AVERROR_EOF) {
            if (rt->finishing) {
                /* Do not send new requests when the client wants to
                 * close the connection. */
                return AVERROR(EAGAIN);
            }

            /* When the client has reached end of file for the last request,
             * we have to send a new request if we have buffered data.
             * Otherwise, we have to send an idle POST. */
            if (rt->out_size > 0) {
                if ((ret = rtmp_http_send_cmd(h, "send")) < 0)
                    return ret;
            } else {
                if (rt->nb_bytes_read == 0) {
                    /* Wait 50ms before retrying to read a server reply in
                     * order to reduce the number of idle requests. */
                    av_usleep(50000);
                }

                /* the idle POST carries a single NUL byte as its body */
                if ((ret = rtmp_http_write(h, "", 1)) < 0)
                    return ret;

                if ((ret = rtmp_http_send_cmd(h, "idle")) < 0)
                    return ret;
            }

            if (h->flags & AVIO_FLAG_NONBLOCK) {
                /* no incoming data to handle in nonblocking mode */
                return AVERROR(EAGAIN);
            }
        } else {
            off               += ret;
            size              -= ret;
            rt->nb_bytes_read += ret;
        }
    } while (off <= 0);

    return off;
}
 
/**
 * Close the RTMPT session: drain pending server data, send the "close"
 * command, then release the buffer and the underlying HTTP stream.
 * Also used as the cleanup path of rtmp_http_open().
 */
static int rtmp_http_close(URLContext *h)
{
    RTMP_HTTPContext *rt = h->priv_data;
    uint8_t tmp_buf[2048];
    int ret = 0;

    if (rt->initialized) {
        /* client wants to close the connection */
        rt->finishing = 1;

        /* drain whatever the server still has queued for us */
        do {
            ret = rtmp_http_read(h, tmp_buf, sizeof(tmp_buf));
        } while (ret > 0);

        /* re-init output buffer before sending the close command */
        rt->out_size = 0;

        /* queue the single idle byte, then issue the "close" request */
        if ((ret = rtmp_http_write(h, "", 1)) == 1)
            ret = rtmp_http_send_cmd(h, "close");
    }

    av_freep(&rt->out_data);
    ffurl_close(rt->stream);

    return ret;
}
 
/**
 * Open an RTMPT(S) session: connect to "/open/1" and store the
 * whitespace-trimmed client ID the server replies with; that ID is then
 * used in the path of every subsequent request.
 */
static int rtmp_http_open(URLContext *h, const char *uri, int flags)
{
    RTMP_HTTPContext *rt = h->priv_data;
    char headers[1024], url[1024];
    int ret, off = 0;

    av_url_split(NULL, 0, NULL, 0, rt->host, sizeof(rt->host), &rt->port,
                 NULL, 0, uri);

    /* This is the first request that is sent to the server in order to
     * register a client on the server and start a new session. The server
     * replies with a unique id (usually a number) that is used by the client
     * for all future requests.
     * Note: the reply doesn't contain a value for the polling interval.
     * A successful connect resets the consecutive index that is used
     * in the URLs. */
    if (rt->tls) {
        if (rt->port < 0)
            rt->port = RTMPTS_DEFAULT_PORT;
        ff_url_join(url, sizeof(url), "https", NULL, rt->host, rt->port, "/open/1");
    } else {
        if (rt->port < 0)
            rt->port = RTMPT_DEFAULT_PORT;
        ff_url_join(url, sizeof(url), "http", NULL, rt->host, rt->port, "/open/1");
    }

    /* alloc the http context */
    if ((ret = ffurl_alloc(&rt->stream, url, AVIO_FLAG_READ_WRITE, NULL)) < 0)
        goto fail;

    /* set options */
    snprintf(headers, sizeof(headers),
             "Cache-Control: no-cache\r\n"
             "Content-type: application/x-fcs\r\n"
             "User-Agent: Shockwave Flash\r\n");
    av_opt_set(rt->stream->priv_data, "headers", headers, 0);
    av_opt_set(rt->stream->priv_data, "multiple_requests", "1", 0);
    av_opt_set_bin(rt->stream->priv_data, "post_data", "", 1, 0);

    /* open the http context */
    if ((ret = ffurl_connect(rt->stream, NULL)) < 0)
        goto fail;

    /* read the server reply which contains a unique ID */
    for (;;) {
        ret = ffurl_read(rt->stream, rt->client_id + off, sizeof(rt->client_id) - off);
        if (ret == AVERROR_EOF)
            break;
        if (ret < 0)
            goto fail;
        off += ret;
        /* an ID filling the whole buffer leaves no room for the NUL */
        if (off == sizeof(rt->client_id)) {
            ret = AVERROR(EIO);
            goto fail;
        }
    }
    /* strip trailing whitespace/newlines from the ID */
    while (off > 0 && av_isspace(rt->client_id[off - 1]))
        off--;
    rt->client_id[off] = '\0';

    /* http context is now initialized */
    rt->initialized = 1;
    return 0;

fail:
    rtmp_http_close(h);
    return ret;
}
 
#define OFFSET(x) offsetof(RTMP_HTTPContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

/* private options: only the RTMPTS (HTTPS tunneling) switch */
static const AVOption ffrtmphttp_options[] = {
    {"ffrtmphttp_tls", "Use a HTTPS tunneling connection (RTMPTS).", OFFSET(tls), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC},
    { NULL },
};

static const AVClass ffrtmphttp_class = {
    .class_name = "ffrtmphttp",
    .item_name  = av_default_item_name,
    .option     = ffrtmphttp_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* tunnels RTMP chunk data over HTTP POST requests (RTMPT/RTMPTS) */
URLProtocol ff_ffrtmphttp_protocol = {
    .name           = "ffrtmphttp",
    .url_open       = rtmp_http_open,
    .url_read       = rtmp_http_read,
    .url_write      = rtmp_http_write,
    .url_close      = rtmp_http_close,
    .priv_data_size = sizeof(RTMP_HTTPContext),
    .flags          = URL_PROTOCOL_FLAG_NETWORK,
    .priv_data_class= &ffrtmphttp_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtmppkt.c
0,0 → 1,644
/*
* RTMP input format
* Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/bytestream.h"
#include "libavutil/avstring.h"
#include "libavutil/intfloat.h"
#include "avformat.h"
 
#include "rtmppkt.h"
#include "flv.h"
#include "url.h"
 
/* Write an AMF boolean (type marker + 1 data byte) at *dst, advancing it. */
void ff_amf_write_bool(uint8_t **dst, int val)
{
    bytestream_put_byte(dst, AMF_DATA_TYPE_BOOL);
    bytestream_put_byte(dst, val);
}

/* Write an AMF number: marker followed by the IEEE-754 double, big-endian. */
void ff_amf_write_number(uint8_t **dst, double val)
{
    bytestream_put_byte(dst, AMF_DATA_TYPE_NUMBER);
    bytestream_put_be64(dst, av_double2int(val));
}

/* Write an AMF short string: marker, 16-bit big-endian length, raw bytes. */
void ff_amf_write_string(uint8_t **dst, const char *str)
{
    bytestream_put_byte(dst, AMF_DATA_TYPE_STRING);
    bytestream_put_be16(dst, strlen(str));
    bytestream_put_buffer(dst, str, strlen(str));
}

/* Write the concatenation of two strings as a single AMF string; either
 * argument may be NULL and is then treated as empty. */
void ff_amf_write_string2(uint8_t **dst, const char *str1, const char *str2)
{
    int len1 = 0, len2 = 0;
    if (str1)
        len1 = strlen(str1);
    if (str2)
        len2 = strlen(str2);
    bytestream_put_byte(dst, AMF_DATA_TYPE_STRING);
    bytestream_put_be16(dst, len1 + len2);
    bytestream_put_buffer(dst, str1, len1);
    bytestream_put_buffer(dst, str2, len2);
}

/* Write an AMF null marker. */
void ff_amf_write_null(uint8_t **dst)
{
    bytestream_put_byte(dst, AMF_DATA_TYPE_NULL);
}

/* Open an AMF object; must be paired with ff_amf_write_object_end(). */
void ff_amf_write_object_start(uint8_t **dst)
{
    bytestream_put_byte(dst, AMF_DATA_TYPE_OBJECT);
}

/* Write an object field name: 16-bit length + bytes, no type marker. */
void ff_amf_write_field_name(uint8_t **dst, const char *str)
{
    bytestream_put_be16(dst, strlen(str));
    bytestream_put_buffer(dst, str, strlen(str));
}

void ff_amf_write_object_end(uint8_t **dst)
{
    /* first two bytes are field name length = 0,
     * AMF object should end with it and end marker
     */
    bytestream_put_be24(dst, AMF_DATA_TYPE_OBJECT_END);
}
 
/* Read an AMF boolean; returns 0 on success, AVERROR_INVALIDDATA if the
 * next tag is not a boolean. */
int ff_amf_read_bool(GetByteContext *bc, int *val)
{
    if (bytestream2_get_byte(bc) != AMF_DATA_TYPE_BOOL)
        return AVERROR_INVALIDDATA;
    *val = bytestream2_get_byte(bc);
    return 0;
}

/* Read an AMF number (big-endian IEEE-754 double). */
int ff_amf_read_number(GetByteContext *bc, double *val)
{
    uint64_t read;
    if (bytestream2_get_byte(bc) != AMF_DATA_TYPE_NUMBER)
        return AVERROR_INVALIDDATA;
    read = bytestream2_get_be64(bc);
    *val = av_int2double(read);
    return 0;
}

/**
 * Read an AMF short string into str (always NUL-terminated).
 *
 * @param strsize capacity of str including the terminator
 * @param length  receives the number of bytes actually stored
 * @return 0 on success (possibly short read, with a warning),
 *         AVERROR_INVALIDDATA on wrong tag, AVERROR(EINVAL) if too long
 */
int ff_amf_read_string(GetByteContext *bc, uint8_t *str,
                       int strsize, int *length)
{
    int stringlen = 0;
    int readsize;
    if (bytestream2_get_byte(bc) != AMF_DATA_TYPE_STRING)
        return AVERROR_INVALIDDATA;
    stringlen = bytestream2_get_be16(bc);
    if (stringlen + 1 > strsize)
        return AVERROR(EINVAL);
    readsize = bytestream2_get_buffer(bc, str, stringlen);
    if (readsize != stringlen) {
        av_log(NULL, AV_LOG_WARNING,
               "Unable to read as many bytes as AMF string signaled\n");
    }
    str[readsize] = '\0';
    *length = FFMIN(stringlen, readsize);
    return 0;
}

/* Consume an AMF null marker. */
int ff_amf_read_null(GetByteContext *bc)
{
    if (bytestream2_get_byte(bc) != AMF_DATA_TYPE_NULL)
        return AVERROR_INVALIDDATA;
    return 0;
}
 
/**
 * Make sure the per-channel history array can be indexed by `channel`,
 * growing it (with some headroom) and zeroing the new slots if needed.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ff_rtmp_check_alloc_array(RTMPPacket **prev_pkt, int *nb_prev_pkt,
                              int channel)
{
    RTMPPacket *grown;
    int new_count;

    if (channel < *nb_prev_pkt)
        return 0;

    new_count = channel + 16;
    /* av_reallocp-style helpers cannot be used here: on failure they
     * would free the array while the individual packets still own
     * their data buffers. */
    grown = av_realloc_array(*prev_pkt, new_count, sizeof(**prev_pkt));
    if (!grown)
        return AVERROR(ENOMEM);
    memset(grown + *nb_prev_pkt, 0,
           (new_count - *nb_prev_pkt) * sizeof(*grown));
    *prev_pkt    = grown;
    *nb_prev_pkt = new_count;
    return 0;
}
 
/**
 * Read one complete RTMP packet: fetch the first chunk-header byte, then
 * let ff_rtmp_packet_read_internal() assemble the chunks.
 */
int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p,
                        int chunk_size, RTMPPacket **prev_pkt, int *nb_prev_pkt)
{
    uint8_t hdr;

    if (ffurl_read(h, &hdr, 1) != 1)
        return AVERROR(EIO);

    return ff_rtmp_packet_read_internal(h, p, chunk_size, prev_pkt,
                                        nb_prev_pkt, hdr);
}
 
/**
 * Read one RTMP chunk given its already-consumed first header byte.
 * Header fields absent in compressed header forms (1/4/8-byte) are
 * inherited from the per-channel history in prev_pkt.
 *
 * @return total bytes consumed for the packet so far (> 0) when the
 *         packet is complete, AVERROR(EAGAIN) when more chunks are
 *         needed, or another negative AVERROR code on failure
 */
static int rtmp_packet_read_one_chunk(URLContext *h, RTMPPacket *p,
                                      int chunk_size, RTMPPacket **prev_pkt_ptr,
                                      int *nb_prev_pkt, uint8_t hdr)
{

    uint8_t buf[16];
    int channel_id, timestamp, size;
    uint32_t extra = 0;
    enum RTMPPacketType type;
    int written = 0;
    int ret, toread;
    RTMPPacket *prev_pkt;

    written++;
    channel_id = hdr & 0x3F;

    if (channel_id < 2) { //special case for channel number >= 64
        buf[1] = 0;
        /* id 0: one extra byte; id 1: two extra bytes (little-endian) */
        if (ffurl_read_complete(h, buf, channel_id + 1) != channel_id + 1)
            return AVERROR(EIO);
        written += channel_id + 1;
        channel_id = AV_RL16(buf) + 64;
    }
    if ((ret = ff_rtmp_check_alloc_array(prev_pkt_ptr, nb_prev_pkt,
                                         channel_id)) < 0)
        return ret;
    prev_pkt = *prev_pkt_ptr;
    /* defaults inherited from the previous packet on this channel */
    size  = prev_pkt[channel_id].size;
    type  = prev_pkt[channel_id].type;
    extra = prev_pkt[channel_id].extra;

    /* top two bits select the header form (RTMPPacketSize) */
    hdr >>= 6;
    if (hdr == RTMP_PS_ONEBYTE) {
        timestamp = prev_pkt[channel_id].ts_delta;
    } else {
        if (ffurl_read_complete(h, buf, 3) != 3)
            return AVERROR(EIO);
        written += 3;
        timestamp = AV_RB24(buf);
        if (hdr != RTMP_PS_FOURBYTES) {
            if (ffurl_read_complete(h, buf, 3) != 3)
                return AVERROR(EIO);
            written += 3;
            size = AV_RB24(buf);
            if (ffurl_read_complete(h, buf, 1) != 1)
                return AVERROR(EIO);
            written++;
            type = buf[0];
            if (hdr == RTMP_PS_TWELVEBYTES) {
                if (ffurl_read_complete(h, buf, 4) != 4)
                    return AVERROR(EIO);
                written += 4;
                extra = AV_RL32(buf);
            }
        }
        /* 0xFFFFFF escapes to a full 32-bit extended timestamp */
        if (timestamp == 0xFFFFFF) {
            if (ffurl_read_complete(h, buf, 4) != 4)
                return AVERROR(EIO);
            timestamp = AV_RB32(buf);
        }
    }
    /* all header forms except the 12-byte one carry a timestamp delta */
    if (hdr != RTMP_PS_TWELVEBYTES)
        timestamp += prev_pkt[channel_id].timestamp;

    if (!prev_pkt[channel_id].read) {
        /* first chunk of a new packet: allocate its buffer */
        if ((ret = ff_rtmp_packet_create(p, channel_id, type, timestamp,
                                         size)) < 0)
            return ret;
        p->read = written;
        p->offset = 0;
        prev_pkt[channel_id].ts_delta = timestamp -
                                        prev_pkt[channel_id].timestamp;
        prev_pkt[channel_id].timestamp = timestamp;
    } else {
        // previous packet in this channel hasn't completed reading
        RTMPPacket *prev = &prev_pkt[channel_id];
        p->data = prev->data;
        p->size = prev->size;
        p->channel_id = prev->channel_id;
        p->type = prev->type;
        p->ts_delta = prev->ts_delta;
        p->extra = prev->extra;
        p->offset = prev->offset;
        p->read = prev->read + written;
        p->timestamp = prev->timestamp;
        /* ownership of the buffer moves to p until the packet completes */
        prev->data = NULL;
    }
    p->extra = extra;
    // save history
    prev_pkt[channel_id].channel_id = channel_id;
    prev_pkt[channel_id].type = type;
    prev_pkt[channel_id].size = size;
    prev_pkt[channel_id].extra = extra;
    /* remaining payload for this packet */
    size = size - p->offset;

    toread = FFMIN(size, chunk_size);
    if (ffurl_read_complete(h, p->data + p->offset, toread) != toread) {
        ff_rtmp_packet_destroy(p);
        return AVERROR(EIO);
    }
    size -= toread;
    p->read += toread;
    p->offset += toread;

    if (size > 0) {
        /* packet incomplete: park the buffer back in the history slot */
        RTMPPacket *prev = &prev_pkt[channel_id];
        prev->data = p->data;
        prev->read = p->read;
        prev->offset = p->offset;
        return AVERROR(EAGAIN);
    }

    prev_pkt[channel_id].read = 0; // read complete; reset if needed
    return p->read;
}
 
/**
 * Assemble a full RTMP packet chunk by chunk, starting from a chunk
 * header byte that has already been read by the caller.
 *
 * @return bytes consumed (> 0) on success, negative AVERROR on failure
 */
int ff_rtmp_packet_read_internal(URLContext *h, RTMPPacket *p, int chunk_size,
                                 RTMPPacket **prev_pkt, int *nb_prev_pkt,
                                 uint8_t hdr)
{
    int ret;

    do {
        ret = rtmp_packet_read_one_chunk(h, p, chunk_size, prev_pkt,
                                         nb_prev_pkt, hdr);
        /* anything other than "need more chunks" is a final result */
        if (ret != AVERROR(EAGAIN))
            return ret;
        /* packet incomplete: fetch the next chunk's header byte */
    } while (ffurl_read(h, &hdr, 1) == 1);

    return AVERROR(EIO);
}
 
/**
 * Write an RTMP packet, choosing the most compact chunk-header form the
 * per-channel history allows and splitting the payload into chunks of at
 * most chunk_size bytes (continuation chunks get a 0xC0|id marker byte).
 *
 * @return total bytes written (> 0) on success, negative AVERROR on error
 */
int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
                         int chunk_size, RTMPPacket **prev_pkt_ptr,
                         int *nb_prev_pkt)
{
    uint8_t pkt_hdr[16], *p = pkt_hdr;
    int mode = RTMP_PS_TWELVEBYTES;
    int off = 0;
    int written = 0;
    int ret;
    RTMPPacket *prev_pkt;

    if ((ret = ff_rtmp_check_alloc_array(prev_pkt_ptr, nb_prev_pkt,
                                         pkt->channel_id)) < 0)
        return ret;
    prev_pkt = *prev_pkt_ptr;

    pkt->ts_delta = pkt->timestamp - prev_pkt[pkt->channel_id].timestamp;

    //if channel_id = 0, this is first presentation of prev_pkt, send full hdr.
    if (prev_pkt[pkt->channel_id].channel_id &&
        pkt->extra == prev_pkt[pkt->channel_id].extra) {
        if (pkt->type == prev_pkt[pkt->channel_id].type &&
            pkt->size == prev_pkt[pkt->channel_id].size) {
            mode = RTMP_PS_FOURBYTES;
            if (pkt->ts_delta == prev_pkt[pkt->channel_id].ts_delta)
                mode = RTMP_PS_ONEBYTE;
        } else {
            mode = RTMP_PS_EIGHTBYTES;
        }
    }

    /* channel id encoding: 6 bits inline, or escape values 0/1 followed
     * by 1 or 2 extra bytes for ids >= 64 */
    if (pkt->channel_id < 64) {
        bytestream_put_byte(&p, pkt->channel_id | (mode << 6));
    } else if (pkt->channel_id < 64 + 256) {
        bytestream_put_byte(&p, 0 | (mode << 6));
        bytestream_put_byte(&p, pkt->channel_id - 64);
    } else {
        bytestream_put_byte(&p, 1 | (mode << 6));
        bytestream_put_le16(&p, pkt->channel_id - 64);
    }
    if (mode != RTMP_PS_ONEBYTE) {
        uint32_t timestamp = pkt->timestamp;
        if (mode != RTMP_PS_TWELVEBYTES)
            timestamp = pkt->ts_delta;
        /* 0xFFFFFF in the 24-bit field announces an extended timestamp */
        bytestream_put_be24(&p, timestamp >= 0xFFFFFF ? 0xFFFFFF : timestamp);
        if (mode != RTMP_PS_FOURBYTES) {
            bytestream_put_be24(&p, pkt->size);
            bytestream_put_byte(&p, pkt->type);
            if (mode == RTMP_PS_TWELVEBYTES)
                bytestream_put_le32(&p, pkt->extra);
        }
        if (timestamp >= 0xFFFFFF)
            bytestream_put_be32(&p, timestamp);
    }
    // save history
    prev_pkt[pkt->channel_id].channel_id = pkt->channel_id;
    prev_pkt[pkt->channel_id].type       = pkt->type;
    prev_pkt[pkt->channel_id].size       = pkt->size;
    prev_pkt[pkt->channel_id].timestamp  = pkt->timestamp;
    if (mode != RTMP_PS_TWELVEBYTES) {
        prev_pkt[pkt->channel_id].ts_delta   = pkt->ts_delta;
    } else {
        prev_pkt[pkt->channel_id].ts_delta   = pkt->timestamp;
    }
    prev_pkt[pkt->channel_id].extra      = pkt->extra;

    if ((ret = ffurl_write(h, pkt_hdr, p - pkt_hdr)) < 0)
        return ret;
    written = p - pkt_hdr + pkt->size;
    while (off < pkt->size) {
        int towrite = FFMIN(chunk_size, pkt->size - off);
        if ((ret = ffurl_write(h, pkt->data + off, towrite)) < 0)
            return ret;
        off += towrite;
        if (off < pkt->size) {
            /* continuation chunk: one-byte header, same channel */
            uint8_t marker = 0xC0 | pkt->channel_id;
            if ((ret = ffurl_write(h, &marker, 1)) < 0)
                return ret;
            written++;
        }
    }
    return written;
}
 
/**
 * Initialize an RTMPPacket and allocate its payload buffer.
 *
 * @param size payload size in bytes; 0 yields a data-less packet
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ff_rtmp_packet_create(RTMPPacket *pkt, int channel_id, RTMPPacketType type,
                          int timestamp, int size)
{
    /* Always reset the data pointer: previously a size of 0 left
     * whatever stale pointer the caller's struct contained, and a later
     * ff_rtmp_packet_destroy() would have freed garbage. */
    pkt->data = NULL;
    if (size) {
        pkt->data = av_malloc(size);
        if (!pkt->data)
            return AVERROR(ENOMEM);
    }
    pkt->size       = size;
    pkt->channel_id = channel_id;
    pkt->type       = type;
    pkt->timestamp  = timestamp;
    pkt->extra      = 0;
    pkt->ts_delta   = 0;

    return 0;
}
 
/* Release a packet's payload buffer and mark it empty; NULL is a no-op. */
void ff_rtmp_packet_destroy(RTMPPacket *pkt)
{
    if (pkt) {
        av_freep(&pkt->data);
        pkt->size = 0;
    }
}
 
/**
 * Compute the encoded size in bytes of the AMF tag starting at data,
 * recursing into arrays and objects.
 *
 * @return tag size, or -1 on malformed/truncated data
 *
 * NOTE(review): the STRING/LONG_STRING cases read 2 or 4 length bytes
 * without checking that they lie before data_end — callers appear to
 * pass whole packets, but this is not bounds-checked here; verify.
 */
int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end)
{
    const uint8_t *base = data;
    AMFDataType type;
    /* nb == UINT_MAX: element count unknown, loop until object end */
    unsigned nb = -1;
    int parse_key = 1;

    if (data >= data_end)
        return -1;
    switch ((type = *data++)) {
    case AMF_DATA_TYPE_NUMBER:      return 9;
    case AMF_DATA_TYPE_BOOL:        return 2;
    case AMF_DATA_TYPE_STRING:      return 3 + AV_RB16(data);
    case AMF_DATA_TYPE_LONG_STRING: return 5 + AV_RB32(data);
    case AMF_DATA_TYPE_NULL:        return 1;
    case AMF_DATA_TYPE_ARRAY:
        parse_key = 0;
        /* fallthrough */
    case AMF_DATA_TYPE_MIXEDARRAY:
        nb = bytestream_get_be32(&data);
        /* fallthrough */
    case AMF_DATA_TYPE_OBJECT:
        while (nb-- > 0 || type != AMF_DATA_TYPE_ARRAY) {
            int t;
            if (parse_key) {
                int size = bytestream_get_be16(&data);
                if (!size) {
                    /* empty key: object terminator, skip the end marker */
                    data++;
                    break;
                }
                if (size < 0 || size >= data_end - data)
                    return -1;
                data += size;
            }
            t = ff_amf_tag_size(data, data_end);
            if (t < 0 || t >= data_end - data)
                return -1;
            data += t;
        }
        return data - base;
    case AMF_DATA_TYPE_OBJECT_END:  return 1;
    default:                        return -1;
    }
}
 
/**
 * Find the first AMF object in the stream, look up the named field in
 * it and render its value as text into dst.
 *
 * @param data     start of the AMF data
 * @param data_end first byte past the readable data
 * @param name     field name to look for
 * @param dst      output buffer for the stringified value
 * @param dst_size size of dst in bytes
 * @return 0 on success, -1 if the field is absent or data is malformed
 */
int ff_amf_get_field_value(const uint8_t *data, const uint8_t *data_end,
                           const uint8_t *name, uint8_t *dst, int dst_size)
{
    int namelen = strlen(name);
    int len;

    /* Skip leading non-object tags. The bound must be checked BEFORE
     * dereferencing: the original order read *data one byte past
     * data_end when no object was present. */
    while (data < data_end && *data != AMF_DATA_TYPE_OBJECT) {
        len = ff_amf_tag_size(data, data_end);
        if (len < 0)
            len = data_end - data;
        data += len;
    }
    if (data_end - data < 3)
        return -1;
    data++; /* skip the OBJECT type marker */
    for (;;) {
        int size = bytestream_get_be16(&data);
        if (!size)
            break; /* zero-length key terminates the object */
        if (size < 0 || size >= data_end - data)
            return -1;
        data += size;
        if (size == namelen && !memcmp(data-size, name, namelen)) {
            switch (*data++) {
            case AMF_DATA_TYPE_NUMBER:
                snprintf(dst, dst_size, "%g", av_int2double(AV_RB64(data)));
                break;
            case AMF_DATA_TYPE_BOOL:
                snprintf(dst, dst_size, "%s", *data ? "true" : "false");
                break;
            case AMF_DATA_TYPE_STRING:
                len = bytestream_get_be16(&data);
                av_strlcpy(dst, data, FFMIN(len+1, dst_size));
                break;
            default:
                return -1;
            }
            return 0;
        }
        /* not the field we want: skip its value */
        len = ff_amf_tag_size(data, data_end);
        if (len < 0 || len >= data_end - data)
            return -1;
        data += len;
    }
    return -1;
}
 
/* Map an RTMP packet type id to a human-readable name for logging. */
static const char* rtmp_packet_type(int type)
{
    static const struct {
        int         type;
        const char *name;
    } type_names[] = {
        { RTMP_PT_CHUNK_SIZE,   "chunk size"          },
        { RTMP_PT_BYTES_READ,   "bytes read"          },
        { RTMP_PT_PING,         "ping"                },
        { RTMP_PT_SERVER_BW,    "server bandwidth"    },
        { RTMP_PT_CLIENT_BW,    "client bandwidth"    },
        { RTMP_PT_AUDIO,        "audio packet"        },
        { RTMP_PT_VIDEO,        "video packet"        },
        { RTMP_PT_FLEX_STREAM,  "Flex shared stream"  },
        { RTMP_PT_FLEX_OBJECT,  "Flex shared object"  },
        { RTMP_PT_FLEX_MESSAGE, "Flex shared message" },
        { RTMP_PT_NOTIFY,       "notification"        },
        { RTMP_PT_SHARED_OBJ,   "shared object"       },
        { RTMP_PT_INVOKE,       "invoke"              },
        { RTMP_PT_METADATA,     "metadata"            },
    };
    int i;

    for (i = 0; i < (int)(sizeof(type_names) / sizeof(type_names[0])); i++)
        if (type_names[i].type == type)
            return type_names[i].name;
    return "unknown";
}
 
/**
 * Log a human-readable rendering of one AMF tag (debug helper for
 * ff_rtmp_packet_dump()); stops quietly on truncated or malformed data.
 */
static void amf_tag_contents(void *ctx, const uint8_t *data,
                             const uint8_t *data_end)
{
    /* nb == UINT_MAX: element count unknown, loop until object end */
    unsigned int size, nb = -1;
    char buf[1024];
    AMFDataType type;
    int parse_key = 1;

    if (data >= data_end)
        return;
    switch ((type = *data++)) {
    case AMF_DATA_TYPE_NUMBER:
        av_log(ctx, AV_LOG_DEBUG, " number %g\n", av_int2double(AV_RB64(data)));
        return;
    case AMF_DATA_TYPE_BOOL:
        av_log(ctx, AV_LOG_DEBUG, " bool %d\n", *data);
        return;
    case AMF_DATA_TYPE_STRING:
    case AMF_DATA_TYPE_LONG_STRING:
        if (type == AMF_DATA_TYPE_STRING) {
            size = bytestream_get_be16(&data);
        } else {
            size = bytestream_get_be32(&data);
        }
        /* NOTE(review): this copy is only clamped to the local buffer,
         * not to data_end — callers pass whole packets; verify */
        size = FFMIN(size, sizeof(buf) - 1);
        memcpy(buf, data, size);
        buf[size] = 0;
        av_log(ctx, AV_LOG_DEBUG, " string '%s'\n", buf);
        return;
    case AMF_DATA_TYPE_NULL:
        av_log(ctx, AV_LOG_DEBUG, " NULL\n");
        return;
    case AMF_DATA_TYPE_ARRAY:
        parse_key = 0;
        /* fallthrough */
    case AMF_DATA_TYPE_MIXEDARRAY:
        nb = bytestream_get_be32(&data);
        /* fallthrough */
    case AMF_DATA_TYPE_OBJECT:
        av_log(ctx, AV_LOG_DEBUG, " {\n");
        while (nb-- > 0 || type != AMF_DATA_TYPE_ARRAY) {
            int t;
            if (parse_key) {
                size = bytestream_get_be16(&data);
                size = FFMIN(size, sizeof(buf) - 1);
                if (!size) {
                    av_log(ctx, AV_LOG_DEBUG, " }\n");
                    data++;
                    break;
                }
                /* Bounds check BEFORE copying the key: the original
                 * memcpy'd first and validated afterwards, reading past
                 * data_end on truncated input. */
                if (size >= data_end - data)
                    return;
                memcpy(buf, data, size);
                buf[size] = 0;
                data += size;
                av_log(ctx, AV_LOG_DEBUG, " %s: ", buf);
            }
            amf_tag_contents(ctx, data, data_end);
            t = ff_amf_tag_size(data, data_end);
            if (t < 0 || t >= data_end - data)
                return;
            data += t;
        }
        return;
    case AMF_DATA_TYPE_OBJECT_END:
        av_log(ctx, AV_LOG_DEBUG, " }\n");
        return;
    default:
        return;
    }
}
 
/**
 * Log a packet's header fields and, depending on its type, a decoded
 * AMF dump, the announced bandwidth, or a raw hex dump of the payload.
 */
void ff_rtmp_packet_dump(void *ctx, RTMPPacket *p)
{
    av_log(ctx, AV_LOG_DEBUG, "RTMP packet type '%s'(%d) for channel %d, timestamp %d, extra field %d size %d\n",
           rtmp_packet_type(p->type), p->type, p->channel_id, p->timestamp, p->extra, p->size);
    if (p->type == RTMP_PT_INVOKE || p->type == RTMP_PT_NOTIFY) {
        /* these payloads are sequences of AMF tags */
        uint8_t *src = p->data, *src_end = p->data + p->size;
        while (src < src_end) {
            int sz;
            amf_tag_contents(ctx, src, src_end);
            sz = ff_amf_tag_size(src, src_end);
            if (sz < 0)
                break;
            src += sz;
        }
    } else if (p->type == RTMP_PT_SERVER_BW){
        av_log(ctx, AV_LOG_DEBUG, "Server BW = %d\n", AV_RB32(p->data));
    } else if (p->type == RTMP_PT_CLIENT_BW){
        av_log(ctx, AV_LOG_DEBUG, "Client BW = %d\n", AV_RB32(p->data));
    } else if (p->type != RTMP_PT_AUDIO && p->type != RTMP_PT_VIDEO && p->type != RTMP_PT_METADATA) {
        /* unknown control payloads: dump raw bytes */
        int i;
        for (i = 0; i < p->size; i++)
            av_log(ctx, AV_LOG_DEBUG, " %02X", p->data[i]);
        av_log(ctx, AV_LOG_DEBUG, "\n");
    }
}
 
/**
 * Check whether the AMF tag at data is a (long) string equal to str.
 *
 * @param data start of the AMF tag
 * @param size number of readable bytes at data
 * @param str  NUL-terminated string to compare against
 * @return 1 on an exact match, 0 otherwise (including malformed data)
 */
int ff_amf_match_string(const uint8_t *data, int size, const char *str)
{
    int len = strlen(str);
    int amf_len, type;

    if (size < 1)
        return 0;

    type = *data++;

    switch (type) {
    case AMF_DATA_TYPE_LONG_STRING:
        /* marker byte + 32-bit length must fit in the buffer */
        if ((size -= 4 + 1) < 0)
            return 0;
        amf_len = bytestream_get_be32(&data);
        break;
    case AMF_DATA_TYPE_STRING:
        /* marker byte + 16-bit length must fit in the buffer */
        if ((size -= 2 + 1) < 0)
            return 0;
        amf_len = bytestream_get_be16(&data);
        break;
    default:
        return 0;
    }

    if (amf_len > size || amf_len != len)
        return 0;

    return !memcmp(data, str, len);
}
/contrib/sdk/sources/ffmpeg/libavformat/rtmppkt.h
0,0 → 1,313
/*
* RTMP packet utilities
* Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RTMPPKT_H
#define AVFORMAT_RTMPPKT_H
 
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "url.h"
 
/** maximum possible number of different RTMP channels */
#define RTMP_CHANNELS 65599
 
/**
 * channels used for RTMP packets with different purposes (i.e. data, network
 * control, remote procedure calls, etc.)
 */
enum RTMPChannel {
    RTMP_NETWORK_CHANNEL = 2,   ///< channel for network-related messages (bandwidth report, ping, etc)
    RTMP_SYSTEM_CHANNEL,        ///< channel for sending server control messages
    RTMP_AUDIO_CHANNEL,         ///< channel for audio data
    RTMP_VIDEO_CHANNEL   = 6,   ///< channel for video data
    RTMP_SOURCE_CHANNEL  = 8,   ///< channel for a/v invokes
};
 
/**
 * known RTMP packet types
 *
 * NOTE(review): the explicit values (1, 3, 8, 15, 22) leave gaps in the
 * sequence; presumably they mirror the on-wire RTMP message type IDs —
 * confirm against the RTMP specification.
 */
typedef enum RTMPPacketType {
    RTMP_PT_CHUNK_SIZE   =  1,  ///< chunk size change
    RTMP_PT_BYTES_READ   =  3,  ///< number of bytes read
    RTMP_PT_PING,               ///< ping
    RTMP_PT_SERVER_BW,          ///< server bandwidth
    RTMP_PT_CLIENT_BW,          ///< client bandwidth
    RTMP_PT_AUDIO        =  8,  ///< audio packet
    RTMP_PT_VIDEO,              ///< video packet
    RTMP_PT_FLEX_STREAM  = 15,  ///< Flex shared stream
    RTMP_PT_FLEX_OBJECT,        ///< Flex shared object
    RTMP_PT_FLEX_MESSAGE,       ///< Flex shared message
    RTMP_PT_NOTIFY,             ///< some notification
    RTMP_PT_SHARED_OBJ,         ///< shared object
    RTMP_PT_INVOKE,             ///< invoke some stream action
    RTMP_PT_METADATA     = 22,  ///< FLV metadata
} RTMPPacketType;
 
/**
 * possible RTMP packet header sizes
 *
 * Smaller headers omit fields that are then inherited from the previous
 * packet sent on the same channel (see ff_rtmp_packet_read/write).
 */
enum RTMPPacketSize {
    RTMP_PS_TWELVEBYTES = 0, ///< packet has 12-byte header
    RTMP_PS_EIGHTBYTES,      ///< packet has 8-byte header
    RTMP_PS_FOURBYTES,       ///< packet has 4-byte header
    RTMP_PS_ONEBYTE          ///< packet is really a next chunk of a packet
};
 
/**
 * structure for holding RTMP packets
 *
 * NOTE(review): data appears to be owned by the packet — allocated by
 * ff_rtmp_packet_create and released by ff_rtmp_packet_destroy; confirm
 * in rtmppkt.c before sharing the buffer.
 */
typedef struct RTMPPacket {
    int            channel_id; ///< RTMP channel ID (nothing to do with audio/video channels though)
    RTMPPacketType type;       ///< packet payload type
    uint32_t       timestamp;  ///< packet full timestamp
    uint32_t       ts_delta;   ///< timestamp increment to the previous one in milliseconds (latter only for media packets)
    uint32_t       extra;      ///< probably an additional channel ID used during streaming data
    uint8_t        *data;      ///< packet payload
    int            size;       ///< packet payload size
    int            offset;     ///< amount of data read so far
    int            read;       ///< amount read, including headers
} RTMPPacket;
 
/**
* Create new RTMP packet with given attributes.
*
* @param pkt packet
* @param channel_id packet channel ID
* @param type packet type
* @param timestamp packet timestamp
* @param size packet size
* @return zero on success, negative value otherwise
*/
int ff_rtmp_packet_create(RTMPPacket *pkt, int channel_id, RTMPPacketType type,
int timestamp, int size);
 
/**
* Free RTMP packet.
*
* @param pkt packet
*/
void ff_rtmp_packet_destroy(RTMPPacket *pkt);
 
/**
* Read RTMP packet sent by the server.
*
* @param h reader context
* @param p packet
* @param chunk_size current chunk size
* @param prev_pkt previously read packet headers for all channels
* (may be needed for restoring incomplete packet header)
* @param nb_prev_pkt number of allocated elements in prev_pkt
* @return number of bytes read on success, negative value otherwise
*/
int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p,
int chunk_size, RTMPPacket **prev_pkt,
int *nb_prev_pkt);
/**
* Read internal RTMP packet sent by the server.
*
* @param h reader context
* @param p packet
* @param chunk_size current chunk size
* @param prev_pkt previously read packet headers for all channels
* (may be needed for restoring incomplete packet header)
* @param nb_prev_pkt number of allocated elements in prev_pkt
* @param c the first byte already read
* @return number of bytes read on success, negative value otherwise
*/
int ff_rtmp_packet_read_internal(URLContext *h, RTMPPacket *p, int chunk_size,
RTMPPacket **prev_pkt, int *nb_prev_pkt,
uint8_t c);
 
/**
* Send RTMP packet to the server.
*
* @param h reader context
* @param p packet to send
* @param chunk_size current chunk size
* @param prev_pkt previously sent packet headers for all channels
* (may be used for packet header compressing)
* @param nb_prev_pkt number of allocated elements in prev_pkt
* @return number of bytes written on success, negative value otherwise
*/
int ff_rtmp_packet_write(URLContext *h, RTMPPacket *p,
int chunk_size, RTMPPacket **prev_pkt,
int *nb_prev_pkt);
 
/**
* Print information and contents of RTMP packet.
*
* @param ctx output context
* @param p packet to dump
*/
void ff_rtmp_packet_dump(void *ctx, RTMPPacket *p);
 
/**
* Enlarge the prev_pkt array to fit the given channel
*
* @param prev_pkt array with previously sent packet headers
* @param nb_prev_pkt number of allocated elements in prev_pkt
* @param channel the channel number that needs to be allocated
*/
int ff_rtmp_check_alloc_array(RTMPPacket **prev_pkt, int *nb_prev_pkt,
int channel);
 
/**
* @name Functions used to work with the AMF format (which is also used in .flv)
* @see amf_* funcs in libavformat/flvdec.c
* @{
*/
 
/**
* Calculate number of bytes taken by first AMF entry in data.
*
* @param data input data
* @param data_end input buffer end
* @return number of bytes used by first AMF entry
*/
int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end);
 
/**
* Retrieve value of given AMF object field in string form.
*
* @param data AMF object data
* @param data_end input buffer end
* @param name name of field to retrieve
* @param dst buffer for storing result
* @param dst_size output buffer size
* @return 0 if search and retrieval succeeded, negative value otherwise
*/
int ff_amf_get_field_value(const uint8_t *data, const uint8_t *data_end,
const uint8_t *name, uint8_t *dst, int dst_size);
 
/**
* Write boolean value in AMF format to buffer.
*
* @param dst pointer to the input buffer (will be modified)
* @param val value to write
*/
void ff_amf_write_bool(uint8_t **dst, int val);
 
/**
* Write number in AMF format to buffer.
*
* @param dst pointer to the input buffer (will be modified)
* @param num value to write
*/
void ff_amf_write_number(uint8_t **dst, double num);
 
/**
* Write string in AMF format to buffer.
*
* @param dst pointer to the input buffer (will be modified)
* @param str string to write
*/
void ff_amf_write_string(uint8_t **dst, const char *str);
 
/**
* Write a string consisting of two parts in AMF format to a buffer.
*
* @param dst pointer to the input buffer (will be modified)
* @param str1 first string to write, may be null
* @param str2 second string to write, may be null
*/
void ff_amf_write_string2(uint8_t **dst, const char *str1, const char *str2);
 
/**
* Write AMF NULL value to buffer.
*
* @param dst pointer to the input buffer (will be modified)
*/
void ff_amf_write_null(uint8_t **dst);
 
/**
* Write marker for AMF object to buffer.
*
* @param dst pointer to the input buffer (will be modified)
*/
void ff_amf_write_object_start(uint8_t **dst);
 
/**
* Write string used as field name in AMF object to buffer.
*
* @param dst pointer to the input buffer (will be modified)
* @param str string to write
*/
void ff_amf_write_field_name(uint8_t **dst, const char *str);
 
/**
* Write marker for end of AMF object to buffer.
*
* @param dst pointer to the input buffer (will be modified)
*/
void ff_amf_write_object_end(uint8_t **dst);
 
/**
* Read AMF boolean value.
*
*@param[in,out] gbc GetByteContext initialized with AMF-formatted data
*@param[out] val 0 or 1
*@return 0 on success or an AVERROR code on failure
*/
int ff_amf_read_bool(GetByteContext *gbc, int *val);
 
/**
* Read AMF number value.
*
*@param[in,out] gbc GetByteContext initialized with AMF-formatted data
*@param[out] val read value
*@return 0 on success or an AVERROR code on failure
*/
int ff_amf_read_number(GetByteContext *gbc, double *val);
 
/**
* Read AMF string value.
*
* Appends a trailing null byte to output string in order to
* ease later parsing.
*
*@param[in,out] gbc GetByteContext initialized with AMF-formatted data
*@param[out] str read string
*@param[in] strsize buffer size available to store the read string
*@param[out] length read string length
*@return 0 on success or an AVERROR code on failure
*/
int ff_amf_read_string(GetByteContext *gbc, uint8_t *str,
int strsize, int *length);
 
/**
* Read AMF NULL value.
*
*@param[in,out] gbc GetByteContext initialized with AMF-formatted data
*@return 0 on success or an AVERROR code on failure
*/
int ff_amf_read_null(GetByteContext *gbc);
 
/**
* Match AMF string with a NULL-terminated string.
*
* @return 0 if the strings do not match.
*/
 
int ff_amf_match_string(const uint8_t *data, int size, const char *str);
 
/** @} */ // AMF funcs
 
#endif /* AVFORMAT_RTMPPKT_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtmpproto.c
0,0 → 1,2829
/*
* RTMP network protocol
* Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* RTMP protocol
*/
 
#include "libavcodec/bytestream.h"
#include "libavutil/avstring.h"
#include "libavutil/base64.h"
#include "libavutil/intfloat.h"
#include "libavutil/lfg.h"
#include "libavutil/md5.h"
#include "libavutil/opt.h"
#include "libavutil/random_seed.h"
#include "libavutil/sha.h"
#include "avformat.h"
#include "internal.h"
 
#include "network.h"
 
#include "flv.h"
#include "rtmp.h"
#include "rtmpcrypt.h"
#include "rtmppkt.h"
#include "url.h"
 
#if CONFIG_ZLIB
#include <zlib.h>
#endif
 
#define APP_MAX_LENGTH 1024
#define PLAYPATH_MAX_LENGTH 256
#define TCURL_MAX_LENGTH 512
#define FLASHVER_MAX_LENGTH 64
#define RTMP_PKTDATA_DEFAULT_SIZE 4096
#define RTMP_HEADER 11
 
/** RTMP protocol handler state */
typedef enum {
    STATE_START,      ///< client has not done anything yet
    STATE_HANDSHAKED, ///< client has performed handshake
    STATE_FCPUBLISH,  ///< client FCPublishing stream (for output)
    STATE_PLAYING,    ///< client has started receiving multimedia data from server
    STATE_SEEKING,    ///< client has started the seek operation. Back on STATE_PLAYING when the time comes
    STATE_PUBLISHING, ///< client has started sending multimedia data to server (for output)
    /* NOTE(review): the three states below look like listen (server) mode
     * states (cf. read_connect) — confirm against the open/connect logic. */
    STATE_RECEIVING,  ///< received a publish command (for input)
    STATE_SENDING,    ///< received a play command (for output)
    STATE_STOPPED,    ///< the broadcast has been stopped
} ClientState;
 
/** An invoke sent to the server whose response is still outstanding. */
typedef struct TrackedMethod {
    char *name;   ///< method name; owned by this entry (av_strdup'ed in add_tracked_method)
    int   id;     ///< invoke transaction id, matched against the id in the server's reply
} TrackedMethod;
 
/** protocol handler context */
typedef struct RTMPContext {
    const AVClass *class;
    URLContext*   stream;                     ///< TCP stream used in interactions with RTMP server
    RTMPPacket    *prev_pkt[2];               ///< packet history used when reading and sending packets ([0] for reading, [1] for writing)
    int           nb_prev_pkt[2];             ///< number of elements in prev_pkt
    int           in_chunk_size;              ///< size of the chunks incoming RTMP packets are divided into
    int           out_chunk_size;             ///< size of the chunks outgoing RTMP packets are divided into
    int           is_input;                   ///< input/output flag
    char          *playpath;                  ///< stream identifier to play (with possible "mp4:" prefix)
    int           live;                       ///< 0: recorded, -1: live, -2: both
    char          *app;                       ///< name of application
    char          *conn;                      ///< append arbitrary AMF data to the Connect message
    ClientState   state;                      ///< current state
    int           stream_id;                  ///< ID assigned by the server for the stream
    uint8_t*      flv_data;                   ///< buffer with data for demuxer
    int           flv_size;                   ///< current buffer size
    int           flv_off;                    ///< number of bytes read from current buffer
    int           flv_nb_packets;             ///< number of flv packets published
    RTMPPacket    out_pkt;                    ///< rtmp packet, created from flv a/v or metadata (for output)
    uint32_t      client_report_size;         ///< number of bytes after which client should report to server
    uint32_t      bytes_read;                 ///< number of bytes read from server
    uint32_t      last_bytes_read;            ///< number of bytes read last reported to server
    int           skip_bytes;                 ///< number of bytes to skip from the input FLV stream in the next write call
    uint8_t       flv_header[RTMP_HEADER];    ///< partial incoming flv packet header
    int           flv_header_bytes;           ///< number of initialized bytes in flv_header
    int           nb_invokes;                 ///< keeps track of invoke messages
    char*         tcurl;                      ///< url of the target stream
    char*         flashver;                   ///< version of the flash plugin
    char*         swfhash;                    ///< SHA256 hash of the decompressed SWF file (32 bytes)
    int           swfhash_len;                ///< length of the SHA256 hash
    int           swfsize;                    ///< size of the decompressed SWF file
    char*         swfurl;                     ///< url of the swf player
    char*         swfverify;                  ///< URL to player swf file, compute hash/size automatically
    char          swfverification[42];        ///< hash of the SWF verification
    char*         pageurl;                    ///< url of the web page
    char*         subscribe;                  ///< name of live stream to subscribe
    int           server_bw;                  ///< server bandwidth
    int           client_buffer_time;         ///< client buffer time in ms
    int           flush_interval;             ///< number of packets flushed in the same request (RTMPT only)
    int           encrypted;                  ///< use an encrypted connection (RTMPE only)
    TrackedMethod*tracked_methods;            ///< tracked methods buffer
    int           nb_tracked_methods;         ///< number of tracked methods
    int           tracked_methods_size;       ///< size of the tracked methods buffer
    int           listen;                     ///< listen mode flag
    int           listen_timeout;             ///< listen timeout to wait for new connections
    int           nb_streamid;                ///< The next stream id to return on createStream calls
    char          username[50];               ///< user name for authentication — presumably parsed from the URL; confirm at the option/open code
    char          password[50];               ///< password for authentication — presumably parsed from the URL; confirm at the option/open code
    char          auth_params[500];           ///< extra AMF auth parameters appended to app/tcUrl (see gen_connect's ff_amf_write_string2 calls)
    int           do_reconnect;               ///< NOTE(review): flag requesting a reconnect — confirm against the open/retry logic (not visible here)
    int           auth_tried;                 ///< NOTE(review): set after one authentication attempt, presumably to prevent retry loops — confirm
} RTMPContext;
 
#define PLAYER_KEY_OPEN_PART_LEN 30   ///< length of partial key used for first client digest signing
/** Client key used for digest signing;
 *  the first 30 bytes are the ASCII string "Genuine Adobe Flash Player 001",
 *  followed by 32 binary bytes. */
static const uint8_t rtmp_player_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'P', 'l', 'a', 'y', 'e', 'r', ' ', '0', '0', '1',

    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};

#define SERVER_KEY_OPEN_PART_LEN 36   ///< length of partial key used for first server digest signing
/** Key used for RTMP server digest signing;
 *  the first 36 bytes are the ASCII string "Genuine Adobe Flash Media Server 001",
 *  followed by the same 32 binary bytes as the player key. */
static const uint8_t rtmp_server_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'M', 'e', 'd', 'i', 'a', ' ',
    'S', 'e', 'r', 'v', 'e', 'r', ' ', '0', '0', '1',

    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};
 
/* Remember a sent invoke (method name + transaction id) so its reply can
 * be matched later by find_tracked_method. Takes a copy of name.
 * Returns 0 on success or a negative AVERROR on allocation failure. */
static int add_tracked_method(RTMPContext *rt, const char *name, int id)
{
    TrackedMethod *entry;

    /* grow the array geometrically once it is full */
    if (rt->nb_tracked_methods + 1 > rt->tracked_methods_size) {
        int err;
        rt->tracked_methods_size = (rt->nb_tracked_methods + 1) * 2;
        err = av_reallocp(&rt->tracked_methods,
                          rt->tracked_methods_size * sizeof(*rt->tracked_methods));
        if (err < 0) {
            /* on failure the buffer is gone; reset the bookkeeping */
            rt->nb_tracked_methods   = 0;
            rt->tracked_methods_size = 0;
            return err;
        }
    }

    entry = &rt->tracked_methods[rt->nb_tracked_methods];
    entry->name = av_strdup(name);
    if (!entry->name)
        return AVERROR(ENOMEM);
    entry->id = id;
    rt->nb_tracked_methods++;

    return 0;
}
 
/* Remove entry index from the tracked-method list, closing the gap.
 * Note: does not free the entry's name (ownership is transferred to the
 * caller by find_tracked_method before deletion). */
static void del_tracked_method(RTMPContext *rt, int index)
{
    int tail = rt->nb_tracked_methods - index - 1;

    memmove(rt->tracked_methods + index, rt->tracked_methods + index + 1,
            tail * sizeof(*rt->tracked_methods));
    rt->nb_tracked_methods--;
}
 
/* Read the transaction id out of a reply packet (at offset into pkt->data)
 * and look it up among the tracked invokes. On a match, hands the stored
 * method name to the caller via tracked_method (caller must free it) and
 * drops the entry. Returns 0 even when no entry matches; negative AVERROR
 * only when the id cannot be parsed. */
static int find_tracked_method(URLContext *s, RTMPPacket *pkt, int offset,
                               char **tracked_method)
{
    RTMPContext *rt = s->priv_data;
    GetByteContext gbc;
    double txn_id;
    int ret, i;

    bytestream2_init(&gbc, pkt->data + offset, pkt->size - offset);
    ret = ff_amf_read_number(&gbc, &txn_id);
    if (ret < 0)
        return ret;

    for (i = 0; i < rt->nb_tracked_methods; i++) {
        if (rt->tracked_methods[i].id == txn_id) {
            *tracked_method = rt->tracked_methods[i].name;
            del_tracked_method(rt, i);
            break;
        }
    }

    return 0;
}
 
/* Release every tracked-method name and the list itself, resetting the
 * context's tracking state to empty. */
static void free_tracked_methods(RTMPContext *rt)
{
    while (rt->nb_tracked_methods > 0)
        av_free(rt->tracked_methods[--rt->nb_tracked_methods].name);

    av_free(rt->tracked_methods);
    rt->tracked_methods      = NULL;
    rt->tracked_methods_size = 0;
}
 
/* Send pkt to the server and destroy it afterwards (the packet is always
 * consumed, even on error). When track is set and the packet is an invoke,
 * its method name and transaction id are first recorded so the reply can
 * be matched later. Returns bytes written or a negative AVERROR. */
static int rtmp_send_packet(RTMPContext *rt, RTMPPacket *pkt, int track)
{
    int ret = 0;

    if (track && pkt->type == RTMP_PT_INVOKE) {
        GetByteContext gbc;
        char method[128];
        double txn_id;
        int method_len;

        bytestream2_init(&gbc, pkt->data, pkt->size);
        ret = ff_amf_read_string(&gbc, method, sizeof(method), &method_len);
        if (ret < 0)
            goto fail;

        ret = ff_amf_read_number(&gbc, &txn_id);
        if (ret < 0)
            goto fail;

        ret = add_tracked_method(rt, method, txn_id);
        if (ret < 0)
            goto fail;
    }

    ret = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
fail:
    ff_rtmp_packet_destroy(pkt);
    return ret;
}
 
/* Serialize one "-rtmp_conn"-style parameter into the AMF stream at *p.
 *
 * A parameter is "T:value" or, for a named field inside an object,
 * "NT:name:value", where T is B (boolean), N (number), S (string),
 * Z (null) or O (object). Boolean data must be 0 or 1; for O, 1 begins
 * and 0 ends an object. Returns 0 on success, AVERROR(EINVAL) on a
 * malformed parameter. Mutates param in place (':' separators become
 * NUL terminators). */
static int rtmp_write_amf_data(URLContext *s, char *param, uint8_t **p)
{
    char *field = NULL, *value;
    char type;

    if (param[0] && param[1] == ':') {
        type  = param[0];
        value = param + 2;
    } else if (param[0] == 'N' && param[1] && param[2] == ':') {
        type  = param[1];
        field = param + 3;
        value = strchr(field, ':');
        if (!value)
            goto fail;
        *value++ = '\0';

        ff_amf_write_field_name(p, field);
    } else {
        goto fail;
    }

    switch (type) {
    case 'B':
        ff_amf_write_bool(p, value[0] != '0');
        break;
    case 'S':
        ff_amf_write_string(p, value);
        break;
    case 'N':
        ff_amf_write_number(p, strtod(value, NULL));
        break;
    case 'Z':
        ff_amf_write_null(p);
        break;
    case 'O':
        if (value[0] != '0')
            ff_amf_write_object_start(p);
        else
            ff_amf_write_object_end(p);
        break;
    default:
        goto fail;
    }

    return 0;

fail:
    av_log(s, AV_LOG_ERROR, "Invalid AMF parameter: %s\n", param);
    return AVERROR(EINVAL);
}
 
/**
 * Generate 'connect' call and send it to the server.
 *
 * Builds the AMF-encoded NetConnection "connect" invoke: the command name,
 * a transaction id, and a command object whose fields depend on direction
 * (publish vs play). The field order below is the wire format — do not
 * reorder. Any user-supplied "conn" AMF parameters are appended afterwards.
 *
 * @return zero on success, negative value otherwise
 */
static int gen_connect(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    /* 4096 + APP_MAX_LENGTH is an upper bound for the fixed fields plus
     * the app string; the real size is trimmed at the end. */
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                     0, 4096 + APP_MAX_LENGTH)) < 0)
        return ret;

    p = pkt.data;

    ff_amf_write_string(&p, "connect");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_object_start(&p);
    ff_amf_write_field_name(&p, "app");
    /* auth_params is appended to app/tcUrl when (re)authenticating */
    ff_amf_write_string2(&p, rt->app, rt->auth_params);

    if (!rt->is_input) {
        ff_amf_write_field_name(&p, "type");
        ff_amf_write_string(&p, "nonprivate");
    }
    ff_amf_write_field_name(&p, "flashVer");
    ff_amf_write_string(&p, rt->flashver);

    if (rt->swfurl) {
        ff_amf_write_field_name(&p, "swfUrl");
        ff_amf_write_string(&p, rt->swfurl);
    }

    ff_amf_write_field_name(&p, "tcUrl");
    ff_amf_write_string2(&p, rt->tcurl, rt->auth_params);
    if (rt->is_input) {
        /* fields only a playing client advertises */
        ff_amf_write_field_name(&p, "fpad");
        ff_amf_write_bool(&p, 0);
        ff_amf_write_field_name(&p, "capabilities");
        ff_amf_write_number(&p, 15.0);

        /* Tell the server we support all the audio codecs except
         * SUPPORT_SND_INTEL (0x0008) and SUPPORT_SND_UNUSED (0x0010)
         * which are unused in the RTMP protocol implementation. */
        ff_amf_write_field_name(&p, "audioCodecs");
        ff_amf_write_number(&p, 4071.0);
        ff_amf_write_field_name(&p, "videoCodecs");
        ff_amf_write_number(&p, 252.0);
        ff_amf_write_field_name(&p, "videoFunction");
        ff_amf_write_number(&p, 1.0);

        if (rt->pageurl) {
            ff_amf_write_field_name(&p, "pageUrl");
            ff_amf_write_string(&p, rt->pageurl);
        }
    }
    ff_amf_write_object_end(&p);

    if (rt->conn) {
        char *param = rt->conn;

        // Write arbitrary AMF data to the Connect message.
        // rt->conn is a space-separated list; each item is parsed by
        // rtmp_write_amf_data (separators are NUL'ed in place).
        while (param != NULL) {
            char *sep;
            param += strspn(param, " ");
            if (!*param)
                break;
            sep = strchr(param, ' ');
            if (sep)
                *sep = '\0';
            if ((ret = rtmp_write_amf_data(s, param, &p)) < 0) {
                // Invalid AMF parameter.
                ff_rtmp_packet_destroy(&pkt);
                return ret;
            }

            if (sep)
                param = sep + 1;
            else
                break;
        }
    }

    // shrink the packet to the bytes actually written
    pkt.size = p - pkt.data;

    return rtmp_send_packet(rt, &pkt, 1);
}
 
/**
 * Listen-mode (server side) counterpart of gen_connect: read the client's
 * "connect" invoke and answer with the standard server preamble —
 * Window Acknowledgement Size, Peer Bandwidth, Stream Begin ping, chunk
 * size, a "_result" carrying NetConnection.Connect.Success, and onBWDone.
 *
 * @return zero on success, negative value otherwise
 */
static int read_connect(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt = { 0 };
    uint8_t *p;
    const uint8_t *cp;
    int ret;
    char command[64];
    int stringlen;
    double seqnum;
    uint8_t tmpstr[256];
    GetByteContext gbc;

    if ((ret = ff_rtmp_packet_read(rt->stream, &pkt, rt->in_chunk_size,
                                   &rt->prev_pkt[0], &rt->nb_prev_pkt[0])) < 0)
        return ret;
    cp = pkt.data;
    bytestream2_init(&gbc, cp, pkt.size);
    // the first AMF value must be the command name "connect"
    if (ff_amf_read_string(&gbc, command, sizeof(command), &stringlen)) {
        av_log(s, AV_LOG_ERROR, "Unable to read command string\n");
        ff_rtmp_packet_destroy(&pkt);
        return AVERROR_INVALIDDATA;
    }
    if (strcmp(command, "connect")) {
        av_log(s, AV_LOG_ERROR, "Expecting connect, got %s\n", command);
        ff_rtmp_packet_destroy(&pkt);
        return AVERROR_INVALIDDATA;
    }
    // transaction id, echoed back in the "_result" reply below
    ret = ff_amf_read_number(&gbc, &seqnum);
    if (ret)
        av_log(s, AV_LOG_WARNING, "SeqNum not found\n");
    /* Here one could parse an AMF Object with data as flashVers and others. */
    ret = ff_amf_get_field_value(gbc.buffer,
                                 gbc.buffer + bytestream2_get_bytes_left(&gbc),
                                 "app", tmpstr, sizeof(tmpstr));
    if (ret)
        av_log(s, AV_LOG_WARNING, "App field not found in connect\n");
    if (!ret && strcmp(tmpstr, rt->app))
        av_log(s, AV_LOG_WARNING, "App field don't match up: %s <-> %s\n",
               tmpstr, rt->app);
    ff_rtmp_packet_destroy(&pkt);

    // Send Window Acknowledgement Size (as defined in specification)
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
                                     RTMP_PT_SERVER_BW, 0, 4)) < 0)
        return ret;
    p = pkt.data;
    bytestream_put_be32(&p, rt->server_bw);
    pkt.size = p - pkt.data;
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
    if (ret < 0)
        return ret;
    // Send Peer Bandwidth
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
                                     RTMP_PT_CLIENT_BW, 0, 5)) < 0)
        return ret;
    p = pkt.data;
    bytestream_put_be32(&p, rt->server_bw);
    bytestream_put_byte(&p, 2); // dynamic
    pkt.size = p - pkt.data;
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
    if (ret < 0)
        return ret;

    // Ping request
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
                                     RTMP_PT_PING, 0, 6)) < 0)
        return ret;

    p = pkt.data;
    bytestream_put_be16(&p, 0); // 0 -> Stream Begin
    bytestream_put_be32(&p, 0);
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
    if (ret < 0)
        return ret;

    // Chunk size
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
                                     RTMP_PT_CHUNK_SIZE, 0, 4)) < 0)
        return ret;

    p = pkt.data;
    bytestream_put_be32(&p, rt->out_chunk_size);
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
    if (ret < 0)
        return ret;

    // Send _result NetConnection.Connect.Success to connect
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
                                     RTMP_PT_INVOKE, 0,
                                     RTMP_PKTDATA_DEFAULT_SIZE)) < 0)
        return ret;

    p = pkt.data;
    ff_amf_write_string(&p, "_result");
    ff_amf_write_number(&p, seqnum); // echo the client's transaction id

    // properties object
    ff_amf_write_object_start(&p);
    ff_amf_write_field_name(&p, "fmsVer");
    ff_amf_write_string(&p, "FMS/3,0,1,123");
    ff_amf_write_field_name(&p, "capabilities");
    ff_amf_write_number(&p, 31);
    ff_amf_write_object_end(&p);

    // information object
    ff_amf_write_object_start(&p);
    ff_amf_write_field_name(&p, "level");
    ff_amf_write_string(&p, "status");
    ff_amf_write_field_name(&p, "code");
    ff_amf_write_string(&p, "NetConnection.Connect.Success");
    ff_amf_write_field_name(&p, "description");
    ff_amf_write_string(&p, "Connection succeeded.");
    ff_amf_write_field_name(&p, "objectEncoding");
    ff_amf_write_number(&p, 0); // AMF0
    ff_amf_write_object_end(&p);

    pkt.size = p - pkt.data;
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
    if (ret < 0)
        return ret;

    // onBWDone invoke
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
                                     RTMP_PT_INVOKE, 0, 30)) < 0)
        return ret;
    p = pkt.data;
    ff_amf_write_string(&p, "onBWDone");
    ff_amf_write_number(&p, 0);
    ff_amf_write_null(&p);
    ff_amf_write_number(&p, 8192);
    pkt.size = p - pkt.data;
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}
 
/**
 * Send a 'releaseStream' invoke, asking the server to free any channel
 * it currently associates with the playpath.
 */
static int gen_release_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    /* 29 = fixed AMF overhead; the playpath string is added on top */
    ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                0, 29 + strlen(rt->playpath));
    if (ret < 0)
        return ret;

    av_log(s, AV_LOG_DEBUG, "Releasing stream...\n");
    cp = pkt.data;
    ff_amf_write_string(&cp, "releaseStream");
    ff_amf_write_number(&cp, ++rt->nb_invokes);
    ff_amf_write_null(&cp);
    ff_amf_write_string(&cp, rt->playpath);

    return rtmp_send_packet(rt, &pkt, 1);
}
 
/**
 * Send an 'FCPublish' invoke, telling the server to prepare for
 * receiving the media stream named by the playpath.
 */
static int gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                0, 25 + strlen(rt->playpath));
    if (ret < 0)
        return ret;

    av_log(s, AV_LOG_DEBUG, "FCPublish stream...\n");
    cp = pkt.data;
    ff_amf_write_string(&cp, "FCPublish");
    ff_amf_write_number(&cp, ++rt->nb_invokes);
    ff_amf_write_null(&cp);
    ff_amf_write_string(&cp, rt->playpath);

    return rtmp_send_packet(rt, &pkt, 1);
}
 
/**
 * Send an 'FCUnpublish' invoke, telling the server to tear down the
 * stream named by the playpath. The reply is not tracked.
 */
static int gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                0, 27 + strlen(rt->playpath));
    if (ret < 0)
        return ret;

    av_log(s, AV_LOG_DEBUG, "UnPublishing stream...\n");
    cp = pkt.data;
    ff_amf_write_string(&cp, "FCUnpublish");
    ff_amf_write_number(&cp, ++rt->nb_invokes);
    ff_amf_write_null(&cp);
    ff_amf_write_string(&cp, rt->playpath);

    return rtmp_send_packet(rt, &pkt, 0);
}
 
/**
 * Send a 'createStream' invoke, asking the server to allocate a channel
 * for the media stream.
 */
static int gen_create_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    av_log(s, AV_LOG_DEBUG, "Creating stream...\n");

    ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                0, 25);
    if (ret < 0)
        return ret;

    cp = pkt.data;
    ff_amf_write_string(&cp, "createStream");
    ff_amf_write_number(&cp, ++rt->nb_invokes);
    ff_amf_write_null(&cp);

    return rtmp_send_packet(rt, &pkt, 1);
}
 
 
/**
 * Send a 'deleteStream' invoke, asking the server to release the channel
 * it allocated for the media stream. The reply is not tracked.
 */
static int gen_delete_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    av_log(s, AV_LOG_DEBUG, "Deleting stream...\n");

    ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                0, 34);
    if (ret < 0)
        return ret;

    cp = pkt.data;
    ff_amf_write_string(&cp, "deleteStream");
    ff_amf_write_number(&cp, ++rt->nb_invokes);
    ff_amf_write_null(&cp);
    ff_amf_write_number(&cp, rt->stream_id);

    return rtmp_send_packet(rt, &pkt, 0);
}
 
/**
 * Tell the server how much the client will buffer, via a ping message
 * (event type 3) carrying the stream id and the buffer time in ms.
 */
static int gen_buffer_time(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
                                1, 10);
    if (ret < 0)
        return ret;

    cp = pkt.data;
    bytestream_put_be16(&cp, 3); // event type 3: set buffer length
    bytestream_put_be32(&cp, rt->stream_id);
    bytestream_put_be32(&cp, rt->client_buffer_time);

    return rtmp_send_packet(rt, &pkt, 0);
}
 
/**
 * Send a 'play' invoke for the configured playpath on the server-assigned
 * stream id. The trailing number selects recorded/live/both behavior
 * (rt->live scaled by 1000, per the RTMP play "start" argument).
 */
static int gen_play(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    av_log(s, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);

    ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
                                0, 29 + strlen(rt->playpath));
    if (ret < 0)
        return ret;

    pkt.extra = rt->stream_id;

    cp = pkt.data;
    ff_amf_write_string(&cp, "play");
    ff_amf_write_number(&cp, ++rt->nb_invokes);
    ff_amf_write_null(&cp);
    ff_amf_write_string(&cp, rt->playpath);
    ff_amf_write_number(&cp, rt->live * 1000);

    return rtmp_send_packet(rt, &pkt, 1);
}
 
/**
 * Generate a 'seek' call and send it to the server.
 *
 * @param s         RTMP protocol URLContext (used for logging)
 * @param rt        protocol handler context
 * @param timestamp target position — presumably milliseconds, matching the
 *                  RTMP play/seek convention; confirm at the call site
 * @return zero on success, negative value otherwise
 */
static int gen_seek(URLContext *s, RTMPContext *rt, int64_t timestamp)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    av_log(s, AV_LOG_DEBUG, "Sending seek command for timestamp %"PRId64"\n",
           timestamp);

    /* Consistency fix: use the named constant (RTMP_SYSTEM_CHANNEL == 3)
     * instead of the bare literal 3 — every other invoke in this file
     * names its channel. No behavior change. */
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                     0, 26)) < 0)
        return ret;

    pkt.extra = rt->stream_id;

    p = pkt.data;
    ff_amf_write_string(&p, "seek");
    ff_amf_write_number(&p, 0); //no tracking back responses
    ff_amf_write_null(&p); //as usual, the first null param
    ff_amf_write_number(&p, timestamp); //where we want to jump

    return rtmp_send_packet(rt, &pkt, 1);
}
 
/**
 * Send a 'publish' invoke announcing that the client will push media
 * data for the playpath, in "live" mode.
 */
static int gen_publish(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    av_log(s, AV_LOG_DEBUG, "Sending publish command for '%s'\n", rt->playpath);

    ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
                                0, 30 + strlen(rt->playpath));
    if (ret < 0)
        return ret;

    pkt.extra = rt->stream_id;

    cp = pkt.data;
    ff_amf_write_string(&cp, "publish");
    ff_amf_write_number(&cp, ++rt->nb_invokes);
    ff_amf_write_null(&cp);
    ff_amf_write_string(&cp, rt->playpath);
    ff_amf_write_string(&cp, "live");

    return rtmp_send_packet(rt, &pkt, 1);
}
 
/**
 * Answer a server ping: send a pong (ping event type 7) echoing the
 * 4-byte value carried in the server's ping packet.
 */
static int gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    /* a ping carries a 2-byte event type plus a 4-byte value */
    if (ppkt->size < 6) {
        av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n",
               ppkt->size);
        return AVERROR_INVALIDDATA;
    }

    ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
                                ppkt->timestamp + 1, 6);
    if (ret < 0)
        return ret;

    cp = pkt.data;
    bytestream_put_be16(&cp, 7); // event type 7: pong
    bytestream_put_be32(&cp, AV_RB32(ppkt->data + 2)); // echo the value back

    return rtmp_send_packet(rt, &pkt, 0);
}
 
/**
 * Send the SWF verification response (ping event type 27) containing the
 * precomputed 42-byte rt->swfverification blob.
 */
static int gen_swf_verification(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    av_log(s, AV_LOG_DEBUG, "Sending SWF verification...\n");
    ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
                                0, 44);
    if (ret < 0)
        return ret;

    cp = pkt.data;
    bytestream_put_be16(&cp, 27); // event type 27: SWF verification
    memcpy(cp, rt->swfverification, 42);

    return rtmp_send_packet(rt, &pkt, 0);
}
 
/**
 * Send our window acknowledgement size ("server bandwidth") to the peer.
 */
static int gen_server_bw(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_SERVER_BW,
                                0, 4);
    if (ret < 0)
        return ret;

    cp = pkt.data;
    bytestream_put_be32(&cp, rt->server_bw);

    return rtmp_send_packet(rt, &pkt, 0);
}
 
/**
 * Send a '_checkbw' invoke in reply to the server's bandwidth probing.
 */
static int gen_check_bw(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                0, 21);
    if (ret < 0)
        return ret;

    cp = pkt.data;
    ff_amf_write_string(&cp, "_checkbw");
    ff_amf_write_number(&cp, ++rt->nb_invokes);
    ff_amf_write_null(&cp);

    return rtmp_send_packet(rt, &pkt, 1);
}
 
/**
 * Report the running count of bytes received (rt->bytes_read) to the
 * server, stamped with the given timestamp.
 */
static int gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts)
{
    RTMPPacket pkt;
    uint8_t *cp;
    int ret;

    ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_BYTES_READ,
                                ts, 4);
    if (ret < 0)
        return ret;

    cp = pkt.data;
    bytestream_put_be32(&cp, rt->bytes_read);

    return rtmp_send_packet(rt, &pkt, 0);
}
 
/**
 * Send an "FCSubscribe" invoke for the given stream name.
 */
static int gen_fcsubscribe_stream(URLContext *s, RTMPContext *rt,
                                  const char *subscribe)
{
    RTMPPacket msg;
    uint8_t *writer;
    int err;

    /* 27 bytes of fixed AMF data plus the variable-length stream name. */
    err = ff_rtmp_packet_create(&msg, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                0, 27 + strlen(subscribe));
    if (err < 0)
        return err;

    writer = msg.data;
    ff_amf_write_string(&writer, "FCSubscribe");
    ff_amf_write_number(&writer, ++rt->nb_invokes);
    ff_amf_write_null(&writer);
    ff_amf_write_string(&writer, subscribe);

    return rtmp_send_packet(rt, &msg, 1);
}
 
/**
 * Compute an HMAC-SHA256 digest over src, optionally excluding a 32-byte
 * hole at offset 'gap' (the spot where the digest itself will be stored),
 * and write the 32-byte result to dst.
 *
 * @param src    input data
 * @param len    total input length, including the excluded gap (if any)
 * @param gap    offset of the 32-byte hole to skip; <= 0 digests everything
 * @param key    HMAC key
 * @param keylen HMAC key length
 * @param dst    output buffer for the 32-byte digest
 * @return 0 on success, AVERROR(ENOMEM) if the SHA context cannot be allocated
 */
int ff_rtmp_calc_digest(const uint8_t *src, int len, int gap,
                        const uint8_t *key, int keylen, uint8_t *dst)
{
    struct AVSHA *sha;
    /* First 64 bytes hold the (padded, XORed) key block; the last 32 bytes
     * receive the inner hash so the outer pass can digest both in one go. */
    uint8_t hmac_buf[64+32] = {0};
    int i;

    sha = av_sha_alloc();
    if (!sha)
        return AVERROR(ENOMEM);

    /* Per HMAC (RFC 2104): keys shorter than the 64-byte block size are
     * zero-padded (buffer already zeroed); longer keys are hashed first. */
    if (keylen < 64) {
        memcpy(hmac_buf, key, keylen);
    } else {
        av_sha_init(sha, 256);
        av_sha_update(sha, key, keylen);
        av_sha_final(sha, hmac_buf);
    }
    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL;

    /* Inner digest: H((key ^ ipad) || message). */
    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64);
    if (gap <= 0) {
        av_sha_update(sha, src, len);
    } else { //skip 32 bytes used for storing digest
        av_sha_update(sha, src, gap);
        av_sha_update(sha, src + gap + 32, len - gap - 32);
    }
    av_sha_final(sha, hmac_buf + 64);

    /* XORing with ipad^opad turns the stored (key ^ ipad) into (key ^ opad). */
    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL ^ HMAC_OPAD_VAL; //reuse XORed key for opad

    /* Outer digest: H((key ^ opad) || inner digest). */
    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64+32);
    av_sha_final(sha, dst);

    av_free(sha);

    return 0;
}
 
/**
 * Derive the digest offset inside a handshake packet: sum the four bytes
 * at 'off' and map the sum into [add_val, add_val + mod_val).
 */
int ff_rtmp_calc_digest_pos(const uint8_t *buf, int off, int mod_val,
                            int add_val)
{
    int byte_sum = buf[off]     + buf[off + 1] +
                   buf[off + 2] + buf[off + 3];

    return byte_sum % mod_val + add_val;
}
 
/**
 * Put HMAC-SHA2 digest of packet data (except for the bytes where this digest
 * will be stored) into that packet.
 *
 * @param buf handshake data (1536 bytes)
 * @param encrypted use an encrypted connection (RTMPE)
 * @return offset to the digest inside input data, or negative error code
 */
static int rtmp_handshake_imprint_with_digest(uint8_t *buf, int encrypted)
{
    int digest_pos, err;

    /* RTMPE and plain RTMP derive the digest position from different
     * regions of the handshake packet. */
    digest_pos = encrypted
               ? ff_rtmp_calc_digest_pos(buf, 772, 728, 776)
               : ff_rtmp_calc_digest_pos(buf, 8, 728, 12);

    err = ff_rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
                              rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN,
                              buf + digest_pos);
    if (err < 0)
        return err;

    return digest_pos;
}
 
/**
 * Verify that the received server response has the expected digest value.
 *
 * @param buf handshake data received from the server (1536 bytes)
 * @param off position to search digest offset from
 * @return digest position if the digest is valid, 0 if it does not match,
 *         negative error code on failure
 */
static int rtmp_validate_digest(uint8_t *buf, int off)
{
    uint8_t expected[32];
    int pos, err;

    pos = ff_rtmp_calc_digest_pos(buf, off, 728, off + 4);

    err = ff_rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, pos,
                              rtmp_server_key, SERVER_KEY_OPEN_PART_LEN,
                              expected);
    if (err < 0)
        return err;

    return memcmp(expected, buf + pos, 32) ? 0 : pos;
}
 
/**
 * Build the 42-byte SWF verification blob from the decompressed SWF hash,
 * keyed with the last 32 bytes of the server handshake.
 */
static int rtmp_calc_swf_verification(URLContext *s, RTMPContext *rt,
                                      uint8_t *buf)
{
    uint8_t *writer;
    int err;

    if (rt->swfhash_len != 32) {
        av_log(s, AV_LOG_ERROR,
               "Hash of the decompressed SWF file is not 32 bytes long.\n");
        return AVERROR(EINVAL);
    }

    /* Layout: 0x01 0x01, the uncompressed size twice, then the HMAC. */
    writer = &rt->swfverification[0];
    bytestream_put_byte(&writer, 1);
    bytestream_put_byte(&writer, 1);
    bytestream_put_be32(&writer, rt->swfsize);
    bytestream_put_be32(&writer, rt->swfsize);

    err = ff_rtmp_calc_digest(rt->swfhash, 32, 0, buf, 32, writer);
    if (err < 0)
        return err;

    return 0;
}
 
#if CONFIG_ZLIB
/**
 * Inflate a zlib-compressed SWF player body into a growing heap buffer.
 *
 * @param in_data  compressed input
 * @param in_size  compressed input size
 * @param out_data in/out: reallocated output buffer (appended to)
 * @param out_size in/out: number of valid bytes in *out_data
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rtmp_uncompress_swfplayer(uint8_t *in_data, int64_t in_size,
                                     uint8_t **out_data, int64_t *out_size)
{
    z_stream zs = { 0 };
    void *ptr;
    int size;
    int ret = 0;

    zs.avail_in = in_size;
    zs.next_in  = in_data;
    ret = inflateInit(&zs);
    if (ret != Z_OK)
        return AVERROR_UNKNOWN;

    do {
        uint8_t tmp_buf[16384];

        zs.avail_out = sizeof(tmp_buf);
        zs.next_out  = tmp_buf;

        ret = inflate(&zs, Z_NO_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            ret = AVERROR_UNKNOWN;
            goto fail;
        }

        /* Append whatever this round produced to the output buffer. */
        size = sizeof(tmp_buf) - zs.avail_out;
        if (!(ptr = av_realloc(*out_data, *out_size + size))) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        *out_data = ptr;

        memcpy(*out_data + *out_size, tmp_buf, size);
        *out_size += size;
    } while (zs.avail_out == 0);

    /* Don't leak zlib's positive Z_OK/Z_STREAM_END status to the caller;
     * this function follows the 0-on-success AVERROR convention. */
    ret = 0;

fail:
    inflateEnd(&zs);
    return ret;
}
#endif
 
/**
 * Fetch the SWF player referenced by rt->swfverify, decompress it if it is
 * a compressed ("CWS") SWF, and store its SHA256 HMAC hash and size into
 * the context for later SWF verification.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rtmp_calc_swfhash(URLContext *s)
{
    RTMPContext *rt = s->priv_data;
    uint8_t *in_data = NULL, *out_data = NULL, *swfdata;
    int64_t in_size, out_size;
    URLContext *stream;
    char swfhash[32];
    int swfsize;
    int ret = 0;

    /* Get the SWF player file. */
    if ((ret = ffurl_open(&stream, rt->swfverify, AVIO_FLAG_READ,
                          &s->interrupt_callback, NULL)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot open connection %s.\n", rt->swfverify);
        goto fail;
    }

    if ((in_size = ffurl_seek(stream, 0, AVSEEK_SIZE)) < 0) {
        ret = AVERROR(EIO);
        goto fail;
    }

    /* Reject truncated files before allocating and reading; this also
     * avoids an av_malloc(0) for empty files being misreported as ENOMEM. */
    if (in_size < 3) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    if (!(in_data = av_malloc(in_size))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = ffurl_read_complete(stream, in_data, in_size)) < 0)
        goto fail;

    if (!memcmp(in_data, "CWS", 3)) {
        /* Decompress the SWF player file using Zlib. */
        if (!(out_data = av_malloc(8))) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        /* The hash is computed over an uncompressed-style header:
         * patch "CWS" to "FWS" and keep the 8-byte header verbatim. */
        *in_data = 'F'; // magic stuff
        memcpy(out_data, in_data, 8);
        out_size = 8;

#if CONFIG_ZLIB
        if ((ret = rtmp_uncompress_swfplayer(in_data + 8, in_size - 8,
                                             &out_data, &out_size)) < 0)
            goto fail;
#else
        av_log(s, AV_LOG_ERROR,
               "Zlib is required for decompressing the SWF player file.\n");
        ret = AVERROR(EINVAL);
        goto fail;
#endif
        swfsize = out_size;
        swfdata = out_data;
    } else {
        swfsize = in_size;
        swfdata = in_data;
    }

    /* Compute the SHA256 hash of the SWF player file. */
    if ((ret = ff_rtmp_calc_digest(swfdata, swfsize, 0,
                                   "Genuine Adobe Flash Player 001", 30,
                                   swfhash)) < 0)
        goto fail;

    /* Set SWFVerification parameters. */
    av_opt_set_bin(rt, "rtmp_swfhash", swfhash, 32, 0);
    rt->swfsize = swfsize;

fail:
    av_freep(&in_data);
    av_freep(&out_data);
    ffurl_close(stream);
    return ret;
}
 
/**
 * Perform handshake with the server by means of exchanging pseudorandom data
 * signed with HMAC-SHA2 digest.
 *
 * @return 0 if handshake succeeds, negative value otherwise
 */
static int rtmp_handshake(URLContext *s, RTMPContext *rt)
{
    AVLFG rnd;
    /* C0 (1 byte: protocol/encryption type) followed by C1 (1536 bytes). */
    uint8_t tosend    [RTMP_HANDSHAKE_PACKET_SIZE+1] = {
        3,                // unencrypted data
        0, 0, 0, 0,       // client uptime
        RTMP_CLIENT_VER1,
        RTMP_CLIENT_VER2,
        RTMP_CLIENT_VER3,
        RTMP_CLIENT_VER4,
    };
    uint8_t clientdata[RTMP_HANDSHAKE_PACKET_SIZE];     // S2: echo of our C1
    uint8_t serverdata[RTMP_HANDSHAKE_PACKET_SIZE+1];   // S0 + S1
    int i;
    int server_pos, client_pos;
    uint8_t digest[32], signature[32];
    int ret, type = 0;

    av_log(s, AV_LOG_DEBUG, "Handshaking...\n");

    av_lfg_init(&rnd, 0xDEADC0DE);
    // generate handshake packet - 1536 bytes of pseudorandom data
    for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
        tosend[i] = av_lfg_get(&rnd) >> 24;

    if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
        /* When the client wants to use RTMPE, we have to change the command
         * byte to 0x06 which means to use encrypted data and we have to set
         * the flash version to at least 9.0.115.0. */
        tosend[0] = 6;
        tosend[5] = 128;
        tosend[6] = 0;
        tosend[7] = 3;
        tosend[8] = 2;

        /* Initialize the Diffie-Hellmann context and generate the public key
         * to send to the server. */
        if ((ret = ff_rtmpe_gen_pub_key(rt->stream, tosend + 1)) < 0)
            return ret;
    }

    /* Embed our HMAC digest into C1 and remember where it was stored. */
    client_pos = rtmp_handshake_imprint_with_digest(tosend + 1, rt->encrypted);
    if (client_pos < 0)
        return client_pos;

    /* Send C0 + C1. */
    if ((ret = ffurl_write(rt->stream, tosend,
                           RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot write RTMP handshake request\n");
        return ret;
    }

    /* Receive S0 + S1. */
    if ((ret = ffurl_read_complete(rt->stream, serverdata,
                                   RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return ret;
    }

    /* Receive S2. */
    if ((ret = ffurl_read_complete(rt->stream, clientdata,
                                   RTMP_HANDSHAKE_PACKET_SIZE)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return ret;
    }

    av_log(s, AV_LOG_DEBUG, "Type answer %d\n", serverdata[0]);
    av_log(s, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n",
           serverdata[5], serverdata[6], serverdata[7], serverdata[8]);

    if (rt->is_input && serverdata[5] >= 3) {
        /* "New-style" handshake: locate and validate the server digest,
         * trying both known digest placement schemes. */
        server_pos = rtmp_validate_digest(serverdata + 1, 772);
        if (server_pos < 0)
            return server_pos;

        if (!server_pos) {
            type = 1;
            server_pos = rtmp_validate_digest(serverdata + 1, 8);
            if (server_pos < 0)
                return server_pos;

            if (!server_pos) {
                av_log(s, AV_LOG_ERROR, "Server response validating failed\n");
                return AVERROR(EIO);
            }
        }

        /* Generate SWFVerification token (SHA256 HMAC hash of decompressed SWF,
         * key are the last 32 bytes of the server handshake. */
        if (rt->swfsize) {
            if ((ret = rtmp_calc_swf_verification(s, rt, serverdata + 1 +
                                                  RTMP_HANDSHAKE_PACKET_SIZE - 32)) < 0)
                return ret;
        }

        /* Derive the key from our own digest, then verify the server's
         * signature at the end of S2 against it. */
        ret = ff_rtmp_calc_digest(tosend + 1 + client_pos, 32, 0,
                                  rtmp_server_key, sizeof(rtmp_server_key),
                                  digest);
        if (ret < 0)
            return ret;

        ret = ff_rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE - 32,
                                  0, digest, 32, signature);
        if (ret < 0)
            return ret;

        if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
            /* Compute the shared secret key sent by the server and initialize
             * the RC4 encryption. */
            if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1,
                                                   tosend + 1, type)) < 0)
                return ret;

            /* Encrypt the signature received by the server. */
            ff_rtmpe_encrypt_sig(rt->stream, signature, digest, serverdata[0]);
        }

        if (memcmp(signature, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) {
            av_log(s, AV_LOG_ERROR, "Signature mismatch\n");
            return AVERROR(EIO);
        }

        /* Build C2: fresh pseudorandom data signed with a key derived from
         * the server's digest. */
        for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++)
            tosend[i] = av_lfg_get(&rnd) >> 24;
        ret = ff_rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0,
                                  rtmp_player_key, sizeof(rtmp_player_key),
                                  digest);
        if (ret < 0)
            return ret;

        ret = ff_rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
                                  digest, 32,
                                  tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32);
        if (ret < 0)
            return ret;

        if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
            /* Encrypt the signature to be send to the server. */
            ff_rtmpe_encrypt_sig(rt->stream, tosend +
                                 RTMP_HANDSHAKE_PACKET_SIZE - 32, digest,
                                 serverdata[0]);
        }

        // write reply back to the server
        if ((ret = ffurl_write(rt->stream, tosend,
                               RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
            return ret;

        if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
            /* Set RC4 keys for encryption and update the keystreams. */
            if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0)
                return ret;
        }
    } else {
        /* "Old-style" handshake (or publishing): C2 simply echoes S1. */
        if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
            /* Compute the shared secret key sent by the server and initialize
             * the RC4 encryption. */
            if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1,
                                                   tosend + 1, 1)) < 0)
                return ret;

            if (serverdata[0] == 9) {
                /* Encrypt the signature received by the server. */
                ff_rtmpe_encrypt_sig(rt->stream, signature, digest,
                                     serverdata[0]);
            }
        }

        if ((ret = ffurl_write(rt->stream, serverdata + 1,
                               RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
            return ret;

        if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
            /* Set RC4 keys for encryption and update the keystreams. */
            if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0)
                return ret;
        }
    }

    return 0;
}
 
/**
 * Read one 1536-byte handshake packet and extract its two leading
 * big-endian 32-bit words (epoch and the "zero"/peer-time field).
 */
static int rtmp_receive_hs_packet(RTMPContext* rt, uint32_t *first_int,
                                  uint32_t *second_int, char *arraydata,
                                  int size)
{
    int nread;

    nread = ffurl_read_complete(rt->stream, arraydata,
                                RTMP_HANDSHAKE_PACKET_SIZE);
    if (nread <= 0)
        return AVERROR(EIO);
    if (nread != RTMP_HANDSHAKE_PACKET_SIZE) {
        av_log(rt, AV_LOG_ERROR, "Erroneous Message size %d"
               " not following standard\n", (int)nread);
        return AVERROR(EINVAL);
    }

    *first_int  = AV_RB32(arraydata);
    *second_int = AV_RB32(arraydata + 4);
    return 0;
}
 
/**
 * Patch the two leading big-endian 32-bit words into a handshake buffer
 * and send the whole 1536-byte packet.
 */
static int rtmp_send_hs_packet(RTMPContext* rt, uint32_t first_int,
                               uint32_t second_int, char *arraydata, int size)
{
    int nwritten;

    AV_WB32(arraydata,     first_int);
    AV_WB32(arraydata + 4, second_int);
    nwritten = ffurl_write(rt->stream, arraydata,
                           RTMP_HANDSHAKE_PACKET_SIZE);
    if (nwritten != RTMP_HANDSHAKE_PACKET_SIZE) {
        av_log(rt, AV_LOG_ERROR, "Unable to write answer\n");
        return AVERROR(EIO);
    }

    return 0;
}
 
/**
 * rtmp handshake server side: receive C0/C1/C2 from the client and answer
 * with S0/S1/S2 (simple handshake, no digest verification).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rtmp_server_handshake(URLContext *s, RTMPContext *rt)
{
    uint8_t buffer[RTMP_HANDSHAKE_PACKET_SIZE];
    uint32_t hs_epoch;      // client's epoch from C1
    uint32_t hs_my_epoch;   // epoch we advertise in S1
    uint8_t hs_c1[RTMP_HANDSHAKE_PACKET_SIZE];
    uint8_t hs_s1[RTMP_HANDSHAKE_PACKET_SIZE];
    uint32_t zeroes;
    uint32_t temp = 0;
    int randomidx = 0;
    int inoutsize = 0;
    int ret;

    inoutsize = ffurl_read_complete(rt->stream, buffer, 1); // Receive C0
    if (inoutsize <= 0) {
        av_log(s, AV_LOG_ERROR, "Unable to read handshake\n");
        return AVERROR(EIO);
    }
    // Check Version
    if (buffer[0] != 3) {
        av_log(s, AV_LOG_ERROR, "RTMP protocol version mismatch\n");
        return AVERROR(EIO);
    }
    if (ffurl_write(rt->stream, buffer, 1) <= 0) { // Send S0
        av_log(s, AV_LOG_ERROR,
               "Unable to write answer - RTMP S0\n");
        return AVERROR(EIO);
    }
    /* Receive C1 */
    ret = rtmp_receive_hs_packet(rt, &hs_epoch, &zeroes, hs_c1,
                                 RTMP_HANDSHAKE_PACKET_SIZE);
    if (ret) {
        av_log(s, AV_LOG_ERROR, "RTMP Handshake C1 Error\n");
        return ret;
    }
    /* Send S1 */
    /* By now same epoch will be sent */
    hs_my_epoch = hs_epoch;
    /* Generate random */
    /* Bytes 0-7 (epoch + zero field) are filled in by rtmp_send_hs_packet;
     * only the random tail from offset 8 is generated here. */
    for (randomidx = 8; randomidx < (RTMP_HANDSHAKE_PACKET_SIZE);
         randomidx += 4)
        AV_WB32(hs_s1 + randomidx, av_get_random_seed());

    ret = rtmp_send_hs_packet(rt, hs_my_epoch, 0, hs_s1,
                              RTMP_HANDSHAKE_PACKET_SIZE);
    if (ret) {
        av_log(s, AV_LOG_ERROR, "RTMP Handshake S1 Error\n");
        return ret;
    }
    /* Send S2 */
    /* S2 echoes the client's C1 back, tagged with its epoch. */
    ret = rtmp_send_hs_packet(rt, hs_epoch, 0, hs_c1,
                              RTMP_HANDSHAKE_PACKET_SIZE);
    if (ret) {
        av_log(s, AV_LOG_ERROR, "RTMP Handshake S2 Error\n");
        return ret;
    }
    /* Receive C2 */
    ret = rtmp_receive_hs_packet(rt, &temp, &zeroes, buffer,
                                 RTMP_HANDSHAKE_PACKET_SIZE);
    if (ret) {
        av_log(s, AV_LOG_ERROR, "RTMP Handshake C2 Error\n");
        return ret;
    }
    /* C2 should echo our S1; mismatches are tolerated with a warning. */
    if (temp != hs_my_epoch)
        av_log(s, AV_LOG_WARNING,
               "Erroneous C2 Message epoch does not match up with C1 epoch\n");
    if (memcmp(buffer + 8, hs_s1 + 8,
               RTMP_HANDSHAKE_PACKET_SIZE - 8))
        av_log(s, AV_LOG_WARNING,
               "Erroneous C2 Message random does not match up\n");

    return 0;
}
 
/**
 * Handle an incoming chunk size change message and adopt the new
 * incoming (and, when publishing, outgoing) chunk size.
 */
static int handle_chunk_size(URLContext *s, RTMPPacket *pkt)
{
    RTMPContext *rt = s->priv_data;
    int err;

    if (pkt->size < 4) {
        av_log(s, AV_LOG_ERROR,
               "Too short chunk size change packet (%d)\n",
               pkt->size);
        return AVERROR_INVALIDDATA;
    }

    if (!rt->is_input) {
        /* Send the same chunk size change packet back to the server,
         * setting the outgoing chunk size to the same as the incoming one. */
        err = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size,
                                   &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
        if (err < 0)
            return err;
        rt->out_chunk_size = AV_RB32(pkt->data);
    }

    rt->in_chunk_size = AV_RB32(pkt->data);
    if (rt->in_chunk_size <= 0) {
        av_log(s, AV_LOG_ERROR, "Incorrect chunk size %d\n",
               rt->in_chunk_size);
        return AVERROR_INVALIDDATA;
    }
    av_log(s, AV_LOG_DEBUG, "New incoming chunk size = %d\n",
           rt->in_chunk_size);

    return 0;
}
 
/**
 * Dispatch an incoming RTMP ping (user control) message by its event type:
 * answer pings with pongs and SWFVerification requests with the token.
 */
static int handle_ping(URLContext *s, RTMPPacket *pkt)
{
    RTMPContext *rt = s->priv_data;
    int event, err;

    if (pkt->size < 2) {
        av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n",
               pkt->size);
        return AVERROR_INVALIDDATA;
    }

    event = AV_RB16(pkt->data);
    switch (event) {
    case 6: /* ping request from the server: reply with a pong */
        if ((err = gen_pong(s, rt, pkt)) < 0)
            return err;
        break;
    case 26: /* SWFVerification request */
        if (rt->swfsize) {
            if ((err = gen_swf_verification(s, rt)) < 0)
                return err;
        } else {
            av_log(s, AV_LOG_WARNING, "Ignoring SWFVerification request.\n");
        }
        break;
    }

    return 0;
}
 
/**
 * Handle a client bandwidth (peer bandwidth) message: record the window
 * size after which we must send byte-read acknowledgements.
 */
static int handle_client_bw(URLContext *s, RTMPPacket *pkt)
{
    RTMPContext *rt = s->priv_data;

    /* Payload starts with a 32-bit window size. */
    if (pkt->size < 4) {
        av_log(s, AV_LOG_ERROR,
               "Client bandwidth report packet is less than 4 bytes long (%d)\n",
               pkt->size);
        return AVERROR_INVALIDDATA;
    }

    rt->client_report_size = AV_RB32(pkt->data);
    if (rt->client_report_size <= 0) {
        av_log(s, AV_LOG_ERROR, "Incorrect client bandwidth %d\n",
               rt->client_report_size);
        return AVERROR_INVALIDDATA;
    }

    av_log(s, AV_LOG_DEBUG, "Client bandwidth = %d\n", rt->client_report_size);
    /* Acknowledge after half the advertised window. */
    rt->client_report_size >>= 1;

    return 0;
}
 
/**
 * Handle a server bandwidth (window acknowledgement size) message and
 * store the advertised value.
 */
static int handle_server_bw(URLContext *s, RTMPPacket *pkt)
{
    RTMPContext *rt = s->priv_data;

    /* Payload is a single 32-bit window size. */
    if (pkt->size < 4) {
        av_log(s, AV_LOG_ERROR,
               "Too short server bandwidth report packet (%d)\n",
               pkt->size);
        return AVERROR_INVALIDDATA;
    }

    rt->server_bw = AV_RB32(pkt->data);
    if (rt->server_bw <= 0) {
        av_log(s, AV_LOG_ERROR, "Incorrect server bandwidth %d\n",
               rt->server_bw);
        return AVERROR_INVALIDDATA;
    }

    av_log(s, AV_LOG_DEBUG, "Server bandwidth = %d\n", rt->server_bw);

    return 0;
}
 
/**
 * Compute the Adobe-style authentication response and store the resulting
 * query string ("?authmod=adobe&user=...&challenge=...&response=...") in
 * rt->auth_params for the reconnect attempt.
 *
 * The response is base64(md5(base64(md5(user+salt+password)) + (opaque or
 * challenge) + our own challenge)).
 *
 * @return 0 on success, AVERROR(ENOMEM) if the MD5 context can't be allocated
 */
static int do_adobe_auth(RTMPContext *rt, const char *user, const char *salt,
                         const char *opaque, const char *challenge)
{
    uint8_t hash[16];
    char hashstr[AV_BASE64_SIZE(sizeof(hash))], challenge2[10];
    struct AVMD5 *md5 = av_md5_alloc();
    if (!md5)
        return AVERROR(ENOMEM);

    /* Our own random 8-hex-digit challenge sent back to the server. */
    snprintf(challenge2, sizeof(challenge2), "%08x", av_get_random_seed());

    /* First round: md5(user + salt + password), base64-encoded. */
    av_md5_init(md5);
    av_md5_update(md5, user, strlen(user));
    av_md5_update(md5, salt, strlen(salt));
    av_md5_update(md5, rt->password, strlen(rt->password));
    av_md5_final(md5, hash);
    av_base64_encode(hashstr, sizeof(hashstr), hash,
                     sizeof(hash));
    /* Second round: md5(first round + server opaque/challenge + ours). */
    av_md5_init(md5);
    av_md5_update(md5, hashstr, strlen(hashstr));
    if (opaque)
        av_md5_update(md5, opaque, strlen(opaque));
    else if (challenge)
        av_md5_update(md5, challenge, strlen(challenge));
    av_md5_update(md5, challenge2, strlen(challenge2));
    av_md5_final(md5, hash);
    av_base64_encode(hashstr, sizeof(hashstr), hash,
                     sizeof(hash));
    snprintf(rt->auth_params, sizeof(rt->auth_params),
             "?authmod=%s&user=%s&challenge=%s&response=%s",
             "adobe", user, challenge2, hashstr);
    if (opaque)
        av_strlcatf(rt->auth_params, sizeof(rt->auth_params),
                    "&opaque=%s", opaque);

    av_free(md5);
    return 0;
}
 
/**
 * Compute the Limelight (llnw) authentication response — an HTTP
 * Digest-style (RFC 2617-like) MD5 exchange with fixed realm "live",
 * method "publish" and qop "auth" — and store the resulting query string
 * in rt->auth_params for the reconnect attempt.
 *
 * @return 0 on success, AVERROR(ENOMEM) if the MD5 context can't be allocated
 */
static int do_llnw_auth(RTMPContext *rt, const char *user, const char *nonce)
{
    uint8_t hash[16];
    char hashstr1[33], hashstr2[33];   // hex MD5s: HA1 and HA2
    const char *realm = "live";
    const char *method = "publish";
    const char *qop = "auth";
    const char *nc = "00000001";
    char cnonce[10];
    struct AVMD5 *md5 = av_md5_alloc();
    if (!md5)
        return AVERROR(ENOMEM);

    /* Client nonce: 8 random hex digits. */
    snprintf(cnonce, sizeof(cnonce), "%08x", av_get_random_seed());

    /* HA1 = md5(user:realm:password) */
    av_md5_init(md5);
    av_md5_update(md5, user, strlen(user));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, realm, strlen(realm));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, rt->password, strlen(rt->password));
    av_md5_final(md5, hash);
    ff_data_to_hex(hashstr1, hash, 16, 1);
    hashstr1[32] = '\0';

    /* HA2 = md5(method:/app[/_definst_]) — "_definst_" is appended when
     * the app name contains no instance component. */
    av_md5_init(md5);
    av_md5_update(md5, method, strlen(method));
    av_md5_update(md5, ":/", 2);
    av_md5_update(md5, rt->app, strlen(rt->app));
    if (!strchr(rt->app, '/'))
        av_md5_update(md5, "/_definst_", strlen("/_definst_"));
    av_md5_final(md5, hash);
    ff_data_to_hex(hashstr2, hash, 16, 1);
    hashstr2[32] = '\0';

    /* response = md5(HA1:nonce:nc:cnonce:qop:HA2) */
    av_md5_init(md5);
    av_md5_update(md5, hashstr1, strlen(hashstr1));
    av_md5_update(md5, ":", 1);
    if (nonce)
        av_md5_update(md5, nonce, strlen(nonce));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, nc, strlen(nc));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, cnonce, strlen(cnonce));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, qop, strlen(qop));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, hashstr2, strlen(hashstr2));
    av_md5_final(md5, hash);
    ff_data_to_hex(hashstr1, hash, 16, 1);

    snprintf(rt->auth_params, sizeof(rt->auth_params),
             "?authmod=%s&user=%s&nonce=%s&cnonce=%s&nc=%s&response=%s",
             "llnw", user, nonce, cnonce, nc, hashstr1);

    av_free(md5);
    return 0;
}
 
/**
 * Parse a connect() error description from the server and, when it is an
 * authentication challenge, compute the auth parameters so the caller can
 * reconnect with credentials appended to the connect string.
 *
 * @param s    URL context for logging
 * @param desc server-supplied error description string
 * @return 0 when a reconnect with rt->auth_params should be attempted,
 *         AVERROR_UNKNOWN on unrecoverable errors
 */
static int handle_connect_error(URLContext *s, const char *desc)
{
    RTMPContext *rt = s->priv_data;
    char buf[300], *ptr, authmod[15];
    int i = 0, ret = 0;
    const char *user = "", *salt = "", *opaque = NULL,
               *challenge = NULL, *cptr = NULL, *nonce = NULL;

    /* Only the "adobe" and "llnw" authentication schemes are supported. */
    if (!(cptr = strstr(desc, "authmod=adobe")) &&
        !(cptr = strstr(desc, "authmod=llnw"))) {
        av_log(s, AV_LOG_ERROR,
               "Unknown connect error (unsupported authentication method?)\n");
        return AVERROR_UNKNOWN;
    }
    /* Extract the scheme name (up to the next space). */
    cptr += strlen("authmod=");
    while (*cptr && *cptr != ' ' && i < sizeof(authmod) - 1)
        authmod[i++] = *cptr++;
    authmod[i] = '\0';

    if (!rt->username[0] || !rt->password[0]) {
        av_log(s, AV_LOG_ERROR, "No credentials set\n");
        return AVERROR_UNKNOWN;
    }

    if (strstr(desc, "?reason=authfailed")) {
        av_log(s, AV_LOG_ERROR, "Incorrect username/password\n");
        return AVERROR_UNKNOWN;
    } else if (strstr(desc, "?reason=nosuchuser")) {
        av_log(s, AV_LOG_ERROR, "Incorrect username\n");
        return AVERROR_UNKNOWN;
    }

    /* Avoid looping: only one authenticated reconnect is attempted. */
    if (rt->auth_tried) {
        av_log(s, AV_LOG_ERROR, "Authentication failed\n");
        return AVERROR_UNKNOWN;
    }

    rt->auth_params[0] = '\0';

    /* First stage: server wants the user name before issuing a challenge. */
    if (strstr(desc, "code=403 need auth")) {
        snprintf(rt->auth_params, sizeof(rt->auth_params),
                 "?authmod=%s&user=%s", authmod, rt->username);
        return 0;
    }

    if (!(cptr = strstr(desc, "?reason=needauth"))) {
        av_log(s, AV_LOG_ERROR, "No auth parameters found\n");
        return AVERROR_UNKNOWN;
    }

    /* Parse the "key=value&key=value..." challenge parameters.
     * NOTE(review): a key without '=' leaves its value NULL; the auth
     * helpers call strlen() on user/salt — presumably servers always send
     * values, but this is worth confirming/guarding. */
    av_strlcpy(buf, cptr + 1, sizeof(buf));
    ptr = buf;

    while (ptr) {
        char *next  = strchr(ptr, '&');
        char *value = strchr(ptr, '=');
        if (next)
            *next++ = '\0';
        if (value)
            *value++ = '\0';
        if (!strcmp(ptr, "user")) {
            user = value;
        } else if (!strcmp(ptr, "salt")) {
            salt = value;
        } else if (!strcmp(ptr, "opaque")) {
            opaque = value;
        } else if (!strcmp(ptr, "challenge")) {
            challenge = value;
        } else if (!strcmp(ptr, "nonce")) {
            nonce = value;
        }
        ptr = next;
    }

    if (!strcmp(authmod, "adobe")) {
        if ((ret = do_adobe_auth(rt, user, salt, opaque, challenge)) < 0)
            return ret;
    } else {
        if ((ret = do_llnw_auth(rt, user, nonce)) < 0)
            return ret;
    }

    rt->auth_tried = 1;
    return 0;
}
 
/**
 * Handle an AMF "_error" reply: downgrade known-harmless errors to
 * warnings, turn connect() auth challenges into a reconnect request,
 * and fail on everything else.
 *
 * @return 0 when the error is tolerated or a reconnect was prepared,
 *         a negative error code otherwise
 */
static int handle_invoke_error(URLContext *s, RTMPPacket *pkt)
{
    RTMPContext *rt = s->priv_data;
    const uint8_t *data_end = pkt->data + pkt->size;
    char *tracked_method = NULL;
    int level = AV_LOG_ERROR;
    uint8_t tmpstr[256];
    int ret;

    /* Offset 9 skips the AMF "_error" string header to the transaction id. */
    if ((ret = find_tracked_method(s, pkt, 9, &tracked_method)) < 0)
        return ret;

    if (!ff_amf_get_field_value(pkt->data + 9, data_end,
                                "description", tmpstr, sizeof(tmpstr))) {
        if (tracked_method && (!strcmp(tracked_method, "_checkbw")      ||
                               !strcmp(tracked_method, "releaseStream") ||
                               !strcmp(tracked_method, "FCSubscribe")   ||
                               !strcmp(tracked_method, "FCPublish"))) {
            /* Gracefully ignore Adobe-specific historical artifact errors. */
            level = AV_LOG_WARNING;
            ret = 0;
        } else if (tracked_method && !strcmp(tracked_method, "connect")) {
            /* May be an authentication challenge — prepare a reconnect. */
            ret = handle_connect_error(s, tmpstr);
            if (!ret) {
                rt->do_reconnect = 1;
                level = AV_LOG_VERBOSE;
            }
        } else
            ret = AVERROR_UNKNOWN;
        av_log(s, level, "Server error: %s\n", tmpstr);
    }

    av_free(tracked_method);
    return ret;
}
 
/**
 * Send a "Stream Begin" user control message for the current stream id
 * (server mode).
 */
static int write_begin(URLContext *s)
{
    RTMPContext *rt = s->priv_data;
    PutByteContext writer;
    RTMPPacket begin_pkt = { 0 };
    int err;

    // Send Stream Begin 1
    err = ff_rtmp_packet_create(&begin_pkt, RTMP_NETWORK_CHANNEL,
                                RTMP_PT_PING, 0, 6);
    if (err < 0) {
        av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
        return err;
    }

    bytestream2_init_writer(&writer, begin_pkt.data, begin_pkt.size);
    bytestream2_put_be16(&writer, 0); // event type 0 -> Stream Begin
    bytestream2_put_be32(&writer, rt->nb_streamid);

    err = ff_rtmp_packet_write(rt->stream, &begin_pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);

    ff_rtmp_packet_destroy(&begin_pkt);

    return err;
}
 
/**
 * Send an "onStatus" invoke describing the new state of a stream
 * (server mode), e.g. NetStream.Publish.Start.
 *
 * @param pkt      request packet whose stream id the reply is sent on
 * @param status   status code string to report
 * @param filename stream name used in the description/details fields
 */
static int write_status(URLContext *s, RTMPPacket *pkt,
                        const char *status, const char *filename)
{
    RTMPContext *rt = s->priv_data;
    RTMPPacket status_pkt = { 0 };
    char msgbuf[128];
    uint8_t *writer;
    int err;

    err = ff_rtmp_packet_create(&status_pkt, RTMP_SYSTEM_CHANNEL,
                                RTMP_PT_INVOKE, 0,
                                RTMP_PKTDATA_DEFAULT_SIZE);
    if (err < 0) {
        av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
        return err;
    }

    writer = status_pkt.data;
    status_pkt.extra = pkt->extra;    // reply on the requester's stream id
    ff_amf_write_string(&writer, "onStatus");
    ff_amf_write_number(&writer, 0);
    ff_amf_write_null(&writer);

    /* Information object describing the stream state. */
    ff_amf_write_object_start(&writer);
    ff_amf_write_field_name(&writer, "level");
    ff_amf_write_string(&writer, "status");
    ff_amf_write_field_name(&writer, "code");
    ff_amf_write_string(&writer, status);
    ff_amf_write_field_name(&writer, "description");
    snprintf(msgbuf, sizeof(msgbuf), "%s is now published", filename);
    ff_amf_write_string(&writer, msgbuf);
    ff_amf_write_field_name(&writer, "details");
    ff_amf_write_string(&writer, filename);
    ff_amf_write_field_name(&writer, "clientid");
    snprintf(msgbuf, sizeof(msgbuf), "%s", LIBAVFORMAT_IDENT);
    ff_amf_write_string(&writer, msgbuf);
    ff_amf_write_object_end(&writer);

    status_pkt.size = writer - status_pkt.data;
    err = ff_rtmp_packet_write(rt->stream, &status_pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&status_pkt);

    return err;
}
 
/**
 * Answer a client invoke in server mode: acknowledge FCPublish/publish/play
 * with the appropriate notifications and send a generic "_result" (with a
 * fresh stream id for createStream) for everything else.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int send_invoke_response(URLContext *s, RTMPPacket *pkt)
{
    RTMPContext *rt = s->priv_data;
    double seqnum;
    char filename[64];
    char command[64];
    int stringlen;
    char *pchar;
    const uint8_t *p = pkt->data;
    uint8_t *pp = NULL;
    RTMPPacket spkt = { 0 };
    GetByteContext gbc;
    int ret;

    /* Invoke layout: command string, transaction number, null, args... */
    bytestream2_init(&gbc, p, pkt->size);
    if (ff_amf_read_string(&gbc, command, sizeof(command),
                           &stringlen)) {
        av_log(s, AV_LOG_ERROR, "Error in PT_INVOKE\n");
        return AVERROR_INVALIDDATA;
    }

    ret = ff_amf_read_number(&gbc, &seqnum);
    if (ret)
        return ret;
    ret = ff_amf_read_null(&gbc);
    if (ret)
        return ret;
    if (!strcmp(command, "FCPublish") ||
        !strcmp(command, "publish")) {
        /* NOTE(review): this read's return value is not checked; on failure
         * 'filename' may be left unset before the comparison below. */
        ret = ff_amf_read_string(&gbc, filename,
                                 sizeof(filename), &stringlen);
        // check with url
        if (s->filename) {
            /* Compare the announced stream name against the last path
             * component of the listen URL; mismatches only warn. */
            pchar = strrchr(s->filename, '/');
            if (!pchar) {
                av_log(s, AV_LOG_WARNING,
                       "Unable to find / in url %s, bad format\n",
                       s->filename);
                pchar = s->filename;
            }
            pchar++;
            if (strcmp(pchar, filename))
                av_log(s, AV_LOG_WARNING, "Unexpected stream %s, expecting"
                       " %s\n", filename, pchar);
        }
        rt->state = STATE_RECEIVING;
    }

    if (!strcmp(command, "FCPublish")) {
        /* Acknowledge with an "onFCPublish" invoke. */
        if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
                                         RTMP_PT_INVOKE, 0,
                                         RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
            av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
            return ret;
        }
        pp = spkt.data;
        ff_amf_write_string(&pp, "onFCPublish");
    } else if (!strcmp(command, "publish")) {
        ret = write_begin(s);
        if (ret < 0)
            return ret;

        // Send onStatus(NetStream.Publish.Start)
        return write_status(s, pkt, "NetStream.Publish.Start",
                            filename);
    } else if (!strcmp(command, "play")) {
        ret = write_begin(s);
        if (ret < 0)
            return ret;
        rt->state = STATE_SENDING;
        return write_status(s, pkt, "NetStream.Play.Start",
                            filename);
    } else {
        /* Generic "_result" reply echoing the transaction number. */
        if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
                                         RTMP_PT_INVOKE, 0,
                                         RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
            av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
            return ret;
        }
        pp = spkt.data;
        ff_amf_write_string(&pp, "_result");
        ff_amf_write_number(&pp, seqnum);
        ff_amf_write_null(&pp);
        if (!strcmp(command, "createStream")) {
            rt->nb_streamid++;
            if (rt->nb_streamid == 0 || rt->nb_streamid == 2)
                rt->nb_streamid++; /* Values 0 and 2 are reserved */
            ff_amf_write_number(&pp, rt->nb_streamid);
            /* By now we don't control which streams are removed in
             * deleteStream. There is no stream creation control
             * if a client creates more than 2^32 - 2 streams. */
        }
    }
    spkt.size = pp - spkt.data;
    ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&spkt);
    return ret;
}
 
/**
 * Handle an AMF "_result" reply to a tracked invoke: continue the session
 * setup after "connect" and pick up the allocated stream id after
 * "createStream".
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int handle_invoke_result(URLContext *s, RTMPPacket *pkt)
{
    RTMPContext *rt = s->priv_data;
    char *tracked_method = NULL;
    int ret = 0;

    /* Offset 10 skips the AMF "_result" string header to the id field. */
    if ((ret = find_tracked_method(s, pkt, 10, &tracked_method)) < 0)
        return ret;

    if (!tracked_method) {
        /* Ignore this reply when the current method is not tracked. */
        return ret;
    }

    if (!strcmp(tracked_method, "connect")) {
        if (!rt->is_input) {
            if ((ret = gen_release_stream(s, rt)) < 0)
                goto fail;

            if ((ret = gen_fcpublish_stream(s, rt)) < 0)
                goto fail;
        } else {
            if ((ret = gen_server_bw(s, rt)) < 0)
                goto fail;
        }

        if ((ret = gen_create_stream(s, rt)) < 0)
            goto fail;

        if (rt->is_input) {
            /* Send the FCSubscribe command when the name of live
             * stream is defined by the user or if it's a live stream. */
            if (rt->subscribe) {
                if ((ret = gen_fcsubscribe_stream(s, rt, rt->subscribe)) < 0)
                    goto fail;
            } else if (rt->live == -1) {
                if ((ret = gen_fcsubscribe_stream(s, rt, rt->playpath)) < 0)
                    goto fail;
            }
        }
    } else if (!strcmp(tracked_method, "createStream")) {
        //extract a number from the result
        /* Expected layout: marker byte at 10, AMF number marker (5) at 19,
         * 8-byte double at 21 — require at least 29 bytes before indexing
         * to avoid reading past the packet on malformed replies. */
        if (pkt->size < 21 + 8 ||
            pkt->data[10] || pkt->data[19] != 5 || pkt->data[20]) {
            av_log(s, AV_LOG_WARNING, "Unexpected reply on connect()\n");
        } else {
            rt->stream_id = av_int2double(AV_RB64(pkt->data + 21));
        }

        if (!rt->is_input) {
            if ((ret = gen_publish(s, rt)) < 0)
                goto fail;
        } else {
            if ((ret = gen_play(s, rt)) < 0)
                goto fail;
            if ((ret = gen_buffer_time(s, rt)) < 0)
                goto fail;
        }
    }

fail:
    av_free(tracked_method);
    return ret;
}
 
/**
 * Handle an "onStatus" invoke: report server errors and translate known
 * NetStream status codes into internal client state transitions.
 *
 * @return 0 on success, 1 if the AMF data could not be parsed,
 *         -1 on a server-reported error
 */
static int handle_invoke_status(URLContext *s, RTMPPacket *pkt)
{
    RTMPContext *rt = s->priv_data;
    const uint8_t *data_end = pkt->data + pkt->size;
    const uint8_t *ptr = pkt->data + RTMP_HEADER;
    uint8_t tmpstr[256];
    int i, t;

    /* Skip the two AMF values preceding the information object. */
    for (i = 0; i < 2; i++) {
        t = ff_amf_tag_size(ptr, data_end);
        if (t < 0)
            return 1;
        ptr += t;
    }

    t = ff_amf_get_field_value(ptr, data_end, "level", tmpstr, sizeof(tmpstr));
    if (!t && !strcmp(tmpstr, "error")) {
        /* Prefer the human-readable description; fall back to the code. */
        t = ff_amf_get_field_value(ptr, data_end,
                                   "description", tmpstr, sizeof(tmpstr));
        if (t || !tmpstr[0])
            t = ff_amf_get_field_value(ptr, data_end, "code",
                                       tmpstr, sizeof(tmpstr));
        if (!t)
            av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
        return -1;
    }

    t = ff_amf_get_field_value(ptr, data_end, "code", tmpstr, sizeof(tmpstr));
    if (!t) {
        /* Map status codes onto client states. */
        static const struct {
            const char *code;
            int         state;
        } state_map[] = {
            { "NetStream.Play.Start",           STATE_PLAYING    },
            { "NetStream.Play.Stop",            STATE_STOPPED    },
            { "NetStream.Play.UnpublishNotify", STATE_STOPPED    },
            { "NetStream.Publish.Start",        STATE_PUBLISHING },
            { "NetStream.Seek.Notify",          STATE_PLAYING    },
        };
        for (i = 0; i < FF_ARRAY_ELEMS(state_map); i++) {
            if (!strcmp(tmpstr, state_map[i].code)) {
                rt->state = state_map[i].state;
                break;
            }
        }
    }

    return 0;
}
 
/**
 * Dispatch an incoming RTMP_PT_INVOKE packet by its AMF command name.
 */
static int handle_invoke(URLContext *s, RTMPPacket *pkt)
{
    RTMPContext *rt = s->priv_data;
    int err = 0;

    //TODO: check for the messages sent for wrong state?
    if (ff_amf_match_string(pkt->data, pkt->size, "_error")) {
        err = handle_invoke_error(s, pkt);
    } else if (ff_amf_match_string(pkt->data, pkt->size, "_result")) {
        err = handle_invoke_result(s, pkt);
    } else if (ff_amf_match_string(pkt->data, pkt->size, "onStatus")) {
        err = handle_invoke_status(s, pkt);
    } else if (ff_amf_match_string(pkt->data, pkt->size, "onBWDone")) {
        err = gen_check_bw(s, rt);
    } else if (ff_amf_match_string(pkt->data, pkt->size, "releaseStream") ||
               ff_amf_match_string(pkt->data, pkt->size, "FCPublish")     ||
               ff_amf_match_string(pkt->data, pkt->size, "publish")       ||
               ff_amf_match_string(pkt->data, pkt->size, "play")          ||
               ff_amf_match_string(pkt->data, pkt->size, "_checkbw")      ||
               ff_amf_match_string(pkt->data, pkt->size, "createStream")) {
        err = send_invoke_response(s, pkt);
    }

    /* Non-negative handler results (e.g. 1 from handle_invoke_status)
     * are passed through unchanged, just as negative errors are. */
    return err;
}
 
/**
 * Reserve 'size' bytes at the end of the FLV buffer, resetting the buffer
 * when all previously queued data has already been consumed.
 *
 * @return offset at which the new data should be written
 */
static int update_offset(RTMPContext *rt, int size)
{
    int prev_size;

    // generate packet header and put data into buffer for FLV demuxer
    if (rt->flv_off < rt->flv_size) {
        /* Unread bytes remain: grow the buffer and append after them. */
        prev_size     = rt->flv_size;
        rt->flv_size += size;
    } else {
        /* Everything was consumed: restart writing at the beginning. */
        prev_size    = 0;
        rt->flv_size = size;
        rt->flv_off  = 0;
    }

    return prev_size;
}
 
/**
 * Wrap one RTMP media packet into an FLV tag (11-byte tag header, payload,
 * trailing 4-byte size field) and append it to rt->flv_data for the FLV
 * demuxer to consume.
 *
 * @param skip number of leading payload bytes to drop
 * @return 0 on success, a negative AVERROR code if the buffer can't grow
 */
static int append_flv_data(RTMPContext *rt, RTMPPacket *pkt, int skip)
{
    int old_flv_size, ret;
    PutByteContext pbc;
    const uint8_t *data = pkt->data + skip;
    const int size      = pkt->size - skip;
    uint32_t ts = pkt->timestamp;

    /* 15 extra bytes: 11-byte FLV tag header + trailing 4-byte size field. */
    old_flv_size = update_offset(rt, size + 15);

    if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
        rt->flv_size = rt->flv_off = 0;
        return ret;
    }
    bytestream2_init_writer(&pbc, rt->flv_data, rt->flv_size);
    bytestream2_skip_p(&pbc, old_flv_size);   // keep unread data in place
    bytestream2_put_byte(&pbc, pkt->type);    // FLV tag type
    bytestream2_put_be24(&pbc, size);         // payload size
    bytestream2_put_be24(&pbc, ts);           // timestamp, low 24 bits
    bytestream2_put_byte(&pbc, ts >> 24);     // timestamp extension byte
    bytestream2_put_be24(&pbc, 0);            // stream id field, always 0
    bytestream2_put_buffer(&pbc, data, size);
    bytestream2_put_be32(&pbc, 0);            // trailing size field (left 0)

    return 0;
}
 
static int handle_notify(URLContext *s, RTMPPacket *pkt)
{
    /* Handle an RTMP notification packet: if it starts with the AMF
     * string "@setDataFrame" (the wrapper publishers put around
     * onMetaData), skip that string and forward the remaining payload to
     * the FLV buffer via append_flv_data().
     *
     * Returns 0 on success, AVERROR_INVALIDDATA on malformed AMF data,
     * or a negative error code from append_flv_data(). */
    RTMPContext *rt = s->priv_data;
    uint8_t commandbuffer[64];
    char statusmsg[128];
    int stringlen, ret, skip = 0;
    GetByteContext gbc;

    bytestream2_init(&gbc, pkt->data, pkt->size);
    if (ff_amf_read_string(&gbc, commandbuffer, sizeof(commandbuffer),
                           &stringlen))
        return AVERROR_INVALIDDATA;

    // Skip the @setDataFrame string and validate it is a notification
    /* Cast fixes the pointer-sign mismatch: commandbuffer is uint8_t[]
     * but strcmp() takes const char *. */
    if (!strcmp((const char *)commandbuffer, "@setDataFrame")) {
        /* Point 'skip' just past the command string, i.e. at the start
         * of the embedded FLV metadata that follows it. */
        skip = gbc.buffer - pkt->data;
        ret = ff_amf_read_string(&gbc, statusmsg,
                                 sizeof(statusmsg), &stringlen);
        if (ret < 0)
            return AVERROR_INVALIDDATA;
    }

    return append_flv_data(rt, pkt, skip);
}
 
/**
* Parse received packet and possibly perform some action depending on
* the packet contents.
* @return 0 for no errors, negative values for serious errors which prevent
* further communications, positive values for uncritical errors
*/
static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
{
    /* Dispatch a freshly received packet to the matching protocol
     * handler. Media and metadata packets are intentionally not handled
     * here; get_packet() parses those. Returns 0 on success, or the
     * negative error propagated from a handler. */
    int err = 0;

#ifdef DEBUG
    ff_rtmp_packet_dump(s, pkt);
#endif

    switch (pkt->type) {
    case RTMP_PT_BYTES_READ:
        av_dlog(s, "received bytes read report\n");
        break;
    case RTMP_PT_CHUNK_SIZE:
        err = handle_chunk_size(s, pkt);
        break;
    case RTMP_PT_PING:
        err = handle_ping(s, pkt);
        break;
    case RTMP_PT_CLIENT_BW:
        err = handle_client_bw(s, pkt);
        break;
    case RTMP_PT_SERVER_BW:
        err = handle_server_bw(s, pkt);
        break;
    case RTMP_PT_INVOKE:
        err = handle_invoke(s, pkt);
        break;
    case RTMP_PT_VIDEO:
    case RTMP_PT_AUDIO:
    case RTMP_PT_METADATA:
    case RTMP_PT_NOTIFY:
        /* Audio, Video and Metadata packets are parsed in get_packet() */
        break;
    default:
        av_log(s, AV_LOG_VERBOSE, "Unknown packet type received 0x%02X\n", pkt->type);
        break;
    }

    return err < 0 ? err : 0;
}
 
static int handle_metadata(RTMPContext *rt, RTMPPacket *pkt)
{
    /* Copy the FLV tags embedded in an RTMP_PT_METADATA packet into the
     * FLV buffer, rewriting each tag's timestamp so the stream of tags
     * stays monotonic relative to the packet's own timestamp.
     * Returns 0 on success or a negative error from av_reallocp(). */
    int ret, old_flv_size, type;
    const uint8_t *next;
    uint8_t *p;
    uint32_t size;
    uint32_t ts, cts, pts = 0;

    old_flv_size = update_offset(rt, pkt->size);

    if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
        rt->flv_size = rt->flv_off = 0;
        return ret;
    }

    /* 'next' walks the source packet, 'p' walks the destination buffer. */
    next = pkt->data;
    p = rt->flv_data + old_flv_size;

    /* copy data while rewriting timestamps */
    ts = pkt->timestamp;

    while (next - pkt->data < pkt->size - RTMP_HEADER) {
        /* Parse one FLV tag header: type, 24-bit size, 24-bit timestamp
         * plus the extended-timestamp byte. */
        type = bytestream_get_byte(&next);
        size = bytestream_get_be24(&next);
        cts = bytestream_get_be24(&next);
        cts |= bytestream_get_byte(&next) << 24;
        if (!pts)
            pts = cts;
        /* Advance the output timestamp by the delta between consecutive
         * embedded tags. */
        ts += cts - pts;
        pts = cts;
        /* Stop if the declared tag (+ stream id and PreviousTagSize)
         * would overrun the packet. */
        if (size + 3 + 4 > pkt->data + pkt->size - next)
            break;
        bytestream_put_byte(&p, type);
        bytestream_put_be24(&p, size);
        bytestream_put_be24(&p, ts);
        bytestream_put_byte(&p, ts >> 24);
        /* Copy body plus trailing stream id (3) and PreviousTagSize (4). */
        memcpy(p, next, size + 3 + 4);
        next += size + 3 + 4;
        p += size + 3 + 4;
    }
    /* If parsing stopped early, shrink flv_size to what was written. */
    if (p != rt->flv_data + rt->flv_size) {
        av_log(NULL, AV_LOG_WARNING, "Incomplete flv packets in "
               "RTMP_PT_METADATA packet\n");
        rt->flv_size = p - rt->flv_data;
    }

    return 0;
}
 
/**
* Interact with the server by receiving and sending RTMP packets until
* there is some significant data (media data or expected status notification).
*
* @param s reading context
* @param for_header non-zero value tells function to work until it
* gets notification from the server that playing has been started,
* otherwise function will work until some media data is received (or
* an error happens)
* @return 0 for successful operation, negative value in case of error
*/
static int get_packet(URLContext *s, int for_header)
{
    RTMPContext *rt = s->priv_data;
    int ret;

    if (rt->state == STATE_STOPPED)
        return AVERROR_EOF;

    for (;;) {
        RTMPPacket rpkt = { 0 };
        /* ff_rtmp_packet_read() returns the number of bytes consumed;
         * 0 means "no full packet yet" (mapped to EAGAIN), negative
         * means I/O failure. */
        if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
                                       rt->in_chunk_size, &rt->prev_pkt[0],
                                       &rt->nb_prev_pkt[0])) <= 0) {
            if (ret == 0) {
                return AVERROR(EAGAIN);
            } else {
                return AVERROR(EIO);
            }
        }
        rt->bytes_read += ret;
        /* Periodically acknowledge received data to the server. */
        if (rt->bytes_read - rt->last_bytes_read > rt->client_report_size) {
            av_log(s, AV_LOG_DEBUG, "Sending bytes read report\n");
            if ((ret = gen_bytes_read(s, rt, rpkt.timestamp + 1)) < 0)
                return ret;
            rt->last_bytes_read = rt->bytes_read;
        }

        ret = rtmp_parse_result(s, rt, &rpkt);

        // At this point we must check if we are in the seek state and continue
        // with the next packet. handle_invoke will get us out of this state
        // when the right message is encountered
        if (rt->state == STATE_SEEKING) {
            ff_rtmp_packet_destroy(&rpkt);
            // We continue, let the natural flow of things happen:
            // AVERROR(EAGAIN) or handle_invoke gets us out of here
            continue;
        }

        if (ret < 0) {//serious error in current packet
            ff_rtmp_packet_destroy(&rpkt);
            return ret;
        }
        if (rt->do_reconnect && for_header) {
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        if (rt->state == STATE_STOPPED) {
            ff_rtmp_packet_destroy(&rpkt);
            return AVERROR_EOF;
        }
        /* When called for the header, stop as soon as the session has
         * reached an active state. */
        if (for_header && (rt->state == STATE_PLAYING ||
                           rt->state == STATE_PUBLISHING ||
                           rt->state == STATE_SENDING ||
                           rt->state == STATE_RECEIVING)) {
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        /* Empty packets and packets on an output session carry no media
         * data for us; keep reading. */
        if (!rpkt.size || !rt->is_input) {
            ff_rtmp_packet_destroy(&rpkt);
            continue;
        }
        /* Media/metadata packets are appended to the FLV buffer and end
         * this call so the demuxer can consume them. */
        if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO) {
            ret = append_flv_data(rt, &rpkt, 0);
            ff_rtmp_packet_destroy(&rpkt);
            return ret;
        } else if (rpkt.type == RTMP_PT_NOTIFY) {
            ret = handle_notify(s, &rpkt);
            ff_rtmp_packet_destroy(&rpkt);
            return ret;
        } else if (rpkt.type == RTMP_PT_METADATA) {
            ret = handle_metadata(rt, &rpkt);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        ff_rtmp_packet_destroy(&rpkt);
    }
}
 
static int rtmp_close(URLContext *h)
{
    /* Tear down the RTMP session: send the unpublish/delete-stream
     * commands where the session got far enough to need them, then
     * release every buffer owned by the context. */
    RTMPContext *rt = h->priv_data;
    int dir, idx;
    int ret = 0;

    if (!rt->is_input) {
        /* In publish mode flv_data aliases out_pkt.data (see
         * rtmp_write()); clear the alias before the packet is freed. */
        rt->flv_data = NULL;
        if (rt->out_pkt.size)
            ff_rtmp_packet_destroy(&rt->out_pkt);
        if (rt->state > STATE_FCPUBLISH)
            ret = gen_fcunpublish_stream(h, rt);
    }
    if (rt->state > STATE_HANDSHAKED)
        ret = gen_delete_stream(h, rt);

    /* Free the cached previous-packet headers for both directions. */
    for (dir = 0; dir < 2; dir++) {
        for (idx = 0; idx < rt->nb_prev_pkt[dir]; idx++)
            ff_rtmp_packet_destroy(&rt->prev_pkt[dir][idx]);
        av_freep(&rt->prev_pkt[dir]);
    }

    free_tracked_methods(rt);
    av_freep(&rt->flv_data);
    ffurl_close(rt->stream);
    return ret;
}
 
/**
* Open RTMP connection and verify that the stream can be played.
*
* URL syntax: rtmp://server[:port][/app][/playpath]
* where 'app' is first one or two directories in the path
* (e.g. /ondemand/, /flash/live/, etc.)
* and 'playpath' is a file name (the rest of the path,
* may be prefixed with "mp4:")
*/
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
    RTMPContext *rt = s->priv_data;
    char proto[8], hostname[256], path[1024], auth[100], *fname;
    char *old_app;
    uint8_t buf[2048];
    int port;
    AVDictionary *opts = NULL;
    int ret;

    /* A positive listen timeout implies server (listen) mode. */
    if (rt->listen_timeout > 0)
        rt->listen = 1;

    rt->is_input = !(flags & AVIO_FLAG_WRITE);

    av_url_split(proto, sizeof(proto), auth, sizeof(auth),
                 hostname, sizeof(hostname), &port,
                 path, sizeof(path), s->filename);

    if (strchr(path, ' ')) {
        av_log(s, AV_LOG_WARNING,
               "Detected librtmp style URL parameters, these aren't supported "
               "by the libavformat internal RTMP handler currently enabled. "
               "See the documentation for the correct way to pass parameters.\n");
    }

    /* Split "user:password" credentials out of the authority part. */
    if (auth[0]) {
        char *ptr = strchr(auth, ':');
        if (ptr) {
            *ptr = '\0';
            av_strlcpy(rt->username, auth, sizeof(rt->username));
            av_strlcpy(rt->password, ptr + 1, sizeof(rt->password));
        }
    }

    if (rt->listen && strcmp(proto, "rtmp")) {
        av_log(s, AV_LOG_ERROR, "rtmp_listen not available for %s\n",
               proto);
        return AVERROR(EINVAL);
    }
    /* Build the URL of the underlying transport depending on the RTMP
     * flavor: HTTP tunnel, TLS, encrypted, or plain TCP. */
    if (!strcmp(proto, "rtmpt") || !strcmp(proto, "rtmpts")) {
        if (!strcmp(proto, "rtmpts"))
            av_dict_set(&opts, "ffrtmphttp_tls", "1", 1);

        /* open the http tunneling connection */
        ff_url_join(buf, sizeof(buf), "ffrtmphttp", NULL, hostname, port, NULL);
    } else if (!strcmp(proto, "rtmps")) {
        /* open the tls connection */
        if (port < 0)
            port = RTMPS_DEFAULT_PORT;
        ff_url_join(buf, sizeof(buf), "tls", NULL, hostname, port, NULL);
    } else if (!strcmp(proto, "rtmpe") || (!strcmp(proto, "rtmpte"))) {
        if (!strcmp(proto, "rtmpte"))
            av_dict_set(&opts, "ffrtmpcrypt_tunneling", "1", 1);

        /* open the encrypted connection */
        ff_url_join(buf, sizeof(buf), "ffrtmpcrypt", NULL, hostname, port, NULL);
        rt->encrypted = 1;
    } else {
        /* open the tcp connection */
        if (port < 0)
            port = RTMP_DEFAULT_PORT;
        if (rt->listen)
            ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port,
                        "?listen&listen_timeout=%d",
                        rt->listen_timeout * 1000);
        else
            ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
    }

reconnect:
    if ((ret = ffurl_open(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
                          &s->interrupt_callback, &opts)) < 0) {
        av_log(s , AV_LOG_ERROR, "Cannot open connection %s\n", buf);
        goto fail;
    }

    if (rt->swfverify) {
        if ((ret = rtmp_calc_swfhash(s)) < 0)
            goto fail;
    }

    /* Perform the client or server side of the RTMP handshake. */
    rt->state = STATE_START;
    if (!rt->listen && (ret = rtmp_handshake(s, rt)) < 0)
        goto fail;
    if (rt->listen && (ret = rtmp_server_handshake(s, rt)) < 0)
        goto fail;

    rt->out_chunk_size = 128;
    rt->in_chunk_size = 128; // Probably overwritten later
    rt->state = STATE_HANDSHAKED;

    // Keep the application name when it has been defined by the user.
    old_app = rt->app;

    rt->app = av_malloc(APP_MAX_LENGTH);
    if (!rt->app) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    //extract "app" part from path
    if (!strncmp(path, "/ondemand/", 10)) {
        fname = path + 10;
        memcpy(rt->app, "ondemand", 9);
    } else {
        char *next = *path ? path + 1 : path;
        char *p = strchr(next, '/');
        if (!p) {
            fname = next;
            rt->app[0] = '\0';
        } else {
            // make sure we do not mismatch a playpath for an application instance
            char *c = strchr(p + 1, ':');
            fname = strchr(p + 1, '/');
            if (!fname || (c && c < fname)) {
                fname = p + 1;
                av_strlcpy(rt->app, path + 1, FFMIN(p - path, APP_MAX_LENGTH));
            } else {
                fname++;
                av_strlcpy(rt->app, path + 1, FFMIN(fname - path - 1, APP_MAX_LENGTH));
            }
        }
    }

    if (old_app) {
        // The name of application has been defined by the user, override it.
        if (strlen(old_app) >= APP_MAX_LENGTH) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        av_free(rt->app);
        rt->app = old_app;
    }

    /* Derive the playpath from the remaining file name when the user did
     * not supply one; prefix "mp4:" for .f4v/.mp4 and strip ".flv". */
    if (!rt->playpath) {
        int len = strlen(fname);

        rt->playpath = av_malloc(PLAYPATH_MAX_LENGTH);
        if (!rt->playpath) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        if (!strchr(fname, ':') && len >= 4 &&
            (!strcmp(fname + len - 4, ".f4v") ||
             !strcmp(fname + len - 4, ".mp4"))) {
            memcpy(rt->playpath, "mp4:", 5);
        } else if (len >= 4 && !strcmp(fname + len - 4, ".flv")) {
            fname[len - 4] = '\0';
        } else {
            rt->playpath[0] = 0;
        }
        av_strlcat(rt->playpath, fname, PLAYPATH_MAX_LENGTH);
    }

    /* Default tcurl: proto://host[:port]/app */
    if (!rt->tcurl) {
        rt->tcurl = av_malloc(TCURL_MAX_LENGTH);
        if (!rt->tcurl) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ff_url_join(rt->tcurl, TCURL_MAX_LENGTH, proto, NULL, hostname,
                    port, "/%s", rt->app);
    }

    /* Default flashver string: player version for input, FMLE-style
     * string for output. */
    if (!rt->flashver) {
        rt->flashver = av_malloc(FLASHVER_MAX_LENGTH);
        if (!rt->flashver) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (rt->is_input) {
            snprintf(rt->flashver, FLASHVER_MAX_LENGTH, "%s %d,%d,%d,%d",
                    RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1, RTMP_CLIENT_VER2,
                    RTMP_CLIENT_VER3, RTMP_CLIENT_VER4);
        } else {
            snprintf(rt->flashver, FLASHVER_MAX_LENGTH,
                    "FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT);
        }
    }

    rt->client_report_size = 1048576;
    rt->bytes_read = 0;
    rt->last_bytes_read = 0;
    rt->server_bw = 2500000;

    av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
           proto, path, rt->app, rt->playpath);
    if (!rt->listen) {
        if ((ret = gen_connect(s, rt)) < 0)
            goto fail;
    } else {
        if (read_connect(s, s->priv_data) < 0)
            goto fail;
    }

    /* Drive the protocol until the session reaches an active state. */
    do {
        ret = get_packet(s, 1);
    } while (ret == AVERROR(EAGAIN));
    if (ret < 0)
        goto fail;

    /* The server asked us to reconnect (e.g. via handle_invoke); reset
     * per-connection state and start over with the same transport URL. */
    if (rt->do_reconnect) {
        int i;
        ffurl_close(rt->stream);
        rt->stream       = NULL;
        rt->do_reconnect = 0;
        rt->nb_invokes   = 0;
        for (i = 0; i < 2; i++)
            memset(rt->prev_pkt[i], 0,
                   sizeof(**rt->prev_pkt) * rt->nb_prev_pkt[i]);
        free_tracked_methods(rt);
        goto reconnect;
    }

    if (rt->is_input) {
        int err;
        // generate FLV header for demuxer
        rt->flv_size = 13;
        if ((err = av_reallocp(&rt->flv_data, rt->flv_size)) < 0)
            return err;
        rt->flv_off  = 0;
        memcpy(rt->flv_data, "FLV\1\5\0\0\0\011\0\0\0\0", rt->flv_size);
    } else {
        /* In publish mode the first 13 bytes from the muxer (FLV file
         * header) are skipped by rtmp_write(). */
        rt->flv_size = 0;
        rt->flv_data = NULL;
        rt->flv_off  = 0;
        rt->skip_bytes = 13;
    }

    s->max_packet_size = rt->stream->max_packet_size;
    s->is_streamed = 1;
    return 0;

fail:
    av_dict_free(&opts);
    rtmp_close(s);
    return ret;
}
 
static int rtmp_read(URLContext *s, uint8_t *buf, int size)
{
    /* Serve the FLV demuxer from the internal buffer, fetching a new
     * RTMP packet whenever the buffer runs dry. Returns the number of
     * bytes copied; a short read is returned when the buffered data ends
     * before the request is satisfied. */
    RTMPContext *rt = s->priv_data;
    const int requested = size;
    int err;

    while (size > 0) {
        int avail = rt->flv_size - rt->flv_off;

        if (avail > 0) {
            if (avail >= size) {
                /* Buffer fully covers the request. */
                memcpy(buf, rt->flv_data + rt->flv_off, size);
                rt->flv_off += size;
                return requested;
            }
            /* Drain what is left and report a short read. */
            memcpy(buf, rt->flv_data + rt->flv_off, avail);
            rt->flv_off = rt->flv_size;
            return avail;
        }
        /* Buffer empty: pull the next media packet from the network. */
        if ((err = get_packet(s, 0)) < 0)
            return err;
    }
    return requested;
}
 
static int64_t rtmp_seek(URLContext *s, int stream_index, int64_t timestamp,
                         int flags)
{
    /* Send a seek command to the server, discard buffered FLV data, and
     * enter the seeking state until the server confirms (handled in
     * the invoke path). Returns the target timestamp, or a negative
     * error code if the command could not be sent. */
    RTMPContext *rt = s->priv_data;
    int err;

    av_log(s, AV_LOG_DEBUG,
           "Seek on stream index %d at timestamp %"PRId64" with flags %08x\n",
           stream_index, timestamp, flags);
    err = gen_seek(s, rt, timestamp);
    if (err < 0) {
        av_log(s, AV_LOG_ERROR,
               "Unable to send seek command on stream index %d at timestamp "
               "%"PRId64" with flags %08x\n",
               stream_index, timestamp, flags);
        return err;
    }
    /* Drop any data buffered from before the seek point. */
    rt->flv_off = rt->flv_size;
    rt->state   = STATE_SEEKING;
    return timestamp;
}
 
static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
{
    /* Accept an FLV byte stream from the muxer, reassemble it into
     * complete FLV tags, and send each tag as an RTMP packet. The
     * parsing state (skip_bytes / flv_header_bytes / flv_off) persists
     * across calls, so tags may arrive split over several writes.
     * Returns 'size' on success or a negative error code. */
    RTMPContext *rt = s->priv_data;
    int size_temp = size;
    int pktsize, pkttype;
    uint32_t ts;
    const uint8_t *buf_temp = buf;
    uint8_t c;
    int ret;

    do {
        /* Skip bytes the protocol does not forward: the 13-byte FLV file
         * header at start-up and each 4-byte PreviousTagSize field. */
        if (rt->skip_bytes) {
            int skip = FFMIN(rt->skip_bytes, size_temp);
            buf_temp += skip;
            size_temp -= skip;
            rt->skip_bytes -= skip;
            continue;
        }

        /* Accumulate the 11-byte FLV tag header; it may span calls. */
        if (rt->flv_header_bytes < RTMP_HEADER) {
            const uint8_t *header = rt->flv_header;
            int copy = FFMIN(RTMP_HEADER - rt->flv_header_bytes, size_temp);
            int channel = RTMP_AUDIO_CHANNEL;
            bytestream_get_buffer(&buf_temp, rt->flv_header + rt->flv_header_bytes, copy);
            rt->flv_header_bytes += copy;
            size_temp -= copy;
            if (rt->flv_header_bytes < RTMP_HEADER)
                break;

            /* Parse the completed tag header: type, 24-bit size, 24-bit
             * timestamp plus extension byte, and the stream id (ignored). */
            pkttype = bytestream_get_byte(&header);
            pktsize = bytestream_get_be24(&header);
            ts = bytestream_get_be24(&header);
            ts |= bytestream_get_byte(&header) << 24;
            bytestream_get_be24(&header);
            rt->flv_size = pktsize;

            if (pkttype == RTMP_PT_VIDEO)
                channel = RTMP_VIDEO_CHANNEL;

            //force 12bytes header
            if (((pkttype == RTMP_PT_VIDEO || pkttype == RTMP_PT_AUDIO) && ts == 0) ||
                pkttype == RTMP_PT_NOTIFY) {
                if (pkttype == RTMP_PT_NOTIFY)
                    pktsize += 16;  /* room for the "@setDataFrame" AMF string */
                if ((ret = ff_rtmp_check_alloc_array(&rt->prev_pkt[1],
                                                     &rt->nb_prev_pkt[1],
                                                     channel)) < 0)
                    return ret;
                /* Resetting channel_id invalidates the cached header so a
                 * full 12-byte header is emitted. */
                rt->prev_pkt[1][channel].channel_id = 0;
            }

            //this can be a big packet, it's better to send it right here
            if ((ret = ff_rtmp_packet_create(&rt->out_pkt, channel,
                                             pkttype, ts, pktsize)) < 0)
                return ret;

            rt->out_pkt.extra = rt->stream_id;
            /* flv_data now aliases out_pkt.data; the tag body is copied
             * straight into the outgoing packet below. */
            rt->flv_data = rt->out_pkt.data;

            if (pkttype == RTMP_PT_NOTIFY)
                ff_amf_write_string(&rt->flv_data, "@setDataFrame");
        }

        /* Copy as much of the tag body as this write provides. */
        if (rt->flv_size - rt->flv_off > size_temp) {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, size_temp);
            rt->flv_off += size_temp;
            size_temp = 0;
        } else {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, rt->flv_size - rt->flv_off);
            size_temp -= rt->flv_size - rt->flv_off;
            rt->flv_off += rt->flv_size - rt->flv_off;
        }

        /* Tag complete: send it and reset the parser for the next tag. */
        if (rt->flv_off == rt->flv_size) {
            rt->skip_bytes = 4;

            if ((ret = rtmp_send_packet(rt, &rt->out_pkt, 0)) < 0)
                return ret;
            rt->flv_size = 0;
            rt->flv_off = 0;
            rt->flv_header_bytes = 0;
            rt->flv_nb_packets++;
        }
    } while (buf_temp - buf < size);

    if (rt->flv_nb_packets < rt->flush_interval)
        return size;
    rt->flv_nb_packets = 0;

    /* set stream into nonblocking mode */
    rt->stream->flags |= AVIO_FLAG_NONBLOCK;

    /* try to read one byte from the stream */
    ret = ffurl_read(rt->stream, &c, 1);

    /* switch the stream back into blocking mode */
    rt->stream->flags &= ~AVIO_FLAG_NONBLOCK;

    if (ret == AVERROR(EAGAIN)) {
        /* no incoming data to handle */
        return size;
    } else if (ret < 0) {
        return ret;
    } else if (ret == 1) {
        /* One byte was already consumed above; the _internal reader takes
         * it as parameter 'c' to complete the packet. */
        RTMPPacket rpkt = { 0 };

        if ((ret = ff_rtmp_packet_read_internal(rt->stream, &rpkt,
                                                rt->in_chunk_size,
                                                &rt->prev_pkt[0],
                                                &rt->nb_prev_pkt[0], c)) <= 0)
            return ret;

        if ((ret = rtmp_parse_result(s, rt, &rpkt)) < 0)
            return ret;

        ff_rtmp_packet_destroy(&rpkt);
    }

    return size;
}
 
#define OFFSET(x) offsetof(RTMPContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
#define ENC AV_OPT_FLAG_ENCODING_PARAM
 
/* AVOption table shared by all RTMP protocol flavors; options are
 * exposed to users via the per-flavor AVClass below. */
static const AVOption rtmp_options[] = {
    {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_buffer", "Set buffer time in milliseconds. The default is 3000.", OFFSET(client_buffer_time), AV_OPT_TYPE_INT, {.i64 = 3000}, 0, INT_MAX, DEC|ENC},
    {"rtmp_conn", "Append arbitrary AMF data to the Connect message", OFFSET(conn), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_flush_interval", "Number of packets flushed in the same request (RTMPT only).", OFFSET(flush_interval), AV_OPT_TYPE_INT, {.i64 = 10}, 0, INT_MAX, ENC},
    {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {.i64 = -2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
    {"any", "both", 0, AV_OPT_TYPE_CONST, {.i64 = -2}, 0, 0, DEC, "rtmp_live"},
    {"live", "live stream", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, DEC, "rtmp_live"},
    {"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, DEC, "rtmp_live"},
    {"rtmp_pageurl", "URL of the web page in which the media was embedded. By default no value will be sent.", OFFSET(pageurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
    {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_subscribe", "Name of live stream to subscribe to. Defaults to rtmp_playpath.", OFFSET(subscribe), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
    {"rtmp_swfhash", "SHA256 hash of the decompressed SWF file (32 bytes).", OFFSET(swfhash), AV_OPT_TYPE_BINARY, .flags = DEC},
    {"rtmp_swfsize", "Size of the decompressed SWF file, required for SWFVerification.", OFFSET(swfsize), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC},
    {"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_swfverify", "URL to player swf file, compute hash/size automatically.", OFFSET(swfverify), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
    {"rtmp_tcurl", "URL of the target stream. Defaults to proto://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
    {"listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
    {"timeout", "Maximum timeout (in seconds) to wait for incoming connections. -1 is infinite. Implies -rtmp_listen 1", OFFSET(listen_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
    { NULL },
};
 
/* Stamp out one AVClass and one URLProtocol per RTMP flavor; all share
 * the same option table and handler callbacks above. */
#define RTMP_PROTOCOL(flavor) \
static const AVClass flavor##_class = { \
 .class_name = #flavor, \
 .item_name = av_default_item_name, \
 .option = rtmp_options, \
 .version = LIBAVUTIL_VERSION_INT, \
}; \
 \
URLProtocol ff_##flavor##_protocol = { \
 .name = #flavor, \
 .url_open = rtmp_open, \
 .url_read = rtmp_read, \
 .url_read_seek = rtmp_seek, \
 .url_write = rtmp_write, \
 .url_close = rtmp_close, \
 .priv_data_size = sizeof(RTMPContext), \
 .flags = URL_PROTOCOL_FLAG_NETWORK, \
 .priv_data_class= &flavor##_class, \
};


/* Instantiate every supported flavor. */
RTMP_PROTOCOL(rtmp)
RTMP_PROTOCOL(rtmpe)
RTMP_PROTOCOL(rtmps)
RTMP_PROTOCOL(rtmpt)
RTMP_PROTOCOL(rtmpte)
RTMP_PROTOCOL(rtmpts)
/contrib/sdk/sources/ffmpeg/libavformat/rtp.c
0,0 → 1,152
/*
* RTP input/output format
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "avformat.h"
 
#include "rtp.h"
 
/* from http://www.iana.org/assignments/rtp-parameters last updated 05 January 2005 */
/* payload types >= 96 are dynamic;
* payload types between 72 and 76 are reserved for RTCP conflict avoidance;
* all the other payload types not present in the table are unassigned or
* reserved
*/
static const struct {
    int pt;                        /**< RTP payload type number */
    const char enc_name[6];        /**< encoding name as registered with IANA */
    enum AVMediaType codec_type;
    enum AVCodecID codec_id;
    int clock_rate;                /**< RTP clock rate; -1 when unspecified */
    int audio_channels;            /**< channel count; -1 when unspecified */
} rtp_payload_types[] = {
  {0, "PCMU", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_PCM_MULAW, 8000, 1},
  {3, "GSM", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_NONE, 8000, 1},
  {4, "G723", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_G723_1, 8000, 1},
  {5, "DVI4", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_NONE, 8000, 1},
  {6, "DVI4", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_NONE, 16000, 1},
  {7, "LPC", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_NONE, 8000, 1},
  {8, "PCMA", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_PCM_ALAW, 8000, 1},
  {9, "G722", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_ADPCM_G722, 8000, 1},
  {10, "L16", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_PCM_S16BE, 44100, 2},
  {11, "L16", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_PCM_S16BE, 44100, 1},
  {12, "QCELP", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_QCELP, 8000, 1},
  {13, "CN", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_NONE, 8000, 1},
  {14, "MPA", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_MP2, -1, -1},
  {14, "MPA", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_MP3, -1, -1},
  {15, "G728", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_NONE, 8000, 1},
  {16, "DVI4", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_NONE, 11025, 1},
  {17, "DVI4", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_NONE, 22050, 1},
  {18, "G729", AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_NONE, 8000, 1},
  {25, "CelB", AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_NONE, 90000, -1},
  {26, "JPEG", AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_MJPEG, 90000, -1},
  {28, "nv", AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_NONE, 90000, -1},
  {31, "H261", AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_H261, 90000, -1},
  {32, "MPV", AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_MPEG1VIDEO, 90000, -1},
  {32, "MPV", AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_MPEG2VIDEO, 90000, -1},
  {33, "MP2T", AVMEDIA_TYPE_DATA, AV_CODEC_ID_MPEG2TS, 90000, -1},
  {34, "H263", AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_H263, 90000, -1},
  /* pt == -1 terminates the table. */
  {-1, "", AVMEDIA_TYPE_UNKNOWN, AV_CODEC_ID_NONE, -1, -1}
};
 
int ff_rtp_get_codec_info(AVCodecContext *codec, int payload_type)
{
    /* Fill codec_type/codec_id (and, when known, channels/sample_rate)
     * from the static payload-type table. Returns 0 when a usable entry
     * was found, -1 for unknown or dynamic payload types. */
    int idx;

    for (idx = 0; rtp_payload_types[idx].pt >= 0; idx++) {
        if (rtp_payload_types[idx].pt != payload_type)
            continue;
        /* Entries without a matching FFmpeg codec are skipped. */
        if (rtp_payload_types[idx].codec_id == AV_CODEC_ID_NONE)
            continue;
        codec->codec_type = rtp_payload_types[idx].codec_type;
        codec->codec_id   = rtp_payload_types[idx].codec_id;
        if (rtp_payload_types[idx].audio_channels > 0)
            codec->channels = rtp_payload_types[idx].audio_channels;
        if (rtp_payload_types[idx].clock_rate > 0)
            codec->sample_rate = rtp_payload_types[idx].clock_rate;
        return 0;
    }
    return -1;
}
 
int ff_rtp_get_payload_type(AVFormatContext *fmt,
                            AVCodecContext *codec, int idx)
{
    int i;
    AVOutputFormat *ofmt = fmt ? fmt->oformat : NULL;

    /* Was the payload type already specified for the RTP muxer? */
    if (ofmt && ofmt->priv_class && fmt->priv_data) {
        int64_t payload_type;
        if (av_opt_get_int(fmt->priv_data, "payload_type", 0, &payload_type) >= 0 &&
            payload_type >= 0)
            return (int)payload_type;
    }

    /* static payload type */
    for (i = 0; rtp_payload_types[i].pt >= 0; ++i)
        if (rtp_payload_types[i].codec_id == codec->codec_id) {
            /* H.263's static payload type (34) is only valid for the
             * RFC 2190 packetization; otherwise fall through to a
             * dynamic payload type. */
            if (codec->codec_id == AV_CODEC_ID_H263 && (!fmt || !fmt->oformat ||
                !fmt->oformat->priv_class || !fmt->priv_data ||
                !av_opt_flag_is_set(fmt->priv_data, "rtpflags", "rfc2190")))
                continue;
            /* G722 has 8000 as nominal rate even if the sample rate is 16000,
             * see section 4.5.2 in RFC 3551. */
            if (codec->codec_id == AV_CODEC_ID_ADPCM_G722 &&
                codec->sample_rate == 16000 && codec->channels == 1)
                return rtp_payload_types[i].pt;
            /* For audio, clock rate and channel count must match the
             * table entry exactly. */
            if (codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                ((rtp_payload_types[i].clock_rate > 0 &&
                  codec->sample_rate != rtp_payload_types[i].clock_rate) ||
                 (rtp_payload_types[i].audio_channels > 0 &&
                  codec->channels != rtp_payload_types[i].audio_channels)))
                continue;
            return rtp_payload_types[i].pt;
        }

    /* Default stream index when unspecified: 0 for video, 1 for audio. */
    if (idx < 0)
        idx = codec->codec_type == AVMEDIA_TYPE_AUDIO;

    /* dynamic payload type */
    return RTP_PT_PRIVATE + idx;
}
 
const char *ff_rtp_enc_name(int payload_type)
{
    /* Look up the IANA encoding name for a static payload type;
     * "" is returned for unknown or dynamic types. */
    const char *name = "";
    int idx;

    for (idx = 0; rtp_payload_types[idx].pt >= 0; idx++) {
        if (rtp_payload_types[idx].pt == payload_type) {
            name = rtp_payload_types[idx].enc_name;
            break;
        }
    }

    return name;
}
 
enum AVCodecID ff_rtp_codec_id(const char *buf, enum AVMediaType codec_type)
{
    /* Case-insensitive lookup of an encoding name within the given media
     * type; AV_CODEC_ID_NONE when nothing matches. */
    int idx;

    for (idx = 0; rtp_payload_types[idx].pt >= 0; idx++) {
        if (codec_type != rtp_payload_types[idx].codec_type)
            continue;
        if (!av_strcasecmp(buf, rtp_payload_types[idx].enc_name))
            return rtp_payload_types[idx].codec_id;
    }

    return AV_CODEC_ID_NONE;
}
/contrib/sdk/sources/ffmpeg/libavformat/rtp.h
0,0 → 1,115
/*
* RTP definitions
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_RTP_H
#define AVFORMAT_RTP_H
 
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavutil/mathematics.h"
 
/**
* Return the payload type for a given stream used in the given format context.
* Static payload types are derived from the codec.
* Dynamic payload type are derived from the id field in AVStream.
* The format context private option payload_type overrides both.
*
* @param fmt The context of the format
* @param codec The context of the codec
* @param idx The stream index
* @return The payload type (the 'PT' field in the RTP header).
*/
int ff_rtp_get_payload_type(AVFormatContext *fmt, AVCodecContext *codec,
int idx);
 
/**
* Initialize a codec context based on the payload type.
*
* Fill the codec_type and codec_id fields of a codec context with
* information depending on the payload type; for audio codecs, the
* channels and sample_rate fields are also filled.
*
* @param codec The context of the codec
* @param payload_type The payload type (the 'PT' field in the RTP header)
* @return In case of unknown payload type or dynamic payload type, a
* negative value is returned; otherwise, 0 is returned
*/
int ff_rtp_get_codec_info(AVCodecContext *codec, int payload_type);
 
/**
* Return the encoding name (as defined in
* http://www.iana.org/assignments/rtp-parameters) for a given payload type.
*
* @param payload_type The payload type (the 'PT' field in the RTP header)
* @return In case of unknown payload type or dynamic payload type, a pointer
* to an empty string is returned; otherwise, a pointer to a string containing
* the encoding name is returned
*/
const char *ff_rtp_enc_name(int payload_type);
 
/**
* Return the codec id for the given encoding name and codec type.
*
* @param buf A pointer to the string containing the encoding name
* @param codec_type The codec type
* @return In case of unknown encoding name, AV_CODEC_ID_NONE is returned;
* otherwise, the codec id is returned
*/
enum AVCodecID ff_rtp_codec_id(const char *buf, enum AVMediaType codec_type);
 
#define RTP_PT_PRIVATE 96
#define RTP_VERSION 2
#define RTP_MAX_SDES 256 /**< maximum text length for SDES */
 
/* RTCP packets use 0.5% of the bandwidth */
#define RTCP_TX_RATIO_NUM 5
#define RTCP_TX_RATIO_DEN 1000
 
/* An arbitrary id value for RTP Xiph streams - only relevant to indicate
* that the configuration has changed within a stream (by changing the
* ident value sent).
*/
#define RTP_XIPH_IDENT 0xfecdba
 
/* RTCP packet types */
/** RTCP packet type values as they appear on the wire. */
enum RTCPType {
    RTCP_FIR = 192,
    RTCP_NACK, // 193
    RTCP_SMPTETC,// 194
    RTCP_IJ, // 195
    RTCP_SR = 200,
    RTCP_RR, // 201
    RTCP_SDES, // 202
    RTCP_BYE, // 203
    RTCP_APP, // 204
    RTCP_RTPFB,// 205
    RTCP_PSFB, // 206
    RTCP_XR, // 207
    RTCP_AVB, // 208
    RTCP_RSI, // 209
    RTCP_TOKEN,// 210
};

/* True when 'x' falls inside either RTCP type range above. */
#define RTP_PT_IS_RTCP(x) (((x) >= RTCP_FIR && (x) <= RTCP_IJ) || \
                           ((x) >= RTCP_SR && (x) <= RTCP_TOKEN))

/* Convert a 64-bit NTP timestamp fraction to the RTP 32.32 format. */
#define NTP_TO_RTP_FORMAT(x) av_rescale((x), INT64_C(1) << 32, 1000000)
 
#endif /* AVFORMAT_RTP_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec.c
0,0 → 1,878
/*
* RTP input format
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/mathematics.h"
#include "libavutil/avstring.h"
#include "libavutil/time.h"
#include "libavcodec/get_bits.h"
#include "avformat.h"
#include "network.h"
#include "srtp.h"
#include "url.h"
#include "rtpdec.h"
#include "rtpdec_formats.h"
 
#define MIN_FEEDBACK_INTERVAL 200000 /* 200 ms in us */
 
/* Minimal dynamic-payload handlers declared inline: they only map an
 * SDP encoding name to a codec id, with no parse callback. */
static RTPDynamicProtocolHandler realmedia_mp3_dynamic_handler = {
    .enc_name = "X-MP3-draft-00",
    .codec_type = AVMEDIA_TYPE_AUDIO,
    .codec_id = AV_CODEC_ID_MP3ADU,
};

static RTPDynamicProtocolHandler speex_dynamic_handler = {
    .enc_name = "speex",
    .codec_type = AVMEDIA_TYPE_AUDIO,
    .codec_id = AV_CODEC_ID_SPEEX,
};

static RTPDynamicProtocolHandler opus_dynamic_handler = {
    .enc_name = "opus",
    .codec_type = AVMEDIA_TYPE_AUDIO,
    .codec_id = AV_CODEC_ID_OPUS,
};

/* Head of the singly-linked list of registered dynamic handlers. */
static RTPDynamicProtocolHandler *rtp_first_dynamic_payload_handler = NULL;
 
void ff_register_dynamic_payload_handler(RTPDynamicProtocolHandler *handler)
{
    /* Push the handler onto the head of the global registry list. */
    RTPDynamicProtocolHandler *prev_head = rtp_first_dynamic_payload_handler;

    rtp_first_dynamic_payload_handler = handler;
    handler->next = prev_head;
}
 
/* Register all built-in RTP depacketizers. Each call prepends to the
 * handler list, so lookup order is the reverse of the order below. */
void av_register_rtp_dynamic_payload_handlers(void)
{
    ff_register_dynamic_payload_handler(&ff_amr_nb_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_amr_wb_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_g726_16_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_g726_24_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_g726_32_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_g726_40_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_h263_1998_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_h263_2000_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_h263_rfc2190_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_h264_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_ilbc_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_jpeg_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_mp4a_latm_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_mp4v_es_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_mpeg_audio_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_mpeg_video_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_mpeg4_generic_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_mpegts_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_ms_rtp_asf_pfa_handler);
    ff_register_dynamic_payload_handler(&ff_ms_rtp_asf_pfv_handler);
    ff_register_dynamic_payload_handler(&ff_qcelp_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_qdm2_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_qt_rtp_aud_handler);
    ff_register_dynamic_payload_handler(&ff_qt_rtp_vid_handler);
    ff_register_dynamic_payload_handler(&ff_quicktime_rtp_aud_handler);
    ff_register_dynamic_payload_handler(&ff_quicktime_rtp_vid_handler);
    ff_register_dynamic_payload_handler(&ff_svq3_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_theora_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_vorbis_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_vp8_dynamic_handler);
    ff_register_dynamic_payload_handler(&opus_dynamic_handler);
    ff_register_dynamic_payload_handler(&realmedia_mp3_dynamic_handler);
    ff_register_dynamic_payload_handler(&speex_dynamic_handler);
}
 
/**
 * Look up a registered depacketizer by SDP encoding name
 * (case-insensitive) and media type.
 *
 * @return the matching handler, or NULL if none is registered
 */
RTPDynamicProtocolHandler *ff_rtp_handler_find_by_name(const char *name,
                                                       enum AVMediaType codec_type)
{
    RTPDynamicProtocolHandler *h = rtp_first_dynamic_payload_handler;

    while (h) {
        if (codec_type == h->codec_type && !av_strcasecmp(name, h->enc_name))
            return h;
        h = h->next;
    }
    return NULL;
}
 
/**
 * Look up a registered depacketizer by static RTP payload type and
 * media type. Handlers with static_payload_id == 0 are skipped, since
 * 0 there means "no static payload ID assigned".
 *
 * @return the matching handler, or NULL if none is registered
 */
RTPDynamicProtocolHandler *ff_rtp_handler_find_by_id(int id,
                                                     enum AVMediaType codec_type)
{
    RTPDynamicProtocolHandler *h = rtp_first_dynamic_payload_handler;

    while (h) {
        if (h->static_payload_id && h->static_payload_id == id &&
            codec_type == h->codec_type)
            return h;
        h = h->next;
    }
    return NULL;
}
 
/**
 * Parse a (possibly compound) RTCP packet. On a Sender Report, record
 * the NTP/RTP timestamp pair used later for inter-stream timestamp
 * alignment in finalize_packet().
 *
 * @return -RTCP_BYE if a BYE packet was encountered,
 *         AVERROR_INVALIDDATA on a truncated SR,
 *         -1 otherwise (also after successfully consuming SR packets —
 *         callers treat "no media packet produced" as < 0).
 */
static int rtcp_parse_packet(RTPDemuxContext *s, const unsigned char *buf,
                             int len)
{
    int payload_len;
    while (len >= 4) {
        /* RTCP length field is in 32-bit words minus one; clamp to the
         * amount of data actually available. */
        payload_len = FFMIN(len, (AV_RB16(buf + 2) + 1) * 4);

        switch (buf[1]) {
        case RTCP_SR:
            if (payload_len < 20) {
                av_log(NULL, AV_LOG_ERROR,
                       "Invalid length for RTCP SR packet\n");
                return AVERROR_INVALIDDATA;
            }

            /* Remember when/what the sender reported, for RR generation
             * and for NTP-based timestamping. */
            s->last_rtcp_reception_time = av_gettime();
            s->last_rtcp_ntp_time  = AV_RB64(buf + 8);
            s->last_rtcp_timestamp = AV_RB32(buf + 16);
            if (s->first_rtcp_ntp_time == AV_NOPTS_VALUE) {
                s->first_rtcp_ntp_time = s->last_rtcp_ntp_time;
                if (!s->base_timestamp)
                    s->base_timestamp = s->last_rtcp_timestamp;
                s->rtcp_ts_offset = s->last_rtcp_timestamp - s->base_timestamp;
            }

            break;
        case RTCP_BYE:
            return -RTCP_BYE;
        }

        /* advance to the next packet in the compound */
        buf += payload_len;
        len -= payload_len;
    }
    return -1;
}
 
#define RTP_SEQ_MOD (1 << 16)
 
/**
 * Reset all RTCP receiver statistics and start sequence-number
 * probation at base_sequence.
 */
static void rtp_init_statistics(RTPStatistics *s, uint16_t base_sequence)
{
    memset(s, 0, sizeof(*s));
    s->probation = 1;
    s->max_seq   = base_sequence;
}
 
/*
 * Called whenever there is a large jump in sequence numbers,
 * or when they get out of probation...
 *
 * Re-bases the sequence tracking on seq (RFC 3550 appendix A.1
 * init_seq()). Note: deliberately does NOT touch s->probation.
 */
static void rtp_init_sequence(RTPStatistics *s, uint16_t seq)
{
    s->max_seq        = seq;
    s->cycles         = 0;
    s->base_seq       = seq - 1;          // so that "expected" counts seq itself
    s->bad_seq        = RTP_SEQ_MOD + 1;  // sentinel: no bad sequence recorded
    s->received       = 0;
    s->expected_prior = 0;
    s->received_prior = 0;
    s->jitter         = 0;
    s->transit        = 0;
}
 
/* Returns 1 if we should handle this packet.
 *
 * Sequence-number validation per RFC 3550 appendix A.1 (update_seq):
 * a new source must deliver MIN_SEQUENTIAL consecutive packets before
 * being believed; afterwards, small forward jumps are accepted, large
 * jumps need to be confirmed by a second consecutive packet. */
static int rtp_valid_packet_in_sequence(RTPStatistics *s, uint16_t seq)
{
    uint16_t udelta = seq - s->max_seq;   // 16-bit wraparound delta
    const int MAX_DROPOUT    = 3000;
    const int MAX_MISORDER   = 100;
    const int MIN_SEQUENTIAL = 2;

    /* source not valid until MIN_SEQUENTIAL packets with sequence
     * seq. numbers have been received */
    if (s->probation) {
        if (seq == s->max_seq + 1) {
            s->probation--;
            s->max_seq = seq;
            if (s->probation == 0) {
                rtp_init_sequence(s, seq);
                s->received++;
                return 1;
            }
        } else {
            /* non-consecutive packet: restart probation */
            s->probation = MIN_SEQUENTIAL - 1;
            s->max_seq   = seq;
        }
    } else if (udelta < MAX_DROPOUT) {
        // in order, with permissible gap
        if (seq < s->max_seq) {
            // sequence number wrapped; count another 64k cycles
            s->cycles += RTP_SEQ_MOD;
        }
        s->max_seq = seq;
    } else if (udelta <= RTP_SEQ_MOD - MAX_MISORDER) {
        // sequence made a large jump...
        if (seq == s->bad_seq) {
            /* two sequential packets -- assume that the other side
             * restarted without telling us; just resync. */
            rtp_init_sequence(s, seq);
        } else {
            s->bad_seq = (seq + 1) & (RTP_SEQ_MOD - 1);
            return 0;
        }
    } else {
        // duplicate or reordered packet...
    }
    s->received++;
    return 1;
}
 
/**
 * Update the interarrival jitter estimate (RFC 3550 appendix A.8).
 * Both timestamps are in the stream's RTP clock units; s->jitter is
 * kept scaled by 16 (hence the >> 4 when reported).
 */
static void rtcp_update_jitter(RTPStatistics *s, uint32_t sent_timestamp,
                               uint32_t arrival_timestamp)
{
    // Most of this is pretty straight from RFC 3550 appendix A.8
    uint32_t transit      = arrival_timestamp - sent_timestamp;
    uint32_t prev_transit = s->transit;
    int32_t  d            = transit - prev_transit;
    // Doing the FFABS() call directly on the "transit - prev_transit"
    // expression doesn't work, since it's an unsigned expression. Doing the
    // transit calculation in unsigned is desired though, since it most
    // probably will need to wrap around.
    d = FFABS(d);
    s->transit = transit;
    if (!prev_transit)
        return;  // first packet: no previous transit time to compare against
    /* J(i) = J(i-1) + (|D| - J(i-1)) / 16, computed in fixed point */
    s->jitter += d - (int32_t) ((s->jitter + 8) >> 4);
}
 
/**
 * Build and send an RTCP compound packet (Receiver Report + SDES CNAME)
 * back to the sender, rate-limited by the amount of received data.
 *
 * @param s     RTP demux context holding the receive statistics
 * @param fd    URL context to write through, or NULL to write into avio
 * @param avio  AVIOContext to write into when fd is NULL
 * @param count number of bytes just received (added to octet_count)
 * @return 0 if a report was emitted, -1 if rate-limited or on bad args
 */
int ff_rtp_check_and_send_back_rr(RTPDemuxContext *s, URLContext *fd,
                                  AVIOContext *avio, int count)
{
    AVIOContext *pb;
    uint8_t *buf;
    int len;
    int rtcp_bytes;
    RTPStatistics *stats = &s->statistics;
    uint32_t lost;
    uint32_t extended_max;
    uint32_t expected_interval;
    uint32_t received_interval;
    int32_t  lost_interval;
    uint32_t expected;
    uint32_t fraction;

    if ((!fd && !avio) || (count < 1))
        return -1;

    /* TODO: I think this is way too often; RFC 1889 has algorithm for this */
    /* XXX: MPEG pts hardcoded. RTCP send every 0.5 seconds */
    s->octet_count += count;
    rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
                 RTCP_TX_RATIO_DEN;
    rtcp_bytes /= 50; // mmu_man: that's enough for me... VLC sends much less btw !?
    if (rtcp_bytes < 28)
        return -1;
    s->last_octet_count = s->octet_count;

    if (!fd)
        pb = avio;
    else if (avio_open_dyn_buf(&pb) < 0)
        return -1;

    // Receiver Report
    avio_w8(pb, (RTP_VERSION << 6) + 1); /* 1 report block */
    avio_w8(pb, RTCP_RR);
    avio_wb16(pb, 7); /* length in words - 1 */
    // our own SSRC: we use the server's SSRC + 1 to avoid conflicts
    avio_wb32(pb, s->ssrc + 1);
    avio_wb32(pb, s->ssrc); // server SSRC
    // some placeholders we should really fill...
    // RFC 1889/p64
    /* Loss statistics per RFC 3550 appendix A.3 */
    extended_max      = stats->cycles + stats->max_seq;
    expected          = extended_max - stats->base_seq;
    lost              = expected - stats->received;
    lost              = FFMIN(lost, 0xffffff); // clamp it since it's only 24 bits...
    expected_interval = expected - stats->expected_prior;
    stats->expected_prior = expected;
    received_interval = stats->received - stats->received_prior;
    stats->received_prior = stats->received;
    lost_interval     = expected_interval - received_interval;
    if (expected_interval == 0 || lost_interval <= 0)
        fraction = 0;
    else
        fraction = (lost_interval << 8) / expected_interval;

    fraction = (fraction << 24) | lost;

    avio_wb32(pb, fraction); /* 8 bits of fraction, 24 bits of total packets lost */
    avio_wb32(pb, extended_max); /* max sequence received */
    avio_wb32(pb, stats->jitter >> 4); /* jitter */

    if (s->last_rtcp_ntp_time == AV_NOPTS_VALUE) {
        avio_wb32(pb, 0); /* last SR timestamp */
        avio_wb32(pb, 0); /* delay since last SR */
    } else {
        uint32_t middle_32_bits   = s->last_rtcp_ntp_time >> 16; // this is valid, right? do we need to handle 64 bit values special?
        uint32_t delay_since_last = av_rescale(av_gettime() - s->last_rtcp_reception_time,
                                               65536, AV_TIME_BASE);

        avio_wb32(pb, middle_32_bits); /* last SR timestamp */
        avio_wb32(pb, delay_since_last); /* delay since last SR */
    }

    // CNAME
    avio_w8(pb, (RTP_VERSION << 6) + 1); /* 1 report block */
    avio_w8(pb, RTCP_SDES);
    len = strlen(s->hostname);
    avio_wb16(pb, (7 + len + 3) / 4); /* length in words - 1 */
    avio_wb32(pb, s->ssrc + 1);
    avio_w8(pb, 0x01); /* CNAME item type */
    avio_w8(pb, len);
    avio_write(pb, s->hostname, len);
    avio_w8(pb, 0); /* END */
    // padding to a 32-bit boundary
    for (len = (7 + len) % 4; len % 4; len++)
        avio_w8(pb, 0);

    avio_flush(pb);
    if (!fd)
        return 0;
    len = avio_close_dyn_buf(pb, &buf);
    if ((len > 0) && buf) {
        int av_unused result;
        av_dlog(s->ic, "sending %d bytes of RR\n", len);
        result = ffurl_write(fd, buf, len);
        av_dlog(s->ic, "result from ffurl_write: %d\n", result);
        av_free(buf);
    }
    return 0;
}
 
/* Send a dummy RTP packet and a minimal RTCP RR through rtp_handle to
 * open NAT bindings so that incoming packets can reach us (see the
 * declaration in rtpdec.h). Errors are silently ignored: this is a
 * best-effort operation. */
void ff_rtp_send_punch_packets(URLContext *rtp_handle)
{
    AVIOContext *pb;
    uint8_t *buf;
    int len;

    /* Send a small RTP packet */
    if (avio_open_dyn_buf(&pb) < 0)
        return;

    avio_w8(pb, (RTP_VERSION << 6));
    avio_w8(pb, 0); /* Payload type */
    avio_wb16(pb, 0); /* Seq */
    avio_wb32(pb, 0); /* Timestamp */
    avio_wb32(pb, 0); /* SSRC */

    avio_flush(pb);
    len = avio_close_dyn_buf(pb, &buf);
    if ((len > 0) && buf)
        ffurl_write(rtp_handle, buf, len);
    av_free(buf);

    /* Send a minimal RTCP RR */
    if (avio_open_dyn_buf(&pb) < 0)
        return;

    avio_w8(pb, (RTP_VERSION << 6));
    avio_w8(pb, RTCP_RR); /* receiver report */
    avio_wb16(pb, 1); /* length in words - 1 */
    avio_wb32(pb, 0); /* our own SSRC */

    avio_flush(pb);
    len = avio_close_dyn_buf(pb, &buf);
    if ((len > 0) && buf)
        ffurl_write(rtp_handle, buf, len);
    av_free(buf);
}
 
/**
 * Scan the reorder queue for gaps after the last returned sequence
 * number, in the form used by an RTCP NACK: a base sequence number
 * plus a 16-bit bitmask of further missing packets.
 *
 * @param first_missing set to the first missing sequence number
 * @param missing_mask  bit i-1 set => packet first_missing+i also missing
 * @return 1 if anything is missing, 0 if the queue is empty or the next
 *         expected packet is already queued
 */
static int find_missing_packets(RTPDemuxContext *s, uint16_t *first_missing,
                                uint16_t *missing_mask)
{
    int i;
    uint16_t next_seq = s->seq + 1;
    RTPPacket *pkt = s->queue;

    if (!pkt || pkt->seq == next_seq)
        return 0;

    *missing_mask = 0;
    for (i = 1; i <= 16; i++) {
        uint16_t missing_seq = next_seq + i;
        /* advance in the sorted queue until pkt->seq >= missing_seq,
         * using a signed 16-bit diff so wraparound compares correctly */
        while (pkt) {
            int16_t diff = pkt->seq - missing_seq;
            if (diff >= 0)
                break;
            pkt = pkt->next;
        }
        if (!pkt)
            break;
        if (pkt->seq == missing_seq)
            continue;  // this one is queued, not missing
        *missing_mask |= 1 << (i - 1);
    }

    *first_missing = next_seq;
    return 1;
}
 
/**
 * Send RTCP feedback (RFC 4585): a PLI if the depacketizer requests a
 * keyframe and/or a NACK for packets missing from the reorder queue.
 * Rate-limited to one feedback packet per MIN_FEEDBACK_INTERVAL.
 *
 * @param fd   URL context to write through, or NULL to write into avio
 * @param avio AVIOContext used when fd is NULL
 * @return 0 on success or when nothing needed sending, -1 on bad args
 *         or buffer allocation failure
 */
int ff_rtp_send_rtcp_feedback(RTPDemuxContext *s, URLContext *fd,
                              AVIOContext *avio)
{
    int len, need_keyframe, missing_packets;
    AVIOContext *pb;
    uint8_t *buf;
    int64_t now;
    uint16_t first_missing = 0, missing_mask = 0;

    if (!fd && !avio)
        return -1;

    need_keyframe = s->handler && s->handler->need_keyframe &&
                    s->handler->need_keyframe(s->dynamic_protocol_context);
    missing_packets = find_missing_packets(s, &first_missing, &missing_mask);

    if (!need_keyframe && !missing_packets)
        return 0;

    /* Send new feedback if enough time has elapsed since the last
     * feedback packet. */

    now = av_gettime();
    if (s->last_feedback_time &&
        (now - s->last_feedback_time) < MIN_FEEDBACK_INTERVAL)
        return 0;
    s->last_feedback_time = now;

    if (!fd)
        pb = avio;
    else if (avio_open_dyn_buf(&pb) < 0)
        return -1;

    if (need_keyframe) {
        avio_w8(pb, (RTP_VERSION << 6) | 1); /* PLI */
        avio_w8(pb, RTCP_PSFB);
        avio_wb16(pb, 2); /* length in words - 1 */
        // our own SSRC: we use the server's SSRC + 1 to avoid conflicts
        avio_wb32(pb, s->ssrc + 1);
        avio_wb32(pb, s->ssrc); // server SSRC
    }

    if (missing_packets) {
        avio_w8(pb, (RTP_VERSION << 6) | 1); /* NACK */
        avio_w8(pb, RTCP_RTPFB);
        avio_wb16(pb, 3); /* length in words - 1 */
        avio_wb32(pb, s->ssrc + 1);
        avio_wb32(pb, s->ssrc); // server SSRC

        avio_wb16(pb, first_missing);
        avio_wb16(pb, missing_mask);
    }

    avio_flush(pb);
    if (!fd)
        return 0;
    len = avio_close_dyn_buf(pb, &buf);
    if (len > 0 && buf) {
        ffurl_write(fd, buf, len);
        av_free(buf);
    }
    return 0;
}
 
/**
 * open a new RTP parse context for stream 'st'. 'st' can be NULL for
 * MPEG2-TS streams.
 *
 * @param s1           owning AVFormatContext (used for logging/streams)
 * @param payload_type the single RTP payload type this context accepts
 * @param queue_size   reorder queue depth; <= 1 disables reordering
 * @return newly allocated context, or NULL on allocation failure;
 *         free with ff_rtp_parse_close()
 */
RTPDemuxContext *ff_rtp_parse_open(AVFormatContext *s1, AVStream *st,
                                   int payload_type, int queue_size)
{
    RTPDemuxContext *s;

    s = av_mallocz(sizeof(RTPDemuxContext));
    if (!s)
        return NULL;
    s->payload_type        = payload_type;
    s->last_rtcp_ntp_time  = AV_NOPTS_VALUE;
    s->first_rtcp_ntp_time = AV_NOPTS_VALUE;
    s->ic                  = s1;
    s->st                  = st;
    s->queue_size          = queue_size;
    rtp_init_statistics(&s->statistics, 0);
    if (st) {
        switch (st->codec->codec_id) {
        case AV_CODEC_ID_ADPCM_G722:
            /* According to RFC 3551, the stream clock rate is 8000
             * even if the sample rate is 16000. */
            if (st->codec->sample_rate == 8000)
                st->codec->sample_rate = 16000;
            break;
        default:
            break;
        }
    }
    // needed to send back RTCP RR in RTSP sessions
    gethostname(s->hostname, sizeof(s->hostname));
    return s;
}
 
/* Attach a depacketizer and its private context to this RTP stream.
 * Ownership of ctx stays with the caller (freed via handler->free). */
void ff_rtp_parse_set_dynamic_protocol(RTPDemuxContext *s, PayloadContext *ctx,
                                       RTPDynamicProtocolHandler *handler)
{
    s->dynamic_protocol_context = ctx;
    s->handler                  = handler;
}
 
/* Configure SRTP decryption for incoming packets. Decryption is only
 * enabled if the suite/params were accepted by ff_srtp_set_crypto
 * (which returns 0 on success). */
void ff_rtp_parse_set_crypto(RTPDemuxContext *s, const char *suite,
                             const char *params)
{
    if (!ff_srtp_set_crypto(&s->srtp, suite, params))
        s->srtp_enabled = 1;
}
 
/**
 * This was the second switch in rtp_parse packet.
 * Normalizes time, if required, sets stream_index, etc.
 *
 * Computes pkt->pts from the 32-bit RTP timestamp: when an RTCP SR has
 * been seen and there are multiple streams, the NTP time anchors all
 * streams to a common clock; otherwise timestamps are unwrapped to
 * 64 bits relative to the first one seen. Does nothing if the
 * depacketizer already set a timestamp, or if timestamp is
 * RTP_NOTS_VALUE.
 */
static void finalize_packet(RTPDemuxContext *s, AVPacket *pkt, uint32_t timestamp)
{
    if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE)
        return; /* Timestamp already set by depacketizer */
    if (timestamp == RTP_NOTS_VALUE)
        return;

    if (s->last_rtcp_ntp_time != AV_NOPTS_VALUE && s->ic->nb_streams > 1) {
        int64_t addend;
        int delta_timestamp;

        /* compute pts from timestamp with received ntp_time */
        delta_timestamp = timestamp - s->last_rtcp_timestamp;
        /* convert to the PTS timebase; NTP time is in 32.32 fixed point,
         * hence the << 32 in the divisor */
        addend = av_rescale(s->last_rtcp_ntp_time - s->first_rtcp_ntp_time,
                            s->st->time_base.den,
                            (uint64_t) s->st->time_base.num << 32);
        pkt->pts = s->range_start_offset + s->rtcp_ts_offset + addend +
                   delta_timestamp;
        return;
    }

    if (!s->base_timestamp)
        s->base_timestamp = timestamp;
    /* assume that the difference is INT32_MIN < x < INT32_MAX,
     * but allow the first timestamp to exceed INT32_MAX */
    if (!s->timestamp)
        s->unwrapped_timestamp += timestamp;
    else
        s->unwrapped_timestamp += (int32_t)(timestamp - s->timestamp);
    s->timestamp = timestamp;
    pkt->pts     = s->unwrapped_timestamp + s->range_start_offset -
                   s->base_timestamp;
}
 
/**
 * Parse one raw RTP packet: validate the header, strip padding, CSRC
 * list and header extension, then hand the payload to the registered
 * depacketizer (or copy it verbatim into pkt if there is none), and
 * finally derive the packet timestamp.
 *
 * @return 0 if a packet was produced, >0 if more packets can follow
 *         (depacketizer-dependent), negative on error or when the
 *         packet was rejected
 */
static int rtp_parse_packet_internal(RTPDemuxContext *s, AVPacket *pkt,
                                     const uint8_t *buf, int len)
{
    unsigned int ssrc;
    int payload_type, seq, flags = 0;
    int ext, csrc;
    AVStream *st;
    uint32_t timestamp;
    int rv = 0;

    /* fixed RTP header fields (RFC 3550 section 5.1) */
    csrc         = buf[0] & 0x0f;
    ext          = buf[0] & 0x10;
    payload_type = buf[1] & 0x7f;
    if (buf[1] & 0x80)
        flags |= RTP_FLAG_MARKER;
    seq       = AV_RB16(buf + 2);
    timestamp = AV_RB32(buf + 4);
    ssrc      = AV_RB32(buf + 8);
    /* store the ssrc in the RTPDemuxContext */
    s->ssrc = ssrc;

    /* NOTE: we can handle only one payload type */
    if (s->payload_type != payload_type)
        return -1;

    st = s->st;
    // only do something with this if all the rtp checks pass...
    if (!rtp_valid_packet_in_sequence(&s->statistics, seq)) {
        av_log(st ? st->codec : NULL, AV_LOG_ERROR,
               "RTP: PT=%02x: bad cseq %04x expected=%04x\n",
               payload_type, seq, ((s->seq + 1) & 0xffff));
        return -1;
    }

    if (buf[0] & 0x20) {
        /* padding bit set: last byte gives the padding length */
        int padding = buf[len - 1];
        if (len >= 12 + padding)
            len -= padding;
    }

    s->seq = seq;
    len   -= 12;
    buf   += 12;

    /* skip the CSRC identifier list */
    len -= 4 * csrc;
    buf += 4 * csrc;
    if (len < 0)
        return AVERROR_INVALIDDATA;

    /* RFC 3550 Section 5.3.1 RTP Header Extension handling */
    if (ext) {
        if (len < 4)
            return -1;
        /* calculate the header extension length (stored as number
         * of 32-bit words) */
        ext = (AV_RB16(buf + 2) + 1) << 2;

        if (len < ext)
            return -1;
        // skip past RTP header extension
        len -= ext;
        buf += ext;
    }

    if (s->handler && s->handler->parse_packet) {
        /* custom depacketizer: may buffer, split or merge packets */
        rv = s->handler->parse_packet(s->ic, s->dynamic_protocol_context,
                                      s->st, pkt, &timestamp, buf, len, seq,
                                      flags);
    } else if (st) {
        /* no depacketizer: payload is the packet */
        if ((rv = av_new_packet(pkt, len)) < 0)
            return rv;
        memcpy(pkt->data, buf, len);
        pkt->stream_index = st->index;
    } else {
        return AVERROR(EINVAL);
    }

    // now perform timestamp things....
    finalize_packet(s, pkt, timestamp);

    return rv;
}
 
/**
 * Free all packets buffered in the reorder queue and reset the
 * sequence/queue bookkeeping to its initial state.
 */
void ff_rtp_reset_packet_queue(RTPDemuxContext *s)
{
    RTPPacket *pkt = s->queue;

    while (pkt) {
        RTPPacket *next = pkt->next;
        av_free(pkt->buf);
        av_free(pkt);
        pkt = next;
    }
    s->queue     = NULL;
    s->seq       = 0;
    s->queue_len = 0;
    s->prev_ret  = 0;
}
 
/**
 * Insert a raw RTP packet into the sorted reorder queue, taking
 * ownership of buf.
 *
 * The caller (rtp_parse_one_packet) unconditionally clears its buffer
 * pointer after calling us, so buf must be freed here if it cannot be
 * queued — otherwise it would leak on allocation failure.
 */
static void enqueue_packet(RTPDemuxContext *s, uint8_t *buf, int len)
{
    uint16_t seq = AV_RB16(buf + 2);
    RTPPacket **cur = &s->queue, *packet;

    /* Find the correct place in the queue to insert the packet;
     * the signed 16-bit diff handles sequence-number wraparound. */
    while (*cur) {
        int16_t diff = seq - (*cur)->seq;
        if (diff < 0)
            break;
        cur = &(*cur)->next;
    }

    packet = av_mallocz(sizeof(*packet));
    if (!packet) {
        /* Drop the packet, but free the buffer we own. */
        av_free(buf);
        return;
    }
    packet->recvtime = av_gettime();
    packet->seq      = seq;
    packet->len      = len;
    packet->buf      = buf;
    packet->next     = *cur;
    *cur             = packet;
    s->queue_len++;
}
 
/* Return nonzero if the head of the reorder queue is exactly the next
 * expected sequence number (s->seq + 1, with 16-bit wraparound). */
static int has_next_packet(RTPDemuxContext *s)
{
    if (!s->queue)
        return 0;
    return s->queue->seq == (uint16_t) (s->seq + 1);
}
 
/* Return the receive time of the oldest queued packet, or 0 if the
 * reorder queue is empty. */
int64_t ff_rtp_queued_packet_time(RTPDemuxContext *s)
{
    if (!s->queue)
        return 0;
    return s->queue->recvtime;
}
 
/**
 * Parse and dequeue the oldest packet in the reorder queue, logging a
 * warning if doing so skips over missing sequence numbers.
 *
 * @return result of rtp_parse_packet_internal(), or -1 if the queue
 *         is empty
 */
static int rtp_parse_queued_packet(RTPDemuxContext *s, AVPacket *pkt)
{
    int rv;
    RTPPacket *next;

    if (s->queue_len <= 0)
        return -1;

    if (!has_next_packet(s))
        av_log(s->st ? s->st->codec : NULL, AV_LOG_WARNING,
               "RTP: missed %d packets\n", s->queue->seq - s->seq - 1);

    /* Parse the first packet in the queue, and dequeue it */
    rv   = rtp_parse_packet_internal(s, pkt, s->queue->buf, s->queue->len);
    next = s->queue->next;
    av_free(s->queue->buf);
    av_free(s->queue);
    s->queue = next;
    s->queue_len--;
    return rv;
}
 
/**
 * Core of ff_rtp_parse_packet(): handle one input buffer (or a NULL
 * buffer meaning "return buffered/queued data"), dispatching between
 * RTCP parsing, direct RTP parsing, and the reorder queue.
 *
 * @param bufptr in/out; *bufptr is set to NULL when ownership of the
 *               buffer is transferred to the reorder queue
 * @return 0/positive per rtp_parse_packet_internal, negative when no
 *         packet was produced
 */
static int rtp_parse_one_packet(RTPDemuxContext *s, AVPacket *pkt,
                                uint8_t **bufptr, int len)
{
    uint8_t *buf = bufptr ? *bufptr : NULL;
    int flags = 0;
    uint32_t timestamp;
    int rv = 0;

    if (!buf) {
        /* If parsing of the previous packet actually returned 0 or an error,
         * there's nothing more to be parsed from that packet, but we may have
         * indicated that we can return the next enqueued packet. */
        if (s->prev_ret <= 0)
            return rtp_parse_queued_packet(s, pkt);
        /* return the next packets, if any */
        if (s->handler && s->handler->parse_packet) {
            /* timestamp should be overwritten by parse_packet, if not,
             * the packet is left with pts == AV_NOPTS_VALUE */
            timestamp = RTP_NOTS_VALUE;
            rv = s->handler->parse_packet(s->ic, s->dynamic_protocol_context,
                                          s->st, pkt, &timestamp, NULL, 0, 0,
                                          flags);
            finalize_packet(s, pkt, timestamp);
            return rv;
        }
    }

    if (len < 12)
        return -1;  // shorter than the fixed RTP header

    if ((buf[0] & 0xc0) != (RTP_VERSION << 6))
        return -1;  // not RTP version 2
    if (RTP_PT_IS_RTCP(buf[1])) {
        return rtcp_parse_packet(s, buf, len);
    }

    if (s->st) {
        int64_t received = av_gettime();
        uint32_t arrival_ts = av_rescale_q(received, AV_TIME_BASE_Q,
                                           s->st->time_base);
        timestamp = AV_RB32(buf + 4);
        // Calculate the jitter immediately, before queueing the packet
        // into the reordering queue.
        rtcp_update_jitter(&s->statistics, timestamp, arrival_ts);
    }

    if ((s->seq == 0 && !s->queue) || s->queue_size <= 1) {
        /* First packet, or no reordering */
        return rtp_parse_packet_internal(s, pkt, buf, len);
    } else {
        uint16_t seq = AV_RB16(buf + 2);
        int16_t diff = seq - s->seq;
        if (diff < 0) {
            /* Packet older than the previously emitted one, drop */
            av_log(s->st ? s->st->codec : NULL, AV_LOG_WARNING,
                   "RTP: dropping old packet received too late\n");
            return -1;
        } else if (diff <= 1) {
            /* Correct packet */
            rv = rtp_parse_packet_internal(s, pkt, buf, len);
            return rv;
        } else {
            /* Still missing some packet, enqueue this one. */
            enqueue_packet(s, buf, len);
            *bufptr = NULL;  // queue took ownership of the buffer
            /* Return the first enqueued packet if the queue is full,
             * even if we're missing something */
            if (s->queue_len >= s->queue_size)
                return rtp_parse_queued_packet(s, pkt);
            return -1;
        }
    }
}
 
/**
 * Parse an RTP or RTCP packet directly sent as a buffer.
 * @param s RTP parse context.
 * @param pkt returned packet
 * @param bufptr pointer to the input buffer or NULL to read the next packets
 * @param len buffer len
 * @return 0 if a packet is returned, 1 if a packet is returned and more can follow
 * (use buf as NULL to read the next). -1 if no packet (error or no more packet).
 */
int ff_rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
                        uint8_t **bufptr, int len)
{
    int rv;
    /* decrypt in place when SRTP is configured; len is updated */
    if (s->srtp_enabled && bufptr && ff_srtp_decrypt(&s->srtp, *bufptr, &len) < 0)
        return -1;
    rv = rtp_parse_one_packet(s, pkt, bufptr, len);
    s->prev_ret = rv;
    /* if the depacketizer has nothing buffered (EAGAIN), drain in-order
     * packets from the reorder queue instead */
    while (rv == AVERROR(EAGAIN) && has_next_packet(s))
        rv = rtp_parse_queued_packet(s, pkt);
    return rv ? rv : has_next_packet(s);
}
 
/* Free the RTP parse context, including any queued packets and SRTP
 * state. The dynamic protocol context is NOT freed here (owned by the
 * caller via the handler's free callback). */
void ff_rtp_parse_close(RTPDemuxContext *s)
{
    ff_rtp_reset_packet_queue(s);
    ff_srtp_free(&s->srtp);
    av_free(s);
}
 
/**
 * Parse an SDP "a=fmtp:" line: skip the payload-type token, then invoke
 * parse_fmtp for every "attr=value" pair found on the line.
 *
 * @param p          text after "fmtp:" (starts with the payload type)
 * @param parse_fmtp per-format callback; AVERROR_PATCHWELCOME return
 *                   values are tolerated, other errors abort parsing
 * @return 0 on success, AVERROR(ENOMEM) or the callback's error code
 */
int ff_parse_fmtp(AVStream *stream, PayloadContext *data, const char *p,
                  int (*parse_fmtp)(AVStream *stream,
                                    PayloadContext *data,
                                    char *attr, char *value))
{
    char attr[256];
    char *value;
    int res;
    /* a value can never be longer than the whole remaining line */
    int value_size = strlen(p) + 1;

    if (!(value = av_malloc(value_size))) {
        av_log(NULL, AV_LOG_ERROR, "Failed to allocate data for FMTP.\n");
        return AVERROR(ENOMEM);
    }

    // remove protocol identifier
    while (*p && *p == ' ')
        p++;                     // strip spaces
    while (*p && *p != ' ')
        p++;                     // eat protocol identifier
    while (*p && *p == ' ')
        p++;                     // strip trailing spaces

    while (ff_rtsp_next_attr_and_value(&p,
                                       attr, sizeof(attr),
                                       value, value_size)) {
        res = parse_fmtp(stream, data, attr, value);
        if (res < 0 && res != AVERROR_PATCHWELCOME) {
            av_free(value);
            return res;
        }
    }
    av_free(value);
    return 0;
}
 
/* Close the dynamic buffer and turn its contents into pkt (see
 * rtpdec.h). On success returns the packet size; on failure frees the
 * buffer and returns a negative error code. *dyn_buf is always
 * consumed and set to NULL. */
int ff_rtp_finalize_packet(AVPacket *pkt, AVIOContext **dyn_buf, int stream_idx)
{
    int ret;
    av_init_packet(pkt);

    pkt->size         = avio_close_dyn_buf(*dyn_buf, &pkt->data);
    pkt->stream_index = stream_idx;
    *dyn_buf          = NULL;
    /* wrap the malloced buffer in a refcounted AVBuffer */
    if ((ret = av_packet_from_data(pkt, pkt->data, pkt->size)) < 0) {
        av_freep(&pkt->data);
        return ret;
    }
    return pkt->size;
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec.h
0,0 → 1,215
/*
* RTP demuxer definitions
* Copyright (c) 2002 Fabrice Bellard
* Copyright (c) 2006 Ryan Martell <rdm4@martellventures.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RTPDEC_H
#define AVFORMAT_RTPDEC_H
 
#include "libavcodec/avcodec.h"
#include "avformat.h"
#include "rtp.h"
#include "url.h"
#include "srtp.h"
 
typedef struct PayloadContext PayloadContext;
typedef struct RTPDynamicProtocolHandler RTPDynamicProtocolHandler;
 
#define RTP_MIN_PACKET_LENGTH 12
#define RTP_MAX_PACKET_LENGTH 8192
 
#define RTP_REORDER_QUEUE_DEFAULT_SIZE 10
 
#define RTP_NOTS_VALUE ((uint32_t)-1)
 
typedef struct RTPDemuxContext RTPDemuxContext;
RTPDemuxContext *ff_rtp_parse_open(AVFormatContext *s1, AVStream *st,
int payload_type, int queue_size);
void ff_rtp_parse_set_dynamic_protocol(RTPDemuxContext *s, PayloadContext *ctx,
RTPDynamicProtocolHandler *handler);
void ff_rtp_parse_set_crypto(RTPDemuxContext *s, const char *suite,
const char *params);
int ff_rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
uint8_t **buf, int len);
void ff_rtp_parse_close(RTPDemuxContext *s);
int64_t ff_rtp_queued_packet_time(RTPDemuxContext *s);
void ff_rtp_reset_packet_queue(RTPDemuxContext *s);
 
/**
* Send a dummy packet on both port pairs to set up the connection
* state in potential NAT routers, so that we're able to receive
* packets.
*
* Note, this only works if the NAT router doesn't remap ports. This
* isn't a standardized procedure, but it works in many cases in practice.
*
* The same routine is used with RDT too, even if RDT doesn't use normal
* RTP packets otherwise.
*/
void ff_rtp_send_punch_packets(URLContext* rtp_handle);
 
/**
* some rtp servers assume client is dead if they don't hear from them...
* so we send a Receiver Report to the provided URLContext or AVIOContext
* (we don't have access to the rtcp handle from here)
*/
int ff_rtp_check_and_send_back_rr(RTPDemuxContext *s, URLContext *fd,
AVIOContext *avio, int count);
int ff_rtp_send_rtcp_feedback(RTPDemuxContext *s, URLContext *fd,
AVIOContext *avio);
 
// these statistics are used for rtcp receiver reports...
// (the fields mirror the per-source state of RFC 3550 appendix A.1)
typedef struct RTPStatistics {
    uint16_t max_seq;        ///< highest sequence number seen
    uint32_t cycles;         ///< shifted count of sequence number cycles
    uint32_t base_seq;       ///< base sequence number
    uint32_t bad_seq;        ///< last bad sequence number + 1
    int probation;           ///< sequence packets till source is valid
    uint32_t received;       ///< packets received
    uint32_t expected_prior; ///< packets expected in last interval
    uint32_t received_prior; ///< packets received in last interval
    uint32_t transit;        ///< relative transit time for previous packet
    uint32_t jitter;         ///< estimated jitter, scaled by 16 (fixed point)
} RTPStatistics;
 
#define RTP_FLAG_KEY 0x1 ///< RTP packet contains a keyframe
#define RTP_FLAG_MARKER 0x2 ///< RTP marker bit was set for this packet
/**
* Packet parsing for "private" payloads in the RTP specs.
*
* @param ctx RTSP demuxer context
* @param s stream context
* @param st stream that this packet belongs to
* @param pkt packet in which to write the parsed data
* @param timestamp pointer to the RTP timestamp of the input data, can be
* updated by the function if returning older, buffered data
* @param buf pointer to raw RTP packet data
* @param len length of buf
* @param seq RTP sequence number of the packet
* @param flags flags from the RTP packet header (RTP_FLAG_*)
*/
typedef int (*DynamicPayloadPacketHandlerProc)(AVFormatContext *ctx,
PayloadContext *s,
AVStream *st, AVPacket *pkt,
uint32_t *timestamp,
const uint8_t * buf,
int len, uint16_t seq, int flags);
 
/* Descriptor for one RTP depacketizer; registered in a global linked
 * list and looked up by SDP encoding name or static payload type. */
struct RTPDynamicProtocolHandler {
    const char enc_name[50];     ///< SDP encoding name (matched case-insensitively)
    enum AVMediaType codec_type; ///< audio/video/data media type
    enum AVCodecID codec_id;     ///< codec this handler produces packets for
    int static_payload_id;       /* 0 means no payload id is set. 0 is a valid
                                  * payload ID (PCMU), too, but that format doesn't
                                  * require any custom depacketization code. */

    /** Initialize dynamic protocol handler, called after the full rtpmap line is parsed, may be null */
    int (*init)(AVFormatContext *s, int st_index, PayloadContext *priv_data);
    /** Parse the a= line from the sdp field */
    int (*parse_sdp_a_line)(AVFormatContext *s, int st_index,
                            PayloadContext *priv_data, const char *line);
    /** Allocate any data needed by the rtp parsing for this dynamic data. */
    PayloadContext *(*alloc)(void);
    /** Free any data needed by the rtp parsing for this dynamic data. */
    void (*free)(PayloadContext *protocol_data);
    /** Parse handler for this dynamic packet */
    DynamicPayloadPacketHandlerProc parse_packet;
    /** Query whether the depacketizer wants a new keyframe (for RTCP PLI); may be null */
    int (*need_keyframe)(PayloadContext *context);

    /** next entry in the global registration list */
    struct RTPDynamicProtocolHandler *next;
};
 
/* One raw RTP packet buffered in the reorder queue (sorted by seq). */
typedef struct RTPPacket {
    uint16_t seq;           ///< RTP sequence number
    uint8_t *buf;           ///< raw packet data, owned by this struct
    int len;                ///< length of buf in bytes
    int64_t recvtime;       ///< reception time (av_gettime units)
    struct RTPPacket *next; ///< next packet in ascending-seq order
} RTPPacket;
 
/* Per-stream RTP receive state: header parsing, timestamping,
 * reordering, RTCP statistics and the attached depacketizer. */
struct RTPDemuxContext {
    AVFormatContext *ic;          ///< owning format context
    AVStream *st;                 ///< stream packets belong to (NULL for MPEG-TS)
    int payload_type;             ///< the single accepted RTP payload type
    uint32_t ssrc;                ///< SSRC of the sender (from the last packet)
    uint16_t seq;                 ///< sequence number of the last emitted packet
    uint32_t timestamp;           ///< last 32-bit RTP timestamp seen
    uint32_t base_timestamp;      ///< first timestamp, subtracted from pts
    uint32_t cur_timestamp;
    int64_t unwrapped_timestamp;  ///< 64-bit accumulated (wrap-corrected) timestamp
    int64_t range_start_offset;   ///< pts offset from the RTSP Range: header
    int max_payload_size;
    /* used to send back RTCP RR */
    char hostname[256];           ///< local host name, reported as RTCP CNAME

    int srtp_enabled;             ///< nonzero when packets must be SRTP-decrypted
    struct SRTPContext srtp;

    /** Statistics for this stream (used by RTCP receiver reports) */
    RTPStatistics statistics;

    /** Fields for packet reordering @{ */
    int prev_ret;     ///< The return value of the actual parsing of the previous packet
    RTPPacket* queue; ///< A sorted queue of buffered packets not yet returned
    int queue_len;    ///< The number of packets in queue
    int queue_size;   ///< The size of queue, or 0 if reordering is disabled
    /*@}*/

    /* rtcp sender statistics receive */
    int64_t last_rtcp_ntp_time;       ///< NTP time from the last SR (32.32 fixed point)
    int64_t last_rtcp_reception_time; ///< local time the last SR arrived
    int64_t first_rtcp_ntp_time;      ///< NTP time from the first SR seen
    uint32_t last_rtcp_timestamp;     ///< RTP timestamp paired with last_rtcp_ntp_time
    int64_t rtcp_ts_offset;

    /* rtcp sender statistics */
    unsigned int packet_count;
    unsigned int octet_count;       ///< total payload bytes received
    unsigned int last_octet_count;  ///< octet_count when the last RR was sent
    int64_t last_feedback_time;     ///< rate limiter for RTCP feedback (PLI/NACK)

    /* dynamic payload stuff */
    const RTPDynamicProtocolHandler *handler; ///< depacketizer, may be NULL
    PayloadContext *dynamic_protocol_context; ///< depacketizer private state
};
 
void ff_register_dynamic_payload_handler(RTPDynamicProtocolHandler *handler);
RTPDynamicProtocolHandler *ff_rtp_handler_find_by_name(const char *name,
enum AVMediaType codec_type);
RTPDynamicProtocolHandler *ff_rtp_handler_find_by_id(int id,
enum AVMediaType codec_type);
 
/* from rtsp.c, but used by rtp dynamic protocol handlers. */
int ff_rtsp_next_attr_and_value(const char **p, char *attr, int attr_size,
char *value, int value_size);
 
int ff_parse_fmtp(AVStream *stream, PayloadContext *data, const char *p,
int (*parse_fmtp)(AVStream *stream,
PayloadContext *data,
char *attr, char *value));
 
void av_register_rtp_dynamic_payload_handlers(void);
 
/**
* Close the dynamic buffer and make a packet from it.
*/
int ff_rtp_finalize_packet(AVPacket *pkt, AVIOContext **dyn_buf, int stream_idx);
 
#endif /* AVFORMAT_RTPDEC_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_amr.c
0,0 → 1,209
/*
* RTP AMR Depacketizer, RFC 3267
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "avformat.h"
#include "rtpdec_formats.h"
#include "libavutil/avstring.h"
 
/* Payload size in bytes for each AMR frame type (the FT field of the
 * TOC byte), excluding the TOC byte itself; per RFC 3267. Entries of 0
 * are reserved/no-data frame types. The 5-byte entries are SID
 * (comfort noise) frames. */
static const uint8_t frame_sizes_nb[16] = {
    12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0
};
static const uint8_t frame_sizes_wb[16] = {
    17, 23, 32, 36, 40, 46, 50, 58, 60, 5, 5, 0, 0, 0, 0, 0
};
 
/* AMR depacketizer state; fields are filled from the SDP fmtp line.
 * Only octet-align=1 with crc=0, interleaving=0, channels=1 is
 * actually supported (checked in amr_parse_sdp_line). */
struct PayloadContext {
    int octet_align;  ///< octet-aligned mode (vs. bandwidth-efficient)
    int crc;          ///< frame CRCs present
    int interleaving; ///< frame interleaving enabled
    int channels;     ///< number of audio channels
};
 
static PayloadContext *amr_new_context(void)
{
PayloadContext *data = av_mallocz(sizeof(PayloadContext));
if(!data) return data;
data->channels = 1;
return data;
}
 
/* Free the AMR depacketizer context (no internal allocations to release). */
static void amr_free_context(PayloadContext *data)
{
    av_free(data);
}
 
/**
 * Depacketize one octet-aligned AMR RTP packet (RFC 3267) into the
 * storage format the AMR decoder expects: for each frame, one mode
 * byte (TOC with the F bit cleared) followed by the speech data.
 *
 * @return 0 on success (possibly with a truncated/zero-padded packet
 *         if the speech data was short), AVERROR_INVALIDDATA for bad
 *         codec/channel setup or an empty packet, AVERROR(ENOMEM) on
 *         allocation failure
 */
static int amr_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                             AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                             const uint8_t *buf, int len, uint16_t seq,
                             int flags)
{
    const uint8_t *frame_sizes = NULL;
    int frames;
    int i;
    const uint8_t *speech_data;
    uint8_t *ptr;

    if (st->codec->codec_id == AV_CODEC_ID_AMR_NB) {
        frame_sizes = frame_sizes_nb;
    } else if (st->codec->codec_id == AV_CODEC_ID_AMR_WB) {
        frame_sizes = frame_sizes_wb;
    } else {
        av_log(ctx, AV_LOG_ERROR, "Bad codec ID\n");
        return AVERROR_INVALIDDATA;
    }

    if (st->codec->channels != 1) {
        av_log(ctx, AV_LOG_ERROR, "Only mono AMR is supported\n");
        return AVERROR_INVALIDDATA;
    }
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;

    /* The AMR RTP packet consists of one header byte, followed
     * by one TOC byte for each AMR frame in the packet, followed
     * by the speech data for all the AMR frames.
     *
     * The header byte contains only a codec mode request, for
     * requesting what kind of AMR data the sender wants to
     * receive. Not used at the moment.
     */

    /* Count the number of frames in the packet. The highest bit
     * is set in a TOC byte if there are more frames following.
     */
    for (frames = 1; frames < len && (buf[frames] & 0x80); frames++) ;

    if (1 + frames >= len) {
        /* We hit the end of the packet while counting frames. */
        av_log(ctx, AV_LOG_ERROR, "No speech data found\n");
        return AVERROR_INVALIDDATA;
    }

    speech_data = buf + 1 + frames;

    /* Everything except the codec mode request byte should be output. */
    if (av_new_packet(pkt, len - 1)) {
        av_log(ctx, AV_LOG_ERROR, "Out of memory\n");
        return AVERROR(ENOMEM);
    }
    pkt->stream_index = st->index;
    ptr = pkt->data;

    for (i = 0; i < frames; i++) {
        uint8_t toc = buf[1 + i];
        /* frame size for this frame's mode (FT field, bits 3-6) */
        int frame_size = frame_sizes[(toc >> 3) & 0x0f];

        if (speech_data + frame_size > buf + len) {
            /* Too little speech data */
            av_log(ctx, AV_LOG_WARNING, "Too little speech data in the RTP packet\n");
            /* Set the unwritten part of the packet to zero. */
            memset(ptr, 0, pkt->data + pkt->size - ptr);
            pkt->size = ptr - pkt->data;
            return 0;
        }

        /* Extract the AMR frame mode from the TOC byte */
        *ptr++ = toc & 0x7C;

        /* Copy the speech data */
        memcpy(ptr, speech_data, frame_size);
        speech_data += frame_size;
        ptr += frame_size;
    }

    if (speech_data < buf + len) {
        av_log(ctx, AV_LOG_WARNING, "Too much speech data in the RTP packet?\n");
        /* Set the unwritten part of the packet to zero. */
        memset(ptr, 0, pkt->data + pkt->size - ptr);
        pkt->size = ptr - pkt->data;
    }

    return 0;
}
 
/* fmtp key/value callback for ff_parse_fmtp(): stores the AMR SDP
 * parameters into the payload context. Always returns 0. */
static int amr_parse_fmtp(AVStream *stream, PayloadContext *data,
                          char *attr, char *value)
{
    /* Some AMR SDP configurations contain "octet-align" without the
     * trailing =1; an empty value is therefore interpreted as "1". */
    if (!value[0]) {
        av_log(NULL, AV_LOG_WARNING, "AMR fmtp attribute %s had "
               "nonstandard empty value\n", attr);
        strcpy(value, "1");
    }

    if (!strcmp(attr, "octet-align")) {
        data->octet_align = atoi(value);
    } else if (!strcmp(attr, "crc")) {
        data->crc = atoi(value);
    } else if (!strcmp(attr, "interleaving")) {
        data->interleaving = atoi(value);
    } else if (!strcmp(attr, "channels")) {
        data->channels = atoi(value);
    }
    return 0;
}
 
/**
 * Parse an AMR-related SDP attribute line.
 *
 * Validates the fmtp configuration and rejects everything but
 * octet-aligned, CRC-less, non-interleaved mono, since that is the only
 * mode amr_handle_packet() can depacketize.
 *
 * @return 0 on success or ignored line, -1 on unsupported configuration,
 *         or the error from ff_parse_fmtp().
 */
static int amr_parse_sdp_line(AVFormatContext *s, int st_index,
                              PayloadContext *data, const char *line)
{
    const char *p;
    int ret;

    /* Session-level attribute, not tied to a stream: nothing to do. */
    if (st_index < 0)
        return 0;

    /* Parse an fmtp line this one:
     * a=fmtp:97 octet-align=1; interleaving=0
     * That is, a normal fmtp: line followed by semicolon & space
     * separated key/value pairs.
     */
    if (av_strstart(line, "fmtp:", &p)) {
        ret = ff_parse_fmtp(s->streams[st_index], data, p, amr_parse_fmtp);
        if (!data->octet_align || data->crc ||
            data->interleaving || data->channels != 1) {
            av_log(s, AV_LOG_ERROR, "Unsupported RTP/AMR configuration!\n");
            return -1;
        }
        return ret;
    }
    return 0;
}
 
/* Registration entry for AMR narrowband (8 kHz) dynamic payloads. */
RTPDynamicProtocolHandler ff_amr_nb_dynamic_handler = {
    .enc_name         = "AMR",
    .codec_type       = AVMEDIA_TYPE_AUDIO,
    .codec_id         = AV_CODEC_ID_AMR_NB,
    .parse_sdp_a_line = amr_parse_sdp_line,
    .alloc            = amr_new_context,
    .free             = amr_free_context,
    .parse_packet     = amr_handle_packet,
};

/* Registration entry for AMR wideband (16 kHz) dynamic payloads;
 * shares all callbacks with the narrowband handler. */
RTPDynamicProtocolHandler ff_amr_wb_dynamic_handler = {
    .enc_name         = "AMR-WB",
    .codec_type       = AVMEDIA_TYPE_AUDIO,
    .codec_id         = AV_CODEC_ID_AMR_WB,
    .parse_sdp_a_line = amr_parse_sdp_line,
    .alloc            = amr_new_context,
    .free             = amr_free_context,
    .parse_packet     = amr_handle_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_asf.c
0,0 → 1,307
/*
* Microsoft RTP/ASF support.
* Copyright (c) 2008 Ronald S. Bultje
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief Microsoft RTP/ASF support
* @author Ronald S. Bultje <rbultje@ronald.bitfreak.net>
*/
 
#include "libavutil/base64.h"
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "rtp.h"
#include "rtpdec_formats.h"
#include "rtsp.h"
#include "asf.h"
#include "avio_internal.h"
#include "internal.h"
 
/**
 * From MSDN 2.2.1.4, we learn that ASF data packets over RTP should not
 * contain any padding. Unfortunately, the header min/max_pktsize are not
 * updated (thus making min_pktsize invalid). Here, we "fix" these faulty
 * min_pktsize values in the ASF file header by zeroing min_pktsize when
 * it equals max_pktsize.
 *
 * @param buf decoded ASF file header bytes (modified in place)
 * @param len number of bytes in buf
 * @return 0 on success, <0 on failure (currently -1).
 */
static int rtp_asf_fix_header(uint8_t *buf, int len)
{
    uint8_t *p = buf, *end = buf + len;

    /* Must start with the top-level ASF header GUID and be large enough
     * to hold at least one sub-object header. */
    if (len < sizeof(ff_asf_guid) * 2 + 22 ||
        memcmp(p, ff_asf_header, sizeof(ff_asf_guid))) {
        return -1;
    }
    /* Skip the header object GUID, size and object count/reserved bytes. */
    p += sizeof(ff_asf_guid) + 14;
    do {
        /* Each sub-object: 16-byte GUID followed by a LE64 chunk size. */
        uint64_t chunksize = AV_RL64(p + sizeof(ff_asf_guid));
        if (memcmp(p, ff_asf_file_header, sizeof(ff_asf_guid))) {
            /* Not the file header object: skip it (bounds-checked). */
            if (chunksize > end - p)
                return -1;
            p += chunksize;
            continue;
        }

        /* skip most of the file header, to min_pktsize */
        p += 6 * 8 + 3 * 4 + sizeof(ff_asf_guid) * 2;
        if (p + 8 <= end && AV_RL32(p) == AV_RL32(p + 4)) {
            /* min_pktsize == max_pktsize: the faulty case — set min to 0 */
            AV_WL32(p, 0);
            return 0;
        }
        break;
    } while (end - p >= sizeof(ff_asf_guid) + 8);

    return -1;
}
 
/**
 * Read callback for the buffered AVIOContext below. It never produces
 * data: once the preloaded buffer is drained it reports EAGAIN (instead
 * of 0/EOF) so the ASF demuxer returns safely at packet boundaries and
 * can resume when more RTP data arrives.
 */
static int packetizer_read(void *opaque, uint8_t *buf, int buf_size)
{
    return AVERROR(EAGAIN);
}
 
/* Set up pb as a read-only AVIOContext whose buffer is pre-filled with
 * buf[0..len); reads beyond it hit packetizer_read() and get EAGAIN. */
static void init_packetizer(AVIOContext *pb, uint8_t *buf, int len)
{
    ffio_init_context(pb, buf, len, 0, NULL, packetizer_read, NULL, NULL);

    /* this "fills" the buffer with its current content */
    pb->pos     = len;
    pb->buf_end = buf + len;
}
 
/**
 * Parse a Windows Media Server-specific SDP line carrying a
 * base64-encoded ASF file header
 * ("pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,...").
 *
 * Decodes the header, patches its faulty min_pktsize field and opens an
 * internal ASF demuxer (rt->asf_ctx) on the decoded buffer so per-stream
 * codec parameters become available to asfrtp_parse_sdp_line().
 *
 * Fixes vs. previous revision: the av_mallocz() result is now checked
 * before use, and `buf` is freed on the early-error paths (it previously
 * leaked when avformat_alloc_context() or avformat_open_input() failed).
 *
 * @param s RTSP demux context
 * @return 0 on success or when the line is not WMS-specific, <0 on error
 */
int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
{
    int ret = 0;
    if (av_strstart(p, "pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,", &p)) {
        AVIOContext pb;
        RTSPState *rt = s->priv_data;
        AVDictionary *opts = NULL;
        /* base64 encodes 6 bits per character -> decoded size bound */
        int len = strlen(p) * 6 / 8;
        char *buf = av_mallocz(len);
        if (!buf)
            return AVERROR(ENOMEM);
        av_base64_decode(buf, p, len);

        if (rtp_asf_fix_header(buf, len) < 0)
            av_log(s, AV_LOG_ERROR,
                   "Failed to fix invalid RTSP-MS/ASF min_pktsize\n");
        init_packetizer(&pb, buf, len);
        if (rt->asf_ctx) {
            avformat_close_input(&rt->asf_ctx);
        }
        if (!(rt->asf_ctx = avformat_alloc_context())) {
            av_free(buf);
            return AVERROR(ENOMEM);
        }
        rt->asf_ctx->pb = &pb;
        /* The buffer holds exactly one ASF header; do not resync-scan. */
        av_dict_set(&opts, "no_resync_search", "1", 0);
        ret = avformat_open_input(&rt->asf_ctx, "", &ff_asf_demuxer, &opts);
        av_dict_free(&opts);
        if (ret < 0) {
            av_free(buf);
            return ret;
        }
        av_dict_copy(&s->metadata, rt->asf_ctx->metadata, 0);
        rt->asf_pb_pos = avio_tell(&pb);
        /* The demuxer has consumed the header; drop our temporary buffer
         * and detach the stack AVIOContext before it goes out of scope. */
        av_free(buf);
        rt->asf_ctx->pb = NULL;
    }
    return ret;
}
 
/**
 * Handle the WMS "stream:<id>" SDP line: binds the RTP stream to the
 * matching stream of the embedded ASF demuxer (rt->asf_ctx) and copies
 * its codec parameters.
 */
static int asfrtp_parse_sdp_line(AVFormatContext *s, int stream_index,
                                 PayloadContext *asf, const char *line)
{
    if (stream_index < 0)
        return 0;
    if (av_strstart(line, "stream:", &line)) {
        RTSPState *rt = s->priv_data;

        s->streams[stream_index]->id = strtol(line, NULL, 10);

        if (rt->asf_ctx) {
            int i;

            for (i = 0; i < rt->asf_ctx->nb_streams; i++) {
                if (s->streams[stream_index]->id == rt->asf_ctx->streams[i]->id) {
                    /* Shallow struct copy transfers extradata ownership to
                     * the RTSP stream; clear it in the source codec context
                     * so it is not freed twice. */
                    *s->streams[stream_index]->codec =
                        *rt->asf_ctx->streams[i]->codec;
                    rt->asf_ctx->streams[i]->codec->extradata_size = 0;
                    rt->asf_ctx->streams[i]->codec->extradata = NULL;
                    /* ASF timestamps are in milliseconds. */
                    avpriv_set_pts_info(s->streams[stream_index], 32, 1, 1000);
                }
            }
        }
    }

    return 0;
}
 
/* Per-stream reassembly state for RTP/ASF payloads. */
struct PayloadContext {
    AVIOContext *pktbuf, pb; /* pktbuf: dynamic buffer collecting one ASF
                              * packet split over several RTP packets;
                              * pb: fixed-buffer context fed to the
                              * embedded ASF demuxer */
    uint8_t *buf;            /* reassembled ASF packet data (owned here) */
};
 
/**
 * Depacketize one RTP/ASF packet: parse the per-payload headers, splice
 * the payload fragments back into complete ASF packets, then let the
 * embedded ASF demuxer pull AVPackets out of the reassembled data.
 *
 * @return 0 when a packet was written into \p pkt, and no more data is left;
 *         1 when a packet was written into \p pkt, and more packets might be left;
 *         <0 when not enough data was provided to return a full packet, or on error.
 */
static int asfrtp_parse_packet(AVFormatContext *s, PayloadContext *asf,
                               AVStream *st, AVPacket *pkt,
                               uint32_t *timestamp,
                               const uint8_t *buf, int len, uint16_t seq,
                               int flags)
{
    AVIOContext *pb = &asf->pb;
    int res, mflags, len_off;
    RTSPState *rt = s->priv_data;

    /* The embedded ASF demuxer must have been set up from the SDP. */
    if (!rt->asf_ctx)
        return -1;

    if (len > 0) {
        int off, out_len = 0;

        if (len < 4)
            return -1;

        av_freep(&asf->buf);

        ffio_init_context(pb, buf, len, 0, NULL, NULL, NULL, NULL);

        /* Walk all payload headers in this RTP packet. */
        while (avio_tell(pb) + 4 < len) {
            int start_off = avio_tell(pb);

            mflags = avio_r8(pb);
            if (mflags & 0x80)
                flags |= RTP_FLAG_KEY;
            len_off = avio_rb24(pb);
            if (mflags & 0x20)   /**< relative timestamp */
                avio_skip(pb, 4);
            if (mflags & 0x10)   /**< has duration */
                avio_skip(pb, 4);
            if (mflags & 0x8)    /**< has location ID */
                avio_skip(pb, 4);
            off = avio_tell(pb);

            if (!(mflags & 0x40)) {
                /**
                 * If 0x40 is not set, the len_off field specifies an offset
                 * of this packet's payload data in the complete (reassembled)
                 * ASF packet. This is used to spread one ASF packet over
                 * multiple RTP packets.
                 */
                if (asf->pktbuf && len_off != avio_tell(asf->pktbuf)) {
                    /* Offset mismatch: lost packets — drop the partial data. */
                    uint8_t *p;
                    avio_close_dyn_buf(asf->pktbuf, &p);
                    asf->pktbuf = NULL;
                    av_free(p);
                }
                if (!len_off && !asf->pktbuf &&
                    (res = avio_open_dyn_buf(&asf->pktbuf)) < 0)
                    return res;
                if (!asf->pktbuf)
                    return AVERROR(EIO);

                avio_write(asf->pktbuf, buf + off, len - off);
                avio_skip(pb, len - off);
                if (!(flags & RTP_FLAG_MARKER))
                    /* Not the last fragment yet: wait for more RTP packets. */
                    return -1;
                out_len     = avio_close_dyn_buf(asf->pktbuf, &asf->buf);
                asf->pktbuf = NULL;
            } else {
                /**
                 * If 0x40 is set, the len_off field specifies the length of
                 * the next ASF packet that can be read from this payload
                 * data alone. This is commonly the same as the payload size,
                 * but could be less in case of packet splitting (i.e.
                 * multiple ASF packets in one RTP packet).
                 */

                int cur_len = start_off + len_off - off;
                int prev_len = out_len;
                out_len += cur_len;
                if (FFMIN(cur_len, len - off) < 0)
                    return -1;
                if ((res = av_reallocp(&asf->buf, out_len)) < 0)
                    return res;
                memcpy(asf->buf + prev_len, buf + off,
                       FFMIN(cur_len, len - off));
                avio_skip(pb, cur_len);
            }
        }

        /* Hand the reassembled ASF data to the embedded demuxer. */
        init_packetizer(pb, asf->buf, out_len);
        pb->pos += rt->asf_pb_pos;
        pb->eof_reached = 0;
        rt->asf_ctx->pb = pb;
    }

    /* Pull demuxed packets and map ASF stream ids back to RTSP streams. */
    for (;;) {
        int i;

        res = ff_read_packet(rt->asf_ctx, pkt);
        rt->asf_pb_pos = avio_tell(pb);
        if (res != 0)
            break;
        for (i = 0; i < s->nb_streams; i++) {
            if (s->streams[i]->id == rt->asf_ctx->streams[pkt->stream_index]->id) {
                pkt->stream_index = i;
                return 1; // FIXME: return 0 if last packet
            }
        }
        /* Packet belongs to no exposed stream: discard and keep reading. */
        av_free_packet(pkt);
    }

    return res == 1 ? -1 : res;
}
 
/* Allocate a zeroed RTP/ASF payload context (no buffered data yet). */
static PayloadContext *asfrtp_new_context(void)
{
    PayloadContext *asf = av_mallocz(sizeof(*asf));

    return asf;
}
 
/* Free an RTP/ASF payload context, discarding any partially
 * reassembled ASF packet data it still holds. */
static void asfrtp_free_context(PayloadContext *asf)
{
    if (asf->pktbuf) {
        uint8_t *tmp = NULL;
        avio_close_dyn_buf(asf->pktbuf, &tmp);
        av_free(tmp);
        asf->pktbuf = NULL;
    }
    av_freep(&asf->buf);
    av_free(asf);
}
 
/* Declare one RTP/ASF dynamic-payload handler: n is the identifier
 * suffix, s the SDP encoding name, t the AVMediaType. Audio and video
 * share the same callbacks; demultiplexing happens via the embedded
 * ASF demuxer. */
#define RTP_ASF_HANDLER(n, s, t) \
RTPDynamicProtocolHandler ff_ms_rtp_ ## n ## _handler = { \
    .enc_name         = s, \
    .codec_type       = t, \
    .codec_id         = AV_CODEC_ID_NONE, \
    .parse_sdp_a_line = asfrtp_parse_sdp_line, \
    .alloc            = asfrtp_new_context, \
    .free             = asfrtp_free_context, \
    .parse_packet     = asfrtp_parse_packet, \
}

/* Both "x-asf-pf" payload types (video and audio framing). */
RTP_ASF_HANDLER(asf_pfv, "x-asf-pf", AVMEDIA_TYPE_VIDEO);
RTP_ASF_HANDLER(asf_pfa, "x-asf-pf", AVMEDIA_TYPE_AUDIO);
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_formats.h
0,0 → 1,69
/*
* RTP depacketizer declarations
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RTPDEC_FORMATS_H
#define AVFORMAT_RTPDEC_FORMATS_H

#include "rtpdec.h"

/**
 * Parse a Windows Media Server-specific SDP line
 *
 * @param s RTSP demux context
 */
int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p);

/* RFC 4629 H.263 depacketizer; exported so the RFC 2190 depacketizer
 * can fall through to it when it detects new-style payloads. */
int ff_h263_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                          AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                          const uint8_t *buf, int len, uint16_t seq, int flags);

/* Depacketizer handler table entries, one per supported RTP payload
 * format; each is defined in its rtpdec_*.c file and registered at
 * runtime. */
extern RTPDynamicProtocolHandler ff_amr_nb_dynamic_handler;
extern RTPDynamicProtocolHandler ff_amr_wb_dynamic_handler;
extern RTPDynamicProtocolHandler ff_g726_16_dynamic_handler;
extern RTPDynamicProtocolHandler ff_g726_24_dynamic_handler;
extern RTPDynamicProtocolHandler ff_g726_32_dynamic_handler;
extern RTPDynamicProtocolHandler ff_g726_40_dynamic_handler;
extern RTPDynamicProtocolHandler ff_h263_1998_dynamic_handler;
extern RTPDynamicProtocolHandler ff_h263_2000_dynamic_handler;
extern RTPDynamicProtocolHandler ff_h263_rfc2190_dynamic_handler;
extern RTPDynamicProtocolHandler ff_h264_dynamic_handler;
extern RTPDynamicProtocolHandler ff_ilbc_dynamic_handler;
extern RTPDynamicProtocolHandler ff_jpeg_dynamic_handler;
extern RTPDynamicProtocolHandler ff_mp4a_latm_dynamic_handler;
extern RTPDynamicProtocolHandler ff_mp4v_es_dynamic_handler;
extern RTPDynamicProtocolHandler ff_mpeg_audio_dynamic_handler;
extern RTPDynamicProtocolHandler ff_mpeg_video_dynamic_handler;
extern RTPDynamicProtocolHandler ff_mpeg4_generic_dynamic_handler;
extern RTPDynamicProtocolHandler ff_mpegts_dynamic_handler;
extern RTPDynamicProtocolHandler ff_ms_rtp_asf_pfa_handler;
extern RTPDynamicProtocolHandler ff_ms_rtp_asf_pfv_handler;
extern RTPDynamicProtocolHandler ff_qcelp_dynamic_handler;
extern RTPDynamicProtocolHandler ff_qdm2_dynamic_handler;
extern RTPDynamicProtocolHandler ff_qt_rtp_aud_handler;
extern RTPDynamicProtocolHandler ff_qt_rtp_vid_handler;
extern RTPDynamicProtocolHandler ff_quicktime_rtp_aud_handler;
extern RTPDynamicProtocolHandler ff_quicktime_rtp_vid_handler;
extern RTPDynamicProtocolHandler ff_svq3_dynamic_handler;
extern RTPDynamicProtocolHandler ff_theora_dynamic_handler;
extern RTPDynamicProtocolHandler ff_vorbis_dynamic_handler;
extern RTPDynamicProtocolHandler ff_vp8_dynamic_handler;

#endif /* AVFORMAT_RTPDEC_FORMATS_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_g726.c
0,0 → 1,48
/*
* Copyright (c) 2011 Miroslav Slugeň <Thunder.m@seznam.cz>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/attributes.h"
#include "avformat.h"
#include "rtpdec_formats.h"
 
/* Declare an init function and handler for one G.726 bitrate variant.
 * The init hook derives bits_per_coded_sample from the bitrate in
 * kbit/s (G.726 always codes 8000 samples/s, so kbit/s / 8 = bits per
 * sample) and the bit_rate from the stream's sample rate. */
#define RTP_G726_HANDLER(bitrate) \
static av_cold int g726_ ## bitrate ##_init(AVFormatContext *s, int st_index, \
                                            PayloadContext *data) \
{ \
    AVStream *stream = s->streams[st_index]; \
    AVCodecContext *codec = stream->codec; \
\
    codec->bits_per_coded_sample = bitrate/8; \
    codec->bit_rate = codec->bits_per_coded_sample * codec->sample_rate; \
\
    return 0; \
} \
\
RTPDynamicProtocolHandler ff_g726_ ## bitrate ## _dynamic_handler = { \
    .enc_name   = "G726-" #bitrate, \
    .codec_type = AVMEDIA_TYPE_AUDIO, \
    .codec_id   = AV_CODEC_ID_ADPCM_G726, \
    .init       = g726_ ## bitrate ## _init, \
}

/* Instantiate the 16/24/32/40 kbit/s variants. */
RTP_G726_HANDLER(16);
RTP_G726_HANDLER(24);
RTP_G726_HANDLER(32);
RTP_G726_HANDLER(40);
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_h263.c
0,0 → 1,115
/*
* RTP H.263 Depacketizer, RFC 4629
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rtpdec_formats.h"
#include "libavutil/attributes.h"
#include "libavutil/intreadwrite.h"
 
/* Stream init: request full parsing so the H.263 parser can mark frame
 * boundaries and keyframes in the depacketized stream. */
static av_cold int h263_init(AVFormatContext *ctx, int st_index,
                             PayloadContext *data)
{
    if (st_index >= 0)
        ctx->streams[st_index]->need_parsing = AVSTREAM_PARSE_FULL;
    return 0;
}
 
/**
 * Depacketize one H.263 RTP packet per RFC 4629: strip the 16-bit payload
 * header (plus optional VRC byte and extra picture header) and, when the
 * P bit is set, prepend the two zero bytes elided by the packetizer.
 *
 * @return 0 on success, negative AVERROR on truncated input or OOM.
 */
int ff_h263_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                          AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                          const uint8_t *buf, int len, uint16_t seq, int flags)
{
    uint8_t *ptr;
    uint16_t header;
    int startcode, vrc, picture_header;

    if (len < 2) {
        av_log(ctx, AV_LOG_ERROR, "Too short H.263 RTP packet\n");
        return AVERROR_INVALIDDATA;
    }

    /* Decode the 16 bit H.263+ payload header, as described in section
     * 5.1 of RFC 4629. The fields of this header are:
     * - 5 reserved bits, should be ignored.
     * - One bit (P, startcode), indicating a picture start, picture segment
     *   start or video sequence end. If set, two zero bytes should be
     *   prepended to the payload.
     * - One bit (V, vrc), indicating the presence of an 8 bit Video
     *   Redundancy Coding field after this 16 bit header.
     * - 6 bits (PLEN, picture_header), the length (in bytes) of an extra
     *   picture header, following the VRC field.
     * - 3 bits (PEBIT), the number of bits to ignore of the last byte
     *   of the extra picture header. (Not used at the moment.)
     */
    header = AV_RB16(buf);
    /* Note: >> 9 (not >> 10) makes startcode 0 or 2, which doubles as the
     * number of zero bytes to prepend below. */
    startcode      = (header & 0x0400) >> 9;
    vrc            =  header & 0x0200;
    picture_header = (header & 0x01f8) >> 3;
    buf += 2;
    len -= 2;

    if (vrc) {
        /* Skip VRC header if present, not used at the moment. */
        buf += 1;
        len -= 1;
    }
    if (picture_header) {
        /* Skip extra picture header if present, not used at the moment. */
        buf += picture_header;
        len -= picture_header;
    }

    if (len < 0) {
        av_log(ctx, AV_LOG_ERROR, "Too short H.263 RTP packet\n");
        return AVERROR_INVALIDDATA;
    }

    if (av_new_packet(pkt, len + startcode)) {
        av_log(ctx, AV_LOG_ERROR, "Out of memory\n");
        return AVERROR(ENOMEM);
    }
    pkt->stream_index = st->index;
    ptr = pkt->data;

    /* Restore the two-byte picture start code elided by the sender. */
    if (startcode) {
        *ptr++ = 0;
        *ptr++ = 0;
    }
    memcpy(ptr, buf, len);

    return 0;
}
 
/* RFC 4629 handler registrations; "H263-1998" and "H263-2000" share the
 * same wire format and therefore the same callbacks. */
RTPDynamicProtocolHandler ff_h263_1998_dynamic_handler = {
    .enc_name     = "H263-1998",
    .codec_type   = AVMEDIA_TYPE_VIDEO,
    .codec_id     = AV_CODEC_ID_H263,
    .init         = h263_init,
    .parse_packet = ff_h263_handle_packet,
};

RTPDynamicProtocolHandler ff_h263_2000_dynamic_handler = {
    .enc_name     = "H263-2000",
    .codec_type   = AVMEDIA_TYPE_VIDEO,
    .codec_id     = AV_CODEC_ID_H263,
    .init         = h263_init,
    .parse_packet = ff_h263_handle_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_h263_rfc2190.c
0,0 → 1,215
/*
* RTP H.263 Depacketizer, RFC 2190
* Copyright (c) 2012 Martin Storsjo
* Based on the GStreamer H.263 Depayloder:
* Copyright 2005 Wim Taymans
* Copyright 2007 Edward Hervey
* Copyright 2007 Nokia Corporation
* Copyright 2007 Collabora Ltd, Philippe Kalaf
* Copyright 2010 Mark Nauwelaerts
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rtpdec_formats.h"
#include "libavutil/attributes.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/get_bits.h"
 
/* Reassembly state for RFC 2190 H.263: fragments may start/end mid-byte,
 * so a partially filled trailing byte is carried between packets. */
struct PayloadContext {
    AVIOContext *buf;   /* dynamic buffer accumulating the current frame */
    uint8_t endbyte;    /* partially written trailing byte (MSB-aligned) */
    int endbyte_bits;   /* number of valid bits in endbyte */
    uint32_t timestamp; /* RTP timestamp of the frame being assembled */
    int newformat;      /* set once payload is detected as RFC 2429/4629 */
};
 
/* Allocate a zeroed RFC 2190 H.263 reassembly context. */
static PayloadContext *h263_new_context(void)
{
    PayloadContext *data = av_mallocz(sizeof(*data));

    return data;
}
 
/* Free the reassembly context, dropping any unfinished buffered frame. */
static void h263_free_context(PayloadContext *data)
{
    if (!data)
        return;
    if (data->buf) {
        uint8_t *tmp;
        avio_close_dyn_buf(data->buf, &tmp);
        av_free(tmp);
    }
    av_free(data);
}
 
/* Stream init: enable full parsing for frame/keyframe detection. */
static av_cold int h263_init(AVFormatContext *ctx, int st_index, PayloadContext *data)
{
    if (st_index >= 0)
        ctx->streams[st_index]->need_parsing = AVSTREAM_PARSE_FULL;
    return 0;
}
 
/**
 * Depacketize one RFC 2190 H.263 RTP packet (Mode A/B/C headers), with
 * bit-level splicing of fragments that start/end mid-byte (SBIT/EBIT).
 * Falls back to the RFC 2429/4629 depacketizer permanently once the
 * payload is detected as new-format despite the static payload type.
 *
 * @return 0 when a complete frame was written to pkt, AVERROR(EAGAIN)
 *         while waiting for more fragments, negative AVERROR on error.
 */
static int h263_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                              AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                              const uint8_t *buf, int len, uint16_t seq,
                              int flags)
{
    /* Corresponding to header fields in the RFC */
    int f, p, i, sbit, ebit, src, r;
    int header_size, ret;

    if (data->newformat)
        return ff_h263_handle_packet(ctx, data, st, pkt, timestamp, buf, len,
                                     seq, flags);

    if (data->buf && data->timestamp != *timestamp) {
        /* Dropping old buffered, unfinished data */
        uint8_t *p;
        avio_close_dyn_buf(data->buf, &p);
        av_free(p);
        data->buf = NULL;
    }

    if (len < 4) {
        av_log(ctx, AV_LOG_ERROR, "Too short H.263 RTP packet: %d\n", len);
        return AVERROR_INVALIDDATA;
    }

    /* F and P bits select among the three RFC 2190 header modes. */
    f = buf[0] & 0x80;
    p = buf[0] & 0x40;
    if (!f) {
        /* Mode A */
        header_size = 4;
        i = buf[1] & 0x10;
        r = ((buf[1] & 0x01) << 3) | ((buf[2] & 0xe0) >> 5);
    } else if (!p) {
        /* Mode B */
        header_size = 8;
        if (len < header_size) {
            av_log(ctx, AV_LOG_ERROR,
                   "Too short H.263 RTP packet: %d bytes, %d header bytes\n",
                   len, header_size);
            return AVERROR_INVALIDDATA;
        }
        r = buf[3] & 0x03;
        i = buf[4] & 0x80;
    } else {
        /* Mode C */
        header_size = 12;
        if (len < header_size) {
            av_log(ctx, AV_LOG_ERROR,
                   "Too short H.263 RTP packet: %d bytes, %d header bytes\n",
                   len, header_size);
            return AVERROR_INVALIDDATA;
        }
        r = buf[3] & 0x03;
        i = buf[4] & 0x80;
    }
    sbit = (buf[0] >> 3) & 0x7;
    ebit =  buf[0]       & 0x7;
    src  = (buf[1] & 0xe0) >> 5;
    if (!(buf[0] & 0xf8)) { /* Reserved bits in RFC 2429/4629 are zero */
        if ((src == 0 || src >= 6) && r) {
            /* Invalid src for this format, and bits that should be zero
             * according to RFC 2190 aren't zero. */
            av_log(ctx, AV_LOG_WARNING,
                   "Interpreting H263 RTP data as RFC 2429/4629 even though "
                   "signalled with a static payload type.\n");
            data->newformat = 1;
            return ff_h263_handle_packet(ctx, data, st, pkt, timestamp, buf,
                                         len, seq, flags);
        }
    }

    buf += header_size;
    len -= header_size;

    if (!data->buf) {
        /* Check the picture start code, only start buffering a new frame
         * if this is correct */
        if (len > 4 && AV_RB32(buf) >> 10 == 0x20) {
            ret = avio_open_dyn_buf(&data->buf);
            if (ret < 0)
                return ret;
            data->timestamp = *timestamp;
        } else {
            /* Frame not started yet, skipping */
            return AVERROR(EAGAIN);
        }
    }

    if (data->endbyte_bits || sbit) {
        if (data->endbyte_bits == sbit) {
            /* Fast path: the fragment's skipped start bits exactly fill
             * the pending end byte — merge and flush it. */
            data->endbyte |= buf[0] & (0xff >> sbit);
            data->endbyte_bits = 0;
            buf++;
            len--;
            avio_w8(data->buf, data->endbyte);
        } else {
            /* Start/end skip bits not matching - missed packets? */
            GetBitContext gb;
            init_get_bits(&gb, buf, len*8 - ebit);
            skip_bits(&gb, sbit);
            if (data->endbyte_bits) {
                data->endbyte |= get_bits(&gb, 8 - data->endbyte_bits);
                avio_w8(data->buf, data->endbyte);
            }
            while (get_bits_left(&gb) >= 8)
                avio_w8(data->buf, get_bits(&gb, 8));
            data->endbyte_bits = get_bits_left(&gb);
            if (data->endbyte_bits)
                data->endbyte = get_bits(&gb, data->endbyte_bits) <<
                                (8 - data->endbyte_bits);
            /* All input consumed via the bit reader. */
            ebit = 0;
            len = 0;
        }
    }
    if (ebit) {
        /* Last byte is partial: stash it in endbyte for the next fragment. */
        if (len > 0)
            avio_write(data->buf, buf, len - 1);
        data->endbyte_bits = 8 - ebit;
        data->endbyte = buf[len - 1] & (0xff << ebit);
    } else {
        avio_write(data->buf, buf, len);
    }

    if (!(flags & RTP_FLAG_MARKER))
        return AVERROR(EAGAIN);

    if (data->endbyte_bits)
        avio_w8(data->buf, data->endbyte);
    data->endbyte_bits = 0;

    ret = ff_rtp_finalize_packet(pkt, &data->buf, st->index);
    if (ret < 0)
        return ret;
    /* I bit clear means intra-coded picture. */
    if (!i)
        pkt->flags |= AV_PKT_FLAG_KEY;

    return 0;
}
 
/* RFC 2190 handler: bound to static RTP payload type 34 (no enc_name,
 * so it is matched by payload id rather than SDP encoding name). */
RTPDynamicProtocolHandler ff_h263_rfc2190_dynamic_handler = {
    .codec_type        = AVMEDIA_TYPE_VIDEO,
    .codec_id          = AV_CODEC_ID_H263,
    .init              = h263_init,
    .parse_packet      = h263_handle_packet,
    .alloc             = h263_new_context,
    .free              = h263_free_context,
    .static_payload_id = 34,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_h264.c
0,0 → 1,401
/*
* RTP H264 Protocol (RFC3984)
* Copyright (c) 2006 Ryan Martell
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief H.264 / RTP Code (RFC3984)
* @author Ryan Martell <rdm4@martellventures.com>
*
* @note Notes:
* Notes:
* This currently supports packetization mode:
* Single Nal Unit Mode (0), or
* Non-Interleaved Mode (1). It currently does not support
* Interleaved Mode (2). (This requires implementing STAP-B, MTAP16, MTAP24,
* FU-B packet types)
*/
 
#include "libavutil/attributes.h"
#include "libavutil/base64.h"
#include "libavutil/avstring.h"
#include "libavcodec/get_bits.h"
#include "avformat.h"
 
#include "network.h"
#include <assert.h>
 
#include "rtpdec.h"
#include "rtpdec_formats.h"
 
/* Per-stream H.264 depacketizer state, filled from SDP fmtp parameters. */
struct PayloadContext {
    // sdp setup parameters
    uint8_t profile_idc;    /* from profile-level-id (hex byte 1) */
    uint8_t profile_iop;    /* from profile-level-id (hex byte 2) */
    uint8_t level_idc;      /* from profile-level-id (hex byte 3) */
    int packetization_mode; /* 0 = single NAL, 1 = non-interleaved; 2 unsupported */
#ifdef DEBUG
    int packet_types_received[32]; /* per-NAL-type counters, dumped at free */
#endif
};
 
/* In DEBUG builds, tally received NAL types per context; otherwise a no-op. */
#ifdef DEBUG
#define COUNT_NAL_TYPE(data, nal) data->packet_types_received[(nal) & 0x1f]++
#else
#define COUNT_NAL_TYPE(data, nal) do { } while (0)
#endif

/* Annex B start code prepended to each NAL unit emitted to the decoder. */
static const uint8_t start_sequence[] = { 0, 0, 0, 1 };
 
/**
 * fmtp key/value callback for H.264: handles "packetization-mode",
 * "profile-level-id" and "sprop-parameter-sets" (SPS/PPS, base64,
 * comma-separated), accumulating the parameter sets into
 * codec->extradata in Annex B form.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static int sdp_parse_fmtp_config_h264(AVStream *stream,
                                      PayloadContext *h264_data,
                                      char *attr, char *value)
{
    AVCodecContext *codec = stream->codec;
    assert(codec->codec_id == AV_CODEC_ID_H264);
    assert(h264_data != NULL);

    if (!strcmp(attr, "packetization-mode")) {
        av_log(codec, AV_LOG_DEBUG, "RTP Packetization Mode: %d\n", atoi(value));
        h264_data->packetization_mode = atoi(value);
        /*
         * Packetization Mode:
         * 0 or not present: Single NAL mode (Only nals from 1-23 are allowed)
         * 1: Non-interleaved Mode: 1-23, 24 (STAP-A), 28 (FU-A) are allowed.
         * 2: Interleaved Mode: 25 (STAP-B), 26 (MTAP16), 27 (MTAP24), 28 (FU-A),
         *    and 29 (FU-B) are allowed.
         */
        if (h264_data->packetization_mode > 1)
            av_log(codec, AV_LOG_ERROR,
                   "Interleaved RTP mode is not supported yet.\n");
    } else if (!strcmp(attr, "profile-level-id")) {
        if (strlen(value) == 6) {
            char buffer[3];
            // 6 characters=3 bytes, in hex.
            uint8_t profile_idc;
            uint8_t profile_iop;
            uint8_t level_idc;

            /* Decode each 2-hex-digit byte via a NUL-terminated scratch. */
            buffer[0]   = value[0];
            buffer[1]   = value[1];
            buffer[2]   = '\0';
            profile_idc = strtol(buffer, NULL, 16);
            buffer[0]   = value[2];
            buffer[1]   = value[3];
            profile_iop = strtol(buffer, NULL, 16);
            buffer[0]   = value[4];
            buffer[1]   = value[5];
            level_idc   = strtol(buffer, NULL, 16);

            av_log(codec, AV_LOG_DEBUG,
                   "RTP Profile IDC: %x Profile IOP: %x Level: %x\n",
                   profile_idc, profile_iop, level_idc);
            h264_data->profile_idc = profile_idc;
            h264_data->profile_iop = profile_iop;
            h264_data->level_idc   = level_idc;
        }
    } else if (!strcmp(attr, "sprop-parameter-sets")) {
        /* Rebuild extradata from scratch for each sprop-parameter-sets. */
        codec->extradata_size = 0;
        av_freep(&codec->extradata);

        while (*value) {
            char base64packet[1024];
            uint8_t decoded_packet[1024];
            int packet_size;
            char *dst = base64packet;

            /* Copy one comma-separated base64 token (bounded). */
            while (*value && *value != ','
                   && (dst - base64packet) < sizeof(base64packet) - 1) {
                *dst++ = *value++;
            }
            *dst++ = '\0';

            if (*value == ',')
                value++;

            packet_size = av_base64_decode(decoded_packet, base64packet,
                                           sizeof(decoded_packet));
            if (packet_size > 0) {
                /* Grow extradata: old data + start code + new parameter set. */
                uint8_t *dest = av_malloc(packet_size + sizeof(start_sequence) +
                                          codec->extradata_size +
                                          FF_INPUT_BUFFER_PADDING_SIZE);
                if (!dest) {
                    av_log(codec, AV_LOG_ERROR,
                           "Unable to allocate memory for extradata!\n");
                    return AVERROR(ENOMEM);
                }
                if (codec->extradata_size) {
                    memcpy(dest, codec->extradata, codec->extradata_size);
                    av_free(codec->extradata);
                }

                memcpy(dest + codec->extradata_size, start_sequence,
                       sizeof(start_sequence));
                memcpy(dest + codec->extradata_size + sizeof(start_sequence),
                       decoded_packet, packet_size);
                memset(dest + codec->extradata_size + sizeof(start_sequence) +
                       packet_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

                codec->extradata       = dest;
                codec->extradata_size += sizeof(start_sequence) + packet_size;
            }
        }
        av_log(codec, AV_LOG_DEBUG, "Extradata set to %p (size: %d)!\n",
               codec->extradata, codec->extradata_size);
    }
    return 0;
}
 
/**
 * Depacketize one H.264 RTP packet (RFC 3984), dispatching on the NAL
 * unit type: single NAL (1-23), STAP-A aggregation (24) and FU-A
 * fragmentation (28) are supported; interleaved types (25-27, 29) are
 * rejected. Emitted NAL units are prefixed with the Annex B start code.
 *
 * @return 0 on packet, no more left; 1 on packet with more left;
 *         negative AVERROR on error (per the comment convention below).
 */
// return 0 on packet, no more left, 1 on packet, 1 on partial packet
static int h264_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                              AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                              const uint8_t *buf, int len, uint16_t seq,
                              int flags)
{
    uint8_t nal;
    uint8_t type;
    int result = 0;

    if (!len) {
        av_log(ctx, AV_LOG_ERROR, "Empty H264 RTP packet\n");
        return AVERROR_INVALIDDATA;
    }
    nal  = buf[0];
    type = nal & 0x1f;

    assert(data);
    assert(buf);

    /* Simplify the case (these are all the nal types used internally by
     * the h264 codec). */
    if (type >= 1 && type <= 23)
        type = 1;
    switch (type) {
    case 0:                    // undefined, but pass them through
    case 1:
        /* Single NAL unit: copy as-is behind a start code. */
        av_new_packet(pkt, len + sizeof(start_sequence));
        memcpy(pkt->data, start_sequence, sizeof(start_sequence));
        memcpy(pkt->data + sizeof(start_sequence), buf, len);
        COUNT_NAL_TYPE(data, nal);
        break;

    case 24:                   // STAP-A (one packet, multiple nals)
        // consume the STAP-A NAL
        buf++;
        len--;
        // first we are going to figure out the total size
        {
            /* Two passes: pass 0 sums the output size, pass 1 copies. */
            int pass         = 0;
            int total_length = 0;
            uint8_t *dst     = NULL;

            for (pass = 0; pass < 2; pass++) {
                const uint8_t *src = buf;
                int src_len        = len;

                while (src_len > 2) {
                    /* Each aggregated NAL is preceded by a BE16 size. */
                    uint16_t nal_size = AV_RB16(src);

                    // consume the length of the aggregate
                    src     += 2;
                    src_len -= 2;

                    if (nal_size <= src_len) {
                        if (pass == 0) {
                            // counting
                            total_length += sizeof(start_sequence) + nal_size;
                        } else {
                            // copying
                            assert(dst);
                            memcpy(dst, start_sequence, sizeof(start_sequence));
                            dst += sizeof(start_sequence);
                            memcpy(dst, src, nal_size);
                            COUNT_NAL_TYPE(data, *src);
                            dst += nal_size;
                        }
                    } else {
                        av_log(ctx, AV_LOG_ERROR,
                               "nal size exceeds length: %d %d\n", nal_size, src_len);
                    }

                    // eat what we handled
                    src     += nal_size;
                    src_len -= nal_size;

                    if (src_len < 0)
                        av_log(ctx, AV_LOG_ERROR,
                               "Consumed more bytes than we got! (%d)\n", src_len);
                }

                if (pass == 0) {
                    /* now we know the total size of the packet (with the
                     * start sequences added) */
                    av_new_packet(pkt, total_length);
                    dst = pkt->data;
                } else {
                    assert(dst - pkt->data == total_length);
                }
            }
        }
        break;

    case 25:                   // STAP-B
    case 26:                   // MTAP-16
    case 27:                   // MTAP-24
    case 29:                   // FU-B
        /* Interleaved-mode packet types are not implemented. */
        av_log(ctx, AV_LOG_ERROR,
               "Unhandled type (%d) (See RFC for implementation details\n",
               type);
        result = AVERROR(ENOSYS);
        break;

    case 28:                   // FU-A (fragmented nal)
        buf++;
        len--;                 // skip the fu_indicator
        if (len > 1) {
            // these are the same as above, we just redo them here for clarity
            uint8_t fu_indicator      = nal;
            uint8_t fu_header         = *buf;
            uint8_t start_bit         = fu_header >> 7;
            uint8_t av_unused end_bit = (fu_header & 0x40) >> 6;
            uint8_t nal_type          = fu_header & 0x1f;
            uint8_t reconstructed_nal;

            // Reconstruct this packet's true nal; only the data follows.
            /* The original nal forbidden bit and NRI are stored in this
             * packet's nal. */
            reconstructed_nal  = fu_indicator & 0xe0;
            reconstructed_nal |= nal_type;

            // skip the fu_header
            buf++;
            len--;

            if (start_bit)
                COUNT_NAL_TYPE(data, nal_type);
            if (start_bit) {
                /* copy in the start sequence, and the reconstructed nal */
                av_new_packet(pkt, sizeof(start_sequence) + sizeof(nal) + len);
                memcpy(pkt->data, start_sequence, sizeof(start_sequence));
                pkt->data[sizeof(start_sequence)] = reconstructed_nal;
                memcpy(pkt->data + sizeof(start_sequence) + sizeof(nal), buf, len);
            } else {
                /* Continuation fragment: raw payload, no start code. */
                av_new_packet(pkt, len);
                memcpy(pkt->data, buf, len);
            }
        } else {
            av_log(ctx, AV_LOG_ERROR, "Too short data for FU-A H264 RTP packet\n");
            result = AVERROR_INVALIDDATA;
        }
        break;

    case 30:                   // undefined
    case 31:                   // undefined
    default:
        av_log(ctx, AV_LOG_ERROR, "Undefined type (%d)\n", type);
        result = AVERROR_INVALIDDATA;
        break;
    }

    pkt->stream_index = st->index;

    return result;
}
 
static PayloadContext *h264_new_context(void)
{
return av_mallocz(sizeof(PayloadContext) + FF_INPUT_BUFFER_PADDING_SIZE);
}
 
static void h264_free_context(PayloadContext *data)
{
#ifdef DEBUG
    int i;

    /* Dump the per-NAL-type packet counters collected via COUNT_NAL_TYPE. */
    for (i = 0; i < 32; i++)
        if (data->packet_types_received[i])
            av_log(NULL, AV_LOG_DEBUG, "Received %d packets of type %d\n",
                   data->packet_types_received[i], i);
#endif

    av_free(data);
}
 
static av_cold int h264_init(AVFormatContext *s, int st_index,
                             PayloadContext *data)
{
    /* Request full parsing so frame boundaries are recovered downstream. */
    if (st_index >= 0)
        s->streams[st_index]->need_parsing = AVSTREAM_PARSE_FULL;
    return 0;
}
 
/**
 * Parse one SDP "a=" attribute line for an H.264 stream.
 *
 * Handles "framesize:<pt> <width>-<height>" (sets codec dimensions) and
 * delegates "fmtp:" lines to sdp_parse_fmtp_config_h264 via ff_parse_fmtp.
 * Returns 0 on success or a negative error from the fmtp parser.
 */
static int parse_h264_sdp_line(AVFormatContext *s, int st_index,
                               PayloadContext *h264_data, const char *line)
{
    AVStream *stream;
    AVCodecContext *codec;
    const char *p = line;

    if (st_index < 0)
        return 0;

    stream = s->streams[st_index];
    codec  = stream->codec;

    if (av_strstart(p, "framesize:", &p)) {
        // a='framesize:96 320-240'
        char buf1[50];
        char *dst = buf1;

        // remove the protocol identifier
        while (*p && *p == ' ')
            p++; // strip spaces.
        while (*p && *p != ' ')
            p++; // eat protocol identifier
        while (*p && *p == ' ')
            p++; // strip trailing spaces.
        /* copy the width up to the '-' separator; the cast avoids a
         * signed/unsigned comparison against sizeof() */
        while (*p && *p != '-' && (size_t)(dst - buf1) < sizeof(buf1) - 1)
            *dst++ = *p++;
        *dst = '\0';

        // set our parameters
        codec->width = atoi(buf1);
        /* Only read the height when the '-' separator was actually found;
         * otherwise p points at the terminating NUL and p + 1 would read
         * past the end of the string. */
        if (*p == '-')
            codec->height = atoi(p + 1); // skip the -
        else
            av_log(s, AV_LOG_ERROR, "Malformed framesize attribute\n");
    } else if (av_strstart(p, "fmtp:", &p)) {
        return ff_parse_fmtp(stream, h264_data, p, sdp_parse_fmtp_config_h264);
    } else if (av_strstart(p, "cliprect:", &p)) {
        // could use this if we wanted.
    }

    return 0;
}
 
/* Depacketizer registration for H.264 over RTP (dynamic payload type). */
RTPDynamicProtocolHandler ff_h264_dynamic_handler = {
    .enc_name         = "H264",
    .codec_type       = AVMEDIA_TYPE_VIDEO,
    .codec_id         = AV_CODEC_ID_H264,
    .init             = h264_init,
    .parse_sdp_a_line = parse_h264_sdp_line,
    .alloc            = h264_new_context,
    .free             = h264_free_context,
    .parse_packet     = h264_handle_packet
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_ilbc.c
0,0 → 1,73
/*
* RTP iLBC Depacketizer, RFC 3952
* Copyright (c) 2012 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rtpdec_formats.h"
#include "libavutil/avstring.h"
 
/* Handle one fmtp attribute for iLBC; only "mode" is recognized.
 * Per RFC 3952, mode 20 (20 ms frames) means 38-byte blocks and
 * mode 30 (30 ms frames) means 50-byte blocks. */
static int ilbc_parse_fmtp(AVStream *stream, PayloadContext *data,
                           char *attr, char *value)
{
    int mode;

    if (strcmp(attr, "mode"))
        return 0;

    mode = atoi(value);
    if (mode == 20) {
        stream->codec->block_align = 38;
    } else if (mode == 30) {
        stream->codec->block_align = 50;
    } else {
        av_log(NULL, AV_LOG_ERROR, "Unsupported iLBC mode %d\n", mode);
        return AVERROR(EINVAL);
    }
    return 0;
}
 
/* Parse an SDP "a=" line for iLBC. Only "fmtp:" matters; after parsing
 * it, a block_align of 0 means the mandatory mode attribute was absent. */
static int ilbc_parse_sdp_line(AVFormatContext *s, int st_index,
                               PayloadContext *data, const char *line)
{
    const char *params;
    AVStream *st;
    int ret;

    if (st_index < 0)
        return 0;
    st = s->streams[st_index];

    if (!av_strstart(line, "fmtp:", &params))
        return 0;

    ret = ff_parse_fmtp(st, data, params, ilbc_parse_fmtp);
    if (ret < 0)
        return ret;
    if (!st->codec->block_align) {
        av_log(s, AV_LOG_ERROR, "No iLBC mode set\n");
        return AVERROR(EINVAL);
    }
    return 0;
}
 
/* Depacketizer registration for iLBC over RTP (RFC 3952); no custom
 * packet handler is needed, only SDP parsing to pick the frame mode. */
RTPDynamicProtocolHandler ff_ilbc_dynamic_handler = {
    .enc_name         = "iLBC",
    .codec_type       = AVMEDIA_TYPE_AUDIO,
    .codec_id         = AV_CODEC_ID_ILBC,
    .parse_sdp_a_line = ilbc_parse_sdp_line,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_jpeg.c
0,0 → 1,392
/*
* RTP JPEG-compressed Video Depacketizer, RFC 2435
* Copyright (c) 2012 Samuel Pitoiset
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rtpdec.h"
#include "rtpdec_formats.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/mjpeg.h"
#include "libavcodec/bytestream.h"
 
/**
 * RTP/JPEG specific private data.
 */
struct PayloadContext {
    AVIOContext *frame;         ///< current frame buffer
    uint32_t    timestamp;      ///< current frame timestamp
    int         hdr_size;       ///< size of the current frame header
    uint8_t     qtables[128][128];  ///< cached quantization tables, indexed by Q - 128
    uint8_t     qtables_len[128];   ///< byte length of each cached table (0 = not seen yet)
};
 
/* Default luma (first 64) and chroma (last 64) quantizer values used to
 * synthesize tables when the stream only signals a Q factor; emitted
 * as-is into the DQT segment by jpeg_create_header (presumably already
 * in the zig-zag order RFC 2435 prescribes — verify against the RFC). */
static const uint8_t default_quantizers[128] = {
    /* luma table */
    16,  11,  12,  14,  12,  10,  16,  14,
    13,  14,  18,  17,  16,  19,  24,  40,
    26,  24,  22,  22,  24,  49,  35,  37,
    29,  40,  58,  51,  61,  60,  57,  51,
    56,  55,  64,  72,  92,  78,  64,  68,
    87,  69,  55,  56,  80, 109,  81,  87,
    95,  98, 103, 104, 103,  62,  77, 113,
   121, 112, 100, 120,  92, 101, 103,  99,

    /* chroma table */
    17,  18,  18,  24,  21,  24,  47,  26,
    26,  47,  99,  66,  56,  66,  99,  99,
    99,  99,  99,  99,  99,  99,  99,  99,
    99,  99,  99,  99,  99,  99,  99,  99,
    99,  99,  99,  99,  99,  99,  99,  99,
    99,  99,  99,  99,  99,  99,  99,  99,
    99,  99,  99,  99,  99,  99,  99,  99,
    99,  99,  99,  99,  99,  99,  99,  99
};
 
static PayloadContext *jpeg_new_context(void)
{
    /* All fields start zeroed: no frame in progress, no cached tables. */
    PayloadContext *jpeg = av_mallocz(sizeof(*jpeg));
    return jpeg;
}
 
static inline void free_frame_if_needed(PayloadContext *jpeg)
{
    uint8_t *discarded;

    if (!jpeg->frame)
        return;
    /* Closing the dynamic buffer hands back its contents, which are
     * immediately thrown away. */
    avio_close_dyn_buf(jpeg->frame, &discarded);
    av_free(discarded);
    jpeg->frame = NULL;
}
 
static void jpeg_free_context(PayloadContext *jpeg)
{
    /* Drop any partially assembled frame before releasing the context. */
    free_frame_if_needed(jpeg);
    av_free(jpeg);
}
 
/* Emit one DHT Huffman table body: the class/id nibble pair, the 16
 * code-length counts (bits_table is 1-based), then the symbol values.
 * Returns the number of bytes written (1 + 16 + number of symbols). */
static int jpeg_create_huffman_table(PutByteContext *p, int table_class,
                                     int table_id, const uint8_t *bits_table,
                                     const uint8_t *value_table)
{
    int i;
    int nb_values = 0;

    bytestream2_put_byte(p, table_class << 4 | table_id);

    for (i = 1; i <= 16; i++) {
        nb_values += bits_table[i];
        bytestream2_put_byte(p, bits_table[i]);
    }

    for (i = 0; i < nb_values; i++)
        bytestream2_put_byte(p, value_table[i]);

    return nb_values + 17;
}
 
/* Write a JPEG marker: the 0xff prefix byte followed by the marker code. */
static void jpeg_put_marker(PutByteContext *pbc, int code)
{
    bytestream2_put_byte(pbc, 0xff);
    bytestream2_put_byte(pbc, code);
}
 
/**
 * Synthesize a complete JPEG interchange-format header (SOI through SOS)
 * for an RTP/JPEG frame, since RFC 2435 strips these from the payload.
 *
 * @param buf       destination buffer
 * @param size      capacity of buf in bytes
 * @param type      RTP/JPEG type field; selects 4:2:0 vs 4:2:2 subsampling
 * @param w, h      frame dimensions in 8-pixel blocks (converted below)
 * @param qtable    quantization table data, 64 bytes per table
 * @param nb_qtable number of quantization tables (1 or 2)
 * @return number of header bytes written
 */
static int jpeg_create_header(uint8_t *buf, int size, uint32_t type, uint32_t w,
                              uint32_t h, const uint8_t *qtable, int nb_qtable)
{
    PutByteContext pbc;
    uint8_t *dht_size_ptr;
    int dht_size, i;

    bytestream2_init_writer(&pbc, buf, size);

    /* Convert from blocks to pixels. */
    w <<= 3;
    h <<= 3;

    /* SOI */
    jpeg_put_marker(&pbc, SOI);

    /* JFIF header */
    jpeg_put_marker(&pbc, APP0);
    bytestream2_put_be16(&pbc, 16);
    bytestream2_put_buffer(&pbc, "JFIF", 5);
    bytestream2_put_be16(&pbc, 0x0201);
    bytestream2_put_byte(&pbc, 0);
    bytestream2_put_be16(&pbc, 1);
    bytestream2_put_be16(&pbc, 1);
    bytestream2_put_byte(&pbc, 0);
    bytestream2_put_byte(&pbc, 0);

    /* DQT */
    jpeg_put_marker(&pbc, DQT);
    bytestream2_put_be16(&pbc, 2 + nb_qtable * (1 + 64));

    for (i = 0; i < nb_qtable; i++) {
        bytestream2_put_byte(&pbc, i);

        /* Each table is an array of 64 values given in zig-zag
         * order, identical to the format used in a JFIF DQT
         * marker segment. */
        bytestream2_put_buffer(&pbc, qtable + 64 * i, 64);
    }

    /* DHT: the segment length is not known until the four standard
     * tables have been written, so reserve it and patch it afterwards. */
    jpeg_put_marker(&pbc, DHT);
    dht_size_ptr = pbc.buffer;
    bytestream2_put_be16(&pbc, 0);

    dht_size  = 2;
    dht_size += jpeg_create_huffman_table(&pbc, 0, 0, avpriv_mjpeg_bits_dc_luminance,
                                          avpriv_mjpeg_val_dc);
    dht_size += jpeg_create_huffman_table(&pbc, 0, 1, avpriv_mjpeg_bits_dc_chrominance,
                                          avpriv_mjpeg_val_dc);
    dht_size += jpeg_create_huffman_table(&pbc, 1, 0, avpriv_mjpeg_bits_ac_luminance,
                                          avpriv_mjpeg_val_ac_luminance);
    dht_size += jpeg_create_huffman_table(&pbc, 1, 1, avpriv_mjpeg_bits_ac_chrominance,
                                          avpriv_mjpeg_val_ac_chrominance);
    AV_WB16(dht_size_ptr, dht_size);

    /* SOF0: luma uses 2x2 or 2x1 sampling depending on the RTP type;
     * both chroma components reference the second DQT table when two
     * tables were supplied. */
    jpeg_put_marker(&pbc, SOF0);
    bytestream2_put_be16(&pbc, 17); /* size */
    bytestream2_put_byte(&pbc, 8); /* bits per component */
    bytestream2_put_be16(&pbc, h);
    bytestream2_put_be16(&pbc, w);
    bytestream2_put_byte(&pbc, 3); /* number of components */
    bytestream2_put_byte(&pbc, 1); /* component number */
    bytestream2_put_byte(&pbc, (2 << 4) | (type ? 2 : 1)); /* hsample/vsample */
    bytestream2_put_byte(&pbc, 0); /* matrix number */
    bytestream2_put_byte(&pbc, 2); /* component number */
    bytestream2_put_byte(&pbc, 1 << 4 | 1); /* hsample/vsample */
    bytestream2_put_byte(&pbc, nb_qtable == 2 ? 1 : 0); /* matrix number */
    bytestream2_put_byte(&pbc, 3); /* component number */
    bytestream2_put_byte(&pbc, 1 << 4 | 1); /* hsample/vsample */
    bytestream2_put_byte(&pbc, nb_qtable == 2 ? 1 : 0); /* matrix number */

    /* SOS */
    jpeg_put_marker(&pbc, SOS);
    bytestream2_put_be16(&pbc, 12);
    bytestream2_put_byte(&pbc, 3);
    bytestream2_put_byte(&pbc, 1);
    bytestream2_put_byte(&pbc, 0);
    bytestream2_put_byte(&pbc, 2);
    bytestream2_put_byte(&pbc, 17);
    bytestream2_put_byte(&pbc, 3);
    bytestream2_put_byte(&pbc, 17);
    bytestream2_put_byte(&pbc, 0);
    bytestream2_put_byte(&pbc, 63);
    bytestream2_put_byte(&pbc, 0);

    /* Return the length in bytes of the JPEG header. */
    return bytestream2_tell_p(&pbc);
}
 
/**
 * Scale the default quantization tables by the RFC 2435 Q factor and
 * write the resulting 128 values (luma + chroma) into qtables.
 *
 * @param qtables destination, 128 bytes
 * @param q       Q factor from the RTP/JPEG header (clamped to 1..99)
 */
static void create_default_qtables(uint8_t *qtables, uint8_t q)
{
    int factor = av_clip(q, 1, 99);
    int i;
    int S;

    /* RFC 2435 Appendix A scaling. The scale must be computed in an int:
     * the original code stored it back into the 8-bit q, so e.g. q = 1
     * gave 5000 truncated to 136, corrupting every scaled table. */
    if (q < 50)
        S = 5000 / factor;
    else
        S = 200 - factor * 2;

    for (i = 0; i < 128; i++) {
        int val = (default_quantizers[i] * S + 50) / 100;

        /* Limit the quantizers to 1 <= q <= 255. */
        qtables[i] = av_clip(val, 1, 255);
    }
}
 
/**
 * Depacketize one RTP/JPEG (RFC 2435) packet. A frame is assembled in
 * jpeg->frame across packets; on the packet carrying the RTP marker the
 * completed JPEG is emitted into pkt. Returns 0 when pkt is filled,
 * AVERROR(EAGAIN) when more packets are needed, or a negative error.
 */
static int jpeg_parse_packet(AVFormatContext *ctx, PayloadContext *jpeg,
                             AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                             const uint8_t *buf, int len, uint16_t seq,
                             int flags)
{
    uint8_t type, q, width, height;
    const uint8_t *qtables = NULL;
    uint16_t qtable_len;
    uint32_t off;
    int ret;

    if (len < 8) {
        av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Parse the main JPEG header. */
    off    = AV_RB24(buf + 1);  /* fragment byte offset */
    type   = AV_RB8(buf + 4);   /* id of jpeg decoder params */
    q      = AV_RB8(buf + 5);   /* quantization factor (or table id) */
    width  = AV_RB8(buf + 6);   /* frame width in 8 pixel blocks */
    height = AV_RB8(buf + 7);   /* frame height in 8 pixel blocks */
    buf += 8;
    len -= 8;

    /* Parse the restart marker header. */
    if (type > 63) {
        av_log(ctx, AV_LOG_ERROR,
               "Unimplemented RTP/JPEG restart marker header.\n");
        return AVERROR_PATCHWELCOME;
    }
    /* Only types 0 (4:2:2) and 1 (4:2:0) are handled. */
    if (type > 1) {
        av_log(ctx, AV_LOG_ERROR, "Unimplemented RTP/JPEG type %d\n", type);
        return AVERROR_PATCHWELCOME;
    }

    /* Parse the quantization table header. */
    if (off == 0) {
        /* Start of JPEG data packet. */
        uint8_t new_qtables[128];
        uint8_t hdr[1024];

        if (q > 127) {
            /* q >= 128 means the tables are carried in-band (or cached
             * from an earlier packet with the same q id). */
            uint8_t precision;
            if (len < 4) {
                av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
                return AVERROR_INVALIDDATA;
            }

            /* The first byte is reserved for future use. */
            precision  = AV_RB8(buf + 1);  /* size of coefficients */
            qtable_len = AV_RB16(buf + 2); /* length in bytes */
            buf += 4;
            len -= 4;

            if (precision)
                av_log(ctx, AV_LOG_WARNING, "Only 8-bit precision is supported.\n");

            if (qtable_len > 0) {
                if (len < qtable_len) {
                    av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
                    return AVERROR_INVALIDDATA;
                }
                qtables = buf;
                buf += qtable_len;
                len -= qtable_len;
                if (q < 255) {
                    /* Cache the tables under id q - 128 so later frames
                     * with qtable_len == 0 can reuse them; warn if the
                     * sender changes the tables for the same id. */
                    if (jpeg->qtables_len[q - 128] &&
                        (jpeg->qtables_len[q - 128] != qtable_len ||
                         memcmp(qtables, &jpeg->qtables[q - 128][0], qtable_len))) {
                        av_log(ctx, AV_LOG_WARNING,
                               "Quantization tables for q=%d changed\n", q);
                    } else if (!jpeg->qtables_len[q - 128] && qtable_len <= 128) {
                        memcpy(&jpeg->qtables[q - 128][0], qtables,
                               qtable_len);
                        jpeg->qtables_len[q - 128] = qtable_len;
                    }
                }
            } else {
                /* q == 255 must always carry tables in-band. */
                if (q == 255) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Invalid RTP/JPEG packet. Quantization tables not found.\n");
                    return AVERROR_INVALIDDATA;
                }
                if (!jpeg->qtables_len[q - 128]) {
                    av_log(ctx, AV_LOG_ERROR,
                           "No quantization tables known for q=%d yet.\n", q);
                    return AVERROR_INVALIDDATA;
                }
                qtables    = &jpeg->qtables[q - 128][0];
                qtable_len = jpeg->qtables_len[q - 128];
            }
        } else { /* q <= 127 */
            if (q == 0 || q > 99) {
                av_log(ctx, AV_LOG_ERROR, "Reserved q value %d\n", q);
                return AVERROR_INVALIDDATA;
            }
            create_default_qtables(new_qtables, q);
            qtables    = new_qtables;
            qtable_len = sizeof(new_qtables);
        }

        /* Skip the current frame in case of the end packet
         * has been lost somewhere. */
        free_frame_if_needed(jpeg);

        if ((ret = avio_open_dyn_buf(&jpeg->frame)) < 0)
            return ret;
        jpeg->timestamp = *timestamp;

        /* Generate a frame and scan headers that can be prepended to the
         * RTP/JPEG data payload to produce a JPEG compressed image in
         * interchange format. */
        jpeg->hdr_size = jpeg_create_header(hdr, sizeof(hdr), type, width,
                                            height, qtables,
                                            qtable_len / 64);

        /* Copy JPEG header to frame buffer. */
        avio_write(jpeg->frame, hdr, jpeg->hdr_size);
    }

    if (!jpeg->frame) {
        av_log(ctx, AV_LOG_ERROR,
               "Received packet without a start chunk; dropping frame.\n");
        return AVERROR(EAGAIN);
    }

    if (jpeg->timestamp != *timestamp) {
        /* Skip the current frame if timestamp is incorrect.
         * A start packet has been lost somewhere. */
        free_frame_if_needed(jpeg);
        av_log(ctx, AV_LOG_ERROR, "RTP timestamps don't match.\n");
        return AVERROR_INVALIDDATA;
    }

    /* The fragment offset must line up with what has been written so far. */
    if (off != avio_tell(jpeg->frame) - jpeg->hdr_size) {
        av_log(ctx, AV_LOG_ERROR,
               "Missing packets; dropping frame.\n");
        return AVERROR(EAGAIN);
    }

    /* Copy data to frame buffer. */
    avio_write(jpeg->frame, buf, len);

    if (flags & RTP_FLAG_MARKER) {
        /* End of JPEG data packet. */
        uint8_t buf[2] = { 0xff, EOI };

        /* Put EOI marker. */
        avio_write(jpeg->frame, buf, sizeof(buf));

        /* Prepare the JPEG packet. */
        if ((ret = ff_rtp_finalize_packet(pkt, &jpeg->frame, st->index)) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error occurred when getting frame buffer.\n");
            return ret;
        }

        return 0;
    }

    return AVERROR(EAGAIN);
}
 
/* Depacketizer registration for RTP/JPEG; 26 is the static payload
 * type assigned to JPEG. */
RTPDynamicProtocolHandler ff_jpeg_dynamic_handler = {
    .enc_name          = "JPEG",
    .codec_type        = AVMEDIA_TYPE_VIDEO,
    .codec_id          = AV_CODEC_ID_MJPEG,
    .alloc             = jpeg_new_context,
    .free              = jpeg_free_context,
    .parse_packet      = jpeg_parse_packet,
    .static_payload_id = 26,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_latm.c
0,0 → 1,186
/*
* RTP Depacketization of MP4A-LATM, RFC 3016
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "rtpdec_formats.h"
#include "internal.h"
#include "libavutil/avstring.h"
#include "libavcodec/get_bits.h"
 
/* RTP MP4A-LATM private data. */
struct PayloadContext {
    AVIOContext *dyn_buf;  ///< accumulates fragments of the packet being assembled
    uint8_t *buf;          ///< last completed payload, read out piecewise
    int pos, len;          ///< read position / total length within buf
    uint32_t timestamp;    ///< RTP timestamp of the packet being assembled
};
 
static PayloadContext *latm_new_context(void)
{
    /* Zero-initialized: no dynamic buffer or pending data yet. */
    PayloadContext *ctx = av_mallocz(sizeof(*ctx));
    return ctx;
}
 
static void latm_free_context(PayloadContext *data)
{
    if (!data)
        return;
    /* Release the in-progress dynamic buffer (its contents are
     * discarded) and the last finalized buffer. */
    if (data->dyn_buf) {
        uint8_t *discarded;
        avio_close_dyn_buf(data->dyn_buf, &discarded);
        av_free(discarded);
    }
    av_free(data->buf);
    av_free(data);
}
 
/**
 * Depacketize MP4A-LATM (RFC 3016). RTP packets sharing a timestamp are
 * concatenated into data->buf; once complete, each call extracts one
 * length-prefixed subframe into pkt. Returns >0 if more subframes remain
 * in the buffer, 0 when done, negative on error.
 */
static int latm_parse_packet(AVFormatContext *ctx, PayloadContext *data,
                             AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                             const uint8_t *buf, int len, uint16_t seq,
                             int flags)
{
    int ret, cur_len;

    if (buf) {
        /* New timestamp (or first packet): discard any stale state and
         * start accumulating a fresh payload. */
        if (!data->dyn_buf || data->timestamp != *timestamp) {
            av_freep(&data->buf);
            if (data->dyn_buf)
                avio_close_dyn_buf(data->dyn_buf, &data->buf);
            data->dyn_buf = NULL;
            /* NOTE(review): this second av_freep frees the buffer the
             * close above just returned (dropping an incomplete packet);
             * combined with the first av_freep it looks redundant but is
             * harmless — av_freep(NULL contents) is a no-op. */
            av_freep(&data->buf);

            data->timestamp = *timestamp;
            if ((ret = avio_open_dyn_buf(&data->dyn_buf)) < 0)
                return ret;
        }
        avio_write(data->dyn_buf, buf, len);

        /* The RTP marker bit signals the last fragment of the payload. */
        if (!(flags & RTP_FLAG_MARKER))
            return AVERROR(EAGAIN);
        av_free(data->buf);
        data->len = avio_close_dyn_buf(data->dyn_buf, &data->buf);
        data->dyn_buf = NULL;
        data->pos = 0;
    }

    if (!data->buf) {
        av_log(ctx, AV_LOG_ERROR, "No data available yet\n");
        return AVERROR(EIO);
    }

    /* Subframe length coding: sum bytes until one is not 0xff. */
    cur_len = 0;
    while (data->pos < data->len) {
        uint8_t val = data->buf[data->pos++];
        cur_len += val;
        if (val != 0xff)
            break;
    }
    if (data->pos + cur_len > data->len) {
        av_log(ctx, AV_LOG_ERROR, "Malformed LATM packet\n");
        return AVERROR(EIO);
    }

    if ((ret = av_new_packet(pkt, cur_len)) < 0)
        return ret;
    memcpy(pkt->data, data->buf + data->pos, cur_len);
    data->pos += cur_len;
    pkt->stream_index = st->index;
    /* Nonzero return: caller should ask for the next buffered subframe. */
    return data->pos < data->len;
}
 
/**
 * Parse the hex-encoded StreamMuxConfig from the fmtp "config" attribute
 * and extract the AudioSpecificConfig into the codec's extradata.
 * Only the simplest configuration (audioMuxVersion 0, one program/layer,
 * allStreamsSameTimeFraming) is supported.
 */
static int parse_fmtp_config(AVStream *st, char *value)
{
    /* First call with NULL returns the decoded length only. */
    int len = ff_hex_to_data(NULL, value), i, ret = 0;
    GetBitContext gb;
    uint8_t *config;
    int audio_mux_version, same_time_framing, num_programs, num_layers;

    /* Pad this buffer, too, to avoid out of bounds reads with get_bits below */
    config = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!config)
        return AVERROR(ENOMEM);
    ff_hex_to_data(config, value);
    init_get_bits(&gb, config, len*8);
    /* StreamMuxConfig header fields, in bitstream order. */
    audio_mux_version = get_bits(&gb, 1);
    same_time_framing = get_bits(&gb, 1);
    skip_bits(&gb, 6); /* num_sub_frames */
    num_programs      = get_bits(&gb, 4);
    num_layers        = get_bits(&gb, 3);
    if (audio_mux_version != 0 || same_time_framing != 1 || num_programs != 0 ||
        num_layers != 0) {
        av_log(NULL, AV_LOG_WARNING, "Unsupported LATM config (%d,%d,%d,%d)\n",
               audio_mux_version, same_time_framing,
               num_programs, num_layers);
        ret = AVERROR_PATCHWELCOME;
        goto end;
    }
    /* The remaining bits are the AudioSpecificConfig; copy them out
     * byte by byte as the payload is not byte-aligned here. */
    av_freep(&st->codec->extradata);
    if (ff_alloc_extradata(st->codec, (get_bits_left(&gb) + 7)/8)) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    for (i = 0; i < st->codec->extradata_size; i++)
        st->codec->extradata[i] = get_bits(&gb, 8);

end:
    av_free(config);
    return ret;
}
 
/* Handle one LATM fmtp attribute: "config" carries the hex-encoded
 * StreamMuxConfig, "cpresent" != 0 (in-band config) is unsupported. */
static int parse_fmtp(AVStream *stream, PayloadContext *data,
                      char *attr, char *value)
{
    if (!strcmp(attr, "config")) {
        int ret = parse_fmtp_config(stream, value);
        if (ret < 0)
            return ret;
    } else if (!strcmp(attr, "cpresent")) {
        if (atoi(value) != 0)
            avpriv_request_sample(NULL,
                                  "RTP MP4A-LATM with in-band configuration");
    }

    return 0;
}
 
/* Dispatch SDP "a=" lines for MP4A-LATM; only "fmtp:" is of interest. */
static int latm_parse_sdp_line(AVFormatContext *s, int st_index,
                               PayloadContext *data, const char *line)
{
    const char *params;

    if (st_index >= 0 && av_strstart(line, "fmtp:", &params))
        return ff_parse_fmtp(s->streams[st_index], data, params, parse_fmtp);

    return 0;
}
 
/* Depacketizer registration for MP4A-LATM over RTP (RFC 3016). */
RTPDynamicProtocolHandler ff_mp4a_latm_dynamic_handler = {
    .enc_name         = "MP4A-LATM",
    .codec_type       = AVMEDIA_TYPE_AUDIO,
    .codec_id         = AV_CODEC_ID_AAC,
    .parse_sdp_a_line = latm_parse_sdp_line,
    .alloc            = latm_new_context,
    .free             = latm_free_context,
    .parse_packet     = latm_parse_packet
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_mpeg12.c
0,0 → 1,73
/*
* Common code for the RTP depacketization of MPEG-1/2 formats.
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/attributes.h"
#include "libavutil/intreadwrite.h"
#include "rtpdec_formats.h"
 
static av_cold int mpeg_init(AVFormatContext *ctx, int st_index, PayloadContext *data)
{
    /* Let the codec parser recover frame boundaries and timestamps. */
    if (st_index >= 0)
        ctx->streams[st_index]->need_parsing = AVSTREAM_PARSE_FULL;
    return 0;
}
 
/**
 * Depacketize MPEG-1/2 audio or video (RFC 2250): strip the 4-byte
 * RFC 2250 header (plus a further 4-byte extension for MPEG-2 video)
 * and pass the remaining elementary-stream bytes through unchanged.
 */
static int mpeg_parse_packet(AVFormatContext *ctx, PayloadContext *data,
                             AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                             const uint8_t *buf, int len, uint16_t seq,
                             int flags)
{
    unsigned int h;
    if (len <= 4)
        return AVERROR_INVALIDDATA;
    h    = AV_RB32(buf);
    buf += 4;
    len -= 4;
    /* Bit 26 of the video header flags an extra MPEG-2 specific 4-byte
     * header (presumably RFC 2250's "T" bit — verify against the RFC). */
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && h & (1 << 26)) {
        /* MPEG-2 */
        if (len <= 4)
            return AVERROR_INVALIDDATA;
        buf += 4;
        len -= 4;
    }
    if (av_new_packet(pkt, len) < 0)
        return AVERROR(ENOMEM);
    memcpy(pkt->data, buf, len);
    pkt->stream_index = st->index;
    return 0;
}
 
/* MPEG audio over RTP; 14 is the static payload type assigned to MPA. */
RTPDynamicProtocolHandler ff_mpeg_audio_dynamic_handler = {
    .codec_type        = AVMEDIA_TYPE_AUDIO,
    .codec_id          = AV_CODEC_ID_MP3,
    .init              = mpeg_init,
    .parse_packet      = mpeg_parse_packet,
    .static_payload_id = 14,
};
 
/* MPEG video over RTP; 32 is the static payload type assigned to MPV. */
RTPDynamicProtocolHandler ff_mpeg_video_dynamic_handler = {
    .codec_type        = AVMEDIA_TYPE_VIDEO,
    .codec_id          = AV_CODEC_ID_MPEG2VIDEO,
    .init              = mpeg_init,
    .parse_packet      = mpeg_parse_packet,
    .static_payload_id = 32,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_mpeg4.c
0,0 → 1,279
/*
* Common code for the RTP depacketization of MPEG-4 formats.
* Copyright (c) 2010 Fabrice Bellard
* Romain Degez
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief MPEG4 / RTP Code
* @author Fabrice Bellard
* @author Romain Degez
*/
 
#include "rtpdec_formats.h"
#include "internal.h"
#include "libavutil/attributes.h"
#include "libavutil/avstring.h"
#include "libavcodec/get_bits.h"
 
/** Structure listing useful vars to parse RTP packet payload */
struct PayloadContext {
    int sizelength;         ///< bits per AU-size field (fmtp SizeLength)
    int indexlength;        ///< bits per AU-index field (fmtp IndexLength)
    int indexdeltalength;   ///< bits per AU-index-delta field (parsed but unused here)
    int profile_level_id;
    int streamtype;
    int objecttype;         ///< NOTE(review): declared but never filled by attr_names
    char *mode;

    /** mpeg 4 AU headers */
    struct AUHeaders {
        int size;           ///< AU payload size in bytes
        int index;          ///< AU index / index delta
        int cts_flag;
        int cts;
        int dts_flag;
        int dts;
        int rap_flag;
        int streamstate;
    } *au_headers;
    int au_headers_allocated;   ///< capacity of the au_headers array
    int nb_au_headers;          ///< AU headers parsed from the current packet
    int au_headers_length_bytes;
    int cur_au_index;           ///< next AU to hand out from buf

    uint8_t buf[RTP_MAX_PACKET_LENGTH];  ///< AUs beyond the first, buffered for later calls
    int buf_pos, buf_size;
};
 
/* Maps an fmtp attribute name onto the PayloadContext field that stores
 * its parsed value. */
typedef struct {
    const char *str;    ///< attribute name as it appears in the SDP line
    uint16_t type;      ///< value kind: integer or string (see ATTR_NAME_TYPE_*)
    uint32_t offset;    ///< offsetof() the destination field in PayloadContext
} AttrNameMap;
 
/* All known fmtp parameters and the corresponding RTPAttrTypeEnum */
#define ATTR_NAME_TYPE_INT 0
#define ATTR_NAME_TYPE_STR 1
/* Table is NULL-terminated; lookup in parse_fmtp is case-insensitive. */
static const AttrNameMap attr_names[] = {
    { "SizeLength",       ATTR_NAME_TYPE_INT,
      offsetof(PayloadContext, sizelength) },
    { "IndexLength",      ATTR_NAME_TYPE_INT,
      offsetof(PayloadContext, indexlength) },
    { "IndexDeltaLength", ATTR_NAME_TYPE_INT,
      offsetof(PayloadContext, indexdeltalength) },
    { "profile-level-id", ATTR_NAME_TYPE_INT,
      offsetof(PayloadContext, profile_level_id) },
    { "StreamType",       ATTR_NAME_TYPE_INT,
      offsetof(PayloadContext, streamtype) },
    { "mode",             ATTR_NAME_TYPE_STR,
      offsetof(PayloadContext, mode) },
    { NULL, -1, -1 },
};
 
static PayloadContext *new_context(void)
{
    /* Zeroed context: no AU headers parsed, no buffered data. */
    PayloadContext *ctx = av_mallocz(sizeof(*ctx));
    return ctx;
}
 
static void free_context(PayloadContext *data)
{
    /* Release owned allocations (AU header array, fmtp "mode" string),
     * then the context itself. */
    av_free(data->au_headers);
    av_free(data->mode);
    av_free(data);
}
 
/* Decode the hex-encoded fmtp "config" value into codec extradata.
 * ff_hex_to_data(NULL, ...) is a sizing pass; the second call writes
 * the bytes into the freshly allocated buffer. */
static int parse_fmtp_config(AVCodecContext *codec, char *value)
{
    int config_len = ff_hex_to_data(NULL, value);

    av_free(codec->extradata);
    if (ff_alloc_extradata(codec, config_len))
        return AVERROR(ENOMEM);
    ff_hex_to_data(codec->extradata, value);
    return 0;
}
 
/**
 * Parse the AU-headers section (RFC 3640) at the start of an RTP payload
 * into data->au_headers. Fills nb_au_headers and au_headers_length_bytes.
 * Returns 0 on success, negative/-1 on malformed input.
 */
static int rtp_parse_mp4_au(PayloadContext *data, const uint8_t *buf, int len)
{
    int au_headers_length, au_header_size, i;
    GetBitContext getbitcontext;

    if (len < 2)
        return AVERROR_INVALIDDATA;

    /* decode the first 2 bytes where the AUHeader sections are stored
       length in bits */
    au_headers_length = AV_RB16(buf);

    if (au_headers_length > RTP_MAX_PACKET_LENGTH)
        return -1;

    data->au_headers_length_bytes = (au_headers_length + 7) / 8;

    /* skip AU headers length section (2 bytes) */
    buf += 2;
    len -= 2;

    if (len < data->au_headers_length_bytes)
        return AVERROR_INVALIDDATA;

    init_get_bits(&getbitcontext, buf, data->au_headers_length_bytes * 8);

    /* XXX: Wrong if optional additional sections are present (cts, dts etc...) */
    au_header_size = data->sizelength + data->indexlength;
    /* Each header must be the same fixed size; a zero size (fmtp values
     * missing) or a length that is not a multiple of it is malformed. */
    if (au_header_size <= 0 || (au_headers_length % au_header_size != 0))
        return -1;

    data->nb_au_headers = au_headers_length / au_header_size;
    /* Grow the header array only when needed; it is reused across packets. */
    if (!data->au_headers || data->au_headers_allocated < data->nb_au_headers) {
        av_free(data->au_headers);
        data->au_headers = av_malloc(sizeof(struct AUHeaders) * data->nb_au_headers);
        if (!data->au_headers)
            return AVERROR(ENOMEM);
        data->au_headers_allocated = data->nb_au_headers;
    }

    for (i = 0; i < data->nb_au_headers; ++i) {
        data->au_headers[i].size  = get_bits_long(&getbitcontext, data->sizelength);
        data->au_headers[i].index = get_bits_long(&getbitcontext, data->indexlength);
    }

    return 0;
}
 
 
/* Follows RFC 3640 */
/**
 * Depacketize AAC (mpeg4-generic). A packet may carry several AUs; the
 * first is returned directly, the rest are buffered and handed out on
 * subsequent calls with buf == NULL. Returns >0 if more AUs remain,
 * 0 when the packet is fully consumed, negative on error.
 */
static int aac_parse_packet(AVFormatContext *ctx, PayloadContext *data,
                            AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                            const uint8_t *buf, int len, uint16_t seq,
                            int flags)
{
    int ret;

    if (!buf) {
        /* Deliver the next buffered AU. The index must stay strictly
         * below nb_au_headers: the previous ">" comparison let
         * cur_au_index == nb_au_headers through and then read
         * au_headers[nb_au_headers], one element past the end. */
        if (data->cur_au_index >= data->nb_au_headers)
            return AVERROR_INVALIDDATA;
        if (data->buf_size - data->buf_pos < data->au_headers[data->cur_au_index].size)
            return AVERROR_INVALIDDATA;
        if ((ret = av_new_packet(pkt, data->au_headers[data->cur_au_index].size)) < 0)
            return ret;
        memcpy(pkt->data, &data->buf[data->buf_pos], data->au_headers[data->cur_au_index].size);
        data->buf_pos += data->au_headers[data->cur_au_index].size;
        pkt->stream_index = st->index;
        data->cur_au_index++;
        return data->cur_au_index < data->nb_au_headers;
    }

    if (rtp_parse_mp4_au(data, buf, len))
        return -1;

    /* Skip past the AU-headers section to the first AU payload. */
    buf += data->au_headers_length_bytes + 2;
    len -= data->au_headers_length_bytes + 2;

    if (len < data->au_headers[0].size)
        return AVERROR_INVALIDDATA;
    if ((ret = av_new_packet(pkt, data->au_headers[0].size)) < 0)
        return ret;
    memcpy(pkt->data, buf, data->au_headers[0].size);
    len -= data->au_headers[0].size;
    buf += data->au_headers[0].size;
    pkt->stream_index = st->index;

    /* Stash the remaining AUs for later buf == NULL calls. */
    if (len > 0 && data->nb_au_headers > 1) {
        data->buf_size = FFMIN(len, sizeof(data->buf));
        memcpy(data->buf, buf, data->buf_size);
        data->cur_au_index = 1;
        data->buf_pos = 0;
        return 1;
    }

    return 0;
}
 
/* Handle one mpeg4-generic fmtp attribute: "config" becomes codec
 * extradata; for AAC, every attribute listed in attr_names is stored
 * into its PayloadContext field. */
static int parse_fmtp(AVStream *stream, PayloadContext *data,
                      char *attr, char *value)
{
    AVCodecContext *codec = stream->codec;
    int i;

    if (!strcmp(attr, "config")) {
        int res = parse_fmtp_config(codec, value);

        if (res < 0)
            return res;
    }

    if (codec->codec_id != AV_CODEC_ID_AAC)
        return 0;

    /* Looking for a known attribute */
    for (i = 0; attr_names[i].str; ++i) {
        if (av_strcasecmp(attr, attr_names[i].str))
            continue;
        if (attr_names[i].type == ATTR_NAME_TYPE_INT)
            *(int *)((char *)data + attr_names[i].offset) = atoi(value);
        else if (attr_names[i].type == ATTR_NAME_TYPE_STR)
            *(char **)((char *)data + attr_names[i].offset) = av_strdup(value);
    }
    return 0;
}
 
/* Dispatch SDP "a=" lines; only "fmtp:" carries parameters we use. */
static int parse_sdp_line(AVFormatContext *s, int st_index,
                          PayloadContext *data, const char *line)
{
    const char *params;

    if (st_index >= 0 && av_strstart(line, "fmtp:", &params))
        return ff_parse_fmtp(s->streams[st_index], data, params, parse_fmtp);

    return 0;
}
 
static av_cold int init_video(AVFormatContext *s, int st_index,
                              PayloadContext *data)
{
    /* MPEG-4 video needs the parser to find frame boundaries. */
    if (st_index >= 0)
        s->streams[st_index]->need_parsing = AVSTREAM_PARSE_FULL;
    return 0;
}
 
/* MPEG-4 video elementary stream over RTP (RFC 3016/6416). */
RTPDynamicProtocolHandler ff_mp4v_es_dynamic_handler = {
    .enc_name         = "MP4V-ES",
    .codec_type       = AVMEDIA_TYPE_VIDEO,
    .codec_id         = AV_CODEC_ID_MPEG4,
    .init             = init_video,
    .parse_sdp_a_line = parse_sdp_line,
};
 
/* AAC over RTP via the mpeg4-generic mode (RFC 3640). */
RTPDynamicProtocolHandler ff_mpeg4_generic_dynamic_handler = {
    .enc_name         = "mpeg4-generic",
    .codec_type       = AVMEDIA_TYPE_AUDIO,
    .codec_id         = AV_CODEC_ID_AAC,
    .parse_sdp_a_line = parse_sdp_line,
    .alloc            = new_context,
    .free             = free_context,
    .parse_packet     = aac_parse_packet
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_mpegts.c
0,0 → 1,108
/*
* RTP MPEG2TS depacketizer
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/attributes.h"
#include "mpegts.h"
#include "rtpdec_formats.h"
 
/* RTP MPEG-TS private data. */
struct PayloadContext {
    struct MpegTSContext *ts;            ///< embedded MPEG-TS demuxer
    int read_buf_index;                  ///< bytes of buf already consumed
    int read_buf_size;                   ///< valid bytes in buf
    uint8_t buf[RTP_MAX_PACKET_LENGTH];  ///< leftover TS data from the previous packet
};
 
static PayloadContext *mpegts_new_context(void)
{
    /* The embedded TS demuxer is created later, in mpegts_init(). */
    PayloadContext *ctx = av_mallocz(sizeof(*ctx));
    return ctx;
}
 
static void mpegts_free_context(PayloadContext *data)
{
    /* Tear down the embedded MPEG-TS demuxer, then the context itself. */
    if (data) {
        if (data->ts)
            ff_mpegts_parse_close(data->ts);
        av_free(data);
    }
}
 
static av_cold int mpegts_init(AVFormatContext *ctx, int st_index,
                               PayloadContext *data)
{
    /* Open the internal MPEG-TS demuxer that RTP payloads are fed to. */
    data->ts = ff_mpegts_parse_open(ctx);
    return data->ts ? 0 : AVERROR(ENOMEM);
}
 
/**
 * Feed one RTP payload of raw MPEG-TS bytes into the embedded TS
 * demuxer. Unconsumed input is buffered and replayed on subsequent
 * calls with buf == NULL. Returns >0 if more output may be available,
 * 0 when this payload is fully consumed, AVERROR(EAGAIN) if no packet
 * could be produced.
 */
static int mpegts_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                                AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                                const uint8_t *buf, int len, uint16_t seq,
                                int flags)
{
    int ret;

    // We don't want to use the RTP timestamps at all. If the mpegts demuxer
    // doesn't set any pts/dts, the generic rtpdec code shouldn't try to
    // fill it in either, since the mpegts and RTP timestamps are in totally
    // different ranges.
    *timestamp = RTP_NOTS_VALUE;

    if (!data->ts)
        return AVERROR(EINVAL);

    if (!buf) {
        /* Drain the leftover bytes buffered from the previous payload. */
        if (data->read_buf_index >= data->read_buf_size)
            return AVERROR(EAGAIN);
        ret = ff_mpegts_parse_packet(data->ts, pkt, data->buf + data->read_buf_index,
                                     data->read_buf_size - data->read_buf_index);
        if (ret < 0)
            return AVERROR(EAGAIN);
        data->read_buf_index += ret;
        if (data->read_buf_index < data->read_buf_size)
            return 1;
        else
            return 0;
    }

    ret = ff_mpegts_parse_packet(data->ts, pkt, buf, len);
    /* The only error that can be returned from ff_mpegts_parse_packet
     * is "no more data to return from the provided buffer", so return
     * AVERROR(EAGAIN) for all errors */
    if (ret < 0)
        return AVERROR(EAGAIN);
    if (ret < len) {
        /* Stash the unconsumed tail for the next buf == NULL call. */
        data->read_buf_size = FFMIN(len - ret, sizeof(data->buf));
        memcpy(data->buf, buf + ret, data->read_buf_size);
        data->read_buf_index = 0;
        return 1;
    }
    return 0;
}
 
/* MP2T is a static payload type (RFC 3551, PT 33), hence no enc_name. */
RTPDynamicProtocolHandler ff_mpegts_dynamic_handler = {
    .codec_type        = AVMEDIA_TYPE_DATA,
    .static_payload_id = 33,
    .alloc             = mpegts_new_context,
    .init              = mpegts_init,
    .free              = mpegts_free_context,
    .parse_packet      = mpegts_handle_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_qcelp.c
0,0 → 1,230
/*
* RTP Depacketization of QCELP/PureVoice, RFC 2658
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "rtpdec_formats.h"
 
/* QCELP frame payload sizes in bytes, indexed by the per-frame rate/type
 * octet read from the stream (values >= 5 are rejected as invalid). */
static const uint8_t frame_sizes[] = {
    1, 4, 8, 17, 35
};
 
/* Buffered frames belonging to one slot of an interleaving group. */
typedef struct {
    int pos;  ///< read offset of the next frame within data
    int size; ///< number of valid bytes in data
    /* The largest frame is 35 bytes, only 10 frames are allowed per
     * packet, and we return the first one immediately, so allocate
     * space for 9 frames */
    uint8_t data[35*9];
} InterleavePacket;
 
/* Depacketizer state for RFC 2658 interleaved QCELP. */
struct PayloadContext {
    int interleave_size;  ///< L field of the current group (0..5)
    int interleave_index; ///< next interleave slot to emit from
    InterleavePacket group[6]; ///< one stash per interleave slot
    int group_finished;   ///< nonzero once the current group has no data left

    /* The maximum packet size, 10 frames of 35 bytes each, and one
     * packet header byte. */
    uint8_t next_data[1 + 35*10]; ///< packet stashed when a group wrapped early
    int next_size;        ///< valid bytes in next_data (0 = none pending)
    uint32_t next_timestamp; ///< RTP timestamp belonging to next_data
};
 
/* Allocate a zero-initialized QCELP depacketizer context (NULL on OOM). */
static PayloadContext *qcelp_new_context(void)
{
    PayloadContext *ctx = av_mallocz(sizeof(*ctx));
    return ctx;
}
 
/* Free the context; it owns no secondary allocations. */
static void qcelp_free_context(PayloadContext *ctx)
{
    av_free(ctx);
}
 
static int return_stored_frame(AVFormatContext *ctx, PayloadContext *data,
AVStream *st, AVPacket *pkt, uint32_t *timestamp,
const uint8_t *buf, int len);
 
/*
 * Parse one incoming RTP packet: emit its first frame immediately in *pkt
 * and stash the remaining frames into the interleave group buffers.
 *
 * Layout per RFC 2658: byte 0 carries the interleave size L (bits 5..3)
 * and index (bits 2..0); byte 1 onward are rate-octet-prefixed frames.
 *
 * Returns 0 (packet emitted, none pending), 1 (packet emitted, more
 * pending via return_stored_frame), or a negative AVERROR.
 */
static int store_packet(AVFormatContext *ctx, PayloadContext *data,
                        AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                        const uint8_t *buf, int len)
{
    int interleave_size, interleave_index;
    int frame_size, ret;
    InterleavePacket* ip;

    if (len < 2)
        return AVERROR_INVALIDDATA;

    interleave_size  = buf[0] >> 3 & 7;
    interleave_index = buf[0] & 7;

    if (interleave_size > 5) {
        av_log(ctx, AV_LOG_ERROR, "Invalid interleave size %d\n",
               interleave_size);
        return AVERROR_INVALIDDATA;
    }
    if (interleave_index > interleave_size) {
        av_log(ctx, AV_LOG_ERROR, "Invalid interleave index %d/%d\n",
               interleave_index, interleave_size);
        return AVERROR_INVALIDDATA;
    }
    if (interleave_size != data->interleave_size) {
        int i;
        /* First packet, or changed interleave size */
        data->interleave_size = interleave_size;
        data->interleave_index = 0;
        for (i = 0; i < 6; i++)
            data->group[i].size = 0;
    }

    if (interleave_index < data->interleave_index) {
        /* Wrapped around - missed the last packet of the previous group. */
        if (data->group_finished) {
            /* No more data in the packets in this interleaving group, just
             * start processing the next one */
            data->interleave_index = 0;
        } else {
            /* Stash away the current packet, emit everything we have of the
             * previous group. */
            for (; data->interleave_index <= interleave_size;
                 data->interleave_index++)
                data->group[data->interleave_index].size = 0;

            if (len > sizeof(data->next_data))
                return AVERROR_INVALIDDATA;
            memcpy(data->next_data, buf, len);
            data->next_size = len;
            data->next_timestamp = *timestamp;
            *timestamp = RTP_NOTS_VALUE;

            data->interleave_index = 0;
            /* Drain the old group first; the stashed packet is replayed by
             * return_stored_frame once the group is exhausted. */
            return return_stored_frame(ctx, data, st, pkt, timestamp, buf, len);
        }
    }
    if (interleave_index > data->interleave_index) {
        /* We missed a packet */
        for (; data->interleave_index < interleave_index;
             data->interleave_index++)
            data->group[data->interleave_index].size = 0;
    }
    data->interleave_index = interleave_index;

    /* First frame's rate octet selects the frame size; emit it right away. */
    if (buf[1] >= FF_ARRAY_ELEMS(frame_sizes))
        return AVERROR_INVALIDDATA;
    frame_size = frame_sizes[buf[1]];
    if (1 + frame_size > len)
        return AVERROR_INVALIDDATA;

    if (len - 1 - frame_size > sizeof(data->group[0].data))
        return AVERROR_INVALIDDATA;

    if ((ret = av_new_packet(pkt, frame_size)) < 0)
        return ret;
    memcpy(pkt->data, &buf[1], frame_size);
    pkt->stream_index = st->index;

    /* Remaining frames go into this slot's stash buffer. */
    ip = &data->group[data->interleave_index];
    ip->size = len - 1 - frame_size;
    ip->pos = 0;
    memcpy(ip->data, &buf[1 + frame_size], ip->size);
    /* Each packet must contain the same number of frames according to the
     * RFC. If there's no data left in this packet, there shouldn't be any
     * in any of the other frames in the interleaving group either. */
    data->group_finished = ip->size == 0;

    if (interleave_index == interleave_size) {
        data->interleave_index = 0;
        return !data->group_finished;
    } else {
        data->interleave_index++;
        return 0;
    }
}
 
/*
 * Emit the next buffered frame from the current interleave slot (called
 * with buf == NULL by the rtpdec core after store_packet returned 1).
 * Slots with no data produce a one-byte "blank" frame so decoder timing
 * stays aligned. When the group is drained and a stashed next packet
 * exists, processing restarts from it via store_packet.
 *
 * Returns 0 (last pending frame), 1 (more pending), or negative AVERROR.
 */
static int return_stored_frame(AVFormatContext *ctx, PayloadContext *data,
                               AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                               const uint8_t *buf, int len)
{
    InterleavePacket* ip = &data->group[data->interleave_index];
    int frame_size, ret;

    if (data->group_finished && data->interleave_index == 0) {
        /* Group fully drained: replay the packet stashed by store_packet. */
        *timestamp = data->next_timestamp;
        ret = store_packet(ctx, data, st, pkt, timestamp, data->next_data,
                           data->next_size);
        data->next_size = 0;
        return ret;
    }

    if (ip->size == 0) {
        /* No stored data for this interleave block, output an empty packet */
        if ((ret = av_new_packet(pkt, 1)) < 0)
            return ret;
        pkt->data[0] = 0; // Blank - could also be 14, Erasure
    } else {
        if (ip->pos >= ip->size)
            return AVERROR_INVALIDDATA;
        if (ip->data[ip->pos] >= FF_ARRAY_ELEMS(frame_sizes))
            return AVERROR_INVALIDDATA;
        frame_size = frame_sizes[ip->data[ip->pos]];
        if (ip->pos + frame_size > ip->size)
            return AVERROR_INVALIDDATA;

        if ((ret = av_new_packet(pkt, frame_size)) < 0)
            return ret;
        memcpy(pkt->data, &ip->data[ip->pos], frame_size);

        ip->pos += frame_size;
        data->group_finished = ip->pos >= ip->size;
    }
    pkt->stream_index = st->index;

    if (data->interleave_index == data->interleave_size) {
        /* End of the group: more to do only if frames remain or a packet
         * was stashed for the next group. */
        data->interleave_index = 0;
        if (!data->group_finished)
            return 1;
        else
            return data->next_size > 0;
    } else {
        data->interleave_index++;
        return 1;
    }
}
 
/* rtpdec entry point: a non-NULL buf is a fresh RTP payload to store,
 * a NULL buf asks for the next buffered frame of the current group. */
static int qcelp_parse_packet(AVFormatContext *ctx, PayloadContext *data,
                              AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                              const uint8_t *buf, int len, uint16_t seq,
                              int flags)
{
    return buf ? store_packet(ctx, data, st, pkt, timestamp, buf, len)
               : return_stored_frame(ctx, data, st, pkt, timestamp, buf, len);
}
 
/* QCELP is also a static payload type (PT 12) besides its dynamic name. */
RTPDynamicProtocolHandler ff_qcelp_dynamic_handler = {
    .enc_name          = "x-Purevoice",
    .codec_type        = AVMEDIA_TYPE_AUDIO,
    .codec_id          = AV_CODEC_ID_QCELP,
    .static_payload_id = 12,
    .alloc             = qcelp_new_context,
    .free              = qcelp_free_context,
    .parse_packet      = qcelp_parse_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_qdm2.c
0,0 → 1,318
/*
* QDesign Music 2 (QDM2) payload for RTP
* Copyright (c) 2010 Ronald S. Bultje
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief RTP support for the QDM2 payload (todo: wiki)
* @author Ronald S. Bultje <rbultje@ronald.bitfreak.net>
*/
 
#include <string.h>
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/avcodec.h"
#include "internal.h"
#include "rtp.h"
#include "rtpdec.h"
#include "rtpdec_formats.h"
 
/* Depacketizer state for the QDM2 payload: stream parameters parsed from
 * in-band config subpackets plus per-ID reassembly buffers. */
struct PayloadContext {
    /** values read from the config header, used as packet headers */
    //@{
    int block_type;            ///< superblock type, value 2 .. 8
    int block_size;            ///< from extradata, used as pkt length
    int subpkts_per_block;     ///< max. nr. of subpackets to add per output buffer
    //@}

    /** Temporary storage for superblock restoring, per packet ID (0x80 total) */
    //@{
    uint16_t len[0x80];        ///< how much the temporary buffer is filled
    uint8_t buf[0x80][0x800];  ///< the temporary storage buffer

    unsigned int cache;        ///< number of data packets that we have cached right now
    unsigned int n_pkts;       ///< number of RTP packets received since last packet output / config
    uint32_t timestamp;        ///< timestamp of next-to-be-returned packet
    //@}
};
 
/**
* Parse configuration (basically the codec-specific extradata) from
* an RTP config subpacket (starts with 0xff).
*
* Layout of the config subpacket (in bytes):
* 1: 0xFF <- config ID
* then an array {
* 1: size <- of the current item
* 1: item type <- 0 .. 4
* size-2: data <- data depends on the item type
* }
*
* Item 0 implies the end of the config subpacket, and has no data.
* Item 1 implies a stream configuration without extradata.
* Item 2 max. nr. of subpackets per superblock
* Item 3 superblock type for the stream
* Item 4 implies a stream configuration with extradata (size >= 0x1c).
*
* @return <0 on error, otherwise the number of bytes parsed from the
* input buffer.
*/
static int qdm2_parse_config(PayloadContext *qdm, AVStream *st,
                             const uint8_t *buf, const uint8_t *end)
{
    const uint8_t *p = buf;

    /* Walk the {size, type, data} item list; each item is validated to fit
     * inside [p, end) before its payload is touched. */
    while (end - p >= 2) {
        unsigned int item_len = p[0], config_item = p[1];

        if (item_len < 2 || end - p < item_len || config_item > 4)
            return AVERROR_INVALIDDATA;

        switch (config_item) {
            case 0: /* end of config block */
                return p - buf + item_len;
            case 1: /* stream without extradata */
                /* FIXME: set default qdm->block_size */
                break;
            case 2: /**< subpackets per block */
                if (item_len < 3)
                    return AVERROR_INVALIDDATA;
                qdm->subpkts_per_block = p[2];
                break;
            case 3: /* superblock type */
                if (item_len < 4)
                    return AVERROR_INVALIDDATA;
                qdm->block_type = AV_RB16(p + 2);
                break;
            case 4: /* stream with extradata */
                if (item_len < 30)
                    return AVERROR_INVALIDDATA;
                av_freep(&st->codec->extradata);
                if (ff_alloc_extradata(st->codec, 26 + item_len)) {
                    return AVERROR(ENOMEM);
                }
                /* Wrap the raw config into a QuickTime-style
                 * frma/QDM2/QDCA atom chain as the QDM2 decoder expects. */
                AV_WB32(st->codec->extradata, 12);
                memcpy(st->codec->extradata + 4, "frma", 4);
                memcpy(st->codec->extradata + 8, "QDM2", 4);
                AV_WB32(st->codec->extradata + 12, 6 + item_len);
                memcpy(st->codec->extradata + 16, "QDCA", 4);
                memcpy(st->codec->extradata + 20, p + 2, item_len - 2);
                AV_WB32(st->codec->extradata + 18 + item_len, 8);
                AV_WB32(st->codec->extradata + 22 + item_len, 0);

                qdm->block_size = AV_RB32(p + 26);
                break;
        }

        p += item_len;
    }

    /* Ran off the end without seeing an item-0 terminator. */
    return AVERROR(EAGAIN); /* not enough data */
}
 
/**
* Parse a single subpacket. We store this subpacket in an intermediate
* buffer (position depends on the ID (byte[0]). When called, at least
* 4 bytes are available for reading (see qdm2_parse_packet()).
*
* Layout of a single subpacket (RTP packets commonly contain multiple
* such subpackets) - length in bytes:
* 1: ordering ID <- 0 .. 0x7F
* 1: subpacket type <- 0 .. 0x7F; value & 0x80 means subpacket length = 2 bytes, else 1 byte
* 1/2: subpacket length <- length of the data following the flags/length fields
* if (subpacket type & 0x7F) == 0x7F
* 1: subpacket type, higher bits
* size: subpacket data
*
* The subpackets come in randomly, and should be encapsulated into 1
* or more superblocks (containing qdm->subpkts_per_block subpackets
* each) per RTP packet, in order of ascending "ordering ID", see
* qdm2_restore_block().
*
* @return <0 on error, otherwise the number of bytes parsed from the
* input buffer.
*/
static int qdm2_parse_subpacket(PayloadContext *qdm, AVStream *st,
                                const uint8_t *buf, const uint8_t *end)
{
    const uint8_t *p = buf;
    unsigned int id, len, type, to_copy;

    /* parse header so we know the size of the header/data */
    id   = *p++;
    type = *p++;
    if (type & 0x80) {
        /* high bit of the type selects a 16-bit length field */
        len   = AV_RB16(p);
        p    += 2;
        type &= 0x7F;
    } else
        len = *p++;

    if (end - p < len + (type == 0x7F) || id >= 0x80)
        return AVERROR_INVALIDDATA;
    if (type == 0x7F)
        type |= *p++ << 8; /* extended type byte */

    /* copy data into a temporary buffer; everything after the ordering ID
     * (header included) is stored, truncated to the 0x800-byte slot */
    to_copy = FFMIN(len + (p - &buf[1]), 0x800 - qdm->len[id]);
    memcpy(&qdm->buf[id][qdm->len[id]], buf + 1, to_copy);
    qdm->len[id] += to_copy;

    return p + len - buf;
}
 
/**
* Add a superblock header around a set of subpackets.
*
* @return <0 on error, else 0.
*/
/**
 * Add a superblock header around a set of subpackets.
 *
 * Picks the lowest packet ID that has cached data, wraps it in a
 * superblock header (type, length, optional checksum for types 2/4)
 * and emits it as one output packet of qdm->block_size bytes.
 *
 * @return <0 on error, else 0.
 */
static int qdm2_restore_block(PayloadContext *qdm, AVStream *st, AVPacket *pkt)
{
    int to_copy, n, res, include_csum;
    uint8_t *p, *csum_pos = NULL;

    /* create packet to hold subpkts into a superblock */
    /* av_assert0 instead of bare assert: only libavutil/avassert.h is
     * included in this file, and it matches the assertion style below. */
    av_assert0(qdm->cache > 0);
    for (n = 0; n < 0x80; n++)
        if (qdm->len[n] > 0)
            break;
    av_assert0(n < 0x80);

    if ((res = av_new_packet(pkt, qdm->block_size)) < 0)
        return res;
    memset(pkt->data, 0, pkt->size);
    pkt->stream_index = st->index;
    p                 = pkt->data;

    /* superblock header: long (16-bit) length form when the payload
     * exceeds one byte's range */
    if (qdm->len[n] > 0xff) {
        *p++ = qdm->block_type | 0x80;
        AV_WB16(p, qdm->len[n]);
        p += 2;
    } else {
        *p++ = qdm->block_type;
        *p++ = qdm->len[n];
    }
    if ((include_csum = (qdm->block_type == 2 || qdm->block_type == 4))) {
        csum_pos = p; /* filled in after the data is written */
        p       += 2;
    }

    /* subpacket data */
    to_copy = FFMIN(qdm->len[n], pkt->size - (p - pkt->data));
    memcpy(p, qdm->buf[n], to_copy);
    qdm->len[n] = 0;

    /* checksum header: plain byte sum over the whole superblock */
    if (include_csum) {
        unsigned int total = 0;
        uint8_t *q;

        for (q = pkt->data; q < &pkt->data[qdm->block_size]; q++)
            total += *q;
        AV_WB16(csum_pos, (uint16_t) total);
    }

    return 0;
}
 
/** return 0 on packet, no more left, 1 on packet, -1 on partial packet... */
/** return 0 on packet, no more left, 1 on packet, -1 on partial packet... */
static int qdm2_parse_packet(AVFormatContext *s, PayloadContext *qdm,
                             AVStream *st, AVPacket *pkt,
                             uint32_t *timestamp,
                             const uint8_t *buf, int len, uint16_t seq,
                             int flags)
{
    int res = AVERROR_INVALIDDATA, n;
    const uint8_t *end = buf + len, *p = buf;

    if (len > 0) {
        if (len < 2)
            return AVERROR_INVALIDDATA;

        /* configuration block */
        if (*p == 0xff) {
            if (qdm->n_pkts > 0) {
                /* a config mid-stream invalidates anything cached so far */
                av_log(s, AV_LOG_WARNING,
                       "Out of sequence config - dropping queue\n");
                qdm->n_pkts = 0;
                memset(qdm->len, 0, sizeof(qdm->len));
            }

            if ((res = qdm2_parse_config(qdm, st, ++p, end)) < 0)
                return res;
            p += res;

            /* We set codec_id to AV_CODEC_ID_NONE initially to
             * delay decoder initialization since extradata is
             * carried within the RTP stream, not SDP. Here,
             * by setting codec_id to AV_CODEC_ID_QDM2, we are signalling
             * to the decoder that it is OK to initialize. */
            st->codec->codec_id = AV_CODEC_ID_QDM2;
        }
        if (st->codec->codec_id == AV_CODEC_ID_NONE)
            return AVERROR(EAGAIN); /* no config seen yet */

        /* subpackets */
        while (end - p >= 4) {
            if ((res = qdm2_parse_subpacket(qdm, st, p, end)) < 0)
                return res;
            p += res;
        }

        qdm->timestamp = *timestamp;
        /* wait until a whole superblock's worth of RTP packets arrived */
        if (++qdm->n_pkts < qdm->subpkts_per_block)
            return AVERROR(EAGAIN);
        qdm->cache = 0;
        for (n = 0; n < 0x80; n++)
            if (qdm->len[n] > 0)
                qdm->cache++;
    }

    /* output the subpackets into freshly created superblock structures */
    if (!qdm->cache || (res = qdm2_restore_block(qdm, st, pkt)) < 0)
        return res;
    if (--qdm->cache == 0)
        qdm->n_pkts = 0;

    *timestamp     = qdm->timestamp;
    qdm->timestamp = RTP_NOTS_VALUE;

    /* 1 = call again with buf == NULL to drain the remaining cache */
    return (qdm->cache > 0) ? 1 : 0;
}
 
/* Allocate a zero-initialized QDM2 depacketizer context (NULL on OOM). */
static PayloadContext *qdm2_extradata_new(void)
{
    PayloadContext *qdm = av_mallocz(sizeof(*qdm));
    return qdm;
}
 
/* Free the context; it owns no secondary allocations. */
static void qdm2_extradata_free(PayloadContext *qdm)
{
    av_free(qdm);
}
 
/* codec_id stays NONE until in-band config arrives (see qdm2_parse_packet). */
RTPDynamicProtocolHandler ff_qdm2_dynamic_handler = {
    .enc_name     = "X-QDM",
    .codec_type   = AVMEDIA_TYPE_AUDIO,
    .codec_id     = AV_CODEC_ID_NONE,
    .parse_packet = qdm2_parse_packet,
    .alloc        = qdm2_extradata_new,
    .free         = qdm2_extradata_free,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_qt.c
0,0 → 1,262
/*
* RTP/Quicktime support.
* Copyright (c) 2009 Ronald S. Bultje
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief Quicktime-style RTP support
* @author Ronald S. Bultje <rbultje@ronald.bitfreak.net>
*/
 
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
#include "rtp.h"
#include "rtpdec.h"
#include "isom.h"
#include "libavcodec/get_bits.h"
 
/* Depacketizer state for RTP-X-QT: a pending reassembly packet plus
 * fixed-frame-size bookkeeping for packing scheme 1. */
struct PayloadContext {
    AVPacket pkt;        ///< data accumulated across RTP packets (scheme 3) or leftover frames (scheme 1)
    int bytes_per_frame, ///< constant frame size from the 'sd' sample description; 0 if unknown
        remaining;       ///< frames still to be emitted from pkt (scheme 1)
    uint32_t timestamp;  ///< RTP timestamp of the packet being reassembled
};
 
/*
 * Depacketize one Apple QuickTime-style RTP payload.
 *
 * Called with buf == NULL to drain frames left over from a previous
 * scheme-1 packet. Returns 0 (packet ready, none pending), 1 (packet
 * ready, more pending), AVERROR(EAGAIN) (nothing complete yet), or a
 * negative error.
 */
static int qt_rtp_parse_packet(AVFormatContext *s, PayloadContext *qt,
                               AVStream *st, AVPacket *pkt,
                               uint32_t *timestamp, const uint8_t *buf,
                               int len, uint16_t seq, int flags)
{
    AVIOContext pb;
    GetBitContext gb;
    int packing_scheme, has_payload_desc, has_packet_info, alen,
        has_marker_bit = flags & RTP_FLAG_MARKER;

    if (qt->remaining) {
        /* Drain call: emit the next constant-size frame stored in qt->pkt. */
        int num = qt->pkt.size / qt->bytes_per_frame;

        if (av_new_packet(pkt, qt->bytes_per_frame))
            return AVERROR(ENOMEM);
        pkt->stream_index = st->index;
        pkt->flags        = qt->pkt.flags;
        memcpy(pkt->data,
               &qt->pkt.data[(num - qt->remaining) * qt->bytes_per_frame],
               qt->bytes_per_frame);
        if (--qt->remaining == 0) {
            av_freep(&qt->pkt.data);
            qt->pkt.size = 0;
        }
        return qt->remaining > 0;
    }

    /**
     * The RTP payload is described in:
     * http://developer.apple.com/quicktime/icefloe/dispatch026.html
     */
    init_get_bits(&gb, buf, len << 3);
    ffio_init_context(&pb, buf, len, 0, NULL, NULL, NULL, NULL);

    if (len < 4)
        return AVERROR_INVALIDDATA;

    skip_bits(&gb, 4); // version
    if ((packing_scheme = get_bits(&gb, 2)) == 0)
        return AVERROR_INVALIDDATA;
    if (get_bits1(&gb))
        flags          |= RTP_FLAG_KEY;
    has_payload_desc    = get_bits1(&gb);
    has_packet_info     = get_bits1(&gb);
    skip_bits(&gb, 23); // reserved:7, cache payload info:1, payload ID:15

    if (has_payload_desc) {
        int data_len, pos, is_start, is_finish;
        uint32_t tag;

        pos = get_bits_count(&gb) >> 3;
        if (pos + 12 > len)
            return AVERROR_INVALIDDATA;

        skip_bits(&gb, 2); // has non-I frames:1, is sparse:1
        is_start  = get_bits1(&gb);
        is_finish = get_bits1(&gb);
        if (!is_start || !is_finish) {
            avpriv_request_sample(s, "RTP-X-QT with payload description "
                                     "split over several packets");
            return AVERROR_PATCHWELCOME;
        }
        skip_bits(&gb, 12); // reserved
        data_len = get_bits(&gb, 16);

        /* Sanity-check the media tag against the stream type and read the
         * timescale used for this stream's pts. */
        avio_seek(&pb, pos + 4, SEEK_SET);
        tag = avio_rl32(&pb);
        if ((st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                 tag != MKTAG('v','i','d','e')) ||
            (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                 tag != MKTAG('s','o','u','n')))
            return AVERROR_INVALIDDATA;
        avpriv_set_pts_info(st, 32, 1, avio_rb32(&pb));

        if (pos + data_len > len)
            return AVERROR_INVALIDDATA;
        /* TLVs */
        while (avio_tell(&pb) + 4 < pos + data_len) {
            int tlv_len = avio_rb16(&pb);
            tag = avio_rl16(&pb);
            if (avio_tell(&pb) + tlv_len > pos + data_len)
                return AVERROR_INVALIDDATA;

#define MKTAG16(a,b) MKTAG(a,b,0,0)
            switch (tag) {
            case MKTAG16('s','d'): {
                /* In-band sample description: parse it with the MOV
                 * demuxer's stsd reader on a temporary MOVContext. */
                MOVStreamContext *msc;
                void *priv_data = st->priv_data;
                int nb_streams = s->nb_streams;
                MOVContext *mc = av_mallocz(sizeof(*mc));
                if (!mc)
                    return AVERROR(ENOMEM);
                mc->fc = s;
                st->priv_data = msc = av_mallocz(sizeof(MOVStreamContext));
                if (!msc) {
                    av_free(mc);
                    st->priv_data = priv_data;
                    return AVERROR(ENOMEM);
                }
                /* ff_mov_read_stsd_entries updates stream s->nb_streams-1,
                 * so set it temporarily to indicate which stream to update. */
                s->nb_streams = st->index + 1;
                ff_mov_read_stsd_entries(mc, &pb, 1);
                qt->bytes_per_frame = msc->bytes_per_frame;
                av_free(msc);
                av_free(mc);
                st->priv_data = priv_data;
                s->nb_streams = nb_streams;
                break;
            }
            default:
                avio_skip(&pb, tlv_len);
                break;
            }
        }

        /* 32-bit alignment */
        avio_skip(&pb, ((avio_tell(&pb) + 3) & ~3) - avio_tell(&pb));
    } else
        avio_seek(&pb, 4, SEEK_SET);

    if (has_packet_info) {
        avpriv_request_sample(s, "RTP-X-QT with packet-specific info");
        return AVERROR_PATCHWELCOME;
    }

    alen = len - avio_tell(&pb);
    if (alen <= 0)
        return AVERROR_INVALIDDATA;

    switch (packing_scheme) {
    case 3: /* one data packet spread over 1 or multiple RTP packets */
        if (qt->pkt.size > 0 && qt->timestamp == *timestamp) {
            /* continuation of the packet being reassembled: grow buffer */
            int err;
            if ((err = av_reallocp(&qt->pkt.data, qt->pkt.size + alen +
                                   FF_INPUT_BUFFER_PADDING_SIZE)) < 0) {
                qt->pkt.size = 0;
                return err;
            }
        } else {
            /* new timestamp: drop any stale partial packet and restart */
            av_freep(&qt->pkt.data);
            av_init_packet(&qt->pkt);
            qt->pkt.data = av_realloc(NULL, alen + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!qt->pkt.data)
                return AVERROR(ENOMEM);
            qt->pkt.size = 0;
            qt->timestamp = *timestamp;
        }
        memcpy(qt->pkt.data + qt->pkt.size, buf + avio_tell(&pb), alen);
        qt->pkt.size += alen;
        if (has_marker_bit) {
            /* RTP marker = end of the access unit: hand the buffer over */
            int ret = av_packet_from_data(pkt, qt->pkt.data, qt->pkt.size);
            if (ret < 0)
                return ret;

            qt->pkt.size = 0;
            qt->pkt.data = NULL;
            pkt->flags        = flags & RTP_FLAG_KEY ? AV_PKT_FLAG_KEY : 0;
            pkt->stream_index = st->index;
            memset(pkt->data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
            return 0;
        }
        return AVERROR(EAGAIN);

    case 1: /* constant packet size, multiple packets per RTP packet */
        if (qt->bytes_per_frame == 0 ||
            alen % qt->bytes_per_frame != 0)
            return AVERROR_INVALIDDATA; /* wrongly padded */
        qt->remaining = (alen / qt->bytes_per_frame) - 1;
        if (av_new_packet(pkt, qt->bytes_per_frame))
            return AVERROR(ENOMEM);
        memcpy(pkt->data, buf + avio_tell(&pb), qt->bytes_per_frame);
        pkt->flags = flags & RTP_FLAG_KEY ? AV_PKT_FLAG_KEY : 0;
        pkt->stream_index = st->index;
        if (qt->remaining > 0) {
            /* stash the remaining frames for subsequent drain calls */
            av_freep(&qt->pkt.data);
            qt->pkt.data = av_realloc(NULL, qt->remaining * qt->bytes_per_frame);
            if (!qt->pkt.data) {
                av_free_packet(pkt);
                return AVERROR(ENOMEM);
            }
            qt->pkt.size = qt->remaining * qt->bytes_per_frame;
            memcpy(qt->pkt.data,
                   buf + avio_tell(&pb) + qt->bytes_per_frame,
                   qt->remaining * qt->bytes_per_frame);
            qt->pkt.flags = pkt->flags;
            return 1;
        }
        return 0;

    default: /* unimplemented */
        avpriv_request_sample(NULL, "RTP-X-QT with packing scheme 2");
        return AVERROR_PATCHWELCOME;
    }
}
 
/* Allocate a zero-initialized X-QT depacketizer context (NULL on OOM). */
static PayloadContext *qt_rtp_new(void)
{
    PayloadContext *qt = av_mallocz(sizeof(*qt));
    return qt;
}
 
/* Release the pending reassembly buffer, then the context itself. */
static void qt_rtp_free(PayloadContext *qt)
{
    if (qt) {
        av_freep(&qt->pkt.data);
        av_free(qt);
    }
}
 
/* Instantiate one RTPDynamicProtocolHandler for a (name-prefix, media
 * type, SDP encoding name) combination; all share the qt_rtp_* callbacks
 * and leave codec_id unresolved until the in-band sample description. */
#define RTP_QT_HANDLER(m, n, s, t) \
RTPDynamicProtocolHandler ff_ ## m ## _rtp_ ## n ## _handler = { \
    .enc_name         = s, \
    .codec_type       = t, \
    .codec_id         = AV_CODEC_ID_NONE, \
    .alloc            = qt_rtp_new,    \
    .free             = qt_rtp_free,   \
    .parse_packet     = qt_rtp_parse_packet, \
}
 
/* Audio and video handlers for both encoding names Apple servers use. */
RTP_QT_HANDLER(qt,        vid, "X-QT",        AVMEDIA_TYPE_VIDEO);
RTP_QT_HANDLER(qt,        aud, "X-QT",        AVMEDIA_TYPE_AUDIO);
RTP_QT_HANDLER(quicktime, vid, "X-QUICKTIME", AVMEDIA_TYPE_VIDEO);
RTP_QT_HANDLER(quicktime, aud, "X-QUICKTIME", AVMEDIA_TYPE_AUDIO);
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_svq3.c
0,0 → 1,133
/*
* Sorenson-3 (SVQ3/SV3V) payload for RTP
* Copyright (c) 2010 Ronald S. Bultje
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief RTP support for the SV3V (SVQ3) payload
* @author Ronald S. Bultje <rbultje@ronald.bitfreak.net>
* @see http://wiki.multimedia.cx/index.php?title=Sorenson_Video_3#Packetization
*/
 
#include <string.h>
#include "libavutil/intreadwrite.h"
#include "internal.h"
#include "rtp.h"
#include "rtpdec.h"
#include "rtpdec_formats.h"
 
/* Depacketizer state for SV3V: a dynamic buffer collecting the fragments
 * of the current frame plus its RTP timestamp. */
struct PayloadContext {
    AVIOContext *pktbuf; ///< accumulates fragment payloads; NULL between frames
    int64_t timestamp;   ///< RTP timestamp of the frame being assembled
};
 
/** return 0 on packet, <0 on partial packet or error... */
/** return 0 on packet, <0 on partial packet or error... */
static int svq3_parse_packet (AVFormatContext *s, PayloadContext *sv,
                              AVStream *st, AVPacket *pkt,
                              uint32_t *timestamp,
                              const uint8_t *buf, int len, uint16_t seq,
                              int flags)
{
    int config_packet, start_packet, end_packet;

    if (len < 2)
        return AVERROR_INVALIDDATA;

    /* flags live in the top bits of byte 0; byte 1 is unused */
    config_packet = buf[0] & 0x40;
    start_packet  = buf[0] & 0x20;
    end_packet    = buf[0] & 0x10;
    buf += 2;     // ignore buf[1]
    len -= 2;

    if (config_packet) {
        /* In-band sequence header: store it as "SEQH" + length + payload
         * in extradata and unlock decoder initialization. */

        av_freep(&st->codec->extradata);
        st->codec->extradata_size = 0;

        if (len < 2 || ff_alloc_extradata(st->codec, len + 8))
            return AVERROR_INVALIDDATA;

        memcpy(st->codec->extradata, "SEQH", 4);
        AV_WB32(st->codec->extradata + 4, len);
        memcpy(st->codec->extradata + 8, buf, len);

        /* We set codec_id to AV_CODEC_ID_NONE initially to
         * delay decoder initialization since extradata is
         * carried within the RTP stream, not SDP. Here,
         * by setting codec_id to AV_CODEC_ID_SVQ3, we are signalling
         * to the decoder that it is OK to initialize. */
        st->codec->codec_id = AV_CODEC_ID_SVQ3;

        return AVERROR(EAGAIN);
    }

    if (start_packet) {
        int res;

        /* Frame start: discard any half-assembled previous frame. */
        if (sv->pktbuf) {
            uint8_t *tmp;
            avio_close_dyn_buf(sv->pktbuf, &tmp);
            av_free(tmp);
        }
        if ((res = avio_open_dyn_buf(&sv->pktbuf)) < 0)
            return res;
        sv->timestamp   = *timestamp;
    }

    if (!sv->pktbuf)
        return AVERROR_INVALIDDATA; /* fragment without a preceding start */

    avio_write(sv->pktbuf, buf, len);

    if (end_packet) {
        int ret = ff_rtp_finalize_packet(pkt, &sv->pktbuf, st->index);
        if (ret < 0)
            return ret;

        /* use the timestamp of the start fragment for the whole frame */
        *timestamp = sv->timestamp;
        return 0;
    }

    return AVERROR(EAGAIN);
}
 
/* Allocate a zero-initialized SV3V depacketizer context (NULL on OOM). */
static PayloadContext *svq3_extradata_new(void)
{
    PayloadContext *sv = av_mallocz(sizeof(*sv));
    return sv;
}
 
/* Drop any partially assembled frame, then free the context. */
static void svq3_extradata_free(PayloadContext *sv)
{
    uint8_t *tmp;

    if (sv->pktbuf) {
        avio_close_dyn_buf(sv->pktbuf, &tmp);
        av_free(tmp);
    }
    av_free(sv);
}
 
/* codec_id stays NONE until the in-band config packet arrives
 * (see the config_packet branch in svq3_parse_packet). */
RTPDynamicProtocolHandler ff_svq3_dynamic_handler = {
    .enc_name     = "X-SV3V-ES",
    .codec_type   = AVMEDIA_TYPE_VIDEO,
    .codec_id     = AV_CODEC_ID_NONE, // see if (config_packet) above
    .parse_packet = svq3_parse_packet,
    .alloc        = svq3_extradata_new,
    .free         = svq3_extradata_free,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_vp8.c
0,0 → 1,298
/*
* RTP VP8 Depacketizer
* Copyright (c) 2010 Josh Allmann
* Copyright (c) 2012 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief RTP support for the VP8 payload
* @author Josh Allmann <joshua.allmann@gmail.com>
* @see http://tools.ietf.org/html/draft-ietf-payload-vp8-05
*/
 
#include "libavcodec/bytestream.h"
 
#include "rtpdec_formats.h"
 
/* Depacketizer state for VP8, tracking enough sequence/picture-id history
 * to decide whether lost packets broke the decodable stream. */
struct PayloadContext {
    AVIOContext *data;    ///< dyn buffer holding the frame being assembled
    uint32_t timestamp;   ///< RTP timestamp of the frame being assembled
    int is_keyframe;      ///< current frame is a keyframe
    /* If sequence_ok is set, we keep returning data (even if we might have
     * lost some data, but we haven't lost any too critical data that would
     * cause the decoder to desynchronize and output random garbage).
     */
    int sequence_ok;
    int first_part_size;  ///< size of the frame's first partition + header
    uint16_t prev_seq;    ///< RTP sequence number of the last packet seen
    int prev_pictureid;   ///< picture id of the current frame (-1 if absent)
    int broken_frame;     ///< stop appending data to the current frame
    /* If sequence_dirty is set, we have lost some data (critical or
     * non-critical) and decoding will have some sort of artefacts, and
     * we thus should request a new keyframe.
     */
    int sequence_dirty;
    int got_keyframe;     ///< at least one keyframe has been seen
};
 
/* Discard the partially assembled frame, if any. */
static void vp8_free_buffer(PayloadContext *vp8)
{
    if (vp8->data) {
        uint8_t *discarded;

        avio_close_dyn_buf(vp8->data, &discarded);
        av_free(discarded);
        vp8->data = NULL;
    }
}
 
/* Mark the stream undecodable, log why, drop the pending frame, and
 * return the AVERROR(EAGAIN) expected by the caller. */
static int vp8_broken_sequence(AVFormatContext *ctx, PayloadContext *vp8,
                               const char *msg)
{
    av_log(ctx, AV_LOG_WARNING, "%s", msg);
    vp8->sequence_ok = 0;
    vp8_free_buffer(vp8);
    return AVERROR(EAGAIN);
}
 
/*
 * Depacketize one VP8 RTP payload (draft-ietf-payload-vp8).
 *
 * Parses the payload descriptor, tracks sequence numbers / picture ids to
 * detect loss, and appends the payload to the current frame buffer. A
 * NULL buf flushes the pending frame. Returns 0 (frame ready), 1 (frame
 * ready and another pending), AVERROR(EAGAIN) (need more data) or a
 * negative error.
 */
static int vp8_handle_packet(AVFormatContext *ctx, PayloadContext *vp8,
                             AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                             const uint8_t *buf, int len, uint16_t seq,
                             int flags)
{
    int start_partition, end_packet;
    int extended_bits, part_id;
    int pictureid_present = 0, tl0picidx_present = 0, tid_present = 0,
        keyidx_present = 0;
    int pictureid = -1, pictureid_mask = 0;
    int returned_old_frame = 0;
    uint32_t old_timestamp = 0;

    if (!buf) {
        /* Flush call: hand over whatever is buffered. */
        if (vp8->data) {
            int ret = ff_rtp_finalize_packet(pkt, &vp8->data, st->index);
            if (ret < 0)
                return ret;
            *timestamp = vp8->timestamp;
            if (vp8->sequence_dirty)
                pkt->flags |= AV_PKT_FLAG_CORRUPT;
            return 0;
        }
        return AVERROR(EAGAIN);
    }

    if (len < 1)
        return AVERROR_INVALIDDATA;

    /* required payload descriptor byte */
    extended_bits   = buf[0] & 0x80;
    start_partition = buf[0] & 0x10;
    part_id         = buf[0] & 0x0f;
    end_packet      = flags & RTP_FLAG_MARKER;
    buf++;
    len--;
    if (extended_bits) {
        /* extension byte announcing the optional fields below */
        if (len < 1)
            return AVERROR_INVALIDDATA;
        pictureid_present = buf[0] & 0x80;
        tl0picidx_present = buf[0] & 0x40;
        tid_present       = buf[0] & 0x20;
        keyidx_present    = buf[0] & 0x10;
        buf++;
        len--;
    }
    if (pictureid_present) {
        if (len < 1)
            return AVERROR_INVALIDDATA;
        if (buf[0] & 0x80) {
            /* 15-bit picture id */
            if (len < 2)
                return AVERROR_INVALIDDATA;
            pictureid = AV_RB16(buf) & 0x7fff;
            pictureid_mask = 0x7fff;
            buf += 2;
            len -= 2;
        } else {
            /* 7-bit picture id */
            pictureid = buf[0] & 0x7f;
            pictureid_mask = 0x7f;
            buf++;
            len--;
        }
    }
    if (tl0picidx_present) {
        // Ignoring temporal level zero index
        buf++;
        len--;
    }
    if (tid_present || keyidx_present) {
        // Ignoring temporal layer index, layer sync bit and keyframe index
        buf++;
        len--;
    }
    if (len < 1)
        return AVERROR_INVALIDDATA;

    if (start_partition && part_id == 0 && len >= 3) {
        /* Start of a new frame: decide whether the sequence is intact. */
        int res;
        int non_key = buf[0] & 0x01;
        if (!non_key) {
            vp8_free_buffer(vp8);
            // Keyframe, decoding ok again
            vp8->sequence_ok = 1;
            vp8->sequence_dirty = 0;
            vp8->got_keyframe = 1;
        } else {
            /* Interframe: only decodable if the first partition of the
             * previous frame was fully received. */
            int can_continue = vp8->data && !vp8->is_keyframe &&
                               avio_tell(vp8->data) >= vp8->first_part_size;
            if (!vp8->sequence_ok)
                return AVERROR(EAGAIN);
            if (!vp8->got_keyframe)
                return vp8_broken_sequence(ctx, vp8, "Keyframe missing\n");
            if (pictureid >= 0) {
                if (pictureid != ((vp8->prev_pictureid + 1) & pictureid_mask)) {
                    return vp8_broken_sequence(ctx, vp8,
                                               "Missed a picture, sequence broken\n");
                } else {
                    if (vp8->data && !can_continue)
                        return vp8_broken_sequence(ctx, vp8,
                                                   "Missed a picture, sequence broken\n");
                }
            } else {
                uint16_t expected_seq = vp8->prev_seq + 1;
                int16_t diff = seq - expected_seq;
                if (vp8->data) {
                    // No picture id, so we can't know if missed packets
                    // contained any new frames. If diff == 0, we did get
                    // later packets from the same frame (matching timestamp),
                    // so we know we didn't miss any frame. If diff == 1 and
                    // we still have data (not flushed by the end of frame
                    // marker), the single missed packet must have been part
                    // of the same frame.
                    if ((diff == 0 || diff == 1) && can_continue) {
                        // Proceed with what we have
                    } else {
                        return vp8_broken_sequence(ctx, vp8,
                                                   "Missed too much, sequence broken\n");
                    }
                } else {
                    if (diff != 0)
                        return vp8_broken_sequence(ctx, vp8,
                                                   "Missed unknown data, sequence broken\n");
                }
            }
            if (vp8->data) {
                /* An unfinished previous frame exists; emit what we have
                 * (marked corrupt) before starting the new one. */
                vp8->sequence_dirty = 1;
                if (avio_tell(vp8->data) >= vp8->first_part_size) {
                    int ret = ff_rtp_finalize_packet(pkt, &vp8->data, st->index);
                    if (ret < 0)
                        return ret;
                    pkt->flags |= AV_PKT_FLAG_CORRUPT;
                    returned_old_frame = 1;
                    old_timestamp = vp8->timestamp;
                } else {
                    // Shouldn't happen
                    vp8_free_buffer(vp8);
                }
            }
        }
        /* first_part_size comes from the VP8 frame header (19-bit field). */
        vp8->first_part_size = (AV_RL16(&buf[1]) << 3 | buf[0] >> 5) + 3;
        if ((res = avio_open_dyn_buf(&vp8->data)) < 0)
            return res;
        vp8->timestamp = *timestamp;
        vp8->broken_frame = 0;
        vp8->prev_pictureid = pictureid;
        vp8->is_keyframe = !non_key;
    } else {
        /* Continuation packet of the current frame. */
        uint16_t expected_seq = vp8->prev_seq + 1;

        if (!vp8->sequence_ok)
            return AVERROR(EAGAIN);

        if (vp8->timestamp != *timestamp) {
            // Missed the start of the new frame, sequence broken
            return vp8_broken_sequence(ctx, vp8,
                                       "Received no start marker; dropping frame\n");
        }

        if (seq != expected_seq) {
            if (vp8->is_keyframe) {
                return vp8_broken_sequence(ctx, vp8,
                                           "Missed part of a keyframe, sequence broken\n");
            } else if (vp8->data && avio_tell(vp8->data) >= vp8->first_part_size) {
                /* first partition complete: keep it, drop the rest */
                vp8->broken_frame = 1;
                vp8->sequence_dirty = 1;
            } else {
                return vp8_broken_sequence(ctx, vp8,
                                           "Missed part of the first partition, sequence broken\n");
            }
        }
    }

    if (!vp8->data)
        return vp8_broken_sequence(ctx, vp8, "Received no start marker\n");

    vp8->prev_seq = seq;
    if (!vp8->broken_frame)
        avio_write(vp8->data, buf, len);

    if (returned_old_frame) {
        /* pkt already carries the previous (corrupt) frame. */
        *timestamp = old_timestamp;
        return end_packet ? 1 : 0;
    }

    if (end_packet) {
        int ret;
        ret = ff_rtp_finalize_packet(pkt, &vp8->data, st->index);
        if (ret < 0)
            return ret;
        if (vp8->sequence_dirty)
            pkt->flags |= AV_PKT_FLAG_CORRUPT;
        return 0;
    }

    return AVERROR(EAGAIN);
}
 
/**
 * Allocate the per-stream VP8 depacketizer state.
 * The stream starts out with sequence_ok set so decoding may begin
 * as soon as data arrives; all other fields stay zeroed.
 *
 * @return newly allocated context, or NULL on allocation failure
 */
static PayloadContext *vp8_new_context(void)
{
    PayloadContext *ctx = av_mallocz(sizeof(*ctx));

    if (ctx)
        ctx->sequence_ok = 1;
    return ctx;
}
 
/**
 * Release a VP8 depacketizer context: drop any partially assembled
 * frame buffer first, then the context itself.
 */
static void vp8_free_context(PayloadContext *data)
{
    /* The dynamic buffer must be flushed before the owning struct goes away. */
    vp8_free_buffer(data);
    av_free(data);
}
 
/**
 * Report whether the depacketizer wants a new keyframe: either the
 * sequence is broken (sequence_ok cleared) or the current output is
 * known to be damaged (sequence_dirty set).
 */
static int vp8_need_keyframe(PayloadContext *vp8)
{
    if (!vp8->sequence_ok)
        return 1;
    return vp8->sequence_dirty;
}
 
/* RTP depacketizer registration for the VP8 payload format
 * (IETF VP8 RTP payload draft; matched by the SDP encoding name "VP8"). */
RTPDynamicProtocolHandler ff_vp8_dynamic_handler = {
    .enc_name       = "VP8",
    .codec_type     = AVMEDIA_TYPE_VIDEO,
    .codec_id       = AV_CODEC_ID_VP8,
    .alloc          = vp8_new_context,
    .free           = vp8_free_context,
    .parse_packet   = vp8_handle_packet,
    .need_keyframe  = vp8_need_keyframe,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpdec_xiph.c
0,0 → 1,411
/*
* Xiph RTP Protocols
* Copyright (c) 2009 Colin McQuillian
* Copyright (c) 2010 Josh Allmann
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief Xiph / RTP Code
* @author Colin McQuillan <m.niloc@gmail.com>
* @author Josh Allmann <joshua.allmann@gmail.com>
*/
 
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/base64.h"
#include "libavcodec/bytestream.h"
 
#include "internal.h"
#include "rtpdec.h"
#include "rtpdec_formats.h"
 
/**
 * RTP/Xiph specific private data.
 */
struct PayloadContext {
    unsigned ident;             ///< 24-bit stream configuration identifier
    uint32_t timestamp;         ///< RTP timestamp of the fragment being reassembled
    AVIOContext* fragment;      ///< buffer for split payloads
    uint8_t *split_buf;         ///< storage for extra packets of a packed payload
    int split_pos, split_buf_len, split_buf_size; ///< read offset / used / allocated bytes
    int split_pkts;             ///< packets still queued in split_buf
};
 
/** Allocate a zero-initialized Xiph depacketizer context. */
static PayloadContext *xiph_new_context(void)
{
    PayloadContext *data = av_mallocz(sizeof(*data));
    return data;
}
 
/**
 * Discard a partially reassembled fragmented payload, if one exists.
 * Closing the dynamic buffer hands back its storage, which we then free.
 */
static inline void free_fragment_if_needed(PayloadContext *data)
{
    uint8_t *stored;

    if (!data->fragment)
        return;
    avio_close_dyn_buf(data->fragment, &stored);
    av_free(stored);
    data->fragment = NULL;
}
 
/**
 * Free a Xiph depacketizer context together with any pending
 * fragment buffer and split-packet storage it owns.
 */
static void xiph_free_context(PayloadContext *ctx)
{
    free_fragment_if_needed(ctx);
    av_free(ctx->split_buf);
    av_free(ctx);
}
 
/**
 * Stream-init hook for Vorbis: request header parsing on the stream so
 * the in-band Xiph headers are picked up. Always succeeds.
 */
static av_cold int xiph_vorbis_init(AVFormatContext *ctx, int st_index,
                                    PayloadContext *data)
{
    if (st_index >= 0)
        ctx->streams[st_index]->need_parsing = AVSTREAM_PARSE_HEADERS;
    return 0;
}
 
 
/**
 * Depacketize one RTP packet carrying Xiph (Theora/Vorbis) data.
 *
 * Payload layout (RFC 5215 section 2.2): 24-bit ident, 2-bit fragment
 * type, 2-bit data type, 4-bit packet count, then for each packet a
 * 16-bit big-endian length followed by the data.
 *
 * @param buf RTP payload, or NULL to drain packets previously queued in
 *            data->split_buf from a packed (multi-packet) payload
 * @return 0 when pkt was filled, >0 when more queued packets remain
 *         (call again with buf == NULL), AVERROR(EAGAIN) when more RTP
 *         packets are needed, or another negative error code
 */
static int xiph_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                              AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                              const uint8_t *buf, int len, uint16_t seq,
                              int flags)
{

    int ident, fragmented, tdt, num_pkts, pkt_len;

    if (!buf) {
        // Drain mode: return the next packet stashed in split_buf.
        if (!data->split_buf || data->split_pos + 2 > data->split_buf_len ||
            data->split_pkts <= 0) {
            av_log(ctx, AV_LOG_ERROR, "No more data to return\n");
            return AVERROR_INVALIDDATA;
        }
        // Each queued packet is prefixed with its 16-bit big-endian length.
        pkt_len = AV_RB16(data->split_buf + data->split_pos);
        data->split_pos += 2;
        if (data->split_pos + pkt_len > data->split_buf_len) {
            av_log(ctx, AV_LOG_ERROR, "Not enough data to return\n");
            return AVERROR_INVALIDDATA;
        }
        if (av_new_packet(pkt, pkt_len)) {
            av_log(ctx, AV_LOG_ERROR, "Out of memory.\n");
            return AVERROR(ENOMEM);
        }
        pkt->stream_index = st->index;
        memcpy(pkt->data, data->split_buf + data->split_pos, pkt_len);
        data->split_pos += pkt_len;
        data->split_pkts--;
        // >0 tells the caller more packets are queued.
        return data->split_pkts > 0;
    }

    if (len < 6) {
        av_log(ctx, AV_LOG_ERROR, "Invalid %d byte packet\n", len);
        return AVERROR_INVALIDDATA;
    }

    // read xiph rtp headers
    ident       = AV_RB24(buf);
    fragmented  = buf[3] >> 6;
    tdt         = (buf[3] >> 4) & 3;
    num_pkts    = buf[3] & 0xf;
    pkt_len     = AV_RB16(buf + 4);

    if (pkt_len > len - 6) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid packet length %d in %d byte packet\n", pkt_len,
               len);
        return AVERROR_INVALIDDATA;
    }

    if (ident != data->ident) {
        // A new ident would mean new codec setup headers mid-stream.
        av_log(ctx, AV_LOG_ERROR,
               "Unimplemented Xiph SDP configuration change detected\n");
        return AVERROR_PATCHWELCOME;
    }

    if (tdt) {
        // Only data type 0 (raw payload) is handled; 1/2 carry headers.
        av_log(ctx, AV_LOG_ERROR,
               "Unimplemented RTP Xiph packet settings (%d,%d,%d)\n",
               fragmented, tdt, num_pkts);
        return AVERROR_PATCHWELCOME;
    }

    buf += 6; // move past header bits
    len -= 6;

    if (fragmented == 0) {
        // Unfragmented: first packet goes out immediately.
        if (av_new_packet(pkt, pkt_len)) {
            av_log(ctx, AV_LOG_ERROR, "Out of memory.\n");
            return AVERROR(ENOMEM);
        }
        pkt->stream_index = st->index;
        memcpy(pkt->data, buf, pkt_len);
        buf += pkt_len;
        len -= pkt_len;
        num_pkts--;

        if (num_pkts > 0) {
            // Packed payload: queue the remaining packets for drain mode.
            if (len > data->split_buf_size || !data->split_buf) {
                av_freep(&data->split_buf);
                data->split_buf_size = 2 * len;
                data->split_buf = av_malloc(data->split_buf_size);
                if (!data->split_buf) {
                    av_log(ctx, AV_LOG_ERROR, "Out of memory.\n");
                    av_free_packet(pkt);
                    return AVERROR(ENOMEM);
                }
            }
            memcpy(data->split_buf, buf, len);
            data->split_buf_len = len;
            data->split_pos = 0;
            data->split_pkts = num_pkts;
            return 1;
        }

        return 0;

    } else if (fragmented == 1) {
        // start of xiph data fragment
        int res;

        // end packet has been lost somewhere, so drop buffered data
        free_fragment_if_needed(data);

        if((res = avio_open_dyn_buf(&data->fragment)) < 0)
            return res;

        avio_write(data->fragment, buf, pkt_len);
        data->timestamp = *timestamp;

    } else {
        // fragmented == 2 (middle) or 3 (end) of a split payload
        av_assert1(fragmented < 4);
        if (data->timestamp != *timestamp) {
            // skip if fragmented timestamp is incorrect;
            // a start packet has been lost somewhere
            free_fragment_if_needed(data);
            av_log(ctx, AV_LOG_ERROR, "RTP timestamps don't match!\n");
            return AVERROR_INVALIDDATA;
        }
        if (!data->fragment) {
            av_log(ctx, AV_LOG_WARNING,
                   "Received packet without a start fragment; dropping.\n");
            return AVERROR(EAGAIN);
        }

        // copy data to fragment buffer
        avio_write(data->fragment, buf, pkt_len);

        if (fragmented == 3) {
            // end of xiph data packet
            int ret = ff_rtp_finalize_packet(pkt, &data->fragment, st->index);
            if (ret < 0) {
                av_log(ctx, AV_LOG_ERROR,
                       "Error occurred when getting fragment buffer.");
                return ret;
            }

            return 0;
        }
    }

    return AVERROR(EAGAIN);
}
 
/**
 * Decode one variable-length integer using the base-128 ("varint")
 * encoding of RFC 5215 section 3.1.1: each byte contributes its low
 * seven bits, most significant group first; a clear top bit marks the
 * final byte.
 *
 * @param buf     in/out: advanced past the bytes consumed
 * @param buf_end one past the last readable byte
 * @return the decoded value, or 0 if the buffer ended mid-number
 */
static int get_base128(const uint8_t ** buf, const uint8_t * buf_end)
{
    int value = 0;

    while (*buf < buf_end) {
        int byte = *(*buf)++;

        value = (value << 7) | (byte & 0x7f);
        if (!(byte & 0x80))
            return value;
    }
    return 0;
}
 
/**
 * Parse the packed Theora/Vorbis setup headers delivered in the SDP
 * "configuration" attribute and lay them out as Xiph-style extradata
 * (xiphlacing lengths for the first two headers, then the raw headers).
 * Based off parse_packed_headers in Vorbis RTP.
 *
 * Stores the 24-bit stream ident in xiph_data->ident and fills
 * codec->extradata / extradata_size.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on malformed input,
 *         AVERROR_PATCHWELCOME for unsupported header counts,
 *         AVERROR(ENOMEM) on allocation failure
 */
static int
parse_packed_headers(const uint8_t * packed_headers,
                     const uint8_t * packed_headers_end,
                     AVCodecContext * codec, PayloadContext * xiph_data)
{

    unsigned num_packed, num_headers, length, length1, length2, extradata_alloc;
    uint8_t *ptr;

    if (packed_headers_end - packed_headers < 9) {
        av_log(codec, AV_LOG_ERROR,
               "Invalid %td byte packed header.",
               packed_headers_end - packed_headers);
        return AVERROR_INVALIDDATA;
    }

    /* Fixed preamble: 32-bit packed-header count, 24-bit ident, 16-bit
     * total length, then base128-coded header count and the lengths of
     * the first two headers (the third is implied by the remainder). */
    num_packed         = bytestream_get_be32(&packed_headers);
    xiph_data->ident   = bytestream_get_be24(&packed_headers);
    length             = bytestream_get_be16(&packed_headers);
    num_headers        = get_base128(&packed_headers, packed_headers_end);
    length1            = get_base128(&packed_headers, packed_headers_end);
    length2            = get_base128(&packed_headers, packed_headers_end);

    if (num_packed != 1 || num_headers > 3) {
        av_log(codec, AV_LOG_ERROR,
               "Unimplemented number of headers: %d packed headers, %d headers\n",
               num_packed, num_headers);
        return AVERROR_PATCHWELCOME;
    }

    if (packed_headers_end - packed_headers != length ||
        length1 > length || length2 > length - length1) {
        av_log(codec, AV_LOG_ERROR,
               "Bad packed header lengths (%d,%d,%td,%d)\n", length1,
               length2, packed_headers_end - packed_headers, length);
        return AVERROR_INVALIDDATA;
    }

    /* allocate extra space:
     * -- length/255 +2 for xiphlacing
     * -- one for the '2' marker
     * -- FF_INPUT_BUFFER_PADDING_SIZE required */
    extradata_alloc = length + length/255 + 3 + FF_INPUT_BUFFER_PADDING_SIZE;

    if (ff_alloc_extradata(codec, extradata_alloc)) {
        av_log(codec, AV_LOG_ERROR, "Out of memory\n");
        return AVERROR(ENOMEM);
    }
    ptr = codec->extradata;
    *ptr++ = 2;  // number of packets minus one (Xiph convention)
    ptr += av_xiphlacing(ptr, length1);
    ptr += av_xiphlacing(ptr, length2);
    memcpy(ptr, packed_headers, length);
    ptr += length;
    codec->extradata_size = ptr - codec->extradata;
    // clear out remaining parts of the buffer
    memset(ptr, 0, extradata_alloc - codec->extradata_size);

    return 0;
}
 
/**
 * Handle one attribute/value pair from the SDP a=fmtp: line for a
 * Theora/Vorbis stream.
 *
 * Recognized attributes: sampling (pixel format), width, height,
 * delivery-method, configuration-uri, configuration (base64 packed
 * headers, forwarded to parse_packed_headers()).
 *
 * @return 0 on success or for attributes that are silently ignored,
 *         AVERROR_PATCHWELCOME for known-but-unsupported attributes,
 *         a negative error code otherwise
 */
static int xiph_parse_fmtp_pair(AVStream* stream,
                                PayloadContext *xiph_data,
                                char *attr, char *value)
{
    AVCodecContext *codec = stream->codec;
    int result = 0;

    if (!strcmp(attr, "sampling")) {
        if (!strcmp(value, "YCbCr-4:2:0")) {
            codec->pix_fmt = AV_PIX_FMT_YUV420P;
        } else if (!strcmp(value, "YCbCr-4:4:2")) {
            codec->pix_fmt = AV_PIX_FMT_YUV422P;
        } else if (!strcmp(value, "YCbCr-4:4:4")) {
            codec->pix_fmt = AV_PIX_FMT_YUV444P;
        } else {
            /* Fixed: report the offending value; previously this printed
             * the attribute name ("sampling") instead. */
            av_log(codec, AV_LOG_ERROR,
                   "Unsupported pixel format %s\n", value);
            return AVERROR_INVALIDDATA;
        }
    } else if (!strcmp(attr, "width")) {
        /* This is an integer between 1 and 1048561
         * and MUST be in multiples of 16. */
        codec->width = atoi(value);
        return 0;
    } else if (!strcmp(attr, "height")) {
        /* This is an integer between 1 and 1048561
         * and MUST be in multiples of 16. */
        codec->height = atoi(value);
        return 0;
    } else if (!strcmp(attr, "delivery-method")) {
        /* Possible values are: inline, in_band, out_band/specific_name. */
        return AVERROR_PATCHWELCOME;
    } else if (!strcmp(attr, "configuration-uri")) {
        /* NOTE: configuration-uri is supported only under 2 conditions:
         *--after the delivery-method tag
         * --with a delivery-method value of out_band */
        return AVERROR_PATCHWELCOME;
    } else if (!strcmp(attr, "configuration")) {
        /* NOTE: configuration is supported only AFTER the delivery-method tag
         * The configuration value is a base64 encoded packed header */
        uint8_t *decoded_packet = NULL;
        int packet_size;
        size_t decoded_alloc = strlen(value) / 4 * 3 + 4;

        if (decoded_alloc <= INT_MAX) {
            decoded_packet = av_malloc(decoded_alloc);
            if (decoded_packet) {
                packet_size =
                    av_base64_decode(decoded_packet, value, decoded_alloc);

                result = parse_packed_headers
                    (decoded_packet, decoded_packet + packet_size, codec,
                    xiph_data);
            } else {
                av_log(codec, AV_LOG_ERROR,
                       "Out of memory while decoding SDP configuration.\n");
                result = AVERROR(ENOMEM);
            }
        } else {
            av_log(codec, AV_LOG_ERROR, "Packet too large\n");
            result = AVERROR_INVALIDDATA;
        }
        av_free(decoded_packet);
    }
    return result;
}
 
/**
 * SDP line hook: forward the contents of a=fmtp: lines to
 * xiph_parse_fmtp_pair(); all other lines are ignored.
 */
static int xiph_parse_sdp_line(AVFormatContext *s, int st_index,
                               PayloadContext *data, const char *line)
{
    const char *params;

    if (st_index < 0)
        return 0;
    if (!av_strstart(line, "fmtp:", &params))
        return 0;
    return ff_parse_fmtp(s->streams[st_index], data, params,
                         xiph_parse_fmtp_pair);
}
 
/* RTP depacketizer registration for Theora video (SDP name "theora"). */
RTPDynamicProtocolHandler ff_theora_dynamic_handler = {
    .enc_name         = "theora",
    .codec_type       = AVMEDIA_TYPE_VIDEO,
    .codec_id         = AV_CODEC_ID_THEORA,
    .parse_sdp_a_line = xiph_parse_sdp_line,
    .alloc            = xiph_new_context,
    .free             = xiph_free_context,
    .parse_packet     = xiph_handle_packet
};
 
/* RTP depacketizer registration for Vorbis audio (SDP name "vorbis");
 * shares the Xiph packet/SDP handling with Theora but additionally
 * requests header parsing via xiph_vorbis_init(). */
RTPDynamicProtocolHandler ff_vorbis_dynamic_handler = {
    .enc_name         = "vorbis",
    .codec_type       = AVMEDIA_TYPE_AUDIO,
    .codec_id         = AV_CODEC_ID_VORBIS,
    .init             = xiph_vorbis_init,
    .parse_sdp_a_line = xiph_parse_sdp_line,
    .alloc            = xiph_new_context,
    .free             = xiph_free_context,
    .parse_packet     = xiph_handle_packet
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc.c
0,0 → 1,608
/*
* RTP output format
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "mpegts.h"
#include "internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/random_seed.h"
#include "libavutil/opt.h"
 
#include "rtpenc.h"
 
/* Private muxer options, exposed through the AVOption system. */
static const AVOption options[] = {
    FF_RTP_FLAG_OPTS(RTPMuxContext, flags),
    { "payload_type", "Specify RTP payload type", offsetof(RTPMuxContext, payload_type), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 127, AV_OPT_FLAG_ENCODING_PARAM },
    { "ssrc", "Stream identifier", offsetof(RTPMuxContext, ssrc), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "cname", "CNAME to include in RTCP SR packets", offsetof(RTPMuxContext, cname), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "seq", "Starting sequence number", offsetof(RTPMuxContext, seq), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 65535, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};
 
/* AVClass binding that makes the options table above reachable from
 * the muxer's priv_data. */
static const AVClass rtp_muxer_class = {
    .class_name = "RTP muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
#define RTCP_SR_SIZE 28
 
static int is_supported(enum AVCodecID id)
{
switch(id) {
case AV_CODEC_ID_H263:
case AV_CODEC_ID_H263P:
case AV_CODEC_ID_H264:
case AV_CODEC_ID_MPEG1VIDEO:
case AV_CODEC_ID_MPEG2VIDEO:
case AV_CODEC_ID_MPEG4:
case AV_CODEC_ID_AAC:
case AV_CODEC_ID_MP2:
case AV_CODEC_ID_MP3:
case AV_CODEC_ID_PCM_ALAW:
case AV_CODEC_ID_PCM_MULAW:
case AV_CODEC_ID_PCM_S8:
case AV_CODEC_ID_PCM_S16BE:
case AV_CODEC_ID_PCM_S16LE:
case AV_CODEC_ID_PCM_U16BE:
case AV_CODEC_ID_PCM_U16LE:
case AV_CODEC_ID_PCM_U8:
case AV_CODEC_ID_MPEG2TS:
case AV_CODEC_ID_AMR_NB:
case AV_CODEC_ID_AMR_WB:
case AV_CODEC_ID_VORBIS:
case AV_CODEC_ID_THEORA:
case AV_CODEC_ID_VP8:
case AV_CODEC_ID_ADPCM_G722:
case AV_CODEC_ID_ADPCM_G726:
case AV_CODEC_ID_ILBC:
case AV_CODEC_ID_MJPEG:
case AV_CODEC_ID_SPEEX:
case AV_CODEC_ID_OPUS:
return 1;
default:
return 0;
}
}
 
/**
 * Initialize the RTP muxer for the (single) stream: pick/validate the
 * payload type, seed SSRC, timestamps and sequence number, size the
 * output buffer, and apply per-codec packetization setup.
 *
 * @return 0 on success, a negative error code otherwise
 */
static int rtp_write_header(AVFormatContext *s1)
{
    RTPMuxContext *s = s1->priv_data;
    int n;
    AVStream *st;

    if (s1->nb_streams != 1) {
        av_log(s1, AV_LOG_ERROR, "Only one stream supported in the RTP muxer\n");
        return AVERROR(EINVAL);
    }
    st = s1->streams[0];
    if (!is_supported(st->codec->codec_id)) {
        av_log(s1, AV_LOG_ERROR, "Unsupported codec %s\n", avcodec_get_name(st->codec->codec_id));

        return -1;
    }

    if (s->payload_type < 0) {
        /* Re-validate non-dynamic payload types */
        if (st->id < RTP_PT_PRIVATE)
            st->id = ff_rtp_get_payload_type(s1, st->codec, -1);

        s->payload_type = st->id;
    } else {
        /* private option takes priority */
        st->id = s->payload_type;
    }

    /* Random timestamp base and SSRC per RTP's recommendations. */
    s->base_timestamp = av_get_random_seed();
    s->timestamp = s->base_timestamp;
    s->cur_timestamp = 0;
    if (!s->ssrc)
        s->ssrc = av_get_random_seed();
    s->first_packet = 1;
    s->first_rtcp_ntp_time = ff_ntp_time();
    if (s1->start_time_realtime)
        /* Round the NTP time to whole milliseconds. */
        s->first_rtcp_ntp_time = (s1->start_time_realtime / 1000) * 1000 +
                                 NTP_OFFSET_US;
    // Pick a random sequence start number, but in the lower end of the
    // available range, so that any wraparound doesn't happen immediately.
    // (Immediate wraparound would be an issue for SRTP.)
    if (s->seq < 0) {
        if (st->codec->flags & CODEC_FLAG_BITEXACT) {
            s->seq = 0;
        } else
            s->seq = av_get_random_seed() & 0x0fff;
    } else
        s->seq &= 0xffff; // Use the given parameter, wrapped to the right interval

    /* Clamp the packet size to the transport's limit, if any. */
    if (s1->packet_size) {
        if (s1->pb->max_packet_size)
            s1->packet_size = FFMIN(s1->packet_size,
                                    s1->pb->max_packet_size);
    } else
        s1->packet_size = s1->pb->max_packet_size;
    if (s1->packet_size <= 12) {
        // 12 bytes is exactly the fixed RTP header; no room for payload.
        av_log(s1, AV_LOG_ERROR, "Max packet size %d too low\n", s1->packet_size);
        return AVERROR(EIO);
    }
    s->buf = av_malloc(s1->packet_size);
    if (s->buf == NULL) {
        return AVERROR(ENOMEM);
    }
    s->max_payload_size = s1->packet_size - 12;

    /* Derive how many frames may be aggregated per packet from max_delay. */
    s->max_frames_per_packet = 0;
    if (s1->max_delay > 0) {
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            int frame_size = av_get_audio_frame_duration(st->codec, 0);
            if (!frame_size)
                frame_size = st->codec->frame_size;
            if (frame_size == 0) {
                av_log(s1, AV_LOG_ERROR, "Cannot respect max delay: frame size = 0\n");
            } else {
                s->max_frames_per_packet =
                        av_rescale_q_rnd(s1->max_delay,
                                         AV_TIME_BASE_Q,
                                         (AVRational){ frame_size, st->codec->sample_rate },
                                         AV_ROUND_DOWN);
            }
        }
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* FIXME: We should round down here... */
            s->max_frames_per_packet = av_rescale_q(s1->max_delay, (AVRational){1, 1000000}, st->codec->time_base);
        }
    }

    /* 90 kHz is the default RTP clock; audio cases override it below. */
    avpriv_set_pts_info(st, 32, 1, 90000);
    switch(st->codec->codec_id) {
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
        /* reserve 4 bytes for the payload header (RFC 2250) */
        s->buf_ptr = s->buf + 4;
        break;
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
        break;
    case AV_CODEC_ID_MPEG2TS:
        /* carry a whole number of TS packets per RTP packet */
        n = s->max_payload_size / TS_PACKET_SIZE;
        if (n < 1)
            n = 1;
        s->max_payload_size = n * TS_PACKET_SIZE;
        s->buf_ptr = s->buf;
        break;
    case AV_CODEC_ID_H264:
        /* check for H.264 MP4 syntax */
        if (st->codec->extradata_size > 4 && st->codec->extradata[0] == 1) {
            s->nal_length_size = (st->codec->extradata[4] & 0x03) + 1;
        }
        break;
    case AV_CODEC_ID_VORBIS:
    case AV_CODEC_ID_THEORA:
        if (!s->max_frames_per_packet) s->max_frames_per_packet = 15;
        s->max_frames_per_packet = av_clip(s->max_frames_per_packet, 1, 15);
        s->max_payload_size -= 6; // ident+frag+tdt/vdt+pkt_num+pkt_length
        s->num_frames = 0;
        goto defaultcase;
    case AV_CODEC_ID_ADPCM_G722:
        /* Due to a historical error, the clock rate for G722 in RTP is
         * 8000, even if the sample rate is 16000. See RFC 3551. */
        avpriv_set_pts_info(st, 32, 1, 8000);
        break;
    case AV_CODEC_ID_OPUS:
        if (st->codec->channels > 2) {
            av_log(s1, AV_LOG_ERROR, "Multistream opus not supported in RTP\n");
            goto fail;
        }
        /* The opus RTP RFC says that all opus streams should use 48000 Hz
         * as clock rate, since all opus sample rates can be expressed in
         * this clock rate, and sample rate changes on the fly are supported. */
        avpriv_set_pts_info(st, 32, 1, 48000);
        break;
    case AV_CODEC_ID_ILBC:
        if (st->codec->block_align != 38 && st->codec->block_align != 50) {
            av_log(s1, AV_LOG_ERROR, "Incorrect iLBC block size specified\n");
            goto fail;
        }
        if (!s->max_frames_per_packet)
            s->max_frames_per_packet = 1;
        s->max_frames_per_packet = FFMIN(s->max_frames_per_packet,
                                         s->max_payload_size / st->codec->block_align);
        goto defaultcase;
    case AV_CODEC_ID_AMR_NB:
    case AV_CODEC_ID_AMR_WB:
        if (!s->max_frames_per_packet)
            s->max_frames_per_packet = 12;
        if (st->codec->codec_id == AV_CODEC_ID_AMR_NB)
            n = 31;
        else
            n = 61;
        /* max_header_toc_size + the largest AMR payload must fit */
        if (1 + s->max_frames_per_packet + n > s->max_payload_size) {
            av_log(s1, AV_LOG_ERROR, "RTP max payload size too small for AMR\n");
            goto fail;
        }
        if (st->codec->channels != 1) {
            av_log(s1, AV_LOG_ERROR, "Only mono is supported\n");
            goto fail;
        }
        /* Fallthrough */
    case AV_CODEC_ID_AAC:
        s->num_frames = 0;
        /* Fallthrough */
    default:
defaultcase:
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate);
        }
        s->buf_ptr = s->buf;
        break;
    }

    return 0;

fail:
    av_freep(&s->buf);
    return AVERROR(EINVAL);
}
 
/**
 * Send an RTCP sender report (SR) packet, optionally followed by an
 * SDES packet carrying the CNAME (RFC 3550 sections 6.4.1 / 6.5).
 * The write order below is the exact wire layout — do not reorder.
 */
static void rtcp_send_sr(AVFormatContext *s1, int64_t ntp_time)
{
    RTPMuxContext *s = s1->priv_data;
    uint32_t rtp_ts;

    av_dlog(s1, "RTCP: %02x %"PRIx64" %x\n", s->payload_type, ntp_time, s->timestamp);

    s->last_rtcp_ntp_time = ntp_time;
    /* Map wall-clock time since the first report onto the RTP clock. */
    rtp_ts = av_rescale_q(ntp_time - s->first_rtcp_ntp_time, (AVRational){1, 1000000},
                          s1->streams[0]->time_base) + s->base_timestamp;
    avio_w8(s1->pb, (RTP_VERSION << 6));
    avio_w8(s1->pb, RTCP_SR);
    avio_wb16(s1->pb, 6); /* length in words - 1 */
    avio_wb32(s1->pb, s->ssrc);
    avio_wb64(s1->pb, NTP_TO_RTP_FORMAT(ntp_time));
    avio_wb32(s1->pb, rtp_ts);
    avio_wb32(s1->pb, s->packet_count);
    avio_wb32(s1->pb, s->octet_count);

    if (s->cname) {
        /* SDES item length is one byte, so cap the CNAME at 255 chars. */
        int len = FFMIN(strlen(s->cname), 255);
        avio_w8(s1->pb, (RTP_VERSION << 6) + 1);
        avio_w8(s1->pb, RTCP_SDES);
        avio_wb16(s1->pb, (7 + len + 3) / 4); /* length in words - 1 */

        avio_wb32(s1->pb, s->ssrc);
        avio_w8(s1->pb, 0x01); /* CNAME */
        avio_w8(s1->pb, len);
        avio_write(s1->pb, s->cname, len);
        avio_w8(s1->pb, 0); /* END */
        /* pad the SDES chunk to a 32-bit boundary */
        for (len = (7 + len) % 4; len % 4; len++)
            avio_w8(s1->pb, 0);
    }

    avio_flush(s1->pb);
}
 
/* send an rtp packet. sequence number is incremented, but the caller
   must update the timestamp itself */
void ff_rtp_send_data(AVFormatContext *s1, const uint8_t *buf1, int len, int m)
{
    RTPMuxContext *s = s1->priv_data;

    av_dlog(s1, "rtp_send_data size=%d\n", len);

    /* build the RTP header (RFC 3550 section 5.1):
     * V=2, P=0, X=0, CC=0 | M bit + 7-bit payload type | seq | ts | SSRC */
    avio_w8(s1->pb, (RTP_VERSION << 6));
    avio_w8(s1->pb, (s->payload_type & 0x7f) | ((m & 0x01) << 7));
    avio_wb16(s1->pb, s->seq);
    avio_wb32(s1->pb, s->timestamp);
    avio_wb32(s1->pb, s->ssrc);

    avio_write(s1->pb, buf1, len);
    avio_flush(s1->pb);

    /* bookkeeping for the next packet and for RTCP sender reports */
    s->seq = (s->seq + 1) & 0xffff;
    s->octet_count += len;
    s->packet_count++;
}
 
/**
 * Packetize raw PCM-style payloads: split the input into RTP packets
 * that each carry a whole number of samples, advancing the RTP
 * timestamp by the number of samples already sent.
 *
 * @param sample_size_bits bits per sample across all channels
 * @return 0 on success, AVERROR(EINVAL) if size is not a whole number
 *         of byte-aligned samples
 */
static int rtp_send_samples(AVFormatContext *s1,
                            const uint8_t *buf1, int size, int sample_size_bits)
{
    RTPMuxContext *s = s1->priv_data;
    /* smallest byte count holding a whole number of samples */
    int aligned_samples_size = sample_size_bits / av_gcd(sample_size_bits, 8);
    int max_packet_size = s->max_payload_size / aligned_samples_size *
                          aligned_samples_size;
    int bytes_sent = 0;

    /* Not needed, but who knows. Only meaningful for byte-sized samples. */
    if ((sample_size_bits % 8) == 0 && ((8 * size) % sample_size_bits) != 0)
        return AVERROR(EINVAL);

    while (size > 0) {
        int len = FFMIN(max_packet_size, size);

        memcpy(s->buf, buf1, len);
        s->buf_ptr = s->buf + len;
        buf1 += len;
        size -= len;
        /* timestamp counts samples emitted so far */
        s->timestamp = s->cur_timestamp + bytes_sent * 8 / sample_size_bits;
        ff_rtp_send_data(s1, s->buf, len, 0);
        bytes_sent += len;
    }
    return 0;
}
 
/**
 * Packetize MP2/MP3 frames per RFC 2250: small frames are aggregated
 * into one RTP packet behind a 4-byte payload header; frames larger
 * than the payload limit are fragmented, with the header's offset
 * field tracking the byte position inside the frame.
 */
static void rtp_send_mpegaudio(AVFormatContext *s1,
                               const uint8_t *buf1, int size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, count, max_packet_size;

    max_packet_size = s->max_payload_size;

    /* test if we must flush because not enough space */
    len = (s->buf_ptr - s->buf);
    if ((len + size) > max_packet_size) {
        if (len > 4) {
            ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
            s->buf_ptr = s->buf + 4; // keep room for the payload header
        }
    }
    if (s->buf_ptr == s->buf + 4) {
        /* buffer is empty: this frame sets the packet's timestamp */
        s->timestamp = s->cur_timestamp;
    }

    /* add the packet */
    if (size > max_packet_size) {
        /* big packet: fragment */
        count = 0; // byte offset of the fragment within the frame
        while (size > 0) {
            len = max_packet_size - 4;
            if (len > size)
                len = size;
            /* build fragmented packet */
            s->buf[0] = 0;
            s->buf[1] = 0;
            s->buf[2] = count >> 8;
            s->buf[3] = count;
            memcpy(s->buf + 4, buf1, len);
            ff_rtp_send_data(s1, s->buf, len + 4, 0);
            size -= len;
            buf1 += len;
            count += len;
        }
    } else {
        if (s->buf_ptr == s->buf + 4) {
            /* no fragmentation possible */
            s->buf[0] = 0;
            s->buf[1] = 0;
            s->buf[2] = 0;
            s->buf[3] = 0;
        }
        /* aggregate: append the whole frame; it is flushed on a later
         * call once the buffer would overflow */
        memcpy(s->buf_ptr, buf1, size);
        s->buf_ptr += size;
    }
}
 
/**
 * Fallback packetizer: chop the payload into max_payload_size pieces
 * and send each as its own RTP packet. The marker bit is set only on
 * the final piece of the input buffer.
 */
static void rtp_send_raw(AVFormatContext *s1,
                         const uint8_t *buf1, int size)
{
    RTPMuxContext *s = s1->priv_data;

    while (size > 0) {
        int len = FFMIN(s->max_payload_size, size);

        s->timestamp = s->cur_timestamp;
        ff_rtp_send_data(s1, buf1, len, len == size);

        buf1 += len;
        size -= len;
    }
}
 
/**
 * Packetize an MPEG-TS stream: accumulate 188-byte TS packets in the
 * staging buffer and emit an RTP packet whenever it reaches
 * max_payload_size (which rtp_write_header rounded to a whole number
 * of TS packets).
 *
 * NOTE: size is assumed to be an integer multiple of TS_PACKET_SIZE.
 */
static void rtp_send_mpegts_raw(AVFormatContext *s1,
                                const uint8_t *buf1, int size)
{
    RTPMuxContext *s = s1->priv_data;

    while (size >= TS_PACKET_SIZE) {
        int room = s->max_payload_size - (s->buf_ptr - s->buf);
        int len  = room > size ? size : room;

        memcpy(s->buf_ptr, buf1, len);
        buf1       += len;
        size       -= len;
        s->buf_ptr += len;

        if (s->buf_ptr - s->buf >= s->max_payload_size) {
            ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
            s->buf_ptr = s->buf;
        }
    }
}
 
/**
 * Packetize iLBC audio: collect fixed-size frames (block_align bytes
 * each) in the staging buffer and flush an RTP packet with the marker
 * bit set once max_frames_per_packet frames are buffered. Partial
 * packets persist across calls in s->num_frames / s->buf_ptr.
 */
static int rtp_send_ilbc(AVFormatContext *s1, const uint8_t *buf, int size)
{
    RTPMuxContext *s = s1->priv_data;
    AVStream *st = s1->streams[0];
    int frame_duration = av_get_audio_frame_duration(st->codec, 0);
    int frame_size = st->codec->block_align;
    int frames = size / frame_size;

    while (frames > 0) {
        int num   = FFMIN(s->max_frames_per_packet - s->num_frames, frames);
        int bytes = num * frame_size;

        if (s->num_frames == 0) {
            /* first frame of a fresh packet fixes its timestamp */
            s->buf_ptr   = s->buf;
            s->timestamp = s->cur_timestamp;
        }
        memcpy(s->buf_ptr, buf, bytes);
        frames           -= num;
        s->num_frames    += num;
        s->buf_ptr       += bytes;
        buf              += bytes;
        s->cur_timestamp += num * frame_duration;

        if (s->num_frames == s->max_frames_per_packet) {
            ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 1);
            s->num_frames = 0;
        }
    }
    return 0;
}
 
/**
 * Write one input packet: emit an RTCP sender report when due, update
 * the current RTP timestamp from the packet pts, then dispatch to the
 * per-codec packetizer.
 *
 * @return 0 on success, a negative error code otherwise
 */
static int rtp_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
    RTPMuxContext *s = s1->priv_data;
    AVStream *st = s1->streams[0];
    int rtcp_bytes;
    int size= pkt->size;

    av_dlog(s1, "%d: write len=%d\n", pkt->stream_index, size);

    /* Send an SR before the first data packet, and afterwards once the
     * RTCP share of traffic is due and at least 5 seconds have passed,
     * unless RTCP was disabled via the skip_rtcp flag. */
    rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
        RTCP_TX_RATIO_DEN;
    if ((s->first_packet || ((rtcp_bytes >= RTCP_SR_SIZE) &&
                            (ff_ntp_time() - s->last_rtcp_ntp_time > 5000000))) &&
        !(s->flags & FF_RTP_FLAG_SKIP_RTCP)) {
        rtcp_send_sr(s1, ff_ntp_time());
        s->last_octet_count = s->octet_count;
        s->first_packet = 0;
    }
    s->cur_timestamp = s->base_timestamp + pkt->pts;

    switch(st->codec->codec_id) {
    case AV_CODEC_ID_PCM_MULAW:
    case AV_CODEC_ID_PCM_ALAW:
    case AV_CODEC_ID_PCM_U8:
    case AV_CODEC_ID_PCM_S8:
        return rtp_send_samples(s1, pkt->data, size, 8 * st->codec->channels);
    case AV_CODEC_ID_PCM_U16BE:
    case AV_CODEC_ID_PCM_U16LE:
    case AV_CODEC_ID_PCM_S16BE:
    case AV_CODEC_ID_PCM_S16LE:
        return rtp_send_samples(s1, pkt->data, size, 16 * st->codec->channels);
    case AV_CODEC_ID_ADPCM_G722:
        /* The actual sample size is half a byte per sample, but since the
         * stream clock rate is 8000 Hz while the sample rate is 16000 Hz,
         * the correct parameter for send_samples_bits is 8 bits per stream
         * clock. */
        return rtp_send_samples(s1, pkt->data, size, 8 * st->codec->channels);
    case AV_CODEC_ID_ADPCM_G726:
        return rtp_send_samples(s1, pkt->data, size,
                                st->codec->bits_per_coded_sample * st->codec->channels);
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
        rtp_send_mpegaudio(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
        ff_rtp_send_mpegvideo(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_AAC:
        if (s->flags & FF_RTP_FLAG_MP4A_LATM)
            ff_rtp_send_latm(s1, pkt->data, size);
        else
            ff_rtp_send_aac(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_AMR_NB:
    case AV_CODEC_ID_AMR_WB:
        ff_rtp_send_amr(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_MPEG2TS:
        rtp_send_mpegts_raw(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_H264:
        ff_rtp_send_h264(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_H263:
        if (s->flags & FF_RTP_FLAG_RFC2190) {
            /* RFC 2190 mode needs the encoder-provided macroblock info */
            int mb_info_size = 0;
            const uint8_t *mb_info =
                av_packet_get_side_data(pkt, AV_PKT_DATA_H263_MB_INFO,
                                        &mb_info_size);
            ff_rtp_send_h263_rfc2190(s1, pkt->data, size, mb_info, mb_info_size);
            break;
        }
        /* Fallthrough */
    case AV_CODEC_ID_H263P:
        ff_rtp_send_h263(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_VORBIS:
    case AV_CODEC_ID_THEORA:
        ff_rtp_send_xiph(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_VP8:
        ff_rtp_send_vp8(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_ILBC:
        rtp_send_ilbc(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_MJPEG:
        ff_rtp_send_jpeg(s1, pkt->data, size);
        break;
    case AV_CODEC_ID_OPUS:
        /* Opus packets must not be fragmented, so reject oversized ones. */
        if (size > s->max_payload_size) {
            av_log(s1, AV_LOG_ERROR,
                   "Packet size %d too large for max RTP payload size %d\n",
                   size, s->max_payload_size);
            return AVERROR(EINVAL);
        }
        /* Intentional fallthrough */
    default:
        /* better than nothing : send the codec raw data */
        rtp_send_raw(s1, pkt->data, size);
        break;
    }
    return 0;
}
 
/**
 * Finalize the muxer: release the staging buffer allocated in
 * rtp_write_header().
 */
static int rtp_write_trailer(AVFormatContext *s1)
{
    RTPMuxContext *rtp = s1->priv_data;

    av_freep(&rtp->buf);
    return 0;
}
 
/* Output-format registration for the RTP muxer. */
AVOutputFormat ff_rtp_muxer = {
    .name              = "rtp",
    .long_name         = NULL_IF_CONFIG_SMALL("RTP output"),
    .priv_data_size    = sizeof(RTPMuxContext),
    .audio_codec       = AV_CODEC_ID_PCM_MULAW,
    .video_codec       = AV_CODEC_ID_MPEG4,
    .write_header      = rtp_write_header,
    .write_packet      = rtp_write_packet,
    .write_trailer     = rtp_write_trailer,
    .priv_class        = &rtp_muxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc.h
0,0 → 1,96
/*
* RTP muxer definitions
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_RTPENC_H
#define AVFORMAT_RTPENC_H
 
#include "avformat.h"
#include "rtp.h"
 
/** Private state of the RTP muxer. */
struct RTPMuxContext {
    const AVClass *av_class;
    AVFormatContext *ic;
    AVStream *st;
    int payload_type;           ///< RTP payload type; -1 = derive from codec
    uint32_t ssrc;              ///< stream identifier; 0 = pick at random
    const char *cname;          ///< CNAME sent in RTCP SDES packets
    int seq;                    ///< sequence number of the next packet
    uint32_t timestamp;         ///< RTP timestamp of the packet being sent
    uint32_t base_timestamp;    ///< random offset added to all timestamps
    uint32_t cur_timestamp;     ///< base_timestamp + pts of the current input packet
    int max_payload_size;       ///< packet size minus the 12-byte RTP header
    int num_frames;             ///< frames buffered for aggregated payloads

    /* rtcp sender statistics */
    int64_t last_rtcp_ntp_time;
    int64_t first_rtcp_ntp_time;
    unsigned int packet_count;  ///< RTP packets sent (reported in SR)
    unsigned int octet_count;   ///< payload bytes sent (reported in SR)
    unsigned int last_octet_count; ///< octet_count at the previous SR
    int first_packet;           ///< 1 until the first data packet is written
    /* buffer for output */
    uint8_t *buf;
    uint8_t *buf_ptr;           ///< write position within buf

    int max_frames_per_packet;  ///< aggregation limit derived from max_delay

    /**
     * Number of bytes used for H.264 NAL length, if the MP4 syntax is used
     * (1, 2 or 4)
     */
    int nal_length_size;

    int flags;                  ///< combination of FF_RTP_FLAG_*

    unsigned int frame_count;
};
 
typedef struct RTPMuxContext RTPMuxContext;
 
#define FF_RTP_FLAG_MP4A_LATM 1
#define FF_RTP_FLAG_RFC2190 2
#define FF_RTP_FLAG_SKIP_RTCP 4
#define FF_RTP_FLAG_H264_MODE0 8
 
#define FF_RTP_FLAG_OPTS(ctx, fieldname) \
{ "rtpflags", "RTP muxer flags", offsetof(ctx, fieldname), AV_OPT_TYPE_FLAGS, {.i64 = 0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
{ "latm", "Use MP4A-LATM packetization instead of MPEG4-GENERIC for AAC", 0, AV_OPT_TYPE_CONST, {.i64 = FF_RTP_FLAG_MP4A_LATM}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
{ "rfc2190", "Use RFC 2190 packetization instead of RFC 4629 for H.263", 0, AV_OPT_TYPE_CONST, {.i64 = FF_RTP_FLAG_RFC2190}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
{ "skip_rtcp", "Don't send RTCP sender reports", 0, AV_OPT_TYPE_CONST, {.i64 = FF_RTP_FLAG_SKIP_RTCP}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
{ "h264_mode0", "Use mode 0 for H264 in RTP", 0, AV_OPT_TYPE_CONST, {.i64 = FF_RTP_FLAG_H264_MODE0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" } \
 
/* Emit one RTP packet carrying len bytes from buf1; m sets the marker bit. */
void ff_rtp_send_data(AVFormatContext *s1, const uint8_t *buf1, int len, int m);

/* Per-codec packetizers: split one encoded frame into RTP payloads and pass
 * each of them to ff_rtp_send_data(). */
void ff_rtp_send_h264(AVFormatContext *s1, const uint8_t *buf1, int size);
void ff_rtp_send_h263(AVFormatContext *s1, const uint8_t *buf1, int size);
void ff_rtp_send_h263_rfc2190(AVFormatContext *s1, const uint8_t *buf1, int size,
                              const uint8_t *mb_info, int mb_info_size);
void ff_rtp_send_aac(AVFormatContext *s1, const uint8_t *buff, int size);
void ff_rtp_send_latm(AVFormatContext *s1, const uint8_t *buff, int size);
void ff_rtp_send_amr(AVFormatContext *s1, const uint8_t *buff, int size);
void ff_rtp_send_mpegvideo(AVFormatContext *s1, const uint8_t *buf1, int size);
void ff_rtp_send_xiph(AVFormatContext *s1, const uint8_t *buff, int size);
void ff_rtp_send_vp8(AVFormatContext *s1, const uint8_t *buff, int size);
void ff_rtp_send_jpeg(AVFormatContext *s1, const uint8_t *buff, int size);

/* Scan backwards in [start, end) for an H.263 resync marker (16 zero bits);
 * returns end if none is found. */
const uint8_t *ff_h263_find_resync_marker_reverse(const uint8_t *av_restrict start,
                                                  const uint8_t *av_restrict end);
 
#endif /* AVFORMAT_RTPENC_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_aac.c
0,0 → 1,85
/*
* copyright (c) 2007 Luca Abeni
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rtpenc.h"
 
 
/**
 * Packetize an AAC frame into RTP (MPEG4-GENERIC / AAC-hbr, RFC 3640).
 *
 * Each packet starts with a 16-bit AU-headers-length field (in bits),
 * followed by one 16-bit AU header per frame (13-bit AU size, 3 zero bits)
 * and then the access units themselves.  Whole frames are aggregated into
 * one packet until the frame limit or size limit is reached; a frame larger
 * than one packet is fragmented, each fragment repeating the full AU size.
 */
void ff_rtp_send_aac(AVFormatContext *s1, const uint8_t *buff, int size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, max_packet_size;
    uint8_t *p;
    /* Default to aggregating at most 5 frames when no limit was configured. */
    const int max_frames_per_packet = s->max_frames_per_packet ? s->max_frames_per_packet : 5;
    /* Worst-case header: 2 length bytes + 2 bytes per AU header. */
    const int max_au_headers_size = 2 + 2 * max_frames_per_packet;

    /* skip ADTS header, if present (no extradata means in-band headers) */
    if ((s1->streams[0]->codec->extradata_size) == 0) {
        size -= 7;
        buff += 7;
    }
    max_packet_size = s->max_payload_size - max_au_headers_size;

    /* test if the packet must be sent */
    len = (s->buf_ptr - s->buf);
    if ((s->num_frames == max_frames_per_packet) || (len && (len + size) > s->max_payload_size)) {
        int au_size = s->num_frames * 2;

        /* The AU headers were laid out assuming the maximum count; slide
         * them up against the payload before sending. */
        p = s->buf + max_au_headers_size - au_size - 2;
        if (p != s->buf) {
            memmove(p + 2, s->buf + 2, au_size);
        }
        /* Write the AU header size, expressed in bits (au_size * 8). */
        p[0] = au_size >> 5;
        p[1] = (au_size & 0x1F) << 3;

        ff_rtp_send_data(s1, p, s->buf_ptr - p, 1);

        s->num_frames = 0;
    }
    if (s->num_frames == 0) {
        /* Start a fresh packet: payload begins after the header area. */
        s->buf_ptr = s->buf + max_au_headers_size;
        s->timestamp = s->cur_timestamp;
    }

    if (size <= max_packet_size) {
        /* Frame fits: append its AU header and payload to the buffer. */
        p = s->buf + s->num_frames++ * 2 + 2;
        *p++ = size >> 5;
        *p = (size & 0x1F) << 3;
        memcpy(s->buf_ptr, buff, size);
        s->buf_ptr += size;
    } else {
        /* Oversized frame: fragment it, one AU header per packet, each
         * header carrying the full AU size (per RFC 3640 fragmentation). */
        int au_size = size;

        max_packet_size = s->max_payload_size - 4;
        p = s->buf;
        p[0] = 0;
        p[1] = 16;  /* AU-headers-length: one 16-bit header */
        while (size > 0) {
            len = FFMIN(size, max_packet_size);
            p[2] = au_size >> 5;
            p[3] = (au_size & 0x1F) << 3;
            memcpy(p + 4, buff, len);
            /* Marker bit set only on the final fragment. */
            ff_rtp_send_data(s1, p, len + 4, len == size);
            size -= len;
            buff += len;
        }
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_amr.c
0,0 → 1,65
/*
* RTP packetization for AMR audio
* Copyright (c) 2007 Luca Abeni
* Copyright (c) 2009 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rtpenc.h"
 
/**
 * Packetize AMR frames into RTP packets according to RFC 3267,
 * in octet-aligned mode.
 *
 * A packet consists of a 1-byte CMR header, a table of contents with one
 * byte per frame, and the frame payloads with their leading TOC byte
 * stripped.  Frames are aggregated until the per-packet frame limit or the
 * payload size limit would be exceeded.
 */
void ff_rtp_send_amr(AVFormatContext *s1, const uint8_t *buff, int size)
{
    RTPMuxContext *s = s1->priv_data;
    /* Worst case: 1 CMR byte + one TOC entry per frame. */
    int max_header_toc_size = 1 + s->max_frames_per_packet;
    uint8_t *p;
    int len;

    /* Test if the packet must be sent. */
    len = s->buf_ptr - s->buf;
    if (s->num_frames == s->max_frames_per_packet || (len && len + size - 1 > s->max_payload_size)) {
        int header_size = s->num_frames + 1;
        /* The TOC area was sized for the maximum frame count; pack
         * CMR + TOC right up against the payload before sending. */
        p = s->buf + max_header_toc_size - header_size;
        if (p != s->buf)
            memmove(p, s->buf, header_size);

        ff_rtp_send_data(s1, p, s->buf_ptr - p, 1);

        s->num_frames = 0;
    }

    if (!s->num_frames) {
        s->buf[0] = 0xf0;  /* CMR: no specific mode requested */
        s->buf_ptr = s->buf + max_header_toc_size;
        s->timestamp = s->cur_timestamp;
    } else {
        /* Mark the previous TOC entry as having more entries following. */
        s->buf[1 + s->num_frames - 1] |= 0x80;
    }

    /* Copy the frame type and quality bits. */
    s->buf[1 + s->num_frames++] = buff[0] & 0x7C;
    buff++;
    size--;
    memcpy(s->buf_ptr, buff, size);
    s->buf_ptr += size;
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_chain.c
0,0 → 1,109
/*
* RTP muxer chaining code
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "avio_internal.h"
#include "rtpenc_chain.h"
#include "rtp.h"
#include "libavutil/opt.h"
 
/**
 * Create and initialize a chained "rtp" muxer for a single stream.
 *
 * A new AVFormatContext is allocated for the rtp output format, one stream
 * is added to it, and the relevant settings (interrupt callback, max delay,
 * sample aspect ratio, LATM flag, rtpflags option, synchronized start time,
 * codec parameters and payload type) are copied from the parent context s
 * and stream st.  Output goes through handle when given (ownership is
 * taken; it is closed on failure), otherwise into a dynamic packet buffer
 * producing packets of at most packet_size bytes.  Finally the RTP stream
 * header is written.
 *
 * @param out         receives the chained muxer context on success
 * @param s           parent muxer context the settings are inherited from
 * @param st          the stream to be carried by this RTP session
 * @param handle      already-opened transport, or NULL for a dynamic buffer
 * @param packet_size maximum packet size for the dynamic buffer
 * @param idx         stream index, used for dynamic payload type selection
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_rtp_chain_mux_open(AVFormatContext **out, AVFormatContext *s,
                          AVStream *st, URLContext *handle, int packet_size,
                          int idx)
{
    AVFormatContext *rtpctx = NULL;
    int ret;
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);
    uint8_t *rtpflags;
    AVDictionary *opts = NULL;

    if (!rtp_format) {
        ret = AVERROR(ENOSYS);
        goto fail;
    }

    /* Allocate an AVFormatContext for each output stream */
    rtpctx = avformat_alloc_context();
    if (!rtpctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    rtpctx->oformat = rtp_format;
    if (!avformat_new_stream(rtpctx, NULL)) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    /* Pass the interrupt callback on */
    rtpctx->interrupt_callback = s->interrupt_callback;
    /* Copy the max delay setting; the rtp muxer reads this. */
    rtpctx->max_delay = s->max_delay;
    /* Copy other stream parameters. */
    rtpctx->streams[0]->sample_aspect_ratio = st->sample_aspect_ratio;
    rtpctx->flags |= s->flags & AVFMT_FLAG_MP4A_LATM;

    /* Get the payload type from the codec */
    if (st->id < RTP_PT_PRIVATE)
        rtpctx->streams[0]->id =
            ff_rtp_get_payload_type(s, st->codec, idx);
    else
        rtpctx->streams[0]->id = st->id;

    /* Forward the caller's rtpflags option; the dictionary takes ownership
     * of the string returned by av_opt_get() (AV_DICT_DONT_STRDUP_VAL). */
    if (av_opt_get(s, "rtpflags", AV_OPT_SEARCH_CHILDREN, &rtpflags) >= 0)
        av_dict_set(&opts, "rtpflags", rtpflags, AV_DICT_DONT_STRDUP_VAL);

    /* Set the synchronized start time. */
    rtpctx->start_time_realtime = s->start_time_realtime;

    avcodec_copy_context(rtpctx->streams[0]->codec, st->codec);

    if (handle) {
        ret = ffio_fdopen(&rtpctx->pb, handle);
        if (ret < 0)
            ffurl_close(handle);
    } else
        ret = ffio_open_dyn_packet_buf(&rtpctx->pb, packet_size);
    if (!ret)
        ret = avformat_write_header(rtpctx, &opts);
    av_dict_free(&opts);

    if (ret) {
        if (handle && rtpctx->pb) {
            avio_close(rtpctx->pb);
        } else if (rtpctx->pb) {
            uint8_t *ptr;
            avio_close_dyn_buf(rtpctx->pb, &ptr);
            av_free(ptr);
        }
        avformat_free_context(rtpctx);
        return ret;
    }

    *out = rtpctx;
    return 0;

fail:
    /* avformat_alloc_context() sets up internal state that a plain
     * av_free() would leak; release the context through the proper API
     * (this also covers the stream added by avformat_new_stream()). */
    if (rtpctx)
        avformat_free_context(rtpctx);
    if (handle)
        ffurl_close(handle);
    return ret;
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_chain.h
0,0 → 1,32
/*
* RTP muxer chaining code
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RTPENC_CHAIN_H
#define AVFORMAT_RTPENC_CHAIN_H
 
#include "avformat.h"
#include "url.h"
 
int ff_rtp_chain_mux_open(AVFormatContext **out, AVFormatContext *s,
AVStream *st, URLContext *handle, int packet_size,
int id);
 
#endif /* AVFORMAT_RTPENC_CHAIN_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_h263.c
0,0 → 1,81
/*
* RTP packetization for H.263 video
* Copyright (c) 2009 Luca Abeni
* Copyright (c) 2009 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rtpenc.h"
 
/*
 * Scan backwards through [start, end) for an H.263 resync marker, i.e. two
 * zero bytes followed by a non-zero byte.  The byte one past end may be
 * read while probing, so the caller must guarantee it is addressable.
 * Returns a pointer to the first zero byte of the marker, or end when no
 * marker is found; the original start position is never returned.
 */
const uint8_t *ff_h263_find_resync_marker_reverse(const uint8_t *av_restrict start,
                                                  const uint8_t *av_restrict end)
{
    const uint8_t *scan  = end - 1;
    const uint8_t *limit = start + 1; /* never report the original start */

    /* Step by two: any 00 00 pair must cover at least one probed byte. */
    while (scan > limit) {
        if (*scan == 0) {
            if (scan[1] == 0 && scan[2])
                return scan;
            if (scan[-1] == 0 && scan[1])
                return scan - 1;
        }
        scan -= 2;
    }
    return end;
}
 
/**
 * Packetize one H.263 frame into RTP packets according to RFC 4629.
 * Each packet carries a 2-byte payload header; fragments are split at
 * resync markers where possible.
 */
void ff_rtp_send_h263(AVFormatContext *s1, const uint8_t *buf1, int size)
{
    RTPMuxContext *s = s1->priv_data;
    int max_packet_size = s->max_payload_size;

    while (size > 0) {
        uint8_t *out = s->buf;
        int len;

        /* A picture/GOB start code begins with 16 zero bits; when the
         * fragment starts with one, drop the two zero bytes and signal
         * them through the P bit of the payload header. */
        if (size >= 2 && (buf1[0] == 0) && (buf1[1] == 0)) {
            *out++ = 0x04;
            buf1  += 2;
            size  -= 2;
        } else {
            *out++ = 0;
        }
        *out++ = 0;

        len = FFMIN(max_packet_size - 2, size);

        /* When fragmenting, prefer to split at a resync marker. */
        if (len < size) {
            const uint8_t *split =
                ff_h263_find_resync_marker_reverse(buf1, buf1 + len);
            len = split - buf1;
        }

        memcpy(out, buf1, len);
        out += len;

        /* 90 kHz time stamp; marker bit flags the frame's last packet. */
        s->timestamp = s->cur_timestamp;
        ff_rtp_send_data(s1, s->buf, out - s->buf, (len == size));

        buf1 += len;
        size -= len;
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_h263_rfc2190.c
0,0 → 1,195
/*
* RTP packetization for H.263 video
* Copyright (c) 2012 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rtpenc.h"
#include "libavcodec/put_bits.h"
#include "libavcodec/get_bits.h"
 
/* Picture-level fields parsed from the H.263 picture header; they are
 * echoed into every RFC 2190 payload header of the frame. */
struct H263Info {
    int src;    /* source format (picture size) */
    int i;      /* inter/intra coded flag */
    int u;      /* unrestricted motion vector flag */
    int s;      /* syntax-based arithmetic coding flag */
    int a;      /* advanced prediction flag */
    int pb;     /* PB-frames flag */
    int tr;     /* temporal reference */
};
 
/* Decoder state at a macroblock boundary, taken from the encoder's mb_info
 * side data; needed to build a mode B payload header. */
struct H263State {
    int gobn;                   /* GOB number of the first MB in the packet */
    int mba;                    /* macroblock address within the GOB */
    int hmv1, vmv1, hmv2, vmv2; /* motion vector predictors */
    int quant;                  /* quantizer for the first MB */
};
 
/* Send one RFC 2190 mode A packet: a 4-byte payload header (only valid for
 * fragments starting at a picture/GOB boundary) followed by len bytes of
 * bitstream.  ebits is the number of unused bits in the final byte; m sets
 * the RTP marker bit. */
static void send_mode_a(AVFormatContext *s1, const struct H263Info *info,
                        const uint8_t *buf, int len, int ebits, int m)
{
    RTPMuxContext *s = s1->priv_data;
    PutBitContext pb;

    init_put_bits(&pb, s->buf, 32);
    put_bits(&pb, 1, 0);             /* F - 0, mode A */
    put_bits(&pb, 1, 0);             /* P - 0, normal I/P */
    put_bits(&pb, 3, 0);             /* SBIT - 0 bits */
    put_bits(&pb, 3, ebits);         /* EBIT */
    put_bits(&pb, 3, info->src);     /* SRC - source format */
    put_bits(&pb, 1, info->i);       /* I - inter/intra */
    put_bits(&pb, 1, info->u);       /* U - unrestricted motion vector */
    put_bits(&pb, 1, info->s);       /* S - syntax-based arithmetic coding */
    put_bits(&pb, 1, info->a);       /* A - advanced prediction */
    put_bits(&pb, 4, 0);             /* R - reserved */
    put_bits(&pb, 2, 0);             /* DBQ - 0 */
    put_bits(&pb, 3, 0);             /* TRB - 0 */
    put_bits(&pb, 8, info->tr);      /* TR */
    flush_put_bits(&pb);
    memcpy(s->buf + 4, buf, len);

    ff_rtp_send_data(s1, s->buf, len + 4, m);
}
 
/* Send one RFC 2190 mode B packet: an 8-byte payload header carrying the
 * decoder state at the first macroblock (so fragments need not start at a
 * GOB boundary), followed by len bytes of bitstream.  sbits/ebits give the
 * unused leading/trailing bits; m sets the RTP marker bit. */
static void send_mode_b(AVFormatContext *s1, const struct H263Info *info,
                        const struct H263State *state, const uint8_t *buf,
                        int len, int sbits, int ebits, int m)
{
    RTPMuxContext *s = s1->priv_data;
    PutBitContext pb;

    init_put_bits(&pb, s->buf, 64);
    put_bits(&pb, 1, 1);             /* F - 1, mode B */
    put_bits(&pb, 1, 0);             /* P - 0, mode B */
    put_bits(&pb, 3, sbits);         /* SBIT */
    put_bits(&pb, 3, ebits);         /* EBIT */
    put_bits(&pb, 3, info->src);     /* SRC - source format */
    put_bits(&pb, 5, state->quant);  /* QUANT - quantizer for the first MB */
    put_bits(&pb, 5, state->gobn);   /* GOBN - GOB number */
    put_bits(&pb, 9, state->mba);    /* MBA - MB address */
    put_bits(&pb, 2, 0);             /* R - reserved */
    put_bits(&pb, 1, info->i);       /* I - inter/intra */
    put_bits(&pb, 1, info->u);       /* U - unrestricted motion vector */
    put_bits(&pb, 1, info->s);       /* S - syntax-based arithmetic coding */
    put_bits(&pb, 1, info->a);       /* A - advanced prediction */
    put_bits(&pb, 7, state->hmv1);   /* HMV1 - horizontal motion vector 1 */
    put_bits(&pb, 7, state->vmv1);   /* VMV1 - vertical motion vector 1 */
    put_bits(&pb, 7, state->hmv2);   /* HMV2 - horizontal motion vector 2 */
    put_bits(&pb, 7, state->vmv2);   /* VMV2 - vertical motion vector 2 */
    flush_put_bits(&pb);
    memcpy(s->buf + 8, buf, len);

    ff_rtp_send_data(s1, s->buf, len + 8, m);
}
 
/**
 * Packetize one H.263 frame into RTP packets according to RFC 2190.
 *
 * The frame is split preferably at resync markers; when none fits, the
 * encoder-provided mb_info side data (12 bytes per entry: 32-bit bit
 * position, quantizer, GOB number, 16-bit MB address and four motion
 * vector predictor bytes) is used to split at a macroblock boundary.
 * Fragments starting at a picture/GOB start code are sent in mode A, all
 * others in mode B carrying the decoder state of their first macroblock.
 */
void ff_rtp_send_h263_rfc2190(AVFormatContext *s1, const uint8_t *buf, int size,
                              const uint8_t *mb_info, int mb_info_size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, sbits = 0, ebits = 0;
    GetBitContext gb;
    struct H263Info info = { 0 };
    struct H263State state = { 0 };
    int mb_info_pos = 0, mb_info_count = mb_info_size / 12;
    const uint8_t *buf_base = buf;

    s->timestamp = s->cur_timestamp;

    /* Parse the picture header once; its fields go into every packet. */
    init_get_bits(&gb, buf, size*8);
    if (get_bits(&gb, 22) == 0x20) { /* Picture Start Code */
        info.tr = get_bits(&gb, 8);
        skip_bits(&gb, 2); /* PTYPE start, H261 disambiguation */
        skip_bits(&gb, 3); /* Split screen, document camera, freeze picture release */
        info.src = get_bits(&gb, 3);
        info.i = get_bits(&gb, 1);
        info.u = get_bits(&gb, 1);
        info.s = get_bits(&gb, 1);
        info.a = get_bits(&gb, 1);
        info.pb = get_bits(&gb, 1);
    }

    while (size > 0) {
        /* Remember the MB state at the start of this packet; the mode B
         * header must describe the packet's first MB, not the next one. */
        struct H263State packet_start_state = state;
        len = FFMIN(s->max_payload_size - 8, size);

        /* Look for a better place to split the frame into packets. */
        if (len < size) {
            const uint8_t *end = ff_h263_find_resync_marker_reverse(buf,
                                                                    buf + len);
            len = end - buf;
            /* No resync marker found in range: fall back to mb_info. */
            if (len == s->max_payload_size - 8) {
                /* Skip mb info prior to the start of the current ptr */
                while (mb_info_pos < mb_info_count) {
                    uint32_t pos = AV_RL32(&mb_info[12*mb_info_pos])/8;
                    if (pos >= buf - buf_base)
                        break;
                    mb_info_pos++;
                }
                /* Find the first mb info past the end pointer */
                while (mb_info_pos + 1 < mb_info_count) {
                    uint32_t pos = AV_RL32(&mb_info[12*(mb_info_pos + 1)])/8;
                    if (pos >= end - buf_base)
                        break;
                    mb_info_pos++;
                }
                if (mb_info_pos < mb_info_count) {
                    const uint8_t *ptr = &mb_info[12*mb_info_pos];
                    /* MB boundaries are bit positions; the split byte may
                     * contain trailing bits of the previous MB (ebits). */
                    uint32_t bit_pos = AV_RL32(ptr);
                    uint32_t pos = (bit_pos + 7)/8;
                    if (pos <= end - buf_base) {
                        state.quant = ptr[4];
                        state.gobn = ptr[5];
                        state.mba = AV_RL16(&ptr[6]);
                        state.hmv1 = (int8_t) ptr[8];
                        state.vmv1 = (int8_t) ptr[9];
                        state.hmv2 = (int8_t) ptr[10];
                        state.vmv2 = (int8_t) ptr[11];
                        ebits = 8 * pos - bit_pos;
                        len = pos - (buf - buf_base);
                        mb_info_pos++;
                    } else {
                        av_log(s1, AV_LOG_ERROR,
                               "Unable to split H263 packet, use -mb_info %d "
                               "or lower.\n", s->max_payload_size - 8);
                    }
                } else {
                    av_log(s1, AV_LOG_ERROR, "Unable to split H263 packet, "
                           "use -mb_info %d or -ps 1.\n",
                           s->max_payload_size - 8);
                }
            }
        }

        /* A fragment starting with a start code (00 00) goes out in mode A,
         * anything else needs the mode B header with MB state. */
        if (size > 2 && !buf[0] && !buf[1])
            send_mode_a(s1, &info, buf, len, ebits, len == size);
        else
            send_mode_b(s1, &info, &packet_start_state, buf, len, sbits,
                        ebits, len == size);

        /* A split inside a byte: resend that byte, with the already-sent
         * bits masked out via SBIT in the next packet. */
        if (ebits) {
            sbits = 8 - ebits;
            len--;
        } else {
            sbits = 0;
        }
        buf += len;
        size -= len;
        ebits = 0;
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_h264.c
0,0 → 1,109
/*
* RTP packetization for H.264 (RFC3984)
* Copyright (c) 2008 Luca Abeni
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief H.264 packetization
* @author Luca Abeni <lucabe72@email.it>
*/
 
#include "avformat.h"
#include "avc.h"
#include "rtpenc.h"
 
/*
 * Step over one MP4-style length-prefixed NAL unit.  start points at the
 * big-endian size field of nal_length_size bytes; the return value points
 * just past the NAL unit it describes.  Returns NULL when the remaining
 * buffer cannot hold the size field or the NAL unit it advertises.
 */
static const uint8_t *avc_mp4_find_startcode(const uint8_t *start, const uint8_t *end, int nal_length_size)
{
    unsigned int nal_size = 0;

    /* Not enough bytes left for the length field itself. */
    if (end - start < nal_length_size)
        return NULL;

    /* Read the big-endian NAL size prefix byte by byte. */
    while (nal_length_size--)
        nal_size = (nal_size << 8) | *start++;

    /* The advertised NAL unit must fit in the remaining buffer. */
    if (nal_size > end - start)
        return NULL;

    return start + nal_size;
}
 
/* Send one H.264 NAL unit over RTP.  A NAL that fits in the payload size
 * goes out as a single NAL unit packet; a larger one is split into FU-A
 * fragments (RFC 3984), unless packetization mode 0 was requested, where
 * fragmentation is forbidden and an error is logged instead.  last sets
 * the RTP marker bit on the final packet. */
static void nal_send(AVFormatContext *s1, const uint8_t *buf, int size, int last)
{
    RTPMuxContext *s = s1->priv_data;

    av_log(s1, AV_LOG_DEBUG, "Sending NAL %x of len %d M=%d\n", buf[0] & 0x1F, size, last);
    if (size <= s->max_payload_size) {
        ff_rtp_send_data(s1, buf, size, last);
    } else {
        /* Preserve the NAL type and importance bits for the FU header. */
        uint8_t type = buf[0] & 0x1F;
        uint8_t nri = buf[0] & 0x60;

        if (s->flags & FF_RTP_FLAG_H264_MODE0) {
            av_log(s1, AV_LOG_ERROR,
                   "NAL size %d > %d, try -slice-max-size %d\n", size,
                   s->max_payload_size, s->max_payload_size);
            return;
        }
        av_log(s1, AV_LOG_DEBUG, "NAL size %d > %d\n", size, s->max_payload_size);
        s->buf[0] = 28;        /* FU Indicator; Type = 28 ---> FU-A */
        s->buf[0] |= nri;
        s->buf[1] = type;      /* FU header: original NAL type */
        s->buf[1] |= 1 << 7;   /* S bit: first fragment */
        /* The original NAL header byte is replaced by the FU headers. */
        buf += 1;
        size -= 1;
        while (size + 2 > s->max_payload_size) {
            memcpy(&s->buf[2], buf, s->max_payload_size - 2);
            ff_rtp_send_data(s1, s->buf, s->max_payload_size, 0);
            buf += s->max_payload_size - 2;
            size -= s->max_payload_size - 2;
            s->buf[1] &= ~(1 << 7);  /* clear S bit after the first fragment */
        }
        s->buf[1] |= 1 << 6;         /* E bit: last fragment */
        memcpy(&s->buf[2], buf, size);
        ff_rtp_send_data(s1, s->buf, size + 2, last);
    }
}
 
/* Packetize one H.264 access unit (RFC 3984).  Depending on
 * s->nal_length_size the input is parsed either as MP4-style
 * length-prefixed NAL units or as an Annex B byte stream with start codes;
 * each NAL unit is handed to nal_send(), the last one with the marker bit. */
void ff_rtp_send_h264(AVFormatContext *s1, const uint8_t *buf1, int size)
{
    const uint8_t *r, *end = buf1 + size;
    RTPMuxContext *s = s1->priv_data;

    s->timestamp = s->cur_timestamp;
    if (s->nal_length_size)
        /* MP4 syntax: the buffer either starts with a valid length field
         * or is treated as empty. */
        r = avc_mp4_find_startcode(buf1, end, s->nal_length_size) ? buf1 : end;
    else
        r = ff_avc_find_startcode(buf1, end);
    while (r < end) {
        const uint8_t *r1;

        if (s->nal_length_size) {
            /* r points at a length field; r1 becomes the end of this NAL. */
            r1 = avc_mp4_find_startcode(r, end, s->nal_length_size);
            if (!r1)
                r1 = end;
            r += s->nal_length_size;
        } else {
            /* Skip the zero bytes and the 0x01 of the start code. */
            while (!*(r++));
            r1 = ff_avc_find_startcode(r, end);
        }
        nal_send(s1, r, r1 - r, r1 == end);
        r = r1;
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_jpeg.c
0,0 → 1,138
/*
* RTP JPEG-compressed video Packetizer, RFC 2435
* Copyright (c) 2012 Samuel Pitoiset
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/bytestream.h"
#include "libavcodec/mjpeg.h"
#include "libavutil/intreadwrite.h"
#include "rtpenc.h"
 
/**
 * Packetize one baseline JPEG frame into RTP according to RFC 2435.
 *
 * The JFIF headers are parsed for the quantization tables and then
 * stripped; each packet carries the 8-byte JPEG main header (with Q = 255,
 * i.e. tables transmitted in-band), and the first packet additionally
 * carries the quantization table header and tables.  The scan data is then
 * fragmented across packets, tracked by the 24-bit fragment offset.
 */
void ff_rtp_send_jpeg(AVFormatContext *s1, const uint8_t *buf, int size)
{
    RTPMuxContext *s = s1->priv_data;
    const uint8_t *qtables = NULL;
    int nb_qtables = 0;
    uint8_t type = 1; /* default pixel format is AV_PIX_FMT_YUVJ420P */
    uint8_t w, h;
    uint8_t *p;
    int off = 0; /* fragment offset of the current JPEG frame */
    int len;
    int i;

    s->buf_ptr = s->buf;
    s->timestamp = s->cur_timestamp;

    /* convert video pixel dimensions from pixels to blocks (8x8) */
    w = s1->streams[0]->codec->width >> 3;
    h = s1->streams[0]->codec->height >> 3;

    /* check if pixel format is not the normal 420 case */
    if (s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUVJ422P) {
        type = 0;
    } else if (s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUVJ420P) {
        type = 1;
    } else {
        av_log(s1, AV_LOG_ERROR, "Unsupported pixel format\n");
        return;
    }

    /* preparse the header for getting some infos */
    /* NOTE(review): this scan reads buf[i + 1] (and further bytes for
     * DQT/SOF0) without checking i against size, so a truncated frame can
     * be over-read - TODO confirm and bound the accesses. */
    for (i = 0; i < size; i++) {
        if (buf[i] != 0xff)
            continue;

        if (buf[i + 1] == DQT) {
            if (buf[i + 4])
                av_log(s1, AV_LOG_WARNING,
                       "Only 8-bit precision is supported.\n");

            /* a quantization table is 64 bytes long, plus 1 id byte */
            nb_qtables = AV_RB16(&buf[i + 2]) / 65;
            if (i + 4 + nb_qtables * 65 > size) {
                av_log(s1, AV_LOG_ERROR, "Too short JPEG header. Aborted!\n");
                return;
            }

            qtables = &buf[i + 4];
        } else if (buf[i + 1] == SOF0) {
            /* RFC 2435 type 0/1 requires 1x1 chroma sampling factors. */
            if (buf[i + 14] != 17 || buf[i + 17] != 17) {
                av_log(s1, AV_LOG_ERROR,
                       "Only 1x1 chroma blocks are supported. Aborted!\n");
                return;
            }
        } else if (buf[i + 1] == SOS) {
            /* SOS is last marker in the header */
            i += AV_RB16(&buf[i + 2]) + 2;
            break;
        }
    }

    /* skip JPEG header */
    buf += i;
    size -= i;

    for (i = size - 2; i >= 0; i--) {
        if (buf[i] == 0xff && buf[i + 1] == EOI) {
            /* Remove the EOI marker */
            size = i;
            break;
        }
    }

    p = s->buf_ptr;
    while (size > 0) {
        int hdr_size = 8;

        /* The first packet additionally carries the quantization tables. */
        if (off == 0 && nb_qtables)
            hdr_size += 4 + 64 * nb_qtables;

        /* payload max in one packet */
        len = FFMIN(size, s->max_payload_size - hdr_size);

        /* set main header (RFC 2435 section 3.1) */
        bytestream_put_byte(&p, 0);          /* type-specific */
        bytestream_put_be24(&p, off);        /* fragment offset */
        bytestream_put_byte(&p, type);
        bytestream_put_byte(&p, 255);        /* Q >= 128: tables in-band */
        bytestream_put_byte(&p, w);
        bytestream_put_byte(&p, h);

        if (off == 0 && nb_qtables) {
            /* set quantization tables header */
            bytestream_put_byte(&p, 0);      /* MBZ */
            bytestream_put_byte(&p, 0);      /* precision: 8 bit */
            bytestream_put_be16(&p, 64 * nb_qtables);

            /* Skip the 1-byte table id preceding each 64-byte table. */
            for (i = 0; i < nb_qtables; i++)
                bytestream_put_buffer(&p, &qtables[65 * i + 1], 64);
        }

        /* copy payload data */
        memcpy(p, buf, len);

        /* marker bit is last packet in frame */
        ff_rtp_send_data(s1, s->buf, len + hdr_size, size == len);

        buf += len;
        size -= len;
        off += len;
        p = s->buf;
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_latm.c
0,0 → 1,61
/*
* RTP Packetization of MPEG-4 Audio (RFC 3016)
* Copyright (c) 2011 Juan Carlos Rodriguez <ing.juancarlosrodriguez@hotmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "rtpenc.h"
 
/**
 * Packetize one AAC frame as MP4A-LATM over RTP (RFC 3016).
 * The AudioMuxElement length is prepended as a run of 0xFF bytes plus one
 * final byte; the frame is then fragmented if it exceeds the payload size.
 */
void ff_rtp_send_latm(AVFormatContext *s1, const uint8_t *buff, int size)
{
    /* MP4A-LATM
     * The RTP payload format specification is described in RFC 3016
     * The encoding specifications are provided in ISO/IEC 14496-3 */

    RTPMuxContext *s = s1->priv_data;
    int header_size;
    int offset = 0;
    int len = 0;

    /* skip ADTS header, if present (no extradata means in-band headers) */
    if ((s1->streams[0]->codec->extradata_size) == 0) {
        size -= 7;
        buff += 7;
    }

    /* PayloadLengthInfo(): the length is coded as (size / 255) bytes of
     * 0xFF followed by the remainder, all of which sum to size. */
    header_size = size/0xFF + 1;
    memset(s->buf, 0xFF, header_size - 1);
    s->buf[header_size - 1] = size % 0xFF;

    s->timestamp = s->cur_timestamp;

    /* PayloadMux(): the first packet carries the length header and the
     * start of the frame; later fragments go straight from the input. */
    while (size > 0) {
        len = FFMIN(size, s->max_payload_size - (!offset ? header_size : 0));
        size -= len;
        if (!offset) {
            memcpy(s->buf + header_size, buff, len);
            ff_rtp_send_data(s1, s->buf, header_size + len, !size);
        } else {
            /* Marker bit set only on the final fragment of the frame. */
            ff_rtp_send_data(s1, buff + offset, len, !size);
        }
        offset += len;
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_mpv.c
0,0 → 1,117
/*
* RTP packetization for MPEG video
* Copyright (c) 2002 Fabrice Bellard
* Copyright (c) 2007 Luca Abeni
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/internal.h"
#include "avformat.h"
#include "rtpenc.h"
 
/* Packetize one MPEG-1/2 video frame into RTP according to RFC 2250.
 * Each packet starts with the 4-byte MPEG video-specific header; the frame
 * is split preferably at slice start codes so that a packet contains whole
 * slices where possible.
 * NOTE: a single frame must be passed with sequence header if
 * needed. XXX: use slices. */
void ff_rtp_send_mpegvideo(AVFormatContext *s1, const uint8_t *buf1, int size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, h, max_packet_size;
    uint8_t *q;
    const uint8_t *end = buf1 + size;
    int begin_of_slice, end_of_slice, frame_type, temporal_reference;

    max_packet_size = s->max_payload_size;
    begin_of_slice = 1;
    end_of_slice = 0;
    frame_type = 0;
    temporal_reference = 0;

    while (size > 0) {
        int begin_of_sequence;

        begin_of_sequence = 0;
        len = max_packet_size - 4;  /* leave room for the RFC 2250 header */

        if (len >= size) {
            /* Everything left fits into this packet. */
            len = size;
            end_of_slice = 1;
        } else {
            /* Scan forward for start codes to find a slice-aligned split. */
            const uint8_t *r, *r1;
            int start_code;

            r1 = buf1;
            while (1) {
                start_code = -1;
                r = avpriv_find_start_code(r1, end, &start_code);
                if((start_code & 0xFFFFFF00) == 0x100) {
                    /* New start code found */
                    if (start_code == 0x100) {
                        /* Picture start code: extract the picture coding
                         * type and temporal reference for the header. */
                        frame_type = (r[1] & 0x38) >> 3;
                        temporal_reference = (int)r[0] << 2 | r[1] >> 6;
                    }
                    if (start_code == 0x1B8) {
                        /* Sequence/GOP start code. */
                        begin_of_sequence = 1;
                    }

                    if (r - buf1 - 4 <= len) {
                        /* The current slice fits in the packet */
                        if (begin_of_slice == 0) {
                            /* no slice at the beginning of the packet... */
                            end_of_slice = 1;
                            len = r - buf1 - 4;
                            break;
                        }
                        r1 = r;
                    } else {
                        /* Slice would overflow: cut before it, if the
                         * packet already holds at least one whole slice. */
                        if ((r1 - buf1 > 4) && (r - r1 < max_packet_size)) {
                            len = r1 - buf1 - 4;
                            end_of_slice = 1;
                        }
                        break;
                    }
                } else {
                    break;
                }
            }
        }

        /* Build the 32-bit MPEG video-specific header (RFC 2250 3.4). */
        h = 0;
        h |= temporal_reference << 16;
        h |= begin_of_sequence << 13;
        h |= begin_of_slice << 12;
        h |= end_of_slice << 11;
        h |= frame_type << 8;

        q = s->buf;
        *q++ = h >> 24;
        *q++ = h >> 16;
        *q++ = h >> 8;
        *q++ = h;

        memcpy(q, buf1, len);
        q += len;

        /* 90kHz time stamp; marker bit set on the frame's last packet */
        s->timestamp = s->cur_timestamp;
        ff_rtp_send_data(s1, s->buf, q - s->buf, (len == size));

        buf1 += len;
        size -= len;
        begin_of_slice = end_of_slice;
        end_of_slice = 0;
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_vp8.c
0,0 → 1,55
/*
* RTP VP8 Packetizer
* Copyright (c) 2010 Josh Allmann
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "rtpenc.h"
 
/* Based on a draft spec for VP8 RTP.
 * ( http://tools.ietf.org/html/draft-ietf-payload-vp8-05 )
 * Every packet repeats the 3-byte payload descriptor; only the first one
 * carries the start-of-partition bit. */
void ff_rtp_send_vp8(AVFormatContext *s1, const uint8_t *buf, int size)
{
    RTPMuxContext *s = s1->priv_data;
    int header_size, payload_room, chunk;

    s->buf_ptr = s->buf;
    s->timestamp = s->cur_timestamp;

    /* Payload descriptor: extended control bit set, reference frame,
     * start of partition, partition id 0. */
    *s->buf_ptr++ = 0x90;
    *s->buf_ptr++ = 0x80;                    /* picture id present */
    *s->buf_ptr++ = s->frame_count++ & 0x7f; /* 7-bit picture id */

    /* Room left for payload bytes after the descriptor. */
    header_size  = s->buf_ptr - s->buf;
    payload_room = s->max_payload_size - header_size;

    while (size > 0) {
        chunk = FFMIN(size, payload_room);

        memcpy(s->buf_ptr, buf, chunk);
        /* The marker bit flags the last packet of the frame. */
        ff_rtp_send_data(s1, s->buf, chunk + header_size, size == chunk);

        size -= chunk;
        buf  += chunk;
        /* Clear the partition start bit, keep the rest of the header untouched */
        s->buf[0] &= ~0x10;
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpenc_xiph.c
0,0 → 1,127
/*
* RTP packetization for Xiph audio and video
* Copyright (c) 2010 Josh Allmann
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "avformat.h"
#include "rtpenc.h"
 
/**
* Packetize Xiph frames into RTP according to
* RFC 5215 (Vorbis) and the Theora RFC draft.
* (http://svn.xiph.org/trunk/theora/doc/draft-ietf-avt-rtp-theora-00.txt)
*/
/**
 * Packetize one Xiph (Vorbis/Theora) packet into RTP.
 * Whole raw-data frames may be aggregated into one RTP packet; oversized
 * frames are fragmented. Config/comment payloads are always flushed alone.
 *
 * @param s1   muxer context (priv_data is an RTPMuxContext)
 * @param buff one Xiph packet (first byte identifies its type)
 * @param size packet size in bytes
 */
void ff_rtp_send_xiph(AVFormatContext *s1, const uint8_t *buff, int size)
{
    RTPMuxContext *s = s1->priv_data;
    int max_pkt_size, xdt, frag;
    uint8_t *q;

    max_pkt_size = s->max_payload_size;

    // set xiph data type (XDT field) from the packet's first byte
    switch (*buff) {
    case 0x01: // vorbis id
    case 0x05: // vorbis setup
    case 0x80: // theora header
    case 0x82: // theora tables
        xdt = 1; // packed config payload
        break;
    case 0x03: // vorbis comments
    case 0x81: // theora comments
        xdt = 2; // comment payload
        break;
    default:
        xdt = 0; // raw data payload
        break;
    }

    // Set ident.
    // Probably need a non-fixed way of generating
    // this, but it has to be done in SDP and passed in from there.
    q = s->buf;
    *q++ = (RTP_XIPH_IDENT >> 16) & 0xff;
    *q++ = (RTP_XIPH_IDENT >>  8) & 0xff;
    *q++ = (RTP_XIPH_IDENT      ) & 0xff;

    // set fragment type:
    // 0 - whole frame (possibly multiple frames)
    // 1 - first fragment
    // 2 - fragment continuation
    // 3 - last fragmement
    frag = size <= max_pkt_size ? 0 : 1;

    if (!frag && !xdt) { // do we have a whole frame of raw data?
        // aggregation path: append this frame to the buffered packet
        uint8_t *end_ptr = s->buf + 6 + max_pkt_size; // what we're allowed to write
        uint8_t *ptr     = s->buf_ptr + 2 + size;     // what we're going to write
        int remaining    = end_ptr - ptr;

        av_assert1(s->num_frames <= s->max_frames_per_packet);
        if ((s->num_frames > 0 && remaining < 0) ||
            s->num_frames == s->max_frames_per_packet) {
            // send previous packets now; no room for new data
            ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
            s->num_frames = 0;
        }

        // buffer current frame to send later; the RTP timestamp is that
        // of the first frame in the packet
        if (0 == s->num_frames) s->timestamp = s->cur_timestamp;
        s->num_frames++;

        // Set packet header. Normally, this is OR'd with frag and xdt,
        // but those are zero, so omitted here
        *q++ = s->num_frames;

        if (s->num_frames > 1) q = s->buf_ptr; // jump ahead if needed
        // 16-bit big-endian frame length precedes each frame's payload
        *q++ = (size >> 8) & 0xff;
        *q++ = size & 0xff;
        memcpy(q, buff, size);
        q += size;
        s->buf_ptr = q;

        return;
    } else if (s->num_frames) {
        // immediately send buffered frames if buffer is not raw data,
        // or if current frame is fragmented.
        ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
    }

    // fragmentation (or single non-raw payload) path: one packet per chunk
    s->timestamp = s->cur_timestamp;
    s->num_frames = 0;
    s->buf_ptr = q;
    while (size > 0) {
        int len = (!frag || frag == 3) ? size : max_pkt_size;
        q = s->buf_ptr;

        // set packet headers (num_frames field is 0 for fragments)
        *q++ = (frag << 6) | (xdt << 4); // num_frames = 0
        *q++ = (len >> 8) & 0xff;
        *q++ = len & 0xff;
        // set packet body
        memcpy(q, buff, len);
        q    += len;
        buff += len;
        size -= len;

        ff_rtp_send_data(s1, s->buf, q - s->buf, 0);

        // next chunk is either the last fragment or a continuation
        frag = size <= max_pkt_size ? 3 : 2;
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/rtpproto.c
0,0 → 1,563
/*
* RTP network protocol
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* RTP protocol
*/
 
#include "libavutil/parseutils.h"
#include "libavutil/avstring.h"
#include "avformat.h"
#include "avio_internal.h"
#include "rtp.h"
#include "rtpproto.h"
#include "url.h"
 
#include <stdarg.h>
#include "internal.h"
#include "network.h"
#include "os_support.h"
#include <fcntl.h>
#if HAVE_POLL_H
#include <sys/poll.h>
#endif
 
/** Private state for the rtp:// URL protocol: paired RTP/RTCP UDP handles. */
typedef struct RTPContext {
    URLContext *rtp_hd, *rtcp_hd;          // underlying UDP protocol handles
    int rtp_fd, rtcp_fd, nb_ssm_include_addrs, nb_ssm_exclude_addrs; // socket fds and source-filter list sizes
    struct sockaddr_storage **ssm_include_addrs, **ssm_exclude_addrs; // source-specific multicast allow/block lists
    int write_to_source;                   // if set, reply to the source of the last received packet
    struct sockaddr_storage last_rtp_source, last_rtcp_source; // peers of the most recently received packets
    socklen_t last_rtp_source_len, last_rtcp_source_len;
} RTPContext;
 
/**
* If no filename is given to av_open_input_file because you want to
* get the local port first, then you must call this function to set
* the remote server address.
*
* @param h media file context
* @param uri of the remote server
* @return zero if no error.
*/
 
int ff_rtp_set_remote_url(URLContext *h, const char *uri)
{
    RTPContext *ctx = h->priv_data;
    char host[256];
    char url[1024];
    char path[1024];
    int rtp_port, rtcp_port;
    const char *query;

    /* Pull the host, port and path out of the rtp:// URI. */
    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &rtp_port,
                 path, sizeof(path), uri);

    /* RTCP defaults to the RTP port + 1 unless ?rtcpport= overrides it. */
    rtcp_port = rtp_port + 1;
    query = strchr(uri, '?');
    if (query && av_find_info_tag(url, sizeof(url), "rtcpport", query))
        rtcp_port = strtol(url, NULL, 10);

    /* Point both underlying UDP handles at the new destination. */
    ff_url_join(url, sizeof(url), "udp", NULL, host, rtp_port, "%s", path);
    ff_udp_set_remote_url(ctx->rtp_hd, url);

    ff_url_join(url, sizeof(url), "udp", NULL, host, rtcp_port, "%s", path);
    ff_udp_set_remote_url(ctx->rtcp_hd, url);
    return 0;
}
 
/* Resolve hostname:port into an addrinfo list (caller frees), or NULL. */
static struct addrinfo* rtp_resolve_host(const char *hostname, int port,
                                         int type, int family, int flags)
{
    struct addrinfo hints = { 0 };
    struct addrinfo *result = NULL;
    char portstr[16];
    int err;

    snprintf(portstr, sizeof(portstr), "%d", port);
    hints.ai_socktype = type;
    hints.ai_family   = family;
    hints.ai_flags    = flags;

    err = getaddrinfo(hostname, portstr, &hints, &result);
    if (err) {
        av_log(NULL, AV_LOG_ERROR, "rtp_resolve_host: %s\n", gai_strerror(err));
        return NULL;
    }
    return result;
}
 
/* Compare two socket addresses; returns 0 when they match, non-zero otherwise. */
static int compare_addr(const struct sockaddr_storage *a,
                        const struct sockaddr_storage *b)
{
    if (a->ss_family != b->ss_family)
        return 1;

    switch (a->ss_family) {
    case AF_INET: {
        const struct sockaddr_in *ia = (const struct sockaddr_in *)a;
        const struct sockaddr_in *ib = (const struct sockaddr_in *)b;
        return ia->sin_addr.s_addr != ib->sin_addr.s_addr;
    }
#if HAVE_STRUCT_SOCKADDR_IN6
    case AF_INET6: {
        const struct sockaddr_in6 *ia = (const struct sockaddr_in6 *)a;
        const struct sockaddr_in6 *ib = (const struct sockaddr_in6 *)b;
        return memcmp(ia->sin6_addr.s6_addr, ib->sin6_addr.s6_addr, 16);
    }
#endif
    default:
        /* Unknown families never compare equal. */
        return 1;
    }
}
 
/* Extract the port (host byte order) from an IPv4/IPv6 address; 0 otherwise. */
static int get_port(const struct sockaddr_storage *ss)
{
    switch (ss->ss_family) {
    case AF_INET:
        return ntohs(((const struct sockaddr_in *)ss)->sin_port);
#if HAVE_STRUCT_SOCKADDR_IN6
    case AF_INET6:
        return ntohs(((const struct sockaddr_in6 *)ss)->sin6_port);
#endif
    default:
        return 0;
    }
}
 
/* Store a port (host byte order) into an IPv4/IPv6 address; no-op otherwise. */
static void set_port(struct sockaddr_storage *ss, int port)
{
    switch (ss->ss_family) {
    case AF_INET:
        ((struct sockaddr_in *)ss)->sin_port = htons(port);
        break;
#if HAVE_STRUCT_SOCKADDR_IN6
    case AF_INET6:
        ((struct sockaddr_in6 *)ss)->sin6_port = htons(port);
        break;
#endif
    }
}
 
/* Source filtering: returns 1 if a packet from this source must be dropped. */
static int rtp_check_source_lists(RTPContext *s, struct sockaddr_storage *source_addr_ptr)
{
    int i;

    /* Any match against the exclusion list means "drop". */
    for (i = 0; i < s->nb_ssm_exclude_addrs; i++)
        if (!compare_addr(source_addr_ptr, s->ssm_exclude_addrs[i]))
            return 1;

    /* When an inclusion list exists, only listed sources pass. */
    if (s->nb_ssm_include_addrs) {
        for (i = 0; i < s->nb_ssm_include_addrs; i++)
            if (!compare_addr(source_addr_ptr, s->ssm_include_addrs[i]))
                return 0;
        return 1;
    }
    return 0;
}
 
/**
* add option to url of the form:
* "http://host:port/path?option1=val1&option2=val2...
*/
 
static av_printf_format(3, 4) void url_add_option(char *buf, int buf_size, const char *fmt, ...)
{
    char option[1024];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(option, sizeof(option), fmt, ap);
    va_end(ap);

    /* The first option opens the query string, later ones are chained. */
    av_strlcat(buf, strchr(buf, '?') ? "&" : "?", buf_size);
    av_strlcat(buf, option, buf_size);
}
 
/* Build a udp:// URL for one leg (RTP or RTCP) of the connection,
 * appending only the options the caller actually requested.
 * Negative numeric parameters mean "not set". */
static void build_udp_url(char *buf, int buf_size,
                          const char *hostname, int port,
                          int local_port, int ttl,
                          int max_packet_size, int connect,
                          const char *include_sources,
                          const char *exclude_sources)
{
    ff_url_join(buf, buf_size, "udp", NULL, hostname, port, NULL);
    if (local_port >= 0)
        url_add_option(buf, buf_size, "localport=%d", local_port);
    if (ttl >= 0)
        url_add_option(buf, buf_size, "ttl=%d", ttl);
    if (max_packet_size >=0)
        url_add_option(buf, buf_size, "pkt_size=%d", max_packet_size);
    if (connect)
        url_add_option(buf, buf_size, "connect=1");
    /* no receive fifo: deliver datagrams as they arrive */
    url_add_option(buf, buf_size, "fifo_size=0");
    if (include_sources && include_sources[0])
        url_add_option(buf, buf_size, "sources=%s", include_sources);
    if (exclude_sources && exclude_sources[0])
        url_add_option(buf, buf_size, "block=%s", exclude_sources);
}
 
/* Parse a comma-separated list of host names/addresses in buf, resolve each
 * and append the resulting sockaddr_storage (heap-allocated) to the dynamic
 * array. The buffer is temporarily modified in place but restored.
 * Unresolvable entries are logged and skipped. */
static void rtp_parse_addr_list(URLContext *h, char *buf,
                                struct sockaddr_storage ***address_list_ptr,
                                int *address_list_size_ptr)
{
    struct addrinfo *ai = NULL;
    struct sockaddr_storage *source_addr;
    char tmp = '\0', *p = buf, *next;

    /* Resolve all of the IPs */

    while (p && p[0]) {
        next = strchr(p, ',');

        if (next) {
            tmp = *next;
            *next = '\0';
        }

        ai = rtp_resolve_host(p, 0, SOCK_DGRAM, AF_UNSPEC, 0);
        if (ai) {
            source_addr = av_mallocz(sizeof(struct sockaddr_storage));
            if (!source_addr) {
                /* Bug fix: on allocation failure, free the addrinfo (it
                 * leaked before) and restore the separator we overwrote
                 * so the caller's buffer is left intact. */
                freeaddrinfo(ai);
                if (next)
                    *next = tmp;
                break;
            }

            memcpy(source_addr, ai->ai_addr, ai->ai_addrlen);
            freeaddrinfo(ai);
            dynarray_add(address_list_ptr, address_list_size_ptr, source_addr);
        } else {
            av_log(h, AV_LOG_WARNING, "Unable to resolve %s\n", p);
        }

        if (next) {
            *next = tmp;
            p = next + 1;
        } else {
            p = NULL;
        }
    }
}
 
/**
* url syntax: rtp://host:port[?option=val...]
* option: 'ttl=n' : set the ttl value (for multicast only)
* 'rtcpport=n' : set the remote rtcp port to n
* 'localrtpport=n' : set the local rtp port to n
* 'localrtcpport=n' : set the local rtcp port to n
* 'pkt_size=n' : set max packet size
* 'connect=0/1' : do a connect() on the UDP socket
* 'sources=ip[,ip]' : list allowed source IP addresses
* 'block=ip[,ip]' : list disallowed source IP addresses
* 'write_to_source=0/1' : send packets to the source address of the latest received packet
* deprecated option:
* 'localport=n' : set the local port to n
*
* if rtcpport isn't set the rtcp port will be the rtp port + 1
* if local rtp port isn't set any available port will be used for the local
* rtp and rtcp ports
* if the local rtcp port is not set it will be the local rtp port + 1
*/
 
/* Open an rtp:// URL: parse options from the query string, then open the
 * paired RTP and RTCP UDP sockets, retrying up to three times when the OS
 * hands out a local port that cannot host an RTP/RTCP pair.
 * Returns 0 on success, AVERROR(EIO) on failure. */
static int rtp_open(URLContext *h, const char *uri, int flags)
{
    RTPContext *s = h->priv_data;
    int rtp_port, rtcp_port,
        ttl, connect,
        local_rtp_port, local_rtcp_port, max_packet_size;
    char hostname[256], include_sources[1024] = "", exclude_sources[1024] = "";
    char buf[1024];
    char path[1024];
    const char *p;
    int i, max_retry_count = 3;

    av_url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &rtp_port,
                 path, sizeof(path), uri);
    /* extract parameters */
    ttl = -1;
    rtcp_port = rtp_port+1;
    local_rtp_port = -1;
    local_rtcp_port = -1;
    max_packet_size = -1;
    connect = 0;

    p = strchr(uri, '?');
    if (p) {
        if (av_find_info_tag(buf, sizeof(buf), "ttl", p)) {
            ttl = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "rtcpport", p)) {
            rtcp_port = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "localport", p)) {
            local_rtp_port = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "localrtpport", p)) {
            local_rtp_port = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "localrtcpport", p)) {
            local_rtcp_port = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "pkt_size", p)) {
            max_packet_size = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "connect", p)) {
            connect = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "write_to_source", p)) {
            s->write_to_source = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "sources", p)) {
            av_strlcpy(include_sources, buf, sizeof(include_sources));
            rtp_parse_addr_list(h, buf, &s->ssm_include_addrs, &s->nb_ssm_include_addrs);
        }
        if (av_find_info_tag(buf, sizeof(buf), "block", p)) {
            av_strlcpy(exclude_sources, buf, sizeof(exclude_sources));
            rtp_parse_addr_list(h, buf, &s->ssm_exclude_addrs, &s->nb_ssm_exclude_addrs);
        }
    }

    for (i = 0; i < max_retry_count; i++) {
        build_udp_url(buf, sizeof(buf),
                      hostname, rtp_port, local_rtp_port, ttl, max_packet_size,
                      connect, include_sources, exclude_sources);
        if (ffurl_open(&s->rtp_hd, buf, flags, &h->interrupt_callback, NULL) < 0)
            goto fail;
        local_rtp_port = ff_udp_get_local_port(s->rtp_hd);
        if (local_rtp_port == 65535) {
            /* Port 65535 leaves no room for RTCP at port + 1; bug fix:
             * close the handle before retrying so it is not leaked. */
            ffurl_close(s->rtp_hd);
            s->rtp_hd = NULL;
            local_rtp_port = -1;
            continue;
        }
        if (local_rtcp_port < 0) {
            /* No explicit local RTCP port: try RTP port + 1, retry the
             * whole pair with fresh ports if that one is taken. */
            local_rtcp_port = local_rtp_port + 1;
            build_udp_url(buf, sizeof(buf),
                          hostname, rtcp_port, local_rtcp_port, ttl, max_packet_size,
                          connect, include_sources, exclude_sources);
            if (ffurl_open(&s->rtcp_hd, buf, flags, &h->interrupt_callback, NULL) < 0) {
                /* Bug fix: the RTP handle used to leak on this path. */
                ffurl_close(s->rtp_hd);
                s->rtp_hd = NULL;
                local_rtp_port = local_rtcp_port = -1;
                continue;
            }
            break;
        }
        build_udp_url(buf, sizeof(buf),
                      hostname, rtcp_port, local_rtcp_port, ttl, max_packet_size,
                      connect, include_sources, exclude_sources);
        if (ffurl_open(&s->rtcp_hd, buf, flags, &h->interrupt_callback, NULL) < 0)
            goto fail;
        break;
    }

    /* Bug fix: if every retry failed we must not dereference NULL handles. */
    if (!s->rtp_hd || !s->rtcp_hd)
        goto fail;

    /* just to ease handle access. XXX: need to suppress direct handle
       access */
    s->rtp_fd = ffurl_get_file_handle(s->rtp_hd);
    s->rtcp_fd = ffurl_get_file_handle(s->rtcp_hd);

    h->max_packet_size = s->rtp_hd->max_packet_size;
    h->is_streamed = 1;
    return 0;

 fail:
    if (s->rtp_hd)
        ffurl_close(s->rtp_hd);
    if (s->rtcp_hd)
        ffurl_close(s->rtcp_hd);
    return AVERROR(EIO);
}
 
/* Read one packet from either the RTCP or the RTP socket (RTCP is checked
 * first). The sender's address is recorded so rtp_write() can reply to it
 * when write_to_source is enabled. Blocks (polling in 100 ms slices so the
 * interrupt callback stays responsive) unless AVIO_FLAG_NONBLOCK is set. */
static int rtp_read(URLContext *h, uint8_t *buf, int size)
{
    RTPContext *s = h->priv_data;
    int len, n, i;
    struct pollfd p[2] = {{s->rtp_fd, POLLIN, 0}, {s->rtcp_fd, POLLIN, 0}};
    int poll_delay = h->flags & AVIO_FLAG_NONBLOCK ? 0 : 100;
    /* index 0 = RTP, index 1 = RTCP; parallel to the pollfd array */
    struct sockaddr_storage *addrs[2] = { &s->last_rtp_source, &s->last_rtcp_source };
    socklen_t *addr_lens[2] = { &s->last_rtp_source_len, &s->last_rtcp_source_len };

    for(;;) {
        if (ff_check_interrupt(&h->interrupt_callback))
            return AVERROR_EXIT;
        n = poll(p, 2, poll_delay);
        if (n > 0) {
            /* first try RTCP, then RTP */
            for (i = 1; i >= 0; i--) {
                if (!(p[i].revents & POLLIN))
                    continue;
                /* remember the peer address of this packet */
                *addr_lens[i] = sizeof(*addrs[i]);
                len = recvfrom(p[i].fd, buf, size, 0,
                                (struct sockaddr *)addrs[i], addr_lens[i]);
                if (len < 0) {
                    /* transient errors: try the other socket / poll again */
                    if (ff_neterrno() == AVERROR(EAGAIN) ||
                        ff_neterrno() == AVERROR(EINTR))
                        continue;
                    return AVERROR(EIO);
                }
                /* drop packets rejected by the source allow/block lists */
                if (rtp_check_source_lists(s, addrs[i]))
                    continue;
                return len;
            }
        } else if (n < 0) {
            if (ff_neterrno() == AVERROR(EINTR))
                continue;
            return AVERROR(EIO);
        }
        if (h->flags & AVIO_FLAG_NONBLOCK)
            return AVERROR(EAGAIN);
    }
    /* not reached: the loop above only exits via return */
    return len;
}
 
/* Send one packet. The second byte decides the destination socket (RTCP
 * payload types go to the RTCP handle, everything else to the RTP handle).
 * In write_to_source mode the packet is instead sent to the address of the
 * most recently received packet, inferring the peer port from the sibling
 * channel when this channel has not received anything yet. */
static int rtp_write(URLContext *h, const uint8_t *buf, int size)
{
    RTPContext *s = h->priv_data;
    int ret;
    URLContext *hd;

    /* need at least two bytes to read the payload type */
    if (size < 2)
        return AVERROR(EINVAL);

    if (s->write_to_source) {
        int fd;
        struct sockaddr_storage *source, temp_source;
        socklen_t *source_len, temp_len;
        if (!s->last_rtp_source.ss_family && !s->last_rtcp_source.ss_family) {
            av_log(h, AV_LOG_ERROR,
                   "Unable to send packet to source, no packets received yet\n");
            // Intentionally not returning an error here
            return size;
        }

        /* pick the socket and last-seen source for this packet's channel */
        if (RTP_PT_IS_RTCP(buf[1])) {
            fd = s->rtcp_fd;
            source     = &s->last_rtcp_source;
            source_len = &s->last_rtcp_source_len;
        } else {
            fd = s->rtp_fd;
            source     = &s->last_rtp_source;
            source_len = &s->last_rtp_source_len;
        }
        if (!source->ss_family) {
            /* no packet seen on this channel yet: borrow the sibling
             * channel's source and adjust the port by +/- 1 */
            source      = &temp_source;
            source_len  = &temp_len;
            if (RTP_PT_IS_RTCP(buf[1])) {
                temp_source = s->last_rtp_source;
                temp_len    = s->last_rtp_source_len;
                set_port(source, get_port(source) + 1);
                av_log(h, AV_LOG_INFO,
                       "Not received any RTCP packets yet, inferring peer port "
                       "from the RTP port\n");
            } else {
                temp_source = s->last_rtcp_source;
                temp_len    = s->last_rtcp_source_len;
                set_port(source, get_port(source) - 1);
                av_log(h, AV_LOG_INFO,
                       "Not received any RTP packets yet, inferring peer port "
                       "from the RTCP port\n");
            }
        }

        if (!(h->flags & AVIO_FLAG_NONBLOCK)) {
            /* wait until the socket is writable */
            ret = ff_network_wait_fd(fd, 1);
            if (ret < 0)
                return ret;
        }
        ret = sendto(fd, buf, size, 0, (struct sockaddr *) source,
                     *source_len);

        return ret < 0 ? ff_neterrno() : ret;
    }

    if (RTP_PT_IS_RTCP(buf[1])) {
        /* RTCP payload type */
        hd = s->rtcp_hd;
    } else {
        /* RTP payload type */
        hd = s->rtp_hd;
    }

    ret = ffurl_write(hd, buf, size);
    return ret;
}
 
/* Tear down the protocol: free the source-filter lists, close both sockets. */
static int rtp_close(URLContext *h)
{
    RTPContext *s = h->priv_data;
    int i;

    for (i = 0; i < s->nb_ssm_include_addrs; i++)
        av_free(s->ssm_include_addrs[i]);
    for (i = 0; i < s->nb_ssm_exclude_addrs; i++)
        av_free(s->ssm_exclude_addrs[i]);
    av_freep(&s->ssm_include_addrs);
    av_freep(&s->ssm_exclude_addrs);

    ffurl_close(s->rtp_hd);
    ffurl_close(s->rtcp_hd);
    return 0;
}
 
/**
* Return the local rtp port used by the RTP connection
* @param h media file context
* @return the local port number
*/
 
int ff_rtp_get_local_rtp_port(URLContext *h)
{
    RTPContext *ctx = h->priv_data;

    /* delegate to the underlying UDP transport */
    return ff_udp_get_local_port(ctx->rtp_hd);
}
 
/**
* Return the local rtcp port used by the RTP connection
* @param h media file context
* @return the local port number
*/
 
int ff_rtp_get_local_rtcp_port(URLContext *h)
{
    RTPContext *ctx = h->priv_data;

    /* delegate to the underlying UDP transport */
    return ff_udp_get_local_port(ctx->rtcp_hd);
}
 
/* Expose the RTP socket fd (the primary handle) to generic URL code. */
static int rtp_get_file_handle(URLContext *h)
{
    RTPContext *ctx = h->priv_data;

    return ctx->rtp_fd;
}
 
/* Hand back both socket fds (RTP first, then RTCP) in a freshly
 * allocated array owned by the caller. */
static int rtp_get_multi_file_handle(URLContext *h, int **handles,
                                     int *numhandles)
{
    RTPContext *s = h->priv_data;
    int *fds = av_malloc(sizeof(*fds) * 2);

    if (!fds)
        return AVERROR(ENOMEM);

    fds[0] = s->rtp_fd;
    fds[1] = s->rtcp_fd;

    *handles    = fds;
    *numhandles = 2;
    return 0;
}
 
/* Registration entry for the rtp:// URL protocol. */
URLProtocol ff_rtp_protocol = {
    .name                      = "rtp",
    .url_open                  = rtp_open,
    .url_read                  = rtp_read,
    .url_write                 = rtp_write,
    .url_close                 = rtp_close,
    .url_get_file_handle       = rtp_get_file_handle,
    .url_get_multi_file_handle = rtp_get_multi_file_handle,
    .priv_data_size            = sizeof(RTPContext),
    .flags                     = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtpproto.h
0,0 → 1,31
/*
* RTP network protocol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RTPPROTO_H
#define AVFORMAT_RTPPROTO_H
 
#include "url.h"
 
int ff_rtp_set_remote_url(URLContext *h, const char *uri);
 
int ff_rtp_get_local_rtp_port(URLContext *h);
int ff_rtp_get_local_rtcp_port(URLContext *h);
 
#endif /* AVFORMAT_RTPPROTO_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtsp.c
0,0 → 1,2344
/*
* RTSP/SDP client
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/base64.h"
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
#include "libavutil/random_seed.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "avformat.h"
#include "avio_internal.h"
 
#if HAVE_POLL_H
#include <poll.h>
#endif
#include "internal.h"
#include "network.h"
#include "os_support.h"
#include "http.h"
#include "rtsp.h"
 
#include "rtpdec.h"
#include "rtpproto.h"
#include "rdt.h"
#include "rtpdec_formats.h"
#include "rtpenc_chain.h"
#include "url.h"
#include "rtpenc.h"
#include "mpegts.h"
 
/* Timeout values for socket poll, in ms,
 * and read_packet(), in seconds */
#define POLL_TIMEOUT_MS 100
#define READ_PACKET_TIMEOUT_S 10
/* Bug fix: parenthesize composite expansions so they bind correctly
 * when used inside larger expressions (e.g. division or multiplication). */
#define MAX_TIMEOUTS (READ_PACKET_TIMEOUT_S * 1000 / POLL_TIMEOUT_MS)
#define SDP_MAX_SIZE 16384
#define RECVBUF_SIZE (10 * RTP_MAX_PACKET_LENGTH)
#define DEFAULT_REORDERING_DELAY 100000

#define OFFSET(x) offsetof(RTSPState, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
#define ENC AV_OPT_FLAG_ENCODING_PARAM

/* Shared AVOption fragments reused by the rtsp/sdp/rtp option tables. */
#define RTSP_FLAG_OPTS(name, longname) \
    { name, longname, OFFSET(rtsp_flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtsp_flags" }, \
    { "filter_src", "Only receive packets from the negotiated peer IP", 0, AV_OPT_TYPE_CONST, {.i64 = RTSP_FLAG_FILTER_SRC}, 0, 0, DEC, "rtsp_flags" }

#define RTSP_MEDIATYPE_OPTS(name, longname) \
    { name, longname, OFFSET(media_type_mask), AV_OPT_TYPE_FLAGS, { .i64 = (1 << (AVMEDIA_TYPE_DATA+1)) - 1 }, INT_MIN, INT_MAX, DEC, "allowed_media_types" }, \
    { "video", "Video", 0, AV_OPT_TYPE_CONST, {.i64 = 1 << AVMEDIA_TYPE_VIDEO}, 0, 0, DEC, "allowed_media_types" }, \
    { "audio", "Audio", 0, AV_OPT_TYPE_CONST, {.i64 = 1 << AVMEDIA_TYPE_AUDIO}, 0, 0, DEC, "allowed_media_types" }, \
    { "data", "Data", 0, AV_OPT_TYPE_CONST, {.i64 = 1 << AVMEDIA_TYPE_DATA}, 0, 0, DEC, "allowed_media_types" }

#define RTSP_REORDERING_OPTS() \
    { "reorder_queue_size", "Number of packets to buffer for handling of reordered packets", OFFSET(reordering_queue_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, DEC }
 
/* User-visible options for the RTSP demuxer/muxer. */
const AVOption ff_rtsp_options[] = {
    { "initial_pause",  "Don't start playing the stream immediately", OFFSET(initial_pause), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
    FF_RTP_FLAG_OPTS(RTSPState, rtp_muxer_flags),
    { "rtsp_transport", "RTSP transport protocols", OFFSET(lower_transport_mask), AV_OPT_TYPE_FLAGS, {.i64 = 0}, INT_MIN, INT_MAX, DEC|ENC, "rtsp_transport" }, \
    { "udp", "UDP", 0, AV_OPT_TYPE_CONST, {.i64 = 1 << RTSP_LOWER_TRANSPORT_UDP}, 0, 0, DEC|ENC, "rtsp_transport" }, \
    { "tcp", "TCP", 0, AV_OPT_TYPE_CONST, {.i64 = 1 << RTSP_LOWER_TRANSPORT_TCP}, 0, 0, DEC|ENC, "rtsp_transport" }, \
    { "udp_multicast", "UDP multicast", 0, AV_OPT_TYPE_CONST, {.i64 = 1 << RTSP_LOWER_TRANSPORT_UDP_MULTICAST}, 0, 0, DEC, "rtsp_transport" },
    { "http", "HTTP tunneling", 0, AV_OPT_TYPE_CONST, {.i64 = (1 << RTSP_LOWER_TRANSPORT_HTTP)}, 0, 0, DEC, "rtsp_transport" },
    RTSP_FLAG_OPTS("rtsp_flags", "RTSP flags"),
    { "listen", "Wait for incoming connections", 0, AV_OPT_TYPE_CONST, {.i64 = RTSP_FLAG_LISTEN}, 0, 0, DEC, "rtsp_flags" },
    RTSP_MEDIATYPE_OPTS("allowed_media_types", "Media types to accept from the server"),
    { "min_port", "Minimum local UDP port", OFFSET(rtp_port_min), AV_OPT_TYPE_INT, {.i64 = RTSP_RTP_PORT_MIN}, 0, 65535, DEC|ENC },
    { "max_port", "Maximum local UDP port", OFFSET(rtp_port_max), AV_OPT_TYPE_INT, {.i64 = RTSP_RTP_PORT_MAX}, 0, 65535, DEC|ENC },
    { "timeout", "Maximum timeout (in seconds) to wait for incoming connections. -1 is infinite. Implies flag listen", OFFSET(initial_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, DEC },
    { "stimeout", "timeout (in micro seconds) of socket i/o operations.", OFFSET(stimeout), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC },
    RTSP_REORDERING_OPTS(),
    { "user-agent", "override User-Agent header", OFFSET(user_agent), AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0, DEC },
    { NULL },
};

/* Options specific to the SDP demuxer. */
static const AVOption sdp_options[] = {
    RTSP_FLAG_OPTS("sdp_flags", "SDP flags"),
    { "custom_io", "Use custom IO", 0, AV_OPT_TYPE_CONST, {.i64 = RTSP_FLAG_CUSTOM_IO}, 0, 0, DEC, "rtsp_flags" },
    { "rtcp_to_source", "Send RTCP packets to the source address of received packets", 0, AV_OPT_TYPE_CONST, {.i64 = RTSP_FLAG_RTCP_TO_SOURCE}, 0, 0, DEC, "rtsp_flags" },
    RTSP_MEDIATYPE_OPTS("allowed_media_types", "Media types to accept from the server"),
    RTSP_REORDERING_OPTS(),
    { NULL },
};

/* Options specific to the raw RTP demuxer. */
static const AVOption rtp_options[] = {
    RTSP_FLAG_OPTS("rtp_flags", "RTP flags"),
    RTSP_REORDERING_OPTS(),
    { NULL },
};
 
/* Copy the next token from *pp into buf (truncating to buf_size - 1 and
 * always NUL-terminating), after skipping leading whitespace. Copying stops
 * at the first character found in sep or at end of string; *pp is advanced
 * to the stopping character. */
static void get_word_until_chars(char *buf, int buf_size,
                                 const char *sep, const char **pp)
{
    const char *src = *pp;
    char *dst = buf;

    src += strspn(src, SPACE_CHARS);
    while (*src != '\0' && !strchr(sep, *src)) {
        if (dst - buf < buf_size - 1)
            *dst++ = *src;
        src++;
    }
    if (buf_size > 0)
        *dst = '\0';
    *pp = src;
}
 
/* Like get_word_until_chars(), but first consumes one leading '/'. */
static void get_word_sep(char *buf, int buf_size, const char *sep,
                         const char **pp)
{
    if (**pp == '/')
        (*pp)++;

    get_word_until_chars(buf, buf_size, sep, pp);
}
 
/* Read one whitespace-delimited word from *pp into buf. */
static void get_word(char *buf, int buf_size, const char **pp)
{
    get_word_until_chars(buf, buf_size, SPACE_CHARS, pp);
}
 
/** Parse a string p in the form of Range:npt=xx-xx, and determine the start
* and end time.
* Used for seeking in the rtp stream.
*/
/** Parse a string p in the form of Range:npt=xx-xx, and determine the start
 * and end time.
 * Used for seeking in the rtp stream.
 * Both outputs are set to AV_NOPTS_VALUE first; the end value stays
 * AV_NOPTS_VALUE for open-ended (live) ranges. Non-npt ranges are ignored.
 */
static void rtsp_parse_range_npt(const char *p, int64_t *start, int64_t *end)
{
    char buf[256];

    p += strspn(p, SPACE_CHARS);
    /* only "npt=" (normal play time) ranges are handled */
    if (!av_stristart(p, "npt=", &p))
        return;

    *start = AV_NOPTS_VALUE;
    *end = AV_NOPTS_VALUE;

    get_word_sep(buf, sizeof(buf), "-", &p);
    av_parse_time(start, buf, 1);
    if (*p == '-') {
        p++;
        get_word_sep(buf, sizeof(buf), "-", &p);
        av_parse_time(end, buf, 1);
    }
}
 
/* Convert a numeric host string into a sockaddr_storage.
 * Returns 0 on success, -1 if the string is not a valid numeric address. */
static int get_sockaddr(const char *buf, struct sockaddr_storage *sock)
{
    struct addrinfo *ai = NULL;
    struct addrinfo hints = { 0 };
    size_t copy_len;

    /* numeric lookup only: never trigger a DNS query here */
    hints.ai_flags = AI_NUMERICHOST;
    if (getaddrinfo(buf, NULL, &hints, &ai))
        return -1;

    copy_len = ai->ai_addrlen;
    if (copy_len > sizeof(*sock))
        copy_len = sizeof(*sock);
    memcpy(sock, ai->ai_addr, copy_len);

    freeaddrinfo(ai);
    return 0;
}
 
#if CONFIG_RTPDEC
/* Attach a dynamic payload handler to an RTSP stream, allocating its
 * private context. On allocation failure the handler is detached again.
 * codec may be NULL for streams without an AVStream. */
static void init_rtp_handler(RTPDynamicProtocolHandler *handler,
                             RTSPStream *rtsp_st, AVCodecContext *codec)
{
    if (!handler)
        return;

    if (codec)
        codec->codec_id = handler->codec_id;

    rtsp_st->dynamic_handler = handler;
    if (!handler->alloc)
        return;

    rtsp_st->dynamic_protocol_context = handler->alloc();
    if (!rtsp_st->dynamic_protocol_context)
        rtsp_st->dynamic_handler = NULL;
}
 
/* parse the rtpmap description: <codec_name>/<clock_rate>[/<other params>] */
/* parse the rtpmap description: <codec_name>/<clock_rate>[/<other params>]
 * Resolves the codec id (via the static payload table and/or a dynamic
 * handler), then applies the clock rate / channel count to the stream.
 * Always returns 0. */
static int sdp_parse_rtpmap(AVFormatContext *s,
                            AVStream *st, RTSPStream *rtsp_st,
                            int payload_type, const char *p)
{
    AVCodecContext *codec = st->codec;
    char buf[256];
    int i;
    AVCodec *c;
    const char *c_name;

    /* See if we can handle this kind of payload.
     * The space should normally not be there but some Real streams or
     * particular servers ("RealServer Version 6.1.3.970", see issue 1658)
     * have a trailing space. */
    get_word_sep(buf, sizeof(buf), "/ ", &p);
    if (payload_type < RTP_PT_PRIVATE) {
        /* We are in a standard case
         * (from http://www.iana.org/assignments/rtp-parameters). */
        codec->codec_id = ff_rtp_codec_id(buf, codec->codec_type);
    }

    if (codec->codec_id == AV_CODEC_ID_NONE) {
        /* dynamic payload type: look the format name up among the
         * registered depacketizers */
        RTPDynamicProtocolHandler *handler =
            ff_rtp_handler_find_by_name(buf, codec->codec_type);
        init_rtp_handler(handler, rtsp_st, codec);
        /* If no dynamic handler was found, check with the list of standard
         * allocated types, if such a stream for some reason happens to
         * use a private payload type. This isn't handled in rtpdec.c, since
         * the format name from the rtpmap line never is passed into rtpdec. */
        if (!rtsp_st->dynamic_handler)
            codec->codec_id = ff_rtp_codec_id(buf, codec->codec_type);
    }

    /* c_name is only used for the log messages below */
    c = avcodec_find_decoder(codec->codec_id);
    if (c && c->name)
        c_name = c->name;
    else
        c_name = "(null)";

    /* next field is the clock rate */
    get_word_sep(buf, sizeof(buf), "/", &p);
    i = atoi(buf);
    switch (codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        av_log(s, AV_LOG_DEBUG, "audio codec set to: %s\n", c_name);
        codec->sample_rate = RTSP_DEFAULT_AUDIO_SAMPLERATE;
        codec->channels = RTSP_DEFAULT_NB_AUDIO_CHANNELS;
        if (i > 0) {
            codec->sample_rate = i;
            avpriv_set_pts_info(st, 32, 1, codec->sample_rate);
            /* optional trailing parameter is the channel count */
            get_word_sep(buf, sizeof(buf), "/", &p);
            i = atoi(buf);
            if (i > 0)
                codec->channels = i;
        }
        av_log(s, AV_LOG_DEBUG, "audio samplerate set to: %i\n",
               codec->sample_rate);
        av_log(s, AV_LOG_DEBUG, "audio channels set to: %i\n",
               codec->channels);
        break;
    case AVMEDIA_TYPE_VIDEO:
        av_log(s, AV_LOG_DEBUG, "video codec set to: %s\n", c_name);
        /* video timestamps run at the advertised clock rate */
        if (i > 0)
            avpriv_set_pts_info(st, 32, 1, i);
        break;
    default:
        break;
    }
    /* give the dynamic handler a chance to initialize per-stream state */
    if (rtsp_st->dynamic_handler && rtsp_st->dynamic_handler->init)
        rtsp_st->dynamic_handler->init(s, st->index,
                                       rtsp_st->dynamic_protocol_context);
    return 0;
}
 
/* parse the attribute line from the fmtp a line of an sdp response. This
* is broken out as a function because it is used in rtp_h264.c, which is
* forthcoming. */
/* parse the attribute line from the fmtp a line of an sdp response. This
 * is broken out as a function because it is used in rtp_h264.c, which is
 * forthcoming.
 * Reads one "attr=value;" pair, advancing *p past it.
 * Returns 1 when a pair was read, 0 at end of string. */
int ff_rtsp_next_attr_and_value(const char **p, char *attr, int attr_size,
                                char *value, int value_size)
{
    *p += strspn(*p, SPACE_CHARS);
    if (!**p)
        return 0;

    get_word_sep(attr, attr_size, "=", p);
    if (**p == '=')
        (*p)++;

    get_word_sep(value, value_size, ";", p);
    if (**p == ';')
        (*p)++;

    return 1;
}
 
/* Session-level defaults accumulated while parsing an SDP, applied to each
 * subsequent media ("m=") section. */
typedef struct SDPParseState {
    /* SDP only */
    struct sockaddr_storage default_ip; ///< session-level connection address
    int            default_ttl;         ///< session-level multicast TTL
    int            skip_media;  ///< set if an unknown m= line occurs
    int nb_default_include_source_addrs; /**< Number of source-specific multicast include source IP address (from SDP content) */
    struct RTSPSource **default_include_source_addrs; /**< Source-specific multicast include source IP address (from SDP content) */
    int nb_default_exclude_source_addrs; /**< Number of source-specific multicast exclude source IP address (from SDP content) */
    struct RTSPSource **default_exclude_source_addrs; /**< Source-specific multicast exclude source IP address (from SDP content) */
} SDPParseState;
 
/* Duplicate the session-level source-filter entries into a media section's
 * own list. Entries that cannot be allocated are skipped (best effort). */
static void copy_default_source_addrs(struct RTSPSource **addrs, int count,
                                      struct RTSPSource ***dest, int *dest_count)
{
    int i;

    for (i = 0; i < count; i++) {
        RTSPSource *copy = av_malloc(sizeof(*copy));

        if (!copy)
            continue;

        memcpy(copy, addrs[i], sizeof(*copy));
        dynarray_add(dest, dest_count, copy);
    }
}
 
static void sdp_parse_line(AVFormatContext *s, SDPParseState *s1,
int letter, const char *buf)
{
RTSPState *rt = s->priv_data;
char buf1[64], st_type[64];
const char *p;
enum AVMediaType codec_type;
int payload_type, i;
AVStream *st;
RTSPStream *rtsp_st;
RTSPSource *rtsp_src;
struct sockaddr_storage sdp_ip;
int ttl;
 
av_dlog(s, "sdp: %c='%s'\n", letter, buf);
 
p = buf;
if (s1->skip_media && letter != 'm')
return;
switch (letter) {
case 'c':
get_word(buf1, sizeof(buf1), &p);
if (strcmp(buf1, "IN") != 0)
return;
get_word(buf1, sizeof(buf1), &p);
if (strcmp(buf1, "IP4") && strcmp(buf1, "IP6"))
return;
get_word_sep(buf1, sizeof(buf1), "/", &p);
if (get_sockaddr(buf1, &sdp_ip))
return;
ttl = 16;
if (*p == '/') {
p++;
get_word_sep(buf1, sizeof(buf1), "/", &p);
ttl = atoi(buf1);
}
if (s->nb_streams == 0) {
s1->default_ip = sdp_ip;
s1->default_ttl = ttl;
} else {
rtsp_st = rt->rtsp_streams[rt->nb_rtsp_streams - 1];
rtsp_st->sdp_ip = sdp_ip;
rtsp_st->sdp_ttl = ttl;
}
break;
case 's':
av_dict_set(&s->metadata, "title", p, 0);
break;
case 'i':
if (s->nb_streams == 0) {
av_dict_set(&s->metadata, "comment", p, 0);
break;
}
break;
case 'm':
/* new stream */
s1->skip_media = 0;
codec_type = AVMEDIA_TYPE_UNKNOWN;
get_word(st_type, sizeof(st_type), &p);
if (!strcmp(st_type, "audio")) {
codec_type = AVMEDIA_TYPE_AUDIO;
} else if (!strcmp(st_type, "video")) {
codec_type = AVMEDIA_TYPE_VIDEO;
} else if (!strcmp(st_type, "application")) {
codec_type = AVMEDIA_TYPE_DATA;
}
if (codec_type == AVMEDIA_TYPE_UNKNOWN || !(rt->media_type_mask & (1 << codec_type))) {
s1->skip_media = 1;
return;
}
rtsp_st = av_mallocz(sizeof(RTSPStream));
if (!rtsp_st)
return;
rtsp_st->stream_index = -1;
dynarray_add(&rt->rtsp_streams, &rt->nb_rtsp_streams, rtsp_st);
 
rtsp_st->sdp_ip = s1->default_ip;
rtsp_st->sdp_ttl = s1->default_ttl;
 
copy_default_source_addrs(s1->default_include_source_addrs,
s1->nb_default_include_source_addrs,
&rtsp_st->include_source_addrs,
&rtsp_st->nb_include_source_addrs);
copy_default_source_addrs(s1->default_exclude_source_addrs,
s1->nb_default_exclude_source_addrs,
&rtsp_st->exclude_source_addrs,
&rtsp_st->nb_exclude_source_addrs);
 
get_word(buf1, sizeof(buf1), &p); /* port */
rtsp_st->sdp_port = atoi(buf1);
 
get_word(buf1, sizeof(buf1), &p); /* protocol */
if (!strcmp(buf1, "udp"))
rt->transport = RTSP_TRANSPORT_RAW;
else if (strstr(buf1, "/AVPF") || strstr(buf1, "/SAVPF"))
rtsp_st->feedback = 1;
 
/* XXX: handle list of formats */
get_word(buf1, sizeof(buf1), &p); /* format list */
rtsp_st->sdp_payload_type = atoi(buf1);
 
if (!strcmp(ff_rtp_enc_name(rtsp_st->sdp_payload_type), "MP2T")) {
/* no corresponding stream */
if (rt->transport == RTSP_TRANSPORT_RAW) {
if (!rt->ts && CONFIG_RTPDEC)
rt->ts = ff_mpegts_parse_open(s);
} else {
RTPDynamicProtocolHandler *handler;
handler = ff_rtp_handler_find_by_id(
rtsp_st->sdp_payload_type, AVMEDIA_TYPE_DATA);
init_rtp_handler(handler, rtsp_st, NULL);
if (handler && handler->init)
handler->init(s, -1, rtsp_st->dynamic_protocol_context);
}
} else if (rt->server_type == RTSP_SERVER_WMS &&
codec_type == AVMEDIA_TYPE_DATA) {
/* RTX stream, a stream that carries all the other actual
* audio/video streams. Don't expose this to the callers. */
} else {
st = avformat_new_stream(s, NULL);
if (!st)
return;
st->id = rt->nb_rtsp_streams - 1;
rtsp_st->stream_index = st->index;
st->codec->codec_type = codec_type;
if (rtsp_st->sdp_payload_type < RTP_PT_PRIVATE) {
RTPDynamicProtocolHandler *handler;
/* if standard payload type, we can find the codec right now */
ff_rtp_get_codec_info(st->codec, rtsp_st->sdp_payload_type);
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
st->codec->sample_rate > 0)
avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate);
/* Even static payload types may need a custom depacketizer */
handler = ff_rtp_handler_find_by_id(
rtsp_st->sdp_payload_type, st->codec->codec_type);
init_rtp_handler(handler, rtsp_st, st->codec);
if (handler && handler->init)
handler->init(s, st->index,
rtsp_st->dynamic_protocol_context);
}
}
/* put a default control url */
av_strlcpy(rtsp_st->control_url, rt->control_uri,
sizeof(rtsp_st->control_url));
break;
case 'a':
if (av_strstart(p, "control:", &p)) {
if (s->nb_streams == 0) {
if (!strncmp(p, "rtsp://", 7))
av_strlcpy(rt->control_uri, p,
sizeof(rt->control_uri));
} else {
char proto[32];
/* get the control url */
rtsp_st = rt->rtsp_streams[rt->nb_rtsp_streams - 1];
 
/* XXX: may need to add full url resolution */
av_url_split(proto, sizeof(proto), NULL, 0, NULL, 0,
NULL, NULL, 0, p);
if (proto[0] == '\0') {
/* relative control URL */
if (rtsp_st->control_url[strlen(rtsp_st->control_url)-1]!='/')
av_strlcat(rtsp_st->control_url, "/",
sizeof(rtsp_st->control_url));
av_strlcat(rtsp_st->control_url, p,
sizeof(rtsp_st->control_url));
} else
av_strlcpy(rtsp_st->control_url, p,
sizeof(rtsp_st->control_url));
}
} else if (av_strstart(p, "rtpmap:", &p) && s->nb_streams > 0) {
/* NOTE: rtpmap is only supported AFTER the 'm=' tag */
get_word(buf1, sizeof(buf1), &p);
payload_type = atoi(buf1);
rtsp_st = rt->rtsp_streams[rt->nb_rtsp_streams - 1];
if (rtsp_st->stream_index >= 0) {
st = s->streams[rtsp_st->stream_index];
sdp_parse_rtpmap(s, st, rtsp_st, payload_type, p);
}
} else if (av_strstart(p, "fmtp:", &p) ||
av_strstart(p, "framesize:", &p)) {
/* NOTE: fmtp is only supported AFTER the 'a=rtpmap:xxx' tag */
// let dynamic protocol handlers have a stab at the line.
get_word(buf1, sizeof(buf1), &p);
payload_type = atoi(buf1);
for (i = 0; i < rt->nb_rtsp_streams; i++) {
rtsp_st = rt->rtsp_streams[i];
if (rtsp_st->sdp_payload_type == payload_type &&
rtsp_st->dynamic_handler &&
rtsp_st->dynamic_handler->parse_sdp_a_line)
rtsp_st->dynamic_handler->parse_sdp_a_line(s, i,
rtsp_st->dynamic_protocol_context, buf);
}
} else if (av_strstart(p, "range:", &p)) {
int64_t start, end;
 
// this is so that seeking on a streamed file can work.
rtsp_parse_range_npt(p, &start, &end);
s->start_time = start;
/* AV_NOPTS_VALUE means live broadcast (and can't seek) */
s->duration = (end == AV_NOPTS_VALUE) ?
AV_NOPTS_VALUE : end - start;
} else if (av_strstart(p, "IsRealDataType:integer;",&p)) {
if (atoi(p) == 1)
rt->transport = RTSP_TRANSPORT_RDT;
} else if (av_strstart(p, "SampleRate:integer;", &p) &&
s->nb_streams > 0) {
st = s->streams[s->nb_streams - 1];
st->codec->sample_rate = atoi(p);
} else if (av_strstart(p, "crypto:", &p) && s->nb_streams > 0) {
// RFC 4568
rtsp_st = rt->rtsp_streams[rt->nb_rtsp_streams - 1];
get_word(buf1, sizeof(buf1), &p); // ignore tag
get_word(rtsp_st->crypto_suite, sizeof(rtsp_st->crypto_suite), &p);
p += strspn(p, SPACE_CHARS);
if (av_strstart(p, "inline:", &p))
get_word(rtsp_st->crypto_params, sizeof(rtsp_st->crypto_params), &p);
} else if (av_strstart(p, "source-filter:", &p)) {
int exclude = 0;
get_word(buf1, sizeof(buf1), &p);
if (strcmp(buf1, "incl") && strcmp(buf1, "excl"))
return;
exclude = !strcmp(buf1, "excl");
 
get_word(buf1, sizeof(buf1), &p);
if (strcmp(buf1, "IN") != 0)
return;
get_word(buf1, sizeof(buf1), &p);
if (strcmp(buf1, "IP4") && strcmp(buf1, "IP6") && strcmp(buf1, "*"))
return;
// not checking that the destination address actually matches or is wildcard
get_word(buf1, sizeof(buf1), &p);
 
while (*p != '\0') {
rtsp_src = av_mallocz(sizeof(*rtsp_src));
if (!rtsp_src)
return;
get_word(rtsp_src->addr, sizeof(rtsp_src->addr), &p);
if (exclude) {
if (s->nb_streams == 0) {
dynarray_add(&s1->default_exclude_source_addrs, &s1->nb_default_exclude_source_addrs, rtsp_src);
} else {
rtsp_st = rt->rtsp_streams[rt->nb_rtsp_streams - 1];
dynarray_add(&rtsp_st->exclude_source_addrs, &rtsp_st->nb_exclude_source_addrs, rtsp_src);
}
} else {
if (s->nb_streams == 0) {
dynarray_add(&s1->default_include_source_addrs, &s1->nb_default_include_source_addrs, rtsp_src);
} else {
rtsp_st = rt->rtsp_streams[rt->nb_rtsp_streams - 1];
dynarray_add(&rtsp_st->include_source_addrs, &rtsp_st->nb_include_source_addrs, rtsp_src);
}
}
}
} else {
if (rt->server_type == RTSP_SERVER_WMS)
ff_wms_parse_sdp_a_line(s, p);
if (s->nb_streams > 0) {
rtsp_st = rt->rtsp_streams[rt->nb_rtsp_streams - 1];
 
if (rt->server_type == RTSP_SERVER_REAL)
ff_real_parse_sdp_a_line(s, rtsp_st->stream_index, p);
 
if (rtsp_st->dynamic_handler &&
rtsp_st->dynamic_handler->parse_sdp_a_line)
rtsp_st->dynamic_handler->parse_sdp_a_line(s,
rtsp_st->stream_index,
rtsp_st->dynamic_protocol_context, buf);
}
}
break;
}
}
 
/**
 * Parse the SDP session description (as returned by DESCRIBE) line by
 * line, handing each "x=value" line to sdp_parse_line(), which creates
 * the RTSP streams; finally allocate the pollfd array used later for
 * reading media data.
 *
 * @param s       RTSP (de)muxer context (s->priv_data is an RTSPState)
 * @param content complete NUL-terminated SDP text
 * @return 0 on success, AVERROR(ENOMEM) if the pollfd allocation fails
 */
int ff_sdp_parse(AVFormatContext *s, const char *content)
{
    RTSPState *rt = s->priv_data;
    const char *p;
    int letter, i;
    /* Some SDP lines, particularly for Realmedia or ASF RTSP streams,
     * contain long SDP lines containing complete ASF Headers (several
     * kB) or arrays of MDPR (RM stream descriptor) headers plus
     * "rulebooks" describing their properties. Therefore, the SDP line
     * buffer is large.
     *
     * The Vorbis FMTP line can be up to 16KB - see xiph_parse_sdp_line
     * in rtpdec_xiph.c. */
    char buf[16384], *q;
    SDPParseState sdp_parse_state = { { 0 } }, *s1 = &sdp_parse_state;

    p = content;
    for (;;) {
        /* skip leading whitespace; end of input ends the parse */
        p += strspn(p, SPACE_CHARS);
        letter = *p;
        if (letter == '\0')
            break;
        p++;
        /* lines not shaped like "x=..." are silently skipped */
        if (*p != '=')
            goto next_line;
        p++;
        /* copy the value part into buf, silently truncating anything
         * beyond the (large) line buffer */
        q = buf;
        while (*p != '\n' && *p != '\r' && *p != '\0') {
            if ((q - buf) < sizeof(buf) - 1)
                *q++ = *p;
            p++;
        }
        *q = '\0';
        sdp_parse_line(s, s1, letter, buf);
    next_line:
        /* advance to the first character of the next line */
        while (*p != '\n' && *p != '\0')
            p++;
        if (*p == '\n')
            p++;
    }

    /* session-level source filters were only needed while the "m="
     * sections were being parsed; release them now */
    for (i = 0; i < s1->nb_default_include_source_addrs; i++)
        av_free(s1->default_include_source_addrs[i]);
    av_freep(&s1->default_include_source_addrs);
    for (i = 0; i < s1->nb_default_exclude_source_addrs; i++)
        av_free(s1->default_exclude_source_addrs[i]);
    av_freep(&s1->default_exclude_source_addrs);

    /* two pollfds (RTP + RTCP) per stream plus one extra pair for the
     * RTSP control connection itself */
    rt->p = av_malloc(sizeof(struct pollfd)*2*(rt->nb_rtsp_streams+1));
    if (!rt->p) return AVERROR(ENOMEM);
    return 0;
}
#endif /* CONFIG_RTPDEC */
 
/**
 * Tear down everything created by the SETUP phase: per-stream transport
 * contexts (muxer chain or RTP/RDT parser) and the RTP data handles.
 * The RTSPStream structs themselves stay allocated.
 */
void ff_rtsp_undo_setup(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    int idx;

    for (idx = 0; idx < rt->nb_rtsp_streams; idx++) {
        RTSPStream *stream = rt->rtsp_streams[idx];

        if (!stream)
            continue;

        if (stream->transport_priv) {
            if (s->oformat) {
                /* muxing: transport_priv is a chained RTP muxer context */
                AVFormatContext *rtpctx = stream->transport_priv;

                av_write_trailer(rtpctx);
                if (rt->lower_transport != RTSP_LOWER_TRANSPORT_TCP) {
                    avio_close(rtpctx->pb);
                } else {
                    /* interleaved TCP wrote into a dynamic buffer;
                     * fetch and discard it */
                    uint8_t *dyn_buf;
                    avio_close_dyn_buf(rtpctx->pb, &dyn_buf);
                    av_free(dyn_buf);
                }
                avformat_free_context(rtpctx);
            } else if (rt->transport == RTSP_TRANSPORT_RDT && CONFIG_RTPDEC) {
                ff_rdt_parse_close(stream->transport_priv);
            } else if (rt->transport == RTSP_TRANSPORT_RTP && CONFIG_RTPDEC) {
                ff_rtp_parse_close(stream->transport_priv);
            }
        }
        stream->transport_priv = NULL;

        if (stream->rtp_handle)
            ffurl_close(stream->rtp_handle);
        stream->rtp_handle = NULL;
    }
}
 
/* Close and free all RTSP streams and the associated demuxer state:
 * transport contexts, dynamic protocol handlers, source-filter address
 * lists, the ASF helper context, the MPEG-TS parser, the pollfd array
 * and the receive buffer. */
void ff_rtsp_close_streams(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    int idx, k;

    ff_rtsp_undo_setup(s);

    for (idx = 0; idx < rt->nb_rtsp_streams; idx++) {
        RTSPStream *stream = rt->rtsp_streams[idx];

        if (!stream)
            continue;

        if (stream->dynamic_handler && stream->dynamic_protocol_context)
            stream->dynamic_handler->free(stream->dynamic_protocol_context);

        for (k = 0; k < stream->nb_include_source_addrs; k++)
            av_free(stream->include_source_addrs[k]);
        av_freep(&stream->include_source_addrs);

        for (k = 0; k < stream->nb_exclude_source_addrs; k++)
            av_free(stream->exclude_source_addrs[k]);
        av_freep(&stream->exclude_source_addrs);

        av_free(stream);
    }
    av_free(rt->rtsp_streams);

    if (rt->asf_ctx)
        avformat_close_input(&rt->asf_ctx);
    if (rt->ts && CONFIG_RTPDEC)
        ff_mpegts_parse_close(rt->ts);

    av_free(rt->p);
    av_free(rt->recvbuf);
}
 
/**
 * Open the data-handling context for one stream after SETUP succeeded:
 * a chained RTP muxer (output), nothing (raw MPEG-TS), an RDT parser,
 * or an RTP depacketizer, depending on the negotiated transport.
 *
 * @param s       RTSP (de)muxer context
 * @param rtsp_st the stream whose transport_priv should be created
 * @return 0 on success, a negative AVERROR on failure
 */
int ff_rtsp_open_transport_ctx(AVFormatContext *s, RTSPStream *rtsp_st)
{
    RTSPState *rt = s->priv_data;
    AVStream *st = NULL;
    int reordering_queue_size = rt->reordering_queue_size;
    /* negative means "not set by the user": derive a default — no
     * reordering for TCP (in-order by nature) or zero max_delay */
    if (reordering_queue_size < 0) {
        if (rt->lower_transport == RTSP_LOWER_TRANSPORT_TCP || !s->max_delay)
            reordering_queue_size = 0;
        else
            reordering_queue_size = RTP_REORDER_QUEUE_DEFAULT_SIZE;
    }

    /* open the RTP context */
    if (rtsp_st->stream_index >= 0)
        st = s->streams[rtsp_st->stream_index];
    if (!st)
        s->ctx_flags |= AVFMTCTX_NOHEADER;

    if (s->oformat && CONFIG_RTSP_MUXER) {
        int ret = ff_rtp_chain_mux_open((AVFormatContext **)&rtsp_st->transport_priv, s, st,
                                        rtsp_st->rtp_handle,
                                        RTSP_TCP_MAX_PACKET_SIZE,
                                        rtsp_st->stream_index);
        /* Ownership of rtp_handle is passed to the rtp mux context */
        rtsp_st->rtp_handle = NULL;
        if (ret < 0)
            return ret;
    } else if (rt->transport == RTSP_TRANSPORT_RAW) {
        return 0; // Don't need to open any parser here
    } else if (rt->transport == RTSP_TRANSPORT_RDT && CONFIG_RTPDEC)
        /* NOTE(review): this path dereferences st->index; it appears to
         * rely on RDT streams always having an AVStream — confirm st
         * cannot be NULL when transport is RDT */
        rtsp_st->transport_priv = ff_rdt_parse_open(s, st->index,
                                            rtsp_st->dynamic_protocol_context,
                                            rtsp_st->dynamic_handler);
    else if (CONFIG_RTPDEC)
        rtsp_st->transport_priv = ff_rtp_parse_open(s, st,
                                         rtsp_st->sdp_payload_type,
                                         reordering_queue_size);

    if (!rtsp_st->transport_priv) {
        return AVERROR(ENOMEM);
    } else if (rt->transport == RTSP_TRANSPORT_RTP && CONFIG_RTPDEC) {
        /* hook up the depacketizer and any SRTP parameters announced
         * via the SDP "a=crypto:" line */
        if (rtsp_st->dynamic_handler) {
            ff_rtp_parse_set_dynamic_protocol(rtsp_st->transport_priv,
                                              rtsp_st->dynamic_protocol_context,
                                              rtsp_st->dynamic_handler);
        }
        if (rtsp_st->crypto_suite[0])
            ff_rtp_parse_set_crypto(rtsp_st->transport_priv,
                                    rtsp_st->crypto_suite,
                                    rtsp_st->crypto_params);
    }

    return 0;
}
 
#if CONFIG_RTSP_DEMUXER || CONFIG_RTSP_MUXER
/* Parse either "min-max" or a single number at **pp, store the bounds
 * in *min_ptr/*max_ptr (equal for a single number), and advance *pp
 * past the consumed characters. */
static void rtsp_parse_range(int *min_ptr, int *max_ptr, const char **pp)
{
    const char *start = *pp;
    char *end;
    int value;

    start += strspn(start, SPACE_CHARS);
    value    = strtol(start, &end, 10);
    *min_ptr = value;
    if (*end == '-') {
        /* explicit upper bound follows the dash */
        end++;
        *max_ptr = strtol(end, &end, 10);
    } else {
        *max_ptr = value;
    }
    *pp = end;
}
 
/* XXX: only one transport specification is parsed */
/**
 * Parse the value of an RTSP "Transport:" header (RFC 2326 section
 * 12.39) into reply->transports[]. Each comma-separated spec yields one
 * RTSPTransportField with its protocol, lower transport and parameters
 * (ports, interleaved channels, destination, mode, ...).
 *
 * Fixes vs. the previous revision:
 *  - bound reply->nb_transports to the size of the transports array so
 *    a malicious/broken server sending many specs cannot overflow it;
 *  - initialize lower_transport so an unrecognized protocol token does
 *    not make av_strcasecmp() read an uninitialized buffer (UB).
 */
static void rtsp_parse_transport(RTSPMessageHeader *reply, const char *p)
{
    char transport_protocol[16];
    char profile[16];
    char lower_transport[16] = "";
    char parameter[16];
    RTSPTransportField *th;
    char buf[256];

    reply->nb_transports = 0;

    for (;;) {
        p += strspn(p, SPACE_CHARS);
        if (*p == '\0')
            break;
        /* never write past the fixed-size transports array */
        if (reply->nb_transports >=
            (int)(sizeof(reply->transports) / sizeof(reply->transports[0])))
            break;

        th = &reply->transports[reply->nb_transports];

        get_word_sep(transport_protocol, sizeof(transport_protocol),
                     "/", &p);
        if (!av_strcasecmp (transport_protocol, "rtp")) {
            get_word_sep(profile, sizeof(profile), "/;,", &p);
            lower_transport[0] = '\0';
            /* rtp/avp/<protocol> */
            if (*p == '/') {
                get_word_sep(lower_transport, sizeof(lower_transport),
                             ";,", &p);
            }
            th->transport = RTSP_TRANSPORT_RTP;
        } else if (!av_strcasecmp (transport_protocol, "x-pn-tng") ||
                   !av_strcasecmp (transport_protocol, "x-real-rdt")) {
            /* x-pn-tng/<protocol> */
            get_word_sep(lower_transport, sizeof(lower_transport), "/;,", &p);
            profile[0] = '\0';
            th->transport = RTSP_TRANSPORT_RDT;
        } else if (!av_strcasecmp(transport_protocol, "raw")) {
            get_word_sep(profile, sizeof(profile), "/;,", &p);
            lower_transport[0] = '\0';
            /* raw/raw/<protocol> */
            if (*p == '/') {
                get_word_sep(lower_transport, sizeof(lower_transport),
                             ";,", &p);
            }
            th->transport = RTSP_TRANSPORT_RAW;
        }
        /* anything other than explicit TCP is treated as UDP here;
         * "multicast" below refines that */
        if (!av_strcasecmp(lower_transport, "TCP"))
            th->lower_transport = RTSP_LOWER_TRANSPORT_TCP;
        else
            th->lower_transport = RTSP_LOWER_TRANSPORT_UDP;

        if (*p == ';')
            p++;
        /* get each parameter */
        while (*p != '\0' && *p != ',') {
            get_word_sep(parameter, sizeof(parameter), "=;,", &p);
            if (!strcmp(parameter, "port")) {
                if (*p == '=') {
                    p++;
                    rtsp_parse_range(&th->port_min, &th->port_max, &p);
                }
            } else if (!strcmp(parameter, "client_port")) {
                if (*p == '=') {
                    p++;
                    rtsp_parse_range(&th->client_port_min,
                                     &th->client_port_max, &p);
                }
            } else if (!strcmp(parameter, "server_port")) {
                if (*p == '=') {
                    p++;
                    rtsp_parse_range(&th->server_port_min,
                                     &th->server_port_max, &p);
                }
            } else if (!strcmp(parameter, "interleaved")) {
                if (*p == '=') {
                    p++;
                    rtsp_parse_range(&th->interleaved_min,
                                     &th->interleaved_max, &p);
                }
            } else if (!strcmp(parameter, "multicast")) {
                if (th->lower_transport == RTSP_LOWER_TRANSPORT_UDP)
                    th->lower_transport = RTSP_LOWER_TRANSPORT_UDP_MULTICAST;
            } else if (!strcmp(parameter, "ttl")) {
                if (*p == '=') {
                    char *end;
                    p++;
                    th->ttl = strtol(p, &end, 10);
                    p = end;
                }
            } else if (!strcmp(parameter, "destination")) {
                if (*p == '=') {
                    p++;
                    get_word_sep(buf, sizeof(buf), ";,", &p);
                    get_sockaddr(buf, &th->destination);
                }
            } else if (!strcmp(parameter, "source")) {
                if (*p == '=') {
                    p++;
                    get_word_sep(buf, sizeof(buf), ";,", &p);
                    av_strlcpy(th->source, buf, sizeof(th->source));
                }
            } else if (!strcmp(parameter, "mode")) {
                if (*p == '=') {
                    p++;
                    get_word_sep(buf, sizeof(buf), ";, ", &p);
                    if (!strcmp(buf, "record") ||
                        !strcmp(buf, "receive"))
                        th->mode_record = 1;
                }
            }

            /* skip anything left of an unknown/partially parsed param */
            while (*p != ';' && *p != '\0' && *p != ',')
                p++;
            if (*p == ';')
                p++;
        }
        if (*p == ',')
            p++;

        reply->nb_transports++;
    }
}
 
/* Apply one RTP-Info entry: find the stream whose control URL matches
 * and seed its RTP demux context with the announced base timestamp.
 * Entries without a timestamp or URL, or non-RTP transports, are
 * ignored. */
static void handle_rtp_info(RTSPState *rt, const char *url,
                            uint32_t seq, uint32_t rtptime)
{
    int idx;

    if (!rtptime || !url[0])
        return;
    if (rt->transport != RTSP_TRANSPORT_RTP)
        return;

    for (idx = 0; idx < rt->nb_rtsp_streams; idx++) {
        RTSPStream *stream    = rt->rtsp_streams[idx];
        RTPDemuxContext *rtpctx = stream->transport_priv;

        if (!rtpctx || strcmp(stream->control_url, url))
            continue;
        rtpctx->base_timestamp = rtptime;
        break;
    }
}
 
/* Parse the value of an "RTP-Info:" header: a comma-separated list of
 * entries, each a set of "key=value" fields separated by ';'. The
 * recognized keys are url, seq and rtptime; each complete entry is
 * handed to handle_rtp_info(). */
static void rtsp_parse_rtp_info(RTSPState *rt, const char *p)
{
    char key[20], value[1024], url[1024] = "";
    uint32_t seq = 0, rtptime = 0;
    int nb_fields = 0;

    for (;;) {
        p += strspn(p, SPACE_CHARS);
        if (*p == '\0')
            break;

        get_word_sep(key, sizeof(key), "=", &p);
        if (*p != '=')
            break;
        p++;
        get_word_sep(value, sizeof(value), ";, ", &p);
        nb_fields++;

        if (!strcmp(key, "url"))
            av_strlcpy(url, value, sizeof(url));
        else if (!strcmp(key, "seq"))
            seq = strtoul(value, NULL, 10);
        else if (!strcmp(key, "rtptime"))
            rtptime = strtoul(value, NULL, 10);

        if (*p == ',') {
            /* entry complete: flush it and reset the accumulators */
            handle_rtp_info(rt, url, seq, rtptime);
            url[0] = '\0';
            seq = rtptime = 0;
            nb_fields = 0;
        }
        if (*p)
            p++;
    }
    /* flush a trailing entry that was not terminated by ',' */
    if (nb_fields > 0)
        handle_rtp_info(rt, url, seq, rtptime);
}
 
/**
 * Parse one RTSP response header line and record its value in *reply;
 * a few headers additionally update the (de)muxer state in rt.
 *
 * @param reply  destination for the parsed header values
 * @param buf    one header line, without the trailing CR/LF
 * @param rt     RTSP state, or NULL; needed for the auth, Content-Base,
 *               RTP-Info, Public and x-Accept-Dynamic-Rate headers
 * @param method the RTSP method this reply answers, or NULL; used to
 *               restrict Content-Base/RTP-Info/Public handling to the
 *               requests where they are meaningful
 */
void ff_rtsp_parse_line(RTSPMessageHeader *reply, const char *buf,
                        RTSPState *rt, const char *method)
{
    const char *p;

    /* NOTE: we do case independent match for broken servers */
    p = buf;
    if (av_stristart(p, "Session:", &p)) {
        int t;
        get_word_sep(reply->session_id, sizeof(reply->session_id), ";", &p);
        /* the session id may carry a ";timeout=N" attribute */
        if (av_stristart(p, ";timeout=", &p) &&
            (t = strtol(p, NULL, 10)) > 0) {
            reply->timeout = t;
        }
    } else if (av_stristart(p, "Content-Length:", &p)) {
        reply->content_length = strtol(p, NULL, 10);
    } else if (av_stristart(p, "Transport:", &p)) {
        rtsp_parse_transport(reply, p);
    } else if (av_stristart(p, "CSeq:", &p)) {
        reply->seq = strtol(p, NULL, 10);
    } else if (av_stristart(p, "Range:", &p)) {
        rtsp_parse_range_npt(p, &reply->range_start, &reply->range_end);
    } else if (av_stristart(p, "RealChallenge1:", &p)) {
        p += strspn(p, SPACE_CHARS);
        av_strlcpy(reply->real_challenge, p, sizeof(reply->real_challenge));
    } else if (av_stristart(p, "Server:", &p)) {
        p += strspn(p, SPACE_CHARS);
        av_strlcpy(reply->server, p, sizeof(reply->server));
    } else if (av_stristart(p, "Notice:", &p) ||
               av_stristart(p, "X-Notice:", &p)) {
        reply->notice = strtol(p, NULL, 10);
    } else if (av_stristart(p, "Location:", &p)) {
        p += strspn(p, SPACE_CHARS);
        av_strlcpy(reply->location, p , sizeof(reply->location));
    } else if (av_stristart(p, "WWW-Authenticate:", &p) && rt) {
        p += strspn(p, SPACE_CHARS);
        ff_http_auth_handle_header(&rt->auth_state, "WWW-Authenticate", p);
    } else if (av_stristart(p, "Authentication-Info:", &p) && rt) {
        p += strspn(p, SPACE_CHARS);
        ff_http_auth_handle_header(&rt->auth_state, "Authentication-Info", p);
    } else if (av_stristart(p, "Content-Base:", &p) && rt) {
        p += strspn(p, SPACE_CHARS);
        /* only a DESCRIBE reply may redefine the base control URL */
        if (method && !strcmp(method, "DESCRIBE"))
            av_strlcpy(rt->control_uri, p , sizeof(rt->control_uri));
    } else if (av_stristart(p, "RTP-Info:", &p) && rt) {
        p += strspn(p, SPACE_CHARS);
        /* RTP-Info timestamps only make sense in a PLAY reply */
        if (method && !strcmp(method, "PLAY"))
            rtsp_parse_rtp_info(rt, p);
    } else if (av_stristart(p, "Public:", &p) && rt) {
        if (strstr(p, "GET_PARAMETER") &&
            method && !strcmp(method, "OPTIONS"))
            rt->get_parameter_supported = 1;
    } else if (av_stristart(p, "x-Accept-Dynamic-Rate:", &p) && rt) {
        p += strspn(p, SPACE_CHARS);
        rt->accept_dynamic_rate = atoi(p);
    } else if (av_stristart(p, "Content-Type:", &p)) {
        p += strspn(p, SPACE_CHARS);
        av_strlcpy(reply->content_type, p, sizeof(reply->content_type));
    }
}
 
/* Skip one RTP/TCP interleaved packet: the '$' marker has already been
 * consumed by the caller, so read the remaining 3 header bytes (channel
 * id + 16-bit big-endian length) and drain that many payload bytes. */
void ff_rtsp_skip_packet(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    uint8_t buf[1024];
    int remaining, chunk, ret;

    ret = ffurl_read_complete(rt->rtsp_hd, buf, 3);
    if (ret != 3)
        return;
    remaining = AV_RB16(buf + 1);

    av_dlog(s, "skipping RTP packet len=%d\n", remaining);

    /* drain the payload in buffer-sized chunks */
    while (remaining > 0) {
        chunk = FFMIN(remaining, (int)sizeof(buf));
        ret = ffurl_read_complete(rt->rtsp_hd, buf, chunk);
        if (ret != chunk)
            return;
        remaining -= chunk;
    }
}
 
/**
 * Read an RTSP message from the server, or detect interleaved data.
 *
 * Also answers OPTIONS requests sent by the server (keepalive probes)
 * and replies 501 to any other server-initiated request.
 *
 * @param s RTSP (de)muxer context
 * @param reply pointer where the parsed message header is stored
 * @param content_ptr if non-NULL, receives the malloc'ed message body
 *                    (NUL-terminated; length in reply->content_length),
 *                    owned by the caller
 * @param return_on_interleaved_data whether to return 1 when a '$' data
 *                    marker is seen, instead of skipping the packet
 * @param method the RTSP method this is a reply to, or NULL
 * @return 1 if an interleaved data packet is ready to be received,
 *         0 on success, a negative AVERROR on failure.
 *
 * Fix vs. previous revision: the body allocation is now checked, so an
 * out-of-memory condition returns AVERROR(ENOMEM) instead of passing a
 * NULL buffer to ffurl_read_complete() and dereferencing it.
 */
int ff_rtsp_read_reply(AVFormatContext *s, RTSPMessageHeader *reply,
                       unsigned char **content_ptr,
                       int return_on_interleaved_data, const char *method)
{
    RTSPState *rt = s->priv_data;
    char buf[4096], buf1[1024], *q;
    unsigned char ch;
    const char *p;
    int ret, content_length, line_count = 0, request = 0;
    unsigned char *content = NULL;

start:
    line_count = 0;
    request = 0;
    content = NULL;
    memset(reply, 0, sizeof(*reply));

    /* parse reply (XXX: use buffers) */
    rt->last_reply[0] = '\0';
    for (;;) {
        q = buf;
        /* read one header line, byte by byte */
        for (;;) {
            ret = ffurl_read_complete(rt->rtsp_hd, &ch, 1);
            av_dlog(s, "ret=%d c=%02x [%c]\n", ret, ch, ch);
            if (ret != 1)
                return AVERROR_EOF;
            if (ch == '\n')
                break;
            if (ch == '$') {
                /* XXX: only parse it if first char on line ? */
                if (return_on_interleaved_data) {
                    return 1;
                } else
                    ff_rtsp_skip_packet(s);
            } else if (ch != '\r') {
                if ((q - buf) < sizeof(buf) - 1)
                    *q++ = ch;
            }
        }
        *q = '\0';

        av_dlog(s, "line='%s'\n", buf);

        /* test if last line */
        if (buf[0] == '\0')
            break;
        p = buf;
        if (line_count == 0) {
            /* first line: either a status line ("RTSP/1.0 200 OK") or a
             * server-initiated request ("OPTIONS * RTSP/1.0") */
            get_word(buf1, sizeof(buf1), &p);
            if (!strncmp(buf1, "RTSP/", 5)) {
                get_word(buf1, sizeof(buf1), &p);
                reply->status_code = atoi(buf1);
                av_strlcpy(reply->reason, p, sizeof(reply->reason));
            } else {
                av_strlcpy(reply->reason, buf1, sizeof(reply->reason)); // method
                get_word(buf1, sizeof(buf1), &p); // object
                request = 1;
            }
        } else {
            ff_rtsp_parse_line(reply, p, rt, method);
            av_strlcat(rt->last_reply, p, sizeof(rt->last_reply));
            av_strlcat(rt->last_reply, "\n", sizeof(rt->last_reply));
        }
        line_count++;
    }

    if (rt->session_id[0] == '\0' && reply->session_id[0] != '\0' && !request)
        av_strlcpy(rt->session_id, reply->session_id, sizeof(rt->session_id));

    content_length = reply->content_length;
    if (content_length > 0) {
        /* leave some room for a trailing '\0' (useful for simple parsing) */
        content = av_malloc(content_length + 1);
        if (!content)
            return AVERROR(ENOMEM);
        ffurl_read_complete(rt->rtsp_hd, content, content_length);
        content[content_length] = '\0';
    }
    if (content_ptr)
        *content_ptr = content;
    else
        av_free(content);

    if (request) {
        /* answer server-to-client requests so the server keeps the
         * session alive */
        char buf[1024];
        char base64buf[AV_BASE64_SIZE(sizeof(buf))];
        const char* ptr = buf;

        if (!strcmp(reply->reason, "OPTIONS")) {
            snprintf(buf, sizeof(buf), "RTSP/1.0 200 OK\r\n");
            if (reply->seq)
                av_strlcatf(buf, sizeof(buf), "CSeq: %d\r\n", reply->seq);
            if (reply->session_id[0])
                av_strlcatf(buf, sizeof(buf), "Session: %s\r\n",
                            reply->session_id);
        } else {
            snprintf(buf, sizeof(buf), "RTSP/1.0 501 Not Implemented\r\n");
        }
        av_strlcat(buf, "\r\n", sizeof(buf));

        if (rt->control_transport == RTSP_MODE_TUNNEL) {
            av_base64_encode(base64buf, sizeof(base64buf), buf, strlen(buf));
            ptr = base64buf;
        }
        ffurl_write(rt->rtsp_hd_out, ptr, strlen(ptr));

        rt->last_cmd_time = av_gettime();
        /* Even if the request from the server had data, it is not the data
         * that the caller wants or expects. The memory could also be leaked
         * if the actual following reply has content data. */
        if (content_ptr)
            av_freep(content_ptr);
        /* If method is set, this is called from ff_rtsp_send_cmd,
         * where a reply to exactly this request is awaited. For
         * callers from within packet receiving, we just want to
         * return to the caller and go back to receiving packets. */
        if (method)
            goto start;
        return 0;
    }

    if (rt->seq != reply->seq) {
        av_log(s, AV_LOG_WARNING, "CSeq %d expected, %d received.\n",
               rt->seq, reply->seq);
    }

    /* EOS */
    if (reply->notice == 2101 /* End-of-Stream Reached */ ||
        reply->notice == 2104 /* Start-of-Stream Reached */ ||
        reply->notice == 2306 /* Continuous Feed Terminated */) {
        rt->state = RTSP_STATE_IDLE;
    } else if (reply->notice >= 4400 && reply->notice < 5500) {
        return AVERROR(EIO); /* data or server error */
    } else if (reply->notice == 2401 /* Ticket Expired */ ||
             (reply->notice >= 5500 && reply->notice < 5600) /* end of term */ )
        return AVERROR(EPERM);

    return 0;
}
 
/**
 * Send a command to the RTSP server without waiting for the reply.
 *
 * @param s RTSP (de)muxer context
 * @param method the method for the request
 * @param url the target url for the request
 * @param headers extra header lines to include in the request
 * @param send_content if non-null, the data to send as request body content
 * @param send_content_length the length of the send_content data, or 0 if
 *                            send_content is null
 *
 * @return zero if success, nonzero otherwise
 */
static int rtsp_send_cmd_with_content_async(AVFormatContext *s,
                                            const char *method, const char *url,
                                            const char *headers,
                                            const unsigned char *send_content,
                                            int send_content_length)
{
    RTSPState *rt = s->priv_data;
    char buf[4096];
    char base64buf[AV_BASE64_SIZE(sizeof(buf))];
    char *out_buf = buf;

    rt->seq++;

    /* request line, then any caller-supplied header lines */
    snprintf(buf, sizeof(buf), "%s %s RTSP/1.0\r\n", method, url);
    if (headers)
        av_strlcat(buf, headers, sizeof(buf));
    av_strlcatf(buf, sizeof(buf), "CSeq: %d\r\n", rt->seq);
    av_strlcatf(buf, sizeof(buf), "User-Agent: %s\r\n", rt->user_agent);
    /* skip the Session header when the caller already sent an If-Match
     * (which itself carries the session id) */
    if (rt->session_id[0] != '\0' &&
        (!headers || !strstr(headers, "\nIf-Match:")))
        av_strlcatf(buf, sizeof(buf), "Session: %s\r\n", rt->session_id);
    if (rt->auth[0]) {
        char *auth_line = ff_http_auth_create_response(&rt->auth_state,
                                                       rt->auth, url, method);
        if (auth_line)
            av_strlcat(buf, auth_line, sizeof(buf));
        av_free(auth_line);
    }
    if (send_content_length > 0 && send_content)
        av_strlcatf(buf, sizeof(buf), "Content-Length: %d\r\n",
                    send_content_length);
    av_strlcat(buf, "\r\n", sizeof(buf));

    /* base64 encode rtsp if tunneling */
    if (rt->control_transport == RTSP_MODE_TUNNEL) {
        av_base64_encode(base64buf, sizeof(base64buf), buf, strlen(buf));
        out_buf = base64buf;
    }

    av_dlog(s, "Sending:\n%s--\n", buf);

    ffurl_write(rt->rtsp_hd_out, out_buf, strlen(out_buf));
    if (send_content_length > 0 && send_content) {
        if (rt->control_transport == RTSP_MODE_TUNNEL) {
            av_log(s, AV_LOG_ERROR, "tunneling of RTSP requests "
                                    "with content data not supported\n");
            return AVERROR_PATCHWELCOME;
        }
        ffurl_write(rt->rtsp_hd_out, send_content, send_content_length);
    }
    rt->last_cmd_time = av_gettime();

    return 0;
}
 
/* Send a request without a body and without waiting for the reply;
 * thin wrapper around rtsp_send_cmd_with_content_async(). */
int ff_rtsp_send_cmd_async(AVFormatContext *s, const char *method,
                           const char *url, const char *headers)
{
    return rtsp_send_cmd_with_content_async(s, method, url, headers, NULL, 0);
}
 
/* Send a request without a body and wait for the reply; thin wrapper
 * around ff_rtsp_send_cmd_with_content(). */
int ff_rtsp_send_cmd(AVFormatContext *s, const char *method, const char *url,
                     const char *headers, RTSPMessageHeader *reply,
                     unsigned char **content_ptr)
{
    return ff_rtsp_send_cmd_with_content(s, method, url, headers, reply,
                                         content_ptr, NULL, 0);
}
 
/**
 * Send a request (optionally with a body) and wait for the reply,
 * retrying exactly once when the server answers 401 and the reply
 * supplied (or refreshed) authentication parameters we did not have.
 * Failures above status 400 are logged but still return 0; the caller
 * inspects reply->status_code.
 */
int ff_rtsp_send_cmd_with_content(AVFormatContext *s,
                                  const char *method, const char *url,
                                  const char *header,
                                  RTSPMessageHeader *reply,
                                  unsigned char **content_ptr,
                                  const unsigned char *send_content,
                                  int send_content_length)
{
    RTSPState *rt = s->priv_data;
    HTTPAuthType cur_auth_type;
    int ret, attempts = 0;

    do {
        /* remember what auth we had before this exchange, so we only
         * retry when the 401 reply actually taught us something new */
        cur_auth_type = rt->auth_state.auth_type;
        ret = rtsp_send_cmd_with_content_async(s, method, url, header,
                                               send_content,
                                               send_content_length);
        if (ret)
            return ret;
        ret = ff_rtsp_read_reply(s, reply, content_ptr, 0, method);
        if (ret < 0)
            return ret;
        attempts++;
    } while (reply->status_code == 401 &&
             (cur_auth_type == HTTP_AUTH_NONE || rt->auth_state.stale) &&
             rt->auth_state.auth_type != HTTP_AUTH_NONE && attempts < 2);

    if (reply->status_code > 400) {
        av_log(s, AV_LOG_ERROR, "method %s failed: %d%s\n",
               method,
               reply->status_code,
               reply->reason);
        av_log(s, AV_LOG_DEBUG, "%s\n", rt->last_reply);
    }

    return 0;
}
 
/**
 * Send a SETUP request for every stream using the given lower transport
 * mode, open the data channels (UDP ports / interleaved TCP channels /
 * multicast groups) and create the per-stream transport contexts.
 *
 * @param s               RTSP (de)muxer context
 * @param host            server host, used when building rtp:// data URLs
 * @param port            data port; overwritten per stream during setup
 * @param lower_transport the RTSP_LOWER_TRANSPORT_* mode to request
 * @param real_challenge  RealMedia challenge string (only used when the
 *                        server is a Real server)
 * @return 0 on success, a negative AVERROR on failure, or 1 when the
 *         server rejected the protocol (461) so the caller may retry
 *         with another lower transport
 */
int ff_rtsp_make_setup_request(AVFormatContext *s, const char *host, int port,
                               int lower_transport, const char *real_challenge)
{
    RTSPState *rt = s->priv_data;
    int rtx = 0, j, i, err, interleave = 0, port_off;
    RTSPStream *rtsp_st;
    RTSPMessageHeader reply1, *reply = &reply1;
    char cmd[2048];
    const char *trans_pref;

    if (rt->transport == RTSP_TRANSPORT_RDT)
        trans_pref = "x-pn-tng";
    else if (rt->transport == RTSP_TRANSPORT_RAW)
        trans_pref = "RAW/RAW";
    else
        trans_pref = "RTP/AVP";

    /* default timeout: 1 minute */
    rt->timeout = 60;

    /* Choose a random starting offset within the first half of the
     * port range, to allow for a number of ports to try even if the offset
     * happens to be at the end of the random range. */
    port_off = av_get_random_seed() % ((rt->rtp_port_max - rt->rtp_port_min)/2);
    /* even random offset */
    port_off -= port_off & 0x01;

    for (j = rt->rtp_port_min + port_off, i = 0; i < rt->nb_rtsp_streams; ++i) {
        char transport[2048];

        /*
         * WMS serves all UDP data over a single connection, the RTX, which
         * isn't necessarily the first in the SDP but has to be the first
         * to be set up, else the second/third SETUP will fail with a 461.
         */
        if (lower_transport == RTSP_LOWER_TRANSPORT_UDP &&
             rt->server_type == RTSP_SERVER_WMS) {
            if (i == 0) {
                /* rtx first */
                for (rtx = 0; rtx < rt->nb_rtsp_streams; rtx++) {
                    int len = strlen(rt->rtsp_streams[rtx]->control_url);
                    if (len >= 4 &&
                        !strcmp(rt->rtsp_streams[rtx]->control_url + len - 4,
                                "/rtx"))
                        break;
                }
                if (rtx == rt->nb_rtsp_streams)
                    return -1; /* no RTX found */
                rtsp_st = rt->rtsp_streams[rtx];
            } else
                /* remaining streams in SDP order, skipping the RTX slot */
                rtsp_st = rt->rtsp_streams[i > rtx ? i : i - 1];
        } else
            rtsp_st = rt->rtsp_streams[i];

        /* RTP/UDP */
        if (lower_transport == RTSP_LOWER_TRANSPORT_UDP) {
            char buf[256];

            /* WMS multiplexes everything over the RTX connection's port */
            if (rt->server_type == RTSP_SERVER_WMS && i > 1) {
                port = reply->transports[0].client_port_min;
                goto have_port;
            }

            /* first try in specified port range */
            while (j <= rt->rtp_port_max) {
                ff_url_join(buf, sizeof(buf), "rtp", NULL, host, -1,
                            "?localport=%d", j);
                /* we will use two ports per rtp stream (rtp and rtcp) */
                j += 2;
                if (!ffurl_open(&rtsp_st->rtp_handle, buf, AVIO_FLAG_READ_WRITE,
                               &s->interrupt_callback, NULL))
                    goto rtp_opened;
            }
            av_log(s, AV_LOG_ERROR, "Unable to open an input RTP port\n");
            err = AVERROR(EIO);
            goto fail;

        rtp_opened:
            port = ff_rtp_get_local_rtp_port(rtsp_st->rtp_handle);
        have_port:
            snprintf(transport, sizeof(transport) - 1,
                     "%s/UDP;", trans_pref);
            if (rt->server_type != RTSP_SERVER_REAL)
                av_strlcat(transport, "unicast;", sizeof(transport));
            av_strlcatf(transport, sizeof(transport),
                        "client_port=%d", port);
            if (rt->transport == RTSP_TRANSPORT_RTP &&
                !(rt->server_type == RTSP_SERVER_WMS && i > 0))
                av_strlcatf(transport, sizeof(transport), "-%d", port + 1);
        }

        /* RTP/TCP */
        else if (lower_transport == RTSP_LOWER_TRANSPORT_TCP) {
            /* For WMS streams, the application streams are only used for
             * UDP. When trying to set it up for TCP streams, the server
             * will return an error. Therefore, we skip those streams. */
            if (rt->server_type == RTSP_SERVER_WMS &&
                (rtsp_st->stream_index < 0 ||
                 s->streams[rtsp_st->stream_index]->codec->codec_type ==
                     AVMEDIA_TYPE_DATA))
                continue;
            snprintf(transport, sizeof(transport) - 1,
                     "%s/TCP;", trans_pref);
            if (rt->transport != RTSP_TRANSPORT_RDT)
                av_strlcat(transport, "unicast;", sizeof(transport));
            /* each stream gets a pair of interleaved channel ids */
            av_strlcatf(transport, sizeof(transport),
                        "interleaved=%d-%d",
                        interleave, interleave + 1);
            interleave += 2;
        }

        else if (lower_transport == RTSP_LOWER_TRANSPORT_UDP_MULTICAST) {
            snprintf(transport, sizeof(transport) - 1,
                     "%s/UDP;multicast", trans_pref);
        }
        if (s->oformat) {
            av_strlcat(transport, ";mode=record", sizeof(transport));
        } else if (rt->server_type == RTSP_SERVER_REAL ||
                   rt->server_type == RTSP_SERVER_WMS)
            av_strlcat(transport, ";mode=play", sizeof(transport));
        snprintf(cmd, sizeof(cmd),
                 "Transport: %s\r\n",
                 transport);
        if (rt->accept_dynamic_rate)
            av_strlcat(cmd, "x-Dynamic-Rate: 0\r\n", sizeof(cmd));
        /* Real servers require a challenge/response on the first SETUP */
        if (i == 0 && rt->server_type == RTSP_SERVER_REAL && CONFIG_RTPDEC) {
            char real_res[41], real_csum[9];
            ff_rdt_calc_response_and_checksum(real_res, real_csum,
                                              real_challenge);
            av_strlcatf(cmd, sizeof(cmd),
                        "If-Match: %s\r\n"
                        "RealChallenge2: %s, sd=%s\r\n",
                        rt->session_id, real_res, real_csum);
        }
        ff_rtsp_send_cmd(s, "SETUP", rtsp_st->control_url, cmd, reply, NULL);
        if (reply->status_code == 461 /* Unsupported protocol */ && i == 0) {
            /* signals the caller to retry with another lower transport */
            err = 1;
            goto fail;
        } else if (reply->status_code != RTSP_STATUS_OK ||
                   reply->nb_transports != 1) {
            err = AVERROR_INVALIDDATA;
            goto fail;
        }

        /* XXX: same protocol for all streams is required */
        if (i > 0) {
            if (reply->transports[0].lower_transport != rt->lower_transport ||
                reply->transports[0].transport != rt->transport) {
                err = AVERROR_INVALIDDATA;
                goto fail;
            }
        } else {
            rt->lower_transport = reply->transports[0].lower_transport;
            rt->transport = reply->transports[0].transport;
        }

        /* Fail if the server responded with another lower transport mode
         * than what we requested. */
        if (reply->transports[0].lower_transport != lower_transport) {
            av_log(s, AV_LOG_ERROR, "Nonmatching transport in server reply\n");
            err = AVERROR_INVALIDDATA;
            goto fail;
        }

        switch(reply->transports[0].lower_transport) {
        case RTSP_LOWER_TRANSPORT_TCP:
            rtsp_st->interleaved_min = reply->transports[0].interleaved_min;
            rtsp_st->interleaved_max = reply->transports[0].interleaved_max;
            break;

        case RTSP_LOWER_TRANSPORT_UDP: {
            char url[1024], options[30] = "";
            const char *peer = host;

            if (rt->rtsp_flags & RTSP_FLAG_FILTER_SRC)
                av_strlcpy(options, "?connect=1", sizeof(options));
            /* Use source address if specified */
            if (reply->transports[0].source[0])
                peer = reply->transports[0].source;
            ff_url_join(url, sizeof(url), "rtp", NULL, peer,
                        reply->transports[0].server_port_min, "%s", options);
            if (!(rt->server_type == RTSP_SERVER_WMS && i > 1) &&
                ff_rtp_set_remote_url(rtsp_st->rtp_handle, url) < 0) {
                err = AVERROR_INVALIDDATA;
                goto fail;
            }
            /* Try to initialize the connection state in a
             * potential NAT router by sending dummy packets.
             * RTP/RTCP dummy packets are used for RDT, too.
             */
            if (!(rt->server_type == RTSP_SERVER_WMS && i > 1) && s->iformat &&
                CONFIG_RTPDEC)
                ff_rtp_send_punch_packets(rtsp_st->rtp_handle);
            break;
        }
        case RTSP_LOWER_TRANSPORT_UDP_MULTICAST: {
            char url[1024], namebuf[50], optbuf[20] = "";
            struct sockaddr_storage addr;
            int port, ttl;

            /* prefer the server-announced destination, fall back to the
             * multicast parameters from the SDP */
            if (reply->transports[0].destination.ss_family) {
                addr = reply->transports[0].destination;
                port = reply->transports[0].port_min;
                ttl = reply->transports[0].ttl;
            } else {
                addr = rtsp_st->sdp_ip;
                port = rtsp_st->sdp_port;
                ttl = rtsp_st->sdp_ttl;
            }
            if (ttl > 0)
                snprintf(optbuf, sizeof(optbuf), "?ttl=%d", ttl);
            getnameinfo((struct sockaddr*) &addr, sizeof(addr),
                        namebuf, sizeof(namebuf), NULL, 0, NI_NUMERICHOST);
            ff_url_join(url, sizeof(url), "rtp", NULL, namebuf,
                        port, "%s", optbuf);
            if (ffurl_open(&rtsp_st->rtp_handle, url, AVIO_FLAG_READ_WRITE,
                           &s->interrupt_callback, NULL) < 0) {
                err = AVERROR_INVALIDDATA;
                goto fail;
            }
            break;
        }
        }

        if ((err = ff_rtsp_open_transport_ctx(s, rtsp_st)))
            goto fail;
    }

    /* the server may have announced a session timeout; prefer it over
     * the 60 s default set above */
    if (rt->nb_rtsp_streams && reply->timeout > 0)
        rt->timeout = reply->timeout;

    if (rt->server_type == RTSP_SERVER_REAL)
        rt->need_subscription = 1;

    return 0;

fail:
    ff_rtsp_undo_setup(s);
    return err;
}
 
/* Close the RTSP control connection(s). When HTTP tunneling is active
 * there is a separate outgoing handle; close it only if it differs from
 * the main one. */
void ff_rtsp_close_connections(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;

    if (rt->rtsp_hd_out != rt->rtsp_hd)
        ffurl_close(rt->rtsp_hd_out);
    ffurl_close(rt->rtsp_hd);
    rt->rtsp_hd = rt->rtsp_hd_out = NULL;
}
 
/**
 * Establish an RTSP session: open the control connection (plain TCP or
 * HTTP-tunneled), detect the server type via OPTIONS, set up the input
 * or output streams, and issue SETUP requests for each stream, trying
 * the requested lower transports in order until one succeeds.
 *
 * On a 3xx reply the function follows the Location header by restarting
 * from the 'redirect' label with the new s->filename.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
int ff_rtsp_connect(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    char host[1024], path[1024], tcpname[1024], cmd[2048], auth[128];
    int port, err, tcp_fd;
    RTSPMessageHeader reply1 = {0}, *reply = &reply1;
    int lower_transport_mask = 0;
    char real_challenge[64] = "";
    struct sockaddr_storage peer;
    socklen_t peer_len = sizeof(peer);

    /* Validate the user-supplied local UDP port range up front. */
    if (rt->rtp_port_max < rt->rtp_port_min) {
        av_log(s, AV_LOG_ERROR, "Invalid UDP port range, max port %d less "
                                "than min port %d\n", rt->rtp_port_max,
                                                      rt->rtp_port_min);
        return AVERROR(EINVAL);
    }

    if (!ff_network_init())
        return AVERROR(EIO);

    if (s->max_delay < 0) /* Not set by the caller */
        s->max_delay = s->iformat ? DEFAULT_REORDERING_DELAY : 0;

    /* HTTP tunneling implies TCP as the only lower transport. */
    rt->control_transport = RTSP_MODE_PLAIN;
    if (rt->lower_transport_mask & (1 << RTSP_LOWER_TRANSPORT_HTTP)) {
        rt->lower_transport_mask = 1 << RTSP_LOWER_TRANSPORT_TCP;
        rt->control_transport = RTSP_MODE_TUNNEL;
    }
    /* Only pass through valid flags from here */
    rt->lower_transport_mask &= (1 << RTSP_LOWER_TRANSPORT_NB) - 1;

redirect:
    lower_transport_mask = rt->lower_transport_mask;
    /* extract hostname and port; auth (user:pass) is stripped into rt->auth */
    av_url_split(NULL, 0, auth, sizeof(auth),
                 host, sizeof(host), &port, path, sizeof(path), s->filename);
    if (*auth) {
        av_strlcpy(rt->auth, auth, sizeof(rt->auth));
    }
    if (port < 0)
        port = RTSP_DEFAULT_PORT;

    /* No transport requested: try them all. */
    if (!lower_transport_mask)
        lower_transport_mask = (1 << RTSP_LOWER_TRANSPORT_NB) - 1;

    if (s->oformat) {
        /* Only UDP or TCP - UDP multicast isn't supported. */
        lower_transport_mask &= (1 << RTSP_LOWER_TRANSPORT_UDP) |
                                (1 << RTSP_LOWER_TRANSPORT_TCP);
        if (!lower_transport_mask || rt->control_transport == RTSP_MODE_TUNNEL) {
            av_log(s, AV_LOG_ERROR, "Unsupported lower transport method, "
                                    "only UDP and TCP are supported for output.\n");
            err = AVERROR(EINVAL);
            goto fail;
        }
    }

    /* Construct the URI used in request; this is similar to s->filename,
     * but with authentication credentials removed and RTSP specific options
     * stripped out. */
    ff_url_join(rt->control_uri, sizeof(rt->control_uri), "rtsp", NULL,
                host, port, "%s", path);

    if (rt->control_transport == RTSP_MODE_TUNNEL) {
        /* set up initial handshake for tunneling: one HTTP GET connection
         * for server->client data, one HTTP POST connection for commands,
         * paired by a random x-sessioncookie value. */
        char httpname[1024];
        char sessioncookie[17];
        char headers[1024];

        ff_url_join(httpname, sizeof(httpname), "http", auth, host, port, "%s", path);
        snprintf(sessioncookie, sizeof(sessioncookie), "%08x%08x",
                 av_get_random_seed(), av_get_random_seed());

        /* GET requests */
        if (ffurl_alloc(&rt->rtsp_hd, httpname, AVIO_FLAG_READ,
                        &s->interrupt_callback) < 0) {
            err = AVERROR(EIO);
            goto fail;
        }

        /* generate GET headers */
        snprintf(headers, sizeof(headers),
                 "x-sessioncookie: %s\r\n"
                 "Accept: application/x-rtsp-tunnelled\r\n"
                 "Pragma: no-cache\r\n"
                 "Cache-Control: no-cache\r\n",
                 sessioncookie);
        av_opt_set(rt->rtsp_hd->priv_data, "headers", headers, 0);

        /* complete the connection */
        if (ffurl_connect(rt->rtsp_hd, NULL)) {
            err = AVERROR(EIO);
            goto fail;
        }

        /* POST requests */
        if (ffurl_alloc(&rt->rtsp_hd_out, httpname, AVIO_FLAG_WRITE,
                        &s->interrupt_callback) < 0 ) {
            err = AVERROR(EIO);
            goto fail;
        }

        /* generate POST headers */
        snprintf(headers, sizeof(headers),
                 "x-sessioncookie: %s\r\n"
                 "Content-Type: application/x-rtsp-tunnelled\r\n"
                 "Pragma: no-cache\r\n"
                 "Cache-Control: no-cache\r\n"
                 "Content-Length: 32767\r\n"
                 "Expires: Sun, 9 Jan 1972 00:00:00 GMT\r\n",
                 sessioncookie);
        av_opt_set(rt->rtsp_hd_out->priv_data, "headers", headers, 0);
        av_opt_set(rt->rtsp_hd_out->priv_data, "chunked_post", "0", 0);

        /* Initialize the authentication state for the POST session. The HTTP
         * protocol implementation doesn't properly handle multi-pass
         * authentication for POST requests, since it would require one of
         * the following:
         * - implementing Expect: 100-continue, which many HTTP servers
         *   don't support anyway, even less the RTSP servers that do HTTP
         *   tunneling
         * - sending the whole POST data until getting a 401 reply specifying
         *   what authentication method to use, then resending all that data
         * - waiting for potential 401 replies directly after sending the
         *   POST header (waiting for some unspecified time)
         * Therefore, we copy the full auth state, which works for both basic
         * and digest. (For digest, we would have to synchronize the nonce
         * count variable between the two sessions, if we'd do more requests
         * with the original session, though.)
         */
        ff_http_init_auth_state(rt->rtsp_hd_out, rt->rtsp_hd);

        /* complete the connection */
        if (ffurl_connect(rt->rtsp_hd_out, NULL)) {
            err = AVERROR(EIO);
            goto fail;
        }
    } else {
        /* open the tcp connection */
        ff_url_join(tcpname, sizeof(tcpname), "tcp", NULL, host, port,
                    "?timeout=%d", rt->stimeout);
        if (ffurl_open(&rt->rtsp_hd, tcpname, AVIO_FLAG_READ_WRITE,
                       &s->interrupt_callback, NULL) < 0) {
            err = AVERROR(EIO);
            goto fail;
        }
        rt->rtsp_hd_out = rt->rtsp_hd;
    }
    rt->seq = 0;

    /* Replace 'host' with the numeric peer address, so subsequent
     * SETUP requests target the resolved address. getnameinfo() failure
     * leaves 'host' unchanged. */
    tcp_fd = ffurl_get_file_handle(rt->rtsp_hd);
    if (!getpeername(tcp_fd, (struct sockaddr*) &peer, &peer_len)) {
        getnameinfo((struct sockaddr*) &peer, peer_len, host, sizeof(host),
                    NULL, 0, NI_NUMERICHOST);
    }

    /* request options supported by the server; this also detects server
     * type. The loop runs at most twice: once to detect a Real server
     * (via RealChallenge1), and once more to resend OPTIONS with the
     * Real-specific headers. */
    for (rt->server_type = RTSP_SERVER_RTP;;) {
        cmd[0] = 0;
        if (rt->server_type == RTSP_SERVER_REAL)
            av_strlcat(cmd,
                       /*
                        * The following entries are required for proper
                        * streaming from a Realmedia server. They are
                        * interdependent in some way although we currently
                        * don't quite understand how. Values were copied
                        * from mplayer SVN r23589.
                        *   ClientChallenge is a 16-byte ID in hex
                        *   CompanyID is a 16-byte ID in base64
                        */
                       "ClientChallenge: 9e26d33f2984236010ef6253fb1887f7\r\n"
                       "PlayerStarttime: [28/03/2003:22:50:23 00:00]\r\n"
                       "CompanyID: KnKV4M4I/B2FjJ1TToLycw==\r\n"
                       "GUID: 00000000-0000-0000-0000-000000000000\r\n",
                       sizeof(cmd));
        ff_rtsp_send_cmd(s, "OPTIONS", rt->control_uri, cmd, reply, NULL);
        if (reply->status_code != RTSP_STATUS_OK) {
            err = AVERROR_INVALIDDATA;
            goto fail;
        }

        /* detect server type if not standard-compliant RTP */
        if (rt->server_type != RTSP_SERVER_REAL && reply->real_challenge[0]) {
            rt->server_type = RTSP_SERVER_REAL;
            continue;
        } else if (!av_strncasecmp(reply->server, "WMServer/", 9)) {
            rt->server_type = RTSP_SERVER_WMS;
        } else if (rt->server_type == RTSP_SERVER_REAL)
            strcpy(real_challenge, reply->real_challenge);
        break;
    }

    if (s->iformat && CONFIG_RTSP_DEMUXER)
        err = ff_rtsp_setup_input_streams(s, reply);
    else if (CONFIG_RTSP_MUXER)
        err = ff_rtsp_setup_output_streams(s, host);
    if (err)
        goto fail;

    /* Try each requested lower transport, lowest set bit first, until
     * one SETUP round succeeds (err == 0). err == 1 means "try next". */
    do {
        int lower_transport = ff_log2_tab[lower_transport_mask &
                                          ~(lower_transport_mask - 1)];

        err = ff_rtsp_make_setup_request(s, host, port, lower_transport,
                                         rt->server_type == RTSP_SERVER_REAL ?
                                             real_challenge : NULL);
        if (err < 0)
            goto fail;
        lower_transport_mask &= ~(1 << lower_transport);
        if (lower_transport_mask == 0 && err == 1) {
            err = AVERROR(EPROTONOSUPPORT);
            goto fail;
        }
    } while (err);

    rt->lower_transport_mask = lower_transport_mask;
    av_strlcpy(rt->real_challenge, real_challenge, sizeof(rt->real_challenge));
    rt->state = RTSP_STATE_IDLE;
    rt->seek_timestamp = 0; /* default is to start stream at position zero */
    return 0;
fail:
    ff_rtsp_close_streams(s);
    ff_rtsp_close_connections(s);
    /* Follow HTTP-style 3xx redirects (demuxing only). */
    if (reply->status_code >=300 && reply->status_code < 400 && s->iformat) {
        av_strlcpy(s->filename, reply->location, sizeof(s->filename));
        av_log(s, AV_LOG_INFO, "Status %d: Redirecting to %s\n",
               reply->status_code,
               s->filename);
        goto redirect;
    }
    ff_network_close();
    return err;
}
#endif /* CONFIG_RTSP_DEMUXER || CONFIG_RTSP_MUXER */
 
#if CONFIG_RTPDEC
/**
 * Poll all UDP RTP/RTCP sockets (plus the RTSP TCP control connection,
 * if any) and return the next packet read from one of them.
 *
 * The pollfd layout is: slot 0 = TCP control fd (when present), then two
 * slots (RTP, RTCP) per stream that has an rtp_handle — the read loop
 * below relies on exactly this ordering.
 *
 * @param prtsp_st  output: the stream the returned packet belongs to
 * @param wait_end  absolute av_gettime() deadline, 0 = no deadline
 * @return number of bytes read, 0 on a handled control event,
 *         or a negative AVERROR (AVERROR(EAGAIN) on deadline expiry,
 *         AVERROR(ETIMEDOUT) after MAX_TIMEOUTS empty polls).
 */
static int udp_read_packet(AVFormatContext *s, RTSPStream **prtsp_st,
                           uint8_t *buf, int buf_size, int64_t wait_end)
{
    RTSPState *rt = s->priv_data;
    RTSPStream *rtsp_st;
    int n, i, ret, tcp_fd, timeout_cnt = 0;
    int max_p = 0;
    struct pollfd *p = rt->p;
    int *fds = NULL, fdsnum, fdsidx;

    for (;;) {
        if (ff_check_interrupt(&s->interrupt_callback))
            return AVERROR_EXIT;
        if (wait_end && wait_end - av_gettime() < 0)
            return AVERROR(EAGAIN);
        /* Rebuild the pollfd array on each iteration: handles may have
         * been added or closed since the last poll. */
        max_p = 0;
        if (rt->rtsp_hd) {
            tcp_fd = ffurl_get_file_handle(rt->rtsp_hd);
            p[max_p].fd = tcp_fd;
            p[max_p++].events = POLLIN;
        } else {
            tcp_fd = -1;
        }
        for (i = 0; i < rt->nb_rtsp_streams; i++) {
            rtsp_st = rt->rtsp_streams[i];
            if (rtsp_st->rtp_handle) {
                /* fds is malloc'ed by the callee; freed below */
                if (ret = ffurl_get_multi_file_handle(rtsp_st->rtp_handle,
                                                      &fds, &fdsnum)) {
                    av_log(s, AV_LOG_ERROR, "Unable to recover rtp ports\n");
                    return ret;
                }
                if (fdsnum != 2) {
                    av_log(s, AV_LOG_ERROR,
                           "Number of fds %d not supported\n", fdsnum);
                    return AVERROR_INVALIDDATA;
                }
                for (fdsidx = 0; fdsidx < fdsnum; fdsidx++) {
                    p[max_p].fd = fds[fdsidx];
                    p[max_p++].events = POLLIN;
                }
                av_free(fds);
            }
        }
        n = poll(p, max_p, POLL_TIMEOUT_MS);
        if (n > 0) {
            /* j indexes the first per-stream slot: 1 if slot 0 holds the
             * TCP fd, 0 otherwise. */
            int j = 1 - (tcp_fd == -1);
            timeout_cnt = 0;
            for (i = 0; i < rt->nb_rtsp_streams; i++) {
                rtsp_st = rt->rtsp_streams[i];
                if (rtsp_st->rtp_handle) {
                    /* data ready on either the RTP or the RTCP socket */
                    if (p[j].revents & POLLIN || p[j+1].revents & POLLIN) {
                        ret = ffurl_read(rtsp_st->rtp_handle, buf, buf_size);
                        if (ret > 0) {
                            *prtsp_st = rtsp_st;
                            return ret;
                        }
                    }
                    j+=2;
                }
            }
#if CONFIG_RTSP_DEMUXER
            /* Activity on the RTSP control connection: either an incoming
             * command (listen mode) or a server reply/keepalive. */
            if (tcp_fd != -1 && p[0].revents & POLLIN) {
                if (rt->rtsp_flags & RTSP_FLAG_LISTEN) {
                    if (rt->state == RTSP_STATE_STREAMING) {
                        if (!ff_rtsp_parse_streaming_commands(s))
                            return AVERROR_EOF;
                        else
                            av_log(s, AV_LOG_WARNING,
                                   "Unable to answer to TEARDOWN\n");
                    } else
                        return 0;
                } else {
                    RTSPMessageHeader reply;
                    ret = ff_rtsp_read_reply(s, &reply, NULL, 0, NULL);
                    if (ret < 0)
                        return ret;
                    /* XXX: parse message */
                    if (rt->state != RTSP_STATE_STREAMING)
                        return 0;
                }
            }
#endif
        } else if (n == 0 && ++timeout_cnt >= MAX_TIMEOUTS) {
            return AVERROR(ETIMEDOUT);
        } else if (n < 0 && errno != EINTR)
            return AVERROR(errno);
    }
}
 
/**
 * Attribute a received packet to one of the session's streams
 * (used for custom-IO input where all streams share one channel).
 *
 * RTCP packets are matched by SSRC, RTP data packets by payload type.
 *
 * @return len on success (with *rtsp_st set), AVERROR(EAGAIN) if no
 *         stream could be determined, or len unchanged when len < 0.
 */
static int pick_stream(AVFormatContext *s, RTSPStream **rtsp_st,
                       const uint8_t *buf, int len)
{
    RTSPState *rt = s->priv_data;
    int i;
    if (len < 0)
        return len;
    /* trivial case: only one stream, everything belongs to it */
    if (rt->nb_rtsp_streams == 1) {
        *rtsp_st = rt->rtsp_streams[0];
        return len;
    }
    if (len >= 8 && rt->transport == RTSP_TRANSPORT_RTP) {
        /* NOTE(review): this inspects rt->recvbuf[1] while the rest of the
         * function uses buf[]; the caller passes buf == rt->recvbuf, so both
         * refer to the same byte — confirm before changing either side. */
        if (RTP_PT_IS_RTCP(rt->recvbuf[1])) {
            /* RTCP: match by the sender SSRC at bytes 4..7 */
            int no_ssrc = 0;
            for (i = 0; i < rt->nb_rtsp_streams; i++) {
                RTPDemuxContext *rtpctx = rt->rtsp_streams[i]->transport_priv;
                if (!rtpctx)
                    continue;
                if (rtpctx->ssrc == AV_RB32(&buf[4])) {
                    *rtsp_st = rt->rtsp_streams[i];
                    return len;
                }
                if (!rtpctx->ssrc)
                    no_ssrc = 1;
            }
            if (no_ssrc) {
                av_log(s, AV_LOG_WARNING,
                       "Unable to pick stream for packet - SSRC not known for "
                       "all streams\n");
                return AVERROR(EAGAIN);
            }
        } else {
            /* RTP data: match by the 7-bit payload type */
            for (i = 0; i < rt->nb_rtsp_streams; i++) {
                if ((buf[1] & 0x7f) == rt->rtsp_streams[i]->sdp_payload_type) {
                    *rtsp_st = rt->rtsp_streams[i];
                    return len;
                }
            }
        }
    }
    av_log(s, AV_LOG_WARNING, "Unable to pick stream for packet\n");
    return AVERROR(EAGAIN);
}
 
/**
 * Fetch one demuxed packet from the RTSP/RTP session.
 *
 * First drains any frames still pending in the transport context of the
 * last packet (rt->cur_transport_priv), then reads a new lower-transport
 * packet and parses it. For RTP, a reordering-queue deadline (wait_end)
 * is honored: if reading times out while a queued packet exists, that
 * queued packet is emitted instead.
 *
 * @return 0 or 1 on success (1: more frames pending in the same packet),
 *         AVERROR_EOF when all streams sent RTCP BYE, or another
 *         negative AVERROR on failure.
 */
int ff_rtsp_fetch_packet(AVFormatContext *s, AVPacket *pkt)
{
    RTSPState *rt = s->priv_data;
    int ret, len;
    RTSPStream *rtsp_st, *first_queue_st = NULL;
    int64_t wait_end = 0;

    if (rt->nb_byes == rt->nb_rtsp_streams)
        return AVERROR_EOF;

    /* get next frames from the same RTP packet */
    if (rt->cur_transport_priv) {
        if (rt->transport == RTSP_TRANSPORT_RDT) {
            ret = ff_rdt_parse_packet(rt->cur_transport_priv, pkt, NULL, 0);
        } else if (rt->transport == RTSP_TRANSPORT_RTP) {
            ret = ff_rtp_parse_packet(rt->cur_transport_priv, pkt, NULL, 0);
        } else if (rt->ts && CONFIG_RTPDEC) {
            /* raw MPEG-TS over UDP: continue consuming the saved buffer */
            ret = ff_mpegts_parse_packet(rt->ts, pkt, rt->recvbuf + rt->recvbuf_pos, rt->recvbuf_len - rt->recvbuf_pos);
            if (ret >= 0) {
                rt->recvbuf_pos += ret;
                ret = rt->recvbuf_pos < rt->recvbuf_len;
            }
        } else
            ret = -1;
        /* ret == 0: packet produced, buffer exhausted;
         * ret == 1: packet produced, more pending;
         * ret <  0: nothing left, fall through to read a new packet */
        if (ret == 0) {
            rt->cur_transport_priv = NULL;
            return 0;
        } else if (ret == 1) {
            return 0;
        } else
            rt->cur_transport_priv = NULL;
    }

redo:
    if (rt->transport == RTSP_TRANSPORT_RTP) {
        /* find the oldest queued (reordered) packet across all streams;
         * its timestamp bounds how long we may wait for new data */
        int i;
        int64_t first_queue_time = 0;
        for (i = 0; i < rt->nb_rtsp_streams; i++) {
            RTPDemuxContext *rtpctx = rt->rtsp_streams[i]->transport_priv;
            int64_t queue_time;
            if (!rtpctx)
                continue;
            queue_time = ff_rtp_queued_packet_time(rtpctx);
            if (queue_time && (queue_time - first_queue_time < 0 ||
                               !first_queue_time)) {
                first_queue_time = queue_time;
                first_queue_st   = rt->rtsp_streams[i];
            }
        }
        if (first_queue_time) {
            wait_end = first_queue_time + s->max_delay;
        } else {
            wait_end = 0;
            first_queue_st = NULL;
        }
    }

    /* read next RTP packet */
    if (!rt->recvbuf) {
        rt->recvbuf = av_malloc(RECVBUF_SIZE);
        if (!rt->recvbuf)
            return AVERROR(ENOMEM);
    }

    switch(rt->lower_transport) {
    default:
#if CONFIG_RTSP_DEMUXER
    case RTSP_LOWER_TRANSPORT_TCP:
        len = ff_rtsp_tcp_read_packet(s, &rtsp_st, rt->recvbuf, RECVBUF_SIZE);
        break;
#endif
    case RTSP_LOWER_TRANSPORT_UDP:
    case RTSP_LOWER_TRANSPORT_UDP_MULTICAST:
        len = udp_read_packet(s, &rtsp_st, rt->recvbuf, RECVBUF_SIZE, wait_end);
        if (len > 0 && rtsp_st->transport_priv && rt->transport == RTSP_TRANSPORT_RTP)
            ff_rtp_check_and_send_back_rr(rtsp_st->transport_priv, rtsp_st->rtp_handle, NULL, len);
        break;
    case RTSP_LOWER_TRANSPORT_CUSTOM:
        if (first_queue_st && rt->transport == RTSP_TRANSPORT_RTP &&
            wait_end && wait_end < av_gettime())
            len = AVERROR(EAGAIN);
        else
            len = ffio_read_partial(s->pb, rt->recvbuf, RECVBUF_SIZE);
        len = pick_stream(s, &rtsp_st, rt->recvbuf, len);
        if (len > 0 && rtsp_st->transport_priv && rt->transport == RTSP_TRANSPORT_RTP)
            ff_rtp_check_and_send_back_rr(rtsp_st->transport_priv, NULL, s->pb, len);
        break;
    }
    /* deadline hit but a reordered packet is queued: emit that instead */
    if (len == AVERROR(EAGAIN) && first_queue_st &&
        rt->transport == RTSP_TRANSPORT_RTP) {
        rtsp_st = first_queue_st;
        ret = ff_rtp_parse_packet(rtsp_st->transport_priv, pkt, NULL, 0);
        goto end;
    }
    if (len < 0)
        return len;
    if (len == 0)
        return AVERROR_EOF;
    if (rt->transport == RTSP_TRANSPORT_RDT) {
        ret = ff_rdt_parse_packet(rtsp_st->transport_priv, pkt, &rt->recvbuf, len);
    } else if (rt->transport == RTSP_TRANSPORT_RTP) {
        ret = ff_rtp_parse_packet(rtsp_st->transport_priv, pkt, &rt->recvbuf, len);
        if (rtsp_st->feedback) {
            AVIOContext *pb = NULL;
            if (rt->lower_transport == RTSP_LOWER_TRANSPORT_CUSTOM)
                pb = s->pb;
            ff_rtp_send_rtcp_feedback(rtsp_st->transport_priv, rtsp_st->rtp_handle, pb);
        }
        if (ret < 0) {
            /* Either bad packet, or a RTCP packet. Check if the
             * first_rtcp_ntp_time field was initialized. */
            RTPDemuxContext *rtpctx = rtsp_st->transport_priv;
            if (rtpctx->first_rtcp_ntp_time != AV_NOPTS_VALUE) {
                /* first_rtcp_ntp_time has been initialized for this stream,
                 * copy the same value to all other uninitialized streams,
                 * in order to map their timestamp origin to the same ntp time
                 * as this one. */
                int i;
                AVStream *st = NULL;
                if (rtsp_st->stream_index >= 0)
                    st = s->streams[rtsp_st->stream_index];
                for (i = 0; i < rt->nb_rtsp_streams; i++) {
                    RTPDemuxContext *rtpctx2 = rt->rtsp_streams[i]->transport_priv;
                    AVStream *st2 = NULL;
                    if (rt->rtsp_streams[i]->stream_index >= 0)
                        st2 = s->streams[rt->rtsp_streams[i]->stream_index];
                    if (rtpctx2 && st && st2 &&
                        rtpctx2->first_rtcp_ntp_time == AV_NOPTS_VALUE) {
                        rtpctx2->first_rtcp_ntp_time = rtpctx->first_rtcp_ntp_time;
                        rtpctx2->rtcp_ts_offset = av_rescale_q(
                            rtpctx->rtcp_ts_offset, st->time_base,
                            st2->time_base);
                    }
                }
            }
            if (ret == -RTCP_BYE) {
                rt->nb_byes++;

                av_log(s, AV_LOG_DEBUG, "Received BYE for stream %d (%d/%d)\n",
                       rtsp_st->stream_index, rt->nb_byes, rt->nb_rtsp_streams);

                if (rt->nb_byes == rt->nb_rtsp_streams)
                    return AVERROR_EOF;
            }
        }
    } else if (rt->ts && CONFIG_RTPDEC) {
        ret = ff_mpegts_parse_packet(rt->ts, pkt, rt->recvbuf, len);
        if (ret >= 0) {
            if (ret < len) {
                /* more TS data left in recvbuf; remember the position so
                 * the next call continues from there */
                rt->recvbuf_len = len;
                rt->recvbuf_pos = ret;
                rt->cur_transport_priv = rt->ts;
                return 1;
            } else {
                ret = 0;
            }
        }
    } else {
        return AVERROR_INVALIDDATA;
    }
end:
    if (ret < 0)
        goto redo;
    if (ret == 1)
        /* more packets may follow, so we save the RTP context */
        rt->cur_transport_priv = rtsp_st->transport_priv;

    return ret;
}
#endif /* CONFIG_RTPDEC */
 
#if CONFIG_SDP_DEMUXER
/**
 * Probe for SDP content: scan the buffer line by line and accept it
 * when a connection line ("c=IN IP...") is found.
 */
static int sdp_probe(AVProbeData *p1)
{
    const char *cur = p1->buf;
    const char *end = p1->buf + p1->buf_size;

    while (cur < end && *cur != '\0') {
        if (cur + sizeof("c=IN IP") - 1 < end &&
            av_strstart(cur, "c=IN IP", NULL))
            return AVPROBE_SCORE_EXTENSION;

        /* advance past the next newline, tolerating a trailing '\r' */
        while (cur < end - 1 && *cur != '\n')
            cur++;
        if (++cur >= end)
            break;
        if (*cur == '\r')
            cur++;
    }
    return 0;
}
 
/**
 * Append a source-filter option ("&<name>=addr0,addr1,...") to the URL
 * in buf. Does nothing when count is 0. Truncation is handled silently
 * by av_strlcatf.
 */
static void append_source_addrs(char *buf, int size, const char *name,
                                int count, struct RTSPSource **addrs)
{
    int i;
    if (!count)
        return;
    /* first address introduces the option; the rest are comma-separated */
    av_strlcatf(buf, size, "&%s=%s", name, addrs[0]->addr);
    for (i = 1; i < count; i++)
        av_strlcatf(buf, size, ",%s", addrs[i]->addr);
}
 
/**
 * Demuxer read_header callback for SDP input: read and parse the whole
 * SDP description from s->pb, then open one RTP receive context per
 * described media stream (unless custom IO is requested, in which case
 * packets arrive through s->pb).
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int sdp_read_header(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    RTSPStream *rtsp_st;
    int size, i, err;
    char *content;
    char url[1024];

    if (!ff_network_init())
        return AVERROR(EIO);

    if (s->max_delay < 0) /* Not set by the caller */
        s->max_delay = DEFAULT_REORDERING_DELAY;
    if (rt->rtsp_flags & RTSP_FLAG_CUSTOM_IO)
        rt->lower_transport = RTSP_LOWER_TRANSPORT_CUSTOM;

    /* read the whole sdp file */
    /* XXX: better loading */
    content = av_malloc(SDP_MAX_SIZE);
    if (!content) {
        /* Fix: the allocation result was previously passed to avio_read()
         * unchecked, dereferencing NULL on OOM. */
        err = AVERROR(ENOMEM);
        goto fail;
    }
    size = avio_read(s->pb, content, SDP_MAX_SIZE - 1);
    if (size <= 0) {
        av_free(content);
        /* Fix: route through fail so ff_network_close() balances the
         * ff_network_init() above (the old early return leaked that
         * reference). */
        err = AVERROR_INVALIDDATA;
        goto fail;
    }
    content[size] ='\0';

    err = ff_sdp_parse(s, content);
    av_free(content);
    if (err) goto fail;

    /* open each RTP stream */
    for (i = 0; i < rt->nb_rtsp_streams; i++) {
        char namebuf[50];
        rtsp_st = rt->rtsp_streams[i];

        if (!(rt->rtsp_flags & RTSP_FLAG_CUSTOM_IO)) {
            /* NOTE(review): getnameinfo() result is unchecked; on failure
             * namebuf may be left uninitialized — confirm against callers. */
            getnameinfo((struct sockaddr*) &rtsp_st->sdp_ip, sizeof(rtsp_st->sdp_ip),
                        namebuf, sizeof(namebuf), NULL, 0, NI_NUMERICHOST);
            ff_url_join(url, sizeof(url), "rtp", NULL,
                        namebuf, rtsp_st->sdp_port,
                        "?localport=%d&ttl=%d&connect=%d&write_to_source=%d",
                        rtsp_st->sdp_port, rtsp_st->sdp_ttl,
                        rt->rtsp_flags & RTSP_FLAG_FILTER_SRC ? 1 : 0,
                        rt->rtsp_flags & RTSP_FLAG_RTCP_TO_SOURCE ? 1 : 0);

            append_source_addrs(url, sizeof(url), "sources",
                                rtsp_st->nb_include_source_addrs,
                                rtsp_st->include_source_addrs);
            append_source_addrs(url, sizeof(url), "block",
                                rtsp_st->nb_exclude_source_addrs,
                                rtsp_st->exclude_source_addrs);
            if (ffurl_open(&rtsp_st->rtp_handle, url, AVIO_FLAG_READ_WRITE,
                           &s->interrupt_callback, NULL) < 0) {
                err = AVERROR_INVALIDDATA;
                goto fail;
            }
        }
        if ((err = ff_rtsp_open_transport_ctx(s, rtsp_st)))
            goto fail;
    }
    return 0;
fail:
    ff_rtsp_close_streams(s);
    ff_network_close();
    return err;
}
 
/* Demuxer read_close callback: release every RTP receive context, then
 * drop the network-layer reference taken in sdp_read_header(). */
static int sdp_read_close(AVFormatContext *s)
{
    ff_rtsp_close_streams(s);
    ff_network_close();
    return 0;
}
 
/* AVClass exposing the SDP-specific AVOptions (sdp_options) through the
 * demuxer's priv_data. */
static const AVClass sdp_demuxer_class = {
    .class_name     = "SDP demuxer",
    .item_name      = av_default_item_name,
    .option         = sdp_options,
    .version        = LIBAVUTIL_VERSION_INT,
};
 
/* Demuxer for standalone SDP descriptions; packet reading is shared with
 * the RTSP demuxer via ff_rtsp_fetch_packet(). */
AVInputFormat ff_sdp_demuxer = {
    .name           = "sdp",
    .long_name      = NULL_IF_CONFIG_SMALL("SDP"),
    .priv_data_size = sizeof(RTSPState),
    .read_probe     = sdp_probe,
    .read_header    = sdp_read_header,
    .read_packet    = ff_rtsp_fetch_packet,
    .read_close     = sdp_read_close,
    .priv_class     = &sdp_demuxer_class,
};
#endif /* CONFIG_SDP_DEMUXER */
 
#if CONFIG_RTP_DEMUXER
/* The rtp demuxer is selected purely by the URL scheme. */
static int rtp_probe(AVProbeData *p)
{
    return av_strstart(p->filename, "rtp:", NULL) ? AVPROBE_SCORE_MAX : 0;
}
 
/**
 * Demuxer read_header callback for rtp:// URLs without an SDP file:
 * sniff the first RTP data packet to learn the payload type, synthesize
 * a minimal SDP description, and delegate to sdp_read_header().
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int rtp_read_header(AVFormatContext *s)
{
    uint8_t recvbuf[RTP_MAX_PACKET_LENGTH];
    char host[500], sdp[500];
    int ret, port;
    URLContext* in = NULL;
    int payload_type;
    AVCodecContext codec = { 0 };
    struct sockaddr_storage addr;
    AVIOContext pb;
    socklen_t addrlen = sizeof(addr);
    RTSPState *rt = s->priv_data;

    if (!ff_network_init())
        return AVERROR(EIO);

    ret = ffurl_open(&in, s->filename, AVIO_FLAG_READ,
                     &s->interrupt_callback, NULL);
    if (ret)
        goto fail;

    /* Read until we see a plausible RTP data packet: at least 12 bytes
     * (fixed header size), version 2, and not an RTCP packet type. */
    while (1) {
        ret = ffurl_read(in, recvbuf, sizeof(recvbuf));
        if (ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0)
            goto fail;
        if (ret < 12) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }

        if ((recvbuf[0] & 0xc0) != 0x80) {
            av_log(s, AV_LOG_WARNING, "Unsupported RTP version packet "
                                      "received\n");
            continue;
        }

        if (RTP_PT_IS_RTCP(recvbuf[1]))
            continue;

        payload_type = recvbuf[1] & 0x7f;
        break;
    }
    /* Record the local address family for the synthesized SDP. */
    getsockname(ffurl_get_file_handle(in), (struct sockaddr*) &addr, &addrlen);
    ffurl_close(in);
    in = NULL;

    if (ff_rtp_get_codec_info(&codec, payload_type)) {
        av_log(s, AV_LOG_ERROR, "Unable to receive RTP payload type %d "
                                "without an SDP file describing it\n",
                payload_type);
        /* Fix: previously this fell through to fail with ret still holding
         * the positive byte count from ffurl_read(), so the error path
         * returned "success" to the caller. */
        ret = AVERROR(EINVAL);
        goto fail;
    }
    if (codec.codec_type != AVMEDIA_TYPE_DATA) {
        av_log(s, AV_LOG_WARNING, "Guessing on RTP content - if not received "
                                  "properly you need an SDP file "
                                  "describing it\n");
    }

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port,
                 NULL, 0, s->filename);

    /* Build a one-stream SDP matching what we sniffed. */
    snprintf(sdp, sizeof(sdp),
             "v=0\r\nc=IN IP%d %s\r\nm=%s %d RTP/AVP %d\r\n",
             addr.ss_family == AF_INET ? 4 : 6, host,
             codec.codec_type == AVMEDIA_TYPE_DATA  ? "application" :
             codec.codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio",
             port, payload_type);
    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sdp);

    ffio_init_context(&pb, sdp, strlen(sdp), 0, NULL, NULL, NULL, NULL);
    s->pb = &pb;

    /* sdp_read_header initializes this again */
    ff_network_close();

    rt->media_type_mask = (1 << (AVMEDIA_TYPE_DATA+1)) - 1;

    ret = sdp_read_header(s);
    s->pb = NULL;
    return ret;

fail:
    if (in)
        ffurl_close(in);
    ff_network_close();
    return ret;
}
 
/* AVClass exposing the RTP-specific AVOptions (rtp_options) through the
 * demuxer's priv_data. */
static const AVClass rtp_demuxer_class = {
    .class_name     = "RTP demuxer",
    .item_name      = av_default_item_name,
    .option         = rtp_options,
    .version        = LIBAVUTIL_VERSION_INT,
};
 
/* Demuxer for bare rtp:// input URLs; AVFMT_NOFILE because the URL is
 * opened by rtp_read_header() itself, not by the generic layer. */
AVInputFormat ff_rtp_demuxer = {
    .name           = "rtp",
    .long_name      = NULL_IF_CONFIG_SMALL("RTP input"),
    .priv_data_size = sizeof(RTSPState),
    .read_probe     = rtp_probe,
    .read_header    = rtp_read_header,
    .read_packet    = ff_rtsp_fetch_packet,
    .read_close     = sdp_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &rtp_demuxer_class,
};
#endif /* CONFIG_RTP_DEMUXER */
/contrib/sdk/sources/ffmpeg/libavformat/rtsp.h
0,0 → 1,627
/*
* RTSP definitions
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_RTSP_H
#define AVFORMAT_RTSP_H
 
#include <stdint.h>
#include "avformat.h"
#include "rtspcodes.h"
#include "rtpdec.h"
#include "network.h"
#include "httpauth.h"
 
#include "libavutil/log.h"
#include "libavutil/opt.h"
 
/**
 * Network layer over which RTP/etc packet data will be transported.
 * Values below RTSP_LOWER_TRANSPORT_NB are real transports usable as
 * bit positions in lower_transport_mask; the remaining values are
 * pseudo-transports used only for option handling.
 */
enum RTSPLowerTransport {
    RTSP_LOWER_TRANSPORT_UDP = 0,           /**< UDP/unicast */
    RTSP_LOWER_TRANSPORT_TCP = 1,           /**< TCP; interleaved in RTSP */
    RTSP_LOWER_TRANSPORT_UDP_MULTICAST = 2, /**< UDP/multicast */
    RTSP_LOWER_TRANSPORT_NB,
    RTSP_LOWER_TRANSPORT_HTTP = 8,          /**< HTTP tunneled - not a proper
                                                 transport mode as such,
                                                 only for use via AVOptions */
    RTSP_LOWER_TRANSPORT_CUSTOM = 16,       /**< Custom IO - not a public
                                                 option for lower_transport_mask,
                                                 but set in the SDP demuxer based
                                                 on a flag. */
};
 
/**
 * Packet profile of the data that we will be receiving. Real servers
 * commonly send RDT (although they can sometimes send RTP as well),
 * whereas most others will send RTP.
 */
enum RTSPTransport {
    RTSP_TRANSPORT_RTP, /**< Standards-compliant RTP */
    RTSP_TRANSPORT_RDT, /**< Realmedia Data Transport */
    RTSP_TRANSPORT_RAW, /**< Raw data (over UDP) */
    RTSP_TRANSPORT_NB   /**< sentinel: number of transport kinds */
};
 
/**
 * Transport mode for the RTSP control channel. This may be plain, or
 * tunneled, which is done over HTTP (one GET and one POST connection).
 */
enum RTSPControlTransport {
    RTSP_MODE_PLAIN,  /**< Normal RTSP */
    RTSP_MODE_TUNNEL  /**< RTSP over HTTP (tunneling) */
};
 
#define RTSP_DEFAULT_PORT   554     /* IANA-assigned RTSP port */
#define RTSP_MAX_TRANSPORTS 8       /* max items parsed from one Transport: line */
#define RTSP_TCP_MAX_PACKET_SIZE 1472 /* payload size for interleaved TCP packets */
#define RTSP_DEFAULT_NB_AUDIO_CHANNELS 1
#define RTSP_DEFAULT_AUDIO_SAMPLERATE 44100
#define RTSP_RTP_PORT_MIN 5000      /* default local UDP port range */
#define RTSP_RTP_PORT_MAX 65000
 
/**
 * This describes a single item in the "Transport:" line of one stream as
 * negotiated by the SETUP RTSP command. Multiple transports are comma-
 * separated ("Transport: x-read-rdt/tcp;interleaved=0-1,rtp/avp/udp;
 * client_port=1000-1001;server_port=1800-1801") and described in separate
 * RTSPTransportFields.
 */
typedef struct RTSPTransportField {
    /** interleave ids, if TCP transport; each TCP/RTSP data packet starts
     * with a '$', stream length and stream ID. If the stream ID is within
     * the range of this interleaved_min-max, then the packet belongs to
     * this stream. */
    int interleaved_min, interleaved_max;

    /** UDP multicast port range; the ports to which we should connect to
     * receive multicast UDP data. */
    int port_min, port_max;

    /** UDP client ports; these should be the local ports of the UDP RTP
     * (and RTCP) sockets over which we receive RTP/RTCP data. */
    int client_port_min, client_port_max;

    /** UDP unicast server port range; the ports to which we should connect
     * to receive unicast UDP RTP/RTCP data. */
    int server_port_min, server_port_max;

    /** time-to-live value (required for multicast); the amount of HOPs that
     * packets will be allowed to make before being discarded. */
    int ttl;

    /** transport set to record data (server pushes to us) */
    int mode_record;

    struct sockaddr_storage destination; /**< destination IP address */
    char source[INET6_ADDRSTRLEN + 1];   /**< source IP address */

    /** data/packet transport protocol; e.g. RTP or RDT */
    enum RTSPTransport transport;

    /** network layer transport protocol; e.g. TCP or UDP uni-/multicast */
    enum RTSPLowerTransport lower_transport;
} RTSPTransportField;
 
/**
 * This describes the server response to each RTSP command.
 * Fields not present in a given reply keep their zero/default values.
 */
typedef struct RTSPMessageHeader {
    /** length of the data following this header */
    int content_length;

    enum RTSPStatusCode status_code; /**< response code from server */

    /** number of items in the 'transports' variable below */
    int nb_transports;

    /** Time range of the streams that the server will stream. In
     * AV_TIME_BASE unit, AV_NOPTS_VALUE if not used */
    int64_t range_start, range_end;

    /** describes the complete "Transport:" line of the server in response
     * to a SETUP RTSP command by the client */
    RTSPTransportField transports[RTSP_MAX_TRANSPORTS];

    int seq; /**< sequence number */

    /** the "Session:" field. This value is initially set by the server and
     * should be re-transmitted by the client in every RTSP command. */
    char session_id[512];

    /** the "Location:" field. This value is used to handle redirection.
     */
    char location[4096];

    /** the "RealChallenge1:" field from the server */
    char real_challenge[64];

    /** the "Server: field, which can be used to identify some special-case
     * servers that are not 100% standards-compliant. We use this to identify
     * Windows Media Server, which has a value "WMServer/v.e.r.sion", where
     * version is a sequence of digits (e.g. 9.0.0.3372). Helix/Real servers
     * use something like "Helix [..] Server Version v.e.r.sion (platform)
     * (RealServer compatible)" or "RealServer Version v.e.r.sion (platform)",
     * where platform is the output of $uname -msr | sed 's/ /-/g'. */
    char server[64];

    /** The "timeout" comes as part of the server response to the "SETUP"
     * command, in the "Session: <xyz>[;timeout=<value>]" line. It is the
     * time, in seconds, that the server will go without traffic over the
     * RTSP/TCP connection before it closes the connection. To prevent
     * this, sent dummy requests (e.g. OPTIONS) with intervals smaller
     * than this value. */
    int timeout;

    /** The "Notice" or "X-Notice" field value. See
     * http://tools.ietf.org/html/draft-stiemerling-rtsp-announce-00
     * for a complete list of supported values. */
    int notice;

    /** The "reason" is meant to specify better the meaning of the error code
     * returned
     */
    char reason[256];

    /**
     * Content type header
     */
    char content_type[64];
} RTSPMessageHeader;
 
/**
 * Client state, i.e. whether we are currently receiving data (PLAYING) or
 * setup-but-not-receiving (PAUSED). State can be changed in applications
 * by calling av_read_play/pause().
 */
enum RTSPClientState {
    RTSP_STATE_IDLE,      /**< not initialized */
    RTSP_STATE_STREAMING, /**< initialized and sending/receiving data */
    RTSP_STATE_PAUSED,    /**< initialized, but not receiving data */
    RTSP_STATE_SEEKING,   /**< initialized, requesting a seek */
};
 
/**
 * Identify particular servers that require special handling, such as
 * standards-incompliant "Transport:" lines in the SETUP request.
 */
enum RTSPServerType {
    RTSP_SERVER_RTP,  /**< Standards-compliant RTP-server */
    RTSP_SERVER_REAL, /**< Realmedia-style server */
    RTSP_SERVER_WMS,  /**< Windows Media server */
    RTSP_SERVER_NB    /**< sentinel: number of server types */
};
 
/**
 * Private data for the RTSP demuxer (also reused by the SDP and RTP
 * demuxers and the RTSP muxer).
 *
 * @todo Use AVIOContext instead of URLContext
 */
typedef struct RTSPState {
    const AVClass *class;             /**< Class for private options. */
    URLContext *rtsp_hd; /* RTSP TCP connection handle */

    /** number of items in the 'rtsp_streams' variable */
    int nb_rtsp_streams;

    struct RTSPStream **rtsp_streams; /**< streams in this session */

    /** indicator of whether we are currently receiving data from the
     * server. Basically this isn't more than a simple cache of the
     * last PLAY/PAUSE command sent to the server, to make sure we don't
     * send 2x the same unexpectedly or commands in the wrong state. */
    enum RTSPClientState state;

    /** the seek value requested when calling av_seek_frame(). This value
     * is subsequently used as part of the "Range" parameter when emitting
     * the RTSP PLAY command. If we are currently playing, this command is
     * called instantly. If we are currently paused, this command is called
     * whenever we resume playback. Either way, the value is only used once,
     * see rtsp_read_play() and rtsp_read_seek(). */
    int64_t seek_timestamp;

    int seq;                          /**< RTSP command sequence number */

    /** copy of RTSPMessageHeader->session_id, i.e. the server-provided session
     * identifier that the client should re-transmit in each RTSP command */
    char session_id[512];

    /** copy of RTSPMessageHeader->timeout, i.e. the time (in seconds) that
     * the server will go without traffic on the RTSP/TCP line before it
     * closes the connection. */
    int timeout;

    /** timestamp of the last RTSP command that we sent to the RTSP server.
     * This is used to calculate when to send dummy commands to keep the
     * connection alive, in conjunction with timeout. */
    int64_t last_cmd_time;

    /** the negotiated data/packet transport protocol; e.g. RTP or RDT */
    enum RTSPTransport transport;

    /** the negotiated network layer transport protocol; e.g. TCP or UDP
     * uni-/multicast */
    enum RTSPLowerTransport lower_transport;

    /** brand of server that we're talking to; e.g. WMS, REAL or other.
     * Detected based on the value of RTSPMessageHeader->server or the presence
     * of RTSPMessageHeader->real_challenge */
    enum RTSPServerType server_type;

    /** the "RealChallenge1:" field from the server */
    char real_challenge[64];

    /** plaintext authorization line (username:password) */
    char auth[128];

    /** authentication state */
    HTTPAuthState auth_state;

    /** The last reply of the server to a RTSP command */
    char last_reply[2048]; /* XXX: allocate ? */

    /** RTSPStream->transport_priv of the last stream that we read a
     * packet from */
    void *cur_transport_priv;

    /** The following are used for Real stream selection */
    //@{
    /** whether we need to send a "SET_PARAMETER Subscribe:" command */
    int need_subscription;

    /** stream setup during the last frame read. This is used to detect if
     * we need to subscribe or unsubscribe to any new streams. */
    enum AVDiscard *real_setup_cache;

    /** current stream setup. This is a temporary buffer used to compare
     * current setup to previous frame setup. */
    enum AVDiscard *real_setup;

    /** the last value of the "SET_PARAMETER Subscribe:" RTSP command.
     * this is used to send the same "Unsubscribe:" if stream setup changed,
     * before sending a new "Subscribe:" command. */
    char last_subscription[1024];
    //@}

    /** The following are used for RTP/ASF streams */
    //@{
    /** ASF demuxer context for the embedded ASF stream from WMS servers */
    AVFormatContext *asf_ctx;

    /** cache for position of the asf demuxer, since we load a new
     * data packet in the bytecontext for each incoming RTSP packet. */
    uint64_t asf_pb_pos;
    //@}

    /** some MS RTSP streams contain a URL in the SDP that we need to use
     * for all subsequent RTSP requests, rather than the input URI; in
     * other cases, this is a copy of AVFormatContext->filename. */
    char control_uri[1024];

    /** The following are used for parsing raw mpegts in udp */
    //@{
    struct MpegTSContext *ts;
    int recvbuf_pos; /**< consumed bytes within recvbuf */
    int recvbuf_len; /**< valid bytes within recvbuf */
    //@}

    /** Additional output handle, used when input and output are done
     * separately, eg for HTTP tunneling. */
    URLContext *rtsp_hd_out;

    /** RTSP transport mode, such as plain or tunneled. */
    enum RTSPControlTransport control_transport;

    /* Number of RTCP BYE packets the RTSP session has received.
     * An EOF is propagated back if nb_byes == nb_streams.
     * This is reset after a seek. */
    int nb_byes;

    /** Reusable buffer for receiving packets */
    uint8_t* recvbuf;

    /**
     * A mask with all requested transport methods
     * (bits indexed by enum RTSPLowerTransport values)
     */
    int lower_transport_mask;

    /**
     * The number of returned packets
     */
    uint64_t packets;

    /**
     * Polling array for udp (one TCP slot plus two slots per stream,
     * see udp_read_packet())
     */
    struct pollfd *p;

    /**
     * Whether the server supports the GET_PARAMETER method.
     */
    int get_parameter_supported;

    /**
     * Do not begin to play the stream immediately.
     */
    int initial_pause;

    /**
     * Option flags for the chained RTP muxer.
     */
    int rtp_muxer_flags;

    /** Whether the server accepts the x-Dynamic-Rate header */
    int accept_dynamic_rate;

    /**
     * Various option flags for the RTSP muxer/demuxer (RTSP_FLAG_*).
     */
    int rtsp_flags;

    /**
     * Mask of all requested media types
     */
    int media_type_mask;

    /**
     * Minimum and maximum local UDP ports.
     */
    int rtp_port_min, rtp_port_max;

    /**
     * Timeout to wait for incoming connections.
     */
    int initial_timeout;

    /**
     * timeout of socket i/o operations.
     */
    int stimeout;

    /**
     * Size of RTP packet reordering queue.
     */
    int reordering_queue_size;

    /**
     * User-Agent string
     */
    char *user_agent;
} RTSPState;
 
/* Values for RTSPState.rtsp_flags. */
#define RTSP_FLAG_FILTER_SRC  0x1    /**< Filter incoming UDP packets -
                                          receive packets only from the right
                                          source address and port. */
#define RTSP_FLAG_LISTEN      0x2    /**< Wait for incoming connections. */
#define RTSP_FLAG_CUSTOM_IO   0x4    /**< Do all IO via the AVIOContext. */
#define RTSP_FLAG_RTCP_TO_SOURCE 0x8 /**< Send RTCP packets to the source
                                          address of received packets. */

/** One source address used for source-specific multicast filtering. */
typedef struct RTSPSource {
    char addr[128]; /**< Source-specific multicast include source IP address (from SDP content) */
} RTSPSource;
 
/**
 * Describe a single stream, as identified by a single m= line block in the
 * SDP content. In the case of RDT, one RTSPStream can represent multiple
 * AVStreams. In this case, each AVStream in this set has similar content
 * (but different codec/bitrate).
 */
typedef struct RTSPStream {
    URLContext *rtp_handle; /**< RTP stream handle (if UDP) */
    void *transport_priv;   /**< RTP/RDT parse context if input, RTP AVFormatContext if output */

    /** corresponding stream index, if any. -1 if none (MPEG2TS case) */
    int stream_index;

    /** interleave IDs; copies of RTSPTransportField->interleaved_min/max
     * for the selected transport. Only used for TCP. */
    int interleaved_min, interleaved_max;

    char control_url[1024]; /**< url for this stream (from SDP) */

    /** The following are used only in SDP, not RTSP */
    //@{
    int sdp_port;             /**< port (from SDP content) */
    struct sockaddr_storage sdp_ip; /**< IP address (from SDP content) */
    int nb_include_source_addrs; /**< Number of source-specific multicast include source IP addresses (from SDP content) */
    struct RTSPSource **include_source_addrs; /**< Source-specific multicast include source IP addresses (from SDP content) */
    int nb_exclude_source_addrs; /**< Number of source-specific multicast exclude source IP addresses (from SDP content) */
    struct RTSPSource **exclude_source_addrs; /**< Source-specific multicast exclude source IP addresses (from SDP content) */
    int sdp_ttl;              /**< IP Time-To-Live (from SDP content) */
    int sdp_payload_type;     /**< payload type */
    //@}

    /** The following are used for dynamic protocols (rtpdec_*.c/rdt.c) */
    //@{
    /** handler structure */
    RTPDynamicProtocolHandler *dynamic_handler;

    /** private data associated with the dynamic protocol */
    PayloadContext *dynamic_protocol_context;
    //@}

    /** Enable sending RTCP feedback messages according to RFC 4585 */
    int feedback;

    /** SRTP crypto suite name — presumably from an SDP crypto attribute; confirm against rtpdec/rtpenc usage */
    char crypto_suite[40];
    /** SRTP key/salt parameters matching crypto_suite — TODO confirm source */
    char crypto_params[100];
} RTSPStream;
 
/**
 * Parse one RTSP message header line and store the result in reply,
 * updating the (de)muxer state for stateful headers.
 */
void ff_rtsp_parse_line(RTSPMessageHeader *reply, const char *buf,
                        RTSPState *rt, const char *method);
 
/**
 * Send a command to the RTSP server without waiting for the reply.
 *
 * @param s       RTSP (de)muxer context
 * @param method  the method for the request
 * @param url     the target url for the request
 * @param headers extra header lines to include in the request, or NULL
 *
 * @see rtsp_send_cmd_with_content_async
 */
int ff_rtsp_send_cmd_async(AVFormatContext *s, const char *method,
                           const char *url, const char *headers);

/**
 * Send a command to the RTSP server and wait for the reply.
 *
 * @param s RTSP (de)muxer context
 * @param method the method for the request
 * @param url the target url for the request
 * @param headers extra header lines to include in the request
 * @param reply pointer where the RTSP message header will be stored
 * @param content_ptr pointer where the RTSP message body, if any, will
 *                    be stored (length is in reply)
 * @param send_content if non-null, the data to send as request body content
 * @param send_content_length the length of the send_content data, or 0 if
 *                            send_content is null
 *
 * @return zero if success, nonzero otherwise
 */
int ff_rtsp_send_cmd_with_content(AVFormatContext *s,
                                  const char *method, const char *url,
                                  const char *headers,
                                  RTSPMessageHeader *reply,
                                  unsigned char **content_ptr,
                                  const unsigned char *send_content,
                                  int send_content_length);

/**
 * Send a command to the RTSP server and wait for the reply.
 *
 * @see rtsp_send_cmd_with_content
 */
int ff_rtsp_send_cmd(AVFormatContext *s, const char *method,
                     const char *url, const char *headers,
                     RTSPMessageHeader *reply, unsigned char **content_ptr);

/**
 * Read an RTSP message from the server, or prepare to read data
 * packets if we're reading data interleaved over the TCP/RTSP
 * connection as well.
 *
 * @param s RTSP (de)muxer context
 * @param reply pointer where the RTSP message header will be stored
 * @param content_ptr pointer where the RTSP message body, if any, will
 *                    be stored (length is in reply)
 * @param return_on_interleaved_data whether the function may return if we
 *                   encounter a data marker ('$'), which precedes data
 *                   packets over interleaved TCP/RTSP connections. If this
 *                   is set, this function will return 1 after encountering
 *                   a '$'. If it is not set, the function will skip any
 *                   data packets (if they are encountered), until a reply
 *                   has been fully parsed. If no more data is available
 *                   without parsing a reply, it will return an error.
 * @param method the RTSP method this is a reply to. This affects how
 *               some response headers are acted upon. May be NULL.
 *
 * @return 1 if a data packets is ready to be received, -1 on error,
 *          and 0 on success.
 */
int ff_rtsp_read_reply(AVFormatContext *s, RTSPMessageHeader *reply,
                       unsigned char **content_ptr,
                       int return_on_interleaved_data, const char *method);

/**
 * Skip an RTP/TCP interleaved packet.
 */
void ff_rtsp_skip_packet(AVFormatContext *s);

/**
 * Connect to the RTSP server and set up the individual media streams.
 * This can be used for both muxers and demuxers.
 *
 * @param s RTSP (de)muxer context
 *
 * @return 0 on success, < 0 on error. Cleans up all allocations done
 *          within the function on error.
 */
int ff_rtsp_connect(AVFormatContext *s);
 
/**
 * Close and free all streams within the RTSP (de)muxer
 *
 * @param s RTSP (de)muxer context
 */
void ff_rtsp_close_streams(AVFormatContext *s);

/**
 * Close all connection handles within the RTSP (de)muxer
 *
 * @param s RTSP (de)muxer context
 */
void ff_rtsp_close_connections(AVFormatContext *s);

/**
 * Get the description of the stream and set up the RTSPStream child
 * objects.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_rtsp_setup_input_streams(AVFormatContext *s, RTSPMessageHeader *reply);

/**
 * Announce the stream to the server and set up the RTSPStream child
 * objects for each media stream.
 */
int ff_rtsp_setup_output_streams(AVFormatContext *s, const char *addr);

/**
 * Parse RTSP commands (OPTIONS, PAUSE and TEARDOWN) during streaming in
 * listen mode.
 */
int ff_rtsp_parse_streaming_commands(AVFormatContext *s);

/**
 * Parse an SDP description of streams by populating an RTSPState struct
 * within the AVFormatContext; also allocate the RTP streams and the
 * pollfd array used for UDP streams.
 */
int ff_sdp_parse(AVFormatContext *s, const char *content);

/**
 * Receive one RTP packet from a TCP interleaved RTSP stream.
 *
 * @return the packet length on success, negative on failure
 */
int ff_rtsp_tcp_read_packet(AVFormatContext *s, RTSPStream **prtsp_st,
                            uint8_t *buf, int buf_size);

/**
 * Receive one packet from the RTSPStreams set up in the AVFormatContext
 * (which should contain a RTSPState struct as priv_data).
 */
int ff_rtsp_fetch_packet(AVFormatContext *s, AVPacket *pkt);

/**
 * Do the SETUP requests for each stream for the chosen
 * lower transport mode.
 * @return 0 on success, <0 on error, 1 if protocol is unavailable
 */
int ff_rtsp_make_setup_request(AVFormatContext *s, const char *host, int port,
                               int lower_transport, const char *real_challenge);

/**
 * Undo the effect of ff_rtsp_make_setup_request, close the
 * transport_priv and rtp_handle fields.
 */
void ff_rtsp_undo_setup(AVFormatContext *s);

/**
 * Open RTSP transport context.
 */
int ff_rtsp_open_transport_ctx(AVFormatContext *s, RTSPStream *rtsp_st);

/** AVOption table shared by RTSP-based (de)muxers. */
extern const AVOption ff_rtsp_options[];

#endif /* AVFORMAT_RTSP_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtspcodes.h
0,0 → 1,54
/*
* RTSP definitions
* copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_RTSPCODES_H
#define AVFORMAT_RTSPCODES_H
 
/** RTSP handling */
enum RTSPStatusCode {
RTSP_STATUS_OK =200, /**< OK */
RTSP_STATUS_METHOD =405, /**< Method Not Allowed */
RTSP_STATUS_BANDWIDTH =453, /**< Not Enough Bandwidth */
RTSP_STATUS_SESSION =454, /**< Session Not Found */
RTSP_STATUS_STATE =455, /**< Method Not Valid in This State */
RTSP_STATUS_AGGREGATE =459, /**< Aggregate operation not allowed */
RTSP_STATUS_ONLY_AGGREGATE =460, /**< Only aggregate operation allowed */
RTSP_STATUS_TRANSPORT =461, /**< Unsupported transport */
RTSP_STATUS_INTERNAL =500, /**< Internal Server Error */
RTSP_STATUS_SERVICE =503, /**< Service Unavailable */
RTSP_STATUS_VERSION =505, /**< RTSP Version not supported */
};
 
enum RTSPMethod {
DESCRIBE,
ANNOUNCE,
OPTIONS,
SETUP,
PLAY,
PAUSE,
TEARDOWN,
GET_PARAMETER,
SET_PARAMETER,
REDIRECT,
RECORD,
UNKNOWN = -1,
};
#endif /* AVFORMAT_RTSPCODES_H */
/contrib/sdk/sources/ffmpeg/libavformat/rtspdec.c
0,0 → 1,938
/*
* RTSP demuxer
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/random_seed.h"
#include "libavutil/time.h"
#include "avformat.h"
 
#include "internal.h"
#include "network.h"
#include "os_support.h"
#include "rtpproto.h"
#include "rtsp.h"
#include "rdt.h"
#include "url.h"
 
/* Maps RTSP status codes to their reason phrases for composing replies
 * in listen mode; the { 0, "NULL" } entry terminates the table. */
static const struct RTSPStatusMessage {
    enum RTSPStatusCode code;
    const char *message;
} status_messages[] = {
    { RTSP_STATUS_OK,             "OK"                               },
    { RTSP_STATUS_METHOD,         "Method Not Allowed"               },
    { RTSP_STATUS_BANDWIDTH,      "Not Enough Bandwidth"             },
    { RTSP_STATUS_SESSION,        "Session Not Found"                },
    { RTSP_STATUS_STATE,          "Method Not Valid in This State"   },
    { RTSP_STATUS_AGGREGATE,      "Aggregate operation not allowed"  },
    { RTSP_STATUS_ONLY_AGGREGATE, "Only aggregate operation allowed" },
    { RTSP_STATUS_TRANSPORT,      "Unsupported transport"            },
    { RTSP_STATUS_INTERNAL,       "Internal Server Error"            },
    { RTSP_STATUS_SERVICE,        "Service Unavailable"              },
    { RTSP_STATUS_VERSION,        "RTSP Version not supported"       },
    { 0,                          "NULL"                             }
};
 
/** Demuxer close callback: end the session and release all resources. */
static int rtsp_read_close(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;

    /* In listen mode we are the server side, so there is no remote
     * session to tear down; otherwise tell the server we are leaving
     * (without waiting for the reply). */
    if (!(rt->rtsp_flags & RTSP_FLAG_LISTEN))
        ff_rtsp_send_cmd_async(s, "TEARDOWN", rt->control_uri, NULL);

    ff_rtsp_close_streams(s);
    ff_rtsp_close_connections(s);
    ff_network_close();
    /* real_setup points into the second half of the real_setup_cache
     * allocation (see rtsp_read_header), so only the cache is freed. */
    rt->real_setup = NULL;
    av_freep(&rt->real_setup_cache);
    return 0;
}
 
/**
 * Read one CRLF-terminated line from the RTSP control connection.
 *
 * Carriage returns are silently dropped; the terminating newline is
 * replaced with a NUL byte.
 *
 * @param s        RTSP demuxer context
 * @param rbuf     destination buffer
 * @param rbufsize size of rbuf
 * @param rbuflen  receives the length of the line (without terminator)
 * @return 0 on success, a negative AVERROR code on read failure, EOF,
 *         or when the line does not fit into rbuf
 */
static inline int read_line(AVFormatContext *s, char *rbuf, const int rbufsize,
                            int *rbuflen)
{
    RTSPState *rt = s->priv_data;
    int pos = 0;

    *rbuflen = 0;

    while (pos < rbufsize) {
        int ret = ffurl_read_complete(rt->rtsp_hd, rbuf + pos, 1);
        if (ret <= 0)
            return ret ? ret : AVERROR_EOF;
        if (rbuf[pos] == '\n') {
            /* End of line: terminate and report its length. */
            rbuf[pos] = '\0';
            *rbuflen  = pos;
            return 0;
        }
        if (rbuf[pos] != '\r') /* '\r' is ignored (overwritten next read) */
            pos++;
    }
    av_log(s, AV_LOG_ERROR, "Message too long\n");
    return AVERROR(EIO);
}
 
/**
 * Compose and send an RTSP reply for the given status code.
 *
 * @param s            RTSP demuxer context
 * @param code         status code to send; must exist in status_messages
 * @param extracontent optional extra header lines (may be NULL)
 * @param seq          CSeq value to echo back
 * @return 0 on success, AVERROR(EINVAL) for an unknown status code
 */
static int rtsp_send_reply(AVFormatContext *s, enum RTSPStatusCode code,
                           const char *extracontent, uint16_t seq)
{
    RTSPState *rt = s->priv_data;
    char message[4096];
    int i;

    /* Look up the reason phrase; the table ends with a zero code. */
    for (i = 0; status_messages[i].code; i++)
        if (status_messages[i].code == code)
            break;
    if (!status_messages[i].code)
        return AVERROR(EINVAL);

    snprintf(message, sizeof(message), "RTSP/1.0 %d %s\r\n",
             code, status_messages[i].message);
    av_strlcatf(message, sizeof(message), "CSeq: %d\r\n", seq);
    av_strlcatf(message, sizeof(message), "Server: %s\r\n", LIBAVFORMAT_IDENT);
    if (extracontent)
        av_strlcat(message, extracontent, sizeof(message));
    av_strlcat(message, "\r\n", sizeof(message));
    av_dlog(s, "Sending response:\n%s", message);
    ffurl_write(rt->rtsp_hd, message, strlen(message));

    return 0;
}
 
/**
 * Verify that the Session header of an incoming request matches the
 * session id we established, replying with 454 on mismatch.
 *
 * @return 0 on match (or when no session exists yet),
 *         AVERROR_STREAM_NOT_FOUND on mismatch
 */
static inline int check_sessionid(AVFormatContext *s,
                                  RTSPMessageHeader *request)
{
    RTSPState *rt = s->priv_data;
    unsigned char *session_id = rt->session_id;

    if (session_id[0] == '\0') {
        av_log(s, AV_LOG_WARNING, "There is no session-id at the moment\n");
        return 0;
    }
    if (!strcmp(session_id, request->session_id))
        return 0;

    av_log(s, AV_LOG_ERROR, "Unexpected session-id %s\n",
           request->session_id);
    rtsp_send_reply(s, RTSP_STATUS_SESSION, NULL, request->seq);
    return AVERROR_STREAM_NOT_FOUND;
}
 
/**
 * Read and parse the header lines of an incoming RTSP request, up to the
 * empty line that terminates the header block.
 *
 * Checks that the client incremented CSeq by exactly one and, except for
 * OPTIONS (which may be issued outside a session), that the Session
 * header matches.
 *
 * @param s       RTSP demuxer context
 * @param request receives the parsed message header
 * @param method  method string this request is expected to carry
 * @return 0 on success, a negative AVERROR code on failure
 */
static inline int rtsp_read_request(AVFormatContext *s,
                                    RTSPMessageHeader *request,
                                    const char *method)
{
    RTSPState *rt = s->priv_data;
    char rbuf[1024];
    int rbuflen, ret;
    do {
        ret = read_line(s, rbuf, sizeof(rbuf), &rbuflen);
        if (ret)
            return ret;
        if (rbuflen > 1) {
            av_dlog(s, "Parsing[%d]: %s\n", rbuflen, rbuf);
            ff_rtsp_parse_line(request, rbuf, rt, method);
        }
    } while (rbuflen > 0); /* an empty line ends the header block */
    /* Each request must advance the sequence number by exactly one. */
    if (request->seq != rt->seq + 1) {
        av_log(s, AV_LOG_ERROR, "Unexpected Sequence number %d\n",
               request->seq);
        return AVERROR(EINVAL);
    }
    if (rt->session_id[0] && strcmp(method, "OPTIONS")) {
        ret = check_sessionid(s, request);
        if (ret)
            return ret;
    }

    return 0;
}
 
/**
 * Handle an ANNOUNCE request in listen mode: read the SDP body the client
 * sends and set up the described streams.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rtsp_read_announce(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader request = { 0 };
    char sdp[4096];
    int ret;

    ret = rtsp_read_request(s, &request, "ANNOUNCE");
    if (ret)
        return ret;
    rt->seq++;
    /* Only SDP session descriptions are understood. */
    if (strcmp(request.content_type, "application/sdp")) {
        av_log(s, AV_LOG_ERROR, "Unexpected content type %s\n",
               request.content_type);
        rtsp_send_reply(s, RTSP_STATUS_SERVICE, NULL, request.seq);
        return AVERROR_OPTION_NOT_FOUND;
    }
    if (request.content_length && request.content_length < sizeof(sdp) - 1) {
        /* Read SDP */
        if (ffurl_read_complete(rt->rtsp_hd, sdp, request.content_length)
            < request.content_length) {
            av_log(s, AV_LOG_ERROR,
                   "Unable to get complete SDP Description in ANNOUNCE\n");
            rtsp_send_reply(s, RTSP_STATUS_INTERNAL, NULL, request.seq);
            return AVERROR(EIO);
        }
        sdp[request.content_length] = '\0';
        av_log(s, AV_LOG_VERBOSE, "SDP: %s\n", sdp);
        ret = ff_sdp_parse(s, sdp);
        if (ret)
            return ret;
        rtsp_send_reply(s, RTSP_STATUS_OK, NULL, request.seq);
        return 0;
    }
    /* Missing or oversized Content-Length: refuse the announcement. */
    av_log(s, AV_LOG_ERROR,
           "Content-Length header value exceeds sdp allocated buffer (4KB)\n");
    rtsp_send_reply(s, RTSP_STATUS_INTERNAL,
                    "Content-Length exceeds buffer size", request.seq);
    return AVERROR(EIO);
}
 
/**
 * Handle an OPTIONS request in listen mode by advertising the methods
 * this demuxer implements.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rtsp_read_options(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader request = { 0 };
    int ret;

    /* Consume the request headers. */
    if ((ret = rtsp_read_request(s, &request, "OPTIONS")) != 0)
        return ret;
    rt->seq++;

    /* Reply with the list of supported methods. */
    rtsp_send_reply(s, RTSP_STATUS_OK,
                    "Public: ANNOUNCE, PAUSE, SETUP, TEARDOWN, RECORD\r\n",
                    request.seq);
    return 0;
}
 
/**
 * Handle a SETUP request in listen mode: validate the requested transport,
 * bind the matching RTSPStream and reply with the negotiated transport
 * parameters.
 *
 * @param s          RTSP demuxer context
 * @param host       local host name/address used in the reply and for UDP
 * @param controlurl control URL identifying the track to set up
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rtsp_read_setup(AVFormatContext *s, char* host, char *controlurl)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader request = { 0 };
    int ret = 0;
    char url[1024];
    RTSPStream *rtsp_st;
    char responseheaders[1024];
    int localport    = -1;
    int transportidx = 0;
    int streamid     = 0;

    ret = rtsp_read_request(s, &request, "SETUP");
    if (ret)
        return ret;
    rt->seq++;
    if (!request.nb_transports) {
        av_log(s, AV_LOG_ERROR, "No transport defined in SETUP\n");
        return AVERROR_INVALIDDATA;
    }
    /* We only accept mode=record/receive over plain UDP or TCP. */
    for (transportidx = 0; transportidx < request.nb_transports;
         transportidx++) {
        if (!request.transports[transportidx].mode_record ||
            (request.transports[transportidx].lower_transport !=
             RTSP_LOWER_TRANSPORT_UDP &&
             request.transports[transportidx].lower_transport !=
             RTSP_LOWER_TRANSPORT_TCP)) {
            av_log(s, AV_LOG_ERROR, "mode=record/receive not set or transport"
                   " protocol not supported (yet)\n");
            return AVERROR_INVALIDDATA;
        }
    }
    if (request.nb_transports > 1)
        av_log(s, AV_LOG_WARNING, "More than one transport not supported, "
               "using first of all\n");
    /* Find the announced stream matching this control URL. */
    for (streamid = 0; streamid < rt->nb_rtsp_streams; streamid++) {
        if (!strcmp(rt->rtsp_streams[streamid]->control_url,
                    controlurl))
            break;
    }
    if (streamid == rt->nb_rtsp_streams) {
        av_log(s, AV_LOG_ERROR, "Unable to find requested track\n");
        return AVERROR_STREAM_NOT_FOUND;
    }
    rtsp_st   = rt->rtsp_streams[streamid];
    localport = rt->rtp_port_min;

    if (request.transports[0].lower_transport == RTSP_LOWER_TRANSPORT_TCP) {
        rt->lower_transport = RTSP_LOWER_TRANSPORT_TCP;
        if ((ret = ff_rtsp_open_transport_ctx(s, rtsp_st))) {
            rtsp_send_reply(s, RTSP_STATUS_TRANSPORT, NULL, request.seq);
            return ret;
        }
        rtsp_st->interleaved_min = request.transports[0].interleaved_min;
        rtsp_st->interleaved_max = request.transports[0].interleaved_max;
        snprintf(responseheaders, sizeof(responseheaders), "Transport: "
                 "RTP/AVP/TCP;unicast;mode=receive;interleaved=%d-%d"
                 "\r\n", request.transports[0].interleaved_min,
                 request.transports[0].interleaved_max);
    } else {
        /* Probe local UDP ports upwards, two at a time (RTP + RTCP),
         * until one can be opened or the configured range is exhausted.
         * NOTE: the previous condition, (ret || localport > max), never
         * became false while ffurl_open() kept failing and thus looped
         * forever; loop only while open fails AND ports remain. */
        do {
            ff_url_join(url, sizeof(url), "rtp", NULL, host, localport, NULL);
            av_dlog(s, "Opening: %s", url);
            ret = ffurl_open(&rtsp_st->rtp_handle, url, AVIO_FLAG_READ_WRITE,
                             &s->interrupt_callback, NULL);
            if (ret)
                localport += 2;
        } while (ret && localport <= rt->rtp_port_max);
        if (ret) {
            /* Port range exhausted without a successful bind. */
            rtsp_send_reply(s, RTSP_STATUS_TRANSPORT, NULL, request.seq);
            return ret;
        }

        av_dlog(s, "Listening on: %d",
                ff_rtp_get_local_rtp_port(rtsp_st->rtp_handle));
        if ((ret = ff_rtsp_open_transport_ctx(s, rtsp_st))) {
            rtsp_send_reply(s, RTSP_STATUS_TRANSPORT, NULL, request.seq);
            return ret;
        }

        localport = ff_rtp_get_local_rtp_port(rtsp_st->rtp_handle);
        snprintf(responseheaders, sizeof(responseheaders), "Transport: "
                 "RTP/AVP/UDP;unicast;mode=receive;source=%s;"
                 "client_port=%d-%d;server_port=%d-%d\r\n",
                 host, request.transports[0].client_port_min,
                 request.transports[0].client_port_max, localport,
                 localport + 1);
    }

    /* Establish sessionid if not previously set */
    /* RFC 2326: session id must be at least 8 digits */
    while (strlen(rt->session_id) < 8)
        av_strlcatf(rt->session_id, 512, "%u", av_get_random_seed());

    av_strlcatf(responseheaders, sizeof(responseheaders), "Session: %s\r\n",
                rt->session_id);
    /* Send Reply */
    rtsp_send_reply(s, RTSP_STATUS_OK, responseheaders, request.seq);

    rt->state = RTSP_STATE_PAUSED;
    return 0;
}
 
/**
 * Handle a RECORD request in listen mode; on success the session switches
 * to the streaming state.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rtsp_read_record(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader request = { 0 };
    char responseheaders[1024];
    int ret;

    if ((ret = rtsp_read_request(s, &request, "RECORD")) != 0)
        return ret;
    if ((ret = check_sessionid(s, &request)) != 0)
        return ret;
    rt->seq++;

    /* Echo the session id back in the reply. */
    snprintf(responseheaders, sizeof(responseheaders), "Session: %s\r\n",
             rt->session_id);
    rtsp_send_reply(s, RTSP_STATUS_OK, responseheaders, request.seq);

    rt->state = RTSP_STATE_STREAMING;
    return 0;
}
 
/**
 * Parse the request line of an incoming RTSP command
 * ("METHOD uri RTSP/1.0"), validate the method against the current
 * session state and check (and possibly update) the control URI.
 *
 * @param s          RTSP demuxer context
 * @param line       the complete request line
 * @param linelen    length of line (currently unused)
 * @param uri        output buffer for the request URI
 * @param urisize    size of the uri buffer
 * @param method     output buffer for the method string
 * @param methodsize size of the method buffer
 * @param methodcode receives the parsed method, UNKNOWN if unrecognized
 * @return 0 on success, a negative AVERROR code on failure
 */
static inline int parse_command_line(AVFormatContext *s, const char *line,
                                     int linelen, char *uri, int urisize,
                                     char *method, int methodsize,
                                     enum RTSPMethod *methodcode)
{
    RTSPState *rt = s->priv_data;
    const char *linept, *searchlinept;

    linept = strchr(line, ' ');
    /* FIX: a request line without any space previously made the pointer
     * arithmetic below operate on NULL (undefined behavior). */
    if (!linept) {
        av_log(s, AV_LOG_ERROR, "Error parsing method string\n");
        return AVERROR_INVALIDDATA;
    }
    if (linept - line > methodsize - 1) {
        av_log(s, AV_LOG_ERROR, "Method string too long\n");
        return AVERROR(EIO);
    }
    memcpy(method, line, linept - line);
    method[linept - line] = '\0';
    linept++;
    if (!strcmp(method, "ANNOUNCE"))
        *methodcode = ANNOUNCE;
    else if (!strcmp(method, "OPTIONS"))
        *methodcode = OPTIONS;
    else if (!strcmp(method, "RECORD"))
        *methodcode = RECORD;
    else if (!strcmp(method, "SETUP"))
        *methodcode = SETUP;
    else if (!strcmp(method, "PAUSE"))
        *methodcode = PAUSE;
    else if (!strcmp(method, "TEARDOWN"))
        *methodcode = TEARDOWN;
    else
        *methodcode = UNKNOWN;
    /* Check method with the state */
    if (rt->state == RTSP_STATE_IDLE) {
        if ((*methodcode != ANNOUNCE) && (*methodcode != OPTIONS)) {
            av_log(s, AV_LOG_ERROR, "Unexpected command in Idle State %s\n",
                   line);
            return AVERROR_PROTOCOL_NOT_FOUND;
        }
    } else if (rt->state == RTSP_STATE_PAUSED) {
        if ((*methodcode != OPTIONS) && (*methodcode != RECORD)
            && (*methodcode != SETUP)) {
            av_log(s, AV_LOG_ERROR, "Unexpected command in Paused State %s\n",
                   line);
            return AVERROR_PROTOCOL_NOT_FOUND;
        }
    } else if (rt->state == RTSP_STATE_STREAMING) {
        if ((*methodcode != PAUSE) && (*methodcode != OPTIONS)
            && (*methodcode != TEARDOWN)) {
            av_log(s, AV_LOG_ERROR, "Unexpected command in Streaming State"
                   " %s\n", line);
            return AVERROR_PROTOCOL_NOT_FOUND;
        }
    } else {
        av_log(s, AV_LOG_ERROR, "Unexpected State [%d]\n", rt->state);
        return AVERROR_BUG;
    }

    searchlinept = strchr(linept, ' ');
    if (searchlinept == NULL) {
        av_log(s, AV_LOG_ERROR, "Error parsing message URI\n");
        return AVERROR_INVALIDDATA;
    }
    if (searchlinept - linept > urisize - 1) {
        av_log(s, AV_LOG_ERROR, "uri string length exceeded buffer size\n");
        return AVERROR(EIO);
    }
    memcpy(uri, linept, searchlinept - linept);
    uri[searchlinept - linept] = '\0';
    if (strcmp(rt->control_uri, uri)) {
        char host[128], path[512], auth[128];
        int port;
        char ctl_host[128], ctl_path[512], ctl_auth[128];
        int ctl_port;
        av_url_split(NULL, 0, auth, sizeof(auth), host, sizeof(host), &port,
                     path, sizeof(path), uri);
        av_url_split(NULL, 0, ctl_auth, sizeof(ctl_auth), ctl_host,
                     sizeof(ctl_host), &ctl_port, ctl_path, sizeof(ctl_path),
                     rt->control_uri);
        if (strcmp(host, ctl_host))
            av_log(s, AV_LOG_INFO, "Host %s differs from expected %s\n",
                   host, ctl_host);
        if (strcmp(path, ctl_path) && *methodcode != SETUP)
            av_log(s, AV_LOG_WARNING, "WARNING: Path %s differs from expected"
                   " %s\n", path, ctl_path);
        if (*methodcode == ANNOUNCE) {
            /* ANNOUNCE defines the session; adopt the client's URI. */
            av_log(s, AV_LOG_INFO,
                   "Updating control URI to %s\n", uri);
            av_strlcpy(rt->control_uri, uri, sizeof(rt->control_uri));
        }
    }

    linept = searchlinept + 1;
    if (!av_strstart(linept, "RTSP/1.0", NULL)) {
        av_log(s, AV_LOG_ERROR, "Error parsing protocol or version\n");
        return AVERROR_PROTOCOL_NOT_FOUND;
    }
    return 0;
}
 
/**
 * Service one RTSP command (PAUSE, OPTIONS or TEARDOWN) received while
 * streaming in listen mode, updating the session state accordingly.
 *
 * @return 0 or the reply's send status on success, a negative AVERROR
 *         code on failure
 */
int ff_rtsp_parse_streaming_commands(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    unsigned char rbuf[4096];
    unsigned char method[10];
    char uri[500];
    int ret;
    int rbuflen = 0;
    RTSPMessageHeader request = { 0 };
    enum RTSPMethod methodcode;

    /* Read and validate the request line. */
    ret = read_line(s, rbuf, sizeof(rbuf), &rbuflen);
    if (ret < 0)
        return ret;
    ret = parse_command_line(s, rbuf, rbuflen, uri, sizeof(uri), method,
                             sizeof(method), &methodcode);
    if (ret) {
        av_log(s, AV_LOG_ERROR, "RTSP: Unexpected Command\n");
        return ret;
    }

    /* Consume the remaining header lines. */
    ret = rtsp_read_request(s, &request, method);
    if (ret)
        return ret;
    rt->seq++;
    if (methodcode == PAUSE) {
        rt->state = RTSP_STATE_PAUSED;
        ret = rtsp_send_reply(s, RTSP_STATUS_OK, NULL , request.seq);
        // TODO: Missing date header in response
    } else if (methodcode == OPTIONS) {
        ret = rtsp_send_reply(s, RTSP_STATUS_OK,
                              "Public: ANNOUNCE, PAUSE, SETUP, TEARDOWN, "
                              "RECORD\r\n", request.seq);
    } else if (methodcode == TEARDOWN) {
        rt->state = RTSP_STATE_IDLE;
        ret = rtsp_send_reply(s, RTSP_STATUS_OK, NULL , request.seq);
        return 0;
    }
    return ret;
}
 
/**
 * Issue a PLAY request (unless a Real-server subscription is still
 * pending) and reset/prime the RTP demux contexts for the new range.
 *
 * @return 0 on success, -1 if the server did not answer with 200 OK
 */
static int rtsp_read_play(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader reply1, *reply = &reply1;
    int i;
    char cmd[1024];

    av_log(s, AV_LOG_DEBUG, "hello state=%d\n", rt->state);
    rt->nb_byes = 0;

    if (!(rt->server_type == RTSP_SERVER_REAL && rt->need_subscription)) {
        if (rt->transport == RTSP_TRANSPORT_RTP) {
            /* Drop queued packets and reset timestamp tracking so the
             * new play range starts from a clean slate. */
            for (i = 0; i < rt->nb_rtsp_streams; i++) {
                RTSPStream *rtsp_st = rt->rtsp_streams[i];
                RTPDemuxContext *rtpctx = rtsp_st->transport_priv;
                if (!rtpctx)
                    continue;
                ff_rtp_reset_packet_queue(rtpctx);
                rtpctx->last_rtcp_ntp_time  = AV_NOPTS_VALUE;
                rtpctx->first_rtcp_ntp_time = AV_NOPTS_VALUE;
                rtpctx->base_timestamp      = 0;
                rtpctx->timestamp           = 0;
                rtpctx->unwrapped_timestamp = 0;
                rtpctx->rtcp_ts_offset      = 0;
            }
        }
        if (rt->state == RTSP_STATE_PAUSED) {
            /* Resuming from pause: send PLAY without a Range header. */
            cmd[0] = 0;
        } else {
            /* Request playback from the seek target, in seconds.millis. */
            snprintf(cmd, sizeof(cmd),
                     "Range: npt=%"PRId64".%03"PRId64"-\r\n",
                     rt->seek_timestamp / AV_TIME_BASE,
                     rt->seek_timestamp / (AV_TIME_BASE / 1000) % 1000);
        }
        ff_rtsp_send_cmd(s, "PLAY", rt->control_uri, cmd, reply, NULL);
        if (reply->status_code != RTSP_STATUS_OK) {
            return -1;
        }
        if (rt->transport == RTSP_TRANSPORT_RTP &&
            reply->range_start != AV_NOPTS_VALUE) {
            /* Record where the server actually started playing so packet
             * timestamps can be offset into each stream's time base. */
            for (i = 0; i < rt->nb_rtsp_streams; i++) {
                RTSPStream *rtsp_st = rt->rtsp_streams[i];
                RTPDemuxContext *rtpctx = rtsp_st->transport_priv;
                AVStream *st = NULL;
                if (!rtpctx || rtsp_st->stream_index < 0)
                    continue;
                st = s->streams[rtsp_st->stream_index];
                rtpctx->range_start_offset =
                    av_rescale_q(reply->range_start, AV_TIME_BASE_Q,
                                 st->time_base);
            }
        }
    }
    rt->state = RTSP_STATE_STREAMING;
    return 0;
}
 
/**
 * Pause the stream: send a PAUSE request (except on Real servers with a
 * pending subscription, where pausing is handled via the subscription
 * mechanism) and move the session to the paused state.
 *
 * @return 0 on success or if not currently streaming, -1 if the server
 *         did not answer with 200 OK
 */
static int rtsp_read_pause(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader reply1, *reply = &reply1;

    if (rt->state != RTSP_STATE_STREAMING)
        return 0;

    if (!(rt->server_type == RTSP_SERVER_REAL && rt->need_subscription)) {
        ff_rtsp_send_cmd(s, "PAUSE", rt->control_uri, NULL, reply, NULL);
        if (reply->status_code != RTSP_STATUS_OK)
            return -1;
    }
    rt->state = RTSP_STATE_PAUSED;
    return 0;
}
 
/**
 * DESCRIBE the stream and parse the returned SDP into the RTSPStream
 * child objects.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_rtsp_setup_input_streams(AVFormatContext *s, RTSPMessageHeader *reply)
{
    RTSPState *rt = s->priv_data;
    char cmd[1024];
    unsigned char *content = NULL;
    int ret;

    /* describe the stream */
    snprintf(cmd, sizeof(cmd),
             "Accept: application/sdp\r\n");
    if (rt->server_type == RTSP_SERVER_REAL) {
        /**
         * The Require: attribute is needed for proper streaming from
         * Realmedia servers.
         */
        av_strlcat(cmd,
                   "Require: com.real.retain-entity-for-setup\r\n",
                   sizeof(cmd));
    }
    ff_rtsp_send_cmd(s, "DESCRIBE", rt->control_uri, cmd, reply, &content);
    if (!content)
        return AVERROR_INVALIDDATA;
    if (reply->status_code != RTSP_STATUS_OK) {
        av_freep(&content);
        return AVERROR_INVALIDDATA;
    }

    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", content);
    /* now we got the SDP description, we parse it */
    ret = ff_sdp_parse(s, (const char *)content);
    av_freep(&content);
    if (ret < 0)
        return ret;

    return 0;
}
 
/**
 * Listen-mode entry point: accept one incoming TCP connection and service
 * RTSP requests until the client issues RECORD, at which point streaming
 * may begin.
 *
 * @return 0 once streaming may begin, a negative AVERROR code on failure
 */
static int rtsp_listen(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    char host[128], path[512], auth[128];
    char uri[500];
    int port;
    char tcpname[500];
    unsigned char rbuf[4096];
    unsigned char method[10];
    int rbuflen = 0;
    int ret;
    enum RTSPMethod methodcode;

    /* extract hostname and port */
    av_url_split(NULL, 0, auth, sizeof(auth), host, sizeof(host), &port,
                 path, sizeof(path), s->filename);

    /* ff_url_join. No authorization by now (NULL) */
    ff_url_join(rt->control_uri, sizeof(rt->control_uri), "rtsp", NULL, host,
                port, "%s", path);

    if (port < 0)
        port = RTSP_DEFAULT_PORT;

    /* Create TCP connection */
    ff_url_join(tcpname, sizeof(tcpname), "tcp", NULL, host, port,
                "?listen&listen_timeout=%d", rt->initial_timeout * 1000);

    if (ret = ffurl_open(&rt->rtsp_hd, tcpname, AVIO_FLAG_READ_WRITE,
                         &s->interrupt_callback, NULL)) {
        av_log(s, AV_LOG_ERROR, "Unable to open RTSP for listening\n");
        return ret;
    }
    rt->state = RTSP_STATE_IDLE;
    /* Replies go back over the same TCP connection. */
    rt->rtsp_hd_out = rt->rtsp_hd;
    for (;;) { /* Wait for incoming RTSP messages */
        ret = read_line(s, rbuf, sizeof(rbuf), &rbuflen);
        if (ret < 0)
            return ret;
        ret = parse_command_line(s, rbuf, rbuflen, uri, sizeof(uri), method,
                                 sizeof(method), &methodcode);
        if (ret) {
            av_log(s, AV_LOG_ERROR, "RTSP: Unexpected Command\n");
            return ret;
        }

        /* Dispatch on the method; state checking was already done by
         * parse_command_line(). */
        if (methodcode == ANNOUNCE) {
            ret = rtsp_read_announce(s);
            rt->state = RTSP_STATE_PAUSED;
        } else if (methodcode == OPTIONS) {
            ret = rtsp_read_options(s);
        } else if (methodcode == RECORD) {
            ret = rtsp_read_record(s);
            if (!ret)
                return 0; // We are ready for streaming
        } else if (methodcode == SETUP)
            ret = rtsp_read_setup(s, host, uri);
        if (ret) {
            ffurl_close(rt->rtsp_hd);
            return AVERROR_INVALIDDATA;
        }
    }
    return 0;
}
 
/** Probe callback: claim anything whose URL uses the rtsp: scheme. */
static int rtsp_probe(AVProbeData *p)
{
    return av_strstart(p->filename, "rtsp:", NULL) ? AVPROBE_SCORE_MAX : 0;
}
 
/**
 * Demuxer read_header callback: either wait for an incoming session
 * (listen mode) or connect to the server and optionally start playback.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int rtsp_read_header(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    int ret;

    /* A positive listen timeout implies listen mode. */
    if (rt->initial_timeout > 0)
        rt->rtsp_flags |= RTSP_FLAG_LISTEN;

    if (rt->rtsp_flags & RTSP_FLAG_LISTEN) {
        ret = rtsp_listen(s);
        if (ret)
            return ret;
    } else {
        ret = ff_rtsp_connect(s);
        if (ret)
            return ret;

        /* One allocation holds two arrays of nb_streams entries each:
         * the cached previous stream selection (real_setup_cache) and
         * the scratch copy of the current one (real_setup). */
        rt->real_setup_cache = !s->nb_streams ? NULL :
            av_mallocz(2 * s->nb_streams * sizeof(*rt->real_setup_cache));
        if (!rt->real_setup_cache && s->nb_streams)
            return AVERROR(ENOMEM);
        rt->real_setup = rt->real_setup_cache + s->nb_streams;

        if (rt->initial_pause) {
            /* do not start immediately */
        } else {
            if (rtsp_read_play(s) < 0) {
                ff_rtsp_close_streams(s);
                ff_rtsp_close_connections(s);
                return AVERROR_INVALIDDATA;
            }
        }
    }

    return 0;
}
 
/**
 * Receive one RTP packet from a TCP interleaved RTSP stream and match it
 * to the RTSPStream owning its channel id.
 *
 * @return the packet length on success, 0 if streaming has stopped,
 *         -1 or a negative AVERROR code on failure
 */
int ff_rtsp_tcp_read_packet(AVFormatContext *s, RTSPStream **prtsp_st,
                            uint8_t *buf, int buf_size)
{
    RTSPState *rt = s->priv_data;
    int id, len, i, ret;
    RTSPStream *rtsp_st;

    av_dlog(s, "tcp_read_packet:\n");
redo:
    /* Skip interspersed RTSP replies until a '$'-prefixed interleaved
     * data packet is seen. */
    for (;;) {
        RTSPMessageHeader reply;

        ret = ff_rtsp_read_reply(s, &reply, NULL, 1, NULL);
        if (ret < 0)
            return ret;
        if (ret == 1) /* received '$' */
            break;
        /* XXX: parse message */
        if (rt->state != RTSP_STATE_STREAMING)
            return 0;
    }
    /* Interleaved frame header: 1 byte channel id, 2 bytes BE length. */
    ret = ffurl_read_complete(rt->rtsp_hd, buf, 3);
    if (ret != 3)
        return -1;
    id  = buf[0];
    len = AV_RB16(buf + 1);
    av_dlog(s, "id=%d len=%d\n", id, len);
    /* Discard frames that cannot fit or are too short to be RTP. */
    if (len > buf_size || len < 8)
        goto redo;
    /* get the data */
    ret = ffurl_read_complete(rt->rtsp_hd, buf, len);
    if (ret != len)
        return -1;
    if (rt->transport == RTSP_TRANSPORT_RDT &&
        ff_rdt_parse_header(buf, len, &id, NULL, NULL, NULL, NULL) < 0)
        return -1;

    /* find the matching stream */
    for (i = 0; i < rt->nb_rtsp_streams; i++) {
        rtsp_st = rt->rtsp_streams[i];
        if (id >= rtsp_st->interleaved_min &&
            id <= rtsp_st->interleaved_max)
            goto found;
    }
    /* Unknown channel id: drop the frame and wait for the next one. */
    goto redo;
found:
    *prtsp_st = rtsp_st;
    return len;
}
 
/**
 * Tear down the current transport setup and redo the SETUP handshake
 * over TCP (used when falling back from UDP — see the retry logic in
 * rtsp_read_packet).
 *
 * @return 0 on success, <0 on error, 1 if the protocol is unavailable
 */
static int resetup_tcp(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    char host[1024];
    int port;

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port, NULL, 0,
                 s->filename);
    ff_rtsp_undo_setup(s);
    return ff_rtsp_make_setup_request(s, host, port, RTSP_LOWER_TRANSPORT_TCP,
                                      rt->real_challenge);
}
 
/* Demuxer read_packet callback: maintains the Real/RDT stream
 * subscription when discard flags change, fetches the next packet,
 * falls back from UDP to TCP on an initial timeout, and sends periodic
 * keep-alive requests. */
static int rtsp_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RTSPState *rt = s->priv_data;
    int ret;
    RTSPMessageHeader reply1, *reply = &reply1;
    char cmd[1024];

retry:
    if (rt->server_type == RTSP_SERVER_REAL) {
        int i;

        /* Snapshot per-stream discard settings so changes since the
         * last subscription can be detected. */
        for (i = 0; i < s->nb_streams; i++)
            rt->real_setup[i] = s->streams[i]->discard;

        if (!rt->need_subscription) {
            /* Discard flags changed: unsubscribe the old rule set; a
             * fresh subscription is sent below. */
            if (memcmp (rt->real_setup, rt->real_setup_cache,
                        sizeof(enum AVDiscard) * s->nb_streams)) {
                snprintf(cmd, sizeof(cmd),
                         "Unsubscribe: %s\r\n",
                         rt->last_subscription);
                ff_rtsp_send_cmd(s, "SET_PARAMETER", rt->control_uri,
                                 cmd, reply, NULL);
                if (reply->status_code != RTSP_STATUS_OK)
                    return AVERROR_INVALIDDATA;
                rt->need_subscription = 1;
            }
        }

        if (rt->need_subscription) {
            int r, rule_nr, first = 1;

            memcpy(rt->real_setup_cache, rt->real_setup,
                   sizeof(enum AVDiscard) * s->nb_streams);
            rt->last_subscription[0] = 0;

            /* Build a comma-separated RDT subscribe rule list covering
             * every stream that is not fully discarded. */
            snprintf(cmd, sizeof(cmd),
                     "Subscribe: ");
            for (i = 0; i < rt->nb_rtsp_streams; i++) {
                rule_nr = 0;
                for (r = 0; r < s->nb_streams; r++) {
                    if (s->streams[r]->id == i) {
                        if (s->streams[r]->discard != AVDISCARD_ALL) {
                            if (!first)
                                av_strlcat(rt->last_subscription, ",",
                                           sizeof(rt->last_subscription));
                            ff_rdt_subscribe_rule(
                                rt->last_subscription,
                                sizeof(rt->last_subscription), i, rule_nr);
                            first = 0;
                        }
                        rule_nr++;
                    }
                }
            }
            av_strlcatf(cmd, sizeof(cmd), "%s\r\n", rt->last_subscription);
            ff_rtsp_send_cmd(s, "SET_PARAMETER", rt->control_uri,
                             cmd, reply, NULL);
            if (reply->status_code != RTSP_STATUS_OK)
                return AVERROR_INVALIDDATA;
            rt->need_subscription = 0;

            /* Delivery is paused during re-subscription; resume it. */
            if (rt->state == RTSP_STATE_STREAMING)
                rtsp_read_play (s);
        }
    }

    ret = ff_rtsp_fetch_packet(s, pkt);
    if (ret < 0) {
        /* UDP never delivered anything: retry the session over TCP if
         * the transport mask allows it. */
        if (ret == AVERROR(ETIMEDOUT) && !rt->packets) {
            if (rt->lower_transport == RTSP_LOWER_TRANSPORT_UDP &&
                rt->lower_transport_mask & (1 << RTSP_LOWER_TRANSPORT_TCP)) {
                RTSPMessageHeader reply1, *reply = &reply1;
                av_log(s, AV_LOG_WARNING, "UDP timeout, retrying with TCP\n");
                if (rtsp_read_pause(s) != 0)
                    return -1;
                // TEARDOWN is required on Real-RTSP, but might make
                // other servers close the connection.
                if (rt->server_type == RTSP_SERVER_REAL)
                    ff_rtsp_send_cmd(s, "TEARDOWN", rt->control_uri, NULL,
                                     reply, NULL);
                rt->session_id[0] = '\0';
                if (resetup_tcp(s) == 0) {
                    rt->state = RTSP_STATE_IDLE;
                    rt->need_subscription = 1;
                    if (rtsp_read_play(s) != 0)
                        return -1;
                    goto retry;
                }
            }
        }
        return ret;
    }
    rt->packets++;

    if (!(rt->rtsp_flags & RTSP_FLAG_LISTEN)) {
        /* send dummy request to keep TCP connection alive */
        if ((av_gettime() - rt->last_cmd_time) / 1000000 >= rt->timeout / 2 ||
            rt->auth_state.stale) {
            if (rt->server_type == RTSP_SERVER_WMS ||
                (rt->server_type != RTSP_SERVER_REAL &&
                 rt->get_parameter_supported)) {
                ff_rtsp_send_cmd_async(s, "GET_PARAMETER", rt->control_uri, NULL);
            } else {
                ff_rtsp_send_cmd_async(s, "OPTIONS", "*", NULL);
            }
            /* The stale flag should be reset when creating the auth response in
             * ff_rtsp_send_cmd_async, but reset it here just in case we never
             * called the auth code (if we didn't have any credentials set). */
            rt->auth_state.stale = 0;
        }
    }

    return 0;
}
 
/* Demuxer seek callback: stores the target timestamp (in AV_TIME_BASE
 * units) and, if currently streaming, restarts playback from there via
 * a PAUSE/PLAY pair. */
static int rtsp_read_seek(AVFormatContext *s, int stream_index,
                          int64_t timestamp, int flags)
{
    RTSPState *rt = s->priv_data;

    /* The stored timestamp is picked up by the next PLAY request as a
     * Range header. */
    rt->seek_timestamp = av_rescale_q(timestamp,
                                      s->streams[stream_index]->time_base,
                                      AV_TIME_BASE_Q);
    if (rt->state == RTSP_STATE_STREAMING) {
        /* Pause, then resume playback at the new position. */
        if (rtsp_read_pause(s) != 0)
            return -1;
        rt->state = RTSP_STATE_SEEKING;
        if (rtsp_read_play(s) != 0)
            return -1;
    } else if (rt->state == RTSP_STATE_PAUSED) {
        /* Stay paused; the timestamp takes effect on the next PLAY. */
        rt->state = RTSP_STATE_IDLE;
    }
    /* Any other state (including IDLE) needs no action. */
    return 0;
}
 
/* AVOptions class for the RTSP demuxer; exposes the option table
 * shared with the muxer (ff_rtsp_options). */
static const AVClass rtsp_demuxer_class = {
    .class_name = "RTSP demuxer",
    .item_name  = av_default_item_name,
    .option     = ff_rtsp_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* RTSP demuxer registration. AVFMT_NOFILE: the demuxer manages its own
 * network connections instead of using an AVIOContext. */
AVInputFormat ff_rtsp_demuxer = {
    .name           = "rtsp",
    .long_name      = NULL_IF_CONFIG_SMALL("RTSP input"),
    .priv_data_size = sizeof(RTSPState),
    .read_probe     = rtsp_probe,
    .read_header    = rtsp_read_header,
    .read_packet    = rtsp_read_packet,
    .read_close     = rtsp_read_close,
    .read_seek      = rtsp_read_seek,
    .flags          = AVFMT_NOFILE,
    .read_play      = rtsp_read_play,
    .read_pause     = rtsp_read_pause,
    .priv_class     = &rtsp_demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/rtspenc.c
0,0 → 1,247
/*
* RTSP muxer
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
 
#if HAVE_POLL_H
#include <poll.h>
#endif
#include "network.h"
#include "os_support.h"
#include "rtsp.h"
#include "internal.h"
#include "avio_internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/time.h"
#include "url.h"
 
#define SDP_MAX_SIZE 16384
 
/* AVOptions class for the RTSP muxer; reuses the shared RTSP option
 * table (ff_rtsp_options). */
static const AVClass rtsp_muxer_class = {
    .class_name = "RTSP muxer",
    .item_name  = av_default_item_name,
    .option     = ff_rtsp_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/**
 * ANNOUNCE the session to the RTSP server and create one RTSPStream
 * per AVStream.
 *
 * An SDP description is generated and sent with an ANNOUNCE request;
 * each stream's control URL is the session control URI with
 * "/streamid=<n>" appended (this must match the relative URIs that
 * av_sdp_create emits).
 *
 * @param addr peer address used as the host in the SDP's URL
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_rtsp_setup_output_streams(AVFormatContext *s, const char *addr)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader reply1, *reply = &reply1;
    int i;
    char *sdp;
    AVFormatContext sdp_ctx, *ctx_array[1];

    s->start_time_realtime = av_gettime();

    /* Announce the stream */
    sdp = av_mallocz(SDP_MAX_SIZE);
    if (sdp == NULL)
        return AVERROR(ENOMEM);
    /* We create the SDP based on the RTSP AVFormatContext where we
     * aren't allowed to change the filename field. (We create the SDP
     * based on the RTSP context since the contexts for the RTP streams
     * don't exist yet.) In order to specify a custom URL with the actual
     * peer IP instead of the originally specified hostname, we create
     * a temporary copy of the AVFormatContext, where the custom URL is set.
     *
     * FIXME: Create the SDP without copying the AVFormatContext.
     * This either requires setting up the RTP stream AVFormatContexts
     * already here (complicating things immensely) or getting a more
     * flexible SDP creation interface.
     */
    sdp_ctx = *s;
    ff_url_join(sdp_ctx.filename, sizeof(sdp_ctx.filename),
                "rtsp", NULL, addr, -1, NULL);
    ctx_array[0] = &sdp_ctx;
    if (av_sdp_create(ctx_array, 1, sdp, SDP_MAX_SIZE)) {
        av_free(sdp);
        return AVERROR_INVALIDDATA;
    }
    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sdp);
    ff_rtsp_send_cmd_with_content(s, "ANNOUNCE", rt->control_uri,
                                  "Content-Type: application/sdp\r\n",
                                  reply, NULL, sdp, strlen(sdp));
    av_free(sdp);
    if (reply->status_code != RTSP_STATUS_OK)
        return AVERROR_INVALIDDATA;

    /* Set up the RTSPStreams for each AVStream */
    for (i = 0; i < s->nb_streams; i++) {
        RTSPStream *rtsp_st;

        rtsp_st = av_mallocz(sizeof(RTSPStream));
        if (!rtsp_st)
            return AVERROR(ENOMEM);
        dynarray_add(&rt->rtsp_streams, &rt->nb_rtsp_streams, rtsp_st);

        rtsp_st->stream_index = i;

        av_strlcpy(rtsp_st->control_url, rt->control_uri, sizeof(rtsp_st->control_url));
        /* Note, this must match the relative uri set in the sdp content */
        av_strlcatf(rtsp_st->control_url, sizeof(rtsp_st->control_url),
                    "/streamid=%d", i);
    }

    return 0;
}
 
/* Send a RECORD request asking the server to accept media data from
 * the start of the session; on success switch to the STREAMING state. */
static int rtsp_write_record(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader reply_buf, *reply = &reply_buf;
    char headers[1024];

    snprintf(headers, sizeof(headers), "Range: npt=0.000-\r\n");
    ff_rtsp_send_cmd(s, "RECORD", rt->control_uri, headers, reply, NULL);
    if (reply->status_code != RTSP_STATUS_OK)
        return -1;
    rt->state = RTSP_STATE_STREAMING;
    return 0;
}
 
/* Muxer write_header callback: connect to the server, then issue
 * RECORD; tear everything down again if RECORD is refused. */
static int rtsp_write_header(AVFormatContext *s)
{
    int err = ff_rtsp_connect(s);

    if (err)
        return err;

    if (rtsp_write_record(s) < 0) {
        /* RECORD failed: release the streams and the connection before
         * reporting the error. */
        ff_rtsp_close_streams(s);
        ff_rtsp_close_connections(s);
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
 
/* Flush the RTP packets queued in the stream's dynamic packet buffer
 * to the RTSP TCP connection as RFC 2326 interleaved frames
 * ('$' + channel id + 16-bit length), then reopen a fresh packet
 * buffer for subsequent packets. */
static int tcp_write_packet(AVFormatContext *s, RTSPStream *rtsp_st)
{
    RTSPState *rt = s->priv_data;
    AVFormatContext *rtpctx = rtsp_st->transport_priv;
    uint8_t *buf, *ptr;
    int size;
    uint8_t *interleave_header, *interleaved_packet;

    size = avio_close_dyn_buf(rtpctx->pb, &buf);
    rtpctx->pb = NULL;
    ptr = buf;
    while (size > 4) {
        uint32_t packet_len = AV_RB32(ptr);
        int id;
        /* The interleaving header is exactly 4 bytes, which happens to be
         * the same size as the packet length header from
         * ffio_open_dyn_packet_buf. So by writing the interleaving header
         * over these bytes, we get a consecutive interleaved packet
         * that can be written in one call. */
        interleaved_packet = interleave_header = ptr;
        ptr += 4;
        size -= 4;
        /* Stop on a corrupt length field: larger than the remaining
         * buffer, or too short to carry the byte inspected below. */
        if (packet_len > size || packet_len < 2)
            break;
        /* Use the lower channel id of the negotiated pair for RTP and
         * the upper one for RTCP. */
        if (RTP_PT_IS_RTCP(ptr[1]))
            id = rtsp_st->interleaved_max; /* RTCP */
        else
            id = rtsp_st->interleaved_min; /* RTP */
        interleave_header[0] = '$';
        interleave_header[1] = id;
        AV_WB16(interleave_header + 2, packet_len);
        ffurl_write(rt->rtsp_hd_out, interleaved_packet, 4 + packet_len);
        ptr += packet_len;
        size -= packet_len;
    }
    av_free(buf);
    return ffio_open_dyn_packet_buf(&rtpctx->pb, RTSP_TCP_MAX_PACKET_SIZE);
}
 
/* Muxer write_packet callback: first drain any pending RTSP messages
 * from the server without blocking, then hand the packet to the
 * per-stream chained RTP muxer (flushing over TCP when interleaved
 * transport is in use). */
static int rtsp_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    RTSPState *rt = s->priv_data;
    RTSPStream *rtsp_st;
    int n;
    struct pollfd p = {ffurl_get_file_handle(rt->rtsp_hd), POLLIN, 0};
    AVFormatContext *rtpctx;
    int ret;

    /* Zero-timeout poll: consume server messages if present, but never
     * stall the data path. */
    while (1) {
        n = poll(&p, 1, 0);
        if (n <= 0)
            break;
        if (p.revents & POLLIN) {
            RTSPMessageHeader reply;

            /* Don't let ff_rtsp_read_reply handle interleaved packets,
             * since it would block and wait for an RTSP reply on the socket
             * (which may not be coming any time soon) if it handles
             * interleaved packets internally. */
            ret = ff_rtsp_read_reply(s, &reply, NULL, 1, NULL);
            if (ret < 0)
                return AVERROR(EPIPE);
            if (ret == 1)
                ff_rtsp_skip_packet(s);
            /* XXX: parse message */
            if (rt->state != RTSP_STATE_STREAMING)
                return AVERROR(EPIPE);
        }
    }

    if (pkt->stream_index < 0 || pkt->stream_index >= rt->nb_rtsp_streams)
        return AVERROR_INVALIDDATA;
    rtsp_st = rt->rtsp_streams[pkt->stream_index];
    rtpctx = rtsp_st->transport_priv;

    ret = ff_write_chained(rtpctx, 0, pkt, s);
    /* ff_write_chained does all the RTP packetization. If using TCP as
     * transport, rtpctx->pb is only a dyn_packet_buf that queues up the
     * packets, so we need to send them out on the TCP connection separately.
     */
    if (!ret && rt->lower_transport == RTSP_LOWER_TRANSPORT_TCP)
        ret = tcp_write_packet(s, rtsp_st);
    return ret;
}
 
/* Muxer write_trailer callback: send a fire-and-forget TEARDOWN (no
 * reply awaited, we are shutting down anyway) and release all streams,
 * connections and the network layer. */
static int rtsp_write_close(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;

    ff_rtsp_send_cmd_async(s, "TEARDOWN", rt->control_uri, NULL);

    ff_rtsp_close_streams(s);
    ff_rtsp_close_connections(s);
    ff_network_close();

    return 0;
}
 
/* RTSP muxer registration. AVFMT_NOFILE: the muxer manages its own
 * network connections; AVFMT_GLOBALHEADER: codec extradata must be
 * available up front for the SDP. */
AVOutputFormat ff_rtsp_muxer = {
    .name           = "rtsp",
    .long_name      = NULL_IF_CONFIG_SMALL("RTSP output"),
    .priv_data_size = sizeof(RTSPState),
    .audio_codec    = AV_CODEC_ID_AAC,
    .video_codec    = AV_CODEC_ID_MPEG4,
    .write_header   = rtsp_write_header,
    .write_packet   = rtsp_write_packet,
    .write_trailer  = rtsp_write_close,
    .flags          = AVFMT_NOFILE | AVFMT_GLOBALHEADER,
    .priv_class     = &rtsp_muxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/samidec.c
0,0 → 1,137
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* SAMI subtitle demuxer
* @see http://msdn.microsoft.com/en-us/library/ms971327.aspx
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
#include "libavcodec/internal.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/intreadwrite.h"
 
/* Demuxer private context: all subtitle events are parsed up front in
 * read_header and queued here. */
typedef struct {
    FFDemuxSubtitlesQueue q; /* queue of parsed subtitle packets */
} SAMIContext;
 
/* Probe: the file must start with the "<SAMI>" root tag, optionally
 * preceded by a UTF-8 BOM. */
static int sami_probe(AVProbeData *p)
{
    const unsigned char *buf = p->buf;

    if (AV_RB24(buf) == 0xEFBBBF)
        buf += 3; /* skip UTF-8 BOM */
    if (!strncmp(buf, "<SAMI>", 6))
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
/* Parse the whole SAMI document at open time: everything before the
 * first <SYNC> tag is collected as codec extradata (header/styles);
 * each subsequent chunk becomes a queued subtitle packet. */
static int sami_read_header(AVFormatContext *s)
{
    SAMIContext *sami = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    AVBPrint buf, hdr_buf;
    char c = 0;
    int res = 0, got_first_sync_point = 0;

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1000); /* timestamps in milliseconds */
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id = AV_CODEC_ID_SAMI;

    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
    av_bprint_init(&hdr_buf, 0, AV_BPRINT_SIZE_UNLIMITED);

    while (!url_feof(s->pb)) {
        AVPacket *sub;
        /* c carries a one-character lookahead from the previous chunk,
         * so the real chunk start is one byte behind the IO position. */
        const int64_t pos = avio_tell(s->pb) - (c != 0);
        int is_sync, n = ff_smil_extract_next_chunk(s->pb, &buf, &c);

        if (n == 0)
            break;

        is_sync = !av_strncasecmp(buf.str, "<SYNC", 5);
        if (is_sync)
            got_first_sync_point = 1;

        if (!got_first_sync_point) {
            /* Still in the header: accumulate for extradata. */
            av_bprintf(&hdr_buf, "%s", buf.str);
        } else {
            /* Non-<SYNC> chunks are merged into the previous event
             * (last argument of the insert call). */
            sub = ff_subtitles_queue_insert(&sami->q, buf.str, buf.len, !is_sync);
            if (!sub) {
                res = AVERROR(ENOMEM);
                goto end;
            }
            if (is_sync) {
                const char *p = ff_smil_get_attr_ptr(buf.str, "Start");
                sub->pos      = pos;
                sub->pts      = p ? strtol(p, NULL, 10) : 0;
                sub->duration = -1; /* event lasts until the next one */
            }
        }
        av_bprint_clear(&buf);
    }

    res = avpriv_bprint_to_extradata(st->codec, &hdr_buf);
    if (res < 0)
        goto end;

    ff_subtitles_queue_finalize(&sami->q);

end:
    av_bprint_finalize(&buf, NULL);
    return res;
}
 
/* Demuxer read_packet callback: everything was queued during
 * read_header, so simply pop the next event from the queue. */
static int sami_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SAMIContext *ctx = s->priv_data;

    return ff_subtitles_queue_read_packet(&ctx->q, pkt);
}
 
/* Demuxer read_seek2 callback: delegate to the generic in-memory
 * subtitles queue seek helper. */
static int sami_read_seek(AVFormatContext *s, int stream_index,
                          int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    SAMIContext *ctx = s->priv_data;

    return ff_subtitles_queue_seek(&ctx->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* Demuxer read_close callback: release every queued subtitle packet. */
static int sami_read_close(AVFormatContext *s)
{
    SAMIContext *ctx = s->priv_data;

    ff_subtitles_queue_clean(&ctx->q);

    return 0;
}
 
/* SAMI subtitle demuxer registration. */
AVInputFormat ff_sami_demuxer = {
    .name           = "sami",
    .long_name      = NULL_IF_CONFIG_SMALL("SAMI subtitle format"),
    .priv_data_size = sizeof(SAMIContext),
    .read_probe     = sami_probe,
    .read_header    = sami_read_header,
    .read_packet    = sami_read_packet,
    .read_seek2     = sami_read_seek,
    .read_close     = sami_read_close,
    .extensions     = "smi,sami",
};
/contrib/sdk/sources/ffmpeg/libavformat/sapdec.c
0,0 → 1,238
/*
* Session Announcement Protocol (RFC 2974) demuxer
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "network.h"
#include "os_support.h"
#include "internal.h"
#include "avio_internal.h"
#include "url.h"
#include "rtpdec.h"
#if HAVE_POLL_H
#include <poll.h>
#endif
 
/* Demuxer private context for SAP announcement reception. */
struct SAPState {
    URLContext *ann_fd;       /* UDP socket listening for announcements */
    AVFormatContext *sdp_ctx; /* chained SDP demuxer for the announced session */
    AVIOContext sdp_pb;       /* in-memory IO context over the SDP text */
    uint16_t hash;            /* message id hash of the session we follow */
    char *sdp;                /* the announced SDP description */
    int eof;                  /* set when a deletion announcement is seen */
};
 
/* Probe: the SAP demuxer is selected purely by the "sap:" URL scheme. */
static int sap_probe(AVProbeData *p)
{
    return av_strstart(p->filename, "sap:", NULL) ? AVPROBE_SCORE_MAX : 0;
}
 
/* Release everything read_header set up: the chained SDP demuxer, the
 * announcement socket, the SDP text and the network layer. Safe to
 * call on a partially initialized context. */
static int sap_read_close(AVFormatContext *s)
{
    struct SAPState *state = s->priv_data;

    if (state->sdp_ctx)
        avformat_close_input(&state->sdp_ctx);
    if (state->ann_fd)
        ffurl_close(state->ann_fd);
    av_freep(&state->sdp);
    ff_network_close();

    return 0;
}
 
/**
 * Wait for a SAP session announcement, then open the announced SDP via
 * the chained SDP demuxer and mirror its streams into this context.
 *
 * Fixes vs. the previous revision:
 * - the av_strdup() result is now checked (strlen(NULL) would crash);
 * - a missing "sdp" demuxer now sets a proper negative error code
 *   instead of returning the leftover positive byte count from
 *   ffurl_read via "goto fail".
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int sap_read_header(AVFormatContext *s)
{
    struct SAPState *sap = s->priv_data;
    char host[1024], path[1024], url[1024];
    uint8_t recvbuf[RTP_MAX_PACKET_LENGTH];
    int port;
    int ret, i;
    AVInputFormat* infmt;

    if (!ff_network_init())
        return AVERROR(EIO);

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port,
                 path, sizeof(path), s->filename);
    if (port < 0)
        port = 9875; /* default SAP port (RFC 2974) */

    if (!host[0]) {
        /* Listen for announcements on sap.mcast.net if no host was specified */
        av_strlcpy(host, "224.2.127.254", sizeof(host));
    }

    ff_url_join(url, sizeof(url), "udp", NULL, host, port, "?localport=%d",
                port);
    ret = ffurl_open(&sap->ann_fd, url, AVIO_FLAG_READ,
                     &s->interrupt_callback, NULL);
    if (ret)
        goto fail;

    /* Receive announcement packets until one with a usable SDP payload
     * arrives. */
    while (1) {
        int addr_type, auth_len;
        int pos;

        ret = ffurl_read(sap->ann_fd, recvbuf, sizeof(recvbuf) - 1);
        if (ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0)
            goto fail;
        recvbuf[ret] = '\0'; /* Null terminate for easier parsing */
        if (ret < 8) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }

        /* Version field (top 3 bits) must be 1. */
        if ((recvbuf[0] & 0xe0) != 0x20) {
            av_log(s, AV_LOG_WARNING, "Unsupported SAP version packet "
                                      "received\n");
            continue;
        }

        /* Deletion announcements are only meaningful once we follow a
         * session; ignore them while waiting for one. */
        if (recvbuf[0] & 0x04) {
            av_log(s, AV_LOG_WARNING, "Received stream deletion "
                                      "announcement\n");
            continue;
        }
        addr_type = recvbuf[0] & 0x10; /* 0: IPv4 origin, nonzero: IPv6 */
        auth_len  = recvbuf[1];
        sap->hash = AV_RB16(&recvbuf[2]);
        pos = 4;
        if (addr_type)
            pos += 16; /* IPv6 */
        else
            pos += 4; /* IPv4 */
        pos += auth_len * 4;
        if (pos + 4 >= ret) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }
#define MIME "application/sdp"
        if (strcmp(&recvbuf[pos], MIME) == 0) {
            pos += strlen(MIME) + 1;
        } else if (strncmp(&recvbuf[pos], "v=0\r\n", 5) == 0) {
            // Direct SDP without a mime type
        } else {
            av_log(s, AV_LOG_WARNING, "Unsupported mime type %s\n",
                   &recvbuf[pos]);
            continue;
        }

        sap->sdp = av_strdup(&recvbuf[pos]);
        if (!sap->sdp) {
            /* Previously unchecked: strlen(NULL) below would crash. */
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        break;
    }

    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sap->sdp);
    ffio_init_context(&sap->sdp_pb, sap->sdp, strlen(sap->sdp), 0, NULL, NULL,
                      NULL, NULL);

    infmt = av_find_input_format("sdp");
    if (!infmt) {
        /* Previously "goto fail" left ret at the positive byte count
         * from ffurl_read, reporting bogus success. */
        ret = AVERROR_DEMUXER_NOT_FOUND;
        goto fail;
    }
    sap->sdp_ctx = avformat_alloc_context();
    if (!sap->sdp_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    sap->sdp_ctx->max_delay = s->max_delay;
    sap->sdp_ctx->pb        = &sap->sdp_pb;
    sap->sdp_ctx->interrupt_callback = s->interrupt_callback;
    ret = avformat_open_input(&sap->sdp_ctx, "temp.sdp", infmt, NULL);
    if (ret < 0)
        goto fail;
    if (sap->sdp_ctx->ctx_flags & AVFMTCTX_NOHEADER)
        s->ctx_flags |= AVFMTCTX_NOHEADER;
    /* Mirror the chained demuxer's streams into our own context. */
    for (i = 0; i < sap->sdp_ctx->nb_streams; i++) {
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        st->id = i;
        avcodec_copy_context(st->codec, sap->sdp_ctx->streams[i]->codec);
        st->time_base = sap->sdp_ctx->streams[i]->time_base;
    }

    return 0;

fail:
    sap_read_close(s);
    return ret;
}
 
/* Demuxer read_packet callback: drain pending announcements without
 * blocking (watching for a deletion of our session), then pull the
 * next packet from the chained SDP demuxer, replicating any streams it
 * discovered mid-stream. */
static int sap_fetch_packet(AVFormatContext *s, AVPacket *pkt)
{
    struct SAPState *sap = s->priv_data;
    int fd = ffurl_get_file_handle(sap->ann_fd);
    int n, ret;
    struct pollfd p = {fd, POLLIN, 0};
    uint8_t recvbuf[RTP_MAX_PACKET_LENGTH];

    if (sap->eof)
        return AVERROR_EOF;

    /* Zero-timeout poll: consume queued announcements only. */
    while (1) {
        n = poll(&p, 1, 0);
        if (n <= 0 || !(p.revents & POLLIN))
            break;
        ret = ffurl_read(sap->ann_fd, recvbuf, sizeof(recvbuf));
        if (ret >= 8) {
            uint16_t hash = AV_RB16(&recvbuf[2]);
            /* Should ideally check the source IP address, too */
            if (recvbuf[0] & 0x04 && hash == sap->hash) {
                /* Stream deletion */
                sap->eof = 1;
                return AVERROR_EOF;
            }
        }
    }
    ret = av_read_frame(sap->sdp_ctx, pkt);
    if (ret < 0)
        return ret;
    if (s->ctx_flags & AVFMTCTX_NOHEADER) {
        /* The chained demuxer may create streams on the fly; replicate
         * them so stream indices stay in sync. */
        while (sap->sdp_ctx->nb_streams > s->nb_streams) {
            int i = s->nb_streams;
            AVStream *st = avformat_new_stream(s, NULL);
            if (!st) {
                av_free_packet(pkt);
                return AVERROR(ENOMEM);
            }
            st->id = i;
            avcodec_copy_context(st->codec, sap->sdp_ctx->streams[i]->codec);
            st->time_base = sap->sdp_ctx->streams[i]->time_base;
        }
    }
    return ret;
}
 
/* SAP demuxer registration. AVFMT_NOFILE: the demuxer opens its own
 * UDP sockets instead of using an AVIOContext. */
AVInputFormat ff_sap_demuxer = {
    .name           = "sap",
    .long_name      = NULL_IF_CONFIG_SMALL("SAP input"),
    .priv_data_size = sizeof(struct SAPState),
    .read_probe     = sap_probe,
    .read_header    = sap_read_header,
    .read_packet    = sap_fetch_packet,
    .read_close     = sap_read_close,
    .flags          = AVFMT_NOFILE,
};
/contrib/sdk/sources/ffmpeg/libavformat/sapenc.c
0,0 → 1,270
/*
* Session Announcement Protocol (RFC 2974) muxer
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "libavutil/parseutils.h"
#include "libavutil/random_seed.h"
#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/time.h"
#include "internal.h"
#include "network.h"
#include "os_support.h"
#include "rtpenc_chain.h"
#include "url.h"
 
/* Muxer private context for periodic SAP announcements. */
struct SAPState {
    uint8_t *ann;       /* complete announcement packet (SAP header + SDP) */
    int ann_size;       /* size of ann in bytes */
    URLContext *ann_fd; /* UDP socket the announcements are sent on */
    int64_t last_time;  /* time of the last announcement; 0 = none sent yet */
};
 
/* Muxer write_trailer callback: finalize the per-stream chained RTP
 * muxers, announce the session deletion if the session was ever
 * announced, and release all resources. */
static int sap_write_close(AVFormatContext *s)
{
    struct SAPState *sap = s->priv_data;
    int i;

    for (i = 0; i < s->nb_streams; i++) {
        AVFormatContext *rtpctx = s->streams[i]->priv_data;

        if (!rtpctx)
            continue;
        av_write_trailer(rtpctx);
        avio_close(rtpctx->pb);
        avformat_free_context(rtpctx);
        s->streams[i]->priv_data = NULL;
    }

    if (sap->last_time && sap->ann && sap->ann_fd) {
        /* Flip the SAP "session deletion" flag bit and resend the
         * original announcement packet. */
        sap->ann[0] |= 4;
        ffurl_write(sap->ann_fd, sap->ann, sap->ann_size);
    }

    av_freep(&sap->ann);
    if (sap->ann_fd)
        ffurl_close(sap->ann_fd);
    ff_network_close();

    return 0;
}
 
/**
 * Open one chained RTP muxer per stream, build the SAP announcement
 * packet (SAP header + "application/sdp" + SDP payload) and open the
 * UDP socket it will be periodically sent on by sap_write_packet().
 *
 * Fixes vs. the previous revision:
 * - av_mallocz() failure now returns AVERROR(ENOMEM) instead of EIO;
 * - the "announcement too large" path now sets a negative error code
 *   instead of returning 0 (success) after tearing the muxer down.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int sap_write_header(AVFormatContext *s)
{
    struct SAPState *sap = s->priv_data;
    char host[1024], path[1024], url[1024], announce_addr[50] = "";
    char *option_list;
    int port = 9875, base_port = 5004, i, pos = 0, same_port = 0, ttl = 255;
    AVFormatContext **contexts = NULL;
    int ret = 0;
    struct sockaddr_storage localaddr;
    socklen_t addrlen = sizeof(localaddr);
    int udp_fd;
    AVDictionaryEntry* title = av_dict_get(s->metadata, "title", NULL, 0);

    if (!ff_network_init())
        return AVERROR(EIO);

    /* extract hostname and port */
    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &base_port,
                 path, sizeof(path), s->filename);
    if (base_port < 0)
        base_port = 5004;

    /* search for options */
    option_list = strrchr(path, '?');
    if (option_list) {
        char buf[50];
        if (av_find_info_tag(buf, sizeof(buf), "announce_port", option_list)) {
            port = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "same_port", option_list)) {
            same_port = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "ttl", option_list)) {
            ttl = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "announce_addr", option_list)) {
            av_strlcpy(announce_addr, buf, sizeof(announce_addr));
        }
    }

    if (!announce_addr[0]) {
        /* Pick the well-known SAP multicast group matching the address
         * family the destination host resolves to. */
        struct addrinfo hints = { 0 }, *ai = NULL;
        hints.ai_family = AF_UNSPEC;
        if (getaddrinfo(host, NULL, &hints, &ai)) {
            av_log(s, AV_LOG_ERROR, "Unable to resolve %s\n", host);
            ret = AVERROR(EIO);
            goto fail;
        }
        if (ai->ai_family == AF_INET) {
            /* Also known as sap.mcast.net */
            av_strlcpy(announce_addr, "224.2.127.254", sizeof(announce_addr));
#if HAVE_STRUCT_SOCKADDR_IN6
        } else if (ai->ai_family == AF_INET6) {
            /* With IPv6, you can use the same destination in many different
             * multicast subnets, to choose how far you want it routed.
             * This one is intended to be routed globally. */
            av_strlcpy(announce_addr, "ff0e::2:7ffe", sizeof(announce_addr));
#endif
        } else {
            freeaddrinfo(ai);
            av_log(s, AV_LOG_ERROR, "Host %s resolved to unsupported "
                                    "address family\n", host);
            ret = AVERROR(EIO);
            goto fail;
        }
        freeaddrinfo(ai);
    }

    contexts = av_mallocz(sizeof(AVFormatContext*) * s->nb_streams);
    if (!contexts) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->start_time_realtime = av_gettime();
    /* One RTP session per stream, on consecutive port pairs unless
     * same_port was requested. */
    for (i = 0; i < s->nb_streams; i++) {
        URLContext *fd;

        ff_url_join(url, sizeof(url), "rtp", NULL, host, base_port,
                    "?ttl=%d", ttl);
        if (!same_port)
            base_port += 2;
        ret = ffurl_open(&fd, url, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
        if (ret) {
            ret = AVERROR(EIO);
            goto fail;
        }
        ret = ff_rtp_chain_mux_open(&contexts[i], s, s->streams[i], fd, 0, i);
        if (ret < 0)
            goto fail;
        s->streams[i]->priv_data = contexts[i];
        av_strlcpy(contexts[i]->filename, url, sizeof(contexts[i]->filename));
    }

    if (s->nb_streams > 0 && title)
        av_dict_set(&contexts[0]->metadata, "title", title->value, 0);

    ff_url_join(url, sizeof(url), "udp", NULL, announce_addr, port,
                "?ttl=%d&connect=1", ttl);
    ret = ffurl_open(&sap->ann_fd, url, AVIO_FLAG_WRITE,
                     &s->interrupt_callback, NULL);
    if (ret) {
        ret = AVERROR(EIO);
        goto fail;
    }

    /* The SAP header carries the originating source address; read the
     * local address of the announcement socket. */
    udp_fd = ffurl_get_file_handle(sap->ann_fd);
    if (getsockname(udp_fd, (struct sockaddr*) &localaddr, &addrlen)) {
        ret = AVERROR(EIO);
        goto fail;
    }
    if (localaddr.ss_family != AF_INET
#if HAVE_STRUCT_SOCKADDR_IN6
        && localaddr.ss_family != AF_INET6
#endif
        ) {
        av_log(s, AV_LOG_ERROR, "Unsupported protocol family\n");
        ret = AVERROR(EIO);
        goto fail;
    }
    sap->ann_size = 8192;
    sap->ann = av_mallocz(sap->ann_size);
    if (!sap->ann) {
        ret = AVERROR(ENOMEM); /* was EIO; allocation failure is ENOMEM */
        goto fail;
    }
    /* SAP header byte 0: version 1, plus the address-type bit below
     * for IPv6 origins. */
    sap->ann[pos] = (1 << 5);
#if HAVE_STRUCT_SOCKADDR_IN6
    if (localaddr.ss_family == AF_INET6)
        sap->ann[pos] |= 0x10;
#endif
    pos++;
    sap->ann[pos++] = 0; /* Authentication length */
    /* Random message id hash so restarted sessions look distinct. */
    AV_WB16(&sap->ann[pos], av_get_random_seed());
    pos += 2;
    if (localaddr.ss_family == AF_INET) {
        memcpy(&sap->ann[pos], &((struct sockaddr_in*)&localaddr)->sin_addr,
               sizeof(struct in_addr));
        pos += sizeof(struct in_addr);
#if HAVE_STRUCT_SOCKADDR_IN6
    } else {
        memcpy(&sap->ann[pos], &((struct sockaddr_in6*)&localaddr)->sin6_addr,
               sizeof(struct in6_addr));
        pos += sizeof(struct in6_addr);
#endif
    }

    av_strlcpy(&sap->ann[pos], "application/sdp", sap->ann_size - pos);
    pos += strlen(&sap->ann[pos]) + 1;

    if (av_sdp_create(contexts, s->nb_streams, &sap->ann[pos],
                      sap->ann_size - pos)) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    av_freep(&contexts);
    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", &sap->ann[pos]);
    pos += strlen(&sap->ann[pos]);
    sap->ann_size = pos;

    if (sap->ann_size > sap->ann_fd->max_packet_size) {
        av_log(s, AV_LOG_ERROR, "Announcement too large to send in one "
                                "packet\n");
        /* Previously ret was still 0 here, so the function returned
         * success after tearing everything down. */
        ret = AVERROR(EIO);
        goto fail;
    }

    return 0;

fail:
    av_free(contexts);
    sap_write_close(s);
    return ret;
}
 
/* Muxer write_packet callback: re-announce the session at most every
 * five seconds, then forward the packet to the chained RTP muxer of
 * its stream. */
static int sap_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    struct SAPState *sap = s->priv_data;
    int64_t now = av_gettime();
    AVFormatContext *rtpctx;

    if (!sap->last_time || now - sap->last_time > 5000000) {
        int ret = ffurl_write(sap->ann_fd, sap->ann, sap->ann_size);
        /* Don't abort even if we get "Destination unreachable" */
        if (ret < 0 && ret != AVERROR(ECONNREFUSED))
            return ret;
        sap->last_time = now;
    }

    rtpctx = s->streams[pkt->stream_index]->priv_data;
    return ff_write_chained(rtpctx, 0, pkt, s);
}
 
/* SAP muxer registration. AVFMT_NOFILE: the muxer opens its own UDP
 * sockets; AVFMT_GLOBALHEADER: codec extradata must be available up
 * front for the SDP. */
AVOutputFormat ff_sap_muxer = {
    .name           = "sap",
    .long_name      = NULL_IF_CONFIG_SMALL("SAP output"),
    .priv_data_size = sizeof(struct SAPState),
    .audio_codec    = AV_CODEC_ID_AAC,
    .video_codec    = AV_CODEC_ID_MPEG4,
    .write_header   = sap_write_header,
    .write_packet   = sap_write_packet,
    .write_trailer  = sap_write_close,
    .flags          = AVFMT_NOFILE | AVFMT_GLOBALHEADER,
};
/contrib/sdk/sources/ffmpeg/libavformat/sauce.c
0,0 → 1,104
/*
* SAUCE header parser
* Copyright (c) 2010 Peter Ross <pross@xvid.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* SAUCE header parser
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "avformat.h"
#include "sauce.h"
 
/* Parse the 128-byte SAUCE record at the end of the file: extract
 * metadata (title, artist, ...), optionally derive width/height for
 * the first stream, read any COMNT comment block, and subtract the
 * trailer (and comment block) from *fsize so the caller does not treat
 * them as payload data. Returns 0 on success, -1 if no SAUCE record
 * is present. */
int ff_sauce_read(AVFormatContext *avctx, uint64_t *fsize, int *got_width, int get_height)
{
    AVIOContext *pb = avctx->pb;
    char buf[36];
    int datatype, filetype, t1, t2, nb_comments;
    /* The SAUCE record is always the last 128 bytes of the file. */
    uint64_t start_pos = avio_size(pb) - 128;

    avio_seek(pb, start_pos, SEEK_SET);
    if (avio_read(pb, buf, 7) != 7)
        return -1;
    if (memcmp(buf, "SAUCE00", 7))
        return -1;

/* Read a fixed-size field and store it as metadata if non-empty. */
#define GET_SAUCE_META(name,size) \
    if (avio_read(pb, buf, size) == size && buf[0]) { \
        buf[size] = 0; \
        av_dict_set(&avctx->metadata, name, buf, 0); \
    }

    GET_SAUCE_META("title",     35)
    GET_SAUCE_META("artist",    20)
    GET_SAUCE_META("publisher", 20)
    GET_SAUCE_META("date",      8)
    avio_skip(pb, 4); /* file size field, unused here */
    datatype    = avio_r8(pb);
    filetype    = avio_r8(pb);
    t1          = avio_rl16(pb); /* TInfo1 */
    t2          = avio_rl16(pb); /* TInfo2 */
    nb_comments = avio_r8(pb);
    avio_skip(pb, 1); /* flags */
    avio_skip(pb, 4);
    GET_SAUCE_META("encoder", 22);

    if (got_width && datatype && filetype) {
        if ((datatype == 1 && filetype <=2) || (datatype == 5 && filetype == 255) || datatype == 6) {
            /* t1/t2 are in character cells: 8 px wide, 16 px tall. */
            if (t1) {
                avctx->streams[0]->codec->width = t1<<3;
                *got_width = 1;
            }
            if (get_height && t2)
                avctx->streams[0]->codec->height = t2<<4;
        } else if (datatype == 5) {
            /* For this data type the file type value itself encodes
             * the width (except filetype 1, which uses t1). */
            if (filetype) {
                avctx->streams[0]->codec->width = (filetype == 1 ? t1 : filetype) << 4;
                *got_width = 1;
            }
            if (get_height && t2)
                avctx->streams[0]->codec->height = t2<<4;
        }
    }

    /* Exclude the SAUCE record from the payload size. */
    *fsize -= 128;

    if (nb_comments > 0) {
        /* The comment block ("COMNT" + 64 bytes per line) immediately
         * precedes the SAUCE record. */
        avio_seek(pb, start_pos - 64*nb_comments - 5, SEEK_SET);
        if (avio_read(pb, buf, 5) == 5 && !memcmp(buf, "COMNT", 5)) {
            int i;
            char *str = av_malloc(65*nb_comments + 1);
            *fsize -= 64*nb_comments + 5;
            if (!str)
                return 0;
            /* Join the fixed-width comment lines with newlines. */
            for (i = 0; i < nb_comments; i++) {
                if (avio_read(pb, str + 65*i, 64) != 64)
                    break;
                str[65*i + 64] = '\n';
            }
            str[65*i] = 0;
            /* AV_DICT_DONT_STRDUP_VAL: the dictionary takes ownership of str. */
            av_dict_set(&avctx->metadata, "comment", str, AV_DICT_DONT_STRDUP_VAL);
        }
    }

    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/sauce.h
0,0 → 1,40
/*
* SAUCE header parser
* Copyright (c) 2010 Peter Ross <pross@xvid.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* SAUCE header parser
*/
 
#ifndef AVFORMAT_SAUCE_H
#define AVFORMAT_SAUCE_H
 
#include "avformat.h"
 
/**
* @param avctx AVFormatContext
* @param[out] fsize return length of file, less SAUCE header
 * @param[out] got_width set to non-zero if the SAUCE header reported a width
* @param get_height Tell SAUCE header to parse height
*/
int ff_sauce_read(AVFormatContext *avctx, uint64_t *fsize, int *got_width, int get_height);
 
#endif /* AVFORMAT_SAUCE_H */
/contrib/sdk/sources/ffmpeg/libavformat/sbgdec.c
0,0 → 1,1511
/*
* SBG (SBaGen) file format decoder
* Copyright (c) 2011 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "internal.h"
 
#define SBG_SCALE (1 << 16)
#define DAY (24 * 60 * 60)
#define DAY_TS ((int64_t)DAY * AV_TIME_BASE)
 
/* Demuxer private options (see sbg_options at the end of the file). */
struct sbg_demuxer {
    AVClass *class;
    int sample_rate;   /* 0 = take the rate from the script (-r option) */
    int frame_size;    /* 0 = default to sample_rate / 10 */
    int max_file_size; /* refuse scripts larger than this many bytes */
};

/* Non-owning [s, e) slice of the script buffer. */
struct sbg_string {
    char *s;
    char *e;
};

/* Fade shapes; values are chosen so that 'out & in' combines two fades
   (ADAPT has both bits set and therefore dominates). */
enum sbg_fade_type {
    SBG_FADE_SILENCE = 0,
    SBG_FADE_SAME = 1,
    SBG_FADE_ADAPT = 3,
};

struct sbg_fade {
    int8_t in, out, slide;
};

enum sbg_synth_type {
    SBG_TYPE_NONE,
    SBG_TYPE_SINE,
    SBG_TYPE_NOISE,
    SBG_TYPE_BELL,
    SBG_TYPE_MIX,
    SBG_TYPE_SPIN,
};

/* bell: freq constant, ampl decreases exponentially, can be approx lin */

struct sbg_timestamp {
    int64_t t;
    char type; /* 0 for relative, 'N' for now, 'T' for absolute */
};

/* A named tone-set ("S", list of synth channels) or block ("B", list of
   time sequences); elements indexes into script.synth or script.block_tseq. */
struct sbg_script_definition {
    char *name;        /* points into the script buffer, not NUL-terminated */
    int name_len;
    int elements, nb_elements;
    char type; /* 'S' or 'B' */
};

/* One synth channel of a tone-set; carrier/beat/vol are fixed-point
   (scaled by SBG_SCALE). */
struct sbg_script_synth {
    int carrier;
    int beat;
    int vol;
    enum sbg_synth_type type;
    struct {
        int l, r;  /* indexes of the previous intervals for this channel */
    } ref;
};

/* One "timestamp name" line of the script. */
struct sbg_script_tseq {
    struct sbg_timestamp ts;
    char *name;    /* points into the script buffer, not NUL-terminated */
    int name_len;
    int lock;      /* recursion guard while expanding block definitions */
    struct sbg_fade fade;
};

/* A fully expanded event: a tone-set active from ts, with the transition
   timestamps (ts_int/ts_trans/ts_next) filled in by generate_intervals(). */
struct sbg_script_event {
    int64_t ts;
    int64_t ts_int, ts_trans, ts_next;
    int elements, nb_elements;
    struct sbg_fade fade;
};

/* Complete parsed script: definitions, raw time sequences, expanded events
   and the global options. */
struct sbg_script {
    struct sbg_script_definition *def;
    struct sbg_script_synth *synth;
    struct sbg_script_tseq *tseq;
    struct sbg_script_tseq *block_tseq;
    struct sbg_script_event *events;
    int nb_def;
    int nb_tseq;
    int nb_events;
    int nb_synth;
    int64_t start_ts;
    int64_t end_ts;
    int64_t opt_fade_time;
    int64_t opt_duration;
    char *opt_mix;
    int sample_rate;
    uint8_t opt_start_at_first;
    uint8_t opt_end_at_last;
};

/* Parser state: cursor into the script buffer plus growth bookkeeping for
   the dynamic arrays in scs. */
struct sbg_parser {
    void *log;             /* AVFormatContext (or NULL) for av_log */
    char *script, *end;
    char *cursor;
    struct sbg_script scs;
    struct sbg_timestamp current_time;
    int nb_block_tseq;
    int nb_def_max, nb_synth_max, nb_tseq_max, nb_block_tseq_max;
    int line_no;
    char err_msg[128];
};

/* Interval types as stored in the wavesynth extradata. */
enum ws_interval_type {
    WS_SINE  = MKTAG('S','I','N','E'),
    WS_NOISE = MKTAG('N','O','I','S'),
};

/* One wavesynth interval: linear ramp of frequency f1->f2 and amplitude
   a1->a2 over [ts1, ts2), on the given channel mask. */
struct ws_interval {
    int64_t ts1, ts2;
    enum ws_interval_type type;
    uint32_t channels;
    int32_t f1, f2;
    int32_t a1, a2;
    uint32_t phi; /* phase reference: index of previous interval | 0x80000000 */
};

struct ws_intervals {
    struct ws_interval *inter;
    int nb_inter;
    int max_inter;
};
 
/**
 * Append one zero-initialized element to a dynamic array, growing the
 * storage geometrically (minimum 32 elements) when it is full.
 *
 * @param array    in/out pointer to the array storage
 * @param elsize   size of one element in bytes
 * @param size     in/out number of used elements; incremented on success
 * @param max_size in/out number of allocated elements
 * @return pointer to the new (zeroed) element, or NULL on allocation
 *         failure or when the element count can no longer grow
 */
static void *alloc_array_elem(void **array, size_t elsize,
                              int *size, int *max_size)
{
    char *slot;

    if (*size == *max_size) {
        /* double the capacity, clamped so the int count cannot overflow */
        int grown = FFMAX(32, FFMIN(*max_size, INT_MAX / 2) * 2);
        if (*size >= grown)
            return NULL;
        *array = av_realloc_f(*array, grown, elsize);
        if (!*array)
            return NULL;
        *max_size = grown;
    }
    slot = (char *)*array + elsize * *size;
    memset(slot, 0, elsize);
    (*size)++;
    return slot;
}
 
/**
 * Parse a "hours:minutes[:seconds]" timestamp.
 *
 * Seconds may be fractional. On success *rtime receives the value in
 * AV_TIME_BASE units.
 *
 * @return number of characters consumed, 0 if str does not start with a
 *         valid timestamp (then *rtime is untouched)
 */
static int str_to_time(const char *str, int64_t *rtime)
{
    const char *pos = str;
    char *tail;
    int hh, mm;
    double ss = 0;

    if (*pos < '0' || *pos > '9')
        return 0;
    hh = strtol(pos, &tail, 10);
    /* a ':' followed by a digit must separate hours and minutes */
    if (tail == pos || *tail != ':' || tail[1] < '0' || tail[1] > '9')
        return 0;
    pos = tail + 1;
    mm = strtol(pos, &tail, 10);
    if (tail == pos)
        return 0;
    pos = tail;
    if (*tail == ':') {
        /* optional fractional seconds part */
        ss = strtod(pos + 1, &tail);
        if (tail > pos + 1)
            pos = tail;
    }
    *rtime = (hh * 3600 + mm * 60 + ss) * AV_TIME_BASE;
    return pos - str;
}
 
/* Horizontal whitespace for the SBG lexer: space, tab or CR — but not LF,
   which is syntactically significant. */
static inline int is_space(char c)
{
    switch (c) {
    case ' ':
    case '\t':
    case '\r':
        return 1;
    default:
        return 0;
    }
}
 
/**
 * Convert a floating-point value into the SBG_SCALE fixed-point format.
 *
 * @param log context for error logging, may be NULL
 * @param d   value to convert
 * @param m   extra multiplier applied before scaling
 * @param r   receives the fixed-point result
 * @return 0 on success, AVERROR(EDOM) if the result does not fit in an int
 */
static inline int scale_double(void *log, double d, double m, int *r)
{
    double scaled = m * d * SBG_SCALE;

    if (scaled < INT_MIN || scaled >= INT_MAX) {
        if (log)
            av_log(log, AV_LOG_ERROR, "%g is too large\n", d);
        return AVERROR(EDOM);
    }
    *r = scaled;
    return 0;
}
 
/* Consume a run of horizontal whitespace; return non-zero if anything
   was consumed. */
static int lex_space(struct sbg_parser *p)
{
    char *start = p->cursor;

    for (; p->cursor < p->end && is_space(*p->cursor); p->cursor++)
        ;
    return p->cursor > start;
}
 
/* Consume the single character c if it is next; return 1 if consumed. */
static int lex_char(struct sbg_parser *p, char c)
{
    if (p->cursor < p->end && *p->cursor == c) {
        p->cursor++;
        return 1;
    }
    return 0;
}
 
/* Consume a floating-point literal; return 1 and store it in *r on
   success, 0 (cursor unchanged) otherwise.  Whitespace and newline are
   rejected up front so strtod cannot skip over them. */
static int lex_double(struct sbg_parser *p, double *r)
{
    char *tail;
    double val;

    if (p->cursor == p->end || is_space(*p->cursor) || *p->cursor == '\n')
        return 0;
    val = strtod(p->cursor, &tail);
    if (tail <= p->cursor)
        return 0;
    *r = val;
    p->cursor = tail;
    return 1;
}
 
/* Consume the fixed l-byte token t; return 1 if it matched. */
static int lex_fixed(struct sbg_parser *p, const char *t, int l)
{
    if (p->end - p->cursor >= l && !memcmp(p->cursor, t, l)) {
        p->cursor += l;
        return 1;
    }
    return 0;
}
 
/* Consume an optional '#' comment and the newline ending the current
   line, then any leading whitespace of the next line.  Returns 1 on
   success (end of buffer counts as a final newline), 0 if the line does
   not actually end here. */
static int lex_line_end(struct sbg_parser *p)
{
    if (p->cursor < p->end && *p->cursor == '#')
        do
            p->cursor++;
        while (p->cursor < p->end && *p->cursor != '\n');
    if (p->cursor == p->end)
        /* simulate final LF for files lacking it */
        return 1;
    if (*p->cursor != '\n')
        return 0;
    p->cursor++;
    p->line_no++;
    lex_space(p);
    return 1;
}
 
/* Consume a word delimited by whitespace or newline, plus the trailing
   whitespace; the word itself is returned as a slice in *rs. */
static int lex_wsword(struct sbg_parser *p, struct sbg_string *rs)
{
    char *head = p->cursor;
    char *tail = head;

    if (head == p->end || *head == '\n')
        return 0;
    while (tail < p->end && *tail != '\n' && !is_space(*tail))
        tail++;
    rs->s = head;
    rs->e = p->cursor = tail;
    lex_space(p);
    return 1;
}
 
/* Consume an identifier ([A-Za-z0-9_-]+) into *rs; return 1 on success,
   0 (cursor unchanged) if no identifier character is present. */
static int lex_name(struct sbg_parser *p, struct sbg_string *rs)
{
    char *head = p->cursor;
    char *tail = head;

    while (tail < p->end &&
           ((*tail >= 'a' && *tail <= 'z') ||
            (*tail >= 'A' && *tail <= 'Z') ||
            (*tail >= '0' && *tail <= '9') ||
            *tail == '_' || *tail == '-'))
        tail++;
    if (tail == head)
        return 0;
    rs->s = head;
    rs->e = p->cursor = tail;
    return 1;
}
 
/* Consume a "hh:mm[:ss]" timestamp (see str_to_time); return 1 when one
   was present. */
static int lex_time(struct sbg_parser *p, int64_t *rt)
{
    int consumed = str_to_time(p->cursor, rt);

    p->cursor += consumed;
    return consumed > 0;
}
 
/* Evaluate c and return from the enclosing function on failure:
   a negative value is propagated as-is, 0 becomes AVERROR_INVALIDDATA.
   NOTE(review): the expansion already ends with ';', so invocations
   written as FORWARD_ERROR(x); produce a harmless extra empty statement. */
#define FORWARD_ERROR(c) \
    do { \
        int errcode = c; \
        if (errcode <= 0) \
            return errcode ? errcode : AVERROR_INVALIDDATA; \
    } while(0);
 
/* "-i" mode: play the given tone-set immediately.  Not implemented;
   always fails with an explanatory error message. */
static int parse_immediate(struct sbg_parser *p)
{
    snprintf(p->err_msg, sizeof(p->err_msg),
             "immediate sequences not yet implemented");
    return AVERROR_PATCHWELCOME;
}
 
/* "-p" mode: run a preprogrammed sequence.  Not implemented; always
   fails with an explanatory error message. */
static int parse_preprogrammed(struct sbg_parser *p)
{
    snprintf(p->err_msg, sizeof(p->err_msg),
             "preprogrammed sequences not yet implemented");
    return AVERROR_PATCHWELCOME;
}
 
/* Fetch the mandatory argument of option o into *r; on a missing
   argument, record a diagnostic and fail. */
static int parse_optarg(struct sbg_parser *p, char o, struct sbg_string *r)
{
    if (lex_wsword(p, r))
        return 1;
    snprintf(p->err_msg, sizeof(p->err_msg),
             "option '%c' requires an argument", o);
    return AVERROR_INVALIDDATA;
}
 
/**
 * Parse the leading SBaGen command-line-style options of the script.
 *
 * Recognized: -S -E (start/end anchoring), -i -p (modes, unimplemented),
 * -F (fade time, ms), -L (total duration), -T (start time), -m (mix file),
 * -q (speed factor, only 1 accepted), -r (sample rate, >= 40).
 *
 * @return 1 on success, 0 if the line does not start with an option,
 *         a negative AVERROR code on error (err_msg is filled in)
 */
static int parse_options(struct sbg_parser *p)
{
    struct sbg_string ostr, oarg;
    char mode = 0;
    int r;
    char *tptr;
    double v;

    if (p->cursor == p->end || *p->cursor != '-')
        return 0;
    /* each word after a '-' is a cluster of single-letter options */
    while (lex_char(p, '-') && lex_wsword(p, &ostr)) {
        for (; ostr.s < ostr.e; ostr.s++) {
            char opt = *ostr.s;
            switch (opt) {
            case 'S':
                p->scs.opt_start_at_first = 1;
                break;
            case 'E':
                p->scs.opt_end_at_last = 1;
                break;
            case 'i':
                mode = 'i';
                break;
            case 'p':
                mode = 'p';
                break;
            case 'F':
                /* fade time in milliseconds */
                FORWARD_ERROR(parse_optarg(p, opt, &oarg));
                v = strtod(oarg.s, &tptr);
                if (oarg.e != tptr) {
                    snprintf(p->err_msg, sizeof(p->err_msg),
                             "syntax error for option -F");
                    return AVERROR_INVALIDDATA;
                }
                p->scs.opt_fade_time = v * AV_TIME_BASE / 1000;
                break;
            case 'L':
                /* total duration as hh:mm[:ss] */
                FORWARD_ERROR(parse_optarg(p, opt, &oarg));
                r = str_to_time(oarg.s, &p->scs.opt_duration);
                if (oarg.e != oarg.s + r) {
                    snprintf(p->err_msg, sizeof(p->err_msg),
                             "syntax error for option -L");
                    return AVERROR_INVALIDDATA;
                }
                break;
            case 'T':
                /* absolute start time as hh:mm[:ss] */
                FORWARD_ERROR(parse_optarg(p, opt, &oarg));
                r = str_to_time(oarg.s, &p->scs.start_ts);
                if (oarg.e != oarg.s + r) {
                    snprintf(p->err_msg, sizeof(p->err_msg),
                             "syntax error for option -T");
                    return AVERROR_INVALIDDATA;
                }
                break;
            case 'm':
                /* mix file name: stored as a NUL-terminated copy */
                FORWARD_ERROR(parse_optarg(p, opt, &oarg));
                tptr = av_malloc(oarg.e - oarg.s + 1);
                if (!tptr)
                    return AVERROR(ENOMEM);
                memcpy(tptr, oarg.s, oarg.e - oarg.s);
                tptr[oarg.e - oarg.s] = 0;
                av_free(p->scs.opt_mix);
                p->scs.opt_mix = tptr;
                break;
            case 'q':
                /* speed factor: parsed but only the value 1 is supported */
                FORWARD_ERROR(parse_optarg(p, opt, &oarg));
                v = strtod(oarg.s, &tptr);
                if (oarg.e != tptr) {
                    snprintf(p->err_msg, sizeof(p->err_msg),
                             "syntax error for option -q");
                    return AVERROR_INVALIDDATA;
                }
                if (v != 1) {
                    snprintf(p->err_msg, sizeof(p->err_msg),
                             "speed factor other than 1 not supported");
                    return AVERROR_PATCHWELCOME;
                }
                break;
            case 'r':
                /* sample rate; 40 Hz is the accepted minimum */
                FORWARD_ERROR(parse_optarg(p, opt, &oarg));
                r = strtol(oarg.s, &tptr, 10);
                if (oarg.e != tptr) {
                    snprintf(p->err_msg, sizeof(p->err_msg),
                             "syntax error for option -r");
                    return AVERROR_INVALIDDATA;
                }
                if (r < 40) {
                    snprintf(p->err_msg, sizeof(p->err_msg),
                             "invalid sample rate");
                    return AVERROR_PATCHWELCOME;
                }
                p->scs.sample_rate = r;
                break;
            default:
                snprintf(p->err_msg, sizeof(p->err_msg),
                         "unknown option: '%c'", *ostr.s);
                return AVERROR_INVALIDDATA;
            }
        }
    }
    switch (mode) {
    case 'i':
        return parse_immediate(p);
    case 'p':
        return parse_preprogrammed(p);
    case 0:
        if (!lex_line_end(p))
            return AVERROR_INVALIDDATA;
        return 1;
    }
    return AVERROR_BUG;
}
 
/**
 * Parse a timestamp: "NOW", an absolute hh:mm[:ss], either optionally
 * followed by one or more "+offset" terms, or offsets alone (relative).
 *
 * @param[out] rts  base timestamp and its type ('N', 'T', or 0 = relative)
 * @param[out] rrel sum of the "+" offsets
 * @return 1 if a timestamp was parsed (must be followed by whitespace),
 *         0 if none is present, negative AVERROR on malformed input
 */
static int parse_timestamp(struct sbg_parser *p,
                           struct sbg_timestamp *rts, int64_t *rrel)
{
    int64_t abs = 0, rel = 0, dt;
    char type = 0;
    int r;

    if (lex_fixed(p, "NOW", 3)) {
        type = 'N';
        r = 1;
    } else {
        r = lex_time(p, &abs);
        if (r)
            type = 'T';
    }
    /* accumulate any number of "+hh:mm[:ss]" offsets */
    while (lex_char(p, '+')) {
        if (!lex_time(p, &dt))
            return AVERROR_INVALIDDATA;
        rel += dt;
        r = 1;
    }
    if (r) {
        /* a timestamp must be separated from what follows by whitespace */
        if (!lex_space(p))
            return AVERROR_INVALIDDATA;
        rts->type = type;
        rts->t = abs;
        *rrel = rel;
    }
    return r;
}
 
static int parse_fade(struct sbg_parser *p, struct sbg_fade *fr)
{
struct sbg_fade f = {0};
 
if (lex_char(p, '<'))
f.in = SBG_FADE_SILENCE;
else if (lex_char(p, '-'))
f.in = SBG_FADE_SAME;
else if (lex_char(p, '='))
f.in = SBG_FADE_ADAPT;
else
return 0;
if (lex_char(p, '>'))
f.out = SBG_FADE_SILENCE;
else if (lex_char(p, '-'))
f.out = SBG_FADE_SAME;
else if (lex_char(p, '='))
f.out = SBG_FADE_ADAPT;
else
return AVERROR_INVALIDDATA;
*fr = f;
return 1;
}
 
/**
 * Parse one "timestamp [fade] name [->]" line and append it to the
 * script's (or current block's) time-sequence list.
 *
 * @param inblock non-zero when parsing inside a block definition, where
 *                only relative timestamps are allowed
 * @return 1 on success, 0 if the line is not a time sequence,
 *         negative AVERROR on error
 */
static int parse_time_sequence(struct sbg_parser *p, int inblock)
{
    struct sbg_timestamp ts;
    int64_t rel_ts;
    int r;
    struct sbg_fade fade = { SBG_FADE_SAME, SBG_FADE_SAME, 0 };
    struct sbg_string name;
    struct sbg_script_tseq *tseq;

    r = parse_timestamp(p, &ts, &rel_ts);
    if (!r)
        return 0;
    if (r < 0)
        return r;
    if (ts.type) {
        /* an absolute/NOW base updates the running reference time */
        if (inblock)
            return AVERROR_INVALIDDATA;
        p->current_time.type = ts.type;
        p->current_time.t = ts.t;
    } else if(!inblock && !p->current_time.type) {
        snprintf(p->err_msg, sizeof(p->err_msg),
                 "relative time without previous absolute time");
        return AVERROR_INVALIDDATA;
    }
    /* resolve the offsets against the running reference time */
    ts.type = p->current_time.type;
    ts.t = p->current_time.t + rel_ts;
    r = parse_fade(p, &fade);
    if (r < 0)
        return r;
    lex_space(p);
    if (!lex_name(p, &name))
        return AVERROR_INVALIDDATA;
    lex_space(p);
    if (lex_fixed(p, "->", 2)) {
        /* "->" marks a slide into the next event */
        fade.slide = SBG_FADE_ADAPT;
        lex_space(p);
    }
    if (!lex_line_end(p))
        return AVERROR_INVALIDDATA;
    tseq = inblock ?
           alloc_array_elem((void **)&p->scs.block_tseq, sizeof(*tseq),
                            &p->nb_block_tseq, &p->nb_block_tseq_max) :
           alloc_array_elem((void **)&p->scs.tseq, sizeof(*tseq),
                            &p->scs.nb_tseq, &p->nb_tseq_max);
    if (!tseq)
        return AVERROR(ENOMEM);
    tseq->ts = ts;
    tseq->name = name.s;
    tseq->name_len = name.e - name.s;
    tseq->fade = fade;
    return 1;
}
 
/* "waveNN:" custom waveform definition.  Not implemented; always fails
   with an explanatory error message. */
static int parse_wave_def(struct sbg_parser *p, int wavenum)
{
    snprintf(p->err_msg, sizeof(p->err_msg),
             "waveform definitions not yet implemented");
    return AVERROR_PATCHWELCOME;
}
 
/**
 * Parse the body of a "name: {" block definition: a list of relative
 * time sequences terminated by '}'.
 *
 * On success, def is filled as a 'B' definition referencing the newly
 * appended entries of scs.block_tseq.
 *
 * @return 1 on success, negative AVERROR on error (an empty block is
 *         invalid)
 */
static int parse_block_def(struct sbg_parser *p,
                           struct sbg_script_definition *def)
{
    int r, tseq;

    lex_space(p);
    if (!lex_line_end(p))
        return AVERROR_INVALIDDATA;
    /* remember where this block's sequences start */
    tseq = p->nb_block_tseq;
    while (1) {
        r = parse_time_sequence(p, 1);
        if (r < 0)
            return r;
        if (!r)
            break;
    }
    if (!lex_char(p, '}'))
        return AVERROR_INVALIDDATA;
    lex_space(p);
    if (!lex_line_end(p))
        return AVERROR_INVALIDDATA;
    def->type = 'B';
    def->elements = tseq;
    def->nb_elements = p->nb_block_tseq - tseq;
    if (!def->nb_elements)
        return AVERROR_INVALIDDATA;
    return 1;
}
 
/**
 * Parse an optional "/volume" suffix (percentage) into fixed point.
 *
 * @return 1 if a volume was parsed, 0 if none is present (*vol
 *         untouched), negative AVERROR on a malformed or out-of-range
 *         value
 */
static int parse_volume(struct sbg_parser *p, int *vol)
{
    double percent;

    if (!lex_char(p, '/'))
        return 0;
    if (!lex_double(p, &percent))
        return AVERROR_INVALIDDATA;
    return scale_double(p->log, percent, 0.01, vol) ? AVERROR(ERANGE) : 1;
}
 
/**
 * Parse a binaural sine channel: "carrier[beat][/vol]".
 *
 * @return 1 on success, 0 if no number starts here, negative AVERROR on
 *         malformed or out-of-range values
 */
static int parse_synth_channel_sine(struct sbg_parser *p,
                                    struct sbg_script_synth *synth)
{
    double carrier_hz, beat_hz;
    int carrier, beat, vol;

    if (!lex_double(p, &carrier_hz))
        return 0;
    if (!lex_double(p, &beat_hz))
        beat_hz = 0; /* beat is optional */
    FORWARD_ERROR(parse_volume(p, &vol));
    if (scale_double(p->log, carrier_hz, 1, &carrier) < 0 ||
        scale_double(p->log, beat_hz, 1, &beat) < 0)
        return AVERROR(EDOM);
    synth->type    = SBG_TYPE_SINE;
    synth->carrier = carrier;
    synth->beat    = beat;
    synth->vol     = vol;
    return 1;
}
 
/**
 * Parse a pink-noise channel: "pink[/vol]".
 *
 * @return 1 on success, 0 if the keyword is absent, negative AVERROR on
 *         a malformed volume
 */
static int parse_synth_channel_pink(struct sbg_parser *p,
                                    struct sbg_script_synth *synth)
{
    int vol;

    if (!lex_fixed(p, "pink", 4))
        return 0;
    FORWARD_ERROR(parse_volume(p, &vol));
    synth->type = SBG_TYPE_NOISE;
    synth->vol  = vol;
    return 1;
}
 
/**
 * Parse a bell channel: "bell carrier[/vol]".
 *
 * @return 1 on success, 0 if the keyword is absent, negative AVERROR on
 *         malformed or out-of-range values
 */
static int parse_synth_channel_bell(struct sbg_parser *p,
                                    struct sbg_script_synth *synth)
{
    double carrier_hz;
    int carrier, vol;

    if (!lex_fixed(p, "bell", 4))
        return 0;
    if (!lex_double(p, &carrier_hz))
        return AVERROR_INVALIDDATA; /* carrier is mandatory for bells */
    FORWARD_ERROR(parse_volume(p, &vol));
    if (scale_double(p->log, carrier_hz, 1, &carrier) < 0)
        return AVERROR(EDOM);
    synth->type    = SBG_TYPE_BELL;
    synth->carrier = carrier;
    synth->vol     = vol;
    return 1;
}
 
/**
 * Parse a mix channel: "mix[/vol]".  (Mixing itself is unimplemented;
 * a warning is emitted elsewhere.)
 *
 * @return 1 on success, 0 if the keyword is absent, negative AVERROR on
 *         a malformed volume
 */
static int parse_synth_channel_mix(struct sbg_parser *p,
                                   struct sbg_script_synth *synth)
{
    int vol;

    if (!lex_fixed(p, "mix", 3))
        return 0;
    FORWARD_ERROR(parse_volume(p, &vol));
    synth->type = SBG_TYPE_MIX;
    synth->vol  = vol;
    return 1;
}
 
/**
 * Parse a spinning-noise channel: "spin:carrier beat[/vol]".
 *
 * @return 1 on success, 0 if the keyword is absent, negative AVERROR on
 *         malformed or out-of-range values
 */
static int parse_synth_channel_spin(struct sbg_parser *p,
                                    struct sbg_script_synth *synth)
{
    double carrier_hz, beat_hz;
    int carrier, beat, vol;

    if (!lex_fixed(p, "spin:", 5))
        return 0;
    if (!lex_double(p, &carrier_hz))
        return AVERROR_INVALIDDATA;
    if (!lex_double(p, &beat_hz))
        return AVERROR_INVALIDDATA; /* both numbers are mandatory */
    FORWARD_ERROR(parse_volume(p, &vol));
    if (scale_double(p->log, carrier_hz, 1, &carrier) < 0 ||
        scale_double(p->log, beat_hz, 1, &beat) < 0)
        return AVERROR(EDOM);
    synth->type    = SBG_TYPE_SPIN;
    synth->carrier = carrier;
    synth->beat    = beat;
    synth->vol     = vol;
    return 1;
}
 
/**
 * Parse one channel of a tone-set, trying each channel syntax in turn:
 * "-" (silence), pink, bell, mix, spin, and finally a plain sine spec.
 *
 * A slot is pre-allocated in scs.synth and given back if nothing parses.
 *
 * @return 1 on success, 0 if no channel starts here, negative AVERROR
 *         on error
 */
static int parse_synth_channel(struct sbg_parser *p)
{
    struct sbg_script_synth *synth;
    int r;

    synth = alloc_array_elem((void **)&p->scs.synth, sizeof(*synth),
                             &p->scs.nb_synth, &p->nb_synth_max);
    if (!synth)
        return AVERROR(ENOMEM);
    /* each parser returns 0 when its syntax does not match; stop on the
       first match or on a hard error */
    if (!(r = lex_char(p, '-')) &&
        !(r = parse_synth_channel_pink(p, synth)) &&
        !(r = parse_synth_channel_bell(p, synth)) &&
        !(r = parse_synth_channel_mix(p, synth)) &&
        !(r = parse_synth_channel_spin(p, synth)))
        /* Unimplemented: wave%d:%f%f/vol (carrier, beat) */
        r = parse_synth_channel_sine(p, synth);
    if (r <= 0)
        p->scs.nb_synth--; /* give the pre-allocated slot back */
    return r;
}
 
/**
 * Parse the body of a "name:" tone-set definition: whitespace-separated
 * synth channels up to the end of the line.
 *
 * On success, def is filled as an 'S' definition referencing the newly
 * appended entries of scs.synth.
 *
 * @return 1 on success, negative AVERROR on error (an empty tone-set is
 *         invalid)
 */
static int parse_synth_def(struct sbg_parser *p,
                           struct sbg_script_definition *def)
{
    int r, synth;

    /* remember where this definition's channels start */
    synth = p->scs.nb_synth;
    while (1) {
        r = parse_synth_channel(p);
        if (r < 0)
            return r;
        /* stop at the last channel: no match, or no separating space */
        if (!r || !lex_space(p))
            break;
    }
    lex_space(p);
    if (synth == p->scs.nb_synth)
        return AVERROR_INVALIDDATA;
    if (!lex_line_end(p))
        return AVERROR_INVALIDDATA;
    def->type = 'S';
    def->elements = synth;
    def->nb_elements = p->scs.nb_synth - synth;
    return 1;
}
 
/**
 * Parse a "name: ..." definition line: a waveform ("waveNN:"), a block
 * ("name: {") or a tone-set ("name: channels...").
 *
 * @return 1 on success, 0 (cursor restored) if the line is not a
 *         definition, negative AVERROR on error
 */
static int parse_named_def(struct sbg_parser *p)
{
    char *cursor_save = p->cursor;
    struct sbg_string name;
    struct sbg_script_definition *def;

    /* backtrack if "name:" followed by whitespace is not present */
    if (!lex_name(p, &name) || !lex_char(p, ':') || !lex_space(p)) {
        p->cursor = cursor_save;
        return 0;
    }
    /* "wave00".."wave99" selects a custom waveform definition */
    if (name.e - name.s == 6 && !memcmp(name.s, "wave", 4) &&
        name.s[4] >= '0' && name.s[4] <= '9' &&
        name.s[5] >= '0' && name.s[5] <= '9') {
        int wavenum = (name.s[4] - '0') * 10 + (name.s[5] - '0');
        return parse_wave_def(p, wavenum);
    }
    def = alloc_array_elem((void **)&p->scs.def, sizeof(*def),
                           &p->scs.nb_def, &p->nb_def_max);
    if (!def)
        return AVERROR(ENOMEM);
    def->name = name.s;
    def->name_len = name.e - name.s;
    if (lex_char(p, '{'))
        return parse_block_def(p, def);
    return parse_synth_def(p, def);
}
 
/* Release every dynamic array owned by the script and NULL the pointers
   (safe to call on a zeroed or partially built script). */
static void free_script(struct sbg_script *s)
{
    av_freep(&s->events);
    av_freep(&s->block_tseq);
    av_freep(&s->tseq);
    av_freep(&s->synth);
    av_freep(&s->def);
    av_freep(&s->opt_mix);
}
 
/**
 * Parse a whole SBG script into *rscript.
 *
 * The resulting script keeps pointers into the script buffer (names),
 * so the buffer must outlive it until expansion is done.
 *
 * @param log    logging context, may be NULL (errors are then silent)
 * @return 1 on success, negative AVERROR on error; on error the partial
 *         script is freed and a diagnostic with line number and context
 *         is logged
 */
static int parse_script(void *log, char *script, int script_len,
                        struct sbg_script *rscript)
{
    struct sbg_parser sp = {
        .log     = log,
        .script  = script,
        .end     = script + script_len,
        .cursor  = script,
        .line_no = 1,
        .err_msg = "",
        .scs = {
            /* default values */
            .start_ts      = AV_NOPTS_VALUE,
            .sample_rate   = 44100,
            .opt_fade_time = 60 * AV_TIME_BASE,
        },
    };
    int r;

    lex_space(&sp);
    /* leading option lines */
    while (sp.cursor < sp.end) {
        r = parse_options(&sp);
        if (r < 0)
            goto fail;
        if (!r && !lex_line_end(&sp))
            break;
    }
    /* body: definitions, time sequences and blank/comment lines */
    while (sp.cursor < sp.end) {
        r = parse_named_def(&sp);
        if (!r)
            r = parse_time_sequence(&sp, 0);
        if (!r)
            r = lex_line_end(&sp) ? 1 : AVERROR_INVALIDDATA;
        if (r < 0)
            goto fail;
    }
    *rscript = sp.scs;
    return 1;
fail:
    free_script(&sp.scs);
    /* generic message when no parser filled err_msg */
    if (!*sp.err_msg)
        if (r == AVERROR_INVALIDDATA)
            snprintf(sp.err_msg, sizeof(sp.err_msg), "syntax error");
    if (log && *sp.err_msg) {
        /* quote the rest of the offending line (without CR/LF) */
        const char *ctx = sp.cursor;
        const char *ectx = av_x_if_null(memchr(ctx, '\n', sp.end - sp.cursor),
                                        sp.end);
        int lctx = ectx - ctx;
        const char *quote = "\"";
        if (lctx > 0 && ctx[lctx - 1] == '\r')
            lctx--;
        if (lctx == 0) {
            ctx = "the end of line";
            lctx = strlen(ctx);
            quote = "";
        }
        av_log(log, AV_LOG_ERROR, "Error line %d: %s near %s%.*s%s.\n",
               sp.line_no, sp.err_msg, quote, lctx, ctx, quote);
    }
    return r;
}
 
/**
 * Read the whole input into a freshly allocated, NUL-terminated buffer.
 *
 * The buffer grows geometrically (8 kB minimum), capped at max_size.
 *
 * @param io        input to read from
 * @param max_size  maximum accepted input size, in bytes
 * @param[out] rbuf on success, the allocated buffer (caller frees);
 *                  untouched on failure
 * @return number of bytes read (>= 0) on success, negative AVERROR on
 *         failure (buffer too small for the input, OOM, or read error)
 */
static int read_whole_file(AVIOContext *io, int max_size, char **rbuf)
{
    char *buf = NULL;
    int size = 0, bufsize = 0, r;

    while (1) {
        if (bufsize - size < 1024) {
            bufsize = FFMIN(FFMAX(2 * bufsize, 8192), max_size);
            if (bufsize - size < 2) {
                size = AVERROR(EFBIG);
                goto fail;
            }
            buf = av_realloc_f(buf, bufsize, 1);
            if (!buf) {
                size = AVERROR(ENOMEM);
                goto fail;
            }
        }
        /* BUGFIX: append after the data already read; the previous code
           read every chunk into the start of the buffer, corrupting
           inputs larger than a single read.  Keep one byte for the NUL. */
        r = avio_read(io, buf + size, bufsize - size - 1);
        if (r == AVERROR_EOF)
            break;
        if (r < 0) {
            /* BUGFIX: propagate the error; returning the positive partial
               size while freeing the buffer left *rbuf unset and made the
               caller parse a NULL buffer. */
            size = r;
            goto fail;
        }
        if (!r)
            break; /* defensive: treat a zero-byte read as EOF */
        size += r;
    }
    buf[size] = 0;
    *rbuf = buf;
    return size;
fail:
    av_free(buf);
    return size;
}
 
/**
 * Resolve all NOW-relative timestamps to absolute values and unwrap
 * day wrap-arounds so that event timestamps are monotonically
 * increasing.  Also fixes up start_ts and end_ts.
 */
static void expand_timestamps(void *log, struct sbg_script *s)
{
    int i, nb_rel = 0;
    int64_t now, cur_ts, delta = 0;

    for (i = 0; i < s->nb_tseq; i++)
        nb_rel += s->tseq[i].ts.type == 'N';
    if (nb_rel == s->nb_tseq) {
        /* All ts are relative to NOW: consider NOW = 0 */
        now = 0;
        if (s->start_ts != AV_NOPTS_VALUE)
            av_log(log, AV_LOG_WARNING,
                   "Start time ignored in a purely relative script.\n");
    /* NOTE(review): precedence here is (nb_rel == 0 && start set) ||
       opt_start_at_first — apparently intentional, kept as is */
    } else if (nb_rel == 0 && s->start_ts != AV_NOPTS_VALUE ||
               s->opt_start_at_first) {
        /* All ts are absolute and start time is specified */
        if (s->start_ts == AV_NOPTS_VALUE)
            s->start_ts = s->tseq[0].ts.t;
        now = s->start_ts;
    } else {
        /* Mixed relative/absolute ts: expand */
        time_t now0;
        struct tm *tm;

        av_log(log, AV_LOG_WARNING,
               "Scripts with mixed absolute and relative timestamps can give "
               "unexpected results (pause, seeking, time zone change).\n");
#undef time
        time(&now0);
        tm = localtime(&now0);
        /* wall-clock seconds since local midnight */
        now = tm ? tm->tm_hour * 3600 + tm->tm_min * 60 + tm->tm_sec :
                   now0 % DAY;
        av_log(log, AV_LOG_INFO, "Using %02d:%02d:%02d as NOW.\n",
               (int)(now / 3600), (int)(now / 60) % 60, (int)now % 60);
        now *= AV_TIME_BASE;
        for (i = 0; i < s->nb_tseq; i++) {
            if (s->tseq[i].ts.type == 'N') {
                s->tseq[i].ts.t += now;
                s->tseq[i].ts.type = 'T'; /* not necessary */
            }
        }
    }
    if (s->start_ts == AV_NOPTS_VALUE)
        s->start_ts = s->opt_start_at_first ? s->tseq[0].ts.t : now;
    s->end_ts = s->opt_duration ? s->start_ts + s->opt_duration :
                AV_NOPTS_VALUE; /* may be overridden later by -E option */
    /* unwrap: any backwards step means the next day */
    cur_ts = now;
    for (i = 0; i < s->nb_tseq; i++) {
        if (s->tseq[i].ts.t + delta < cur_ts)
            delta += DAY_TS;
        cur_ts = s->tseq[i].ts.t += delta;
    }
}
 
static int expand_tseq(void *log, struct sbg_script *s, int *nb_ev_max,
int64_t t0, struct sbg_script_tseq *tseq)
{
int i, r;
struct sbg_script_definition *def;
struct sbg_script_tseq *be;
struct sbg_script_event *ev;
 
if (tseq->lock++) {
av_log(log, AV_LOG_ERROR, "Recursion loop on \"%.*s\"\n",
tseq->name_len, tseq->name);
return AVERROR(EINVAL);
}
t0 += tseq->ts.t;
for (i = 0; i < s->nb_def; i++) {
if (s->def[i].name_len == tseq->name_len &&
!memcmp(s->def[i].name, tseq->name, tseq->name_len))
break;
}
if (i >= s->nb_def) {
av_log(log, AV_LOG_ERROR, "Tone-set \"%.*s\" not defined\n",
tseq->name_len, tseq->name);
return AVERROR(EINVAL);
}
def = &s->def[i];
if (def->type == 'B') {
be = s->block_tseq + def->elements;
for (i = 0; i < def->nb_elements; i++) {
r = expand_tseq(log, s, nb_ev_max, t0, &be[i]);
if (r < 0)
return r;
}
} else {
ev = alloc_array_elem((void **)&s->events, sizeof(*ev),
&s->nb_events, nb_ev_max);
ev->ts = tseq->ts.t;
ev->elements = def->elements;
ev->nb_elements = def->nb_elements;
ev->fade = tseq->fade;
}
tseq->lock--;
return 0;
}
 
/**
 * Turn the parsed script into a flat, time-ordered list of events:
 * resolve timestamps, then expand every top-level time sequence.
 *
 * @return 0 on success, negative AVERROR on error (including a script
 *         that produced no events at all)
 */
static int expand_script(void *log, struct sbg_script *s)
{
    int nb_events_max = 0, r, i;

    expand_timestamps(log, s);
    for (i = 0; i < s->nb_tseq; i++)
        if ((r = expand_tseq(log, s, &nb_events_max, 0, &s->tseq[i])) < 0)
            return r;
    if (!s->nb_events) {
        av_log(log, AV_LOG_ERROR, "No events in script\n");
        return AVERROR_INVALIDDATA;
    }
    /* -E: end exactly at the last event */
    if (s->opt_end_at_last)
        s->end_ts = s->events[s->nb_events - 1].ts;
    return 0;
}
 
/**
 * Append a wavesynth interval ramping from (f1, a1) at ts1 to (f2, a2)
 * at ts2, keeping phase continuity with a reference interval.
 *
 * If the new interval merely extends a constant reference interval, the
 * reference is extended in place instead of allocating a new one.
 *
 * @param ref index of the previous interval of the same channel, or -1
 * @return index of the interval carrying this data, or negative AVERROR
 */
static int add_interval(struct ws_intervals *inter,
                        enum ws_interval_type type, uint32_t channels, int ref,
                        int64_t ts1, int32_t f1, int32_t a1,
                        int64_t ts2, int32_t f2, int32_t a2)
{
    struct ws_interval *i, *ri;

    if (ref >= 0) {
        ri = &inter->inter[ref];
        /* ref and new intervals are constant, identical and adjacent */
        if (ri->type == type && ri->channels == channels &&
            ri->f1 == ri->f2 && ri->f2 == f1 && f1 == f2 &&
            ri->a1 == ri->a2 && ri->a2 == a1 && a1 == a2 &&
            ri->ts2 == ts1) {
            ri->ts2 = ts2;
            return ref;
        }
    }
    i = alloc_array_elem((void **)&inter->inter, sizeof(*i),
                         &inter->nb_inter, &inter->max_inter);
    if (!i)
        return AVERROR(ENOMEM);
    i->ts1      = ts1;
    i->ts2      = ts2;
    i->type     = type;
    i->channels = channels;
    i->f1       = f1;
    i->f2       = f2;
    i->a1       = a1;
    i->a2       = a2;
    /* high bit flags "phase continues from interval (phi & ~0x80000000)" */
    i->phi      = ref >= 0 ? ref | 0x80000000 : 0;
    return i - inter->inter;
}
 
/**
 * Emit the intervals for one bell strike: a fixed-frequency tone whose
 * amplitude decays from a to 0 between ts1 and ts2.
 *
 * @return 0 on success, negative AVERROR on allocation failure
 */
static int add_bell(struct ws_intervals *inter, struct sbg_script *s,
                    int64_t ts1, int64_t ts2, int32_t f, int32_t a)
{
    /* SBaGen uses an exponential decrease every 50ms.
       We approximate it with piecewise affine segments. */
    int32_t cpoints[][2] = {
        {  2, a },
        {  4, a - a / 4 },
        {  8, a / 2 },
        { 16, a / 4 },
        { 25, a / 10 },
        { 50, a / 80 },
        { 75, 0 },
    };
    int i, r;
    /* dt = 50 ms worth of samples; control points are in dt units */
    int64_t dt = s->sample_rate / 20, ts3 = ts1, ts4;
    for (i = 0; i < FF_ARRAY_ELEMS(cpoints); i++) {
        ts4 = FFMIN(ts2, ts1 + cpoints[i][0] * dt);
        r = add_interval(inter, WS_SINE, 3, -1,
                         ts3, f, a, ts4, f, cpoints[i][1]);
        if (r < 0)
            return r;
        ts3 = ts4;
        a = cpoints[i][1];
    }
    return 0;
}
 
/**
 * Emit the wavesynth intervals for one synth channel over [ts1, ts2),
 * ramping from the parameters in s1 to those in s2 and updating
 * s2->ref so later intervals can keep phase continuity.
 *
 * @param transition 0 plateau, 1 first half (to silence), 2 second half
 *                   (from silence), 3 direct transition
 * @return 0 on success, negative AVERROR on error
 */
static int generate_interval(void *log, struct sbg_script *s,
                             struct ws_intervals *inter,
                             int64_t ts1, int64_t ts2,
                             struct sbg_script_synth *s1,
                             struct sbg_script_synth *s2,
                             int transition)
{
    int r;

    /* nothing to emit for empty spans or fully silent channels */
    if (ts2 <= ts1 || (s1->vol == 0 && s2->vol == 0))
        return 0;
    switch (s1->type) {
    case SBG_TYPE_NONE:
        break;
    case SBG_TYPE_SINE:
        if (s1->beat == 0 && s2->beat == 0) {
            /* no beat: one tone on both channels (mask 3) */
            r = add_interval(inter, WS_SINE, 3, s1->ref.l,
                             ts1, s1->carrier, s1->vol,
                             ts2, s2->carrier, s2->vol);
            if (r < 0)
                return r;
            s2->ref.l = s2->ref.r = r;
        } else {
            /* binaural: carrier +/- beat/2 on left/right separately */
            r = add_interval(inter, WS_SINE, 1, s1->ref.l,
                             ts1, s1->carrier + s1->beat / 2, s1->vol,
                             ts2, s2->carrier + s2->beat / 2, s2->vol);
            if (r < 0)
                return r;
            s2->ref.l = r;
            r = add_interval(inter, WS_SINE, 2, s1->ref.r,
                             ts1, s1->carrier - s1->beat / 2, s1->vol,
                             ts2, s2->carrier - s2->beat / 2, s2->vol);
            if (r < 0)
                return r;
            s2->ref.r = r;
        }
        break;

    case SBG_TYPE_BELL:
        /* bells ring only when the channel (re)starts: second half */
        if (transition == 2) {
            r = add_bell(inter, s, ts1, ts2, s1->carrier, s2->vol);
            if (r < 0)
                return r;
        }
        break;

    case SBG_TYPE_SPIN:
        av_log(log, AV_LOG_WARNING, "Spinning noise not implemented, "
               "using pink noise instead.\n");
        /* fall through */
    case SBG_TYPE_NOISE:
        /* SBaGen's pink noise generator uses:
           - 1 band of white noise, mean square: 1/3;
           - 9 bands of subsampled white noise with linear
             interpolation, mean square: 2/3 each;
           with 1/10 weight each: the total mean square is 7/300.
           Our pink noise generator uses 8 bands of white noise with
           rectangular subsampling: the total mean square is 1/24.
           Therefore, to match SBaGen's volume, we must multiply vol by
           sqrt((7/300) / (1/24)) = sqrt(14/25) =~ 0.748
        */
        r = add_interval(inter, WS_NOISE, 3, s1->ref.l,
                         ts1, 0, s1->vol - s1->vol / 4,
                         ts2, 0, s2->vol - s2->vol / 4);
        if (r < 0)
            return r;
        s2->ref.l = s2->ref.r = r;
        break;

    case SBG_TYPE_MIX:
        /* Unimplemented: silence; warning present elsewhere */
    default:
        av_log(log, AV_LOG_ERROR,
               "Type %d is not implemented\n", s1->type);
        return AVERROR_PATCHWELCOME;
    }
    return 0;
}
 
/**
 * Emit the constant part of an event: each of its synth channels held
 * steady from ts_int to ts_trans.
 *
 * @return 0 on success, negative AVERROR on error
 */
static int generate_plateau(void *log, struct sbg_script *s,
                            struct ws_intervals *inter,
                            struct sbg_script_event *ev1)
{
    int64_t start = ev1->ts_int, stop = ev1->ts_trans;
    int idx, r;

    for (idx = 0; idx < ev1->nb_elements; idx++) {
        /* same synth as source and destination: a flat interval */
        struct sbg_script_synth *chan = &s->synth[ev1->elements + idx];
        r = generate_interval(log, s, inter, start, stop, chan, chan, 0);
        if (r < 0)
            return r;
    }
    return 0;
}
 
/*
 
ts1 ts2 ts1 tsmid ts2
| | | | |
v v v | v
____ ____ v ____
''''.... ''.. ..''
''''....____ ''....''
 
compatible transition incompatible transition
*/
 
/**
 * Emit the transition from event ev1 to event ev2 over
 * [ev1->ts_trans, ev1->ts_next).
 *
 * Channel pairs whose types are compatible get one direct ramp;
 * incompatible pairs fade to silence at the midpoint and back up.
 *
 * @return 0 on success, negative AVERROR on error
 */
static int generate_transition(void *log, struct sbg_script *s,
                               struct ws_intervals *inter,
                               struct sbg_script_event *ev1,
                               struct sbg_script_event *ev2)
{
    int64_t ts1 = ev1->ts_trans, ts2 = ev1->ts_next;
    /* (ts1 + ts2) / 2 without overflow */
    int64_t tsmid = (ts1 >> 1) + (ts2 >> 1) + (ts1 & ts2 & 1);
    enum sbg_fade_type type = ev1->fade.slide | (ev1->fade.out & ev2->fade.in);
    int nb_elements = FFMAX(ev1->nb_elements, ev2->nb_elements);
    struct sbg_script_synth *s1, *s2, s1mod, s2mod, smid;
    int pass, i, r;

    for (pass = 0; pass < 2; pass++) {
        /* pass = 0 -> compatible and first half of incompatible
           pass = 1 -> second half of incompatible
           Using two passes like that ensures that the intervals are generated
           in increasing order according to their start timestamp.
           Otherwise it would be necessary to sort them
           while keeping the mutual references.
         */
        for (i = 0; i < nb_elements; i++) {
            /* missing channels on either side become all-zero dummies */
            s1 = i < ev1->nb_elements ? &s->synth[ev1->elements + i] : &s1mod;
            s2 = i < ev2->nb_elements ? &s->synth[ev2->elements + i] : &s2mod;
            s1mod = s1 != &s1mod ? *s1 : (struct sbg_script_synth){ 0 };
            s2mod = s2 != &s2mod ? *s2 : (struct sbg_script_synth){ 0 };
            if (ev1->fade.slide) {
                /* for slides, and only for slides, silence ("-") is equivalent
                   to anything with volume 0 */
                if (s1mod.type == SBG_TYPE_NONE) {
                    s1mod = s2mod;
                    s1mod.vol = 0;
                } else if (s2mod.type == SBG_TYPE_NONE) {
                    s2mod = s1mod;
                    s2mod.vol = 0;
                }
            }
            if (s1mod.type == s2mod.type &&
                s1mod.type != SBG_TYPE_BELL &&
                (type == SBG_FADE_ADAPT ||
                 (s1mod.carrier == s2mod.carrier &&
                  s1mod.beat == s2mod.beat))) {
                /* compatible: single transition */
                if (!pass) {
                    r = generate_interval(log, s, inter,
                                          ts1, ts2, &s1mod, &s2mod, 3);
                    if (r < 0)
                        return r;
                    s2->ref = s2mod.ref;
                }
            } else {
                /* incompatible: silence at midpoint */
                if (!pass) {
                    smid = s1mod;
                    smid.vol = 0;
                    r = generate_interval(log, s, inter,
                                          ts1, tsmid, &s1mod, &smid, 1);
                    if (r < 0)
                        return r;
                } else {
                    smid = s2mod;
                    smid.vol = 0;
                    r = generate_interval(log, s, inter,
                                          tsmid, ts2, &smid, &s2mod, 2);
                    if (r < 0)
                        return r;
                    s2->ref = s2mod.ref;
                }
            }
        }
    }
    return 0;
}
 
/*
ev1 trats ev2 intts endts ev3
| | | | | |
v v v v v v
________________
.... .... ....
'''....________________....''' '''...._______________
 
\_________/\______________/\_________/\______________/\_________/\_____________/
tr x->1 int1 tr 1->2 int2 tr 2->3 int3
*/
 
/**
 * Convert the expanded event list into the final list of wavesynth
 * intervals: compute plateau/transition timestamps for every event
 * (treating the sequence as cyclic with at least a 24-hour period),
 * rescale them to sample units, and emit all intervals in order.
 *
 * @return 0 on success, negative AVERROR on error
 */
static int generate_intervals(void *log, struct sbg_script *s, int sample_rate,
                              struct ws_intervals *inter)
{
    int64_t trans_time = s->opt_fade_time / 2;
    struct sbg_script_event ev0, *ev1, *ev2;
    int64_t period;
    int i, r;

    /* SBaGen handles the time before and after the extremal events,
       and the corresponding transitions, as if the sequence were cyclic
       with a 24-hours period. */
    period = s->events[s->nb_events - 1].ts - s->events[0].ts;
    period = (period + (DAY_TS - 1)) / DAY_TS * DAY_TS;
    period = FFMAX(period, DAY_TS);

    /* Prepare timestamps for transitions */
    for (i = 0; i < s->nb_events; i++) {
        ev1 = &s->events[i];
        ev2 = &s->events[(i + 1) % s->nb_events];
        ev1->ts_int   = ev1->ts;
        /* slides start transitioning immediately; (ev1 < ev2) is false
           only for the wrap-around pair, which gets a full period added */
        ev1->ts_trans = ev1->fade.slide ? ev1->ts
                                        : ev2->ts + (ev1 < ev2 ? 0 : period);
    }
    for (i = 0; i < s->nb_events; i++) {
        ev1 = &s->events[i];
        ev2 = &s->events[(i + 1) % s->nb_events];
        if (!ev1->fade.slide) {
            /* center the fade window of width opt_fade_time on the joint */
            ev1->ts_trans = FFMAX(ev1->ts_int,   ev1->ts_trans - trans_time);
            ev2->ts_int   = FFMIN(ev2->ts_trans, ev2->ts_int   + trans_time);
        }
        ev1->ts_next  = ev2->ts_int + (ev1 < ev2 ? 0 : period);
    }

    /* Pseudo event before the first one */
    ev0 = s->events[s->nb_events - 1];
    ev0.ts_int   -= period;
    ev0.ts_trans -= period;
    ev0.ts_next  -= period;

    /* Convert timestamps from AV_TIME_BASE units to sample counts */
    for (i = -1; i < s->nb_events; i++) {
        ev1 = i < 0 ? &ev0 : &s->events[i];
        ev1->ts_int   = av_rescale(ev1->ts_int,   sample_rate, AV_TIME_BASE);
        ev1->ts_trans = av_rescale(ev1->ts_trans, sample_rate, AV_TIME_BASE);
        ev1->ts_next  = av_rescale(ev1->ts_next,  sample_rate, AV_TIME_BASE);
    }

    /* Generate intervals */
    for (i = 0; i < s->nb_synth; i++)
        s->synth[i].ref.l = s->synth[i].ref.r = -1;
    for (i = -1; i < s->nb_events; i++) {
        ev1 = i < 0 ? &ev0 : &s->events[i];
        ev2 = &s->events[(i + 1) % s->nb_events];
        r = generate_plateau(log, s, inter, ev1);
        if (r < 0)
            return r;
        r = generate_transition(log, s, inter, ev1, ev2);
        if (r < 0)
            return r;
    }
    if (!inter->nb_inter)
        av_log(log, AV_LOG_WARNING, "Completely silent script.\n");
    return 0;
}
 
/**
 * Serialize the interval list into the stream's extradata, in the
 * little-endian layout expected by the ffwavesynth decoder:
 * count, then per interval ts1/ts2/type/channels plus the type-specific
 * frequency/amplitude/phase fields.
 *
 * @return 0 on success, negative AVERROR on allocation failure
 */
static int encode_intervals(struct sbg_script *s, AVCodecContext *avc,
                            struct ws_intervals *inter)
{
    int i, edata_size = 4, entry_size;
    uint8_t *edata;

    for (i = 0; i < inter->nb_inter; i++) {
        entry_size = inter->inter[i].type == WS_SINE  ? 44 :
                     inter->inter[i].type == WS_NOISE ? 32 : 0;
        /* BUGFIX: check before adding; the previous post-addition test
           "edata_size < 0" relied on undefined signed overflow */
        if (edata_size > INT_MAX - entry_size)
            return AVERROR(ENOMEM);
        edata_size += entry_size;
    }
    if (ff_alloc_extradata(avc, edata_size))
        return AVERROR(ENOMEM);
    edata = avc->extradata;

#define ADD_EDATA32(v) do { AV_WL32(edata, (v)); edata += 4; } while(0)
#define ADD_EDATA64(v) do { AV_WL64(edata, (v)); edata += 8; } while(0)
    ADD_EDATA32(inter->nb_inter);
    for (i = 0; i < inter->nb_inter; i++) {
        ADD_EDATA64(inter->inter[i].ts1);
        ADD_EDATA64(inter->inter[i].ts2);
        ADD_EDATA32(inter->inter[i].type);
        ADD_EDATA32(inter->inter[i].channels);
        switch (inter->inter[i].type) {
        case WS_SINE:
            ADD_EDATA32(inter->inter[i].f1);
            ADD_EDATA32(inter->inter[i].f2);
            ADD_EDATA32(inter->inter[i].a1);
            ADD_EDATA32(inter->inter[i].a2);
            ADD_EDATA32(inter->inter[i].phi);
            break;
        case WS_NOISE:
            ADD_EDATA32(inter->inter[i].a1);
            ADD_EDATA32(inter->inter[i].a2);
            break;
        }
    }
    /* sanity check: we must have written exactly what we sized */
    if (edata != avc->extradata + edata_size)
        return AVERROR_BUG;
    return 0;
}
 
/* Probe: attempt a full parse of the buffer; accept only scripts that
   parse cleanly AND contain at least one definition and one time
   sequence. */
static av_cold int sbg_read_probe(AVProbeData *p)
{
    struct sbg_script script = { 0 };
    int plausible;

    plausible = parse_script(NULL, p->buf, p->buf_size, &script) >= 0 &&
                script.nb_def && script.nb_tseq;
    free_script(&script);
    return plausible ? AVPROBE_SCORE_MAX / 3 : 0;
}
 
/**
 * Read and fully pre-compute an SBG script: slurp the file, parse it,
 * expand it into events, convert to wavesynth intervals and store those
 * in the stream extradata.  Demuxing afterwards only emits tiny timing
 * packets.
 *
 * @return 0 on success, negative AVERROR on failure
 */
static av_cold int sbg_read_header(AVFormatContext *avf)
{
    struct sbg_demuxer *sbg = avf->priv_data;
    int r;
    char *buf = NULL;
    struct sbg_script script = { 0 };
    AVStream *st;
    struct ws_intervals inter = { 0 };

    r = read_whole_file(avf->pb, sbg->max_file_size, &buf);
    if (r < 0)
        goto fail;
    r = parse_script(avf, buf, r, &script);
    if (r < 0)
        goto fail;
    /* the demuxer option overrides the script's -r option */
    if (!sbg->sample_rate)
        sbg->sample_rate = script.sample_rate;
    else
        script.sample_rate = sbg->sample_rate;
    if (!sbg->frame_size)
        sbg->frame_size = FFMAX(1, sbg->sample_rate / 10);
    if (script.opt_mix)
        av_log(avf, AV_LOG_WARNING, "Mix feature not implemented: "
               "-m is ignored and mix channels will be silent.\n");
    r = expand_script(avf, &script);
    if (r < 0)
        goto fail;
    av_freep(&buf); /* names are no longer referenced past this point */
    r = generate_intervals(avf, &script, sbg->sample_rate, &inter);
    if (r < 0)
        goto fail;

    st = avformat_new_stream(avf, NULL);
    if (!st) {
        /* BUGFIX: was "return AVERROR(ENOMEM)", leaking inter.inter and
           the script arrays; route through the common cleanup instead */
        r = AVERROR(ENOMEM);
        goto fail;
    }
    st->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id       = AV_CODEC_ID_FFWAVESYNTH;
    st->codec->channels       = 2;
    st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    st->codec->sample_rate    = sbg->sample_rate;
    st->codec->frame_size     = sbg->frame_size;
    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    st->probe_packets = 0;
    st->start_time    = av_rescale(script.start_ts,
                                   sbg->sample_rate, AV_TIME_BASE);
    st->duration      = script.end_ts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
                        av_rescale(script.end_ts - script.start_ts,
                                   sbg->sample_rate, AV_TIME_BASE);
    st->cur_dts       = st->start_time;
    r = encode_intervals(&script, st->codec, &inter);
    if (r < 0)
        goto fail;

    av_free(inter.inter);
    free_script(&script);
    return 0;

fail:
    av_free(inter.inter);
    free_script(&script);
    av_free(buf); /* NULL after the av_freep above, so this is safe */
    return r;
}
 
static int sbg_read_packet(AVFormatContext *avf, AVPacket *packet)
{
    AVStream *stream = avf->streams[0];
    int64_t start, stop;

    /* Emit fixed-size synthetic packets until the stream duration,
     * if known, is exhausted. */
    start = stream->cur_dts;
    stop  = start + stream->codec->frame_size;
    if (stream->duration != AV_NOPTS_VALUE)
        stop = FFMIN(stop, stream->start_time + stream->duration);
    if (stop <= start)
        return AVERROR_EOF;
    if (av_new_packet(packet, 12) < 0)
        return AVERROR(ENOMEM);
    packet->pts = packet->dts = start;
    packet->duration = stop - start;
    /* Payload: 8-byte LE start timestamp + 4-byte LE duration. */
    AV_WL64(packet->data,     start);
    AV_WL32(packet->data + 8, packet->duration);
    return packet->size;
}
 
static int sbg_read_seek2(AVFormatContext *avf, int stream_index,
                          int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    /* Only exact, flagless seeks on the single stream are supported. */
    if (flags != 0 || stream_index > 0)
        return AVERROR(EINVAL);
    /* stream_index < 0 means ts is in AV_TIME_BASE units. */
    if (stream_index < 0)
        ts = av_rescale_q(ts, AV_TIME_BASE_Q, avf->streams[0]->time_base);
    avf->streams[0]->cur_dts = ts;
    return 0;
}
 
/* Legacy seek entry point: forward to the range-based seek with a
 * degenerate [ts, ts] interval and no flags. */
static int sbg_read_seek(AVFormatContext *avf, int stream_index,
                         int64_t ts, int flags)
{
    return sbg_read_seek2(avf, stream_index, ts, ts, ts, 0);
}
 
/* Demuxer private options; all are decoding parameters.
 * sample_rate/frame_size default to 0, meaning "derive from the script"
 * (see sbg_read_header). max_file_size bounds read_whole_file. */
static const AVOption sbg_options[] = {
    { "sample_rate", "", offsetof(struct sbg_demuxer, sample_rate),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX,
      AV_OPT_FLAG_DECODING_PARAM },
    { "frame_size", "", offsetof(struct sbg_demuxer, frame_size),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX,
      AV_OPT_FLAG_DECODING_PARAM },
    { "max_file_size", "", offsetof(struct sbg_demuxer, max_file_size),
      AV_OPT_TYPE_INT, { .i64 = 5000000 }, 0, INT_MAX,
      AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};
 
/* AVClass wiring sbg_options into the generic option system. */
static const AVClass sbg_demuxer_class = {
    .class_name = "sbg_demuxer",
    .item_name  = av_default_item_name,
    .option     = sbg_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* SBaGen (.sbg) script demuxer registration. */
AVInputFormat ff_sbg_demuxer = {
    .name           = "sbg",
    .long_name      = NULL_IF_CONFIG_SMALL("SBaGen binaural beats script"),
    .priv_data_size = sizeof(struct sbg_demuxer),
    .read_probe     = sbg_read_probe,
    .read_header    = sbg_read_header,
    .read_packet    = sbg_read_packet,
    .read_seek      = sbg_read_seek,
    .read_seek2     = sbg_read_seek2,
    .extensions     = "sbg",
    .priv_class     = &sbg_demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/sctp.c
0,0 → 1,332
/*
* SCTP protocol
* Copyright (c) 2012 Luca Barbato
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
*
* sctp url_protocol
*
* url syntax: sctp://host:port[?option=val...]
* option: 'listen' : listen for an incoming connection
* 'max_streams=n' : set the maximum number of streams
* 'reuse=1' : enable reusing the socket [TBD]
*
* by setting the maximum number of streams the protocol will use the
* first two bytes of the incoming/outgoing buffer to store the
* stream number of the packet being read/written.
* @see sctp_read
* @see sctp_write
*/
 
 
#include <netinet/in.h>
#include <netinet/sctp.h>
 
#include "config.h"
 
#if HAVE_POLL_H
#include <poll.h>
#endif
 
#include "libavutil/intreadwrite.h"
#include "libavutil/parseutils.h"
#include "avformat.h"
#include "internal.h"
#include "network.h"
#include "os_support.h"
#include "url.h"
 
/*
* The sctp_recvmsg and sctp_sendmsg functions are part of the user
* library that offers support for the SCTP kernel Implementation.
* To avoid build-time clashes the functions sport an ff_-prefix here.
* The main purpose of this code is to provide the SCTP Socket API
* mappings for user applications to interface with SCTP in the kernel.
*
* This implementation is based on the Socket API Extensions for SCTP
* defined in <draft-ietf-tsvwg-sctpsocket-10.txt>
*
* Copyright (c) 2003 International Business Machines, Corp.
*
* Written or modified by:
* Ryan Layer <rmlayer@us.ibm.com>
*/
 
/**
 * recvmsg() wrapper that also extracts the SCTP_SNDRCV ancillary data
 * (per-message stream number etc.) into *sinfo.
 *
 * @param s         socket descriptor
 * @param msg       destination buffer
 * @param len       buffer size
 * @param from      optional sender address (may be NULL)
 * @param fromlen   in: size of *from, out: actual size (may be NULL)
 * @param sinfo     filled with sctp_sndrcvinfo when a matching cmsg is
 *                  present; must be non-NULL in that case
 * @param msg_flags in: recvmsg flags, out: returned msg_flags (may be NULL)
 * @return number of bytes received, or a negative value on error
 */
static int ff_sctp_recvmsg(int s, void *msg, size_t len, struct sockaddr *from,
                           socklen_t *fromlen, struct sctp_sndrcvinfo *sinfo,
                           int *msg_flags)
{
    int recvb;
    struct iovec iov;
    /* Room for one sctp_sndrcvinfo control message. */
    char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
    struct msghdr inmsg = { 0 };
    struct cmsghdr *cmsg = NULL;

    iov.iov_base = msg;
    iov.iov_len = len;

    inmsg.msg_name = from;
    inmsg.msg_namelen = fromlen ? *fromlen : 0;
    inmsg.msg_iov = &iov;
    inmsg.msg_iovlen = 1;
    inmsg.msg_control = incmsg;
    inmsg.msg_controllen = sizeof(incmsg);

    if ((recvb = recvmsg(s, &inmsg, msg_flags ? *msg_flags : 0)) < 0)
        return recvb;

    /* Report back the actual address length and flags if requested. */
    if (fromlen)
        *fromlen = inmsg.msg_namelen;
    if (msg_flags)
        *msg_flags = inmsg.msg_flags;

    /* Scan the control messages for the SCTP send/receive info. */
    for (cmsg = CMSG_FIRSTHDR(&inmsg); cmsg != NULL;
         cmsg = CMSG_NXTHDR(&inmsg, cmsg)) {
        if ((IPPROTO_SCTP == cmsg->cmsg_level) &&
            (SCTP_SNDRCV == cmsg->cmsg_type))
            break;
    }

    /* Copy sinfo. */
    if (cmsg)
        memcpy(sinfo, CMSG_DATA(cmsg), sizeof(struct sctp_sndrcvinfo));

    return recvb;
}
 
static int ff_sctp_send(int s, const void *msg, size_t len,
const struct sctp_sndrcvinfo *sinfo, int flags)
{
struct msghdr outmsg;
struct iovec iov;
 
outmsg.msg_name = NULL;
outmsg.msg_namelen = 0;
outmsg.msg_iov = &iov;
iov.iov_base = (void*)msg;
iov.iov_len = len;
outmsg.msg_iovlen = 1;
outmsg.msg_controllen = 0;
 
if (sinfo) {
char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
struct cmsghdr *cmsg;
 
outmsg.msg_control = outcmsg;
outmsg.msg_controllen = sizeof(outcmsg);
outmsg.msg_flags = 0;
 
cmsg = CMSG_FIRSTHDR(&outmsg);
cmsg->cmsg_level = IPPROTO_SCTP;
cmsg->cmsg_type = SCTP_SNDRCV;
cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 
outmsg.msg_controllen = cmsg->cmsg_len;
memcpy(CMSG_DATA(cmsg), sinfo, sizeof(struct sctp_sndrcvinfo));
}
 
return sendmsg(s, &outmsg, flags);
}
 
/* Per-connection state for the sctp:// protocol. */
typedef struct SCTPContext {
    int fd;                              // connected (or accepted) socket
    int max_streams;                     // 0 = no stream-id prefixing
    struct sockaddr_storage dest_addr;   // resolved peer address
    socklen_t dest_addr_len;
} SCTPContext;
 
/**
 * Open an sctp:// url, either connecting to the peer or (with the
 * "listen" option) accepting one incoming connection.
 *
 * Supported options: ?listen, ?max_streams=n.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int sctp_open(URLContext *h, const char *uri, int flags)
{
    struct addrinfo *ai, *cur_ai;
    struct addrinfo hints = { 0 };
    struct sctp_event_subscribe event = { 0 };
    struct sctp_initmsg initparams = { 0 };
    int port;
    int fd = -1;
    SCTPContext *s = h->priv_data;
    const char *p;
    char buf[256];
    int ret, listen_socket = 0;
    char hostname[1024], proto[1024], path[1024];
    char portstr[10];

    av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname),
                 &port, path, sizeof(path), uri);
    if (strcmp(proto, "sctp"))
        return AVERROR(EINVAL);
    if (port <= 0 || port >= 65536) {
        av_log(s, AV_LOG_ERROR, "Port missing in uri\n");
        return AVERROR(EINVAL);
    }

    s->max_streams = 0;
    p = strchr(uri, '?');
    if (p) {
        if (av_find_info_tag(buf, sizeof(buf), "listen", p))
            listen_socket = 1;
        if (av_find_info_tag(buf, sizeof(buf), "max_streams", p))
            s->max_streams = strtol(buf, NULL, 10);
    }

    hints.ai_family   = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;
    snprintf(portstr, sizeof(portstr), "%d", port);
    ret = getaddrinfo(hostname, portstr, &hints, &ai);
    if (ret) {
        av_log(h, AV_LOG_ERROR, "Failed to resolve hostname %s: %s\n",
               hostname, gai_strerror(ret));
        return AVERROR(EIO);
    }

    /* Only the first resolved address is tried. */
    cur_ai = ai;

    fd = ff_socket(cur_ai->ai_family, SOCK_STREAM, IPPROTO_SCTP);
    if (fd < 0)
        goto fail;

    s->dest_addr_len = sizeof(s->dest_addr);

    if (listen_socket) {
        int fd1;
        /* Fix: bind/accept results were previously ignored, continuing
         * with an unusable socket on failure. */
        ret = bind(fd, cur_ai->ai_addr, cur_ai->ai_addrlen);
        if (ret < 0)
            goto fail;
        listen(fd, 100);
        fd1 = accept(fd, NULL, NULL);
        if (fd1 < 0)
            goto fail;
        closesocket(fd);
        fd = fd1;
    } else {
        ret = connect(fd, cur_ai->ai_addr, cur_ai->ai_addrlen);
        if (ret < 0)
            goto fail;
    }

    ff_socket_nonblock(fd, 1);

    event.sctp_data_io_event = 1;
    /* TODO: Subscribe to more event types and handle them */

    if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &event,
                   sizeof(event)) != 0) {
        av_log(h, AV_LOG_ERROR,
               "SCTP ERROR: Unable to subscribe to events\n");
        goto fail;
    }

    if (s->max_streams) {
        initparams.sinit_max_instreams = s->max_streams;
        initparams.sinit_num_ostreams  = s->max_streams;
        if (setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &initparams,
                       sizeof(initparams)) < 0)
            av_log(h, AV_LOG_ERROR,
                   "SCTP ERROR: Unable to initialize socket max streams %d\n",
                   s->max_streams);
    }

    h->priv_data   = s;
    h->is_streamed = 1;
    s->fd          = fd;
    freeaddrinfo(ai);
    return 0;

fail:
    /* Fix: the socket was previously leaked on this path. */
    if (fd >= 0)
        closesocket(fd);
    ret = AVERROR(EIO);
    freeaddrinfo(ai);
    return ret;
}
 
static int sctp_wait_fd(int fd, int write)
{
int ev = write ? POLLOUT : POLLIN;
struct pollfd p = { .fd = fd, .events = ev, .revents = 0 };
int ret;
 
ret = poll(&p, 1, 100);
return ret < 0 ? ff_neterrno() : p.revents & ev ? 0 : AVERROR(EAGAIN);
}
 
/**
 * Read from the SCTP socket. In max_streams mode the stream id of the
 * received message is prepended to the buffer as a 2-byte big-endian
 * value, so at most size-2 payload bytes are read.
 */
static int sctp_read(URLContext *h, uint8_t *buf, int size)
{
    SCTPContext *s = h->priv_data;
    int ret;

    /* In blocking mode, wait (briefly) for data before reading. */
    if (!(h->flags & AVIO_FLAG_NONBLOCK)) {
        ret = sctp_wait_fd(s->fd, 0);
        if (ret < 0)
            return ret;
    }

    if (s->max_streams) {
        /*StreamId is introduced as a 2byte code into the stream*/
        struct sctp_sndrcvinfo info = { 0 };
        ret = ff_sctp_recvmsg(s->fd, buf + 2, size - 2, NULL, 0, &info, 0);
        AV_WB16(buf, info.sinfo_stream);
        /* Account for the 2-byte stream-id prefix on success. */
        ret = ret < 0 ? ret : ret + 2;
    } else
        ret = recv(s->fd, buf, size, 0);

    return ret < 0 ? ff_neterrno() : ret;
}
 
/**
 * Write to the SCTP socket. In max_streams mode the first 2 bytes of
 * buf are interpreted as a big-endian stream id and stripped from the
 * payload; the message is sent with MSG_EOR on that stream.
 */
static int sctp_write(URLContext *h, const uint8_t *buf, int size)
{
    SCTPContext *s = h->priv_data;
    int ret;

    /* In blocking mode, wait (briefly) for writability first. */
    if (!(h->flags & AVIO_FLAG_NONBLOCK)) {
        ret = sctp_wait_fd(s->fd, 1);
        if (ret < 0)
            return ret;
    }

    if (s->max_streams) {
        /*StreamId is introduced as a 2byte code into the stream*/
        struct sctp_sndrcvinfo info = { 0 };
        info.sinfo_stream = AV_RB16(buf);
        /* Reject stream ids beyond the negotiated maximum. */
        if (info.sinfo_stream > s->max_streams) {
            av_log(h, AV_LOG_ERROR, "bad input data\n");
            return AVERROR(EINVAL);
        }
        ret = ff_sctp_send(s->fd, buf + 2, size - 2, &info, MSG_EOR);
    } else
        ret = send(s->fd, buf, size, 0);

    return ret < 0 ? ff_neterrno() : ret;
}
 
/* Close the protocol instance's socket. */
static int sctp_close(URLContext *h)
{
    SCTPContext *ctx = h->priv_data;

    closesocket(ctx->fd);
    return 0;
}
 
/* Expose the underlying socket descriptor. */
static int sctp_get_file_handle(URLContext *h)
{
    SCTPContext *ctx = h->priv_data;

    return ctx->fd;
}
 
/* sctp:// protocol registration. */
URLProtocol ff_sctp_protocol = {
    .name                = "sctp",
    .url_open            = sctp_open,
    .url_read            = sctp_read,
    .url_write           = sctp_write,
    .url_close           = sctp_close,
    .url_get_file_handle = sctp_get_file_handle,
    .priv_data_size      = sizeof(SCTPContext),
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/sdp.c
0,0 → 1,713
/*
* copyright (c) 2007 Luca Abeni
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <string.h>
#include "libavutil/avstring.h"
#include "libavutil/base64.h"
#include "libavutil/dict.h"
#include "libavutil/parseutils.h"
#include "libavutil/opt.h"
#include "libavcodec/xiph.h"
#include "libavcodec/mpeg4audio.h"
#include "avformat.h"
#include "internal.h"
#include "avc.h"
#include "rtp.h"
#if CONFIG_NETWORK
#include "network.h"
#endif
 
#if CONFIG_RTP_MUXER
#define MAX_EXTRADATA_SIZE ((INT_MAX - 10) / 2)
 
/* Session-level fields of an SDP description (RFC 4566, o=/c=/t= lines). */
struct sdp_session_level {
    int sdp_version;      /**< protocol version (currently 0) */
    int id;               /**< session ID */
    int version;          /**< session version */
    int start_time;       /**< session start time (NTP time, in seconds),
                               or 0 in case of permanent session */
    int end_time;         /**< session end time (NTP time, in seconds),
                               or 0 if the session is not bounded */
    int ttl;              /**< TTL, in case of multicast stream */
    const char *user;     /**< username of the session's creator */
    const char *src_addr; /**< IP address of the machine from which the session was created */
    const char *src_type; /**< address type of src_addr */
    const char *dst_addr; /**< destination IP address (can be multicast) */
    const char *dst_type; /**< destination IP address type */
    const char *name;     /**< session name (can be an empty string) */
};
 
/* Append a "c=" connection line for dest_addr, if any, to buff. */
static void sdp_write_address(char *buff, int size, const char *dest_addr,
                              const char *dest_type, int ttl)
{
    if (!dest_addr)
        return;
    if (!dest_type)
        dest_type = "IP4";
    /* The TTL should only be specified for IPv4 multicast addresses,
     * not for IPv6. */
    if (ttl > 0 && !strcmp(dest_type, "IP4"))
        av_strlcatf(buff, size, "c=IN %s %s/%d\r\n", dest_type, dest_addr, ttl);
    else
        av_strlcatf(buff, size, "c=IN %s %s\r\n", dest_type, dest_addr);
}
 
/* Append the session-level SDP lines (v=, o=, s=, c=, t=, a=tool) to buff. */
static void sdp_write_header(char *buff, int size, struct sdp_session_level *s)
{
    av_strlcatf(buff, size, "v=%d\r\n"
                            "o=- %d %d IN %s %s\r\n"
                            "s=%s\r\n",
                s->sdp_version,
                s->id, s->version, s->src_type, s->src_addr,
                s->name);
    sdp_write_address(buff, size, s->dst_addr, s->dst_type, s->ttl);
    av_strlcatf(buff, size, "t=%d %d\r\n"
                            "a=tool:libavformat " AV_STRINGIFY(LIBAVFORMAT_VERSION) "\r\n",
                s->start_time, s->end_time);
}
 
#if CONFIG_NETWORK
/**
 * Resolve dest_addr in place to a numeric IP address (SDP requires
 * numeric addresses) and set *type to "IP4" or "IP6".
 *
 * @return 1 if the resolved address is multicast, 0 otherwise
 *         (including on resolution failure, which leaves dest_addr as-is).
 */
static int resolve_destination(char *dest_addr, int size, char *type,
                               int type_size)
{
    struct addrinfo hints = { 0 }, *ai;
    int is_multicast;

    av_strlcpy(type, "IP4", type_size);
    if (!dest_addr[0])
        return 0;

    /* Resolve the destination, since it must be written
     * as a numeric IP address in the SDP. */

    if (getaddrinfo(dest_addr, NULL, &hints, &ai))
        return 0;
    getnameinfo(ai->ai_addr, ai->ai_addrlen, dest_addr, size,
                NULL, 0, NI_NUMERICHOST);
#ifdef AF_INET6
    if (ai->ai_family == AF_INET6)
        av_strlcpy(type, "IP6", type_size);
#endif
    is_multicast = ff_is_multicast_address(ai->ai_addr);
    freeaddrinfo(ai);
    return is_multicast;
}
#else
/* Stub for builds without networking: never multicast, address untouched. */
static int resolve_destination(char *dest_addr, int size, char *type,
                               int type_size)
{
    return 0;
}
#endif
 
/* Extract the destination address, port and ttl option from an rtp/srtp
 * url; non-RTP urls contribute only the address and return port 0. */
static int sdp_get_address(char *dest_addr, int size, int *ttl, const char *url)
{
    char proto[32];
    const char *opts;
    int port;

    av_url_split(proto, sizeof(proto), NULL, 0, dest_addr, size, &port,
                 NULL, 0, url);

    *ttl = 0;

    /* The url isn't for the actual rtp sessions,
     * don't parse out anything else than the destination.
     */
    if (strcmp(proto, "rtp") && strcmp(proto, "srtp"))
        return 0;

    opts = strchr(url, '?');
    if (opts) {
        char buff[64];

        /* ttl defaults to 5 whenever a query string is present. */
        *ttl = av_find_info_tag(buff, sizeof(buff), "ttl", opts)
               ? strtol(buff, NULL, 10)
               : 5;
    }

    return port;
}
 
#define MAX_PSET_SIZE 1024
/**
 * Build the H.264 "sprop-parameter-sets" (and "profile-level-id") fmtp
 * fragment from the codec extradata.
 *
 * If the extradata is in avcC form (first byte == 1) it is converted to
 * Annex B in place via the h264_mp4toannexb BSF, and restored afterwards.
 * Only SPS (NAL 7) and PPS (NAL 8) units are Base64-encoded into the
 * comma-separated list.
 *
 * @return a newly allocated string (caller frees), or NULL on error.
 * NOTE(review): c->extradata is dereferenced without checking
 * extradata_size > 0 — callers appear to guard on extradata_size; confirm.
 */
static char *extradata2psets(AVCodecContext *c)
{
    char *psets, *p;
    const uint8_t *r;
    static const char pset_string[] = "; sprop-parameter-sets=";
    static const char profile_string[] = "; profile-level-id=";
    uint8_t *orig_extradata = NULL;
    int orig_extradata_size = 0;
    const uint8_t *sps = NULL, *sps_end;

    if (c->extradata_size > MAX_EXTRADATA_SIZE) {
        av_log(c, AV_LOG_ERROR, "Too much extradata!\n");

        return NULL;
    }
    /* avcC extradata: convert to Annex B start-code format first. */
    if (c->extradata[0] == 1) {
        uint8_t *dummy_p;
        int dummy_int;
        AVBitStreamFilterContext *bsfc= av_bitstream_filter_init("h264_mp4toannexb");

        if (!bsfc) {
            av_log(c, AV_LOG_ERROR, "Cannot open the h264_mp4toannexb BSF!\n");

            return NULL;
        }

        /* Save the original extradata so it can be restored below. */
        orig_extradata_size = c->extradata_size;
        orig_extradata = av_mallocz(orig_extradata_size +
                                    FF_INPUT_BUFFER_PADDING_SIZE);
        if (!orig_extradata) {
            av_bitstream_filter_close(bsfc);
            return NULL;
        }
        memcpy(orig_extradata, c->extradata, orig_extradata_size);
        av_bitstream_filter_filter(bsfc, c, NULL, &dummy_p, &dummy_int, NULL, 0, 0);
        av_bitstream_filter_close(bsfc);
    }

    psets = av_mallocz(MAX_PSET_SIZE);
    if (psets == NULL) {
        av_log(c, AV_LOG_ERROR, "Cannot allocate memory for the parameter sets.\n");
        av_free(orig_extradata);
        return NULL;
    }
    memcpy(psets, pset_string, strlen(pset_string));
    p = psets + strlen(pset_string);
    /* Walk the Annex B stream one NAL unit at a time. */
    r = ff_avc_find_startcode(c->extradata, c->extradata + c->extradata_size);
    while (r < c->extradata + c->extradata_size) {
        const uint8_t *r1;
        uint8_t nal_type;

        /* Skip the start code (a run of zero bytes plus the 0x01). */
        while (!*(r++));
        nal_type = *r & 0x1f;
        r1 = ff_avc_find_startcode(r, c->extradata + c->extradata_size);
        if (nal_type != 7 && nal_type != 8) { /* Only output SPS and PPS */
            r = r1;
            continue;
        }
        /* Comma-separate all but the first parameter set. */
        if (p != (psets + strlen(pset_string))) {
            *p = ',';
            p++;
        }
        /* Remember the first SPS for the profile-level-id below. */
        if (!sps) {
            sps = r;
            sps_end = r1;
        }
        if (av_base64_encode(p, MAX_PSET_SIZE - (p - psets), r, r1 - r) == NULL) {
            av_log(c, AV_LOG_ERROR, "Cannot Base64-encode %td %td!\n", MAX_PSET_SIZE - (p - psets), r1 - r);
            av_free(psets);

            return NULL;
        }
        p += strlen(p);
        r = r1;
    }
    /* profile-level-id = 3 bytes following the SPS NAL header, in hex. */
    if (sps && sps_end - sps >= 4) {
        memcpy(p, profile_string, strlen(profile_string));
        p += strlen(p);
        ff_data_to_hex(p, sps + 1, 3, 0);
        p[6] = '\0';
    }
    /* Restore the original avcC extradata if it was converted. */
    if (orig_extradata) {
        av_free(c->extradata);
        c->extradata = orig_extradata;
        c->extradata_size = orig_extradata_size;
    }

    return psets;
}
 
/* Build a "; config=<hex extradata>" fmtp fragment.
 * Returns a newly allocated string (caller frees) or NULL on error. */
static char *extradata2config(AVCodecContext *c)
{
    char *cfg;

    if (c->extradata_size > MAX_EXTRADATA_SIZE) {
        av_log(c, AV_LOG_ERROR, "Too much extradata!\n");

        return NULL;
    }
    /* 9 bytes of prefix + two hex chars per byte + NUL (1 spare). */
    cfg = av_malloc(10 + c->extradata_size * 2);
    if (!cfg) {
        av_log(c, AV_LOG_ERROR, "Cannot allocate memory for the config info.\n");
        return NULL;
    }
    memcpy(cfg, "; config=", 9);
    ff_data_to_hex(cfg + 9, c->extradata, c->extradata_size, 0);
    cfg[9 + c->extradata_size * 2] = 0;

    return cfg;
}
 
/**
 * Build the Base64-encoded Xiph (Theora/Vorbis) RTP configuration blob
 * (RFC 5215-style packed headers) from the codec extradata.
 *
 * The comment header (index 1) is omitted; only the identification and
 * setup headers are packed.
 *
 * @return a newly allocated Base64 string (caller frees), or NULL on error.
 */
static char *xiph_extradata2config(AVCodecContext *c)
{
    char *config, *encoded_config;
    uint8_t *header_start[3];
    int headers_len, header_len[3], config_len;
    int first_header_size;

    /* The identification header has a fixed size per codec. */
    switch (c->codec_id) {
    case AV_CODEC_ID_THEORA:
        first_header_size = 42;
        break;
    case AV_CODEC_ID_VORBIS:
        first_header_size = 30;
        break;
    default:
        av_log(c, AV_LOG_ERROR, "Unsupported Xiph codec ID\n");
        return NULL;
    }

    if (avpriv_split_xiph_headers(c->extradata, c->extradata_size,
                                  first_header_size, header_start,
                                  header_len) < 0) {
        av_log(c, AV_LOG_ERROR, "Extradata corrupt.\n");
        return NULL;
    }

    /* Only ident (0) and setup (2) headers are carried; comments are not. */
    headers_len = header_len[0] + header_len[2];
    config_len = 4 +          // count
                 3 +          // ident
                 2 +          // packet size
                 1 +          // header count
                 2 +          // header size
                 headers_len; // and the rest

    config = av_malloc(config_len);
    if (!config)
        goto xiph_fail;

    encoded_config = av_malloc(AV_BASE64_SIZE(config_len));
    if (!encoded_config) {
        av_free(config);
        goto xiph_fail;
    }

    /* Pack: 24-bit count (=1 is in byte 3), 24-bit ident, 16-bit length,
     * header count, per-header length, then the raw headers. */
    config[0] = config[1] = config[2] = 0;
    config[3] = 1;
    config[4] = (RTP_XIPH_IDENT >> 16) & 0xff;
    config[5] = (RTP_XIPH_IDENT >> 8) & 0xff;
    config[6] = (RTP_XIPH_IDENT ) & 0xff;
    config[7] = (headers_len >> 8) & 0xff;
    config[8] = headers_len & 0xff;
    config[9] = 2;
    config[10] = header_len[0];
    config[11] = 0; // size of comment header; nonexistent
    memcpy(config + 12, header_start[0], header_len[0]);
    memcpy(config + 12 + header_len[0], header_start[2], header_len[2]);

    av_base64_encode(encoded_config, AV_BASE64_SIZE(config_len),
                     config, config_len);
    av_free(config);

    return encoded_config;

xiph_fail:
    av_log(c, AV_LOG_ERROR,
           "Not enough memory for configuration string\n");
    return NULL;
}
 
/* MP4A-LATM
 * The RTP payload format specification is described in RFC 3016
 * The encoding specifications are provided in ISO/IEC 14496-3
 *
 * Map sample rate / channel count to an AAC profile-level-id.
 * TODO: AAC Profile only supports AAC LC Object Type.
 * Different Object Types should implement different Profile Levels. */
static int latm_context2profilelevel(AVCodecContext *c)
{
    if (c->sample_rate <= 24000)
        /* More than 2 channels at low rates falls back to Level 5. */
        return c->channels <= 2 ? 0x28 /* AAC Profile, Level 1 */
                                : 0x2B /* AAC Profile, Level 5 */;

    if (c->sample_rate <= 48000) {
        if (c->channels <= 2)
            return 0x29; // AAC Profile, Level 2
        if (c->channels <= 5)
            return 0x2A; // AAC Profile, Level 4
        return 0x2B;     // AAC Profile, Level 5
    }

    /* Everything above 48 kHz (and the >96 kHz case) maps to Level 5. */
    return 0x2B;
}
 
/* MP4A-LATM
 * The RTP payload format specification is described in RFC 3016
 * The encoding specifications are provided in ISO/IEC 14496-3 */
/**
 * Build the hex-encoded StreamMuxConfig string for the "config=" fmtp
 * parameter from the sample rate and channel count.
 *
 * @return a newly allocated 12-hex-digit string (caller frees),
 *         or NULL if the sample rate has no MPEG-4 audio index.
 */
static char *latm_context2config(AVCodecContext *c)
{
    uint8_t config_byte[6];
    int rate_index;
    char *config;

    /* Look up the MPEG-4 sampling-frequency index for this rate. */
    for (rate_index = 0; rate_index < 16; rate_index++)
        if (avpriv_mpeg4audio_sample_rates[rate_index] == c->sample_rate)
            break;
    if (rate_index == 16) {
        av_log(c, AV_LOG_ERROR, "Unsupported sample rate\n");
        return NULL;
    }

    /* Hand-packed StreamMuxConfig bitstream (see ISO/IEC 14496-3). */
    config_byte[0] = 0x40;
    config_byte[1] = 0;
    config_byte[2] = 0x20 | rate_index;
    config_byte[3] = c->channels << 4;
    config_byte[4] = 0x3f;
    config_byte[5] = 0xc0;

    config = av_malloc(6*2+1);
    if (!config) {
        av_log(c, AV_LOG_ERROR, "Cannot allocate memory for the config info.\n");
        return NULL;
    }
    ff_data_to_hex(config, config_byte, 6, 1);
    config[12] = 0;

    return config;
}
 
/**
 * Append codec-specific "a=rtpmap:"/"a=fmtp:" SDP attribute lines for
 * one stream to buff.
 *
 * @param buff         destination buffer (appended via av_strlcatf)
 * @param size         total size of buff
 * @param c            codec context describing the stream
 * @param payload_type RTP payload type number to advertise
 * @param fmt          muxer context, used to read rtpflags options (may be NULL)
 * @return buff on success, NULL when a required configuration string
 *         could not be built (AAC/Vorbis/Theora error paths).
 */
static char *sdp_write_media_attributes(char *buff, int size, AVCodecContext *c, int payload_type, AVFormatContext *fmt)
{
    char *config = NULL;

    switch (c->codec_id) {
    case AV_CODEC_ID_H264: {
        int mode = 1;
        /* "h264_mode0" rtpflag selects packetization-mode 0. */
        if (fmt && fmt->oformat->priv_class &&
            av_opt_flag_is_set(fmt->priv_data, "rtpflags", "h264_mode0"))
            mode = 0;
        if (c->extradata_size) {
            config = extradata2psets(c);
        }
        av_strlcatf(buff, size, "a=rtpmap:%d H264/90000\r\n"
                                "a=fmtp:%d packetization-mode=%d%s\r\n",
                                 payload_type,
                                 payload_type, mode, config ? config : "");
        break;
    }
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
        /* a=framesize is required by 3GPP TS 26.234 (PSS). It
         * actually specifies the maximum video size, but we only know
         * the current size. This is required for playback on Android
         * stagefright and on Samsung bada. */
        if (!fmt || !fmt->oformat->priv_class ||
            !av_opt_flag_is_set(fmt->priv_data, "rtpflags", "rfc2190") ||
            c->codec_id == AV_CODEC_ID_H263P)
            av_strlcatf(buff, size, "a=rtpmap:%d H263-2000/90000\r\n"
                                    "a=framesize:%d %d-%d\r\n",
                                    payload_type,
                                    payload_type, c->width, c->height);
        break;
    case AV_CODEC_ID_MPEG4:
        if (c->extradata_size) {
            config = extradata2config(c);
        }
        av_strlcatf(buff, size, "a=rtpmap:%d MP4V-ES/90000\r\n"
                                "a=fmtp:%d profile-level-id=1%s\r\n",
                                 payload_type,
                                 payload_type, config ? config : "");
        break;
    case AV_CODEC_ID_AAC:
        /* Two payload formats: MP4A-LATM (with "latm" rtpflag) or
         * MPEG4-GENERIC (AAC-hbr); both require a config string. */
        if (fmt && fmt->oformat && fmt->oformat->priv_class &&
            av_opt_flag_is_set(fmt->priv_data, "rtpflags", "latm")) {
            config = latm_context2config(c);
            if (!config)
                return NULL;
            av_strlcatf(buff, size, "a=rtpmap:%d MP4A-LATM/%d/%d\r\n"
                                    "a=fmtp:%d profile-level-id=%d;cpresent=0;config=%s\r\n",
                                     payload_type, c->sample_rate, c->channels,
                                     payload_type, latm_context2profilelevel(c), config);
        } else {
            if (c->extradata_size) {
                config = extradata2config(c);
            } else {
                /* FIXME: maybe we can forge config information based on the
                 * codec parameters...
                 */
                av_log(c, AV_LOG_ERROR, "AAC with no global headers is currently not supported.\n");
                return NULL;
            }
            if (config == NULL) {
                return NULL;
            }
            av_strlcatf(buff, size, "a=rtpmap:%d MPEG4-GENERIC/%d/%d\r\n"
                                    "a=fmtp:%d profile-level-id=1;"
                                    "mode=AAC-hbr;sizelength=13;indexlength=3;"
                                    "indexdeltalength=3%s\r\n",
                                     payload_type, c->sample_rate, c->channels,
                                     payload_type, config);
        }
        break;
    case AV_CODEC_ID_PCM_S16BE:
        /* Static payload types (< RTP_PT_PRIVATE) need no rtpmap line. */
        if (payload_type >= RTP_PT_PRIVATE)
            av_strlcatf(buff, size, "a=rtpmap:%d L16/%d/%d\r\n",
                                     payload_type,
                                     c->sample_rate, c->channels);
        break;
    case AV_CODEC_ID_PCM_MULAW:
        if (payload_type >= RTP_PT_PRIVATE)
            av_strlcatf(buff, size, "a=rtpmap:%d PCMU/%d/%d\r\n",
                                     payload_type,
                                     c->sample_rate, c->channels);
        break;
    case AV_CODEC_ID_PCM_ALAW:
        if (payload_type >= RTP_PT_PRIVATE)
            av_strlcatf(buff, size, "a=rtpmap:%d PCMA/%d/%d\r\n",
                                     payload_type,
                                     c->sample_rate, c->channels);
        break;
    case AV_CODEC_ID_AMR_NB:
        av_strlcatf(buff, size, "a=rtpmap:%d AMR/%d/%d\r\n"
                                "a=fmtp:%d octet-align=1\r\n",
                                 payload_type, c->sample_rate, c->channels,
                                 payload_type);
        break;
    case AV_CODEC_ID_AMR_WB:
        av_strlcatf(buff, size, "a=rtpmap:%d AMR-WB/%d/%d\r\n"
                                "a=fmtp:%d octet-align=1\r\n",
                                 payload_type, c->sample_rate, c->channels,
                                 payload_type);
        break;
    case AV_CODEC_ID_VORBIS:
        if (c->extradata_size)
            config = xiph_extradata2config(c);
        else
            av_log(c, AV_LOG_ERROR, "Vorbis configuration info missing\n");
        if (!config)
            return NULL;

        av_strlcatf(buff, size, "a=rtpmap:%d vorbis/%d/%d\r\n"
                                "a=fmtp:%d configuration=%s\r\n",
                                 payload_type, c->sample_rate, c->channels,
                                 payload_type, config);
        break;
    case AV_CODEC_ID_THEORA: {
        const char *pix_fmt;
        if (c->extradata_size)
            config = xiph_extradata2config(c);
        else
            av_log(c, AV_LOG_ERROR, "Theora configuation info missing\n");
        if (!config)
            return NULL;

        /* Theora RTP only defines the three planar YUV samplings. */
        switch (c->pix_fmt) {
        case AV_PIX_FMT_YUV420P:
            pix_fmt = "YCbCr-4:2:0";
            break;
        case AV_PIX_FMT_YUV422P:
            pix_fmt = "YCbCr-4:2:2";
            break;
        case AV_PIX_FMT_YUV444P:
            pix_fmt = "YCbCr-4:4:4";
            break;
        default:
            av_log(c, AV_LOG_ERROR, "Unsupported pixel format.\n");
            return NULL;
        }

        av_strlcatf(buff, size, "a=rtpmap:%d theora/90000\r\n"
                                "a=fmtp:%d delivery-method=inline; "
                                "width=%d; height=%d; sampling=%s; "
                                "configuration=%s\r\n",
                                 payload_type, payload_type,
                                 c->width, c->height, pix_fmt, config);
        break;
    }
    case AV_CODEC_ID_VP8:
        av_strlcatf(buff, size, "a=rtpmap:%d VP8/90000\r\n",
                                 payload_type);
        break;
    case AV_CODEC_ID_MJPEG:
        if (payload_type >= RTP_PT_PRIVATE)
            av_strlcatf(buff, size, "a=rtpmap:%d JPEG/90000\r\n",
                                     payload_type);
        break;
    case AV_CODEC_ID_ADPCM_G722:
        if (payload_type >= RTP_PT_PRIVATE)
            av_strlcatf(buff, size, "a=rtpmap:%d G722/%d/%d\r\n",
                                     payload_type,
                                     8000, c->channels);
        break;
    case AV_CODEC_ID_ADPCM_G726: {
        if (payload_type >= RTP_PT_PRIVATE)
            av_strlcatf(buff, size, "a=rtpmap:%d G726-%d/%d\r\n",
                                     payload_type,
                                     c->bits_per_coded_sample*8,
                                     c->sample_rate);
        break;
    }
    case AV_CODEC_ID_ILBC:
        av_strlcatf(buff, size, "a=rtpmap:%d iLBC/%d\r\n"
                                "a=fmtp:%d mode=%d\r\n",
                                 payload_type, c->sample_rate,
                                 payload_type, c->block_align == 38 ? 20 : 30);
        break;
    case AV_CODEC_ID_SPEEX:
        av_strlcatf(buff, size, "a=rtpmap:%d speex/%d\r\n",
                                 payload_type, c->sample_rate);
        if (c->codec) {
            const char *mode;
            uint64_t vad_option;

            /* vbr mode: QSCALE -> on, vad option -> vad, else off. */
            if (c->flags & CODEC_FLAG_QSCALE)
                mode = "on";
            else if (!av_opt_get_int(c, "vad", AV_OPT_FLAG_ENCODING_PARAM, &vad_option) && vad_option)
                mode = "vad";
            else
                mode = "off";

            av_strlcatf(buff, size, "a=fmtp:%d vbr=%s\r\n",
                                     payload_type, mode);
        }
        break;
    case AV_CODEC_ID_OPUS:
        av_strlcatf(buff, size, "a=rtpmap:%d opus/48000\r\n",
                                 payload_type);
        break;
    default:
        /* Nothing special to do here... */
        break;
    }

    av_free(config);

    return buff;
}
 
/* Append the full media section (m=, c=, b= and attribute lines) for one
 * stream to buff. */
void ff_sdp_write_media(char *buff, int size, AVStream *st, int idx,
                        const char *dest_addr, const char *dest_type,
                        int port, int ttl, AVFormatContext *fmt)
{
    AVCodecContext *c = st->codec;
    int payload_type  = ff_rtp_get_payload_type(fmt, c, idx);
    const char *media;

    switch (c->codec_type) {
    case AVMEDIA_TYPE_VIDEO:    media = "video";       break;
    case AVMEDIA_TYPE_AUDIO:    media = "audio";       break;
    case AVMEDIA_TYPE_SUBTITLE: media = "text";        break;
    default:                    media = "application"; break;
    }

    av_strlcatf(buff, size, "m=%s %d RTP/AVP %d\r\n", media, port, payload_type);
    sdp_write_address(buff, size, dest_addr, dest_type, ttl);
    /* Advertise the bitrate in kbit/s when known. */
    if (c->bit_rate)
        av_strlcatf(buff, size, "b=AS:%d\r\n", c->bit_rate / 1000);

    sdp_write_media_attributes(buff, size, c, payload_type, fmt);
}
 
/**
 * Generate a full SDP description for n_files muxer contexts into buf.
 *
 * With a single file the destination address/ttl go on the session level;
 * with several files each gets its own media-level address. Streams get
 * consecutive RTP ports (port + 2*j) or a=control lines when no port is
 * known. SRTP crypto parameters are copied from the AVIO context if set.
 *
 * @return 0 (this implementation does not report buffer overflow).
 */
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
{
    AVDictionaryEntry *title = av_dict_get(ac[0]->metadata, "title", NULL, 0);
    struct sdp_session_level s = { 0 };
    int i, j, port, ttl, is_multicast, index = 0;
    char dst[32], dst_type[5];

    memset(buf, 0, size);
    s.user = "-";
    s.src_addr = "127.0.0.1";    /* FIXME: Properly set this */
    s.src_type = "IP4";
    s.name = title ? title->value : "No Name";

    port = 0;
    ttl = 0;
    if (n_files == 1) {
        /* Single file: the destination becomes a session-level c= line. */
        port = sdp_get_address(dst, sizeof(dst), &ttl, ac[0]->filename);
        is_multicast = resolve_destination(dst, sizeof(dst), dst_type,
                                           sizeof(dst_type));
        if (!is_multicast)
            ttl = 0;
        if (dst[0]) {
            s.dst_addr = dst;
            s.dst_type = dst_type;
            s.ttl = ttl;
            if (!strcmp(dst_type, "IP6")) {
                s.src_addr = "::1";
                s.src_type = "IP6";
            }
        }
    }
    sdp_write_header(buf, size, &s);

    dst[0] = 0;
    for (i = 0; i < n_files; i++) {
        /* Multiple files: per-file destination on the media level. */
        if (n_files != 1) {
            port = sdp_get_address(dst, sizeof(dst), &ttl, ac[i]->filename);
            is_multicast = resolve_destination(dst, sizeof(dst), dst_type,
                                               sizeof(dst_type));
            if (!is_multicast)
                ttl = 0;
        }
        for (j = 0; j < ac[i]->nb_streams; j++) {
            ff_sdp_write_media(buf, size, ac[i]->streams[j], index++,
                               dst[0] ? dst : NULL, dst_type,
                               (port > 0) ? port + j * 2 : 0,
                               ttl, ac[i]);
            if (port <= 0) {
                av_strlcatf(buf, size,
                            "a=control:streamid=%d\r\n", i + j);
            }
            /* Propagate SRTP crypto settings from the IO layer, if any. */
            if (ac[i]->pb && ac[i]->pb->av_class) {
                uint8_t *crypto_suite = NULL, *crypto_params = NULL;
                av_opt_get(ac[i]->pb, "srtp_out_suite", AV_OPT_SEARCH_CHILDREN,
                           &crypto_suite);
                av_opt_get(ac[i]->pb, "srtp_out_params", AV_OPT_SEARCH_CHILDREN,
                           &crypto_params);
                if (crypto_suite && crypto_suite[0])
                    av_strlcatf(buf, size,
                                "a=crypto:1 %s inline:%s\r\n",
                                crypto_suite, crypto_params);
                av_free(crypto_suite);
                av_free(crypto_params);
            }
        }
    }

    return 0;
}
#else
/* Stubs when the RTP muxer is not compiled in: SDP generation is
 * unavailable. */
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
{
    return AVERROR(ENOSYS);
}

void ff_sdp_write_media(char *buff, int size, AVStream *st, int idx,
                        const char *dest_addr, const char *dest_type,
                        int port, int ttl, AVFormatContext *fmt)
{
}
#endif
/contrib/sdk/sources/ffmpeg/libavformat/seek-test.c
0,0 → 1,154
/*
* Copyright (c) 2003 Fabrice Bellard
* Copyright (c) 2007 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
 
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavformat/avformat.h"
 
/* Scratch buffer for numeric return codes; ret_str is not thread-safe,
 * which is fine for this single-threaded test tool. */
static char buffer[20];

/* Render an AVERROR code as a short symbolic or numeric string. */
static const char *ret_str(int v)
{
    if (v == AVERROR_EOF)
        return "-EOF";
    if (v == AVERROR(EIO))
        return "-EIO";
    if (v == AVERROR(ENOMEM))
        return "-ENOMEM";
    if (v == AVERROR(EINVAL))
        return "-EINVAL";
    snprintf(buffer, sizeof(buffer), "%2d", v);
    return buffer;
}
 
/* Format a timestamp as " sec.micro" (or " NOPTS ") into buffer. */
static void ts_str(char buffer[60], int64_t ts, AVRational base)
{
    int64_t us;

    if (ts == AV_NOPTS_VALUE) {
        strcpy(buffer, " NOPTS ");
        return;
    }
    /* Rescale into microseconds, then print sign separately so the
     * fractional part stays positive. */
    us = av_rescale_q(ts, base, (AVRational){1, 1000000});
    snprintf(buffer, 60, "%c%"PRId64".%06"PRId64"",
             us < 0 ? '-' : ' ', FFABS(us) / 1000000, FFABS(us) % 1000000);
}
 
/**
 * Seek-test driver: opens the input, then alternates reading a few
 * frames with pseudo-random forward/backward seeks on rotating stream
 * indices, printing each result for comparison against reference output.
 *
 * Options: -seekforw/-seekback <ts>, -frames <n>, -duration <seconds>.
 */
int main(int argc, char **argv)
{
    const char *filename;
    AVFormatContext *ic = NULL;
    int i, ret, stream_id;
    int j;
    int64_t timestamp;
    AVDictionary *format_opts = NULL;
    int64_t seekfirst = AV_NOPTS_VALUE;
    int firstback=0;
    int frame_count = 1;
    int duration = 4;

    /* Options come in pairs after the filename; anything unknown
     * degrades argc so the usage message triggers below. */
    for(i=2; i<argc; i+=2){
        if       (!strcmp(argv[i], "-seekforw")){
            seekfirst = atoi(argv[i+1]);
        } else if(!strcmp(argv[i], "-seekback")){
            seekfirst = atoi(argv[i+1]);
            firstback = 1;
        } else if(!strcmp(argv[i], "-frames")){
            frame_count = atoi(argv[i+1]);
        } else if(!strcmp(argv[i], "-duration")){
            duration = atoi(argv[i+1]);
        } else {
            argc = 1;
        }
    }

    /* Fixed raw-audio parameters so headerless inputs can be opened. */
    av_dict_set(&format_opts, "channels", "1", 0);
    av_dict_set(&format_opts, "sample_rate", "22050", 0);

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();

    if (argc < 2) {
        printf("usage: %s input_file\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    ret = avformat_open_input(&ic, filename, NULL, &format_opts);
    av_dict_free(&format_opts);
    if (ret < 0) {
        fprintf(stderr, "cannot open %s\n", filename);
        return 1;
    }

    ret = avformat_find_stream_info(ic, NULL);
    if (ret < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", filename);
        return 1;
    }

    /* Optional initial seek before the read/seek loop. */
    if(seekfirst != AV_NOPTS_VALUE){
        if(firstback)   avformat_seek_file(ic, -1, INT64_MIN, seekfirst, seekfirst, 0);
        else            avformat_seek_file(ic, -1, seekfirst, seekfirst, INT64_MAX, 0);
    }
    for(i=0; ; i++){
        AVPacket pkt = { 0 };
        AVStream *av_uninit(st);
        char ts_buf[60];

        /* Only read when the previous seek succeeded. */
        if(ret>=0){
            for(j=0; j<frame_count; j++) {
                ret= av_read_frame(ic, &pkt);
                if(ret>=0){
                    char dts_buf[60];
                    st= ic->streams[pkt.stream_index];
                    ts_str(dts_buf, pkt.dts, st->time_base);
                    ts_str(ts_buf,  pkt.pts, st->time_base);
                    printf("ret:%-10s st:%2d flags:%d dts:%s pts:%s pos:%7" PRId64 " size:%6d", ret_str(ret), pkt.stream_index, pkt.flags, dts_buf, ts_buf, pkt.pos, pkt.size);
                    av_free_packet(&pkt);
                } else
                    printf("ret:%s", ret_str(ret)); // necessary to avoid trailing whitespace
                printf("\n");
            }
        }

        if(i>25) break;

        /* Rotate through streams (-1 = global), derive a deterministic
         * pseudo-random target timestamp inside [-1s, duration). */
        stream_id= (i>>1)%(ic->nb_streams+1) - 1;
        timestamp= (i*19362894167LL) % (duration*AV_TIME_BASE) - AV_TIME_BASE;
        if(stream_id>=0){
            st= ic->streams[stream_id];
            timestamp= av_rescale_q(timestamp, AV_TIME_BASE_Q, st->time_base);
        }
        //FIXME fully test the new seek API
        if(i&1) ret = avformat_seek_file(ic, stream_id, INT64_MIN, timestamp, timestamp, 0);
        else    ret = avformat_seek_file(ic, stream_id, timestamp, timestamp, INT64_MAX, 0);
        ts_str(ts_buf, timestamp, stream_id < 0 ? AV_TIME_BASE_Q : st->time_base);
        printf("ret:%-10s st:%2d flags:%d  ts:%s\n", ret_str(ret), stream_id, i&1, ts_buf);
    }

    avformat_close_input(&ic);

    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/seek.c
0,0 → 1,509
/*
* seek utility functions for use within format handlers
*
* Copyright (c) 2009 Ivan Schreter
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "seek.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem.h"
#include "internal.h"
 
// NOTE: implementation should be moved here in another patch, to keep patches
// separated.
 
/**
 * helper structure describing keyframe search state of one stream
 *
 * One instance per stream is filled in by search_hi_lo_keyframes() to
 * track the closest keyframe below and above the seek target.
 */
typedef struct {
    int64_t pos_lo;          ///< position of the frame with low timestamp in file or INT64_MAX if not found (yet)
    int64_t ts_lo;           ///< frame presentation timestamp or same as pos_lo for byte seeking

    int64_t pos_hi;          ///< position of the frame with high timestamp in file or INT64_MAX if not found (yet)
    int64_t ts_hi;           ///< frame presentation timestamp or same as pos_hi for byte seeking

    int64_t last_pos;        ///< last known position of a frame, for multi-frame packets

    int64_t term_ts;         ///< termination timestamp (which TS we already read)
    AVRational term_ts_tb;   ///< timebase for term_ts
    int64_t first_ts;        ///< first packet timestamp in this iteration (to fill term_ts later)
    AVRational first_ts_tb;  ///< timebase for first_ts

    int terminated;          ///< termination flag for the current iteration
} AVSyncPoint;
 
/**
* Compute a distance between timestamps.
*
* Distances are only comparable, if same time bases are used for computing
* distances.
*
* @param ts_hi high timestamp
* @param tb_hi high timestamp time base
* @param ts_lo low timestamp
* @param tb_lo low timestamp time base
* @return representation of distance between high and low timestamps
*/
static int64_t ts_distance(int64_t ts_hi,
                           AVRational tb_hi,
                           int64_t ts_lo,
                           AVRational tb_lo)
{
    /* Cross-multiply both timestamps onto a common scale and subtract.
     * The result is only meaningful relative to other distances computed
     * with the same pair of time bases (see doc comment above).
     * NOTE(review): the products can overflow int64_t for very large
     * timestamps/time bases — confirm callers keep values small. */
    return ts_hi * tb_hi.num * tb_lo.den - ts_lo * tb_lo.num * tb_hi.den;
}
 
/**
* Partial search for keyframes in multiple streams.
*
* This routine searches in each stream for the next lower and the next higher
* timestamp compared to the given target timestamp. The search starts at the current
* file position and ends at the file position, where all streams have already been
* examined (or when all higher key frames are found in the first iteration).
*
* This routine is called iteratively with an exponential backoff to find the lower
* timestamp.
*
* @param s format context
* @param timestamp target timestamp (or position, if AVSEEK_FLAG_BYTE)
* @param timebase time base for timestamps
* @param flags seeking flags
* @param sync array with information per stream
* @param keyframes_to_find count of keyframes to find in total
* @param found_lo ptr to the count of already found low timestamp keyframes
* @param found_hi ptr to the count of already found high timestamp keyframes
* @param first_iter flag for first iteration
*/
static void search_hi_lo_keyframes(AVFormatContext *s,
                                   int64_t timestamp,
                                   AVRational timebase,
                                   int flags,
                                   AVSyncPoint *sync,
                                   int keyframes_to_find,
                                   int *found_lo,
                                   int *found_hi,
                                   int first_iter)
{
    AVPacket pkt;
    AVSyncPoint *sp;
    AVStream *st;
    int idx;
    int flg;
    int terminated_count = 0;
    int64_t pos;
    int64_t pts, dts;   // PTS/DTS from stream
    int64_t ts;         // PTS in stream-local time base or position for byte seeking
    AVRational ts_tb;   // Time base of the stream or 1:1 for byte seeking

    /* Read packets from the current file position until either EOF, all
     * streams have hit their per-iteration termination timestamp, or (on
     * the first iteration) a high keyframe was found for every stream. */
    for (;;) {
        if (av_read_frame(s, &pkt) < 0) {
            // EOF or error, make sure high flags are set
            for (idx = 0; idx < s->nb_streams; ++idx) {
                if (s->streams[idx]->discard < AVDISCARD_ALL) {
                    sp = &sync[idx];
                    if (sp->pos_hi == INT64_MAX) {
                        // no high frame exists for this stream
                        (*found_hi)++;
                        sp->ts_hi  = INT64_MAX;
                        sp->pos_hi = INT64_MAX - 1;
                    }
                }
            }
            break;
        }

        idx = pkt.stream_index;
        st = s->streams[idx];
        if (st->discard >= AVDISCARD_ALL)
            // this stream is not active, skip packet
            continue;

        sp = &sync[idx];

        flg = pkt.flags;
        pos = pkt.pos;
        pts = pkt.pts;
        dts = pkt.dts;
        if (pts == AV_NOPTS_VALUE)
            // some formats don't provide PTS, only DTS
            pts = dts;

        av_free_packet(&pkt);

        // Multi-frame packets only return position for the very first frame.
        // Other frames are read with position == -1. Therefore, we note down
        // last known position of a frame and use it if a frame without
        // position arrives. In this way, it's possible to seek to proper
        // position. Additionally, for parsers not providing position at all,
        // an approximation will be used (starting position of this iteration).
        if (pos < 0)
            pos = sp->last_pos;
        else
            sp->last_pos = pos;

        // Evaluate key frames with known TS (or any frames, if AVSEEK_FLAG_ANY set).
        if (pts != AV_NOPTS_VALUE &&
            ((flg & AV_PKT_FLAG_KEY) || (flags & AVSEEK_FLAG_ANY))) {
            if (flags & AVSEEK_FLAG_BYTE) {
                // for byte seeking, use position as timestamp
                ts        = pos;
                ts_tb.num = 1;
                ts_tb.den = 1;
            } else {
                // otherwise, get stream time_base
                ts    = pts;
                ts_tb = st->time_base;
            }

            if (sp->first_ts == AV_NOPTS_VALUE) {
                // Note down termination timestamp for the next iteration - when
                // we encounter a packet with the same timestamp, we will ignore
                // any further packets for this stream in next iteration (as they
                // are already evaluated).
                sp->first_ts    = ts;
                sp->first_ts_tb = ts_tb;
            }

            if (sp->term_ts != AV_NOPTS_VALUE &&
                av_compare_ts(ts, ts_tb, sp->term_ts, sp->term_ts_tb) > 0) {
                // past the end position from last iteration, ignore packet
                if (!sp->terminated) {
                    sp->terminated = 1;
                    ++terminated_count;
                    if (sp->pos_hi == INT64_MAX) {
                        // no high frame exists for this stream
                        (*found_hi)++;
                        sp->ts_hi  = INT64_MAX;
                        sp->pos_hi = INT64_MAX - 1;
                    }
                    if (terminated_count == keyframes_to_find)
                        break;  // all terminated, iteration done
                }
                continue;
            }

            if (av_compare_ts(ts, ts_tb, timestamp, timebase) <= 0) {
                // keyframe found before target timestamp
                if (sp->pos_lo == INT64_MAX) {
                    // found first keyframe lower than target timestamp
                    (*found_lo)++;
                    sp->ts_lo  = ts;
                    sp->pos_lo = pos;
                } else if (sp->ts_lo < ts) {
                    // found a better match (closer to target timestamp)
                    sp->ts_lo  = ts;
                    sp->pos_lo = pos;
                }
            }
            if (av_compare_ts(ts, ts_tb, timestamp, timebase) >= 0) {
                // keyframe found after target timestamp
                if (sp->pos_hi == INT64_MAX) {
                    // found first keyframe higher than target timestamp
                    (*found_hi)++;
                    sp->ts_hi  = ts;
                    sp->pos_hi = pos;
                    if (*found_hi >= keyframes_to_find && first_iter) {
                        // We found high frame for all. They may get updated
                        // to TS closer to target TS in later iterations (which
                        // will stop at start position of previous iteration).
                        break;
                    }
                } else if (sp->ts_hi > ts) {
                    // found a better match (actually, shouldn't happen)
                    sp->ts_hi  = ts;
                    sp->pos_hi = pos;
                }
            }
        }
    }

    // Clean up the parser.
    ff_read_frame_flush(s);
}
 
/* Generic sync-point search: scans backwards from 'pos' with exponentially
 * growing steps until every active stream has a keyframe on both sides of
 * 'ts', then repositions to the earliest usable keyframe. Contract is
 * documented on the prototype in seek.h. Returns the chosen position or -1. */
int64_t ff_gen_syncpoint_search(AVFormatContext *s,
                                int stream_index,
                                int64_t pos,
                                int64_t ts_min,
                                int64_t ts,
                                int64_t ts_max,
                                int flags)
{
    AVSyncPoint *sync, *sp;
    AVStream *st;
    int i;
    int keyframes_to_find = 0;
    int64_t curpos;
    int64_t step;
    int found_lo = 0, found_hi = 0;
    int64_t min_distance, distance;
    int64_t min_pos = 0;
    int first_iter = 1;
    AVRational time_base;

    if (flags & AVSEEK_FLAG_BYTE) {
        // for byte seeking, we have exact 1:1 "timestamps" - positions
        time_base.num = 1;
        time_base.den = 1;
    } else {
        if (stream_index >= 0) {
            // we have a reference stream, which time base we use
            st = s->streams[stream_index];
            time_base = st->time_base;
        } else {
            // no reference stream, use AV_TIME_BASE as reference time base
            time_base.num = 1;
            time_base.den = AV_TIME_BASE;
        }
    }

    // Initialize syncpoint structures for each stream.
    sync = av_malloc(s->nb_streams * sizeof(AVSyncPoint));
    if (!sync)
        // cannot allocate helper structure
        return -1;

    for (i = 0; i < s->nb_streams; ++i) {
        st = s->streams[i];
        sp = &sync[i];

        sp->pos_lo     = INT64_MAX;
        sp->ts_lo      = INT64_MAX;
        sp->pos_hi     = INT64_MAX;
        sp->ts_hi      = INT64_MAX;
        sp->terminated = 0;
        sp->first_ts   = AV_NOPTS_VALUE;
        sp->term_ts    = ts_max;   // first iteration must not read past ts_max
        sp->term_ts_tb = time_base;
        sp->last_pos   = pos;

        st->cur_dts    = AV_NOPTS_VALUE;

        if (st->discard < AVDISCARD_ALL)
            ++keyframes_to_find;
    }

    if (!keyframes_to_find) {
        // no stream active, error
        av_free(sync);
        return -1;
    }

    // Find keyframes in all active streams with timestamp/position just before
    // and just after requested timestamp/position.
    step   = s->pb->buffer_size;
    curpos = FFMAX(pos - step / 2, 0);
    for (;;) {
        avio_seek(s->pb, curpos, SEEK_SET);
        search_hi_lo_keyframes(s,
                               ts, time_base,
                               flags,
                               sync,
                               keyframes_to_find,
                               &found_lo, &found_hi,
                               first_iter);
        if (found_lo == keyframes_to_find && found_hi == keyframes_to_find)
            break;  // have all keyframes we wanted
        if (!curpos)
            break;  // cannot go back anymore

        // back off exponentially from the original position
        curpos = pos - step;
        if (curpos < 0)
            curpos = 0;
        step  *= 2;

        // switch termination positions
        for (i = 0; i < s->nb_streams; ++i) {
            st = s->streams[i];
            st->cur_dts = AV_NOPTS_VALUE;

            sp = &sync[i];
            if (sp->first_ts != AV_NOPTS_VALUE) {
                // next iteration stops where this one started reading
                sp->term_ts    = sp->first_ts;
                sp->term_ts_tb = sp->first_ts_tb;
                sp->first_ts   = AV_NOPTS_VALUE;
            }
            sp->terminated = 0;
            sp->last_pos   = curpos;
        }
        first_iter = 0;
    }

    // Find actual position to start decoding so that decoder synchronizes
    // closest to ts and between ts_min and ts_max.
    pos = INT64_MAX;

    for (i = 0; i < s->nb_streams; ++i) {
        st = s->streams[i];
        if (st->discard < AVDISCARD_ALL) {
            sp = &sync[i];
            min_distance = INT64_MAX;
            // Find timestamp closest to requested timestamp within min/max limits.
            if (sp->pos_lo != INT64_MAX
                && av_compare_ts(ts_min, time_base, sp->ts_lo, st->time_base) <= 0
                && av_compare_ts(sp->ts_lo, st->time_base, ts_max, time_base) <= 0) {
                // low timestamp is in range
                min_distance = ts_distance(ts, time_base, sp->ts_lo, st->time_base);
                min_pos      = sp->pos_lo;
            }
            if (sp->pos_hi != INT64_MAX
                && av_compare_ts(ts_min, time_base, sp->ts_hi, st->time_base) <= 0
                && av_compare_ts(sp->ts_hi, st->time_base, ts_max, time_base) <= 0) {
                // high timestamp is in range, check distance
                distance = ts_distance(sp->ts_hi, st->time_base, ts, time_base);
                if (distance < min_distance) {
                    min_distance = distance;
                    min_pos      = sp->pos_hi;
                }
            }
            if (min_distance == INT64_MAX) {
                // no timestamp is in range, cannot seek
                av_free(sync);
                return -1;
            }
            // decoding must start early enough for the worst-off stream
            if (min_pos < pos)
                pos = min_pos;
        }
    }

    avio_seek(s->pb, pos, SEEK_SET);
    av_free(sync);
    return pos;
}
 
/* Save demuxer parsing state (see seek.h): buffers and per-stream parser
 * fields are *moved* into the returned object (not copied), and the live
 * context is reset to a pristine state. Returns NULL on allocation failure. */
AVParserState *ff_store_parser_state(AVFormatContext *s)
{
    int i;
    AVStream *st;
    AVParserStreamState *ss;
    AVParserState *state = av_malloc(sizeof(AVParserState));
    if (!state)
        return NULL;

    state->stream_states = av_malloc(sizeof(AVParserStreamState) * s->nb_streams);
    if (!state->stream_states) {
        av_free(state);
        return NULL;
    }

    state->fpos = avio_tell(s->pb);

    // copy context structures
    state->packet_buffer                    = s->packet_buffer;
    state->parse_queue                      = s->parse_queue;
    state->raw_packet_buffer                = s->raw_packet_buffer;
    state->raw_packet_buffer_remaining_size = s->raw_packet_buffer_remaining_size;

    // detach the buffers from the live context (ownership moved to 'state')
    s->packet_buffer                    = NULL;
    s->parse_queue                      = NULL;
    s->raw_packet_buffer                = NULL;
    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

    // copy stream structures
    state->nb_streams = s->nb_streams;
    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        ss = &state->stream_states[i];

        ss->parser        = st->parser;
        ss->last_IP_pts   = st->last_IP_pts;
        ss->cur_dts       = st->cur_dts;
        ss->reference_dts = st->reference_dts;
        ss->probe_packets = st->probe_packets;

        // reset the live stream fields to their initial values
        st->parser        = NULL;
        st->last_IP_pts   = AV_NOPTS_VALUE;
        st->cur_dts       = AV_NOPTS_VALUE;
        st->reference_dts = AV_NOPTS_VALUE;
        st->probe_packets = MAX_PROBE_PACKETS;
    }

    return state;
}
 
/* Restore state previously saved by ff_store_parser_state() and free the
 * state object. The current parser state is flushed first, then the saved
 * buffers/fields are moved back into the live context. */
void ff_restore_parser_state(AVFormatContext *s, AVParserState *state)
{
    int i;
    AVStream *st;
    AVParserStreamState *ss;
    // drop whatever was parsed since the state was stored
    ff_read_frame_flush(s);

    if (!state)
        return;

    avio_seek(s->pb, state->fpos, SEEK_SET);

    // copy context structures
    s->packet_buffer                    = state->packet_buffer;
    s->parse_queue                      = state->parse_queue;
    s->raw_packet_buffer                = state->raw_packet_buffer;
    s->raw_packet_buffer_remaining_size = state->raw_packet_buffer_remaining_size;

    // copy stream structures
    for (i = 0; i < state->nb_streams; i++) {
        st = s->streams[i];
        ss = &state->stream_states[i];

        st->parser        = ss->parser;
        st->last_IP_pts   = ss->last_IP_pts;
        st->cur_dts       = ss->cur_dts;
        st->reference_dts = ss->reference_dts;
        st->probe_packets = ss->probe_packets;
    }

    // only the container is freed; the buffers now belong to 's' again
    av_free(state->stream_states);
    av_free(state);
}
 
/* Release every packet in a linked AVPacketList and the list nodes themselves. */
static void free_packet_list(AVPacketList *pktl)
{
    while (pktl) {
        AVPacketList *next = pktl->next;
        av_free_packet(&pktl->pkt);
        av_free(pktl);
        pktl = next;
    }
}
 
/* Discard a saved parser state without restoring it: close any saved
 * per-stream parsers, free the buffered packet lists, then the state. */
void ff_free_parser_state(AVFormatContext *s, AVParserState *state)
{
    int i;

    if (!state)
        return;

    for (i = 0; i < state->nb_streams; i++) {
        AVParserStreamState *ss = &state->stream_states[i];
        if (ss->parser)
            av_parser_close(ss->parser);
    }

    free_packet_list(state->packet_buffer);
    free_packet_list(state->parse_queue);
    free_packet_list(state->raw_packet_buffer);

    av_free(state->stream_states);
    av_free(state);
}
/contrib/sdk/sources/ffmpeg/libavformat/seek.h
0,0 → 1,123
/*
* seek utility functions for use within format handlers
*
* Copyright (c) 2009 Ivan Schreter
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_SEEK_H
#define AVFORMAT_SEEK_H
 
#include "avformat.h"
 
/**
 * structure to store parser state of one AVStream
 *
 * Mirrors the parser-related fields of AVStream so they can be moved out
 * and back by ff_store_parser_state()/ff_restore_parser_state().
 */
typedef struct AVParserStreamState {
    // saved members of AVStream
    AVCodecParserContext   *parser;
    int64_t                 last_IP_pts;
    int64_t                 cur_dts;
    int64_t                 reference_dts;
    int                     probe_packets;
} AVParserStreamState;

/**
 * structure to store parser state of AVFormat
 */
typedef struct AVParserState {
    int64_t         fpos;                   ///< file position at the time of call

    // saved members of AVFormatContext
    AVPacketList   *packet_buffer;          ///< packet buffer of original state
    AVPacketList   *parse_queue;            ///< parse queue of original state
    AVPacketList   *raw_packet_buffer;      ///< raw packet buffer of original state
    int raw_packet_buffer_remaining_size;   ///< remaining space in raw_packet_buffer

    // saved info for streams
    int                  nb_streams;        ///< number of streams with stored state
    AVParserStreamState *stream_states;     ///< states of individual streams (array)
} AVParserState;
 
/**
 * Search for the sync point of all active streams.
 *
 * This routine is not supposed to be called directly by a user application,
 * but by demuxers.
 *
 * A sync point is defined as a point in the stream such that, when decoding
 * starts from this point, the decoded output of all streams synchronizes
 * closest to the given timestamp ts. This routine also takes timestamp limits
 * into account: the output will synchronize no sooner than ts_min and no
 * later than ts_max.
 *
 * @param stream_index stream index for time base reference of timestamps
 * @param pos approximate position where to start searching for key frames
 * @param min_ts minimum allowed timestamp (position, if AVSEEK_FLAG_BYTE set)
 * @param ts target timestamp (or position, if AVSEEK_FLAG_BYTE set in flags)
 * @param max_ts maximum allowed timestamp (position, if AVSEEK_FLAG_BYTE set)
 * @param flags if AVSEEK_FLAG_ANY is set, seek to any frame, otherwise only
 *              to a keyframe. If AVSEEK_FLAG_BYTE is set, search by
 *              position, not by timestamp.
 * @return -1 if no such sync point could be found, otherwise stream position
 *         (stream is repositioned to this position)
 */
int64_t ff_gen_syncpoint_search(AVFormatContext *s,
                                int stream_index,
                                int64_t pos,
                                int64_t min_ts,
                                int64_t ts,
                                int64_t max_ts,
                                int flags);

/**
 * Store current parser state and file position.
 *
 * This function can be used by demuxers before a destructive seeking algorithm
 * to store the parser state. Depending on the outcome of the seek, either the
 * original state can be restored or the new state kept and the original state
 * freed.
 *
 * @note As a side effect, the original parser state is reset, since structures
 *       are relinked to the stored state instead of being deeply-copied (for
 *       performance reasons and to keep the code simple).
 *
 * @param s context from which to save state
 * @return parser state object or NULL if memory could not be allocated
 */
AVParserState *ff_store_parser_state(AVFormatContext *s);

/**
 * Restore previously saved parser state and file position.
 *
 * Saved state will be invalidated and freed by this call, since internal
 * structures will be relinked back to the stored state instead of being
 * deeply-copied.
 *
 * @param s context to which to restore state (same as used for storing state)
 * @param state state to restore
 */
void ff_restore_parser_state(AVFormatContext *s, AVParserState *state);

/**
 * Free previously saved parser state.
 *
 * @param s context to which the state belongs (same as used for storing state)
 * @param state state to free
 */
void ff_free_parser_state(AVFormatContext *s, AVParserState *state);
 
#endif /* AVFORMAT_SEEK_H */
/contrib/sdk/sources/ffmpeg/libavformat/segafilm.c
0,0 → 1,290
/*
* Sega FILM Format (CPK) Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Sega FILM (.cpk) file demuxer
* by Mike Melanson (melanson@pcisys.net)
* For more information regarding the Sega FILM file format, visit:
* http://www.pcisys.net/~melanson/codecs/
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
 
#define FILM_TAG MKBETAG('F', 'I', 'L', 'M')
#define FDSC_TAG MKBETAG('F', 'D', 'S', 'C')
#define STAB_TAG MKBETAG('S', 'T', 'A', 'B')
#define CVID_TAG MKBETAG('c', 'v', 'i', 'd')
#define RAW_TAG MKBETAG('r', 'a', 'w', ' ')
 
/* One entry of the FILM sample (chunk) table parsed from the STAB section. */
typedef struct {
  int stream;                  // destination stream index (audio or video)
  int64_t sample_offset;       // absolute file offset of the sample data
  unsigned int sample_size;    // sample payload size in bytes
  int64_t pts;                 // presentation timestamp in stream time base
  int keyframe;                // non-zero for keyframes (video samples only)
} film_sample;

/* Demuxer private state for Sega FILM / CPK files. */
typedef struct FilmDemuxContext {
    int video_stream_index;
    int audio_stream_index;

    enum AVCodecID audio_type;
    unsigned int audio_samplerate;
    unsigned int audio_bits;
    unsigned int audio_channels;

    enum AVCodecID video_type;
    unsigned int sample_count;     // number of entries in sample_table
    film_sample *sample_table;     // owned; freed in film_read_close()
    unsigned int current_sample;   // next sample to deliver in read_packet

    unsigned int base_clock;       // video time base denominator from STAB
    unsigned int version;          // 0 = Lemmings .film variant, else Saturn .cpk
} FilmDemuxContext;
 
/* Probe: a FILM file starts with the big-endian 'FILM' magic. */
static int film_probe(AVProbeData *p)
{
    return AV_RB32(p->buf) == FILM_TAG ? AVPROBE_SCORE_MAX : 0;
}
 
/**
 * Parse the FILM header: the FDSC chunk (codec/geometry/audio parameters)
 * and the STAB chunk (sample table), creating the audio/video streams.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int film_read_header(AVFormatContext *s)
{
    FilmDemuxContext *film = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    unsigned char scratch[256];
    int i;
    unsigned int data_offset;
    unsigned int audio_frame_counter;

    film->sample_table = NULL;

    /* load the main FILM header */
    if (avio_read(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    data_offset = AV_RB32(&scratch[4]);
    film->version = AV_RB32(&scratch[8]);

    /* load the FDSC chunk */
    if (film->version == 0) {
        /* special case for Lemmings .film files; 20-byte header */
        if (avio_read(pb, scratch, 20) != 20)
            return AVERROR(EIO);
        /* make some assumptions about the audio parameters */
        film->audio_type = AV_CODEC_ID_PCM_S8;
        film->audio_samplerate = 22050;
        film->audio_channels = 1;
        film->audio_bits = 8;
    } else {
        /* normal Saturn .cpk files; 32-byte header */
        if (avio_read(pb, scratch, 32) != 32)
            return AVERROR(EIO);
        film->audio_samplerate = AV_RB16(&scratch[24]);
        film->audio_channels = scratch[21];
        film->audio_bits = scratch[22];
        if (scratch[23] == 2 && film->audio_channels > 0)
            film->audio_type = AV_CODEC_ID_ADPCM_ADX;
        else if (film->audio_channels > 0) {
            if (film->audio_bits == 8)
                film->audio_type = AV_CODEC_ID_PCM_S8_PLANAR;
            else if (film->audio_bits == 16)
                film->audio_type = AV_CODEC_ID_PCM_S16BE_PLANAR;
            else
                film->audio_type = AV_CODEC_ID_NONE;
        } else
            film->audio_type = AV_CODEC_ID_NONE;
    }

    if (AV_RB32(&scratch[0]) != FDSC_TAG)
        return AVERROR_INVALIDDATA;

    if (AV_RB32(&scratch[8]) == CVID_TAG) {
        film->video_type = AV_CODEC_ID_CINEPAK;
    } else if (AV_RB32(&scratch[8]) == RAW_TAG) {
        film->video_type = AV_CODEC_ID_RAWVIDEO;
    } else {
        film->video_type = AV_CODEC_ID_NONE;
    }

    /* initialize the decoder streams */
    if (film->video_type) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        film->video_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = film->video_type;
        st->codec->codec_tag = 0;  /* no fourcc */
        st->codec->width = AV_RB32(&scratch[16]);
        st->codec->height = AV_RB32(&scratch[12]);

        if (film->video_type == AV_CODEC_ID_RAWVIDEO) {
            if (scratch[20] == 24) {
                st->codec->pix_fmt = AV_PIX_FMT_RGB24;
            } else {
                av_log(s, AV_LOG_ERROR, "raw video is using unhandled %dbpp\n", scratch[20]);
                return -1;
            }
        }
    }

    if (film->audio_type) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        film->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = film->audio_type;
        st->codec->codec_tag = 1;
        st->codec->channels = film->audio_channels;
        st->codec->sample_rate = film->audio_samplerate;

        if (film->audio_type == AV_CODEC_ID_ADPCM_ADX) {
            /* ADX packs 32 samples per 18-byte block per channel */
            st->codec->bits_per_coded_sample = 18 * 8 / 32;
            st->codec->block_align = st->codec->channels * 18;
            st->need_parsing = AVSTREAM_PARSE_FULL;
        } else {
            st->codec->bits_per_coded_sample = film->audio_bits;
            st->codec->block_align = st->codec->channels *
                st->codec->bits_per_coded_sample / 8;
        }

        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
    }

    /* load the sample table */
    if (avio_read(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    if (AV_RB32(&scratch[0]) != STAB_TAG)
        return AVERROR_INVALIDDATA;
    film->base_clock = AV_RB32(&scratch[8]);
    film->sample_count = AV_RB32(&scratch[12]);
    if(film->sample_count >= UINT_MAX / sizeof(film_sample))
        return -1;
    film->sample_table = av_malloc(film->sample_count * sizeof(film_sample));
    if (!film->sample_table)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            avpriv_set_pts_info(st, 33, 1, film->base_clock);
        else
            avpriv_set_pts_info(st, 64, 1, film->audio_samplerate);
    }

    audio_frame_counter = 0;
    for (i = 0; i < film->sample_count; i++) {
        /* load the next sample record and transfer it to an internal struct */
        if (avio_read(pb, scratch, 16) != 16) {
            av_freep(&film->sample_table);
            return AVERROR(EIO);
        }
        film->sample_table[i].sample_offset =
            data_offset + AV_RB32(&scratch[0]);
        film->sample_table[i].sample_size = AV_RB32(&scratch[4]);
        if (film->sample_table[i].sample_size > INT_MAX / 4) {
            /* free the table here too: only the EIO path above did, which
             * leaked it on this error return */
            av_freep(&film->sample_table);
            return AVERROR_INVALIDDATA;
        }
        if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) {
            /* 0xFFFFFFFF in the pts field marks an audio sample */
            film->sample_table[i].stream = film->audio_stream_index;
            film->sample_table[i].pts = audio_frame_counter;

            if (film->audio_type == AV_CODEC_ID_ADPCM_ADX)
                audio_frame_counter += (film->sample_table[i].sample_size * 32 /
                    (18 * film->audio_channels));
            else if (film->audio_type != AV_CODEC_ID_NONE)
                audio_frame_counter += (film->sample_table[i].sample_size /
                    (film->audio_channels * film->audio_bits / 8));
        } else {
            film->sample_table[i].stream = film->video_stream_index;
            film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF;
            /* high bit set means "not a keyframe" */
            film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
        }
    }

    film->current_sample = 0;

    return 0;
}
 
/**
 * Deliver the next sample from the FILM sample table as a packet.
 *
 * @return 0 or the packet size on success, AVERROR_EOF when the table is
 *         exhausted, or a negative AVERROR code on read failure
 */
static int film_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    FilmDemuxContext *film = s->priv_data;
    AVIOContext *pb = s->pb;
    film_sample *sample;
    int ret = 0;

    if (film->current_sample >= film->sample_count)
        return AVERROR_EOF;

    sample = &film->sample_table[film->current_sample];

    /* position the stream (will probably be there anyway) */
    avio_seek(pb, sample->sample_offset, SEEK_SET);

    /* do a special song and dance when loading FILM Cinepak chunks */
    if ((sample->stream == film->video_stream_index) &&
        (film->video_type == AV_CODEC_ID_CINEPAK)) {
        pkt->pos= avio_tell(pb);
        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR(ENOMEM);
        /* check the read: the original code ignored a short/failed read and
         * returned success with an undefined packet payload */
        ret = avio_read(pb, pkt->data, sample->sample_size);
        if (ret != sample->sample_size) {
            av_free_packet(pkt);
            return AVERROR(EIO);
        }
        ret = 0;
    } else {
        ret= av_get_packet(pb, pkt, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR(EIO);
    }

    pkt->stream_index = sample->stream;
    pkt->pts = sample->pts;

    film->current_sample++;

    return ret;
}
 
/* Release the demuxer's sample table on close. */
static int film_read_close(AVFormatContext *s)
{
    FilmDemuxContext *film = s->priv_data;
    av_freep(&film->sample_table);
    return 0;
}
 
/* Demuxer registration for Sega FILM / CPK; state lives in FilmDemuxContext. */
AVInputFormat ff_segafilm_demuxer = {
    .name           = "film_cpk",
    .long_name      = NULL_IF_CONFIG_SMALL("Sega FILM / CPK"),
    .priv_data_size = sizeof(FilmDemuxContext),
    .read_probe     = film_probe,
    .read_header    = film_read_header,
    .read_packet    = film_read_packet,
    .read_close     = film_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/segment.c
0,0 → 1,826
/*
* Copyright (c) 2011, Luca Barbato
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file generic segmenter
* M3U8 specification can be found here:
* @url{http://tools.ietf.org/id/draft-pantos-http-live-streaming}
*/
 
/* #define DEBUG */
 
#include <float.h>
 
#include "avformat.h"
#include "internal.h"
 
#include "libavutil/avassert.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/avstring.h"
#include "libavutil/parseutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/timestamp.h"
 
/* One written segment, kept in a singly linked list for list-file output. */
typedef struct SegmentListEntry {
    int index;                      // segment sequence number
    double start_time, end_time;    // segment bounds in seconds
    int64_t start_pts;              // first pts of the segment
    int64_t offset_pts;             // timestamp offset applied to the segment
    char filename[1024];            // output filename of this segment
    struct SegmentListEntry *next;  // next entry, NULL for the last one
} SegmentListEntry;
 
/* Format of the generated segment list file. */
typedef enum {
    LIST_TYPE_UNDEFINED = -1,
    LIST_TYPE_FLAT = 0,
    LIST_TYPE_CSV,
    LIST_TYPE_M3U8,
    LIST_TYPE_EXT, ///< deprecated
    LIST_TYPE_FFCONCAT,
    LIST_TYPE_NB,
} ListType;

/* Bit flags for the list_flags option. */
#define SEGMENT_LIST_FLAG_CACHE 1
#define SEGMENT_LIST_FLAG_LIVE  2
 
/* Muxer private state for the generic segmenter. */
typedef struct {
    const AVClass *class;  /**< Class for private options. */
    int segment_idx;       ///< index of the segment file to write, starting from 0
    int segment_idx_wrap;  ///< number after which the index wraps
    int segment_count;     ///< number of segment files already written
    AVOutputFormat *oformat;
    AVFormatContext *avf;
    char *format;              ///< format to use for output segment files
    char *list;                ///< filename for the segment list file
    int   list_flags;          ///< flags affecting list generation
    int   list_size;           ///< number of entries for the segment list file
    ListType list_type;        ///< set the list type
    AVIOContext *list_pb;      ///< list file put-byte context
    char *time_str;            ///< segment duration specification string
    int64_t time;              ///< segment duration

    char *times_str;           ///< segment times specification string
    int64_t *times;            ///< list of segment interval specification
    int nb_times;              ///< number of elements in the times array

    char *frames_str;          ///< segment frame numbers specification string
    int *frames;               ///< list of frame number specification
    int nb_frames;             ///< number of elements in the frames array
    int frame_count;

    int64_t time_delta;
    int  individual_header_trailer; /**< Set by a private option. */
    int  write_header_trailer;      /**< Set by a private option. */

    int reset_timestamps;  ///< reset timestamps at the begin of each segment
    int64_t initial_offset;    ///< initial timestamps offset, expressed in microseconds
    char *reference_stream_specifier; ///< reference stream specifier
    int   reference_stream_index;

    SegmentListEntry cur_entry;              // entry being written right now
    SegmentListEntry *segment_list_entries;  // head of the written-segments list
    SegmentListEntry *segment_list_entries_end;  // tail, for O(1) append

    int is_first_pkt;      ///< tells if it is the first packet in the segment
} SegmentContext;
 
/* Write 'str' to ctx using CSV quoting rules: wrap in double quotes when it
 * contains a quote, comma, or newline, and double any embedded quotes. */
static void print_csv_escaped_str(AVIOContext *ctx, const char *str)
{
    const char *p;
    int quote = str[strcspn(str, "\",\n\r")] != '\0';

    if (quote)
        avio_w8(ctx, '"');

    for (p = str; *p; p++) {
        if (*p == '"')
            avio_w8(ctx, '"');
        avio_w8(ctx, *p);
    }

    if (quote)
        avio_w8(ctx, '"');
}
 
/* Allocate the per-segment output context (seg->avf) and mirror every input
 * stream into it, copying codec parameters and negotiating the codec tag. */
static int segment_mux_init(AVFormatContext *s)
{
    SegmentContext *seg = s->priv_data;
    AVFormatContext *oc;
    int i;

    seg->avf = oc = avformat_alloc_context();
    if (!oc)
        return AVERROR(ENOMEM);

    oc->oformat            = seg->oformat;
    oc->interrupt_callback = s->interrupt_callback;
    av_dict_copy(&oc->metadata, s->metadata, 0);

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st;
        AVCodecContext *icodec, *ocodec;

        if (!(st = avformat_new_stream(oc, NULL)))
            return AVERROR(ENOMEM);
        icodec = s->streams[i]->codec;
        ocodec = st->codec;
        avcodec_copy_context(ocodec, icodec);
        /* keep the input codec tag only if the output container either has no
         * tag table or maps this tag/id pair consistently; otherwise drop it */
        if (!oc->oformat->codec_tag ||
            av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == ocodec->codec_id ||
            av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0) {
            ocodec->codec_tag = icodec->codec_tag;
        } else {
            ocodec->codec_tag = 0;
        }
        st->sample_aspect_ratio = s->streams[i]->sample_aspect_ratio;
    }

    return 0;
}
 
/**
 * Expand the segment filename template (s->filename) with the current
 * segment index into oc->filename, applying segment_idx_wrap first when
 * set, and remember the result in cur_entry for the list file.
 *
 * @return 0 on success, AVERROR(EINVAL) if the template is invalid
 */
static int set_segment_filename(AVFormatContext *s)
{
    SegmentContext *seg = s->priv_data;
    AVFormatContext *oc = seg->avf;

    if (seg->segment_idx_wrap)
        seg->segment_idx %= seg->segment_idx_wrap;
    if (av_get_frame_filename(oc->filename, sizeof(oc->filename),
                              s->filename, seg->segment_idx) < 0) {
        av_log(oc, AV_LOG_ERROR, "Invalid segment filename template '%s'\n", s->filename);
        return AVERROR(EINVAL);
    }
    av_strlcpy(seg->cur_entry.filename, oc->filename, sizeof(seg->cur_entry.filename));
    return 0;
}
 
/**
 * Begin a new segment: optionally recreate the child muxer, compute the
 * new segment filename, open its output file and, if requested, write the
 * per-segment header.
 *
 * @param write_header if nonzero, rebuild seg->avf and write a header into
 *                     the new segment (individual_header_trailer mode)
 * @return 0 on success, a negative AVERROR code on failure
 */
static int segment_start(AVFormatContext *s, int write_header)
{
    SegmentContext *seg = s->priv_data;
    AVFormatContext *oc = seg->avf;
    int err = 0;

    if (write_header) {
        /* a fresh context is needed so the muxer re-emits its header state */
        avformat_free_context(oc);
        seg->avf = NULL;
        if ((err = segment_mux_init(s)) < 0)
            return err;
        oc = seg->avf;
    }

    seg->segment_idx++;
    if ((err = set_segment_filename(s)) < 0)
        return err;

    if ((err = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE,
                          &s->interrupt_callback, NULL)) < 0)
        return err;

    if (oc->oformat->priv_class && oc->priv_data)
        av_opt_set(oc->priv_data, "resend_headers", "1", 0); /* mpegts specific */

    if (write_header) {
        if ((err = avformat_write_header(oc, NULL)) < 0)
            return err;
    }

    seg->is_first_pkt = 1;
    return 0;
}
 
/**
 * (Re)open the segment list file seg->list for writing and emit the
 * list-type specific preamble: the M3U8 header lines (version, media
 * sequence, cache policy, target duration computed from the recorded
 * entries) or the ffconcat version line.  Flat/CSV/EXT lists have no
 * preamble.
 *
 * @return 0 or a positive value on success, a negative AVERROR on failure
 */
static int segment_list_open(AVFormatContext *s)
{
    SegmentContext *seg = s->priv_data;
    int ret;

    ret = avio_open2(&seg->list_pb, seg->list, AVIO_FLAG_WRITE,
                     &s->interrupt_callback, NULL);
    if (ret < 0)
        return ret;

    if (seg->list_type == LIST_TYPE_M3U8 && seg->segment_list_entries) {
        SegmentListEntry *entry;
        double max_duration = 0;

        avio_printf(seg->list_pb, "#EXTM3U\n");
        avio_printf(seg->list_pb, "#EXT-X-VERSION:3\n");
        avio_printf(seg->list_pb, "#EXT-X-MEDIA-SEQUENCE:%d\n", seg->segment_list_entries->index);
        avio_printf(seg->list_pb, "#EXT-X-ALLOW-CACHE:%s\n",
                    seg->list_flags & SEGMENT_LIST_FLAG_CACHE ? "YES" : "NO");

        /* target duration must be >= the longest segment, hence the ceil */
        for (entry = seg->segment_list_entries; entry; entry = entry->next)
            max_duration = FFMAX(max_duration, entry->end_time - entry->start_time);
        avio_printf(seg->list_pb, "#EXT-X-TARGETDURATION:%"PRId64"\n", (int64_t)ceil(max_duration));
    } else if (seg->list_type == LIST_TYPE_FFCONCAT) {
        avio_printf(seg->list_pb, "ffconcat version 1.0\n");
    }

    return ret;
}
 
/* Append one segment entry to the list file in the format selected by
 * list_type.  On an escaping failure in ffconcat mode a warning is logged
 * and the entry is skipped. */
static void segment_list_print_entry(AVIOContext *list_ioctx,
                                     ListType list_type,
                                     const SegmentListEntry *list_entry,
                                     void *log_ctx)
{
    if (list_type == LIST_TYPE_FLAT) {
        avio_printf(list_ioctx, "%s\n", list_entry->filename);
    } else if (list_type == LIST_TYPE_CSV || list_type == LIST_TYPE_EXT) {
        print_csv_escaped_str(list_ioctx, list_entry->filename);
        avio_printf(list_ioctx, ",%f,%f\n", list_entry->start_time, list_entry->end_time);
    } else if (list_type == LIST_TYPE_M3U8) {
        avio_printf(list_ioctx, "#EXTINF:%f,\n%s\n",
                    list_entry->end_time - list_entry->start_time, list_entry->filename);
    } else if (list_type == LIST_TYPE_FFCONCAT) {
        char *escaped;

        if (av_escape(&escaped, list_entry->filename, NULL,
                      AV_ESCAPE_MODE_AUTO, AV_ESCAPE_FLAG_WHITESPACE) < 0) {
            av_log(log_ctx, AV_LOG_WARNING,
                   "Error writing list entry '%s' in list file\n", list_entry->filename);
            return;
        }
        avio_printf(list_ioctx, "file %s\n", escaped);
        av_free(escaped);
    } else {
        av_assert0(!"Invalid list type");
    }
}
 
/**
 * Finish the current segment: flush/trailer the child muxer, record the
 * segment in the in-memory entry list (when a bounded list or M3U8 is
 * used) and rewrite the list file, then close the segment's output.
 *
 * @param write_trailer nonzero to write a trailer into this segment
 * @param is_last       nonzero when this is the final segment (appends
 *                      #EXT-X-ENDLIST in M3U8 mode)
 * @return 0 on success, a negative AVERROR code on failure
 */
static int segment_end(AVFormatContext *s, int write_trailer, int is_last)
{
    SegmentContext *seg = s->priv_data;
    AVFormatContext *oc = seg->avf;
    int ret = 0;

    av_write_frame(oc, NULL); /* Flush any buffered data (fragmented mp4) */
    if (write_trailer)
        ret = av_write_trailer(oc);

    if (ret < 0)
        av_log(s, AV_LOG_ERROR, "Failure occurred when ending segment '%s'\n",
               oc->filename);

    if (seg->list) {
        if (seg->list_size || seg->list_type == LIST_TYPE_M3U8) {
            SegmentListEntry *entry = av_mallocz(sizeof(*entry));
            if (!entry) {
                ret = AVERROR(ENOMEM);
                /* NOTE(review): this goto also skips the segment_count
                 * increment below — confirm that is intentional */
                goto end;
            }

            /* append new element */
            memcpy(entry, &seg->cur_entry, sizeof(*entry));
            if (!seg->segment_list_entries)
                seg->segment_list_entries = seg->segment_list_entries_end = entry;
            else
                seg->segment_list_entries_end->next = entry;
            seg->segment_list_entries_end = entry;

            /* drop first item */
            if (seg->list_size && seg->segment_count > seg->list_size) {
                entry = seg->segment_list_entries;
                seg->segment_list_entries = seg->segment_list_entries->next;
                av_freep(&entry);
            }

            /* the list file is rewritten from scratch on every segment end */
            avio_close(seg->list_pb);
            if ((ret = segment_list_open(s)) < 0)
                goto end;
            for (entry = seg->segment_list_entries; entry; entry = entry->next)
                segment_list_print_entry(seg->list_pb, seg->list_type, entry, s);
            if (seg->list_type == LIST_TYPE_M3U8 && is_last)
                avio_printf(seg->list_pb, "#EXT-X-ENDLIST\n");
        } else {
            /* unbounded non-M3U8 list: just append the new entry */
            segment_list_print_entry(seg->list_pb, seg->list_type, &seg->cur_entry, s);
        }
        avio_flush(seg->list_pb);
    }

    av_log(s, AV_LOG_VERBOSE, "segment:'%s' count:%d ended\n",
           seg->avf->filename, seg->segment_count);
    seg->segment_count++;

end:
    avio_close(oc->pb);

    return ret;
}
 
/**
 * Parse a comma-separated list of time specifications into a newly
 * allocated array of monotonically non-decreasing int64_t timestamps
 * (microseconds, as produced by av_parse_time()).
 *
 * @param log_ctx   context for av_log() messages
 * @param times     on success, *times points to the array; caller owns it
 *                  and must free it with av_freep()
 * @param nb_times  on success, set to the number of parsed entries
 * @param times_str string to parse; must not be NULL
 * @return 0 on success, a negative AVERROR code on failure
 */
static int parse_times(void *log_ctx, int64_t **times, int *nb_times,
                       const char *times_str)
{
    char *p;
    int i, ret = 0;
    char *times_str1 = av_strdup(times_str);
    char *saveptr = NULL;

    if (!times_str1)
        return AVERROR(ENOMEM);

#define FAIL(err) ret = err; goto end

    /* number of entries = number of separating commas + 1 */
    *nb_times = 1;
    for (p = times_str1; *p; p++)
        if (*p == ',')
            (*nb_times)++;

    *times = av_malloc(sizeof(**times) * *nb_times);
    if (!*times) {
        av_log(log_ctx, AV_LOG_ERROR, "Could not allocate forced times array\n");
        FAIL(AVERROR(ENOMEM));
    }

    p = times_str1;
    for (i = 0; i < *nb_times; i++) {
        int64_t t;
        char *tstr = av_strtok(p, ",", &saveptr);
        p = NULL;

        if (!tstr || !tstr[0]) {
            av_log(log_ctx, AV_LOG_ERROR, "Empty time specification in times list %s\n",
                   times_str);
            FAIL(AVERROR(EINVAL));
        }

        ret = av_parse_time(&t, tstr, 1);
        if (ret < 0) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "Invalid time duration specification '%s' in times list %s\n", tstr, times_str);
            FAIL(AVERROR(EINVAL));
        }
        (*times)[i] = t;

        /* check on monotonicity: the offending (greater) value is
         * times[i-1], the following one times[i] — the log arguments
         * were previously swapped */
        if (i && (*times)[i-1] > (*times)[i]) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "Specified time %f is greater than the following time %f\n",
                   (float)((*times)[i-1])/1000000, (float)((*times)[i])/1000000);
            FAIL(AVERROR(EINVAL));
        }
    }

end:
    av_free(times_str1);
    return ret;
}
 
/**
 * Parse a comma-separated list of positive frame numbers into a newly
 * allocated, monotonically non-decreasing int array.
 *
 * @param log_ctx    context for av_log() messages
 * @param frames     on success, *frames points to the array; caller owns it
 *                   and must free it with av_freep()
 * @param nb_frames  on success, set to the number of parsed entries
 * @param frames_str string to parse; must not be NULL
 * @return 0 on success, a negative AVERROR code on failure
 */
static int parse_frames(void *log_ctx, int **frames, int *nb_frames,
                        const char *frames_str)
{
    char *p;
    int i, ret = 0;
    char *frames_str1 = av_strdup(frames_str);
    char *saveptr = NULL;

    if (!frames_str1)
        return AVERROR(ENOMEM);

#define FAIL(err) ret = err; goto end

    /* number of entries = number of separating commas + 1 */
    *nb_frames = 1;
    for (p = frames_str1; *p; p++)
        if (*p == ',')
            (*nb_frames)++;

    *frames = av_malloc(sizeof(**frames) * *nb_frames);
    if (!*frames) {
        av_log(log_ctx, AV_LOG_ERROR, "Could not allocate forced frames array\n");
        FAIL(AVERROR(ENOMEM));
    }

    p = frames_str1;
    for (i = 0; i < *nb_frames; i++) {
        long int f;
        char *tailptr;
        char *fstr = av_strtok(p, ",", &saveptr);

        p = NULL;
        if (!fstr) {
            av_log(log_ctx, AV_LOG_ERROR, "Empty frame specification in frame list %s\n",
                   frames_str);
            FAIL(AVERROR(EINVAL));
        }
        f = strtol(fstr, &tailptr, 10);
        /* message fixed: the check rejects f >= INT_MAX, not INT64_MAX */
        if (*tailptr || f <= 0 || f >= INT_MAX) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "Invalid argument '%s', must be a positive integer < INT_MAX\n",
                   fstr);
            FAIL(AVERROR(EINVAL));
        }
        (*frames)[i] = f;

        /* check on monotonicity: the offending (greater) value is
         * frames[i-1], the following one frames[i] — the log arguments
         * were previously swapped */
        if (i && (*frames)[i-1] > (*frames)[i]) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "Specified frame %d is greater than the following frame %d\n",
                   (*frames)[i-1], (*frames)[i]);
            FAIL(AVERROR(EINVAL));
        }
    }

end:
    av_free(frames_str1);
    return ret;
}
 
/* Create a write-only AVIOContext that discards everything written to it
 * (no write callback), backed by a 32 KiB buffer.  Used to swallow a
 * header or trailer in no-header/trailer mode.  Free with close_null_ctx(). */
static int open_null_ctx(AVIOContext **ctx)
{
    const int buf_size = 32768;
    uint8_t *buf = av_malloc(buf_size);

    if (!buf)
        return AVERROR(ENOMEM);

    *ctx = avio_alloc_context(buf, buf_size, AVIO_FLAG_WRITE,
                              NULL, NULL, NULL, NULL);
    if (!*ctx) {
        av_free(buf);
        return AVERROR(ENOMEM);
    }
    return 0;
}
 
/* Release an AVIOContext created by open_null_ctx() together with its
 * internal buffer. */
static void close_null_ctx(AVIOContext *pb)
{
    uint8_t *buf = pb->buffer;

    av_free(pb);
    av_free(buf);
}
 
/**
 * Resolve seg->reference_stream_specifier into a concrete stream index
 * (seg->reference_stream_index).  "auto" picks the first stream of the
 * highest-priority media type (video > audio > subtitle > data >
 * attachment), skipping attached-picture streams; any other value is
 * matched with avformat_match_stream_specifier().
 *
 * @return 0 on success, a negative AVERROR code if no stream matches
 */
static int select_reference_stream(AVFormatContext *s)
{
    SegmentContext *seg = s->priv_data;
    int ret, i;

    seg->reference_stream_index = -1;
    if (!strcmp(seg->reference_stream_specifier, "auto")) {
        /* select first index of type with highest priority */
        int type_index_map[AVMEDIA_TYPE_NB];
        static const enum AVMediaType type_priority_list[] = {
            AVMEDIA_TYPE_VIDEO,
            AVMEDIA_TYPE_AUDIO,
            AVMEDIA_TYPE_SUBTITLE,
            AVMEDIA_TYPE_DATA,
            AVMEDIA_TYPE_ATTACHMENT
        };
        enum AVMediaType type;

        for (i = 0; i < AVMEDIA_TYPE_NB; i++)
            type_index_map[i] = -1;

        /* select first index for each type */
        for (i = 0; i < s->nb_streams; i++) {
            type = s->streams[i]->codec->codec_type;
            if ((unsigned)type < AVMEDIA_TYPE_NB && type_index_map[type] == -1
                /* ignore attached pictures/cover art streams */
                && !(s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC))
                type_index_map[type] = i;
        }

        /* first priority type that has a stream wins */
        for (i = 0; i < FF_ARRAY_ELEMS(type_priority_list); i++) {
            type = type_priority_list[i];
            if ((seg->reference_stream_index = type_index_map[type]) >= 0)
                break;
        }
    } else {
        /* explicit specifier: first matching stream wins */
        for (i = 0; i < s->nb_streams; i++) {
            ret = avformat_match_stream_specifier(s, s->streams[i],
                                                  seg->reference_stream_specifier);
            if (ret < 0)
                return ret;
            if (ret > 0) {
                seg->reference_stream_index = i;
                break;
            }
        }
    }

    if (seg->reference_stream_index < 0) {
        av_log(s, AV_LOG_ERROR, "Could not select stream matching identifier '%s'\n",
               seg->reference_stream_specifier);
        return AVERROR(EINVAL);
    }

    return 0;
}
 
/**
 * Muxer write_header callback: validate the mutually exclusive splitting
 * options (segment_time / segment_times / segment_frames), open the list
 * file if requested, pick the reference stream, create the child muxer
 * and open/write the first segment's header.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int seg_write_header(AVFormatContext *s)
{
    SegmentContext *seg = s->priv_data;
    AVFormatContext *oc = NULL;
    int ret;

    seg->segment_count = 0;
    if (!seg->write_header_trailer)
        seg->individual_header_trailer = 0;

    /* at most one splitting criterion may be given */
    if (!!seg->time_str + !!seg->times_str + !!seg->frames_str > 1) {
        av_log(s, AV_LOG_ERROR,
               "segment_time, segment_times, and segment_frames options "
               "are mutually exclusive, select just one of them\n");
        return AVERROR(EINVAL);
    }

    if (seg->times_str) {
        if ((ret = parse_times(s, &seg->times, &seg->nb_times, seg->times_str)) < 0)
            return ret;
    } else if (seg->frames_str) {
        if ((ret = parse_frames(s, &seg->frames, &seg->nb_frames, seg->frames_str)) < 0)
            return ret;
    } else {
        /* set default value if not specified */
        if (!seg->time_str)
            seg->time_str = av_strdup("2");
        if ((ret = av_parse_time(&seg->time, seg->time_str, 1)) < 0) {
            av_log(s, AV_LOG_ERROR,
                   "Invalid time duration specification '%s' for segment_time option\n",
                   seg->time_str);
            return ret;
        }
    }

    if (seg->list) {
        /* infer the list type from the list filename extension when unset */
        if (seg->list_type == LIST_TYPE_UNDEFINED) {
            if      (av_match_ext(seg->list, "csv" )) seg->list_type = LIST_TYPE_CSV;
            else if (av_match_ext(seg->list, "ext" )) seg->list_type = LIST_TYPE_EXT;
            else if (av_match_ext(seg->list, "m3u8")) seg->list_type = LIST_TYPE_M3U8;
            else if (av_match_ext(seg->list, "ffcat,ffconcat")) seg->list_type = LIST_TYPE_FFCONCAT;
            else                                      seg->list_type = LIST_TYPE_FLAT;
        }
        if ((ret = segment_list_open(s)) < 0)
            goto fail;
    }
    if (seg->list_type == LIST_TYPE_EXT)
        av_log(s, AV_LOG_WARNING, "'ext' list type option is deprecated in favor of 'csv'\n");

    if ((ret = select_reference_stream(s)) < 0)
        goto fail;
    av_log(s, AV_LOG_VERBOSE, "Selected stream id:%d type:%s\n",
           seg->reference_stream_index,
           av_get_media_type_string(s->streams[seg->reference_stream_index]->codec->codec_type));

    seg->oformat = av_guess_format(seg->format, s->filename, NULL);

    if (!seg->oformat) {
        ret = AVERROR_MUXER_NOT_FOUND;
        goto fail;
    }
    if (seg->oformat->flags & AVFMT_NOFILE) {
        av_log(s, AV_LOG_ERROR, "format %s not supported.\n",
               seg->oformat->name);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if ((ret = segment_mux_init(s)) < 0)
        goto fail;
    oc = seg->avf;

    if ((ret = set_segment_filename(s)) < 0)
        goto fail;

    if (seg->write_header_trailer) {
        if ((ret = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE,
                              &s->interrupt_callback, NULL)) < 0)
            goto fail;
    } else {
        /* header goes into a discarded buffer: segments carry no header */
        if ((ret = open_null_ctx(&oc->pb)) < 0)
            goto fail;
    }

    if ((ret = avformat_write_header(oc, NULL)) < 0) {
        avio_close(oc->pb);
        goto fail;
    }
    seg->is_first_pkt = 1;

    if (oc->avoid_negative_ts > 0 && s->avoid_negative_ts < 0)
        s->avoid_negative_ts = 1;

    if (!seg->write_header_trailer) {
        close_null_ctx(oc->pb);
        if ((ret = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE,
                              &s->interrupt_callback, NULL)) < 0)
            goto fail;
    }

    /* NOTE: the success path falls through to fail: with ret == 0, so no
     * cleanup runs.  NOTE(review): `if (ret)` treats any nonzero — including
     * a positive — return as failure; this relies on the preceding calls
     * returning exactly 0 on success — confirm. */
fail:
    if (ret) {
        if (seg->list)
            avio_close(seg->list_pb);
        if (seg->avf)
            avformat_free_context(seg->avf);
    }
    return ret;
}
 
/**
 * Muxer write_packet callback: decide whether the current packet starts a
 * new segment (by forced times, forced frame numbers, or fixed duration),
 * roll the segment over at a keyframe of the reference stream, adjust the
 * packet timestamps for reset_timestamps/initial_offset, and forward the
 * packet to the child muxer.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int seg_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    SegmentContext *seg = s->priv_data;
    AVFormatContext *oc = seg->avf;
    AVStream *st = s->streams[pkt->stream_index];
    int64_t end_pts = INT64_MAX, offset;
    int start_frame = INT_MAX;
    int ret;

    if (seg->times) {
        end_pts = seg->segment_count < seg->nb_times ?
            seg->times[seg->segment_count] : INT64_MAX;
    } else if (seg->frames) {
        /* fix: was '<=', which read one element past the end of
         * seg->frames when segment_count == nb_frames */
        start_frame = seg->segment_count < seg->nb_frames ?
            seg->frames[seg->segment_count] : INT_MAX;
    } else {
        end_pts = seg->time * (seg->segment_count+1);
    }

    av_dlog(s, "packet stream:%d pts:%s pts_time:%s is_key:%d frame:%d\n",
            pkt->stream_index, av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &st->time_base),
            pkt->flags & AV_PKT_FLAG_KEY,
            pkt->stream_index == seg->reference_stream_index ? seg->frame_count : -1);

    /* split only at a keyframe of the reference stream that has reached
     * the frame/time threshold */
    if (pkt->stream_index == seg->reference_stream_index &&
        pkt->flags & AV_PKT_FLAG_KEY &&
        (seg->frame_count >= start_frame ||
         (pkt->pts != AV_NOPTS_VALUE &&
          av_compare_ts(pkt->pts, st->time_base,
                        end_pts-seg->time_delta, AV_TIME_BASE_Q) >= 0))) {
        if ((ret = segment_end(s, seg->individual_header_trailer, 0)) < 0)
            goto fail;

        if ((ret = segment_start(s, seg->individual_header_trailer)) < 0)
            goto fail;

        oc = seg->avf;

        seg->cur_entry.index     = seg->segment_idx;
        seg->cur_entry.start_time = (double)pkt->pts * av_q2d(st->time_base);
        seg->cur_entry.start_pts = av_rescale_q(pkt->pts, st->time_base, AV_TIME_BASE_Q);
    } else if (pkt->pts != AV_NOPTS_VALUE) {
        /* extend the current entry's end time as packets come in */
        seg->cur_entry.end_time =
            FFMAX(seg->cur_entry.end_time, (double)(pkt->pts + pkt->duration) * av_q2d(st->time_base));
    }

    if (seg->is_first_pkt) {
        av_log(s, AV_LOG_DEBUG, "segment:'%s' starts with packet stream:%d pts:%s pts_time:%s frame:%d\n",
               seg->avf->filename, pkt->stream_index,
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &st->time_base), seg->frame_count);
        seg->is_first_pkt = 0;
    }

    av_log(s, AV_LOG_DEBUG, "stream:%d start_pts_time:%s pts:%s pts_time:%s dts:%s dts_time:%s",
           pkt->stream_index,
           av_ts2timestr(seg->cur_entry.start_pts, &AV_TIME_BASE_Q),
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &st->time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &st->time_base));

    /* compute new timestamps */
    offset = av_rescale_q(seg->initial_offset - (seg->reset_timestamps ? seg->cur_entry.start_pts : 0),
                          AV_TIME_BASE_Q, st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += offset;
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += offset;

    av_log(s, AV_LOG_DEBUG, " -> pts:%s pts_time:%s dts:%s dts_time:%s\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &st->time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &st->time_base));

    ret = ff_write_chained(oc, pkt->stream_index, pkt, s);

    /* fail: is reached on the normal path too; ret carries the outcome */
fail:
    if (pkt->stream_index == seg->reference_stream_index)
        seg->frame_count++;

    if (ret < 0) {
        if (seg->list)
            avio_close(seg->list_pb);
        avformat_free_context(oc);
    }

    return ret;
}
 
/**
 * Muxer write_trailer callback: finish the last segment (in
 * no-header/trailer mode the trailer is written into a discarded null
 * context), then free the option strings, the forced times/frames arrays,
 * the segment list entries and the child muxer context.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int seg_write_trailer(struct AVFormatContext *s)
{
    SegmentContext *seg = s->priv_data;
    AVFormatContext *oc = seg->avf;
    SegmentListEntry *cur, *next;

    int ret;
    if (!seg->write_header_trailer) {
        if ((ret = segment_end(s, 0, 1)) < 0)
            goto fail;
        /* fix: open_null_ctx() was previously unchecked — on ENOMEM
         * av_write_trailer() would have run with a stale/closed pb */
        if ((ret = open_null_ctx(&oc->pb)) < 0)
            goto fail;
        ret = av_write_trailer(oc);
        close_null_ctx(oc->pb);
    } else {
        ret = segment_end(s, 1, 1);
    }
fail:
    if (seg->list)
        avio_close(seg->list_pb);

    av_opt_free(seg);
    av_freep(&seg->times);
    av_freep(&seg->frames);

    /* release the in-memory segment list */
    cur = seg->segment_list_entries;
    while (cur) {
        next = cur->next;
        av_free(cur);
        cur = next;
    }

    avformat_free_context(oc);
    return ret;
}
 
#define OFFSET(x) offsetof(SegmentContext, x)
#define E AV_OPT_FLAG_ENCODING_PARAM
/* AVOption table shared by the "segment" and "stream_segment" muxers;
 * offsets resolve into SegmentContext and every option is an encoding
 * parameter.  Exactly one of segment_time / segment_times / segment_frames
 * may be set (enforced in seg_write_header()). */
static const AVOption options[] = {
    { "reference_stream",  "set reference stream", OFFSET(reference_stream_specifier), AV_OPT_TYPE_STRING, {.str = "auto"}, CHAR_MIN, CHAR_MAX, E },
    { "segment_format",    "set container format used for the segments", OFFSET(format),  AV_OPT_TYPE_STRING, {.str = NULL},  0, 0,       E },
    { "segment_list",      "set the segment list filename",              OFFSET(list),    AV_OPT_TYPE_STRING, {.str = NULL},  0, 0,       E },

    { "segment_list_flags","set flags affecting segment list generation", OFFSET(list_flags), AV_OPT_TYPE_FLAGS, {.i64 = SEGMENT_LIST_FLAG_CACHE }, 0, UINT_MAX, E, "list_flags"},
    { "cache",             "allow list caching", 0, AV_OPT_TYPE_CONST, {.i64 = SEGMENT_LIST_FLAG_CACHE }, INT_MIN, INT_MAX,   E, "list_flags"},
    { "live",              "enable live-friendly list generation (useful for HLS)", 0, AV_OPT_TYPE_CONST, {.i64 = SEGMENT_LIST_FLAG_LIVE }, INT_MIN, INT_MAX, E, "list_flags"},

    { "segment_list_size", "set the maximum number of playlist entries", OFFSET(list_size), AV_OPT_TYPE_INT,  {.i64 = 0},     0, INT_MAX, E },

    { "segment_list_type", "set the segment list type",                  OFFSET(list_type), AV_OPT_TYPE_INT,  {.i64 = LIST_TYPE_UNDEFINED}, -1, LIST_TYPE_NB-1, E, "list_type" },
    { "flat", "flat format",     0, AV_OPT_TYPE_CONST, {.i64=LIST_TYPE_FLAT }, INT_MIN, INT_MAX, E, "list_type" },
    { "csv",  "csv format",      0, AV_OPT_TYPE_CONST, {.i64=LIST_TYPE_CSV  }, INT_MIN, INT_MAX, E, "list_type" },
    { "ext",  "extended format", 0, AV_OPT_TYPE_CONST, {.i64=LIST_TYPE_EXT  }, INT_MIN, INT_MAX, E, "list_type" },
    { "ffconcat", "ffconcat format", 0, AV_OPT_TYPE_CONST, {.i64=LIST_TYPE_FFCONCAT }, INT_MIN, INT_MAX, E, "list_type" },
    { "m3u8", "M3U8 format",     0, AV_OPT_TYPE_CONST, {.i64=LIST_TYPE_M3U8 }, INT_MIN, INT_MAX, E, "list_type" },
    { "hls", "Apple HTTP Live Streaming compatible", 0, AV_OPT_TYPE_CONST, {.i64=LIST_TYPE_M3U8 }, INT_MIN, INT_MAX, E, "list_type" },

    { "segment_time",      "set segment duration",                       OFFSET(time_str),AV_OPT_TYPE_STRING, {.str = NULL},  0, 0,       E },
    { "segment_time_delta","set approximation value used for the segment times", OFFSET(time_delta), AV_OPT_TYPE_DURATION, {.i64 = 0}, 0, 0, E },
    { "segment_times",     "set segment split time points",              OFFSET(times_str),AV_OPT_TYPE_STRING,{.str = NULL},  0, 0,       E },
    { "segment_frames",    "set segment split frame numbers",            OFFSET(frames_str),AV_OPT_TYPE_STRING,{.str = NULL},  0, 0,       E },
    { "segment_wrap",      "set number after which the index wraps",     OFFSET(segment_idx_wrap), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
    { "segment_start_number", "set the sequence number of the first segment", OFFSET(segment_idx), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },

    { "individual_header_trailer", "write header/trailer to each segment", OFFSET(individual_header_trailer), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, E },
    { "write_header_trailer", "write a header to the first segment and a trailer to the last one", OFFSET(write_header_trailer), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, E },
    { "reset_timestamps", "reset timestamps at the begin of each segment", OFFSET(reset_timestamps), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, E },
    { "initial_offset", "set initial timestamp offset", OFFSET(initial_offset), AV_OPT_TYPE_DURATION, {.i64 = 0}, -INT64_MAX, INT64_MAX, E },
    { NULL },
};
 
/* AVClass for the "segment" muxer, exposing the shared options table */
static const AVClass seg_class = {
    .class_name = "segment muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* "segment" muxer: splits output at reference-stream keyframes;
 * AVFMT_NOFILE because it manages its own per-segment output files */
AVOutputFormat ff_segment_muxer = {
    .name           = "segment",
    .long_name      = NULL_IF_CONFIG_SMALL("segment"),
    .priv_data_size = sizeof(SegmentContext),
    .flags          = AVFMT_NOFILE|AVFMT_GLOBALHEADER,
    .write_header   = seg_write_header,
    .write_packet   = seg_write_packet,
    .write_trailer  = seg_write_trailer,
    .priv_class     = &seg_class,
};
 
/* AVClass for the "stream_segment" muxer variant, same options table */
static const AVClass sseg_class = {
    .class_name = "stream_segment muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* streaming variant of the segment muxer: same callbacks, but without
 * AVFMT_GLOBALHEADER (each segment is self-contained for streaming) */
AVOutputFormat ff_stream_segment_muxer = {
    .name           = "stream_segment,ssegment",
    .long_name      = NULL_IF_CONFIG_SMALL("streaming segment muxer"),
    .priv_data_size = sizeof(SegmentContext),
    .flags          = AVFMT_NOFILE,
    .write_header   = seg_write_header,
    .write_packet   = seg_write_packet,
    .write_trailer  = seg_write_trailer,
    .priv_class     = &sseg_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/sierravmd.c
0,0 → 1,318
/*
* Sierra VMD Format Demuxer
* Copyright (c) 2004 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Sierra VMD file demuxer
* by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
* for more information on the Sierra VMD file format, visit:
* http://www.pcisys.net/~melanson/codecs/
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
 
#define VMD_HEADER_SIZE 0x0330
#define BYTES_PER_FRAME_RECORD 16
 
/* one entry of the demuxer's frame index, built in vmd_read_header() */
typedef struct {
    int stream_index;        /* audio or video stream the chunk belongs to */
    int64_t frame_offset;    /* absolute file offset of the chunk payload */
    unsigned int frame_size; /* payload size in bytes */
    int64_t pts;
    int keyframe;            /* NOTE(review): never written in this file — confirm it is used */
    /* raw 16-byte table record; prepended to each output packet */
    unsigned char frame_record[BYTES_PER_FRAME_RECORD];
} vmd_frame;
 
/* demuxer private state */
typedef struct VmdDemuxContext {
    int video_stream_index;
    int audio_stream_index;

    unsigned int frame_count;      /* header frame count, then total index entries */
    unsigned int frames_per_block; /* table sub-entries per index block */
    vmd_frame *frame_table;        /* flat index of all audio/video chunks */
    unsigned int current_frame;    /* next index entry to emit */
    int is_indeo3;                 /* video fourcc is 'iv3' -> Indeo 3 payload */

    int sample_rate;               /* 0 means the file has no audio */
    int64_t audio_sample_counter;  /* NOTE(review): never written in this file — confirm it is used */
    int skiphdr;                   /* NOTE(review): never written in this file — confirm it is used */

    unsigned char vmd_header[VMD_HEADER_SIZE]; /* raw file header, also used as extradata */
} VmdDemuxContext;
 
/* Probe for a Sierra VMD file: the first two bytes must hold the header
 * chunk size (VMD_HEADER_SIZE - 2), and either the video dimensions must
 * be plausible or the sample rate must be the known 22050 Hz value.
 * Returns half certainty since the check is weak. */
static int vmd_probe(AVProbeData *p)
{
    int width, height, rate, dims_ok;

    if (p->buf_size < 806)
        return 0;

    /* first 2 bytes must announce the VMD header chunk size */
    if (AV_RL16(&p->buf[0]) != VMD_HEADER_SIZE - 2)
        return 0;

    width  = AV_RL16(&p->buf[12]);
    height = AV_RL16(&p->buf[14]);
    rate   = AV_RL16(&p->buf[804]);

    dims_ok = width >= 1 && width <= 2048 && height >= 1 && height <= 2048;
    if (!dims_ok && rate != 22050)
        return 0;

    /* only return half certainty since this check is a bit sketchy */
    return AVPROBE_SCORE_EXTENSION;
}
 
/**
 * Read the VMD header, create the video and/or audio streams, then walk
 * the table of contents and build a flat frame index (vmd->frame_table)
 * holding one entry per audio or video chunk.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int vmd_read_header(AVFormatContext *s)
{
    VmdDemuxContext *vmd = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st = NULL, *vst = NULL;
    unsigned int toc_offset;
    unsigned char *raw_frame_table;
    int raw_frame_table_size;
    int64_t current_offset;
    int i, j, ret;
    int width, height;
    unsigned int total_frames;
    int64_t current_audio_pts = 0;
    unsigned char chunk[BYTES_PER_FRAME_RECORD];
    int num, den;
    int sound_buffers;

    /* fetch the main header, including the 2 header length bytes */
    avio_seek(pb, 0, SEEK_SET);
    if (avio_read(pb, vmd->vmd_header, VMD_HEADER_SIZE) != VMD_HEADER_SIZE)
        return AVERROR(EIO);

    width = AV_RL16(&vmd->vmd_header[12]);
    height = AV_RL16(&vmd->vmd_header[14]);
    if (width && height) {
        /* bytes 24-26 spell "iv3" for Indeo 3 payloads */
        if(vmd->vmd_header[24] == 'i' && vmd->vmd_header[25] == 'v' && vmd->vmd_header[26] == '3') {
            vmd->is_indeo3 = 1;
        } else {
            vmd->is_indeo3 = 0;
        }
        /* start up the decoders */
        vst = avformat_new_stream(s, NULL);
        if (!vst)
            return AVERROR(ENOMEM);
        /* provisional 10 fps time base; refined below if audio is present */
        avpriv_set_pts_info(vst, 33, 1, 10);
        vmd->video_stream_index = vst->index;
        vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        vst->codec->codec_id = vmd->is_indeo3 ? AV_CODEC_ID_INDEO3 : AV_CODEC_ID_VMDVIDEO;
        vst->codec->codec_tag = 0;  /* no fourcc */
        vst->codec->width = width;
        vst->codec->height = height;
        /* NOTE(review): wide Indeo 3 files are halved in both dimensions —
         * presumably the stored size is a doubled value; confirm against
         * the Indeo 3 decoder */
        if(vmd->is_indeo3 && vst->codec->width > 320){
            vst->codec->width >>= 1;
            vst->codec->height >>= 1;
        }
        /* the decoder needs the whole raw header as extradata */
        if (ff_alloc_extradata(vst->codec, VMD_HEADER_SIZE))
            return AVERROR(ENOMEM);
        memcpy(vst->codec->extradata, vmd->vmd_header, VMD_HEADER_SIZE);
    }

    /* if sample rate is 0, assume no audio */
    vmd->sample_rate = AV_RL16(&vmd->vmd_header[804]);
    if (vmd->sample_rate) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        vmd->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = AV_CODEC_ID_VMDAUDIO;
        st->codec->codec_tag = 0;  /* no fourcc */
        /* bit 7 of byte 811 selects stereo */
        if (vmd->vmd_header[811] & 0x80) {
            st->codec->channels = 2;
            st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
        } else {
            st->codec->channels = 1;
            st->codec->channel_layout = AV_CH_LAYOUT_MONO;
        }
        st->codec->sample_rate = vmd->sample_rate;
        st->codec->block_align = AV_RL16(&vmd->vmd_header[806]);
        /* the high bit of block_align flags 16-bit samples; the stored
         * value is then the negated block size */
        if (st->codec->block_align & 0x8000) {
            st->codec->bits_per_coded_sample = 16;
            st->codec->block_align = -(st->codec->block_align - 0x10000);
        } else {
            st->codec->bits_per_coded_sample = 8;
        }
        st->codec->bit_rate = st->codec->sample_rate *
            st->codec->bits_per_coded_sample * st->codec->channels;

        /* calculate pts: one tick per audio block */
        num = st->codec->block_align;
        den = st->codec->sample_rate * st->codec->channels;
        av_reduce(&num, &den, num, den, (1UL<<31)-1);
        if (vst)
            avpriv_set_pts_info(vst, 33, num, den);
        avpriv_set_pts_info(st, 33, num, den);
    }

    toc_offset = AV_RL32(&vmd->vmd_header[812]);
    vmd->frame_count = AV_RL16(&vmd->vmd_header[6]);
    vmd->frames_per_block = AV_RL16(&vmd->vmd_header[18]);
    avio_seek(pb, toc_offset, SEEK_SET);

    raw_frame_table = NULL;
    vmd->frame_table = NULL;
    sound_buffers = AV_RL16(&vmd->vmd_header[808]);
    raw_frame_table_size = vmd->frame_count * 6;
    /* guard the frame_table allocation size against overflow */
    if(vmd->frame_count * vmd->frames_per_block >= UINT_MAX / sizeof(vmd_frame) - sound_buffers){
        av_log(s, AV_LOG_ERROR, "vmd->frame_count * vmd->frames_per_block too large\n");
        return -1;
    }
    raw_frame_table = av_malloc(raw_frame_table_size);
    vmd->frame_table = av_malloc((vmd->frame_count * vmd->frames_per_block + sound_buffers) * sizeof(vmd_frame));
    if (!raw_frame_table || !vmd->frame_table) {
        ret = AVERROR(ENOMEM);
        goto error;
    }
    if (avio_read(pb, raw_frame_table, raw_frame_table_size) !=
        raw_frame_table_size) {
        ret = AVERROR(EIO);
        goto error;
    }

    total_frames = 0;
    for (i = 0; i < vmd->frame_count; i++) {

        /* bytes 2-5 of each 6-byte TOC record hold the block's file offset */
        current_offset = AV_RL32(&raw_frame_table[6 * i + 2]);

        /* handle each entry in index block */
        for (j = 0; j < vmd->frames_per_block; j++) {
            int type;
            uint32_t size;

            avio_read(pb, chunk, BYTES_PER_FRAME_RECORD);
            type = chunk[0];
            size = AV_RL32(&chunk[2]);
            if (size > INT_MAX / 2) {
                av_log(s, AV_LOG_ERROR, "Invalid frame size\n");
                ret = AVERROR_INVALIDDATA;
                goto error;
            }
            /* empty non-audio records carry no payload */
            if(!size && type != 1)
                continue;
            switch(type) {
            case 1: /* Audio Chunk */
                if (!st) break;
                /* first audio chunk contains several audio buffers */
                vmd->frame_table[total_frames].frame_offset = current_offset;
                vmd->frame_table[total_frames].stream_index = vmd->audio_stream_index;
                vmd->frame_table[total_frames].frame_size = size;
                memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
                vmd->frame_table[total_frames].pts = current_audio_pts;
                total_frames++;
                /* the first chunk spans sound_buffers-1 extra pts ticks */
                if(!current_audio_pts)
                    current_audio_pts += sound_buffers - 1;
                else
                    current_audio_pts++;
                break;
            case 2: /* Video Chunk */
                vmd->frame_table[total_frames].frame_offset = current_offset;
                vmd->frame_table[total_frames].stream_index = vmd->video_stream_index;
                vmd->frame_table[total_frames].frame_size = size;
                memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
                vmd->frame_table[total_frames].pts = i;
                total_frames++;
                break;
            }
            current_offset += size;
        }
    }

    av_free(raw_frame_table);

    vmd->current_frame = 0;
    /* frame_count now holds the number of index entries, not TOC blocks */
    vmd->frame_count = total_frames;

    return 0;

error:
    av_freep(&raw_frame_table);
    av_freep(&vmd->frame_table);
    return ret;
}
 
/**
 * Emit the next indexed chunk as a packet: the 16-byte frame record is
 * prepended to the payload, except for Indeo 3 video frames where only
 * the raw payload is read.
 *
 * @return 0 on success, AVERROR_EOF past the last frame, or a negative
 *         AVERROR code on I/O failure
 */
static int vmd_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    VmdDemuxContext *vmd = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = 0;
    vmd_frame *frame;

    if (vmd->current_frame >= vmd->frame_count)
        return AVERROR_EOF;

    frame = &vmd->frame_table[vmd->current_frame];
    /* position the stream (will probably be there already) */
    avio_seek(pb, frame->frame_offset, SEEK_SET);

    if(ffio_limit(pb, frame->frame_size) != frame->frame_size)
        return AVERROR(EIO);
    if (av_new_packet(pkt, frame->frame_size + BYTES_PER_FRAME_RECORD))
        return AVERROR(ENOMEM);
    pkt->pos= avio_tell(pb);
    memcpy(pkt->data, frame->frame_record, BYTES_PER_FRAME_RECORD);
    /* Indeo 3 video frames (record type 0x02) carry the raw payload only */
    if(vmd->is_indeo3 && frame->frame_record[0] == 0x02)
        ret = avio_read(pb, pkt->data, frame->frame_size);
    else
        ret = avio_read(pb, pkt->data + BYTES_PER_FRAME_RECORD,
            frame->frame_size);

    if (ret != frame->frame_size) {
        /* fix: return immediately after freeing the packet instead of
         * continuing to populate and log the now-freed packet */
        av_free_packet(pkt);
        return AVERROR(EIO);
    }
    pkt->stream_index = frame->stream_index;
    pkt->pts = frame->pts;
    av_log(s, AV_LOG_DEBUG, " dispatching %s frame with %d bytes and pts %"PRId64"\n",
            (frame->frame_record[0] == 0x02) ? "video" : "audio",
            frame->frame_size + BYTES_PER_FRAME_RECORD,
            pkt->pts);

    vmd->current_frame++;

    return 0;
}
 
/* Demuxer close callback: release the frame index built in
 * vmd_read_header(). */
static int vmd_read_close(AVFormatContext *s)
{
    VmdDemuxContext *ctx = s->priv_data;

    av_freep(&ctx->frame_table);

    return 0;
}
 
/* Sierra VMD demuxer registration */
AVInputFormat ff_vmd_demuxer = {
    .name           = "vmd",
    .long_name      = NULL_IF_CONFIG_SMALL("Sierra VMD"),
    .priv_data_size = sizeof(VmdDemuxContext),
    .read_probe     = vmd_probe,
    .read_header    = vmd_read_header,
    .read_packet    = vmd_read_packet,
    .read_close     = vmd_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/siff.c
0,0 → 1,252
/*
* Beam Software SIFF demuxer
* Copyright (c) 2007 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
 
/* chunk tags used by the SIFF container */
enum SIFFTags{
    TAG_SIFF = MKTAG('S', 'I', 'F', 'F'), /* file magic */
    TAG_BODY = MKTAG('B', 'O', 'D', 'Y'), /* payload chunk */
    TAG_VBHD = MKTAG('V', 'B', 'H', 'D'), /* video header chunk */
    TAG_SHDR = MKTAG('S', 'H', 'D', 'R'), /* sound header chunk */
    TAG_VBV1 = MKTAG('V', 'B', 'V', '1'), /* video file variant */
    TAG_SOUN = MKTAG('S', 'O', 'U', 'N'), /* audio-only file variant */
};
 
/* per-frame flag bits read in siff_read_packet() */
enum VBFlags{
    VB_HAS_GMC     = 0x01, /* frame carries 4 bytes of global motion data */
    VB_HAS_AUDIO   = 0x04, /* frame carries an audio sub-chunk (size prefix follows) */
    VB_HAS_VIDEO   = 0x08,
    VB_HAS_PALETTE = 0x10,
    VB_HAS_LENGTH  = 0x20
};
 
/* demuxer private state */
typedef struct SIFFContext{
    int frames;      /* total number of video frames (from VBHD) */
    int cur_frame;   /* index of the next frame to emit */
    int rate;        /* audio sample rate; 0 means no audio */
    int bits;        /* audio bits per sample */
    int block_align; /* rate * bytes-per-sample */

    int has_video;
    int has_audio;

    int curstrm;     /* -1: at a new frame boundary; 1: audio part pending; 0: video part pending */
    int pktsize;     /* size of the current frame chunk (minus the 4-byte size field) */
    int gmcsize;     /* size of global motion data for this frame (0 or 4) */
    int sndsize;     /* size of the audio part of this frame */

    int flags;       /* VBFlags of the current frame */
    uint8_t gmc[4];  /* buffered global motion data */
}SIFFContext;
 
/* Probe for a SIFF file: requires the SIFF magic at offset 0 and either a
 * VBV1 (video) or SOUN (audio-only) tag at offset 8. */
static int siff_probe(AVProbeData *p)
{
    uint32_t variant;

    if (AV_RL32(p->buf) != TAG_SIFF)
        return 0;

    variant = AV_RL32(p->buf + 8);
    if (variant != TAG_VBV1 && variant != TAG_SOUN)
        return 0;

    return AVPROBE_SCORE_MAX;
}
 
/* Create the mono unsigned-8-bit PCM audio stream at the sample rate read
 * into the context.  Returns 0 on success, AVERROR(ENOMEM) on failure. */
static int create_audio_stream(AVFormatContext *s, SIFFContext *c)
{
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
    stream->codec->codec_id              = AV_CODEC_ID_PCM_U8;
    stream->codec->channels              = 1;
    stream->codec->channel_layout        = AV_CH_LAYOUT_MONO;
    stream->codec->bits_per_coded_sample = 8;
    stream->codec->sample_rate           = c->rate;
    stream->start_time                   = 0;
    avpriv_set_pts_info(stream, 16, 1, c->rate);

    return 0;
}
 
/**
 * Parse the VBHD header chunk of a VBV1 (video) SIFF file and create the
 * video stream, plus the audio stream when a sample rate is present.
 * Reads are strictly sequential from pb.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int siff_parse_vbv1(AVFormatContext *s, SIFFContext *c, AVIOContext *pb)
{
    AVStream *st;
    int width, height;

    if (avio_rl32(pb) != TAG_VBHD){
        av_log(s, AV_LOG_ERROR, "Header chunk is missing\n");
        return AVERROR_INVALIDDATA;
    }
    /* chunk size is stored big-endian and must be exactly 32 */
    if(avio_rb32(pb) != 32){
        av_log(s, AV_LOG_ERROR, "Header chunk size is incorrect\n");
        return AVERROR_INVALIDDATA;
    }
    if(avio_rl16(pb) != 1){
        av_log(s, AV_LOG_ERROR, "Incorrect header version\n");
        return AVERROR_INVALIDDATA;
    }
    width = avio_rl16(pb);
    height = avio_rl16(pb);
    avio_skip(pb, 4);
    c->frames = avio_rl16(pb);
    if(!c->frames){
        av_log(s, AV_LOG_ERROR, "File contains no frames ???\n");
        return AVERROR_INVALIDDATA;
    }
    c->bits = avio_rl16(pb);
    c->rate = avio_rl16(pb);
    c->block_align = c->rate * (c->bits >> 3);

    avio_skip(pb, 16); //zeroes

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_VB;
    st->codec->codec_tag  = MKTAG('V', 'B', 'V', '1');
    st->codec->width      = width;
    st->codec->height     = height;
    st->codec->pix_fmt    = AV_PIX_FMT_PAL8;
    st->nb_frames         =
    st->duration          = c->frames;
    /* fixed 12 fps time base */
    avpriv_set_pts_info(st, 16, 1, 12);

    c->cur_frame = 0;
    c->has_video = 1;
    c->has_audio = !!c->rate;
    c->curstrm   = -1;
    /* NOTE(review): create_audio_stream() currently only fails with
     * ENOMEM, so the hard-coded error code matches — confirm if it ever
     * grows other failure modes */
    if (c->has_audio && create_audio_stream(s, c) < 0)
        return AVERROR(ENOMEM);
    return 0;
}
 
/**
 * Parse the 'SHDR' header of an audio-only ('SOUN') SIFF file and
 * create the audio stream.
 *
 * @return 0 on success, a negative AVERROR code on malformed input or OOM
 */
static int siff_parse_soun(AVFormatContext *s, SIFFContext *c, AVIOContext *pb)
{
    if (avio_rl32(pb) != TAG_SHDR){
        av_log(s, AV_LOG_ERROR, "Header chunk is missing\n");
        return AVERROR_INVALIDDATA;
    }
    /* SHDR chunk size is big-endian and must be exactly 8 */
    if(avio_rb32(pb) != 8){
        av_log(s, AV_LOG_ERROR, "Header chunk size is incorrect\n");
        return AVERROR_INVALIDDATA;
    }
    avio_skip(pb, 4); //unknown value
    c->rate = avio_rl16(pb);
    c->bits = avio_rl16(pb);
    /* one second worth of audio bytes; used as the packet size */
    c->block_align = c->rate * (c->bits >> 3);
    return create_audio_stream(s, c);
}
 
/**
 * Read the SIFF file header: verify the 'SIFF' magic, dispatch to the
 * VBV1 (video) or SOUN (audio) sub-parser, then position pb at the
 * start of the 'BODY' chunk payload.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int siff_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    SIFFContext *c = s->priv_data;
    uint32_t tag;
    int ret;

    if (avio_rl32(pb) != TAG_SIFF)
        return AVERROR_INVALIDDATA;
    avio_skip(pb, 4); //ignore size
    tag = avio_rl32(pb);

    if (tag != TAG_VBV1 && tag != TAG_SOUN){
        av_log(s, AV_LOG_ERROR, "Not a VBV file\n");
        return AVERROR_INVALIDDATA;
    }

    if (tag == TAG_VBV1 && (ret = siff_parse_vbv1(s, c, pb)) < 0)
        return ret;
    if (tag == TAG_SOUN && (ret = siff_parse_soun(s, c, pb)) < 0)
        return ret;
    if (avio_rl32(pb) != MKTAG('B', 'O', 'D', 'Y')){
        av_log(s, AV_LOG_ERROR, "'BODY' chunk is missing\n");
        return AVERROR_INVALIDDATA;
    }
    avio_skip(pb, 4); //ignore size

    return 0;
}
 
/**
 * Demux one packet.
 *
 * For video files each stored frame may carry an interleaved audio part;
 * c->curstrm tracks which half is due next: -1 means a fresh frame header
 * must be parsed, 1 means the audio part comes first, 0 means the video
 * part. Audio-only files are read in c->block_align sized chunks.
 *
 * Fix over the original: the audio branch computed c->sndsize - 4
 * without verifying sndsize >= 4, so a corrupted size field produced a
 * bogus (underflowed) read size; such input is now rejected as invalid.
 *
 * @return pkt->size on success, a negative AVERROR code otherwise
 */
static int siff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SIFFContext *c = s->priv_data;
    int size;

    if (c->has_video){
        if (c->cur_frame >= c->frames)
            return AVERROR_EOF;
        if (c->curstrm == -1){
            /* parse per-frame header: total size, flags, optional GMC data
             * and optional embedded audio chunk size */
            c->pktsize = avio_rl32(s->pb) - 4;
            c->flags = avio_rl16(s->pb);
            c->gmcsize = (c->flags & VB_HAS_GMC) ? 4 : 0;
            if (c->gmcsize)
                avio_read(s->pb, c->gmc, c->gmcsize);
            c->sndsize = (c->flags & VB_HAS_AUDIO) ? avio_rl32(s->pb): 0;
            c->curstrm = !!(c->flags & VB_HAS_AUDIO);
        }

        if (!c->curstrm){
            /* video part: flags word + GMC data + frame payload */
            size = c->pktsize - c->sndsize - c->gmcsize - 2;
            size = ffio_limit(s->pb, size);
            if(size < 0 || c->pktsize < c->sndsize)
                return AVERROR_INVALIDDATA;
            if (av_new_packet(pkt, size + c->gmcsize + 2) < 0)
                return AVERROR(ENOMEM);
            AV_WL16(pkt->data, c->flags);
            if (c->gmcsize)
                memcpy(pkt->data + 2, c->gmc, c->gmcsize);
            avio_read(s->pb, pkt->data + 2 + c->gmcsize, size);
            pkt->stream_index = 0;
            c->curstrm = -1;
        }else{
            /* audio part: sndsize counts its own 4-byte size field, so it
             * must be at least 4 for the payload size to be meaningful */
            if (c->sndsize < 4)
                return AVERROR_INVALIDDATA;
            if ((size = av_get_packet(s->pb, pkt, c->sndsize - 4)) < 0)
                return AVERROR(EIO);
            pkt->stream_index = 1;
            pkt->duration = size;
            c->curstrm = 0;
        }
        /* first frame and all audio parts are key frames */
        if(!c->cur_frame || c->curstrm)
            pkt->flags |= AV_PKT_FLAG_KEY;
        if (c->curstrm == -1)
            c->cur_frame++;
    }else{
        size = av_get_packet(s->pb, pkt, c->block_align);
        if(!size)
            return AVERROR_EOF;
        if(size < 0)
            return AVERROR(EIO);
        pkt->duration = size;
    }
    return pkt->size;
}
 
/* Demuxer registration for Beam Software SIFF (.vb video / .son audio). */
AVInputFormat ff_siff_demuxer = {
    .name           = "siff",
    .long_name      = NULL_IF_CONFIG_SMALL("Beam Software SIFF"),
    .priv_data_size = sizeof(SIFFContext),
    .read_probe     = siff_probe,
    .read_header    = siff_read_header,
    .read_packet    = siff_read_packet,
    .extensions     = "vb,son",
};
/contrib/sdk/sources/ffmpeg/libavformat/smacker.c
0,0 → 1,389
/*
* Smacker demuxer
* Copyright (c) 2006 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/*
* Based on http://wiki.multimedia.cx/index.php?title=Smacker
*/
 
#include "libavutil/bswap.h"
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define SMACKER_PAL 0x01
#define SMACKER_FLAG_RING_FRAME 0x01
 
/* Per-track audio flags, one byte per track in the Smacker header.
 * Their mapping to codecs/parameters is applied in smacker_read_header(). */
enum SAudFlags {
    SMK_AUD_PACKED  = 0x80,   /* compressed (Smacker audio) rather than raw PCM */
    SMK_AUD_16BITS  = 0x20,   /* 16-bit samples; 8-bit otherwise */
    SMK_AUD_STEREO  = 0x10,   /* 2 channels; mono otherwise */
    SMK_AUD_BINKAUD = 0x08,   /* Bink audio (RDFT) codec */
    SMK_AUD_USEDCT  = 0x04    /* Bink audio in DCT mode */
};
 
typedef struct SmackerContext {
    /* Smacker file header */
    uint32_t magic;                   /* 'SMK2' or 'SMK4' fourcc */
    uint32_t width, height;
    uint32_t frames;                  /* frame count (+1 if a ring frame is present) */
    int pts_inc;                      /* frame duration, reduced against a 100000 timebase */
    uint32_t flags;
    uint32_t audio[7];                /* per-track audio header words (unused after parsing) */
    uint32_t treesize;                /* size of the Huffman trees stored in the file */
    uint32_t mmap_size, mclr_size, full_size, type_size;  /* per-tree sizes, passed in extradata */
    uint8_t aflags[7];                /* per-track SAudFlags */
    uint32_t rates[7];                /* per-track sample rates; 0 = track absent */
    uint32_t pad;
    /* frame info */
    uint32_t *frm_size;               /* per-frame byte sizes (bit 0 doubles as palette flag) */
    uint8_t *frm_flags;               /* per-frame flag bytes */
    /* internal variables */
    int cur_frame;
    int is_ver4;                      /* nonzero for 'SMK4' files */
    int64_t cur_pts;
    /* current frame for demuxing */
    uint8_t pal[768];                 /* current 256-entry RGB palette */
    int indexes[7];                   /* audio track -> AVStream index, -1 if absent */
    int videoindex;
    uint8_t *bufs[7];                 /* stacked audio chunks of the current frame */
    int buf_sizes[7];
    int stream_id[7];
    int curstream;                    /* top of the audio-chunk stack, -1 = empty */
    int64_t nextpos;                  /* file offset of the next frame */
    int64_t aud_pts[7];               /* running audio timestamps per stacked chunk */
} SmackerContext;
 
/* Pairs a timestamp with a stream index.
 * NOTE(review): not referenced anywhere in this file's visible code. */
typedef struct SmackerFrame {
    int64_t pts;
    int stream;
} SmackerFrame;
 
/* Palette component lookup used in Smacker: expands a 6-bit value i to
 * 8 bits as (i << 2) | (i >> 4), i.e. entry 63 -> 0xFF. */
static const uint8_t smk_pal[64] = {
    0x00, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x18, 0x1C,
    0x20, 0x24, 0x28, 0x2C, 0x30, 0x34, 0x38, 0x3C,
    0x41, 0x45, 0x49, 0x4D, 0x51, 0x55, 0x59, 0x5D,
    0x61, 0x65, 0x69, 0x6D, 0x71, 0x75, 0x79, 0x7D,
    0x82, 0x86, 0x8A, 0x8E, 0x92, 0x96, 0x9A, 0x9E,
    0xA2, 0xA6, 0xAA, 0xAE, 0xB2, 0xB6, 0xBA, 0xBE,
    0xC3, 0xC7, 0xCB, 0xCF, 0xD3, 0xD7, 0xDB, 0xDF,
    0xE3, 0xE7, 0xEB, 0xEF, 0xF3, 0xF7, 0xFB, 0xFF
};
 
 
/**
 * Probe for a Smacker file: the first four bytes must read "SMK2"
 * or "SMK4".
 */
static int smacker_probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;

    if (b[0] != 'S' || b[1] != 'M' || b[2] != 'K')
        return 0;
    if (b[3] != '2' && b[3] != '4')
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/**
 * Parse the Smacker file header: global video parameters, per-track audio
 * parameters, the per-frame size/flag tables, and the Huffman trees
 * (stored into video extradata, prefixed with the four tree sizes, for the
 * decoder to unpack).
 *
 * Reads are strictly sequential from the start of the file.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int smacker_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    SmackerContext *smk = s->priv_data;
    AVStream *st, *ast[7];
    int i, ret;
    int tbase;

    /* read and check header */
    smk->magic = avio_rl32(pb);
    if (smk->magic != MKTAG('S', 'M', 'K', '2') && smk->magic != MKTAG('S', 'M', 'K', '4'))
        return AVERROR_INVALIDDATA;
    smk->width = avio_rl32(pb);
    smk->height = avio_rl32(pb);
    smk->frames = avio_rl32(pb);
    smk->pts_inc = (int32_t)avio_rl32(pb);
    smk->flags = avio_rl32(pb);
    /* a ring frame is an extra frame appended for seamless looping */
    if(smk->flags & SMACKER_FLAG_RING_FRAME)
        smk->frames++;
    for(i = 0; i < 7; i++)
        smk->audio[i] = avio_rl32(pb);
    smk->treesize = avio_rl32(pb);

    if(smk->treesize >= UINT_MAX/4){ // smk->treesize + 16 must not overflow (this check is probably redundant)
        av_log(s, AV_LOG_ERROR, "treesize too large\n");
        return AVERROR_INVALIDDATA;
    }

//FIXME remove extradata "rebuilding"
    smk->mmap_size = avio_rl32(pb);
    smk->mclr_size = avio_rl32(pb);
    smk->full_size = avio_rl32(pb);
    smk->type_size = avio_rl32(pb);
    for(i = 0; i < 7; i++) {
        smk->rates[i] = avio_rl24(pb);
        smk->aflags[i] = avio_r8(pb);
    }
    smk->pad = avio_rl32(pb);
    /* setup data */
    if(smk->frames > 0xFFFFFF) {
        av_log(s, AV_LOG_ERROR, "Too many frames: %i\n", smk->frames);
        return AVERROR_INVALIDDATA;
    }
    smk->frm_size = av_malloc_array(smk->frames, sizeof(*smk->frm_size));
    smk->frm_flags = av_malloc(smk->frames);
    if (!smk->frm_size || !smk->frm_flags) {
        av_freep(&smk->frm_size);
        av_freep(&smk->frm_flags);
        return AVERROR(ENOMEM);
    }

    smk->is_ver4 = (smk->magic != MKTAG('S', 'M', 'K', '2'));

    /* read frame info */
    for(i = 0; i < smk->frames; i++) {
        smk->frm_size[i] = avio_rl32(pb);
    }
    for(i = 0; i < smk->frames; i++) {
        smk->frm_flags[i] = avio_r8(pb);
    }

    /* init video codec */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    smk->videoindex = st->index;
    st->codec->width = smk->width;
    st->codec->height = smk->height;
    st->codec->pix_fmt = AV_PIX_FMT_PAL8;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_SMACKVIDEO;
    st->codec->codec_tag = smk->magic;
    /* Smacker uses 100000 as internal timebase: negative stored values are
     * already in that base (negated here), positive ones are scaled by 100 */
    if(smk->pts_inc < 0)
        smk->pts_inc = -smk->pts_inc;
    else
        smk->pts_inc *= 100;
    tbase = 100000;
    av_reduce(&tbase, &smk->pts_inc, tbase, smk->pts_inc, (1UL<<31)-1);
    avpriv_set_pts_info(st, 33, smk->pts_inc, tbase);
    st->duration = smk->frames;
    /* handle possible audio streams */
    for(i = 0; i < 7; i++) {
        smk->indexes[i] = -1;
        if (smk->rates[i]) {
            ast[i] = avformat_new_stream(s, NULL);
            if (!ast[i])
                return AVERROR(ENOMEM);
            smk->indexes[i] = ast[i]->index;
            ast[i]->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            if (smk->aflags[i] & SMK_AUD_BINKAUD) {
                ast[i]->codec->codec_id = AV_CODEC_ID_BINKAUDIO_RDFT;
            } else if (smk->aflags[i] & SMK_AUD_USEDCT) {
                ast[i]->codec->codec_id = AV_CODEC_ID_BINKAUDIO_DCT;
            } else if (smk->aflags[i] & SMK_AUD_PACKED){
                ast[i]->codec->codec_id = AV_CODEC_ID_SMACKAUDIO;
                ast[i]->codec->codec_tag = MKTAG('S', 'M', 'K', 'A');
            } else {
                ast[i]->codec->codec_id = AV_CODEC_ID_PCM_U8;
            }
            if (smk->aflags[i] & SMK_AUD_STEREO) {
                ast[i]->codec->channels = 2;
                ast[i]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            } else {
                ast[i]->codec->channels = 1;
                ast[i]->codec->channel_layout = AV_CH_LAYOUT_MONO;
            }
            ast[i]->codec->sample_rate = smk->rates[i];
            ast[i]->codec->bits_per_coded_sample = (smk->aflags[i] & SMK_AUD_16BITS) ? 16 : 8;
            if(ast[i]->codec->bits_per_coded_sample == 16 && ast[i]->codec->codec_id == AV_CODEC_ID_PCM_U8)
                ast[i]->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
            /* audio timebase: one tick per byte of decoded audio */
            avpriv_set_pts_info(ast[i], 64, 1, ast[i]->codec->sample_rate
                    * ast[i]->codec->channels * ast[i]->codec->bits_per_coded_sample / 8);
        }
    }


    /* load trees to extradata, they will be unpacked by decoder */
    if(ff_alloc_extradata(st->codec, smk->treesize + 16)){
        av_log(s, AV_LOG_ERROR, "Cannot allocate %i bytes of extradata\n", smk->treesize + 16);
        av_freep(&smk->frm_size);
        av_freep(&smk->frm_flags);
        return AVERROR(ENOMEM);
    }
    ret = avio_read(pb, st->codec->extradata + 16, st->codec->extradata_size - 16);
    if(ret != st->codec->extradata_size - 16){
        av_freep(&smk->frm_size);
        av_freep(&smk->frm_flags);
        return AVERROR(EIO);
    }
    /* prefix the trees with their four sizes, little-endian */
    ((int32_t*)st->codec->extradata)[0] = av_le2ne32(smk->mmap_size);
    ((int32_t*)st->codec->extradata)[1] = av_le2ne32(smk->mclr_size);
    ((int32_t*)st->codec->extradata)[2] = av_le2ne32(smk->full_size);
    ((int32_t*)st->codec->extradata)[3] = av_le2ne32(smk->type_size);

    smk->curstream = -1;
    smk->nextpos = avio_tell(pb);

    return 0;
}
 
 
/**
 * Demux one packet.
 *
 * A Smacker frame interleaves an optional palette update, up to 7 audio
 * chunks, and the video payload. On each new frame the audio chunks are
 * buffered on a small stack (smk->bufs / smk->curstream) and the video
 * packet is returned first (prefixed by a palette-change byte and the
 * current 768-byte palette); subsequent calls pop the buffered audio
 * chunks until the stack is empty, then the next frame is parsed.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SmackerContext *smk = s->priv_data;
    int flags;
    int ret;
    int i;
    int frame_size = 0;
    int palchange = 0;

    if (url_feof(s->pb) || smk->cur_frame >= smk->frames)
        return AVERROR_EOF;

    /* if we demuxed all streams, pass another frame */
    if(smk->curstream < 0) {
        avio_seek(s->pb, smk->nextpos, 0);
        /* low 2 bits of the stored frame size are flags, not size */
        frame_size = smk->frm_size[smk->cur_frame] & (~3);
        flags = smk->frm_flags[smk->cur_frame];
        /* handle palette change event */
        if(flags & SMACKER_PAL){
            int size, sz, t, off, j, pos;
            uint8_t *pal = smk->pal;
            uint8_t oldpal[768];

            memcpy(oldpal, pal, 768);
            size = avio_r8(s->pb);
            size = size * 4 - 1;   /* chunk length in bytes, minus the size byte */
            if(size + 1 > frame_size)
                return AVERROR_INVALIDDATA;
            frame_size -= size;
            frame_size--;
            sz = 0;
            pos = avio_tell(s->pb) + size;
            /* run-length coded palette update: skip / copy-from-old / new RGB */
            while(sz < 256){
                t = avio_r8(s->pb);
                if(t & 0x80){ /* skip palette entries */
                    sz += (t & 0x7F) + 1;
                    pal += ((t & 0x7F) + 1) * 3;
                } else if(t & 0x40){ /* copy with offset */
                    off = avio_r8(s->pb);
                    j = (t & 0x3F) + 1;
                    if (off + j > 0x100) {
                        av_log(s, AV_LOG_ERROR,
                               "Invalid palette update, offset=%d length=%d extends beyond palette size\n",
                               off, j);
                        return AVERROR_INVALIDDATA;
                    }
                    off *= 3;
                    while(j-- && sz < 256) {
                        *pal++ = oldpal[off + 0];
                        *pal++ = oldpal[off + 1];
                        *pal++ = oldpal[off + 2];
                        sz++;
                        off += 3;
                    }
                } else { /* new entries */
                    *pal++ = smk_pal[t];
                    *pal++ = smk_pal[avio_r8(s->pb) & 0x3F];
                    *pal++ = smk_pal[avio_r8(s->pb) & 0x3F];
                    sz++;
                }
            }
            avio_seek(s->pb, pos, 0);
            palchange |= 1;
        }
        flags >>= 1;
        smk->curstream = -1;
        /* if audio chunks are present, put them to stack and retrieve later */
        for(i = 0; i < 7; i++) {
            if(flags & 1) {
                uint32_t size;
                int err;

                size = avio_rl32(s->pb) - 4;
                if (!size || size + 4L > frame_size) {
                    av_log(s, AV_LOG_ERROR, "Invalid audio part size\n");
                    return AVERROR_INVALIDDATA;
                }
                frame_size -= size;
                frame_size -= 4;
                smk->curstream++;
                if ((err = av_reallocp(&smk->bufs[smk->curstream], size)) < 0) {
                    smk->buf_sizes[smk->curstream] = 0;
                    return err;
                }
                smk->buf_sizes[smk->curstream] = size;
                ret = avio_read(s->pb, smk->bufs[smk->curstream], size);
                if(ret != size)
                    return AVERROR(EIO);
                smk->stream_id[smk->curstream] = smk->indexes[i];
            }
            flags >>= 1;
        }
        if (frame_size < 0 || frame_size >= INT_MAX/2)
            return AVERROR_INVALIDDATA;
        /* video packet layout: [palette-change byte][768-byte palette][payload] */
        if (av_new_packet(pkt, frame_size + 769))
            return AVERROR(ENOMEM);
        if(smk->frm_size[smk->cur_frame] & 1)
            palchange |= 2;
        pkt->data[0] = palchange;
        memcpy(pkt->data + 1, smk->pal, 768);
        ret = avio_read(s->pb, pkt->data + 769, frame_size);
        if(ret != frame_size)
            return AVERROR(EIO);
        pkt->stream_index = smk->videoindex;
        pkt->pts          = smk->cur_frame;
        pkt->size = ret + 769;
        smk->cur_frame++;
        smk->nextpos = avio_tell(s->pb);
    } else {
        /* pop a buffered audio chunk from the stack */
        if (smk->stream_id[smk->curstream] < 0 || !smk->bufs[smk->curstream])
            return AVERROR_INVALIDDATA;
        if (av_new_packet(pkt, smk->buf_sizes[smk->curstream]))
            return AVERROR(ENOMEM);
        memcpy(pkt->data, smk->bufs[smk->curstream], smk->buf_sizes[smk->curstream]);
        pkt->size         = smk->buf_sizes[smk->curstream];
        pkt->stream_index = smk->stream_id[smk->curstream];
        pkt->pts          = smk->aud_pts[smk->curstream];
        /* first 32 bits of a chunk hold the unpacked length, used as duration */
        smk->aud_pts[smk->curstream] += AV_RL32(pkt->data);
        smk->curstream--;
    }

    return 0;
}
 
/**
 * Release everything the demuxer allocated: the per-frame size/flag
 * tables and the per-track audio staging buffers.
 */
static int smacker_read_close(AVFormatContext *s)
{
    SmackerContext *smk = s->priv_data;
    int n;

    av_freep(&smk->frm_size);
    av_freep(&smk->frm_flags);
    for (n = 0; n < 7; n++)
        av_freep(&smk->bufs[n]);

    return 0;
}
 
/* Demuxer registration for RAD Game Tools' Smacker (.smk) format. */
AVInputFormat ff_smacker_demuxer = {
    .name           = "smk",
    .long_name      = NULL_IF_CONFIG_SMALL("Smacker"),
    .priv_data_size = sizeof(SmackerContext),
    .read_probe     = smacker_probe,
    .read_header    = smacker_read_header,
    .read_packet    = smacker_read_packet,
    .read_close     = smacker_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/smjpeg.c
0,0 → 1,40
/*
* SMJPEG common code
* Copyright (c) 2011-2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* SMJPEG common code
*/
 
#include "avformat.h"
#include "internal.h"
#include "smjpeg.h"
 
/* Fourcc <-> codec mapping for SMJPEG video chunks (zero-terminated). */
const AVCodecTag ff_codec_smjpeg_video_tags[] = {
    { AV_CODEC_ID_MJPEG, MKTAG('J', 'F', 'I', 'F') },
    { AV_CODEC_ID_NONE, 0 },
};
 
/* Fourcc <-> codec mapping for SMJPEG audio chunks (zero-terminated).
 * 'NONE' marks uncompressed little-endian 16-bit PCM. */
const AVCodecTag ff_codec_smjpeg_audio_tags[] = {
    { AV_CODEC_ID_ADPCM_IMA_SMJPEG, MKTAG('A', 'P', 'C', 'M') },
    { AV_CODEC_ID_PCM_S16LE,        MKTAG('N', 'O', 'N', 'E') },
    { AV_CODEC_ID_NONE, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/smjpeg.h
0,0 → 1,45
/*
* SMJPEG common code
* Copyright (c) 2011-2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* SMJPEG common code
*/
 
#ifndef AVFORMAT_SMJPEG_H
#define AVFORMAT_SMJPEG_H
 
#include "internal.h"
 
#define SMJPEG_MAGIC "\x0\xaSMJPEG"
 
#define SMJPEG_DONE MKTAG('D', 'O', 'N', 'E')
#define SMJPEG_HEND MKTAG('H', 'E', 'N', 'D')
#define SMJPEG_SND MKTAG('_', 'S', 'N', 'D')
#define SMJPEG_SNDD MKTAG('s', 'n', 'd', 'D')
#define SMJPEG_TXT MKTAG('_', 'T', 'X', 'T')
#define SMJPEG_VID MKTAG('_', 'V', 'I', 'D')
#define SMJPEG_VIDD MKTAG('v', 'i', 'd', 'D')
 
extern const AVCodecTag ff_codec_smjpeg_video_tags[];
extern const AVCodecTag ff_codec_smjpeg_audio_tags[];
 
#endif /* AVFORMAT_SMJPEG_H */
/contrib/sdk/sources/ffmpeg/libavformat/smjpegdec.c
0,0 → 1,183
/*
* SMJPEG demuxer
* Copyright (c) 2011 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* This is a demuxer for Loki SDL Motion JPEG files
*/
 
#include "avformat.h"
#include "internal.h"
#include "riff.h"
#include "smjpeg.h"
 
/* Demuxer state: stream indices assigned while parsing the header,
 * used to route data chunks in smjpeg_read_packet(). */
typedef struct SMJPEGContext {
    int audio_stream_index;
    int video_stream_index;
} SMJPEGContext;
 
/**
 * Probe for a Loki SDL Motion JPEG file by its 8-byte magic signature.
 */
static int smjpeg_probe(AVProbeData *p)
{
    return memcmp(p->buf, SMJPEG_MAGIC, 8) ? 0 : AVPROBE_SCORE_MAX;
}
 
/**
 * Parse the SMJPEG header: magic, version, duration, then a sequence of
 * header chunks (_TXT comment, _SND audio parameters, _VID video
 * parameters) terminated by HEND. At most one audio and one video stream
 * are supported.
 *
 * @return 0 once HEND is reached, a negative AVERROR code otherwise
 */
static int smjpeg_read_header(AVFormatContext *s)
{
    SMJPEGContext *sc = s->priv_data;
    AVStream *ast = NULL, *vst = NULL;
    AVIOContext *pb = s->pb;
    uint32_t version, htype, hlength, duration;
    char *comment;

    avio_skip(pb, 8); // magic
    version = avio_rb32(pb);
    if (version)
        avpriv_request_sample(s, "Unknown version %d", version);

    duration = avio_rb32(pb); // in msec

    while (!url_feof(pb)) {
        htype = avio_rl32(pb);
        switch (htype) {
        case SMJPEG_TXT:
            hlength = avio_rb32(pb);
            if (!hlength || hlength > 512)
                return AVERROR_INVALIDDATA;
            comment = av_malloc(hlength + 1);
            if (!comment)
                return AVERROR(ENOMEM);
            if (avio_read(pb, comment, hlength) != hlength) {
                av_freep(&comment);
                av_log(s, AV_LOG_ERROR, "error when reading comment\n");
                return AVERROR_INVALIDDATA;
            }
            comment[hlength] = 0;
            /* DONT_STRDUP_VAL: ownership of 'comment' passes to the dict */
            av_dict_set(&s->metadata, "comment", comment,
                        AV_DICT_DONT_STRDUP_VAL);
            break;
        case SMJPEG_SND:
            if (ast) {
                avpriv_request_sample(s, "Multiple audio streams");
                return AVERROR_PATCHWELCOME;
            }
            hlength = avio_rb32(pb);
            if (hlength < 8)
                return AVERROR_INVALIDDATA;
            ast = avformat_new_stream(s, 0);
            if (!ast)
                return AVERROR(ENOMEM);
            ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
            ast->codec->sample_rate = avio_rb16(pb);
            ast->codec->bits_per_coded_sample = avio_r8(pb);
            ast->codec->channels    = avio_r8(pb);
            ast->codec->codec_tag   = avio_rl32(pb);
            ast->codec->codec_id    = ff_codec_get_id(ff_codec_smjpeg_audio_tags,
                                                      ast->codec->codec_tag);
            ast->duration           = duration;
            sc->audio_stream_index = ast->index;
            /* timestamps are stored in milliseconds */
            avpriv_set_pts_info(ast, 32, 1, 1000);
            avio_skip(pb, hlength - 8);
            break;
        case SMJPEG_VID:
            if (vst) {
                avpriv_request_sample(s, "Multiple video streams");
                return AVERROR_INVALIDDATA;
            }
            hlength = avio_rb32(pb);
            if (hlength < 12)
                return AVERROR_INVALIDDATA;
            vst = avformat_new_stream(s, 0);
            if (!vst)
                return AVERROR(ENOMEM);
            vst->nb_frames            = avio_rb32(pb);
            vst->codec->codec_type    = AVMEDIA_TYPE_VIDEO;
            vst->codec->width         = avio_rb16(pb);
            vst->codec->height        = avio_rb16(pb);
            vst->codec->codec_tag     = avio_rl32(pb);
            vst->codec->codec_id      = ff_codec_get_id(ff_codec_smjpeg_video_tags,
                                                        vst->codec->codec_tag);
            vst->duration             = duration;
            sc->video_stream_index = vst->index;
            /* timestamps are stored in milliseconds */
            avpriv_set_pts_info(vst, 32, 1, 1000);
            avio_skip(pb, hlength - 12);
            break;
        case SMJPEG_HEND:
            return 0;
        default:
            av_log(s, AV_LOG_ERROR, "unknown header %x\n", htype);
            return AVERROR_INVALIDDATA;
        }
    }

    return AVERROR_EOF;
}
 
/**
 * Demux one data chunk: sndD (audio) and vidD (video) chunks carry a
 * big-endian millisecond timestamp and payload size; DONE ends the file.
 *
 * @return av_get_packet() result (>= 0 bytes read) on success,
 *         AVERROR_EOF at the DONE chunk, a negative AVERROR otherwise
 */
static int smjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SMJPEGContext *sc = s->priv_data;
    uint32_t dtype, size, timestamp;
    int64_t pos;
    int ret;

    if (url_feof(s->pb))
        return AVERROR_EOF;
    pos   = avio_tell(s->pb);
    dtype = avio_rl32(s->pb);
    switch (dtype) {
    case SMJPEG_SNDD:
        timestamp = avio_rb32(s->pb);
        size = avio_rb32(s->pb);
        ret = av_get_packet(s->pb, pkt, size);
        pkt->stream_index = sc->audio_stream_index;
        pkt->pts = timestamp;
        pkt->pos = pos;
        break;
    case SMJPEG_VIDD:
        timestamp = avio_rb32(s->pb);
        size = avio_rb32(s->pb);
        ret = av_get_packet(s->pb, pkt, size);
        pkt->stream_index = sc->video_stream_index;
        pkt->pts = timestamp;
        pkt->pos = pos;
        break;
    case SMJPEG_DONE:
        ret = AVERROR_EOF;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "unknown chunk %x\n", dtype);
        ret = AVERROR_INVALIDDATA;
        break;
    }
    return ret;
}
 
/* Demuxer registration for Loki SDL Motion JPEG (.mjpg).
 * AVFMT_GENERIC_INDEX: let lavf build a seek index from returned packets. */
AVInputFormat ff_smjpeg_demuxer = {
    .name           = "smjpeg",
    .long_name      = NULL_IF_CONFIG_SMALL("Loki SDL MJPEG"),
    .priv_data_size = sizeof(SMJPEGContext),
    .read_probe     = smjpeg_probe,
    .read_header    = smjpeg_read_header,
    .read_packet    = smjpeg_read_packet,
    .extensions     = "mjpg",
    .flags          = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/smjpegenc.c
0,0 → 1,146
/*
* SMJPEG muxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* This is a muxer for Loki SDL Motion JPEG files
*/
 
#include "avformat.h"
#include "internal.h"
#include "smjpeg.h"
 
/* Muxer state: running maximum end timestamp (ms), patched into the
 * header's duration field by smjpeg_write_trailer(). */
typedef struct SMJPEGMuxContext {
    uint32_t duration;
} SMJPEGMuxContext;
 
/**
 * Write the SMJPEG header: magic, version, a duration placeholder
 * (patched in the trailer), _TXT chunks for metadata, one _SND and/or
 * one _VID parameter chunk per stream, then HEND.
 *
 * Fix over the original: the error message read "more than >2 streams",
 * doubling the comparison; it now reads correctly.
 *
 * @return 0 on success, AVERROR(EINVAL) on unsupported configurations
 */
static int smjpeg_write_header(AVFormatContext *s)
{
    AVDictionaryEntry *t = NULL;
    AVIOContext *pb = s->pb;
    int n, tag;

    if (s->nb_streams > 2) {
        av_log(s, AV_LOG_ERROR, "more than 2 streams are not supported\n");
        return AVERROR(EINVAL);
    }
    avio_write(pb, SMJPEG_MAGIC, 8);
    avio_wb32(pb, 0);              /* version */
    avio_wb32(pb, 0);              /* duration, rewritten by the trailer */

    /* one "_TXT" chunk per metadata entry, formatted as "key = value" */
    while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX))) {
        avio_wl32(pb, SMJPEG_TXT);
        avio_wb32(pb, strlen(t->key) + strlen(t->value) + 3);
        avio_write(pb, t->key, strlen(t->key));
        avio_write(pb, " = ", 3);
        avio_write(pb, t->value, strlen(t->value));
    }

    for (n = 0; n < s->nb_streams; n++) {
        AVStream *st = s->streams[n];
        AVCodecContext *codec = st->codec;
        if (codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            tag = ff_codec_get_tag(ff_codec_smjpeg_audio_tags, codec->codec_id);
            if (!tag) {
                av_log(s, AV_LOG_ERROR, "unsupported audio codec\n");
                return AVERROR(EINVAL);
            }
            avio_wl32(pb, SMJPEG_SND);
            avio_wb32(pb, 8);
            avio_wb16(pb, codec->sample_rate);
            avio_w8(pb, codec->bits_per_coded_sample);
            avio_w8(pb, codec->channels);
            avio_wl32(pb, tag);
            /* packet timestamps are stored in milliseconds */
            avpriv_set_pts_info(st, 32, 1, 1000);
        } else if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            tag = ff_codec_get_tag(ff_codec_smjpeg_video_tags, codec->codec_id);
            if (!tag) {
                av_log(s, AV_LOG_ERROR, "unsupported video codec\n");
                return AVERROR(EINVAL);
            }
            avio_wl32(pb, SMJPEG_VID);
            avio_wb32(pb, 12);
            avio_wb32(pb, 0);      /* frame count, unknown at this point */
            avio_wb16(pb, codec->width);
            avio_wb16(pb, codec->height);
            avio_wl32(pb, tag);
            /* packet timestamps are stored in milliseconds */
            avpriv_set_pts_info(st, 32, 1, 1000);
        }
    }

    avio_wl32(pb, SMJPEG_HEND);
    avio_flush(pb);

    return 0;
}
 
/**
 * Write one data chunk: fourcc ('sndD' or 'vidD'), big-endian pts and
 * size, then the payload. Packets of any other media type are silently
 * dropped. Also tracks the overall end timestamp for the trailer.
 */
static int smjpeg_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    SMJPEGMuxContext *smc = s->priv_data;
    AVIOContext *pb = s->pb;
    AVCodecContext *codec = s->streams[pkt->stream_index]->codec;
    uint32_t tag;

    switch (codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        tag = SMJPEG_SNDD;
        break;
    case AVMEDIA_TYPE_VIDEO:
        tag = SMJPEG_VIDD;
        break;
    default:
        return 0;
    }

    avio_wl32(pb, tag);
    avio_wb32(pb, pkt->pts);
    avio_wb32(pb, pkt->size);
    avio_write(pb, pkt->data, pkt->size);

    smc->duration = FFMAX(smc->duration, pkt->pts + pkt->duration);
    return 0;
}
 
/**
 * Finish the file: on seekable output, patch the accumulated duration
 * into the header (offset 12), then append the 'DONE' marker.
 */
static int smjpeg_write_trailer(AVFormatContext *s)
{
    SMJPEGMuxContext *smc = s->priv_data;
    AVIOContext *pb = s->pb;

    if (pb->seekable) {
        int64_t end_pos = avio_tell(pb);

        avio_seek(pb, 12, SEEK_SET);
        avio_wb32(pb, smc->duration);
        avio_seek(pb, end_pos, SEEK_SET);
    }

    avio_wl32(pb, SMJPEG_DONE);

    return 0;
}
 
/* Muxer registration for Loki SDL Motion JPEG: defaults to MJPEG video
 * plus 16-bit little-endian PCM audio. */
AVOutputFormat ff_smjpeg_muxer = {
    .name           = "smjpeg",
    .long_name      = NULL_IF_CONFIG_SMALL("Loki SDL MJPEG"),
    .priv_data_size = sizeof(SMJPEGMuxContext),
    .audio_codec    = AV_CODEC_ID_PCM_S16LE,
    .video_codec    = AV_CODEC_ID_MJPEG,
    .write_header   = smjpeg_write_header,
    .write_packet   = smjpeg_write_packet,
    .write_trailer  = smjpeg_write_trailer,
    .flags          = AVFMT_GLOBALHEADER | AVFMT_TS_NONSTRICT,
    .codec_tag      = (const AVCodecTag *const []){ ff_codec_smjpeg_video_tags, ff_codec_smjpeg_audio_tags, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/smoothstreamingenc.c
0,0 → 1,643
/*
* Live smooth streaming fragmenter
* Copyright (c) 2012 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "config.h"
#include <float.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
 
#include "avformat.h"
#include "internal.h"
#include "os_support.h"
#include "avc.h"
#include "url.h"
#include "isom.h"
 
#include "libavutil/opt.h"
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/intreadwrite.h"
 
/* One written media fragment of a smooth-streaming output stream. */
typedef struct {
    char file[1024];          /* path of the fragment's data file */
    char infofile[1024];      /* path of the auxiliary info file */
    int64_t start_time, duration;
    int n;                    /* fragment sequence number */
    int64_t start_pos, size;  /* position/size within the logical output */
} Fragment;
 
/* Per-stream muxing state for the smooth-streaming fragmenter. */
typedef struct {
    AVFormatContext *ctx;     /* nested muxer producing the fragment data */
    int ctx_inited;
    char dirname[1024];
    uint8_t iobuf[32768];     /* buffer backing the nested muxer's AVIOContext */
    URLContext *out;          // Current output stream where all output is written
    URLContext *out2;         // Auxiliary output stream where all output is also written
    URLContext *tail_out;     // The actual main output stream, if we're currently seeked back to write elsewhere
    int64_t tail_pos, cur_pos, cur_start_pos;
    int packets_written;
    const char *stream_type_tag;
    int nb_fragments, fragments_size, fragment_index;
    Fragment **fragments;     /* written fragments, in order */

    const char *fourcc;       /* FourCC advertised in the manifest */
    char *private_str;        /* hex-encoded codec private data for the manifest */
    int packet_size;
    int audio_tag;
} OutputStream;
 
/* Muxer-wide options and state (options likely exposed via AVOption —
 * the options table is outside this excerpt). */
typedef struct {
    const AVClass *class;  /* Class for private options. */
    int window_size;       /* max fragments advertised in a live manifest (0 = all) */
    int extra_window_size;
    int lookahead_count;   /* fragments withheld from the live manifest */
    int min_frag_duration;
    int remove_at_exit;
    OutputStream *streams; /* one per AVStream */
    int has_video, has_audio;
    int nb_fragments;
} SmoothStreamingContext;
 
/**
 * AVIOContext write callback: mirror the buffer to the current output
 * (and the auxiliary info output when present), advancing the logical
 * position and the high-water mark.
 */
static int ism_write(void *opaque, uint8_t *buf, int buf_size)
{
    OutputStream *os = opaque;

    if (os->out)
        ffurl_write(os->out, buf, buf_size);
    if (os->out2)
        ffurl_write(os->out2, buf, buf_size);

    os->cur_pos += buf_size;
    os->tail_pos = FFMAX(os->tail_pos, os->cur_pos);
    return buf_size;
}
 
/**
 * AVIOContext seek callback. Seeks within the current (tail) fragment
 * are forwarded directly; seeking back into an already-written fragment
 * reopens that fragment's files for in-place updating, stashing the main
 * output in tail_out so the next seek can restore it.
 *
 * Only SEEK_SET is supported.
 *
 * @return the new logical offset, or a negative AVERROR code
 */
static int64_t ism_seek(void *opaque, int64_t offset, int whence)
{
    OutputStream *os = opaque;
    int i;
    if (whence != SEEK_SET)
        return AVERROR(ENOSYS);
    /* if we were patching an old fragment, close it and restore the tail */
    if (os->tail_out) {
        if (os->out) {
            ffurl_close(os->out);
        }
        if (os->out2) {
            ffurl_close(os->out2);
        }
        os->out = os->tail_out;
        os->out2 = NULL;
        os->tail_out = NULL;
    }
    /* target inside the currently open fragment: plain seek */
    if (offset >= os->cur_start_pos) {
        if (os->out)
            ffurl_seek(os->out, offset - os->cur_start_pos, SEEK_SET);
        os->cur_pos = offset;
        return offset;
    }
    /* otherwise find the finished fragment containing the offset */
    for (i = os->nb_fragments - 1; i >= 0; i--) {
        Fragment *frag = os->fragments[i];
        if (offset >= frag->start_pos && offset < frag->start_pos + frag->size) {
            int ret;
            AVDictionary *opts = NULL;
            os->tail_out = os->out;
            /* truncate=0: open for update without discarding existing data */
            av_dict_set(&opts, "truncate", "0", 0);
            ret = ffurl_open(&os->out, frag->file, AVIO_FLAG_READ_WRITE, &os->ctx->interrupt_callback, &opts);
            av_dict_free(&opts);
            if (ret < 0) {
                os->out = os->tail_out;
                os->tail_out = NULL;
                return ret;
            }
            av_dict_set(&opts, "truncate", "0", 0);
            /* the info file is best-effort; failure leaves out2 NULL */
            ffurl_open(&os->out2, frag->infofile, AVIO_FLAG_READ_WRITE, &os->ctx->interrupt_callback, &opts);
            av_dict_free(&opts);
            ffurl_seek(os->out, offset - frag->start_pos, SEEK_SET);
            if (os->out2)
                ffurl_seek(os->out2, offset - frag->start_pos, SEEK_SET);
            os->cur_pos = offset;
            return offset;
        }
    }
    return AVERROR(EIO);
}
 
/**
 * Build the hex-encoded CodecPrivateData string for the manifest from
 * the stream's extradata (converted to Annex B format for H.264 first).
 * On any failure os->private_str is simply left NULL.
 *
 * Fix over the original: the av_mallocz() result was used without a
 * NULL check, dereferencing NULL in snprintf() on allocation failure;
 * the failure path also releases the temporary Annex B buffer.
 */
static void get_private_data(OutputStream *os)
{
    AVCodecContext *codec = os->ctx->streams[0]->codec;
    uint8_t *ptr = codec->extradata;
    int size = codec->extradata_size;
    int i;

    if (codec->codec_id == AV_CODEC_ID_H264) {
        ff_avc_write_annexb_extradata(ptr, &ptr, &size);
        if (!ptr)
            ptr = codec->extradata;
    }
    if (!ptr)
        return;
    /* two hex digits per byte, plus the terminating NUL */
    os->private_str = av_mallocz(2 * size + 1);
    if (!os->private_str)
        goto fail;
    for (i = 0; i < size; i++)
        snprintf(&os->private_str[2 * i], 3, "%02x", ptr[i]);
fail:
    if (ptr != codec->extradata)
        av_free(ptr);
}
 
/**
 * Free all per-stream muxer state: open URL contexts, nested muxer
 * contexts (writing their trailers first), private-data strings and
 * fragment records. Safe to call before the streams array exists.
 */
static void ism_free(AVFormatContext *s)
{
    SmoothStreamingContext *c = s->priv_data;
    int n, f;

    if (!c->streams)
        return;

    for (n = 0; n < s->nb_streams; n++) {
        OutputStream *os = &c->streams[n];

        /* ffurl_close() tolerates NULL, so close unconditionally */
        ffurl_close(os->out);
        ffurl_close(os->out2);
        ffurl_close(os->tail_out);
        os->out = os->out2 = os->tail_out = NULL;

        if (os->ctx && os->ctx_inited)
            av_write_trailer(os->ctx);
        if (os->ctx && os->ctx->pb)
            av_free(os->ctx->pb);
        if (os->ctx)
            avformat_free_context(os->ctx);

        av_free(os->private_str);
        for (f = 0; f < os->nb_fragments; f++)
            av_free(os->fragments[f]);
        av_free(os->fragments);
    }
    av_freep(&c->streams);
}
 
/**
 * Emit the <c> chunk list for one stream into the manifest.
 *
 * @param final       nonzero when writing the final (VOD) manifest; then
 *                    no lookahead fragments are skipped and, unless old
 *                    fragments were removed, chunks are indexed by number
 * @param skip        number of trailing (lookahead) fragments to omit
 * @param window_size when nonzero, limit the list to the newest fragments
 */
static void output_chunk_list(OutputStream *os, AVIOContext *out, int final, int skip, int window_size)
{
    int removed = 0, i, start = 0;
    if (os->nb_fragments <= 0)
        return;
    /* a nonzero first fragment number means earlier fragments were removed */
    if (os->fragments[0]->n > 0)
        removed = 1;
    if (final)
        skip = 0;
    if (window_size)
        start = FFMAX(os->nb_fragments - skip - window_size, 0);
    for (i = start; i < os->nb_fragments - skip; i++) {
        Fragment *frag = os->fragments[i];
        if (!final || removed)
            avio_printf(out, "<c t=\"%"PRIu64"\" d=\"%"PRIu64"\" />\n", frag->start_time, frag->duration);
        else
            avio_printf(out, "<c n=\"%d\" d=\"%"PRIu64"\" />\n", frag->n, frag->duration);
    }
}
 
/**
 * Write the smooth-streaming "Manifest" XML file describing all video
 * and audio quality levels and their chunk lists. The manifest is
 * written to a temporary file first and renamed into place so readers
 * never see a partial manifest.
 *
 * @param final nonzero for the final (VOD) manifest, zero while live
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int write_manifest(AVFormatContext *s, int final)
{
    SmoothStreamingContext *c = s->priv_data;
    AVIOContext *out;
    char filename[1024], temp_filename[1024];
    int ret, i, video_chunks = 0, audio_chunks = 0, video_streams = 0, audio_streams = 0;
    int64_t duration = 0;

    snprintf(filename, sizeof(filename), "%s/Manifest", s->filename);
    snprintf(temp_filename, sizeof(temp_filename), "%s/Manifest.tmp", s->filename);
    ret = avio_open2(&out, temp_filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "Unable to open %s for writing\n", temp_filename);
        return ret;
    }
    avio_printf(out, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
    /* collect total duration and per-type chunk/stream counts */
    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        if (os->nb_fragments > 0) {
            Fragment *last = os->fragments[os->nb_fragments - 1];
            duration = last->start_time + last->duration;
        }
        if (s->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_chunks = os->nb_fragments;
            video_streams++;
        } else {
            audio_chunks = os->nb_fragments;
            audio_streams++;
        }
    }
    /* live manifests advertise no fixed duration or chunk count */
    if (!final) {
        duration = 0;
        video_chunks = audio_chunks = 0;
    }
    if (c->window_size) {
        video_chunks = FFMIN(video_chunks, c->window_size);
        audio_chunks = FFMIN(audio_chunks, c->window_size);
    }
    avio_printf(out, "<SmoothStreamingMedia MajorVersion=\"2\" MinorVersion=\"0\" Duration=\"%"PRIu64"\"", duration);
    if (!final)
        avio_printf(out, " IsLive=\"true\" LookAheadFragmentCount=\"%d\" DVRWindowLength=\"0\"", c->lookahead_count);
    avio_printf(out, ">\n");
    if (c->has_video) {
        int last = -1, index = 0;
        avio_printf(out, "<StreamIndex Type=\"video\" QualityLevels=\"%d\" Chunks=\"%d\" Url=\"QualityLevels({bitrate})/Fragments(video={start time})\">\n", video_streams, video_chunks);
        for (i = 0; i < s->nb_streams; i++) {
            OutputStream *os = &c->streams[i];
            if (s->streams[i]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
                continue;
            last = i;
            avio_printf(out, "<QualityLevel Index=\"%d\" Bitrate=\"%d\" FourCC=\"%s\" MaxWidth=\"%d\" MaxHeight=\"%d\" CodecPrivateData=\"%s\" />\n", index, s->streams[i]->codec->bit_rate, os->fourcc, s->streams[i]->codec->width, s->streams[i]->codec->height, os->private_str);
            index++;
        }
        /* chunk list is taken from the last video stream (same timeline) */
        output_chunk_list(&c->streams[last], out, final, c->lookahead_count, c->window_size);
        avio_printf(out, "</StreamIndex>\n");
    }
    if (c->has_audio) {
        int last = -1, index = 0;
        avio_printf(out, "<StreamIndex Type=\"audio\" QualityLevels=\"%d\" Chunks=\"%d\" Url=\"QualityLevels({bitrate})/Fragments(audio={start time})\">\n", audio_streams, audio_chunks);
        for (i = 0; i < s->nb_streams; i++) {
            OutputStream *os = &c->streams[i];
            if (s->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
                continue;
            last = i;
            avio_printf(out, "<QualityLevel Index=\"%d\" Bitrate=\"%d\" FourCC=\"%s\" SamplingRate=\"%d\" Channels=\"%d\" BitsPerSample=\"16\" PacketSize=\"%d\" AudioTag=\"%d\" CodecPrivateData=\"%s\" />\n", index, s->streams[i]->codec->bit_rate, os->fourcc, s->streams[i]->codec->sample_rate, s->streams[i]->codec->channels, os->packet_size, os->audio_tag, os->private_str);
            index++;
        }
        output_chunk_list(&c->streams[last], out, final, c->lookahead_count, c->window_size);
        avio_printf(out, "</StreamIndex>\n");
    }
    avio_printf(out, "</SmoothStreamingMedia>\n");
    avio_flush(out);
    avio_close(out);
    /* atomic-ish publish; NOTE(review): rename() return value is ignored */
    rename(temp_filename, filename);
    return 0;
}
 
/*
 * Initialize the Smooth Streaming muxer: create the output directory tree,
 * spawn one chained "ismv" muxer per input stream (each writing through a
 * custom AVIO callback pair), record per-stream codec metadata for the
 * manifest, and write the initial (live) manifest.
 *
 * Returns 0 on success or a negative AVERROR code; on failure all partially
 * allocated state is released via ism_free().
 *
 * Fix: the "no video stream and no min frag duration" case previously set
 * ret = AVERROR(EINVAL) but then fell through to write_manifest(), which
 * overwrote ret and silently swallowed the error.  It now jumps to fail.
 */
static int ism_write_header(AVFormatContext *s)
{
    SmoothStreamingContext *c = s->priv_data;
    int ret = 0, i;
    AVOutputFormat *oformat;

    if (mkdir(s->filename, 0777) < 0) {
        av_log(s, AV_LOG_ERROR, "mkdir failed\n");
        ret = AVERROR(errno);
        goto fail;
    }

    oformat = av_guess_format("ismv", NULL, NULL);
    if (!oformat) {
        ret = AVERROR_MUXER_NOT_FOUND;
        goto fail;
    }

    c->streams = av_mallocz(sizeof(*c->streams) * s->nb_streams);
    if (!c->streams) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        AVFormatContext *ctx;
        AVStream *st;
        AVDictionary *opts = NULL;
        char buf[10];

        /* The bitrate doubles as the quality-level directory name. */
        if (!s->streams[i]->codec->bit_rate) {
            av_log(s, AV_LOG_ERROR, "No bit rate set for stream %d\n", i);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        snprintf(os->dirname, sizeof(os->dirname), "%s/QualityLevels(%d)", s->filename, s->streams[i]->codec->bit_rate);
        if (mkdir(os->dirname, 0777) < 0) {
            ret = AVERROR(errno);
            av_log(s, AV_LOG_ERROR, "mkdir failed\n");
            goto fail;
        }

        ctx = avformat_alloc_context();
        if (!ctx) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        os->ctx = ctx;
        ctx->oformat = oformat;
        ctx->interrupt_callback = s->interrupt_callback;

        if (!(st = avformat_new_stream(ctx, NULL))) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        avcodec_copy_context(st->codec, s->streams[i]->codec);
        st->sample_aspect_ratio = s->streams[i]->sample_aspect_ratio;

        /* Route the chained muxer's output through our callbacks so the
         * fragments land in per-stream temp files. */
        ctx->pb = avio_alloc_context(os->iobuf, sizeof(os->iobuf), AVIO_FLAG_WRITE, os, NULL, ism_write, ism_seek);
        if (!ctx->pb) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        snprintf(buf, sizeof(buf), "%d", c->lookahead_count);
        av_dict_set(&opts, "ism_lookahead", buf, 0);
        av_dict_set(&opts, "movflags", "frag_custom", 0);
        if ((ret = avformat_write_header(ctx, &opts)) < 0) {
            goto fail;
        }
        os->ctx_inited = 1;
        avio_flush(ctx->pb);
        av_dict_free(&opts);
        s->streams[i]->time_base = st->time_base;
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            c->has_video = 1;
            os->stream_type_tag = "video";
            if (st->codec->codec_id == AV_CODEC_ID_H264) {
                os->fourcc = "H264";
            } else if (st->codec->codec_id == AV_CODEC_ID_VC1) {
                os->fourcc = "WVC1";
            } else {
                av_log(s, AV_LOG_ERROR, "Unsupported video codec\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
        } else {
            c->has_audio = 1;
            os->stream_type_tag = "audio";
            if (st->codec->codec_id == AV_CODEC_ID_AAC) {
                os->fourcc = "AACL";
                os->audio_tag = 0xff;
            } else if (st->codec->codec_id == AV_CODEC_ID_WMAPRO) {
                os->fourcc = "WMAP";
                os->audio_tag = 0x0162;
            } else {
                av_log(s, AV_LOG_ERROR, "Unsupported audio codec\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            os->packet_size = st->codec->block_align ? st->codec->block_align : 4;
        }
        get_private_data(os);
    }

    if (!c->has_video && c->min_frag_duration <= 0) {
        av_log(s, AV_LOG_WARNING, "no video stream and no min frag duration set\n");
        ret = AVERROR(EINVAL);
        goto fail; /* previously fell through, losing the error code */
    }
    ret = write_manifest(s, 0);

fail:
    if (ret)
        ism_free(s);
    return ret;
}
 
/*
 * Open a just-written fragment file and extract its absolute start
 * timestamp, duration, and moof box size.
 *
 * Expected layout: [moof size][moof][mfhd]...[traf]...[uuid(tfxd)],
 * where the tfxd uuid box carries the Smooth Streaming timestamp pair.
 *
 * Returns 0 on success, AVERROR(EIO) on any structural mismatch, or the
 * error from avio_open2().
 *
 * Fix: the mfhd box length is now also validated against a lower bound of
 * 8 bytes; previously a corrupt length below 8 made the subsequent
 * avio_seek(in, len - 8, SEEK_CUR) seek backwards.
 */
static int parse_fragment(AVFormatContext *s, const char *filename, int64_t *start_ts, int64_t *duration, int64_t *moof_size, int64_t size)
{
    AVIOContext *in;
    int ret;
    uint32_t len;

    if ((ret = avio_open2(&in, filename, AVIO_FLAG_READ, &s->interrupt_callback, NULL)) < 0)
        return ret;
    ret = AVERROR(EIO);
    *moof_size = avio_rb32(in);
    if (*moof_size < 8 || *moof_size > size)
        goto fail;
    if (avio_rl32(in) != MKTAG('m','o','o','f'))
        goto fail;
    len = avio_rb32(in);
    if (len < 8 || len > *moof_size) /* lower bound added */
        goto fail;
    if (avio_rl32(in) != MKTAG('m','f','h','d'))
        goto fail;
    avio_seek(in, len - 8, SEEK_CUR); /* skip the rest of mfhd */
    avio_rb32(in); /* traf size */
    if (avio_rl32(in) != MKTAG('t','r','a','f'))
        goto fail;
    while (avio_tell(in) < *moof_size) {
        /* renamed from 'len' to avoid shadowing the outer variable */
        uint32_t box_len = avio_rb32(in);
        uint32_t tag     = avio_rl32(in);
        int64_t  end     = avio_tell(in) + box_len - 8;
        if (box_len < 8 || box_len >= *moof_size)
            goto fail;
        if (tag == MKTAG('u','u','i','d')) {
            /* tfxd extension box UUID (absolute fragment time + duration) */
            static const uint8_t tfxd[] = {
                0x6d, 0x1d, 0x9b, 0x05, 0x42, 0xd5, 0x44, 0xe6,
                0x80, 0xe2, 0x14, 0x1d, 0xaf, 0xf7, 0x57, 0xb2
            };
            uint8_t uuid[16];
            avio_read(in, uuid, 16);
            if (!memcmp(uuid, tfxd, 16) && box_len >= 8 + 16 + 4 + 16) {
                avio_seek(in, 4, SEEK_CUR); /* version + flags */
                *start_ts = avio_rb64(in);
                *duration = avio_rb64(in);
                ret = 0;
                break;
            }
        }
        avio_seek(in, end, SEEK_SET);
    }
fail:
    avio_close(in);
    return ret;
}
 
/*
 * Append a new Fragment record (file names, timing, and byte range) to the
 * stream's fragment list, growing the pointer array as needed.
 * Returns 0 on success or a negative AVERROR code.
 */
static int add_fragment(OutputStream *os, const char *file, const char *infofile, int64_t start_time, int64_t duration, int64_t start_pos, int64_t size)
{
    Fragment *frag;

    /* Grow the pointer array geometrically once it is full. */
    if (os->nb_fragments >= os->fragments_size) {
        int err;
        os->fragments_size = (os->fragments_size + 1) * 2;
        err = av_reallocp(&os->fragments,
                          sizeof(*os->fragments) * os->fragments_size);
        if (err < 0) {
            /* av_reallocp freed the array; reset all bookkeeping. */
            os->fragments_size = 0;
            os->nb_fragments   = 0;
            return err;
        }
    }

    frag = av_mallocz(sizeof(*frag));
    if (!frag)
        return AVERROR(ENOMEM);

    av_strlcpy(frag->file, file, sizeof(frag->file));
    av_strlcpy(frag->infofile, infofile, sizeof(frag->infofile));
    frag->start_time = start_time;
    frag->duration   = duration;
    frag->start_pos  = start_pos;
    frag->size       = size;
    frag->n          = os->fragment_index++;

    os->fragments[os->nb_fragments++] = frag;
    return 0;
}
 
/*
 * Copy the first `size` bytes of `infile` into `outfile` (used to extract
 * the moof header of a fragment into its FragmentInfo companion file).
 * Returns 0 on success or a negative AVERROR code.
 */
static int copy_moof(AVFormatContext *s, const char* infile, const char *outfile, int64_t size)
{
    AVIOContext *in, *out;
    int ret;

    ret = avio_open2(&in, infile, AVIO_FLAG_READ, &s->interrupt_callback, NULL);
    if (ret < 0)
        return ret;
    ret = avio_open2(&out, outfile, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
    if (ret < 0) {
        avio_close(in);
        return ret;
    }

    /* Stream the requested byte range across in fixed-size chunks. */
    while (size > 0) {
        uint8_t chunk[8192];
        int want = FFMIN(size, sizeof(chunk));
        int got  = avio_read(in, chunk, want);

        if (got <= 0) {
            ret = AVERROR(EIO);
            break;
        }
        avio_write(out, chunk, got);
        size -= got;
    }

    avio_flush(out);
    avio_close(out);
    avio_close(in);
    return ret;
}
 
/*
 * Cut the current fragment on every stream with pending packets: flush the
 * chained ismv muxer into a per-stream temp file, parse the fragment's tfxd
 * box for its timestamp and duration, move it to its final Fragments(...)
 * name, copy its moof into a FragmentInfo(...) file, and register it in the
 * fragment list.  When a window size is configured (or on final flush with
 * remove_at_exit), expired fragments are deleted from disk.  Finally the
 * manifest is rewritten.  final != 0 marks the presentation as finished.
 */
static int ism_flush(AVFormatContext *s, int final)
{
    SmoothStreamingContext *c = s->priv_data;
    int i, ret = 0;

    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        char filename[1024], target_filename[1024], header_filename[1024];
        int64_t start_pos = os->tail_pos, size;
        int64_t start_ts, duration, moof_size;
        if (!os->packets_written)
            continue;

        snprintf(filename, sizeof(filename), "%s/temp", os->dirname);
        ret = ffurl_open(&os->out, filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
        if (ret < 0)
            break;
        os->cur_start_pos = os->tail_pos;
        /* A NULL packet asks the chained muxer to finish the fragment now. */
        av_write_frame(os->ctx, NULL);
        avio_flush(os->ctx->pb);
        os->packets_written = 0;
        /* NOTE(review): this early return skips ffurl_close(os->out) and the
         * manifest update — if os->tail_out can actually be set here, os->out
         * is leaked; confirm reachability before relying on this path. */
        if (!os->out || os->tail_out)
            return AVERROR(EIO);

        ffurl_close(os->out);
        os->out = NULL;
        size = os->tail_pos - start_pos;
        if ((ret = parse_fragment(s, filename, &start_ts, &duration, &moof_size, size)) < 0)
            break;
        snprintf(header_filename, sizeof(header_filename), "%s/FragmentInfo(%s=%"PRIu64")", os->dirname, os->stream_type_tag, start_ts);
        snprintf(target_filename, sizeof(target_filename), "%s/Fragments(%s=%"PRIu64")", os->dirname, os->stream_type_tag, start_ts);
        copy_moof(s, filename, header_filename, moof_size);
        rename(filename, target_filename);
        add_fragment(os, target_filename, header_filename, start_ts, duration, start_pos, size);
    }

    if (c->window_size || (final && c->remove_at_exit)) {
        for (i = 0; i < s->nb_streams; i++) {
            OutputStream *os = &c->streams[i];
            int j;
            /* Keep window + extra_window + lookahead fragments on disk;
             * everything older is removed. */
            int remove = os->nb_fragments - c->window_size - c->extra_window_size - c->lookahead_count;
            if (final && c->remove_at_exit)
                remove = os->nb_fragments;
            if (remove > 0) {
                for (j = 0; j < remove; j++) {
                    unlink(os->fragments[j]->file);
                    unlink(os->fragments[j]->infofile);
                    av_free(os->fragments[j]);
                }
                os->nb_fragments -= remove;
                memmove(os->fragments, os->fragments + remove, os->nb_fragments * sizeof(*os->fragments));
            }
            if (final && c->remove_at_exit)
                rmdir(os->dirname);
        }
    }

    if (ret >= 0)
        ret = write_manifest(s, final);
    return ret;
}
 
/*
 * Feed one packet to the chained muxer for its stream, first cutting a new
 * fragment when the minimum fragment duration has elapsed.  Fragments are
 * only cut on keyframes, and — when a video stream exists — only on video
 * packets, so all streams stay fragment-aligned.
 */
static int ism_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    SmoothStreamingContext *c = s->priv_data;
    AVStream *st = s->streams[pkt->stream_index];
    OutputStream *os = &c->streams[pkt->stream_index];
    int64_t frag_end_dts = (c->nb_fragments + 1LL) * c->min_frag_duration;
    int cut, ret;

    if (st->first_dts == AV_NOPTS_VALUE)
        st->first_dts = pkt->dts;

    cut = (!c->has_video || st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
          (pkt->flags & AV_PKT_FLAG_KEY) && os->packets_written &&
          av_compare_ts(pkt->dts - st->first_dts, st->time_base,
                        frag_end_dts, AV_TIME_BASE_Q) >= 0;

    if (cut) {
        ret = ism_flush(s, 0);
        if (ret < 0)
            return ret;
        c->nb_fragments++;
    }

    os->packets_written++;
    return ff_write_chained(os->ctx, 0, pkt, s);
}
 
/*
 * Finish the stream: flush the last fragments and write the final manifest,
 * optionally wiping the whole output directory, then free all muxer state.
 */
static int ism_write_trailer(AVFormatContext *s)
{
    SmoothStreamingContext *c = s->priv_data;

    ism_flush(s, 1);

    if (c->remove_at_exit) {
        char manifest[1024];

        snprintf(manifest, sizeof(manifest), "%s/Manifest", s->filename);
        unlink(manifest);
        rmdir(s->filename);
    }

    ism_free(s);
    return 0;
}
 
#define OFFSET(x) offsetof(SmoothStreamingContext, x)
#define E AV_OPT_FLAG_ENCODING_PARAM
/* User-settable muxer options (see AVOption docs); min_frag_duration is in
 * microseconds, window/lookahead counts are in fragments. */
static const AVOption options[] = {
    { "window_size", "number of fragments kept in the manifest", OFFSET(window_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, E },
    { "extra_window_size", "number of fragments kept outside of the manifest before removing from disk", OFFSET(extra_window_size), AV_OPT_TYPE_INT, { .i64 = 5 }, 0, INT_MAX, E },
    { "lookahead_count", "number of lookahead fragments", OFFSET(lookahead_count), AV_OPT_TYPE_INT, { .i64 = 2 }, 0, INT_MAX, E },
    { "min_frag_duration", "minimum fragment duration (in microseconds)", OFFSET(min_frag_duration), AV_OPT_TYPE_INT64, { .i64 = 5000000 }, 0, INT_MAX, E },
    { "remove_at_exit", "remove all fragments when finished", OFFSET(remove_at_exit), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, E },
    { NULL },
};
 
/* AVClass wiring the option table above into the muxer's private context. */
static const AVClass ism_class = {
    .class_name = "smooth streaming muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
 
/* Muxer registration.  AVFMT_NOFILE: output is a directory tree managed by
 * the muxer itself, not a single file opened by the caller. */
AVOutputFormat ff_smoothstreaming_muxer = {
    .name           = "smoothstreaming",
    .long_name      = NULL_IF_CONFIG_SMALL("Smooth Streaming Muxer"),
    .priv_data_size = sizeof(SmoothStreamingContext),
    .audio_codec    = AV_CODEC_ID_AAC,
    .video_codec    = AV_CODEC_ID_H264,
    .flags          = AVFMT_GLOBALHEADER | AVFMT_NOFILE,
    .write_header   = ism_write_header,
    .write_packet   = ism_write_packet,
    .write_trailer  = ism_write_trailer,
    .codec_tag      = (const AVCodecTag* const []){ ff_mp4_obj_type, 0 },
    .priv_class     = &ism_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/smush.c
0,0 → 1,238
/*
* LucasArts Smush demuxer
* Copyright (c) 2006 Cyril Zorin
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "avio.h"
 
/* Demuxer state: container version (0 = ANIM, 1 = SANM) and the indices of
 * the streams created in smush_read_header(). */
typedef struct {
    int version;            // 0 for ANIM files, 1 for SANM files
    int audio_stream_index; // valid only when an audio track was found
    int video_stream_index;
} SMUSHContext;
 
/*
 * Probe for a Smush file: either SANM+SHDR (v1) or ANIM+AHDR (v0),
 * with the header tag at offset 8 (after the 4-byte movie size).
 */
static int smush_read_probe(AVProbeData *p)
{
    uint32_t magic  = AV_RL32(p->buf);
    uint32_t header = AV_RL32(p->buf + 8);

    if ((magic == MKTAG('S', 'A', 'N', 'M') && header == MKTAG('S', 'H', 'D', 'R')) ||
        (magic == MKTAG('A', 'N', 'I', 'M') && header == MKTAG('A', 'H', 'D', 'R')))
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/*
 * Parse the Smush file header and create the video (and optional audio)
 * streams.  Two layouts exist:
 *   - ANIM (v0): AHDR chunk with subversion, frame count and a 256-entry
 *     palette, which is exported as codec extradata.
 *   - SANM (v1): SHDR chunk with dimensions, then an FLHD chunk whose
 *     sub-chunks may describe a Wave audio track.
 * Returns 0 on success or a negative AVERROR code.
 *
 * Fixes: a Wave sub-chunk with chunk_size < 8 previously made
 * avio_skip(pb, chunk_size - 8) wrap around (unsigned underflow) and skip
 * almost 4 GiB; sample_rate/channels are now zero-initialized so they are
 * never read indeterminate.
 */
static int smush_read_header(AVFormatContext *ctx)
{
    SMUSHContext *smush = ctx->priv_data;
    AVIOContext *pb = ctx->pb;
    AVStream *vst, *ast;
    uint32_t magic, nframes, size, subversion, i;
    uint32_t width = 0, height = 0, got_audio = 0, read = 0;
    uint32_t sample_rate = 0, channels = 0, palette[256];

    magic = avio_rb32(pb);
    avio_skip(pb, 4); // skip movie size

    if (magic == MKBETAG('A', 'N', 'I', 'M')) {
        if (avio_rb32(pb) != MKBETAG('A', 'H', 'D', 'R'))
            return AVERROR_INVALIDDATA;

        size = avio_rb32(pb);
        if (size < 3 * 256 + 6) /* must hold subversion+nframes+pad+palette */
            return AVERROR_INVALIDDATA;

        smush->version = 0;
        subversion = avio_rl16(pb);
        nframes = avio_rl16(pb);

        avio_skip(pb, 2); // skip pad

        for (i = 0; i < 256; i++)
            palette[i] = avio_rb24(pb);

        avio_skip(pb, size - (3 * 256 + 6));
    } else if (magic == MKBETAG('S', 'A', 'N', 'M')) {
        if (avio_rb32(pb) != MKBETAG('S', 'H', 'D', 'R'))
            return AVERROR_INVALIDDATA;

        size = avio_rb32(pb);
        if (size < 14)
            return AVERROR_INVALIDDATA;

        smush->version = 1;
        subversion = avio_rl16(pb);
        nframes = avio_rl32(pb);
        avio_skip(pb, 2); // skip pad
        width  = avio_rl16(pb);
        height = avio_rl16(pb);
        avio_skip(pb, 2); // skip pad
        avio_skip(pb, size - 14);

        if (avio_rb32(pb) != MKBETAG('F', 'L', 'H', 'D'))
            return AVERROR_INVALIDDATA;

        /* Scan FLHD sub-chunks until an audio descriptor is found (or the
         * chunk ends). */
        size = avio_rb32(pb);
        while (!got_audio && ((read + 8) < size)) {
            uint32_t sig, chunk_size;

            if (url_feof(pb))
                return AVERROR_EOF;

            sig = avio_rb32(pb);
            chunk_size = avio_rb32(pb);
            read += 8;
            switch (sig) {
            case MKBETAG('W', 'a', 'v', 'e'):
                /* chunk_size must at least cover the rate and channel
                 * fields; chunk_size - 8 would otherwise wrap around. */
                if (chunk_size < 8)
                    return AVERROR_INVALIDDATA;
                got_audio = 1;
                sample_rate = avio_rl32(pb);
                channels = avio_rl32(pb);
                avio_skip(pb, chunk_size - 8);
                read += chunk_size;
                break;
            case MKBETAG('B', 'l', '1', '6'):
            case MKBETAG('A', 'N', 'N', 'O'):
                avio_skip(pb, chunk_size);
                read += chunk_size;
                break;
            default:
                return AVERROR_INVALIDDATA;
            }
        }

        avio_skip(pb, size - read);
    } else {
        av_log(ctx, AV_LOG_ERROR, "Wrong magic\n");
        return AVERROR_INVALIDDATA;
    }

    vst = avformat_new_stream(ctx, 0);
    if (!vst)
        return AVERROR(ENOMEM);

    smush->video_stream_index = vst->index;

    vst->start_time = 0;
    vst->duration =
    vst->nb_frames = nframes;
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = AV_CODEC_ID_SANM;
    vst->codec->codec_tag  = 0;
    vst->codec->width      = width;
    vst->codec->height     = height;

    /* Fixed ~15 fps: 66667 microseconds per frame. */
    avpriv_set_pts_info(vst, 64, 66667, 1000000);

    if (!smush->version) {
        /* v0 extradata: 16-bit subversion followed by the 256-entry palette. */
        if (ff_alloc_extradata(vst->codec, 1024 + 2))
            return AVERROR(ENOMEM);

        AV_WL16(vst->codec->extradata, subversion);
        for (i = 0; i < 256; i++)
            AV_WL32(vst->codec->extradata + 2 + i * 4, palette[i]);
    }

    if (got_audio) {
        ast = avformat_new_stream(ctx, 0);
        if (!ast)
            return AVERROR(ENOMEM);

        smush->audio_stream_index = ast->index;

        ast->start_time = 0;
        ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id    = AV_CODEC_ID_VIMA;
        ast->codec->codec_tag   = 0;
        ast->codec->sample_rate = sample_rate;
        ast->codec->channels    = channels;

        avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
    }

    return 0;
}
 
/*
 * Read one packet.  v0 (ANIM) files deliver each whole FRME chunk as a
 * video packet.  In v1 (SANM) files a FRME chunk is only a container: its
 * header is consumed and the loop then descends into the Bl16 (video) and
 * Wave (audio) sub-chunks that follow.
 */
static int smush_read_packet(AVFormatContext *ctx, AVPacket *pkt)
{
    SMUSHContext *smush = ctx->priv_data;
    AVIOContext *pb = ctx->pb;
    int done = 0;

    while (!done) {
        uint32_t sig, size;

        if (url_feof(pb))
            return AVERROR_EOF;

        sig  = avio_rb32(pb);
        size = avio_rb32(pb);

        switch (sig) {
        case MKBETAG('F', 'R', 'M', 'E'):
            if (smush->version)
                break; /* v1: descend into the container, don't consume it */
            if (av_get_packet(pb, pkt, size) < 0)
                return AVERROR(EIO);

            pkt->stream_index = smush->video_stream_index;
            done = 1;
            break;
        case MKBETAG('B', 'l', '1', '6'):
            if (av_get_packet(pb, pkt, size) < 0)
                return AVERROR(EIO);

            pkt->stream_index = smush->video_stream_index;
            pkt->duration = 1;
            done = 1;
            break;
        case MKBETAG('W', 'a', 'v', 'e'):
            /* The first 12 bytes of the payload carry the sample counts the
             * duration is derived from below. */
            if (size < 13)
                return AVERROR_INVALIDDATA;
            if (av_get_packet(pb, pkt, size) < 13)
                return AVERROR(EIO);

            pkt->stream_index = smush->audio_stream_index;
            pkt->flags |= AV_PKT_FLAG_KEY;
            pkt->duration = AV_RB32(pkt->data);
            /* 0xFFFFFFFF is a sentinel: the real count sits at offset 8. */
            if (pkt->duration == 0xFFFFFFFFu)
                pkt->duration = AV_RB32(pkt->data + 8);
            done = 1;
            break;
        default:
            /* Unknown chunk: skip its payload and keep scanning. */
            avio_skip(pb, size);
            break;
        }
    }

    return 0;
}
 
/* Demuxer registration for LucasArts Smush (.san/.snm) files. */
AVInputFormat ff_smush_demuxer = {
    .name           = "smush",
    .long_name      = NULL_IF_CONFIG_SMALL("LucasArts Smush"),
    .priv_data_size = sizeof(SMUSHContext),
    .read_probe     = smush_read_probe,
    .read_header    = smush_read_header,
    .read_packet    = smush_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/sol.c
0,0 → 1,151
/*
* Sierra SOL demuxer
* Copyright Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/*
* Based on documents from Game Audio Player and own research
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#include "pcm.h"
 
/* if we don't know the size in advance */
/* NOTE(review): AU_UNKNOWN_SIZE is not referenced anywhere in this demuxer —
 * it looks like a leftover from the AU demuxer this file was modeled on;
 * confirm before removing. */
#define AU_UNKNOWN_SIZE ((uint32_t)(~0))
 
/*
 * Probe for a Sierra SOL file: a known 16-bit little-endian magic followed
 * by the literal bytes "SOL\0".
 *
 * Fix: the magic is a 2-byte field, so read it with AV_RL16.  The previous
 * code read 4 bytes with AV_RL32 and relied on implicit truncation to
 * uint16_t (same value on assignment, but a needless 4-byte read and a
 * -Wconversion warning).
 */
static int sol_probe(AVProbeData *p)
{
    /* check file header */
    uint16_t magic = AV_RL16(p->buf);
    if ((magic == 0x0B8D || magic == 0x0C0D || magic == 0x0C8D) &&
        p->buf[2] == 'S' && p->buf[3] == 'O' &&
        p->buf[4] == 'L' && p->buf[5] == 0)
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
 
#define SOL_DPCM 1
#define SOL_16BIT 4
#define SOL_STEREO 16
 
/*
 * Map the file magic and type flags to a codec id.  All DPCM variants share
 * AV_CODEC_ID_SOL_DPCM (the variant itself is carried in codec_tag, see
 * sol_codec_type()); plain PCM is 8-bit unsigned or 16-bit little-endian.
 */
static enum AVCodecID sol_codec_id(int magic, int type)
{
    if (magic == 0x0B8D)
        return (type & SOL_DPCM) ? AV_CODEC_ID_SOL_DPCM : AV_CODEC_ID_PCM_U8;
    if (type & SOL_DPCM)
        return AV_CODEC_ID_SOL_DPCM;
    return (type & SOL_16BIT) ? AV_CODEC_ID_PCM_S16LE : AV_CODEC_ID_PCM_U8;
}
 
/*
 * Pick the DPCM variant, returned to the decoder via codec_tag:
 * 1 = SOL_DPCM_OLD, 2 = SOL_DPCM_NEW8, 3 = SOL_DPCM_NEW16, -1 = not DPCM.
 */
static int sol_codec_type(int magic, int type)
{
    if (magic == 0x0B8D)
        return 1; // SOL_DPCM_OLD
    if (!(type & SOL_DPCM))
        return -1;
    if (type & SOL_16BIT)
        return 3; // SOL_DPCM_NEW16
    if (magic == 0x0C8D)
        return 1; // SOL_DPCM_OLD
    return 2;     // SOL_DPCM_NEW8
}
 
/* Old-magic files are always mono; otherwise the stereo bit decides. */
static int sol_channels(int magic, int type)
{
    if (magic != 0x0B8D && (type & SOL_STEREO))
        return 2;
    return 1;
}
 
/*
 * Parse the SOL header (magic, "SOL\0" tag, sample rate, type byte) and
 * create the single audio stream.  The DPCM variant is exported to the
 * decoder through codec_tag.  Returns 0 on success, -1 on bad header.
 */
static int sol_read_header(AVFormatContext *s)
{
    unsigned int magic, tag;
    AVIOContext *pb = s->pb;
    unsigned int id, channels, rate, type;
    enum AVCodecID codec;
    AVStream *st;

    /* check ".snd" header */
    magic = avio_rl16(pb);
    tag = avio_rl32(pb);
    if (tag != MKTAG('S', 'O', 'L', 0))
        return -1; /* NOTE(review): a proper AVERROR code would be clearer */
    rate = avio_rl16(pb);
    type = avio_r8(pb);
    avio_skip(pb, 4); /* size */
    if (magic != 0x0B8D)
        avio_r8(pb); /* newer SOLs contain padding byte */

    codec = sol_codec_id(magic, type);
    channels = sol_channels(magic, type);

    /* DPCM variant travels in codec_tag; 0 for plain PCM. */
    if (codec == AV_CODEC_ID_SOL_DPCM)
        id = sol_codec_type(magic, type);
    else id = 0;

    /* now we are ready: build format streams */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return -1;
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_tag = id;
    st->codec->codec_id = codec;
    st->codec->channels = channels;
    st->codec->channel_layout = channels == 1 ? AV_CH_LAYOUT_MONO :
                                                AV_CH_LAYOUT_STEREO;
    st->codec->sample_rate = rate;
    avpriv_set_pts_info(st, 64, 1, rate);
    return 0;
}
 
/* Raw read size per packet; the stream has no framing of its own. */
#define MAX_SIZE 4096

/*
 * Read up to MAX_SIZE bytes of raw sample data as one packet.  A short
 * final read is normal for raw PCM, so the CORRUPT flag av_get_packet may
 * have set is cleared.
 */
static int sol_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    int ret;

    if (url_feof(s->pb))
        return AVERROR(EIO);
    ret = av_get_packet(s->pb, pkt, MAX_SIZE);
    if (ret < 0)
        return ret;
    pkt->flags &= ~AV_PKT_FLAG_CORRUPT;
    pkt->stream_index = 0;
    return 0;
}
 
/* Demuxer registration; seeking reuses the generic raw-PCM seek helper. */
AVInputFormat ff_sol_demuxer = {
    .name        = "sol",
    .long_name   = NULL_IF_CONFIG_SMALL("Sierra SOL"),
    .read_probe  = sol_probe,
    .read_header = sol_read_header,
    .read_packet = sol_read_packet,
    .read_seek   = ff_pcm_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/sox.h
0,0 → 1,29
/*
* SoX native format common data
* Copyright (c) 2009 Daniel Verkamp <daniel@drv.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_SOX_H
#define AVFORMAT_SOX_H
 
#define SOX_FIXED_HDR (4 + 8 + 8 + 4 + 4) /**< Size of fixed header without magic */

/* File magic ".SoX" as stored in the file's native byte order. */
#define SOX_TAG MKTAG('.', 'S', 'o', 'X')
 
#endif /* AVFORMAT_SOX_H */
/contrib/sdk/sources/ffmpeg/libavformat/soxdec.c
0,0 → 1,134
/*
* SoX native format demuxer
* Copyright (c) 2009 Daniel Verkamp <daniel@drv.nu>
*
* Based on libSoX sox-fmt.c
* Copyright (c) 2008 robs@users.sourceforge.net
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* SoX native format demuxer
* @author Daniel Verkamp
* @see http://wiki.multimedia.cx/index.php?title=SoX_native_intermediate_format
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "libavutil/dict.h"
#include "avformat.h"
#include "internal.h"
#include "pcm.h"
#include "sox.h"
 
/*
 * Probe for a SoX native file.  The magic is written in the producer's
 * native byte order, so both little- and big-endian layouts are accepted.
 */
static int sox_probe(AVProbeData *p)
{
    uint32_t le = AV_RL32(p->buf);
    uint32_t be = AV_RB32(p->buf);

    return (le == SOX_TAG || be == SOX_TAG) ? AVPROBE_SCORE_MAX : 0;
}
 
/*
 * Parse the SoX native header: magic (which fixes the byte order and hence
 * the PCM codec), header size, sample count (skipped), IEEE-double sample
 * rate, channel count, and an optional comment exported as metadata.
 * Returns 0 on success or a negative AVERROR code.
 */
static int sox_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    unsigned header_size, comment_size;
    double sample_rate, sample_rate_frac;
    AVStream *st;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

    /* The magic's byte order determines the endianness of every field
     * (and of the samples themselves). */
    if (avio_rl32(pb) == SOX_TAG) {
        st->codec->codec_id = AV_CODEC_ID_PCM_S32LE;
        header_size         = avio_rl32(pb);
        avio_skip(pb, 8); /* sample count */
        sample_rate         = av_int2double(avio_rl64(pb));
        st->codec->channels = avio_rl32(pb);
        comment_size        = avio_rl32(pb);
    } else {
        st->codec->codec_id = AV_CODEC_ID_PCM_S32BE;
        header_size         = avio_rb32(pb);
        avio_skip(pb, 8); /* sample count */
        sample_rate         = av_int2double(avio_rb64(pb));
        st->codec->channels = avio_rb32(pb);
        comment_size        = avio_rb32(pb);
    }

    /* Guard against overflow in the header_size comparison below. */
    if (comment_size > 0xFFFFFFFFU - SOX_FIXED_HDR - 4U) {
        av_log(s, AV_LOG_ERROR, "invalid comment size (%u)\n", comment_size);
        return AVERROR_INVALIDDATA;
    }

    if (sample_rate <= 0 || sample_rate > INT_MAX) {
        av_log(s, AV_LOG_ERROR, "invalid sample rate (%f)\n", sample_rate);
        return AVERROR_INVALIDDATA;
    }

    sample_rate_frac = sample_rate - floor(sample_rate);
    if (sample_rate_frac)
        av_log(s, AV_LOG_WARNING,
               "truncating fractional part of sample rate (%f)\n",
               sample_rate_frac);

    /* header_size + magic must be 8-byte aligned and large enough to hold
     * the fixed part plus the comment. */
    if ((header_size + 4) & 7 || header_size < SOX_FIXED_HDR + comment_size
        || st->codec->channels > 65535) /* Reserve top 16 bits */ {
        av_log(s, AV_LOG_ERROR, "invalid header\n");
        return AVERROR_INVALIDDATA;
    }

    if (comment_size && comment_size < UINT_MAX) {
        char *comment = av_malloc(comment_size+1);
        if(!comment)
            return AVERROR(ENOMEM);
        if (avio_read(pb, comment, comment_size) != comment_size) {
            av_freep(&comment);
            return AVERROR(EIO);
        }
        comment[comment_size] = 0;

        /* Ownership of 'comment' passes to the dictionary. */
        av_dict_set(&s->metadata, "comment", comment,
                    AV_DICT_DONT_STRDUP_VAL);
    }

    /* Skip comment padding up to the announced header size. */
    avio_skip(pb, header_size - SOX_FIXED_HDR - comment_size);

    st->codec->sample_rate           = sample_rate;
    st->codec->bits_per_coded_sample = 32;
    st->codec->bit_rate              = st->codec->sample_rate *
                                       st->codec->bits_per_coded_sample *
                                       st->codec->channels;
    st->codec->block_align           = st->codec->bits_per_coded_sample *
                                       st->codec->channels / 8;

    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

    return 0;
}
 
/* Demuxer registration; packet reading/seeking use the generic PCM helpers. */
AVInputFormat ff_sox_demuxer = {
    .name        = "sox",
    .long_name   = NULL_IF_CONFIG_SMALL("SoX native"),
    .read_probe  = sox_probe,
    .read_header = sox_read_header,
    .read_packet = ff_pcm_read_packet,
    .read_seek   = ff_pcm_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/soxenc.c
0,0 → 1,121
/*
* SoX native format muxer
* Copyright (c) 2009 Daniel Verkamp <daniel@drv.nu>
*
* Based on libSoX sox-fmt.c
* Copyright (c) 2008 robs@users.sourceforge.net
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* SoX native format muxer
* @author Daniel Verkamp
* @see http://wiki.multimedia.cx/index.php?title=SoX_native_intermediate_format
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "libavutil/dict.h"
#include "avformat.h"
#include "avio_internal.h"
#include "rawenc.h"
#include "sox.h"
 
/* Muxer state: header size recorded at write_header time so the trailer can
 * locate and patch the sample-count field. */
typedef struct {
    int64_t header_size; // fixed header + padded comment, in bytes
} SoXContext;
 
/*
 * Write the SoX native header for the single PCM stream: magic (byte order
 * chosen by the codec id), header size, a zero sample count (patched in the
 * trailer), the sample rate as an IEEE double, the channel count, and an
 * optional comment padded with zeros to an 8-byte boundary.
 *
 * Returns 0 on success or a negative AVERROR code.
 *
 * Fix: the unsupported-codec path returned a bare -1 (which aliases
 * AVERROR(EPERM)); it now returns the intended AVERROR(EINVAL).
 */
static int sox_write_header(AVFormatContext *s)
{
    SoXContext *sox = s->priv_data;
    AVIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;
    AVDictionaryEntry *comment;
    size_t comment_len = 0, comment_size;

    comment = av_dict_get(s->metadata, "comment", NULL, 0);
    if (comment)
        comment_len = strlen(comment->value);
    /* The comment field is zero-padded to a multiple of 8 bytes. */
    comment_size = FFALIGN(comment_len, 8);

    sox->header_size = SOX_FIXED_HDR + comment_size;

    if (enc->codec_id == AV_CODEC_ID_PCM_S32LE) {
        ffio_wfourcc(pb, ".SoX");
        avio_wl32(pb, sox->header_size);
        avio_wl64(pb, 0); /* number of samples, patched in the trailer */
        avio_wl64(pb, av_double2int(enc->sample_rate));
        avio_wl32(pb, enc->channels);
        avio_wl32(pb, comment_size);
    } else if (enc->codec_id == AV_CODEC_ID_PCM_S32BE) {
        /* Big-endian files store the magic byte-reversed. */
        ffio_wfourcc(pb, "XoS.");
        avio_wb32(pb, sox->header_size);
        avio_wb64(pb, 0); /* number of samples, patched in the trailer */
        avio_wb64(pb, av_double2int(enc->sample_rate));
        avio_wb32(pb, enc->channels);
        avio_wb32(pb, comment_size);
    } else {
        av_log(s, AV_LOG_ERROR, "invalid codec; use pcm_s32le or pcm_s32be\n");
        return AVERROR(EINVAL); /* was a bare -1 */
    }

    if (comment_len)
        avio_write(pb, comment->value, comment_len);

    ffio_fill(pb, 0, comment_size - comment_len);

    avio_flush(pb);

    return 0;
}
 
/*
 * On seekable output, patch the sample-count field (at byte offset 8, in
 * the same byte order chosen by the header) now that the final file size
 * is known.  Non-seekable output is left with the zero placeholder.
 */
static int sox_write_trailer(AVFormatContext *s)
{
    SoXContext *sox = s->priv_data;
    AVIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;
    int64_t file_size, num_samples;

    if (!s->pb->seekable)
        return 0;

    file_size   = avio_tell(pb);
    /* Data bytes / 4 = number of 32-bit samples. */
    num_samples = (file_size - sox->header_size - 4LL) >> 2LL;

    avio_seek(pb, 8, SEEK_SET);
    if (enc->codec_id == AV_CODEC_ID_PCM_S32LE)
        avio_wl64(pb, num_samples);
    else
        avio_wb64(pb, num_samples);
    avio_seek(pb, file_size, SEEK_SET);

    avio_flush(pb);
    return 0;
}
 
/* Muxer registration; packets are written verbatim via the raw helper. */
AVOutputFormat ff_sox_muxer = {
    .name           = "sox",
    .long_name      = NULL_IF_CONFIG_SMALL("SoX native"),
    .extensions     = "sox",
    .priv_data_size = sizeof(SoXContext),
    .audio_codec    = AV_CODEC_ID_PCM_S32LE,
    .video_codec    = AV_CODEC_ID_NONE,
    .write_header   = sox_write_header,
    .write_packet   = ff_raw_write_packet,
    .write_trailer  = sox_write_trailer,
};
/contrib/sdk/sources/ffmpeg/libavformat/spdif.c
0,0 → 1,42
/*
* IEC 61937 common code
* Copyright (c) 2009 Bartlomiej Wolowiec
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "spdif.h"
#include "libavutil/bswap.h"
 
//TODO move to DSP
//TODO move to DSP
/*
 * Byte-swap w 16-bit words from src into dst (dst may equal src).  The
 * original hand-unrolled this loop eight-wide; a plain loop produces the
 * same result and lets the compiler vectorize.
 */
void ff_spdif_bswap_buf16(uint16_t *dst, const uint16_t *src, int w)
{
    int i;

    for (i = 0; i < w; i++)
        dst[i] = av_bswap16(src[i]);
}
/contrib/sdk/sources/ffmpeg/libavformat/spdif.h
0,0 → 1,65
/*
* IEC 61937 common header
* Copyright (c) 2009 Bartlomiej Wolowiec
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_SPDIF_H
#define AVFORMAT_SPDIF_H
 
#include <stdint.h>
#include "avformat.h"
 
/* IEC 61937 burst preamble: two fixed sync words followed by the burst-info
 * and length-code fields (8 bytes in total). */
#define SYNCWORD1 0xF872
#define SYNCWORD2 0x4E1F
#define BURST_HEADER_SIZE 0x8

/* Data-type codes carried in the burst-info word (Pc). */
enum IEC61937DataType {
    IEC61937_AC3                = 0x01,          ///< AC-3 data
    IEC61937_MPEG1_LAYER1       = 0x04,          ///< MPEG-1 layer 1
    IEC61937_MPEG1_LAYER23      = 0x05,          ///< MPEG-1 layer 2 or 3 data or MPEG-2 without extension
    IEC61937_MPEG2_EXT          = 0x06,          ///< MPEG-2 data with extension
    IEC61937_MPEG2_AAC          = 0x07,          ///< MPEG-2 AAC ADTS
    IEC61937_MPEG2_LAYER1_LSF   = 0x08,          ///< MPEG-2, layer-1 low sampling frequency
    IEC61937_MPEG2_LAYER2_LSF   = 0x09,          ///< MPEG-2, layer-2 low sampling frequency
    IEC61937_MPEG2_LAYER3_LSF   = 0x0A,          ///< MPEG-2, layer-3 low sampling frequency
    IEC61937_DTS1               = 0x0B,          ///< DTS type I   (512 samples)
    IEC61937_DTS2               = 0x0C,          ///< DTS type II  (1024 samples)
    IEC61937_DTS3               = 0x0D,          ///< DTS type III (2048 samples)
    IEC61937_ATRAC              = 0x0E,          ///< ATRAC data
    IEC61937_ATRAC3             = 0x0F,          ///< ATRAC3 data
    IEC61937_ATRACX             = 0x10,          ///< ATRAC3+ data
    IEC61937_DTSHD              = 0x11,          ///< DTS HD data
    IEC61937_WMAPRO             = 0x12,          ///< WMA 9 Professional data
    IEC61937_MPEG2_AAC_LSF_2048 = 0x13,          ///< MPEG-2 AAC ADTS half-rate low sampling frequency
    IEC61937_MPEG2_AAC_LSF_4096 = 0x13 | 0x20,   ///< MPEG-2 AAC ADTS quarter-rate low sampling frequency
    IEC61937_EAC3               = 0x15,          ///< E-AC-3 data
    IEC61937_TRUEHD             = 0x16,          ///< TrueHD data
};

/* Burst repetition period (in IEC 60958 frames) per MPEG version/layer,
 * indexed by [is_mpeg1][layer - 1]. */
static const uint16_t spdif_mpeg_pkt_offset[2][3] = {
    //LAYER1  LAYER2  LAYER3
    { 3072,    9216,   4608 }, // MPEG2 LSF
    { 1536,    4608,   4608 }, // MPEG1
};

/* Shared helpers implemented in spdif.c / spdifdec.c. */
void ff_spdif_bswap_buf16(uint16_t *dst, const uint16_t *src, int w);
int ff_spdif_read_packet(AVFormatContext *s, AVPacket *pkt);
int ff_spdif_probe(const uint8_t *p_buf, int buf_size, enum AVCodecID *codec);
 
#endif /* AVFORMAT_SPDIF_H */
/contrib/sdk/sources/ffmpeg/libavformat/spdifdec.c
0,0 → 1,240
/*
* IEC 61937 demuxer
* Copyright (c) 2010 Anssi Hannula <anssi.hannula at iki.fi>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* IEC 61937 demuxer, used for compressed data in S/PDIF
* @author Anssi Hannula
*/
 
#include "avformat.h"
#include "spdif.h"
#include "libavcodec/ac3.h"
#include "libavcodec/aacadtsdec.h"
 
/**
 * Map an IEC 61937 burst data type to the burst repetition period
 * (distance in bytes between consecutive sync words) and the codec
 * carried in the payload.
 *
 * @param s         format context used for logging, or NULL to stay
 *                  silent (used while probing)
 * @param data_type Pc burst-info word; only the low 8 bits are examined
 * @param buf       payload start; parsed as an ADTS header for AAC
 * @param offset    receives the burst repetition period in bytes
 * @param codec     receives the AVCodecID of the payload
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int spdif_get_offset_and_codec(AVFormatContext *s,
                                      enum IEC61937DataType data_type,
                                      const char *buf, int *offset,
                                      enum AVCodecID *codec)
{
    AACADTSHeaderInfo aac_hdr;
    GetBitContext gbc;

    switch (data_type & 0xff) {
    case IEC61937_AC3:
        *offset = AC3_FRAME_SIZE << 2;   /* 1536 samples * 4 bytes/sample */
        *codec = AV_CODEC_ID_AC3;
        break;
    case IEC61937_MPEG1_LAYER1:
        *offset = spdif_mpeg_pkt_offset[1][0];
        *codec = AV_CODEC_ID_MP1;
        break;
    case IEC61937_MPEG1_LAYER23:
        /* NOTE(review): uses the layer-1 entry (1536) although the table
         * has a distinct layer-2/3 value (4608); kept as-is — confirm
         * against the IEC 61937 specification. */
        *offset = spdif_mpeg_pkt_offset[1][0];
        *codec = AV_CODEC_ID_MP3;
        break;
    case IEC61937_MPEG2_EXT:
        *offset = 4608;
        *codec = AV_CODEC_ID_MP3;
        break;
    case IEC61937_MPEG2_AAC:
        /* the repetition period depends on the ADTS frame length */
        init_get_bits(&gbc, buf, AAC_ADTS_HEADER_SIZE * 8);
        if (avpriv_aac_parse_header(&gbc, &aac_hdr) < 0) {
            if (s) /* be silent during a probe */
                av_log(s, AV_LOG_ERROR, "Invalid AAC packet in IEC 61937\n");
            return AVERROR_INVALIDDATA;
        }
        *offset = aac_hdr.samples << 2;
        *codec = AV_CODEC_ID_AAC;
        break;
    case IEC61937_MPEG2_LAYER1_LSF:
        *offset = spdif_mpeg_pkt_offset[0][0];
        *codec = AV_CODEC_ID_MP1;
        break;
    case IEC61937_MPEG2_LAYER2_LSF:
        *offset = spdif_mpeg_pkt_offset[0][1];
        *codec = AV_CODEC_ID_MP2;
        break;
    case IEC61937_MPEG2_LAYER3_LSF:
        *offset = spdif_mpeg_pkt_offset[0][2];
        *codec = AV_CODEC_ID_MP3;
        break;
    case IEC61937_DTS1:
        *offset = 2048;   /* 512 samples * 4 bytes */
        *codec = AV_CODEC_ID_DTS;
        break;
    case IEC61937_DTS2:
        *offset = 4096;   /* 1024 samples * 4 bytes */
        *codec = AV_CODEC_ID_DTS;
        break;
    case IEC61937_DTS3:
        *offset = 8192;   /* 2048 samples * 4 bytes */
        *codec = AV_CODEC_ID_DTS;
        break;
    default:
        if (s) { /* be silent during a probe */
            avpriv_request_sample(s, "Data type 0x%04x in IEC 61937",
                                  data_type);
        }
        return AVERROR_PATCHWELCOME;
    }
    return 0;
}
 
/* Largest offset between bursts we currently handle, i.e. AAC with
aac_hdr.samples = 4096 */
#define SPDIF_MAX_OFFSET 16384
 
/* AVInputFormat probe callback: thin wrapper around the shared prober;
 * the detected codec is not needed at probe time. */
static int spdif_probe(AVProbeData *p)
{
    enum AVCodecID unused_codec;

    return ff_spdif_probe(p->buf, p->buf_size, &unused_codec);
}
 
/**
 * Scan a buffer for IEC 61937 sync codes and rate how likely it is a
 * raw S/PDIF stream. Returns AVPROBE_SCORE_MAX when two consecutive
 * bursts appear exactly where the data type predicts, a lower score when
 * sync codes are found at unexpected offsets, and 0 when none are found.
 * On success *codec receives the codec of the last recognized burst.
 */
int ff_spdif_probe(const uint8_t *p_buf, int buf_size, enum AVCodecID *codec)
{
    const uint8_t *buf = p_buf;
    const uint8_t *probe_end = p_buf + FFMIN(2 * SPDIF_MAX_OFFSET, buf_size - 1);
    const uint8_t *expected_code = buf + 7;
    uint32_t state = 0;
    int sync_codes = 0;
    int consecutive_codes = 0;
    int offset;

    for (; buf < probe_end; buf++) {
        /* shift bytes through a 32-bit window looking for Pa/Pb */
        state = (state << 8) | *buf;

        /* buf[1] < 0x37 filters out data-type values above the known range */
        if (state == (AV_BSWAP16C(SYNCWORD1) << 16 | AV_BSWAP16C(SYNCWORD2))
            && buf[1] < 0x37) {
            sync_codes++;

            if (buf == expected_code) {
                /* burst found exactly where the previous one predicted */
                if (++consecutive_codes >= 2)
                    return AVPROBE_SCORE_MAX;
            } else
                consecutive_codes = 0;

            /* need at least an ADTS header worth of payload to classify */
            if (buf + 4 + AAC_ADTS_HEADER_SIZE > p_buf + buf_size)
                break;

            /* continue probing to find more sync codes */
            probe_end = FFMIN(buf + SPDIF_MAX_OFFSET, p_buf + buf_size - 1);

            /* skip directly to the next sync code */
            if (!spdif_get_offset_and_codec(NULL, (buf[2] << 8) | buf[1],
                                            &buf[5], &offset, codec)) {
                if (buf + offset >= p_buf + buf_size)
                    break;
                expected_code = buf + offset;
                buf = expected_code - 7;
            }
        }
    }

    if (!sync_codes)
        return 0;

    if (sync_codes >= 6)
        /* good amount of sync codes but with unexpected offsets */
        return AVPROBE_SCORE_EXTENSION;

    /* some sync codes were found */
    return AVPROBE_SCORE_EXTENSION / 4;
}
 
/* Raw IEC 61937 has no file header; the audio stream is created lazily
 * when the first burst is parsed in ff_spdif_read_packet(). */
static int spdif_read_header(AVFormatContext *s)
{
    s->ctx_flags = s->ctx_flags | AVFMTCTX_NOHEADER;

    return 0;
}
 
/**
 * Read one IEC 61937 burst: resync byte-wise to the Pa/Pb sync words,
 * parse Pc (data type) and Pd (payload bit length), read the payload,
 * byte-swap it from S/PDIF little-endian word order, and skip the zero
 * padding up to the next burst. The audio stream is created on the
 * first packet.
 *
 * @return 0 on success, AVERROR_EOF at end of input, or another
 *         negative AVERROR code (the packet is freed on all errors)
 */
int ff_spdif_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    enum IEC61937DataType data_type;
    enum AVCodecID codec_id;
    uint32_t state = 0;
    int pkt_size_bits, offset, ret;

    /* scan byte-wise until the Pa/Pb sync words line up */
    while (state != (AV_BSWAP16C(SYNCWORD1) << 16 | AV_BSWAP16C(SYNCWORD2))) {
        state = (state << 8) | avio_r8(pb);
        if (url_feof(pb))
            return AVERROR_EOF;
    }

    data_type     = avio_rl16(pb);   /* Pc: burst-info word */
    pkt_size_bits = avio_rl16(pb);   /* Pd: payload length in bits */

    if (pkt_size_bits % 16)
        avpriv_request_sample(s, "Packet not ending at a 16-bit boundary");

    ret = av_new_packet(pkt, FFALIGN(pkt_size_bits, 16) >> 3);
    if (ret)
        return ret;

    pkt->pos = avio_tell(pb) - BURST_HEADER_SIZE;

    if (avio_read(pb, pkt->data, pkt->size) < pkt->size) {
        av_free_packet(pkt);
        return AVERROR_EOF;
    }
    /* payload is carried as little-endian 16-bit words; swap in place */
    ff_spdif_bswap_buf16((uint16_t *)pkt->data, (uint16_t *)pkt->data, pkt->size >> 1);

    ret = spdif_get_offset_and_codec(s, data_type, pkt->data,
                                     &offset, &codec_id);
    if (ret) {
        av_free_packet(pkt);
        return ret;
    }

    /* skip over the padding to the beginning of the next frame */
    avio_skip(pb, offset - pkt->size - BURST_HEADER_SIZE);

    if (!s->nb_streams) {
        /* first packet, create a stream */
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st) {
            av_free_packet(pkt);
            return AVERROR(ENOMEM);
        }
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id   = codec_id;
    } else if (codec_id != s->streams[0]->codec->codec_id) {
        /* Fix: release the freshly allocated packet before bailing out;
         * every other error path in this function frees it, and the
         * caller does not free packets on demuxer error. */
        av_free_packet(pkt);
        avpriv_report_missing_feature(s, "Codec change in IEC 61937");
        return AVERROR_PATCHWELCOME;
    }

    if (!s->bit_rate && s->streams[0]->codec->sample_rate)
        /* stream bitrate matches 16-bit stereo PCM bitrate for currently
           supported codecs */
        s->bit_rate = 2 * 16 * s->streams[0]->codec->sample_rate;

    return 0;
}
 
/* Demuxer for raw IEC 61937 bitstreams (compressed audio over S/PDIF). */
AVInputFormat ff_spdif_demuxer = {
    .name           = "spdif",
    .long_name      = NULL_IF_CONFIG_SMALL("IEC 61937 (compressed data in S/PDIF)"),
    .read_probe     = spdif_probe,
    .read_header    = spdif_read_header,
    .read_packet    = ff_spdif_read_packet,
    .flags          = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/spdifenc.c
0,0 → 1,556
/*
* IEC 61937 muxer
* Copyright (c) 2009 Bartlomiej Wolowiec
* Copyright (c) 2010 Anssi Hannula
* Copyright (c) 2010 Carl Eugen Hoyos
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* IEC-61937 encapsulation of various formats, used by S/PDIF
* @author Bartlomiej Wolowiec
* @author Anssi Hannula
* @author Carl Eugen Hoyos
*/
 
/*
* Terminology used in specification:
* data-burst - IEC61937 frame, contains header and encapsuled frame
* burst-preambule - IEC61937 frame header, contains 16-bits words named Pa, Pb, Pc and Pd
* burst-payload - encapsuled frame
* Pa, Pb - syncword - 0xF872, 0x4E1F
* Pc - burst-info, contains data-type (bits 0-6), error flag (bit 7), data-type-dependent info (bits 8-12)
* and bitstream number (bits 13-15)
* data-type - determines type of encapsuled frames
* Pd - length code (number of bits or bytes of encapsuled frame - according to data_type)
*
* IEC 61937 frames at normal usage start every specific count of bytes,
* dependent from data-type (spaces between packets are filled by zeros)
*/
 
#include "avformat.h"
#include "avio_internal.h"
#include "spdif.h"
#include "libavcodec/ac3.h"
#include "libavcodec/dca.h"
#include "libavcodec/aacadtsdec.h"
#include "libavutil/opt.h"
 
/* Muxer state: per-burst output parameters plus scratch buffers used to
 * accumulate HD frames and to byte-swap the payload. */
typedef struct IEC61937Context {
    const AVClass *av_class;
    enum IEC61937DataType data_type;///< burst info - reference to type of payload of the data-burst
    int length_code;                ///< length code in bits or bytes, depending on data type
    int pkt_offset;                 ///< data burst repetition period in bytes
    uint8_t *buffer;                ///< allocated buffer, used for swap bytes
    int buffer_size;                ///< size of allocated buffer

    uint8_t *out_buf;               ///< pointer to the outgoing data before byte-swapping
    int out_bytes;                  ///< amount of outgoing bytes

    int use_preamble;               ///< preamble enabled (disabled for exactly pre-padded DTS)
    int extra_bswap;                ///< extra bswap for payload (for LE DTS => standard BE DTS)

    uint8_t *hd_buf;                ///< allocated buffer to concatenate hd audio frames
    int hd_buf_size;                ///< size of the hd audio buffer
    int hd_buf_count;               ///< number of frames in the hd audio buffer
    int hd_buf_filled;              ///< amount of bytes in the hd audio buffer

    int dtshd_skip;                 ///< counter used for skipping DTS-HD frames

    /* AVOptions: */
    int dtshd_rate;
    int dtshd_fallback;
#define SPDIF_FLAG_BIGENDIAN    0x01
    int spdif_flags;

    /// function, which generates codec dependent header information.
    /// Sets data_type and pkt_offset, and length_code, out_bytes, out_buf if necessary
    int (*header_info) (AVFormatContext *s, AVPacket *pkt);
} IEC61937Context;

/* User-visible muxer options (see the dtshd_rate / dtshd_fallback and
 * spdif_flags fields above). */
static const AVOption options[] = {
{ "spdif_flags", "IEC 61937 encapsulation flags", offsetof(IEC61937Context, spdif_flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "spdif_flags" },
{ "be", "output in big-endian format (for use as s16be)", 0, AV_OPT_TYPE_CONST, {.i64 = SPDIF_FLAG_BIGENDIAN}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "spdif_flags" },
{ "dtshd_rate", "mux complete DTS frames in HD mode at the specified IEC958 rate (in Hz, default 0=disabled)", offsetof(IEC61937Context, dtshd_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 768000, AV_OPT_FLAG_ENCODING_PARAM },
{ "dtshd_fallback_time", "min secs to strip HD for after an overflow (-1: till the end, default 60)", offsetof(IEC61937Context, dtshd_fallback), AV_OPT_TYPE_INT, {.i64 = 60}, -1, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
{ NULL },
};

/* AVClass wiring for the options table above. */
static const AVClass spdif_class = {
    .class_name     = "spdif",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
};
 
/* AC-3: Pc carries the bitstream mode (bsmod) in bits 8-10; the burst
 * period is fixed at 1536 samples * 4 bytes. */
static int spdif_header_ac3(AVFormatContext *s, AVPacket *pkt)
{
    IEC61937Context *ctx = s->priv_data;
    int bsmod = pkt->data[5] & 0x7;

    ctx->data_type  = IEC61937_AC3 | (bsmod << 8);
    ctx->pkt_offset = AC3_FRAME_SIZE << 2;

    return 0;
}
 
/* E-AC-3: accumulate enough substream frames (1, 2, 3 or 6 depending on
 * numblkscod) to cover 1536 samples, then emit them as one burst with a
 * fixed 24576-byte repetition period. Returns 0; ctx->pkt_offset is left
 * at 0 while more frames are still needed. */
static int spdif_header_eac3(AVFormatContext *s, AVPacket *pkt)
{
    IEC61937Context *ctx = s->priv_data;
    static const uint8_t eac3_repeat[4] = {6, 3, 2, 1};
    int repeat = 1;
    uint8_t *tmp;

    if ((pkt->data[4] & 0xc0) != 0xc0) /* fscod */
        repeat = eac3_repeat[(pkt->data[4] & 0x30) >> 4]; /* numblkscod */

    /* Fix: grow via a temporary so the existing buffer is not leaked if
     * av_fast_realloc() fails and returns NULL (the original overwrote
     * ctx->hd_buf directly). */
    tmp = av_fast_realloc(ctx->hd_buf, &ctx->hd_buf_size,
                          ctx->hd_buf_filled + pkt->size);
    if (!tmp)
        return AVERROR(ENOMEM);
    ctx->hd_buf = tmp;

    memcpy(&ctx->hd_buf[ctx->hd_buf_filled], pkt->data, pkt->size);

    ctx->hd_buf_filled += pkt->size;
    if (++ctx->hd_buf_count < repeat){
        ctx->pkt_offset = 0;   /* burst not complete yet: emit nothing */
        return 0;
    }
    ctx->data_type   = IEC61937_EAC3;
    ctx->pkt_offset  = 24576;
    ctx->out_buf     = ctx->hd_buf;
    ctx->out_bytes   = ctx->hd_buf_filled;
    ctx->length_code = ctx->hd_buf_filled;

    ctx->hd_buf_count  = 0;
    ctx->hd_buf_filled = 0;
    return 0;
}
 
/*
* DTS type IV (DTS-HD) can be transmitted with various frame repetition
* periods; longer repetition periods allow for longer packets and therefore
* higher bitrate. Longer repetition periods mean that the constant bitrate of
* the outputted IEC 61937 stream is higher.
* The repetition period is measured in IEC 60958 frames (4 bytes).
*/
/* Map a DTS type IV repetition period (in IEC 60958 frames) to its burst
 * subtype. Valid periods are the powers of two 512..16384, numbered
 * 0..5; any other period yields -1. */
static int spdif_dts4_subtype(int period)
{
    int subtype = 0;
    int p;

    for (p = 512; p <= 16384; p <<= 1, subtype++)
        if (p == period)
            return subtype;
    return -1;
}
 
/**
 * Prepare a DTS type IV (DTS-HD) burst: wrap the DTS frame in a DTS-HD
 * container (start code + 16-bit size + frame), choose the repetition
 * period from the user-requested dtshd_rate, and fall back to muxing
 * only the core substream when the HD frame would overflow the period.
 *
 * @param core_size   size of the DTS core substream, must be non-zero
 * @param sample_rate core sample rate, must be non-zero
 * @param blocks      number of 32-sample PCM blocks per frame
 */
static int spdif_header_dts4(AVFormatContext *s, AVPacket *pkt, int core_size,
                             int sample_rate, int blocks)
{
    IEC61937Context *ctx = s->priv_data;
    static const char dtshd_start_code[10] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe };
    int pkt_size = pkt->size;
    int period;
    int subtype;

    if (!core_size) {
        av_log(s, AV_LOG_ERROR, "HD mode not supported for this format\n");
        return AVERROR(EINVAL);
    }

    if (!sample_rate) {
        av_log(s, AV_LOG_ERROR, "Unknown DTS sample rate for HD\n");
        return AVERROR_INVALIDDATA;
    }

    /* period in IEC 60958 frames for one DTS frame at the requested rate */
    period = ctx->dtshd_rate * (blocks << 5) / sample_rate;
    subtype = spdif_dts4_subtype(period);

    if (subtype < 0) {
        av_log(s, AV_LOG_ERROR, "Specified HD rate of %d Hz would require an "
               "impossible repetition period of %d for the current DTS stream"
               " (blocks = %d, sample rate = %d)\n", ctx->dtshd_rate, period,
               blocks << 5, sample_rate);
        return AVERROR(EINVAL);
    }

    /* set pkt_offset and DTS IV subtype according to the requested output
     * rate */
    ctx->pkt_offset = period * 4;
    ctx->data_type  = IEC61937_DTSHD | subtype << 8;

    /* If the bitrate is too high for transmitting at the selected
     * repetition period setting, strip DTS-HD until a good amount
     * of consecutive non-overflowing HD frames have been observed.
     * This generally only happens if the caller is cramming a Master
     * Audio stream into 192kHz IEC 60958 (which may or may not fit). */
    if (sizeof(dtshd_start_code) + 2 + pkt_size
            > ctx->pkt_offset - BURST_HEADER_SIZE && core_size) {
        if (!ctx->dtshd_skip)
            av_log(s, AV_LOG_WARNING, "DTS-HD bitrate too high, "
                                      "temporarily sending core only\n");
        if (ctx->dtshd_fallback > 0)
            /* skip for dtshd_fallback seconds worth of frames */
            ctx->dtshd_skip = sample_rate * ctx->dtshd_fallback / (blocks << 5);
        else
            /* skip permanently (dtshd_fallback == -1) or just once
             * (dtshd_fallback == 0) */
            ctx->dtshd_skip = 1;
    }
    if (ctx->dtshd_skip && core_size) {
        pkt_size = core_size;   /* truncate to the core substream */
        if (ctx->dtshd_fallback >= 0)
            --ctx->dtshd_skip;
    }

    ctx->out_bytes   = sizeof(dtshd_start_code) + 2 + pkt_size;

    /* Align so that (length_code & 0xf) == 0x8. This is reportedly needed
     * with some receivers, but the exact requirement is unconfirmed. */
    ctx->length_code = FFALIGN(ctx->out_bytes + 0x8, 0x10) - 0x8;

    av_fast_malloc(&ctx->hd_buf, &ctx->hd_buf_size, ctx->out_bytes);
    if (!ctx->hd_buf)
        return AVERROR(ENOMEM);

    ctx->out_buf = ctx->hd_buf;

    /* layout: start code, big-endian frame size, frame data */
    memcpy(ctx->hd_buf, dtshd_start_code, sizeof(dtshd_start_code));
    AV_WB16(ctx->hd_buf + sizeof(dtshd_start_code), pkt_size);
    memcpy(ctx->hd_buf + sizeof(dtshd_start_code) + 2, pkt->data, pkt_size);

    return 0;
}
 
/**
 * Prepare a DTS burst: detect the frame's endianness/packing from the
 * sync word, pick the IEC 61937 DTS type (I/II/III) from the block
 * count, and strip any extension data beyond the core by default.
 * Delegates to spdif_header_dts4() when DTS type IV output is requested.
 *
 * Note: sample_rate and core_size are only parsed for raw big-endian
 * frames, so DTS type IV output rejects the other packings via the
 * zero checks in spdif_header_dts4().
 */
static int spdif_header_dts(AVFormatContext *s, AVPacket *pkt)
{
    IEC61937Context *ctx = s->priv_data;
    uint32_t syncword_dts = AV_RB32(pkt->data);
    int blocks;
    int sample_rate = 0;
    int core_size = 0;

    if (pkt->size < 9)
        return AVERROR_INVALIDDATA;

    switch (syncword_dts) {
    case DCA_MARKER_RAW_BE:
        blocks = (AV_RB16(pkt->data + 4) >> 2) & 0x7f;
        core_size = ((AV_RB24(pkt->data + 5) >> 4) & 0x3fff) + 1;
        sample_rate = avpriv_dca_sample_rates[(pkt->data[8] >> 2) & 0x0f];
        break;
    case DCA_MARKER_RAW_LE:
        blocks = (AV_RL16(pkt->data + 4) >> 2) & 0x7f;
        ctx->extra_bswap = 1;   /* convert LE payload to standard BE DTS */
        break;
    case DCA_MARKER_14B_BE:
        blocks =
            (((pkt->data[5] & 0x07) << 4) | ((pkt->data[6] & 0x3f) >> 2));
        break;
    case DCA_MARKER_14B_LE:
        blocks =
            (((pkt->data[4] & 0x07) << 4) | ((pkt->data[7] & 0x3f) >> 2));
        ctx->extra_bswap = 1;
        break;
    case DCA_HD_MARKER:
        /* We only handle HD frames that are paired with core. However,
           sometimes DTS-HD streams with core have a stray HD frame without
           core in the beginning of the stream. */
        av_log(s, AV_LOG_ERROR, "stray DTS-HD frame\n");
        return AVERROR_INVALIDDATA;
    default:
        av_log(s, AV_LOG_ERROR, "bad DTS syncword 0x%x\n", syncword_dts);
        return AVERROR_INVALIDDATA;
    }
    blocks++;   /* the field stores blocks - 1 */

    if (ctx->dtshd_rate)
        /* DTS type IV output requested */
        return spdif_header_dts4(s, pkt, core_size, sample_rate, blocks);

    switch (blocks) {
    case  512 >> 5: ctx->data_type = IEC61937_DTS1; break;
    case 1024 >> 5: ctx->data_type = IEC61937_DTS2; break;
    case 2048 >> 5: ctx->data_type = IEC61937_DTS3; break;
    default:
        av_log(s, AV_LOG_ERROR, "%i samples in DTS frame not supported\n",
               blocks << 5);
        return AVERROR(ENOSYS);
    }

    /* discard extraneous data by default */
    if (core_size && core_size < pkt->size) {
        ctx->out_bytes = core_size;
        ctx->length_code = core_size << 3;
    }

    /* repetition period: blocks * 32 samples * 4 bytes */
    ctx->pkt_offset = blocks << 7;

    if (ctx->out_bytes == ctx->pkt_offset) {
        /* The DTS stream fits exactly into the output stream, so skip the
         * preamble as it would not fit in there. This is the case for dts
         * discs and dts-in-wav. */
        ctx->use_preamble = 0;
    } else if (ctx->out_bytes > ctx->pkt_offset - BURST_HEADER_SIZE) {
        avpriv_request_sample(s, "Unrecognized large DTS frame");
        /* This will fail with a "bitrate too high" in the caller */
    }

    return 0;
}
 
/* Pc data type for MPEG audio, indexed like spdif_mpeg_pkt_offset:
 * [MPEG2-LSF(0) / MPEG1(1)][layer - 1]. */
static const enum IEC61937DataType mpeg_data_type[2][3] = {
    //     LAYER1                      LAYER2                  LAYER3
    { IEC61937_MPEG2_LAYER1_LSF, IEC61937_MPEG2_LAYER2_LSF, IEC61937_MPEG2_LAYER3_LSF },//MPEG2 LSF
    { IEC61937_MPEG1_LAYER1,     IEC61937_MPEG1_LAYER23,    IEC61937_MPEG1_LAYER23    },//MPEG1
};
 
/* MPEG audio: derive data type and repetition period from the frame
 * header's version/layer/extension bits. */
static int spdif_header_mpeg(AVFormatContext *s, AVPacket *pkt)
{
    IEC61937Context *ctx = s->priv_data;
    int version   = (pkt->data[1] >> 3) & 3;
    int layer     = 3 - ((pkt->data[1] >> 1) & 3);
    int extension = pkt->data[2] & 1;

    /* version == 1 is reserved; layer == 3 means invalid layer bits */
    if (layer == 3 || version == 1) {
        av_log(s, AV_LOG_ERROR, "Wrong MPEG file format\n");
        return AVERROR_INVALIDDATA;
    }
    av_log(s, AV_LOG_DEBUG, "version: %i layer: %i extension: %i\n", version, layer, extension);

    if (version == 2 && extension) {
        ctx->data_type  = IEC61937_MPEG2_EXT;
        ctx->pkt_offset = 4608;
    } else {
        int mpeg1 = version & 1;   /* table row: 0 = MPEG2 LSF, 1 = MPEG1 */

        ctx->data_type  = mpeg_data_type[mpeg1][layer];
        ctx->pkt_offset = spdif_mpeg_pkt_offset[mpeg1][layer];
    }
    // TODO Data type dependent info (normal/karaoke, dynamic range control)
    return 0;
}
 
/* AAC: parse the ADTS header to get the sample count per packet; the
 * IEC 61937 data type depends on how many raw AAC frames the ADTS
 * packet carries (1, 2 or 4). */
static int spdif_header_aac(AVFormatContext *s, AVPacket *pkt)
{
    IEC61937Context *ctx = s->priv_data;
    AACADTSHeaderInfo hdr;
    GetBitContext gbc;
    int ret;

    init_get_bits(&gbc, pkt->data, AAC_ADTS_HEADER_SIZE * 8);
    ret = avpriv_aac_parse_header(&gbc, &hdr);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "Wrong AAC file format\n");
        return AVERROR_INVALIDDATA;
    }

    /* repetition period: samples * 4 bytes per sample */
    ctx->pkt_offset = hdr.samples << 2;
    switch (hdr.num_aac_frames) {
    case 1:
        ctx->data_type = IEC61937_MPEG2_AAC;
        break;
    case 2:
        ctx->data_type = IEC61937_MPEG2_AAC_LSF_2048;
        break;
    case 4:
        ctx->data_type = IEC61937_MPEG2_AAC_LSF_4096;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "%i samples in AAC frame not supported\n",
               hdr.samples);
        return AVERROR(EINVAL);
    }
    //TODO Data type dependent info (LC profile/SBR)
    return 0;
}
 
 
/*
* It seems Dolby TrueHD frames have to be encapsulated in MAT frames before
* they can be encapsulated in IEC 61937.
* Here we encapsulate 24 TrueHD frames in a single MAT frame, padding them
* to achieve constant rate.
* The actual format of a MAT frame is unknown, but the below seems to work.
* However, it seems it is not actually necessary for the 24 TrueHD frames to
* be in an exact alignment with the MAT frame.
*/
#define MAT_FRAME_SIZE 61424
#define TRUEHD_FRAME_OFFSET 2560
#define MAT_MIDDLE_CODE_OFFSET -4
 
/**
 * TrueHD: accumulate 24 TrueHD frames into one MAT frame in ctx->hd_buf
 * (allocated in spdif_write_header), inserting the MAT start / middle /
 * end codes at their fixed positions, then emit the whole MAT frame as
 * a single burst. While the MAT frame is incomplete, pkt_offset is left
 * at 0 so spdif_write_packet() outputs nothing.
 */
static int spdif_header_truehd(AVFormatContext *s, AVPacket *pkt)
{
    IEC61937Context *ctx = s->priv_data;
    int mat_code_length = 0;
    static const char mat_end_code[16] = { 0xC3, 0xC2, 0xC0, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x11 };

    if (!ctx->hd_buf_count) {
        /* first frame of the MAT frame: prepend the start code */
        static const char mat_start_code[20] = { 0x07, 0x9E, 0x00, 0x03, 0x84, 0x01, 0x01, 0x01, 0x80, 0x00, 0x56, 0xA5, 0x3B, 0xF4, 0x81, 0x83, 0x49, 0x80, 0x77, 0xE0 };
        mat_code_length = sizeof(mat_start_code) + BURST_HEADER_SIZE;
        memcpy(ctx->hd_buf, mat_start_code, sizeof(mat_start_code));

    } else if (ctx->hd_buf_count == 12) {
        /* halfway point: insert the middle code just before slot 12 */
        static const char mat_middle_code[12] = { 0xC3, 0xC1, 0x42, 0x49, 0x3B, 0xFA, 0x82, 0x83, 0x49, 0x80, 0x77, 0xE0 };
        mat_code_length = sizeof(mat_middle_code) + MAT_MIDDLE_CODE_OFFSET;
        memcpy(&ctx->hd_buf[12 * TRUEHD_FRAME_OFFSET - BURST_HEADER_SIZE + MAT_MIDDLE_CODE_OFFSET],
               mat_middle_code, sizeof(mat_middle_code));
    }

    if (pkt->size > TRUEHD_FRAME_OFFSET - mat_code_length) {
        /* if such frames exist, we'd need some more complex logic to
         * distribute the TrueHD frames in the MAT frame */
        avpriv_request_sample(s, "Too large TrueHD frame of %d bytes",
                              pkt->size);
        return AVERROR_PATCHWELCOME;
    }

    /* copy the frame into its fixed slot and zero-pad up to the next one */
    memcpy(&ctx->hd_buf[ctx->hd_buf_count * TRUEHD_FRAME_OFFSET - BURST_HEADER_SIZE + mat_code_length],
           pkt->data, pkt->size);
    memset(&ctx->hd_buf[ctx->hd_buf_count * TRUEHD_FRAME_OFFSET - BURST_HEADER_SIZE + mat_code_length + pkt->size],
           0, TRUEHD_FRAME_OFFSET - pkt->size - mat_code_length);

    if (++ctx->hd_buf_count < 24){
        ctx->pkt_offset = 0;
        return 0;
    }
    memcpy(&ctx->hd_buf[MAT_FRAME_SIZE - sizeof(mat_end_code)], mat_end_code, sizeof(mat_end_code));
    ctx->hd_buf_count = 0;

    ctx->data_type   = IEC61937_TRUEHD;
    ctx->pkt_offset  = 61440;
    ctx->out_buf     = ctx->hd_buf;
    ctx->out_bytes   = MAT_FRAME_SIZE;
    ctx->length_code = MAT_FRAME_SIZE;
    return 0;
}
 
/* Select the per-codec burst-header generator; TrueHD additionally
 * needs the fixed-size MAT accumulation buffer up front. */
static int spdif_write_header(AVFormatContext *s)
{
    IEC61937Context *ctx = s->priv_data;
    enum AVCodecID id = s->streams[0]->codec->codec_id;

    if (id == AV_CODEC_ID_AC3) {
        ctx->header_info = spdif_header_ac3;
    } else if (id == AV_CODEC_ID_EAC3) {
        ctx->header_info = spdif_header_eac3;
    } else if (id == AV_CODEC_ID_MP1 || id == AV_CODEC_ID_MP2 ||
               id == AV_CODEC_ID_MP3) {
        ctx->header_info = spdif_header_mpeg;
    } else if (id == AV_CODEC_ID_DTS) {
        ctx->header_info = spdif_header_dts;
    } else if (id == AV_CODEC_ID_AAC) {
        ctx->header_info = spdif_header_aac;
    } else if (id == AV_CODEC_ID_TRUEHD) {
        ctx->header_info = spdif_header_truehd;
        ctx->hd_buf = av_malloc(MAT_FRAME_SIZE);
        if (!ctx->hd_buf)
            return AVERROR(ENOMEM);
    } else {
        av_log(s, AV_LOG_ERROR, "codec not supported\n");
        return AVERROR_PATCHWELCOME;
    }
    return 0;
}
 
/* Release the byte-swap scratch buffer and the HD accumulation buffer. */
static int spdif_write_trailer(AVFormatContext *s)
{
    IEC61937Context *ctx = s->priv_data;

    av_freep(&ctx->hd_buf);
    av_freep(&ctx->buffer);
    return 0;
}
 
/* Write one 16-bit word in the endianness selected by spdif_flags
 * (little-endian by default, big-endian with the "be" flag). */
static av_always_inline void spdif_put_16(IEC61937Context *ctx,
                                          AVIOContext *pb, unsigned int val)
{
    void (*write16)(AVIOContext *, unsigned int) =
        (ctx->spdif_flags & SPDIF_FLAG_BIGENDIAN) ? avio_wb16 : avio_wl16;

    write16(pb, val);
}
 
/**
 * Emit one data-burst: run the codec-specific header generator, then
 * write the optional Pa/Pb/Pc/Pd preamble, the (possibly byte-swapped)
 * payload, an MSB-aligned lone trailing byte if the payload length is
 * odd, and zero padding up to the burst repetition period.
 */
static int spdif_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    IEC61937Context *ctx = s->priv_data;
    int ret, padding;

    /* defaults; header_info() may override any of these */
    ctx->out_buf = pkt->data;
    ctx->out_bytes = pkt->size;
    ctx->length_code = FFALIGN(pkt->size, 2) << 3;   /* Pd in bits */
    ctx->use_preamble = 1;
    ctx->extra_bswap = 0;

    ret = ctx->header_info(s, pkt);
    if (ret < 0)
        return ret;
    if (!ctx->pkt_offset)
        /* frame accumulated (E-AC-3/TrueHD) but burst not complete yet */
        return 0;

    padding = (ctx->pkt_offset - ctx->use_preamble * BURST_HEADER_SIZE - ctx->out_bytes) & ~1;
    if (padding < 0) {
        av_log(s, AV_LOG_ERROR, "bitrate is too high\n");
        return AVERROR(EINVAL);
    }

    if (ctx->use_preamble) {
        spdif_put_16(ctx, s->pb, SYNCWORD1);       //Pa
        spdif_put_16(ctx, s->pb, SYNCWORD2);       //Pb
        spdif_put_16(ctx, s->pb, ctx->data_type);  //Pc
        spdif_put_16(ctx, s->pb, ctx->length_code);//Pd
    }

    /* payload words must come out byte-swapped relative to the input
     * unless the two swap requests cancel each other out */
    if (ctx->extra_bswap ^ (ctx->spdif_flags & SPDIF_FLAG_BIGENDIAN)) {
        avio_write(s->pb, ctx->out_buf, ctx->out_bytes & ~1);
    } else {
        av_fast_malloc(&ctx->buffer, &ctx->buffer_size, ctx->out_bytes + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!ctx->buffer)
            return AVERROR(ENOMEM);
        ff_spdif_bswap_buf16((uint16_t *)ctx->buffer, (uint16_t *)ctx->out_buf, ctx->out_bytes >> 1);
        avio_write(s->pb, ctx->buffer, ctx->out_bytes & ~1);
    }

    /* a final lone byte has to be MSB aligned */
    if (ctx->out_bytes & 1)
        spdif_put_16(ctx, s->pb, ctx->out_buf[ctx->out_bytes - 1] << 8);

    ffio_fill(s->pb, 0, padding);

    av_log(s, AV_LOG_DEBUG, "type=%x len=%i pkt_offset=%i\n",
           ctx->data_type, ctx->out_bytes, ctx->pkt_offset);

    return 0;
}
 
/* Muxer wrapping compressed audio frames into IEC 61937 bursts. */
AVOutputFormat ff_spdif_muxer = {
    .name              = "spdif",
    .long_name         = NULL_IF_CONFIG_SMALL("IEC 61937 (used on S/PDIF - IEC958)"),
    .extensions        = "spdif",
    .priv_data_size    = sizeof(IEC61937Context),
    .audio_codec       = AV_CODEC_ID_AC3,
    .video_codec       = AV_CODEC_ID_NONE,
    .write_header      = spdif_write_header,
    .write_packet      = spdif_write_packet,
    .write_trailer     = spdif_write_trailer,
    .flags             = AVFMT_NOTIMESTAMPS,
    .priv_class        = &spdif_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/srtdec.c
0,0 → 1,165
/*
* SubRip subtitle demuxer
* Copyright (c) 2010 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
#include "libavutil/bprint.h"
#include "libavutil/intreadwrite.h"
 
typedef struct {
    FFDemuxSubtitlesQueue q;   /* queue of parsed subtitle events */
} SRTContext;
 
/* Probe: skip an optional UTF-8 BOM and leading newlines, then check
 * whether one of the first two lines is an "HH:MM:SS,mmm --> ..." timing
 * line preceded by a plausible event counter. */
static int srt_probe(AVProbeData *p)
{
    const unsigned char *ptr = p->buf;
    int i, v, num = 0;

    if (AV_RB24(ptr) == 0xEFBBBF)
        ptr += 3;  /* skip UTF-8 BOM */

    while (*ptr == '\r' || *ptr == '\n')
        ptr++;
    for (i=0; i<2; i++) {
        /* accept when the previously read counter equals the line index
         * or is one less (files may start numbering at 0 or 1) */
        if ((num == i || num + 1 == i)
            && sscanf(ptr, "%*d:%*2d:%*2d%*1[,.]%*3d --> %*d:%*2d:%*2d%*1[,.]%3d", &v) == 1)
            return AVPROBE_SCORE_MAX;
        num = atoi(ptr);
        ptr += ff_subtitles_next_line(ptr);
    }
    return 0;
}
 
/**
 * Find the timing line within the first two lines of a chunk and return
 * the start time in milliseconds (or AV_NOPTS_VALUE). On success
 * *duration is set and the optional X1/X2/Y1/Y2 position fields are
 * stored through the pointer arguments; *buf is always advanced past
 * every line that was examined.
 */
static int64_t get_pts(const char **buf, int *duration,
                       int32_t *x1, int32_t *y1, int32_t *x2, int32_t *y2)
{
    int attempt;

    for (attempt = 0; attempt < 2; attempt++) {
        int hh1, mm1, ss1, ms1;
        int hh2, mm2, ss2, ms2;
        int matched = sscanf(*buf, "%d:%2d:%2d%*1[,.]%3d --> %d:%2d:%2d%*1[,.]%3d"
                             "%*[ ]X1:%u X2:%u Y1:%u Y2:%u",
                             &hh1, &mm1, &ss1, &ms1,
                             &hh2, &mm2, &ss2, &ms2,
                             x1, x2, y1, y2);

        *buf += ff_subtitles_next_line(*buf);
        if (matched >= 8) {
            /* position fields are optional; 8 matches = timestamps only */
            int64_t start = (hh1*3600LL + mm1*60LL + ss1) * 1000LL + ms1;
            int64_t end   = (hh2*3600LL + mm2*60LL + ss2) * 1000LL + ms2;
            *duration = end - start;
            return start;
        }
    }
    return AV_NOPTS_VALUE;
}
 
/**
 * Parse the whole file at header time: read blank-line-separated chunks,
 * extract the timing line from each, and queue the remaining text as one
 * subtitle packet per event (with optional position side data).
 */
static int srt_read_header(AVFormatContext *s)
{
    SRTContext *srt = s->priv_data;
    AVBPrint buf;
    AVStream *st = avformat_new_stream(s, NULL);
    int res = 0;

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1000);   /* timestamps in milliseconds */
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_SUBRIP;

    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);

    while (!url_feof(s->pb)) {
        ff_subtitles_read_chunk(s->pb, &buf);

        if (buf.len) {
            int64_t pos = avio_tell(s->pb);
            int64_t pts;
            int duration;
            const char *ptr = buf.str;
            int32_t x1 = -1, y1 = -1, x2 = -1, y2 = -1;
            AVPacket *sub;

            pts = get_pts(&ptr, &duration, &x1, &y1, &x2, &y2);
            if (pts != AV_NOPTS_VALUE) {
                /* ptr now points past the counter/timing lines */
                int len = buf.len - (ptr - buf.str);
                if (len <= 0)
                    continue;   /* event with no text */
                sub = ff_subtitles_queue_insert(&srt->q, ptr, len, 0);
                if (!sub) {
                    res = AVERROR(ENOMEM);
                    goto end;
                }
                sub->pos = pos;
                sub->pts = pts;
                sub->duration = duration;
                if (x1 != -1) {
                    /* attach the on-screen rectangle as side data */
                    uint8_t *p = av_packet_new_side_data(sub, AV_PKT_DATA_SUBTITLE_POSITION, 16);
                    if (p) {
                        AV_WL32(p,      x1);
                        AV_WL32(p +  4, y1);
                        AV_WL32(p +  8, x2);
                        AV_WL32(p + 12, y2);
                    }
                }
            }
        }
    }

    ff_subtitles_queue_finalize(&srt->q);

end:
    av_bprint_finalize(&buf, NULL);
    return res;
}
 
/* Pop the next queued subtitle event (all were parsed at header time). */
static int srt_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FFDemuxSubtitlesQueue *q = &((SRTContext *)s->priv_data)->q;

    return ff_subtitles_queue_read_packet(q, pkt);
}
 
/* Seek within the pre-parsed event queue. */
static int srt_read_seek(AVFormatContext *s, int stream_index,
                         int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    FFDemuxSubtitlesQueue *q = &((SRTContext *)s->priv_data)->q;

    return ff_subtitles_queue_seek(q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* Free every queued event. */
static int srt_read_close(AVFormatContext *s)
{
    FFDemuxSubtitlesQueue *q = &((SRTContext *)s->priv_data)->q;

    ff_subtitles_queue_clean(q);
    return 0;
}
 
/* SubRip (.srt) subtitle demuxer. */
AVInputFormat ff_srt_demuxer = {
    .name        = "srt",
    .long_name   = NULL_IF_CONFIG_SMALL("SubRip subtitle"),
    .priv_data_size = sizeof(SRTContext),
    .read_probe  = srt_probe,
    .read_header = srt_read_header,
    .read_packet = srt_read_packet,
    .read_seek2  = srt_read_seek,
    .read_close  = srt_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/srtenc.c
0,0 → 1,115
/*
* SubRip subtitle muxer
* Copyright (c) 2012 Nicolas George <nicolas.george@normalesup.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "libavutil/log.h"
#include "libavutil/intreadwrite.h"
 
/* TODO: add options for:
- character encoding;
- LF / CRLF;
- byte order mark.
*/
 
typedef struct SRTContext{
    unsigned index;   /* 1-based counter written before each event */
} SRTContext;
 
/* Validate that exactly one subtitle stream with a supported text codec
 * is present, set millisecond timestamps, and reset the event counter. */
static int srt_write_header(AVFormatContext *avf)
{
    SRTContext *srt = avf->priv_data;
    enum AVCodecID id;

    if (avf->nb_streams != 1 ||
        avf->streams[0]->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) {
        av_log(avf, AV_LOG_ERROR,
               "SRT supports only a single subtitles stream.\n");
        return AVERROR(EINVAL);
    }

    id = avf->streams[0]->codec->codec_id;
    if (id != AV_CODEC_ID_TEXT &&
        id != AV_CODEC_ID_SUBRIP &&
        id != AV_CODEC_ID_SRT) {
        av_log(avf, AV_LOG_ERROR,
               "Unsupported subtitles codec: %s\n",
               avcodec_get_name(id));
        return AVERROR(EINVAL);
    }

    avpriv_set_pts_info(avf->streams[0], 64, 1, 1000);
    srt->index = 1;
    return 0;
}
 
/**
 * Write one subtitle event. For TEXT/SUBRIP input a counter and a
 * "HH:MM:SS,mmm --> HH:MM:SS,mmm" timing line (plus optional X/Y
 * position fields from side data) are generated; for the legacy SRT
 * codec the packet already contains them and is written verbatim.
 * Events without usable timestamps are skipped with a warning.
 */
static int srt_write_packet(AVFormatContext *avf, AVPacket *pkt)
{
    SRTContext *srt = avf->priv_data;
    /* the legacy SRT codec carries its own counter and timing line */
    int write_ts = avf->streams[0]->codec->codec_id != AV_CODEC_ID_SRT;

    if (write_ts) {
        int64_t s = pkt->pts, e, d = pkt->duration;
        int size, x1 = -1, y1 = -1, x2 = -1, y2 = -1;
        const uint8_t *p;

        p = av_packet_get_side_data(pkt, AV_PKT_DATA_SUBTITLE_POSITION, &size);
        if (p && size == 16) {
            x1 = AV_RL32(p     );
            y1 = AV_RL32(p +  4);
            x2 = AV_RL32(p +  8);
            y2 = AV_RL32(p + 12);
        }

        if (d <= 0)
            /* For backward compatibility, fallback to convergence_duration. */
            d = pkt->convergence_duration;
        if (s == AV_NOPTS_VALUE || d < 0) {
            av_log(avf, AV_LOG_WARNING,
                   "Insufficient timestamps in event number %d.\n", srt->index);
            return 0;
        }
        e = s + d;
        /* timestamps are in milliseconds (set in srt_write_header) */
        avio_printf(avf->pb, "%d\n%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d",
                    srt->index,
                    (int)(s / 3600000),      (int)(s / 60000) % 60,
                    (int)(s /    1000) % 60, (int)(s %  1000),
                    (int)(e / 3600000),      (int)(e / 60000) % 60,
                    (int)(e /    1000) % 60, (int)(e %  1000));
        if (p)
            avio_printf(avf->pb, "  X1:%03d X2:%03d Y1:%03d Y2:%03d",
                        x1, x2, y1, y2);
        avio_printf(avf->pb, "\n");
    }
    avio_write(avf->pb, pkt->data, pkt->size);
    if (write_ts)
        avio_write(avf->pb, "\n\n", 2);
    srt->index++;
    return 0;
}
 
/* SubRip (.srt) muxer; accepts a single TEXT/SUBRIP/SRT subtitle stream. */
AVOutputFormat ff_srt_muxer = {
    .name           = "srt",
    .long_name      = NULL_IF_CONFIG_SMALL("SubRip subtitle"),
    .mime_type      = "application/x-subrip",
    .extensions     = "srt",
    .priv_data_size = sizeof(SRTContext),
    .write_header   = srt_write_header,
    .write_packet   = srt_write_packet,
    .flags          = AVFMT_VARIABLE_FPS | AVFMT_TS_NONSTRICT,
    .subtitle_codec = AV_CODEC_ID_SUBRIP,
};
/contrib/sdk/sources/ffmpeg/libavformat/srtp.c
0,0 → 1,472
/*
* SRTP encryption/decryption
* Copyright (c) 2012 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/base64.h"
#include "libavutil/aes.h"
#include "libavutil/hmac.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "rtp.h"
#include "rtpdec.h"
#include "srtp.h"
 
/* Release the AES and HMAC state owned by the context. Safe on NULL. */
void ff_srtp_free(struct SRTPContext *s)
{
    if (!s)
        return;
    av_freep(&s->aes);
    if (s->hmac) {
        av_hmac_free(s->hmac);
        s->hmac = NULL;
    }
}
 
/* AES-128-CTR keystream generator/applier: the 16-bit block counter lives
 * in the last two bytes of the IV; each keystream block is XORed into
 * outbuf. Decryption is identical to encryption. */
static void encrypt_counter(struct AVAES *aes, uint8_t *iv, uint8_t *outbuf,
                            int outlen)
{
    int pos = 0, block = 0;

    while (pos < outlen) {
        uint8_t keystream[16];
        int k;

        AV_WB16(&iv[14], block++);
        av_aes_crypt(aes, keystream, iv, 1, NULL, 0);
        for (k = 0; k < 16 && pos < outlen; k++, pos++)
            outbuf[pos] ^= keystream[k];
    }
}
 
/* RFC 3711 section 4.3.1 session key derivation (key derivation rate 0):
 * encrypt a zero buffer in counter mode, seeded with the master salt XORed
 * with the label byte. */
static void derive_key(struct AVAES *aes, const uint8_t *salt, int label,
                       uint8_t *out, int outlen)
{
    uint8_t input[16] = { 0 };

    memcpy(input, salt, 14);
    input[7] ^= label;  /* the label goes into byte 7 of the x value */
    memset(out, 0, outlen);
    encrypt_counter(aes, input, out, outlen);
}
 
/**
 * Configure the context from an SDP crypto suite name and base64 params,
 * then derive the RTP and RTCP session keys (RFC 3711 section 4.3).
 *
 * @return 0 on success, AVERROR(EINVAL) on unknown suite or bad params,
 *         AVERROR(ENOMEM) on allocation failure.
 */
int ff_srtp_set_crypto(struct SRTPContext *s, const char *suite,
                       const char *params)
{
    uint8_t buf[30]; /* 16-byte master key + 14-byte master salt */

    ff_srtp_free(s); /* drop any previously configured state */

    // RFC 4568
    if (!strcmp(suite, "AES_CM_128_HMAC_SHA1_80") ||
        !strcmp(suite, "SRTP_AES128_CM_HMAC_SHA1_80")) {
        s->rtp_hmac_size = s->rtcp_hmac_size = 10;
    } else if (!strcmp(suite, "AES_CM_128_HMAC_SHA1_32")) {
        s->rtp_hmac_size = s->rtcp_hmac_size = 4;
    } else if (!strcmp(suite, "SRTP_AES128_CM_HMAC_SHA1_32")) {
        // RFC 5764 section 4.1.2: short RTP tag, full-size RTCP tag
        s->rtp_hmac_size = 4;
        s->rtcp_hmac_size = 10;
    } else {
        av_log(NULL, AV_LOG_WARNING, "SRTP Crypto suite %s not supported\n",
               suite);
        return AVERROR(EINVAL);
    }
    /* The params must decode to exactly key + salt, nothing more. */
    if (av_base64_decode(buf, params, sizeof(buf)) != sizeof(buf)) {
        av_log(NULL, AV_LOG_WARNING, "Incorrect amount of SRTP params\n");
        return AVERROR(EINVAL);
    }
    // MKI and lifetime not handled yet
    s->aes  = av_aes_alloc();
    s->hmac = av_hmac_alloc(AV_HMAC_SHA1);
    if (!s->aes || !s->hmac)
        return AVERROR(ENOMEM);
    memcpy(s->master_key, buf, 16);
    memcpy(s->master_salt, buf + 16, 14);

    // RFC 3711
    av_aes_init(s->aes, s->master_key, 128, 0);

    /* Labels per RFC 3711 4.3: 0x00/0x01/0x02 = RTP key/auth/salt,
     * 0x03/0x04/0x05 = the RTCP equivalents. */
    derive_key(s->aes, s->master_salt, 0x00, s->rtp_key, sizeof(s->rtp_key));
    derive_key(s->aes, s->master_salt, 0x02, s->rtp_salt, sizeof(s->rtp_salt));
    derive_key(s->aes, s->master_salt, 0x01, s->rtp_auth, sizeof(s->rtp_auth));

    derive_key(s->aes, s->master_salt, 0x03, s->rtcp_key, sizeof(s->rtcp_key));
    derive_key(s->aes, s->master_salt, 0x05, s->rtcp_salt, sizeof(s->rtcp_salt));
    derive_key(s->aes, s->master_salt, 0x04, s->rtcp_auth, sizeof(s->rtcp_auth));
    return 0;
}
 
/* Build the 16-byte CTR IV per RFC 3711 section 4.1.1:
 * IV = (salt << 16) XOR (SSRC << 64) XOR (packet index << 16). */
static void create_iv(uint8_t *iv, const uint8_t *salt, uint64_t index,
                      uint32_t ssrc)
{
    uint8_t idx_be[8];
    int pos;

    memset(iv, 0, 16);
    AV_WB32(iv + 4, ssrc);
    AV_WB64(idx_be, index);
    for (pos = 0; pos < 8; pos++)  /* index shifted left by 16 bits */
        iv[pos + 6] ^= idx_be[pos];
    for (pos = 0; pos < 14; pos++)
        iv[pos] ^= salt[pos];
}
 
/**
 * Authenticate and decrypt an SRTP/SRTCP packet in place.
 *
 * On success, *lenptr is updated to the plaintext packet length (the auth
 * tag and, for RTCP, the SRTCP index word are stripped).
 *
 * @return 0 on success, a negative AVERROR on malformed input or HMAC
 *         mismatch (in which case the buffer is left encrypted).
 */
int ff_srtp_decrypt(struct SRTPContext *s, uint8_t *buf, int *lenptr)
{
    uint8_t iv[16] = { 0 }, hmac[20];
    int len = *lenptr;
    int av_uninit(seq_largest);
    uint32_t ssrc, av_uninit(roc);
    uint64_t index;
    int rtcp, hmac_size;

    // TODO: Missing replay protection

    if (len < 2)
        return AVERROR_INVALIDDATA;

    /* RTP vs RTCP is distinguished by the payload-type byte. */
    rtcp = RTP_PT_IS_RTCP(buf[1]);
    hmac_size = rtcp ? s->rtcp_hmac_size : s->rtp_hmac_size;

    if (len < hmac_size)
        return AVERROR_INVALIDDATA;

    // Authentication HMAC
    av_hmac_init(s->hmac, rtcp ? s->rtcp_auth : s->rtp_auth, sizeof(s->rtp_auth));
    // If MKI is used, this should exclude the MKI as well
    av_hmac_update(s->hmac, buf, len - hmac_size);

    if (!rtcp) {
        int seq = AV_RB16(buf + 2);
        uint32_t v;
        uint8_t rocbuf[4];

        // RFC 3711 section 3.3.1, appendix A
        /* Estimate which rollover counter value (roc - 1, roc or roc + 1)
         * the 16-bit sequence number belongs to, tolerating reordering
         * around a wraparound. */
        seq_largest = s->seq_initialized ? s->seq_largest : seq;
        v = roc = s->roc;
        if (seq_largest < 32768) {
            if (seq - seq_largest > 32768)
                v = roc - 1;
        } else {
            if (seq_largest - 32768 > seq)
                v = roc + 1;
        }
        if (v == roc) {
            seq_largest = FFMAX(seq_largest, seq);
        } else if (v == roc + 1) {
            seq_largest = seq;
            roc = v;
        }
        index = seq + (((uint64_t)v) << 16);

        /* The ROC is included in the authenticated data for RTP. */
        AV_WB32(rocbuf, roc);
        av_hmac_update(s->hmac, rocbuf, 4);
    }

    av_hmac_final(s->hmac, hmac, sizeof(hmac));
    /* Verify before decrypting anything; reject forged packets. */
    if (memcmp(hmac, buf + len - hmac_size, hmac_size)) {
        av_log(NULL, AV_LOG_WARNING, "HMAC mismatch\n");
        return AVERROR_INVALIDDATA;
    }

    len -= hmac_size;
    *lenptr = len;

    if (len < 12)
        return AVERROR_INVALIDDATA;

    if (rtcp) {
        /* Trailing word: E bit (encrypted flag) + 31-bit SRTCP index. */
        uint32_t srtcp_index = AV_RB32(buf + len - 4);
        len -= 4;
        *lenptr = len;

        ssrc = AV_RB32(buf + 4);
        index = srtcp_index & 0x7fffffff;

        buf += 8;
        len -= 8;
        /* E bit clear: packet was authenticated but never encrypted. */
        if (!(srtcp_index & 0x80000000))
            return 0;
    } else {
        int ext, csrc;
        /* Authentication passed: commit the updated seq/ROC state. */
        s->seq_initialized = 1;
        s->seq_largest = seq_largest;
        s->roc = roc;

        csrc = buf[0] & 0x0f; /* CSRC count from the first header byte */
        ext = buf[0] & 0x10;  /* header extension flag */
        ssrc = AV_RB32(buf + 8);

        buf += 12;
        len -= 12;

        /* Only the payload is encrypted; skip the CSRC list ... */
        buf += 4 * csrc;
        len -= 4 * csrc;
        if (len < 0)
            return AVERROR_INVALIDDATA;

        /* ... and the RTP header extension, if present. */
        if (ext) {
            if (len < 4)
                return AVERROR_INVALIDDATA;
            ext = (AV_RB16(buf + 2) + 1) * 4;
            if (len < ext)
                return AVERROR_INVALIDDATA;
            len -= ext;
            buf += ext;
        }
    }

    /* AES-CTR decryption (same operation as encryption) of the payload. */
    create_iv(iv, rtcp ? s->rtcp_salt : s->rtp_salt, index, ssrc);
    av_aes_init(s->aes, rtcp ? s->rtcp_key : s->rtp_key, 128, 0);
    encrypt_counter(s->aes, iv, buf, len);

    return 0;
}
 
/**
 * Encrypt and authenticate an RTP/RTCP packet into out.
 *
 * @return the total SRTP/SRTCP packet length written to out, 0 if out is
 *         too small, or a negative AVERROR on malformed input.
 */
int ff_srtp_encrypt(struct SRTPContext *s, const uint8_t *in, int len,
                    uint8_t *out, int outlen)
{
    uint8_t iv[16] = { 0 }, hmac[20];
    uint64_t index;
    uint32_t ssrc;
    int rtcp, hmac_size, padding;
    uint8_t *buf;

    if (len < 8)
        return AVERROR_INVALIDDATA;

    rtcp = RTP_PT_IS_RTCP(in[1]);
    hmac_size = rtcp ? s->rtcp_hmac_size : s->rtp_hmac_size;
    padding = hmac_size;
    if (rtcp)
        padding += 4; // For the RTCP index

    if (len + padding > outlen)
        return 0;

    /* Work on a copy; headers stay in the clear. */
    memcpy(out, in, len);
    buf = out;

    if (rtcp) {
        ssrc = AV_RB32(buf + 4);
        index = s->rtcp_index++; /* per-packet SRTCP index */

        buf += 8;
        len -= 8;
    } else {
        int ext, csrc;
        int seq = AV_RB16(buf + 2);

        if (len < 12)
            return AVERROR_INVALIDDATA;

        ssrc = AV_RB32(buf + 8);

        /* Maintain the rollover counter: a sequence number smaller than
         * the previous one means the 16-bit counter wrapped. */
        if (seq < s->seq_largest)
            s->roc++;
        s->seq_largest = seq;
        index = seq + (((uint64_t)s->roc) << 16);

        csrc = buf[0] & 0x0f; /* CSRC count */
        ext = buf[0] & 0x10;  /* header extension flag */

        buf += 12;
        len -= 12;

        /* Only the payload is encrypted; skip the CSRC list ... */
        buf += 4 * csrc;
        len -= 4 * csrc;
        if (len < 0)
            return AVERROR_INVALIDDATA;

        /* ... and the RTP header extension, if present. */
        if (ext) {
            if (len < 4)
                return AVERROR_INVALIDDATA;
            ext = (AV_RB16(buf + 2) + 1) * 4;
            if (len < ext)
                return AVERROR_INVALIDDATA;
            len -= ext;
            buf += ext;
        }
    }

    /* AES-CTR encryption of the payload. */
    create_iv(iv, rtcp ? s->rtcp_salt : s->rtp_salt, index, ssrc);
    av_aes_init(s->aes, rtcp ? s->rtcp_key : s->rtp_key, 128, 0);
    encrypt_counter(s->aes, iv, buf, len);

    if (rtcp) {
        /* Append E bit (encrypted) + 31-bit SRTCP index. */
        AV_WB32(buf + len, 0x80000000 | index);
        len += 4;
    }

    /* Authenticate everything written so far (plus the ROC for RTP). */
    av_hmac_init(s->hmac, rtcp ? s->rtcp_auth : s->rtp_auth, sizeof(s->rtp_auth));
    av_hmac_update(s->hmac, out, buf + len - out);
    if (!rtcp) {
        uint8_t rocbuf[4];
        AV_WB32(rocbuf, s->roc);
        av_hmac_update(s->hmac, rocbuf, 4);
    }
    av_hmac_final(s->hmac, hmac, sizeof(hmac));

    /* Append the (truncated) authentication tag. */
    memcpy(buf + len, hmac, hmac_size);
    len += hmac_size;
    return buf + len - out;
}
 
#ifdef TEST
#include <stdio.h>
 
static const char *aes128_80_key = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmn";
 
static const uint8_t rtp_aes128_80[] = {
// RTP header
0x80, 0xe0, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78,
// encrypted payload
0x62, 0x69, 0x76, 0xca, 0xc5,
// HMAC
0xa1, 0xac, 0x1b, 0xb4, 0xa0, 0x1c, 0xd5, 0x49, 0x28, 0x99,
};
 
static const uint8_t rtcp_aes128_80[] = {
// RTCP header
0x81, 0xc9, 0x00, 0x07, 0x12, 0x34, 0x56, 0x78,
// encrypted payload
0x8a, 0xac, 0xdc, 0xa5, 0x4c, 0xf6, 0x78, 0xa6, 0x62, 0x8f, 0x24, 0xda,
0x6c, 0x09, 0x3f, 0xa9, 0x28, 0x7a, 0xb5, 0x7f, 0x1f, 0x0f, 0xc9, 0x35,
// RTCP index
0x80, 0x00, 0x00, 0x03,
// HMAC
0xe9, 0x3b, 0xc0, 0x5c, 0x0c, 0x06, 0x9f, 0xab, 0xc0, 0xde,
};
 
static const char *aes128_32_key = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmn";
 
static const uint8_t rtp_aes128_32[] = {
// RTP header
0x80, 0xe0, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78,
// encrypted payload
0x62, 0x69, 0x76, 0xca, 0xc5,
// HMAC
0xa1, 0xac, 0x1b, 0xb4,
};
 
static const uint8_t rtcp_aes128_32[] = {
// RTCP header
0x81, 0xc9, 0x00, 0x07, 0x12, 0x34, 0x56, 0x78,
// encrypted payload
0x35, 0xe9, 0xb5, 0xff, 0x0d, 0xd1, 0xde, 0x70, 0x74, 0x10, 0xaa, 0x1b,
0xb2, 0x8d, 0xf0, 0x20, 0x02, 0x99, 0x6b, 0x1b, 0x0b, 0xd0, 0x47, 0x34,
// RTCP index
0x80, 0x00, 0x00, 0x04,
// HMAC
0x5b, 0xd2, 0xa9, 0x9d,
};
 
static const char *aes128_80_32_key = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmn";
 
static const uint8_t rtp_aes128_80_32[] = {
// RTP header
0x80, 0xe0, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78,
// encrypted payload
0x62, 0x69, 0x76, 0xca, 0xc5,
// HMAC
0xa1, 0xac, 0x1b, 0xb4,
};
 
static const uint8_t rtcp_aes128_80_32[] = {
// RTCP header
0x81, 0xc9, 0x00, 0x07, 0x12, 0x34, 0x56, 0x78,
// encrypted payload
0xd6, 0xae, 0xc1, 0x58, 0x63, 0x70, 0xc9, 0x88, 0x66, 0x26, 0x1c, 0x53,
0xff, 0x5d, 0x5d, 0x2b, 0x0f, 0x8c, 0x72, 0x3e, 0xc9, 0x1d, 0x43, 0xf9,
// RTCP index
0x80, 0x00, 0x00, 0x05,
// HMAC
0x09, 0x16, 0xb4, 0x27, 0x9a, 0xe9, 0x92, 0x26, 0x4e, 0x10,
};
 
/* Dump a buffer as lowercase hex on a single line. */
static void print_data(const uint8_t *buf, int len)
{
    while (len-- > 0)
        printf("%02x", *buf++);
    printf("\n");
}
 
/* Decrypt a copy of the packet, print the plaintext as hex, and return its
 * length; returns -1 on authentication/decryption failure. */
static int test_decrypt(struct SRTPContext *srtp, const uint8_t *in, int len,
                        uint8_t *out)
{
    memcpy(out, in, len);
    if (!ff_srtp_decrypt(srtp, out, &len)) {
        print_data(out, len);
        return len;
    } else
        return -1;
}
 
/* Round-trip check: encrypt data with one context, decrypt with another
 * using the same suite/key, and report whether the plaintext survives. */
static void test_encrypt(const uint8_t *data, int in_len, const char *suite,
                         const char *key)
{
    struct SRTPContext enc = { 0 }, dec = { 0 };
    int len;
    char buf[RTP_MAX_PACKET_LENGTH];
    ff_srtp_set_crypto(&enc, suite, key);
    ff_srtp_set_crypto(&dec, suite, key);
    len = ff_srtp_encrypt(&enc, data, in_len, buf, sizeof(buf));
    if (!ff_srtp_decrypt(&dec, buf, &len)) {
        if (len == in_len && !memcmp(buf, data, len))
            printf("Decrypted content matches input\n");
        else
            printf("Decrypted content doesn't match input\n");
    } else {
        printf("Decryption failed\n");
    }
    ff_srtp_free(&enc);
    ff_srtp_free(&dec);
}
 
/* Self-test: decrypt the reference vectors for each suite, then round-trip
 * the recovered plaintext through every suite with a fresh key. */
int main(void)
{
    static const char *aes128_80_suite = "AES_CM_128_HMAC_SHA1_80";
    static const char *aes128_32_suite = "AES_CM_128_HMAC_SHA1_32";
    static const char *aes128_80_32_suite = "SRTP_AES128_CM_HMAC_SHA1_32";
    static const char *test_key = "abcdefghijklmnopqrstuvwxyz1234567890ABCD";
    uint8_t buf[RTP_MAX_PACKET_LENGTH];
    struct SRTPContext srtp = { 0 };
    int len;
    ff_srtp_set_crypto(&srtp, aes128_80_suite, aes128_80_key);
    len = test_decrypt(&srtp, rtp_aes128_80, sizeof(rtp_aes128_80), buf);
    test_encrypt(buf, len, aes128_80_suite, test_key);
    test_encrypt(buf, len, aes128_32_suite, test_key);
    test_encrypt(buf, len, aes128_80_32_suite, test_key);
    /* NOTE(review): the RTCP decrypt result below is discarded, so `len`
     * still holds the RTP plaintext length for the following round-trips —
     * confirm whether this is intended. */
    test_decrypt(&srtp, rtcp_aes128_80, sizeof(rtcp_aes128_80), buf);
    test_encrypt(buf, len, aes128_80_suite, test_key);
    test_encrypt(buf, len, aes128_32_suite, test_key);
    test_encrypt(buf, len, aes128_80_32_suite, test_key);
    ff_srtp_free(&srtp);

    memset(&srtp, 0, sizeof(srtp)); // Clear the context
    ff_srtp_set_crypto(&srtp, aes128_32_suite, aes128_32_key);
    test_decrypt(&srtp, rtp_aes128_32, sizeof(rtp_aes128_32), buf);
    test_decrypt(&srtp, rtcp_aes128_32, sizeof(rtcp_aes128_32), buf);
    ff_srtp_free(&srtp);

    memset(&srtp, 0, sizeof(srtp)); // Clear the context
    ff_srtp_set_crypto(&srtp, aes128_80_32_suite, aes128_80_32_key);
    test_decrypt(&srtp, rtp_aes128_80_32, sizeof(rtp_aes128_80_32), buf);
    test_decrypt(&srtp, rtcp_aes128_80_32, sizeof(rtcp_aes128_80_32), buf);
    ff_srtp_free(&srtp);
    return 0;
}
#endif /* TEST */
/contrib/sdk/sources/ffmpeg/libavformat/srtp.h
0,0 → 1,52
/*
* SRTP encryption/decryption
* Copyright (c) 2012 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_SRTP_H
#define AVFORMAT_SRTP_H
 
#include <stdint.h>
 
struct AVAES;
struct AVHMAC;
 
/** State for one direction of an SRTP/SRTCP session (RFC 3711). */
struct SRTPContext {
    struct AVAES *aes;   // AES-128-CTR state, allocated by ff_srtp_set_crypto
    struct AVHMAC *hmac; // HMAC-SHA1 state for packet authentication
    int rtp_hmac_size, rtcp_hmac_size; // truncated auth tag sizes in bytes
    uint8_t master_key[16];  // decoded from the base64 crypto params
    uint8_t master_salt[14];
    uint8_t rtp_key[16], rtcp_key[16];   // derived session encryption keys
    uint8_t rtp_salt[14], rtcp_salt[14]; // derived session salts
    uint8_t rtp_auth[20], rtcp_auth[20]; // derived session auth keys
    int seq_largest, seq_initialized; // highest RTP seq seen, and whether set
    uint32_t roc; // RTP rollover counter (high 32 bits of the packet index)

    uint32_t rtcp_index; // outgoing SRTCP packet index
};
 
int ff_srtp_set_crypto(struct SRTPContext *s, const char *suite,
const char *params);
void ff_srtp_free(struct SRTPContext *s);
int ff_srtp_decrypt(struct SRTPContext *s, uint8_t *buf, int *lenptr);
int ff_srtp_encrypt(struct SRTPContext *s, const uint8_t *in, int len,
uint8_t *out, int outlen);
 
#endif /* AVFORMAT_SRTP_H */
/contrib/sdk/sources/ffmpeg/libavformat/srtpproto.c
0,0 → 1,145
/*
* SRTP network protocol
* Copyright (c) 2012 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/opt.h"
#include "avformat.h"
#include "avio_internal.h"
#include "url.h"
 
#include "internal.h"
#include "rtpdec.h"
#include "srtp.h"
 
typedef struct SRTPProtoContext {
    const AVClass *class;                 // for AVOption handling
    URLContext *rtp_hd;                   // nested plain rtp:// handle
    const char *out_suite, *out_params;   // sending-side suite + base64 key
    const char *in_suite, *in_params;     // receiving-side suite + base64 key
    struct SRTPContext srtp_out, srtp_in; // crypto state per direction
    uint8_t encryptbuf[RTP_MAX_PACKET_LENGTH]; // scratch for outgoing packets
} SRTPProtoContext;
 
#define D AV_OPT_FLAG_DECODING_PARAM
#define E AV_OPT_FLAG_ENCODING_PARAM
/* Crypto suite and base64 key/salt, configured separately per direction. */
static const AVOption options[] = {
    { "srtp_out_suite", "", offsetof(SRTPProtoContext, out_suite), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
    { "srtp_out_params", "", offsetof(SRTPProtoContext, out_params), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
    { "srtp_in_suite", "", offsetof(SRTPProtoContext, in_suite), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
    { "srtp_in_params", "", offsetof(SRTPProtoContext, in_params), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
    { NULL }
};

static const AVClass srtp_context_class = {
    .class_name = "srtp",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Tear down both crypto directions, then close the nested RTP handle. */
static int srtp_close(URLContext *h)
{
    SRTPProtoContext *ctx = h->priv_data;

    ff_srtp_free(&ctx->srtp_out);
    ff_srtp_free(&ctx->srtp_in);
    ffurl_close(ctx->rtp_hd);
    ctx->rtp_hd = NULL;
    return 0;
}
 
/* Open srtp://host:port/...: set up per-direction crypto from the options
 * (a direction stays plaintext unless both its suite and params are given),
 * then delegate networking to the plain rtp:// protocol. */
static int srtp_open(URLContext *h, const char *uri, int flags)
{
    SRTPProtoContext *s = h->priv_data;
    char hostname[256], buf[1024], path[1024];
    int rtp_port, ret;

    if (s->out_suite && s->out_params)
        if ((ret = ff_srtp_set_crypto(&s->srtp_out, s->out_suite, s->out_params)) < 0)
            goto fail;
    if (s->in_suite && s->in_params)
        if ((ret = ff_srtp_set_crypto(&s->srtp_in, s->in_suite, s->in_params)) < 0)
            goto fail;

    /* Rewrite the URL to rtp:// and open the nested handle. */
    av_url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &rtp_port,
                 path, sizeof(path), uri);
    ff_url_join(buf, sizeof(buf), "rtp", NULL, hostname, rtp_port, "%s", path);
    if ((ret = ffurl_open(&s->rtp_hd, buf, flags, &h->interrupt_callback, NULL)) < 0)
        goto fail;

    /* Reserve room for the SRTP trailer: up to a 10-byte auth tag plus the
     * 4-byte SRTCP index. */
    h->max_packet_size = FFMIN(s->rtp_hd->max_packet_size,
                               sizeof(s->encryptbuf)) - 14;
    h->is_streamed = 1;
    return 0;

fail:
    srtp_close(h);
    return ret;
}
 
/* Read one packet from the nested RTP handle; when incoming crypto is
 * configured, packets that fail authentication/decryption are silently
 * dropped and the read is retried. */
static int srtp_read(URLContext *h, uint8_t *buf, int size)
{
    SRTPProtoContext *ctx = h->priv_data;
    int ret;

    for (;;) {
        ret = ffurl_read(ctx->rtp_hd, buf, size);
        if (ret <= 0 || !ctx->srtp_in.aes)
            return ret;
        if (ff_srtp_decrypt(&ctx->srtp_in, buf, &ret) >= 0)
            return ret; /* ret was updated to the plaintext length */
    }
}
 
/* Encrypt (when outgoing crypto is configured) and send one packet. */
static int srtp_write(URLContext *h, const uint8_t *buf, int size)
{
    SRTPProtoContext *ctx = h->priv_data;
    int elen;

    /* No crypto configured for this direction: pass through as-is. */
    if (!ctx->srtp_out.aes)
        return ffurl_write(ctx->rtp_hd, buf, size);

    elen = ff_srtp_encrypt(&ctx->srtp_out, buf, size, ctx->encryptbuf,
                           sizeof(ctx->encryptbuf));
    if (elen < 0)
        return elen;
    return ffurl_write(ctx->rtp_hd, ctx->encryptbuf, elen);
}
 
/* Expose the underlying RTP socket descriptor. */
static int srtp_get_file_handle(URLContext *h)
{
    SRTPProtoContext *ctx = h->priv_data;
    return ffurl_get_file_handle(ctx->rtp_hd);
}
 
/* Expose all underlying RTP socket descriptors (RTP + RTCP). */
static int srtp_get_multi_file_handle(URLContext *h, int **handles,
                                      int *numhandles)
{
    SRTPProtoContext *ctx = h->priv_data;
    return ffurl_get_multi_file_handle(ctx->rtp_hd, handles, numhandles);
}
 
/* srtp:// protocol — a thin encrypting/decrypting wrapper around rtp://. */
URLProtocol ff_srtp_protocol = {
    .name                      = "srtp",
    .url_open                  = srtp_open,
    .url_read                  = srtp_read,
    .url_write                 = srtp_write,
    .url_close                 = srtp_close,
    .url_get_file_handle       = srtp_get_file_handle,
    .url_get_multi_file_handle = srtp_get_multi_file_handle,
    .priv_data_size            = sizeof(SRTPProtoContext),
    .priv_data_class           = &srtp_context_class,
    .flags                     = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/subtitles.c
0,0 → 1,283
/*
* Copyright (c) 2012-2013 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "subtitles.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
 
/**
 * Insert a new subtitle event, or append to the previous one when merge
 * is set.
 *
 * @param event the subtitle line, may not be zero terminated
 * @param len   length of the event (strlen() sense, without '\0')
 * @param merge 1 to concatenate with the previous event, 0 for a new entry
 * @return the queued packet, or NULL on allocation failure
 *
 * Fix: the original incremented q->nb_subs before av_new_packet(); on
 * allocation failure the queue then contained an entry that was never
 * successfully initialized, which queue_finalize/queue_clean would later
 * sort and free. Commit the count only after the packet exists.
 */
AVPacket *ff_subtitles_queue_insert(FFDemuxSubtitlesQueue *q,
                                    const uint8_t *event, int len, int merge)
{
    AVPacket *subs, *sub;

    if (merge && q->nb_subs > 0) {
        /* merge with previous event */
        int old_len;
        sub = &q->subs[q->nb_subs - 1];
        old_len = sub->size;
        if (av_grow_packet(sub, len) < 0)
            return NULL;
        memcpy(sub->data + old_len, event, len);
    } else {
        /* new event */
        if (q->nb_subs >= INT_MAX/sizeof(*q->subs) - 1)
            return NULL;
        subs = av_fast_realloc(q->subs, &q->allocated_size,
                               (q->nb_subs + 1) * sizeof(*q->subs));
        if (!subs)
            return NULL;
        q->subs = subs;
        sub = &subs[q->nb_subs];
        if (av_new_packet(sub, len) < 0)
            return NULL; /* count not yet bumped: queue stays consistent */
        q->nb_subs++;
        sub->flags |= AV_PKT_FLAG_KEY;
        sub->pts = sub->dts = 0;
        memcpy(sub->data, event, len);
    }
    return sub;
}
 
/* qsort comparator: order by pts first, then by file position. */
static int cmp_pkt_sub_ts_pos(const void *a, const void *b)
{
    const AVPacket *p1 = a, *p2 = b;

    if (p1->pts != p2->pts)
        return p1->pts > p2->pts ? 1 : -1;
    if (p1->pos != p2->pos)
        return p1->pos > p2->pos ? 1 : -1;
    return 0;
}
 
/* qsort comparator: order by file position first, then by pts. */
static int cmp_pkt_sub_pos_ts(const void *a, const void *b)
{
    const AVPacket *p1 = a, *p2 = b;

    if (p1->pos != p2->pos)
        return p1->pos > p2->pos ? 1 : -1;
    if (p1->pts != p2->pts)
        return p1->pts > p2->pts ? 1 : -1;
    return 0;
}
 
/* Sort the queue by the configured criterion and give every event with an
 * unknown duration (-1) a duration lasting until the next event starts. */
void ff_subtitles_queue_finalize(FFDemuxSubtitlesQueue *q)
{
    int (*cmp)(const void *, const void *);
    int i;

    cmp = q->sort == SUB_SORT_TS_POS ? cmp_pkt_sub_ts_pos
                                     : cmp_pkt_sub_pos_ts;
    qsort(q->subs, q->nb_subs, sizeof(*q->subs), cmp);

    for (i = 0; i + 1 < q->nb_subs; i++)
        if (q->subs[i].duration == -1)
            q->subs[i].duration = q->subs[i + 1].pts - q->subs[i].pts;
}
 
/* Generic read_packet() callback: pop the next queued event into pkt. */
int ff_subtitles_queue_read_packet(FFDemuxSubtitlesQueue *q, AVPacket *pkt)
{
    AVPacket *sub;

    if (q->current_sub_idx == q->nb_subs)
        return AVERROR_EOF;
    sub = q->subs + q->current_sub_idx;
    if (av_copy_packet(pkt, sub) < 0)
        return AVERROR(ENOMEM);

    pkt->dts = pkt->pts; /* subtitle packets are never reordered */
    q->current_sub_idx++;
    return 0;
}
 
/* Binary-search the pts-sorted queue for the entry closest to ts; returns
 * its index, or AVERROR(ERANGE) when the queue is empty. */
static int search_sub_ts(const FFDemuxSubtitlesQueue *q, int64_t ts)
{
    int s1 = 0, s2 = q->nb_subs - 1;

    if (s2 < s1)
        return AVERROR(ERANGE);

    for (;;) {
        int mid;

        if (s1 == s2)
            return s1;
        if (s1 == s2 - 1)
            /* NOTE(review): picks the lower-pts of the two remaining
             * candidates regardless of ts — confirm the intended
             * tie-break with the callers. */
            return q->subs[s1].pts <= q->subs[s2].pts ? s1 : s2;
        mid = (s1 + s2) / 2;
        if (q->subs[mid].pts <= ts)
            s1 = mid;
        else
            s2 = mid;
    }
}
 
/**
 * Update current_sub_idx to emulate a seek (read_seek2 semantics): pick an
 * event with pts inside [min_ts, max_ts] as close to ts as possible, then
 * rewind to cover overlapping and equal-pts earlier entries.
 */
int ff_subtitles_queue_seek(FFDemuxSubtitlesQueue *q, AVFormatContext *s, int stream_index,
                            int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    if (flags & AVSEEK_FLAG_BYTE) {
        return AVERROR(ENOSYS);
    } else if (flags & AVSEEK_FLAG_FRAME) {
        /* Frame-based seek: ts is directly an index into the queue. */
        if (ts < 0 || ts >= q->nb_subs)
            return AVERROR(ERANGE);
        q->current_sub_idx = ts;
    } else {
        int i, idx = search_sub_ts(q, ts);
        int64_t ts_selected;

        if (idx < 0)
            return idx;
        /* Nudge the candidate into [min_ts, max_ts], restricted to the
         * requested stream when one is specified. */
        for (i = idx; i < q->nb_subs && q->subs[i].pts < min_ts; i++)
            if (stream_index == -1 || q->subs[i].stream_index == stream_index)
                idx = i;
        for (i = idx; i > 0 && q->subs[i].pts > max_ts; i--)
            if (stream_index == -1 || q->subs[i].stream_index == stream_index)
                idx = i;

        ts_selected = q->subs[idx].pts;
        if (ts_selected < min_ts || ts_selected > max_ts)
            return AVERROR(ERANGE);

        /* look back in the latest subtitles for overlapping subtitles */
        for (i = idx - 1; i >= 0; i--) {
            int64_t pts = q->subs[i].pts;
            if (q->subs[i].duration <= 0 ||
                (stream_index != -1 && q->subs[i].stream_index != stream_index))
                continue;
            /* still displayed at ts_selected: include it */
            if (pts >= min_ts && pts > ts_selected - q->subs[i].duration)
                idx = i;
            else
                break;
        }

        /* If the queue is used to store multiple subtitles streams (like with
         * VobSub) and the stream index is not specified, we need to make sure
         * to focus on the smallest file position offset for a same timestamp;
         * queue is ordered by pts and then filepos, so we can take the first
         * entry for a given timestamp. */
        if (stream_index == -1)
            while (idx > 0 && q->subs[idx - 1].pts == q->subs[idx].pts)
                idx--;

        q->current_sub_idx = idx;
    }
    return 0;
}
 
/* Free every queued packet and the array itself, then reset all counters. */
void ff_subtitles_queue_clean(FFDemuxSubtitlesQueue *q)
{
    int n;

    for (n = 0; n < q->nb_subs; n++)
        av_free_packet(&q->subs[n]);
    av_freep(&q->subs);
    q->nb_subs = q->allocated_size = q->current_sub_idx = 0;
}
 
/**
 * SMIL helper: load the next chunk — a "<...>" tag or a run of untagged
 * content — into buf. *c is a one-character lookahead cache (0 = empty).
 *
 * @return the number of characters consumed, 0 at end of input
 */
int ff_smil_extract_next_chunk(AVIOContext *pb, AVBPrint *buf, char *c)
{
    int i = 0;
    char end_chr;

    if (!*c) // cached char?
        *c = avio_r8(pb);
    if (!*c)
        return 0;

    /* A '<' opens a tag ending at '>'; anything else is plain content
     * running until the next '<' (which stays cached in *c). */
    end_chr = *c == '<' ? '>' : '<';
    do {
        av_bprint_chars(buf, *c, 1);
        *c = avio_r8(pb);
        i++;
    } while (*c != end_chr && *c);
    if (end_chr == '>') {
        /* Include the closing '>' and drop it from the cache. */
        av_bprint_chars(buf, '>', 1);
        *c = 0;
    }
    return i;
}
 
/**
 * SMIL helper: point at the value of attribute attr inside tag s ("<...>").
 *
 * @return pointer just past the '=' (and past an opening '"' if present),
 *         or NULL when the attribute is not found
 */
const char *ff_smil_get_attr_ptr(const char *s, const char *attr)
{
    int in_quotes = 0;
    const int len = strlen(attr);

    while (*s) {
        /* Skip the current token, treating quoted spans as opaque. */
        while (*s) {
            if (!in_quotes && av_isspace(*s))
                break;
            in_quotes ^= *s == '"'; // XXX: support escaping?
            s++;
        }
        while (av_isspace(*s))
            s++;
        if (!av_strncasecmp(s, attr, len) && s[len] == '=')
            return s + len + 1 + (s[len + 1] == '"');
    }
    return NULL;
}
 
/* True for either line-break character (CR or LF). */
static inline int is_eol(char c)
{
    return c == '\n' || c == '\r';
}
 
/**
 * Read one multiline subtitle "event" into buf. Reading stops at the first
 * empty line (a double line break), which is not included; leading line
 * breaks are skipped. LF, CR and CRLF endings are all handled.
 */
void ff_subtitles_read_chunk(AVIOContext *pb, AVBPrint *buf)
{
    char eol_buf[5], last_was_cr = 0;
    int n = 0, i = 0, nb_eol = 0;

    av_bprint_clear(buf);

    for (;;) {
        char c = avio_r8(pb);

        if (!c)
            break;

        /* ignore all initial line breaks */
        if (n == 0 && is_eol(c))
            continue;

        /* line break buffering: we don't want to add the trailing \r\n */
        if (is_eol(c)) {
            /* count completed line breaks: LF, or a CR followed by more */
            nb_eol += c == '\n' || last_was_cr;
            if (nb_eol == 2)
                break; /* empty line: end of the event */
            eol_buf[i++] = c;
            if (i == sizeof(eol_buf) - 1)
                break; /* pathological run of break characters */
            last_was_cr = c == '\r';
            continue;
        }

        /* only one line break followed by data: we flush the line breaks
         * buffer */
        if (i) {
            eol_buf[i] = 0;
            av_bprintf(buf, "%s", eol_buf);
            i = nb_eol = 0;
        }

        av_bprint_chars(buf, c, 1);
        n++;
    }
}
/contrib/sdk/sources/ffmpeg/libavformat/subtitles.h
0,0 → 1,124
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_SUBTITLES_H
#define AVFORMAT_SUBTITLES_H
 
#include <stdint.h>
#include "avformat.h"
#include "libavutil/bprint.h"
 
enum sub_sort {
SUB_SORT_TS_POS = 0, ///< sort by timestamps, then position
SUB_SORT_POS_TS, ///< sort by position, then timestamps
};
 
/** Generic in-memory subtitles queue shared by the text subtitle demuxers. */
typedef struct {
    AVPacket *subs;         ///< array of subtitles packets
    int nb_subs;            ///< number of subtitles packets
    int allocated_size;     ///< allocated size for subs
    int current_sub_idx;    ///< current position for the read packet callback
    enum sub_sort sort;     ///< sort method to use when finalizing subtitles
} FFDemuxSubtitlesQueue;
 
/**
* Insert a new subtitle event.
*
* @param event the subtitle line, may not be zero terminated
* @param len the length of the event (in strlen() sense, so without '\0')
* @param merge set to 1 if the current event should be concatenated with the
* previous one instead of adding a new entry, 0 otherwise
*/
AVPacket *ff_subtitles_queue_insert(FFDemuxSubtitlesQueue *q,
const uint8_t *event, int len, int merge);
 
/**
* Set missing durations and sort subtitles by PTS, and then byte position.
*/
void ff_subtitles_queue_finalize(FFDemuxSubtitlesQueue *q);
 
/**
* Generic read_packet() callback for subtitles demuxers using this queue
* system.
*/
int ff_subtitles_queue_read_packet(FFDemuxSubtitlesQueue *q, AVPacket *pkt);
 
/**
* Update current_sub_idx to emulate a seek. Except the first parameter, it
* matches AVInputFormat->read_seek2 prototypes.
*/
int ff_subtitles_queue_seek(FFDemuxSubtitlesQueue *q, AVFormatContext *s, int stream_index,
int64_t min_ts, int64_t ts, int64_t max_ts, int flags);
 
/**
* Remove and destroy all the subtitles packets.
*/
void ff_subtitles_queue_clean(FFDemuxSubtitlesQueue *q);
 
/**
* SMIL helper to load next chunk ("<...>" or untagged content) in buf.
*
* @param c cached character, to avoid a backward seek
*/
int ff_smil_extract_next_chunk(AVIOContext *pb, AVBPrint *buf, char *c);
 
/**
* SMIL helper to point on the value of an attribute in the given tag.
*
* @param s SMIL tag ("<...>")
* @param attr the attribute to look for
*/
const char *ff_smil_get_attr_ptr(const char *s, const char *attr);
 
/**
* @brief Read a subtitles chunk.
*
* A chunk is defined by a multiline "event", ending with a second line break.
* The trailing line breaks are trimmed. CRLF are supported.
* Example: "foo\r\nbar\r\n\r\nnext" will print "foo\r\nbar" into buf, and pb
* will focus on the 'n' of the "next" string.
*
* @param pb I/O context
* @param buf an initialized buf where the chunk is written
*
* @note buf is cleared before writing into it.
*/
void ff_subtitles_read_chunk(AVIOContext *pb, AVBPrint *buf);
 
/**
* Get the number of characters to increment to jump to the next line, or to
* the end of the string.
* The function handles the following line breaks schemes:
* LF, CRLF (MS), or standalone CR (old MacOS).
*/
/* Return the offset of the start of the next line (or of the terminating
 * NUL), handling LF, CRLF and standalone CR endings. */
static av_always_inline int ff_subtitles_next_line(const char *ptr)
{
    int n = strcspn(ptr, "\r\n");

    if (ptr[n] == '\r') {
        n++;
        if (ptr[n] == '\n')
            n++;
    } else if (ptr[n] == '\n') {
        n++;
    }
    return n;
}
 
#endif /* AVFORMAT_SUBTITLES_H */
/contrib/sdk/sources/ffmpeg/libavformat/subviewer1dec.c
0,0 → 1,124
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* SubViewer v1 subtitle demuxer
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
 
typedef struct {
    FFDemuxSubtitlesQueue q; // queue of events parsed in read_header
} SubViewer1Context;
 
/* Probe: look for the mandatory SubViewer v1 script marker anywhere in the
 * probe buffer (AVProbeData buffers are zero padded, so strstr is safe).
 * Fix: p->buf is unsigned char*, which is not compatible with strstr()'s
 * const char* parameter (constraint violation, error under -Werror); use a
 * char pointer explicitly. */
static int subviewer1_probe(AVProbeData *p)
{
    const char *ptr = (const char *)p->buf;

    if (strstr(ptr, "******** START SCRIPT ********"))
        return AVPROBE_SCORE_EXTENSION;
    return 0;
}
 
/* Parse the whole file up front and queue every subtitle event.
 * SubViewer v1 syntax: "[hh:mm:ss]" lines carry timestamps; a non-empty
 * text line after a timestamp starts an event, an empty one closes the
 * previous event. A [DELAY] section shifts all following timestamps. */
static int subviewer1_read_header(AVFormatContext *s)
{
    int delay = 0;
    AVPacket *sub = NULL; /* most recently opened event, for duration fixup */
    SubViewer1Context *subviewer1 = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1); /* timestamps are plain seconds */
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id = AV_CODEC_ID_SUBVIEWER1;

    while (!url_feof(s->pb)) {
        char line[4096];
        int len = ff_get_line(s->pb, line, sizeof(line));
        int hh, mm, ss;

        if (!len)
            break;

        if (!strncmp(line, "[DELAY]", 7)) {
            /* the next line holds the delay value in seconds */
            ff_get_line(s->pb, line, sizeof(line));
            sscanf(line, "%d", &delay);
        }

        if (sscanf(line, "[%d:%d:%d]", &hh, &mm, &ss) == 3) {
            const int64_t pos = avio_tell(s->pb);
            int64_t pts_start = hh*3600LL + mm*60LL + ss + delay;

            len = ff_get_line(s->pb, line, sizeof(line));
            line[strcspn(line, "\r\n")] = 0; /* strip the line ending */
            if (!*line) {
                /* empty text: this timestamp closes the previous event */
                if (sub)
                    sub->duration = pts_start - sub->pts;
            } else {
                /* NOTE(review): len still counts the stripped line ending,
                 * so the queued packet keeps trailing '\0'/'\n' bytes —
                 * confirm the decoder tolerates this. */
                sub = ff_subtitles_queue_insert(&subviewer1->q, line, len, 0);
                if (!sub)
                    return AVERROR(ENOMEM);
                sub->pos = pos;
                sub->pts = pts_start;
                sub->duration = -1; /* fixed up in queue_finalize */
            }
        }
    }

    ff_subtitles_queue_finalize(&subviewer1->q);
    return 0;
}
 
static int subviewer1_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    /* Hand out the next event buffered during read_header. */
    SubViewer1Context *ctx = s->priv_data;

    return ff_subtitles_queue_read_packet(&ctx->q, pkt);
}
 
static int subviewer1_read_seek(AVFormatContext *s, int stream_index,
                                int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    /* Seeking is a search over the pre-parsed, sorted event queue. */
    SubViewer1Context *ctx = s->priv_data;

    return ff_subtitles_queue_seek(&ctx->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
static int subviewer1_read_close(AVFormatContext *s)
{
    /* Release every event buffered by read_header. */
    SubViewer1Context *ctx = s->priv_data;

    ff_subtitles_queue_clean(&ctx->q);
    return 0;
}
 
/* Demuxer definition; packet delivery and seeking are delegated to the
 * generic subtitles queue filled in read_header. */
AVInputFormat ff_subviewer1_demuxer = {
    .name           = "subviewer1",
    .long_name      = NULL_IF_CONFIG_SMALL("SubViewer v1 subtitle format"),
    .priv_data_size = sizeof(SubViewer1Context),
    .read_probe     = subviewer1_probe,
    .read_header    = subviewer1_read_header,
    .read_packet    = subviewer1_read_packet,
    .read_seek2     = subviewer1_read_seek,
    .read_close     = subviewer1_read_close,
    .extensions     = "sub",
};
/contrib/sdk/sources/ffmpeg/libavformat/subviewerdec.c
0,0 → 1,194
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* SubViewer subtitle demuxer
* @see https://en.wikipedia.org/wiki/SubViewer
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
#include "libavcodec/internal.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/intreadwrite.h"
 
typedef struct {
FFDemuxSubtitlesQueue q;
} SubViewerContext;
 
static int subviewer_probe(AVProbeData *p)
{
    char trailing;
    const unsigned char *ptr = p->buf;

    /* Skip a UTF-8 byte-order mark if present. */
    if (AV_RB24(ptr) == 0xEFBBBF)
        ptr += 3;

    /* A leading "hh:mm:ss.cc,hh:mm:ss.cc" timestamp pair is a strong hint. */
    if (sscanf(ptr, "%*u:%*u:%*u.%*u,%*u:%*u:%*u.%*u%c", &trailing) == 1)
        return AVPROBE_SCORE_EXTENSION;

    /* So is the [INFORMATION] header section. */
    if (!strncmp(ptr, "[INFORMATION]", 13))
        return AVPROBE_SCORE_MAX/3;

    return 0;
}
 
/**
 * Parse a SubViewer timestamp pair "hh:mm:ss.cc,hh:mm:ss.cc".
 *
 * @param s        input line
 * @param start    output: start time in centiseconds
 * @param duration output: (end - start) in centiseconds
 * @return 0 on success, -1 if the line is not a timestamp pair
 *
 * Fix: the "%u" conversions were previously given pointers to (signed)
 * int, which is undefined behavior per the C standard; the fields are now
 * declared unsigned to match the format string.
 */
static int read_ts(const char *s, int64_t *start, int *duration)
{
    int64_t end;
    unsigned hh1, mm1, ss1, ms1;
    unsigned hh2, mm2, ss2, ms2;

    if (sscanf(s, "%u:%u:%u.%u,%u:%u:%u.%u",
               &hh1, &mm1, &ss1, &ms1, &hh2, &mm2, &ss2, &ms2) == 8) {
        /* the fractional part is centiseconds, hence the *100 scaling */
        end    = (hh2*3600LL + mm2*60LL + ss2) * 100LL + ms2;
        *start = (hh1*3600LL + mm1*60LL + ss1) * 100LL + ms1;
        *duration = end - *start;
        return 0;
    }
    return -1;
}
 
/* Parse the whole SubViewer file up front. Header sections ("[...]" lines
 * before [SUBTITLE]/[END INFORMATION]) go to codec extradata and file
 * metadata; timestamp lines start events; following non-empty lines are
 * accumulated as the event text. Returns 0 or a negative AVERROR code. */
static int subviewer_read_header(AVFormatContext *s)
{
    SubViewerContext *subviewer = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    AVBPrint header;
    int res = 0, new_event = 1;      /* new_event: next text line starts a fresh event */
    int64_t pts_start = AV_NOPTS_VALUE;
    int duration = -1;
    AVPacket *sub = NULL;

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 100);   /* timestamps in centiseconds */
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id = AV_CODEC_ID_SUBVIEWER;

    av_bprint_init(&header, 0, AV_BPRINT_SIZE_UNLIMITED);

    while (!url_feof(s->pb)) {
        char line[2048];
        int64_t pos = 0;
        int len = ff_get_line(s->pb, line, sizeof(line));

        if (!len)
            break;

        line[strcspn(line, "\r\n")] = 0;   /* strip trailing EOL */

        if (line[0] == '[' && strncmp(line, "[br]", 4)) {

            /* ignore event style, XXX: add to side_data? */
            if (strstr(line, "[COLF]") || strstr(line, "[SIZE]") ||
                strstr(line, "[FONT]") || strstr(line, "[STYLE]"))
                continue;

            if (!st->codec->extradata) { // header not finalized yet
                av_bprintf(&header, "%s\n", line);
                if (!strncmp(line, "[END INFORMATION]", 17) || !strncmp(line, "[SUBTITLE]", 10)) {
                    /* end of header */
                    res = avpriv_bprint_to_extradata(st->codec, &header);
                    if (res < 0)
                        goto end;
                } else if (strncmp(line, "[INFORMATION]", 13)) {
                    /* assume file metadata at this point:
                     * "[KEY] value" -> lowercase key + value pair */
                    int i, j = 0;
                    char key[32], value[128];

                    for (i = 1; i < sizeof(key) - 1 && line[i] && line[i] != ']'; i++)
                        key[i - 1] = av_tolower(line[i]);
                    key[i - 1] = 0;

                    if (line[i] == ']')
                        i++;
                    while (line[i] == ' ')
                        i++;
                    while (j < sizeof(value) - 1 && line[i] && line[i] != ']')
                        value[j++] = line[i++];
                    value[j] = 0;

                    av_dict_set(&s->metadata, key, value, 0);
                }
            }
        } else if (read_ts(line, &pts_start, &duration) >= 0) {
            /* timestamp line: the next text line starts a new event */
            new_event = 1;
            pos = avio_tell(s->pb);
        } else if (*line) {
            if (!new_event) {
                /* continuation line: join to the previous chunk with '\n' */
                sub = ff_subtitles_queue_insert(&subviewer->q, "\n", 1, 1);
                if (!sub) {
                    res = AVERROR(ENOMEM);
                    goto end;
                }
            }
            sub = ff_subtitles_queue_insert(&subviewer->q, line, strlen(line), !new_event);
            if (!sub) {
                res = AVERROR(ENOMEM);
                goto end;
            }
            if (new_event) {
                sub->pos = pos;
                sub->pts = pts_start;
                sub->duration = duration;
            }
            new_event = 0;
        }
    }

    ff_subtitles_queue_finalize(&subviewer->q);

end:
    av_bprint_finalize(&header, NULL);
    return res;
}
 
static int subviewer_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    /* Hand out the next event buffered during read_header. */
    SubViewerContext *ctx = s->priv_data;

    return ff_subtitles_queue_read_packet(&ctx->q, pkt);
}
 
static int subviewer_read_seek(AVFormatContext *s, int stream_index,
                               int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    /* Seeking is a search over the pre-parsed, sorted event queue. */
    SubViewerContext *ctx = s->priv_data;

    return ff_subtitles_queue_seek(&ctx->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
static int subviewer_read_close(AVFormatContext *s)
{
    /* Release every event buffered by read_header. */
    SubViewerContext *ctx = s->priv_data;

    ff_subtitles_queue_clean(&ctx->q);
    return 0;
}
 
/* Demuxer definition; packet delivery and seeking are delegated to the
 * generic subtitles queue filled in read_header. */
AVInputFormat ff_subviewer_demuxer = {
    .name           = "subviewer",
    .long_name      = NULL_IF_CONFIG_SMALL("SubViewer subtitle format"),
    .priv_data_size = sizeof(SubViewerContext),
    .read_probe     = subviewer_probe,
    .read_header    = subviewer_read_header,
    .read_packet    = subviewer_read_packet,
    .read_seek2     = subviewer_read_seek,
    .read_close     = subviewer_read_close,
    .extensions     = "sub",
};
/contrib/sdk/sources/ffmpeg/libavformat/swf.c
0,0 → 1,29
/*
* Flash Compatible Streaming Format
* Copyright (c) 2000 Fabrice Bellard
* Copyright (c) 2003 Tinic Uro
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "internal.h"
 
/* Map between SWF VideoStream codec ids and FFmpeg codec ids; shared by
 * the SWF demuxer and muxer. The list is AV_CODEC_ID_NONE-terminated. */
const AVCodecTag ff_swf_codec_tags[] = {
    { AV_CODEC_ID_FLV1, 0x02 },
    { AV_CODEC_ID_VP6F, 0x04 },
    { AV_CODEC_ID_NONE, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/swf.h
0,0 → 1,146
/*
* Flash Compatible Streaming Format common header.
* Copyright (c) 2000 Fabrice Bellard
* Copyright (c) 2003 Tinic Uro
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_SWF_H
#define AVFORMAT_SWF_H
 
#include "config.h"
 
#if CONFIG_ZLIB
#include <zlib.h>
#endif
 
#include "libavutil/fifo.h"
#include "avformat.h"
#include "avio.h"
#include "internal.h"
 
/* should have a generic way to indicate probable size */
#define DUMMY_FILE_SIZE (100 * 1024 * 1024)
#define DUMMY_DURATION 600 /* in seconds */
 
/* SWF tag type codes, as they appear in record headers in the file. */
enum {
    TAG_END                = 0,
    TAG_SHOWFRAME          = 1,
    TAG_DEFINESHAPE        = 2,
    TAG_FREECHARACTER      = 3,
    TAG_PLACEOBJECT        = 4,
    TAG_REMOVEOBJECT       = 5,
    TAG_DEFINEBITS         = 6,
    TAG_DEFINEBUTTON       = 7,
    TAG_JPEGTABLES         = 8,
    TAG_SETBACKGROUNDCOLOR = 9,
    TAG_DEFINEFONT         = 10,
    TAG_DEFINETEXT         = 11,
    TAG_DOACTION           = 12,
    TAG_DEFINEFONTINFO     = 13,
    TAG_DEFINESOUND        = 14,
    TAG_STARTSOUND         = 15,
    TAG_DEFINEBUTTONSOUND  = 17,
    TAG_STREAMHEAD         = 18,
    TAG_STREAMBLOCK        = 19,
    TAG_DEFINEBITSLOSSLESS = 20,
    TAG_JPEG2              = 21,
    TAG_DEFINESHAPE2       = 22,
    TAG_DEFINEBUTTONCXFORM = 23,
    TAG_PROTECT            = 24,
    TAG_PLACEOBJECT2       = 26,
    TAG_REMOVEOBJECT2      = 28,
    TAG_DEFINESHAPE3       = 32,
    TAG_DEFINETEXT2        = 33,
    TAG_DEFINEBUTTON2      = 34,
    TAG_DEFINEBITSJPEG3    = 35,
    TAG_DEFINEBITSLOSSLESS2 = 36,
    TAG_DEFINEEDITTEXT     = 37,
    TAG_DEFINESPRITE       = 39,
    TAG_FRAMELABEL         = 43,
    TAG_STREAMHEAD2        = 45,
    TAG_DEFINEMORPHSHAPE   = 46,
    TAG_DEFINEFONT2        = 48,
    TAG_EXPORTASSETS       = 56,
    TAG_IMPORTASSETS       = 57,
    TAG_ENABLEDEBUGGER     = 58,
    TAG_DOINITACTION       = 59,
    TAG_VIDEOSTREAM        = 60,
    TAG_VIDEOFRAME         = 61,
    TAG_DEFINEFONTINFO2    = 62,
    TAG_ENABLEDEBUGGER2    = 64,
    TAG_SCRIPTLIMITS       = 65,
    TAG_SETTABINDEX        = 66,
    TAG_FILEATTRIBUTES     = 69,
    TAG_PLACEOBJECT3       = 70,
    TAG_IMPORTASSETS2      = 71,
    TAG_DEFINEFONTALIGNZONES = 73,
    TAG_CSMTEXTSETTINGS    = 74,
    TAG_DEFINEFONT3        = 75,
    TAG_SYMBOLCLASS        = 76,
    TAG_METADATA           = 77,
    TAG_DEFINESCALINGGRID  = 78,
    TAG_DOABC              = 82,
    TAG_DEFINESHAPE4       = 83,
    TAG_DEFINEMORPHSHAPE2  = 84,
    TAG_DEFINESCENEANDFRAMELABELDATA = 86,
    TAG_DEFINEBINARYDATA   = 87,
    TAG_DEFINEFONTNAME     = 88,
    TAG_STARTSOUND2        = 89,
    TAG_DEFINEBITSJPEG4    = 90,
    TAG_DEFINEFONT4        = 91,
};
 
/* Flag OR'ed into a tag code to force the long (32-bit length) record form. */
#define TAG_LONG 0x100

/* flags for shape definition */
#define FLAG_MOVETO   0x01
#define FLAG_SETFILL0 0x02
#define FLAG_SETFILL1 0x04

/* Muxer audio staging buffer size (bytes). */
#define AUDIO_FIFO_SIZE 65536

/* character id used */
#define BITMAP_ID 0
#define VIDEO_ID 0
#define SHAPE_ID 1
 
/* Private state shared by the SWF demuxer and muxer. */
typedef struct SWFContext {
    int64_t duration_pos;        /* muxer: file offset of the frame-count field, patched at the end */
    int64_t tag_pos;             /* muxer: offset of the currently open record header */
    int64_t vframes_pos;         /* muxer: offset of the video frame-count field */
    int samples_per_frame;       /* audio samples per video frame */
    int sound_samples;           /* muxer: audio samples written so far */
    int swf_frame_number;        /* muxer: SWF frames emitted so far */
    int video_frame_number;      /* muxer: video frames emitted so far */
    int frame_rate;              /* 8.8 fixed-point frame rate from the file header */
    int tag;                     /* muxer: tag code of the currently open record */
    AVFifoBuffer *audio_fifo;    /* muxer: staging buffer for interleaved audio */
    AVCodecContext *audio_enc, *video_enc;
#if CONFIG_ZLIB
    AVIOContext *zpb;            /* demuxer: decompressing I/O context for "CWS" files */
#define ZBUF_SIZE 4096
    uint8_t *zbuf_in;            /* compressed input window */
    uint8_t *zbuf_out;           /* decompressed output buffer backing zpb */
    z_stream zstream;
#endif
} SWFContext;
 
extern const AVCodecTag ff_swf_codec_tags[];
 
#endif /* AVFORMAT_SWF_H */
/contrib/sdk/sources/ffmpeg/libavformat/swfdec.c
0,0 → 1,489
/*
* Flash Compatible Streaming Format demuxer
* Copyright (c) 2000 Fabrice Bellard
* Copyright (c) 2003 Tinic Uro
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "swf.h"
 
/* Map between SWF sound-format codes (bits 4-7 of the sound info byte)
 * and FFmpeg codec ids. 0x00/0x03 are PCM variants; the exact id is
 * refined later from the sample-size bit. AV_CODEC_ID_NONE-terminated. */
static const AVCodecTag swf_audio_codec_tags[] = {
    { AV_CODEC_ID_PCM_S16LE,  0x00 },
    { AV_CODEC_ID_ADPCM_SWF,  0x01 },
    { AV_CODEC_ID_MP3,        0x02 },
    { AV_CODEC_ID_PCM_S16LE,  0x03 },
    // { AV_CODEC_ID_NELLYMOSER, 0x06 },
    { AV_CODEC_ID_NONE,       0 },
};
 
/* Read one SWF record header: the tag code sits in the top 10 bits of a
 * 16-bit word, the body length in the low 6 bits; a length of 0x3f means
 * a 32-bit length follows. Returns the tag code or AVERROR_EOF. */
static int get_swf_tag(AVIOContext *pb, int *len_ptr)
{
    int code_and_len, length;

    if (url_feof(pb))
        return AVERROR_EOF;

    code_and_len = avio_rl16(pb);
    length = code_and_len & 0x3f;
    if (length == 0x3f)
        length = avio_rl32(pb);   /* long record form */
    *len_ptr = length;
    return code_and_len >> 6;
}
 
 
static int swf_probe(AVProbeData *p)
{
    /* Uncompressed files start with "FWS", zlib-compressed ones with "CWS". */
    const uint8_t *b = p->buf;

    if (b[1] != 'W' || b[2] != 'S')
        return 0;
    if (b[0] != 'F' && b[0] != 'C')
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
#if CONFIG_ZLIB
/* Read callback for the decompressing AVIOContext: inflates compressed
 * bytes from the real input (s->pb) into buf. Returns the number of
 * decompressed bytes, or a negative AVERROR code / AVERROR_EOF. */
static int zlib_refill(void *opaque, uint8_t *buf, int buf_size)
{
    AVFormatContext *s = opaque;
    SWFContext *swf = s->priv_data;
    z_stream *z = &swf->zstream;
    int ret;

retry:
    if (!z->avail_in) {
        /* refill the compressed-input window */
        int n = avio_read(s->pb, swf->zbuf_in, ZBUF_SIZE);
        if (n < 0)
            return n;
        z->next_in  = swf->zbuf_in;
        z->avail_in = n;
    }

    z->next_out  = buf;
    z->avail_out = buf_size;

    ret = inflate(z, Z_NO_FLUSH);
    if (ret < 0)
        return AVERROR(EINVAL);
    if (ret == Z_STREAM_END)
        return AVERROR_EOF;

    if (buf_size - z->avail_out == 0)
        goto retry;   /* input consumed but no output yet: read more */

    return buf_size - z->avail_out;
}
#endif
 
/* Parse the SWF file header: check the FWS/CWS signature, set up the
 * zlib-decompressing I/O context for CWS files, skip the stage rectangle
 * and read the frame rate. Streams are created lazily while reading
 * packets, hence AVFMTCTX_NOHEADER. */
static int swf_read_header(AVFormatContext *s)
{
    SWFContext *swf = s->priv_data;
    AVIOContext *pb = s->pb;
    int nbits, len, tag;

    tag = avio_rb32(pb) & 0xffffff00;   /* signature; low byte is the version */
    avio_rl32(pb);                      /* file length, unused */

    if (tag == MKBETAG('C', 'W', 'S', 0)) {
        av_log(s, AV_LOG_INFO, "SWF compressed file detected\n");
#if CONFIG_ZLIB
        swf->zbuf_in  = av_malloc(ZBUF_SIZE);
        swf->zbuf_out = av_malloc(ZBUF_SIZE);
        swf->zpb = avio_alloc_context(swf->zbuf_out, ZBUF_SIZE, 0, s,
                                      zlib_refill, NULL, NULL);
        if (!swf->zbuf_in || !swf->zbuf_out || !swf->zpb)
            return AVERROR(ENOMEM);
        swf->zpb->seekable = 0;
        if (inflateInit(&swf->zstream) != Z_OK) {
            av_log(s, AV_LOG_ERROR, "Unable to init zlib context\n");
            return AVERROR(EINVAL);
        }
        pb = swf->zpb;   /* all further reads go through the inflater */
#else
        av_log(s, AV_LOG_ERROR, "zlib support is required to read SWF compressed files\n");
        return AVERROR(EIO);
#endif
    } else if (tag != MKBETAG('F', 'W', 'S', 0))
        return AVERROR(EIO);
    /* skip rectangle size */
    nbits = avio_r8(pb) >> 3;
    len = (4 * nbits - 3 + 7) / 8;   /* remaining bytes of the RECT after the first */
    avio_skip(pb, len);
    swf->frame_rate = avio_rl16(pb); /* 8.8 fixed */
    avio_rl16(pb); /* frame count */

    swf->samples_per_frame = 0;
    s->ctx_flags |= AVFMTCTX_NOHEADER;
    return 0;
}
 
/* Create an audio stream from the SWF sound info byte:
 * bit 0 = stereo, bit 1 = 16-bit, bits 2-3 = rate code, bits 4-7 = codec.
 * Returns the new stream or NULL on allocation failure. */
static AVStream *create_new_audio_stream(AVFormatContext *s, int id, int info)
{
    int rate_code, is_16bit;
    AVStream *ast = avformat_new_stream(s, NULL);

    if (!ast)
        return NULL;
    ast->id = id;
    if (info & 1) {
        ast->codec->channels       = 2;
        ast->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    } else {
        ast->codec->channels       = 1;
        ast->codec->channel_layout = AV_CH_LAYOUT_MONO;
    }
    ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    ast->codec->codec_id   = ff_codec_get_id(swf_audio_codec_tags, (info >> 4) & 15);
    ast->need_parsing      = AVSTREAM_PARSE_FULL;
    rate_code = (info >> 2) & 3;
    is_16bit  = (info >> 1) & 1;
    /* 8-bit PCM is unsigned in SWF */
    if (!is_16bit && ast->codec->codec_id == AV_CODEC_ID_PCM_S16LE)
        ast->codec->codec_id = AV_CODEC_ID_PCM_U8;
    ast->codec->sample_rate = 44100 >> (3 - rate_code);
    avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
    return ast;
}
 
/* Walk SWF records until one yields a packet. Streams are created on the
 * fly when VIDEOSTREAM / STREAMHEAD / DEFINESOUND tags are met; VIDEOFRAME,
 * STREAMBLOCK, JPEG2 and DEFINEBITSLOSSLESS(2) tags produce packets.
 * All other tags are skipped.
 * Fix: corrected "Cliping" -> "Clipping" in the length-clamp warning. */
static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SWFContext *swf = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *vst = NULL, *ast = NULL, *st = 0;
    int tag, len, i, frame, v, res;

#if CONFIG_ZLIB
    if (swf->zpb)
        pb = swf->zpb;   /* read through the inflater for CWS files */
#endif

    for(;;) {
        uint64_t pos = avio_tell(pb);
        tag = get_swf_tag(pb, &len);
        if (tag < 0)
            return tag;
        if (len < 0) {
            av_log(s, AV_LOG_ERROR, "invalid tag length: %d\n", len);
            return AVERROR_INVALIDDATA;
        }
        if (tag == TAG_VIDEOSTREAM) {
            /* declares a video stream; no packet is produced here */
            int ch_id = avio_rl16(pb);
            len -= 2;

            for (i=0; i<s->nb_streams; i++) {
                st = s->streams[i];
                if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && st->id == ch_id)
                    goto skip;   /* already known */
            }

            avio_rl16(pb);   /* frame count */
            avio_rl16(pb);   /* width */
            avio_rl16(pb);   /* height */
            avio_r8(pb);     /* flags */
            /* Check for FLV1 */
            vst = avformat_new_stream(s, NULL);
            if (!vst)
                return AVERROR(ENOMEM);
            vst->id = ch_id;
            vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            vst->codec->codec_id = ff_codec_get_id(ff_swf_codec_tags, avio_r8(pb));
            avpriv_set_pts_info(vst, 16, 256, swf->frame_rate);
            len -= 8;
        } else if (tag == TAG_STREAMHEAD || tag == TAG_STREAMHEAD2) {
            /* streaming found */

            for (i=0; i<s->nb_streams; i++) {
                st = s->streams[i];
                if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->id == -1)
                    goto skip;   /* already known */
            }

            avio_r8(pb);   /* playback info, unused */
            v = avio_r8(pb);
            swf->samples_per_frame = avio_rl16(pb);
            ast = create_new_audio_stream(s, -1, v); /* -1 to avoid clash with video stream ch_id */
            if (!ast)
                return AVERROR(ENOMEM);
            len -= 4;
        } else if (tag == TAG_DEFINESOUND) {
            /* audio stream */
            int ch_id = avio_rl16(pb);

            for (i=0; i<s->nb_streams; i++) {
                st = s->streams[i];
                if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->id == ch_id)
                    goto skip;
            }

            // FIXME: The entire audio stream is stored in a single chunk/tag. Normally,
            // these are smaller audio streams in DEFINESOUND tags, but it's technically
            // possible they could be huge. Break it up into multiple packets if it's big.
            v = avio_r8(pb);
            ast = create_new_audio_stream(s, ch_id, v);
            if (!ast)
                return AVERROR(ENOMEM);
            ast->duration = avio_rl32(pb); // number of samples
            if (((v>>4) & 15) == 2) { // MP3 sound data record
                ast->skip_samples = avio_rl16(pb);
                len -= 2;
            }
            len -= 7;
            if ((res = av_get_packet(pb, pkt, len)) < 0)
                return res;
            pkt->pos = pos;
            pkt->stream_index = ast->index;
            return pkt->size;
        } else if (tag == TAG_VIDEOFRAME) {
            int ch_id = avio_rl16(pb);
            len -= 2;
            for(i=0; i<s->nb_streams; i++) {
                st = s->streams[i];
                if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && st->id == ch_id) {
                    frame = avio_rl16(pb);   /* frame number is the pts */
                    len -= 2;
                    if (len <= 0)
                        goto skip;
                    if ((res = av_get_packet(pb, pkt, len)) < 0)
                        return res;
                    pkt->pos = pos;
                    pkt->pts = frame;
                    pkt->stream_index = st->index;
                    return pkt->size;
                }
            }
        } else if (tag == TAG_DEFINEBITSLOSSLESS || tag == TAG_DEFINEBITSLOSSLESS2) {
#if CONFIG_ZLIB
            /* zlib-compressed bitmap, delivered as one rawvideo packet */
            long out_len;
            uint8_t *buf = NULL, *zbuf = NULL, *pal;
            uint32_t colormap[AVPALETTE_COUNT] = {0};
            const int alpha_bmp = tag == TAG_DEFINEBITSLOSSLESS2;
            const int colormapbpp = 3 + alpha_bmp;
            int linesize, colormapsize = 0;

            const int ch_id   = avio_rl16(pb);
            const int bmp_fmt = avio_r8(pb);
            const int width   = avio_rl16(pb);
            const int height  = avio_rl16(pb);

            len -= 2+1+2+2;

            switch (bmp_fmt) {
            case 3: // PAL-8
                linesize = width;
                colormapsize = avio_r8(pb) + 1;
                len--;
                break;
            case 4: // RGB15
                linesize = width * 2;
                break;
            case 5: // RGB24 (0RGB)
                linesize = width * 4;
                break;
            default:
                av_log(s, AV_LOG_ERROR, "invalid bitmap format %d, skipped\n", bmp_fmt);
                goto bitmap_end_skip;
            }

            linesize = FFALIGN(linesize, 4);

            /* guard the linesize*height and colormap arithmetic below */
            if (av_image_check_size(width, height, 0, s) < 0 ||
                linesize >= INT_MAX / height ||
                linesize * height >= INT_MAX - colormapsize * colormapbpp) {
                av_log(s, AV_LOG_ERROR, "invalid frame size %dx%d\n", width, height);
                goto bitmap_end_skip;
            }

            out_len = colormapsize * colormapbpp + linesize * height;

            av_dlog(s, "bitmap: ch=%d fmt=%d %dx%d (linesize=%d) len=%d->%ld pal=%d\n",
                    ch_id, bmp_fmt, width, height, linesize, len, out_len, colormapsize);

            zbuf = av_malloc(len);
            buf  = av_malloc(out_len);
            if (!zbuf || !buf) {
                res = AVERROR(ENOMEM);
                goto bitmap_end;
            }

            len = avio_read(pb, zbuf, len);
            if (len < 0 || (res = uncompress(buf, &out_len, zbuf, len)) != Z_OK) {
                av_log(s, AV_LOG_WARNING, "Failed to uncompress one bitmap\n");
                goto bitmap_end_skip;
            }

            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && st->id == -3)
                    break;
            }
            if (i == s->nb_streams) {
                vst = avformat_new_stream(s, NULL);
                if (!vst) {
                    res = AVERROR(ENOMEM);
                    goto bitmap_end;
                }
                vst->id = -3; /* -3 to avoid clash with video stream and audio stream */
                vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                vst->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
                avpriv_set_pts_info(vst, 64, 256, swf->frame_rate);
                st = vst;
            }
            st->codec->width  = width;
            st->codec->height = height;

            if ((res = av_new_packet(pkt, out_len - colormapsize * colormapbpp)) < 0)
                goto bitmap_end;
            pkt->pos = pos;
            pkt->stream_index = st->index;

            switch (bmp_fmt) {
            case 3:
                st->codec->pix_fmt = AV_PIX_FMT_PAL8;
                for (i = 0; i < colormapsize; i++)
                    if (alpha_bmp)  colormap[i] = buf[3]<<24 | AV_RB24(buf + 4*i);
                    else            colormap[i] = 0xffU <<24 | AV_RB24(buf + 3*i);
                pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
                if (!pal) {
                    res = AVERROR(ENOMEM);
                    goto bitmap_end;
                }
                memcpy(pal, colormap, AVPALETTE_SIZE);
                break;
            case 4:
                st->codec->pix_fmt = AV_PIX_FMT_RGB555;
                break;
            case 5:
                st->codec->pix_fmt = alpha_bmp ? AV_PIX_FMT_ARGB : AV_PIX_FMT_0RGB;
                break;
            default:
                av_assert0(0);   /* unreachable: filtered above */
            }

            if (linesize * height > pkt->size) {
                res = AVERROR_INVALIDDATA;
                goto bitmap_end;
            }
            memcpy(pkt->data, buf + colormapsize*colormapbpp, linesize * height);

            res = pkt->size;

bitmap_end:
            av_freep(&zbuf);
            av_freep(&buf);
            return res;
bitmap_end_skip:
            av_freep(&zbuf);
            av_freep(&buf);
#else
            av_log(s, AV_LOG_ERROR, "this file requires zlib support compiled in\n");
#endif
        } else if (tag == TAG_STREAMBLOCK) {
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->id == -1) {
                    if (st->codec->codec_id == AV_CODEC_ID_MP3) {
                        avio_skip(pb, 4);   /* MP3: sample count + seek samples prefix */
                        len -= 4;
                        if (len <= 0)
                            goto skip;
                        if ((res = av_get_packet(pb, pkt, len)) < 0)
                            return res;
                    } else { // ADPCM, PCM
                        if (len <= 0)
                            goto skip;
                        if ((res = av_get_packet(pb, pkt, len)) < 0)
                            return res;
                    }
                    pkt->pos          = pos;
                    pkt->stream_index = st->index;
                    return pkt->size;
                }
            }
        } else if (tag == TAG_JPEG2) {
            for (i=0; i<s->nb_streams; i++) {
                st = s->streams[i];
                if (st->codec->codec_id == AV_CODEC_ID_MJPEG && st->id == -2)
                    break;
            }
            if (i == s->nb_streams) {
                vst = avformat_new_stream(s, NULL);
                if (!vst)
                    return AVERROR(ENOMEM);
                vst->id = -2; /* -2 to avoid clash with video stream and audio stream */
                vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                vst->codec->codec_id = AV_CODEC_ID_MJPEG;
                avpriv_set_pts_info(vst, 64, 256, swf->frame_rate);
                st = vst;
            }
            avio_rl16(pb); /* BITMAP_ID */
            len -= 2;
            if (len < 4)
                goto skip;
            if ((res = av_new_packet(pkt, len)) < 0)
                return res;
            avio_read(pb, pkt->data, 4);
            if (AV_RB32(pkt->data) == 0xffd8ffd9 ||
                AV_RB32(pkt->data) == 0xffd9ffd8) {
                /* old SWF files containing SOI/EOI as data start */
                /* files created by swink have reversed tag */
                pkt->size -= 4;
                avio_read(pb, pkt->data, pkt->size);
            } else {
                avio_read(pb, pkt->data + 4, pkt->size - 4);
            }
            pkt->pos = pos;
            pkt->stream_index = st->index;
            return pkt->size;
        } else {
            av_log(s, AV_LOG_DEBUG, "Unknown tag: %d\n", tag);
        }
    skip:
        if(len<0)
            av_log(s, AV_LOG_WARNING, "Clipping len %d\n", len);
        len = FFMAX(0, len);
        avio_skip(pb, len);
    }
}
 
#if CONFIG_ZLIB
static av_cold int swf_read_close(AVFormatContext *avctx)
{
    /* Tear down the zlib stream and the buffers backing the
     * decompressing AVIOContext. */
    SWFContext *swf = avctx->priv_data;

    inflateEnd(&swf->zstream);
    av_freep(&swf->zbuf_in);
    av_freep(&swf->zbuf_out);
    av_freep(&swf->zpb);
    return 0;
}
#endif
 
/* Demuxer definition; read_close is only needed when zlib buffers may
 * have been allocated for compressed (CWS) files. */
AVInputFormat ff_swf_demuxer = {
    .name           = "swf",
    .long_name      = NULL_IF_CONFIG_SMALL("SWF (ShockWave Flash)"),
    .priv_data_size = sizeof(SWFContext),
    .read_probe     = swf_probe,
    .read_header    = swf_read_header,
    .read_packet    = swf_read_packet,
#if CONFIG_ZLIB
    .read_close     = swf_read_close,
#endif
};
/contrib/sdk/sources/ffmpeg/libavformat/swfenc.c
0,0 → 1,545
/*
* Flash Compatible Streaming Format muxer
* Copyright (c) 2000 Fabrice Bellard
* Copyright (c) 2003 Tinic Uro
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/put_bits.h"
#include "libavutil/avassert.h"
#include "avformat.h"
#include "swf.h"
 
/* Open a SWF record: remember where its header goes and reserve space for
 * it; put_swf_end_tag() comes back later to patch in the real length. */
static void put_swf_tag(AVFormatContext *s, int tag)
{
    SWFContext *swf = s->priv_data;
    AVIOContext *pb = s->pb;

    swf->tag_pos = avio_tell(pb);
    swf->tag     = tag;
    /* reserve 2 header bytes, plus 4 more for the long-form length */
    avio_wl16(pb, 0);
    if (tag & TAG_LONG)
        avio_wl32(pb, 0);
}
 
/* Close the record opened by put_swf_tag(): seek back and write the real
 * record header now that the body length is known, then return to the
 * current write position. */
static void put_swf_end_tag(AVFormatContext *s)
{
    SWFContext *swf = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t pos;
    int tag_len, tag;

    pos = avio_tell(pb);
    tag_len = pos - swf->tag_pos - 2;   /* length excluding the 2 header bytes */
    tag = swf->tag;
    avio_seek(pb, swf->tag_pos, SEEK_SET);
    if (tag & TAG_LONG) {
        tag &= ~TAG_LONG;
        avio_wl16(pb, (tag << 6) | 0x3f);   /* 0x3f marks the long record form */
        avio_wl32(pb, tag_len - 4);         /* minus the 32-bit length field itself */
    } else {
        av_assert0(tag_len < 0x3f);         /* short form only holds 6 bits */
        avio_wl16(pb, (tag << 6) | tag_len);
    }
    avio_seek(pb, pos, SEEK_SET);
}
 
/* Widen *nbits_ptr so that a signed field of that many bits can hold val.
 * A value of 0 needs no extra bits and leaves *nbits_ptr untouched. */
static inline void max_nbits(int *nbits_ptr, int val)
{
    int needed = 1;   /* one bit for the sign */
    int mag;

    if (!val)
        return;
    for (mag = abs(val); mag; mag >>= 1)
        needed++;
    if (*nbits_ptr < needed)
        *nbits_ptr = needed;
}
 
/* Write a SWF RECT structure: a 5-bit field width followed by the four
 * coordinates, each packed in that many bits. */
static void put_swf_rect(AVIOContext *pb,
                         int xmin, int xmax, int ymin, int ymax)
{
    PutBitContext bits;
    uint8_t tmp[256];
    int width, mask;

    init_put_bits(&bits, tmp, sizeof(tmp));

    /* field width must fit the largest of the four coordinates */
    width = 0;
    max_nbits(&width, xmin);
    max_nbits(&width, xmax);
    max_nbits(&width, ymin);
    max_nbits(&width, ymax);
    mask = (1 << width) - 1;

    /* rectangle info */
    put_bits(&bits, 5, width);
    put_bits(&bits, width, xmin & mask);
    put_bits(&bits, width, xmax & mask);
    put_bits(&bits, width, ymin & mask);
    put_bits(&bits, width, ymax & mask);

    flush_put_bits(&bits);
    avio_write(pb, tmp, put_bits_ptr(&bits) - bits.buf);
}
 
/* Emit a straight-edge shape record for a line with deltas (dx, dy).
 * Purely horizontal or vertical lines use the shorter single-delta form. */
static void put_swf_line_edge(PutBitContext *pb, int dx, int dy)
{
    int nbits, mask;

    put_bits(pb, 1, 1); /* edge */
    put_bits(pb, 1, 1); /* line select */
    nbits = 2;          /* minimum delta field width per the format */
    max_nbits(&nbits, dx);
    max_nbits(&nbits, dy);

    mask = (1 << nbits) - 1;
    put_bits(pb, 4, nbits - 2); /* 16 bits precision */
    if (dx == 0) {
        /* vertical line: general-line flag 0, vertical flag 1 */
        put_bits(pb, 1, 0);
        put_bits(pb, 1, 1);
        put_bits(pb, nbits, dy & mask);
    } else if (dy == 0) {
        /* horizontal line: general-line flag 0, vertical flag 0 */
        put_bits(pb, 1, 0);
        put_bits(pb, 1, 0);
        put_bits(pb, nbits, dx & mask);
    } else {
        /* general line: both deltas follow */
        put_bits(pb, 1, 1);
        put_bits(pb, nbits, dx & mask);
        put_bits(pb, nbits, dy & mask);
    }
}
 
#define FRAC_BITS 16
 
/* Write a SWF MATRIX structure: scale pair (a, d), rotate/skew pair
 * (b, c) and translation (tx, ty), each group prefixed by its own 5-bit
 * field width. Fixed-point inputs are expected (see FRAC_BITS). */
static void put_swf_matrix(AVIOContext *pb,
                           int a, int b, int c, int d, int tx, int ty)
{
    PutBitContext p;
    uint8_t buf[256];
    int nbits;

    init_put_bits(&p, buf, sizeof(buf));

    put_bits(&p, 1, 1); /* a, d present */
    nbits = 1;
    max_nbits(&nbits, a);
    max_nbits(&nbits, d);
    put_bits(&p, 5, nbits); /* nb bits */
    put_bits(&p, nbits, a);
    put_bits(&p, nbits, d);

    put_bits(&p, 1, 1); /* b, c present */
    nbits = 1;
    max_nbits(&nbits, c);
    max_nbits(&nbits, b);
    put_bits(&p, 5, nbits); /* nb bits */
    put_bits(&p, nbits, c);
    put_bits(&p, nbits, b);

    /* translation is always present */
    nbits = 1;
    max_nbits(&nbits, tx);
    max_nbits(&nbits, ty);
    put_bits(&p, 5, nbits); /* nb bits */
    put_bits(&p, nbits, tx);
    put_bits(&p, nbits, ty);

    flush_put_bits(&p);
    avio_write(pb, buf, put_bits_ptr(&p) - p.buf);
}
 
/* Validate the stream layout (at most one MP3 audio and one VP6F/FLV1/
 * MJPEG video stream), then write the SWF file header: signature,
 * version, dummy size, stage rectangle, frame rate and frame count,
 * plus the static shape (for MJPEG) and stream-head (for MP3) records.
 * Returns 0 on success or a negative error code. */
static int swf_write_header(AVFormatContext *s)
{
    SWFContext *swf = s->priv_data;
    AVIOContext *pb = s->pb;
    PutBitContext p;
    uint8_t buf1[256];
    int i, width, height, rate, rate_base;
    int version;

    swf->sound_samples = 0;
    swf->swf_frame_number = 0;
    swf->video_frame_number = 0;

    /* pick the (single) audio and video encoder contexts */
    for(i=0;i<s->nb_streams;i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (swf->audio_enc) {
                av_log(s, AV_LOG_ERROR, "SWF muxer only supports 1 audio stream\n");
                return AVERROR_INVALIDDATA;
            }
            if (enc->codec_id == AV_CODEC_ID_MP3) {
                if (!enc->frame_size) {
                    av_log(s, AV_LOG_ERROR, "audio frame size not set\n");
                    return -1;
                }
                swf->audio_enc = enc;
                swf->audio_fifo= av_fifo_alloc(AUDIO_FIFO_SIZE);
                if (!swf->audio_fifo)
                    return AVERROR(ENOMEM);
            } else {
                av_log(s, AV_LOG_ERROR, "SWF muxer only supports MP3\n");
                return -1;
            }
        } else {
            if (swf->video_enc) {
                av_log(s, AV_LOG_ERROR, "SWF muxer only supports 1 video stream\n");
                return AVERROR_INVALIDDATA;
            }
            if (enc->codec_id == AV_CODEC_ID_VP6F ||
                enc->codec_id == AV_CODEC_ID_FLV1 ||
                enc->codec_id == AV_CODEC_ID_MJPEG) {
                swf->video_enc = enc;
            } else {
                av_log(s, AV_LOG_ERROR, "SWF muxer only supports VP6, FLV1 and MJPEG\n");
                return -1;
            }
        }
    }

    if (!swf->video_enc) {
        /* currently, cannot work correctly if audio only */
        width = 320;
        height = 200;
        rate = 10;
        rate_base= 1;
    } else {
        width = swf->video_enc->width;
        height = swf->video_enc->height;
        rate = swf->video_enc->time_base.den;
        rate_base = swf->video_enc->time_base.num;
    }

    if (!swf->audio_enc)
        swf->samples_per_frame = (44100.0 * rate_base) / rate;
    else
        swf->samples_per_frame = (swf->audio_enc->sample_rate * rate_base) / rate;

    avio_write(pb, "FWS", 3);   /* uncompressed signature */

    /* pick the lowest SWF version that supports the chosen codecs */
    if (!strcmp("avm2", s->oformat->name))
        version = 9;
    else if (swf->video_enc && swf->video_enc->codec_id == AV_CODEC_ID_VP6F)
        version = 8; /* version 8 and above support VP6 codec */
    else if (swf->video_enc && swf->video_enc->codec_id == AV_CODEC_ID_FLV1)
        version = 6; /* version 6 and above support FLV1 codec */
    else
        version = 4; /* version 4 for mpeg audio support */
    avio_w8(pb, version);

    avio_wl32(pb, DUMMY_FILE_SIZE); /* dummy size
                                       (will be patched if not streamed) */

    put_swf_rect(pb, 0, width * 20, 0, height * 20);   /* stage size, in twips */
    avio_wl16(pb, (rate * 256) / rate_base); /* frame rate */
    swf->duration_pos = avio_tell(pb);       /* frame count, patched in write_trailer */
    avio_wl16(pb, (uint16_t)(DUMMY_DURATION * (int64_t)rate / rate_base)); /* frame count */

    /* avm2/swf v9 (also v8?) files require a file attribute tag */
    if (version == 9) {
        put_swf_tag(s, TAG_FILEATTRIBUTES);
        avio_wl32(pb, 1<<3); /* set ActionScript v3/AVM2 flag */
        put_swf_end_tag(s);
    }

    /* define a shape with the jpeg inside */
    if (swf->video_enc && swf->video_enc->codec_id == AV_CODEC_ID_MJPEG) {
        put_swf_tag(s, TAG_DEFINESHAPE);

        avio_wl16(pb, SHAPE_ID); /* ID of shape */
        /* bounding rectangle */
        put_swf_rect(pb, 0, width, 0, height);
        /* style info */
        avio_w8(pb, 1); /* one fill style */
        avio_w8(pb, 0x41); /* clipped bitmap fill */
        avio_wl16(pb, BITMAP_ID); /* bitmap ID */
        /* position of the bitmap */
        put_swf_matrix(pb, (int)(1.0 * (1 << FRAC_BITS)), 0,
                       0, (int)(1.0 * (1 << FRAC_BITS)), 0, 0);
        avio_w8(pb, 0); /* no line style */

        /* shape drawing */
        init_put_bits(&p, buf1, sizeof(buf1));
        put_bits(&p, 4, 1); /* one fill bit */
        put_bits(&p, 4, 0); /* zero line bit */

        put_bits(&p, 1, 0); /* not an edge */
        put_bits(&p, 5, FLAG_MOVETO | FLAG_SETFILL0);
        put_bits(&p, 5, 1); /* nbits */
        put_bits(&p, 1, 0); /* X */
        put_bits(&p, 1, 0); /* Y */
        put_bits(&p, 1, 1); /* set fill style 1 */

        /* draw the rectangle ! */
        put_swf_line_edge(&p, width, 0);
        put_swf_line_edge(&p, 0, height);
        put_swf_line_edge(&p, -width, 0);
        put_swf_line_edge(&p, 0, -height);

        /* end of shape */
        put_bits(&p, 1, 0); /* not an edge */
        put_bits(&p, 5, 0);

        flush_put_bits(&p);
        avio_write(pb, buf1, put_bits_ptr(&p) - p.buf);

        put_swf_end_tag(s);
    }

    if (swf->audio_enc && swf->audio_enc->codec_id == AV_CODEC_ID_MP3) {
        int v = 0;

        /* start sound */
        put_swf_tag(s, TAG_STREAMHEAD2);
        switch(swf->audio_enc->sample_rate) {
        case 11025: v |= 1 << 2; break;
        case 22050: v |= 2 << 2; break;
        case 44100: v |= 3 << 2; break;
        default:
            /* not supported */
            av_log(s, AV_LOG_ERROR, "swf does not support that sample rate, choose from (44100, 22050, 11025).\n");
            return -1;
        }
        v |= 0x02; /* 16 bit playback */
        if (swf->audio_enc->channels == 2)
            v |= 0x01; /* stereo playback */
        avio_w8(s->pb, v);        /* playback format byte */
        v |= 0x20; /* mp3 compressed */
        avio_w8(s->pb, v);        /* stream format byte */
        avio_wl16(s->pb, swf->samples_per_frame); /* avg samples per frame */
        avio_wl16(s->pb, 0);      /* latency seek (MP3 only) */

        put_swf_end_tag(s);
    }

    avio_flush(s->pb);
    return 0;
}
 
/**
 * Write one video frame to the SWF stream, then flush any buffered audio
 * and emit a SHOWFRAME tag.
 *
 * For VP6F/FLV1 a VIDEOSTREAM object is created on the first frame and the
 * frame data goes into VIDEOFRAME tags; for MJPEG the previous shape/bitmap
 * is removed and the JPEG is re-defined and re-placed every frame.
 *
 * @param s    output format context
 * @param enc  codec context of the video (or audio, for audio-only files)
 * @param buf  encoded frame data (may be NULL when called with size 0)
 * @param size size of buf in bytes
 * @return 0 on success
 */
static int swf_write_video(AVFormatContext *s,
                           AVCodecContext *enc, const uint8_t *buf, int size)
{
    SWFContext *swf = s->priv_data;
    AVIOContext *pb = s->pb;

    /* Flash Player limit */
    if (swf->swf_frame_number == 16000)
        av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");

    if (enc->codec_id == AV_CODEC_ID_VP6F ||
        enc->codec_id == AV_CODEC_ID_FLV1) {
        if (swf->video_frame_number == 0) {
            /* create a new video object */
            put_swf_tag(s, TAG_VIDEOSTREAM);
            avio_wl16(pb, VIDEO_ID);
            /* remember where the frame count is written so the trailer can
             * patch in the real number of frames */
            swf->vframes_pos = avio_tell(pb);
            avio_wl16(pb, 15000); /* hard flash player limit */
            avio_wl16(pb, enc->width);
            avio_wl16(pb, enc->height);
            avio_w8(pb, 0);
            avio_w8(pb, ff_codec_get_tag(ff_swf_codec_tags, enc->codec_id));
            put_swf_end_tag(s);

            /* place the video object for the first time */
            put_swf_tag(s, TAG_PLACEOBJECT2);
            avio_w8(pb, 0x36);
            avio_wl16(pb, 1);
            avio_wl16(pb, VIDEO_ID);
            put_swf_matrix(pb, 1 << FRAC_BITS, 0, 0, 1 << FRAC_BITS, 0, 0);
            avio_wl16(pb, swf->video_frame_number);
            avio_write(pb, "video", 5);
            avio_w8(pb, 0x00);
            put_swf_end_tag(s);
        } else {
            /* mark the character for update */
            put_swf_tag(s, TAG_PLACEOBJECT2);
            avio_w8(pb, 0x11);
            avio_wl16(pb, 1);
            avio_wl16(pb, swf->video_frame_number);
            put_swf_end_tag(s);
        }

        /* set video frame data */
        put_swf_tag(s, TAG_VIDEOFRAME | TAG_LONG);
        avio_wl16(pb, VIDEO_ID);
        avio_wl16(pb, swf->video_frame_number++);
        avio_write(pb, buf, size);
        put_swf_end_tag(s);
    } else if (enc->codec_id == AV_CODEC_ID_MJPEG) {
        if (swf->swf_frame_number > 0) {
            /* remove the shape */
            put_swf_tag(s, TAG_REMOVEOBJECT);
            avio_wl16(pb, SHAPE_ID); /* shape ID */
            avio_wl16(pb, 1); /* depth */
            put_swf_end_tag(s);

            /* free the bitmap */
            put_swf_tag(s, TAG_FREECHARACTER);
            avio_wl16(pb, BITMAP_ID);
            put_swf_end_tag(s);
        }

        put_swf_tag(s, TAG_JPEG2 | TAG_LONG);

        avio_wl16(pb, BITMAP_ID); /* ID of the image */

        /* a dummy jpeg header seems to be required */
        avio_wb32(pb, 0xffd8ffd9);
        /* write the jpeg image */
        avio_write(pb, buf, size);

        put_swf_end_tag(s);

        /* draw the shape */

        put_swf_tag(s, TAG_PLACEOBJECT);
        avio_wl16(pb, SHAPE_ID); /* shape ID */
        avio_wl16(pb, 1); /* depth */
        put_swf_matrix(pb, 20 << FRAC_BITS, 0, 0, 20 << FRAC_BITS, 0, 0);
        put_swf_end_tag(s);
    }

    swf->swf_frame_number++;

    /* streaming sound always should be placed just before showframe tags */
    if (swf->audio_enc && av_fifo_size(swf->audio_fifo)) {
        int frame_size = av_fifo_size(swf->audio_fifo);
        put_swf_tag(s, TAG_STREAMBLOCK | TAG_LONG);
        avio_wl16(pb, swf->sound_samples);
        avio_wl16(pb, 0); // seek samples
        /* drain the audio FIFO straight into the output via avio_write */
        av_fifo_generic_read(swf->audio_fifo, pb, frame_size, (void*)avio_write);
        put_swf_end_tag(s);

        /* update FIFO */
        swf->sound_samples = 0;
    }

    /* output the frame */
    put_swf_tag(s, TAG_SHOWFRAME);
    put_swf_end_tag(s);

    return 0;
}
 
/**
 * Buffer an encoded MP3 packet; swf_write_video() drains the FIFO into a
 * STREAMBLOCK tag just before the next SHOWFRAME.
 *
 * @param s    output format context
 * @param enc  audio codec context (frame_size is accumulated as sample count)
 * @param buf  encoded audio data
 * @param size size of buf in bytes
 * @return 0 on success, -1 if the FIFO would overflow
 */
static int swf_write_audio(AVFormatContext *s,
                           AVCodecContext *enc, uint8_t *buf, int size)
{
    SWFContext *swf = s->priv_data;

    /* Flash Player limit */
    if (swf->swf_frame_number == 16000)
        av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");

    if (av_fifo_size(swf->audio_fifo) + size > AUDIO_FIFO_SIZE) {
        av_log(s, AV_LOG_ERROR, "audio fifo too small to mux audio essence\n");
        return -1;
    }

    av_fifo_generic_write(swf->audio_fifo, buf, size, NULL);
    swf->sound_samples += enc->frame_size;

    /* if audio only stream make sure we add swf frames */
    if (!swf->video_enc)
        swf_write_video(s, enc, 0, 0);

    return 0;
}
 
/* Dispatch a packet to the audio or video writer depending on the codec
 * type of the stream it belongs to. */
static int swf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[pkt->stream_index]->codec;

    if (codec->codec_type != AVMEDIA_TYPE_AUDIO)
        return swf_write_video(s, codec, pkt->data, pkt->size);
    return swf_write_audio(s, codec, pkt->data, pkt->size);
}
 
/**
 * Finalize the SWF file: write the END tag, free the audio FIFO, and — when
 * the output is seekable — patch the file size, the duration field and the
 * VIDEOSTREAM frame count recorded during muxing.
 *
 * @return 0 on success
 */
static int swf_write_trailer(AVFormatContext *s)
{
    SWFContext *swf = s->priv_data;
    AVIOContext *pb = s->pb;
    AVCodecContext *enc, *video_enc;
    int file_size, i;

    video_enc = NULL;
    for(i=0;i<s->nb_streams;i++) {
        enc = s->streams[i]->codec;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
            video_enc = enc;
        else {
            /* audio stream: the FIFO is no longer needed */
            av_fifo_free(swf->audio_fifo);
            swf->audio_fifo = NULL;
        }
    }

    put_swf_tag(s, TAG_END);
    put_swf_end_tag(s);

    /* patch file size and number of frames if not streamed */
    if (s->pb->seekable && video_enc) {
        file_size = avio_tell(pb);
        avio_seek(pb, 4, SEEK_SET);
        avio_wl32(pb, file_size);
        avio_seek(pb, swf->duration_pos, SEEK_SET);
        avio_wl16(pb, swf->video_frame_number);
        if (swf->vframes_pos) {
            avio_seek(pb, swf->vframes_pos, SEEK_SET);
            avio_wl16(pb, swf->video_frame_number);
        }
        /* restore the position so the stream ends at the true file end */
        avio_seek(pb, file_size, SEEK_SET);
    }
    return 0;
}
 
#if CONFIG_SWF_MUXER
/* SWF (ShockWave Flash) muxer: MP3 audio and FLV1 video by default. */
AVOutputFormat ff_swf_muxer = {
    .name              = "swf",
    .long_name         = NULL_IF_CONFIG_SMALL("SWF (ShockWave Flash)"),
    .mime_type         = "application/x-shockwave-flash",
    .extensions        = "swf",
    .priv_data_size    = sizeof(SWFContext),
    .audio_codec       = AV_CODEC_ID_MP3,
    .video_codec       = AV_CODEC_ID_FLV1,
    .write_header      = swf_write_header,
    .write_packet      = swf_write_packet,
    .write_trailer     = swf_write_trailer,
    .flags             = AVFMT_TS_NONSTRICT,
};
#endif
#if CONFIG_AVM2_MUXER
/* AVM2 variant of the SWF muxer; shares all callbacks with ff_swf_muxer.
 * No .extensions entry, so it must be selected explicitly by format name. */
AVOutputFormat ff_avm2_muxer = {
    .name              = "avm2",
    .long_name         = NULL_IF_CONFIG_SMALL("SWF (ShockWave Flash) (AVM2)"),
    .mime_type         = "application/x-shockwave-flash",
    .priv_data_size    = sizeof(SWFContext),
    .audio_codec       = AV_CODEC_ID_MP3,
    .video_codec       = AV_CODEC_ID_FLV1,
    .write_header      = swf_write_header,
    .write_packet      = swf_write_packet,
    .write_trailer     = swf_write_trailer,
    .flags             = AVFMT_TS_NONSTRICT,
};
#endif
/contrib/sdk/sources/ffmpeg/libavformat/takdec.c
0,0 → 1,210
/*
* Raw TAK demuxer
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/crc.h"
#include "libavcodec/tak.h"
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "rawdec.h"
#include "apetag.h"
 
/* Demuxer state for raw TAK files. */
typedef struct TAKDemuxContext {
    int     mlast_frame; /* non-zero once a LAST_FRAME metadata block was seen */
    int64_t data_end;    /* absolute offset of the end of the audio data */
} TAKDemuxContext;
 
/* Probe: a TAK container starts with the "tBaK" magic. */
static int tak_probe(AVProbeData *p)
{
    return memcmp(p->buf, "tBaK", 4) ? 0 : AVPROBE_SCORE_EXTENSION;
}
 
/* Checksum callback for ffio_init_checksum(): CRC-24 (IEEE polynomial) over
 * the metadata block payload. */
static unsigned long tak_check_crc(unsigned long checksum, const uint8_t *buf,
                                   unsigned int len)
{
    return av_crc(av_crc_get_table(AV_CRC_24_IEEE), checksum, buf, len);
}
 
/**
 * Read the TAK container header: walk the metadata blocks, validate their
 * CRC-24 checksums, export STREAMINFO as codec parameters/extradata and
 * record the end-of-data position from the LAST_FRAME block.
 *
 * Fix vs. original: the metadata buffer allocated for a LAST_FRAME block was
 * leaked when that block had an unexpected size (early return without free).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int tak_read_header(AVFormatContext *s)
{
    TAKDemuxContext *tc = s->priv_data;
    AVIOContext *pb = s->pb;
    GetBitContext gb;
    AVStream *st;
    uint8_t *buffer = NULL;
    int ret;

    st = avformat_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = AV_CODEC_ID_TAK;
    st->need_parsing      = AVSTREAM_PARSE_FULL_RAW;

    tc->mlast_frame = 0;
    if (avio_rl32(pb) != MKTAG('t', 'B', 'a', 'K')) {
        /* no container magic: treat the input as a raw TAK stream */
        avio_seek(pb, -4, SEEK_CUR);
        return 0;
    }

    while (!url_feof(pb)) {
        enum TAKMetaDataType type;
        int size;

        type = avio_r8(pb) & 0x7f;
        size = avio_rl24(pb);

        switch (type) {
        case TAK_METADATA_STREAMINFO:
        case TAK_METADATA_LAST_FRAME:
        case TAK_METADATA_ENCODER:
            /* payload is size - 3 bytes, followed by a 3-byte CRC */
            if (size <= 3)
                return AVERROR_INVALIDDATA;

            buffer = av_malloc(size - 3 + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!buffer)
                return AVERROR(ENOMEM);

            ffio_init_checksum(pb, tak_check_crc, 0xCE04B7U);
            if (avio_read(pb, buffer, size - 3) != size - 3) {
                av_freep(&buffer);
                return AVERROR(EIO);
            }
            if (ffio_get_checksum(s->pb) != avio_rb24(pb)) {
                av_log(s, AV_LOG_ERROR, "%d metadata block CRC error.\n", type);
                if (s->error_recognition & AV_EF_EXPLODE) {
                    av_freep(&buffer);
                    return AVERROR_INVALIDDATA;
                }
            }

            init_get_bits8(&gb, buffer, size - 3);
            break;
        case TAK_METADATA_MD5: {
            uint8_t md5[16];
            int i;

            if (size != 19)
                return AVERROR_INVALIDDATA;
            ffio_init_checksum(pb, tak_check_crc, 0xCE04B7U);
            avio_read(pb, md5, 16);
            if (ffio_get_checksum(s->pb) != avio_rb24(pb)) {
                av_log(s, AV_LOG_ERROR, "MD5 metadata block CRC error.\n");
                if (s->error_recognition & AV_EF_EXPLODE)
                    return AVERROR_INVALIDDATA;
            }

            av_log(s, AV_LOG_VERBOSE, "MD5=");
            for (i = 0; i < 16; i++)
                av_log(s, AV_LOG_VERBOSE, "%02x", md5[i]);
            av_log(s, AV_LOG_VERBOSE, "\n");
            break;
        }
        case TAK_METADATA_END: {
            int64_t curpos = avio_tell(pb);

            if (pb->seekable) {
                /* look for a trailing APE tag, then restore the position */
                ff_ape_parse_tag(s);
                avio_seek(pb, curpos, SEEK_SET);
            }

            tc->data_end += curpos;
            return 0;
        }
        default:
            ret = avio_skip(pb, size);
            if (ret < 0)
                return ret;
        }

        if (type == TAK_METADATA_STREAMINFO) {
            TAKStreamInfo ti;

            avpriv_tak_parse_streaminfo(&gb, &ti);
            if (ti.samples > 0)
                st->duration = ti.samples;
            st->codec->bits_per_coded_sample = ti.bps;
            if (ti.ch_layout)
                st->codec->channel_layout = ti.ch_layout;
            st->codec->sample_rate = ti.sample_rate;
            st->codec->channels    = ti.channels;
            st->start_time         = 0;
            avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
            /* hand the raw STREAMINFO payload to the decoder as extradata;
             * ownership transfers to the codec context */
            st->codec->extradata      = buffer;
            st->codec->extradata_size = size - 3;
            buffer = NULL;
        } else if (type == TAK_METADATA_LAST_FRAME) {
            if (size != 11) {
                av_freep(&buffer); /* fix: was leaked on this error path */
                return AVERROR_INVALIDDATA;
            }
            tc->mlast_frame = 1;
            tc->data_end = get_bits64(&gb, TAK_LAST_FRAME_POS_BITS) +
                           get_bits(&gb, TAK_LAST_FRAME_SIZE_BITS);
            av_freep(&buffer);
        } else if (type == TAK_METADATA_ENCODER) {
            av_log(s, AV_LOG_VERBOSE, "encoder version: %0X\n",
                   get_bits_long(&gb, TAK_ENCODER_VERSION_BITS));
            av_freep(&buffer);
        }
    }

    return AVERROR_EOF;
}
 
/* Deliver one packet of raw TAK data. When the end-of-data position is
 * known (mlast_frame set), reads are clamped so they stop exactly at
 * data_end; otherwise a partial raw read is used. */
static int raw_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    TAKDemuxContext *tc = s->priv_data;
    AVIOContext *pb;
    int64_t remaining, chunk;
    int ret;

    if (!tc->mlast_frame)
        return ff_raw_read_partial_packet(s, pkt);

    pb        = s->pb;
    remaining = tc->data_end - avio_tell(pb);
    chunk     = FFMIN(remaining, 1024);
    if (chunk <= 0)
        return AVERROR_EOF;

    ret = av_get_packet(pb, pkt, chunk);
    if (ret < 0)
        return ret;

    pkt->stream_index = 0;
    return ret;
}
 
/* Raw TAK demuxer registration. */
AVInputFormat ff_tak_demuxer = {
    .name           = "tak",
    .long_name      = NULL_IF_CONFIG_SMALL("raw TAK"),
    .priv_data_size = sizeof(TAKDemuxContext),
    .read_probe     = tak_probe,
    .read_header    = tak_read_header,
    .read_packet    = raw_read_packet,
    .flags          = AVFMT_GENERIC_INDEX,
    .extensions     = "tak",
    .raw_codec_id   = AV_CODEC_ID_TAK,
};
/contrib/sdk/sources/ffmpeg/libavformat/tcp.c
0,0 → 1,228
/*
* TCP protocol
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "libavutil/parseutils.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "internal.h"
#include "network.h"
#include "os_support.h"
#include "url.h"
#if HAVE_POLL_H
#include <poll.h>
#endif
 
/* Per-connection state for the TCP protocol handler. */
typedef struct TCPContext {
    const AVClass *class;
    int fd;             /* connected (or listening-accepted) socket */
    int listen;         /* act as server instead of connecting */
    int open_timeout;   /* connect timeout, microseconds (see tcp_open) */
    int rw_timeout;     /* read/write timeout from options, -1 = unset */
    int listen_timeout; /* accept timeout, -1 = unset */
} TCPContext;
 
#define OFFSET(x) offsetof(TCPContext, x)
#define D AV_OPT_FLAG_DECODING_PARAM
#define E AV_OPT_FLAG_ENCODING_PARAM
/* User-settable private options of the tcp protocol. */
static const AVOption options[] = {
{"listen", "listen on port instead of connecting", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, D|E },
{"timeout", "timeout of socket i/o operations", OFFSET(rw_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D|E },
{"listen_timeout", "connection awaiting timeout", OFFSET(listen_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D|E },
{NULL}
};
 
/* AVClass wiring the options table above into the protocol context. */
static const AVClass tcp_context_class = {
    .class_name = "tcp",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/**
 * Open a TCP connection (or listening socket) described by a
 * "tcp://host:port[?options]" URL.
 *
 * Resolves the host with getaddrinfo() and tries each returned address in
 * turn until one connects; in listen mode it binds and waits for a client.
 * URL query options "listen", "timeout" and "listen_timeout" override the
 * AVOption values. Note: open_timeout is in microseconds (the default is
 * 5000000 and it is divided by 1000 before being passed as milliseconds).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int tcp_open(URLContext *h, const char *uri, int flags)
{
    struct addrinfo hints = { 0 }, *ai, *cur_ai;
    int port, fd = -1;
    TCPContext *s = h->priv_data;
    const char *p;
    char buf[256];
    int ret;
    char hostname[1024],proto[1024],path[1024];
    char portstr[10];
    s->open_timeout = 5000000;

    av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname),
        &port, path, sizeof(path), uri);
    if (strcmp(proto, "tcp"))
        return AVERROR(EINVAL);
    if (port <= 0 || port >= 65536) {
        av_log(h, AV_LOG_ERROR, "Port missing in uri\n");
        return AVERROR(EINVAL);
    }
    /* query-string options take precedence over AVOptions */
    p = strchr(uri, '?');
    if (p) {
        if (av_find_info_tag(buf, sizeof(buf), "listen", p))
            s->listen = 1;
        if (av_find_info_tag(buf, sizeof(buf), "timeout", p)) {
            s->rw_timeout = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "listen_timeout", p)) {
            s->listen_timeout = strtol(buf, NULL, 10);
        }
    }
    if (s->rw_timeout >= 0) {
        s->open_timeout =
        h->rw_timeout   = s->rw_timeout;
    }
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;
    snprintf(portstr, sizeof(portstr), "%d", port);
    if (s->listen)
        hints.ai_flags |= AI_PASSIVE;
    if (!hostname[0])
        ret = getaddrinfo(NULL, portstr, &hints, &ai);
    else
        ret = getaddrinfo(hostname, portstr, &hints, &ai);
    if (ret) {
        av_log(h, AV_LOG_ERROR,
               "Failed to resolve hostname %s: %s\n",
               hostname, gai_strerror(ret));
        return AVERROR(EIO);
    }

    cur_ai = ai;

 restart:
    fd = ff_socket(cur_ai->ai_family,
                   cur_ai->ai_socktype,
                   cur_ai->ai_protocol);
    if (fd < 0) {
        ret = ff_neterrno();
        goto fail;
    }

    if (s->listen) {
        if ((fd = ff_listen_bind(fd, cur_ai->ai_addr, cur_ai->ai_addrlen,
                                 s->listen_timeout, h)) < 0) {
            ret = fd;
            goto fail1;
        }
    } else {
        if ((ret = ff_listen_connect(fd, cur_ai->ai_addr, cur_ai->ai_addrlen,
                                     s->open_timeout / 1000, h, !!cur_ai->ai_next)) < 0) {

            /* AVERROR_EXIT means user interruption: do not retry */
            if (ret == AVERROR_EXIT)
                goto fail1;
            else
                goto fail;
        }
    }

    h->is_streamed = 1;
    s->fd = fd;
    freeaddrinfo(ai);
    return 0;

 fail:
    if (cur_ai->ai_next) {
        /* Retry with the next sockaddr */
        cur_ai = cur_ai->ai_next;
        if (fd >= 0)
            closesocket(fd);
        ret = 0;
        goto restart;
    }
 fail1:
    if (fd >= 0)
        closesocket(fd);
    freeaddrinfo(ai);
    return ret;
}
 
/* Read up to size bytes from the socket. In blocking mode, first wait for
 * readability (bounded by h->rw_timeout). Returns the number of bytes read
 * or a negative AVERROR code. */
static int tcp_read(URLContext *h, uint8_t *buf, int size)
{
    TCPContext *s = h->priv_data;
    int err;

    if (!(h->flags & AVIO_FLAG_NONBLOCK)) {
        err = ff_network_wait_fd_timeout(s->fd, 0, h->rw_timeout,
                                         &h->interrupt_callback);
        if (err)
            return err;
    }

    err = recv(s->fd, buf, size, 0);
    if (err < 0)
        return ff_neterrno();
    return err;
}
 
/* Write up to size bytes to the socket. In blocking mode, first wait for
 * writability (bounded by h->rw_timeout). Returns the number of bytes
 * written or a negative AVERROR code. */
static int tcp_write(URLContext *h, const uint8_t *buf, int size)
{
    TCPContext *s = h->priv_data;
    int err;

    if (!(h->flags & AVIO_FLAG_NONBLOCK)) {
        err = ff_network_wait_fd_timeout(s->fd, 1, h->rw_timeout,
                                         &h->interrupt_callback);
        if (err)
            return err;
    }

    err = send(s->fd, buf, size, 0);
    if (err < 0)
        return ff_neterrno();
    return err;
}
 
/* Half- or full-close the connection: map AVIO read/write flags onto the
 * corresponding shutdown(2) mode. */
static int tcp_shutdown(URLContext *h, int flags)
{
    TCPContext *s = h->priv_data;
    int how = SHUT_RD;

    if (flags & AVIO_FLAG_WRITE)
        how = (flags & AVIO_FLAG_READ) ? SHUT_RDWR : SHUT_WR;

    return shutdown(s->fd, how);
}
 
/* Close the socket. Always reports success. */
static int tcp_close(URLContext *h)
{
    TCPContext *s = h->priv_data;
    closesocket(s->fd);
    return 0;
}
 
/* Expose the underlying socket descriptor (e.g. for polling by callers). */
static int tcp_get_file_handle(URLContext *h)
{
    TCPContext *s = h->priv_data;
    return s->fd;
}
 
/* tcp:// protocol registration. */
URLProtocol ff_tcp_protocol = {
    .name                = "tcp",
    .url_open            = tcp_open,
    .url_read            = tcp_read,
    .url_write           = tcp_write,
    .url_close           = tcp_close,
    .url_get_file_handle = tcp_get_file_handle,
    .url_shutdown        = tcp_shutdown,
    .priv_data_size      = sizeof(TCPContext),
    .priv_data_class     = &tcp_context_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/tedcaptionsdec.c
0,0 → 1,366
/*
* TED Talks captions format decoder
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/bprint.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
 
/* Demuxer state for TED Talks JSON captions. */
typedef struct {
    AVClass *class;
    int64_t start_time;        /* offset added to every subtitle pts, in ms */
    FFDemuxSubtitlesQueue subs; /* parsed, queued subtitle packets */
} TEDCaptionsDemuxer;
 
static const AVOption tedcaptions_options[] = {
{ "start_time", "set the start time (offset) of the subtitles, in ms",
offsetof(TEDCaptionsDemuxer, start_time), FF_OPT_TYPE_INT64,
{ .i64 = 15000 }, INT64_MIN, INT64_MAX,
AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
 
/* AVClass wiring the options table above into the demuxer context. */
static const AVClass tedcaptions_demuxer_class = {
    .class_name = "tedcaptions_demuxer",
    .item_name  = av_default_item_name,
    .option     = tedcaptions_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* True iff amin <= a <= amax, using a single unsigned comparison. */
#define BETWEEN(a, amin, amax) ((unsigned)((a) - (amin)) <= (amax) - (amin))

/* A JSON \uXXXX escape digit is 0-9 or (case-insensitively) a-f.
 * Fix: the range was 'a'..'z', wrongly accepting g-z as hex digits. */
#define HEX_DIGIT_TEST(c) (BETWEEN(c, '0', '9') || BETWEEN((c) | 32, 'a', 'f'))
#define HEX_DIGIT_VAL(c) ((c) <= '9' ? (c) - '0' : ((c) | 32) - 'a' + 10)
/* Map a stored byte to an error: negative values are already AVERROR codes,
 * anything else is unexpected data. Argument parenthesized against
 * precedence surprises. */
#define ERR_CODE(c) ((c) < 0 ? (c) : AVERROR_INVALIDDATA)
 
/**
 * Append the UTF-8 encoding of code point c to the buffer print context.
 * ASCII is emitted as a single byte; larger code points get a multi-byte
 * sequence whose length is derived from the code point's bit width.
 */
static void av_bprint_utf8(AVBPrint *bp, unsigned c)
{
    int bytes, i;

    if (c <= 0x7F) {
        av_bprint_chars(bp, c, 1);
        return;
    }
    /* number of continuation bytes needed for this code point */
    bytes = (av_log2(c) - 2) / 5;
    /* lead byte: top bits of c plus the length-prefix mask */
    av_bprint_chars(bp, (c >> (bytes * 6)) | ((0xFF80 >> bytes) & 0xFF), 1);
    for (i = bytes - 1; i >= 0; i--)
        av_bprint_chars(bp, ((c >> (i * 6)) & 0x3F) | 0x80, 1);
}
 
/* Advance the one-byte lookahead: *cur_byte becomes the next input byte
 * (0..255), AVERROR_EOF at end of input, or a negative read error. */
static void next_byte(AVIOContext *pb, int *cur_byte)
{
    uint8_t b;
    int ret = avio_read(pb, &b, 1);
    *cur_byte = ret > 0 ? b : ret == 0 ? AVERROR_EOF : ret;
}
 
/* Consume JSON whitespace (space, tab, CR, LF); stops on any other byte,
 * including error codes stored in *cur_byte. */
static void skip_spaces(AVIOContext *pb, int *cur_byte)
{
    for (;;) {
        switch (*cur_byte) {
        case ' ':
        case '\t':
        case '\n':
        case '\r':
            next_byte(pb, cur_byte);
            break;
        default:
            return;
        }
    }
}
 
/* Skip whitespace and require the next byte to be c; consumes it on match.
 * Returns 0 on match, a negative error code otherwise. */
static int expect_byte(AVIOContext *pb, int *cur_byte, uint8_t c)
{
    skip_spaces(pb, cur_byte);
    if (*cur_byte != c)
        return ERR_CODE(*cur_byte);
    next_byte(pb, cur_byte);
    return 0;
}
 
/**
 * Parse a JSON string into bp, handling backslash escapes and \uXXXX
 * (encoded as UTF-8). With full != 0 the whole string is kept (unlimited
 * buffer); otherwise a small automatic buffer is used, which suits short
 * labels. On failure the bprint buffer is finalized before returning.
 *
 * @return 0 on success, a negative error code on failure
 */
static int parse_string(AVIOContext *pb, int *cur_byte, AVBPrint *bp, int full)
{
    int ret;

    av_bprint_init(bp, 0, full ? -1 : 1);
    ret = expect_byte(pb, cur_byte, '"');
    if (ret < 0)
        goto fail;
    while (*cur_byte > 0 && *cur_byte != '"') {
        if (*cur_byte == '\\') {
            next_byte(pb, cur_byte);
            if (*cur_byte < 0) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            if ((*cur_byte | 32) == 'u') {
                /* \uXXXX: read four hex digits and emit as UTF-8 */
                unsigned chr = 0, i;
                for (i = 0; i < 4; i++) {
                    next_byte(pb, cur_byte);
                    if (!HEX_DIGIT_TEST(*cur_byte)) {
                        ret = ERR_CODE(*cur_byte);
                        goto fail;
                    }
                    chr = chr * 16 + HEX_DIGIT_VAL(*cur_byte);
                }
                av_bprint_utf8(bp, chr);
            } else {
                /* other escapes: emit the escaped byte literally */
                av_bprint_chars(bp, *cur_byte, 1);
            }
        } else {
            av_bprint_chars(bp, *cur_byte, 1);
        }
        next_byte(pb, cur_byte);
    }
    ret = expect_byte(pb, cur_byte, '"');
    if (ret < 0)
        goto fail;
    if (full && !av_bprint_is_complete(bp)) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    return 0;

fail:
    av_bprint_finalize(bp, NULL);
    return ret;
}
 
/* Parse a JSON object key: a string followed by ':'. The key text is left
 * in bp. Returns 0 on success, a negative error code otherwise. */
static int parse_label(AVIOContext *pb, int *cur_byte, AVBPrint *bp)
{
    int ret = parse_string(pb, cur_byte, bp, 0);

    if (ret >= 0)
        ret = expect_byte(pb, cur_byte, ':');
    return ret < 0 ? ret : 0;
}
 
/* Parse a JSON boolean literal ("true"/"false") into *result (1/0).
 * Rejects longer identifiers that merely start with one of the keywords.
 * Returns 0 on success, AVERROR_INVALIDDATA otherwise. */
static int parse_boolean(AVIOContext *pb, int *cur_byte, int *result)
{
    static const char * const text[] = { "false", "true" };
    const char *p;
    int i;

    skip_spaces(pb, cur_byte);
    for (i = 0; i < 2; i++) {
        p = text[i];
        if (*cur_byte != *p)
            continue;
        for (; *p; p++, next_byte(pb, cur_byte))
            if (*cur_byte != *p)
                return AVERROR_INVALIDDATA;
        /* a trailing letter would make this a different identifier */
        if (BETWEEN(*cur_byte | 32, 'a', 'z'))
            return AVERROR_INVALIDDATA;
        *result = i;
        return 0;
    }
    return AVERROR_INVALIDDATA;
}
 
/* Parse a non-negative decimal integer into *result.
 * NOTE(review): no overflow guard — a pathologically long digit run would
 * wrap the int64 accumulator; inputs here are millisecond timestamps.
 * Returns 0 on success, AVERROR_INVALIDDATA if no digit is present. */
static int parse_int(AVIOContext *pb, int *cur_byte, int64_t *result)
{
    int64_t val = 0;

    skip_spaces(pb, cur_byte);
    if ((unsigned)*cur_byte - '0' > 9)
        return AVERROR_INVALIDDATA;
    while (BETWEEN(*cur_byte, '0', '9')) {
        val = val * 10 + (*cur_byte - '0');
        next_byte(pb, cur_byte);
    }
    *result = val;
    return 0;
}
 
/**
 * Parse the whole TED captions JSON document:
 *   { "captions": [ { "content": ..., "startTime": ..., "duration": ...,
 *                     "startOfParagraph": ... }, ... ] }
 * Each caption entry is appended to the subtitle queue with its start time
 * as pts and its duration.
 *
 * @return 0 on success, a negative error code on malformed input
 */
static int parse_file(AVIOContext *pb, FFDemuxSubtitlesQueue *subs)
{
    int ret, cur_byte, start_of_par;
    AVBPrint label, content;
    int64_t pos, start, duration;
    AVPacket *pkt;

    next_byte(pb, &cur_byte);
    ret = expect_byte(pb, &cur_byte, '{');
    if (ret < 0)
        return AVERROR_INVALIDDATA;
    ret = parse_label(pb, &cur_byte, &label);
    if (ret < 0 || strcmp(label.str, "captions"))
        return AVERROR_INVALIDDATA;
    ret = expect_byte(pb, &cur_byte, '[');
    if (ret < 0)
        return AVERROR_INVALIDDATA;
    while (1) { /* one iteration per caption object */
        content.size = 0;
        start = duration = AV_NOPTS_VALUE;
        ret = expect_byte(pb, &cur_byte, '{');
        if (ret < 0)
            return ret;
        pos = avio_tell(pb) - 1;
        while (1) { /* one iteration per key/value pair */
            ret = parse_label(pb, &cur_byte, &label);
            if (ret < 0)
                return ret;
            if (!strcmp(label.str, "startOfParagraph")) {
                ret = parse_boolean(pb, &cur_byte, &start_of_par);
                if (ret < 0)
                    return ret;
            } else if (!strcmp(label.str, "content")) {
                ret = parse_string(pb, &cur_byte, &content, 1);
                if (ret < 0)
                    return ret;
            } else if (!strcmp(label.str, "startTime")) {
                ret = parse_int(pb, &cur_byte, &start);
                if (ret < 0)
                    return ret;
            } else if (!strcmp(label.str, "duration")) {
                ret = parse_int(pb, &cur_byte, &duration);
                if (ret < 0)
                    return ret;
            } else {
                /* unknown keys are rejected, not skipped */
                return AVERROR_INVALIDDATA;
            }
            skip_spaces(pb, &cur_byte);
            if (cur_byte != ',')
                break;
            next_byte(pb, &cur_byte);
        }
        ret = expect_byte(pb, &cur_byte, '}');
        if (ret < 0)
            return ret;

        /* content, startTime and duration are all mandatory */
        if (!content.size || start == AV_NOPTS_VALUE ||
            duration == AV_NOPTS_VALUE)
            return AVERROR_INVALIDDATA;
        pkt = ff_subtitles_queue_insert(subs, content.str, content.len, 0);
        if (!pkt)
            return AVERROR(ENOMEM);
        pkt->pos      = pos;
        pkt->pts      = start;
        pkt->duration = duration;
        av_bprint_finalize(&content, NULL);

        skip_spaces(pb, &cur_byte);
        if (cur_byte != ',')
            break;
        next_byte(pb, &cur_byte);
    }
    ret = expect_byte(pb, &cur_byte, ']');
    if (ret < 0)
        return ret;
    ret = expect_byte(pb, &cur_byte, '}');
    if (ret < 0)
        return ret;
    skip_spaces(pb, &cur_byte);
    /* only trailing whitespace may follow the top-level object */
    if (cur_byte != AVERROR_EOF)
        return ERR_CODE(cur_byte);
    return 0;
}
 
/**
 * Read header: parse the whole JSON file into the subtitle queue, apply the
 * configured start_time offset to every pts, and create one text subtitle
 * stream with a 1 ms time base whose duration spans to the last caption.
 */
static av_cold int tedcaptions_read_header(AVFormatContext *avf)
{
    TEDCaptionsDemuxer *tc = avf->priv_data;
    AVStream *st;
    int ret, i;
    AVPacket *last;

    ret = parse_file(avf->pb, &tc->subs);
    if (ret < 0) {
        if (ret == AVERROR_INVALIDDATA)
            av_log(avf, AV_LOG_ERROR, "Syntax error near offset %"PRId64".\n",
                   avio_tell(avf->pb));
        ff_subtitles_queue_clean(&tc->subs);
        return ret;
    }
    ff_subtitles_queue_finalize(&tc->subs);
    /* shift all captions by the user-configurable offset */
    for (i = 0; i < tc->subs.nb_subs; i++)
        tc->subs.subs[i].pts += tc->start_time;

    last = &tc->subs.subs[tc->subs.nb_subs - 1];
    st = avformat_new_stream(avf, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_TEXT;
    avpriv_set_pts_info(st, 64, 1, 1000);
    st->probe_packets = 0;
    st->start_time    = 0;
    st->duration      = last->pts + last->duration;
    st->cur_dts       = 0;

    return 0;
}
 
/* Deliver the next queued subtitle packet. */
static int tedcaptions_read_packet(AVFormatContext *avf, AVPacket *packet)
{
    TEDCaptionsDemuxer *tc = avf->priv_data;

    return ff_subtitles_queue_read_packet(&tc->subs, packet);
}
 
/* Free all queued subtitle packets. */
static int tedcaptions_read_close(AVFormatContext *avf)
{
    TEDCaptionsDemuxer *tc = avf->priv_data;

    ff_subtitles_queue_clean(&tc->subs);
    return 0;
}
 
/**
 * Probe: the buffer must open with '{' (after whitespace) and is scored by
 * how many of the expected caption keys appear followed by ':'. All keys
 * present gives maximum score; any subset gives extension-level score.
 */
static av_cold int tedcaptions_read_probe(AVProbeData *p)
{
    static const char *const tags[] = {
        "\"captions\"", "\"duration\"", "\"content\"",
        "\"startOfParagraph\"", "\"startTime\"",
    };
    unsigned i, count = 0;
    const char *t;

    if (p->buf[strspn(p->buf, " \t\r\n")] != '{')
        return 0;
    for (i = 0; i < FF_ARRAY_ELEMS(tags); i++) {
        if (!(t = strstr(p->buf, tags[i])))
            continue;
        t += strlen(tags[i]);
        t += strspn(t, " \t\r\n");
        if (*t == ':')
            count++;
    }
    return count == FF_ARRAY_ELEMS(tags) ? AVPROBE_SCORE_MAX :
           count                         ? AVPROBE_SCORE_EXTENSION : 0;
}
 
/* Seek within the pre-parsed subtitle queue. */
static int tedcaptions_read_seek(AVFormatContext *avf, int stream_index,
                                 int64_t min_ts, int64_t ts, int64_t max_ts,
                                 int flags)
{
    TEDCaptionsDemuxer *tc = avf->priv_data;
    return ff_subtitles_queue_seek(&tc->subs, avf, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* TED Talks captions demuxer registration. */
AVInputFormat ff_tedcaptions_demuxer = {
    .name           = "tedcaptions",
    .long_name      = NULL_IF_CONFIG_SMALL("TED Talks captions"),
    .priv_data_size = sizeof(TEDCaptionsDemuxer),
    .priv_class     = &tedcaptions_demuxer_class,
    .read_header    = tedcaptions_read_header,
    .read_packet    = tedcaptions_read_packet,
    .read_close     = tedcaptions_read_close,
    .read_probe     = tedcaptions_read_probe,
    .read_seek2     = tedcaptions_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/tee.c
0,0 → 1,498
/*
* Tee pseudo-muxer
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software * Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
 
#include "libavutil/avutil.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "avformat.h"
 
#define MAX_SLAVES 16
 
/* State of one slave output: its muxer context, per-output-stream bitstream
 * filter chains, and the input->output stream index mapping. */
typedef struct {
    AVFormatContext *avf;
    AVBitStreamFilterContext **bsfs; ///< bitstream filters per stream

    /** map from input to output streams indexes,
     * disabled output streams are set to -1 */
    int *stream_map;
} TeeSlave;
 
/* Muxer state: the set of slave outputs fed by the tee. */
typedef struct TeeContext {
    const AVClass *class;
    unsigned nb_slaves;
    TeeSlave slaves[MAX_SLAVES];
} TeeContext;
 
/* Delimiters of the slave spec syntax: "[opt=val:...]file1|[...]file2|..." */
static const char *const slave_delim        = "|";
static const char *const slave_opt_open     = "[";
static const char *const slave_opt_close    = "]";
static const char *const slave_opt_delim    = ":]"; /* must have the close too */
static const char *const slave_bsfs_spec_sep = "/";

static const AVClass tee_muxer_class = {
    .class_name = "Tee muxer",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/**
 * Split one slave specification into its option dictionary and file name.
 * A spec is either a plain filename or "[key=val:key=val:...]filename".
 *
 * @param log      logging context
 * @param slave    spec string (key/value pieces are consumed into *options)
 * @param options  receives the parsed options; freed on failure
 * @param filename receives a pointer into slave where the filename starts
 * @return 0 on success, a negative error code on malformed options
 */
static int parse_slave_options(void *log, char *slave,
                               AVDictionary **options, char **filename)
{
    const char *p;
    char *key, *val;
    int ret;

    if (!strspn(slave, slave_opt_open)) {
        /* no leading '[': the whole spec is the filename */
        *filename = slave;
        return 0;
    }
    p = slave + 1;
    if (strspn(p, slave_opt_close)) {
        /* empty option block "[]" */
        *filename = (char *)p + 1;
        return 0;
    }
    while (1) {
        ret = av_opt_get_key_value(&p, "=", slave_opt_delim, 0, &key, &val);
        if (ret < 0) {
            av_log(log, AV_LOG_ERROR, "No option found near \"%s\"\n", p);
            goto fail;
        }
        ret = av_dict_set(options, key, val,
                          AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
        if (ret < 0)
            goto fail;
        if (strspn(p, slave_opt_close))
            break;
        p++;
    }
    *filename = (char *)p + 1;
    return 0;

fail:
    av_dict_free(options);
    return ret;
}
 
/**
 * Parse a comma-separated list of bitstream filter names and append the
 * initialized filter contexts to the singly linked list pointed to by bsfs.
 *
 * The list must be specified in the form:
 * BSFS ::= BSF[,BSFS]
 *
 * @param log_ctx   logging context
 * @param bsfs_spec filter list, e.g. "h264_mp4toannexb,dump_extradata"
 * @param bsfs      head of the list to append to
 * @return 0 on success, a negative error code on failure (filters already
 *         appended remain in the list for the caller to free)
 */
static int parse_bsfs(void *log_ctx, const char *bsfs_spec,
                      AVBitStreamFilterContext **bsfs)
{
    char *bsf_name, *buf, *dup, *saveptr;
    int ret = 0;

    if (!(dup = buf = av_strdup(bsfs_spec)))
        return AVERROR(ENOMEM);

    /* extra parentheses: the assignment-as-loop-condition is intentional */
    while ((bsf_name = av_strtok(buf, ",", &saveptr))) {
        AVBitStreamFilterContext *bsf = av_bitstream_filter_init(bsf_name);

        if (!bsf) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "Cannot initialize bitstream filter with name '%s', "
                   "unknown filter or internal error happened\n",
                   bsf_name);
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        /* append bsf context to the list of bsf contexts */
        *bsfs = bsf;
        bsfs = &bsf->next;

        buf = NULL; /* av_strtok continues from saveptr on later calls */
    }

end:
    av_free(dup);
    return ret;
}
 
/**
 * Open one slave output: parse its options, create the output context,
 * mirror the selected input streams into it, write its header, and attach
 * any per-stream bitstream filters requested via "bsfs" options.
 *
 * @param avf       the tee muxer context (source of streams and metadata)
 * @param slave     slave specification string ("[opts]filename")
 * @param tee_slave slave state to fill in
 * @return 0 on success, a negative error code on failure
 */
static int open_slave(AVFormatContext *avf, char *slave, TeeSlave *tee_slave)
{
    int i, ret;
    AVDictionary *options = NULL;
    AVDictionaryEntry *entry;
    char *filename;
    char *format = NULL, *select = NULL;
    AVFormatContext *avf2 = NULL;
    AVStream *st, *st2;
    int stream_count;

    if ((ret = parse_slave_options(avf, slave, &options, &filename)) < 0)
        return ret;

/* Move an option value out of the dictionary without freeing it. */
#define STEAL_OPTION(option, field) do {                                \
        if ((entry = av_dict_get(options, option, NULL, 0))) {          \
            field = entry->value;                                       \
            entry->value = NULL; /* prevent it from being freed */      \
            av_dict_set(&options, option, NULL, 0);                     \
        }                                                               \
    } while (0)

    STEAL_OPTION("f", format);
    STEAL_OPTION("select", select);

    ret = avformat_alloc_output_context2(&avf2, NULL, format, filename);
    if (ret < 0)
        goto end;
    av_dict_copy(&avf2->metadata, avf->metadata, 0);

    tee_slave->stream_map = av_calloc(avf->nb_streams, sizeof(*tee_slave->stream_map));
    if (!tee_slave->stream_map) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    stream_count = 0;
    for (i = 0; i < avf->nb_streams; i++) {
        st = avf->streams[i];
        if (select) {
            /* only streams matching the "select" specifier are mirrored */
            ret = avformat_match_stream_specifier(avf, avf->streams[i], select);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' for output '%s'\n",
                       select, slave);
                goto end;
            }

            if (ret == 0) { /* no match */
                tee_slave->stream_map[i] = -1;
                continue;
            }
        }
        tee_slave->stream_map[i] = stream_count++;

        if (!(st2 = avformat_new_stream(avf2, NULL))) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        /* copy the stream parameters wholesale into the slave */
        st2->id = st->id;
        st2->r_frame_rate        = st->r_frame_rate;
        st2->time_base           = st->time_base;
        st2->start_time          = st->start_time;
        st2->duration            = st->duration;
        st2->nb_frames           = st->nb_frames;
        st2->disposition         = st->disposition;
        st2->sample_aspect_ratio = st->sample_aspect_ratio;
        st2->avg_frame_rate      = st->avg_frame_rate;
        av_dict_copy(&st2->metadata, st->metadata, 0);
        if ((ret = avcodec_copy_context(st2->codec, st->codec)) < 0)
            goto end;
    }

    if (!(avf2->oformat->flags & AVFMT_NOFILE)) {
        if ((ret = avio_open(&avf2->pb, filename, AVIO_FLAG_WRITE)) < 0) {
            av_log(avf, AV_LOG_ERROR, "Slave '%s': error opening: %s\n",
                   slave, av_err2str(ret));
            goto end;
        }
    }

    if ((ret = avformat_write_header(avf2, &options)) < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error writing header: %s\n",
               slave, av_err2str(ret));
        goto end;
    }

    tee_slave->avf  = avf2;
    tee_slave->bsfs = av_calloc(avf2->nb_streams, sizeof(TeeSlave));
    if (!tee_slave->bsfs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    entry = NULL;
    while (entry = av_dict_get(options, "bsfs", NULL, AV_DICT_IGNORE_SUFFIX)) {
        const char *spec = entry->key + strlen("bsfs");
        if (*spec) {
            if (strspn(spec, slave_bsfs_spec_sep) != 1) {
                av_log(avf, AV_LOG_ERROR,
                       "Specifier separator in '%s' is '%c', but only characters '%s' "
                       "are allowed\n", entry->key, *spec, slave_bsfs_spec_sep);
                /* NOTE(review): returns directly instead of 'goto end' —
                 * 'format', 'select' and 'options' are not freed here;
                 * confirm against upstream cleanup behavior */
                return AVERROR(EINVAL);
            }
            spec++; /* consume separator */
        }

        for (i = 0; i < avf2->nb_streams; i++) {
            ret = avformat_match_stream_specifier(avf2, avf2->streams[i], spec);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' in bsfs option '%s' for slave "
                       "output '%s'\n", spec, entry->key, filename);
                goto end;
            }

            if (ret > 0) {
                av_log(avf, AV_LOG_DEBUG, "spec:%s bsfs:%s matches stream %d of slave "
                       "output '%s'\n", spec, entry->value, i, filename);
                if (tee_slave->bsfs[i]) {
                    av_log(avf, AV_LOG_WARNING,
                           "Duplicate bsfs specification associated to stream %d of slave "
                           "output '%s', filters will be ignored\n", i, filename);
                    continue;
                }
                ret = parse_bsfs(avf, entry->value, &tee_slave->bsfs[i]);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Error parsing bitstream filter sequence '%s' associated to "
                           "stream %d of slave output '%s'\n", entry->value, i, filename);
                    goto end;
                }
            }
        }

        av_dict_set(&options, entry->key, NULL, 0);
    }

    /* any option left over was not consumed by the slave muxer */
    if (options) {
        entry = NULL;
        while ((entry = av_dict_get(options, "", entry, AV_DICT_IGNORE_SUFFIX)))
            av_log(avf2, AV_LOG_ERROR, "Unknown option '%s'\n", entry->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto end;
    }

end:
    av_free(format);
    av_free(select);
    av_dict_free(&options);
    return ret;
}
 
static void close_slaves(AVFormatContext *avf)
{
    TeeContext *tee = avf->priv_data;
    unsigned slave_idx, stream_idx;

    for (slave_idx = 0; slave_idx < tee->nb_slaves; slave_idx++) {
        TeeSlave *slave = &tee->slaves[slave_idx];
        AVFormatContext *slave_ctx = slave->avf;

        /* Tear down the per-stream bitstream-filter chains. */
        for (stream_idx = 0; stream_idx < slave_ctx->nb_streams; stream_idx++) {
            AVBitStreamFilterContext *cur = slave->bsfs[stream_idx];
            while (cur) {
                AVBitStreamFilterContext *next = cur->next;
                av_bitstream_filter_close(cur);
                cur = next;
            }
        }
        av_freep(&slave->stream_map);
        av_freep(&slave->bsfs);

        /* Close the slave's output and release its muxer context. */
        avio_close(slave_ctx->pb);
        slave_ctx->pb = NULL;
        avformat_free_context(slave_ctx);
        slave->avf = NULL;
    }
}
 
/* Log one slave's destination, format, streams and attached bitstream
 * filters at the given log level. */
static void log_slave(TeeSlave *slave, void *log_ctx, int log_level)
{
    int idx;

    av_log(log_ctx, log_level, "filename:'%s' format:%s\n",
           slave->avf->filename, slave->avf->oformat->name);

    for (idx = 0; idx < slave->avf->nb_streams; idx++) {
        AVStream *st = slave->avf->streams[idx];
        AVBitStreamFilterContext *filt = slave->bsfs[idx];

        av_log(log_ctx, log_level, " stream:%d codec:%s type:%s",
               idx, avcodec_get_name(st->codec->codec_id),
               av_get_media_type_string(st->codec->codec_type));

        if (filt) {
            av_log(log_ctx, log_level, " bsfs:");
            /* Print the filter chain comma-separated. */
            for (; filt; filt = filt->next)
                av_log(log_ctx, log_level, "%s%s",
                       filt->filter->name, filt->next ? "," : "");
        }
        av_log(log_ctx, log_level, "\n");
    }
}
 
/**
 * Split avf->filename into slave specifications, open every slave muxer
 * and warn about input streams that no slave maps.
 *
 * Fix: record each slave in tee->nb_slaves as soon as it is opened.
 * Previously tee->nb_slaves was only set after the whole loop succeeded,
 * so a failure while opening slave i left slaves 0..i-1 invisible to
 * close_slaves() in the fail path and leaked them.
 */
static int tee_write_header(AVFormatContext *avf)
{
    TeeContext *tee = avf->priv_data;
    unsigned nb_slaves = 0, i;
    const char *filename = avf->filename;
    char *slaves[MAX_SLAVES];
    int ret;

    /* Tokenize the target list; entries are separated by slave_delim. */
    while (*filename) {
        if (nb_slaves == MAX_SLAVES) {
            av_log(avf, AV_LOG_ERROR, "Maximum %d slave muxers reached.\n",
                   MAX_SLAVES);
            ret = AVERROR_PATCHWELCOME;
            goto fail;
        }
        if (!(slaves[nb_slaves++] = av_get_token(&filename, slave_delim))) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (strspn(filename, slave_delim))
            filename++;
    }

    for (i = 0; i < nb_slaves; i++) {
        if ((ret = open_slave(avf, slaves[i], &tee->slaves[i])) < 0)
            goto fail;
        /* Register the slave immediately so close_slaves() can release it
         * if a later slave fails to open. */
        tee->nb_slaves = i + 1;
        log_slave(&tee->slaves[i], avf, AV_LOG_VERBOSE);
        av_freep(&slaves[i]);
    }

    /* Warn about input streams that no slave consumes. */
    for (i = 0; i < avf->nb_streams; i++) {
        int j, mapped = 0;
        for (j = 0; j < tee->nb_slaves; j++)
            mapped += tee->slaves[j].stream_map[i] >= 0;
        if (!mapped)
            av_log(avf, AV_LOG_WARNING, "Input stream #%d is not mapped "
                   "to any slave.\n", i);
    }
    return 0;

fail:
    for (i = 0; i < nb_slaves; i++)
        av_freep(&slaves[i]);
    close_slaves(avf);
    return ret;
}
 
/**
 * Run pkt through the chain of bitstream filters starting at bsf_ctx.
 * On success the (possibly replaced) data is stored back into *pkt.
 *
 * Fixes over the previous version:
 * - stop at the first failing filter instead of continuing down the chain
 *   (a later filter's success used to mask the error, and advancing past
 *   the end of the chain left bsf_ctx == NULL when the final error log
 *   dereferenced it);
 * - report ENOMEM when av_buffer_create() fails (ret used to stay > 0,
 *   silently signalling success).
 *
 * @return 0 or 1 on success (1 if the data buffer was replaced),
 *         a negative AVERROR code on failure.
 */
static int filter_packet(void *log_ctx, AVPacket *pkt,
                         AVFormatContext *fmt_ctx, AVBitStreamFilterContext *bsf_ctx)
{
    AVCodecContext *enc_ctx = fmt_ctx->streams[pkt->stream_index]->codec;
    int ret = 0;

    while (bsf_ctx) {
        AVPacket new_pkt = *pkt;
        ret = av_bitstream_filter_filter(bsf_ctx, enc_ctx, NULL,
                                         &new_pkt.data, &new_pkt.size,
                                         pkt->data, pkt->size,
                                         pkt->flags & AV_PKT_FLAG_KEY);
        if (ret == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
            /* Filter returned a reference into its own state: deep-copy it
             * so the packet owns its data. */
            if ((ret = av_copy_packet(&new_pkt, pkt)) < 0)
                break;
            ret = 1;
        }

        if (ret > 0) {
            /* Filter allocated a fresh buffer: wrap it refcounted and drop
             * the old packet payload. */
            av_free_packet(pkt);
            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                           av_buffer_default_free, NULL, 0);
            if (!new_pkt.buf) {
                ret = AVERROR(ENOMEM);
                break;
            }
        }
        if (ret < 0)
            break;          /* keep bsf_ctx pointing at the failed filter */

        *pkt = new_pkt;
        bsf_ctx = bsf_ctx->next;
    }

    if (ret < 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Failed to filter bitstream with filter %s for stream %d in file '%s' with codec %s\n",
               bsf_ctx->filter->name, pkt->stream_index, fmt_ctx->filename,
               avcodec_get_name(enc_ctx->codec_id));
    }

    return ret;
}
 
/* Write the trailer of every slave, close its output file if the slave
 * owns one, and return the first error encountered (if any). */
static int tee_write_trailer(AVFormatContext *avf)
{
    TeeContext *tee = avf->priv_data;
    int ret_all = 0, ret;
    unsigned i;

    for (i = 0; i < tee->nb_slaves; i++) {
        AVFormatContext *slave_ctx = tee->slaves[i].avf;

        ret = av_write_trailer(slave_ctx);
        if (ret < 0 && !ret_all)
            ret_all = ret;

        if (!(slave_ctx->oformat->flags & AVFMT_NOFILE)) {
            ret = avio_close(slave_ctx->pb);
            slave_ctx->pb = NULL;
            if (ret < 0 && !ret_all)
                ret_all = ret;
        }
    }
    close_slaves(avf);
    return ret_all;
}
 
/**
 * Duplicate pkt to every slave that maps its stream, rescaling timestamps
 * into the slave's time base and running any configured bitstream filters.
 *
 * Fix: the original error path did `ret = ret_all;` (a swapped assignment
 * that discarded the copy error) and only `continue`d when ret_all was
 * still 0 — so after one earlier failure, a failed copy fell through and
 * sent an invalid packet to the slave.  Now the first error is recorded
 * in ret_all and a failed copy always skips the slave.
 *
 * @return 0 on success, or the first error encountered.
 */
static int tee_write_packet(AVFormatContext *avf, AVPacket *pkt)
{
    TeeContext *tee = avf->priv_data;
    AVFormatContext *avf2;
    AVPacket pkt2;
    int ret_all = 0, ret;
    unsigned i, s;
    int s2;
    AVRational tb, tb2;

    for (i = 0; i < tee->nb_slaves; i++) {
        avf2 = tee->slaves[i].avf;
        s    = pkt->stream_index;
        s2   = tee->slaves[i].stream_map[s];
        if (s2 < 0)
            continue;       /* stream not mapped to this slave */

        if ((ret = av_copy_packet(&pkt2, pkt)) < 0 ||
            (ret = av_dup_packet(&pkt2)) < 0) {
            if (!ret_all)
                ret_all = ret;
            continue;
        }

        /* Rescale timestamps from the tee's to the slave's time base. */
        tb  = avf ->streams[s ]->time_base;
        tb2 = avf2->streams[s2]->time_base;
        pkt2.pts      = av_rescale_q(pkt->pts,      tb, tb2);
        pkt2.dts      = av_rescale_q(pkt->dts,      tb, tb2);
        pkt2.duration = av_rescale_q(pkt->duration, tb, tb2);
        pkt2.stream_index = s2;

        filter_packet(avf2, &pkt2, avf2, tee->slaves[i].bsfs[s2]);
        if ((ret = av_interleaved_write_frame(avf2, &pkt2)) < 0)
            if (!ret_all)
                ret_all = ret;
    }
    return ret_all;
}
 
/* "tee" pseudo-muxer: duplicates written packets to several slave muxers.
 * AVFMT_NOFILE because the slaves open their own outputs. */
AVOutputFormat ff_tee_muxer = {
    .name              = "tee",
    .long_name         = NULL_IF_CONFIG_SMALL("Multiple muxer tee"),
    .priv_data_size    = sizeof(TeeContext),
    .write_header      = tee_write_header,
    .write_trailer     = tee_write_trailer,
    .write_packet      = tee_write_packet,
    .priv_class        = &tee_muxer_class,
    .flags             = AVFMT_NOFILE,
};
/contrib/sdk/sources/ffmpeg/libavformat/thp.c
0,0 → 1,206
/*
* THP Demuxer
* Copyright (c) 2007 Marco Gerards
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "avformat.h"
#include "internal.h"
 
/* Demuxer state for the GameCube THP container. */
typedef struct ThpDemuxContext {
    int version;              /* THP file version word (e.g. 0x11000)       */
    unsigned first_frame;     /* file offset of the first frame             */
    unsigned first_framesz;   /* size of the first frame                    */
    unsigned last_frame;      /* file offset of the last frame              */
    int compoff;              /* offset of the component table              */
    unsigned framecnt;        /* total number of frames                     */
    AVRational fps;           /* frame rate, parsed from a float field      */
    unsigned frame;           /* index of the next frame to deliver         */
    int64_t next_frame;       /* file offset of the next frame              */
    unsigned next_framesz;    /* size of the next frame                     */
    int video_stream_index;
    int audio_stream_index;
    int compcount;            /* number of entries in the component table   */
    unsigned char components[16]; /* per-component type bytes (0=video, 1=audio) */
    AVStream* vst;            /* video stream, NULL until created           */
    int has_audio;            /* non-zero once an audio component was seen  */
    unsigned audiosize;       /* pending audio bytes of the current frame;
                               * non-zero means the next read_packet call
                               * must emit audio, not video                 */
} ThpDemuxContext;
 
 
/* A THP stream starts with the 4-byte magic "THP\0". */
static int thp_probe(AVProbeData *p)
{
    return AV_RL32(p->buf) == MKTAG('T', 'H', 'P', '\0')
           ? AVPROBE_SCORE_MAX : 0;
}
 
/* Parse the THP file header and component table, creating at most one
 * video and one audio stream.  Field order follows the on-disk layout,
 * so the avio_* calls below must not be reordered. */
static int thp_read_header(AVFormatContext *s)
{
    ThpDemuxContext *thp = s->priv_data;
    AVStream *st;
    AVIOContext *pb = s->pb;
    int64_t fsize = avio_size(pb);
    int i;

    /* Read the file header. */
    avio_rb32(pb); /* Skip Magic. */
    thp->version = avio_rb32(pb);

    avio_rb32(pb); /* Max buf size. */
    avio_rb32(pb); /* Max samples. */

    /* Frame rate is stored as a big-endian IEEE float. */
    thp->fps           = av_d2q(av_int2float(avio_rb32(pb)), INT_MAX);
    thp->framecnt      = avio_rb32(pb);
    thp->first_framesz = avio_rb32(pb);
    /* The header carries the nominal data size; clamp it to the real
     * file size when that is known and smaller. */
    pb->maxsize        = avio_rb32(pb);
    if (fsize > 0 && (!pb->maxsize || fsize < pb->maxsize))
        pb->maxsize = fsize;

    thp->compoff = avio_rb32(pb);
    avio_rb32(pb); /* offsetDataOffset. */
    thp->first_frame = avio_rb32(pb);
    thp->last_frame  = avio_rb32(pb);

    thp->next_framesz = thp->first_framesz;
    thp->next_frame   = thp->first_frame;

    /* Read the component structure. */
    avio_seek(pb, thp->compoff, SEEK_SET);
    thp->compcount = avio_rb32(pb);

    /* Read the list of component types. */
    avio_read(pb, thp->components, 16);

    for (i = 0; i < thp->compcount; i++) {
        if (thp->components[i] == 0) {
            /* Only the first video component is used. */
            if (thp->vst != 0)
                break;

            /* Video component. */
            st = avformat_new_stream(s, NULL);
            if (!st)
                return AVERROR(ENOMEM);

            /* The denominator and numerator are switched because 1/fps
               is required. */
            avpriv_set_pts_info(st, 64, thp->fps.den, thp->fps.num);
            st->codec->codec_type  = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id    = AV_CODEC_ID_THP;
            st->codec->codec_tag   = 0; /* no fourcc */
            st->codec->width       = avio_rb32(pb);
            st->codec->height      = avio_rb32(pb);
            st->codec->sample_rate = av_q2d(thp->fps);
            st->nb_frames =
            st->duration  = thp->framecnt;
            thp->vst = st;
            thp->video_stream_index = st->index;

            if (thp->version == 0x11000)
                avio_rb32(pb); /* Unknown. */
        } else if (thp->components[i] == 1) {
            /* Only the first audio component is used. */
            if (thp->has_audio != 0)
                break;

            /* Audio component. */
            st = avformat_new_stream(s, NULL);
            if (!st)
                return AVERROR(ENOMEM);

            st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_id    = AV_CODEC_ID_ADPCM_THP;
            st->codec->codec_tag   = 0; /* no fourcc */
            st->codec->channels    = avio_rb32(pb); /* numChannels. */
            st->codec->sample_rate = avio_rb32(pb); /* Frequency. */

            avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

            thp->audio_stream_index = st->index;
            thp->has_audio = 1;
        }
    }

    return 0;
}
 
/* Deliver one packet.  Each THP frame carries video followed by audio;
 * the video half is returned first and thp->audiosize is stashed so the
 * next call returns the audio half of the same frame. */
static int thp_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    ThpDemuxContext *thp = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int size;
    int ret;

    if (thp->audiosize == 0) {
        /* Terminate when last frame is reached. */
        if (thp->frame >= thp->framecnt)
            return AVERROR_EOF;

        avio_seek(pb, thp->next_frame, SEEK_SET);

        /* Locate the next frame and read out its size. */
        thp->next_frame += FFMAX(thp->next_framesz, 1);
        thp->next_framesz = avio_rb32(pb);

        avio_rb32(pb); /* Previous total size. */
        size = avio_rb32(pb); /* Total size of this frame. */

        /* Store the audiosize so the next time this function is called,
           the audio can be read. */
        if (thp->has_audio)
            thp->audiosize = avio_rb32(pb); /* Audio size. */
        else
            thp->frame++; /* no audio: this frame is fully consumed */

        ret = av_get_packet(pb, pkt, size);
        if (ret != size) {
            av_free_packet(pkt);
            return AVERROR(EIO);
        }

        pkt->stream_index = thp->video_stream_index;
    } else {
        /* Second half of the frame: the audio data. */
        ret = av_get_packet(pb, pkt, thp->audiosize);
        if (ret != thp->audiosize) {
            av_free_packet(pkt);
            return AVERROR(EIO);
        }

        pkt->stream_index = thp->audio_stream_index;
        /* The sample count lives at offset 4 of the audio payload. */
        if (thp->audiosize >= 8)
            pkt->duration = AV_RB32(&pkt->data[4]);

        thp->audiosize = 0;
        thp->frame++;
    }

    return 0;
}
 
/* GameCube THP demuxer registration. */
AVInputFormat ff_thp_demuxer = {
    .name           = "thp",
    .long_name      = NULL_IF_CONFIG_SMALL("THP"),
    .priv_data_size = sizeof(ThpDemuxContext),
    .read_probe     = thp_probe,
    .read_header    = thp_read_header,
    .read_packet    = thp_read_packet
};
/contrib/sdk/sources/ffmpeg/libavformat/tiertexseq.c
0,0 → 1,317
/*
* Tiertex Limited SEQ File Demuxer
* Copyright (c) 2006 Gregory Montoir (cyx@users.sourceforge.net)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Tiertex Limited SEQ file demuxer
*/
 
#include "libavutil/channel_layout.h"
#include "avformat.h"
#include "internal.h"
 
#define SEQ_FRAME_SIZE 6144
#define SEQ_FRAME_W 256
#define SEQ_FRAME_H 128
#define SEQ_NUM_FRAME_BUFFERS 30
#define SEQ_AUDIO_BUFFER_SIZE 882
#define SEQ_SAMPLE_RATE 22050
#define SEQ_FRAME_RATE 25
 
 
/* One of the preallocated frame buffers listed in the SEQ file header. */
typedef struct TiertexSeqFrameBuffer {
    int fill_size;        /* bytes currently written into data */
    int data_size;        /* allocated capacity of data        */
    unsigned char *data;
} TiertexSeqFrameBuffer;

/* Demuxer state for Tiertex Limited SEQ files. */
typedef struct SeqDemuxContext {
    int audio_stream_index;
    int video_stream_index;
    int current_frame_pts;          /* pts counter, one tick per frame      */
    int current_frame_offs;         /* file offset of the current frame     */
    TiertexSeqFrameBuffer frame_buffers[SEQ_NUM_FRAME_BUFFERS];
    int frame_buffers_count;        /* buffers actually declared in header  */
    unsigned int current_audio_data_size;
    unsigned int current_audio_data_offs;  /* 0 = no audio in this frame    */
    unsigned int current_pal_data_size;
    unsigned int current_pal_data_offs;    /* 0 = no palette in this frame  */
    unsigned int current_video_data_size;
    unsigned char *current_video_data_ptr;
    int audio_buffer_full;          /* set after the video packet was sent;
                                     * the next call emits audio            */
} SeqDemuxContext;
 
 
static int seq_probe(AVProbeData *p)
{
    int i;

    if (p->buf_size < 258)
        return 0;

    /* there's no real header in a .seq file, the only thing they have in common */
    /* is the first 256 bytes of the file which are always filled with 0 */
    for (i = 0; i < 256; i++) {
        if (p->buf[i] != 0)
            return 0;
    }

    /* bytes 256/257 carry the first buffer size, which must be non-zero */
    if (!p->buf[256] && !p->buf[257])
        return 0;

    /* only one fourth of the score since the previous check is too naive */
    return AVPROBE_SCORE_MAX / 4;
}
 
/* Read the buffer-size table at offset 256 and allocate every declared
 * frame buffer; a zero-sized entry terminates the table. */
static int seq_init_frame_buffers(SeqDemuxContext *seq, AVIOContext *pb)
{
    int i;

    avio_seek(pb, 256, SEEK_SET);

    for (i = 0; i < SEQ_NUM_FRAME_BUFFERS; i++) {
        TiertexSeqFrameBuffer *buf = &seq->frame_buffers[i];
        int sz = avio_rl16(pb);

        if (!sz)
            break;

        buf->fill_size = 0;
        buf->data_size = sz;
        buf->data      = av_malloc(sz);
        if (!buf->data)
            return AVERROR(ENOMEM);
    }
    seq->frame_buffers_count = i;
    return 0;
}
 
/* Append data_size bytes read from the current frame (at data_offs) into
 * frame buffer buffer_num, validating index and capacity first. */
static int seq_fill_buffer(SeqDemuxContext *seq, AVIOContext *pb, int buffer_num, unsigned int data_offs, int data_size)
{
    TiertexSeqFrameBuffer *buf;

    if (buffer_num >= SEQ_NUM_FRAME_BUFFERS)
        return AVERROR_INVALIDDATA;

    buf = &seq->frame_buffers[buffer_num];
    /* reject non-positive sizes and anything that would overflow the buffer */
    if (buf->fill_size + data_size > buf->data_size || data_size <= 0)
        return AVERROR_INVALIDDATA;

    avio_seek(pb, seq->current_frame_offs + data_offs, SEEK_SET);
    if (avio_read(pb, buf->data + buf->fill_size, data_size) != data_size)
        return AVERROR(EIO);

    buf->fill_size += data_size;
    return 0;
}
 
/* Advance to the next SEQ_FRAME_SIZE-aligned frame and parse its table:
 * audio offset, palette offset, four buffer numbers and four data
 * offsets.  Video chunks are accumulated into the shared frame buffers;
 * the buffer named first (buffer_num[0]) is the one handed to the
 * decoder, 255 meaning "no video this frame". */
static int seq_parse_frame_data(SeqDemuxContext *seq, AVIOContext *pb)
{
    unsigned int offset_table[4], buffer_num[4];
    TiertexSeqFrameBuffer *seq_buffer;
    int i, e, err;

    seq->current_frame_offs += SEQ_FRAME_SIZE;
    avio_seek(pb, seq->current_frame_offs, SEEK_SET);

    /* sound data */
    seq->current_audio_data_offs = avio_rl16(pb);
    if (seq->current_audio_data_offs) {
        /* fixed-size PCM chunk: 882 stereo-interleavable 16-bit samples */
        seq->current_audio_data_size = SEQ_AUDIO_BUFFER_SIZE * 2;
    } else {
        seq->current_audio_data_size = 0;
    }

    /* palette data */
    seq->current_pal_data_offs = avio_rl16(pb);
    if (seq->current_pal_data_offs) {
        seq->current_pal_data_size = 768; /* 256 RGB triplets */
    } else {
        seq->current_pal_data_size = 0;
    }

    /* video data */
    for (i = 0; i < 4; i++)
        buffer_num[i] = avio_r8(pb);

    for (i = 0; i < 4; i++)
        offset_table[i] = avio_rl16(pb);

    for (i = 0; i < 3; i++) {
        if (offset_table[i]) {
            /* chunk i extends to the next non-zero offset; note that the
             * destination buffer index is buffer_num[1 + i] — slot 0 names
             * the display buffer, slots 1..3 the fill targets */
            for (e = i + 1; e < 3 && offset_table[e] == 0; e++);
            err = seq_fill_buffer(seq, pb, buffer_num[1 + i],
                                  offset_table[i],
                                  offset_table[e] - offset_table[i]);
            if (err)
                return err;
        }
    }

    if (buffer_num[0] != 255) {
        if (buffer_num[0] >= SEQ_NUM_FRAME_BUFFERS)
            return AVERROR_INVALIDDATA;

        /* hand the completed buffer to the caller and mark it reusable */
        seq_buffer = &seq->frame_buffers[buffer_num[0]];
        seq->current_video_data_size = seq_buffer->fill_size;
        seq->current_video_data_ptr  = seq_buffer->data;
        seq_buffer->fill_size = 0;
    } else {
        seq->current_video_data_size = 0;
        seq->current_video_data_ptr  = 0;
    }

    return 0;
}
 
/* Allocate the frame buffers, pre-roll the first 100 frame tables
 * (buffer bookkeeping only), then create the fixed video (256x128 @ 25fps)
 * and audio (mono 22050 Hz s16be PCM) streams. */
static int seq_read_header(AVFormatContext *s)
{
    int i, rc;
    SeqDemuxContext *seq = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;

    /* init internal buffers */
    rc = seq_init_frame_buffers(seq, pb);
    if (rc)
        return rc;

    seq->current_frame_offs = 0;

    /* preload (no audio data, just buffer operations related data) */
    /* NOTE(review): the fixed count of 100 preload frames appears to be a
     * format convention — taken on faith from the original code. */
    for (i = 1; i <= 100; i++) {
        rc = seq_parse_frame_data(seq, pb);
        if (rc)
            return rc;
    }

    seq->current_frame_pts = 0;

    seq->audio_buffer_full = 0;

    /* initialize the video decoder stream */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    avpriv_set_pts_info(st, 32, 1, SEQ_FRAME_RATE);
    seq->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_TIERTEXSEQVIDEO;
    st->codec->codec_tag  = 0;  /* no fourcc */
    st->codec->width      = SEQ_FRAME_W;
    st->codec->height     = SEQ_FRAME_H;

    /* initialize the audio decoder stream */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->start_time = 0;
    avpriv_set_pts_info(st, 32, 1, SEQ_SAMPLE_RATE);
    seq->audio_stream_index = st->index;
    st->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id       = AV_CODEC_ID_PCM_S16BE;
    st->codec->codec_tag      = 0;  /* no tag */
    st->codec->channels       = 1;
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    st->codec->sample_rate    = SEQ_SAMPLE_RATE;
    st->codec->bits_per_coded_sample = 16;
    st->codec->bit_rate    = st->codec->sample_rate * st->codec->bits_per_coded_sample * st->codec->channels;
    st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample / 8;

    return 0;
}
 
/**
 * Deliver one packet.  Each SEQ frame yields a video packet (palette +
 * frame data, with a flag byte in data[0]) followed by an audio packet.
 *
 * Fix: the packet allocated by av_new_packet() was leaked when the
 * palette read failed — now it is freed before returning AVERROR(EIO).
 */
static int seq_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int rc;
    SeqDemuxContext *seq = s->priv_data;
    AVIOContext *pb = s->pb;

    if (!seq->audio_buffer_full) {
        rc = seq_parse_frame_data(seq, pb);
        if (rc)
            return rc;

        /* video packet */
        if (seq->current_pal_data_size + seq->current_video_data_size != 0) {
            if (av_new_packet(pkt, 1 + seq->current_pal_data_size + seq->current_video_data_size))
                return AVERROR(ENOMEM);

            /* data[0] flags what follows: bit 0 = palette, bit 1 = video */
            pkt->data[0] = 0;
            if (seq->current_pal_data_size) {
                pkt->data[0] |= 1;
                avio_seek(pb, seq->current_frame_offs + seq->current_pal_data_offs, SEEK_SET);
                if (avio_read(pb, &pkt->data[1], seq->current_pal_data_size) != seq->current_pal_data_size) {
                    av_free_packet(pkt);  /* don't leak the packet on a short read */
                    return AVERROR(EIO);
                }
            }
            if (seq->current_video_data_size) {
                pkt->data[0] |= 2;
                memcpy(&pkt->data[1 + seq->current_pal_data_size],
                       seq->current_video_data_ptr,
                       seq->current_video_data_size);
            }
            pkt->stream_index = seq->video_stream_index;
            pkt->pts = seq->current_frame_pts;

            /* sound buffer will be processed on next read_packet() call */
            seq->audio_buffer_full = 1;
            return 0;
        }
    }

    /* audio packet */
    if (seq->current_audio_data_offs == 0) /* end of data reached */
        return AVERROR(EIO);

    avio_seek(pb, seq->current_frame_offs + seq->current_audio_data_offs, SEEK_SET);
    rc = av_get_packet(pb, pkt, seq->current_audio_data_size);
    if (rc < 0)
        return rc;

    pkt->stream_index = seq->audio_stream_index;
    seq->current_frame_pts++;

    seq->audio_buffer_full = 0;
    return 0;
}
 
/* Free every preallocated frame buffer (unused slots hold NULL, which
 * av_free() accepts). */
static int seq_read_close(AVFormatContext *s)
{
    SeqDemuxContext *seq = s->priv_data;
    int i;

    for (i = 0; i < SEQ_NUM_FRAME_BUFFERS; i++)
        av_free(seq->frame_buffers[i].data);

    return 0;
}
 
/* Tiertex Limited SEQ demuxer registration. */
AVInputFormat ff_tiertexseq_demuxer = {
    .name           = "tiertexseq",
    .long_name      = NULL_IF_CONFIG_SMALL("Tiertex Limited SEQ"),
    .priv_data_size = sizeof(SeqDemuxContext),
    .read_probe     = seq_probe,
    .read_header    = seq_read_header,
    .read_packet    = seq_read_packet,
    .read_close     = seq_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/tls.c
0,0 → 1,396
/*
* TLS/SSL Protocol
* Copyright (c) 2011 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "url.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#if CONFIG_GNUTLS
#include <gnutls/gnutls.h>
#include <gnutls/x509.h>
#define TLS_read(c, buf, size) gnutls_record_recv(c->session, buf, size)
#define TLS_write(c, buf, size) gnutls_record_send(c->session, buf, size)
#define TLS_shutdown(c) gnutls_bye(c->session, GNUTLS_SHUT_RDWR)
#define TLS_free(c) do { \
if (c->session) \
gnutls_deinit(c->session); \
if (c->cred) \
gnutls_certificate_free_credentials(c->cred); \
} while (0)
#elif CONFIG_OPENSSL
#include <openssl/bio.h>
#include <openssl/ssl.h>
#include <openssl/err.h>
#define TLS_read(c, buf, size) SSL_read(c->ssl, buf, size)
#define TLS_write(c, buf, size) SSL_write(c->ssl, buf, size)
#define TLS_shutdown(c) SSL_shutdown(c->ssl)
#define TLS_free(c) do { \
if (c->ssl) \
SSL_free(c->ssl); \
if (c->ctx) \
SSL_CTX_free(c->ctx); \
} while (0)
#endif
#include "network.h"
#include "os_support.h"
#include "internal.h"
#if HAVE_POLL_H
#include <poll.h>
#endif
 
/* Per-connection state for the TLS protocol handler.  The backend-specific
 * members are selected by CONFIG_GNUTLS / CONFIG_OPENSSL. */
typedef struct {
    const AVClass *class;
    URLContext *tcp;        /* underlying TCP (or proxy) connection */
#if CONFIG_GNUTLS
    gnutls_session_t session;
    gnutls_certificate_credentials_t cred;
#elif CONFIG_OPENSSL
    SSL_CTX *ctx;
    SSL *ssl;
#endif
    int fd;                 /* raw socket fd, used for poll() */
    char *ca_file;          /* CA bundle path (option or URL query) */
    int verify;             /* non-zero: verify the peer certificate */
    char *cert_file;        /* client/server certificate path */
    char *key_file;         /* private key path */
    int listen;             /* non-zero: act as TLS server */
} TLSContext;
 
#define OFFSET(x) offsetof(TLSContext, x)
#define D AV_OPT_FLAG_DECODING_PARAM
#define E AV_OPT_FLAG_ENCODING_PARAM
/* AVOptions for the tls protocol; "cafile" is kept as an alias of
 * "ca_file" for backwards compatibility. */
static const AVOption options[] = {
    {"ca_file",    "Certificate Authority database file", OFFSET(ca_file),   AV_OPT_TYPE_STRING, .flags = D|E },
    {"cafile",     "Certificate Authority database file", OFFSET(ca_file),   AV_OPT_TYPE_STRING, .flags = D|E },
    {"tls_verify", "Verify the peer certificate",         OFFSET(verify),    AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, .flags = D|E },
    {"cert_file",  "Certificate file",                    OFFSET(cert_file), AV_OPT_TYPE_STRING, .flags = D|E },
    {"key_file",   "Private key file",                    OFFSET(key_file),  AV_OPT_TYPE_STRING, .flags = D|E },
    {"listen",     "Listen for incoming connections",     OFFSET(listen),    AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, .flags = D|E },
    { NULL }
};
 
/* AVClass exposing the options above through the AVOption API. */
static const AVClass tls_class = {
    .class_name = "tls",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Handle a non-fatal TLS backend return code: classify it, then poll the
 * socket in the required direction until it is ready again (or the
 * operation is interrupted).  Returns 0 when the caller should retry,
 * a negative AVERROR otherwise. */
static int do_tls_poll(URLContext *h, int ret)
{
    TLSContext *c = h->priv_data;
    struct pollfd p = { c->fd, 0, 0 };
#if CONFIG_GNUTLS
    switch (ret) {
    case GNUTLS_E_AGAIN:
    case GNUTLS_E_INTERRUPTED:
        break;                          /* retryable */
    case GNUTLS_E_WARNING_ALERT_RECEIVED:
        av_log(h, AV_LOG_WARNING, "%s\n", gnutls_strerror(ret));
        break;                          /* warning alert: retry too */
    default:
        av_log(h, AV_LOG_ERROR, "%s\n", gnutls_strerror(ret));
        return AVERROR(EIO);
    }
    /* GnuTLS tells us which direction the interrupted operation needs. */
    if (gnutls_record_get_direction(c->session))
        p.events = POLLOUT;
    else
        p.events = POLLIN;
#elif CONFIG_OPENSSL
    ret = SSL_get_error(c->ssl, ret);
    if (ret == SSL_ERROR_WANT_READ) {
        p.events = POLLIN;
    } else if (ret == SSL_ERROR_WANT_WRITE) {
        p.events = POLLOUT;
    } else {
        av_log(h, AV_LOG_ERROR, "%s\n", ERR_error_string(ERR_get_error(), NULL));
        return AVERROR(EIO);
    }
#endif
    /* In non-blocking mode, hand the wait back to the caller. */
    if (h->flags & AVIO_FLAG_NONBLOCK)
        return AVERROR(EAGAIN);
    while (1) {
        int n = poll(&p, 1, 100);       /* 100 ms slices so we can honor interrupts */
        if (n > 0)
            break;
        if (ff_check_interrupt(&h->interrupt_callback))
            return AVERROR(EINTR);
    }
    return 0;
}
 
/* Fill in TLS options from the URL query string ("?cafile=...&verify=..."),
 * without overriding values already set through AVOptions. */
static void set_options(URLContext *h, const char *uri)
{
    TLSContext *c = h->priv_data;
    char buf[1024];
    const char *query = strchr(uri, '?');

    if (!query)
        return;

    if (!c->ca_file && av_find_info_tag(buf, sizeof(buf), "cafile", query))
        c->ca_file = av_strdup(buf);

    if (!c->verify && av_find_info_tag(buf, sizeof(buf), "verify", query)) {
        char *end = NULL;
        c->verify = strtol(buf, &end, 10);
        if (end == buf)
            c->verify = 1;   /* "verify" present without a number -> enabled */
    }

    if (!c->cert_file && av_find_info_tag(buf, sizeof(buf), "cert", query))
        c->cert_file = av_strdup(buf);

    if (!c->key_file && av_find_info_tag(buf, sizeof(buf), "key", query))
        c->key_file = av_strdup(buf);
}
 
/* Open a tls:// URL: establish the underlying TCP (optionally via an
 * http proxy), then perform the TLS handshake with whichever backend
 * (GnuTLS or OpenSSL) was configured.  Certificate verification is only
 * done when the "verify" option is enabled. */
static int tls_open(URLContext *h, const char *uri, int flags)
{
    TLSContext *c = h->priv_data;
    int ret;
    int port;
    char buf[200], host[200], opts[50] = "";
    int numerichost = 0;
    struct addrinfo hints = { 0 }, *ai = NULL;
    const char *proxy_path;
    int use_proxy;
    const char *p = strchr(uri, '?');

    ff_tls_init();

    /* "listen" may come from the URL query as well as from AVOptions. */
    if (p && av_find_info_tag(buf, sizeof(buf), "listen", p))
        c->listen = 1;
    if (c->listen)
        snprintf(opts, sizeof(opts), "?listen=1");

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port, NULL, 0, uri);
    ff_url_join(buf, sizeof(buf), "tcp", NULL, host, port, "%s", opts);

    /* Detect numeric hosts; SNI must not be sent for IP literals. */
    hints.ai_flags = AI_NUMERICHOST;
    if (!getaddrinfo(host, NULL, &hints, &ai)) {
        numerichost = 1;
        freeaddrinfo(ai);
    }

    proxy_path = getenv("http_proxy");
    use_proxy = !ff_http_match_no_proxy(getenv("no_proxy"), host) &&
                proxy_path != NULL && av_strstart(proxy_path, "http://", NULL);

    if (use_proxy) {
        /* Tunnel through the proxy with CONNECT via the httpproxy protocol. */
        char proxy_host[200], proxy_auth[200], dest[200];
        int proxy_port;
        av_url_split(NULL, 0, proxy_auth, sizeof(proxy_auth),
                     proxy_host, sizeof(proxy_host), &proxy_port, NULL, 0,
                     proxy_path);
        ff_url_join(dest, sizeof(dest), NULL, NULL, host, port, NULL);
        ff_url_join(buf, sizeof(buf), "httpproxy", proxy_auth, proxy_host,
                    proxy_port, "/%s", dest);
    }

    ret = ffurl_open(&c->tcp, buf, AVIO_FLAG_READ_WRITE,
                     &h->interrupt_callback, NULL);
    if (ret)
        goto fail;
    c->fd = ffurl_get_file_handle(c->tcp);

#if CONFIG_GNUTLS
    gnutls_init(&c->session, c->listen ? GNUTLS_SERVER : GNUTLS_CLIENT);
    if (!c->listen && !numerichost)
        gnutls_server_name_set(c->session, GNUTLS_NAME_DNS, host, strlen(host));
    gnutls_certificate_allocate_credentials(&c->cred);
    set_options(h, uri);
    if (c->ca_file) {
        ret = gnutls_certificate_set_x509_trust_file(c->cred, c->ca_file, GNUTLS_X509_FMT_PEM);
        if (ret < 0)
            av_log(h, AV_LOG_ERROR, "%s\n", gnutls_strerror(ret));
    }
#if GNUTLS_VERSION_MAJOR >= 3
    else
        gnutls_certificate_set_x509_system_trust(c->cred);
#endif
    gnutls_certificate_set_verify_flags(c->cred, c->verify ?
                                        GNUTLS_VERIFY_ALLOW_X509_V1_CA_CRT : 0);
    if (c->cert_file && c->key_file) {
        ret = gnutls_certificate_set_x509_key_file(c->cred,
                                                   c->cert_file, c->key_file,
                                                   GNUTLS_X509_FMT_PEM);
        if (ret < 0) {
            av_log(h, AV_LOG_ERROR,
                   "Unable to set cert/key files %s and %s: %s\n",
                   c->cert_file, c->key_file, gnutls_strerror(ret));
            ret = AVERROR(EIO);
            goto fail;
        }
    } else if (c->cert_file || c->key_file)
        av_log(h, AV_LOG_ERROR, "cert and key required\n");
    gnutls_credentials_set(c->session, GNUTLS_CRD_CERTIFICATE, c->cred);
    gnutls_transport_set_ptr(c->session, (gnutls_transport_ptr_t)
                                         (intptr_t) c->fd);
    gnutls_priority_set_direct(c->session, "NORMAL", NULL);
    /* Retry the handshake until it completes or a fatal error occurs. */
    while (1) {
        ret = gnutls_handshake(c->session);
        if (ret == 0)
            break;
        if ((ret = do_tls_poll(h, ret)) < 0)
            goto fail;
    }
    if (c->verify) {
        unsigned int status, cert_list_size;
        gnutls_x509_crt_t cert;
        const gnutls_datum_t *cert_list;
        if ((ret = gnutls_certificate_verify_peers2(c->session, &status)) < 0) {
            av_log(h, AV_LOG_ERROR, "Unable to verify peer certificate: %s\n",
                   gnutls_strerror(ret));
            ret = AVERROR(EIO);
            goto fail;
        }
        if (status & GNUTLS_CERT_INVALID) {
            av_log(h, AV_LOG_ERROR, "Peer certificate failed verification\n");
            ret = AVERROR(EIO);
            goto fail;
        }
        if (gnutls_certificate_type_get(c->session) != GNUTLS_CRT_X509) {
            av_log(h, AV_LOG_ERROR, "Unsupported certificate type\n");
            ret = AVERROR(EIO);
            goto fail;
        }
        /* Check that the leaf certificate actually matches the hostname. */
        gnutls_x509_crt_init(&cert);
        cert_list = gnutls_certificate_get_peers(c->session, &cert_list_size);
        gnutls_x509_crt_import(cert, cert_list, GNUTLS_X509_FMT_DER);
        ret = gnutls_x509_crt_check_hostname(cert, host);
        gnutls_x509_crt_deinit(cert);
        if (!ret) {
            av_log(h, AV_LOG_ERROR,
                   "The certificate's owner does not match hostname %s\n", host);
            ret = AVERROR(EIO);
            goto fail;
        }
    }
#elif CONFIG_OPENSSL
    c->ctx = SSL_CTX_new(c->listen ? TLSv1_server_method() : TLSv1_client_method());
    if (!c->ctx) {
        av_log(h, AV_LOG_ERROR, "%s\n", ERR_error_string(ERR_get_error(), NULL));
        ret = AVERROR(EIO);
        goto fail;
    }
    set_options(h, uri);
    if (c->ca_file) {
        if (!SSL_CTX_load_verify_locations(c->ctx, c->ca_file, NULL))
            av_log(h, AV_LOG_ERROR, "SSL_CTX_load_verify_locations %s\n", ERR_error_string(ERR_get_error(), NULL));
    }
    if (c->cert_file && !SSL_CTX_use_certificate_chain_file(c->ctx, c->cert_file)) {
        av_log(h, AV_LOG_ERROR, "Unable to load cert file %s: %s\n",
               c->cert_file, ERR_error_string(ERR_get_error(), NULL));
        ret = AVERROR(EIO);
        goto fail;
    }
    if (c->key_file && !SSL_CTX_use_PrivateKey_file(c->ctx, c->key_file, SSL_FILETYPE_PEM)) {
        av_log(h, AV_LOG_ERROR, "Unable to load key file %s: %s\n",
               c->key_file, ERR_error_string(ERR_get_error(), NULL));
        ret = AVERROR(EIO);
        goto fail;
    }
    // Note, this doesn't check that the peer certificate actually matches
    // the requested hostname.
    if (c->verify)
        SSL_CTX_set_verify(c->ctx, SSL_VERIFY_PEER|SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL);
    c->ssl = SSL_new(c->ctx);
    if (!c->ssl) {
        av_log(h, AV_LOG_ERROR, "%s\n", ERR_error_string(ERR_get_error(), NULL));
        ret = AVERROR(EIO);
        goto fail;
    }
    SSL_set_fd(c->ssl, c->fd);
    if (!c->listen && !numerichost)
        SSL_set_tlsext_host_name(c->ssl, host);
    /* Retry accept/connect until done; do_tls_poll waits for the socket. */
    while (1) {
        ret = c->listen ? SSL_accept(c->ssl) : SSL_connect(c->ssl);
        if (ret > 0)
            break;
        if (ret == 0) {
            av_log(h, AV_LOG_ERROR, "Unable to negotiate TLS/SSL session\n");
            ret = AVERROR(EIO);
            goto fail;
        }
        if ((ret = do_tls_poll(h, ret)) < 0)
            goto fail;
    }
#endif
    return 0;
fail:
    TLS_free(c);
    if (c->tcp)
        ffurl_close(c->tcp);
    ff_tls_deinit();
    return ret;
}
 
/* Read decrypted data; retry (via poll) on would-block conditions.
 * Returns bytes read, AVERROR_EOF on orderly shutdown, or an error. */
static int tls_read(URLContext *h, uint8_t *buf, int size)
{
    TLSContext *c = h->priv_data;

    for (;;) {
        int ret = TLS_read(c, buf, size);
        if (ret > 0)
            return ret;
        if (ret == 0)
            return AVERROR_EOF;
        if ((ret = do_tls_poll(h, ret)) < 0)
            return ret;
        /* socket ready again: retry the read */
    }
    return 0;
}
 
/* Write data through the TLS session; retry (via poll) on would-block
 * conditions.  Returns bytes written or a negative AVERROR. */
static int tls_write(URLContext *h, const uint8_t *buf, int size)
{
    TLSContext *c = h->priv_data;

    for (;;) {
        int ret = TLS_write(c, buf, size);
        if (ret > 0)
            return ret;
        if (ret == 0)
            return AVERROR_EOF;
        if ((ret = do_tls_poll(h, ret)) < 0)
            return ret;
        /* socket ready again: retry the write */
    }
    return 0;
}
 
/* Shut the TLS session down cleanly and release all resources. */
static int tls_close(URLContext *h)
{
    TLSContext *c = h->priv_data;

    TLS_shutdown(c);        /* send close-notify */
    TLS_free(c);            /* free backend session/credentials */
    ffurl_close(c->tcp);
    ff_tls_deinit();
    return 0;
}
 
/* tls:// protocol registration. */
URLProtocol ff_tls_protocol = {
    .name            = "tls",
    .url_open        = tls_open,
    .url_read        = tls_read,
    .url_write       = tls_write,
    .url_close       = tls_close,
    .priv_data_size  = sizeof(TLSContext),
    .flags           = URL_PROTOCOL_FLAG_NETWORK,
    .priv_data_class = &tls_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/tmv.c
0,0 → 1,199
/*
* 8088flex TMV file demuxer
* Copyright (c) 2009 Daniel Verkamp <daniel at drv.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* 8088flex TMV file demuxer
* @author Daniel Verkamp
* @see http://www.oldskool.org/pc/8088_Corruption
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
/* Feature flags stored in the TMV header's feature byte. */
enum {
    TMV_PADDING = 0x01,   /* chunks are padded to 512-byte boundaries */
    TMV_STEREO  = 0x02,   /* audio is 2-channel                        */
};

#define TMV_TAG MKTAG('T', 'M', 'A', 'V')

/* Demuxer state for 8088flex TMV files. */
typedef struct TMVContext {
    unsigned audio_chunk_size;   /* bytes of audio per frame            */
    unsigned video_chunk_size;   /* cols * rows * 2 (char + attribute)  */
    unsigned padding;            /* pad bytes after each av chunk pair  */
    unsigned stream_index;       /* which stream the next packet is for */
} TMVContext;

#define TMV_HEADER_SIZE 12

#define PROBE_MIN_SAMPLE_RATE 5000
#define PROBE_MAX_FPS 120
#define PROBE_MIN_AUDIO_SIZE (PROBE_MIN_SAMPLE_RATE / PROBE_MAX_FPS)
 
/* Accept only plausible TMV headers: TMAV magic, sane sample rate and
 * audio chunk size, no compression, non-zero text dimensions.  The
 * standard 40x25 text mode gets the full score, everything else 1/4. */
static int tmv_probe(AVProbeData *p)
{
    if (AV_RL32(p->buf)     != TMV_TAG               ||
        AV_RL16(p->buf + 4) <  PROBE_MIN_SAMPLE_RATE ||
        AV_RL16(p->buf + 6) <  PROBE_MIN_AUDIO_SIZE  ||
        p->buf[8]   ||   /* compression method must be 0 */
        !p->buf[9]  ||   /* char cols */
        !p->buf[10])     /* char rows */
        return 0;

    if (p->buf[9] == 40 && p->buf[10] == 25)
        return AVPROBE_SCORE_MAX;
    return AVPROBE_SCORE_MAX / 4;
}
 
/* Parse the 12-byte TMV header and create the video and audio streams. */
static int tmv_read_header(AVFormatContext *s)
{
    TMVContext *tmv = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *vst, *ast;
    AVRational fps;
    unsigned comp_method, char_cols, char_rows, features;

    if (avio_rl32(pb) != TMV_TAG)
        return -1;

    if (!(vst = avformat_new_stream(s, NULL)))
        return AVERROR(ENOMEM);

    if (!(ast = avformat_new_stream(s, NULL)))
        return AVERROR(ENOMEM);

    ast->codec->sample_rate = avio_rl16(pb);
    if (!ast->codec->sample_rate) {
        av_log(s, AV_LOG_ERROR, "invalid sample rate\n");
        return -1;
    }

    tmv->audio_chunk_size = avio_rl16(pb);
    if (!tmv->audio_chunk_size) {
        av_log(s, AV_LOG_ERROR, "invalid audio chunk size\n");
        return -1;
    }

    comp_method = avio_r8(pb);
    if (comp_method) {
        av_log(s, AV_LOG_ERROR, "unsupported compression method %d\n",
               comp_method);
        return -1;
    }

    char_cols = avio_r8(pb);
    char_rows = avio_r8(pb);
    /* two bytes (character + attribute) per text-mode cell */
    tmv->video_chunk_size = char_cols * char_rows * 2;

    features = avio_r8(pb);
    if (features & ~(TMV_PADDING | TMV_STEREO)) {
        av_log(s, AV_LOG_ERROR, "unsupported features 0x%02x\n",
               features & ~(TMV_PADDING | TMV_STEREO));
        return -1;
    }

    ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    ast->codec->codec_id   = AV_CODEC_ID_PCM_U8;
    if (features & TMV_STEREO) {
        ast->codec->channels       = 2;
        ast->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    } else {
        ast->codec->channels       = 1;
        ast->codec->channel_layout = AV_CH_LAYOUT_MONO;
    }
    ast->codec->bits_per_coded_sample = 8;
    ast->codec->bit_rate = ast->codec->sample_rate *
                           ast->codec->bits_per_coded_sample;
    avpriv_set_pts_info(ast, 32, 1, ast->codec->sample_rate);

    /* frame rate follows from how many audio samples each chunk holds */
    fps.num = ast->codec->sample_rate * ast->codec->channels;
    fps.den = tmv->audio_chunk_size;
    av_reduce(&fps.num, &fps.den, fps.num, fps.den, 0xFFFFFFFFLL);

    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = AV_CODEC_ID_TMV;
    vst->codec->pix_fmt    = AV_PIX_FMT_PAL8;
    vst->codec->width      = char_cols * 8; /* 8x8 pixels per character */
    vst->codec->height     = char_rows * 8;
    avpriv_set_pts_info(vst, 32, fps.den, fps.num);

    if (features & TMV_PADDING)
        /* round each record up to the next 512-byte boundary */
        tmv->padding =
            ((tmv->video_chunk_size + tmv->audio_chunk_size + 511) & ~511) -
            (tmv->video_chunk_size + tmv->audio_chunk_size);

    vst->codec->bit_rate = ((tmv->video_chunk_size + tmv->padding) *
                            fps.num * 8) / fps.den;

    return 0;
}
 
/* Emit one chunk, alternating video (stream 0) and audio (stream 1). */
static int tmv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    TMVContext *tmv = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret, pkt_size = tmv->stream_index ?
                        tmv->audio_chunk_size : tmv->video_chunk_size;

    if (url_feof(pb))
        return AVERROR_EOF;

    ret = av_get_packet(pb, pkt, pkt_size);

    /* the record padding sits after the audio chunk */
    if (tmv->stream_index)
        avio_skip(pb, tmv->padding);

    pkt->stream_index = tmv->stream_index;
    tmv->stream_index ^= 1;        /* toggle stream for the next call */
    pkt->flags |= AV_PKT_FLAG_KEY; /* every chunk is a keyframe */

    return ret;
}
 
/**
 * Seek to a record by timestamp (video frame number).
 * Only stream 0 may be used as the reference stream; records are
 * fixed-size, so the byte offset is a simple multiplication.
 */
static int tmv_read_seek(AVFormatContext *s, int stream_index,
                         int64_t timestamp, int flags)
{
    TMVContext *tmv = s->priv_data;
    int64_t offset;

    if (stream_index != 0)
        return -1;

    offset = timestamp *
             (tmv->audio_chunk_size + tmv->video_chunk_size + tmv->padding);

    if (avio_seek(s->pb, offset + TMV_HEADER_SIZE, SEEK_SET) < 0)
        return -1;

    /* records start with the video chunk */
    tmv->stream_index = 0;
    return 0;
}
 
/* 8088flex TMV input format registration */
AVInputFormat ff_tmv_demuxer = {
    .name           = "tmv",
    .long_name      = NULL_IF_CONFIG_SMALL("8088flex TMV"),
    .priv_data_size = sizeof(TMVContext),
    .read_probe     = tmv_probe,
    .read_header    = tmv_read_header,
    .read_packet    = tmv_read_packet,
    .read_seek      = tmv_read_seek,
    .flags          = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/tta.c
0,0 → 1,189
/*
* TTA demuxer
* Copyright (c) 2006 Alex Beregszaszi
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavcodec/get_bits.h"
#include "apetag.h"
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "id3v1.h"
#include "libavutil/crc.h"
#include "libavutil/dict.h"
 
/* TTA demuxer state derived from the file header */
typedef struct {
    int totalframes, currentframe; /* frame count / read cursor */
    int frame_size;      /* samples per regular frame */
    int last_frame_size; /* samples in the final (possibly shorter) frame */
} TTAContext;
 
/* avio checksum callback: update a little-endian IEEE CRC-32 over buf */
static unsigned long tta_check_crc(unsigned long checksum, const uint8_t *buf,
                                   unsigned int len)
{
    return av_crc(av_crc_get_table(AV_CRC_32_IEEE_LE), checksum, buf, len);
}
 
/**
 * Probe for a True Audio file: "TTA1" magic, format version 1 or 2,
 * and nonzero channel count, bits per sample, and sample rate.
 */
static int tta_probe(AVProbeData *p)
{
    const uint8_t *d = p->buf;
    unsigned format  = AV_RL16(&d[4]);

    if (AV_RL32(&d[0]) != MKTAG('T', 'T', 'A', '1'))
        return 0;
    if (format != 1 && format != 2)
        return 0;
    if (!AV_RL16(&d[6]) || !AV_RL16(&d[8]) || !AV_RL32(&d[10]))
        return 0;
    return AVPROBE_SCORE_EXTENSION + 30;
}
 
/*
 * Parse the TTA1 header and the seek table, verifying the CRC of each,
 * build one index entry per frame, and keep the raw header bytes as
 * decoder extradata.
 */
static int tta_read_header(AVFormatContext *s)
{
    TTAContext *c = s->priv_data;
    AVStream *st;
    int i, channels, bps, samplerate;
    uint64_t framepos, start_offset;
    uint32_t nb_samples, crc;

    ff_id3v1_read(s);

    start_offset = avio_tell(s->pb);
    /* checksum everything from the magic up to the stored CRC field */
    ffio_init_checksum(s->pb, tta_check_crc, UINT32_MAX);
    if (avio_rl32(s->pb) != AV_RL32("TTA1"))
        return AVERROR_INVALIDDATA;

    avio_skip(s->pb, 2); // FIXME: flags
    channels = avio_rl16(s->pb);
    bps = avio_rl16(s->pb);
    samplerate = avio_rl32(s->pb);
    if(samplerate <= 0 || samplerate > 1000000){
        av_log(s, AV_LOG_ERROR, "nonsense samplerate\n");
        return AVERROR_INVALIDDATA;
    }

    nb_samples = avio_rl32(s->pb);
    if (!nb_samples) {
        av_log(s, AV_LOG_ERROR, "invalid number of samples\n");
        return AVERROR_INVALIDDATA;
    }

    crc = ffio_get_checksum(s->pb) ^ UINT32_MAX;
    if (crc != avio_rl32(s->pb)) {
        av_log(s, AV_LOG_ERROR, "Header CRC error\n");
        return AVERROR_INVALIDDATA;
    }

    /* TTA frames carry samplerate * 256/245 samples each */
    c->frame_size = samplerate * 256 / 245;
    c->last_frame_size = nb_samples % c->frame_size;
    if (!c->last_frame_size)
        c->last_frame_size = c->frame_size;
    c->totalframes = nb_samples / c->frame_size + (c->last_frame_size < c->frame_size);
    c->currentframe = 0;

    /* guard the 4*totalframes arithmetic below against overflow */
    if(c->totalframes >= UINT_MAX/sizeof(uint32_t) || c->totalframes <= 0){
        av_log(s, AV_LOG_ERROR, "totalframes %d invalid\n", c->totalframes);
        return AVERROR_INVALIDDATA;
    }

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    avpriv_set_pts_info(st, 64, 1, samplerate);
    st->start_time = 0;
    st->duration = nb_samples;

    /* frame data begins right after the seek table and its 4-byte CRC */
    framepos = avio_tell(s->pb) + 4*c->totalframes + 4;

    /* store the header read so far (magic through nb_samples+CRC) */
    if (ff_alloc_extradata(st->codec, avio_tell(s->pb) - start_offset))
        return AVERROR(ENOMEM);

    avio_seek(s->pb, start_offset, SEEK_SET);
    avio_read(s->pb, st->codec->extradata, st->codec->extradata_size);

    /* seek table: one LE32 frame size per frame, then its own CRC */
    ffio_init_checksum(s->pb, tta_check_crc, UINT32_MAX);
    for (i = 0; i < c->totalframes; i++) {
        uint32_t size = avio_rl32(s->pb);
        av_add_index_entry(st, framepos, i * c->frame_size, size, 0,
                           AVINDEX_KEYFRAME);
        framepos += size;
    }
    crc = ffio_get_checksum(s->pb) ^ UINT32_MAX;
    if (crc != avio_rl32(s->pb)) {
        av_log(s, AV_LOG_ERROR, "Seek table CRC error\n");
        return AVERROR_INVALIDDATA;
    }

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_TTA;
    st->codec->channels = channels;
    st->codec->sample_rate = samplerate;
    st->codec->bits_per_coded_sample = bps;

    /* APE tags live at the end of the file; restore position afterwards */
    if (s->pb->seekable) {
        int64_t pos = avio_tell(s->pb);
        ff_ape_parse_tag(s);
        avio_seek(s->pb, pos, SEEK_SET);
    }

    return 0;
}
 
/* Read the next frame using the index built by tta_read_header(). */
static int tta_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    TTAContext *c = s->priv_data;
    AVStream *st = s->streams[0];
    int size, ret;

    // FIXME!
    if (c->currentframe >= c->totalframes)
        return AVERROR_EOF;

    size = st->index_entries[c->currentframe].size;

    ret = av_get_packet(s->pb, pkt, size);
    pkt->dts = st->index_entries[c->currentframe++].timestamp;
    /* the final frame may carry fewer samples than the others */
    pkt->duration = c->currentframe == c->totalframes ? c->last_frame_size :
                                                        c->frame_size;
    return ret;
}
 
/**
 * Seek by timestamp using the per-frame index; updates the frame cursor
 * so the next tta_read_packet() call returns the found frame.
 */
static int tta_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    TTAContext *c = s->priv_data;
    AVStream *st = s->streams[stream_index];
    int frame = av_index_search_timestamp(st, timestamp, flags);

    if (frame < 0)
        return -1;
    if (avio_seek(s->pb, st->index_entries[frame].pos, SEEK_SET) < 0)
        return -1;

    c->currentframe = frame;
    return 0;
}
 
/* True Audio input format registration */
AVInputFormat ff_tta_demuxer = {
    .name           = "tta",
    .long_name      = NULL_IF_CONFIG_SMALL("TTA (True Audio)"),
    .priv_data_size = sizeof(TTAContext),
    .read_probe     = tta_probe,
    .read_header    = tta_read_header,
    .read_packet    = tta_read_packet,
    .read_seek      = tta_read_seek,
    .extensions     = "tta",
};
/contrib/sdk/sources/ffmpeg/libavformat/tty.c
0,0 → 1,160
/*
* Tele-typewriter demuxer
* Copyright (c) 2010 Peter Ross <pross@xvid.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Tele-typewriter demuxer
*/
 
#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/log.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avformat.h"
#include "internal.h"
#include "sauce.h"
 
/* TTY demuxer state; width/height/framerate come from private options */
typedef struct {
    AVClass *class;
    int chars_per_frame;  /**< characters emitted per video frame */
    uint64_t fsize;       /**< file size less metadata buffer */
    int width, height;    /**< Set by a private option. */
    AVRational framerate; /**< Set by a private option. */
} TtyDemuxContext;
 
/**
 * Parse EFI header
 * Reads the 0x1A-marked trailer at start_pos and stores the embedded
 * filename and title as metadata; on success s->fsize is shrunk so the
 * trailer is excluded from playback.
 */
static int efi_read(AVFormatContext *avctx, uint64_t start_pos)
{
    TtyDemuxContext *s = avctx->priv_data;
    AVIOContext *pb = avctx->pb;
    char buf[37];
    int len;

    avio_seek(pb, start_pos, SEEK_SET);
    if (avio_r8(pb) != 0x1A)
        return -1;

/* read a length byte then a fixed-size field; always consumes `size`
 * bytes, but only the first `len` form the NUL-terminated value */
#define GET_EFI_META(name,size) \
    len = avio_r8(pb); \
    if (len < 1 || len > size) \
        return -1; \
    if (avio_read(pb, buf, size) == size) { \
        buf[len] = 0; \
        av_dict_set(&avctx->metadata, name, buf, 0); \
    }

    GET_EFI_META("filename", 12)
    GET_EFI_META("title",    36)

    s->fsize = start_pos;
    return 0;
}
 
/* Create the single ANSI video stream, derive duration from file size,
 * and parse trailing SAUCE/EFI metadata when the input is seekable. */
static int read_header(AVFormatContext *avctx)
{
    TtyDemuxContext *s = avctx->priv_data;
    int ret = 0;
    AVStream *st = avformat_new_stream(avctx, NULL);

    if (!st) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    st->codec->codec_tag  = 0;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_ANSI;

    st->codec->width  = s->width;
    st->codec->height = s->height;
    avpriv_set_pts_info(st, 60, s->framerate.den, s->framerate.num);
    st->avg_frame_rate = s->framerate;

    /* simulate tty display speed */
    s->chars_per_frame = FFMAX(av_q2d(st->time_base)*s->chars_per_frame, 1);

    if (avctx->pb->seekable) {
        s->fsize = avio_size(avctx->pb);
        st->duration = (s->fsize + s->chars_per_frame - 1) / s->chars_per_frame;

        /* try SAUCE first; fall back to a 51-byte EFI trailer */
        if (ff_sauce_read(avctx, &s->fsize, 0, 0) < 0)
            efi_read(avctx, s->fsize - 51);

        avio_seek(avctx->pb, 0, SEEK_SET);
    }

fail:
    return ret;
}
 
/* Emit chars_per_frame bytes per packet, never reading into the
 * trailing metadata region (bytes at/after s->fsize). */
static int read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    TtyDemuxContext *s = avctx->priv_data;
    int n;

    if (url_feof(avctx->pb))
        return AVERROR_EOF;

    n = s->chars_per_frame;
    if (s->fsize) {
        // ignore metadata buffer
        uint64_t p = avio_tell(avctx->pb);
        if (p == s->fsize)
            return AVERROR_EOF;
        if (p + s->chars_per_frame > s->fsize)
            n = s->fsize - p; /* clamp the last packet */
    }

    pkt->size = av_get_packet(avctx->pb, pkt, n);
    if (pkt->size < 0)
        return pkt->size; /* av_get_packet error code */
    pkt->flags |= AV_PKT_FLAG_KEY;
    return 0;
}
 
#define OFFSET(x) offsetof(TtyDemuxContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Demuxer private options. Fix: the chars_per_frame entry previously
 * spelled out offsetof() and AV_OPT_FLAG_DECODING_PARAM instead of
 * using the OFFSET()/DEC helpers defined above, unlike its siblings. */
static const AVOption options[] = {
    { "chars_per_frame", "", OFFSET(chars_per_frame), AV_OPT_TYPE_INT, {.i64 = 6000}, 1, INT_MAX, DEC },
    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
    { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, DEC },
    { NULL },
};
 
/* AVClass exposing the private options above */
static const AVClass tty_demuxer_class = {
    .class_name = "TTY demuxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Tele-typewriter input format registration (no probe: matched by extension) */
AVInputFormat ff_tty_demuxer = {
    .name           = "tty",
    .long_name      = NULL_IF_CONFIG_SMALL("Tele-typewriter"),
    .priv_data_size = sizeof(TtyDemuxContext),
    .read_header    = read_header,
    .read_packet    = read_packet,
    .extensions     = "ans,art,asc,diz,ice,nfo,txt,vt",
    .priv_class     = &tty_demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/txd.c
0,0 → 1,100
/*
* Renderware TeXture Dictionary (.txd) demuxer
* Copyright (c) 2007 Ivo van Poorten
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
 
#define TXD_FILE 0x16
#define TXD_INFO 0x01
#define TXD_EXTRA 0x03
#define TXD_TEXTURE 0x15
#define TXD_TEXTURE_DATA 0x01
#define TXD_MARKER 0x1803ffff
#define TXD_MARKER2 0x1003ffff
 
/** Probe for a Renderware TXD file: FILE chunk id plus a known version marker. */
static int txd_probe(AVProbeData * pd) {
    uint32_t id     = AV_RL32(pd->buf);
    uint32_t marker = AV_RL32(pd->buf + 8);

    if (id != TXD_FILE)
        return 0;
    if (marker != TXD_MARKER && marker != TXD_MARKER2)
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/** Create the single TXD video stream; codec parameters are parsed later
 *  from the compressed bitstream itself. */
static int txd_read_header(AVFormatContext *s) {
    AVStream *stream = avformat_new_stream(s, NULL);

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type    = AVMEDIA_TYPE_VIDEO;
    stream->codec->codec_id      = AV_CODEC_ID_TXD;
    /* fixed nominal rate of 5 textures per second */
    stream->codec->time_base.num = 1;
    stream->codec->time_base.den = 5;
    /* the parameters will be extracted from the compressed bitstream */

    return 0;
}
 
/* Walk RIFF-like chunks, skipping containers/small info chunks, and
 * emit the payload of the next data-bearing chunk as one packet. */
static int txd_read_packet(AVFormatContext *s, AVPacket *pkt) {
    AVIOContext *pb = s->pb;
    unsigned int id, chunk_size, marker;
    int ret;

next_chunk:
    id = avio_rl32(pb);
    chunk_size = avio_rl32(pb);
    marker = avio_rl32(pb);

    if (url_feof(s->pb))
        return AVERROR_EOF;
    if (marker != TXD_MARKER && marker != TXD_MARKER2) {
        av_log(s, AV_LOG_ERROR, "marker does not match\n");
        return AVERROR_INVALIDDATA;
    }

    switch (id) {
    case TXD_INFO:
        if (chunk_size > 100)
            break; /* large info chunks are emitted as packets below */
        /* fall through: small info chunks are skipped like EXTRA */
    case TXD_EXTRA:
        avio_skip(s->pb, chunk_size);
        /* fall through: continue scanning after the skipped data */
    case TXD_FILE:
    case TXD_TEXTURE:
        goto next_chunk; /* container chunks: descend into their contents */
    default:
        av_log(s, AV_LOG_ERROR, "unknown chunk id %i\n", id);
        return AVERROR_INVALIDDATA;
    }

    ret = av_get_packet(s->pb, pkt, chunk_size);
    if (ret < 0)
        return ret;
    pkt->stream_index = 0;

    return 0;
}
 
/* Renderware TXD input format registration */
AVInputFormat ff_txd_demuxer = {
    .name        = "txd",
    .long_name   = NULL_IF_CONFIG_SMALL("Renderware TeXture Dictionary"),
    .read_probe  = txd_probe,
    .read_header = txd_read_header,
    .read_packet = txd_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/udp.c
0,0 → 1,867
/*
* UDP prototype streaming system
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* UDP protocol
*/
 
#define _BSD_SOURCE /* Needed for using struct ip_mreq with recent glibc */
 
#include "avformat.h"
#include "avio_internal.h"
#include "libavutil/parseutils.h"
#include "libavutil/fifo.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/log.h"
#include "libavutil/time.h"
#include "internal.h"
#include "network.h"
#include "os_support.h"
#include "url.h"
 
#if HAVE_PTHREAD_CANCEL
#include <pthread.h>
#endif
 
#ifndef HAVE_PTHREAD_CANCEL
#define HAVE_PTHREAD_CANCEL 0
#endif
 
#ifndef IPV6_ADD_MEMBERSHIP
#define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
#define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
#endif
 
#define UDP_TX_BUF_SIZE 32768
#define UDP_MAX_PKT_SIZE 65536
 
/* Per-connection UDP state; most fields mirror URL options or AVOptions */
typedef struct {
    const AVClass *class;
    int udp_fd;              /* the datagram socket */
    int ttl;                 /* multicast TTL / hop limit */
    int buffer_size;         /* SO_RCVBUF/SO_SNDBUF request */
    int is_multicast;        /* destination is a multicast address */
    int local_port;          /* port to bind locally */
    int reuse_socket;        /* SO_REUSEADDR-style reuse */
    int overrun_nonfatal;    /* survive FIFO overruns instead of erroring */
    struct sockaddr_storage dest_addr;
    int dest_addr_len;
    int is_connected;        /* connect() the socket to dest_addr */

    /* Circular Buffer variables for use in UDP receive code */
    int circular_buffer_size;
    AVFifoBuffer *fifo;            /* [len32][payload] records */
    int circular_buffer_error;     /* sticky error from the receiver thread */
#if HAVE_PTHREAD_CANCEL
    pthread_t circular_buffer_thread;
    pthread_mutex_t mutex;         /* guards fifo + circular_buffer_error */
    pthread_cond_t cond;           /* signalled when data or error arrives */
    int thread_started;
#endif
    uint8_t tmp[UDP_MAX_PKT_SIZE+4]; /* staging: 4-byte length + datagram */
    int remaining_in_dg;
    char *local_addr;
    int packet_size;         /* max packet size for writing */
    int timeout;             /* read timeout in microseconds (rw_timeout) */
} UDPContext;
 
#define OFFSET(x) offsetof(UDPContext, x)
#define D AV_OPT_FLAG_DECODING_PARAM
#define E AV_OPT_FLAG_ENCODING_PARAM
/* AVOptions for the udp protocol; URL query parameters may override them */
static const AVOption options[] = {
{"buffer_size", "Socket buffer size in bytes", OFFSET(buffer_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, D|E },
{"localport", "Set local port to bind to", OFFSET(local_port), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, D|E },
{"localaddr", "Choose local IP address", OFFSET(local_addr), AV_OPT_TYPE_STRING, {.str = ""}, 0, 0, D|E },
{"pkt_size", "Set size of UDP packets", OFFSET(packet_size), AV_OPT_TYPE_INT, {.i64 = 1472}, 0, INT_MAX, D|E },
{"reuse", "Explicitly allow or disallow reusing UDP sockets", OFFSET(reuse_socket), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, D|E },
{"ttl", "Set the time to live value (for multicast only)", OFFSET(ttl), AV_OPT_TYPE_INT, {.i64 = 16}, 0, INT_MAX, E },
{"connect", "Should connect() be called on socket", OFFSET(is_connected), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, D|E },
/* TODO 'sources', 'block' option */
{"fifo_size", "Set the UDP receiving circular buffer size, expressed as a number of packets with size of 188 bytes", OFFSET(circular_buffer_size), AV_OPT_TYPE_INT, {.i64 = 7*4096}, 0, INT_MAX, D },
{"overrun_nonfatal", "Survive in case of UDP receiving circular buffer overrun", OFFSET(overrun_nonfatal), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, D },
{"timeout", "In read mode: if no data arrived in more than this time interval, raise error", OFFSET(timeout), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, D },
{NULL}
};
 
/* AVClass exposing the udp protocol options above */
static const AVClass udp_context_class = {
    .class_name = "udp",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/** Log the current network error (ff_neterrno) prefixed by the caller's tag. */
static void log_net_error(void *ctx, int level, const char* prefix)
{
    char msg[100];

    av_strerror(ff_neterrno(), msg, sizeof(msg));
    av_log(ctx, level, "%s: %s\n", prefix, msg);
}
 
/* Set the multicast TTL (IPv4) or hop limit (IPv6) on sockfd, choosing
 * the socket option that matches addr's family. Returns 0 or -1. */
static int udp_set_multicast_ttl(int sockfd, int mcastTTL,
                                 struct sockaddr *addr)
{
#ifdef IP_MULTICAST_TTL
    if (addr->sa_family == AF_INET) {
        if (setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_TTL, &mcastTTL, sizeof(mcastTTL)) < 0) {
            log_net_error(NULL, AV_LOG_ERROR, "setsockopt(IP_MULTICAST_TTL)");
            return -1;
        }
    }
#endif
#if defined(IPPROTO_IPV6) && defined(IPV6_MULTICAST_HOPS)
    if (addr->sa_family == AF_INET6) {
        if (setsockopt(sockfd, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &mcastTTL, sizeof(mcastTTL)) < 0) {
            log_net_error(NULL, AV_LOG_ERROR, "setsockopt(IPV6_MULTICAST_HOPS)");
            return -1;
        }
    }
#endif
    return 0;
}
 
/* Join the multicast group given by addr on the default interface,
 * for IPv4 (IP_ADD_MEMBERSHIP) or IPv6 (IPV6_ADD_MEMBERSHIP). */
static int udp_join_multicast_group(int sockfd, struct sockaddr *addr)
{
#ifdef IP_ADD_MEMBERSHIP
    if (addr->sa_family == AF_INET) {
        struct ip_mreq mreq;

        mreq.imr_multiaddr.s_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
        mreq.imr_interface.s_addr= INADDR_ANY; /* let the kernel pick the interface */
        if (setsockopt(sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, (const void *)&mreq, sizeof(mreq)) < 0) {
            log_net_error(NULL, AV_LOG_ERROR, "setsockopt(IP_ADD_MEMBERSHIP)");
            return -1;
        }
    }
#endif
#if HAVE_STRUCT_IPV6_MREQ && defined(IPPROTO_IPV6)
    if (addr->sa_family == AF_INET6) {
        struct ipv6_mreq mreq6;

        memcpy(&mreq6.ipv6mr_multiaddr, &(((struct sockaddr_in6 *)addr)->sin6_addr), sizeof(struct in6_addr));
        mreq6.ipv6mr_interface= 0; /* any interface */
        if (setsockopt(sockfd, IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, &mreq6, sizeof(mreq6)) < 0) {
            log_net_error(NULL, AV_LOG_ERROR, "setsockopt(IPV6_ADD_MEMBERSHIP)");
            return -1;
        }
    }
#endif
    return 0;
}
 
/* Leave the multicast group previously joined by udp_join_multicast_group(). */
static int udp_leave_multicast_group(int sockfd, struct sockaddr *addr)
{
#ifdef IP_DROP_MEMBERSHIP
    if (addr->sa_family == AF_INET) {
        struct ip_mreq mreq;

        mreq.imr_multiaddr.s_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
        mreq.imr_interface.s_addr= INADDR_ANY;
        if (setsockopt(sockfd, IPPROTO_IP, IP_DROP_MEMBERSHIP, (const void *)&mreq, sizeof(mreq)) < 0) {
            log_net_error(NULL, AV_LOG_ERROR, "setsockopt(IP_DROP_MEMBERSHIP)");
            return -1;
        }
    }
#endif
#if HAVE_STRUCT_IPV6_MREQ && defined(IPPROTO_IPV6)
    if (addr->sa_family == AF_INET6) {
        struct ipv6_mreq mreq6;

        memcpy(&mreq6.ipv6mr_multiaddr, &(((struct sockaddr_in6 *)addr)->sin6_addr), sizeof(struct in6_addr));
        mreq6.ipv6mr_interface= 0;
        if (setsockopt(sockfd, IPPROTO_IPV6, IPV6_DROP_MEMBERSHIP, &mreq6, sizeof(mreq6)) < 0) {
            log_net_error(NULL, AV_LOG_ERROR, "setsockopt(IPV6_DROP_MEMBERSHIP)");
            return -1;
        }
    }
#endif
    return 0;
}
 
/* getaddrinfo() wrapper: resolve hostname:port for the given socket
 * type/family/flags.  A NULL/empty/"?" hostname resolves the wildcard
 * address.  Returns NULL on failure (after logging); on success the
 * caller owns the list and must freeaddrinfo() it. */
static struct addrinfo* udp_resolve_host(const char *hostname, int port,
                                         int type, int family, int flags)
{
    struct addrinfo hints = { 0 }, *res = 0;
    int error;
    char sport[16];
    const char *node = 0, *service = "0";

    if (port > 0) {
        snprintf(sport, sizeof(sport), "%d", port);
        service = sport;
    }
    if ((hostname) && (hostname[0] != '\0') && (hostname[0] != '?')) {
        node = hostname;
    }
    hints.ai_socktype = type;
    hints.ai_family   = family;
    hints.ai_flags    = flags;
    if ((error = getaddrinfo(node, service, &hints, &res))) {
        res = NULL;
        av_log(NULL, AV_LOG_ERROR, "udp_resolve_host: %s\n", gai_strerror(error));
    }

    return res;
}
 
/* Join (include=1) or block (include=0) each multicast source address
 * in `sources` on sockfd.  Uses the protocol-independent
 * group_source_req API where available, falling back to the IPv4-only
 * ip_mreq_source API; returns 0, a negative AVERROR, or ff_neterrno(). */
static int udp_set_multicast_sources(int sockfd, struct sockaddr *addr,
                                     int addr_len, char **sources,
                                     int nb_sources, int include)
{
#if HAVE_STRUCT_GROUP_SOURCE_REQ && defined(MCAST_BLOCK_SOURCE) && !defined(_WIN32)
    /* These ones are available in the microsoft SDK, but don't seem to work
     * as on linux, so just prefer the v4-only approach there for now. */
    int i;
    for (i = 0; i < nb_sources; i++) {
        struct group_source_req mreqs;
        int level = addr->sa_family == AF_INET ? IPPROTO_IP : IPPROTO_IPV6;
        struct addrinfo *sourceaddr = udp_resolve_host(sources[i], 0,
                                                       SOCK_DGRAM, AF_UNSPEC,
                                                       0);
        if (!sourceaddr)
            return AVERROR(ENOENT);

        mreqs.gsr_interface = 0; /* any interface */
        memcpy(&mreqs.gsr_group, addr, addr_len);
        memcpy(&mreqs.gsr_source, sourceaddr->ai_addr, sourceaddr->ai_addrlen);
        freeaddrinfo(sourceaddr);

        if (setsockopt(sockfd, level,
                       include ? MCAST_JOIN_SOURCE_GROUP : MCAST_BLOCK_SOURCE,
                       (const void *)&mreqs, sizeof(mreqs)) < 0) {
            if (include)
                log_net_error(NULL, AV_LOG_ERROR, "setsockopt(MCAST_JOIN_SOURCE_GROUP)");
            else
                log_net_error(NULL, AV_LOG_ERROR, "setsockopt(MCAST_BLOCK_SOURCE)");
            return ff_neterrno();
        }
    }
#elif HAVE_STRUCT_IP_MREQ_SOURCE && defined(IP_BLOCK_SOURCE)
    int i;
    if (addr->sa_family != AF_INET) {
        av_log(NULL, AV_LOG_ERROR,
               "Setting multicast sources only supported for IPv4\n");
        return AVERROR(EINVAL);
    }
    for (i = 0; i < nb_sources; i++) {
        struct ip_mreq_source mreqs;
        struct addrinfo *sourceaddr = udp_resolve_host(sources[i], 0,
                                                       SOCK_DGRAM, AF_UNSPEC,
                                                       0);
        if (!sourceaddr)
            return AVERROR(ENOENT);
        /* v4-only API: the resolved source must itself be IPv4 */
        if (sourceaddr->ai_addr->sa_family != AF_INET) {
            freeaddrinfo(sourceaddr);
            av_log(NULL, AV_LOG_ERROR, "%s is of incorrect protocol family\n",
                   sources[i]);
            return AVERROR(EINVAL);
        }

        mreqs.imr_multiaddr.s_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
        mreqs.imr_interface.s_addr = INADDR_ANY;
        mreqs.imr_sourceaddr.s_addr = ((struct sockaddr_in *)sourceaddr->ai_addr)->sin_addr.s_addr;
        freeaddrinfo(sourceaddr);

        if (setsockopt(sockfd, IPPROTO_IP,
                       include ? IP_ADD_SOURCE_MEMBERSHIP : IP_BLOCK_SOURCE,
                       (const void *)&mreqs, sizeof(mreqs)) < 0) {
            if (include)
                log_net_error(NULL, AV_LOG_ERROR, "setsockopt(IP_ADD_SOURCE_MEMBERSHIP)");
            else
                log_net_error(NULL, AV_LOG_ERROR, "setsockopt(IP_BLOCK_SOURCE)");
            return ff_neterrno();
        }
    }
#else
    return AVERROR(ENOSYS);
#endif
    return 0;
}
/**
 * Resolve hostname:port into *addr (first result only).
 * @return the address length on success, AVERROR(EIO) on failure
 */
static int udp_set_url(struct sockaddr_storage *addr,
                       const char *hostname, int port)
{
    struct addrinfo *ai;
    int len;

    ai = udp_resolve_host(hostname, port, SOCK_DGRAM, AF_UNSPEC, 0);
    if (!ai)
        return AVERROR(EIO);

    memcpy(addr, ai->ai_addr, ai->ai_addrlen);
    len = ai->ai_addrlen;
    freeaddrinfo(ai);

    return len;
}
 
/* Resolve the local bind address (family pinned to the destination's
 * family when known) and create a datagram socket for the first
 * candidate that succeeds.  On success *addr/*addr_len receive the
 * chosen bind address and the fd is returned; -1 on failure. */
static int udp_socket_create(UDPContext *s, struct sockaddr_storage *addr,
                             socklen_t *addr_len, const char *localaddr)
{
    int udp_fd = -1;
    struct addrinfo *res0 = NULL, *res = NULL;
    int family = AF_UNSPEC;

    if (((struct sockaddr *) &s->dest_addr)->sa_family)
        family = ((struct sockaddr *) &s->dest_addr)->sa_family;
    res0 = udp_resolve_host(localaddr[0] ? localaddr : NULL, s->local_port,
                            SOCK_DGRAM, family, AI_PASSIVE);
    if (res0 == 0)
        goto fail;
    for (res = res0; res; res=res->ai_next) {
        udp_fd = ff_socket(res->ai_family, SOCK_DGRAM, 0);
        if (udp_fd != -1) break;
        log_net_error(NULL, AV_LOG_ERROR, "socket");
    }

    if (udp_fd < 0)
        goto fail;

    memcpy(addr, res->ai_addr, res->ai_addrlen);
    *addr_len = res->ai_addrlen;

    freeaddrinfo(res0);

    return udp_fd;

 fail:
    if (udp_fd >= 0)
        closesocket(udp_fd);
    if(res0)
        freeaddrinfo(res0);
    return -1;
}
 
/* Return the numeric port stored in a socket address, or -1 on error. */
static int udp_port(struct sockaddr_storage *addr, int addr_len)
{
    char sbuf[sizeof(int)*3+1]; /* enough digits for any int port */
    int error;

    if ((error = getnameinfo((struct sockaddr *)addr, addr_len, NULL, 0, sbuf, sizeof(sbuf), NI_NUMERICSERV)) != 0) {
        av_log(NULL, AV_LOG_ERROR, "getnameinfo: %s\n", gai_strerror(error));
        return -1;
    }

    return strtol(sbuf, NULL, 10);
}
 
 
/**
 * If no filename is given to av_open_input_file because you want to
 * get the local port first, then you must call this function to set
 * the remote server address.
 *
 * url syntax: udp://host:port[?option=val...]
 * option: 'ttl=n'       : set the ttl value (for multicast only)
 *         'localport=n' : set the local port
 *         'pkt_size=n'  : set max packet size
 *         'reuse=1'     : enable reusing the socket
 *         'overrun_nonfatal=1': survive in case of circular buffer overrun
 *
 * @param h media file context
 * @param uri of the remote server
 * @return zero if no error.
 */
int ff_udp_set_remote_url(URLContext *h, const char *uri)
{
    UDPContext *s = h->priv_data;
    char hostname[256], buf[10];
    int port;
    const char *p;

    av_url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri);

    /* set the destination address */
    s->dest_addr_len = udp_set_url(&s->dest_addr, hostname, port);
    if (s->dest_addr_len < 0) {
        return AVERROR(EIO);
    }
    s->is_multicast = ff_is_multicast_address((struct sockaddr*) &s->dest_addr);
    /* the "connect" query option may (re)connect the socket in place */
    p = strchr(uri, '?');
    if (p) {
        if (av_find_info_tag(buf, sizeof(buf), "connect", p)) {
            int was_connected = s->is_connected;
            s->is_connected = strtol(buf, NULL, 10);
            if (s->is_connected && !was_connected) {
                if (connect(s->udp_fd, (struct sockaddr *) &s->dest_addr,
                            s->dest_addr_len)) {
                    s->is_connected = 0;
                    log_net_error(h, AV_LOG_ERROR, "connect");
                    return AVERROR(EIO);
                }
            }
        }
    }

    return 0;
}
 
/**
 * Return the local port used by the UDP connection
 * @param h media file context
 * @return the local port number
 */
int ff_udp_get_local_port(URLContext *h)
{
    UDPContext *ctx = h->priv_data;

    return ctx->local_port;
}
 
/**
 * Return the udp file handle for select() usage to wait for several RTP
 * streams at the same time.
 * @param h media file context
 */
static int udp_get_file_handle(URLContext *h)
{
    UDPContext *ctx = h->priv_data;

    return ctx->udp_fd;
}
 
#if HAVE_PTHREAD_CANCEL
/* Receiver thread: blockingly recv() datagrams and append them to the
 * FIFO as [4-byte LE length][payload] records.  The mutex guards the
 * FIFO and circular_buffer_error; the condvar wakes readers.  Thread
 * shutdown uses pthread cancellation, so cancellation is kept disabled
 * except around the recv() call itself. */
static void *circular_buffer_task( void *_URLContext)
{
    URLContext *h = _URLContext;
    UDPContext *s = h->priv_data;
    int old_cancelstate;

    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_cancelstate);
    pthread_mutex_lock(&s->mutex);
    if (ff_socket_nonblock(s->udp_fd, 0) < 0) {
        av_log(h, AV_LOG_ERROR, "Failed to set blocking mode");
        s->circular_buffer_error = AVERROR(EIO);
        goto end;
    }
    while(1) {
        int len;

        pthread_mutex_unlock(&s->mutex);
        /* Blocking operations are always cancellation points;
           see "General Information" / "Thread Cancelation Overview"
           in Single Unix. */
        pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_cancelstate);
        len = recv(s->udp_fd, s->tmp+4, sizeof(s->tmp)-4, 0);
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_cancelstate);
        pthread_mutex_lock(&s->mutex);
        if (len < 0) {
            if (ff_neterrno() != AVERROR(EAGAIN) && ff_neterrno() != AVERROR(EINTR)) {
                s->circular_buffer_error = ff_neterrno();
                goto end;
            }
            continue; /* transient error: retry the recv */
        }
        /* length prefix lets readers recover datagram boundaries */
        AV_WL32(s->tmp, len);

        if(av_fifo_space(s->fifo) < len + 4) {
            /* No Space left */
            if (s->overrun_nonfatal) {
                av_log(h, AV_LOG_WARNING, "Circular buffer overrun. "
                        "Surviving due to overrun_nonfatal option\n");
                continue;
            } else {
                av_log(h, AV_LOG_ERROR, "Circular buffer overrun. "
                        "To avoid, increase fifo_size URL option. "
                        "To survive in such case, use overrun_nonfatal option\n");
                s->circular_buffer_error = AVERROR(EIO);
                goto end;
            }
        }
        av_fifo_generic_write(s->fifo, s->tmp, len+4, NULL);
        pthread_cond_signal(&s->cond);
    }

end:
    /* wake any blocked reader so it can observe circular_buffer_error */
    pthread_cond_signal(&s->cond);
    pthread_mutex_unlock(&s->mutex);
    return NULL;
}
#endif
 
/**
 * Split a comma-separated address list into individually strdup'd
 * strings, appending to sources[] and bumping *num_sources.
 * buf is modified in place (commas become NULs); stops at max_sources.
 * @return 0 on success, AVERROR(ENOMEM) if a duplication fails
 */
static int parse_source_list(char *buf, char **sources, int *num_sources,
                             int max_sources)
{
    char *cur = buf;

    for (;;) {
        char *sep = strchr(cur, ',');
        if (sep)
            *sep = '\0';
        sources[*num_sources] = av_strdup(cur);
        if (!sources[*num_sources])
            return AVERROR(ENOMEM);
        cur = sep + 1;
        (*num_sources)++;
        if (*num_sources >= max_sources || !sep)
            break;
    }
    return 0;
}
 
/**
 * Open a UDP socket for the given udp:// URI.
 *
 * Parses per-open options from the URI query string (reuse, ttl,
 * localport, pkt_size, buffer_size, connect, fifo_size,
 * overrun_nonfatal, localaddr, sources, block, timeout), creates and
 * configures the socket (multicast membership/source filtering, socket
 * buffer sizes, optional connect()) and, for readers with a circular
 * buffer configured, starts the background receiver thread.
 *
 * @return 0 on success, a negative AVERROR code on error
 */
static int udp_open(URLContext *h, const char *uri, int flags)
{
    char hostname[1024], localaddr[1024] = "";
    int port, udp_fd = -1, tmp, bind_ret = -1;
    UDPContext *s = h->priv_data;
    int is_output;
    const char *p;
    char buf[256];
    struct sockaddr_storage my_addr;
    socklen_t len;
    int reuse_specified = 0;
    int i, num_include_sources = 0, num_exclude_sources = 0;
    char *include_sources[32], *exclude_sources[32];

    h->is_streamed = 1;

    is_output = !(flags & AVIO_FLAG_READ);
    if (!s->buffer_size) /* if not set explicitly */
        s->buffer_size = is_output ? UDP_TX_BUF_SIZE : UDP_MAX_PKT_SIZE;

    /* Per-open options are carried in the query string of the URI. */
    p = strchr(uri, '?');
    if (p) {
        if (av_find_info_tag(buf, sizeof(buf), "reuse", p)) {
            char *endptr = NULL;
            s->reuse_socket = strtol(buf, &endptr, 10);
            /* assume if no digits were found it is a request to enable it */
            if (buf == endptr)
                s->reuse_socket = 1;
            reuse_specified = 1;
        }
        if (av_find_info_tag(buf, sizeof(buf), "overrun_nonfatal", p)) {
            char *endptr = NULL;
            s->overrun_nonfatal = strtol(buf, &endptr, 10);
            /* assume if no digits were found it is a request to enable it */
            if (buf == endptr)
                s->overrun_nonfatal = 1;
            if (!HAVE_PTHREAD_CANCEL)
                av_log(h, AV_LOG_WARNING,
                       "'overrun_nonfatal' option was set but it is not supported "
                       "on this build (pthread support is required)\n");
        }
        if (av_find_info_tag(buf, sizeof(buf), "ttl", p)) {
            s->ttl = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "localport", p)) {
            s->local_port = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "pkt_size", p)) {
            s->packet_size = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "buffer_size", p)) {
            s->buffer_size = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "connect", p)) {
            s->is_connected = strtol(buf, NULL, 10);
        }
        if (av_find_info_tag(buf, sizeof(buf), "fifo_size", p)) {
            s->circular_buffer_size = strtol(buf, NULL, 10);
            if (!HAVE_PTHREAD_CANCEL)
                av_log(h, AV_LOG_WARNING,
                       "'circular_buffer_size' option was set but it is not supported "
                       "on this build (pthread support is required)\n");
        }
        if (av_find_info_tag(buf, sizeof(buf), "localaddr", p)) {
            av_strlcpy(localaddr, buf, sizeof(localaddr));
        }
        if (av_find_info_tag(buf, sizeof(buf), "sources", p)) {
            if (parse_source_list(buf, include_sources, &num_include_sources,
                                  FF_ARRAY_ELEMS(include_sources)))
                goto fail;
        }
        if (av_find_info_tag(buf, sizeof(buf), "block", p)) {
            if (parse_source_list(buf, exclude_sources, &num_exclude_sources,
                                  FF_ARRAY_ELEMS(exclude_sources)))
                goto fail;
        }
        if (!is_output && av_find_info_tag(buf, sizeof(buf), "timeout", p))
            s->timeout = strtol(buf, NULL, 10);
    }
    /* handling needed to support options picking from both AVOption and URL */
    /* fifo_size is expressed in units of 188-byte (MPEG-TS) packets */
    s->circular_buffer_size *= 188;
    if (flags & AVIO_FLAG_WRITE) {
        h->max_packet_size = s->packet_size;
    } else {
        h->max_packet_size = UDP_MAX_PKT_SIZE;
    }
    h->rw_timeout = s->timeout;

    /* fill the dest addr */
    av_url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri);

    /* XXX: fix av_url_split */
    if (hostname[0] == '\0' || hostname[0] == '?') {
        /* only accepts null hostname if input */
        if (!(flags & AVIO_FLAG_READ))
            goto fail;
    } else {
        if (ff_udp_set_remote_url(h, uri) < 0)
            goto fail;
    }

    if ((s->is_multicast || !s->local_port) && (h->flags & AVIO_FLAG_READ))
        s->local_port = port;
    udp_fd = udp_socket_create(s, &my_addr, &len, localaddr[0] ? localaddr : s->local_addr);
    if (udp_fd < 0)
        goto fail;

    /* Follow the requested reuse option, unless it's multicast in which
     * case enable reuse unless explicitly disabled.
     */
    if (s->reuse_socket || (s->is_multicast && !reuse_specified)) {
        s->reuse_socket = 1;
        if (setsockopt (udp_fd, SOL_SOCKET, SO_REUSEADDR, &(s->reuse_socket), sizeof(s->reuse_socket)) != 0)
            goto fail;
    }

    /* If multicast, try binding the multicast address first, to avoid
     * receiving UDP packets from other sources aimed at the same UDP
     * port. This fails on windows. This makes sending to the same address
     * using sendto() fail, so only do it if we're opened in read-only mode. */
    if (s->is_multicast && !(h->flags & AVIO_FLAG_WRITE)) {
        bind_ret = bind(udp_fd,(struct sockaddr *)&s->dest_addr, len);
    }
    /* bind to the local address if not multicast or if the multicast
     * bind failed */
    /* the bind is needed to give a port to the socket now */
    if (bind_ret < 0 && bind(udp_fd,(struct sockaddr *)&my_addr, len) < 0) {
        log_net_error(h, AV_LOG_ERROR, "bind failed");
        goto fail;
    }

    /* Read back the port the kernel actually assigned (may differ when
     * local_port was 0). */
    len = sizeof(my_addr);
    getsockname(udp_fd, (struct sockaddr *)&my_addr, &len);
    s->local_port = udp_port(&my_addr, len);

    if (s->is_multicast) {
        if (h->flags & AVIO_FLAG_WRITE) {
            /* output */
            if (udp_set_multicast_ttl(udp_fd, s->ttl, (struct sockaddr *)&s->dest_addr) < 0)
                goto fail;
        }
        if (h->flags & AVIO_FLAG_READ) {
            /* input */
            if (num_include_sources && num_exclude_sources) {
                av_log(h, AV_LOG_ERROR, "Simultaneously including and excluding multicast sources is not supported\n");
                goto fail;
            }
            if (num_include_sources) {
                if (udp_set_multicast_sources(udp_fd, (struct sockaddr *)&s->dest_addr, s->dest_addr_len, include_sources, num_include_sources, 1) < 0)
                    goto fail;
            } else {
                if (udp_join_multicast_group(udp_fd, (struct sockaddr *)&s->dest_addr) < 0)
                    goto fail;
            }
            if (num_exclude_sources) {
                if (udp_set_multicast_sources(udp_fd, (struct sockaddr *)&s->dest_addr, s->dest_addr_len, exclude_sources, num_exclude_sources, 0) < 0)
                    goto fail;
            }
        }
    }

    if (is_output) {
        /* limit the tx buf size to limit latency */
        tmp = s->buffer_size;
        if (setsockopt(udp_fd, SOL_SOCKET, SO_SNDBUF, &tmp, sizeof(tmp)) < 0) {
            log_net_error(h, AV_LOG_ERROR, "setsockopt(SO_SNDBUF)");
            goto fail;
        }
    } else {
        /* set udp recv buffer size to the largest possible udp packet size to
         * avoid losing data on OSes that set this too low by default. */
        tmp = s->buffer_size;
        if (setsockopt(udp_fd, SOL_SOCKET, SO_RCVBUF, &tmp, sizeof(tmp)) < 0) {
            log_net_error(h, AV_LOG_WARNING, "setsockopt(SO_RECVBUF)");
        }
        /* make the socket non-blocking */
        ff_socket_nonblock(udp_fd, 1);
    }
    if (s->is_connected) {
        if (connect(udp_fd, (struct sockaddr *) &s->dest_addr, s->dest_addr_len)) {
            log_net_error(h, AV_LOG_ERROR, "connect");
            goto fail;
        }
    }

    for (i = 0; i < num_include_sources; i++)
        av_freep(&include_sources[i]);
    for (i = 0; i < num_exclude_sources; i++)
        av_freep(&exclude_sources[i]);

    s->udp_fd = udp_fd;

#if HAVE_PTHREAD_CANCEL
    if (!is_output && s->circular_buffer_size) {
        int ret;

        /* start the task going */
        s->fifo = av_fifo_alloc(s->circular_buffer_size);
        /* previously unchecked: a NULL fifo would crash the receiver
         * thread on the first av_fifo_generic_write() */
        if (!s->fifo)
            goto fail;
        ret = pthread_mutex_init(&s->mutex, NULL);
        if (ret != 0) {
            av_log(h, AV_LOG_ERROR, "pthread_mutex_init failed : %s\n", strerror(ret));
            goto fail;
        }
        ret = pthread_cond_init(&s->cond, NULL);
        if (ret != 0) {
            av_log(h, AV_LOG_ERROR, "pthread_cond_init failed : %s\n", strerror(ret));
            goto cond_fail;
        }
        ret = pthread_create(&s->circular_buffer_thread, NULL, circular_buffer_task, h);
        if (ret != 0) {
            av_log(h, AV_LOG_ERROR, "pthread_create failed : %s\n", strerror(ret));
            goto thread_fail;
        }
        s->thread_started = 1;
    }
#endif

    return 0;
#if HAVE_PTHREAD_CANCEL
 thread_fail:
    pthread_cond_destroy(&s->cond);
 cond_fail:
    pthread_mutex_destroy(&s->mutex);
#endif
 fail:
    if (udp_fd >= 0)
        closesocket(udp_fd);
    /* av_fifo_free(NULL) and av_freep() on already-NULLed slots are no-ops,
     * so this cleanup is safe from every goto site */
    av_fifo_free(s->fifo);
    for (i = 0; i < num_include_sources; i++)
        av_freep(&include_sources[i]);
    for (i = 0; i < num_exclude_sources; i++)
        av_freep(&exclude_sources[i]);
    return AVERROR(EIO);
}
 
/**
 * Read one datagram from the UDP socket.
 *
 * With the circular-buffer thread active, datagrams are pulled from the
 * fifo (each entry is a 4-byte little-endian length followed by the
 * payload); otherwise recv() is used directly, waiting for readability
 * first in blocking mode.
 *
 * @return number of bytes read, or a negative AVERROR code
 */
static int udp_read(URLContext *h, uint8_t *buf, int size)
{
    UDPContext *s = h->priv_data;
    int ret;
    int avail, nonblock = h->flags & AVIO_FLAG_NONBLOCK;

#if HAVE_PTHREAD_CANCEL
    if (s->fifo) {
        pthread_mutex_lock(&s->mutex);
        do {
            avail = av_fifo_size(s->fifo);
            if (avail) { // >=size) {
                uint8_t tmp[4];

                /* fifo entry layout: 4-byte LE datagram length + payload */
                av_fifo_generic_read(s->fifo, tmp, 4, NULL);
                avail = AV_RL32(tmp);
                if (avail > size) {
                    av_log(h, AV_LOG_WARNING, "Part of datagram lost due to insufficient buffer size\n");
                    avail = size;
                }

                av_fifo_generic_read(s->fifo, buf, avail, NULL);
                /* discard the truncated tail so the next entry stays aligned */
                av_fifo_drain(s->fifo, AV_RL32(tmp) - avail);
                pthread_mutex_unlock(&s->mutex);
                return avail;
            } else if (s->circular_buffer_error) {
                int err = s->circular_buffer_error;
                pthread_mutex_unlock(&s->mutex);
                return err;
            } else if (nonblock) {
                pthread_mutex_unlock(&s->mutex);
                return AVERROR(EAGAIN);
            } else {
                /* FIXME: using the monotonic clock would be better,
                   but it does not exist on all supported platforms. */
                int64_t t = av_gettime() + 100000;
                struct timespec tv = { .tv_sec  =  t / 1000000,
                                       .tv_nsec = (t % 1000000) * 1000 };
                /* pthread_cond_timedwait() returns a positive error number
                 * and does NOT set errno, so the old "< 0" test could never
                 * trigger; inspect the return value directly instead. */
                int err = pthread_cond_timedwait(&s->cond, &s->mutex, &tv);
                if (err) {
                    pthread_mutex_unlock(&s->mutex);
                    return AVERROR(err == ETIMEDOUT ? EAGAIN : err);
                }
                nonblock = 1;
            }
        } while (1);
    }
#endif

    if (!(h->flags & AVIO_FLAG_NONBLOCK)) {
        ret = ff_network_wait_fd(s->udp_fd, 0);
        if (ret < 0)
            return ret;
    }
    ret = recv(s->udp_fd, buf, size, 0);

    return ret < 0 ? ff_neterrno() : ret;
}
 
/**
 * Send one datagram on the UDP socket.
 *
 * In blocking mode, first waits until the socket is writable. A socket
 * opened with the "connect" option uses send(); otherwise each datagram
 * is addressed explicitly with sendto().
 *
 * @return number of bytes sent, or a negative AVERROR code
 */
static int udp_write(URLContext *h, const uint8_t *buf, int size)
{
    UDPContext *s = h->priv_data;
    int n;

    if (!(h->flags & AVIO_FLAG_NONBLOCK)) {
        int err = ff_network_wait_fd(s->udp_fd, 1);
        if (err < 0)
            return err;
    }

    if (s->is_connected)
        n = send(s->udp_fd, buf, size, 0);
    else
        n = sendto(s->udp_fd, buf, size, 0,
                   (struct sockaddr *)&s->dest_addr,
                   s->dest_addr_len);

    if (n < 0)
        return ff_neterrno();
    return n;
}
 
/**
 * Close the UDP protocol instance: leave any multicast group joined by
 * a reader, close the socket, stop the receiver thread (if one was
 * started) and free the circular buffer.
 *
 * @return 0
 */
static int udp_close(URLContext *h)
{
    UDPContext *s = h->priv_data;

    if (s->is_multicast && (h->flags & AVIO_FLAG_READ))
        udp_leave_multicast_group(s->udp_fd, (struct sockaddr *)&s->dest_addr);
    closesocket(s->udp_fd);
#if HAVE_PTHREAD_CANCEL
    if (s->thread_started) {
        /* scoped here so no unused-variable warning arises when pthread
         * support is compiled out */
        int ret;

        pthread_cancel(s->circular_buffer_thread);
        ret = pthread_join(s->circular_buffer_thread, NULL);
        if (ret != 0)
            av_log(h, AV_LOG_ERROR, "pthread_join(): %s\n", strerror(ret));
        pthread_mutex_destroy(&s->mutex);
        pthread_cond_destroy(&s->cond);
    }
#endif
    /* av_fifo_free(NULL) is a no-op */
    av_fifo_free(s->fifo);
    return 0;
}
 
/* Protocol table entry for "udp://" URLs; a UDPContext is allocated per
 * URLContext as private data. */
URLProtocol ff_udp_protocol = {
    .name                = "udp",
    .url_open            = udp_open,
    .url_read            = udp_read,
    .url_write           = udp_write,
    .url_close           = udp_close,
    .url_get_file_handle = udp_get_file_handle,
    .priv_data_size      = sizeof(UDPContext),
    .priv_data_class     = &udp_context_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/unix.c
0,0 → 1,155
/*
* Unix socket protocol
* Copyright (c) 2013 Luca Barbato
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
*
* Unix socket url_protocol
*
*/
 
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "os_support.h"
#include "network.h"
#include <sys/un.h>
#include "url.h"
 
/* Private state for the unix: protocol. */
typedef struct UnixContext {
    const AVClass *class;    /* AVClass for the private options below */
    struct sockaddr_un addr; /* filesystem socket address to bind/connect */
    int timeout;             /* "timeout" option: timeout in ms, -1 = default */
    int listen;              /* "listen" option: bind+listen instead of connect */
    int type;                /* "type" option: SOCK_STREAM/SOCK_DGRAM/SOCK_SEQPACKET */
    int fd;                  /* the socket file descriptor */
} UnixContext;
 
#define OFFSET(x) offsetof(UnixContext, x)
#define ED AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_ENCODING_PARAM
/* Private options; the "type" option selects the socket type through the
 * named constants that follow it. */
static const AVOption unix_options[] = {
    { "listen",    "Open socket for listening",             OFFSET(listen),  AV_OPT_TYPE_INT,   { .i64 = 0 },              0,             1, ED },
    { "timeout",   "Timeout in ms",                         OFFSET(timeout), AV_OPT_TYPE_INT,   { .i64 = -1 },            -1,       INT_MAX, ED },
    { "type",      "Socket type",                           OFFSET(type),    AV_OPT_TYPE_INT,   { .i64 = SOCK_STREAM },    INT_MIN, INT_MAX, ED, "type" },
    { "stream",    "Stream (reliable stream-oriented)",     0,               AV_OPT_TYPE_CONST, { .i64 = SOCK_STREAM },    INT_MIN, INT_MAX, ED, "type" },
    { "datagram",  "Datagram (unreliable packet-oriented)", 0,               AV_OPT_TYPE_CONST, { .i64 = SOCK_DGRAM },     INT_MIN, INT_MAX, ED, "type" },
    /* fixed: the help string was missing its closing parenthesis */
    { "seqpacket", "Seqpacket (reliable packet-oriented)",  0,               AV_OPT_TYPE_CONST, { .i64 = SOCK_SEQPACKET }, INT_MIN, INT_MAX, ED, "type" },
    { NULL }
};
 
/* AVClass exposing unix_options to the av_opt API. */
static const AVClass unix_class = {
    .class_name = "unix",
    .item_name  = av_default_item_name,
    .option     = unix_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/**
 * Open a Unix-domain socket from a "unix:<path>" URL.
 *
 * With the "listen" option set, the socket is bound to the path and put
 * into listening mode; otherwise it connects to an existing socket at
 * that path.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int unix_open(URLContext *h, const char *filename, int flags)
{
    UnixContext *s = h->priv_data;
    int fd, ret;

    /* strip the "unix:" scheme prefix; the remainder is the socket path
     * (silently truncated to sizeof(sun_path) by av_strlcpy) */
    av_strstart(filename, "unix:", &filename);
    s->addr.sun_family = AF_UNIX;
    av_strlcpy(s->addr.sun_path, filename, sizeof(s->addr.sun_path));

    if ((fd = ff_socket(AF_UNIX, s->type, 0)) < 0)
        return ff_neterrno();

    if (s->listen) {
        fd = ff_listen_bind(fd, (struct sockaddr *)&s->addr,
                            sizeof(s->addr), s->timeout, h);
        if (fd < 0) {
            ret = fd;
            goto fail;
        }
    } else {
        ret = ff_listen_connect(fd, (struct sockaddr *)&s->addr,
                                sizeof(s->addr), s->timeout, h, 0);
        if (ret < 0)
            goto fail;
    }

    s->fd = fd;

    return 0;

fail:
    /* Do not unlink on EADDRINUSE: the path belongs to another process.
     * NOTE(review): on the listen/bind failure path, fd was replaced by
     * the negative error code, so closesocket() below is skipped;
     * presumably ff_listen_bind closes the fd itself on failure - confirm. */
    if (s->listen && AVUNERROR(ret) != EADDRINUSE)
        unlink(s->addr.sun_path);
    if (fd >= 0)
        closesocket(fd);
    return ret;
}
 
/**
 * Read up to size bytes from the Unix-domain socket.
 *
 * In blocking mode, waits for readability first (short timeout handled
 * by ff_network_wait_fd).
 *
 * @return number of bytes read, or a negative AVERROR code
 */
static int unix_read(URLContext *h, uint8_t *buf, int size)
{
    UnixContext *uc = h->priv_data;
    int n;

    if (!(h->flags & AVIO_FLAG_NONBLOCK)) {
        int err = ff_network_wait_fd(uc->fd, 0);
        if (err < 0)
            return err;
    }
    n = recv(uc->fd, buf, size, 0);
    if (n < 0)
        return ff_neterrno();
    return n;
}
 
/**
 * Write size bytes to the Unix-domain socket.
 *
 * In blocking mode, waits for writability first.
 *
 * @return number of bytes written, or a negative AVERROR code
 */
static int unix_write(URLContext *h, const uint8_t *buf, int size)
{
    UnixContext *uc = h->priv_data;
    int n;

    if (!(h->flags & AVIO_FLAG_NONBLOCK)) {
        int err = ff_network_wait_fd(uc->fd, 1);
        if (err < 0)
            return err;
    }
    n = send(uc->fd, buf, size, 0);
    if (n < 0)
        return ff_neterrno();
    return n;
}
 
/**
 * Close the Unix-domain socket; a listening socket also removes the
 * filesystem entry it created when binding.
 */
static int unix_close(URLContext *h)
{
    UnixContext *uc = h->priv_data;

    if (uc->listen)
        unlink(uc->addr.sun_path);
    closesocket(uc->fd);
    return 0;
}
 
/** Return the file descriptor backing this URL. */
static int unix_get_file_handle(URLContext *h)
{
    return ((UnixContext *)h->priv_data)->fd;
}
 
/* Protocol table entry for "unix:" URLs; a UnixContext is allocated per
 * URLContext as private data. */
URLProtocol ff_unix_protocol = {
    .name                = "unix",
    .url_open            = unix_open,
    .url_read            = unix_read,
    .url_write           = unix_write,
    .url_close           = unix_close,
    .url_get_file_handle = unix_get_file_handle,
    .priv_data_size      = sizeof(UnixContext),
    .priv_data_class     = &unix_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};
/contrib/sdk/sources/ffmpeg/libavformat/url-test.c
0,0 → 1,55
/*
* Copyright (c) 2012 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "url.h"
 
/**
 * Resolve rel against base, print the result, and verify that resolving
 * with the base in the output buffer (in-place) produces the same result.
 * Exits with status 1 on a mismatch.
 */
static void test(const char *base, const char *rel)
{
    char out[200];
    char inplace[200];

    ff_make_absolute_url(out, sizeof(out), base, rel);
    printf("%s\n", out);

    if (!base)
        return;

    /* Test in-buffer replacement */
    snprintf(inplace, sizeof(inplace), "%s", base);
    ff_make_absolute_url(inplace, sizeof(inplace), inplace, rel);
    if (strcmp(out, inplace) != 0) {
        printf("In-place handling of %s + %s failed\n", base, rel);
        exit(1);
    }
}
 
/* Drive test() over a fixed table of (base, rel) pairs, in the same
 * order as the reference output. */
int main(void)
{
    static const struct {
        const char *base;
        const char *rel;
    } cases[] = {
        { NULL,                        "baz" },
        { "/foo/bar",                  "baz" },
        { "/foo/bar",                  "../baz" },
        { "/foo/bar",                  "/baz" },
        { "http://server/foo/",        "baz" },
        { "http://server/foo/bar",     "baz" },
        { "http://server/foo/",        "../baz" },
        { "http://server/foo/bar/123", "../../baz" },
        { "http://server/foo/bar/123", "/baz" },
        { "http://server/foo/bar/123", "https://other/url" },
        { "http://server/foo/bar?param=value/with/slashes", "/baz" },
        { "http://server/foo/bar?param&otherparam", "?someparam" },
        { "http://server/foo/bar",     "//other/url" },
    };
    size_t i;

    for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
        test(cases[i].base, cases[i].rel);
    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/url.c
0,0 → 1,147
/*
* URL utility functions
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
 
#include "avformat.h"
#include "config.h"
#include "url.h"
#if CONFIG_NETWORK
#include "network.h"
#endif
#include "libavutil/avstring.h"
 
/**
* @file
* URL utility functions.
*/
 
/* Assemble a URL from components (reverse of av_url_split); see the full
 * contract on the prototype in url.h. */
int ff_url_join(char *str, int size, const char *proto,
                const char *authorization, const char *hostname,
                int port, const char *fmt, ...)
{
#if CONFIG_NETWORK
    struct addrinfo hints = { 0 }, *ai;
#endif

    str[0] = '\0';
    if (proto)
        av_strlcatf(str, size, "%s://", proto);
    if (authorization && authorization[0])
        av_strlcatf(str, size, "%s@", authorization);
#if CONFIG_NETWORK && defined(AF_INET6)
    /* Determine if hostname is a numerical IPv6 address,
     * properly escape it within [] in that case. */
    hints.ai_flags = AI_NUMERICHOST;
    if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
        if (ai->ai_family == AF_INET6) {
            av_strlcat(str, "[", size);
            av_strlcat(str, hostname, size);
            av_strlcat(str, "]", size);
        } else {
            av_strlcat(str, hostname, size);
        }
        freeaddrinfo(ai);
    } else
#endif
        /* Not an IPv6 address, just output the plain string. */
        av_strlcat(str, hostname, size);

    /* negative port means "leave the port out" */
    if (port >= 0)
        av_strlcatf(str, size, ":%d", port);
    if (fmt) {
        va_list vl;
        int len = strlen(str);

        va_start(vl, fmt);
        /* append everything after host:port; clamp remaining space to 0
         * if the buffer is already full */
        vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
        va_end(vl);
    }
    return strlen(str);
}
 
/* Convert a relative url into an absolute url, given a base url; base may
 * equal buf (in-place resolution). See the prototype in url.h.
 * The buffer is edited in place with '\0' cuts, so statement order matters. */
void ff_make_absolute_url(char *buf, int size, const char *base,
                          const char *rel)
{
    char *sep, *path_query;
    /* Absolute path, relative to the current server */
    if (base && strstr(base, "://") && rel[0] == '/') {
        if (base != buf)
            av_strlcpy(buf, base, size);
        sep = strstr(buf, "://");
        if (sep) {
            /* Take scheme from base url */
            if (rel[1] == '/') {
                /* protocol-relative "//host/..." reference */
                sep[1] = '\0';
            } else {
                /* Take scheme and host from base url */
                sep += 3;
                sep = strchr(sep, '/');
                if (sep)
                    *sep = '\0';
            }
        }
        av_strlcat(buf, rel, size);
        return;
    }
    /* If rel actually is an absolute url, just copy it */
    if (!base || strstr(rel, "://") || rel[0] == '/') {
        av_strlcpy(buf, rel, size);
        return;
    }
    if (base != buf)
        av_strlcpy(buf, base, size);

    /* Strip off any query string from base */
    path_query = strchr(buf, '?');
    if (path_query != NULL)
        *path_query = '\0';

    /* Is relative path just a new query part? */
    if (rel[0] == '?') {
        av_strlcat(buf, rel, size);
        return;
    }

    /* Remove the file name from the base url */
    sep = strrchr(buf, '/');
    if (sep)
        sep[1] = '\0';
    else
        buf[0] = '\0';
    /* Pop one trailing directory off buf for each leading "../" in rel */
    while (av_strstart(rel, "../", NULL) && sep) {
        /* Remove the path delimiter at the end */
        sep[0] = '\0';
        sep = strrchr(buf, '/');
        /* If the next directory name to pop off is "..", break here */
        if (!strcmp(sep ? &sep[1] : buf, "..")) {
            /* Readd the slash we just removed */
            av_strlcat(buf, "/", size);
            break;
        }
        /* Cut off the directory name */
        if (sep)
            sep[1] = '\0';
        else
            buf[0] = '\0';
        rel += 3;
    }
    av_strlcat(buf, rel, size);
}
/contrib/sdk/sources/ffmpeg/libavformat/url.h
0,0 → 1,288
/*
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* unbuffered private I/O API
*/
 
#ifndef AVFORMAT_URL_H
#define AVFORMAT_URL_H
 
#include "avio.h"
#include "libavformat/version.h"
 
#include "libavutil/dict.h"
#include "libavutil/log.h"
 
#define URL_PROTOCOL_FLAG_NESTED_SCHEME 1 /*< The protocol name can be the first part of a nested protocol scheme */
#define URL_PROTOCOL_FLAG_NETWORK 2 /*< The protocol uses network */
 
extern int (*url_interrupt_cb)(void);
 
extern const AVClass ffurl_context_class;
 
typedef struct URLContext {
    const AVClass *av_class;    /**< information for av_log(). Set by url_open(). */
    struct URLProtocol *prot;   /**< protocol implementation backing this context */
    void *priv_data;            /**< protocol-private state (priv_data_size bytes) */
    char *filename;             /**< specified URL */
    int flags;                  /**< AVIO_FLAG_* combination the URL was opened with */
    int max_packet_size;        /**< if non zero, the stream is packetized with this max packet size */
    int is_streamed;            /**< true if streamed (no seek possible), default = false */
    int is_connected;           /* NOTE(review): appears to track whether the
                                 * context has been connected - confirm */
    AVIOInterruptCB interrupt_callback; /**< callback for checking user interruption */
    int64_t rw_timeout;         /**< maximum time to wait for (network) read/write operation completion, in mcs */
} URLContext;
 
/* Dispatch table implemented by each protocol ("udp", "unix", ...).
 * Only the callbacks the protocol supports need to be set. */
typedef struct URLProtocol {
    const char *name;                   /**< scheme name, e.g. "udp" */
    int     (*url_open)( URLContext *h, const char *url, int flags);
    /**
     * This callback is to be used by protocols which open further nested
     * protocols. options are then to be passed to ffurl_open()/ffurl_connect()
     * for those nested protocols.
     */
    int     (*url_open2)(URLContext *h, const char *url, int flags, AVDictionary **options);

    /**
     * Read data from the protocol.
     * If data is immediately available (even less than size), EOF is
     * reached or an error occurs (including EINTR), return immediately.
     * Otherwise:
     * In non-blocking mode, return AVERROR(EAGAIN) immediately.
     * In blocking mode, wait for data/EOF/error with a short timeout (0.1s),
     * and return AVERROR(EAGAIN) on timeout.
     * Checking interrupt_callback, looping on EINTR and EAGAIN and until
     * enough data has been read is left to the calling function; see
     * retry_transfer_wrapper in avio.c.
     */
    int     (*url_read)( URLContext *h, unsigned char *buf, int size);
    int     (*url_write)(URLContext *h, const unsigned char *buf, int size);
    int64_t (*url_seek)( URLContext *h, int64_t pos, int whence);
    int     (*url_close)(URLContext *h);
    struct URLProtocol *next;           /**< linked-list pointer used by protocol registration */
    int (*url_read_pause)(URLContext *h, int pause);
    int64_t (*url_read_seek)(URLContext *h, int stream_index,
                             int64_t timestamp, int flags);
    int (*url_get_file_handle)(URLContext *h);
    int (*url_get_multi_file_handle)(URLContext *h, int **handles,
                                     int *numhandles);
    int (*url_shutdown)(URLContext *h, int flags);
    int priv_data_size;                 /**< bytes to allocate for URLContext.priv_data */
    const AVClass *priv_data_class;     /**< AVClass for priv_data options, may be NULL */
    int flags;                          /**< URL_PROTOCOL_FLAG_* */
    int (*url_check)(URLContext *h, int mask);
} URLProtocol;
 
/**
* Create a URLContext for accessing to the resource indicated by
* url, but do not initiate the connection yet.
*
* @param puc pointer to the location where, in case of success, the
* function puts the pointer to the created URLContext
* @param flags flags which control how the resource indicated by url
* is to be opened
* @param int_cb interrupt callback to use for the URLContext, may be
* NULL
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int ffurl_alloc(URLContext **puc, const char *filename, int flags,
const AVIOInterruptCB *int_cb);
 
/**
* Connect an URLContext that has been allocated by ffurl_alloc
*
* @param options A dictionary filled with options for nested protocols,
* i.e. it will be passed to url_open2() for protocols implementing it.
* This parameter will be destroyed and replaced with a dict containing options
* that were not found. May be NULL.
*/
int ffurl_connect(URLContext *uc, AVDictionary **options);
 
/**
* Create an URLContext for accessing to the resource indicated by
* url, and open it.
*
* @param puc pointer to the location where, in case of success, the
* function puts the pointer to the created URLContext
* @param flags flags which control how the resource indicated by url
* is to be opened
* @param int_cb interrupt callback to use for the URLContext, may be
* NULL
* @param options A dictionary filled with protocol-private options. On return
* this parameter will be destroyed and replaced with a dict containing options
* that were not found. May be NULL.
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int ffurl_open(URLContext **puc, const char *filename, int flags,
const AVIOInterruptCB *int_cb, AVDictionary **options);
 
/**
* Read up to size bytes from the resource accessed by h, and store
* the read bytes in buf.
*
* @return The number of bytes actually read, or a negative value
* corresponding to an AVERROR code in case of error. A value of zero
* indicates that it is not possible to read more from the accessed
* resource (except if the value of the size argument is also zero).
*/
int ffurl_read(URLContext *h, unsigned char *buf, int size);
 
/**
* Read as many bytes as possible (up to size), calling the
* read function multiple times if necessary.
* This makes special short-read handling in applications
* unnecessary, if the return value is < size then it is
* certain there was either an error or the end of file was reached.
*/
int ffurl_read_complete(URLContext *h, unsigned char *buf, int size);
 
/**
* Write size bytes from buf to the resource accessed by h.
*
* @return the number of bytes actually written, or a negative value
* corresponding to an AVERROR code in case of failure
*/
int ffurl_write(URLContext *h, const unsigned char *buf, int size);
 
/**
* Change the position that will be used by the next read/write
* operation on the resource accessed by h.
*
* @param pos specifies the new position to set
* @param whence specifies how pos should be interpreted, it must be
* one of SEEK_SET (seek from the beginning), SEEK_CUR (seek from the
* current position), SEEK_END (seek from the end), or AVSEEK_SIZE
* (return the filesize of the requested resource, pos is ignored).
* @return a negative value corresponding to an AVERROR code in case
* of failure, or the resulting file position, measured in bytes from
* the beginning of the file. You can use this feature together with
* SEEK_CUR to read the current file position.
*/
int64_t ffurl_seek(URLContext *h, int64_t pos, int whence);
 
/**
* Close the resource accessed by the URLContext h, and free the
* memory used by it. Also set the URLContext pointer to NULL.
*
* @return a negative value if an error condition occurred, 0
* otherwise
*/
int ffurl_closep(URLContext **h);
int ffurl_close(URLContext *h);
 
/**
* Return the filesize of the resource accessed by h, AVERROR(ENOSYS)
* if the operation is not supported by h, or another negative value
* corresponding to an AVERROR error code in case of failure.
*/
int64_t ffurl_size(URLContext *h);
 
/**
* Return the file descriptor associated with this URL. For RTP, this
* will return only the RTP file descriptor, not the RTCP file descriptor.
*
* @return the file descriptor associated with this URL, or <0 on error.
*/
int ffurl_get_file_handle(URLContext *h);
 
/**
* Return the file descriptors associated with this URL.
*
* @return 0 on success or <0 on error.
*/
int ffurl_get_multi_file_handle(URLContext *h, int **handles, int *numhandles);
 
/**
* Signal the URLContext that we are done reading or writing the stream.
*
* @param h pointer to the resource
* @param flags flags which control how the resource indicated by url
* is to be shutdown
*
* @return a negative value if an error condition occurred, 0
* otherwise
*/
int ffurl_shutdown(URLContext *h, int flags);
 
/**
* Register the URLProtocol protocol.
*
* @param size the size of the URLProtocol struct referenced
*/
int ffurl_register_protocol(URLProtocol *protocol, int size);
 
/**
* Check if the user has requested to interrup a blocking function
* associated with cb.
*/
int ff_check_interrupt(AVIOInterruptCB *cb);
 
/**
* Iterate over all available protocols.
*
* @param prev result of the previous call to this functions or NULL.
*/
URLProtocol *ffurl_protocol_next(URLProtocol *prev);
 
/* udp.c */
int ff_udp_set_remote_url(URLContext *h, const char *uri);
int ff_udp_get_local_port(URLContext *h);
 
/**
* Assemble a URL string from components. This is the reverse operation
* of av_url_split.
*
* Note, this requires networking to be initialized, so the caller must
* ensure ff_network_init has been called.
*
* @see av_url_split
*
* @param str the buffer to fill with the url
* @param size the size of the str buffer
* @param proto the protocol identifier, if null, the separator
* after the identifier is left out, too
* @param authorization an optional authorization string, may be null.
* An empty string is treated the same as a null string.
* @param hostname the host name string
* @param port the port number, left out from the string if negative
* @param fmt a generic format string for everything to add after the
* host/port, may be null
* @return the number of characters written to the destination buffer
*/
int ff_url_join(char *str, int size, const char *proto,
const char *authorization, const char *hostname,
int port, const char *fmt, ...) av_printf_format(7, 8);
 
/**
* Convert a relative url into an absolute url, given a base url.
*
* @param buf the buffer where output absolute url is written
* @param size the size of buf
* @param base the base url, may be equal to buf.
* @param rel the new url, which is interpreted relative to base
*/
void ff_make_absolute_url(char *buf, int size, const char *base,
const char *rel);
 
 
#endif /* AVFORMAT_URL_H */
/contrib/sdk/sources/ffmpeg/libavformat/urldecode.c
0,0 → 1,86
/*
* Simple URL decoding function
* Copyright (c) 2012 Antti Seppälä
*
* References:
* RFC 3986: Uniform Resource Identifier (URI): Generic Syntax
* T. Berners-Lee et al. The Internet Society, 2005
*
* based on http://www.icosaedro.it/apache/urldecode.c
* from Umberto Salsi (salsi@icosaedro.it)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <string.h>
 
#include "libavutil/mem.h"
#include "libavutil/avstring.h"
#include "urldecode.h"
 
/**
 * Decode a percent-encoded URL into a freshly allocated string.
 *
 * '+' becomes a space, "%XY" (hex digits) becomes the corresponding byte,
 * and malformed escapes (e.g. "%zz" or a truncated "%X") are copied
 * through verbatim. The output can never be longer than the input, so a
 * buffer of strlen(url) + 1 bytes always suffices.
 *
 * @param url the string to decode, may be NULL
 * @return newly allocated decoded string (caller frees), or NULL on
 *         NULL input or allocation failure
 */
char *ff_urldecode(const char *url)
{
    size_t src = 0, dst = 0, total;
    char *dest;

    if (!url)
        return NULL;

    total = strlen(url) + 1;   /* the loop below also copies the NUL */
    dest  = av_malloc(total);
    if (!dest)
        return NULL;

    while (src < total) {
        char c = url[src++];

        if (c == '+') {
            dest[dst++] = ' ';
        } else if (c != '%' || src + 2 >= total) {
            /* ordinary character, or '%' too close to the end to form
             * a full escape */
            dest[dst++] = c;
        } else {
            char hi = url[src++];
            char lo = url[src++];

            if (av_isxdigit(hi) && av_isxdigit(lo)) {
                hi = av_tolower(hi);
                lo = av_tolower(lo);
                hi = hi <= '9' ? hi - '0' : hi - 'a' + 10;
                lo = lo <= '9' ? lo - '0' : lo - 'a' + 10;
                dest[dst++] = 16 * hi + lo;
            } else {
                /* %zz or something other invalid: copy through as-is */
                dest[dst++] = c;
                dest[dst++] = hi;
                dest[dst++] = lo;
            }
        }
    }

    return dest;
}
/contrib/sdk/sources/ffmpeg/libavformat/urldecode.h
0,0 → 1,35
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_URLDECODE_H
#define AVFORMAT_URLDECODE_H

/**
 * Decodes a URL from its percent-encoded form back into normal
 * representation. This function returns the decoded URL in a string.
 * The URL to be decoded does not necessarily have to be encoded but
 * in that case the original string is duplicated.
 *
 * @param url a string to be decoded.
 * @return new string with the URL decoded or NULL if decoding failed.
 * Note that the returned string should be explicitly freed when not
 * used anymore.
 */
char *ff_urldecode(const char *url);

#endif /* AVFORMAT_URLDECODE_H */
/contrib/sdk/sources/ffmpeg/libavformat/utils.c
0,0 → 1,4284
/*
* various utility functions for use within FFmpeg
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "libavcodec/internal.h"
#include "libavcodec/raw.h"
#include "libavcodec/bytestream.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/internal.h"
#include "libavutil/pixdesc.h"
#include "metadata.h"
#include "id3v2.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#include "libavutil/timestamp.h"
#include "riff.h"
#include "audiointerleave.h"
#include "url.h"
#include <stdarg.h>
#if CONFIG_NETWORK
#include "network.h"
#endif
 
#undef NDEBUG
#include <assert.h>
 
/**
* @file
* various utility functions for use within FFmpeg
*/
 
/** Return the libavformat build-time version as an integer. */
unsigned avformat_version(void)
{
    /* FFmpeg's libavformat always carries a micro version >= 100. */
    av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
    return LIBAVFORMAT_VERSION_INT;
}
 
/** Return the configure line the library was built with. */
const char *avformat_configuration(void)
{
    return FFMPEG_CONFIGURATION;
}
 
const char *avformat_license(void)
{
#define LICENSE_PREFIX "libavformat license: "
return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}
 
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/** Return nonzero if ts lies in the range reserved for relative timestamps. */
static int is_relative(int64_t ts)
{
    const int64_t threshold = RELATIVE_TS_BASE - (1LL << 48);
    return ts > threshold;
}
 
/**
 * Wrap a given time stamp, if there is an indication for an overflow.
 *
 * @param st        stream the timestamp belongs to
 * @param timestamp the time stamp to wrap
 * @return resulting (possibly wrapped) time stamp
 */
static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
{
    /* Nothing to do unless wrap correction is active and both the
     * reference and the timestamp are valid. */
    if (st->pts_wrap_behavior == AV_PTS_WRAP_IGNORE ||
        st->pts_wrap_reference == AV_NOPTS_VALUE ||
        timestamp == AV_NOPTS_VALUE)
        return timestamp;

    if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
        timestamp < st->pts_wrap_reference)
        return timestamp + (1ULL << st->pts_wrap_bits);

    if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
        timestamp >= st->pts_wrap_reference)
        return timestamp - (1ULL << st->pts_wrap_bits);

    return timestamp;
}
 
/* Expand getter/setter accessor functions for these context fields
 * (MAKE_ACCESSORS is defined in a shared internal header). */
MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, video_codec)
MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
 
/**
 * Pick the decoder for a stream: an already attached codec wins, then a
 * user-forced per-type codec on the format context, then a lookup by id.
 */
static AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID codec_id)
{
    AVCodec *forced = NULL;

    if (st->codec->codec)
        return st->codec->codec;

    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:    forced = s->video_codec;    break;
    case AVMEDIA_TYPE_AUDIO:    forced = s->audio_codec;    break;
    case AVMEDIA_TYPE_SUBTITLE: forced = s->subtitle_codec; break;
    }
    if (forced)
        return forced;

    return avcodec_find_decoder(codec_id);
}
 
/** Accessor for the probe score recorded when the input was opened. */
int av_format_get_probe_score(const AVFormatContext *s)
{
    return s->probe_score;
}
 
/* an arbitrarily chosen "sane" max packet size -- 50M */
#define SANE_CHUNK_SIZE (50000000)

/**
 * Clamp a requested read size so it does not run past the logical end of
 * the stream (s->maxsize).
 *
 * @param s    IO context, s->maxsize may be refreshed from avio_size()
 * @param size requested number of bytes
 * @return the possibly reduced size (one byte past the end is allowed,
 *         per the truncation below)
 */
int ffio_limit(AVIOContext *s, int size)
{
    if(s->maxsize>=0){
        int64_t remaining= s->maxsize - avio_tell(s);
        if(remaining < size){
            int64_t newsize= avio_size(s);
            /* refresh maxsize from the real size; when newsize is 0 this
             * stores -1 (unknown) thanks to the -!newsize term */
            if(!s->maxsize || s->maxsize<newsize)
                s->maxsize= newsize - !newsize;
            remaining= s->maxsize - avio_tell(s);
            remaining= FFMAX(remaining, 0);
        }

        if(s->maxsize>=0 && remaining+1 < size){
            av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
            size= remaining+1;
        }
    }
    return size;
}
 
/*
 * Read the data in sane-sized chunks and append to pkt.
 * Return the number of bytes read or an error.
 */
static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
{
    int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
    int orig_size = pkt->size;
    int ret;

    do {
        int prev_size = pkt->size;
        int read_size;

        /*
         * When the caller requests a lot of data, limit it to the amount left
         * in file or SANE_CHUNK_SIZE when it is not known
         */
        read_size = size;
        if (read_size > SANE_CHUNK_SIZE/10) {
            read_size = ffio_limit(s, read_size);
            // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
            if (s->maxsize < 0)
                read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
        }

        ret = av_grow_packet(pkt, read_size);
        if (ret < 0)
            break;

        ret = avio_read(s, pkt->data + prev_size, read_size);
        if (ret != read_size) {
            /* short read: drop the part of the packet that was not filled */
            av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
            break;
        }

        size -= read_size;
    } while (size > 0);
    /* not everything requested could be read: mark the packet corrupt */
    if (size > 0)
        pkt->flags |= AV_PKT_FLAG_CORRUPT;

    pkt->pos = orig_pos;
    if (!pkt->size)
        av_free_packet(pkt);
    /* bytes appended on success, otherwise the last error code */
    return pkt->size > orig_size ? pkt->size - orig_size : ret;
}
 
/**
 * Allocate and read the payload of a packet: initialize pkt as empty at
 * the current IO position, then append up to size bytes.
 */
int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
{
    av_init_packet(pkt);
    pkt->data = NULL;
    pkt->size = 0;
    pkt->pos  = avio_tell(s);

    return append_packet_chunked(s, pkt, size);
}
 
/**
 * Append up to size bytes to pkt; an empty packet is initialized from
 * scratch, a non-empty one is grown in place.
 */
int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
{
    return pkt->size ? append_packet_chunked(s, pkt, size)
                     : av_get_packet(s, pkt, size);
}
 
 
/** Check whether filename contains a valid frame-number pattern. */
int av_filename_number_test(const char *filename)
{
    char buf[1024];

    if (!filename)
        return 0;
    return av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0;
}
 
/**
 * Guess the input container format from probe data by trying every
 * registered demuxer; the winning score is returned through *score_ret.
 * An exact tie between two formats yields NULL.
 */
AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
{
    AVProbeData lpd = *pd;
    AVInputFormat *fmt1 = NULL, *fmt;
    int score, nodat = 0, score_max=0;
    const static uint8_t zerobuffer[AVPROBE_PADDING_SIZE];

    if (!lpd.buf)
        lpd.buf = zerobuffer;

    /* skip a leading ID3v2 tag so it does not confuse the format probes */
    if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
        int id3len = ff_id3v2_tag_len(lpd.buf);
        if (lpd.buf_size > id3len + 16) {
            lpd.buf += id3len;
            lpd.buf_size -= id3len;
        }else
            nodat = 1;  /* buffer holds (almost) nothing but ID3 data */
    }

    fmt = NULL;
    while ((fmt1 = av_iformat_next(fmt1))) {
        /* only consider demuxers that match the opened/no-file mode */
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
            continue;
        score = 0;
        if (fmt1->read_probe) {
            score = fmt1->read_probe(&lpd);
            /* a matching file extension slightly raises a content score */
            if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
                score = FFMAX(score, nodat ? AVPROBE_SCORE_EXTENSION / 2 - 1 : 1);
        } else if (fmt1->extensions) {
            if (av_match_ext(lpd.filename, fmt1->extensions)) {
                score = AVPROBE_SCORE_EXTENSION;
            }
        }
        if (score > score_max) {
            score_max = score;
            fmt = fmt1;
        }else if (score == score_max)
            fmt = NULL;  /* ambiguous: two formats share the best score */
    }
    /* with no real payload available, do not report a confident score */
    if(nodat)
        score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max);
    *score_ret= score_max;

    return fmt;
}
 
/**
 * Probe the input format, reporting it only when its score beats the
 * caller-supplied threshold in *score_max (which is then updated).
 */
AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
{
    int score;
    AVInputFormat *fmt = av_probe_input_format3(pd, is_opened, &score);

    if (score <= *score_max)
        return NULL;

    *score_max = score;
    return fmt;
}
 
/** Probe the input format with no minimum score requirement. */
AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
{
    int score = 0;

    return av_probe_input_format2(pd, is_opened, &score);
}
 
/**
 * Run the format probes over buffered packet data of a single stream and,
 * for a handful of known raw formats, set the stream's codec id/type
 * accordingly. Returns the probe score.
 */
static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
{
    /* raw-format demuxer name -> codec id/type mapping */
    static const struct {
        const char *name; enum AVCodecID id; enum AVMediaType type;
    } fmt_map[] = {
        { "aac"      , AV_CODEC_ID_AAC       , AVMEDIA_TYPE_AUDIO },
        { "ac3"      , AV_CODEC_ID_AC3       , AVMEDIA_TYPE_AUDIO },
        { "dts"      , AV_CODEC_ID_DTS       , AVMEDIA_TYPE_AUDIO },
        { "eac3"     , AV_CODEC_ID_EAC3      , AVMEDIA_TYPE_AUDIO },
        { "h264"     , AV_CODEC_ID_H264      , AVMEDIA_TYPE_VIDEO },
        { "loas"     , AV_CODEC_ID_AAC_LATM  , AVMEDIA_TYPE_AUDIO },
        { "m4v"      , AV_CODEC_ID_MPEG4     , AVMEDIA_TYPE_VIDEO },
        { "mp3"      , AV_CODEC_ID_MP3       , AVMEDIA_TYPE_AUDIO },
        { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
        { 0 }
    };
    int score;
    AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);

    if (fmt && st->request_probe <= score) {
        int idx;

        av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
               pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
        for (idx = 0; fmt_map[idx].name; idx++) {
            if (!strcmp(fmt->name, fmt_map[idx].name)) {
                st->codec->codec_id   = fmt_map[idx].id;
                st->codec->codec_type = fmt_map[idx].type;
                break;
            }
        }
    }
    return score;
}
 
/************************************************************/
/* input media file */
 
/** Call the demuxer's read_header and record the data start offset. */
int av_demuxer_open(AVFormatContext *ic)
{
    if (ic->iformat->read_header) {
        int err = ic->iformat->read_header(ic);
        if (err < 0)
            return err;
    }

    if (ic->pb && !ic->data_offset)
        ic->data_offset = avio_tell(ic->pb);

    return 0;
}
 
 
/**
 * Probe a bytestream to determine the input format, reading successively
 * larger buffers until a format scores well enough or max_probe_size is
 * reached. On success the read data is pushed back into pb and the probe
 * score is returned; a negative error code is returned on failure.
 */
int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
                          const char *filename, void *logctx,
                          unsigned int offset, unsigned int max_probe_size)
{
    /* buf_size starts at -offset so the first 'offset' bytes read are not
     * counted as probe payload */
    AVProbeData pd = { filename ? filename : "", NULL, -offset };
    unsigned char *buf = NULL;
    uint8_t *mime_type;
    int ret = 0, probe_size, buf_offset = 0;
    int score = 0;

    if (!max_probe_size) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size > PROBE_BUF_MAX) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size < PROBE_BUF_MIN) {
        av_log(logctx, AV_LOG_ERROR,
               "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
        return AVERROR(EINVAL);
    }

    if (offset >= max_probe_size) {
        return AVERROR(EINVAL);
    }

    /* a known MIME type can select the format without any probing */
    if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
        if (!av_strcasecmp(mime_type, "audio/aacp")) {
            *fmt = av_find_input_format("aac");
        }
        av_freep(&mime_type);
    }

    /* grow the probe buffer exponentially until a format is found */
    for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
        probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {

        if (probe_size < offset) {
            continue;
        }
        /* demand a higher score while more data could still be read */
        score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;

        /* read probe data */
        if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
            return ret;
        if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
            /* fail if error was not end of file, otherwise, lower score */
            if (ret != AVERROR_EOF) {
                av_free(buf);
                return ret;
            }
            score = 0;
            ret = 0;            /* error was end of file, nothing read */
        }
        pd.buf_size = buf_offset += ret;
        pd.buf = &buf[offset];

        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);

        /* guess file format */
        *fmt = av_probe_input_format2(&pd, 1, &score);
        if(*fmt){
            if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration
                av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
            }else
                av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
        }
    }

    if (!*fmt) {
        av_free(buf);
        return AVERROR_INVALIDDATA;
    }

    /* rewind. reuse probe buffer to avoid seeking */
    ret = ffio_rewind_with_probe_data(pb, &buf, pd.buf_size);

    return ret < 0 ? ret : score;
}
 
/** Like av_probe_input_buffer2() but discards the (positive) score. */
int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
                          const char *filename, void *logctx,
                          unsigned int offset, unsigned int max_probe_size)
{
    int ret = av_probe_input_buffer2(pb, fmt, filename, logctx, offset, max_probe_size);

    if (ret < 0)
        return ret;
    return 0;
}
 
 
/**
 * Open the input and probe the format if necessary.
 * Returns the probe score (>= 0) on success or a negative error code.
 */
static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
{
    AVProbeData pd = { filename, NULL, 0 };
    int score = AVPROBE_SCORE_RETRY;
    int ret;

    /* caller supplied its own AVIOContext: probe through it, never open a
     * file ourselves */
    if (s->pb) {
        s->flags |= AVFMT_FLAG_CUSTOM_IO;
        if (!s->iformat)
            return av_probe_input_buffer2(s->pb, &s->iformat, filename, s,
                                          0, s->probesize);
        if (s->iformat->flags & AVFMT_NOFILE)
            av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
                   "will be ignored with AVFMT_NOFILE format.\n");
        return 0;
    }

    /* no-file formats and filename-based guesses need no I/O at all */
    if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
        (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
        return score;

    ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
                     &s->interrupt_callback, options);
    if (ret < 0)
        return ret;
    if (s->iformat)
        return 0;
    return av_probe_input_buffer2(s->pb, &s->iformat, filename, s,
                                  0, s->probesize);
}
 
/**
 * Append a copy of pkt to a packet list, updating the tail pointer.
 * Returns the stored packet, or NULL on allocation failure.
 */
static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                               AVPacketList **plast_pktl)
{
    AVPacketList *node = av_mallocz(sizeof(*node));

    if (!node)
        return NULL;

    /* link at the tail, or start a new list */
    if (*packet_buffer)
        (*plast_pktl)->next = node;
    else
        *packet_buffer = node;
    *plast_pktl = node;

    node->pkt = *pkt;
    return &node->pkt;
}
 
/**
 * Queue every attached picture (cover art etc.) of non-discarded streams
 * into the raw packet buffer so it is returned once by av_read_frame().
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
int avformat_queue_attached_pictures(AVFormatContext *s)
{
    int i;
    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
            s->streams[i]->discard < AVDISCARD_ALL) {
            AVPacket copy = s->streams[i]->attached_pic;
            copy.buf = av_buffer_ref(copy.buf);
            if (!copy.buf)
                return AVERROR(ENOMEM);

            /* Bug fix: the original ignored add_to_pktbuf() failure, leaking
             * the buffer reference and silently dropping the picture. */
            if (!add_to_pktbuf(&s->raw_packet_buffer, &copy,
                               &s->raw_packet_buffer_end)) {
                av_buffer_unref(&copy.buf);
                return AVERROR(ENOMEM);
            }
        }
    return 0;
}
 
/**
 * Open an input stream and read its header; codecs are not opened.
 * On failure the context is freed and *ps is set to NULL; on success the
 * consumed entries are removed from *options.
 */
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
{
    AVFormatContext *s = *ps;
    int ret = 0;
    AVDictionary *tmp = NULL;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;

    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
    if (!s->av_class){
        av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
        return AVERROR(EINVAL);
    }
    if (fmt)
        s->iformat = fmt;

    /* work on a copy so unconsumed options can be handed back */
    if (options)
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;

    if ((ret = init_input(s, filename, &tmp)) < 0)
        goto fail;
    s->probe_score = ret;
    avio_skip(s->pb, s->skip_initial_bytes);

    /* check filename in case an image number is expected */
    if (s->iformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
    }

    s->duration = s->start_time = AV_NOPTS_VALUE;
    av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));

    /* allocate private data */
    if (s->iformat->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (s->iformat->priv_class) {
            /* private context: apply defaults and caller options */
            *(const AVClass**)s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
                goto fail;
        }
    }

    /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
    if (s->pb)
        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);

    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
        if ((ret = s->iformat->read_header(s)) < 0)
            goto fail;

    /* attach ID3v2 pictures only for formats known to carry them */
    if (id3v2_extra_meta) {
        if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
            !strcmp(s->iformat->name, "tta")) {
            if((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
                goto fail;
        } else
            av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
    }
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);

    if ((ret = avformat_queue_attached_pictures(s)) < 0)
        goto fail;

    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
        s->data_offset = avio_tell(s->pb);

    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

    /* return the options that were not consumed */
    if (options) {
        av_dict_free(options);
        *options = tmp;
    }
    *ps = s;
    return 0;

fail:
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    av_dict_free(&tmp);
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
        avio_close(s->pb);
    avformat_free_context(s);
    *ps = NULL;
    return ret;
}
 
/*******************************************************/
 
/** Apply any codec id the user forced on the format context. */
static void force_codec_ids(AVFormatContext *s, AVStream *st)
{
    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (s->video_codec_id)
            st->codec->codec_id = s->video_codec_id;
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (s->audio_codec_id)
            st->codec->codec_id = s->audio_codec_id;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        if (s->subtitle_codec_id)
            st->codec->codec_id = s->subtitle_codec_id;
        break;
    }
}
 
/**
 * Accumulate packet data in the stream's probe buffer and, once enough
 * data has been collected (or the stream ended), try to identify the
 * codec. Passing pkt == NULL finalizes probing for the stream.
 */
static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
{
    if(st->request_probe>0){
        AVProbeData *pd = &st->probe_data;
        int end;
        av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
        --st->probe_packets;

        if (pkt) {
            uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
            if(!new_buf) {
                av_log(s, AV_LOG_WARNING,
                       "Failed to reallocate probe buffer for stream %d\n",
                       st->index);
                /* treat the allocation failure like end of input */
                goto no_packet;
            }
            pd->buf = new_buf;
            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
        } else {
no_packet:
            st->probe_packets = 0;
            if (!pd->buf_size) {
                av_log(s, AV_LOG_WARNING, "nothing to probe for stream %d\n",
                       st->index);
            }
        }

        end= s->raw_packet_buffer_remaining_size <= 0
                || st->probe_packets<=0;

        /* probe when at the end, or whenever the buffer size crosses a
         * power of two; when pkt is NULL, 'end' is set above, so pkt is
         * never dereferenced here */
        if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
            int score= set_codec_from_probe_data(s, st, pd);
            if(    (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_RETRY)
                || end){
                pd->buf_size=0;
                av_freep(&pd->buf);
                st->request_probe= -1;
                if(st->codec->codec_id != AV_CODEC_ID_NONE){
                    av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
                }else
                    av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
            }
            force_codec_ids(s, st);
        }
    }
    return 0;
}
 
/**
 * Read a raw (unparsed) packet from the demuxer. Packets of streams whose
 * codec is still being probed are routed through the raw packet buffer
 * and only released once probing has finished.
 */
int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i, err;
    AVStream *st;

    for(;;){
        AVPacketList *pktl = s->raw_packet_buffer;

        /* first drain buffered packets whose stream has been probed */
        if (pktl) {
            *pkt = pktl->pkt;
            st = s->streams[pkt->stream_index];
            if (s->raw_packet_buffer_remaining_size <= 0) {
                if ((err = probe_codec(s, st, NULL)) < 0)
                    return err;
            }
            if(st->request_probe <= 0){
                s->raw_packet_buffer = pktl->next;
                s->raw_packet_buffer_remaining_size += pkt->size;
                av_free(pktl);
                return 0;
            }
        }

        pkt->data = NULL;
        pkt->size = 0;
        av_init_packet(pkt);
        ret= s->iformat->read_packet(s, pkt);
        if (ret < 0) {
            if (!pktl || ret == AVERROR(EAGAIN))
                return ret;
            /* input exhausted: finalize probing for all pending streams,
             * then retry draining the buffer */
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->probe_packets) {
                    if ((err = probe_codec(s, st, NULL)) < 0)
                        return err;
                }
                av_assert0(st->request_probe <= 0);
            }
            continue;
        }

        if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
            (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
            av_log(s, AV_LOG_WARNING,
                   "Dropped corrupted packet (stream = %d)\n",
                   pkt->stream_index);
            av_free_packet(pkt);
            continue;
        }

        if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
            av_packet_merge_side_data(pkt);

        if(pkt->stream_index >= (unsigned)s->nb_streams){
            av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
            continue;
        }

        st= s->streams[pkt->stream_index];
        pkt->dts = wrap_timestamp(st, pkt->dts);
        pkt->pts = wrap_timestamp(st, pkt->pts);

        force_codec_ids(s, st);

        /* TODO: audio: time filter; video: frame reordering (pts != dts) */
        if (s->use_wallclock_as_timestamps)
            pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);

        if(!pktl && st->request_probe <= 0)
            return ret;

        /* stream still being probed: buffer the packet and keep reading */
        add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
        s->raw_packet_buffer_remaining_size -= pkt->size;

        if ((err = probe_codec(s, st, pkt)) < 0)
            return err;
    }
}
 
#if FF_API_READ_PACKET
/* Deprecated public wrapper kept for ABI compatibility. */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
 
 
/**********************************************************/
 
/**
 * Return 1 for audio codecs whose frame size can be determined from the
 * bitstream itself, 0 otherwise.
 */
static int determinable_frame_size(AVCodecContext *avctx)
{
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MP1:
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
        return 1;
    default:
        return 0;
    }
}
 
/**
 * Get the number of samples of an audio frame. Return -1 on error.
 *
 * @param enc  codec context of the stream
 * @param size packet payload size in bytes
 * @param mux  nonzero when muxing (changes the priority of frame_size)
 */
int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
{
    int duration;

    /* give frame_size priority if demuxing */
    if (!mux && enc->frame_size > 1)
        return enc->frame_size;

    duration = av_get_audio_frame_duration(enc, size);
    if (duration > 0)
        return duration;

    /* Fall back on using frame_size if muxing. */
    if (enc->frame_size > 1)
        return enc->frame_size;

    /* For WMA we currently have no other means to calculate duration, so
     * assume CBR, which is true for all known cases. */
    if (!mux && enc->bit_rate > 0 && size > 0 &&
        enc->sample_rate > 0 && enc->block_align > 1 &&
        (enc->codec_id == AV_CODEC_ID_WMAV1 ||
         enc->codec_id == AV_CODEC_ID_WMAV2))
        return ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;

    return -1;
}
 
 
/**
 * Compute the duration of one packet/frame as the fraction
 * *pnum / *pden seconds. Both are set to 0 when the duration
 * cannot be determined.
 */
void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
{
    int frame_size;

    *pnum = 0;
    *pden = 0;
    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (st->r_frame_rate.num && !pc) {
            *pnum = st->r_frame_rate.den;
            *pden = st->r_frame_rate.num;
        } else if(st->time_base.num*1000LL > st->time_base.den) {
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            if (pc && pc->repeat_pict) {
                /* scale duration by (1 + repeat_pict), guarding against
                 * integer overflow of *pnum */
                if (*pnum > INT_MAX / (1 + pc->repeat_pict))
                    *pden /= 1 + pc->repeat_pict;
                else
                    *pnum *= 1 + pc->repeat_pict;
            }
            //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
            //Thus if we have no parser in such case leave duration undefined.
            if(st->codec->ticks_per_frame>1 && !pc){
                *pnum = *pden = 0;
            }
        }
        break;
    case AVMEDIA_TYPE_AUDIO:
        frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
        if (frame_size <= 0 || st->codec->sample_rate <= 0)
            break;
        *pnum = frame_size;
        *pden = st->codec->sample_rate;
        break;
    default:
        break;
    }
}
 
/**
 * Return 1 if the codec produces only intra-coded frames (non-video
 * streams are treated as intra-only), 0 otherwise.
 */
static int is_intra_only(AVCodecContext *enc)
{
    const AVCodecDescriptor *desc;

    if (enc->codec_type != AVMEDIA_TYPE_VIDEO)
        return 1;

    desc = av_codec_get_codec_descriptor(enc);
    if (!desc) {
        /* look the descriptor up once and cache it on the context */
        desc = avcodec_descriptor_get(enc->codec_id);
        av_codec_set_codec_descriptor(enc, desc);
    }
    return desc ? !!(desc->props & AV_CODEC_PROP_INTRA_ONLY) : 0;
}
 
/**
 * Return 1 when enough frames have been decoded to trust the stream's
 * reorder delay estimate. Only H.264 needs this heuristic.
 */
static int has_decode_delay_been_guessed(AVStream *st)
{
    int needed;

    if (st->codec->codec_id != AV_CODEC_ID_H264)
        return 1;
    /* after leaving find_stream_info, nb_decoded_frames stops increasing
     * for stream copy */
    if (!st->info)
        return 1;
#if CONFIG_H264_DECODER
    if (st->codec->has_b_frames &&
        avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
        return 1;
#endif
    if (st->codec->has_b_frames < 3)
        needed = 7;
    else if (st->codec->has_b_frames < 4)
        needed = 18;
    else
        needed = 20;
    return st->nb_decoded_frames >= needed;
}
 
/**
 * Advance to the next buffered packet; at the end of the parse queue,
 * continue into the packet buffer.
 */
static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
{
    if (pktl->next)
        return pktl->next;
    return pktl == s->parse_queue_end ? s->packet_buffer : NULL;
}
 
/**
 * Decide how timestamp wraparound should be corrected for a stream and
 * propagate the chosen reference/behavior to related streams (via the
 * default stream) or to all programs containing the stream.
 *
 * @return 1 when a wrap reference was established, 0 otherwise
 */
static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index)
{
    if (s->correct_ts_overflow && st->pts_wrap_bits < 63 &&
        st->pts_wrap_reference == AV_NOPTS_VALUE && st->first_dts != AV_NOPTS_VALUE) {
        int i;

        // reference time stamp should be 60 s before first time stamp
        int64_t pts_wrap_reference = st->first_dts - av_rescale(60, st->time_base.den, st->time_base.num);
        // if first time stamp is not more than 1/8 and 60s before the wrap point, subtract rather than add wrap offset
        int pts_wrap_behavior = (st->first_dts < (1LL<<st->pts_wrap_bits) - (1LL<<st->pts_wrap_bits-3)) ||
            (st->first_dts < (1LL<<st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
            AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;

        AVProgram *first_program = av_find_program_from_stream(s, NULL, stream_index);

        if (!first_program) {
            /* stream belongs to no program: share one reference across all
             * streams, keyed on the default stream */
            int default_stream_index = av_find_default_stream_index(s);
            if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
                for (i=0; i<s->nb_streams; i++) {
                    s->streams[i]->pts_wrap_reference = pts_wrap_reference;
                    s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
                }
            }
            else {
                st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
                st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
            }
        }
        else {
            /* prefer a reference already chosen by any program containing
             * this stream */
            AVProgram *program = first_program;
            while (program) {
                if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
                    pts_wrap_reference = program->pts_wrap_reference;
                    pts_wrap_behavior = program->pts_wrap_behavior;
                    break;
                }
                program = av_find_program_from_stream(s, program, stream_index);
            }

            // update every program with differing pts_wrap_reference
            program = first_program;
            while(program) {
                if (program->pts_wrap_reference != pts_wrap_reference) {
                    for (i=0; i<program->nb_stream_indexes; i++) {
                        s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
                        s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
                    }

                    program->pts_wrap_reference = pts_wrap_reference;
                    program->pts_wrap_behavior = pts_wrap_behavior;
                }
                program = av_find_program_from_stream(s, program, stream_index);
            }
        }
        return 1;
    }
    return 0;
}
 
/**
 * Once the first absolute dts of a stream is known, shift all previously
 * buffered packets of that stream from relative to absolute timestamps
 * and derive missing dts values from a pts reorder buffer.
 */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts, AVPacket *pkt)
{
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
    int64_t pts_buffer[MAX_REORDER_DELAY+1];
    int64_t shift;
    int i, delay;

    /* only act once, and only on a genuine absolute dts */
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
        return;

    delay = st->codec->has_b_frames;
    st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
    st->cur_dts= dts;
    shift = st->first_dts - RELATIVE_TS_BASE;

    for (i=0; i<MAX_REORDER_DELAY+1; i++)
        pts_buffer[i] = AV_NOPTS_VALUE;

    if (is_relative(pts))
        pts += shift;

    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)
            continue;
        if(is_relative(pktl->pkt.pts))
            pktl->pkt.pts += shift;

        if(is_relative(pktl->pkt.dts))
            pktl->pkt.dts += shift;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;

        /* insertion-sort the pts into the reorder window; its minimum is
         * the dts of the oldest pending frame */
        if(pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
            pts_buffer[0]= pktl->pkt.pts;
            for(i=0; i<delay && pts_buffer[i] > pts_buffer[i+1]; i++)
                FFSWAP(int64_t, pts_buffer[i], pts_buffer[i+1]);
            if(pktl->pkt.dts == AV_NOPTS_VALUE)
                pktl->pkt.dts= pts_buffer[0];
        }
    }

    if (update_wrap_reference(s, st, stream_index) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
        // correct first time stamps to negative values
        st->first_dts = wrap_timestamp(st, st->first_dts);
        st->cur_dts = wrap_timestamp(st, st->cur_dts);
        pkt->dts = wrap_timestamp(st, pkt->dts);
        pkt->pts = wrap_timestamp(st, pkt->pts);
        pts = wrap_timestamp(st, pts);
    }

    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
}
 
/**
 * Fill in missing dts/pts/duration of the initial run of buffered packets
 * of a stream once a per-packet duration becomes known.
 */
static void update_initial_durations(AVFormatContext *s, AVStream *st,
                                     int stream_index, int duration)
{
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
    int64_t cur_dts= RELATIVE_TS_BASE;

    if(st->first_dts != AV_NOPTS_VALUE){
        /* walk back from first_dts across the run of timestamp-less
         * packets to find the dts of the first buffered packet */
        cur_dts= st->first_dts;
        for(; pktl; pktl= get_next_pkt(s, st, pktl)){
            if(pktl->pkt.stream_index == stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                    break;
                cur_dts -= duration;
            }
        }
        if(pktl && pktl->pkt.dts != st->first_dts) {
            av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %d) in the queue\n",
                   av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
            return;
        }
        if(!pktl) {
            av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
            return;
        }
        pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts != RELATIVE_TS_BASE)
        return;

    /* assign monotonically increasing timestamps to the leading run of
     * packets that carry none */
    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)
            continue;
        if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
//            if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
                pktl->pkt.duration = duration;
        }else
            break;
        cur_dts = pktl->pkt.dts + pktl->pkt.duration;
    }
    if(!pktl)
        st->cur_dts= cur_dts;
}
 
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
AVCodecParserContext *pc, AVPacket *pkt)
{
int num, den, presentation_delayed, delay, i;
int64_t offset;
 
if (s->flags & AVFMT_FLAG_NOFILLIN)
return;
 
if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
pkt->dts= AV_NOPTS_VALUE;
 
if (pc && pc->pict_type == AV_PICTURE_TYPE_B
&& !st->codec->has_b_frames)
//FIXME Set low_delay = 0 when has_b_frames = 1
st->codec->has_b_frames = 1;
 
/* do we have a video B-frame ? */
delay= st->codec->has_b_frames;
presentation_delayed = 0;
 
/* XXX: need has_b_frame, but cannot get it if the codec is
not initialized */
if (delay &&
pc && pc->pict_type != AV_PICTURE_TYPE_B)
presentation_delayed = 1;
 
if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
st->pts_wrap_bits < 63 &&
pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
if(is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > st->cur_dts) {
pkt->dts -= 1LL<<st->pts_wrap_bits;
} else
pkt->pts += 1LL<<st->pts_wrap_bits;
}
 
// some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
// we take the conservative approach and discard both
// Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
if(strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")) // otherwise we discard correct timestamps for vc1-wmapro.ism
pkt->dts= AV_NOPTS_VALUE;
}
 
if (pkt->duration == 0) {
ff_compute_frame_duration(&num, &den, st, pc, pkt);
if (den && num) {
pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
}
}
if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
update_initial_durations(s, st, pkt->stream_index, pkt->duration);
 
/* correct timestamps with byte offset if demuxers only have timestamps
on packet boundaries */
if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
/* this will estimate bitrate based on this frame's duration and size */
offset = av_rescale(pc->offset, pkt->duration, pkt->size);
if(pkt->pts != AV_NOPTS_VALUE)
pkt->pts += offset;
if(pkt->dts != AV_NOPTS_VALUE)
pkt->dts += offset;
}
 
if (pc && pc->dts_sync_point >= 0) {
// we have synchronization info from the parser
int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
if (den > 0) {
int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
if (pkt->dts != AV_NOPTS_VALUE) {
// got DTS from the stream, update reference timestamp
st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
} else if (st->reference_dts != AV_NOPTS_VALUE) {
// compute DTS based on reference timestamp
pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
}
 
if (st->reference_dts != AV_NOPTS_VALUE && pkt->pts == AV_NOPTS_VALUE)
pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
 
if (pc->dts_sync_point > 0)
st->reference_dts = pkt->dts; // new reference
}
}
 
/* This may be redundant, but it should not hurt. */
if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
presentation_delayed = 1;
 
av_dlog(NULL, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
/* interpolate PTS and DTS if they are not present */
//We skip H264 currently because delay and has_b_frames are not reliably set
if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){
if (presentation_delayed) {
/* DTS = decompression timestamp */
/* PTS = presentation timestamp */
if (pkt->dts == AV_NOPTS_VALUE)
pkt->dts = st->last_IP_pts;
update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
if (pkt->dts == AV_NOPTS_VALUE)
pkt->dts = st->cur_dts;
 
/* this is tricky: the dts must be incremented by the duration
of the frame we are displaying, i.e. the last I- or P-frame */
if (st->last_IP_duration == 0)
st->last_IP_duration = pkt->duration;
if(pkt->dts != AV_NOPTS_VALUE)
st->cur_dts = pkt->dts + st->last_IP_duration;
st->last_IP_duration = pkt->duration;
st->last_IP_pts= pkt->pts;
/* cannot compute PTS if not present (we can compute it only
by knowing the future */
} else if (pkt->pts != AV_NOPTS_VALUE ||
pkt->dts != AV_NOPTS_VALUE ||
pkt->duration ) {
int duration = pkt->duration;
 
/* presentation is not delayed : PTS and DTS are the same */
if (pkt->pts == AV_NOPTS_VALUE)
pkt->pts = pkt->dts;
update_initial_timestamps(s, pkt->stream_index, pkt->pts,
pkt->pts, pkt);
if (pkt->pts == AV_NOPTS_VALUE)
pkt->pts = st->cur_dts;
pkt->dts = pkt->pts;
if (pkt->pts != AV_NOPTS_VALUE)
st->cur_dts = pkt->pts + duration;
}
}
 
if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
st->pts_buffer[0]= pkt->pts;
for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
if(pkt->dts == AV_NOPTS_VALUE)
pkt->dts= st->pts_buffer[0];
}
if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt); // this should happen on the first packet
}
if(pkt->dts > st->cur_dts)
st->cur_dts = pkt->dts;
 
av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
 
/* update flags */
if (is_intra_only(st->codec))
pkt->flags |= AV_PKT_FLAG_KEY;
if (pc)
pkt->convergence_duration = pc->convergence_duration;
}
 
/* Drain a packet list: release every queued packet and list node,
 * then reset both head and tail pointers so the queue is empty. */
static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
{
    AVPacketList *cur = *pkt_buf;

    while (cur) {
        AVPacketList *next = cur->next;
        av_free_packet(&cur->pkt);
        av_free(cur);
        cur = next;
    }
    *pkt_buf     = NULL;
    *pkt_buf_end = NULL;
}
 
/**
 * Parse a packet, add all split parts to parse_queue
 *
 * Runs the packet's payload through the stream's parser; every complete
 * frame the parser emits is duplicated and appended to s->parse_queue.
 * The input packet is always consumed (freed) before returning.
 *
 * @param pkt packet to parse, NULL when flushing the parser at end of stream
 * @param stream_index index of the stream the packet (or flush) belongs to
 * @return 0 on success, a negative AVERROR code on failure
 */
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
{
    AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
    AVStream *st = s->streams[stream_index];
    uint8_t *data = pkt ? pkt->data : NULL;
    int size = pkt ? pkt->size : 0;
    int ret = 0, got_output = 0;

    if (!pkt) {
        /* End of stream: feed one empty packet so the parser flushes
         * whatever it still buffers. */
        av_init_packet(&flush_pkt);
        pkt = &flush_pkt;
        got_output = 1;
    } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        // preserve 0-size sync packets
        compute_pkt_fields(s, st, st->parser, pkt);
    }

    /* Keep parsing until the input is exhausted; on flush, keep going
     * as long as the parser still produces output. */
    while (size > 0 || (pkt == &flush_pkt && got_output)) {
        int len;

        av_init_packet(&out_pkt);
        len = av_parser_parse2(st->parser, st->codec,
                               &out_pkt.data, &out_pkt.size, data, size,
                               pkt->pts, pkt->dts, pkt->pos);

        /* Timestamps/position belong only to the first parser call for
         * this input packet; clear them so they are not reused. */
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
        pkt->pos = -1;
        /* increment read pointer */
        data += len;
        size -= len;

        got_output = !!out_pkt.size;

        if (!out_pkt.size)
            continue;

        /* Transfer side data ownership to the first emitted frame. */
        if (pkt->side_data) {
            out_pkt.side_data = pkt->side_data;
            out_pkt.side_data_elems = pkt->side_data_elems;
            pkt->side_data = NULL;
            pkt->side_data_elems = 0;
        }

        /* set the duration */
        out_pkt.duration = 0;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->sample_rate > 0) {
                /* Parser reports audio duration in samples; convert to
                 * the stream time base. */
                out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
                                                    (AVRational){ 1, st->codec->sample_rate },
                                                    st->time_base,
                                                    AV_ROUND_DOWN);
            }
        } else if (st->codec->time_base.num != 0 &&
                   st->codec->time_base.den != 0) {
            out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
                                                st->codec->time_base,
                                                st->time_base,
                                                AV_ROUND_DOWN);
        }

        out_pkt.stream_index = st->index;
        out_pkt.pts = st->parser->pts;
        out_pkt.dts = st->parser->dts;
        out_pkt.pos = st->parser->pos;

        if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
            out_pkt.pos = st->parser->frame_offset;

        /* key_frame: 1 = known keyframe, -1 = unknown (fall back to the
         * parsed picture type, then to the input packet's key flag). */
        if (st->parser->key_frame == 1 ||
            (st->parser->key_frame == -1 &&
             st->parser->pict_type == AV_PICTURE_TYPE_I))
            out_pkt.flags |= AV_PKT_FLAG_KEY;

        if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
            out_pkt.flags |= AV_PKT_FLAG_KEY;

        compute_pkt_fields(s, st, st->parser, &out_pkt);

        /* If the parser passed the whole input through unchanged, steal
         * the input packet's buffer instead of copying it. */
        if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
            out_pkt.buf = pkt->buf;
            pkt->buf = NULL;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
            out_pkt.destruct = pkt->destruct;
            pkt->destruct = NULL;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        }
        if ((ret = av_dup_packet(&out_pkt)) < 0)
            goto fail;

        if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
            av_free_packet(&out_pkt);
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }


    /* end of the stream => close and free the parser */
    if (pkt == &flush_pkt) {
        av_parser_close(st->parser);
        st->parser = NULL;
    }

fail:
    /* The input packet is consumed in all cases. */
    av_free_packet(pkt);
    return ret;
}
 
/* Pop the head packet from a packet list into *pkt. Ownership of the
 * packet's data moves to the caller; the list node itself is freed.
 * The list must not be empty. */
static int read_from_packet_buffer(AVPacketList **pkt_buffer,
                                   AVPacketList **pkt_buffer_end,
                                   AVPacket *pkt)
{
    AVPacketList *head;

    av_assert0(*pkt_buffer);
    head        = *pkt_buffer;
    *pkt        = head->pkt;
    *pkt_buffer = head->next;
    if (!head->next)
        *pkt_buffer_end = NULL;       /* list is now empty */
    av_freep(&head);
    return 0;
}
 
/* Read the next frame for av_read_frame(): pulls raw packets from the
 * demuxer, lazily creates and runs per-stream parsers, and fills in
 * timestamp-related packet fields. Returns 0 on success or a negative
 * AVERROR code (including AVERROR(EAGAIN) passed straight through). */
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    int ret = 0, i, got_packet = 0;

    av_init_packet(pkt);

    /* Loop until either a packet can be returned directly or the
     * parse queue has something to hand out. */
    while (!got_packet && !s->parse_queue) {
        AVStream *st;
        AVPacket cur_pkt;

        /* read next packet */
        ret = ff_read_packet(s, &cur_pkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ret;
            /* flush the parsers */
            for(i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->parser && st->need_parsing)
                    parse_packet(s, NULL, st->index);
            }
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */
            break;
        }
        ret = 0;
        st = s->streams[cur_pkt.stream_index];

        /* Diagnostic only: pts earlier than dts is invalid. */
        if (cur_pkt.pts != AV_NOPTS_VALUE &&
            cur_pkt.dts != AV_NOPTS_VALUE &&
            cur_pkt.pts < cur_pkt.dts) {
            av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
                   cur_pkt.stream_index,
                   av_ts2str(cur_pkt.pts),
                   av_ts2str(cur_pkt.dts),
                   cur_pkt.size);
        }
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
                   cur_pkt.stream_index,
                   av_ts2str(cur_pkt.pts),
                   av_ts2str(cur_pkt.dts),
                   cur_pkt.size,
                   cur_pkt.duration,
                   cur_pkt.flags);

        /* Lazily open a parser the first time this stream needs one. */
        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
            if (!st->parser) {
                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codec->codec_id));
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
                st->parser->flags |= PARSER_FLAG_ONCE;
            } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
                st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
            }
        }

        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            *pkt = cur_pkt;
            compute_pkt_fields(s, st, NULL, pkt);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
            }
            got_packet = 1;
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
                return ret;
        } else {
            /* free packet */
            av_free_packet(&cur_pkt);
        }
        if (pkt->flags & AV_PKT_FLAG_KEY)
            st->skip_to_keyframe = 0;
        if (st->skip_to_keyframe) {
            /* Still waiting for a keyframe: drop the packet.
             * NOTE(review): cur_pkt is copied into *pkt here after being
             * freed — this looks suspicious; verify intent against
             * upstream before changing. */
            av_free_packet(&cur_pkt);
            if (got_packet) {
                *pkt = cur_pkt;
            }
            got_packet = 0;
        }
    }

    /* Fall back to whatever the parser queued up. */
    if (!got_packet && s->parse_queue)
        ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);

    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
            pkt->stream_index,
            av_ts2str(pkt->pts),
            av_ts2str(pkt->dts),
            pkt->size,
            pkt->duration,
            pkt->flags);

    return ret;
}
 
/* Public frame reader. Without AVFMT_FLAG_GENPTS this is a thin wrapper
 * over read_frame_internal() / the packet buffer; with GENPTS it buffers
 * packets and resolves missing PTS values from later DTS values before
 * returning anything. */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    int eof = 0;
    int ret;
    AVStream *st;

    if (!genpts) {
        /* Simple path: serve buffered packets first, else read fresh. */
        ret = s->packet_buffer ?
            read_from_packet_buffer(&s->packet_buffer, &s->packet_buffer_end, pkt) :
            read_frame_internal(s, pkt);
        if (ret < 0)
            return ret;
        goto return_packet;
    }

    for (;;) {
        AVPacketList *pktl = s->packet_buffer;

        if (pktl) {
            AVPacket *next_pkt = &pktl->pkt;

            if (next_pkt->dts != AV_NOPTS_VALUE) {
                int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
                // last dts seen for this stream. if any of packets following
                // current one had no dts, we will set this to AV_NOPTS_VALUE.
                int64_t last_dts = next_pkt->dts;
                /* Scan ahead in the buffer: the DTS of a later packet on
                 * the same stream can supply this packet's missing PTS. */
                while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
                    if (pktl->pkt.stream_index == next_pkt->stream_index &&
                        (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
                        if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
                            next_pkt->pts = pktl->pkt.dts;
                        }
                        if (last_dts != AV_NOPTS_VALUE) {
                            // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
                            last_dts = pktl->pkt.dts;
                        }
                    }
                    pktl = pktl->next;
                }
                if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
                    // Fixing the last reference frame had none pts issue (For MXF etc).
                    // We only do this when
                    // 1. eof.
                    // 2. we are not able to resolve a pts value for current packet.
                    // 3. the packets for this stream at the end of the files had valid dts.
                    next_pkt->pts = last_dts + next_pkt->duration;
                }
                pktl = s->packet_buffer;
            }

            /* read packet from packet buffer, if there is data */
            if (!(next_pkt->pts == AV_NOPTS_VALUE &&
                  next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
                ret = read_from_packet_buffer(&s->packet_buffer,
                                               &s->packet_buffer_end, pkt);
                goto return_packet;
            }
        }

        ret = read_frame_internal(s, pkt);
        if (ret < 0) {
            if (pktl && ret != AVERROR(EAGAIN)) {
                /* On real EOF, loop once more to flush the buffer with
                 * the end-of-file PTS fixups enabled. */
                eof = 1;
                continue;
            } else
                return ret;
        }

        /* NOTE(review): add_to_pktbuf() presumably returns NULL on OOM;
         * confirm av_dup_packet() tolerates a NULL argument before
         * relying on this error path. */
        if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                          &s->packet_buffer_end)) < 0)
            return AVERROR(ENOMEM);
    }

return_packet:

    st = s->streams[pkt->stream_index];
    /* Inject pending skip-samples side data (e.g. encoder delay). */
    if (st->skip_samples) {
        uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
        if (p) {
            AV_WL32(p, st->skip_samples);
            av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
        }
        st->skip_samples = 0;
    }

    if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
        ff_reduce_index(s, st->index);
        av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
    }

    /* Internal relative timestamps are converted back before the packet
     * leaves the library. */
    if (is_relative(pkt->dts))
        pkt->dts -= RELATIVE_TS_BASE;
    if (is_relative(pkt->pts))
        pkt->pts -= RELATIVE_TS_BASE;

    return ret;
}
 
/* XXX: suppress the packet queue */
static void flush_packet_queue(AVFormatContext *s)
{
    /* Drop everything queued at all three buffering stages. */
    free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
    free_packet_buffer(&s->packet_buffer,     &s->packet_buffer_end);
    free_packet_buffer(&s->parse_queue,       &s->parse_queue_end);

    /* The raw buffer is empty again, so its byte quota is fully available. */
    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
}
 
/*******************************************************/
/* seek support */
 
/* Pick the stream to use when the caller gives no stream index:
 * the first real video stream wins; failing that, the first audio
 * stream; failing that, stream 0. Returns -1 if there are no streams. */
int av_find_default_stream_index(AVFormatContext *s)
{
    int fallback_audio = -1;
    int i;

    if (s->nb_streams <= 0)
        return -1;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

        /* A video stream that is not just an attached picture wins outright. */
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
            !(st->disposition & AV_DISPOSITION_ATTACHED_PIC))
            return i;

        /* Remember the first audio stream as a fallback. */
        if (fallback_audio < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            fallback_audio = i;
    }
    return fallback_audio >= 0 ? fallback_audio : 0;
}
 
/**
 * Flush the frame reader.
 *
 * Empties all packet queues and resets every stream's parsing and
 * timestamp-tracking state, as required after a seek.
 */
void ff_read_frame_flush(AVFormatContext *s)
{
    int i, j;

    flush_packet_queue(s);

    /* Put every stream back into a pristine read state. */
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

        if (st->parser) {
            av_parser_close(st->parser);
            st->parser = NULL;
        }

        st->last_IP_pts   = AV_NOPTS_VALUE;
        st->reference_dts = AV_NOPTS_VALUE;
        /* Current DTS restarts from an unspecified origin; streams whose
         * first DTS was never seen fall back to the relative base. */
        st->cur_dts = st->first_dts == AV_NOPTS_VALUE ? RELATIVE_TS_BASE
                                                      : AV_NOPTS_VALUE;

        st->probe_packets = MAX_PROBE_PACKETS;

        for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
            st->pts_buffer[j] = AV_NOPTS_VALUE;
    }
}
 
/* After a successful seek on ref_st, propagate the reached timestamp to
 * every stream's cur_dts, rescaled into each stream's own time base. */
void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
{
    int i;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *cur = s->streams[i];
        int64_t num   = cur->time_base.den * (int64_t)ref_st->time_base.num;
        int64_t den   = cur->time_base.num * (int64_t)ref_st->time_base.den;

        cur->cur_dts = av_rescale(timestamp, num, den);
    }
}
 
/* Keep the seek index of a stream within s->max_index_size bytes: once
 * the entry count reaches the budget, halve it by discarding every
 * second entry. */
void ff_reduce_index(AVFormatContext *s, int stream_index)
{
    AVStream *st             = s->streams[stream_index];
    unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
    int i;

    if ((unsigned)st->nb_index_entries < max_entries)
        return;

    for (i = 0; 2 * i < st->nb_index_entries; i++)
        st->index_entries[i] = st->index_entries[2 * i];
    st->nb_index_entries = i;
}
 
/* Insert (or update) an entry in a timestamp-sorted index array, keeping
 * the array sorted. Returns the entry's index, -1 on allocation failure
 * or inconsistent input, or AVERROR(EINVAL) for unusable arguments. */
int ff_add_index_entry(AVIndexEntry **index_entries,
                       int *nb_index_entries,
                       unsigned int *index_entries_allocated_size,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
{
    AVIndexEntry *entries, *ie;
    int index;

    /* Refuse growth that would overflow the allocation size. */
    if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
        return -1;

    if(timestamp == AV_NOPTS_VALUE)
        return AVERROR(EINVAL);

    if (size < 0 || size > 0x3FFFFFFF)
        return AVERROR(EINVAL);

    if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
        timestamp -= RELATIVE_TS_BASE;

    entries = av_fast_realloc(*index_entries,
                              index_entries_allocated_size,
                              (*nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    if(!entries)
        return -1;

    *index_entries= entries;

    /* Find where the timestamp belongs; ANY makes the search accept
     * non-keyframe entries too. */
    index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);

    if(index<0){
        /* Timestamp is beyond all existing entries: append. */
        index= (*nb_index_entries)++;
        ie= &entries[index];
        av_assert0(index==0 || ie[-1].timestamp < timestamp);
    }else{
        ie= &entries[index];
        if(ie->timestamp != timestamp){
            /* Different timestamp at the found slot: shift the tail up
             * and insert in place to preserve sorted order. */
            if(ie->timestamp <= timestamp)
                return -1;
            memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
            (*nb_index_entries)++;
        }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
            distance= ie->min_distance;
    }

    ie->pos = pos;
    ie->timestamp = timestamp;
    ie->min_distance= distance;
    ie->size= size;
    ie->flags = flags;

    return index;
}
 
/* Public wrapper: apply the stream's timestamp wrap correction, then add
 * the entry to that stream's own index arrays. */
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
{
    int64_t corrected_ts = wrap_timestamp(st, timestamp);

    return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
                              &st->index_entries_allocated_size,
                              pos, corrected_ts, size, distance, flags);
}
 
/* Binary-search a sorted index for wanted_timestamp. With
 * AVSEEK_FLAG_BACKWARD the last entry <= target is returned, otherwise
 * the first entry >= target; without AVSEEK_FLAG_ANY the result is then
 * moved to the nearest keyframe entry in the search direction.
 * Returns the entry index, or -1 when no suitable entry exists. */
int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
                              int64_t wanted_timestamp, int flags)
{
    int lo = -1;
    int hi = nb_entries;
    int m;

    /* Fast path for appending: the last entry is still below the target. */
    if (hi && entries[hi - 1].timestamp < wanted_timestamp)
        lo = hi - 1;

    /* lo converges on the last entry <= target, hi on the first >= target;
     * both bounds are updated when the midpoint hits the target exactly. */
    while (hi - lo > 1) {
        int64_t ts;

        m  = (lo + hi) >> 1;
        ts = entries[m].timestamp;
        if (ts >= wanted_timestamp)
            hi = m;
        if (ts <= wanted_timestamp)
            lo = m;
    }

    m = (flags & AVSEEK_FLAG_BACKWARD) ? lo : hi;

    if (!(flags & AVSEEK_FLAG_ANY)) {
        /* Step over non-keyframe entries in the seek direction. */
        while (m >= 0 && m < nb_entries &&
               !(entries[m].flags & AVINDEX_KEYFRAME))
            m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
    }

    return m == nb_entries ? -1 : m;
}
 
/* Public wrapper: search a stream's own index array for a timestamp. */
int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                              int flags)
{
    return ff_index_search_timestamp(st->index_entries,
                                     st->nb_index_entries,
                                     wanted_timestamp, flags);
}
 
/* Fetch a timestamp via the demuxer's read_timestamp callback, applying
 * the per-stream wrap correction when a concrete stream is addressed. */
static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
                                 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
{
    int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);

    return stream_index >= 0 ? wrap_timestamp(s->streams[stream_index], ts)
                             : ts;
}
 
/* Seek to target_ts on one stream using binary search over the demuxer's
 * read_timestamp callback, seeding the search bounds from the stream's
 * index when possible. Returns 0 on success, <0 on failure. */
int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
{
    AVInputFormat *avif= s->iformat;
    int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    int index;
    int64_t ret;
    AVStream *st;

    if (stream_index < 0)
        return -1;

    av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));

    ts_max=
    ts_min= AV_NOPTS_VALUE;
    pos_limit= -1; //gcc falsely says it may be uninitialized

    st= s->streams[stream_index];
    if(st->index_entries){
        AVIndexEntry *e;

        /* Seed the lower search bound from the index entry at or
         * before the target. */
        index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
        index= FFMAX(index, 0);
        e= &st->index_entries[index];

        if(e->timestamp <= target_ts || e->pos == e->min_distance){
            pos_min= e->pos;
            ts_min= e->timestamp;
            av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
                    pos_min, av_ts2str(ts_min));
        }else{
            av_assert1(index==0);
        }

        /* And the upper bound from the entry at or after the target. */
        index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
        av_assert0(index < st->nb_index_entries);
        if(index >= 0){
            e= &st->index_entries[index];
            av_assert1(e->timestamp >= target_ts);
            pos_max= e->pos;
            ts_max= e->timestamp;
            pos_limit= pos_max - e->min_distance;
            av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
                    pos_max, pos_limit, av_ts2str(ts_max));
        }
    }

    pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
    if(pos<0)
        return -1;

    /* do the seek */
    if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
        return ret;

    ff_read_frame_flush(s);
    ff_update_cur_dts(s, st, ts);

    return 0;
}
 
/* Find the last readable timestamp (and its position) in the file by
 * stepping backwards from the end with an exponentially growing step
 * until a timestamp is found, then walking forward to the very last
 * one. Returns 0 on success, -1 if no timestamp could be read.
 * ts and pos may each be NULL when the caller does not need them. */
int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
                    int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
{
    int64_t step= 1024;
    int64_t limit, ts_max;
    int64_t filesize = avio_size(s->pb);
    int64_t pos_max = filesize - 1;
    /* Back off from EOF in doubling steps until a timestamp is found. */
    do{
        limit = pos_max;
        pos_max = FFMAX(0, (pos_max) - step);
        ts_max = ff_read_timestamp(s, stream_index, &pos_max, limit, read_timestamp);
        step += step;
    }while(ts_max == AV_NOPTS_VALUE && 2*limit > step);
    if (ts_max == AV_NOPTS_VALUE)
        return -1;

    /* Advance from there to the last timestamp the file yields. */
    for(;;){
        int64_t tmp_pos = pos_max + 1;
        int64_t tmp_ts = ff_read_timestamp(s, stream_index, &tmp_pos, INT64_MAX, read_timestamp);
        if(tmp_ts == AV_NOPTS_VALUE)
            break;
        av_assert0(tmp_pos > pos_max);
        ts_max = tmp_ts;
        pos_max = tmp_pos;
        if(tmp_pos >= filesize)
            break;
    }

    if (ts)
        *ts = ts_max;
    if (pos)
        *pos = pos_max;

    return 0;
}
 
/* Generic timestamp search between pos_min/pos_max. Uses interpolation
 * first, falls back to bisection and finally linear scanning when the
 * bounds stop shrinking. On success returns the byte position to seek
 * to and stores the timestamp found there in *ts_ret; returns <0 on
 * failure. AV_NOPTS_VALUE bounds are resolved from the file itself. */
int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
                      int64_t pos_min, int64_t pos_max, int64_t pos_limit,
                      int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
                      int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
{
    int64_t pos, ts;
    int64_t start_pos;
    int no_change;
    int ret;

    av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));

    /* Resolve a missing lower bound from the start of the payload. */
    if(ts_min == AV_NOPTS_VALUE){
        pos_min = s->data_offset;
        ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
        if (ts_min == AV_NOPTS_VALUE)
            return -1;
    }

    if(ts_min >= target_ts){
        *ts_ret= ts_min;
        return pos_min;
    }

    /* Resolve a missing upper bound from the end of the file. */
    if(ts_max == AV_NOPTS_VALUE){
        if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
            return ret;
        pos_limit= pos_max;
    }

    if(ts_max <= target_ts){
        *ts_ret= ts_max;
        return pos_max;
    }

    if(ts_min > ts_max){
        return -1;
    }else if(ts_min == ts_max){
        pos_limit= pos_min;
    }

    no_change=0;
    /* Narrow [pos_min, pos_limit] until it collapses onto the target. */
    while (pos_min < pos_limit) {
        av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
                pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
        assert(pos_limit <= pos_max);

        if(no_change==0){
            int64_t approximate_keyframe_distance= pos_max - pos_limit;
            // interpolate position (better than dichotomy)
            pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
                + pos_min - approximate_keyframe_distance;
        }else if(no_change==1){
            // bisection, if interpolation failed to change min or max pos last time
            pos = (pos_min + pos_limit)>>1;
        }else{
            /* linear search if bisection failed, can only happen if there
               are very few or no keyframes between min/max */
            pos=pos_min;
        }
        /* Keep the probe strictly inside the current interval. */
        if(pos <= pos_min)
            pos= pos_min + 1;
        else if(pos > pos_limit)
            pos= pos_limit;
        start_pos= pos;

        ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); //may pass pos_limit instead of -1
        if(pos == pos_max)
            no_change++;
        else
            no_change=0;
        av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
                pos_min, pos, pos_max,
                av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
                pos_limit, start_pos, no_change);
        if(ts == AV_NOPTS_VALUE){
            av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
            return -1;
        }
        assert(ts != AV_NOPTS_VALUE);
        /* Shrink whichever bound the probe result allows; both shrink
         * when the probe hits the target exactly. */
        if (target_ts <= ts) {
            pos_limit = start_pos - 1;
            pos_max = pos;
            ts_max = ts;
        }
        if (target_ts >= ts) {
            pos_min = pos;
            ts_min = ts;
        }
    }

    pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
    ts  = (flags & AVSEEK_FLAG_BACKWARD) ?  ts_min :  ts_max;
#if 0
    pos_min = pos;
    ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
    pos_min++;
    ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
    av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
            pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
#endif
    *ts_ret= ts;
    return pos;
}
 
/* Byte-based seek: clamp the requested position to the file's payload
 * range and reposition the I/O context there. Always returns 0. */
static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
    int64_t lo = s->data_offset;
    int64_t hi = avio_size(s->pb) - 1;

    if (pos < lo)
        pos = lo;
    else if (pos > hi)
        pos = hi;

    avio_seek(s->pb, pos, SEEK_SET);

    s->io_repositioned = 1;

    return 0;
}
 
/* Index-based seek fallback: look up the timestamp in the stream's
 * index; if it lies beyond the indexed region, read frames forward
 * (populating the generic index as a side effect) until the target is
 * covered, then seek to the matching index entry. */
static int seek_frame_generic(AVFormatContext *s,
                              int stream_index, int64_t timestamp, int flags)
{
    int index;
    int64_t ret;
    AVStream *st;
    AVIndexEntry *ie;

    st = s->streams[stream_index];

    index = av_index_search_timestamp(st, timestamp, flags);

    /* Target precedes everything we have indexed: cannot seek there. */
    if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
        return -1;

    if(index < 0 || index==st->nb_index_entries-1){
        AVPacket pkt;
        int nonkey=0;

        /* Resume reading from the last indexed position (or the start
         * of the payload) so new index entries get created. */
        if(st->nb_index_entries){
            av_assert0(st->index_entries);
            ie= &st->index_entries[st->nb_index_entries-1];
            if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
                return ret;
            ff_update_cur_dts(s, st, ie->timestamp);
        }else{
            if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
                return ret;
        }
        for (;;) {
            int read_status;
            do{
                read_status = av_read_frame(s, &pkt);
            } while (read_status == AVERROR(EAGAIN));
            if (read_status < 0)
                break;
            av_free_packet(&pkt);
            /* Stop once a keyframe past the target shows up, or give up
             * after too many non-keyframes (except CDG, which has none). */
            if(stream_index == pkt.stream_index && pkt.dts > timestamp){
                if(pkt.flags & AV_PKT_FLAG_KEY)
                    break;
                if(nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS){
                    av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
                    break;
                }
            }
        }
        index = av_index_search_timestamp(st, timestamp, flags);
    }
    if (index < 0)
        return -1;

    ff_read_frame_flush(s);
    /* Give a demuxer-provided seek a chance before the raw byte seek. */
    if (s->iformat->read_seek){
        if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
            return 0;
    }
    ie = &st->index_entries[index];
    if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
        return ret;
    ff_update_cur_dts(s, st, ie->timestamp);

    return 0;
}
 
/* Dispatch a seek request to the best available strategy: byte seek,
 * the demuxer's own read_seek callback, binary search over
 * read_timestamp(), or the generic index-based seek, in that order.
 * Returns 0 on success, <0 on failure. */
static int seek_frame_internal(AVFormatContext *s, int stream_index,
                               int64_t timestamp, int flags)
{
    /* Byte seeking bypasses all the timestamp machinery. */
    if (flags & AVSEEK_FLAG_BYTE) {
        if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
            return -1;
        ff_read_frame_flush(s);
        return seek_frame_byte(s, stream_index, timestamp, flags);
    }

    /* No stream chosen: pick a default one and convert the timestamp
     * from AV_TIME_BASE units into that stream's time base. */
    if (stream_index < 0) {
        AVStream *st;

        stream_index = av_find_default_stream_index(s);
        if (stream_index < 0)
            return -1;

        st = s->streams[stream_index];
        /* timestamp for default must be expressed in AV_TIME_BASE units */
        timestamp = av_rescale(timestamp, st->time_base.den,
                               AV_TIME_BASE * (int64_t)st->time_base.num);
    }

    /* first, we try the format specific seek */
    if (s->iformat->read_seek) {
        ff_read_frame_flush(s);
        if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
            return 0;
    }

    /* Fall back to binary search, then to the generic seek. */
    if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
        ff_read_frame_flush(s);
        return ff_seek_frame_binary(s, stream_index, timestamp, flags);
    }
    if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
        ff_read_frame_flush(s);
        return seek_frame_generic(s, stream_index, timestamp, flags);
    }
    return -1;
}
 
/* Public single-timestamp seek API. Demuxers implementing only the new
 * interval-based seek are bridged through avformat_seek_file(); all
 * others go through seek_frame_internal(). */
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    int ret;

    if (s->iformat->read_seek2 && !s->iformat->read_seek) {
        /* Build a one-sided interval around the target matching the
         * requested direction. */
        int64_t min_ts = INT64_MIN;
        int64_t max_ts = INT64_MAX;

        if (flags & AVSEEK_FLAG_BACKWARD)
            max_ts = timestamp;
        else
            min_ts = timestamp;

        return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
                                  flags & ~AVSEEK_FLAG_BACKWARD);
    }

    ret = seek_frame_internal(s, stream_index, timestamp, flags);

    if (ret >= 0)
        ret = avformat_queue_attached_pictures(s);

    return ret;
}
 
/* Public interval-based seek API: seek to ts constrained to the range
 * [min_ts, max_ts]. Uses the demuxer's read_seek2 when available,
 * otherwise emulates it on top of the old single-timestamp API. */
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    if(min_ts > ts || max_ts < ts)
        return -1;
    if (stream_index < -1 || stream_index >= (int)s->nb_streams)
        return AVERROR(EINVAL);

    if(s->seek2any>0)
        flags |= AVSEEK_FLAG_ANY;
    /* Direction is expressed by the interval here, not by the flag. */
    flags &= ~AVSEEK_FLAG_BACKWARD;

    if (s->iformat->read_seek2) {
        int ret;
        ff_read_frame_flush(s);

        /* With a single stream and no stream chosen, convert the
         * AV_TIME_BASE interval into that stream's time base, rounding
         * inward so the interval never widens. */
        if (stream_index == -1 && s->nb_streams == 1) {
            AVRational time_base = s->streams[0]->time_base;
            ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
            min_ts = av_rescale_rnd(min_ts, time_base.den,
                                    time_base.num * (int64_t)AV_TIME_BASE,
                                    AV_ROUND_UP   | AV_ROUND_PASS_MINMAX);
            max_ts = av_rescale_rnd(max_ts, time_base.den,
                                    time_base.num * (int64_t)AV_TIME_BASE,
                                    AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
        }

        ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);

        if (ret >= 0)
            ret = avformat_queue_attached_pictures(s);
        return ret;
    }

    if(s->iformat->read_timestamp){
        //try to seek via read_timestamp()
    }

    // Fall back on old API if new is not implemented but old is.
    // Note the old API has somewhat different semantics.
    if (s->iformat->read_seek || 1) {
        /* Seek toward whichever interval edge is farther from ts; on
         * failure retry from the edge, then back toward ts. */
        int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
        int ret = av_seek_frame(s, stream_index, ts, flags | dir);
        if (ret<0 && ts != min_ts && max_ts != ts) {
            ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
            if (ret >= 0)
                ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
        }
        return ret;
    }

    // try some generic seek like seek_frame_generic() but with new ts semantics
    return -1; //unreachable
}
 
/*******************************************************/
 
/**
 * Return TRUE if the stream has accurate duration in any stream.
 *
 * @return TRUE if the stream has accurate duration for at least one component.
 */
static int has_duration(AVFormatContext *ic)
{
    int i;

    for (i = 0; i < ic->nb_streams; i++)
        if (ic->streams[i]->duration != AV_NOPTS_VALUE)
            return 1;

    /* No stream-level duration; fall back to the container-level one. */
    return ic->duration != AV_NOPTS_VALUE;
}
 
/**
 * Estimate the stream timings from the one of each components.
 *
 * Derives container-level start_time, duration and (if the file size is
 * known) bit_rate from the per-stream values, converting everything to
 * AV_TIME_BASE units. Subtitle/data streams only contribute a start
 * time when it is close to the primary streams'. Program start/end
 * times are updated as a side effect.
 *
 * Also computes the global bitrate if possible.
 */
static void update_stream_timings(AVFormatContext *ic)
{
    int64_t start_time, start_time1, start_time_text, end_time, end_time1;
    int64_t duration, duration1, filesize;
    int i;
    AVStream *st;
    AVProgram *p;

    start_time = INT64_MAX;
    start_time_text = INT64_MAX;
    end_time = INT64_MIN;
    duration = INT64_MIN;
    for(i = 0;i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
            start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
            /* Subtitle/data start times are tracked separately; they may
             * legitimately start much earlier or later than A/V. */
            if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
                if (start_time1 < start_time_text)
                    start_time_text = start_time1;
            } else
                start_time = FFMIN(start_time, start_time1);
            end_time1 = AV_NOPTS_VALUE;
            if (st->duration != AV_NOPTS_VALUE) {
                end_time1 = start_time1
                          + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
                end_time = FFMAX(end_time, end_time1);
            }
            /* Extend the time span of every program containing this stream. */
            for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){
                if(p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
                    p->start_time = start_time1;
                if(p->end_time < end_time1)
                    p->end_time = end_time1;
            }
        }
        if (st->duration != AV_NOPTS_VALUE) {
            duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
            duration = FFMAX(duration, duration1);
        }
    }
    /* Use the text-stream start time only when no A/V start time exists
     * or it is within one second of the earlier text start. */
    if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
        start_time = start_time_text;
    else if(start_time > start_time_text)
        av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);

    if (start_time != INT64_MAX) {
        ic->start_time = start_time;
        if (end_time != INT64_MIN) {
            /* With programs, the duration is the longest program span;
             * otherwise simply end minus start. */
            if (ic->nb_programs) {
                for (i=0; i<ic->nb_programs; i++) {
                    p = ic->programs[i];
                    if(p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
                        duration = FFMAX(duration, p->end_time - p->start_time);
                }
            } else
                duration = FFMAX(duration, end_time - start_time);
        }
    }
    if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
        ic->duration = duration;
    }
    if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
        /* compute the bitrate */
        double bitrate = (double)filesize * 8.0 * AV_TIME_BASE /
            (double)ic->duration;
        if (bitrate >= 0 && bitrate <= INT_MAX)
            ic->bit_rate = bitrate;
    }
}
 
/* Recompute container-level timings, then copy them down into every
 * stream that still lacks a start time, converting to the stream's own
 * time base. */
static void fill_all_stream_timings(AVFormatContext *ic)
{
    int i;

    update_stream_timings(ic);

    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];

        if (st->start_time != AV_NOPTS_VALUE)
            continue;
        if (ic->start_time != AV_NOPTS_VALUE)
            st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
                                          st->time_base);
        if (ic->duration != AV_NOPTS_VALUE)
            st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
                                        st->time_base);
    }
}
 
/* Last-resort duration estimation: sum the per-stream bit rates into a
 * container bit rate (if unset), then derive missing stream durations
 * from file size / bit rate. Warns once when any duration was guessed. */
static void estimate_timings_from_bit_rate(AVFormatContext *ic)
{
    AVStream *st;
    int i, show_warning = 0;

    /* if bit_rate is already set, we believe it */
    if (ic->bit_rate <= 0) {
        int total = 0;

        for (i = 0; i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->codec->bit_rate <= 0)
                continue;
            /* Discard the whole sum if adding this stream would overflow. */
            if (INT_MAX - st->codec->bit_rate < total) {
                total = 0;
                break;
            }
            total += st->codec->bit_rate;
        }
        ic->bit_rate = total;
    }

    /* if duration is already set, we believe it */
    if (ic->duration == AV_NOPTS_VALUE && ic->bit_rate != 0) {
        int64_t filesize = ic->pb ? avio_size(ic->pb) : 0;

        if (filesize > 0) {
            for (i = 0; i < ic->nb_streams; i++) {
                st = ic->streams[i];
                /* Only fill in missing durations, and only when the
                 * rescale below cannot overflow. */
                if (st->duration == AV_NOPTS_VALUE &&
                    st->time_base.num <= INT64_MAX / ic->bit_rate) {
                    st->duration = av_rescale(8 * filesize, st->time_base.den,
                                              ic->bit_rate * (int64_t)st->time_base.num);
                    show_warning = 1;
                }
            }
        }
    }
    if (show_warning)
        av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
}
 
/* How many bytes from the end of the file to scan for final timestamps,
 * and how many times to retry with a doubled window. */
#define DURATION_MAX_READ_SIZE 250000LL
#define DURATION_MAX_RETRY 4

/* only usable for MPEG-PS streams */
/* Estimate stream durations by reading packets near the end of the file
 * and subtracting each stream's start time (or first DTS) from the last
 * PTS seen. The file position is restored to old_offset afterwards. */
static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
{
    AVPacket pkt1, *pkt = &pkt1;
    AVStream *st;
    int read_size, i, ret;
    int64_t end_time;
    int64_t filesize, offset, duration;
    int retry=0;

    /* flush packet queue */
    flush_packet_queue(ic);

    for (i=0; i<ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
            av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");

        /* Parsers hold demux state that would be invalidated by seeking. */
        if (st->parser) {
            av_parser_close(st->parser);
            st->parser= NULL;
        }
    }

    /* estimate the end time (duration) */
    /* XXX: may need to support wrapping */
    filesize = ic->pb ? avio_size(ic->pb) : 0;
    end_time = AV_NOPTS_VALUE;
    /* Read a window at the end of the file; double it and retry while
     * no usable PTS shows up. */
    do{
        offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
        if (offset < 0)
            offset = 0;

        avio_seek(ic->pb, offset, SEEK_SET);
        read_size = 0;
        for(;;) {
            if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
                break;

            do {
                ret = ff_read_packet(ic, pkt);
            } while(ret == AVERROR(EAGAIN));
            if (ret != 0)
                break;
            read_size += pkt->size;
            st = ic->streams[pkt->stream_index];
            if (pkt->pts != AV_NOPTS_VALUE &&
                (st->start_time != AV_NOPTS_VALUE ||
                 st->first_dts  != AV_NOPTS_VALUE)) {
                duration = end_time = pkt->pts;
                if (st->start_time != AV_NOPTS_VALUE)
                    duration -= st->start_time;
                else
                    duration -= st->first_dts;
                if (duration > 0) {
                    /* Accept a longer duration only when it is plausibly
                     * close (within 60 s) to the last one seen, to reject
                     * wild outliers. */
                    if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<=0 ||
                        (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
                        st->duration = duration;
                    st->info->last_duration = duration;
                }
            }
            av_free_packet(pkt);
        }
    }while(   end_time==AV_NOPTS_VALUE
           && filesize > (DURATION_MAX_READ_SIZE<<retry)
           && ++retry <= DURATION_MAX_RETRY);

    fill_all_stream_timings(ic);

    /* Restore the original read position and reset per-stream state
     * touched by the scan. */
    avio_seek(ic->pb, old_offset, SEEK_SET);
    for (i=0; i<ic->nb_streams; i++) {
        st= ic->streams[i];
        st->cur_dts= st->first_dts;
        st->last_IP_pts = AV_NOPTS_VALUE;
        st->reference_dts = AV_NOPTS_VALUE;
    }
}
 
/**
 * Fill in missing start time / duration information for the container,
 * picking the most accurate available method: end-of-file PTS scanning
 * (MPEG-PS/TS only), per-stream timings, or bit-rate estimation.
 */
static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
{
    int64_t file_size = 0;

    /* file size is needed by the PTS- and bit-rate-based estimators */
    if (!(ic->iformat->flags & AVFMT_NOFILE)) {
        file_size = avio_size(ic->pb);
        if (file_size < 0)
            file_size = 0;
    }

    if ((!strcmp(ic->iformat->name, "mpeg") ||
         !strcmp(ic->iformat->name, "mpegts")) &&
        file_size && ic->pb->seekable) {
        /* MPEG-PS/TS on seekable input: scan PTSes near EOF */
        estimate_timings_from_pts(ic, old_offset);
        ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
    } else if (has_duration(ic)) {
        /* at least one stream has timings - propagate them to all */
        fill_all_stream_timings(ic);
        ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
    } else {
        /* least precise fallback: use the bit rate */
        estimate_timings_from_bit_rate(ic);
        ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
    }
    update_stream_timings(ic);

    /* debug dump of the resulting timings */
    {
        int i;
        AVStream av_unused *st;

        for (i = 0; i < ic->nb_streams; i++) {
            st = ic->streams[i];
            av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
                    (double) st->start_time / AV_TIME_BASE,
                    (double) st->duration / AV_TIME_BASE);
        }
        av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
                (double) ic->start_time / AV_TIME_BASE,
                (double) ic->duration / AV_TIME_BASE,
                ic->bit_rate / 1000);
    }
}
 
/**
 * Check whether a stream's codec context carries all the parameters
 * required for it to be usable, per media type.
 *
 * @param st         stream to inspect (st->codec is examined)
 * @param errmsg_ptr if non-NULL, receives a static string naming the
 *                   first missing parameter on failure
 * @return 1 when parameters are sufficient, 0 otherwise
 */
static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
{
    AVCodecContext *avctx = st->codec;

/* record the failure reason (if requested) and bail out with 0 */
#define FAIL(errmsg) do {                                         \
        if (errmsg_ptr)                                           \
            *errmsg_ptr = errmsg;                                 \
        return 0;                                                 \
    } while (0)

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (!avctx->frame_size && determinable_frame_size(avctx))
            FAIL("unspecified frame size");
        /* sample format can only be trusted once a decoder has been probed */
        if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
            FAIL("unspecified sample format");
        if (!avctx->sample_rate)
            FAIL("unspecified sample rate");
        if (!avctx->channels)
            FAIL("unspecified number of channels");
        if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
            FAIL("no decodable DTS frames");
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (!avctx->width)
            FAIL("unspecified size");
        if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
            FAIL("unspecified pixel format");
        /* RV30/40 need at least one frame or a sample aspect ratio */
        if (st->codec->codec_id == AV_CODEC_ID_RV30 || st->codec->codec_id == AV_CODEC_ID_RV40)
            if (!st->sample_aspect_ratio.num && !st->codec->sample_aspect_ratio.num && !st->codec_info_nb_frames)
                FAIL("no frame in rv30/40 and no sar");
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
            FAIL("unspecified size");
        break;
    case AVMEDIA_TYPE_DATA:
        /* a data stream with no codec id is acceptable as-is */
        if(avctx->codec_id == AV_CODEC_ID_NONE) return 1;
    }

    if (avctx->codec_id == AV_CODEC_ID_NONE)
        FAIL("unknown codec");
    return 1;
}
 
/**
 * Try to decode one packet from a stream, opening the decoder on demand,
 * so that missing codec parameters (pixel/sample format, channel layout,
 * etc.) get filled in by the decoder itself.
 *
 * Returns 1 or 0 depending on whether decoded data was returned, or a
 * negative error code. Records decoder availability in
 * st->info->found_decoder (1 = opened, -1 = unavailable/failed).
 */
static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, AVDictionary **options)
{
    const AVCodec *codec;
    int got_picture = 1, ret = 0;
    AVFrame *frame = avcodec_alloc_frame();
    AVSubtitle subtitle;
    AVPacket pkt = *avpkt; /* local copy: data/size are advanced below */

    if (!frame)
        return AVERROR(ENOMEM);

    /* lazily open the decoder on first use for this stream */
    if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
        AVDictionary *thread_opt = NULL;

        codec = find_decoder(s, st, st->codec->codec_id);

        if (!codec) {
            st->info->found_decoder = -1;
            ret = -1;
            goto fail;
        }

        /* force thread count to 1 since the h264 decoder will not extract SPS
         *  and PPS to extradata during multi-threaded decoding */
        av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
        ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
        if (!options)
            av_dict_free(&thread_opt);
        if (ret < 0) {
            st->info->found_decoder = -1;
            goto fail;
        }
        st->info->found_decoder = 1;
    } else if (!st->info->found_decoder)
        st->info->found_decoder = 1;

    if (st->info->found_decoder < 0) {
        /* a previous attempt already determined the decoder is unusable */
        ret = -1;
        goto fail;
    }

    /* keep decoding while data remains (or the decoder keeps flushing
     * output for an empty packet) and parameters are still incomplete */
    while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
           ret >= 0 &&
           (!has_codec_parameters(st, NULL)  ||
           !has_decode_delay_been_guessed(st) ||
           (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
        got_picture = 0;
        avcodec_get_frame_defaults(frame);
        switch(st->codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            ret = avcodec_decode_video2(st->codec, frame,
                                        &got_picture, &pkt);
            break;
        case AVMEDIA_TYPE_AUDIO:
            ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = avcodec_decode_subtitle2(st->codec, &subtitle,
                                           &got_picture, &pkt);
            /* treat the whole packet as consumed for subtitles */
            ret = pkt.size;
            break;
        default:
            break;
        }
        if (ret >= 0) {
            if (got_picture)
                st->nb_decoded_frames++;
            /* advance past the bytes the decoder consumed */
            pkt.data += ret;
            pkt.size -= ret;
            ret = got_picture;
        }
    }

    if(!pkt.data && !got_picture)
        ret = -1;

fail:
    avcodec_free_frame(&frame);
    return ret;
}
 
/**
 * Look up the container tag (fourcc) for a codec id in a tag table.
 * The table is terminated by an AV_CODEC_ID_NONE entry.
 * Returns 0 when the id has no tag in this table.
 */
unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
{
    const AVCodecTag *entry;

    for (entry = tags; entry->id != AV_CODEC_ID_NONE; entry++)
        if (entry->id == id)
            return entry->tag;
    return 0;
}
 
/**
 * Look up the codec id for a container tag (fourcc) in a tag table.
 * Tries an exact match first, then a case-insensitive fourcc match.
 * Returns AV_CODEC_ID_NONE when the tag is unknown.
 */
enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
{
    int i;

    /* first pass: exact tag match */
    for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
        if (tags[i].tag == tag)
            return tags[i].id;

    /* second pass: case-insensitive fourcc match */
    for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
        if (avpriv_toupper4(tags[i].tag) == avpriv_toupper4(tag))
            return tags[i].id;

    return AV_CODEC_ID_NONE;
}
 
/**
 * Map a PCM description to the matching codec id.
 *
 * @param bps    bits per sample
 * @param flt    non-zero for floating-point PCM
 * @param be     non-zero for big-endian layouts
 * @param sflags bitmask of byte widths (bit n-1 set => n-byte samples
 *               are signed)
 * @return the codec id, or AV_CODEC_ID_NONE for unsupported layouts
 */
enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
{
    int bytes;

    if (flt) {
        /* floating-point PCM exists only in 32- and 64-bit variants */
        if (bps == 32)
            return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
        if (bps == 64)
            return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
        return AV_CODEC_ID_NONE;
    }

    bytes = (bps + 7) >> 3; /* round bit depth up to whole bytes */

    if (sflags & (1 << (bytes - 1))) {
        /* signed variants */
        switch (bytes) {
        case 1:  return AV_CODEC_ID_PCM_S8;
        case 2:  return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
        case 3:  return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
        case 4:  return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
        default: return AV_CODEC_ID_NONE;
        }
    }

    /* unsigned variants */
    switch (bytes) {
    case 1:  return AV_CODEC_ID_PCM_U8;
    case 2:  return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
    case 3:  return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
    case 4:  return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
    default: return AV_CODEC_ID_NONE;
    }
}
 
/**
 * Convenience wrapper around av_codec_get_tag2(): returns the tag for a
 * codec id across a list of tag tables, or 0 when no table knows it.
 */
unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id)
{
    unsigned int tag = 0;

    return av_codec_get_tag2(tags, id, &tag) ? tag : 0;
}
 
/**
 * Search a NULL-terminated list of tag tables for a codec id.
 * On success stores the tag through *tag and returns 1; otherwise
 * returns 0 and leaves *tag untouched.
 */
int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
                      unsigned int *tag)
{
    int i;

    for (i = 0; tags && tags[i]; i++) {
        const AVCodecTag *entry;

        /* each table is terminated by an AV_CODEC_ID_NONE entry */
        for (entry = tags[i]; entry->id != AV_CODEC_ID_NONE; entry++) {
            if (entry->id == id) {
                *tag = entry->tag;
                return 1;
            }
        }
    }
    return 0;
}
 
/**
 * Search a NULL-terminated list of tag tables for a container tag and
 * return the first matching codec id, or AV_CODEC_ID_NONE.
 */
enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
{
    int table;

    for (table = 0; tags && tags[table]; table++) {
        enum AVCodecID id = ff_codec_get_id(tags[table], tag);

        if (id != AV_CODEC_ID_NONE)
            return id;
    }
    return AV_CODEC_ID_NONE;
}
 
/**
 * Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the earliest chapter that begins after it, or at the end of
 * the file when no such chapter exists.
 */
static void compute_chapters_end(AVFormatContext *s)
{
    unsigned int i, j;
    int64_t max_time = s->duration +
                       ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *ch = s->chapters[i];
        int64_t end;

        if (ch->end != AV_NOPTS_VALUE)
            continue; /* demuxer already provided an end time */

        /* default: end of file, or "unbounded" when max_time is 0 */
        end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
                       : INT64_MAX;

        /* clip to the start of the earliest later-starting chapter */
        for (j = 0; j < s->nb_chapters; j++) {
            AVChapter *other = s->chapters[j];
            int64_t next_start = av_rescale_q(other->start, other->time_base,
                                              ch->time_base);

            if (j != i && next_start > ch->start && next_start < end)
                end = next_start;
        }
        ch->end = (end == INT64_MAX) ? ch->start : end;
    }
}
 
/**
 * Return the i-th "standard" frame rate candidate, expressed as ticks in
 * a 1/(1001*12) second time base. Indices 0..60*12-1 enumerate
 * (i+1)*1001 (i.e. (i+1)/12 fps NTSC-style rates); the remaining indices
 * map to the common exact rates 24, 30, 60, 12, 15 and 48 fps.
 */
static int get_std_framerate(int i)
{
    static const int exact_rates[] = { 24, 30, 60, 12, 15, 48 };

    if (i < 60 * 12)
        return (i + 1) * 1001;
    return exact_rates[i - 60 * 12] * 1000 * 12;
}
 
/*
 * Is the time base unreliable?
 * This is a heuristic to balance between quick acceptance of the values in
 * the headers vs. some extra checks.
 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
 * MPEG-2 commonly misuses field repeat flags to store different framerates.
 * And there are "variable" fps files this needs to detect as well.
 */
static int tb_unreliable(AVCodecContext *c)
{
    return c->time_base.den >= 101L * c->time_base.num ||
           c->time_base.den <    5L * c->time_base.num ||
           /* c->codec_tag == AV_RL32("DIVX") ||
              c->codec_tag == AV_RL32("XVID") || */
           c->codec_tag == AV_RL32("mp4v")             ||
           c->codec_id  == AV_CODEC_ID_MPEG2VIDEO     ||
           c->codec_id  == AV_CODEC_ID_H264;
}
 
#if FF_API_FORMAT_PARAMETERS
/* Deprecated compatibility wrapper: forwards to
 * avformat_find_stream_info() with no per-stream options. */
int av_find_stream_info(AVFormatContext *ic)
{
    return avformat_find_stream_info(ic, NULL);
}
#endif
 
/**
 * Allocate avctx->extradata of the given size plus
 * FF_INPUT_BUFFER_PADDING_SIZE zeroed padding bytes and set
 * avctx->extradata_size accordingly.
 *
 * @return 0 on success, AVERROR(EINVAL) for invalid sizes,
 *         AVERROR(ENOMEM) on allocation failure (extradata_size is 0
 *         in both error cases)
 */
int ff_alloc_extradata(AVCodecContext *avctx, int size)
{
    /* reject negative sizes and sizes whose padded length would overflow */
    if (size < 0 || size >= INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
        avctx->extradata_size = 0;
        return AVERROR(EINVAL);
    }

    avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata) {
        avctx->extradata_size = 0;
        return AVERROR(ENOMEM);
    }

    /* zero the padding so bitstream readers may safely overread */
    memset(avctx->extradata + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    avctx->extradata_size = size;
    return 0;
}
 
/**
 * Probe the input by reading (and, when needed, decoding) packets until
 * every stream has usable codec parameters, or until the probesize /
 * max_analyze_duration limits are hit. Packets read here are buffered in
 * ic->packet_buffer so av_read_frame() can return them later. Also
 * estimates frame rates, stream timings and chapter end times.
 *
 * @param ic      the format context to probe
 * @param options per-stream decoder options (only for the streams that
 *                existed on entry), or NULL
 * @return >=0 on success, a negative error code otherwise
 */
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
{
    int i, count, ret = 0, j;
    int64_t read_size;
    AVStream *st;
    AVPacket pkt1, *pkt;
    int64_t old_offset = avio_tell(ic->pb);
    int orig_nb_streams = ic->nb_streams;        // new streams might appear, no options for those
    int flush_codecs = ic->probesize > 0;

    if(ic->pb)
        av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));

    /* phase 1: set up parsers and try opening decoders per stream, since
     * opening alone may already provide the missing parameters */
    for(i=0;i<ic->nb_streams;i++) {
        const AVCodec *codec;
        AVDictionary *thread_opt = NULL;
        st = ic->streams[i];

        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
            st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
/*            if(!st->time_base.num)
                st->time_base= */
            if(!st->codec->time_base.num)
                st->codec->time_base= st->time_base;
        }
        //only for the split stuff
        if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
            if(st->parser){
                if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
                    st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
                }
            } else if (st->need_parsing) {
                av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codec->codec_id));
            }
        }
        codec = find_decoder(ic, st, st->codec->codec_id);

        /* force thread count to 1 since the h264 decoder will not extract SPS
         *  and PPS to extradata during multi-threaded decoding */
        av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);

        /* Ensure that subtitle_header is properly set. */
        if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
            && codec && !st->codec->codec)
            avcodec_open2(st->codec, codec, options ? &options[i]
                          : &thread_opt);

        //try to just open decoders, in case this is enough to get parameters
        if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
            if (codec && !st->codec->codec)
                avcodec_open2(st->codec, codec, options ? &options[i]
                              : &thread_opt);
        }
        if (!options)
            av_dict_free(&thread_opt);
    }

    /* reset the frame-rate detection state for every stream */
    for (i=0; i<ic->nb_streams; i++) {
#if FF_API_R_FRAME_RATE
        ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
#endif
        ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
        ic->streams[i]->info->fps_last_dts  = AV_NOPTS_VALUE;
    }

    /* phase 2: read packets until all streams have parameters or a probe
     * limit is reached */
    count = 0;
    read_size = 0;
    for(;;) {
        if (ff_check_interrupt(&ic->interrupt_callback)){
            ret= AVERROR_EXIT;
            av_log(ic, AV_LOG_DEBUG, "interrupted\n");
            break;
        }

        /* check if one codec still needs to be handled */
        for(i=0;i<ic->nb_streams;i++) {
            int fps_analyze_framecount = 20;

            st = ic->streams[i];
            if (!has_codec_parameters(st, NULL))
                break;
            /* if the timebase is coarse (like the usual millisecond precision
               of mkv), we need to analyze more frames to reliably arrive at
               the correct fps */
            if (av_q2d(st->time_base) > 0.0005)
                fps_analyze_framecount *= 2;
            if (ic->fps_probe_size >= 0)
                fps_analyze_framecount = ic->fps_probe_size;
            if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
                fps_analyze_framecount = 0;
            /* variable fps and no guess at the real fps */
            if(   tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
               && st->info->duration_count < fps_analyze_framecount
               && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
                break;
            if(st->parser && st->parser->parser->split && !st->codec->extradata)
                break;
            if (st->first_dts == AV_NOPTS_VALUE &&
                (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
                 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
                break;
        }
        if (i == ic->nb_streams) {
            /* NOTE: if the format has no header, then we need to read
               some packets to get most of the streams, so we cannot
               stop here */
            if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
                /* if we found the info for all the codecs, we can stop */
                ret = count;
                av_log(ic, AV_LOG_DEBUG, "All info found\n");
                flush_codecs = 0;
                break;
            }
        }
        /* we did not get all the codec info, but we read too much data */
        if (read_size >= ic->probesize) {
            ret = count;
            av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit of %d bytes reached\n", ic->probesize);
            for (i = 0; i < ic->nb_streams; i++)
                if (!ic->streams[i]->r_frame_rate.num &&
                    ic->streams[i]->info->duration_count <= 1 &&
                    strcmp(ic->iformat->name, "image2"))
                    av_log(ic, AV_LOG_WARNING,
                           "Stream #%d: not enough frames to estimate rate; "
                           "consider increasing probesize\n", i);
            break;
        }

        /* NOTE: a new stream can be added there if no header in file
           (AVFMTCTX_NOHEADER) */
        ret = read_frame_internal(ic, &pkt1);
        if (ret == AVERROR(EAGAIN))
            continue;

        if (ret < 0) {
            /* EOF or error*/
            break;
        }

        /* buffer the packet so av_read_frame() can deliver it later */
        if (ic->flags & AVFMT_FLAG_NOBUFFER)
            free_packet_buffer(&ic->packet_buffer, &ic->packet_buffer_end);
        {
            pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
                                &ic->packet_buffer_end);
            if (!pkt) {
                ret = AVERROR(ENOMEM);
                goto find_stream_info_err;
            }
            if ((ret = av_dup_packet(pkt)) < 0)
                goto find_stream_info_err;
        }

        st = ic->streams[pkt->stream_index];
        if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC))
            read_size += pkt->size;

        /* track DTS monotonicity and discontinuities for fps detection */
        if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
            /* check for non-increasing dts */
            if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
                st->info->fps_last_dts >= pkt->dts) {
                av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: "
                       "packet %d with DTS %"PRId64", packet %d with DTS "
                       "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
                       st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
                st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
            }
            /* check for a discontinuity in dts - if the difference in dts
             * is more than 1000 times the average packet duration in the sequence,
             * we treat it as a discontinuity */
            if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
                st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
                (pkt->dts - st->info->fps_last_dts) / 1000 >
                (st->info->fps_last_dts - st->info->fps_first_dts) / (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
                av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: "
                       "packet %d with DTS %"PRId64", packet %d with DTS "
                       "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
                       st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
                st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
            }

            /* update stored dts values */
            if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
                st->info->fps_first_dts     = pkt->dts;
                st->info->fps_first_dts_idx = st->codec_info_nb_frames;
            }
            st->info->fps_last_dts = pkt->dts;
            st->info->fps_last_dts_idx = st->codec_info_nb_frames;
        }
        /* stop once enough stream time has been analyzed */
        if (st->codec_info_nb_frames>1) {
            int64_t t=0;
            if (st->time_base.den > 0)
                t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
            if (st->avg_frame_rate.num > 0)
                t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));

            if (   t==0
                && st->codec_info_nb_frames>30
                && st->info->fps_first_dts != AV_NOPTS_VALUE
                && st->info->fps_last_dts  != AV_NOPTS_VALUE)
                t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q));

            if (t >= ic->max_analyze_duration) {
                av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %d reached at %"PRId64" microseconds\n", ic->max_analyze_duration, t);
                break;
            }
            if (pkt->duration) {
                st->info->codec_info_duration        += pkt->duration;
                st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame==2 ? st->parser->repeat_pict + 1 : 2;
            }
        }
#if FF_API_R_FRAME_RATE
        /* accumulate per-standard-framerate error statistics used later
         * to guess r_frame_rate */
        {
            int64_t last = st->info->last_dts;

            if(   pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last
               && pkt->dts - (uint64_t)last < INT64_MAX){
                double dts= (is_relative(pkt->dts) ?  pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
                int64_t duration= pkt->dts - last;

                /* NOTE(review): this bare return skips the
                 * find_stream_info_err cleanup below - confirm whether
                 * st->info teardown is required on this path */
                if (!st->info->duration_error)
                    st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
                if (!st->info->duration_error)
                    return AVERROR(ENOMEM);

//                 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
//                     av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
                for (i=0; i<MAX_STD_TIMEBASES; i++) {
                    int framerate= get_std_framerate(i);
                    double sdts= dts*framerate/(1001*12);
                    for(j=0; j<2; j++){
                        int64_t ticks= llrint(sdts+j*0.5);
                        double error= sdts - ticks + j*0.5;
                        st->info->duration_error[j][0][i] += error;
                        st->info->duration_error[j][1][i] += error*error;
                    }
                }
                st->info->duration_count++;
                // ignore the first 4 values, they might have some random jitter
                if (st->info->duration_count > 3 && is_relative(pkt->dts) == is_relative(last))
                    st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
            }
            if (pkt->dts != AV_NOPTS_VALUE)
                st->info->last_dts = pkt->dts;
        }
#endif
        /* extract extradata (e.g. SPS/PPS) via the parser's split callback */
        if(st->parser && st->parser->parser->split && !st->codec->extradata){
            int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
            if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
                /* NOTE(review): bare return also bypasses the cleanup
                 * label - confirm intended */
                if (ff_alloc_extradata(st->codec, i))
                    return AVERROR(ENOMEM);
                memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
            }
        }

        /* if still no information, we try to open the codec and to
           decompress the frame. We try to avoid that in most cases as
           it takes longer and uses more memory. For MPEG-4, we need to
           decompress for QuickTime.

           If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
           least one frame of codec data, this makes sure the codec initializes
           the channel configuration and does not only trust the values from the container.
        */
        try_decode_frame(ic, st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);

        st->codec_info_nb_frames++;
        count++;
    }

    /* phase 3: flush the decoders with empty packets to drain any delayed
     * output that may still carry missing parameters */
    if (flush_codecs) {
        AVPacket empty_pkt = { 0 };
        int err = 0;
        av_init_packet(&empty_pkt);

        for(i=0;i<ic->nb_streams;i++) {

            st = ic->streams[i];

            /* flush the decoders */
            if (st->info->found_decoder == 1) {
                do {
                    err = try_decode_frame(ic, st, &empty_pkt,
                                            (options && i < orig_nb_streams) ?
                                            &options[i] : NULL);
                } while (err > 0 && !has_codec_parameters(st, NULL));

                if (err < 0) {
                    av_log(ic, AV_LOG_INFO,
                        "decoding for stream %d failed\n", st->index);
                }
            }
        }
    }

    // close codecs which were opened in try_decode_frame()
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        avcodec_close(st->codec);
    }
    /* phase 4: per-stream post-processing (frame-rate guessing, codec
     * tags, audio dispositions) */
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* derive a codec tag for raw video from the pixel format */
            if(st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
                uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
                if (avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
                    st->codec->codec_tag= tag;
            }

            /* estimate average framerate if not set by demuxer */
            if (st->info->codec_info_duration_fields && !st->avg_frame_rate.num && st->info->codec_info_duration) {
                int best_fps = 0;
                double best_error = 0.01;

                /* guard the multiplications in av_reduce() below */
                if (st->info->codec_info_duration        >= INT64_MAX / st->time_base.num / 2||
                    st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||
                    st->info->codec_info_duration        < 0)
                    continue;
                av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
                          st->info->codec_info_duration_fields*(int64_t)st->time_base.den,
                          st->info->codec_info_duration*2*(int64_t)st->time_base.num, 60000);

                /* round guessed framerate to a "standard" framerate if it's
                 * within 1% of the original estimate*/
                for (j = 1; j < MAX_STD_TIMEBASES; j++) {
                    AVRational std_fps = { get_std_framerate(j), 12*1001 };
                    double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1);

                    if (error < best_error) {
                        best_error = error;
                        best_fps   = std_fps.num;
                    }
                }
                if (best_fps) {
                    av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
                              best_fps, 12*1001, INT_MAX);
                }
            }
            // the check for tb_unreliable() is not completely correct, since this is not about handling
            // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
            // ipmovie.c produces.
            if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
                av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
            if (st->info->duration_count>1 && !st->r_frame_rate.num
                && tb_unreliable(st->codec)) {
                int num = 0;
                double best_error= 0.01;

                /* pick the standard framerate whose accumulated rounding
                 * error variance is smallest */
                for (j=0; j<MAX_STD_TIMEBASES; j++) {
                    int k;

                    if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
                        continue;
                    if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
                        continue;
                    for(k=0; k<2; k++){
                        int n= st->info->duration_count;
                        double a= st->info->duration_error[k][0][j] / n;
                        double error= st->info->duration_error[k][1][j]/n - a*a;

                        if(error < best_error && best_error> 0.000000001){
                            best_error= error;
                            num = get_std_framerate(j);
                        }
                        if(error < 0.02)
                            av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
                    }
                }
                // do not increase frame rate by more than 1 % in order to match a standard rate.
                if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
                    av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
            }

            /* last resort: derive r_frame_rate from the codec or stream
             * time base, whichever is coarser */
            if (!st->r_frame_rate.num){
                if(    st->codec->time_base.den * (int64_t)st->time_base.num
                    <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
                    st->r_frame_rate.num = st->codec->time_base.den;
                    st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
                }else{
                    st->r_frame_rate.num = st->time_base.den;
                    st->r_frame_rate.den = st->time_base.num;
                }
            }
        }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if(!st->codec->bits_per_coded_sample)
                st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
            // set stream disposition based on audio service type
            switch (st->codec->audio_service_type) {
            case AV_AUDIO_SERVICE_TYPE_EFFECTS:
                st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;    break;
            case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
                st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;  break;
            case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
                st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
            case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
                st->disposition = AV_DISPOSITION_COMMENT;          break;
            case AV_AUDIO_SERVICE_TYPE_KARAOKE:
                st->disposition = AV_DISPOSITION_KARAOKE;          break;
            }
        }
    }

    if(ic->probesize)
        estimate_timings(ic, old_offset);

    /* final check: report streams that still lack parameters */
    if (ret >= 0 && ic->nb_streams)
        ret = -1; /* we could not have all the codec parameters before EOF */
    for(i=0;i<ic->nb_streams;i++) {
        const char *errmsg;
        st = ic->streams[i];
        if (!has_codec_parameters(st, &errmsg)) {
            char buf[256];
            avcodec_string(buf, sizeof(buf), st->codec, 0);
            av_log(ic, AV_LOG_WARNING,
                   "Could not find codec parameters for stream %d (%s): %s\n"
                   "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
                   i, buf, errmsg);
        } else {
            ret = 0;
        }
    }

    compute_chapters_end(ic);

 find_stream_info_err:
    /* release probing scratch state (st->info is only used here) */
    for (i=0; i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (ic->streams[i]->codec && ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
            ic->streams[i]->codec->thread_count = 0;
        if (st->info)
            av_freep(&st->info->duration_error);
        av_freep(&ic->streams[i]->info);
    }
    if(ic->pb)
        av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
    return ret;
}
 
/**
 * Find the next program (after 'last', or from the beginning when 'last'
 * is NULL) that contains stream index 's'. Returns NULL when no further
 * program contains the stream.
 */
AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
{
    int i, j;

    for (i = 0; i < ic->nb_programs; i++) {
        AVProgram *p = ic->programs[i];

        if (p == last) {
            /* resume the search just past the previous result */
            last = NULL;
            continue;
        }
        if (last)
            continue; /* still skipping entries up to 'last' */

        for (j = 0; j < p->nb_stream_indexes; j++)
            if (p->stream_index[j] == s)
                return p;
    }
    return NULL;
}
 
/**
 * Pick the "best" stream of the requested media type.
 *
 * Candidates are ranked by: number of probed frames capped at 5, then
 * bit rate, then total probed frame count. Streams flagged as hearing-
 * or visually-impaired variants are never auto-selected. When
 * related_stream is given (and no explicit stream is wanted), the search
 * is restricted to the program containing that stream.
 *
 * @param decoder_ret if non-NULL, also require a decoder and return it
 * @return the chosen stream index, AVERROR_STREAM_NOT_FOUND, or
 *         AVERROR_DECODER_NOT_FOUND when streams matched but none had
 *         a usable decoder
 */
int av_find_best_stream(AVFormatContext *ic,
                        enum AVMediaType type,
                        int wanted_stream_nb,
                        int related_stream,
                        AVCodec **decoder_ret,
                        int flags)
{
    int i, nb_streams = ic->nb_streams;
    int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
    unsigned *program = NULL;
    AVCodec *decoder = NULL, *best_decoder = NULL;

    /* restrict the search to the related stream's program, if any */
    if (related_stream >= 0 && wanted_stream_nb < 0) {
        AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
        if (p) {
            program = p->stream_index;
            nb_streams = p->nb_stream_indexes;
        }
    }
    for (i = 0; i < nb_streams; i++) {
        int real_stream_index = program ? program[i] : i;
        AVStream *st = ic->streams[real_stream_index];
        AVCodecContext *avctx = st->codec;
        if (avctx->codec_type != type)
            continue;
        if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
            continue;
        /* never auto-select impaired-audience variants */
        if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
            continue;
        if (decoder_ret) {
            decoder = find_decoder(ic, st, st->codec->codec_id);
            if (!decoder) {
                if (ret < 0)
                    ret = AVERROR_DECODER_NOT_FOUND;
                continue;
            }
        }
        count = st->codec_info_nb_frames;
        bitrate = avctx->bit_rate;
        multiframe = FFMIN(5, count);
        /* keep the current best unless this stream strictly outranks it */
        if ((best_multiframe >  multiframe) ||
            (best_multiframe == multiframe && best_bitrate >  bitrate) ||
            (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
            continue;
        best_count = count;
        best_bitrate = bitrate;
        best_multiframe = multiframe;
        ret = real_stream_index;
        best_decoder = decoder;
        /* NOTE(review): this restart branch is skipped whenever the last
         * program entry was rejected via 'continue' above, and ret >= 0
         * whenever it is reached, so it looks unreachable - confirm
         * against upstream behavior */
        if (program && i == nb_streams - 1 && ret < 0) {
            program = NULL;
            nb_streams = ic->nb_streams;
            i = 0; /* no related stream found, try again with everything */
        }
    }
    if (decoder_ret)
        *decoder_ret = best_decoder;
    return ret;
}
 
/*******************************************************/
 
/**
 * Start/resume playback of a network stream, preferring a demuxer
 * callback and falling back to un-pausing the I/O layer.
 */
int av_read_play(AVFormatContext *s)
{
    /* demuxer-specific implementation takes precedence */
    if (s->iformat->read_play)
        return s->iformat->read_play(s);

    /* otherwise un-pause the underlying protocol, if present */
    return s->pb ? avio_pause(s->pb, 0) : AVERROR(ENOSYS);
}
 
/**
 * Pause playback of a network stream, preferring a demuxer callback and
 * falling back to pausing the I/O layer.
 */
int av_read_pause(AVFormatContext *s)
{
    /* demuxer-specific implementation takes precedence */
    if (s->iformat->read_pause)
        return s->iformat->read_pause(s);

    /* otherwise pause the underlying protocol, if present */
    return s->pb ? avio_pause(s->pb, 1) : AVERROR(ENOSYS);
}
 
/**
 * Free the LAST stream of 's' (st must be streams[nb_streams-1]) and
 * everything it owns, then shrink nb_streams. Teardown order matters:
 * nested buffers are released before their owning structures.
 */
void ff_free_stream(AVFormatContext *s, AVStream *st){
    av_assert0(s->nb_streams>0);
    av_assert0(s->streams[ s->nb_streams-1 ] == st);

    if (st->parser) {
        av_parser_close(st->parser);
    }
    if (st->attached_pic.data)
        av_free_packet(&st->attached_pic);
    av_dict_free(&st->metadata);
    av_freep(&st->probe_data.buf);
    av_freep(&st->index_entries);
    /* codec-owned buffers must go before the codec context itself */
    av_freep(&st->codec->extradata);
    av_freep(&st->codec->subtitle_header);
    av_freep(&st->codec);
    av_freep(&st->priv_data);
    /* st->info may already be NULL after avformat_find_stream_info() */
    if (st->info)
        av_freep(&st->info->duration_error);
    av_freep(&st->info);
    av_freep(&s->streams[ --s->nb_streams ]);
}
 
/**
 * Free an AVFormatContext and everything it owns: streams, programs,
 * chapters, metadata and private data. Safe to call with NULL.
 */
void avformat_free_context(AVFormatContext *s)
{
    int i;

    if (!s)
        return;

    av_opt_free(s);
    if (s->iformat && s->iformat->priv_class && s->priv_data)
        av_opt_free(s->priv_data);

    /* free streams back-to-front: ff_free_stream() pops the last one */
    for(i=s->nb_streams-1; i>=0; i--) {
        ff_free_stream(s, s->streams[i]);
    }
    for(i=s->nb_programs-1; i>=0; i--) {
        av_dict_free(&s->programs[i]->metadata);
        av_freep(&s->programs[i]->stream_index);
        av_freep(&s->programs[i]);
    }
    av_freep(&s->programs);
    av_freep(&s->priv_data);
    while(s->nb_chapters--) {
        av_dict_free(&s->chapters[s->nb_chapters]->metadata);
        av_freep(&s->chapters[s->nb_chapters]);
    }
    av_freep(&s->chapters);
    av_dict_free(&s->metadata);
    av_freep(&s->streams);
    av_free(s);
}
 
#if FF_API_CLOSE_INPUT_FILE
/* Deprecated compatibility wrapper: forwards to avformat_close_input(). */
void av_close_input_file(AVFormatContext *s)
{
    avformat_close_input(&s);
}
#endif
 
/**
 * Close an opened input: run the demuxer's read_close, free the context
 * and close the I/O context (unless it is caller-owned), then NULL the
 * caller's pointer. Safe to call with NULL or *ps == NULL.
 */
void avformat_close_input(AVFormatContext **ps)
{
    AVFormatContext *s;
    AVIOContext *pb;

    if (!ps || !*ps)
        return;

    s  = *ps;
    pb = s->pb;

    /* do not close I/O contexts we do not own (NOFILE formats or
     * caller-supplied custom I/O) */
    if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
        (s->flags & AVFMT_FLAG_CUSTOM_IO))
        pb = NULL;

    flush_packet_queue(s);

    if (s->iformat) {
        if (s->iformat->read_close)
            s->iformat->read_close(s);
    }

    avformat_free_context(s);

    *ps = NULL;

    /* pb was saved above because s is already freed at this point */
    avio_close(pb);
}
 
#if FF_API_NEW_STREAM
/* Deprecated compatibility wrapper: creates a stream via
 * avformat_new_stream() and sets the caller-supplied id. */
AVStream *av_new_stream(AVFormatContext *s, int id)
{
    AVStream *st = avformat_new_stream(s, NULL);
    if (st)
        st->id = id;
    return st;
}
#endif
 
/**
 * Append a new stream to 's' and initialize its defaults (timestamps to
 * AV_NOPTS_VALUE, MPEG-like 90kHz time base, probe state, wrap state).
 *
 * @param s the format context to add the stream to
 * @param c codec whose defaults initialize the codec context, may be NULL
 * @return the new stream, or NULL on allocation failure (in which case
 *         's' is left unchanged)
 */
AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
{
    AVStream *st;
    int i;
    AVStream **streams;

    if (s->nb_streams >= INT_MAX/sizeof(*streams))
        return NULL;
    streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
    if (!streams)
        return NULL;
    s->streams = streams;

    st = av_mallocz(sizeof(AVStream));
    if (!st)
        return NULL;
    if (!(st->info = av_mallocz(sizeof(*st->info)))) {
        av_free(st);
        return NULL;
    }
    st->info->last_dts = AV_NOPTS_VALUE;

    st->codec = avcodec_alloc_context3(c);
    if (!st->codec) {
        /* FIX: the allocation was previously unchecked - a failure here
         * led to a NULL dereference just below */
        av_freep(&st->info);
        av_free(st);
        return NULL;
    }
    if (s->iformat) {
        /* no default bitrate if decoding */
        st->codec->bit_rate = 0;
    }
    st->index = s->nb_streams;
    st->start_time = AV_NOPTS_VALUE;
    st->duration = AV_NOPTS_VALUE;
        /* we set the current DTS to 0 so that formats without any timestamps
           but durations get some timestamps, formats with some unknown
           timestamps have their first few packets buffered and the
           timestamps corrected before they are returned to the user */
    st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
    st->first_dts = AV_NOPTS_VALUE;
    st->probe_packets = MAX_PROBE_PACKETS;
    st->pts_wrap_reference = AV_NOPTS_VALUE;
    st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;

    /* default pts setting is MPEG-like */
    avpriv_set_pts_info(st, 33, 1, 90000);
    st->last_IP_pts = AV_NOPTS_VALUE;
    for(i=0; i<MAX_REORDER_DELAY+1; i++)
        st->pts_buffer[i]= AV_NOPTS_VALUE;
    st->reference_dts = AV_NOPTS_VALUE;

    st->sample_aspect_ratio = (AVRational){0,1};

#if FF_API_R_FRAME_RATE
    st->info->last_dts      = AV_NOPTS_VALUE;
#endif
    st->info->fps_first_dts = AV_NOPTS_VALUE;
    st->info->fps_last_dts  = AV_NOPTS_VALUE;

    s->streams[s->nb_streams++] = st;
    return st;
}
 
/**
 * Return the program with the given id, creating and registering it if
 * it does not exist yet. (Re)initializes id, wrap state and start/end
 * times in either case. Returns NULL on allocation failure.
 */
AVProgram *av_new_program(AVFormatContext *ac, int id)
{
    AVProgram *program = NULL;
    int i;

    av_dlog(ac, "new_program: id=0x%04x\n", id);

    /* reuse an existing program with this id, if present */
    for (i = 0; i < ac->nb_programs; i++)
        if (ac->programs[i]->id == id)
            program = ac->programs[i];

    if (!program) {
        program = av_mallocz(sizeof(AVProgram));
        if (!program)
            return NULL;
        dynarray_add(&ac->programs, &ac->nb_programs, program);
        program->discard = AVDISCARD_NONE;
    }

    program->id                 = id;
    program->pts_wrap_reference = AV_NOPTS_VALUE;
    program->pts_wrap_behavior  = AV_PTS_WRAP_IGNORE;
    program->start_time         =
    program->end_time           = AV_NOPTS_VALUE;

    return program;
}
 
/* Look up the chapter with the given id, creating it if needed, then
 * (re)set its title metadata, time base and start/end timestamps. */
AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
{
    AVChapter *ch = NULL;
    int i;

    for (i = 0; i < s->nb_chapters; i++)
        if (s->chapters[i]->id == id)
            ch = s->chapters[i];

    if (!ch) {
        ch = av_mallocz(sizeof(*ch));
        if (!ch)
            return NULL;
        dynarray_add(&s->chapters, &s->nb_chapters, ch);
    }

    av_dict_set(&ch->metadata, "title", title, 0);
    ch->id        = id;
    ch->time_base = time_base;
    ch->start     = start;
    ch->end       = end;

    return ch;
}
 
/* Register stream 'idx' with the program whose id is 'progid'.
 * No-op if the index is out of range, the program does not exist,
 * or the stream is already part of it. */
void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
{
    int i, j;
    void *grown;

    if (idx >= ac->nb_streams) {
        av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
        return;
    }

    for (i = 0; i < ac->nb_programs; i++) {
        AVProgram *prog = ac->programs[i];

        if (prog->id != progid)
            continue;

        /* already registered? */
        for (j = 0; j < prog->nb_stream_indexes; j++)
            if (prog->stream_index[j] == idx)
                return;

        grown = av_realloc_array(prog->stream_index,
                                 prog->nb_stream_indexes + 1,
                                 sizeof(unsigned int));
        if (!grown)
            return;
        prog->stream_index = grown;
        prog->stream_index[prog->nb_stream_indexes++] = idx;
        return;
    }
}
 
/* Log a frame rate with an adaptive format: two decimals when it is not a
 * whole number, integer otherwise, and with a 'k' suffix above 1000. */
static void print_fps(double d, const char *postfix)
{
    uint64_t v = lrintf(d * 100);
    const char *fmt;
    double shown = d;

    if (v % 100) {
        fmt = ", %3.2f %s";
    } else if (v % (100 * 1000)) {
        fmt = ", %1.0f %s";
    } else {
        fmt   = ", %1.0fk %s";
        shown = d / 1000;
    }
    av_log(NULL, AV_LOG_INFO, fmt, shown, postfix);
}
 
/* Log the entries of a metadata dictionary, one "key : value" line per tag.
 * Prints nothing when the dictionary is empty or contains only a "language"
 * tag (language is displayed inline by dump_stream_format instead). */
static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
{
    if (m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))) {
        AVDictionaryEntry *tag = NULL;

        av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
        while ((tag = av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
            if (strcmp("language", tag->key)) {
                const char *p = tag->value;
                av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
                while (*p) {
                    char tmp[256];
                    /* copy up to the next control char (BS, LF, VT, FF, CR) */
                    size_t len = strcspn(p, "\x8\xa\xb\xc\xd");
                    av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len + 1));
                    av_log(ctx, AV_LOG_INFO, "%s", tmp);
                    p += len;
                    /* CR prints as a space; LF starts an indented continuation line */
                    if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
                    if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
                    if (*p) p++;
                }
                av_log(ctx, AV_LOG_INFO, "\n");
            }
        }
    }
}
 
/* "user interface" functions */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
{
char buf[256];
int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
AVStream *st = ic->streams[i];
int g = av_gcd(st->time_base.num, st->time_base.den);
AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
avcodec_string(buf, sizeof(buf), st->codec, is_output);
av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
/* the pid is an important information, so we display it */
/* XXX: add a generic system */
if (flags & AVFMT_SHOW_IDS)
av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
if (lang)
av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
av_log(NULL, AV_LOG_INFO, ": %s", buf);
if (st->sample_aspect_ratio.num && // default
av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
AVRational display_aspect_ratio;
av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
st->codec->width*st->sample_aspect_ratio.num,
st->codec->height*st->sample_aspect_ratio.den,
1024*1024);
av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
display_aspect_ratio.num, display_aspect_ratio.den);
}
if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
if(st->avg_frame_rate.den && st->avg_frame_rate.num)
print_fps(av_q2d(st->avg_frame_rate), "fps");
#if FF_API_R_FRAME_RATE
if(st->r_frame_rate.den && st->r_frame_rate.num)
print_fps(av_q2d(st->r_frame_rate), "tbr");
#endif
if(st->time_base.den && st->time_base.num)
print_fps(1/av_q2d(st->time_base), "tbn");
if(st->codec->time_base.den && st->codec->time_base.num)
print_fps(1/av_q2d(st->codec->time_base), "tbc");
}
if (st->disposition & AV_DISPOSITION_DEFAULT)
av_log(NULL, AV_LOG_INFO, " (default)");
if (st->disposition & AV_DISPOSITION_DUB)
av_log(NULL, AV_LOG_INFO, " (dub)");
if (st->disposition & AV_DISPOSITION_ORIGINAL)
av_log(NULL, AV_LOG_INFO, " (original)");
if (st->disposition & AV_DISPOSITION_COMMENT)
av_log(NULL, AV_LOG_INFO, " (comment)");
if (st->disposition & AV_DISPOSITION_LYRICS)
av_log(NULL, AV_LOG_INFO, " (lyrics)");
if (st->disposition & AV_DISPOSITION_KARAOKE)
av_log(NULL, AV_LOG_INFO, " (karaoke)");
if (st->disposition & AV_DISPOSITION_FORCED)
av_log(NULL, AV_LOG_INFO, " (forced)");
if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
av_log(NULL, AV_LOG_INFO, " (visual impaired)");
if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
av_log(NULL, AV_LOG_INFO, " (clean effects)");
av_log(NULL, AV_LOG_INFO, "\n");
dump_metadata(NULL, st->metadata, " ");
}
 
/* Print a human-readable summary of the format context to the log:
 * container name and url, global metadata, duration/start/bitrate (input
 * only), chapters, programs with their streams, and finally any streams
 * not covered by a program. 'printed' marks streams already dumped via a
 * program so they are not listed twice. */
void av_dump_format(AVFormatContext *ic,
                    int index,
                    const char *url,
                    int is_output)
{
    int i;
    uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
    if (ic->nb_streams && !printed)
        return;

    av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
           is_output ? "Output" : "Input",
           index,
           is_output ? ic->oformat->name : ic->iformat->name,
           is_output ? "to" : "from", url);
    dump_metadata(NULL, ic->metadata, " ");
    if (!is_output) {
        av_log(NULL, AV_LOG_INFO, " Duration: ");
        if (ic->duration != AV_NOPTS_VALUE) {
            int hours, mins, secs, us;
            /* +5000 rounds to the nearest 1/100th of a second */
            int64_t duration = ic->duration + 5000;
            secs = duration / AV_TIME_BASE;
            us = duration % AV_TIME_BASE;
            mins = secs / 60;
            secs %= 60;
            hours = mins / 60;
            mins %= 60;
            av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
                   (100 * us) / AV_TIME_BASE);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        if (ic->start_time != AV_NOPTS_VALUE) {
            int secs, us;
            av_log(NULL, AV_LOG_INFO, ", start: ");
            secs = ic->start_time / AV_TIME_BASE;
            us = abs(ic->start_time % AV_TIME_BASE);
            av_log(NULL, AV_LOG_INFO, "%d.%06d",
                   secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
        }
        av_log(NULL, AV_LOG_INFO, ", bitrate: ");
        if (ic->bit_rate) {
            av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }
    for (i = 0; i < ic->nb_chapters; i++) {
        AVChapter *ch = ic->chapters[i];
        av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
        av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
        av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));

        dump_metadata(NULL, ch->metadata, " ");
    }
    if (ic->nb_programs) {
        int j, k, total = 0;
        for (j = 0; j < ic->nb_programs; j++) {
            AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
                                                  "name", NULL, 0);
            av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
                   name ? name->value : "");
            dump_metadata(NULL, ic->programs[j]->metadata, " ");
            for (k = 0; k < ic->programs[j]->nb_stream_indexes; k++) {
                dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
                printed[ic->programs[j]->stream_index[k]] = 1;
            }
            total += ic->programs[j]->nb_stream_indexes;
        }
        if (total < ic->nb_streams)
            av_log(NULL, AV_LOG_INFO, " No Program\n");
    }
    /* streams not attached to any program */
    for (i = 0; i < ic->nb_streams; i++)
        if (!printed[i])
            dump_stream_format(ic, i, index, is_output);

    av_free(printed);
}
 
/* Current wall-clock time in microseconds since the NTP epoch,
 * truncated to whole milliseconds. */
uint64_t ff_ntp_time(void)
{
    uint64_t ms = av_gettime() / 1000;
    return ms * 1000 + NTP_OFFSET_US;
}
 
/**
 * Expand a frame-number pattern in 'path' into 'buf'.
 * Exactly one "%d" directive (optionally with a zero-pad width, e.g.
 * "%05d") must appear in the pattern; "%%" yields a literal '%'.
 * @return 0 on success, -1 on a malformed pattern or when the expansion
 *         does not fit in buf_size bytes (buf is NUL-terminated either way).
 */
int av_get_frame_filename(char *buf, int buf_size,
                          const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                /* parse an optional decimal width between '%' and the verb */
                nd = 0;
                while (av_isdigit(*p)) {
                    nd = nd * 10 + *p++ - '0';
                }
                c = *p++;
            } while (av_isdigit(c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                /* only a single %d is permitted in the pattern */
                if (percentd_found)
                    goto fail;
                percentd_found = 1;
                /* buf1 is 20 bytes, so snprintf caps the width safely */
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
        addchar:
            /* extra characters beyond the buffer are silently dropped */
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
fail:
    *q = '\0';
    return -1;
}
 
/* Hex-dump 'size' bytes of 'buf' in the classic 16-bytes-per-row layout
 * (offset, hex bytes, ASCII column). Output goes to FILE 'f' when non-NULL,
 * otherwise to av_log with context 'avcl' at the given level. */
static void hex_dump_internal(void *avcl, FILE *f, int level,
                              const uint8_t *buf, int size)
{
    int len, i, j, c;
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)

    for (i = 0; i < size; i += 16) {
        len = size - i;
        if (len > 16)
            len = 16;
        PRINT("%08x ", i);
        for (j = 0; j < 16; j++) {
            /* pad short final rows so the ASCII column stays aligned */
            if (j < len)
                PRINT(" %02x", buf[i+j]);
            else
                PRINT(" ");
        }
        PRINT(" ");
        for (j = 0; j < len; j++) {
            c = buf[i+j];
            /* non-printable bytes are shown as '.' */
            if (c < ' ' || c > '~')
                c = '.';
            PRINT("%c", c);
        }
        PRINT("\n");
    }
#undef PRINT
}
 
/* Public wrapper: hex-dump to a stdio stream. */
void av_hex_dump(FILE *f, const uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
 
/* Public wrapper: hex-dump through av_log at the given level. */
void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
{
    hex_dump_internal(avcl, NULL, level, buf, size);
}
 
/* Print the key fields of a packet (stream index, keyframe flag, duration,
 * dts, pts, size) in the given time base, optionally followed by a hex dump
 * of the payload. Output goes to FILE 'f' when non-NULL, otherwise to
 * av_log with context 'avcl' at 'level'. */
static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
{
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
    PRINT("stream #%d:\n", pkt->stream_index);
    PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
    PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
    /* DTS is _always_ valid after av_read_frame() */
    PRINT(" dts=");
    if (pkt->dts == AV_NOPTS_VALUE)
        PRINT("N/A");
    else
        PRINT("%0.3f", pkt->dts * av_q2d(time_base));
    /* PTS may not be known if B-frames are present. */
    PRINT(" pts=");
    if (pkt->pts == AV_NOPTS_VALUE)
        PRINT("N/A");
    else
        PRINT("%0.3f", pkt->pts * av_q2d(time_base));
    PRINT("\n");
    PRINT(" size=%d\n", pkt->size);
#undef PRINT
    if (dump_payload)
        av_hex_dump(f, pkt->data, pkt->size);
}
 
/* Public wrapper: dump a packet to a stdio stream, using the stream's
 * time base to render timestamps in seconds. */
void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
{
    pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
}
 
/* Public wrapper: dump a packet through av_log at the given level. */
void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
                      AVStream *st)
{
    pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
}
 
/* Split a URL of the form proto://user:pass@host:port/path?query into its
 * components. Any output pointer may be too small (components are
 * truncated) and *port_ptr is -1 when no port is present. A string with no
 * ':' is treated as a plain filename and copied entirely into 'path'. */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr,
                  char *path, int path_size,
                  const char *url)
{
    const char *p, *ls, *ls2, *at, *at2, *col, *brk;

    if (port_ptr) *port_ptr = -1;
    if (proto_size > 0) proto[0] = 0;
    if (authorization_size > 0) authorization[0] = 0;
    if (hostname_size > 0) hostname[0] = 0;
    if (path_size > 0) path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        /* skip up to two '/' of the "//" authority marker */
        if (*p == '/') p++;
        if (*p == '/') p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname: path starts at the first '/' or '?' */
    ls = strchr(p, '/');
    ls2 = strchr(p, '?');
    if (!ls)
        ls = ls2;
    else if (ls && ls2)
        ls = FFMIN(ls, ls2);
    if (ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX
 
    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) — the loop keeps the text
         * before the LAST '@' preceding the path */
        at2 = p;
        while ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, at2,
                       FFMIN(authorization_size, at + 1 - at2));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port — bracketed form, e.g. IPv6 literals */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            /* host:port */
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr) *port_ptr = atoi(col + 1);
        } else
            /* bare host, no port */
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
    }
}
 
/* Render 's' bytes of 'src' as 2*s hex characters into 'buff' (which must
 * hold at least 2*s bytes; no NUL terminator is written). 'lowercase'
 * selects the a-f alphabet instead of A-F. Returns 'buff'. */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char alphabets[2][16] = {
        { '0', '1', '2', '3', '4', '5', '6', '7',
          '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' },
        { '0', '1', '2', '3', '4', '5', '6', '7',
          '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' },
    };
    const char *digits = alphabets[!!lowercase];
    int i;

    for (i = 0; i < s; i++) {
        buff[2 * i]     = digits[src[i] >> 4];
        buff[2 * i + 1] = digits[src[i] & 0x0F];
    }

    return buff;
}
 
/* Parse a whitespace-tolerant hex string into bytes. Parsing stops at the
 * first non-hex, non-space character; a trailing odd nibble is dropped.
 * When 'data' is NULL only the length is computed.
 * Returns the number of decoded bytes. */
int ff_hex_to_data(uint8_t *data, const char *p)
{
    int len = 0;
    int v   = 1;   /* accumulator; bit 8 set means two nibbles collected */

    while (1) {
        int c;

        p += strspn(p, SPACE_CHARS);
        if (!*p)
            break;
        c = av_toupper((unsigned char)*p++);
        if (c >= '0' && c <= '9')
            c -= '0';
        else if (c >= 'A' && c <= 'F')
            c -= 'A' - 10;
        else
            break;
        v = (v << 4) | c;
        if (v & 0x100) {
            if (data)
                data[len] = v;
            len++;
            v = 1;
        }
    }
    return len;
}
 
#if FF_API_SET_PTS_INFO
/* Deprecated public name; forwards to avpriv_set_pts_info(). */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     unsigned int pts_num, unsigned int pts_den)
{
    avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
}
#endif
 
/* Set the stream time base to pts_num/pts_den (reduced to lowest terms and
 * clamped into int range) and record the timestamp wrap bit count. Invalid
 * (non-positive) reduced time bases are rejected with an error log. */
void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
                         unsigned int pts_num, unsigned int pts_den)
{
    AVRational reduced;

    if (av_reduce(&reduced.num, &reduced.den, pts_num, pts_den, INT_MAX)) {
        if (reduced.num != pts_num)
            av_log(NULL, AV_LOG_DEBUG,
                   "st:%d removing common factor %d from timebase\n",
                   s->index, pts_num / reduced.num);
    } else {
        av_log(NULL, AV_LOG_WARNING,
               "st:%d has too large timebase, reducing\n", s->index);
    }

    if (reduced.num <= 0 || reduced.den <= 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Ignoring attempt to set invalid timebase %d/%d for st:%d\n",
               reduced.num, reduced.den, s->index);
        return;
    }

    s->time_base = reduced;
    av_codec_set_pkt_timebase(s->codec, reduced);
    s->pts_wrap_bits = pts_wrap_bits;
}
 
/* Parse a comma/whitespace separated list of key=value pairs. For each pair
 * the callback supplies a destination buffer (possibly NULL to skip the
 * value); the value is copied into it, honoring double quotes and
 * backslash escapes inside quoted values, and NUL-terminated. */
void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
                        void *context)
{
    const char *ptr = str;

    /* Parse key=value pairs. */
    for (;;) {
        const char *key;
        char *dest = NULL, *dest_end;
        int key_len, dest_len = 0;

        /* Skip whitespace and potential commas. */
        while (*ptr && (av_isspace(*ptr) || *ptr == ','))
            ptr++;
        if (!*ptr)
            break;

        key = ptr;

        if (!(ptr = strchr(key, '=')))
            break;
        ptr++;
        key_len = ptr - key;  /* key length including the '=' */

        /* let the caller choose where (and whether) to store this value */
        callback_get_buf(context, key, key_len, &dest, &dest_len);
        dest_end = dest + dest_len - 1;  /* last byte reserved for NUL */

        if (*ptr == '\"') {
            /* quoted value: copy until the closing quote, unescaping \x */
            ptr++;
            while (*ptr && *ptr != '\"') {
                if (*ptr == '\\') {
                    if (!ptr[1])
                        break;
                    if (dest && dest < dest_end)
                        *dest++ = ptr[1];
                    ptr += 2;
                } else {
                    if (dest && dest < dest_end)
                        *dest++ = *ptr;
                    ptr++;
                }
            }
            if (*ptr == '\"')
                ptr++;
        } else {
            /* unquoted value: copy until whitespace or comma */
            for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
                if (dest && dest < dest_end)
                    *dest++ = *ptr;
        }
        if (dest)
            *dest = 0;
    }
}
 
/* Return the index of the first stream whose id matches, or -1 if none. */
int ff_find_stream_index(AVFormatContext *s, int id)
{
    int i = 0;

    while (i < s->nb_streams) {
        if (s->streams[i]->id == id)
            return i;
        i++;
    }
    return -1;
}
 
int64_t ff_iso8601_to_unix_time(const char *datestr)
{
struct tm time1 = {0}, time2 = {0};
char *ret1, *ret2;
ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1);
ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2);
if (ret2 && !ret1)
return av_timegm(&time2);
else
return av_timegm(&time1);
}
 
/* Ask whether a muxer can store the given codec: defer to the muxer's own
 * query hook if present, else consult its codec tag tables, else accept
 * only its default video/audio/subtitle codecs. */
int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance)
{
    if (!ofmt)
        return AVERROR_PATCHWELCOME;

    if (ofmt->query_codec)
        return ofmt->query_codec(codec_id, std_compliance);
    if (ofmt->codec_tag)
        return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
    if (codec_id == ofmt->video_codec ||
        codec_id == ofmt->audio_codec ||
        codec_id == ofmt->subtitle_codec)
        return 1;

    return AVERROR_PATCHWELCOME;
}
 
/* Globally initialize network subsystems (sockets and TLS). A no-op when
 * libavformat was built without CONFIG_NETWORK.
 * Returns 0 on success or a negative error from ff_network_init(). */
int avformat_network_init(void)
{
#if CONFIG_NETWORK
    int ret;
    ff_network_inited_globally = 1;  /* remember the caller did explicit init */
    if ((ret = ff_network_init()) < 0)
        return ret;
    ff_tls_init();
#endif
    return 0;
}
 
/* Undo avformat_network_init(): close the socket layer and TLS.
 * A no-op without CONFIG_NETWORK; always returns 0. */
int avformat_network_deinit(void)
{
#if CONFIG_NETWORK
    ff_network_close();
    ff_tls_deinit();
#endif
    return 0;
}
 
/* Attach an AV_PKT_DATA_PARAM_CHANGE side-data block to 'pkt' describing
 * the parameters that changed (any zero argument means "unchanged"). The
 * block is a little-endian flags word followed by the changed values in
 * flag order. Returns 0, AVERROR(EINVAL) for a NULL packet, or
 * AVERROR(ENOMEM) if the side data cannot be allocated. */
int ff_add_param_change(AVPacket *pkt, int32_t channels,
                        uint64_t channel_layout, int32_t sample_rate,
                        int32_t width, int32_t height)
{
    uint32_t flags = 0;
    int size = 4;       /* the flags word itself */
    uint8_t *data;

    if (!pkt)
        return AVERROR(EINVAL);

    if (channels) {
        flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
        size  += 4;
    }
    if (channel_layout) {
        flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
        size  += 8;
    }
    if (sample_rate) {
        flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
        size  += 4;
    }
    if (width || height) {
        flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
        size  += 8;
    }

    data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
    if (!data)
        return AVERROR(ENOMEM);

    bytestream_put_le32(&data, flags);
    if (channels)
        bytestream_put_le32(&data, channels);
    if (channel_layout)
        bytestream_put_le64(&data, channel_layout);
    if (sample_rate)
        bytestream_put_le32(&data, sample_rate);
    if (width || height) {
        bytestream_put_le32(&data, width);
        bytestream_put_le32(&data, height);
    }
    return 0;
}
 
/* Pick the most trustworthy sample aspect ratio: the stream-level SAR if it
 * reduces to a valid positive rational, otherwise the frame-level SAR
 * (falling back to the codec SAR when no frame is given), otherwise 0/1. */
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
{
    AVRational none        = { 0, 1 };
    AVRational from_stream = stream ? stream->sample_aspect_ratio : none;
    AVRational from_codec  = stream && stream->codec ? stream->codec->sample_aspect_ratio : none;
    AVRational from_frame  = frame ? frame->sample_aspect_ratio : from_codec;

    av_reduce(&from_stream.num, &from_stream.den,
              from_stream.num, from_stream.den, INT_MAX);
    if (from_stream.num <= 0 || from_stream.den <= 0)
        from_stream = none;

    av_reduce(&from_frame.num, &from_frame.den,
              from_frame.num, from_frame.den, INT_MAX);
    if (from_frame.num <= 0 || from_frame.den <= 0)
        from_frame = none;

    return from_stream.num ? from_stream : from_frame;
}
 
/* Guess the real frame rate of a stream. Starts from r_frame_rate but,
 * for codecs with multiple ticks per frame (e.g. field-based), prefers the
 * codec-derived rate when r_frame_rate looks inflated (more than ~1.43x the
 * codec rate) and disagrees with the average frame rate by more than 10%. */
AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
{
    AVRational fr = st->r_frame_rate;

    if (st->codec->ticks_per_frame > 1) {
        AVRational codec_fr = av_inv_q(st->codec->time_base);
        AVRational avg_fr = st->avg_frame_rate;
        codec_fr.den *= st->codec->ticks_per_frame;
        if (codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
            && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
            fr = codec_fr;
    }

    return fr;
}
 
/* Check whether stream 'st' of 's' matches the stream specifier 'spec'.
 * Supported forms: "" (all), "N" (index), "v"/"a"/"s"/"d"/"t" (media type,
 * optionally ":N" for the N-th stream of that type), "p:ID" (program,
 * optionally ":N" inside the program) and "#ID" (stream id).
 * Returns >0 on match, 0 on no match, AVERROR(EINVAL) on a bad specifier. */
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
                                    const char *spec)
{
    if (*spec <= '9' && *spec >= '0') /* opt:index */
        return strtol(spec, NULL, 0) == st->index;
    else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
             *spec == 't') { /* opt:[vasdt] */
        enum AVMediaType type;

        switch (*spec++) {
        case 'v': type = AVMEDIA_TYPE_VIDEO; break;
        case 'a': type = AVMEDIA_TYPE_AUDIO; break;
        case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
        case 'd': type = AVMEDIA_TYPE_DATA; break;
        case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
        default: av_assert0(0);
        }
        if (type != st->codec->codec_type)
            return 0;
        if (*spec++ == ':') { /* possibly followed by :index */
            /* index counts only streams of the requested type */
            int i, index = strtol(spec, NULL, 0);
            for (i = 0; i < s->nb_streams; i++)
                if (s->streams[i]->codec->codec_type == type && index-- == 0)
                    return i == st->index;
            return 0;
        }
        return 1;
    } else if (*spec == 'p' && *(spec + 1) == ':') {
        int prog_id, i, j;
        char *endptr;
        spec += 2;
        prog_id = strtol(spec, &endptr, 0);
        for (i = 0; i < s->nb_programs; i++) {
            if (s->programs[i]->id != prog_id)
                continue;

            if (*endptr++ == ':') {
                /* p:ID:N — the N-th stream inside the program */
                int stream_idx = strtol(endptr, NULL, 0);
                return stream_idx >= 0 &&
                       stream_idx < s->programs[i]->nb_stream_indexes &&
                       st->index == s->programs[i]->stream_index[stream_idx];
            }

            /* p:ID — any stream belonging to the program */
            for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
                if (st->index == s->programs[i]->stream_index[j])
                    return 1;
        }
        return 0;
    } else if (*spec == '#') {
        /* #ID — match by format-specific stream id (e.g. MPEG-TS PID) */
        int sid;
        char *endptr;
        sid = strtol(spec + 1, &endptr, 0);
        if (!*endptr)
            return st->id == sid;
    } else if (!*spec) /* empty specifier, matches everything */
        return 1;

    av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
    return AVERROR(EINVAL);
}
 
/* Install canned H.264 SPS/PPS extradata for AVC-Intra streams, which carry
 * no in-band parameter sets. The table is selected by coded width (1920 =
 * AVC-I 100 1080-line, 1440 = AVC-I 50 1080i, 1280 = AVC-I 100 720p) and,
 * for 1920, by field order. Does nothing for other widths; silently leaves
 * the stream without extradata if allocation fails. */
void ff_generate_avci_extradata(AVStream *st)
{
    static const uint8_t avci100_1080p_extradata[] = {
        // SPS
        0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
        0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
        0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
        0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
        0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
        0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
        0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
        0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
        0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // PPS
        0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
        0xd0
    };
    static const uint8_t avci100_1080i_extradata[] = {
        // SPS
        0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
        0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
        0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
        0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
        0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
        0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
        0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
        0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
        0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
        0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
        0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
        // PPS
        0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
        0xd0
    };
    static const uint8_t avci50_1080i_extradata[] = {
        // SPS
        0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
        0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
        0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
        0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
        0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
        0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
        0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
        0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
        0x81, 0x13, 0xf7, 0xff, 0x80, 0x02, 0x00, 0x01,
        0xf1, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
        0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
        // PPS
        0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
        0x11
    };
    static const uint8_t avci100_720p_extradata[] = {
        // SPS
        0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
        0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
        0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
        0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
        0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
        0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
        0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
        0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
        0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
        0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
        // PPS
        0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
        0x11
    };
    int size = 0;
    const uint8_t *data = 0;
    if (st->codec->width == 1920) {
        if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
            data = avci100_1080p_extradata;
            size = sizeof(avci100_1080p_extradata);
        } else {
            data = avci100_1080i_extradata;
            size = sizeof(avci100_1080i_extradata);
        }
    } else if (st->codec->width == 1440) {
        data = avci50_1080i_extradata;
        size = sizeof(avci50_1080i_extradata);
    } else if (st->codec->width == 1280) {
        data = avci100_720p_extradata;
        size = sizeof(avci100_720p_extradata);
    }
    if (!size)
        return;
    /* replace any previous extradata with the selected table */
    av_freep(&st->codec->extradata);
    if (ff_alloc_extradata(st->codec, size))
        return;
    memcpy(st->codec->extradata, data, size);
}
/contrib/sdk/sources/ffmpeg/libavformat/vc1test.c
0,0 → 1,119
/*
* VC1 Test Bitstreams Format Demuxer
* Copyright (c) 2006, 2008 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* VC1 test bitstream file demuxer
* by Konstantin Shishkov
* Format specified in SMPTE standard 421 Annex L
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define VC1_EXTRADATA_SIZE 4
 
/* Probe for the VC-1 test (.rcv) format: byte 3 must be the 0xC5 marker,
 * the extradata size field must be 4 and the struct-C size field 0xC. */
static int vc1t_probe(AVProbeData *p)
{
    if (p->buf_size < 24)
        return 0;
    if (p->buf[3] == 0xC5 &&
        AV_RL32(&p->buf[4]) == 4 &&
        AV_RL32(&p->buf[20]) == 0xC)
        return AVPROBE_SCORE_EXTENSION;
    return 0;
}
 
/* Parse the .rcv header: 24-bit frame count, 0xC5 marker, 4 bytes of codec
 * extradata, dimensions and the frame rate field (0xFFFFFFFF = variable
 * framerate, timestamps in ms). Returns 0 or a negative AVERROR code.
 * (Improvement: bare -1 returns replaced by proper AVERROR codes, per
 * libavformat convention; still negative, so callers are unaffected.) */
static int vc1t_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVStream *st;
    int frames;
    uint32_t fps;

    frames = avio_rl24(pb);
    if (avio_r8(pb) != 0xC5 || avio_rl32(pb) != 4)
        return AVERROR_INVALIDDATA;

    /* init video codec */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_WMV3;

    if (ff_alloc_extradata(st->codec, VC1_EXTRADATA_SIZE))
        return AVERROR(ENOMEM);
    avio_read(pb, st->codec->extradata, VC1_EXTRADATA_SIZE);
    st->codec->height = avio_rl32(pb);
    st->codec->width  = avio_rl32(pb);
    if (avio_rl32(pb) != 0xC)
        return AVERROR_INVALIDDATA;
    avio_skip(pb, 8);
    fps = avio_rl32(pb);
    if (fps == 0xFFFFFFFF) {
        /* variable framerate: per-packet millisecond timestamps */
        avpriv_set_pts_info(st, 32, 1, 1000);
    } else {
        if (!fps) {
            av_log(s, AV_LOG_ERROR, "Zero FPS specified, defaulting to 1 FPS\n");
            fps = 1;
        }
        avpriv_set_pts_info(st, 24, 1, fps);
        st->duration = frames;
    }

    return 0;
}
 
/* Read one frame: a 24-bit size, a flags byte (top bit = keyframe) and a
 * 32-bit timestamp precede the payload. The timestamp is used only in
 * variable-framerate mode (time base 1/1000); pkt->pos is rewound over the
 * 8-byte per-frame header so it points at the frame record. */
static int vc1t_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    uint32_t pts;
    int size, key;

    if (url_feof(pb))
        return AVERROR(EIO);

    size = avio_rl24(pb);
    key  = (avio_r8(pb) & 0x80) != 0;
    pts  = avio_rl32(pb);

    if (av_get_packet(pb, pkt, size) < 0)
        return AVERROR(EIO);

    if (s->streams[0]->time_base.den == 1000)
        pkt->pts = pts;
    if (key)
        pkt->flags |= AV_PKT_FLAG_KEY;
    pkt->pos -= 8;

    return pkt->size;
}
 
/* Demuxer registration for the VC-1 test bitstream (.rcv) format. */
AVInputFormat ff_vc1t_demuxer = {
    .name        = "vc1test",
    .long_name   = NULL_IF_CONFIG_SMALL("VC-1 test bitstream"),
    .read_probe  = vc1t_probe,
    .read_header = vc1t_read_header,
    .read_packet = vc1t_read_packet,
    .flags       = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/vc1testenc.c
0,0 → 1,94
/*
* VC-1 test bitstreams format muxer.
* Copyright (c) 2008 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "internal.h"
 
/* Muxer private state: count of frames written so far, patched into the
 * 24-bit frame-count field at the start of the file by the trailer. */
typedef struct RCVContext {
    int frames;
} RCVContext;
 
/* Write the fixed .rcv file header. Only WMV3 streams are accepted; the
 * frame-count field is written as 0 here and patched by the trailer. */
static int vc1test_write_header(AVFormatContext *s)
{
    AVStream *st        = s->streams[0];
    AVCodecContext *avc = st->codec;
    AVIOContext *pb     = s->pb;
    uint32_t fps_field;

    if (avc->codec_id != AV_CODEC_ID_WMV3) {
        av_log(s, AV_LOG_ERROR, "Only WMV3 is accepted!\n");
        return -1;
    }

    if (st->avg_frame_rate.den && st->avg_frame_rate.num == 1)
        fps_field = st->avg_frame_rate.den;
    else
        fps_field = 0xFFFFFFFF; /* variable framerate marker */

    avio_wl24(pb, 0);               /* frame count, patched in the trailer */
    avio_w8(pb, 0xC5);              /* format marker */
    avio_wl32(pb, 4);               /* extradata size */
    avio_write(pb, avc->extradata, 4);
    avio_wl32(pb, avc->height);
    avio_wl32(pb, avc->width);
    avio_wl32(pb, 0xC);             /* struct-C size */
    avio_wl24(pb, 0);               /* hrd_buffer */
    avio_w8(pb, 0x80);              /* level|cbr|res1 */
    avio_wl32(pb, 0);               /* hrd_rate */
    avio_wl32(pb, fps_field);
    avpriv_set_pts_info(st, 32, 1, 1000);

    return 0;
}
 
/* Write one frame record: 32-bit size (top bit set for keyframes), 32-bit
 * pts, then the payload. Empty packets are ignored. */
static int vc1test_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    RCVContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t size_field;

    if (!pkt->size)
        return 0;

    size_field = pkt->size;
    if (pkt->flags & AV_PKT_FLAG_KEY)
        size_field |= 0x80000000;

    avio_wl32(pb, size_field);
    avio_wl32(pb, pkt->pts);
    avio_write(pb, pkt->data, pkt->size);
    ctx->frames++;

    return 0;
}
 
/* If the output is seekable, rewind and patch the real frame count into
 * the 24-bit field at the start of the header.
 * (Consistency fix: use the local 'pb' alias throughout instead of mixing
 * it with 's->pb' — they are the same pointer.) */
static int vc1test_write_trailer(AVFormatContext *s)
{
    RCVContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;

    if (pb->seekable) {
        avio_seek(pb, 0, SEEK_SET);
        avio_wl24(pb, ctx->frames);
        avio_flush(pb);
    }
    return 0;
}
 
/* Muxer registration for the VC-1 test bitstream (.rcv) format;
 * video-only, WMV3 payloads. */
AVOutputFormat ff_vc1t_muxer = {
    .name           = "vc1test",
    .long_name      = NULL_IF_CONFIG_SMALL("VC-1 test bitstream"),
    .extensions     = "rcv",
    .priv_data_size = sizeof(RCVContext),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_WMV3,
    .write_header   = vc1test_write_header,
    .write_packet   = vc1test_write_packet,
    .write_trailer  = vc1test_write_trailer,
};
/contrib/sdk/sources/ffmpeg/libavformat/version.h
0,0 → 1,76
/*
* Version macros.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_VERSION_H
#define AVFORMAT_VERSION_H

/**
 * @file
 * @ingroup libavf
 * Libavformat version macros
 */

#include "libavutil/avutil.h"

/* Version triplet: MAJOR bumps on API/ABI breaks, MINOR on feature
 * additions, MICRO on bug-fix releases. */
#define LIBAVFORMAT_VERSION_MAJOR 55
#define LIBAVFORMAT_VERSION_MINOR 19
#define LIBAVFORMAT_VERSION_MICRO 104

/* Packed integer form used for run-time version comparisons. */
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
                                               LIBAVFORMAT_VERSION_MINOR, \
                                               LIBAVFORMAT_VERSION_MICRO)
/* Dotted token form (e.g. 55.19.104) for display purposes. */
#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \
                                       LIBAVFORMAT_VERSION_MINOR, \
                                       LIBAVFORMAT_VERSION_MICRO)
#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT

/* Identification string written into produced files ("Lavf..."). */
#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION)

/**
 * FF_API_* defines may be placed below to indicate public API that will be
 * dropped at a future version bump. The defines themselves are not part of
 * the public API and may change, break or disappear at any time.
 */

#ifndef FF_API_ALLOC_OUTPUT_CONTEXT
#define FF_API_ALLOC_OUTPUT_CONTEXT (LIBAVFORMAT_VERSION_MAJOR < 56)
#endif
#ifndef FF_API_FORMAT_PARAMETERS
#define FF_API_FORMAT_PARAMETERS (LIBAVFORMAT_VERSION_MAJOR < 56)
#endif
#ifndef FF_API_NEW_STREAM
#define FF_API_NEW_STREAM (LIBAVFORMAT_VERSION_MAJOR < 56)
#endif
#ifndef FF_API_SET_PTS_INFO
#define FF_API_SET_PTS_INFO (LIBAVFORMAT_VERSION_MAJOR < 56)
#endif
#ifndef FF_API_CLOSE_INPUT_FILE
#define FF_API_CLOSE_INPUT_FILE (LIBAVFORMAT_VERSION_MAJOR < 56)
#endif
#ifndef FF_API_READ_PACKET
#define FF_API_READ_PACKET (LIBAVFORMAT_VERSION_MAJOR < 56)
#endif
#ifndef FF_API_ASS_SSA
#define FF_API_ASS_SSA (LIBAVFORMAT_VERSION_MAJOR < 56)
#endif
#ifndef FF_API_R_FRAME_RATE
#define FF_API_R_FRAME_RATE 1
#endif
#endif /* AVFORMAT_VERSION_H */
/contrib/sdk/sources/ffmpeg/libavformat/vivo.c
0,0 → 1,313
/*
* Vivo stream demuxer
* Copyright (c) 2009 Daniel Verkamp <daniel at drv.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* @brief Vivo stream demuxer
* @author Daniel Verkamp <daniel at drv.nu>
* @sa http://wiki.multimedia.cx/index.php?title=Vivo
*/
 
#include "libavutil/parseutils.h"
#include "avformat.h"
#include "internal.h"
 
/* Demuxer state: container version plus the header fields of the most
 * recently parsed packet (filled in by vivo_get_packet_header()). */
typedef struct VivoContext {
    int version;            // parsed from the "Version:Vivo/N" text header

    int type;               // packet type (high nibble of the header byte)
    int sequence;           // packet sequence number (low nibble)
    int length;             // payload length in bytes

    uint8_t text[1024 + 1]; // scratch buffer for text headers (+1 for NUL)
} VivoContext;
 
/**
 * Probe for a Vivo stream.
 *
 * A Vivo file starts with a type-0/sequence-0 text packet whose coded
 * length (1-2 bytes, 7 bits each) covers a header beginning with
 * "\r\nVersion:Vivo/N" where N is the major version digit.
 *
 * @return AVPROBE_SCORE_MAX on a match, 0 otherwise.
 */
static int vivo_probe(AVProbeData *p)
{
    const unsigned char *buf = p->buf;
    unsigned c, length = 0;

    // stream must start with packet of type 0 and sequence number 0
    if (*buf++ != 0)
        return 0;

    // read at most 2 bytes of coded length
    c = *buf++;
    length = c & 0x7F;
    if (c & 0x80) {
        c = *buf++;
        length = (length << 7) | (c & 0x7F);
    }
    // a third continuation byte, or an implausible header size, is a mismatch
    if (c & 0x80 || length > 1024 || length < 21)
        return 0;

    if (memcmp(buf, "\r\nVersion:Vivo/", 15))
        return 0;
    buf += 15;

    /* Bug fix: the version digit must be within '0'..'2'. The original
     * condition used '&&' ("*buf < '0' && *buf > '2'"), which can never
     * be true, so the version byte was effectively unchecked. */
    if (*buf < '0' || *buf > '2')
        return 0;

    return AVPROBE_SCORE_MAX;
}
 
/**
 * Read one packet header from the byte stream and store its type,
 * sequence number and payload length in the VivoContext.
 *
 * @return 0 on success, AVERROR_EOF at end of stream,
 *         AVERROR_INVALIDDATA on an unknown type or over-long length.
 */
static int vivo_get_packet_header(AVFormatContext *s)
{
    VivoContext *vivo = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned c, get_length = 0;

    if (url_feof(pb))
        return AVERROR_EOF;

    c = avio_r8(pb);
    // 0x82 is an escape byte: an explicit coded length follows the type byte
    if (c == 0x82) {
        get_length = 1;
        c = avio_r8(pb);
    }

    // type in the high nibble, sequence number in the low nibble
    vivo->type = c >> 4;
    vivo->sequence = c & 0xF;

    switch (vivo->type) {
    case 0: get_length = 1; break;           // text header: always coded length
    case 1: vivo->length = 128; break;       // video, fixed size
    case 2: get_length = 1; break;           // video, coded length
    case 3: vivo->length = 40; break;        // audio, fixed size
    case 4: vivo->length = 24; break;        // audio, fixed size
    default:
        av_log(s, AV_LOG_ERROR, "unknown packet type %d\n", vivo->type);
        return AVERROR_INVALIDDATA;
    }

    // coded length: 1 or 2 bytes of 7 bits, MSB set = continuation
    if (get_length) {
        c = avio_r8(pb);
        vivo->length = c & 0x7F;
        if (c & 0x80) {
            c = avio_r8(pb);
            vivo->length = (vivo->length << 7) | (c & 0x7F);

            if (c & 0x80) {
                av_log(s, AV_LOG_ERROR, "coded length is more than two bytes\n");
                return AVERROR_INVALIDDATA;
            }
        }
    }

    return 0;
}
 
/**
 * Parse the leading text header packets (type 0, sequence 0) of a Vivo
 * stream, set up one video and one audio stream, and export unrecognized
 * "key:value" header lines as metadata.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int vivo_read_header(AVFormatContext *s)
{
    VivoContext *vivo = s->priv_data;
    AVRational fps = { 1, 25};
    AVStream *ast, *vst;
    unsigned char *line, *line_end, *key, *value;
    long value_int;
    int ret, value_used;
    int64_t duration = 0;
    char *end_value;

    vst = avformat_new_stream(s, NULL);
    ast = avformat_new_stream(s, NULL);
    if (!ast || !vst)
        return AVERROR(ENOMEM);

    ast->codec->sample_rate = 8000; // default, may be overridden by header

    while (1) {
        if ((ret = vivo_get_packet_header(s)) < 0)
            return ret;

        // done reading all text header packets?
        if (vivo->sequence || vivo->type)
            break;

        if (vivo->length <= 1024) {
            avio_read(s->pb, vivo->text, vivo->length);
            vivo->text[vivo->length] = 0;
        } else {
            av_log(s, AV_LOG_WARNING, "too big header, skipping\n");
            avio_skip(s->pb, vivo->length);
            continue;
        }

        // parse CRLF-terminated "key:value" lines
        line = vivo->text;
        while (*line) {
            line_end = strstr(line, "\r\n");
            if (!line_end)
                break;

            *line_end = 0;
            key = line;
            line = line_end + 2; // skip \r\n

            if (line_end == key) // skip blank lines
                continue;

            value = strchr(key, ':');
            if (!value) {
                /* Bug fix: this warning used to print 'value', which is
                 * NULL on this path (undefined behavior with %s); report
                 * the offending key instead. */
                av_log(s, AV_LOG_WARNING, "missing colon in key:value pair '%s'\n",
                       key);
                continue;
            }

            *value++ = 0; // split key and value at the colon

            av_log(s, AV_LOG_DEBUG, "header: '%s' = '%s'\n", key, value);

            value_int = strtol(value, &end_value, 10);
            value_used = 0;
            if (*end_value == 0) { // valid integer
                av_log(s, AV_LOG_DEBUG, "got a valid integer (%ld)\n", value_int);
                value_used = 1;
                if (!strcmp(key, "Duration")) {
                    duration = value_int;
                } else if (!strcmp(key, "Width")) {
                    vst->codec->width = value_int;
                } else if (!strcmp(key, "Height")) {
                    vst->codec->height = value_int;
                } else if (!strcmp(key, "TimeUnitNumerator")) {
                    fps.num = value_int / 1000;
                } else if (!strcmp(key, "TimeUnitDenominator")) {
                    fps.den = value_int;
                } else if (!strcmp(key, "SamplingFrequency")) {
                    ast->codec->sample_rate = value_int;
                } else if (!strcmp(key, "NominalBitrate")) {
                } else if (!strcmp(key, "Length")) {
                    // size of file
                } else {
                    value_used = 0;
                }
            }

            if (!strcmp(key, "Version")) {
                if (sscanf(value, "Vivo/%d.", &vivo->version) != 1)
                    return AVERROR_INVALIDDATA;
                value_used = 1;
            } else if (!strcmp(key, "FPS")) {
                AVRational tmp;

                value_used = 1;
                // header stores frames per second; time base is its inverse
                if (!av_parse_ratio(&tmp, value, 10000, AV_LOG_WARNING, s))
                    fps = av_inv_q(tmp);
            }

            // anything not consumed above becomes generic metadata
            if (!value_used)
                av_dict_set(&s->metadata, key, value, 0);
        }
    }

    avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
    avpriv_set_pts_info(vst, 64, fps.num, fps.den);
    if (duration)
        s->duration = av_rescale(duration, 1000, 1);

    vst->start_time = 0;
    vst->codec->codec_tag = 0;
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;

    if (vivo->version == 1) {
        vst->codec->codec_id = AV_CODEC_ID_H263;
        ast->codec->codec_id = AV_CODEC_ID_G723_1;
        ast->codec->bits_per_coded_sample = 8;
        ast->codec->block_align = 24;
        ast->codec->bit_rate = 6400;
    }

    ast->start_time = 0;
    ast->codec->codec_tag = 0;
    ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    ast->codec->channels = 1;

    return 0;
}
 
/**
 * Read one demuxed packet. Consecutive stream packets with the same
 * sequence number and the same media kind (video types 1/2, audio types
 * 3/4) are concatenated into a single AVPacket; remaining text packets
 * (type 0) are skipped.
 *
 * @return packet size on success, a negative AVERROR code on failure.
 */
static int vivo_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    VivoContext *vivo = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned old_sequence = vivo->sequence, old_type = vivo->type;
    int stream_index, ret = 0;

restart:

    if (url_feof(pb))
        return AVERROR_EOF;

    switch (vivo->type) {
    case 0:
        // stray text packet: skip payload and try the next header
        avio_skip(pb, vivo->length);
        if ((ret = vivo_get_packet_header(s)) < 0)
            return ret;
        goto restart;
    case 1:
    case 2: // video
        stream_index = 0;
        break;
    case 3:
    case 4: // audio
        stream_index = 1;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "unknown packet type %d\n", vivo->type);
        return AVERROR_INVALIDDATA;
    }

    if ((ret = av_get_packet(pb, pkt, vivo->length)) < 0)
        goto fail;

    // get next packet header
    if ((ret = vivo_get_packet_header(s)) < 0)
        goto fail;

    /* Merge follow-up fragments: same sequence number and same media kind.
     * (type-1)>>1 maps types 1,2 -> video and 3,4 -> audio. */
    while (vivo->sequence == old_sequence &&
           (((vivo->type - 1) >> 1) == ((old_type - 1) >> 1))) {
        if (url_feof(pb)) {
            ret = AVERROR_EOF;
            break;
        }

        if ((ret = av_append_packet(pb, pkt, vivo->length)) < 0)
            break;

        // get next packet header
        if ((ret = vivo_get_packet_header(s)) < 0)
            break;
    }

    pkt->stream_index = stream_index;

fail:
    if (ret < 0)
        av_free_packet(pkt);
    return ret;
}
 
/* Demuxer registration for Vivo (.viv) streams. */
AVInputFormat ff_vivo_demuxer = {
    .name           = "vivo",
    .long_name      = NULL_IF_CONFIG_SMALL("Vivo"),
    .priv_data_size = sizeof(VivoContext),
    .read_probe     = vivo_probe,
    .read_header    = vivo_read_header,
    .read_packet    = vivo_read_packet,
    .extensions     = "viv",
};
/contrib/sdk/sources/ffmpeg/libavformat/voc.c
0,0 → 1,37
/*
* Creative Voice File common data.
* Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "voc.h"
#include "internal.h"
 
/* File magic: 19 visible characters plus the 0x1A EOF marker; the final
 * NUL added by the string literal is not written to files. */
const unsigned char ff_voc_magic[21] = "Creative Voice File\x1A";

/* Mapping between VOC codec tag bytes and libavcodec codec IDs;
 * terminated by the AV_CODEC_ID_NONE sentinel. */
const AVCodecTag ff_voc_codec_tags[] = {
    {AV_CODEC_ID_PCM_U8,        0x00},
    {AV_CODEC_ID_ADPCM_SBPRO_4, 0x01},
    {AV_CODEC_ID_ADPCM_SBPRO_3, 0x02},
    {AV_CODEC_ID_ADPCM_SBPRO_2, 0x03},
    {AV_CODEC_ID_PCM_S16LE,     0x04},
    {AV_CODEC_ID_PCM_ALAW,      0x06},
    {AV_CODEC_ID_PCM_MULAW,     0x07},
    {AV_CODEC_ID_ADPCM_CT,    0x0200},
    {AV_CODEC_ID_NONE,             0},
};
/contrib/sdk/sources/ffmpeg/libavformat/voc.h
0,0 → 1,51
/*
* Creative Voice File demuxer.
* Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_VOC_H
#define AVFORMAT_VOC_H

#include "avformat.h"
#include "internal.h"

/* Shared demuxer state: bytes still to be consumed from the current block. */
typedef struct voc_dec_context {
    int64_t remaining_size;
} VocDecContext;

/* VOC block type codes as they appear in the file. */
typedef enum voc_type {
    VOC_TYPE_EOF              = 0x00,
    VOC_TYPE_VOICE_DATA       = 0x01,
    VOC_TYPE_VOICE_DATA_CONT  = 0x02,
    VOC_TYPE_SILENCE          = 0x03,
    VOC_TYPE_MARKER           = 0x04,
    VOC_TYPE_ASCII            = 0x05,
    VOC_TYPE_REPETITION_START = 0x06,
    VOC_TYPE_REPETITION_END   = 0x07,
    VOC_TYPE_EXTENDED         = 0x08,
    VOC_TYPE_NEW_VOICE_DATA   = 0x09,
} VocType;

extern const unsigned char ff_voc_magic[21];
extern const AVCodecTag ff_voc_codec_tags[];

/* Read the next chunk of audio data (shared with other demuxers that embed
 * VOC data). max_size == 0 means "use the default packet size". */
int ff_voc_get_packet(AVFormatContext *s, AVPacket *pkt,
                      AVStream *st, int max_size);

#endif /* AVFORMAT_VOC_H */
/contrib/sdk/sources/ffmpeg/libavformat/vocdec.c
0,0 → 1,175
/*
* Creative Voice File demuxer.
* Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/intreadwrite.h"
#include "voc.h"
#include "internal.h"
 
 
/**
 * Probe for a Creative Voice file.
 *
 * Requires the "Creative Voice File\x1A" magic; a header whose version
 * word fails its complement checksum still gets a weak score of 10.
 */
static int voc_probe(AVProbeData *p)
{
    if (memcmp(p->buf, ff_voc_magic, sizeof(ff_voc_magic) - 1) != 0)
        return 0;

    {
        /* The header stores a version word followed by ~version + 0x1234. */
        int version = AV_RL16(p->buf + 22);
        int check   = AV_RL16(p->buf + 24);

        if (check != ~version + 0x1234)
            return 10;
    }

    return AVPROBE_SCORE_MAX;
}
 
/**
 * Read the VOC file header and create the single audio stream.
 * Codec parameters are filled in lazily by ff_voc_get_packet() once the
 * first data block is seen.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int voc_read_header(AVFormatContext *s)
{
    VocDecContext *voc = s->priv_data;
    AVIOContext *pb = s->pb;
    int header_size;
    AVStream *st;

    avio_skip(pb, 20); // skip the 20-byte magic string
    // header size field counts from file start; 22 bytes precede the payload
    header_size = avio_rl16(pb) - 22;
    if (header_size != 4) {
        av_log(s, AV_LOG_ERROR, "unknown header size: %d\n", header_size);
        return AVERROR(ENOSYS);
    }
    avio_skip(pb, header_size); // skip version word and checksum
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

    voc->remaining_size = 0;
    return 0;
}
 
/**
 * Walk VOC blocks until audio data is available, updating the stream's
 * codec parameters from VOICE_DATA / EXTENDED / NEW_VOICE_DATA blocks as
 * they are encountered, then return up to max_size bytes of sample data.
 *
 * @param max_size upper bound on the packet size; 0 selects the 2048-byte
 *                 default.
 * @return packet size on success, a negative AVERROR code on failure/EOF.
 */
int
ff_voc_get_packet(AVFormatContext *s, AVPacket *pkt, AVStream *st, int max_size)
{
    VocDecContext *voc = s->priv_data;
    AVCodecContext *dec = st->codec;
    AVIOContext *pb = s->pb;
    VocType type;
    int size, tmp_codec=-1;
    int sample_rate = 0;
    int channels = 1;

    // parse block headers until there is payload left to hand out
    while (!voc->remaining_size) {
        type = avio_r8(pb);
        if (type == VOC_TYPE_EOF)
            return AVERROR_EOF;
        voc->remaining_size = avio_rl24(pb);
        if (!voc->remaining_size) {
            // zero-length block: treat the rest of the file as payload
            if (!s->pb->seekable)
                return AVERROR(EIO);
            voc->remaining_size = avio_size(pb) - avio_tell(pb);
        }
        max_size -= 4; // block header consumed

        switch (type) {
        case VOC_TYPE_VOICE_DATA:
            if (!dec->sample_rate) {
                // time-constant byte encodes the sample rate
                dec->sample_rate = 1000000 / (256 - avio_r8(pb));
                // a preceding EXTENDED block takes precedence
                if (sample_rate)
                    dec->sample_rate = sample_rate;
                avpriv_set_pts_info(st, 64, 1, dec->sample_rate);
                dec->channels = channels;
                dec->bits_per_coded_sample = av_get_bits_per_sample(dec->codec_id);
            } else
                avio_skip(pb, 1);
            tmp_codec = avio_r8(pb);
            voc->remaining_size -= 2;
            max_size -= 2;
            channels = 1;
            break;

        case VOC_TYPE_VOICE_DATA_CONT:
            // continuation: same parameters, payload only
            break;

        case VOC_TYPE_EXTENDED:
            // overrides rate/channels for the following VOICE_DATA block
            sample_rate = avio_rl16(pb);
            avio_r8(pb); // pack byte, ignored here
            channels = avio_r8(pb) + 1;
            sample_rate = 256000000 / (channels * (65536 - sample_rate));
            voc->remaining_size = 0;
            max_size -= 4;
            break;

        case VOC_TYPE_NEW_VOICE_DATA:
            if (!dec->sample_rate) {
                dec->sample_rate = avio_rl32(pb);
                avpriv_set_pts_info(st, 64, 1, dec->sample_rate);
                dec->bits_per_coded_sample = avio_r8(pb);
                dec->channels = avio_r8(pb);
            } else
                avio_skip(pb, 6);
            tmp_codec = avio_rl16(pb);
            avio_skip(pb, 4); // reserved
            voc->remaining_size -= 12;
            max_size -= 12;
            break;

        default:
            // silence/markers/text/etc.: skip the whole block
            avio_skip(pb, voc->remaining_size);
            max_size -= voc->remaining_size;
            voc->remaining_size = 0;
            break;
        }
    }

    if (tmp_codec >= 0) {
        tmp_codec = ff_codec_get_id(ff_voc_codec_tags, tmp_codec);
        if (dec->codec_id == AV_CODEC_ID_NONE)
            dec->codec_id = tmp_codec;
        else if (dec->codec_id != tmp_codec)
            av_log(s, AV_LOG_WARNING, "Ignoring mid-stream change in audio codec\n");
        if (dec->codec_id == AV_CODEC_ID_NONE) {
            if (s->audio_codec_id == AV_CODEC_ID_NONE) {
                av_log(s, AV_LOG_ERROR, "unknown codec tag\n");
                return AVERROR(EINVAL);
            }
            av_log(s, AV_LOG_WARNING, "unknown codec tag\n");
        }
    }

    dec->bit_rate = dec->sample_rate * dec->channels * dec->bits_per_coded_sample;

    if (max_size <= 0)
        max_size = 2048; // default packet size
    size = FFMIN(voc->remaining_size, max_size);
    voc->remaining_size -= size;
    return av_get_packet(pb, pkt, size);
}
 
/* Demuxer read_packet callback: delegate to the shared VOC block reader
 * for the single audio stream, using the default packet size. */
static int voc_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_voc_get_packet(s, pkt, s->streams[0], 0);
}
 
/* Demuxer registration for Creative Voice (.voc) files. */
AVInputFormat ff_voc_demuxer = {
    .name           = "voc",
    .long_name      = NULL_IF_CONFIG_SMALL("Creative Voice"),
    .priv_data_size = sizeof(VocDecContext),
    .read_probe     = voc_probe,
    .read_header    = voc_read_header,
    .read_packet    = voc_read_packet,
    .codec_tag      = (const AVCodecTag* const []){ ff_voc_codec_tags, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/vocenc.c
0,0 → 1,110
/*
* Creative Voice File muxer.
* Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "voc.h"
#include "internal.h"
 
 
/* Muxer state: whether the per-stream parameter block has been emitted
 * (written once, before the first data block). */
typedef struct voc_enc_context {
    int param_written;
} VocEncContext;
 
/**
 * Write the 26-byte VOC file header (magic, header size, version and its
 * complement checksum). Only a single audio stream with a known VOC codec
 * tag (or PCM_U8) is supported.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int voc_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;
    const int header_size = 26;
    const int version = 0x0114;

    if (s->nb_streams != 1
        || s->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
        return AVERROR_PATCHWELCOME;

    // codec_tag 0 is valid only for PCM_U8 (the tag for it is literally 0)
    if (!enc->codec_tag && enc->codec_id != AV_CODEC_ID_PCM_U8) {
        av_log(s, AV_LOG_ERROR, "unsupported codec\n");
        return AVERROR(EINVAL);
    }

    avio_write(pb, ff_voc_magic, sizeof(ff_voc_magic) - 1);
    avio_wl16(pb, header_size);
    avio_wl16(pb, version);
    avio_wl16(pb, ~version + 0x1234); // checksum over the version word

    return 0;
}
 
/**
 * Write one packet. The first packet also emits the stream parameters:
 * either a NEW_VOICE_DATA block (tags > 3) or a classic VOICE_DATA block,
 * optionally preceded by an EXTENDED block for multi-channel audio.
 * Subsequent packets are plain VOICE_DATA_CONT blocks.
 */
static int voc_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VocEncContext *voc = s->priv_data;
    AVCodecContext *enc = s->streams[0]->codec;
    AVIOContext *pb = s->pb;

    if (!voc->param_written) {
        if (enc->codec_tag > 3) {
            // block type 9 carries full parameters (rate/bits/channels/tag)
            avio_w8(pb, VOC_TYPE_NEW_VOICE_DATA);
            avio_wl24(pb, pkt->size + 12);
            avio_wl32(pb, enc->sample_rate);
            avio_w8(pb, enc->bits_per_coded_sample);
            avio_w8(pb, enc->channels);
            avio_wl16(pb, enc->codec_tag);
            avio_wl32(pb, 0); // reserved
        } else {
            if (s->streams[0]->codec->channels > 1) {
                // extended block first: rounded 16-bit time constant
                avio_w8(pb, VOC_TYPE_EXTENDED);
                avio_wl24(pb, 4);
                avio_wl16(pb, 65536-(256000000 + enc->sample_rate*enc->channels/2)/(enc->sample_rate*enc->channels));
                avio_w8(pb, enc->codec_tag);
                avio_w8(pb, enc->channels - 1);
            }
            avio_w8(pb, VOC_TYPE_VOICE_DATA);
            avio_wl24(pb, pkt->size + 2);
            // rounded 8-bit time constant encoding the sample rate
            avio_w8(pb, 256 - (1000000 + enc->sample_rate/2) / enc->sample_rate);
            avio_w8(pb, enc->codec_tag);
        }
        voc->param_written = 1;
    } else {
        avio_w8(pb, VOC_TYPE_VOICE_DATA_CONT);
        avio_wl24(pb, pkt->size);
    }

    avio_write(pb, pkt->data, pkt->size);
    return 0;
}
 
/* Finish the file: a VOC stream is terminated by a single type-0
 * (VOC_TYPE_EOF) block byte. */
static int voc_write_trailer(AVFormatContext *s)
{
    avio_w8(s->pb, VOC_TYPE_EOF);
    return 0;
}
 
/* Muxer registration for Creative Voice (.voc) files. */
AVOutputFormat ff_voc_muxer = {
    .name              = "voc",
    .long_name         = NULL_IF_CONFIG_SMALL("Creative Voice"),
    .mime_type         = "audio/x-voc",
    .extensions        = "voc",
    .priv_data_size    = sizeof(VocEncContext),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_NONE,
    .write_header      = voc_write_header,
    .write_packet      = voc_write_packet,
    .write_trailer     = voc_write_trailer,
    .codec_tag         = (const AVCodecTag* const []){ ff_voc_codec_tags, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/vorbiscomment.c
0,0 → 1,76
/*
* VorbisComment writer
* Copyright (c) 2009 James Darnley
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "metadata.h"
#include "vorbiscomment.h"
#include "libavcodec/bytestream.h"
#include "libavutil/dict.h"
 
/**
 * VorbisComment metadata conversion mapping.
 * from Ogg Vorbis I format specification: comment field and header specification
 * http://xiph.org/vorbis/doc/v-comment.html
 */
const AVMetadataConv ff_vorbiscomment_metadata_conv[] = {
    { "ALBUMARTIST", "album_artist"},
    { "TRACKNUMBER", "track"  },
    { "DISCNUMBER",  "disc"   },
    { "DESCRIPTION", "comment" },
    { 0 } // sentinel
};
 
/**
 * Compute the size in bytes of the serialized VorbisComment: the fixed
 * 8-byte overhead (vendor-length and tag-count words), the vendor string,
 * and one length-prefixed "key=value" entry per dictionary tag.
 * The number of tags found is returned through *count.
 */
int ff_vorbiscomment_length(AVDictionary *m, const char *vendor_string,
                            unsigned *count)
{
    AVDictionaryEntry *tag = NULL;
    int len = 8 + strlen(vendor_string);

    *count = 0;
    if (!m)
        return len;

    while ((tag = av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        /* 4-byte length word + key + '=' + value */
        len += 4 + strlen(tag->key) + 1 + strlen(tag->value);
        ++*count;
    }

    return len;
}
 
/**
 * Serialize a VorbisComment into *p (advancing the pointer). The buffer
 * must be at least ff_vorbiscomment_length() bytes; count must match the
 * value that function returned.
 *
 * @return 0 (always succeeds).
 */
int ff_vorbiscomment_write(uint8_t **p, AVDictionary **m,
                           const char *vendor_string, const unsigned count)
{
    // vendor string: 32-bit LE length followed by the raw bytes
    bytestream_put_le32(p, strlen(vendor_string));
    bytestream_put_buffer(p, vendor_string, strlen(vendor_string));
    if (*m) {
        AVDictionaryEntry *tag = NULL;
        bytestream_put_le32(p, count);
        // each tag: 32-bit LE length of "key=value", then the bytes
        while ((tag = av_dict_get(*m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
            unsigned int len1 = strlen(tag->key);
            unsigned int len2 = strlen(tag->value);
            bytestream_put_le32(p, len1+1+len2);
            bytestream_put_buffer(p, tag->key, len1);
            bytestream_put_byte(p, '=');
            bytestream_put_buffer(p, tag->value, len2);
        }
    } else
        bytestream_put_le32(p, 0); // no tags
    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/vorbiscomment.h
0,0 → 1,57
/*
* VorbisComment writer
* Copyright (c) 2009 James Darnley
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_VORBISCOMMENT_H
#define AVFORMAT_VORBISCOMMENT_H

#include "avformat.h"
#include "metadata.h"

/**
 * Calculate the length in bytes of a VorbisComment. This is the minimum
 * size required by ff_vorbiscomment_write().
 *
 * @param m The metadata structure to be parsed. For no metadata, set to NULL.
 * @param vendor_string The vendor string to be added into the VorbisComment.
 * For no string, set to an empty string.
 * @param count Pointer to store the number of tags in m because m->count is "not allowed"
 * @return The length in bytes.
 */
int ff_vorbiscomment_length(AVDictionary *m, const char *vendor_string,
                            unsigned *count);

/**
 * Write a VorbisComment into a buffer. The buffer, p, must have enough
 * data to hold the whole VorbisComment. The minimum size required can be
 * obtained by passing the same AVDictionary and vendor_string to
 * ff_vorbiscomment_length()
 *
 * @param p The buffer in which to write.
 * @param m The metadata struct to write.
 * @param vendor_string The vendor string to write.
 * @param count The number of tags in m because m->count is "not allowed"
 * @return 0 on success (the functions always succeeds).
 */
int ff_vorbiscomment_write(uint8_t **p, AVDictionary **m,
                           const char *vendor_string, const unsigned count);

/* Tag-name conversion table (VorbisComment <-> generic metadata keys). */
extern const AVMetadataConv ff_vorbiscomment_metadata_conv[];

#endif /* AVFORMAT_VORBISCOMMENT_H */
/contrib/sdk/sources/ffmpeg/libavformat/vplayerdec.c
0,0 → 1,128
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* VPlayer subtitles format demuxer
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
 
/* Demuxer state: a queue holding all subtitle events, filled during
 * read_header and drained by read_packet. */
typedef struct {
    FFDemuxSubtitlesQueue q;
} VPlayerContext;
 
/**
 * Probe for VPlayer subtitles: the buffer must open with a
 * "H:MM:SS.CC" timestamp followed by ':', ' ' or '='.
 */
static int vplayer_probe(AVProbeData *p)
{
    char sep;

    if (sscanf((const char *)p->buf, "%*d:%*d:%*d.%*d%c", &sep) != 1)
        return 0;
    return strchr(": =", sep) ? AVPROBE_SCORE_MAX : 0;
}
 
/**
 * Parse a leading "H:MM:SS.CC" timestamp from *line, advancing the
 * pointer past it (including the separator character) on success.
 *
 * @return the timestamp in centiseconds, or AV_NOPTS_VALUE if *line
 *         does not start with a well-formed timestamp.
 */
static int64_t read_ts(char **line)
{
    int hh, mm, ss, cs, consumed;
    char sep;

    if (sscanf(*line, "%d:%d:%d.%d%c%n",
               &hh, &mm, &ss, &cs, &sep, &consumed) < 5)
        return AV_NOPTS_VALUE;

    *line += consumed;
    return (hh * 3600LL + mm * 60LL + ss) * 100LL + cs;
}
 
/**
 * Read the whole subtitle file into the event queue. Each line of the
 * form "H:MM:SS.CC<sep>text" becomes one event; timestamps are stored in
 * a 1/100 s time base and durations are left open (-1).
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int vplayer_read_header(AVFormatContext *s)
{
    VPlayerContext *vplayer = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 100); // centisecond time base
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_VPLAYER;

    while (!url_feof(s->pb)) {
        char line[4096];
        char *p = line;
        const int64_t pos = avio_tell(s->pb);
        int len = ff_get_line(s->pb, line, sizeof(line));
        int64_t pts_start;

        if (!len)
            break;

        line[strcspn(line, "\r\n")] = 0; // strip trailing line terminator

        pts_start = read_ts(&p); // p now points past the timestamp
        if (pts_start != AV_NOPTS_VALUE) {
            AVPacket *sub;

            sub = ff_subtitles_queue_insert(&vplayer->q, p, strlen(p), 0);
            if (!sub)
                return AVERROR(ENOMEM);
            sub->pos = pos;
            sub->pts = pts_start;
            sub->duration = -1; // no explicit end time in this format
        }
    }

    ff_subtitles_queue_finalize(&vplayer->q); // sort and assign durations
    return 0;
}
 
/* Pop the next queued subtitle event as a packet. */
static int vplayer_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    VPlayerContext *vplayer = s->priv_data;
    return ff_subtitles_queue_read_packet(&vplayer->q, pkt);
}
 
/* Seek within the pre-loaded event queue (read_seek2 semantics). */
static int vplayer_read_seek(AVFormatContext *s, int stream_index,
                             int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    VPlayerContext *vplayer = s->priv_data;
    return ff_subtitles_queue_seek(&vplayer->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* Free all queued subtitle events. */
static int vplayer_read_close(AVFormatContext *s)
{
    VPlayerContext *vplayer = s->priv_data;
    ff_subtitles_queue_clean(&vplayer->q);
    return 0;
}
 
/* Demuxer registration for VPlayer subtitle (.txt) files. */
AVInputFormat ff_vplayer_demuxer = {
    .name           = "vplayer",
    .long_name      = NULL_IF_CONFIG_SMALL("VPlayer subtitles"),
    .priv_data_size = sizeof(VPlayerContext),
    .read_probe     = vplayer_probe,
    .read_header    = vplayer_read_header,
    .read_packet    = vplayer_read_packet,
    .read_seek2     = vplayer_read_seek,
    .read_close     = vplayer_read_close,
    .extensions     = "txt",
};
/contrib/sdk/sources/ffmpeg/libavformat/vqf.c
0,0 → 1,295
/*
* VQF demuxer
* Copyright (c) 2009 Vitor Sessak
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avformat.h"
#include "internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "riff.h"
 
/* Demuxer state for bit-exact frame splitting: frames are not byte
 * aligned, so leftover bits from the previous read are carried over. */
typedef struct VqfContext {
    int frame_bit_len;       // length of one TwinVQ frame in bits
    uint8_t last_frame_bits; // final byte of the previous packet
    int remaining_bits;      // bits of the next frame already consumed
} VqfContext;
 
/**
 * Probe for a TwinVQ (VQF) file: the "TWIN" fourcc is mandatory; one of
 * the two known version strings gives a definite match, anything else
 * only an extension-level score.
 */
static int vqf_probe(AVProbeData *probe_packet)
{
    const unsigned char *b = probe_packet->buf;

    if (AV_RL32(b) != MKTAG('T','W','I','N'))
        return 0;

    if (!memcmp(b + 4, "97012000", 8) ||
        !memcmp(b + 4, "00052200", 8))
        return AVPROBE_SCORE_MAX;

    return AVPROBE_SCORE_EXTENSION;
}
 
/**
 * Read a chunk's payload from the byte stream and store it as a metadata
 * entry whose key is the 4-byte chunk tag. Reads at most 'remaining'
 * bytes; failures are silently ignored (metadata is best-effort).
 */
static void add_metadata(AVFormatContext *s, uint32_t tag,
                         unsigned int tag_len, unsigned int remaining)
{
    int len = FFMIN(tag_len, remaining);
    char *buf, key[5] = {0}; // 4 tag chars + NUL

    if (len == UINT_MAX)
        return;

    buf = av_malloc(len+1);
    if (!buf)
        return;
    avio_read(s->pb, buf, len);
    buf[len] = 0;
    AV_WL32(key, tag); // tag bytes become the metadata key
    av_dict_set(&s->metadata, key, buf, AV_DICT_DONT_STRDUP_VAL);
}
 
/* Mapping from VQF chunk tags to generic FFmpeg metadata keys. */
static const AVMetadataConv vqf_metadata_conv[] = {
    { "(c) ", "copyright" },
    { "ARNG", "arranger"  },
    { "AUTH", "author"    },
    { "BAND", "band"      },
    { "CDCT", "conductor" },
    { "COMT", "comment"   },
    { "FILE", "filename"  },
    { "GENR", "genre"     },
    { "LABL", "publisher" },
    { "MUSC", "composer"  },
    { "NAME", "title"     },
    { "NOTE", "note"      },
    { "PROD", "producer"  },
    { "PRSN", "personnel" },
    { "REMX", "remixer"   },
    { "SING", "singer"    },
    { "TRCK", "track"     },
    { "WORD", "words"     },
    { 0 }, // sentinel
};
 
/**
 * Parse the VQF chunk list up to the DATA chunk: extract channel count,
 * bitrate and sample-rate flag from COMM, export other chunks as
 * metadata, validate the rate/bitrate combination and derive the frame
 * size (in samples) used as the stream time base.
 *
 * @return 0 on success, a negative error code on malformed input.
 */
static int vqf_read_header(AVFormatContext *s)
{
    VqfContext *c = s->priv_data;
    AVStream *st  = avformat_new_stream(s, NULL);
    int chunk_tag;
    int rate_flag = -1;
    int header_size;
    int read_bitrate = 0;
    int size;
    uint8_t comm_chunk[12];

    if (!st)
        return AVERROR(ENOMEM);

    avio_skip(s->pb, 12); // "TWIN" + 8-byte version string

    header_size = avio_rb32(s->pb);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = AV_CODEC_ID_TWINVQ;
    st->start_time = 0;

    do {
        int len;
        chunk_tag = avio_rl32(s->pb);

        if (chunk_tag == MKTAG('D','A','T','A'))
            break;

        len = avio_rb32(s->pb);

        if ((unsigned) len > INT_MAX/2) {
            av_log(s, AV_LOG_ERROR, "Malformed header\n");
            return -1;
        }

        header_size -= 8; // tag + length words

        switch(chunk_tag){
        case MKTAG('C','O','M','M'):
            avio_read(s->pb, comm_chunk, 12);
            st->codec->channels = AV_RB32(comm_chunk    ) + 1;
            read_bitrate        = AV_RB32(comm_chunk + 4);
            rate_flag           = AV_RB32(comm_chunk + 8);
            avio_skip(s->pb, len-12);

            if (st->codec->channels <= 0) {
                av_log(s, AV_LOG_ERROR, "Invalid number of channels\n");
                return AVERROR_INVALIDDATA;
            }

            st->codec->bit_rate = read_bitrate*1000;
            break;
        case MKTAG('D','S','I','Z'): // size of compressed data
        {
            char buf[8] = {0};
            int size = avio_rb32(s->pb);

            snprintf(buf, sizeof(buf), "%d", size);
            av_dict_set(&s->metadata, "size", buf, 0);
        }
            break;
        case MKTAG('Y','E','A','R'): // recording date
        case MKTAG('E','N','C','D'): // compression date
        case MKTAG('E','X','T','R'): // reserved
        case MKTAG('_','Y','M','H'): // reserved
        case MKTAG('_','N','T','T'): // reserved
        case MKTAG('_','I','D','3'): // reserved for ID3 tags
            avio_skip(s->pb, FFMIN(len, header_size));
            break;
        default:
            add_metadata(s, chunk_tag, len, header_size);
            break;
        }

        header_size -= len;

    } while (header_size >= 0 && !url_feof(s->pb));

    // the rate flag encodes a few well-known rates, otherwise rate in kHz
    switch (rate_flag) {
    case -1:
        av_log(s, AV_LOG_ERROR, "COMM tag not found!\n");
        return -1;
    case 44:
        st->codec->sample_rate = 44100;
        break;
    case 22:
        st->codec->sample_rate = 22050;
        break;
    case 11:
        st->codec->sample_rate = 11025;
        break;
    default:
        if (rate_flag < 8 || rate_flag > 44) {
            av_log(s, AV_LOG_ERROR, "Invalid rate flag %d\n", rate_flag);
            return AVERROR_INVALIDDATA;
        }
        st->codec->sample_rate = rate_flag*1000;
        break;
    }

    if (read_bitrate / st->codec->channels <  8 ||
        read_bitrate / st->codec->channels > 48) {
        av_log(s, AV_LOG_ERROR, "Invalid bitrate per channel %d\n",
               read_bitrate / st->codec->channels);
        return AVERROR_INVALIDDATA;
    }

    // frame size in samples depends on the (rate, bitrate/channel) pair
    switch (((st->codec->sample_rate/1000) << 8) +
            read_bitrate/st->codec->channels) {
    case (11<<8) + 8 :
    case (8 <<8) + 8 :
    case (11<<8) + 10:
    case (22<<8) + 32:
        size = 512;
        break;
    case (16<<8) + 16:
    case (22<<8) + 20:
    case (22<<8) + 24:
        size = 1024;
        break;
    case (44<<8) + 40:
    case (44<<8) + 48:
        size = 2048;
        break;
    default:
        /* Bug fix: corrected "not suported" typo in the error message. */
        av_log(s, AV_LOG_ERROR, "Mode not supported: %d Hz, %d kb/s.\n",
               st->codec->sample_rate, st->codec->bit_rate);
        return -1;
    }
    c->frame_bit_len = st->codec->bit_rate*size/st->codec->sample_rate;
    avpriv_set_pts_info(st, 64, size, st->codec->sample_rate);

    /* put first 12 bytes of COMM chunk in extradata */
    if (ff_alloc_extradata(st->codec, 12))
        return AVERROR(ENOMEM);
    memcpy(st->codec->extradata, comm_chunk, 12);

    ff_metadata_conv_ctx(s, NULL, vqf_metadata_conv);

    return 0;
}
 
/**
 * Read one TwinVQ frame. Frames are not byte aligned, so each packet
 * carries a 2-byte prefix: the number of bits to skip and the last byte
 * of the previous packet (which contains the start of this frame).
 *
 * @return packet size (payload + 2) on success, AVERROR(EIO) on failure.
 */
static int vqf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    VqfContext *c = s->priv_data;
    int ret;
    // bytes needed to complete the current frame, rounded up
    int size = (c->frame_bit_len - c->remaining_bits + 7)>>3;

    if (av_new_packet(pkt, size+2) < 0)
        return AVERROR(EIO);

    pkt->pos          = avio_tell(s->pb);
    pkt->stream_index = 0;
    pkt->duration     = 1;

    pkt->data[0] = 8 - c->remaining_bits; // Number of bits to skip
    pkt->data[1] = c->last_frame_bits;    // carried-over byte
    ret = avio_read(s->pb, pkt->data+2, size);

    if (ret<=0) {
        av_free_packet(pkt);
        return AVERROR(EIO);
    }

    // remember trailing byte/bits for the next packet
    c->last_frame_bits = pkt->data[size+1];
    c->remaining_bits  = (size << 3) - c->frame_bit_len + c->remaining_bits;

    return size+2;
}
 
/**
 * Seek to a timestamp by converting it to a bit offset (frames have a
 * fixed bit length), rounding to a frame boundary in the requested
 * direction, and repositioning the byte stream accordingly.
 *
 * @return 0 on success, a negative error code from avio_seek on failure.
 */
static int vqf_read_seek(AVFormatContext *s,
                         int stream_index, int64_t timestamp, int flags)
{
    VqfContext *c = s->priv_data;
    AVStream *st;
    int ret;
    int64_t pos;

    st = s->streams[stream_index];
    // frame index for the requested timestamp, rounded per seek direction
    pos = av_rescale_rnd(timestamp * st->codec->bit_rate,
                         st->time_base.num,
                         st->time_base.den * (int64_t)c->frame_bit_len,
                         (flags & AVSEEK_FLAG_BACKWARD) ?
                                                   AV_ROUND_DOWN : AV_ROUND_UP);
    pos *= c->frame_bit_len; // now a bit offset into the data

    st->cur_dts = av_rescale(pos, st->time_base.den,
                             st->codec->bit_rate * (int64_t)st->time_base.num);

    if ((ret = avio_seek(s->pb, ((pos-7) >> 3) + s->data_offset, SEEK_SET)) < 0)
        return ret;

    // bits of the frame that lie before the byte we seeked to
    c->remaining_bits = -7 - ((pos-7)&7);
    return 0;
}
 
/* Demuxer registration for NTT TwinVQ (.vqf/.vql/.vqe) files. */
AVInputFormat ff_vqf_demuxer = {
    .name           = "vqf",
    .long_name      = NULL_IF_CONFIG_SMALL("Nippon Telegraph and Telephone Corporation (NTT) TwinVQ"),
    .priv_data_size = sizeof(VqfContext),
    .read_probe     = vqf_probe,
    .read_header    = vqf_read_header,
    .read_packet    = vqf_read_packet,
    .read_seek      = vqf_read_seek,
    .extensions     = "vqf,vql,vqe",
};
/contrib/sdk/sources/ffmpeg/libavformat/w64.c
0,0 → 1,50
/*
* Copyright (c) 2009 Daniel Verkamp
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "w64.h"
 
/* Wave64 chunk GUIDs: the first four bytes are the classic RIFF fourcc,
 * the remaining twelve are a fixed suffix identifying the Wave64 family. */
const uint8_t ff_w64_guid_riff[16] = {
    'r', 'i', 'f', 'f',
    0x2E, 0x91, 0xCF, 0x11, 0xA5, 0xD6, 0x28, 0xDB, 0x04, 0xC1, 0x00, 0x00
};

const uint8_t ff_w64_guid_wave[16] = {
    'w', 'a', 'v', 'e',
    0xF3, 0xAC, 0xD3, 0x11, 0x8C, 0xD1, 0x00, 0xC0, 0x4F, 0x8E, 0xDB, 0x8A
};

const uint8_t ff_w64_guid_fmt [16] = {
    'f', 'm', 't', ' ',
    0xF3, 0xAC, 0xD3, 0x11, 0x8C, 0xD1, 0x00, 0xC0, 0x4F, 0x8E, 0xDB, 0x8A
};

const uint8_t ff_w64_guid_fact[16] = { 'f', 'a', 'c', 't',
    0xF3, 0xAC, 0xD3, 0x11, 0x8C, 0xD1, 0x00, 0xC0, 0x4F, 0x8E, 0xDB, 0x8A
};

const uint8_t ff_w64_guid_data[16] = {
    'd', 'a', 't', 'a',
    0xF3, 0xAC, 0xD3, 0x11, 0x8C, 0xD1, 0x00, 0xC0, 0x4F, 0x8E, 0xDB, 0x8A
};

/* The summary-list chunk uses its own GUID with no fourcc prefix. */
const uint8_t ff_w64_guid_summarylist[16] = {
    0xBC, 0x94, 0x5F, 0x92,
    0x5A, 0x52, 0xD2, 0x11, 0x86, 0xDC, 0x00, 0xC0, 0x4F, 0x8E, 0xDB, 0x8A
};
/contrib/sdk/sources/ffmpeg/libavformat/w64.h
0,0 → 1,31
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_W64_H
#define AVFORMAT_W64_H

#include <stdint.h>

/* 16-byte chunk GUIDs used by the Sony Wave64 (.w64) container;
 * definitions live in w64.c. */
extern const uint8_t ff_w64_guid_riff[16];
extern const uint8_t ff_w64_guid_wave[16];
extern const uint8_t ff_w64_guid_fmt [16];
extern const uint8_t ff_w64_guid_fact[16];
extern const uint8_t ff_w64_guid_data[16];
extern const uint8_t ff_w64_guid_summarylist[16];

#endif /* AVFORMAT_W64_H */
/contrib/sdk/sources/ffmpeg/libavformat/wavdec.c
0,0 → 1,737
/*
* WAV demuxer
* Copyright (c) 2001, 2002 Fabrice Bellard
*
* Sony Wave64 demuxer
* RF64 demuxer
* Copyright (c) 2009 Daniel Verkamp
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "avio.h"
#include "avio_internal.h"
#include "internal.h"
#include "metadata.h"
#include "pcm.h"
#include "riff.h"
#include "w64.h"
#include "spdif.h"
 
/* Shared private context of the WAV, W64 and RF64 demuxers. */
typedef struct WAVDemuxContext {
    const AVClass *class;
    int64_t data_end;            // absolute end offset of the current 'data' chunk
    int w64;                     // nonzero when demuxing a Wave64 file
    int64_t smv_data_ofs;        // start of SMV video data, -1 if not an SMV file
    int smv_block_size;          // size of one SMV video block
    int smv_frames_per_jpeg;     // pts ticks covered by one JPEG block
    int smv_block;               // index of the next video block to read
    int smv_last_stream;         // which stream was returned last (0 audio, 1 video)
    int smv_eof;                 // video stream exhausted
    int audio_eof;               // audio stream exhausted
    int ignore_length;           // AVOption: ignore the data chunk length
    int spdif;                   // 0 = unprobed, 1 = IEC 61937 detected, -1 = not spdif
    int smv_cur_pt;              // pts offset inside the current video block
    int smv_given_first;         // first video frame already returned
    int unaligned; // e.g. if an odd number of bytes ID3 tag was prepended
} WAVDemuxContext;
 
#if CONFIG_WAV_DEMUXER
 
/* Read the next RIFF chunk header: fourcc into *tag, return its size. */
static int64_t next_tag(AVIOContext *pb, uint32_t *tag)
{
    uint32_t fourcc = avio_rl32(pb);
    int64_t  size   = avio_rl32(pb);

    *tag = fourcc;
    return size;
}
 
/* RIFF chunks start on even offsets relative to the file origin; round an
 * odd seek target up one byte, taking into account any odd-sized prefix
 * (e.g. an ID3 tag) recorded in wav->unaligned. */
static int64_t wav_seek_tag(WAVDemuxContext * wav, AVIOContext *s, int64_t offset, int whence)
{
    int64_t target = offset;

    /* The INT64_MAX guard is evaluated first so the addition below cannot
     * overflow for the sentinel "seek to end" value. */
    if (target < INT64_MAX && ((target + wav->unaligned) & 1))
        target++;

    return avio_seek(s, target, whence);
}
 
/* Skip chunks until one with fourcc tag1 is found.
 * Returns that chunk's payload size, or AVERROR_EOF. */
static int64_t find_tag(WAVDemuxContext * wav, AVIOContext *pb, uint32_t tag1)
{
    while (!url_feof(pb)) {
        unsigned int tag;
        int64_t size = next_tag(pb, &tag);

        if (tag == tag1)
            return size;
        /* not the one we want: hop over its (word-aligned) payload */
        wav_seek_tag(wav, pb, size, SEEK_CUR);
    }
    return AVERROR_EOF;
}
 
static int wav_probe(AVProbeData *p)
{
    /* need the RIFF/RF64 header plus the first chunk id */
    if (p->buf_size <= 32)
        return 0;
    if (memcmp(p->buf + 8, "WAVE", 4))
        return 0;
    if (!memcmp(p->buf, "RF64", 4) && !memcmp(p->buf + 12, "ds64", 4))
        return AVPROBE_SCORE_MAX;
    if (!memcmp(p->buf, "RIFF", 4))
        /* Since the ACT demuxer has a standard WAV header at the top of
         * its own, the returned score is decreased to avoid a probe
         * conflict between ACT and WAV. */
        return AVPROBE_SCORE_MAX - 1;
    return 0;
}
 
/* Request codec re-probing on streams tagged as raw PCM, which often
 * carry another format (spdif, MP3, ...) inside a WAV container. */
static void handle_stream_probing(AVStream *st)
{
    if (st->codec->codec_id != AV_CODEC_ID_PCM_S16LE)
        return;
    st->request_probe = AVPROBE_SCORE_EXTENSION;
    st->probe_packets = FFMIN(st->probe_packets, 4);
}
 
/**
 * Parse a 'fmt ' chunk: create the audio stream and fill its codec
 * parameters from the WAVEFORMAT(EX) structure.
 *
 * @param size chunk payload size in bytes
 * @param st   receives the newly created stream
 * @return 0 on success, negative AVERROR on failure
 */
static int wav_parse_fmt_tag(AVFormatContext *s, int64_t size, AVStream **st)
{
    AVIOContext *pb = s->pb;
    int ret;

    /* parse fmt header */
    *st = avformat_new_stream(s, NULL);
    if (!*st)
        return AVERROR(ENOMEM);

    ret = ff_get_wav_header(pb, (*st)->codec, size);
    if (ret < 0)
        return ret;
    /* raw PCM may hide another format; let the probing decide */
    handle_stream_probing(*st);

    (*st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;

    /* timestamps are in samples */
    avpriv_set_pts_info(*st, 64, 1, (*st)->codec->sample_rate);

    return 0;
}
 
/**
 * Read a fixed-length string field from the 'bext' chunk and store it as
 * metadata under @p key (empty fields are skipped).
 *
 * Fix: the assert must be strict — with length == sizeof(temp) (257) the
 * terminator write `temp[length] = 0` below lands one byte past the end
 * of the buffer. Callers pass at most 256.
 *
 * @param length field size in bytes, must be < sizeof(temp)
 * @return 0 on success, negative AVERROR on failure
 */
static inline int wav_parse_bext_string(AVFormatContext *s, const char *key,
                                        int length)
{
    char temp[257];
    int ret;

    av_assert0(length < sizeof(temp));
    if ((ret = avio_read(s->pb, temp, length)) < 0)
        return ret;

    temp[length] = 0;

    if (strlen(temp))
        return av_dict_set(&s->metadata, key, temp, 0);

    return 0;
}
 
/**
 * Parse a Broadcast Wave Format 'bext' chunk and export its fields
 * (description, originator, timestamps, UMID, coding history) as metadata.
 *
 * Fix: the CodingHistory buffer was leaked when avio_read() failed.
 *
 * @param size total bext chunk payload size
 * @return 0 on success, negative AVERROR on failure
 */
static int wav_parse_bext_tag(AVFormatContext *s, int64_t size)
{
    char temp[131], *coding_history;
    int ret, x;
    uint64_t time_reference;
    int64_t umid_parts[8], umid_mask = 0;

    if ((ret = wav_parse_bext_string(s, "description", 256)) < 0 ||
        (ret = wav_parse_bext_string(s, "originator", 32)) < 0 ||
        (ret = wav_parse_bext_string(s, "originator_reference", 32)) < 0 ||
        (ret = wav_parse_bext_string(s, "origination_date", 10)) < 0 ||
        (ret = wav_parse_bext_string(s, "origination_time", 8)) < 0)
        return ret;

    time_reference = avio_rl64(s->pb);
    snprintf(temp, sizeof(temp), "%"PRIu64, time_reference);
    if ((ret = av_dict_set(&s->metadata, "time_reference", temp, 0)) < 0)
        return ret;

    /* check if version is >= 1, in which case an UMID may be present */
    if (avio_rl16(s->pb) >= 1) {
        for (x = 0; x < 8; x++)
            umid_mask |= umid_parts[x] = avio_rb64(s->pb);

        if (umid_mask) {
            /* the string formatting below is per SMPTE 330M-2004 Annex C */
            if (umid_parts[4] == 0 && umid_parts[5] == 0 &&
                umid_parts[6] == 0 && umid_parts[7] == 0) {
                /* basic UMID */
                snprintf(temp, sizeof(temp),
                         "0x%016"PRIX64"%016"PRIX64"%016"PRIX64"%016"PRIX64,
                         umid_parts[0], umid_parts[1],
                         umid_parts[2], umid_parts[3]);
            } else {
                /* extended UMID */
                snprintf(temp, sizeof(temp),
                         "0x%016"PRIX64"%016"PRIX64"%016"PRIX64"%016"PRIX64
                         "%016"PRIX64"%016"PRIX64"%016"PRIX64"%016"PRIX64,
                         umid_parts[0], umid_parts[1],
                         umid_parts[2], umid_parts[3],
                         umid_parts[4], umid_parts[5],
                         umid_parts[6], umid_parts[7]);
            }

            if ((ret = av_dict_set(&s->metadata, "umid", temp, 0)) < 0)
                return ret;
        }

        avio_skip(s->pb, 190);
    } else
        avio_skip(s->pb, 254);

    if (size > 602) {
        /* CodingHistory present */
        size -= 602;

        if (!(coding_history = av_malloc(size + 1)))
            return AVERROR(ENOMEM);

        if ((ret = avio_read(s->pb, coding_history, size)) < 0) {
            av_free(coding_history);   /* was leaked on read failure */
            return ret;
        }

        coding_history[size] = 0;
        /* dict takes ownership of coding_history */
        if ((ret = av_dict_set(&s->metadata, "coding_history", coding_history,
                               AV_DICT_DONT_STRDUP_VAL)) < 0)
            return ret;
    }

    return 0;
}
 
/* Map bext-chunk metadata keys onto FFmpeg's generic key names. */
static const AVMetadataConv wav_metadata_conv[] = {
    { "description",      "comment"       },
    { "originator",       "encoded_by"    },
    { "origination_date", "date"          },
    { "origination_time", "creation_time" },
    { 0 },
};
 
/* wav input */
/**
 * Parse the RIFF/RF64 header and walk the chunk list until the 'data'
 * chunk (or, for SMV files, the 'SMV0' chunk) is reached.  Creates the
 * audio stream from 'fmt ' and, for SMV, a second video stream.
 */
static int wav_read_header(AVFormatContext *s)
{
    int64_t size, av_uninit(data_size);
    int64_t sample_count = 0;
    int rf64;
    uint32_t tag;
    AVIOContext *pb      = s->pb;
    AVStream *st         = NULL;
    WAVDemuxContext *wav = s->priv_data;
    int ret, got_fmt = 0;
    int64_t next_tag_ofs, data_ofs = -1;

    /* remember whether the container starts on an odd offset
     * (e.g. after an odd-sized ID3 tag) for chunk alignment */
    wav->unaligned = avio_tell(s->pb) & 1;

    wav->smv_data_ofs = -1;

    /* check RIFF header */
    tag = avio_rl32(pb);

    rf64 = tag == MKTAG('R', 'F', '6', '4');
    if (!rf64 && tag != MKTAG('R', 'I', 'F', 'F'))
        return AVERROR_INVALIDDATA;
    avio_rl32(pb); /* file size */
    tag = avio_rl32(pb);
    if (tag != MKTAG('W', 'A', 'V', 'E'))
        return AVERROR_INVALIDDATA;

    if (rf64) {
        /* RF64 stores the real 64-bit sizes in a mandatory ds64 chunk */
        if (avio_rl32(pb) != MKTAG('d', 's', '6', '4'))
            return AVERROR_INVALIDDATA;
        size = avio_rl32(pb);
        if (size < 24)
            return AVERROR_INVALIDDATA;
        avio_rl64(pb); /* RIFF size */

        data_size    = avio_rl64(pb);
        sample_count = avio_rl64(pb);

        if (data_size < 0 || sample_count < 0) {
            av_log(s, AV_LOG_ERROR, "negative data_size and/or sample_count in "
                   "ds64: data_size = %"PRId64", sample_count = %"PRId64"\n",
                   data_size, sample_count);
            return AVERROR_INVALIDDATA;
        }
        avio_skip(pb, size - 24); /* skip rest of ds64 chunk */

    }

    for (;;) {
        AVStream *vst;
        size         = next_tag(pb, &tag);
        next_tag_ofs = avio_tell(pb) + size;

        if (url_feof(pb))
            break;

        switch (tag) {
        case MKTAG('f', 'm', 't', ' '):
            /* only parse the first 'fmt ' tag found */
            if (!got_fmt && (ret = wav_parse_fmt_tag(s, size, &st)) < 0) {
                return ret;
            } else if (got_fmt)
                av_log(s, AV_LOG_WARNING, "found more than one 'fmt ' tag\n");

            got_fmt = 1;
            break;
        case MKTAG('d', 'a', 't', 'a'):
            if (!got_fmt) {
                av_log(s, AV_LOG_ERROR,
                       "found no 'fmt ' tag before the 'data' tag\n");
                return AVERROR_INVALIDDATA;
            }

            if (rf64) {
                /* chunk size field is meaningless; use the ds64 value */
                next_tag_ofs = wav->data_end = avio_tell(pb) + data_size;
            } else {
                data_size    = size;
                /* size 0 means "until EOF" (streamed file) */
                next_tag_ofs = wav->data_end = size ? next_tag_ofs : INT64_MAX;
            }

            data_ofs = avio_tell(pb);

            /* don't look for footer metadata if we can't seek or if we don't
             * know where the data tag ends
             */
            if (!pb->seekable || (!rf64 && !size))
                goto break_loop;
            break;
        case MKTAG('f', 'a', 'c', 't'):
            /* sample count, unless ds64 already supplied one */
            if (!sample_count)
                sample_count = avio_rl32(pb);
            break;
        case MKTAG('b', 'e', 'x', 't'):
            if ((ret = wav_parse_bext_tag(s, size)) < 0)
                return ret;
            break;
        case MKTAG('S','M','V','0'):
            if (!got_fmt) {
                av_log(s, AV_LOG_ERROR, "found no 'fmt ' tag before the 'SMV0' tag\n");
                return AVERROR_INVALIDDATA;
            }
            // SMV file, a wav file with video appended.
            if (size != MKTAG('0','2','0','0')) {
                av_log(s, AV_LOG_ERROR, "Unknown SMV version found\n");
                goto break_loop;
            }
            av_log(s, AV_LOG_DEBUG, "Found SMV data\n");
            wav->smv_given_first = 0;
            vst = avformat_new_stream(s, NULL);
            if (!vst)
                return AVERROR(ENOMEM);
            avio_r8(pb);
            vst->id = 1;
            vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            vst->codec->codec_id   = AV_CODEC_ID_SMVJPEG;
            vst->codec->width      = avio_rl24(pb);
            vst->codec->height     = avio_rl24(pb);
            if (ff_alloc_extradata(vst->codec, 4)) {
                av_log(s, AV_LOG_ERROR, "Could not allocate extradata.\n");
                return AVERROR(ENOMEM);
            }
            size = avio_rl24(pb);
            /* video data starts after the (size - 5) remaining 24-bit
             * header fields */
            wav->smv_data_ofs = avio_tell(pb) + (size - 5) * 3;
            avio_rl24(pb);
            wav->smv_block_size = avio_rl24(pb);
            avpriv_set_pts_info(vst, 32, 1, avio_rl24(pb));
            vst->duration = avio_rl24(pb);
            avio_rl24(pb);
            avio_rl24(pb);
            wav->smv_frames_per_jpeg = avio_rl24(pb);
            if (wav->smv_frames_per_jpeg > 65536) {
                av_log(s, AV_LOG_ERROR, "too many frames per jpeg\n");
                return AVERROR_INVALIDDATA;
            }
            AV_WL32(vst->codec->extradata, wav->smv_frames_per_jpeg);
            wav->smv_cur_pt = 0;
            goto break_loop;
        case MKTAG('L', 'I', 'S', 'T'):
            if (size < 4) {
                av_log(s, AV_LOG_ERROR, "too short LIST tag\n");
                return AVERROR_INVALIDDATA;
            }
            switch (avio_rl32(pb)) {
            case MKTAG('I', 'N', 'F', 'O'):
                ff_read_riff_info(s, size - 4);
            }
            break;
        }

        /* seek to next tag unless we know that we'll run into EOF */
        if ((avio_size(pb) > 0 && next_tag_ofs >= avio_size(pb)) ||
            wav_seek_tag(wav, pb, next_tag_ofs, SEEK_SET) < 0) {
            break;
        }
    }

break_loop:
    if (data_ofs < 0) {
        av_log(s, AV_LOG_ERROR, "no 'data' tag found\n");
        return AVERROR_INVALIDDATA;
    }

    /* rewind to the start of the sample data */
    avio_seek(pb, data_ofs, SEEK_SET);

    /* derive the sample count from the data size for fixed-rate codecs
     * when no (trustworthy) fact/ds64 count is available */
    if (!sample_count || av_get_exact_bits_per_sample(st->codec->codec_id) > 0)
        if (   st->codec->channels
            && data_size
            && av_get_bits_per_sample(st->codec->codec_id)
            && wav->data_end <= avio_size(pb))
            sample_count = (data_size << 3)
                           /
                           (st->codec->channels * (uint64_t)av_get_bits_per_sample(st->codec->codec_id));

    if (sample_count)
        st->duration = sample_count;

    ff_metadata_conv_ctx(s, NULL, wav_metadata_conv);
    ff_metadata_conv_ctx(s, NULL, ff_riff_info_conv);

    return 0;
}
 
/**
 * Find chunk with w64 GUID by skipping over other chunks.
 * @return the size of the found chunk
 */
static int64_t find_guid(AVIOContext *pb, const uint8_t guid1[16])
{
    for (;;) {
        uint8_t guid[16];
        int64_t size;

        if (url_feof(pb))
            return AVERROR_EOF;

        avio_read(pb, guid, 16);
        size = avio_rl64(pb);
        /* size includes the 24-byte header; anything smaller is corrupt */
        if (size <= 24)
            return AVERROR_INVALIDDATA;
        if (!memcmp(guid, guid1, 16))
            return size;
        /* chunks are padded to 8-byte boundaries */
        avio_skip(pb, FFALIGN(size, INT64_C(8)) - 24);
    }
}
 
/* upper bound for one audio packet when the codec has no block size */
#define MAX_SIZE 4096

/**
 * Read the next packet, interleaving SMV video blocks with audio when the
 * file carries appended SMV video, and transparently switching to the
 * spdif demuxer when the PCM data turns out to be IEC 61937.
 */
static int wav_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, size;
    int64_t left;
    AVStream *st;
    WAVDemuxContext *wav = s->priv_data;

    /* one-shot spdif probe on streams tagged as raw PCM */
    if (CONFIG_SPDIF_DEMUXER && wav->spdif == 0 &&
        s->streams[0]->codec->codec_tag == 1) {
        enum AVCodecID codec;
        ret = ff_spdif_probe(s->pb->buffer, s->pb->buf_end - s->pb->buffer,
                             &codec);
        if (ret > AVPROBE_SCORE_EXTENSION) {
            s->streams[0]->codec->codec_id = codec;
            wav->spdif = 1;
        } else {
            wav->spdif = -1;
        }
    }
    if (CONFIG_SPDIF_DEMUXER && wav->spdif == 1)
        return ff_spdif_read_packet(s, pkt);

    if (wav->smv_data_ofs > 0) {
        int64_t audio_dts, video_dts;
smv_retry:
        audio_dts = (int32_t)s->streams[0]->cur_dts;
        video_dts = (int32_t)s->streams[1]->cur_dts;

        if (audio_dts != AV_NOPTS_VALUE && video_dts != AV_NOPTS_VALUE) {
            /*We always return a video frame first to get the pixel format first*/
            wav->smv_last_stream = wav->smv_given_first ?
                av_compare_ts(video_dts, s->streams[1]->time_base,
                              audio_dts, s->streams[0]->time_base) > 0 : 0;
            wav->smv_given_first = 1;
        }
        /* alternate streams, but stick to video after audio EOF and to
         * audio after video EOF */
        wav->smv_last_stream = !wav->smv_last_stream;
        wav->smv_last_stream |= wav->audio_eof;
        wav->smv_last_stream &= !wav->smv_eof;
        if (wav->smv_last_stream) {
            uint64_t old_pos = avio_tell(s->pb);
            uint64_t new_pos = wav->smv_data_ofs +
                wav->smv_block * wav->smv_block_size;
            if (avio_seek(s->pb, new_pos, SEEK_SET) < 0) {
                ret = AVERROR_EOF;
                goto smv_out;
            }
            size = avio_rl24(s->pb);
            ret = av_get_packet(s->pb, pkt, size);
            if (ret < 0)
                goto smv_out;
            pkt->pos -= 3;
            pkt->pts = wav->smv_block * wav->smv_frames_per_jpeg + wav->smv_cur_pt;
            wav->smv_cur_pt++;
            if (wav->smv_frames_per_jpeg > 0)
                wav->smv_cur_pt %= wav->smv_frames_per_jpeg;
            if (!wav->smv_cur_pt)
                wav->smv_block++;

            pkt->stream_index = 1;
smv_out:
            /* restore the audio read position in all cases */
            avio_seek(s->pb, old_pos, SEEK_SET);
            if (ret == AVERROR_EOF) {
                wav->smv_eof = 1;
                goto smv_retry;
            }
            return ret;
        }
    }

    st = s->streams[0];

    left = wav->data_end - avio_tell(s->pb);
    if (wav->ignore_length)
        left = INT_MAX;
    if (left <= 0) {
        /* current data chunk exhausted: look for another one */
        if (CONFIG_W64_DEMUXER && wav->w64)
            left = find_guid(s->pb, ff_w64_guid_data) - 24;
        else
            left = find_tag(wav, s->pb, MKTAG('d', 'a', 't', 'a'));
        if (left < 0) {
            wav->audio_eof = 1;
            if (wav->smv_data_ofs > 0 && !wav->smv_eof)
                goto smv_retry;
            return AVERROR_EOF;
        }
        wav->data_end = avio_tell(s->pb) + left;
    }

    /* round the read size to a whole number of codec blocks */
    size = MAX_SIZE;
    if (st->codec->block_align > 1) {
        if (size < st->codec->block_align)
            size = st->codec->block_align;
        size = (size / st->codec->block_align) * st->codec->block_align;
    }
    size = FFMIN(size, left);
    ret  = av_get_packet(s->pb, pkt, size);
    if (ret < 0)
        return ret;
    pkt->stream_index = 0;

    return ret;
}
 
/**
 * Seek in the audio stream (and keep the SMV video block index in sync
 * when video is present).  Falls back to generic index-based seeking for
 * codecs without a constant frame size.
 */
static int wav_read_seek(AVFormatContext *s,
                         int stream_index, int64_t timestamp, int flags)
{
    WAVDemuxContext *wav = s->priv_data;
    AVStream *st;
    wav->smv_eof = 0;
    wav->audio_eof = 0;
    if (wav->smv_data_ofs > 0) {
        int64_t smv_timestamp = timestamp;
        /* express the target in both streams' time bases */
        if (stream_index == 0)
            smv_timestamp = av_rescale_q(timestamp, s->streams[0]->time_base, s->streams[1]->time_base);
        else
            timestamp = av_rescale_q(smv_timestamp, s->streams[1]->time_base, s->streams[0]->time_base);
        if (wav->smv_frames_per_jpeg > 0) {
            wav->smv_block = smv_timestamp / wav->smv_frames_per_jpeg;
            wav->smv_cur_pt = smv_timestamp % wav->smv_frames_per_jpeg;
        }
    }

    st = s->streams[0];
    switch (st->codec->codec_id) {
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
    case AV_CODEC_ID_AC3:
    case AV_CODEC_ID_DTS:
        /* use generic seeking with dynamically generated indexes */
        return -1;
    default:
        break;
    }
    return ff_pcm_read_seek(s, stream_index, timestamp, flags);
}
 
#define OFFSET(x) offsetof(WAVDemuxContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* User-visible demuxer options (AVOption table). */
static const AVOption demux_options[] = {
    { "ignore_length", "Ignore length", OFFSET(ignore_length), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, DEC },
    { NULL },
};
 
/* AVClass exposing demux_options through the AVOption API. */
static const AVClass wav_demuxer_class = {
    .class_name = "WAV demuxer",
    .item_name  = av_default_item_name,
    .option     = demux_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
/* Demuxer registration for RIFF/RF64 WAV files. */
AVInputFormat ff_wav_demuxer = {
    .name           = "wav",
    .long_name      = NULL_IF_CONFIG_SMALL("WAV / WAVE (Waveform Audio)"),
    .priv_data_size = sizeof(WAVDemuxContext),
    .read_probe     = wav_probe,
    .read_header    = wav_read_header,
    .read_packet    = wav_read_packet,
    .read_seek      = wav_read_seek,
    .flags          = AVFMT_GENERIC_INDEX,
    .codec_tag      = (const AVCodecTag * const []) { ff_codec_wav_tags, 0 },
    .priv_class     = &wav_demuxer_class,
};
#endif /* CONFIG_WAV_DEMUXER */
 
#if CONFIG_W64_DEMUXER
static int w64_probe(AVProbeData *p)
{
    /* need the riff GUID (16), its size (8) and the wave GUID (16) */
    if (p->buf_size <= 40)
        return 0;
    if (memcmp(p->buf,      ff_w64_guid_riff, 16) ||
        memcmp(p->buf + 24, ff_w64_guid_wave, 16))
        return 0;
    return AVPROBE_SCORE_MAX;
}
 
/**
 * Parse a Sony Wave64 header: validate the riff/wave GUIDs, then walk the
 * GUID-tagged chunk list collecting format, sample count, metadata and the
 * data chunk position.
 */
static int w64_read_header(AVFormatContext *s)
{
    int64_t size, data_ofs = 0;
    AVIOContext *pb      = s->pb;
    WAVDemuxContext *wav = s->priv_data;
    AVStream *st;
    uint8_t guid[16];
    int ret;

    avio_read(pb, guid, 16);
    if (memcmp(guid, ff_w64_guid_riff, 16))
        return AVERROR_INVALIDDATA;

    /* riff + wave + fmt + sizes */
    if (avio_rl64(pb) < 16 + 8 + 16 + 8 + 16 + 8)
        return AVERROR_INVALIDDATA;

    avio_read(pb, guid, 16);
    if (memcmp(guid, ff_w64_guid_wave, 16)) {
        av_log(s, AV_LOG_ERROR, "could not find wave guid\n");
        return AVERROR_INVALIDDATA;
    }

    wav->w64 = 1;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    while (!url_feof(pb)) {
        if (avio_read(pb, guid, 16) != 16)
            break;
        size = avio_rl64(pb);
        /* size includes the 24-byte header; also guard offset overflow */
        if (size <= 24 || INT64_MAX - size < avio_tell(pb))
            return AVERROR_INVALIDDATA;

        if (!memcmp(guid, ff_w64_guid_fmt, 16)) {
            /* subtract chunk header size - normal wav file doesn't count it */
            ret = ff_get_wav_header(pb, st->codec, size - 24);
            if (ret < 0)
                return ret;
            /* skip the 8-byte alignment padding */
            avio_skip(pb, FFALIGN(size, INT64_C(8)) - size);

            avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
        } else if (!memcmp(guid, ff_w64_guid_fact, 16)) {
            int64_t samples;

            samples = avio_rl64(pb);
            if (samples > 0)
                st->duration = samples;
        } else if (!memcmp(guid, ff_w64_guid_data, 16)) {
            wav->data_end = avio_tell(pb) + size - 24;

            data_ofs = avio_tell(pb);
            /* on a non-seekable input, stop here and start demuxing */
            if (!pb->seekable)
                break;

            avio_skip(pb, size - 24);
        } else if (!memcmp(guid, ff_w64_guid_summarylist, 16)) {
            /* list of key/UTF-16LE-value metadata pairs */
            int64_t start, end, cur;
            uint32_t count, chunk_size, i;

            start = avio_tell(pb);
            end = start + FFALIGN(size, INT64_C(8)) - 24;
            count = avio_rl32(pb);

            for (i = 0; i < count; i++) {
                char chunk_key[5], *value;

                if (url_feof(pb) || (cur = avio_tell(pb)) < 0 || cur > end - 8 /* = tag + size */)
                    break;

                chunk_key[4] = 0;
                avio_read(pb, chunk_key, 4);
                chunk_size = avio_rl32(pb);

                value = av_mallocz(chunk_size + 1);
                if (!value)
                    return AVERROR(ENOMEM);

                ret = avio_get_str16le(pb, chunk_size, value, chunk_size);
                avio_skip(pb, chunk_size - ret);

                /* dict takes ownership of value */
                av_dict_set(&s->metadata, chunk_key, value, AV_DICT_DONT_STRDUP_VAL);
            }

            avio_skip(pb, end - avio_tell(pb));
        } else {
            av_log(s, AV_LOG_DEBUG, "unknown guid: "FF_PRI_GUID"\n", FF_ARG_GUID(guid));
            avio_skip(pb, FFALIGN(size, INT64_C(8)) - 24);
        }
    }

    if (!data_ofs)
        return AVERROR_EOF;

    ff_metadata_conv_ctx(s, NULL, wav_metadata_conv);
    ff_metadata_conv_ctx(s, NULL, ff_riff_info_conv);

    handle_stream_probing(st);
    st->need_parsing = AVSTREAM_PARSE_FULL_RAW;

    /* rewind to the start of the sample data */
    avio_seek(pb, data_ofs, SEEK_SET);

    return 0;
}
 
/* Demuxer registration for Sony Wave64; packet/seek handlers are shared
 * with the WAV demuxer. */
AVInputFormat ff_w64_demuxer = {
    .name           = "w64",
    .long_name      = NULL_IF_CONFIG_SMALL("Sony Wave64"),
    .priv_data_size = sizeof(WAVDemuxContext),
    .read_probe     = w64_probe,
    .read_header    = w64_read_header,
    .read_packet    = wav_read_packet,
    .read_seek      = wav_read_seek,
    .flags          = AVFMT_GENERIC_INDEX,
    .codec_tag      = (const AVCodecTag * const []) { ff_codec_wav_tags, 0 },
};
#endif /* CONFIG_W64_DEMUXER */
/contrib/sdk/sources/ffmpeg/libavformat/wavenc.c
0,0 → 1,383
/*
* WAV muxer
* Copyright (c) 2001, 2002 Fabrice Bellard
*
* Sony Wave64 muxer
* Copyright (c) 2012 Paul B Mahol
*
* WAV muxer RF64 support
* Copyright (c) 2013 Daniel Verkamp <daniel@drv.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdint.h>
#include <string.h>
 
#include "libavutil/dict.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
 
#include "avformat.h"
#include "avio.h"
#include "avio_internal.h"
#include "internal.h"
#include "riff.h"
 
#define RF64_AUTO (-1)
#define RF64_NEVER 0
#define RF64_ALWAYS 1
 
/* Shared private context of the WAV and W64 muxers. */
typedef struct WAVMuxContext {
    const AVClass *class;
    int64_t data;          // offset of the data chunk (for size patching)
    int64_t fact_pos;      // offset of the fact chunk, if written
    int64_t ds64;          // offset of the ds64/JUNK payload (RF64)
    int64_t minpts;        // smallest pts seen
    int64_t maxpts;        // largest pts seen
    int last_duration;     // duration of the last packet written
    int write_bext;        // AVOption: emit a BWF bext chunk
    int rf64;              // AVOption: RF64_NEVER / RF64_AUTO / RF64_ALWAYS
} WAVMuxContext;
 
#if CONFIG_WAV_MUXER
/**
 * Write one fixed-width bext string field: the metadata value truncated
 * to @p maxlen bytes, zero-padded to exactly @p maxlen.
 *
 * Fix: parenthesize the assignment used as a condition — the bare
 * `if (tag = ...)` form is a classic =/== typo hazard and trips
 * -Wparentheses; behavior is unchanged.
 */
static inline void bwf_write_bext_string(AVFormatContext *s, const char *key, int maxlen)
{
    AVDictionaryEntry *tag;
    int len = 0;

    if ((tag = av_dict_get(s->metadata, key, NULL, 0))) {
        len = strlen(tag->value);
        len = FFMIN(len, maxlen);
        avio_write(s->pb, tag->value, len);
    }

    ffio_fill(s->pb, 0, maxlen - len);
}
 
/**
 * Write a Broadcast Wave Format 'bext' chunk from the context metadata
 * (the inverse of the demuxer's wav_parse_bext_tag).
 *
 * Fixes: time_reference and the UMID parts are unsigned 64-bit values;
 * strtoll() clamps anything above INT64_MAX to INT64_MAX, corrupting
 * values with the high bit set — use strtoull(). The hex scratch buffer
 * is plain char so it matches strtoull()'s parameter type, and the
 * assignments-in-if are parenthesized.
 */
static void bwf_write_bext_chunk(AVFormatContext *s)
{
    AVDictionaryEntry *tmp_tag;
    uint64_t time_reference = 0;
    int64_t bext = ff_start_tag(s->pb, "bext");

    bwf_write_bext_string(s, "description", 256);
    bwf_write_bext_string(s, "originator", 32);
    bwf_write_bext_string(s, "originator_reference", 32);
    bwf_write_bext_string(s, "origination_date", 10);
    bwf_write_bext_string(s, "origination_time", 8);

    if ((tmp_tag = av_dict_get(s->metadata, "time_reference", NULL, 0)))
        time_reference = strtoull(tmp_tag->value, NULL, 10);
    avio_wl64(s->pb, time_reference);
    avio_wl16(s->pb, 1); // set version to 1

    if ((tmp_tag = av_dict_get(s->metadata, "umid", NULL, 0))) {
        /* value is "0x" followed by 16 hex digits per 64-bit part */
        char umidpart_str[17] = {0};
        int i;
        uint64_t umidpart;
        int len = strlen(tmp_tag->value + 2);

        for (i = 0; i < len / 16; i++) {
            memcpy(umidpart_str, tmp_tag->value + 2 + (i * 16), 16);
            umidpart = strtoull(umidpart_str, NULL, 16);
            avio_wb64(s->pb, umidpart);
        }
        /* pad the UMID field to its fixed 64-byte size */
        ffio_fill(s->pb, 0, 64 - i * 8);
    } else
        ffio_fill(s->pb, 0, 64); // zero UMID

    ffio_fill(s->pb, 0, 190); // Reserved

    if ((tmp_tag = av_dict_get(s->metadata, "coding_history", NULL, 0)))
        avio_put_str(s->pb, tmp_tag->value);

    ff_end_tag(s->pb, bext);
}
 
/**
 * Write the RIFF/RF64 header, the fmt chunk, optional fact/bext/INFO
 * chunks, and open the data chunk.  Size fields that are unknown at this
 * point are written as placeholders and patched in wav_write_trailer().
 */
static int wav_write_header(AVFormatContext *s)
{
    WAVMuxContext *wav = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t fmt;

    if (wav->rf64 == RF64_ALWAYS) {
        ffio_wfourcc(pb, "RF64");
        avio_wl32(pb, -1); /* RF64 chunk size: use size in ds64 */
    } else {
        ffio_wfourcc(pb, "RIFF");
        avio_wl32(pb, 0); /* file length */
    }

    ffio_wfourcc(pb, "WAVE");

    if (wav->rf64 != RF64_NEVER) {
        /* write empty ds64 chunk or JUNK chunk to reserve space for ds64 */
        ffio_wfourcc(pb, wav->rf64 == RF64_ALWAYS ? "ds64" : "JUNK");
        avio_wl32(pb, 28); /* chunk size */
        wav->ds64 = avio_tell(pb);
        ffio_fill(pb, 0, 28);
    }

    /* format header */
    fmt = ff_start_tag(pb, "fmt ");
    if (ff_put_wav_header(pb, s->streams[0]->codec) < 0) {
        av_log(s, AV_LOG_ERROR, "%s codec not supported in WAVE format\n",
               s->streams[0]->codec->codec ? s->streams[0]->codec->codec->name : "NONE");
        return -1;
    }
    ff_end_tag(pb, fmt);

    if (s->streams[0]->codec->codec_tag != 0x01 /* hence for all other than PCM */
        && s->pb->seekable) {
        /* placeholder sample count, patched in the trailer */
        wav->fact_pos = ff_start_tag(pb, "fact");
        avio_wl32(pb, 0);
        ff_end_tag(pb, wav->fact_pos);
    }

    if (wav->write_bext)
        bwf_write_bext_chunk(s);

    /* timestamps are in samples */
    avpriv_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);
    wav->maxpts = wav->last_duration = 0;
    wav->minpts = INT64_MAX;

    /* info header */
    ff_riff_write_info(s);

    /* data header */
    wav->data = ff_start_tag(pb, "data");

    avio_flush(pb);

    return 0;
}
 
/* Append raw packet data to the data chunk and track the pts range so the
 * trailer can compute the total sample count. */
static int wav_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    WAVMuxContext *wav = s->priv_data;
    AVIOContext *pb = s->pb;

    avio_write(pb, pkt->data, pkt->size);

    if (pkt->pts == AV_NOPTS_VALUE) {
        av_log(s, AV_LOG_ERROR, "wav_write_packet: NOPTS\n");
        return 0;
    }

    wav->minpts        = FFMIN(wav->minpts, pkt->pts);
    wav->maxpts        = FFMAX(wav->maxpts, pkt->pts);
    wav->last_duration = pkt->duration;
    return 0;
}
 
/**
 * Patch the header placeholders: overall RIFF size, data chunk size, fact
 * sample count — and, when the file outgrew 32 bits (or RF64 was forced),
 * rewrite the header as RF64 with a filled-in ds64 chunk.
 */
static int wav_write_trailer(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    WAVMuxContext *wav = s->priv_data;
    int64_t file_size, data_size;
    int64_t number_of_samples = 0;
    int rf64 = 0;

    avio_flush(pb);

    if (s->pb->seekable) {
        /* update file size */
        file_size = avio_tell(pb);
        data_size = file_size - wav->data;
        if (wav->rf64 == RF64_ALWAYS || (wav->rf64 == RF64_AUTO && file_size - 8 > UINT32_MAX)) {
            rf64 = 1;
        } else {
            /* plain RIFF: 32-bit sizes fit, patch them directly */
            avio_seek(pb, 4, SEEK_SET);
            avio_wl32(pb, (uint32_t)(file_size - 8));
            avio_seek(pb, file_size, SEEK_SET);

            ff_end_tag(pb, wav->data);
            avio_flush(pb);
        }

        /* total samples = pts span plus the final packet's duration */
        number_of_samples = av_rescale(wav->maxpts - wav->minpts + wav->last_duration,
                                       s->streams[0]->codec->sample_rate * (int64_t)s->streams[0]->time_base.num,
                                       s->streams[0]->time_base.den);

        if(s->streams[0]->codec->codec_tag != 0x01) {
            /* Update num_samps in fact chunk */
            avio_seek(pb, wav->fact_pos, SEEK_SET);
            if (rf64 || (wav->rf64 == RF64_AUTO && number_of_samples > UINT32_MAX)) {
                /* real count doesn't fit; -1 defers to the ds64 chunk */
                rf64 = 1;
                avio_wl32(pb, -1);
            } else {
                avio_wl32(pb, number_of_samples);
                avio_seek(pb, file_size, SEEK_SET);
                avio_flush(pb);
            }
        }

        if (rf64) {
            /* overwrite RIFF with RF64 */
            avio_seek(pb, 0, SEEK_SET);
            ffio_wfourcc(pb, "RF64");
            avio_wl32(pb, -1);

            /* write ds64 chunk (overwrite JUNK if rf64 == RF64_AUTO) */
            avio_seek(pb, wav->ds64 - 8, SEEK_SET);
            ffio_wfourcc(pb, "ds64");
            avio_wl32(pb, 28);                  /* ds64 chunk size */
            avio_wl64(pb, file_size - 8);       /* RF64 chunk size */
            avio_wl64(pb, data_size);           /* data chunk size */
            avio_wl64(pb, number_of_samples);   /* fact chunk number of samples */
            avio_wl32(pb, 0);                   /* number of table entries for non-'data' chunks */

            /* write -1 in data chunk size */
            avio_seek(pb, wav->data - 4, SEEK_SET);
            avio_wl32(pb, -1);

            avio_seek(pb, file_size, SEEK_SET);
            avio_flush(pb);
        }
    }
    return 0;
}
 
#define OFFSET(x) offsetof(WAVMuxContext, x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
/* User-visible muxer options (AVOption table). */
static const AVOption options[] = {
    { "write_bext", "Write BEXT chunk.", OFFSET(write_bext), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, ENC },
    { "rf64", "Use RF64 header rather than RIFF for large files.", OFFSET(rf64), AV_OPT_TYPE_INT, { .i64 = RF64_NEVER },-1, 1, ENC, "rf64" },
    { "auto", "Write RF64 header if file grows large enough.", 0, AV_OPT_TYPE_CONST, { .i64 = RF64_AUTO }, 0, 0, ENC, "rf64" },
    { "always", "Always write RF64 header regardless of file size.", 0, AV_OPT_TYPE_CONST, { .i64 = RF64_ALWAYS }, 0, 0, ENC, "rf64" },
    { "never", "Never write RF64 header regardless of file size.", 0, AV_OPT_TYPE_CONST, { .i64 = RF64_NEVER }, 0, 0, ENC, "rf64" },
    { NULL },
};
 
/* AVClass exposing the muxer options through the AVOption API. */
static const AVClass wav_muxer_class = {
    .class_name = "WAV muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Muxer registration for RIFF/RF64 WAV files. */
AVOutputFormat ff_wav_muxer = {
    .name              = "wav",
    .long_name         = NULL_IF_CONFIG_SMALL("WAV / WAVE (Waveform Audio)"),
    .mime_type         = "audio/x-wav",
    .extensions        = "wav",
    .priv_data_size    = sizeof(WAVMuxContext),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_NONE,
    .write_header      = wav_write_header,
    .write_packet      = wav_write_packet,
    .write_trailer     = wav_write_trailer,
    .flags             = AVFMT_TS_NONSTRICT,
    .codec_tag         = (const AVCodecTag* const []){ ff_codec_wav_tags, 0 },
    .priv_class        = &wav_muxer_class,
};
#endif /* CONFIG_WAV_MUXER */
 
#if CONFIG_W64_MUXER
#include "w64.h"
 
/* Begin a Wave64 chunk: record its start offset for end_guid(), write the
 * 16-byte GUID and a placeholder 64-bit size. */
static void start_guid(AVIOContext *pb, const uint8_t *guid, int64_t *pos)
{
    int64_t start = avio_tell(pb);

    *pos = start;
    avio_write(pb, guid, 16);
    avio_wl64(pb, INT64_MAX); /* size patched by end_guid() */
}
 
/* Close a Wave64 chunk opened by start_guid(): pad to the required 8-byte
 * boundary, then patch the size field (which includes the 24-byte header). */
static void end_guid(AVIOContext *pb, int64_t start)
{
    int64_t pos     = avio_tell(pb);
    int64_t aligned = FFALIGN(pos, 8);

    ffio_fill(pb, 0, aligned - pos);
    avio_seek(pb, start + 16, SEEK_SET);
    avio_wl64(pb, aligned - start);
    avio_seek(pb, aligned, SEEK_SET);
}
 
/**
 * Write the Wave64 header: riff/wave GUIDs, fmt chunk, optional fact
 * placeholder, then open the data chunk.  Sizes are patched in
 * w64_write_trailer().
 */
static int w64_write_header(AVFormatContext *s)
{
    WAVMuxContext *wav = s->priv_data;
    AVIOContext *pb = s->pb;
    int64_t start;
    int ret;

    avio_write(pb, ff_w64_guid_riff, sizeof(ff_w64_guid_riff));
    avio_wl64(pb, -1); /* overall size, patched in the trailer */
    avio_write(pb, ff_w64_guid_wave, sizeof(ff_w64_guid_wave));
    start_guid(pb, ff_w64_guid_fmt, &start);
    if ((ret = ff_put_wav_header(pb, s->streams[0]->codec)) < 0) {
        av_log(s, AV_LOG_ERROR, "%s codec not supported\n",
               s->streams[0]->codec->codec ? s->streams[0]->codec->codec->name : "NONE");
        return ret;
    }
    end_guid(pb, start);

    if (s->streams[0]->codec->codec_tag != 0x01 /* hence for all other than PCM */
        && s->pb->seekable) {
        /* placeholder sample count, patched in the trailer */
        start_guid(pb, ff_w64_guid_fact, &wav->fact_pos);
        avio_wl64(pb, 0);
        end_guid(pb, wav->fact_pos);
    }

    start_guid(pb, ff_w64_guid_data, &wav->data);

    return 0;
}
 
/**
 * Finalize a Wave64 file on seekable output: close the 'data' chunk,
 * patch the total file size after the RIFF GUID and, for non-PCM
 * codecs, write the sample count into the 'fact' chunk.
 */
static int w64_write_trailer(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    WAVMuxContext *wav = s->priv_data;
    int64_t file_size;

    if (pb->seekable) {
        end_guid(pb, wav->data);

        /* patch the 64-bit file size right after the 16-byte RIFF GUID */
        file_size = avio_tell(pb);
        avio_seek(pb, 16, SEEK_SET);
        avio_wl64(pb, file_size);

        if (s->streams[0]->codec->codec_tag != 0x01) {
            int64_t number_of_samples;

            /* convert the muxed duration from the stream time base to
             * a sample count for the 'fact' chunk */
            number_of_samples = av_rescale(wav->maxpts - wav->minpts + wav->last_duration,
                                           s->streams[0]->codec->sample_rate * (int64_t)s->streams[0]->time_base.num,
                                           s->streams[0]->time_base.den);
            avio_seek(pb, wav->fact_pos + 24, SEEK_SET);
            avio_wl64(pb, number_of_samples);
        }

        avio_seek(pb, file_size, SEEK_SET);
        avio_flush(pb);
    }

    return 0;
}
 
/* Sony Wave64 muxer registration; shares WAVMuxContext and
 * wav_write_packet with the WAV muxer above. */
AVOutputFormat ff_w64_muxer = {
    .name              = "w64",
    .long_name         = NULL_IF_CONFIG_SMALL("Sony Wave64"),
    .extensions        = "w64",
    .priv_data_size    = sizeof(WAVMuxContext),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_NONE,
    .write_header      = w64_write_header,
    .write_packet      = wav_write_packet,
    .write_trailer     = w64_write_trailer,
    .flags             = AVFMT_TS_NONSTRICT,
    .codec_tag         = (const AVCodecTag* const []){ ff_codec_wav_tags, 0 },
};
#endif /* CONFIG_W64_MUXER */
/contrib/sdk/sources/ffmpeg/libavformat/wc3movie.c
0,0 → 1,304
/*
* Wing Commander III Movie (.mve) File Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Wing Commander III Movie file demuxer
* by Mike Melanson (melanson@pcisys.net)
* for more information on the WC3 .mve file format, visit:
* http://www.pcisys.net/~melanson/codecs/
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "avformat.h"
#include "internal.h"
 
#define FORM_TAG MKTAG('F', 'O', 'R', 'M')
#define MOVE_TAG MKTAG('M', 'O', 'V', 'E')
#define PC__TAG MKTAG('_', 'P', 'C', '_')
#define SOND_TAG MKTAG('S', 'O', 'N', 'D')
#define BNAM_TAG MKTAG('B', 'N', 'A', 'M')
#define SIZE_TAG MKTAG('S', 'I', 'Z', 'E')
#define PALT_TAG MKTAG('P', 'A', 'L', 'T')
#define INDX_TAG MKTAG('I', 'N', 'D', 'X')
#define BRCH_TAG MKTAG('B', 'R', 'C', 'H')
#define SHOT_TAG MKTAG('S', 'H', 'O', 'T')
#define VGA__TAG MKTAG('V', 'G', 'A', ' ')
#define TEXT_TAG MKTAG('T', 'E', 'X', 'T')
#define AUDI_TAG MKTAG('A', 'U', 'D', 'I')
 
/* video resolution unless otherwise specified */
#define WC3_DEFAULT_WIDTH 320
#define WC3_DEFAULT_HEIGHT 165
 
/* always use the same PCM audio parameters */
#define WC3_SAMPLE_RATE 22050
#define WC3_AUDIO_CHANNELS 1
#define WC3_AUDIO_BITS 16
 
/* nice, constant framerate */
#define WC3_FRAME_FPS 15
 
#define PALETTE_SIZE (256 * 3)
 
/* Per-file state for the WC3 .mve demuxer. */
typedef struct Wc3DemuxContext {
    int width;               /* video width; SIZE chunk may override default */
    int height;              /* video height; SIZE chunk may override default */
    int64_t pts;             /* current pts, advanced once per audio chunk */
    int video_stream_index;
    int audio_stream_index;

    /* pending video packet: palette/SHOT chunks are appended here until
     * the next VGA chunk completes the packet */
    AVPacket vpkt;

} Wc3DemuxContext;
 
/**
 * Probe for a WC3 movie: the file must begin with a FORM container
 * whose content type at offset 8 is MOVE.
 */
static int wc3_probe(AVProbeData *p)
{
    /* need FORM tag + 32-bit size + MOVE tag */
    if (p->buf_size < 12)
        return 0;

    if (AV_RL32(&p->buf[0]) == FORM_TAG &&
        AV_RL32(&p->buf[8]) == MOVE_TAG)
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/**
 * Parse the WC3 .mve header: skip the FORM/size/MOVE preamble, then walk
 * the header chunks (movie name, resolution override, palettes, ...) up
 * to the first BRCH chunk, and create the video and audio streams.
 *
 * PALT chunks are accumulated verbatim (including their 8-byte chunk
 * headers) into wc3->vpkt so the decoder receives them prepended to the
 * first video packet.
 *
 * Fix vs. original: the BNAM title buffer was leaked when avio_read()
 * returned short; it is now freed on that error path.
 */
static int wc3_read_header(AVFormatContext *s)
{
    Wc3DemuxContext *wc3 = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    AVStream *st;
    int ret = 0;
    char *buffer;

    /* default context members */
    wc3->width = WC3_DEFAULT_WIDTH;
    wc3->height = WC3_DEFAULT_HEIGHT;
    wc3->pts = 0;
    wc3->video_stream_index = wc3->audio_stream_index = 0;
    av_init_packet(&wc3->vpkt);
    wc3->vpkt.data = NULL; wc3->vpkt.size = 0;

    /* skip the first 3 32-bit numbers (FORM tag, file size, MOVE tag) */
    avio_skip(pb, 12);

    /* traverse through the chunks and load the header information before
     * the first BRCH tag */
    fourcc_tag = avio_rl32(pb);
    /* chunk sizes are 16-bit aligned */
    size = (avio_rb32(pb) + 1) & (~1);

    do {
        switch (fourcc_tag) {

        case SOND_TAG:
        case INDX_TAG:
            /* SOND unknown, INDX unnecessary; ignore both */
            avio_skip(pb, size);
            break;

        case PC__TAG:
            /* number of palettes, unneeded */
            avio_skip(pb, 12);
            break;

        case BNAM_TAG:
            /* load up the movie title; the dict takes ownership of the
             * buffer via AV_DICT_DONT_STRDUP_VAL */
            buffer = av_malloc(size+1);
            if (!buffer)
                return AVERROR(ENOMEM);
            if ((ret = avio_read(pb, buffer, size)) != size) {
                av_freep(&buffer); /* fix: do not leak on a short read */
                return AVERROR(EIO);
            }
            buffer[size] = 0;
            av_dict_set(&s->metadata, "title", buffer,
                        AV_DICT_DONT_STRDUP_VAL);
            break;

        case SIZE_TAG:
            /* video resolution override */
            wc3->width  = avio_rl32(pb);
            wc3->height = avio_rl32(pb);
            break;

        case PALT_TAG:
            /* one of several palettes: rewind so the 8-byte chunk header
             * is kept with the palette data for the decoder */
            avio_seek(pb, -8, SEEK_CUR);
            av_append_packet(pb, &wc3->vpkt, 8 + PALETTE_SIZE);
            break;

        default:
            av_log(s, AV_LOG_ERROR, " unrecognized WC3 chunk: %c%c%c%c (0x%02X%02X%02X%02X)\n",
                (uint8_t)fourcc_tag, (uint8_t)(fourcc_tag >> 8), (uint8_t)(fourcc_tag >> 16), (uint8_t)(fourcc_tag >> 24),
                (uint8_t)fourcc_tag, (uint8_t)(fourcc_tag >> 8), (uint8_t)(fourcc_tag >> 16), (uint8_t)(fourcc_tag >> 24));
            return AVERROR_INVALIDDATA;
        }

        fourcc_tag = avio_rl32(pb);
        /* chunk sizes are 16-bit aligned */
        size = (avio_rb32(pb) + 1) & (~1);
        if (url_feof(pb))
            return AVERROR(EIO);

    } while (fourcc_tag != BRCH_TAG);

    /* initialize the decoder streams */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 33, 1, WC3_FRAME_FPS);
    wc3->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_XAN_WC3;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = wc3->width;
    st->codec->height = wc3->height;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 33, 1, WC3_FRAME_FPS);
    wc3->audio_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
    st->codec->codec_tag = 1;
    st->codec->channels = WC3_AUDIO_CHANNELS;
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    st->codec->bits_per_coded_sample = WC3_AUDIO_BITS;
    st->codec->sample_rate = WC3_SAMPLE_RATE;
    st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
        st->codec->bits_per_coded_sample;
    st->codec->block_align = WC3_AUDIO_BITS * WC3_AUDIO_CHANNELS;

    return 0;
}
 
/**
 * Demux one packet: walk chunks until a video (VGA_) or audio (AUDI)
 * chunk yields a packet. SHOT palette-change chunks are folded into the
 * pending video packet; TEXT subtitle chunks are only dumped to the
 * debug log; BRCH chunks are ignored.
 */
static int wc3_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    Wc3DemuxContext *wc3 = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int packet_read = 0;
    int ret = 0;
    unsigned char text[1024];

    while (!packet_read) {

        fourcc_tag = avio_rl32(pb);
        /* chunk sizes are 16-bit aligned */
        size = (avio_rb32(pb) + 1) & (~1);
        if (url_feof(pb))
            return AVERROR(EIO);

        switch (fourcc_tag) {

        case BRCH_TAG:
            /* no-op */
            break;

        case SHOT_TAG:
            /* load up new palette: rewind so the 8-byte chunk header plus
             * the 4-byte palette index are appended to the pending packet */
            avio_seek(pb, -8, SEEK_CUR);
            av_append_packet(pb, &wc3->vpkt, 8 + 4);
            break;

        case VGA__TAG:
            /* send out video chunk, appended after any palette data
             * accumulated since the previous video packet */
            avio_seek(pb, -8, SEEK_CUR);
            ret= av_append_packet(pb, &wc3->vpkt, 8 + size);
            // ignore error if we have some data
            if (wc3->vpkt.size > 0)
                ret = 0;
            *pkt = wc3->vpkt; /* ownership moves to the caller's packet */
            wc3->vpkt.data = NULL; wc3->vpkt.size = 0;
            pkt->stream_index = wc3->video_stream_index;
            pkt->pts = wc3->pts;
            packet_read = 1;
            break;

        case TEXT_TAG:
            /* subtitle chunk: three length-prefixed strings back to back
             * (English, German, French); not exported as a stream */
#if 0
            avio_skip(pb, size);
#else
            if ((unsigned)size > sizeof(text) || (ret = avio_read(pb, text, size)) != size)
                ret = AVERROR(EIO);
            else {
                int i = 0;
                av_log (s, AV_LOG_DEBUG, "Subtitle time!\n");
                av_log (s, AV_LOG_DEBUG, " inglish: %s\n", &text[i + 1]);
                i += text[i] + 1;
                av_log (s, AV_LOG_DEBUG, " doytsch: %s\n", &text[i + 1]);
                i += text[i] + 1;
                av_log (s, AV_LOG_DEBUG, " fronsay: %s\n", &text[i + 1]);
            }
#endif
            break;

        case AUDI_TAG:
            /* send out audio chunk */
            ret= av_get_packet(pb, pkt, size);
            pkt->stream_index = wc3->audio_stream_index;
            pkt->pts = wc3->pts;

            /* time to advance pts: audio chunks pace the timeline, one
             * pts tick (1/WC3_FRAME_FPS) per audio chunk */
            wc3->pts++;

            packet_read = 1;
            break;

        default:
            av_log (s, AV_LOG_ERROR, " unrecognized WC3 chunk: %c%c%c%c (0x%02X%02X%02X%02X)\n",
                (uint8_t)fourcc_tag, (uint8_t)(fourcc_tag >> 8), (uint8_t)(fourcc_tag >> 16), (uint8_t)(fourcc_tag >> 24),
                (uint8_t)fourcc_tag, (uint8_t)(fourcc_tag >> 8), (uint8_t)(fourcc_tag >> 16), (uint8_t)(fourcc_tag >> 24));
            ret = AVERROR_INVALIDDATA;
            packet_read = 1;
            break;
        }
    }

    return ret;
}
 
/**
 * Release demuxer resources: any partially accumulated video packet
 * (palette data waiting for its VGA chunk) is freed here.
 */
static int wc3_read_close(AVFormatContext *s)
{
    Wc3DemuxContext *ctx = s->priv_data;

    if (ctx->vpkt.size > 0)
        av_free_packet(&ctx->vpkt);

    return 0;
}
 
/* Wing Commander III movie demuxer registration. */
AVInputFormat ff_wc3_demuxer = {
    .name           = "wc3movie",
    .long_name      = NULL_IF_CONFIG_SMALL("Wing Commander III movie"),
    .priv_data_size = sizeof(Wc3DemuxContext),
    .read_probe     = wc3_probe,
    .read_header    = wc3_read_header,
    .read_packet    = wc3_read_packet,
    .read_close     = wc3_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/webvttdec.c
0,0 → 1,223
/*
* Copyright (c) 2012 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* WebVTT subtitle demuxer
* @see http://dev.w3.org/html5/webvtt/
*/
 
#include "avformat.h"
#include "internal.h"
#include "subtitles.h"
#include "libavutil/bprint.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
 
/* Per-file state for the WebVTT demuxer. */
typedef struct {
    const AVClass *class;
    FFDemuxSubtitlesQueue q;  /* all cues, parsed up front in read_header */
    int kind;                 /* AV_DISPOSITION_* value from the "kind" option */
} WebVTTContext;
 
/**
 * Probe for WebVTT: the file must start with "WEBVTT" (optionally after
 * a UTF-8 BOM), followed by end-of-string or whitespace.
 */
static int webvtt_probe(AVProbeData *p)
{
    const uint8_t *ptr = p->buf;

    /* tolerate a UTF-8 byte-order mark before the magic */
    if (AV_RB24(ptr) == 0xEFBBBF)
        ptr += 3;

    if (strncmp(ptr, "WEBVTT", 6))
        return 0;
    /* the magic must not be a prefix of a longer word */
    if (ptr[6] && !strchr("\n\r\t ", ptr[6]))
        return 0;

    return AVPROBE_SCORE_MAX;
}
 
/**
 * Parse a WebVTT timestamp of the form "hh:mm:ss.mmm" or "mm:ss.mmm"
 * into milliseconds; returns AV_NOPTS_VALUE when neither form matches.
 */
static int64_t read_ts(const char *s)
{
    int hh, mm, ss, ms;

    if (sscanf(s, "%u:%u:%u.%u", &hh, &mm, &ss, &ms) == 4)
        return (hh * 3600LL + mm * 60LL + ss) * 1000LL + ms;
    if (sscanf(s, "%u:%u.%u", &mm, &ss, &ms) == 3)
        return (mm * 60LL + ss) * 1000LL + ms;
    return AV_NOPTS_VALUE;
}
 
/**
 * Parse the entire WebVTT file up front: each cue chunk is split into
 * (identifier, timing line, settings, payload), converted to a packet
 * and inserted into the subtitle queue drained by webvtt_read_packet().
 * Identifier and settings travel as packet side data.
 */
static int webvtt_read_header(AVFormatContext *s)
{
    WebVTTContext *webvtt = s->priv_data;
    AVBPrint header, cue;
    int res = 0;
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1000); /* timestamps in milliseconds */
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id = AV_CODEC_ID_WEBVTT;
    st->disposition |= webvtt->kind; /* captions/descriptions/metadata flag */

    av_bprint_init(&header, 0, AV_BPRINT_SIZE_UNLIMITED);
    av_bprint_init(&cue, 0, AV_BPRINT_SIZE_UNLIMITED);

    for (;;) {
        int i;
        int64_t pos;
        AVPacket *sub;
        const char *p, *identifier, *settings;
        int identifier_len, settings_len;
        int64_t ts_start, ts_end;

        /* read one blank-line-separated chunk (the header or one cue) */
        ff_subtitles_read_chunk(s->pb, &cue);

        if (!cue.len)
            break;

        p = identifier = cue.str;
        pos = avio_tell(s->pb);

        /* ignore header chunk (with or without a UTF-8 BOM) */
        if (!strncmp(p, "\xEF\xBB\xBFWEBVTT", 9) ||
            !strncmp(p, "WEBVTT", 6))
            continue;

        /* optional cue identifier (can be a number like in SRT or some kind of
         * chaptering id); a first line containing "-->" is the timing line,
         * meaning there is no identifier */
        for (i = 0; p[i] && p[i] != '\n' && p[i] != '\r'; i++) {
            if (!strncmp(p + i, "-->", 3)) {
                identifier = NULL;
                break;
            }
        }
        if (!identifier)
            identifier_len = 0;
        else {
            identifier_len = strcspn(p, "\r\n");
            p += identifier_len;
            if (*p == '\r')
                p++;
            if (*p == '\n')
                p++;
        }

        /* cue timestamps; a chunk with invalid timing ends the parse */
        if ((ts_start = read_ts(p)) == AV_NOPTS_VALUE)
            break;
        if (!(p = strstr(p, "-->")))
            break;
        p += 3;
        do p++; while (*p == ' ' || *p == '\t');
        if ((ts_end = read_ts(p)) == AV_NOPTS_VALUE)
            break;

        /* optional cue settings trailing on the timing line */
        p += strcspn(p, "\n\t ");
        while (*p == '\t' || *p == ' ')
            p++;
        settings = p;
        settings_len = strcspn(p, "\r\n");
        p += settings_len;
        if (*p == '\r')
            p++;
        if (*p == '\n')
            p++;

        /* create packet; the rest of the chunk is the cue payload */
        sub = ff_subtitles_queue_insert(&webvtt->q, p, strlen(p), 0);
        if (!sub) {
            res = AVERROR(ENOMEM);
            goto end;
        }
        sub->pos = pos;
        sub->pts = ts_start;
        sub->duration = ts_end - ts_start;

/* attach identifier/settings to the packet as side data, if present */
#define SET_SIDE_DATA(name, type) do { \
    if (name##_len) { \
        uint8_t *buf = av_packet_new_side_data(sub, type, name##_len); \
        if (!buf) { \
            res = AVERROR(ENOMEM); \
            goto end; \
        } \
        memcpy(buf, name, name##_len); \
    } \
} while (0)

        SET_SIDE_DATA(identifier, AV_PKT_DATA_WEBVTT_IDENTIFIER);
        SET_SIDE_DATA(settings, AV_PKT_DATA_WEBVTT_SETTINGS);
    }

    ff_subtitles_queue_finalize(&webvtt->q);

end:
    av_bprint_finalize(&cue, NULL);
    av_bprint_finalize(&header, NULL);
    return res;
}
 
/* Cues were fully parsed in read_header; just pop the next one. */
static int webvtt_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    WebVTTContext *ctx = s->priv_data;

    return ff_subtitles_queue_read_packet(&ctx->q, pkt);
}
 
/* Seeking is delegated to the generic pre-parsed subtitle queue. */
static int webvtt_read_seek(AVFormatContext *s, int stream_index,
                            int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    WebVTTContext *ctx = s->priv_data;

    return ff_subtitles_queue_seek(&ctx->q, s, stream_index,
                                   min_ts, ts, max_ts, flags);
}
 
/* Free every queued cue packet. */
static int webvtt_read_close(AVFormatContext *s)
{
    WebVTTContext *ctx = s->priv_data;

    ff_subtitles_queue_clean(&ctx->q);
    return 0;
}
 
#define OFFSET(x) offsetof(WebVTTContext, x)
#define KIND_FLAGS AV_OPT_FLAG_SUBTITLE_PARAM
 
/* "kind" option: maps a WebVTT track kind onto AV_DISPOSITION_* flags
 * that are OR-ed into the stream disposition in read_header. */
static const AVOption options[] = {
    { "kind", "Set kind of WebVTT track", OFFSET(kind), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, KIND_FLAGS, "webvtt_kind" },
    { "subtitles", "WebVTT subtitles kind", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, 0, "webvtt_kind" },
    { "captions", "WebVTT captions kind", 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, INT_MIN, INT_MAX, 0, "webvtt_kind" },
    { "descriptions", "WebVTT descriptions kind", 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, INT_MIN, INT_MAX, 0, "webvtt_kind" },
    { "metadata", "WebVTT metadata kind", 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, INT_MIN, INT_MAX, 0, "webvtt_kind" },
    { NULL }
};
 
/* AVClass exposing the demuxer's "kind" private option. */
static const AVClass webvtt_demuxer_class = {
    .class_name = "WebVTT demuxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* WebVTT demuxer registration. */
AVInputFormat ff_webvtt_demuxer = {
    .name           = "webvtt",
    .long_name      = NULL_IF_CONFIG_SMALL("WebVTT subtitle"),
    .priv_data_size = sizeof(WebVTTContext),
    .read_probe     = webvtt_probe,
    .read_header    = webvtt_read_header,
    .read_packet    = webvtt_read_packet,
    .read_seek2     = webvtt_read_seek,
    .read_close     = webvtt_read_close,
    .extensions     = "vtt",
    .priv_class     = &webvtt_demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavformat/webvttenc.c
0,0 → 1,99
/*
* Copyright (c) 2013 Matthew Heaney
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* WebVTT subtitle muxer
* @see http://dev.w3.org/html5/webvtt/
*/
 
#include "avformat.h"
#include "internal.h"
 
/**
 * Print a timestamp in WebVTT form "[h+:]mm:ss.mmm".
 * The hour field is only written when non-zero, matching the spec's
 * optional-hours syntax.
 */
static void webvtt_write_time(AVIOContext *pb, int64_t millisec)
{
    int64_t total_sec = millisec / 1000;
    int64_t ms   = millisec % 1000;
    int64_t sec  = total_sec % 60;
    int64_t min  = (total_sec / 60) % 60;
    int64_t hour = total_sec / 3600;

    if (hour > 0)
        avio_printf(pb, "%"PRId64":", hour);

    avio_printf(pb, "%02"PRId64":%02"PRId64".%03"PRId64"", min, sec, ms);
}
 
/**
 * Emit the mandatory "WEBVTT" magic line and set the stream time base
 * to milliseconds, the unit used by webvtt_write_time().
 */
static int webvtt_write_header(AVFormatContext *ctx)
{
    AVIOContext *pb  = ctx->pb;
    AVStream *stream = ctx->streams[0];

    avpriv_set_pts_info(stream, 64, 1, 1000);

    avio_printf(pb, "WEBVTT\n");
    avio_flush(pb);

    return 0;
}
 
/**
 * Write one cue: a blank separator line, the optional identifier line
 * (from side data), the "start --> end[ settings]" timing line, and the
 * cue payload.
 */
static int webvtt_write_packet(AVFormatContext *ctx, AVPacket *pkt)
{
    AVIOContext *pb = ctx->pb;
    uint8_t *ident, *opts;
    int ident_size, opts_size;

    /* blank line separates consecutive cues */
    avio_printf(pb, "\n");

    /* optional cue identifier line */
    ident = av_packet_get_side_data(pkt, AV_PKT_DATA_WEBVTT_IDENTIFIER,
                                    &ident_size);
    if (ident && ident_size > 0)
        avio_printf(pb, "%.*s\n", ident_size, ident);

    /* timing line */
    webvtt_write_time(pb, pkt->pts);
    avio_printf(pb, " --> ");
    webvtt_write_time(pb, pkt->pts + pkt->duration);

    /* optional cue settings appended to the timing line */
    opts = av_packet_get_side_data(pkt, AV_PKT_DATA_WEBVTT_SETTINGS,
                                   &opts_size);
    if (opts && opts_size > 0)
        avio_printf(pb, " %.*s", opts_size, opts);

    avio_printf(pb, "\n");

    /* cue payload */
    avio_write(pb, pkt->data, pkt->size);
    avio_printf(pb, "\n");

    return 0;
}
 
/* WebVTT muxer registration (subtitle-only format). */
AVOutputFormat ff_webvtt_muxer = {
    .name           = "webvtt",
    .long_name      = NULL_IF_CONFIG_SMALL("WebVTT subtitle"),
    .extensions     = "vtt",
    .mime_type      = "text/vtt",
    .subtitle_codec = AV_CODEC_ID_WEBVTT,
    .write_header   = webvtt_write_header,
    .write_packet   = webvtt_write_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/westwood_aud.c
0,0 → 1,181
/*
* Westwood Studios AUD Format Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Westwood Studios AUD file demuxer
* by Mike Melanson (melanson@pcisys.net)
* for more information on the Westwood file formats, visit:
* http://www.pcisys.net/~melanson/codecs/
* http://www.geocities.com/SiliconValley/8682/aud3.txt
*
* Implementation note: There is no definite file signature for AUD files.
* The demuxer uses a probabilistic strategy for content detection. This
* entails performing sanity checks on certain header values in order to
* qualify a file. Refer to wsaud_probe() for the precise parameters.
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define AUD_HEADER_SIZE 12
#define AUD_CHUNK_PREAMBLE_SIZE 8
#define AUD_CHUNK_SIGNATURE 0x0000DEAF
 
/**
 * Probe for Westwood AUD. There is no file signature, so several header
 * fields are sanity-checked instead:
 *   8000 <= sample rate (16 bits) <= 48000   ==> 40001 acceptable numbers
 *   flags <= 0x03 (2 LSBs are used)          ==> 4 acceptable numbers
 *   compression type (8 bits) = 1 or 99      ==> 2 acceptable numbers
 *   first audio chunk signature (32 bits)    ==> 1 acceptable number
 * The number space contains 2^64 numbers; 40001 * 4 * 2 * 1 = 320008
 * combinations pass, so only medium confidence is returned.
 */
static int wsaud_probe(AVProbeData *p)
{
    int rate;

    if (p->buf_size < AUD_HEADER_SIZE + AUD_CHUNK_PREAMBLE_SIZE)
        return 0;

    /* sample rate must lie in a sane range */
    rate = AV_RL16(&p->buf[0]);
    if (rate < 8000 || rate > 48000)
        return 0;

    /* the top 6 bits of the flags byte are reserved and must be clear */
    if (p->buf[10] & 0xFC)
        return 0;

    /* compression type: 1 = Westwood SND1, 99 = IMA ADPCM */
    if (p->buf[11] != 1 && p->buf[11] != 99)
        return 0;

    /* the first chunk preamble must carry the 0x0000DEAF signature */
    if (AV_RL32(&p->buf[16]) != AUD_CHUNK_SIGNATURE)
        return 0;

    /* 1/2 certainty since this file check is a little sketchy */
    return AVPROBE_SCORE_EXTENSION;
}
 
/**
 * Parse the 12-byte AUD header and create the single audio stream.
 * Fields used here: bytes 0-1 sample rate (LE), byte 10 flags
 * (bit 0 = stereo), byte 11 compression type (1 = SND1, 99 = IMA ADPCM).
 */
static int wsaud_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVStream *st;
    unsigned char header[AUD_HEADER_SIZE];
    int sample_rate, channels, codec;

    if (avio_read(pb, header, AUD_HEADER_SIZE) != AUD_HEADER_SIZE)
        return AVERROR(EIO);

    sample_rate = AV_RL16(&header[0]);
    channels = (header[10] & 0x1) + 1;
    codec = header[11];

    /* initialize the audio decoder stream */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    switch (codec) {
    case 1:
        /* Westwood SND1 is only supported in mono */
        if (channels != 1) {
            avpriv_request_sample(s, "Stereo WS-SND1");
            return AVERROR_PATCHWELCOME;
        }
        st->codec->codec_id = AV_CODEC_ID_WESTWOOD_SND1;
        break;
    case 99:
        /* IMA ADPCM: 4 bits per coded sample */
        st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WS;
        st->codec->bits_per_coded_sample = 4;
        st->codec->bit_rate = channels * sample_rate * 4;
        break;
    default:
        avpriv_request_sample(s, "Unknown codec: %d", codec);
        return AVERROR_PATCHWELCOME;
    }
    /* pts are expressed in samples */
    avpriv_set_pts_info(st, 64, 1, sample_rate);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->channels = channels;
    st->codec->channel_layout = channels == 1 ? AV_CH_LAYOUT_MONO :
                                                AV_CH_LAYOUT_STEREO;
    st->codec->sample_rate = sample_rate;

    return 0;
}
 
/**
 * Read one AUD chunk and emit it as a packet. Each chunk is preceded by
 * an 8-byte preamble: 16-bit compressed size, 16-bit uncompressed size
 * and the 32-bit 0x0000DEAF signature.
 *
 * Fix vs. original: the freshly allocated packet was leaked when the
 * payload read came back short; it is now freed on those error paths.
 */
static int wsaud_read_packet(AVFormatContext *s,
                             AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    unsigned char preamble[AUD_CHUNK_PREAMBLE_SIZE];
    unsigned int chunk_size;
    int ret = 0;
    AVStream *st = s->streams[0];

    if (avio_read(pb, preamble, AUD_CHUNK_PREAMBLE_SIZE) !=
        AUD_CHUNK_PREAMBLE_SIZE)
        return AVERROR(EIO);

    /* validate the chunk */
    if (AV_RL32(&preamble[4]) != AUD_CHUNK_SIGNATURE)
        return AVERROR_INVALIDDATA;

    chunk_size = AV_RL16(&preamble[0]);

    if (st->codec->codec_id == AV_CODEC_ID_WESTWOOD_SND1) {
        /* For Westwood SND1 audio we need to add the output size and input
           size to the start of the packet to match what is in VQA.
           Specifically, this is needed to signal when a packet should be
           decoding as raw 8-bit pcm or variable-size ADPCM. */
        int out_size = AV_RL16(&preamble[2]);
        if ((ret = av_new_packet(pkt, chunk_size + 4)))
            return ret;
        if ((ret = avio_read(pb, &pkt->data[4], chunk_size)) != chunk_size) {
            av_free_packet(pkt); /* fix: don't leak the packet on short read */
            return ret < 0 ? ret : AVERROR(EIO);
        }
        AV_WL16(&pkt->data[0], out_size);
        AV_WL16(&pkt->data[2], chunk_size);

        /* SND1 duration is the decoded (output) sample count */
        pkt->duration = out_size;
    } else {
        ret = av_get_packet(pb, pkt, chunk_size);
        if (ret != chunk_size) {
            av_free_packet(pkt); /* fix: don't leak on a partial read */
            return AVERROR(EIO);
        }

        /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
        pkt->duration = (chunk_size * 2) / st->codec->channels;
    }
    pkt->stream_index = st->index;

    return ret;
}
 
/* Westwood Studios AUD demuxer registration (no private context). */
AVInputFormat ff_wsaud_demuxer = {
    .name        = "wsaud",
    .long_name   = NULL_IF_CONFIG_SMALL("Westwood Studios audio"),
    .read_probe  = wsaud_probe,
    .read_header = wsaud_read_header,
    .read_packet = wsaud_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/westwood_vqa.c
0,0 → 1,274
/*
* Westwood Studios VQA Format Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Westwood Studios VQA file demuxer
* by Mike Melanson (melanson@pcisys.net)
* for more information on the Westwood file formats, visit:
* http://www.pcisys.net/~melanson/codecs/
* http://www.geocities.com/SiliconValley/8682/aud3.txt
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define FORM_TAG MKBETAG('F', 'O', 'R', 'M')
#define WVQA_TAG MKBETAG('W', 'V', 'Q', 'A')
#define VQHD_TAG MKBETAG('V', 'Q', 'H', 'D')
#define FINF_TAG MKBETAG('F', 'I', 'N', 'F')
#define SND0_TAG MKBETAG('S', 'N', 'D', '0')
#define SND1_TAG MKBETAG('S', 'N', 'D', '1')
#define SND2_TAG MKBETAG('S', 'N', 'D', '2')
#define VQFR_TAG MKBETAG('V', 'Q', 'F', 'R')
 
/* don't know what these tags are for, but acknowledge their existence */
#define CINF_TAG MKBETAG('C', 'I', 'N', 'F')
#define CINH_TAG MKBETAG('C', 'I', 'N', 'H')
#define CIND_TAG MKBETAG('C', 'I', 'N', 'D')
#define PINF_TAG MKBETAG('P', 'I', 'N', 'F')
#define PINH_TAG MKBETAG('P', 'I', 'N', 'H')
#define PIND_TAG MKBETAG('P', 'I', 'N', 'D')
#define CMDS_TAG MKBETAG('C', 'M', 'D', 'S')
 
#define VQA_HEADER_SIZE 0x2A
#define VQA_PREAMBLE_SIZE 8
 
/* Per-file state for the VQA demuxer; audio parameters are stashed here
 * in read_header so the audio stream can be created lazily on the first
 * SNDx chunk in read_packet. */
typedef struct WsVqaDemuxContext {
    int version;             /* VQA version word, passed to ADPCM decoder */
    int bps;                 /* audio bits per sample (default 8) */
    int channels;            /* audio channel count (default 1) */
    int sample_rate;         /* audio sample rate (default 22050) */
    int audio_stream_index;  /* -1 until the audio stream is created */
    int video_stream_index;
} WsVqaDemuxContext;
 
/**
 * Probe for Westwood VQA: the file must start with a FORM container
 * whose content type at offset 8 is WVQA.
 */
static int wsvqa_probe(AVProbeData *p)
{
    /* need FORM tag + 32-bit size + WVQA tag */
    if (p->buf_size < 12)
        return 0;

    if (AV_RB32(&p->buf[0]) == FORM_TAG &&
        AV_RB32(&p->buf[8]) == WVQA_TAG)
        return AVPROBE_SCORE_MAX;

    return 0;
}
 
/**
 * Parse the VQA header: create the video stream, pass the raw 42-byte
 * VQA header to the decoder as extradata, extract frame rate, frame
 * count and the audio parameters, then skip chunks up to and including
 * FINF. The audio stream itself is created lazily in wsvqa_read_packet().
 */
static int wsvqa_read_header(AVFormatContext *s)
{
    WsVqaDemuxContext *wsvqa = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint8_t *header;
    uint8_t scratch[VQA_PREAMBLE_SIZE];
    uint32_t chunk_tag;
    uint32_t chunk_size;
    int fps;

    /* initialize the video decoder stream */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->start_time = 0;
    wsvqa->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_WS_VQA;
    st->codec->codec_tag = 0;  /* no fourcc */

    /* skip to the start of the VQA header */
    avio_seek(pb, 20, SEEK_SET);

    /* the VQA header needs to go to the decoder */
    if (ff_alloc_extradata(st->codec, VQA_HEADER_SIZE))
        return AVERROR(ENOMEM);
    header = (uint8_t *)st->codec->extradata;
    if (avio_read(pb, st->codec->extradata, VQA_HEADER_SIZE) !=
        VQA_HEADER_SIZE) {
        return AVERROR(EIO);
    }
    st->codec->width = AV_RL16(&header[6]);
    st->codec->height = AV_RL16(&header[8]);
    fps = header[12];
    st->nb_frames =
    st->duration = AV_RL16(&header[4]);
    if (fps < 1 || fps > 30) {
        av_log(s, AV_LOG_ERROR, "invalid fps: %d\n", fps);
        return AVERROR_INVALIDDATA;
    }
    avpriv_set_pts_info(st, 64, 1, fps);

    /* audio parameters, used when the audio stream is created on demand */
    wsvqa->version = AV_RL16(&header[ 0]);
    wsvqa->sample_rate = AV_RL16(&header[24]);
    wsvqa->channels = header[26];
    wsvqa->bps = header[27];
    wsvqa->audio_stream_index = -1;

    /* more streams may appear after demuxing has started */
    s->ctx_flags |= AVFMTCTX_NOHEADER;

    /* there are 0 or more chunks before the FINF chunk; iterate until
     * FINF has been skipped and the file will be ready to be demuxed */
    do {
        if (avio_read(pb, scratch, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE)
            return AVERROR(EIO);
        chunk_tag = AV_RB32(&scratch[0]);
        chunk_size = AV_RB32(&scratch[4]);

        /* catch any unknown header tags, for curiousity */
        switch (chunk_tag) {
        case CINF_TAG:
        case CINH_TAG:
        case CIND_TAG:
        case PINF_TAG:
        case PINH_TAG:
        case PIND_TAG:
        case FINF_TAG:
        case CMDS_TAG:
            break;

        default:
            av_log (s, AV_LOG_ERROR, " note: unknown chunk seen (%c%c%c%c)\n",
                scratch[0], scratch[1],
                scratch[2], scratch[3]);
            break;
        }

        avio_skip(pb, chunk_size);
    } while (chunk_tag != FINF_TAG);

    return 0;
}
 
/**
 * Read chunks until a sound (SND0/SND1/SND2) or video (VQFR) chunk can
 * be emitted as a packet; other chunk types are skipped. The audio
 * stream is created on the first sound chunk, with the codec chosen by
 * the chunk tag. Chunks are 16-bit aligned, so an odd-sized chunk is
 * followed by one pad byte.
 */
static int wsvqa_read_packet(AVFormatContext *s,
                             AVPacket *pkt)
{
    WsVqaDemuxContext *wsvqa = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = -1;
    uint8_t preamble[VQA_PREAMBLE_SIZE];
    uint32_t chunk_type;
    uint32_t chunk_size;
    int skip_byte;

    while (avio_read(pb, preamble, VQA_PREAMBLE_SIZE) == VQA_PREAMBLE_SIZE) {
        chunk_type = AV_RB32(&preamble[0]);
        chunk_size = AV_RB32(&preamble[4]);

        /* odd-sized chunks carry one alignment pad byte */
        skip_byte = chunk_size & 0x01;

        if ((chunk_type == SND0_TAG) || (chunk_type == SND1_TAG) ||
            (chunk_type == SND2_TAG) || (chunk_type == VQFR_TAG)) {

            ret= av_get_packet(pb, pkt, chunk_size);
            if (ret<0)
                return AVERROR(EIO);

            switch (chunk_type) {
            case SND0_TAG:
            case SND1_TAG:
            case SND2_TAG:
                /* lazily create the audio stream on the first sound chunk */
                if (wsvqa->audio_stream_index == -1) {
                    AVStream *st = avformat_new_stream(s, NULL);
                    if (!st)
                        return AVERROR(ENOMEM);

                    wsvqa->audio_stream_index = st->index;
                    /* fall back to defaults if the header gave no values */
                    if (!wsvqa->sample_rate)
                        wsvqa->sample_rate = 22050;
                    if (!wsvqa->channels)
                        wsvqa->channels = 1;
                    if (!wsvqa->bps)
                        wsvqa->bps = 8;
                    st->codec->sample_rate = wsvqa->sample_rate;
                    st->codec->bits_per_coded_sample = wsvqa->bps;
                    st->codec->channels = wsvqa->channels;
                    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

                    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

                    /* the chunk tag determines the audio codec */
                    switch (chunk_type) {
                    case SND0_TAG:
                        if (wsvqa->bps == 16)
                            st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
                        else
                            st->codec->codec_id = AV_CODEC_ID_PCM_U8;
                        break;
                    case SND1_TAG:
                        st->codec->codec_id = AV_CODEC_ID_WESTWOOD_SND1;
                        break;
                    case SND2_TAG:
                        st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WS;
                        /* the decoder needs the VQA version word */
                        if (ff_alloc_extradata(st->codec, 2))
                            return AVERROR(ENOMEM);
                        AV_WL16(st->codec->extradata, wsvqa->version);
                        break;
                    }
                }

                pkt->stream_index = wsvqa->audio_stream_index;
                switch (chunk_type) {
                case SND1_TAG:
                    /* unpacked size is stored in header */
                    if(pkt->data)
                        pkt->duration = AV_RL16(pkt->data) / wsvqa->channels;
                    break;
                case SND2_TAG:
                    /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
                    pkt->duration = (chunk_size * 2) / wsvqa->channels;
                    break;
                }
                break;
            case VQFR_TAG:
                pkt->stream_index = wsvqa->video_stream_index;
                pkt->duration = 1;
                break;
            }

            /* stay on 16-bit alignment */
            if (skip_byte)
                avio_skip(pb, 1);

            return ret;
        } else {
            switch(chunk_type){
            case CMDS_TAG:
                break;
            default:
                av_log(s, AV_LOG_INFO, "Skipping unknown chunk 0x%08X\n", chunk_type);
            }
            avio_skip(pb, chunk_size + skip_byte);
        }
    }

    return ret;
}
 
/* Westwood Studios VQA demuxer registration. */
AVInputFormat ff_wsvqa_demuxer = {
    .name           = "wsvqa",
    .long_name      = NULL_IF_CONFIG_SMALL("Westwood Studios VQA"),
    .priv_data_size = sizeof(WsVqaDemuxContext),
    .read_probe     = wsvqa_probe,
    .read_header    = wsvqa_read_header,
    .read_packet    = wsvqa_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/wtv.h
0,0 → 1,58
/*
* Windows Television (WTV)
* Copyright (c) 2010-2011 Peter Ross <pross@xvid.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_WTV_H
#define AVFORMAT_WTV_H

#include "riff.h"
#include "asf.h"

/* WTV files are organised in sectors of 1 << WTV_SECTOR_BITS bytes */
#define WTV_SECTOR_BITS 12
#define WTV_SECTOR_SIZE (1 << WTV_SECTOR_BITS)
/* data payload uses larger "big" sectors (1 << 18 bytes) */
#define WTV_BIGSECTOR_BITS 18
/* round x up to the next multiple of 8 */
#define WTV_PAD8(x) (((x) + 7) & ~7)

/* UTF-16LE table/file name strings shared by the WTV demuxer and muxer */
extern const uint8_t ff_timeline_le16[16];
extern const uint8_t ff_timeline_table_0_entries_Events_le16[62];
extern const uint8_t ff_table_0_entries_legacy_attrib_le16[58];
extern const uint8_t ff_table_0_entries_time_le16[40];

/* chunk/stream GUIDs defined in wtv_common.c */
extern const ff_asf_guid ff_dir_entry_guid;
extern const ff_asf_guid ff_wtv_guid;
extern const ff_asf_guid ff_timestamp_guid;
extern const ff_asf_guid ff_data_guid;
extern const ff_asf_guid ff_SBE2_STREAM_DESC_EVENT;
extern const ff_asf_guid ff_stream1_guid;
extern const ff_asf_guid ff_sync_guid;
extern const ff_asf_guid ff_index_guid;
extern const ff_asf_guid ff_mediatype_audio;
extern const ff_asf_guid ff_mediatype_video;
extern const ff_asf_guid ff_format_none;
extern const AVCodecGuid ff_video_guids[];

extern const ff_asf_guid ff_DSATTRIB_TRANSPORT_PROPERTIES;
extern const ff_asf_guid ff_metadata_guid;
extern const ff_asf_guid ff_stream2_guid;
extern const ff_asf_guid ff_mediasubtype_cpfilters_processed;
extern const ff_asf_guid ff_format_cpfilters_processed;
extern const ff_asf_guid ff_format_waveformatex;
extern const ff_asf_guid ff_format_mpeg2_video;
#endif /* AVFORMAT_WTV_H */
/contrib/sdk/sources/ffmpeg/libavformat/wtv_common.c
0,0 → 1,82
/*
* Windows Television (WTV)
* Copyright (c) 2010-2011 Peter Ross <pross@xvid.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "wtv.h"

/* WTV GUIDs*/
const ff_asf_guid ff_dir_entry_guid =
    {0x92,0xB7,0x74,0x91,0x59,0x70,0x70,0x44,0x88,0xDF,0x06,0x3B,0x82,0xCC,0x21,0x3D};
const ff_asf_guid ff_wtv_guid =
    {0xB7,0xD8,0x00,0x20,0x37,0x49,0xDA,0x11,0xA6,0x4E,0x00,0x07,0xE9,0x5E,0xAD,0x8D};
const ff_asf_guid ff_timestamp_guid =
    {0x5B,0x05,0xE6,0x1B,0x97,0xA9,0x49,0x43,0x88,0x17,0x1A,0x65,0x5A,0x29,0x8A,0x97};
const ff_asf_guid ff_data_guid =
    {0x95,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D};
const ff_asf_guid ff_SBE2_STREAM_DESC_EVENT =
    {0xED,0xA4,0x13,0x23,0x2D,0xBF,0x4F,0x45,0xAD,0x8A,0xD9,0x5B,0xA7,0xF9,0x1F,0xEE};
const ff_asf_guid ff_stream1_guid =
    {0xA1,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D};
const ff_asf_guid ff_sync_guid =
    {0x97,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D};
const ff_asf_guid ff_index_guid =
    {0x96,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D};
const ff_asf_guid ff_mediatype_audio =
    {'a','u','d','s',FF_MEDIASUBTYPE_BASE_GUID};
const ff_asf_guid ff_mediatype_video =
    {'v','i','d','s',FF_MEDIASUBTYPE_BASE_GUID};
const ff_asf_guid ff_format_none =
    {0xD6,0x17,0x64,0x0F,0x18,0xC3,0xD0,0x11,0xA4,0x3F,0x00,0xA0,0xC9,0x22,0x31,0x96};

/* declare utf16le strings */
/* The '_' macro expands 'a'_'b' to 'a', 0, 'b' so the byte arrays below
 * spell out UTF-16LE strings; the trailing 0 completes the final NUL. */
#define _ , 0,
const uint8_t ff_timeline_le16[] =
    {'t'_'i'_'m'_'e'_'l'_'i'_'n'_'e', 0};
const uint8_t ff_timeline_table_0_entries_Events_le16[] =
    {'t'_'i'_'m'_'e'_'l'_'i'_'n'_'e'_'.'_'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'e'_'n'_'t'_'r'_'i'_'e'_'s'_'.'_'E'_'v'_'e'_'n'_'t'_'s', 0};
const uint8_t ff_table_0_entries_legacy_attrib_le16[] =
    {'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'e'_'n'_'t'_'r'_'i'_'e'_'s'_'.'_'l'_'e'_'g'_'a'_'c'_'y'_'_'_'a'_'t'_'t'_'r'_'i'_'b', 0};
const uint8_t ff_table_0_entries_time_le16[] =
    {'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'e'_'n'_'t'_'r'_'i'_'e'_'s'_'.'_'t'_'i'_'m'_'e', 0};
#undef _

const ff_asf_guid ff_DSATTRIB_TRANSPORT_PROPERTIES =
    {0x12,0xF6,0x22,0xB6,0xAD,0x47,0x71,0x46,0xAD,0x6C,0x05,0xA9,0x8E,0x65,0xDE,0x3A};
const ff_asf_guid ff_metadata_guid =
    {0x5A,0xFE,0xD7,0x6D,0xC8,0x1D,0x8F,0x4A,0x99,0x22,0xFA,0xB1,0x1C,0x38,0x14,0x53};
const ff_asf_guid ff_stream2_guid =
    {0xA2,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D};

/* Media subtypes */
const ff_asf_guid ff_mediasubtype_cpfilters_processed =
    {0x28,0xBD,0xAD,0x46,0xD0,0x6F,0x96,0x47,0x93,0xB2,0x15,0x5C,0x51,0xDC,0x04,0x8D};

/* Formats */
const ff_asf_guid ff_format_cpfilters_processed =
    {0x6F,0xB3,0x39,0x67,0x5F,0x1D,0xC2,0x4A,0x81,0x92,0x28,0xBB,0x0E,0x73,0xD1,0x6A};
const ff_asf_guid ff_format_waveformatex =
    {0x81,0x9F,0x58,0x05,0x56,0xC3,0xCE,0x11,0xBF,0x01,0x00,0xAA,0x00,0x55,0x59,0x5A};
const ff_asf_guid ff_format_mpeg2_video =
    {0xE3,0x80,0x6D,0xE0,0x46,0xDB,0xCF,0x11,0xB4,0xD1,0x00,0x80,0x5F,0x6C,0xBB,0xEA};

/* video codec GUID -> AVCodecID mapping, terminated by AV_CODEC_ID_NONE */
const AVCodecGuid ff_video_guids[] = {
    {AV_CODEC_ID_MPEG2VIDEO, {0x26,0x80,0x6D,0xE0,0x46,0xDB,0xCF,0x11,0xB4,0xD1,0x00,0x80,0x5F,0x6C,0xBB,0xEA}},
    {AV_CODEC_ID_NONE}
};
/contrib/sdk/sources/ffmpeg/libavformat/wtvdec.c
0,0 → 1,1095
/*
* Windows Television (WTV) demuxer
* Copyright (c) 2010-2011 Peter Ross <pross@xvid.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Windows Television (WTV) demuxer
* @author Peter Ross <pross@xvid.org>
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "avformat.h"
#include "internal.h"
#include "wtv.h"
#include "mpegts.h"
 
/* Macros for formatting GUIDs in the conventional
 * xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxxxxxx form (printf format string,
 * matching argument list, and the resulting string length). */
#define PRI_PRETTY_GUID \
    "%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x"
#define ARG_PRETTY_GUID(g) \
    AV_RL32(g),AV_RL16(g+4),AV_RL16(g+6),g[8],g[9],g[10],g[11],g[12],g[13],g[14],g[15]
#define LEN_PRETTY_GUID 34
 
/*
*
* File system routines
*
*/
 
/* State for one virtual file inside the WTV sector file system.
 * Exposed to lavf through a custom AVIOContext (see wtvfile_open_sector). */
typedef struct {
    AVIOContext *pb_filesystem;  /**< file system (AVFormatContext->pb) */

    int sector_bits;     /**< sector shift bits; used to convert sector number into pb_filesystem offset */
    uint32_t *sectors;   /**< file allocation table */
    int nb_sectors;      /**< number of sectors */

    int error;           /**< sticky error flag; set on failed sector seek */
    int64_t position;    /**< current byte offset within the virtual file */
    int64_t length;      /**< virtual file length in bytes */
} WtvFile;
 
/** Seek the underlying file to @p offset bytes past the start of @p sector. */
static int64_t seek_by_sector(AVIOContext *pb, int64_t sector, int64_t offset)
{
    int64_t pos = (sector << WTV_SECTOR_BITS) + offset;

    return avio_seek(pb, pos, SEEK_SET);
}
 
/**
 * Read callback for the virtual-file AVIOContext: copies bytes out of the
 * sector chain, hopping to the next allocated sector whenever a sector
 * boundary is crossed.
 * @return bytes read, 0 on end of file, or <0 on error
 */
static int wtvfile_read_packet(void *opaque, uint8_t *buf, int buf_size)
{
    WtvFile *wf = opaque;
    AVIOContext *pb = wf->pb_filesystem;
    int nread = 0;

    if (wf->error || pb->error)
        return -1;
    if (wf->position >= wf->length || url_feof(pb))
        return 0;

    /* never read past the virtual end of file */
    buf_size = FFMIN(buf_size, wf->length - wf->position);
    while (nread < buf_size) {
        int n;
        /* bytes left before the current sector ends */
        int remaining_in_sector = (1 << wf->sector_bits) - (wf->position & ((1 << wf->sector_bits) - 1));
        int read_request = FFMIN(buf_size - nread, remaining_in_sector);

        n = avio_read(pb, buf, read_request);
        if (n <= 0)
            break;
        nread += n;
        buf += n;
        wf->position += n;
        if (n == remaining_in_sector) {
            /* sector boundary reached: seek to the next sector unless it is
               physically contiguous with the previous one */
            int i = wf->position >> wf->sector_bits;
            if (i >= wf->nb_sectors ||
                (wf->sectors[i] != wf->sectors[i - 1] + (1 << (wf->sector_bits - WTV_SECTOR_BITS)) &&
                 seek_by_sector(pb, wf->sectors[i], 0) < 0)) {
                wf->error = 1;
                break;
            }
        }
    }
    return nread;
}
 
/**
* @return position (or file length)
*/
static int64_t wtvfile_seek(void *opaque, int64_t offset, int whence)
{
WtvFile *wf = opaque;
AVIOContext *pb = wf->pb_filesystem;
 
if (whence == AVSEEK_SIZE)
return wf->length;
else if (whence == SEEK_CUR)
offset = wf->position + offset;
else if (whence == SEEK_END)
offset = wf->length;
 
wf->error = offset < 0 || offset >= wf->length ||
seek_by_sector(pb, wf->sectors[offset >> wf->sector_bits],
offset & ((1 << wf->sector_bits) - 1)) < 0;
wf->position = offset;
return offset;
}
 
/**
 * Read little-endian 32-bit integers from the input stream, keeping only
 * the non-zero ones (zero marks an unused allocation-table slot).
 * @param pb         input context
 * @param[out] data  destination; must have room for @p count entries
 * @param count      number of integers to read from the stream
 * @return number of non-zero integers stored in @p data
 */
static int read_ints(AVIOContext *pb, uint32_t *data, int count)
{
    int kept = 0, remaining;

    for (remaining = count; remaining > 0; remaining--) {
        data[kept] = avio_rl32(pb);
        if (data[kept])
            kept++;
    }
    return kept;
}
 
/**
 * Open a virtual file stored in the WTV sector file system.
 *
 * Builds the file's sector table (directly, from a one-level allocation
 * table, or from a two-level table, depending on @p depth) and wraps it
 * in a custom AVIOContext driven by wtvfile_read_packet()/wtvfile_seek().
 *
 * @param first_sector First sector (file data, or allocation table root)
 * @param length       Length of file in bytes; bit 63 set selects small
 *                     (4 KiB) sectors, otherwise big (256 KiB) sectors
 * @param depth        File allocation table depth (0, 1 or 2)
 * @return NULL on error
 */
static AVIOContext * wtvfile_open_sector(int first_sector, uint64_t length, int depth, AVFormatContext *s)
{
    AVIOContext *pb;
    WtvFile *wf;
    uint8_t *buffer;

    if (seek_by_sector(s->pb, first_sector, 0) < 0)
        return NULL;

    wf = av_mallocz(sizeof(WtvFile));
    if (!wf)
        return NULL;

    if (depth == 0) {
        /* data starts directly at first_sector */
        wf->sectors = av_malloc(sizeof(uint32_t));
        if (!wf->sectors) {
            av_free(wf);
            return NULL;
        }
        wf->sectors[0] = first_sector;
        wf->nb_sectors = 1;
    } else if (depth == 1) {
        /* first_sector holds one sector's worth of data-sector numbers */
        wf->sectors = av_malloc(WTV_SECTOR_SIZE);
        if (!wf->sectors) {
            av_free(wf);
            return NULL;
        }
        wf->nb_sectors = read_ints(s->pb, wf->sectors, WTV_SECTOR_SIZE / 4);
    } else if (depth == 2) {
        /* first_sector holds sector numbers of second-level tables */
        uint32_t sectors1[WTV_SECTOR_SIZE / 4];
        int nb_sectors1 = read_ints(s->pb, sectors1, WTV_SECTOR_SIZE / 4);
        int i;

        wf->sectors = av_malloc_array(nb_sectors1, 1 << WTV_SECTOR_BITS);
        if (!wf->sectors) {
            av_free(wf);
            return NULL;
        }
        wf->nb_sectors = 0;
        for (i = 0; i < nb_sectors1; i++) {
            if (seek_by_sector(s->pb, sectors1[i], 0) < 0)
                break;
            wf->nb_sectors += read_ints(s->pb, wf->sectors + i * WTV_SECTOR_SIZE / 4, WTV_SECTOR_SIZE / 4);
        }
    } else {
        av_log(s, AV_LOG_ERROR, "unsupported file allocation table depth (0x%x)\n", depth);
        av_free(wf);
        return NULL;
    }
    /* bit 63 of the directory length field selects the small sector size */
    wf->sector_bits = length & (1ULL << 63) ? WTV_SECTOR_BITS : WTV_BIGSECTOR_BITS;

    if (!wf->nb_sectors) {
        av_free(wf->sectors);
        av_free(wf);
        return NULL;
    }

    /* truncation check must compare the last sector's offset against the
       total file size, not the current read position (avio_tell here is
       just the position past the allocation table) */
    if ((int64_t)wf->sectors[wf->nb_sectors - 1] << WTV_SECTOR_BITS > avio_size(s->pb))
        av_log(s, AV_LOG_WARNING, "truncated file\n");

    /* check length */
    length &= 0xFFFFFFFFFFFF;
    if (length > ((int64_t)wf->nb_sectors << wf->sector_bits)) {
        av_log(s, AV_LOG_WARNING, "reported file length (0x%"PRIx64") exceeds number of available sectors (0x%"PRIx64")\n", length, (int64_t)wf->nb_sectors << wf->sector_bits);
        length = (int64_t)wf->nb_sectors << wf->sector_bits;
    }
    wf->length = length;

    /* seek to initial sector */
    wf->position = 0;
    if (seek_by_sector(s->pb, wf->sectors[0], 0) < 0) {
        av_free(wf->sectors);
        av_free(wf);
        return NULL;
    }

    wf->pb_filesystem = s->pb;
    buffer = av_malloc(1 << wf->sector_bits);
    if (!buffer) {
        av_free(wf->sectors);
        av_free(wf);
        return NULL;
    }

    pb = avio_alloc_context(buffer, 1 << wf->sector_bits, 0, wf,
                            wtvfile_read_packet, NULL, wtvfile_seek);
    if (!pb) {
        av_free(buffer);
        av_free(wf->sectors);
        av_free(wf);
    }
    return pb;
}
 
/**
 * Open a virtual file by name.
 * Walks the directory entries in @p buf until one matches @p filename,
 * then opens the referenced sector chain.
 * @param[in]  buf           directory buffer
 * @param      buf_size      directory buffer size
 * @param[in]  filename      UTF-16LE filename to look up
 * @param      filename_size size of filename in bytes
 * @return NULL on error
 */
static AVIOContext * wtvfile_open2(AVFormatContext *s, const uint8_t *buf, int buf_size, const uint8_t *filename, int filename_size)
{
    const uint8_t *buf_end = buf + buf_size;

    while (buf + 48 <= buf_end) {
        int dir_length, name_size, first_sector, depth;
        uint64_t file_length;
        const uint8_t *name;
        if (ff_guidcmp(buf, ff_dir_entry_guid)) {
            av_log(s, AV_LOG_ERROR, "unknown guid "FF_PRI_GUID", expected dir_entry_guid; "
                   "remaining directory entries ignored\n", FF_ARG_GUID(buf));
            break;
        }
        dir_length  = AV_RL16(buf + 16);
        file_length = AV_RL64(buf + 24);
        name_size   = 2 * AV_RL32(buf + 32);
        if (name_size < 0) {
            av_log(s, AV_LOG_ERROR,
                   "bad filename length, remaining directory entries ignored\n");
            break;
        }
        if (!dir_length) {
            /* a zero-length entry would never advance buf -> infinite loop */
            av_log(s, AV_LOG_ERROR,
                   "bad dir length, remaining directory entries ignored\n");
            break;
        }
        if (48 + (int64_t)name_size > buf_end - buf) {
            av_log(s, AV_LOG_ERROR, "filename exceeds buffer size; remaining directory entries ignored\n");
            break;
        }
        first_sector = AV_RL32(buf + 40 + name_size);
        depth        = AV_RL32(buf + 44 + name_size);

        /* compare file name; test optional null terminator */
        name = buf + 40;
        if (name_size >= filename_size &&
            !memcmp(name, filename, filename_size) &&
            (name_size < filename_size + 2 || !AV_RN16(name + filename_size)))
            return wtvfile_open_sector(first_sector, file_length, depth, s);

        buf += dir_length;
    }
    return 0;
}

/** Convenience wrapper: open by a compile-time UTF-16LE filename constant. */
#define wtvfile_open(s, buf, buf_size, filename) \
    wtvfile_open2(s, buf, buf_size, filename, sizeof(filename))
 
/**
 * Close a virtual file opened with wtvfile_open_sector() or wtvfile_open(),
 * releasing the sector table, the I/O buffer, the WtvFile state and the
 * AVIOContext itself.
 */
static void wtvfile_close(AVIOContext *pb)
{
    WtvFile *wf = pb->opaque;

    av_freep(&wf->sectors);
    av_freep(&pb->opaque);   /* frees wf */
    av_freep(&pb->buffer);
    av_free(pb);
}
 
/*
*
* Main demuxer
*
*/
 
/* per-stream private data */
typedef struct {
    int seen_data;   /* set once the first data chunk for this stream was parsed */
} WtvStream;

/* demuxer private data */
typedef struct {
    AVIOContext *pb;       /**< timeline file */
    int64_t epoch;         /**< earliest pts seen (updated in parse_chunks) */
    int64_t pts;           /**< pts for next data chunk */
    int64_t last_valid_pts; /**< latest valid pts, used for interactive seeking */

    /* maintain private seek index, as the AVIndexEntry->pos is relative to the
       start of the 'timeline' file, not the file system (AVFormatContext->pb) */
    AVIndexEntry *index_entries;
    int nb_index_entries;
    unsigned int index_entries_allocated_size;
} WtvContext;
 
/* WTV GUIDs */
/* event chunks that may appear interleaved in the timeline stream */
static const ff_asf_guid EVENTID_SubtitleSpanningEvent =
    {0x48,0xC0,0xCE,0x5D,0xB9,0xD0,0x63,0x41,0x87,0x2C,0x4F,0x32,0x22,0x3B,0xE8,0x8A};
static const ff_asf_guid EVENTID_LanguageSpanningEvent =
    {0x6D,0x66,0x92,0xE2,0x02,0x9C,0x8D,0x44,0xAA,0x8D,0x78,0x1A,0x93,0xFD,0xC3,0x95};
static const ff_asf_guid EVENTID_AudioDescriptorSpanningEvent =
    {0x1C,0xD4,0x7B,0x10,0xDA,0xA6,0x91,0x46,0x83,0x69,0x11,0xB2,0xCD,0xAA,0x28,0x8E};
static const ff_asf_guid EVENTID_CtxADescriptorSpanningEvent =
    {0xE6,0xA2,0xB4,0x3A,0x47,0x42,0x34,0x4B,0x89,0x6C,0x30,0xAF,0xA5,0xD2,0x1C,0x24};
static const ff_asf_guid EVENTID_CSDescriptorSpanningEvent =
    {0xD9,0x79,0xE7,0xEf,0xF0,0x97,0x86,0x47,0x80,0x0D,0x95,0xCF,0x50,0x5D,0xDC,0x66};
static const ff_asf_guid EVENTID_DVBScramblingControlSpanningEvent =
    {0xC4,0xE1,0xD4,0x4B,0xA1,0x90,0x09,0x41,0x82,0x36,0x27,0xF0,0x0E,0x7D,0xCC,0x5B};
static const ff_asf_guid EVENTID_StreamIDSpanningEvent =
    {0x68,0xAB,0xF1,0xCA,0x53,0xE1,0x41,0x4D,0xA6,0xB3,0xA7,0xC9,0x98,0xDB,0x75,0xEE};
static const ff_asf_guid EVENTID_TeletextSpanningEvent =
    {0x50,0xD9,0x99,0x95,0x33,0x5F,0x17,0x46,0xAF,0x7C,0x1E,0x54,0xB5,0x10,0xDA,0xA3};
static const ff_asf_guid EVENTID_AudioTypeSpanningEvent =
    {0xBE,0xBF,0x1C,0x50,0x49,0xB8,0xCE,0x42,0x9B,0xE9,0x3D,0xB8,0x69,0xFB,0x82,0xB3};

/* Windows media GUIDs */

/* Media types */
static const ff_asf_guid mediasubtype_mpeg1payload =
    {0x81,0xEB,0x36,0xE4,0x4F,0x52,0xCE,0x11,0x9F,0x53,0x00,0x20,0xAF,0x0B,0xA7,0x70};
static const ff_asf_guid mediatype_mpeg2_sections =
    {0x6C,0x17,0x5F,0x45,0x06,0x4B,0xCE,0x47,0x9A,0xEF,0x8C,0xAE,0xF7,0x3D,0xF7,0xB5};
static const ff_asf_guid mediatype_mpeg2_pes =
    {0x20,0x80,0x6D,0xE0,0x46,0xDB,0xCF,0x11,0xB4,0xD1,0x00,0x80,0x5F,0x6C,0xBB,0xEA};
static const ff_asf_guid mediatype_mstvcaption =
    {0x89,0x8A,0x8B,0xB8,0x49,0xB0,0x80,0x4C,0xAD,0xCF,0x58,0x98,0x98,0x5E,0x22,0xC1};

/* Media subtypes */
static const ff_asf_guid mediasubtype_dvb_subtitle =
    {0xC3,0xCB,0xFF,0x34,0xB3,0xD5,0x71,0x41,0x90,0x02,0xD4,0xC6,0x03,0x01,0x69,0x7F};
static const ff_asf_guid mediasubtype_teletext =
    {0xE3,0x76,0x2A,0xF7,0x0A,0xEB,0xD0,0x11,0xAC,0xE4,0x00,0x00,0xC0,0xCC,0x16,0xBA};
static const ff_asf_guid mediasubtype_dtvccdata =
    {0xAA,0xDD,0x2A,0xF5,0xF0,0x36,0xF5,0x43,0x95,0xEA,0x6D,0x86,0x64,0x84,0x26,0x2A};
static const ff_asf_guid mediasubtype_mpeg2_sections =
    {0x79,0x85,0x9F,0x4A,0xF8,0x6B,0x92,0x43,0x8A,0x6D,0xD2,0xDD,0x09,0xFA,0x78,0x61};

/* Formats */
static const ff_asf_guid format_videoinfo2 =
    {0xA0,0x76,0x2A,0xF7,0x0A,0xEB,0xD0,0x11,0xAC,0xE4,0x00,0x00,0xC0,0xCC,0x16,0xBA};
 
/** Probe: a WTV file starts with the 16-byte WTV header GUID. */
static int read_probe(AVProbeData *p)
{
    if (!ff_guidcmp(p->buf, ff_wtv_guid))
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
/**
 * Convert win32 FILETIME (100 ns intervals since 1601-01-01) to an
 * ISO-8601 "YYYY-MM-DD HH:MM:SS" string.
 * @return <0 on error (unrepresentable time or buffer too small)
 */
static int filetime_to_iso8601(char *buf, int buf_size, int64_t value)
{
    /* 11644473600 = seconds between 1601-01-01 and the Unix epoch */
    time_t t = (value / 10000000LL) - 11644473600LL;
    struct tm *tm = gmtime(&t);
    if (!tm)
        return -1;
    /* reuse the checked gmtime() result instead of calling gmtime() again;
       strftime returns 0 when the buffer is too small */
    if (!strftime(buf, buf_size, "%Y-%m-%d %H:%M:%S", tm))
        return -1;
    return 0;
}
 
/**
 * Convert crazy time (100 ns intervals since 0001-01-01) to an
 * ISO-8601 "YYYY-MM-DD HH:MM:SS" string.
 * @return <0 on error (unrepresentable time or buffer too small)
 */
static int crazytime_to_iso8601(char *buf, int buf_size, int64_t value)
{
    /* 719162 = days between 0001-01-01 and the Unix epoch */
    time_t t = (value / 10000000LL) - 719162LL * 86400LL;
    struct tm *tm = gmtime(&t);
    if (!tm)
        return -1;
    /* reuse the checked gmtime() result instead of calling gmtime() again;
       strftime returns 0 when the buffer is too small */
    if (!strftime(buf, buf_size, "%Y-%m-%d %H:%M:%S", tm))
        return -1;
    return 0;
}
 
/**
 * Convert an OLE DATE (double: days since 1899-12-30, stored as raw
 * int64 bits) to an ISO-8601 "YYYY-MM-DD HH:MM:SS" string.
 * @return <0 on error
 */
static int oledate_to_iso8601(char *buf, int buf_size, int64_t value)
{
    /* 25569 = days between 1899-12-30 and the Unix epoch */
    double unix_days = av_int2double(value) - 25569.0;
    time_t t = unix_days * 86400;
    struct tm *tm = gmtime(&t);

    if (!tm)
        return -1;
    strftime(buf, buf_size, "%Y-%m-%d %H:%M:%S", tm);
    return 0;
}
 
/**
 * Import an embedded thumbnail (the payload of a WM/Picture metadata tag)
 * as an attached-picture stream. Only "image/jpeg" payloads are imported;
 * everything else is skipped. Regardless of outcome, the stream position
 * is advanced by exactly @p length bytes.
 */
static void get_attachment(AVFormatContext *s, AVIOContext *pb, int length)
{
    char mime[1024];
    char description[1024];
    unsigned int filesize;
    AVStream *st;
    int ret;
    int64_t pos = avio_tell(pb);

    avio_get_str16le(pb, INT_MAX, mime, sizeof(mime));
    if (strcmp(mime, "image/jpeg"))
        goto done;

    avio_r8(pb);   /* skip one byte between mime type and description
                      -- purpose not evident from this code */
    avio_get_str16le(pb, INT_MAX, description, sizeof(description));
    filesize = avio_rl32(pb);
    if (!filesize)
        goto done;

    st = avformat_new_stream(s, NULL);
    if (!st)
        goto done;
    av_dict_set(&st->metadata, "title", description, 0);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_MJPEG;
    st->id = -1;   /* no WTV stream id is associated with the attachment */
    ret = av_get_packet(pb, &st->attached_pic, filesize);
    if (ret < 0)
        goto done;
    st->attached_pic.stream_index = st->index;
    st->attached_pic.flags       |= AV_PKT_FLAG_KEY;
    st->disposition              |= AV_DISPOSITION_ATTACHED_PIC;
done:
    avio_seek(pb, pos + length, SEEK_SET);
}
 
/**
 * Read one metadata value of the given @p type / @p length from pb,
 * convert it to a string and store it under @p key in s->metadata.
 * Type codes handled: 0 = int32, 1 = UTF-16LE string, 2 = binary
 * (only WM/Picture), 3 = bool32, 4 = int64 (with several date-formatted
 * special keys), 5 = int16, 6 = GUID. Unhandled entries are skipped.
 */
static void get_tag(AVFormatContext *s, AVIOContext *pb, const char *key, int type, int length)
{
    int buf_size;
    char *buf;

    if (!strcmp(key, "WM/MediaThumbType")) {
        avio_skip(pb, length);
        return;
    }

    /* large enough for a UTF-8 expansion of the string types and for a
       pretty-printed GUID */
    buf_size = FFMAX(2*length, LEN_PRETTY_GUID) + 1;
    buf = av_malloc(buf_size);
    if (!buf)
        return;

    if (type == 0 && length == 4) {
        snprintf(buf, buf_size, "%"PRIi32, avio_rl32(pb));
    } else if (type == 1) {
        avio_get_str16le(pb, length, buf, buf_size);
        if (!strlen(buf)) {
            /* drop empty strings */
            av_free(buf);
            return;
        }
    } else if (type == 3 && length == 4) {
        strcpy(buf, avio_rl32(pb) ? "true" : "false");
    } else if (type == 4 && length == 8) {
        int64_t num = avio_rl64(pb);
        /* several well-known keys carry timestamps in different epochs */
        if (!strcmp(key, "WM/EncodingTime") ||
            !strcmp(key, "WM/MediaOriginalBroadcastDateTime")) {
            if (filetime_to_iso8601(buf, buf_size, num) < 0) {
                av_free(buf);
                return;
            }
        } else if (!strcmp(key, "WM/WMRVEncodeTime") ||
                   !strcmp(key, "WM/WMRVEndTime")) {
            if (crazytime_to_iso8601(buf, buf_size, num) < 0) {
                av_free(buf);
                return;
            }
        } else if (!strcmp(key, "WM/WMRVExpirationDate")) {
            if (oledate_to_iso8601(buf, buf_size, num) < 0) {
                av_free(buf);
                return;
            }
        } else if (!strcmp(key, "WM/WMRVBitrate"))
            snprintf(buf, buf_size, "%f", av_int2double(num));
        else
            snprintf(buf, buf_size, "%"PRIi64, num);
    } else if (type == 5 && length == 2) {
        snprintf(buf, buf_size, "%"PRIi16, avio_rl16(pb));
    } else if (type == 6 && length == 16) {
        ff_asf_guid guid;
        avio_read(pb, guid, 16);
        snprintf(buf, buf_size, PRI_PRETTY_GUID, ARG_PRETTY_GUID(guid));
    } else if (type == 2 && !strcmp(key, "WM/Picture")) {
        get_attachment(s, pb, length);
        av_freep(&buf);
        return;
    } else {
        av_freep(&buf);
        av_log(s, AV_LOG_WARNING, "unsupported metadata entry; key:%s, type:%d, length:0x%x\n", key, type, length);
        avio_skip(pb, length);
        return;
    }

    av_dict_set(&s->metadata, key, buf, 0);
    av_freep(&buf);
}
 
/**
 * Parse metadata entries from the "legacy_attrib" sub-file.
 * Each entry is: metadata GUID, type (le32), length (le32),
 * UTF-16LE key string, then the value (decoded by get_tag()).
 * Stops at EOF, at a zero-length entry, or at an unexpected GUID.
 */
static void parse_legacy_attrib(AVFormatContext *s, AVIOContext *pb)
{
    ff_asf_guid guid;
    int length, type;
    while (!url_feof(pb)) {
        char key[1024];
        ff_get_guid(pb, &guid);
        type   = avio_rl32(pb);
        length = avio_rl32(pb);
        if (!length)
            break;
        if (ff_guidcmp(&guid, ff_metadata_guid)) {
            av_log(s, AV_LOG_WARNING, "unknown guid "FF_PRI_GUID", expected metadata_guid; "
                   "remaining metadata entries ignored\n", FF_ARG_GUID(guid));
            break;
        }
        avio_get_str16le(pb, INT_MAX, key, sizeof(key));
        get_tag(s, pb, key, type, length);
    }

    /* map ASF metadata key names to generic lavf names */
    ff_metadata_conv(&s->metadata, NULL, ff_asf_metadata_conv);
}
 
/**
 * parse VIDEOINFOHEADER2 structure from the timeline file
 * @return bytes consumed (fixed 72-byte prefix + 40-byte BITMAPINFOHEADER)
 */
static int parse_videoinfoheader2(AVFormatContext *s, AVStream *st)
{
    WtvContext *wtv = s->priv_data;

    /* the leading 72 bytes (incl. picture aspect ratio) are unreliable,
       so only the trailing BITMAPINFOHEADER is decoded */
    avio_skip(wtv->pb, 72);
    ff_get_bmp_header(wtv->pb, st, NULL);

    return 72 + 40;
}
 
/**
 * Parse the MPEG1WAVEFORMATEX portion of st->codec->extradata and fill in
 * codec id, bit rate and channel configuration.
 * Caller guarantees extradata holds at least 22 bytes.
 */
static void parse_mpeg1waveformatex(AVStream *st)
{
    const uint8_t *ed = st->codec->extradata;
    unsigned layer = AV_RL16(ed);      /* fwHeadLayer */
    unsigned mode  = AV_RL16(ed + 6);  /* dwHeadMode */

    if (layer == 0x0001)
        st->codec->codec_id = AV_CODEC_ID_MP1;
    else if (layer == 0x0002)
        st->codec->codec_id = AV_CODEC_ID_MP2;
    else if (layer == 0x0004)
        st->codec->codec_id = AV_CODEC_ID_MP3;

    st->codec->bit_rate = AV_RL32(ed + 2); /* dwHeadBitrate */

    if (mode == 1 || mode == 2 || mode == 4) {
        /* stereo, joint stereo, dual channel: all exposed as 2 channels */
        st->codec->channels       = 2;
        st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    } else if (mode == 8) {
        st->codec->channels       = 1;
        st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    }
}
 
/**
 * Initialise a stream for WTV demuxing.
 * @param st Stream to (re-)initialise, or NULL to create a new one with a
 *           fresh WtvStream private context and id @p sid
 * @return the stream, or NULL on allocation failure
 */
static AVStream * new_stream(AVFormatContext *s, AVStream *st, int sid, int codec_type)
{
    if (!st) {
        WtvStream *wst = av_mallocz(sizeof(*wst));
        if (!wst)
            return NULL;
        st = avformat_new_stream(s, NULL);
        if (!st) {
            av_free(wst);
            return NULL;
        }
        st->id        = sid;
        st->priv_data = wst;
    } else if (st->codec->extradata) {
        /* re-initialising: drop stale extradata */
        av_freep(&st->codec->extradata);
        st->codec->extradata_size = 0;
    }
    st->codec->codec_type = codec_type;
    st->need_parsing      = AVSTREAM_PARSE_FULL;
    avpriv_set_pts_info(st, 64, 1, 10000000); /* WTV timestamps are 100 ns units */
    return st;
}
 
/**
 * parse Media Type structure and populate stream
 * Dispatches on the (mediatype, subtype, formattype) GUID triple; the
 * cpfilters-processed wrapper case recurses once on the GUIDs stored at
 * the end of the format buffer.
 * @param st Stream, or NULL to create new stream
 * @param mediatype Mediatype GUID
 * @param subtype Subtype GUID
 * @param formattype Format GUID
 * @param size Size of format buffer
 * @return NULL on error
 */
static AVStream * parse_media_type(AVFormatContext *s, AVStream *st, int sid,
                                   ff_asf_guid mediatype, ff_asf_guid subtype,
                                   ff_asf_guid formattype, int size)
{
    WtvContext *wtv = s->priv_data;
    AVIOContext *pb = wtv->pb;
    if (!ff_guidcmp(subtype, ff_mediasubtype_cpfilters_processed) &&
        !ff_guidcmp(formattype, ff_format_cpfilters_processed)) {
        /* wrapped media type: the real subtype/formattype GUIDs live in the
           last 32 bytes of the format buffer */
        ff_asf_guid actual_subtype;
        ff_asf_guid actual_formattype;

        if (size < 32) {
            av_log(s, AV_LOG_WARNING, "format buffer size underflow\n");
            avio_skip(pb, size);
            return NULL;
        }

        avio_skip(pb, size - 32);
        ff_get_guid(pb, &actual_subtype);
        ff_get_guid(pb, &actual_formattype);
        avio_seek(pb, -size, SEEK_CUR);

        st = parse_media_type(s, st, sid, mediatype, actual_subtype, actual_formattype, size - 32);
        avio_skip(pb, 32);
        return st;
    } else if (!ff_guidcmp(mediatype, ff_mediatype_audio)) {
        st = new_stream(s, st, sid, AVMEDIA_TYPE_AUDIO);
        if (!st)
            return NULL;
        if (!ff_guidcmp(formattype, ff_format_waveformatex)) {
            int ret = ff_get_wav_header(pb, st->codec, size);
            if (ret < 0)
                return NULL;
        } else {
            if (ff_guidcmp(formattype, ff_format_none))
                av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
            avio_skip(pb, size);
        }

        /* a subtype sharing the DirectShow base GUID carries a WAV tag in
           its first four bytes */
        if (!memcmp(subtype + 4, (const uint8_t[]){FF_MEDIASUBTYPE_BASE_GUID}, 12)) {
            st->codec->codec_id = ff_wav_codec_get_id(AV_RL32(subtype), st->codec->bits_per_coded_sample);
        } else if (!ff_guidcmp(subtype, mediasubtype_mpeg1payload)) {
            if (st->codec->extradata && st->codec->extradata_size >= 22)
                parse_mpeg1waveformatex(st);
            else
                av_log(s, AV_LOG_WARNING, "MPEG1WAVEFORMATEX underflow\n");
        } else {
            st->codec->codec_id = ff_codec_guid_get_id(ff_codec_wav_guids, subtype);
            if (st->codec->codec_id == AV_CODEC_ID_NONE)
                av_log(s, AV_LOG_WARNING, "unknown subtype:"FF_PRI_GUID"\n", FF_ARG_GUID(subtype));
        }
        return st;
    } else if (!ff_guidcmp(mediatype, ff_mediatype_video)) {
        st = new_stream(s, st, sid, AVMEDIA_TYPE_VIDEO);
        if (!st)
            return NULL;
        if (!ff_guidcmp(formattype, format_videoinfo2)) {
            int consumed = parse_videoinfoheader2(s, st);
            avio_skip(pb, FFMAX(size - consumed, 0));
        } else if (!ff_guidcmp(formattype, ff_format_mpeg2_video)) {
            /* MPEG2VIDEOINFO begins with a VIDEOINFOHEADER2 */
            int consumed = parse_videoinfoheader2(s, st);
            avio_skip(pb, FFMAX(size - consumed, 0));
        } else {
            if (ff_guidcmp(formattype, ff_format_none))
                av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
            avio_skip(pb, size);
        }

        /* base-GUID subtypes carry a BMP fourcc in their first four bytes */
        if (!memcmp(subtype + 4, (const uint8_t[]){FF_MEDIASUBTYPE_BASE_GUID}, 12)) {
            st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, AV_RL32(subtype));
        } else {
            st->codec->codec_id = ff_codec_guid_get_id(ff_video_guids, subtype);
        }
        if (st->codec->codec_id == AV_CODEC_ID_NONE)
            av_log(s, AV_LOG_WARNING, "unknown subtype:"FF_PRI_GUID"\n", FF_ARG_GUID(subtype));
        return st;
    } else if (!ff_guidcmp(mediatype, mediatype_mpeg2_pes) &&
               !ff_guidcmp(subtype, mediasubtype_dvb_subtitle)) {
        st = new_stream(s, st, sid, AVMEDIA_TYPE_SUBTITLE);
        if (!st)
            return NULL;
        if (ff_guidcmp(formattype, ff_format_none))
            av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
        avio_skip(pb, size);
        st->codec->codec_id = AV_CODEC_ID_DVB_SUBTITLE;
        return st;
    } else if (!ff_guidcmp(mediatype, mediatype_mstvcaption) &&
               (!ff_guidcmp(subtype, mediasubtype_teletext) || !ff_guidcmp(subtype, mediasubtype_dtvccdata))) {
        st = new_stream(s, st, sid, AVMEDIA_TYPE_SUBTITLE);
        if (!st)
            return NULL;
        if (ff_guidcmp(formattype, ff_format_none))
            av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
        avio_skip(pb, size);
        st->codec->codec_id = !ff_guidcmp(subtype, mediasubtype_teletext) ? AV_CODEC_ID_DVB_TELETEXT : AV_CODEC_ID_EIA_608;
        return st;
    } else if (!ff_guidcmp(mediatype, mediatype_mpeg2_sections) &&
               !ff_guidcmp(subtype, mediasubtype_mpeg2_sections)) {
        /* PSI sections: recognised but not exposed as a stream */
        if (ff_guidcmp(formattype, ff_format_none))
            av_log(s, AV_LOG_WARNING, "unknown formattype:"FF_PRI_GUID"\n", FF_ARG_GUID(formattype));
        avio_skip(pb, size);
        return NULL;
    }

    av_log(s, AV_LOG_WARNING, "unknown media type, mediatype:"FF_PRI_GUID
           ", subtype:"FF_PRI_GUID", formattype:"FF_PRI_GUID"\n",
           FF_ARG_GUID(mediatype), FF_ARG_GUID(subtype), FF_ARG_GUID(formattype));
    avio_skip(pb, size);
    return NULL;
}
 
/* operating modes for parse_chunks() */
enum {
    SEEK_TO_DATA = 0,  /* stop at the next data chunk */
    SEEK_TO_PTS,       /* stop at the first timestamp chunk with pts >= seekts */
};
 
/**
 * Parse WTV chunks
 * Every chunk starts with a 32-byte header: GUID (16), length (le32),
 * stream id (le32, high bit masked off), 8 unknown bytes. 'consumed'
 * tracks how many bytes of the chunk were read so the remainder (padded
 * to 8 bytes) can be skipped at the bottom of the loop.
 * @param mode SEEK_TO_DATA or SEEK_TO_PTS
 * @param seekts timestamp
 * @param[out] len_ptr Length of data chunk
 * @return stream index of data chunk, or <0 on error
 */
static int parse_chunks(AVFormatContext *s, int mode, int64_t seekts, int *len_ptr)
{
    WtvContext *wtv = s->priv_data;
    AVIOContext *pb = wtv->pb;
    while (!url_feof(pb)) {
        ff_asf_guid g;
        int len, sid, consumed;

        ff_get_guid(pb, &g);
        len = avio_rl32(pb);
        if (len < 32)
            break;
        sid = avio_rl32(pb) & 0x7FFF;
        avio_skip(pb, 8);
        consumed = 32;

        if (!ff_guidcmp(g, ff_SBE2_STREAM_DESC_EVENT)) {
            /* stream descriptor: create the stream on first sight of sid */
            if (ff_find_stream_index(s, sid) < 0) {
                ff_asf_guid mediatype, subtype, formattype;
                int size;
                avio_skip(pb, 28);
                ff_get_guid(pb, &mediatype);
                ff_get_guid(pb, &subtype);
                avio_skip(pb, 12);
                ff_get_guid(pb, &formattype);
                size = avio_rl32(pb);
                parse_media_type(s, 0, sid, mediatype, subtype, formattype, size);
                consumed += 92 + size;
            }
        } else if (!ff_guidcmp(g, ff_stream2_guid)) {
            /* updated media type; only honoured before any data was seen */
            int stream_index = ff_find_stream_index(s, sid);
            if (stream_index >= 0 && s->streams[stream_index]->priv_data && !((WtvStream*)s->streams[stream_index]->priv_data)->seen_data) {
                ff_asf_guid mediatype, subtype, formattype;
                int size;
                avio_skip(pb, 12);
                ff_get_guid(pb, &mediatype);
                ff_get_guid(pb, &subtype);
                avio_skip(pb, 12);
                ff_get_guid(pb, &formattype);
                size = avio_rl32(pb);
                parse_media_type(s, s->streams[stream_index], sid, mediatype, subtype, formattype, size);
                consumed += 76 + size;
            }
        } else if (!ff_guidcmp(g, EVENTID_AudioDescriptorSpanningEvent) ||
                   !ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) ||
                   !ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent) ||
                   !ff_guidcmp(g, EVENTID_StreamIDSpanningEvent) ||
                   !ff_guidcmp(g, EVENTID_SubtitleSpanningEvent) ||
                   !ff_guidcmp(g, EVENTID_TeletextSpanningEvent)) {
            /* events carrying an MPEG-2 descriptor payload */
            int stream_index = ff_find_stream_index(s, sid);
            if (stream_index >= 0) {
                AVStream *st = s->streams[stream_index];
                uint8_t buf[258];
                const uint8_t *pbuf = buf;
                int buf_size;

                avio_skip(pb, 8);
                consumed += 8;
                if (!ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) ||
                    !ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent)) {
                    avio_skip(pb, 6);
                    consumed += 6;
                }

                buf_size = FFMIN(len - consumed, sizeof(buf));
                avio_read(pb, buf, buf_size);
                consumed += buf_size;
                ff_parse_mpeg2_descriptor(s, st, 0, &pbuf, buf + buf_size, NULL, 0, 0, NULL);
            }
        } else if (!ff_guidcmp(g, EVENTID_AudioTypeSpanningEvent)) {
            int stream_index = ff_find_stream_index(s, sid);
            if (stream_index >= 0) {
                AVStream *st = s->streams[stream_index];
                int audio_type;
                avio_skip(pb, 8);
                audio_type = avio_r8(pb);
                if (audio_type == 2)
                    st->disposition |= AV_DISPOSITION_HEARING_IMPAIRED;
                else if (audio_type == 3)
                    st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED;
                consumed += 9;
            }
        } else if (!ff_guidcmp(g, EVENTID_DVBScramblingControlSpanningEvent)) {
            int stream_index = ff_find_stream_index(s, sid);
            if (stream_index >= 0) {
                avio_skip(pb, 12);
                if (avio_rl32(pb))
                    av_log(s, AV_LOG_WARNING, "DVB scrambled stream detected (st:%d), decoding will likely fail\n", stream_index);
                consumed += 16;
            }
        } else if (!ff_guidcmp(g, EVENTID_LanguageSpanningEvent)) {
            int stream_index = ff_find_stream_index(s, sid);
            if (stream_index >= 0) {
                AVStream *st = s->streams[stream_index];
                uint8_t language[4];
                avio_skip(pb, 12);
                avio_read(pb, language, 3);
                if (language[0]) {
                    language[3] = 0;
                    av_dict_set(&st->metadata, "language", language, 0);
                    /* "nar" = narration track */
                    if (!strcmp(language, "nar") || !strcmp(language, "NAR"))
                        st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED;
                }
                consumed += 15;
            }
        } else if (!ff_guidcmp(g, ff_timestamp_guid)) {
            /* timestamp chunk: applies to the following data chunk */
            int stream_index = ff_find_stream_index(s, sid);
            if (stream_index >= 0) {
                avio_skip(pb, 8);
                wtv->pts = avio_rl64(pb);
                consumed += 16;
                if (wtv->pts == -1)
                    wtv->pts = AV_NOPTS_VALUE;
                else {
                    wtv->last_valid_pts = wtv->pts;
                    /* track the earliest pts as the presentation epoch */
                    if (wtv->epoch == AV_NOPTS_VALUE || wtv->pts < wtv->epoch)
                        wtv->epoch = wtv->pts;
                    if (mode == SEEK_TO_PTS && wtv->pts >= seekts) {
                        avio_skip(pb, WTV_PAD8(len) - consumed);
                        return 0;
                    }
                }
            }
        } else if (!ff_guidcmp(g, ff_data_guid)) {
            int stream_index = ff_find_stream_index(s, sid);
            if (mode == SEEK_TO_DATA && stream_index >= 0 && len > 32 && s->streams[stream_index]->priv_data) {
                WtvStream *wst = s->streams[stream_index]->priv_data;
                wst->seen_data = 1;
                if (len_ptr) {
                    *len_ptr = len;
                }
                /* stop with pb positioned at the packet payload */
                return stream_index;
            }
        } else if (!ff_guidcmp(g, /* DSATTRIB_WMDRMProtectionInfo */ (const ff_asf_guid){0x83,0x95,0x74,0x40,0x9D,0x6B,0xEC,0x4E,0xB4,0x3C,0x67,0xA1,0x80,0x1E,0x1A,0x9B})) {
            int stream_index = ff_find_stream_index(s, sid);
            if (stream_index >= 0)
                av_log(s, AV_LOG_WARNING, "encrypted stream detected (st:%d), decoding will likely fail\n", stream_index);
        } else if (
            !ff_guidcmp(g, /* DSATTRIB_CAPTURE_STREAMTIME */ (const ff_asf_guid){0x14,0x56,0x1A,0x0C,0xCD,0x30,0x40,0x4F,0xBC,0xBF,0xD0,0x3E,0x52,0x30,0x62,0x07}) ||
            !ff_guidcmp(g, /* DSATTRIB_PBDATAG_ATTRIBUTE */ (const ff_asf_guid){0x79,0x66,0xB5,0xE0,0xB9,0x12,0xCC,0x43,0xB7,0xDF,0x57,0x8C,0xAA,0x5A,0x7B,0x63}) ||
            !ff_guidcmp(g, /* DSATTRIB_PicSampleSeq */ (const ff_asf_guid){0x02,0xAE,0x5B,0x2F,0x8F,0x7B,0x60,0x4F,0x82,0xD6,0xE4,0xEA,0x2F,0x1F,0x4C,0x99}) ||
            !ff_guidcmp(g, /* DSATTRIB_TRANSPORT_PROPERTIES */ ff_DSATTRIB_TRANSPORT_PROPERTIES) ||
            !ff_guidcmp(g, /* dvr_ms_vid_frame_rep_data */ (const ff_asf_guid){0xCC,0x32,0x64,0xDD,0x29,0xE2,0xDB,0x40,0x80,0xF6,0xD2,0x63,0x28,0xD2,0x76,0x1F}) ||
            !ff_guidcmp(g, /* EVENTID_ChannelChangeSpanningEvent */ (const ff_asf_guid){0xE5,0xC5,0x67,0x90,0x5C,0x4C,0x05,0x42,0x86,0xC8,0x7A,0xFE,0x20,0xFE,0x1E,0xFA}) ||
            !ff_guidcmp(g, /* EVENTID_ChannelInfoSpanningEvent */ (const ff_asf_guid){0x80,0x6D,0xF3,0x41,0x32,0x41,0xC2,0x4C,0xB1,0x21,0x01,0xA4,0x32,0x19,0xD8,0x1B}) ||
            !ff_guidcmp(g, /* EVENTID_ChannelTypeSpanningEvent */ (const ff_asf_guid){0x51,0x1D,0xAB,0x72,0xD2,0x87,0x9B,0x48,0xBA,0x11,0x0E,0x08,0xDC,0x21,0x02,0x43}) ||
            !ff_guidcmp(g, /* EVENTID_PIDListSpanningEvent */ (const ff_asf_guid){0x65,0x8F,0xFC,0x47,0xBB,0xE2,0x34,0x46,0x9C,0xEF,0xFD,0xBF,0xE6,0x26,0x1D,0x5C}) ||
            !ff_guidcmp(g, /* EVENTID_SignalAndServiceStatusSpanningEvent */ (const ff_asf_guid){0xCB,0xC5,0x68,0x80,0x04,0x3C,0x2B,0x49,0xB4,0x7D,0x03,0x08,0x82,0x0D,0xCE,0x51}) ||
            !ff_guidcmp(g, /* EVENTID_StreamTypeSpanningEvent */ (const ff_asf_guid){0xBC,0x2E,0xAF,0x82,0xA6,0x30,0x64,0x42,0xA8,0x0B,0xAD,0x2E,0x13,0x72,0xAC,0x60}) ||
            !ff_guidcmp(g, (const ff_asf_guid){0x1E,0xBE,0xC3,0xC5,0x43,0x92,0xDC,0x11,0x85,0xE5,0x00,0x12,0x3F,0x6F,0x73,0xB9}) ||
            !ff_guidcmp(g, (const ff_asf_guid){0x3B,0x86,0xA2,0xB1,0xEB,0x1E,0xC3,0x44,0x8C,0x88,0x1C,0xA3,0xFF,0xE3,0xE7,0x6A}) ||
            !ff_guidcmp(g, (const ff_asf_guid){0x4E,0x7F,0x4C,0x5B,0xC4,0xD0,0x38,0x4B,0xA8,0x3E,0x21,0x7F,0x7B,0xBF,0x52,0xE7}) ||
            !ff_guidcmp(g, (const ff_asf_guid){0x63,0x36,0xEB,0xFE,0xA1,0x7E,0xD9,0x11,0x83,0x08,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) ||
            !ff_guidcmp(g, (const ff_asf_guid){0x70,0xE9,0xF1,0xF8,0x89,0xA4,0x4C,0x4D,0x83,0x73,0xB8,0x12,0xE0,0xD5,0xF8,0x1E}) ||
            !ff_guidcmp(g, ff_index_guid) ||
            !ff_guidcmp(g, ff_sync_guid) ||
            !ff_guidcmp(g, ff_stream1_guid) ||
            !ff_guidcmp(g, (const ff_asf_guid){0xF7,0x10,0x02,0xB9,0xEE,0x7C,0xED,0x4E,0xBD,0x7F,0x05,0x40,0x35,0x86,0x18,0xA1})) {
            //ignore known guids
        } else
            av_log(s, AV_LOG_WARNING, "unsupported chunk:"FF_PRI_GUID"\n", FF_ARG_GUID(g));

        /* skip the unread remainder of the chunk (chunks are 8-byte padded) */
        avio_skip(pb, WTV_PAD8(len) - consumed);
    }
    return AVERROR_EOF;
}
 
/**
 * Read the WTV file header: locate and read the root directory, open the
 * timeline sub-file, parse chunks up to the first data chunk, then read
 * the metadata and seek-index sub-files if present.
 * @return 0 on success, negative AVERROR code on failure
 */
static int read_header(AVFormatContext *s)
{
    WtvContext *wtv = s->priv_data;
    int root_sector, root_size;
    uint8_t root[WTV_SECTOR_SIZE];
    AVIOContext *pb;
    int64_t timeline_pos;
    int ret;

    wtv->epoch          =
    wtv->pts            =
    wtv->last_valid_pts = AV_NOPTS_VALUE;

    /* read root directory sector */
    avio_skip(s->pb, 0x30);
    root_size = avio_rl32(s->pb);
    if (root_size > sizeof(root)) {
        av_log(s, AV_LOG_ERROR, "root directory size exceeds sector size\n");
        return AVERROR_INVALIDDATA;
    }
    avio_skip(s->pb, 4);
    root_sector = avio_rl32(s->pb);

    seek_by_sector(s->pb, root_sector, 0);
    root_size = avio_read(s->pb, root, root_size);
    if (root_size < 0)
        return AVERROR_INVALIDDATA;

    /* parse chunks up until first data chunk */
    wtv->pb = wtvfile_open(s, root, root_size, ff_timeline_le16);
    if (!wtv->pb) {
        av_log(s, AV_LOG_ERROR, "timeline data missing\n");
        return AVERROR_INVALIDDATA;
    }

    ret = parse_chunks(s, SEEK_TO_DATA, 0, 0);
    if (ret < 0)
        return ret;
    /* rewind over the data chunk header so read_packet() sees it again */
    avio_seek(wtv->pb, -32, SEEK_CUR);

    timeline_pos = avio_tell(s->pb); // save before opening another file

    /* read metadata */
    pb = wtvfile_open(s, root, root_size, ff_table_0_entries_legacy_attrib_le16);
    if (pb) {
        parse_legacy_attrib(s, pb);
        wtvfile_close(pb);
    }

    /* read seek index */
    if (s->nb_streams) {
        AVStream *st = s->streams[0];
        pb = wtvfile_open(s, root, root_size, ff_table_0_entries_time_le16);
        if (pb) {
            while(1) {
                /* each record is (timestamp, frame number); the values read
                 * at EOF are discarded by the feof check below */
                uint64_t timestamp = avio_rl64(pb);
                uint64_t frame_nb  = avio_rl64(pb);
                if (url_feof(pb))
                    break;
                /* the frame number is stashed in the 'size' field; the file
                 * position is filled in from the Events table below */
                ff_add_index_entry(&wtv->index_entries, &wtv->nb_index_entries, &wtv->index_entries_allocated_size,
                                   0, timestamp, frame_nb, 0, AVINDEX_KEYFRAME);
            }
            wtvfile_close(pb);

            if (wtv->nb_index_entries) {
                pb = wtvfile_open(s, root, root_size, ff_timeline_table_0_entries_Events_le16);
                if (pb) {
                    int i;
                    while (1) {
                        uint64_t frame_nb = avio_rl64(pb);
                        uint64_t position = avio_rl64(pb);
                        if (url_feof(pb))
                            break;
                        /* assign the largest known position not past each
                         * indexed frame number */
                        for (i = wtv->nb_index_entries - 1; i >= 0; i--) {
                            AVIndexEntry *e = wtv->index_entries + i;
                            if (frame_nb > e->size)
                                break;
                            if (position > e->pos)
                                e->pos = position;
                        }
                    }
                    wtvfile_close(pb);
                    st->duration = wtv->index_entries[wtv->nb_index_entries - 1].timestamp;
                }
            }
        }
    }

    avio_seek(s->pb, timeline_pos, SEEK_SET);
    return 0;
}
 
/* Read the next data chunk from the timeline and return it as a packet. */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    WtvContext *wtv = s->priv_data;
    AVIOContext *pb = wtv->pb;
    int chunk_len, err;
    int sid = parse_chunks(s, SEEK_TO_DATA, 0, &chunk_len);

    if (sid < 0)
        return sid;

    /* chunk_len includes the 32-byte chunk header already consumed */
    err = av_get_packet(pb, pkt, chunk_len - 32);
    if (err < 0)
        return err;

    pkt->stream_index = sid;
    pkt->pts          = wtv->pts;

    /* skip padding up to the next 8-byte boundary */
    avio_skip(pb, WTV_PAD8(chunk_len) - chunk_len);
    return 0;
}
 
/**
 * Seek to timestamp ts.  stream_index is ignored; the index of stream 0
 * is used for all streams.  Frame- and byte-based seeking are rejected.
 */
static int read_seek(AVFormatContext *s, int stream_index,
                     int64_t ts, int flags)
{
    WtvContext *wtv = s->priv_data;
    AVIOContext *pb = wtv->pb;
    AVStream *st = s->streams[0];
    int64_t ts_relative;
    int i;

    if ((flags & AVSEEK_FLAG_FRAME) || (flags & AVSEEK_FLAG_BYTE))
        return AVERROR(ENOSYS);

    /* timestamp adjustment is required because wtv->pts values are absolute,
     * whereas AVIndexEntry->timestamp values are relative to epoch. */
    ts_relative = ts;
    if (wtv->epoch != AV_NOPTS_VALUE)
        ts_relative -= wtv->epoch;

    i = ff_index_search_timestamp(wtv->index_entries, wtv->nb_index_entries, ts_relative, flags);
    if (i < 0) {
        /* no usable index entry: linearly scan the timeline, starting either
         * from the beginning or from the last indexed position */
        if (wtv->last_valid_pts == AV_NOPTS_VALUE || ts < wtv->last_valid_pts) {
            if (avio_seek(pb, 0, SEEK_SET) < 0)
                return -1;
        } else if (st->duration != AV_NOPTS_VALUE && ts_relative > st->duration && wtv->nb_index_entries) {
            if (avio_seek(pb, wtv->index_entries[wtv->nb_index_entries - 1].pos, SEEK_SET) < 0)
                return -1;
        }
        if (parse_chunks(s, SEEK_TO_PTS, ts, 0) < 0)
            return AVERROR(ERANGE);
        return 0;
    }
    /* index hit: jump directly and restore the absolute pts */
    if (avio_seek(pb, wtv->index_entries[i].pos, SEEK_SET) < 0)
        return -1;
    wtv->pts = wtv->index_entries[i].timestamp;
    if (wtv->epoch != AV_NOPTS_VALUE)
        wtv->pts += wtv->epoch;
    wtv->last_valid_pts = wtv->pts;
    return 0;
}
 
/* Free demuxer resources: the timeline sub-file and the seek index. */
static int read_close(AVFormatContext *s)
{
    WtvContext *wtv = s->priv_data;

    wtvfile_close(wtv->pb);
    av_freep(&wtv->index_entries);
    return 0;
}
 
/* WTV demuxer definition. */
AVInputFormat ff_wtv_demuxer = {
    .name           = "wtv",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Television (WTV)"),
    .priv_data_size = sizeof(WtvContext),
    .read_probe     = read_probe,
    .read_header    = read_header,
    .read_packet    = read_packet,
    .read_seek      = read_seek,
    .read_close     = read_close,
    .flags          = AVFMT_SHOW_IDS,
};
/contrib/sdk/sources/ffmpeg/libavformat/wtvenc.c
0,0 → 1,805
/*
* Windows Television (WTV) muxer
* Copyright (c) 2011 Zhentan Feng <spyfeng at gmail dot com>
* Copyright (c) 2011 Peter Ross <pross@xvid.org>
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
 * @file
 * Windows Television (WTV) muxer
 * @author Zhentan Feng <spyfeng at gmail dot com>
 */
 
#include "libavutil/intreadwrite.h"
#include "libavutil/avassert.h"
#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "wtv.h"
#include "asf.h"
 
#define WTV_BIGSECTOR_SIZE (1 << WTV_BIGSECTOR_BITS)
#define INDEX_BASE 0x2
#define MAX_NB_INDEX 10
 
/* declare utf16le strings: the `_` macro expands each character to
 * "char, 0" so the byte arrays below are little-endian UTF-16 */
#define _ , 0,
static const uint8_t timeline_table_0_header_events[] =
    {'t'_'i'_'m'_'e'_'l'_'i'_'n'_'e'_'.'_'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'h'_'e'_'a'_'d'_'e'_'r'_'.'_'E'_'v'_'e'_'n'_'t'_'s', 0};
static const uint8_t table_0_header_legacy_attrib[] =
    {'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'h'_'e'_'a'_'d'_'e'_'r'_'.'_'l'_'e'_'g'_'a'_'c'_'y'_'_'_'a'_'t'_'t'_'r'_'i'_'b', 0};
static const uint8_t table_0_redirector_legacy_attrib[] =
    {'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'r'_'e'_'d'_'i'_'r'_'e'_'c'_'t'_'o'_'r'_'.'_'l'_'e'_'g'_'a'_'c'_'y'_'_'_'a'_'t'_'t'_'r'_'i'_'b', 0};
static const uint8_t table_0_header_time[] =
    {'t'_'a'_'b'_'l'_'e'_'.'_'0'_'.'_'h'_'e'_'a'_'d'_'e'_'r'_'.'_'t'_'i'_'m'_'e', 0};
static const uint8_t legacy_attrib[] =
    {'l'_'e'_'g'_'a'_'c'_'y'_'_'_'a'_'t'_'t'_'r'_'i'_'b', 0};
#undef _

/* guid identifying this sub-format of WTV */
static const ff_asf_guid sub_wtv_guid =
    {0x8C,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D};
 
/* Sub-files stored inside the WTV container.  Values index
 * WtvContext.file[] and must stay in the same order as
 * wtv_root_entry_table. */
enum WtvFileIndex {
    WTV_TIMELINE_TABLE_0_HEADER_EVENTS = 0,
    WTV_TIMELINE_TABLE_0_ENTRIES_EVENTS,
    WTV_TIMELINE,
    WTV_TABLE_0_HEADER_LEGACY_ATTRIB,
    WTV_TABLE_0_ENTRIES_LEGACY_ATTRIB,
    WTV_TABLE_0_REDIRECTOR_LEGACY_ATTRIB,
    WTV_TABLE_0_HEADER_TIME,
    WTV_TABLE_0_ENTRIES_TIME,
    WTV_FILES
};
 
/* Per-subfile bookkeeping used when writing the root directory. */
typedef struct {
    int64_t length;       /**< subfile length in bytes; flag bits ORed in by finish_file() */
    const void *header;   /**< NOTE(review): appears unused — name strings come from wtv_root_entry_table */
    int depth;            /**< FAT depth: 0 = direct, 1 or 2 = levels of indirection */
    int first_sector;     /**< first sector of the subfile data or of its FAT */
} WtvFile;
 
/* One pending index entry describing a header chunk; flushed by write_index(). */
typedef struct {
    int64_t pos;                /**< chunk position relative to the timeline start */
    int64_t serial;             /**< chunk serial number */
    const ff_asf_guid * guid;   /**< chunk guid */
    int stream_id;              /**< stream id with the flag bits masked off */
} WtvChunkEntry;
 
/* Generic (serial, value) pair; used for both the (serial, timestamp)
 * and (serial, position) tables. */
typedef struct {
    int64_t serial;
    int64_t value;
} WtvSyncEntry;
 
/* Muxer private state. */
typedef struct {
    int64_t timeline_start_pos;   /**< byte offset where the timeline file begins */
    WtvFile file[WTV_FILES];      /**< per-subfile bookkeeping */
    int64_t serial;               /**< chunk serial number */
    int64_t last_chunk_pos;       /**< last chunk position */
    int64_t last_timestamp_pos;   /**< last timestamp chunk position */
    int64_t first_index_pos;      /**< first index_chunk position */

    WtvChunkEntry index[MAX_NB_INDEX]; /**< pending header-chunk index entries */
    int nb_index;                 /**< number of valid entries in index[] */
    int first_video_flag;         /**< set until the first video codec header is written */

    WtvSyncEntry *st_pairs; /* (serial, timestamp) pairs */
    int nb_st_pairs;
    WtvSyncEntry *sp_pairs; /* (serial, position) pairs */
    int nb_sp_pairs;

    int64_t last_pts;             /**< highest pts seen so far */
    int64_t last_serial;          /**< serial of the chunk that carried last_pts */

    AVPacket thumbnail;           /**< stashed MJPEG packet, written as WM/Picture metadata */
} WtvContext;
 
 
/* Append a (serial, value) pair to *list, growing it by one entry.
 * On allocation failure the pair is silently dropped and the existing
 * list is left untouched. */
static void add_serial_pair(WtvSyncEntry ** list, int * count, int64_t serial, int64_t value)
{
    WtvSyncEntry *grown = av_realloc(*list, (*count + 1) * sizeof(**list));

    if (!grown)
        return;

    grown[*count].serial = serial;
    grown[*count].value  = value;
    *list = grown;
    (*count)++;
}
 
/* Callback that writes a subfile's inline payload; returns the number of
 * bytes written. */
typedef int WTVHeaderWriteFunc(AVIOContext *pb);

/* One entry of the root directory template: UTF-16LE name, its byte size,
 * and an optional inline-payload writer. */
typedef struct {
    const uint8_t *header;
    int header_size;
    WTVHeaderWriteFunc *write_header;
} WTVRootEntryTable;

/* write `size` zero bytes */
#define write_pad(pb, size) ffio_fill(pb, 0, size)
 
/* Look up the GUID for a codec id in an AV_CODEC_ID_NONE-terminated
 * table; returns NULL when the codec is not in the table. */
static const ff_asf_guid *get_codec_guid(enum AVCodecID id, const AVCodecGuid *av_guid)
{
    const AVCodecGuid *entry;

    for (entry = av_guid; entry->id != AV_CODEC_ID_NONE; entry++)
        if (entry->id == id)
            return &entry->guid;

    return NULL;
}
 
/**
 * Write chunk header. If header chunk (0x80000000 set) then add to list of header chunks,
 * except for index chunks which must not index themselves.
 * The length field written is 32 (chunk header size) + payload length;
 * callers going through write_chunk_header2()/finish_chunk() pass 0 and
 * have it patched afterwards.
 */
static void write_chunk_header(AVFormatContext *s, const ff_asf_guid *guid, int length, int stream_id)
{
    WtvContext *wctx = s->priv_data;
    AVIOContext *pb = s->pb;

    /* chunk positions are kept relative to the start of the timeline file */
    wctx->last_chunk_pos = avio_tell(pb) - wctx->timeline_start_pos;
    ff_put_guid(pb, guid);
    avio_wl32(pb, 32 + length);
    avio_wl32(pb, stream_id);
    avio_wl64(pb, wctx->serial);

    if ((stream_id & 0x80000000) && guid != &ff_index_guid) {
        WtvChunkEntry *t = wctx->index + wctx->nb_index;
        av_assert0(wctx->nb_index < MAX_NB_INDEX);
        t->pos       = wctx->last_chunk_pos;
        t->serial    = wctx->serial;
        t->guid      = guid;
        t->stream_id = stream_id & 0x3FFFFFFF;
        wctx->nb_index++;
    }
}
 
/* Write a chunk header whose first payload field is the position of the
 * previous chunk; the length field (0 here) is patched when the chunk is
 * finished. */
static void write_chunk_header2(AVFormatContext *s, const ff_asf_guid *guid, int stream_id)
{
    WtvContext *wctx = s->priv_data;
    int64_t prev_chunk_pos = wctx->last_chunk_pos;

    write_chunk_header(s, guid, 0, stream_id); /* length updated later */
    avio_wl64(s->pb, prev_chunk_pos);
}
 
/*
 * Close the current chunk: patch its length field with the actual size,
 * pad to an 8-byte boundary, and bump the serial number.  Does not flush
 * the header-chunk index.
 */
static void finish_chunk_noindex(AVFormatContext *s)
{
    WtvContext *wctx = s->priv_data;
    AVIOContext *pb = s->pb;

    // update the chunk_len field and pad.
    int64_t chunk_len = avio_tell(pb) - (wctx->last_chunk_pos + wctx->timeline_start_pos);
    avio_seek(pb, -(chunk_len - 16), SEEK_CUR);  /* back to the length field (guid is 16 bytes) */
    avio_wl32(pb, chunk_len);
    avio_seek(pb, chunk_len - (16 + 4), SEEK_CUR); /* forward to the end of the chunk */

    write_pad(pb, WTV_PAD8(chunk_len) - chunk_len);
    wctx->serial++;
}
 
/*
 * Flush the in-memory list of header chunks as an index chunk and reset
 * the list.  The position of the very first index chunk is remembered so
 * sync chunks can point at it.
 */
static void write_index(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    WtvContext *wctx = s->priv_data;
    int i;

    write_chunk_header2(s, &ff_index_guid, 0x80000000);
    avio_wl32(pb, 0);
    avio_wl32(pb, 0);

    for (i = 0; i < wctx->nb_index; i++) {
        WtvChunkEntry *t = wctx->index + i;
        ff_put_guid(pb,  t->guid);
        avio_wl64(pb, t->pos);
        avio_wl32(pb, t->stream_id);
        avio_wl32(pb, 0); // checksum?
        avio_wl64(pb, t->serial);
    }
    wctx->nb_index = 0; // reset index
    finish_chunk_noindex(s);

    if (!wctx->first_index_pos)
        wctx->first_index_pos = wctx->last_chunk_pos;
}
 
/* Close the current chunk, flushing the header-chunk index when full. */
static void finish_chunk(AVFormatContext *s)
{
    WtvContext *wctx = s->priv_data;

    finish_chunk_noindex(s);

    if (wctx->nb_index == MAX_NB_INDEX)
        write_index(s);
}
 
/*
 * Write the media-type / subtype / format description for one stream,
 * wrapped in the WTV 'cpfilters processed' container.  The size field in
 * front of the format data is patched once the codec-specific header has
 * been written and its size is known.
 *
 * @return 0 on success, -1 on unsupported codec type or unknown codec id
 */
static int write_stream_codec_info(AVFormatContext *s, AVStream *st)
{
    WtvContext *wctx = s->priv_data;
    const ff_asf_guid *g, *media_type, *format_type;
    AVIOContext *pb = s->pb;
    int64_t hdr_pos_start;
    int hdr_size = 0;

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        g           = get_codec_guid(st->codec->codec_id, ff_video_guids);
        media_type  = &ff_mediatype_video;
        format_type = &ff_format_mpeg2_video;
    } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        g           = get_codec_guid(st->codec->codec_id, ff_codec_wav_guids);
        media_type  = &ff_mediatype_audio;
        format_type = &ff_format_waveformatex;
    } else {
        av_log(s, AV_LOG_ERROR, "unknown codec_type (0x%x)\n", st->codec->codec_type);
        return -1;
    }

    if (g == NULL) {
        /* fixed: the old message claimed "video codec_id" even for audio streams */
        av_log(s, AV_LOG_ERROR, "can't get codec_id (0x%x) guid.\n", st->codec->codec_id);
        return -1;
    }

    ff_put_guid(pb, media_type);                           // mediatype
    ff_put_guid(pb, &ff_mediasubtype_cpfilters_processed); // subtype
    write_pad(pb, 12);
    ff_put_guid(pb,&ff_format_cpfilters_processed);        // format type
    avio_wl32(pb, 0);                                      // size, patched below

    hdr_pos_start = avio_tell(pb);
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        if (wctx->first_video_flag) {
            write_pad(pb, 216); //The size is sensitive.
            wctx->first_video_flag = 0;
        } else {
            write_pad(pb, 72); // aspect ratio
            ff_put_bmp_header(pb, st->codec, ff_codec_bmp_tags, 0);
        }
    } else {
        ff_put_wav_header(pb, st->codec);
    }
    hdr_size = avio_tell(pb) - hdr_pos_start;

    // seek back and patch the size field, then skip forward again
    avio_seek(pb, -(hdr_size + 4), SEEK_CUR);
    avio_wl32(pb, hdr_size + 32);
    avio_seek(pb, hdr_size, SEEK_CUR);

    ff_put_guid(pb, g);           // actual_subtype
    ff_put_guid(pb, format_type); // actual_formattype

    return 0;
}
 
/*
 * Emit the 'stream1' codec description chunk for one stream.
 * @return 0 on success, -1 on failure
 */
static int write_stream_codec(AVFormatContext *s, AVStream * st)
{
    AVIOContext *pb = s->pb;
    int ret;
    write_chunk_header2(s, &ff_stream1_guid, 0x80000000 | 0x01);

    avio_wl32(pb, 0x01);
    write_pad(pb, 4);
    write_pad(pb, 4);

    ret = write_stream_codec_info(s, st);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "write stream codec info failed codec_type(0x%x)\n", st->codec->codec_type);
        return -1;
    }

    finish_chunk(s);
    return 0;
}
 
/*
 * Emit a sync chunk pointing at the first index chunk and the most recent
 * timestamp chunk, and record a (serial, position) pair for the
 * 'timeline.table.0.entries.Events' table.  last_chunk_pos is restored
 * afterwards so the sync chunk does not become the "previous chunk" of
 * the next header chunk.
 */
static void write_sync(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    WtvContext *wctx = s->priv_data;
    int64_t last_chunk_pos = wctx->last_chunk_pos;

    write_chunk_header(s, &ff_sync_guid, 0x18, 0);
    avio_wl64(pb, wctx->first_index_pos);
    avio_wl64(pb, wctx->last_timestamp_pos);
    avio_wl64(pb, 0);

    finish_chunk(s);
    add_serial_pair(&wctx->sp_pairs, &wctx->nb_sp_pairs, wctx->serial, wctx->last_chunk_pos);

    wctx->last_chunk_pos = last_chunk_pos;
}
 
/*
 * Emit the SBE2 stream-descriptor chunk for one stream and set the
 * stream time base to 1/10000000 (100 ns units), matching WTV timestamps.
 * @return 0 on success, -1 on failure
 */
static int write_stream_data(AVFormatContext *s, AVStream *st)
{
    AVIOContext *pb = s->pb;
    int ret;

    write_chunk_header2(s, &ff_SBE2_STREAM_DESC_EVENT, 0x80000000 | (st->index + INDEX_BASE));
    avio_wl32(pb, 0x00000001);
    avio_wl32(pb, st->index + INDEX_BASE); //stream_id
    avio_wl32(pb, 0x00000001);
    write_pad(pb, 8);

    ret = write_stream_codec_info(s, st);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "write stream codec info failed codec_type(0x%x)\n", st->codec->codec_type);
        return -1;
    }
    finish_chunk(s);

    avpriv_set_pts_info(st, 64, 1, 10000000);

    return 0;
}
 
/*
 * Write the file header: master guids, sector sizes, placeholder root
 * table fields (patched in write_trailer), then per-stream codec and
 * stream-descriptor chunks.  MJPEG streams are excluded from the
 * timeline; their packets are stored as the thumbnail attachment instead
 * (see write_packet).
 * @return 0 on success, -1 on failure
 */
static int write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    WtvContext *wctx = s->priv_data;
    int i, pad, ret;
    AVStream *st;

    wctx->last_chunk_pos     = -1;
    wctx->last_timestamp_pos = -1;

    ff_put_guid(pb, &ff_wtv_guid);
    ff_put_guid(pb, &sub_wtv_guid);

    avio_wl32(pb, 0x01);
    avio_wl32(pb, 0x02);
    avio_wl32(pb, 1 << WTV_SECTOR_BITS);
    avio_wl32(pb, 1 << WTV_BIGSECTOR_BITS);

    //write initial root fields
    avio_wl32(pb, 0); // root_size, update later
    write_pad(pb, 4);
    avio_wl32(pb, 0); // root_sector, update it later.

    write_pad(pb, 32);
    avio_wl32(pb, 0); // file ends pointer, update it later.

    /* pad the header out to a full sector; the timeline starts on a
     * sector boundary */
    pad = (1 << WTV_SECTOR_BITS) - avio_tell(pb);
    write_pad(pb, pad);

    wctx->timeline_start_pos = avio_tell(pb);

    wctx->serial = 1;
    wctx->last_chunk_pos = -1;
    wctx->first_video_flag = 1;

    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codec->codec_id == AV_CODEC_ID_MJPEG)
            continue;
        ret = write_stream_codec(s, st);
        if (ret < 0) {
            av_log(s, AV_LOG_ERROR, "write stream codec failed codec_type(0x%x)\n", st->codec->codec_type);
            return -1;
        }
        if (!i)
            write_sync(s);
    }

    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codec->codec_id == AV_CODEC_ID_MJPEG)
            continue;
        ret = write_stream_data(s, st);
        if (ret < 0) {
            av_log(s, AV_LOG_ERROR, "write stream data failed codec_type(0x%x)\n", st->codec->codec_type);
            return -1;
        }
    }

    if (wctx->nb_index)
        write_index(s);

    return 0;
}
 
/*
 * Emit the timestamp chunk that precedes each data chunk.  The pts is
 * written three times (-1 when unknown); the flag word after them is 1
 * only for video keyframes.
 */
static void write_timestamp(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    WtvContext *wctx = s->priv_data;
    AVCodecContext *enc = s->streams[pkt->stream_index]->codec;

    write_chunk_header(s, &ff_timestamp_guid, 56, 0x40000000 | (INDEX_BASE + pkt->stream_index));
    write_pad(pb, 8);
    avio_wl64(pb, pkt->pts == AV_NOPTS_VALUE ? -1 : pkt->pts);
    avio_wl64(pb, pkt->pts == AV_NOPTS_VALUE ? -1 : pkt->pts);
    avio_wl64(pb, pkt->pts == AV_NOPTS_VALUE ? -1 : pkt->pts);
    avio_wl64(pb, 0);
    avio_wl64(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO && (pkt->flags & AV_PKT_FLAG_KEY) ? 1 : 0);
    avio_wl64(pb, 0);

    wctx->last_timestamp_pos = wctx->last_chunk_pos;
}
 
/*
 * Write one packet.  The first MJPEG packet is stashed as the thumbnail
 * instead of being written to the timeline; other packets get periodic
 * sync/time records, a timestamp chunk, then the data chunk padded to an
 * 8-byte boundary.
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    WtvContext  *wctx = s->priv_data;

    if (s->streams[pkt->stream_index]->codec->codec_id == AV_CODEC_ID_MJPEG && !wctx->thumbnail.size) {
        /* NOTE(review): av_copy_packet() result is ignored — on allocation
         * failure the thumbnail silently stays empty */
        av_copy_packet(&wctx->thumbnail, pkt);
        return 0;
    }

    /* emit sync chunk and 'timeline.table.0.entries.Event' record every 50 frames */
    if (wctx->serial - (wctx->nb_sp_pairs ? wctx->sp_pairs[wctx->nb_sp_pairs - 1].serial : 0) >= 50)
        write_sync(s);

    /* emit 'table.0.entries.time' record every 500ms */
    if (pkt->pts != AV_NOPTS_VALUE && pkt->pts - (wctx->nb_st_pairs ? wctx->st_pairs[wctx->nb_st_pairs - 1].value : 0) >= 5000000)
        add_serial_pair(&wctx->st_pairs, &wctx->nb_st_pairs, wctx->serial, pkt->pts);

    if (pkt->pts != AV_NOPTS_VALUE && pkt->pts > wctx->last_pts) {
        wctx->last_pts = pkt->pts;
        wctx->last_serial = wctx->serial;
    }

    // write timestamp chunk
    write_timestamp(s, pkt);

    write_chunk_header(s, &ff_data_guid, pkt->size, INDEX_BASE + pkt->stream_index);
    avio_write(pb, pkt->data, pkt->size);
    write_pad(pb, WTV_PAD8(pkt->size) - pkt->size);

    wctx->serial++;
    return 0;
}
 
/* Emit the fixed 'timeline.table.0.header.Events' payload.
 * @return number of bytes written (96) */
static int write_table0_header_events(AVIOContext *pb)
{
    avio_wl32(pb, 0x10);
    ffio_fill(pb, 0, 84);
    avio_wl64(pb, 0x32);
    return 4 + 84 + 8;
}
 
/* Emit the 'table.0.header.legacy_attrib' payload: marker, padding, the
 * UTF-16LE "legacy_attrib" name padded to 8 bytes, then trailing zeros.
 * @return number of bytes written */
static int write_table0_header_legacy_attrib(AVIOContext *pb)
{
    const int name_size   = sizeof(legacy_attrib);
    const int name_padded = WTV_PAD8(name_size);

    avio_wl32(pb, 0xFFFFFFFF);
    ffio_fill(pb, 0, 12);
    avio_write(pb, legacy_attrib, name_size);
    ffio_fill(pb, 0, name_padded - name_size);
    ffio_fill(pb, 0, 32);
    return 48 + name_padded;
}
 
/* Emit the fixed 'table.0.header.time' payload.
 * @return number of bytes written (88) */
static int write_table0_header_time(AVIOContext *pb)
{
    avio_wl32(pb, 0x10);
    ffio_fill(pb, 0, 76);
    avio_wl64(pb, 0x40);
    return 4 + 76 + 8;
}
 
/* Root directory template; entries must stay in the same order as
 * enum WtvFileIndex, which is used to index WtvContext.file[]. */
static const WTVRootEntryTable wtv_root_entry_table[] = {
    { timeline_table_0_header_events, sizeof(timeline_table_0_header_events), write_table0_header_events},
    { ff_timeline_table_0_entries_Events_le16, sizeof(ff_timeline_table_0_entries_Events_le16), NULL},
    { ff_timeline_le16, sizeof(ff_timeline_le16), NULL},
    { table_0_header_legacy_attrib, sizeof(table_0_header_legacy_attrib), write_table0_header_legacy_attrib},
    { ff_table_0_entries_legacy_attrib_le16, sizeof(ff_table_0_entries_legacy_attrib_le16), NULL},
    { table_0_redirector_legacy_attrib, sizeof(table_0_redirector_legacy_attrib), NULL},
    { table_0_header_time, sizeof(table_0_header_time), write_table0_header_time},
    { ff_table_0_entries_time_le16, sizeof(ff_table_0_entries_time_le16), NULL},
};
 
/*
 * Write the root directory table at sector_pos: one directory entry per
 * subfile, either with inline payload (when a write_header callback
 * exists) or with a (first_sector, depth) reference to the subfile's FAT.
 * @return size in bytes of the root table, before sector padding
 */
static int write_root_table(AVFormatContext *s, int64_t sector_pos)
{
    AVIOContext *pb = s->pb;
    WtvContext  *wctx = s->priv_data;
    int size, pad;
    int i;

    const WTVRootEntryTable *h = wtv_root_entry_table;
    for (i = 0; i < sizeof(wtv_root_entry_table)/sizeof(WTVRootEntryTable); i++, h++) {
        WtvFile *w = &wctx->file[i];
        int filename_padding = WTV_PAD8(h->header_size) - h->header_size;
        WTVHeaderWriteFunc *write = h->write_header;
        int len = 0;
        int64_t len_pos;

        ff_put_guid(pb, &ff_dir_entry_guid);
        len_pos = avio_tell(pb);
        avio_wl16(pb, 40 + h->header_size + filename_padding + 8); // maybe updated later
        write_pad(pb, 6);
        avio_wl64(pb, write ? 0 : w->length);// maybe update later
        avio_wl32(pb, (h->header_size + filename_padding) >> 1);   // name length in UTF-16 units
        write_pad(pb, 4);

        avio_write(pb, h->header, h->header_size);
        write_pad(pb, filename_padding);

        if (write) {
            /* inline payload: write it, then back-patch the entry and
             * length fields (flag bits 62 and 60 mark inline data) */
            len = write(pb);
            // update length field
            avio_seek(pb, len_pos, SEEK_SET);
            avio_wl64(pb, 40 + h->header_size + filename_padding + len);
            avio_wl64(pb, len |(1ULL<<62) | (1ULL<<60));
            avio_seek(pb, 8 + h->header_size + filename_padding + len, SEEK_CUR);
        } else {
            avio_wl32(pb, w->first_sector);
            avio_wl32(pb, w->depth);
        }
    }

    // calculate root table size
    size = avio_tell(pb) - sector_pos;
    pad = WTV_SECTOR_SIZE- size;
    write_pad(pb, pad);

    return size;
}
 
/*
 * Write one FAT level: nb_sectors little-endian sector pointers spaced
 * (1 << shift) sectors apart, starting at start_sector, then pad to a
 * sector boundary.
 * NOTE(review): when (nb_sectors << 2) is an exact multiple of
 * WTV_SECTOR_SIZE the pad expression writes one full extra sector —
 * confirm this is intended.
 */
static void write_fat(AVIOContext *pb, int start_sector, int nb_sectors, int shift)
{
    int i;
    for (i = 0; i < nb_sectors; i++) {
        avio_wl32(pb, start_sector + (i << shift));
    }
    // pad left sector pointer size
    write_pad(pb, WTV_SECTOR_SIZE - ((nb_sectors << 2) % WTV_SECTOR_SIZE));
}
 
/*
 * Write the FAT for a subfile spanning nb_sectors sectors starting at
 * start_pos.  For depth 2 a second-level FAT referencing the first level
 * is appended and its position is returned instead.
 * @return byte position of the top-level FAT
 */
static int64_t write_fat_sector(AVFormatContext *s, int64_t start_pos, int nb_sectors, int sector_bits, int depth)
{
    int64_t start_sector = start_pos >> WTV_SECTOR_BITS;
    int shift = sector_bits - WTV_SECTOR_BITS;

    int64_t fat = avio_tell(s->pb);
    write_fat(s->pb, start_sector, nb_sectors, shift);

    if (depth == 2) {
        int64_t start_sector1 = fat >> WTV_SECTOR_BITS;
        /* number of first-level FAT sectors (4 bytes per pointer) */
        int nb_sectors1 = ((nb_sectors << 2) + WTV_SECTOR_SIZE - 1) / WTV_SECTOR_SIZE;
        int64_t fat1 = avio_tell(s->pb);

        write_fat(s->pb, start_sector1, nb_sectors1, 0);
        return fat1;
    }

    return fat;
}
 
/* Dump the accumulated (serial, position) sync pairs for the
 * 'timeline.table.0.entries.Events' subfile. */
static void write_table_entries_events(AVFormatContext *s)
{
    WtvContext *wctx = s->priv_data;
    AVIOContext *pb = s->pb;
    const WtvSyncEntry *entry = wctx->sp_pairs;
    const WtvSyncEntry *end   = entry + wctx->nb_sp_pairs;

    for (; entry < end; entry++) {
        avio_wl64(pb, entry->serial);
        avio_wl64(pb, entry->value);
    }
}
 
/* Dump the (timestamp, serial) pairs for the 'table.0.entries.time'
 * subfile, terminated by the last pts/serial seen. */
static void write_table_entries_time(AVFormatContext *s)
{
    WtvContext *wctx = s->priv_data;
    AVIOContext *pb = s->pb;
    const WtvSyncEntry *entry = wctx->st_pairs;
    const WtvSyncEntry *end   = entry + wctx->nb_st_pairs;

    for (; entry < end; entry++) {
        avio_wl64(pb, entry->value);
        avio_wl64(pb, entry->serial);
    }
    avio_wl64(pb, wctx->last_pts);
    avio_wl64(pb, wctx->last_serial);
}
 
/* Write one metadata record header: guid, value type, value size, and
 * the UTF-16LE key (including its terminator). */
static void write_metadata_header(AVIOContext *pb, int type, const char *key, int value_size)
{
    ff_put_guid(pb, &ff_metadata_guid);
    avio_wl32(pb, type);
    avio_wl32(pb, value_size);
    avio_put_str16le(pb, key);
}
 
/* Size in bytes of the metadata record header for `key`: 16-byte guid,
 * two 4-byte fields, plus the UTF-16LE key with its terminator. */
static int metadata_header_size(const char *key)
{
    const int guid_len  = 16;
    const int fields    = 4 + 4;
    const int key_bytes = (strlen(key) + 1) * 2;

    return guid_len + fields + key_bytes;
}
 
/* Write an int32-valued metadata record (type 0). */
static void write_tag_int32(AVIOContext *pb, const char *key, int value)
{
    write_metadata_header(pb, 0, key, 4);
    avio_wl32(pb, value);
}
 
/* Write a string-valued metadata record (type 1); the value is stored as
 * UTF-16LE including its terminator. */
static void write_tag(AVIOContext *pb, const char *key, const char *value)
{
    write_metadata_header(pb, 1, key, strlen(value)*2 + 2);
    avio_put_str16le(pb, value);
}
 
/* Size of the WM/Picture attachment value: UTF-16LE mime type, one flag
 * byte, UTF-16LE description (from tag e, may be absent), 4-byte data
 * size, then the picture data itself. */
static int attachment_value_size(const AVPacket *pkt, const AVDictionaryEntry *e)
{
    return strlen("image/jpeg")*2 + 2 + 1 + (e ? strlen(e->value)*2 : 0) + 2 + 4 + pkt->size;
}
 
/*
 * Write the 'legacy_attrib' metadata records: all container-level tags,
 * plus the stashed MJPEG thumbnail as a WM/Picture attachment.
 */
static void write_table_entries_attrib(AVFormatContext *s)
{
    WtvContext *wctx = s->priv_data;
    AVIOContext *pb = s->pb;
    AVDictionaryEntry *tag = 0;

    //FIXME: translate special tags (e.g. WM/Bitrate) to binary representation
    ff_metadata_conv(&s->metadata, ff_asf_metadata_conv, NULL);
    while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
        write_tag(pb, tag->key, tag->value);

    if (wctx->thumbnail.size) {
        AVStream *st = s->streams[wctx->thumbnail.stream_index];
        tag = av_dict_get(st->metadata, "title", NULL, 0);
        write_metadata_header(pb, 2, "WM/Picture", attachment_value_size(&wctx->thumbnail, tag));

        avio_put_str16le(pb, "image/jpeg");
        avio_w8(pb, 0x10);
        avio_put_str16le(pb, tag ? tag->value : "");

        avio_wl32(pb, wctx->thumbnail.size);
        avio_write(pb, wctx->thumbnail.data, wctx->thumbnail.size);

        write_tag_int32(pb, "WM/MediaThumbType", 2);
    }
}
 
/*
 * Write the redirector table for 'legacy_attrib': one 64-bit byte offset
 * per metadata record, computed by replaying the same record sizes as
 * write_table_entries_attrib().
 */
static void write_table_redirector_legacy_attrib(AVFormatContext *s)
{
    WtvContext *wctx = s->priv_data;
    AVIOContext *pb = s->pb;
    AVDictionaryEntry *tag = 0;
    int64_t pos = 0;

    //FIXME: translate special tags to binary representation
    while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        avio_wl64(pb, pos);
        pos += metadata_header_size(tag->key) + strlen(tag->value)*2 + 2;
    }

    if (wctx->thumbnail.size) {
        AVStream *st = s->streams[wctx->thumbnail.stream_index];
        avio_wl64(pb, pos);
        pos += metadata_header_size("WM/Picture") + attachment_value_size(&wctx->thumbnail, av_dict_get(st->metadata, "title", NULL, 0));

        avio_wl64(pb, pos);
        pos += metadata_header_size("WM/MediaThumbType") + 4;
    }
}
 
/**
 * Pad the remainder of a file
 * Write out fat table
 * @return <0 on error
 */
static int finish_file(AVFormatContext *s, enum WtvFileIndex index, int64_t start_pos)
{
    WtvContext *wctx = s->priv_data;
    AVIOContext *pb = s->pb;
    WtvFile *w = &wctx->file[index];
    int64_t end_pos = avio_tell(pb);
    int sector_bits, nb_sectors, pad;

    av_assert0(index < WTV_FILES);

    w->length = (end_pos - start_pos);

    /* determine optimal fat table depth, sector_bits, nb_sectors:
     * each FAT sector holds WTV_SECTOR_SIZE/4 32-bit sector pointers,
     * so the thresholds below are the capacities of depth 0/1/2 tables
     * with small and big data sectors respectively */
    if (w->length <= WTV_SECTOR_SIZE) {
        w->depth = 0;
        sector_bits = WTV_SECTOR_BITS;
    } else if (w->length <= (WTV_SECTOR_SIZE / 4) * WTV_SECTOR_SIZE) {
        w->depth = 1;
        sector_bits = WTV_SECTOR_BITS;
    } else if (w->length <= (WTV_SECTOR_SIZE / 4) * WTV_BIGSECTOR_SIZE) {
        w->depth = 1;
        sector_bits = WTV_BIGSECTOR_BITS;
    } else if (w->length <= (int64_t)(WTV_SECTOR_SIZE / 4) * (WTV_SECTOR_SIZE / 4) * WTV_SECTOR_SIZE) {
        w->depth = 2;
        sector_bits = WTV_SECTOR_BITS;
    } else if (w->length <= (int64_t)(WTV_SECTOR_SIZE / 4) * (WTV_SECTOR_SIZE / 4) * WTV_BIGSECTOR_SIZE) {
        w->depth = 2;
        sector_bits = WTV_BIGSECTOR_BITS;
    } else {
        av_log(s, AV_LOG_ERROR, "unsupported file allocation table depth (%"PRIi64" bytes)\n", w->length);
        return -1;
    }

    // determine the nb_sectors
    nb_sectors = (int)(w->length >> sector_bits);

    // pad sector of timeline
    pad = (1 << sector_bits) - (w->length % (1 << sector_bits));
    if (pad) {
        nb_sectors++;
        write_pad(pb, pad);
    }

    //write fat table
    if (w->depth > 0) {
        w->first_sector = write_fat_sector(s, start_pos, nb_sectors, sector_bits, w->depth) >> WTV_SECTOR_BITS;
    } else {
        w->first_sector = start_pos >> WTV_SECTOR_BITS;
    }

    /* flag bits consumed via the root table length field;
     * NOTE(review): bit 63 appears to mark small-sector files, bit 60 is
     * always set — confirm against the WTV format description */
    w->length |= 1ULL<<60;
    if (sector_bits == WTV_SECTOR_BITS)
        w->length |= 1ULL<<63;

    return 0;
}
 
/*
 * Finish the timeline and each auxiliary table subfile (writing its FAT),
 * write the root table, then patch the root size/sector and file-end
 * pointer left as placeholders by write_header().
 */
static int write_trailer(AVFormatContext *s)
{
    WtvContext *wctx = s->priv_data;
    AVIOContext *pb = s->pb;
    int root_size;
    int64_t sector_pos;
    int64_t start_pos, file_end_pos;

    if (finish_file(s, WTV_TIMELINE, wctx->timeline_start_pos) < 0)
        return -1;

    start_pos = avio_tell(pb);
    write_table_entries_events(s);
    if (finish_file(s, WTV_TIMELINE_TABLE_0_ENTRIES_EVENTS, start_pos) < 0)
        return -1;

    start_pos = avio_tell(pb);
    write_table_entries_attrib(s);
    if (finish_file(s, WTV_TABLE_0_ENTRIES_LEGACY_ATTRIB, start_pos) < 0)
        return -1;

    start_pos = avio_tell(pb);
    write_table_redirector_legacy_attrib(s);
    if (finish_file(s, WTV_TABLE_0_REDIRECTOR_LEGACY_ATTRIB, start_pos) < 0)
        return -1;

    start_pos = avio_tell(pb);
    write_table_entries_time(s);
    if (finish_file(s, WTV_TABLE_0_ENTRIES_TIME, start_pos) < 0)
        return -1;

    // write root table
    sector_pos = avio_tell(pb);
    root_size = write_root_table(s, sector_pos);

    file_end_pos = avio_tell(pb);
    // update root value
    avio_seek(pb, 0x30, SEEK_SET);
    avio_wl32(pb, root_size);
    avio_seek(pb, 4, SEEK_CUR);
    avio_wl32(pb, sector_pos >> WTV_SECTOR_BITS);
    avio_seek(pb, 0x5c, SEEK_SET);
    avio_wl32(pb, file_end_pos >> WTV_SECTOR_BITS);

    avio_flush(pb);

    av_free(wctx->sp_pairs);
    av_free(wctx->st_pairs);
    av_free_packet(&wctx->thumbnail);
    return 0;
}
 
/* WTV muxer definition; defaults to MPEG-2 video and AC-3 audio. */
AVOutputFormat ff_wtv_muxer = {
    .name           = "wtv",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Television (WTV)"),
    .extensions     = "wtv",
    .priv_data_size = sizeof(WtvContext),
    .audio_codec    = AV_CODEC_ID_AC3,
    .video_codec    = AV_CODEC_ID_MPEG2VIDEO,
    .write_header   = write_header,
    .write_packet   = write_packet,
    .write_trailer  = write_trailer,
    .codec_tag      = (const AVCodecTag* const []){ ff_codec_bmp_tags,
                                                    ff_codec_wav_tags, 0 },
};
/contrib/sdk/sources/ffmpeg/libavformat/wv.c
0,0 → 1,52
/*
* WavPack shared functions
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdint.h>
#include <string.h>
 
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
 
#include "wv.h"
 
/* Parse a 32-byte WavPack block header into *wv.
 * Returns 0 on success, AVERROR_INVALIDDATA on bad magic or block size. */
int ff_wv_parse_header(WvHeader *wv, const uint8_t *data)
{
    uint32_t raw_size;

    memset(wv, 0, sizeof(*wv));

    if (AV_RL32(data) != MKTAG('w', 'v', 'p', 'k'))
        return AVERROR_INVALIDDATA;

    raw_size = AV_RL32(data + 4);
    if (raw_size < 24 || raw_size > WV_BLOCK_LIMIT)
        return AVERROR_INVALIDDATA;
    wv->blocksize = raw_size - 24; /* exclude the remainder of the header */

    wv->version       = AV_RL16(data +  8);
    wv->total_samples = AV_RL32(data + 12);
    wv->block_idx     = AV_RL32(data + 16);
    wv->samples       = AV_RL32(data + 20);
    wv->flags         = AV_RL32(data + 24);
    wv->crc           = AV_RL32(data + 28);

    wv->initial = !!(wv->flags & WV_FLAG_INITIAL_BLOCK);
    wv->final   = !!(wv->flags & WV_FLAG_FINAL_BLOCK);

    return 0;
}
/contrib/sdk/sources/ffmpeg/libavformat/wv.h
0,0 → 1,56
/*
* WavPack shared functions
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVFORMAT_WV_H
#define AVFORMAT_WV_H
 
#include <stdint.h>
 
#define WV_HEADER_SIZE 32
 
#define WV_FLAG_INITIAL_BLOCK (1 << 11)
#define WV_FLAG_FINAL_BLOCK (1 << 12)
 
// specs say that maximum block size is 1Mb
#define WV_BLOCK_LIMIT 1048576
 
typedef struct WvHeader {
    uint32_t blocksize;     ///< size of the block data (excluding the header)
    uint16_t version;       ///< bitstream version
    uint32_t total_samples; ///< total number of samples in the stream
    uint32_t block_idx;     ///< index of the first sample in this block
    uint32_t samples;       ///< number of samples in this block
    uint32_t flags;         ///< raw flags field from the block header
    uint32_t crc;           ///< CRC field from the block header

    int initial, final;     ///< derived from WV_FLAG_INITIAL/FINAL_BLOCK
} WvHeader;
 
/**
* Parse a WavPack block header.
*
* @param wv this struct will be filled with parse header information
* @param data header data, must be WV_HEADER_SIZE bytes long
*
* @return 0 on success, a negative AVERROR code on failure
*/
int ff_wv_parse_header(WvHeader *wv, const uint8_t *data);
 
#endif /* AVFORMAT_WV_H */
/contrib/sdk/sources/ffmpeg/libavformat/wvdec.c
0,0 → 1,354
/*
* WavPack demuxer
* Copyright (c) 2006,2011 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "avformat.h"
#include "internal.h"
#include "apetag.h"
#include "id3v1.h"
#include "wv.h"
 
/* Bits of the WavPack block header 'flags' field used by the demuxer. */
enum WV_FLAGS {
    WV_MONO   = 0x0004,   /* block carries a single channel */
    WV_HYBRID = 0x0008,
    WV_JOINT  = 0x0010,
    WV_CROSSD = 0x0020,
    WV_HSHAPE = 0x0040,
    WV_FLOAT  = 0x0080,
    WV_INT32  = 0x0100,
    WV_HBR    = 0x0200,
    WV_HBAL   = 0x0400,
    WV_MCINIT = 0x0800,
    WV_MCEND  = 0x1000,
};
 
/* Sample rates indexed by the 4-bit rate field (bits 23-26 of flags);
 * -1 (index 15) means the rate is not directly encoded and must be
 * recovered from block metadata. */
static const int wv_rates[16] = {
     6000,  8000,  9600, 11025, 12000, 16000,  22050, 24000,
    32000, 44100, 48000, 64000, 88200, 96000, 192000,    -1
};
 
/* Demuxer private state. */
typedef struct {
    uint8_t block_header[WV_HEADER_SIZE]; /* raw bytes of the current block header */
    WvHeader header;                      /* parsed form of block_header */
    int rate, chan, bpp;                  /* stream parameters from the block flags */
    uint32_t chmask;                      /* channel layout mask */
    int multichannel;                     /* set when a block lacks both initial and final flags */
    int block_parsed;
    int64_t pos;                          /* file offset of the current block header */

    int64_t apetag_start;                 /* offset of a trailing APE tag, 0 if none */
} WVContext;
 
/**
 * Probe for a WavPack stream: a "wvpk" magic followed by a plausible
 * block size and a supported version number.
 */
static int wv_probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;
    uint32_t blocksize;
    unsigned version;

    /* need at least the fixed part of a block header */
    if (p->buf_size <= 32)
        return 0;

    if (AV_RL32(b) != MKTAG('w', 'v', 'p', 'k'))
        return 0;

    blocksize = AV_RL32(b + 4);
    if (blocksize < 24 || blocksize > WV_BLOCK_LIMIT)
        return 0;

    version = AV_RL16(b + 8);
    if (version < 0x402 || version > 0x410)
        return 0;

    return AVPROBE_SCORE_MAX;
}
 
/**
 * Read and validate one WavPack block header at the current position.
 *
 * Fills wc->block_header (raw bytes) and wc->header (parsed).  On the first
 * block, derives the stream parameters (rate, channels, bits per sample,
 * channel mask), reading metadata sub-blocks from the block payload when the
 * fixed header alone is not sufficient (multichannel layout or custom
 * sampling rate).  Later blocks are checked for consistency against the
 * latched parameters.
 *
 * @return 0 on success (including ignorable zero-sample blocks),
 *         a negative AVERROR code on failure
 */
static int wv_read_block_header(AVFormatContext *ctx, AVIOContext *pb)
{
    WVContext *wc = ctx->priv_data;
    int ret;
    int rate, bpp, chan;
    uint32_t chmask, flags;

    wc->pos = avio_tell(pb);

    /* don't return bogus packets with the ape tag data */
    if (wc->apetag_start && wc->pos >= wc->apetag_start)
        return AVERROR_EOF;

    ret = avio_read(pb, wc->block_header, WV_HEADER_SIZE);
    if (ret != WV_HEADER_SIZE)
        return (ret < 0) ? ret : AVERROR_EOF;

    ret = ff_wv_parse_header(&wc->header, wc->block_header);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid block header.\n");
        return ret;
    }

    if (wc->header.version < 0x402 || wc->header.version > 0x410) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported version %03X\n", wc->header.version);
        return AVERROR_PATCHWELCOME;
    }

    /* Blocks with zero samples don't contain actual audio information
     * and should be ignored */
    if (!wc->header.samples)
        return 0;
    /* parse flags */
    flags = wc->header.flags;
    bpp = ((flags & 3) + 1) << 3;   /* bits 0-1 give bytes per sample */
    chan = 1 + !(flags & WV_MONO);
    chmask = flags & WV_MONO ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
    rate = wv_rates[(flags >> 23) & 0xF];   /* -1 == custom rate */
    /* a block that is both initial and final carries all channels itself */
    wc->multichannel = !(wc->header.initial && wc->header.final);
    if (wc->multichannel) {
        chan = wc->chan;
        chmask = wc->chmask;
    }
    /* first block only: resolve missing parameters from metadata sub-blocks */
    if ((rate == -1 || !chan) && !wc->block_parsed) {
        int64_t block_end = avio_tell(pb) + wc->header.blocksize;
        if (!pb->seekable) {
            av_log(ctx, AV_LOG_ERROR,
                   "Cannot determine additional parameters\n");
            return AVERROR_INVALIDDATA;
        }
        while (avio_tell(pb) < block_end) {
            int id, size;
            id = avio_r8(pb);
            /* sizes are stored in 16-bit words; bit 6 of the id flags an
             * odd byte count (the last byte is padding) */
            size = (id & 0x80) ? avio_rl24(pb) : avio_r8(pb);
            size <<= 1;
            if (id & 0x40)
                size--;
            switch (id & 0x3F) {
            case 0xD:   /* channel count + channel mask */
                if (size <= 1) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Insufficient channel information\n");
                    return AVERROR_INVALIDDATA;
                }
                chan = avio_r8(pb);
                switch (size - 2) {
                case 0:
                    chmask = avio_r8(pb);
                    break;
                case 1:
                    chmask = avio_rl16(pb);
                    break;
                case 2:
                    chmask = avio_rl24(pb);
                    break;
                case 3:
                    chmask = avio_rl32(pb);
                    break;
                case 5: /* large layouts: 12-bit channel count + 24-bit mask */
                    avio_skip(pb, 1);
                    chan |= (avio_r8(pb) & 0xF) << 8;
                    chmask = avio_rl24(pb);
                    break;
                default:
                    av_log(ctx, AV_LOG_ERROR,
                           "Invalid channel info size %d\n", size);
                    return AVERROR_INVALIDDATA;
                }
                break;
            case 0x27:  /* custom (non-table) sampling rate */
                rate = avio_rl24(pb);
                break;
            default:
                avio_skip(pb, size);
            }
            if (id & 0x40)
                avio_skip(pb, 1);   /* skip the padding byte */
        }
        if (rate == -1) {
            av_log(ctx, AV_LOG_ERROR,
                   "Cannot determine custom sampling rate\n");
            return AVERROR_INVALIDDATA;
        }
        /* rewind to the start of the block payload */
        avio_seek(pb, block_end - wc->header.blocksize, SEEK_SET);
    }
    /* latch the parameters from the first block... */
    if (!wc->bpp)
        wc->bpp    = bpp;
    if (!wc->chan)
        wc->chan   = chan;
    if (!wc->chmask)
        wc->chmask = chmask;
    if (!wc->rate)
        wc->rate   = rate;

    /* ...and verify later blocks against them */
    if (flags && bpp != wc->bpp) {
        av_log(ctx, AV_LOG_ERROR,
               "Bits per sample differ, this block: %i, header block: %i\n",
               bpp, wc->bpp);
        return AVERROR_INVALIDDATA;
    }
    if (flags && !wc->multichannel && chan != wc->chan) {
        av_log(ctx, AV_LOG_ERROR,
               "Channels differ, this block: %i, header block: %i\n",
               chan, wc->chan);
        return AVERROR_INVALIDDATA;
    }
    if (flags && rate != -1 && rate != wc->rate) {
        av_log(ctx, AV_LOG_ERROR,
               "Sampling rate differ, this block: %i, header block: %i\n",
               rate, wc->rate);
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
 
/**
 * Read the container "header": skip leading zero-sample blocks, use the
 * first real block to set up the single WavPack audio stream, then probe
 * for trailing APEv2/ID3v1 tags when the input is seekable.
 */
static int wv_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    WVContext *wc = s->priv_data;
    AVStream *st;
    int ret;

    wc->block_parsed = 0;
    /* find the first block that actually carries samples */
    for (;;) {
        if ((ret = wv_read_block_header(s, pb)) < 0)
            return ret;
        if (!wc->header.samples)
            avio_skip(pb, wc->header.blocksize);
        else
            break;
    }

    /* now we are ready: build format streams */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_WAVPACK;
    st->codec->channels = wc->chan;
    st->codec->channel_layout = wc->chmask;
    st->codec->sample_rate = wc->rate;
    st->codec->bits_per_coded_sample = wc->bpp;
    /* one timestamp tick == one audio sample */
    avpriv_set_pts_info(st, 64, 1, wc->rate);
    st->start_time = 0;
    /* 0xFFFFFFFF total_samples is treated as "length unknown" */
    if (wc->header.total_samples != 0xFFFFFFFFu)
        st->duration = wc->header.total_samples;

    if (s->pb->seekable) {
        int64_t cur = avio_tell(s->pb);
        wc->apetag_start = ff_ape_parse_tag(s);
        /* only fall back to ID3v1 if the APE tag yielded no metadata */
        if (!av_dict_get(s->metadata, "", NULL, AV_DICT_IGNORE_SUFFIX))
            ff_id3v1_read(s);
        avio_seek(s->pb, cur, SEEK_SET);
    }

    return 0;
}
 
/**
 * Read one packet.  A packet aggregates all blocks belonging to the same
 * set of samples: for multichannel files, every sub-block up to and
 * including the one flagged WV_FLAG_FINAL_BLOCK is concatenated (each
 * prefixed by its raw block header) into a single AVPacket.
 */
static int wv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    WVContext *wc = s->priv_data;
    int ret;
    int off;
    int64_t pos;
    uint32_t block_samples;

    if (url_feof(s->pb))
        return AVERROR_EOF;
    /* the previous block was fully consumed: load the next header */
    if (wc->block_parsed) {
        if ((ret = wv_read_block_header(s, s->pb)) < 0)
            return ret;
    }

    pos = wc->pos;
    if (av_new_packet(pkt, wc->header.blocksize + WV_HEADER_SIZE) < 0)
        return AVERROR(ENOMEM);
    /* the decoder expects the raw block header in front of the block data */
    memcpy(pkt->data, wc->block_header, WV_HEADER_SIZE);
    ret = avio_read(s->pb, pkt->data + WV_HEADER_SIZE, wc->header.blocksize);
    if (ret != wc->header.blocksize) {
        av_free_packet(pkt);
        return AVERROR(EIO);
    }
    /* append the remaining blocks of a multichannel set */
    while (!(wc->header.flags & WV_FLAG_FINAL_BLOCK)) {
        if ((ret = wv_read_block_header(s, s->pb)) < 0) {
            av_free_packet(pkt);
            return ret;
        }

        off = pkt->size;
        if ((ret = av_grow_packet(pkt, WV_HEADER_SIZE + wc->header.blocksize)) < 0) {
            av_free_packet(pkt);
            return ret;
        }
        memcpy(pkt->data + off, wc->block_header, WV_HEADER_SIZE);

        ret = avio_read(s->pb, pkt->data + off + WV_HEADER_SIZE, wc->header.blocksize);
        if (ret != wc->header.blocksize) {
            av_free_packet(pkt);
            return (ret < 0) ? ret : AVERROR_EOF;
        }
    }
    pkt->stream_index = 0;
    wc->block_parsed = 1;
    /* timestamps are in samples (see avpriv_set_pts_info in wv_read_header) */
    pkt->pts = wc->header.block_idx;
    block_samples = wc->header.samples;
    if (block_samples > INT32_MAX)
        av_log(s, AV_LOG_WARNING,
               "Too many samples in block: %"PRIu32"\n", block_samples);
    else
        pkt->duration = block_samples;

    /* remember block positions so wv_read_seek() can jump directly */
    av_add_index_entry(s->streams[0], pos, pkt->pts, 0, 0, AVINDEX_KEYFRAME);
    return 0;
}
 
/**
 * Seek to the given timestamp (in samples).  Uses the index built by
 * wv_read_packet() when the target lies inside it; otherwise reads frames
 * forward, discarding them, until the target timestamp is reached.
 */
static int wv_read_seek(AVFormatContext *s, int stream_index,
                        int64_t timestamp, int flags)
{
    AVStream *st = s->streams[stream_index];
    WVContext *wc = s->priv_data;
    AVPacket pkt1, *pkt = &pkt1;
    int ret;
    int index = av_index_search_timestamp(st, timestamp, flags);
    int64_t pos, pts;

    /* if found, seek there */
    if (index >= 0 &&
        timestamp <= st->index_entries[st->nb_index_entries - 1].timestamp) {
        /* force wv_read_packet() to re-read a block header at the new pos */
        wc->block_parsed = 1;
        avio_seek(s->pb, st->index_entries[index].pos, SEEK_SET);
        return 0;
    }
    /* if timestamp is out of bounds, return error */
    if (timestamp < 0 || timestamp >= s->duration)
        return AVERROR(EINVAL);

    pos = avio_tell(s->pb);
    /* read-and-discard until we pass the target; on failure restore the
     * original file position */
    do {
        ret = av_read_frame(s, pkt);
        if (ret < 0) {
            avio_seek(s->pb, pos, SEEK_SET);
            return ret;
        }
        pts = pkt->pts;
        av_free_packet(pkt);
    } while(pts < timestamp);
    return 0;
}
 
/* WavPack demuxer definition: probing, stream setup, packet reading and
 * sample-accurate seeking. */
AVInputFormat ff_wv_demuxer = {
    .name           = "wv",
    .long_name      = NULL_IF_CONFIG_SMALL("WavPack"),
    .priv_data_size = sizeof(WVContext),
    .read_probe     = wv_probe,
    .read_header    = wv_read_header,
    .read_packet    = wv_read_packet,
    .read_seek      = wv_read_seek,
};
/contrib/sdk/sources/ffmpeg/libavformat/wvenc.c
0,0 → 1,91
/*
* WavPack muxer
* Copyright (c) 2013 Konstantin Shishkov <kostya.shishkov@gmail.com>
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/attributes.h"
 
#include "apetag.h"
#include "avformat.h"
#include "wv.h"
 
/* Muxer state: running total of samples written, patched into the first
 * block header by wv_write_trailer(). */
typedef struct WvMuxContext {
    int64_t samples;
} WvMuxContext;
 
/**
 * Validate the muxer configuration: exactly one stream, and it must carry
 * WavPack data — this muxer is a plain pass-through.
 */
static av_cold int wv_write_header(AVFormatContext *ctx)
{
    if (!(ctx->nb_streams <= 1 &&
          ctx->streams[0]->codec->codec_id == AV_CODEC_ID_WAVPACK)) {
        av_log(ctx, AV_LOG_ERROR, "This muxer only supports a single WavPack stream.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}
 
/**
 * Write one WavPack block verbatim, accumulating its sample count so the
 * trailer can patch the total into the first block header.
 */
static int wv_write_packet(AVFormatContext *ctx, AVPacket *pkt)
{
    WvMuxContext *s = ctx->priv_data;
    WvHeader header;
    int ret;

    /* every packet must start with a valid WavPack block header */
    if (pkt->size < WV_HEADER_SIZE ||
        (ret = ff_wv_parse_header(&header, pkt->data)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid WavPack packet.\n");
        return AVERROR(EINVAL);
    }
    s->samples += header.samples;

    avio_write(ctx->pb, pkt->data, pkt->size);

    return 0;
}
 
/**
 * Finalize the file: patch the accumulated sample total into the first
 * block header (at offset 12) when the output is seekable and the count
 * fits the 32-bit field, then append an APEv2 tag with the metadata.
 */
static av_cold int wv_write_trailer(AVFormatContext *ctx)
{
    WvMuxContext *s = ctx->priv_data;

    /* update total number of samples in the first block */
    if (ctx->pb->seekable && s->samples &&
        s->samples < UINT32_MAX) {
        int64_t pos = avio_tell(ctx->pb);
        avio_seek(ctx->pb, 12, SEEK_SET);
        avio_wl32(ctx->pb, s->samples);
        avio_seek(ctx->pb, pos, SEEK_SET);
    }

    ff_ape_write_tag(ctx);
    return 0;
}
 
/* Raw WavPack muxer definition: pass-through writing with a sample-count
 * fixup in the trailer. */
AVOutputFormat ff_wv_muxer = {
    .name              = "wv",
    .long_name         = NULL_IF_CONFIG_SMALL("raw WavPack"),
    .mime_type         = "audio/x-wavpack",
    .extensions        = "wv",
    .priv_data_size    = sizeof(WvMuxContext),
    .audio_codec       = AV_CODEC_ID_WAVPACK,
    .video_codec       = AV_CODEC_ID_NONE,
    .write_header      = wv_write_header,
    .write_packet      = wv_write_packet,
    .write_trailer     = wv_write_trailer,
    .flags             = AVFMT_NOTIMESTAMPS,
};
/contrib/sdk/sources/ffmpeg/libavformat/xa.c
0,0 → 1,131
/*
* Maxis XA (.xa) File Demuxer
* Copyright (c) 2008 Robert Marston
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Maxis XA File Demuxer
* by Robert Marston (rmarston@gmail.com)
* for more information on the XA audio format see
* http://wiki.multimedia.cx/index.php?title=Maxis_XA
*/
 
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
#define XA00_TAG MKTAG('X', 'A', 0, 0)
#define XAI0_TAG MKTAG('X', 'A', 'I', 0)
#define XAJ0_TAG MKTAG('X', 'A', 'J', 0)
 
/* Demuxer state: how much of the declared payload has been delivered. */
typedef struct MaxisXADemuxContext {
    uint32_t out_size;    /* payload size declared in the file header */
    uint32_t sent_bytes;  /* bytes handed out via xa_read_packet() so far */
} MaxisXADemuxContext;
 
/**
 * Probe for a Maxis XA file: one of the known 4-byte tags followed by a
 * sane channel count, sampling rate and bits-per-sample in the header.
 */
static int xa_probe(AVProbeData *p)
{
    uint32_t tag;
    int nch, rate, bps;

    if (p->buf_size < 24)
        return 0;

    tag = AV_RL32(p->buf);
    if (tag != XA00_TAG && tag != XAI0_TAG && tag != XAJ0_TAG)
        return 0;

    nch  = AV_RL16(p->buf + 10);
    rate = AV_RL32(p->buf + 12);
    bps  = AV_RL16(p->buf + 22);
    if (nch == 0 || nch > 8 ||
        rate == 0 || rate > 192000 ||
        bps < 4 || bps > 32)
        return 0;

    return AVPROBE_SCORE_EXTENSION;
}
 
/**
 * Parse the 24-byte Maxis XA file header and create the single
 * ADPCM audio stream.
 */
static int xa_read_header(AVFormatContext *s)
{
    MaxisXADemuxContext *xa = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;

    /*Set up the XA Audio Decoder*/
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_ADPCM_EA_MAXIS_XA;
    avio_skip(pb, 4);       /* Skip the XA ID */
    xa->out_size = avio_rl32(pb); /* byte budget enforced by xa_read_packet */
    avio_skip(pb, 2);       /* Skip the tag */
    st->codec->channels = avio_rl16(pb);
    st->codec->sample_rate = avio_rl32(pb);
    avio_skip(pb, 4);       /* Skip average byte rate */
    avio_skip(pb, 2);       /* Skip block align */
    avio_skip(pb, 2);       /* Skip bits-per-sample */

    if (!st->codec->channels || !st->codec->sample_rate)
        return AVERROR_INVALIDDATA;

    /* 15 compressed bytes per channel decode to 28 samples; the 64-bit
     * intermediate is clipped to INT_MAX to avoid overflow */
    st->codec->bit_rate = av_clip(15LL * st->codec->channels * 8 *
                                  st->codec->sample_rate / 28, 0, INT_MAX);

    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    st->start_time = 0;

    return 0;
}
 
/**
 * Deliver one compressed block (1 header byte + 14 sample bytes per
 * channel), which decodes to 28 samples, until the byte budget from the
 * file header is exhausted.
 */
static int xa_read_packet(AVFormatContext *s,
                          AVPacket *pkt)
{
    MaxisXADemuxContext *xa = s->priv_data;
    AVStream *st = s->streams[0];
    unsigned int block_size;
    int read;

    /* stop once the amount of data declared in the header was delivered */
    if (xa->sent_bytes >= xa->out_size)
        return AVERROR_EOF;

    /* 1 byte header and 14 bytes worth of samples * number channels per block */
    block_size = 15 * st->codec->channels;

    read = av_get_packet(s->pb, pkt, block_size);
    if (read < 0)
        return read;

    xa->sent_bytes   += block_size;
    pkt->stream_index = st->index;
    pkt->duration     = 28;

    return read;
}
 
/* Maxis XA demuxer definition. */
AVInputFormat ff_xa_demuxer = {
    .name           = "xa",
    .long_name      = NULL_IF_CONFIG_SMALL("Maxis XA"),
    .priv_data_size = sizeof(MaxisXADemuxContext),
    .read_probe     = xa_probe,
    .read_header    = xa_read_header,
    .read_packet    = xa_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/xmv.c
0,0 → 1,578
/*
* Microsoft XMV demuxer
* Copyright (c) 2011 Sven Hesse <drmccoy@drmccoy.de>
* Copyright (c) 2011 Matthew Hoops <clone2727@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Microsoft XMV demuxer
*/
 
#include <stdint.h>
 
#include "libavutil/intreadwrite.h"
 
#include "avformat.h"
#include "internal.h"
#include "riff.h"
#include "libavutil/avassert.h"
 
/** The min size of an XMV header. */
#define XMV_MIN_HEADER_SIZE 36
 
/** Audio flag: ADPCM'd 5.1 stream, front left / right channels */
#define XMV_AUDIO_ADPCM51_FRONTLEFTRIGHT 1
/** Audio flag: ADPCM'd 5.1 stream, front center / low frequency channels */
#define XMV_AUDIO_ADPCM51_FRONTCENTERLOW 2
/** Audio flag: ADPCM'd 5.1 stream, rear left / right channels */
#define XMV_AUDIO_ADPCM51_REARLEFTRIGHT 4
 
/** Audio flag: Any of the ADPCM'd 5.1 stream flags. */
#define XMV_AUDIO_ADPCM51 (XMV_AUDIO_ADPCM51_FRONTLEFTRIGHT | \
XMV_AUDIO_ADPCM51_FRONTCENTERLOW | \
XMV_AUDIO_ADPCM51_REARLEFTRIGHT)
 
#define XMV_BLOCK_ALIGN_SIZE 36
 
/** A video packet with an XMV file. */
typedef struct XMVVideoPacket {
    int stream_index; ///< The decoder stream index for this video packet.

    uint32_t data_size;   ///< The size of the remaining video data.
    uint64_t data_offset; ///< The offset of the video data within the file.

    uint32_t current_frame; ///< The current frame within this video packet.
    uint32_t frame_count;   ///< The amount of frames within this video packet.

    int has_extradata;      ///< Does the video packet contain extra data?
    uint8_t extradata[4];   ///< The extra data, repacked to the WMV2 layout
                            ///< by xmv_read_extradata().

    int64_t last_pts; ///< PTS of the last video frame (1/1000 time base).
    int64_t pts;      ///< PTS of the most current video frame.
} XMVVideoPacket;

/** An audio packet with an XMV file. */
typedef struct XMVAudioPacket {
    int stream_index; ///< The decoder stream index for this audio packet.

    /* Stream format properties. */
    uint16_t compression;     ///< The type of compression.
    uint16_t channels;        ///< Number of channels.
    uint32_t sample_rate;     ///< Sampling rate.
    uint16_t bits_per_sample; ///< Bits per compressed sample.
    uint32_t bit_rate;        ///< Bits of compressed data per second.
    uint16_t flags;           ///< Flags
    unsigned block_align;     ///< Bytes per compressed block.
    uint16_t block_samples;   ///< Decompressed samples per compressed block.

    enum AVCodecID codec_id;  ///< The codec ID of the compression scheme.

    uint32_t data_size;   ///< The size of the remaining audio data.
    uint64_t data_offset; ///< The offset of the audio data within the file.

    uint32_t frame_size;  ///< Number of bytes to put into an audio frame.

    uint64_t block_count; ///< Running counter of decompressed audio block.
} XMVAudioPacket;

/** Context for demuxing an XMV file. */
typedef struct XMVDemuxContext {
    uint16_t audio_track_count; ///< Number of audio track in this file.

    uint32_t this_packet_size; ///< Size of the current packet.
    uint32_t next_packet_size; ///< Size of the next packet.

    uint64_t this_packet_offset; ///< Offset of the current packet.
    uint64_t next_packet_offset; ///< Offset of the next packet.

    uint16_t current_stream; ///< The index of the stream currently handling.
    uint16_t stream_count;   ///< The number of streams in this file.

    XMVVideoPacket video;  ///< The video packet contained in each packet.
    XMVAudioPacket *audio; ///< The audio packets contained in each packet.
} XMVDemuxContext;
 
/**
 * Probe for an XMV file: "xobX" magic at offset 12 combined with a sane
 * version number at offset 16.
 */
static int xmv_probe(AVProbeData *p)
{
    uint32_t version;

    if (p->buf_size < XMV_MIN_HEADER_SIZE)
        return 0;

    if (memcmp(p->buf + 12, "xobX", 4))
        return 0;

    version = AV_RL32(p->buf + 16);
    if (version == 0 || version > 4)
        return 0;

    return AVPROBE_SCORE_MAX;
}
 
/**
 * Free the per-file state (the audio track array).  Also used as the
 * error-path cleanup by xmv_read_header().
 */
static int xmv_read_close(AVFormatContext *s)
{
    XMVDemuxContext *xmv = s->priv_data;

    av_freep(&xmv->audio);

    return 0;
}
 
/**
 * Parse the XMV file header: packet sizes, the video track properties and
 * one property record per audio track.  Creates one AVStream for video and
 * one per audio track.  On error, allocations are released via
 * xmv_read_close().
 */
static int xmv_read_header(AVFormatContext *s)
{
    XMVDemuxContext *xmv = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *vst = NULL;

    uint32_t file_version;
    uint32_t this_packet_size;
    uint16_t audio_track;
    int ret;

    avio_skip(pb, 4); /* Next packet size */

    this_packet_size = avio_rl32(pb);

    avio_skip(pb, 4); /* Max packet size */
    avio_skip(pb, 4); /* "xobX" */

    file_version = avio_rl32(pb);
    if ((file_version != 4) && (file_version != 2))
        avpriv_request_sample(s, "Uncommon version %d", file_version);


    /* Video track */

    vst = avformat_new_stream(s, NULL);
    if (!vst)
        return AVERROR(ENOMEM);

    /* video timestamps are in milliseconds (see xmv_fetch_video_packet) */
    avpriv_set_pts_info(vst, 32, 1, 1000);

    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = AV_CODEC_ID_WMV2;
    vst->codec->codec_tag  = MKBETAG('W', 'M', 'V', '2');
    vst->codec->width      = avio_rl32(pb);
    vst->codec->height     = avio_rl32(pb);

    vst->duration          = avio_rl32(pb);

    xmv->video.stream_index = vst->index;

    /* Audio tracks */

    xmv->audio_track_count = avio_rl16(pb);

    avio_skip(pb, 2); /* Unknown (padding?) */

    xmv->audio = av_malloc(xmv->audio_track_count * sizeof(XMVAudioPacket));
    if (!xmv->audio) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    for (audio_track = 0; audio_track < xmv->audio_track_count; audio_track++) {
        XMVAudioPacket *packet = &xmv->audio[audio_track];
        AVStream *ast = NULL;

        packet->compression     = avio_rl16(pb);
        packet->channels        = avio_rl16(pb);
        packet->sample_rate     = avio_rl32(pb);
        packet->bits_per_sample = avio_rl16(pb);
        packet->flags           = avio_rl16(pb);

        packet->bit_rate      = packet->bits_per_sample *
                                packet->sample_rate *
                                packet->channels;
        packet->block_align   = XMV_BLOCK_ALIGN_SIZE * packet->channels;
        packet->block_samples = 64;
        packet->codec_id      = ff_wav_codec_get_id(packet->compression,
                                                    packet->bits_per_sample);

        packet->stream_index = -1;

        packet->frame_size  = 0;
        packet->block_count = 0;

        /* TODO: ADPCM'd 5.1 sound is encoded in three separate streams.
         * Those need to be interleaved to a proper 5.1 stream. */
        if (packet->flags & XMV_AUDIO_ADPCM51)
            av_log(s, AV_LOG_WARNING, "Unsupported 5.1 ADPCM audio stream "
                   "(0x%04X)\n", packet->flags);

        /* the channel bound also keeps block_align from wrapping */
        if (!packet->channels || !packet->sample_rate ||
             packet->channels >= UINT16_MAX / XMV_BLOCK_ALIGN_SIZE) {
            av_log(s, AV_LOG_ERROR, "Invalid parameters for audio track %d.\n",
                   audio_track);
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        ast = avformat_new_stream(s, NULL);
        if (!ast) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ast->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id              = packet->codec_id;
        ast->codec->codec_tag             = packet->compression;
        ast->codec->channels              = packet->channels;
        ast->codec->sample_rate           = packet->sample_rate;
        ast->codec->bits_per_coded_sample = packet->bits_per_sample;
        ast->codec->bit_rate              = packet->bit_rate;
        ast->codec->block_align           = 36 * packet->channels;

        /* one timestamp tick == one compressed block (block_samples samples) */
        avpriv_set_pts_info(ast, 32, packet->block_samples, packet->sample_rate);

        packet->stream_index = ast->index;

        ast->duration = vst->duration;
    }


    /* Initialize the packet context */

    xmv->next_packet_offset = avio_tell(pb);
    /* this_packet_size from the file counts from offset 0 */
    xmv->next_packet_size   = this_packet_size - xmv->next_packet_offset;
    xmv->stream_count       = xmv->audio_track_count + 1;

    return 0;

fail:
    xmv_read_close(s);
    return ret;
}
 
/**
 * Read the 4-byte XMV video extradata and repack it into the standard WMV2
 * extradata bit layout.  The same fields exist in both layouts, only at
 * different bit positions:
 *
 *   XMV bit 0 (mspel)        -> WMV2 bit 15
 *   XMV bit 1 (loop filter)  -> WMV2 bit 14
 *   XMV bit 2 (abt flag)     -> WMV2 bit 13
 *   XMV bit 3 (j-type)       -> WMV2 bit 12
 *   XMV bit 4 (top-left MV)  -> WMV2 bit 11
 *   XMV bit 5 (per-MB RL)    -> WMV2 bit 10
 *   XMV bits 6-8 (slices)    -> WMV2 bits 7-9
 */
static void xmv_read_extradata(uint8_t *extradata, AVIOContext *pb)
{
    uint32_t in  = avio_rl32(pb);
    uint32_t out = 0;

    out |= (!!(in & 0x01)) << 15;  /* mspel bit         */
    out |= (!!(in & 0x02)) << 14;  /* loop filter       */
    out |= (!!(in & 0x04)) << 13;  /* abt flag          */
    out |= (!!(in & 0x08)) << 12;  /* j-type bit        */
    out |= (!!(in & 0x10)) << 11;  /* top-left MV flag  */
    out |= (!!(in & 0x20)) << 10;  /* per-MB RL bit     */
    out |= ((in >> 6) & 7) << 7;   /* slice count       */

    AV_WB32(extradata, out);
}
 
/**
 * Parse the header of the packet we just seeked to: the next packet's size,
 * the video frame info, one size entry per audio track and, optionally, new
 * WMV2 extradata.  Computes the file offsets of the video and audio payloads
 * within the packet.
 *
 * Fixes over the previous revision: the stream-index bound is asserted
 * BEFORE s->streams[] is dereferenced, and a failed extradata allocation is
 * now reported instead of being followed by a memcpy() into NULL.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int xmv_process_packet_header(AVFormatContext *s)
{
    XMVDemuxContext *xmv = s->priv_data;
    AVIOContext *pb = s->pb;

    uint8_t data[8];
    uint16_t audio_track;
    uint64_t data_offset;
    int ret;

    /* Next packet size */
    xmv->next_packet_size = avio_rl32(pb);

    /* Packet video header */

    if (avio_read(pb, data, 8) != 8)
        return AVERROR(EIO);

    /* bits 0-22: video data size; bits 23-30: frame count;
     * bit 31 (== bit 7 of data[3]): extradata present */
    xmv->video.data_size = AV_RL32(data) & 0x007FFFFF;

    xmv->video.current_frame = 0;
    xmv->video.frame_count = (AV_RL32(data) >> 23) & 0xFF;

    xmv->video.has_extradata = (data[3] & 0x80) != 0;

    /* Adding the audio data sizes and the video data size keeps you 4 bytes
     * short for every audio track. But as playing around with XMV files with
     * ADPCM audio showed, taking the extra 4 bytes from the audio data gives
     * you either completely distorted audio or click (when skipping the
     * remaining 68 bytes of the ADPCM block). Subtracting 4 bytes for every
     * audio track from the video data works at least for the audio. Probably
     * some alignment thing?
     * The video data has (always?) lots of padding, so it should work out...
     */
    xmv->video.data_size -= xmv->audio_track_count * 4;

    xmv->current_stream = 0;
    if (!xmv->video.frame_count) {
        /* no video in this packet: start with the first audio stream, if any */
        xmv->video.frame_count = 1;
        xmv->current_stream = xmv->stream_count > 1;
    }

    /* Packet audio header */

    for (audio_track = 0; audio_track < xmv->audio_track_count; audio_track++) {
        XMVAudioPacket *packet = &xmv->audio[audio_track];

        if (avio_read(pb, data, 4) != 4)
            return AVERROR(EIO);

        packet->data_size = AV_RL32(data) & 0x007FFFFF;
        if ((packet->data_size == 0) && (audio_track != 0))
            /* This happens when I create an XMV with several identical audio
             * streams. From the size calculations, duplicating the previous
             * stream's size works out, but the track data itself is silent.
             * Maybe this should also redirect the offset to the previous track?
             */
            packet->data_size = xmv->audio[audio_track - 1].data_size;

        /* Carve up the audio data in frame_count slices */
        packet->frame_size = packet->data_size / xmv->video.frame_count;
        packet->frame_size -= packet->frame_size % packet->block_align;
    }

    /* Packet data offsets */

    data_offset = avio_tell(pb);

    xmv->video.data_offset = data_offset;
    data_offset += xmv->video.data_size;

    for (audio_track = 0; audio_track < xmv->audio_track_count; audio_track++) {
        xmv->audio[audio_track].data_offset = data_offset;
        data_offset += xmv->audio[audio_track].data_size;
    }

    /* Video frames header */

    /* Read new video extra data */
    if (xmv->video.data_size > 0) {
        if (xmv->video.has_extradata) {
            xmv_read_extradata(xmv->video.extradata, pb);

            xmv->video.data_size   -= 4;
            xmv->video.data_offset += 4;

            if (xmv->video.stream_index >= 0) {
                AVStream *vst;

                /* validate the index before using it to address s->streams */
                av_assert0(xmv->video.stream_index < s->nb_streams);
                vst = s->streams[xmv->video.stream_index];

                if (vst->codec->extradata_size < 4) {
                    /* av_freep() NULLs the pointer so no stale reference
                     * survives if the reallocation below fails */
                    av_freep(&vst->codec->extradata);

                    if ((ret = ff_alloc_extradata(vst->codec, 4)) < 0)
                        return ret;
                }

                memcpy(vst->codec->extradata, xmv->video.extradata, 4);
            }
        }
    }

    return 0;
}
 
/**
 * Advance to the packet announced by the previous one: seek there, sanity
 * check its size, parse its header and remember where the one after it
 * starts.  Returns AVERROR_EOF once no further packet was announced.
 */
static int xmv_fetch_new_packet(AVFormatContext *s)
{
    XMVDemuxContext *xmv = s->priv_data;
    AVIOContext *pb      = s->pb;
    int err;

    /* no next packet announced: we are done */
    if (xmv->this_packet_offset == xmv->next_packet_offset)
        return AVERROR_EOF;

    /* jump to the packet the previous header announced */
    xmv->this_packet_offset = xmv->next_packet_offset;
    if (avio_seek(pb, xmv->this_packet_offset, SEEK_SET) != xmv->this_packet_offset)
        return AVERROR(EIO);

    /* a packet must at least hold the video header plus one 4-byte audio
     * header per track */
    xmv->this_packet_size = xmv->next_packet_size;
    if (xmv->this_packet_size < (12 + xmv->audio_track_count * 4))
        return AVERROR(EIO);

    err = xmv_process_packet_header(s);
    if (err)
        return err;

    /* remember where the packet after this one begins */
    xmv->next_packet_offset = xmv->this_packet_offset + xmv->this_packet_size;

    return 0;
}
 
/**
 * Read the next slice of an audio track from the current packet into pkt.
 *
 * @param stream index into xmv->audio (track number, not the AVStream index)
 */
static int xmv_fetch_audio_packet(AVFormatContext *s,
                                  AVPacket *pkt, uint32_t stream)
{
    XMVDemuxContext *xmv = s->priv_data;
    AVIOContext     *pb  = s->pb;
    XMVAudioPacket *audio = &xmv->audio[stream];

    uint32_t data_size;
    uint32_t block_count;
    int result;

    /* Seek to it */
    if (avio_seek(pb, audio->data_offset, SEEK_SET) != audio->data_offset)
        return AVERROR(EIO);

    if ((xmv->video.current_frame + 1) < xmv->video.frame_count)
        /* Not the last frame, get at most frame_size bytes. */
        data_size = FFMIN(audio->frame_size, audio->data_size);
    else
        /* Last frame, get the rest. */
        data_size = audio->data_size;

    /* Read the packet */
    result = av_get_packet(pb, pkt, data_size);
    if (result <= 0)
        return result;

    pkt->stream_index = audio->stream_index;

    /* Calculate the PTS */

    /* the stream time base is block_samples/sample_rate, so the PTS
     * advances by one per compressed block */
    block_count = data_size / audio->block_align;

    pkt->duration = block_count;
    pkt->pts      = audio->block_count;
    pkt->dts      = AV_NOPTS_VALUE;

    audio->block_count += block_count;

    /* Advance offset */
    audio->data_size   -= data_size;
    audio->data_offset += data_size;

    return 0;
}
 
/**
 * Read one video frame from the current packet into pkt.
 *
 * Each frame starts with a 32-bit LE word: bits 0-16 hold the frame size
 * in 32-bit words, the remaining bits its duration (in the 1/1000 time
 * base set by xmv_read_header).  The WMV2 bitstream itself is stored
 * little-endian and is byte-swapped here into normal big-endian order.
 */
static int xmv_fetch_video_packet(AVFormatContext *s,
                                  AVPacket *pkt)
{
    XMVDemuxContext *xmv = s->priv_data;
    AVIOContext     *pb  = s->pb;
    XMVVideoPacket *video = &xmv->video;

    int result;
    uint32_t frame_header;
    uint32_t frame_size, frame_timestamp;
    uint8_t *data, *end;

    /* Seek to it */
    if (avio_seek(pb, video->data_offset, SEEK_SET) != video->data_offset)
        return AVERROR(EIO);

    /* Read the frame header */
    frame_header = avio_rl32(pb);

    frame_size      = (frame_header & 0x1FFFF) * 4 + 4;
    frame_timestamp = (frame_header >> 17);

    if ((frame_size + 4) > video->data_size)
        return AVERROR(EIO);

    /* Get the packet data */
    result = av_get_packet(pb, pkt, frame_size);
    if (result != frame_size)
        return result;

    /* Contrary to normal WMV2 video, the bit stream in XMV's
     * WMV2 is little-endian.
     * TODO: This manual swap is of course suboptimal.
     */
    for (data = pkt->data, end = pkt->data + frame_size; data < end; data += 4)
        AV_WB32(data, AV_RL32(data));

    pkt->stream_index = video->stream_index;

    /* Calculate the PTS */

    video->last_pts = frame_timestamp + video->pts;

    pkt->duration = 0;
    pkt->pts      = video->last_pts;
    pkt->dts      = AV_NOPTS_VALUE;

    video->pts += frame_timestamp;

    /* Keyframe? (top bit of the first payload byte clear == keyframe) */
    pkt->flags = (pkt->data[0] & 0x80) ? 0 : AV_PKT_FLAG_KEY;

    /* Advance offset (+4 accounts for the frame header word) */
    video->data_size   -= frame_size + 4;
    video->data_offset += frame_size + 4;

    return 0;
}
 
/**
 * Deliver the next frame, cycling round-robin over all streams of the
 * current packet: stream 0 is video, streams 1..n map to audio tracks
 * 0..n-1.  A full round over the streams completes one packet frame; once
 * all frames are consumed, the next file packet is fetched.
 */
static int xmv_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    XMVDemuxContext *xmv = s->priv_data;
    int err;

    /* all frames of the current packet consumed? advance to the next one */
    if (xmv->video.current_frame == xmv->video.frame_count) {
        err = xmv_fetch_new_packet(s);
        if (err)
            return err;
    }

    err = (xmv->current_stream == 0)
        ? xmv_fetch_video_packet(s, pkt)
        : xmv_fetch_audio_packet(s, pkt, xmv->current_stream - 1);
    if (err)
        return err;

    /* rotate to the next stream; a completed round finishes one frame */
    if (++xmv->current_stream >= xmv->stream_count) {
        xmv->current_stream       = 0;
        xmv->video.current_frame += 1;
    }

    return 0;
}
 
/* Microsoft XMV demuxer definition. */
AVInputFormat ff_xmv_demuxer = {
    .name           = "xmv",
    .long_name      = NULL_IF_CONFIG_SMALL("Microsoft XMV"),
    .priv_data_size = sizeof(XMVDemuxContext),
    .read_probe     = xmv_probe,
    .read_header    = xmv_read_header,
    .read_packet    = xmv_read_packet,
    .read_close     = xmv_read_close,
};
/contrib/sdk/sources/ffmpeg/libavformat/xwma.c
0,0 → 1,279
/*
* xWMA demuxer
* Copyright (c) 2011 Max Horn
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <inttypes.h>
 
#include "avformat.h"
#include "internal.h"
#include "riff.h"
 
/*
* Demuxer for xWMA, a Microsoft audio container used by XAudio 2.
*/
 
/* Per-file state. */
typedef struct {
    int64_t data_end; /* absolute offset where the audio data ends —
                       * presumably set by xwma_read_header and used to stop
                       * before trailing chunks; confirm in the code below */
} XWMAContext;

/* Detect an xWMA file: a RIFF container whose form type is "XWMA".
 * NOTE(review): no explicit buf_size check before reading p->buf + 8;
 * presumably relies on the caller zero-padding the probe buffer — confirm. */
static int xwma_probe(AVProbeData *p)
{
    if (!memcmp(p->buf, "RIFF", 4) && !memcmp(p->buf + 8, "XWMA", 4))
        return AVPROBE_SCORE_MAX;
    return 0;
}
 
/**
 * Parse the xWMA RIFF header: validate the RIFF/XWMA signature, read the
 * 'fmt ' chunk into the codec context, scan the remaining chunks (building
 * a seek table from an optional 'dpds' chunk) until the 'data' chunk, and
 * estimate the stream duration.
 *
 * Fix over the original: dpds_table was leaked on several error paths
 * taken after it had been allocated (EOF while scanning chunks, a duplicate
 * dpds chunk, a negative data size, an invalid bytes-per-sample). All such
 * paths now funnel through a single cleanup label.
 *
 * Returns 0 on success, a negative value on failure.
 */
static int xwma_read_header(AVFormatContext *s)
{
    int64_t size;
    int ret;
    uint32_t dpds_table_size = 0;
    uint32_t *dpds_table = NULL;
    unsigned int tag;
    AVIOContext *pb = s->pb;
    AVStream *st;
    XWMAContext *xwma = s->priv_data;
    int i;

    /* The following code is mostly copied from wav.c, with some
     * minor alterations.
     */

    /* check RIFF header */
    tag = avio_rl32(pb);
    if (tag != MKTAG('R', 'I', 'F', 'F'))
        return -1;
    avio_rl32(pb); /* file size */
    tag = avio_rl32(pb);
    if (tag != MKTAG('X', 'W', 'M', 'A'))
        return -1;

    /* parse fmt header */
    tag = avio_rl32(pb);
    if (tag != MKTAG('f', 'm', 't', ' '))
        return -1;
    size = avio_rl32(pb);
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    ret = ff_get_wav_header(pb, st->codec, size);
    if (ret < 0)
        return ret;
    st->need_parsing = AVSTREAM_PARSE_NONE;

    /* All xWMA files I have seen contained WMAv2 data. If there are files
     * using WMA Pro or some other codec, then we need to figure out the right
     * extradata for that. Thus, ask the user for feedback, but try to go on
     * anyway.
     */
    if (st->codec->codec_id != AV_CODEC_ID_WMAV2) {
        avpriv_request_sample(s, "Unexpected codec (tag 0x04%x; id %d)",
                              st->codec->codec_tag, st->codec->codec_id);
    } else {
        /* In all xWMA files I have seen, there is no extradata. But the WMA
         * codecs require extradata, so we provide our own fake extradata.
         *
         * First, check that there really was no extradata in the header. If
         * there was, then try to use it, after asking the user to provide a
         * sample of this unusual file.
         */
        if (st->codec->extradata_size != 0) {
            /* Surprise, surprise: We *did* get some extradata. No idea
             * if it will work, but just go on and try it, after asking
             * the user for a sample.
             */
            avpriv_request_sample(s, "Unexpected extradata (%d bytes)",
                                  st->codec->extradata_size);
        } else {
            st->codec->extradata_size = 6;
            st->codec->extradata = av_mallocz(6 + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);

            /* setup extradata with our experimentally obtained value */
            st->codec->extradata[4] = 31;
        }
    }

    if (!st->codec->channels) {
        av_log(s, AV_LOG_WARNING, "Invalid channel count: %d\n",
               st->codec->channels);
        return AVERROR_INVALIDDATA;
    }
    if (!st->codec->bits_per_coded_sample) {
        av_log(s, AV_LOG_WARNING, "Invalid bits_per_coded_sample: %d\n",
               st->codec->bits_per_coded_sample);
        return AVERROR_INVALIDDATA;
    }

    /* set the sample rate */
    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

    /* parse the remaining RIFF chunks */
    for (;;) {
        if (pb->eof_reached) {
            ret = -1;
            goto fail;
        }
        /* read next chunk tag */
        tag  = avio_rl32(pb);
        size = avio_rl32(pb);
        if (tag == MKTAG('d', 'a', 't', 'a')) {
            /* We assume that the data chunk comes last. */
            break;
        } else if (tag == MKTAG('d','p','d','s')) {
            /* Quoting the MSDN xWMA docs on the dpds chunk: "Contains the
             * decoded packet cumulative data size array, each element is the
             * number of bytes accumulated after the corresponding xWMA packet
             * is decoded in order."
             *
             * Each packet has size equal to st->codec->block_align, which in
             * all cases I saw so far was always 2230. Thus, we can use the
             * dpds data to compute a seeking index.
             */

            /* Error out if there is more than one dpds chunk. */
            if (dpds_table) {
                av_log(s, AV_LOG_ERROR, "two dpds chunks present\n");
                ret = -1;
                goto fail;
            }

            /* Compute the number of entries in the dpds chunk. */
            if (size & 3) { /* Size should be divisible by four */
                av_log(s, AV_LOG_WARNING,
                       "dpds chunk size %"PRId64" not divisible by 4\n", size);
            }
            dpds_table_size = size / 4;
            if (dpds_table_size == 0 || dpds_table_size >= INT_MAX / 4) {
                av_log(s, AV_LOG_ERROR,
                       "dpds chunk size %"PRId64" invalid\n", size);
                ret = -1;
                goto fail;
            }

            /* Allocate some temporary storage to keep the dpds data around.
             * for processing later on.
             */
            dpds_table = av_malloc(dpds_table_size * sizeof(uint32_t));
            if (!dpds_table) {
                return AVERROR(ENOMEM);
            }

            for (i = 0; i < dpds_table_size; ++i) {
                dpds_table[i] = avio_rl32(pb);
                size -= 4;
            }
        }
        avio_skip(pb, size);
    }

    /* Determine overall data length */
    if (size < 0) {
        ret = -1;
        goto fail;
    }
    if (!size) {
        xwma->data_end = INT64_MAX;
    } else
        xwma->data_end = avio_tell(pb) + size;


    if (dpds_table && dpds_table_size) {
        int64_t cur_pos;
        const uint32_t bytes_per_sample
                = (st->codec->channels * st->codec->bits_per_coded_sample) >> 3;

        /* Estimate the duration from the total number of output bytes. */
        const uint64_t total_decoded_bytes = dpds_table[dpds_table_size - 1];

        if (!bytes_per_sample) {
            av_log(s, AV_LOG_ERROR,
                   "Invalid bits_per_coded_sample %d for %d channels\n",
                   st->codec->bits_per_coded_sample, st->codec->channels);
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        st->duration = total_decoded_bytes / bytes_per_sample;

        /* Use the dpds data to build a seek table. We can only do this after
         * we know the offset to the data chunk, as we need that to determine
         * the actual offset to each input block.
         * Note: If we allowed ourselves to assume that the data chunk always
         * follows immediately after the dpds block, we could of course guess
         * the data block's start offset already while reading the dpds chunk.
         * I decided against that, just in case other chunks ever are
         * discovered.
         */
        cur_pos = avio_tell(pb);
        for (i = 0; i < dpds_table_size; ++i) {
            /* From the number of output bytes that would accumulate in the
             * output buffer after decoding the first (i+1) packets, we compute
             * an offset / timestamp pair.
             */
            av_add_index_entry(st,
                               cur_pos + (i+1) * st->codec->block_align, /* pos */
                               dpds_table[i] / bytes_per_sample,         /* timestamp */
                               st->codec->block_align,                   /* size */
                               0,                                        /* duration */
                               AVINDEX_KEYFRAME);
        }
    } else if (st->codec->bit_rate) {
        /* No dpds chunk was present (or only an empty one), so estimate
         * the total duration using the average bits per sample and the
         * total data length.
         */
        st->duration = (size<<3) * st->codec->sample_rate / st->codec->bit_rate;
    }

    av_free(dpds_table);

    return 0;

fail:
    av_free(dpds_table);
    return ret;
}
 
/**
 * Read one WMA block from the data chunk into a packet.
 * Returns the number of bytes read, or a negative error / AVERROR_EOF
 * once the data chunk is exhausted.
 */
static int xwma_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    XWMAContext *xwma = s->priv_data;
    AVStream *st     = s->streams[0];
    int64_t remaining;
    int block_size, ret;

    /* Stop once we have consumed the whole data chunk. */
    remaining = xwma->data_end - avio_tell(s->pb);
    if (remaining <= 0)
        return AVERROR_EOF;

    /* One block per packet; 2230 is the usual default block size. */
    block_size = (st->codec->block_align > 1) ? st->codec->block_align : 2230;
    if (block_size > remaining)
        block_size = remaining;

    ret = av_get_packet(s->pb, pkt, block_size);
    if (ret < 0)
        return ret;

    pkt->stream_index = 0;
    return ret;
}
 
/* Demuxer registration for Microsoft xWMA (XAudio 2 audio container). */
AVInputFormat ff_xwma_demuxer = {
    .name           = "xwma",
    .long_name      = NULL_IF_CONFIG_SMALL("Microsoft xWMA"),
    .priv_data_size = sizeof(XWMAContext),
    .read_probe     = xwma_probe,
    .read_header    = xwma_read_header,
    .read_packet    = xwma_read_packet,
};
/contrib/sdk/sources/ffmpeg/libavformat/yop.c
0,0 → 1,230
/*
* Psygnosis YOP demuxer
*
* Copyright (C) 2010 Mohamed Naufal Basheer <naufal11@gmail.com>
* derived from the code by
* Copyright (C) 2009 Thomas P. Higdon <thomas.p.higdon@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
 
/* Per-file demuxer state for Psygnosis YOP. */
typedef struct yop_dec_context {
    AVPacket video_packet;  /* buffered video frame, handed out on the read after the audio */

    int odd_frame;          /* frame parity flag, stored in the first video byte for the decoder */
    int frame_size;         /* total size of one multiplexed frame in bytes */
    int audio_block_length; /* bytes of audio data at the start of each frame */
    int palette_size;       /* bytes of palette data preceding the video data */
} YopDecContext;
 
/**
 * Probe for a YOP file: "YO" signature followed by a series of sanity
 * checks on the header fields (version digits, non-zero frame rate and
 * frame size, even width/height, plausible audio block length).
 */
static int yop_probe(AVProbeData *probe_packet)
{
    const uint8_t *b = probe_packet->buf;

    if (AV_RB16(b) != AV_RB16("YO"))
        return 0;
    if (b[2] >= 10 || b[3] >= 10)   /* version bytes must be single digits */
        return 0;
    if (!b[6] || !b[7])             /* frame rate and frame size non-zero */
        return 0;
    if ((b[8] & 1) || (b[10] & 1))  /* width and height must be even */
        return 0;
    /* audio block length must be >= 920 and fit inside the frame */
    if (AV_RL16(b + 12 + 6) < 920)
        return 0;
    if (AV_RL16(b + 12 + 6) >= b[12] * 3 + 4 + b[7] * 2048)
        return 0;

    return AVPROBE_SCORE_MAX * 3 / 4;
}
 
/**
 * Parse the fixed-layout YOP header (one 2048-byte sector) and create one
 * audio stream (IMA ADPCM, mono, 22050 Hz) and one video stream.
 * The field reads below follow the on-disk header layout, so their order
 * must not change.
 */
static int yop_read_header(AVFormatContext *s)
{
    YopDecContext *yop = s->priv_data;
    AVIOContext *pb = s->pb;

    AVCodecContext *audio_dec, *video_dec;
    AVStream *audio_stream, *video_stream;

    int frame_rate, ret;

    /* Stream 0 is audio, stream 1 is video; yop_read_packet relies on
     * these indices. */
    audio_stream = avformat_new_stream(s, NULL);
    video_stream = avformat_new_stream(s, NULL);
    if (!audio_stream || !video_stream)
        return AVERROR(ENOMEM);

    // Extra data that will be passed to the decoder
    if (ff_alloc_extradata(video_stream->codec, 8))
        return AVERROR(ENOMEM);

    // Audio
    audio_dec                 = audio_stream->codec;
    audio_dec->codec_type     = AVMEDIA_TYPE_AUDIO;
    audio_dec->codec_id       = AV_CODEC_ID_ADPCM_IMA_APC;
    audio_dec->channels       = 1;
    audio_dec->channel_layout = AV_CH_LAYOUT_MONO;
    audio_dec->sample_rate    = 22050;

    // Video
    video_dec             = video_stream->codec;
    video_dec->codec_type = AVMEDIA_TYPE_VIDEO;
    video_dec->codec_id   = AV_CODEC_ID_YOP;

    avio_skip(pb, 6);   /* skip signature and version bytes */

    frame_rate       = avio_r8(pb);
    yop->frame_size  = avio_r8(pb) * 2048;  /* frame size is in 2048-byte sectors */
    video_dec->width  = avio_rl16(pb);
    video_dec->height = avio_rl16(pb);

    /* YOP pixels are stored with a 1:2 aspect ratio. */
    video_stream->sample_aspect_ratio = (AVRational){1, 2};

    ret = avio_read(pb, video_dec->extradata, 8);
    if (ret < 8)
        return ret < 0 ? ret : AVERROR_EOF;

    /* extradata[0] is the palette color count; each color is 3 bytes,
     * plus a 4-byte palette header. */
    yop->palette_size       = video_dec->extradata[0] * 3 + 4;
    yop->audio_block_length = AV_RL16(video_dec->extradata + 6);

    video_dec->bit_rate = 8 * (yop->frame_size - yop->audio_block_length) * frame_rate;

    // 1840 samples per frame, 1 nibble per sample; hence 1840/2 = 920
    if (yop->audio_block_length < 920 ||
        yop->audio_block_length + yop->palette_size >= yop->frame_size) {
        av_log(s, AV_LOG_ERROR, "YOP has invalid header\n");
        return AVERROR_INVALIDDATA;
    }

    /* Frame data starts at the second 2048-byte sector. */
    avio_seek(pb, 2048, SEEK_SET);

    avpriv_set_pts_info(video_stream, 32, 1, frame_rate);

    return 0;
}
 
/**
 * Read one multiplexed YOP frame and return its audio and video parts on
 * alternating calls: the first call reads the whole frame, hands out the
 * audio packet and caches the video packet; the second call returns the
 * cached video packet.
 */
static int yop_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    YopDecContext *yop = s->priv_data;
    AVIOContext *pb = s->pb;

    int ret;
    int actual_video_data_size = yop->frame_size -
                                 yop->audio_block_length - yop->palette_size;

    yop->video_packet.stream_index = 1;

    /* If a video packet is pending from the previous call, hand it out
     * and clear our reference so it is not freed twice. */
    if (yop->video_packet.data) {
        *pkt                    = yop->video_packet;
        yop->video_packet.data  = NULL;
        yop->video_packet.buf   = NULL;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
        yop->video_packet.destruct = NULL;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        yop->video_packet.size  = 0;
        /* First video byte carries the frame parity for the decoder. */
        pkt->data[0]            = yop->odd_frame;
        pkt->flags             |= AV_PKT_FLAG_KEY;
        yop->odd_frame         ^= 1;
        return pkt->size;
    }
    /* Start of a new frame: allocate the video packet (palette + video). */
    ret = av_new_packet(&yop->video_packet,
                        yop->frame_size - yop->audio_block_length);
    if (ret < 0)
        return ret;

    yop->video_packet.pos = avio_tell(pb);

    ret = avio_read(pb, yop->video_packet.data, yop->palette_size);
    if (ret < 0) {
        goto err_out;
    } else if (ret < yop->palette_size) {
        ret = AVERROR_EOF;
        goto err_out;
    }

    /* 1840 ADPCM nibbles = 920 bytes of audio per frame. */
    ret = av_get_packet(pb, pkt, 920);
    if (ret < 0)
        goto err_out;

    // Set position to the start of the frame
    pkt->pos = yop->video_packet.pos;

    /* Skip any audio padding beyond the 920 bytes actually used. */
    avio_skip(pb, yop->audio_block_length - ret);

    ret = avio_read(pb, yop->video_packet.data + yop->palette_size,
                    actual_video_data_size);
    if (ret < 0)
        goto err_out;
    else if (ret < actual_video_data_size)
        av_shrink_packet(&yop->video_packet, yop->palette_size + ret);

    // Arbitrarily return the audio data first
    return yop->audio_block_length;

err_out:
    av_free_packet(&yop->video_packet);
    return ret;
}
 
/* Release the cached video packet, if one is still pending, before the
 * demuxer context is destroyed. */
static int yop_read_close(AVFormatContext *s)
{
    av_free_packet(&((YopDecContext *)s->priv_data)->video_packet);
    return 0;
}
 
/**
 * Seek by frame number. All YOP frames have the same size, so the target
 * offset is a simple multiplication; only the video stream (index 1) can
 * be used as the seek reference.
 */
static int yop_read_seek(AVFormatContext *s, int stream_index,
                         int64_t timestamp, int flags)
{
    YopDecContext *yop = s->priv_data;
    int64_t frame_pos, pos_min, pos_max;
    int frame_count;

    /* Stream 0 is audio; seeking is only defined on the video stream. */
    if (!stream_index)
        return -1;

    pos_min     = s->data_offset;
    pos_max     = avio_size(s->pb) - yop->frame_size;
    frame_count = (pos_max - pos_min) / yop->frame_size;

    /* Clamp the requested frame number into the valid range. */
    timestamp   = FFMAX(0, FFMIN(frame_count, timestamp));

    frame_pos   = timestamp * yop->frame_size + pos_min;

    if (avio_seek(s->pb, frame_pos, SEEK_SET) < 0)
        return -1;

    /* Discard any cached video packet and resync the frame parity. */
    av_free_packet(&yop->video_packet);
    yop->odd_frame = timestamp & 1;

    return 0;
}
 
/* Demuxer registration for Psygnosis YOP files. */
AVInputFormat ff_yop_demuxer = {
    .name           = "yop",
    .long_name      = NULL_IF_CONFIG_SMALL("Psygnosis YOP"),
    .priv_data_size = sizeof(YopDecContext),
    .read_probe     = yop_probe,
    .read_header    = yop_read_header,
    .read_packet    = yop_read_packet,
    .read_close     = yop_read_close,
    .read_seek      = yop_read_seek,
    .extensions     = "yop",
    .flags          = AVFMT_GENERIC_INDEX,
};
/contrib/sdk/sources/ffmpeg/libavformat/yuv4mpeg.c
0,0 → 1,609
/*
* YUV4MPEG format
* Copyright (c) 2001, 2002, 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/pixdesc.h"
#include "avformat.h"
#include "internal.h"
 
#define Y4M_MAGIC "YUV4MPEG2"   /* stream header magic */
#define Y4M_FRAME_MAGIC "FRAME" /* per-frame header magic */
#define Y4M_LINE_MAX 256        /* maximum length of a stream header line */
 
#if CONFIG_YUV4MPEGPIPE_MUXER
/**
 * Build the YUV4MPEG2 stream header line into buf (capacity Y4M_LINE_MAX).
 * Encodes width/height, frame rate, interlacing, pixel aspect and the
 * colorspace tag derived from the pixel format.
 *
 * Fix over the original: a header longer than Y4M_LINE_MAX was silently
 * truncated by snprintf, which would have emitted a corrupt stream header.
 * Truncation (or an snprintf error) is now reported as an error; the caller
 * already treats any negative return as failure.
 *
 * Returns the header length on success, a negative AVERROR on failure.
 */
static int yuv4_generate_header(AVFormatContext *s, char* buf)
{
    AVStream *st;
    int width, height;
    int raten, rated, aspectn, aspectd, n;
    char inter;
    const char *colorspace = "";

    st     = s->streams[0];
    width  = st->codec->width;
    height = st->codec->height;

    /* Reduce the time base to a frame rate fraction that fits 31 bits. */
    av_reduce(&raten, &rated, st->codec->time_base.den,
              st->codec->time_base.num, (1UL << 31) - 1);

    aspectn = st->sample_aspect_ratio.num;
    aspectd = st->sample_aspect_ratio.den;

    if (aspectn == 0 && aspectd == 1)
        aspectd = 0;  // 0:0 means unknown

    inter = 'p'; /* progressive is the default */
    if (st->codec->coded_frame && st->codec->coded_frame->interlaced_frame)
        inter = st->codec->coded_frame->top_field_first ? 't' : 'b';
    /* An explicit field_order overrides the coded_frame flags. */
    if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
        inter = 'p';
    } else if (st->codec->field_order == AV_FIELD_TB || st->codec->field_order == AV_FIELD_TT) {
        inter = 't';
    } else if (st->codec->field_order == AV_FIELD_BT || st->codec->field_order == AV_FIELD_BB) {
        inter = 'b';
    }

    switch (st->codec->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
        colorspace = " Cmono";
        break;
    case AV_PIX_FMT_GRAY16:
        colorspace = " Cmono16";
        break;
    case AV_PIX_FMT_YUV411P:
        colorspace = " C411 XYSCSS=411";
        break;
    case AV_PIX_FMT_YUV420P:
        /* 4:2:0 variants differ only in chroma siting. */
        switch (st->codec->chroma_sample_location) {
        case AVCHROMA_LOC_TOPLEFT: colorspace = " C420paldv XYSCSS=420PALDV"; break;
        case AVCHROMA_LOC_LEFT:    colorspace = " C420mpeg2 XYSCSS=420MPEG2"; break;
        default:                   colorspace = " C420jpeg XYSCSS=420JPEG";   break;
        }
        break;
    case AV_PIX_FMT_YUV422P:
        colorspace = " C422 XYSCSS=422";
        break;
    case AV_PIX_FMT_YUV444P:
        colorspace = " C444 XYSCSS=444";
        break;
    case AV_PIX_FMT_YUV420P9:
        colorspace = " C420p9 XYSCSS=420P9";
        break;
    case AV_PIX_FMT_YUV422P9:
        colorspace = " C422p9 XYSCSS=422P9";
        break;
    case AV_PIX_FMT_YUV444P9:
        colorspace = " C444p9 XYSCSS=444P9";
        break;
    case AV_PIX_FMT_YUV420P10:
        colorspace = " C420p10 XYSCSS=420P10";
        break;
    case AV_PIX_FMT_YUV422P10:
        colorspace = " C422p10 XYSCSS=422P10";
        break;
    case AV_PIX_FMT_YUV444P10:
        colorspace = " C444p10 XYSCSS=444P10";
        break;
    case AV_PIX_FMT_YUV420P12:
        colorspace = " C420p12 XYSCSS=420P12";
        break;
    case AV_PIX_FMT_YUV422P12:
        colorspace = " C422p12 XYSCSS=422P12";
        break;
    case AV_PIX_FMT_YUV444P12:
        colorspace = " C444p12 XYSCSS=444P12";
        break;
    case AV_PIX_FMT_YUV420P14:
        colorspace = " C420p14 XYSCSS=420P14";
        break;
    case AV_PIX_FMT_YUV422P14:
        colorspace = " C422p14 XYSCSS=422P14";
        break;
    case AV_PIX_FMT_YUV444P14:
        colorspace = " C444p14 XYSCSS=444P14";
        break;
    case AV_PIX_FMT_YUV420P16:
        colorspace = " C420p16 XYSCSS=420P16";
        break;
    case AV_PIX_FMT_YUV422P16:
        colorspace = " C422p16 XYSCSS=422P16";
        break;
    case AV_PIX_FMT_YUV444P16:
        colorspace = " C444p16 XYSCSS=444P16";
        break;
    }

    /* construct stream header, if this is the first frame */
    n = snprintf(buf, Y4M_LINE_MAX, "%s W%d H%d F%d:%d I%c A%d:%d%s\n",
                 Y4M_MAGIC, width, height, raten, rated, inter,
                 aspectn, aspectd, colorspace);

    /* snprintf reports the full (untruncated) length; anything at or above
     * the buffer size means the header was cut short. */
    if (n < 0 || n >= Y4M_LINE_MAX)
        return AVERROR(EINVAL);

    return n;
}
 
/**
 * Write one raw video frame. The packet payload is an AVPicture struct
 * (AVFMT_RAWPICTURE layout), not pixel data. Emits the stream header
 * before the first frame, then a FRAME line followed by the luma plane
 * and, for non-gray formats, the two chroma planes.
 */
static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st = s->streams[pkt->stream_index];
    AVIOContext *pb = s->pb;
    AVPicture *picture, picture_tmp;
    int* first_pkt = s->priv_data;  /* flag set by yuv4_write_header */
    int width, height, h_chroma_shift, v_chroma_shift;
    int i;
    char buf2[Y4M_LINE_MAX + 1];
    char buf1[20];
    uint8_t *ptr, *ptr1, *ptr2;

    /* pkt->data holds an AVPicture (plane pointers + linesizes). */
    memcpy(&picture_tmp, pkt->data, sizeof(AVPicture));
    picture = &picture_tmp;

    /* for the first packet we have to output the header as well */
    if (*first_pkt) {
        *first_pkt = 0;
        if (yuv4_generate_header(s, buf2) < 0) {
            av_log(s, AV_LOG_ERROR,
                   "Error. YUV4MPEG stream header write failed.\n");
            return AVERROR(EIO);
        } else {
            avio_write(pb, buf2, strlen(buf2));
        }
    }

    /* construct frame header */

    snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
    avio_write(pb, buf1, strlen(buf1));

    width  = st->codec->width;
    height = st->codec->height;

    ptr = picture->data[0];

    switch (st->codec->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV444P:
        break;
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV422P12:
    case AV_PIX_FMT_YUV444P12:
    case AV_PIX_FMT_YUV420P14:
    case AV_PIX_FMT_YUV422P14:
    case AV_PIX_FMT_YUV444P14:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV444P16:
        /* >8-bit formats store two bytes per sample. */
        width *= 2;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "The pixel format '%s' is not supported.\n",
               av_get_pix_fmt_name(st->codec->pix_fmt));
        return AVERROR(EINVAL);
    }

    /* Write the luma plane line by line (linesize may exceed width). */
    for (i = 0; i < height; i++) {
        avio_write(pb, ptr, width);
        ptr += picture->linesize[0];
    }

    if (st->codec->pix_fmt != AV_PIX_FMT_GRAY8 &&
        st->codec->pix_fmt != AV_PIX_FMT_GRAY16) {
        // Adjust for smaller Cb and Cr planes
        av_pix_fmt_get_chroma_sub_sample(st->codec->pix_fmt, &h_chroma_shift,
                                         &v_chroma_shift);
        width  = FF_CEIL_RSHIFT(width,  h_chroma_shift);
        height = FF_CEIL_RSHIFT(height, v_chroma_shift);

        ptr1 = picture->data[1];
        ptr2 = picture->data[2];
        for (i = 0; i < height; i++) { /* Cb */
            avio_write(pb, ptr1, width);
            ptr1 += picture->linesize[1];
        }
        for (i = 0; i < height; i++) { /* Cr */
            avio_write(pb, ptr2, width);
            ptr2 += picture->linesize[2];
        }
    }

    return 0;
}
 
/**
 * Validate the muxer configuration: exactly one rawvideo stream with a
 * pixel format representable in YUV4MPEG (the high-bit-depth formats are
 * non-standard and require -strict -1). Arms the first-packet flag so the
 * stream header is emitted with the first frame.
 */
static int yuv4_write_header(AVFormatContext *s)
{
    int *first_pkt = s->priv_data;

    if (s->nb_streams != 1)
        return AVERROR(EIO);

    if (s->streams[0]->codec->codec_id != AV_CODEC_ID_RAWVIDEO) {
        av_log(s, AV_LOG_ERROR, "ERROR: Only rawvideo supported.\n");
        return AVERROR_INVALIDDATA;
    }

    switch (s->streams[0]->codec->pix_fmt) {
    case AV_PIX_FMT_YUV411P:
        av_log(s, AV_LOG_WARNING, "Warning: generating rarely used 4:1:1 YUV "
               "stream, some mjpegtools might not work.\n");
        break;
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV444P:
        break;
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV422P12:
    case AV_PIX_FMT_YUV444P12:
    case AV_PIX_FMT_YUV420P14:
    case AV_PIX_FMT_YUV422P14:
    case AV_PIX_FMT_YUV444P14:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV444P16:
        /* Non-standard extensions: only allowed with relaxed compliance. */
        if (s->streams[0]->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
            av_log(s, AV_LOG_ERROR, "'%s' is not a official yuv4mpegpipe pixel format. "
                   "Use '-strict -1' to encode to this pixel format.\n",
                   av_get_pix_fmt_name(s->streams[0]->codec->pix_fmt));
            return AVERROR(EINVAL);
        }
        av_log(s, AV_LOG_WARNING, "Warning: generating non standard YUV stream. "
               "Mjpegtools will not work.\n");
        break;
    default:
        av_log(s, AV_LOG_ERROR, "ERROR: yuv4mpeg can only handle "
               "yuv444p, yuv422p, yuv420p, yuv411p and gray8 pixel formats. "
               "And using 'strict -1' also yuv444p9, yuv422p9, yuv420p9, "
               "yuv444p10, yuv422p10, yuv420p10, "
               "yuv444p12, yuv422p12, yuv420p12, "
               "yuv444p14, yuv422p14, yuv420p14, "
               "yuv444p16, yuv422p16, yuv420p16 "
               "and gray16 pixel formats. "
               "Use -pix_fmt to select one.\n");
        return AVERROR(EIO);
    }

    /* Tell yuv4_write_packet to emit the stream header first. */
    *first_pkt = 1;
    return 0;
}
 
/* Muxer registration for the YUV4MPEG pipe format (rawvideo only). */
AVOutputFormat ff_yuv4mpegpipe_muxer = {
    .name           = "yuv4mpegpipe",
    .long_name      = NULL_IF_CONFIG_SMALL("YUV4MPEG pipe"),
    .extensions     = "y4m",
    .priv_data_size = sizeof(int),   /* first-packet flag */
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_RAWVIDEO,
    .write_header   = yuv4_write_header,
    .write_packet   = yuv4_write_packet,
    .flags          = AVFMT_RAWPICTURE,
};
#endif
 
/* Header size increased to allow room for optional flags */
#define MAX_YUV4_HEADER 80
#define MAX_FRAME_HEADER 80
 
/**
 * Parse the YUV4MPEG2 stream header line and create the single rawvideo
 * stream. The header is a space-separated list of one-letter options
 * (W width, H height, C colorspace, I interlacing, F frame rate,
 * A pixel aspect, X vendor extension).
 */
static int yuv4_read_header(AVFormatContext *s)
{
    char header[MAX_YUV4_HEADER + 10];  // Include headroom for
                                        // the longest option
    char *tokstart, *tokend, *header_end, interlaced = '?';
    int i;
    AVIOContext *pb = s->pb;
    int width = -1, height  = -1, raten   = 0,
        rated =  0, aspectn =  0, aspectd = 0;
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE, alt_pix_fmt = AV_PIX_FMT_NONE;
    enum AVChromaLocation chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
    AVStream *st;

    /* Read the header line byte by byte up to the terminating newline. */
    for (i = 0; i < MAX_YUV4_HEADER; i++) {
        header[i] = avio_r8(pb);
        if (header[i] == '\n') {
            header[i + 1] = 0x20;  // Add a space after last option.
                                   // Makes parsing "444" vs "444alpha" easier.
            header[i + 2] = 0;
            break;
        }
    }
    if (i == MAX_YUV4_HEADER)
        return -1;
    if (strncmp(header, Y4M_MAGIC, strlen(Y4M_MAGIC)))
        return -1;

    header_end = &header[i + 1]; // Include space
    for (tokstart = &header[strlen(Y4M_MAGIC) + 1];
         tokstart < header_end; tokstart++) {
        if (*tokstart == 0x20)
            continue;
        switch (*tokstart++) {
        case 'W': // Width. Required.
            width    = strtol(tokstart, &tokend, 10);
            tokstart = tokend;
            break;
        case 'H': // Height. Required.
            height   = strtol(tokstart, &tokend, 10);
            tokstart = tokend;
            break;
        case 'C': // Color space
            /* NOTE: longer prefixes must be tested before shorter ones
             * ("420p16" before "420", etc.) — keep this order. */
            if (strncmp("420jpeg", tokstart, 7) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P;
                chroma_sample_location = AVCHROMA_LOC_CENTER;
            } else if (strncmp("420mpeg2", tokstart, 8) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P;
                chroma_sample_location = AVCHROMA_LOC_LEFT;
            } else if (strncmp("420paldv", tokstart, 8) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P;
                chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
            } else if (strncmp("420p16", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P16;
            } else if (strncmp("422p16", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P16;
            } else if (strncmp("444p16", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P16;
            } else if (strncmp("420p14", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P14;
            } else if (strncmp("422p14", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P14;
            } else if (strncmp("444p14", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P14;
            } else if (strncmp("420p12", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P12;
            } else if (strncmp("422p12", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P12;
            } else if (strncmp("444p12", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P12;
            } else if (strncmp("420p10", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P10;
            } else if (strncmp("422p10", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P10;
            } else if (strncmp("444p10", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P10;
            } else if (strncmp("420p9", tokstart, 5) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P9;
            } else if (strncmp("422p9", tokstart, 5) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P9;
            } else if (strncmp("444p9", tokstart, 5) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P9;
            } else if (strncmp("420", tokstart, 3) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P;
                chroma_sample_location = AVCHROMA_LOC_CENTER;
            } else if (strncmp("411", tokstart, 3) == 0) {
                pix_fmt = AV_PIX_FMT_YUV411P;
            } else if (strncmp("422", tokstart, 3) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P;
            } else if (strncmp("444alpha", tokstart, 8) == 0 ) {
                av_log(s, AV_LOG_ERROR, "Cannot handle 4:4:4:4 "
                       "YUV4MPEG stream.\n");
                return -1;
            } else if (strncmp("444", tokstart, 3) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P;
            } else if (strncmp("mono16", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_GRAY16;
            } else if (strncmp("mono", tokstart, 4) == 0) {
                pix_fmt = AV_PIX_FMT_GRAY8;
            } else {
                av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains an unknown "
                       "pixel format.\n");
                return -1;
            }
            while (tokstart < header_end && *tokstart != 0x20)
                tokstart++;
            break;
        case 'I': // Interlace type
            interlaced = *tokstart++;
            break;
        case 'F': // Frame rate
            sscanf(tokstart, "%d:%d", &raten, &rated); // 0:0 if unknown
            while (tokstart < header_end && *tokstart != 0x20)
                tokstart++;
            break;
        case 'A': // Pixel aspect
            sscanf(tokstart, "%d:%d", &aspectn, &aspectd); // 0:0 if unknown
            while (tokstart < header_end && *tokstart != 0x20)
                tokstart++;
            break;
        case 'X': // Vendor extensions
            if (strncmp("YSCSS=", tokstart, 6) == 0) {
                // Older nonstandard pixel format representation
                tokstart += 6;
                if (strncmp("420JPEG", tokstart, 7) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P;
                else if (strncmp("420MPEG2", tokstart, 8) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P;
                else if (strncmp("420PALDV", tokstart, 8) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P;
                else if (strncmp("420P9", tokstart, 5) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P9;
                else if (strncmp("422P9", tokstart, 5) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P9;
                else if (strncmp("444P9", tokstart, 5) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P9;
                else if (strncmp("420P10", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P10;
                else if (strncmp("422P10", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P10;
                else if (strncmp("444P10", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P10;
                else if (strncmp("420P12", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P12;
                else if (strncmp("422P12", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P12;
                else if (strncmp("444P12", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P12;
                else if (strncmp("420P14", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P14;
                else if (strncmp("422P14", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P14;
                else if (strncmp("444P14", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P14;
                else if (strncmp("420P16", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P16;
                else if (strncmp("422P16", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P16;
                else if (strncmp("444P16", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P16;
                else if (strncmp("411", tokstart, 3) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV411P;
                else if (strncmp("422", tokstart, 3) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P;
                else if (strncmp("444", tokstart, 3) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P;
            }
            while (tokstart < header_end && *tokstart != 0x20)
                tokstart++;
            break;
        }
    }

    if (width == -1 || height == -1) {
        av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n");
        return -1;
    }

    /* The C tag wins over the XYSCSS extension; default to 4:2:0. */
    if (pix_fmt == AV_PIX_FMT_NONE) {
        if (alt_pix_fmt == AV_PIX_FMT_NONE)
            pix_fmt = AV_PIX_FMT_YUV420P;
        else
            pix_fmt = alt_pix_fmt;
    }

    if (raten <= 0 || rated <= 0) {
        // Frame rate unknown
        raten = 25;
        rated = 1;
    }

    if (aspectn == 0 && aspectd == 0) {
        // Pixel aspect unknown
        aspectd = 1;
    }

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->width  = width;
    st->codec->height = height;
    av_reduce(&raten, &rated, raten, rated, (1UL << 31) - 1);
    avpriv_set_pts_info(st, 64, rated, raten);
    st->codec->pix_fmt                = pix_fmt;
    st->codec->codec_type             = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id               = AV_CODEC_ID_RAWVIDEO;
    st->sample_aspect_ratio           = (AVRational){ aspectn, aspectd };
    st->codec->chroma_sample_location = chroma_sample_location;

    switch (interlaced){
    case 'p':
        st->codec->field_order = AV_FIELD_PROGRESSIVE;
        break;
    case 't':
        st->codec->field_order = AV_FIELD_TB;
        break;
    case 'b':
        st->codec->field_order = AV_FIELD_BT;
        break;
    case 'm':
        av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains mixed "
               "interlaced and non-interlaced frames.\n");
        /* fallthrough: treat mixed mode as unknown field order */
    case '?':
        st->codec->field_order = AV_FIELD_UNKNOWN;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}
 
/**
 * Read one frame: consume the FRAME header line, then read exactly one
 * picture's worth of raw pixel data into the packet.
 */
static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st = s->streams[0];
    char line[MAX_FRAME_HEADER + 1];
    int idx = 0, frame_size, nread;

    /* Collect the frame header line up to (and including) the newline. */
    while (idx < MAX_FRAME_HEADER) {
        line[idx] = avio_r8(s->pb);
        if (line[idx] == '\n') {
            line[idx + 1] = 0;
            break;
        }
        idx++;
    }

    /* I/O error takes precedence over EOF, which takes precedence over
     * an overlong header. */
    if (s->pb->error)
        return s->pb->error;
    if (s->pb->eof_reached)
        return AVERROR_EOF;
    if (idx == MAX_FRAME_HEADER)
        return AVERROR_INVALIDDATA;

    if (strncmp(line, Y4M_FRAME_MAGIC, strlen(Y4M_FRAME_MAGIC)))
        return AVERROR_INVALIDDATA;

    frame_size = avpicture_get_size(st->codec->pix_fmt,
                                    st->codec->width, st->codec->height);
    if (frame_size < 0)
        return frame_size;

    nread = av_get_packet(s->pb, pkt, frame_size);
    if (nread < 0)
        return nread;
    if (nread != frame_size)
        return s->pb->eof_reached ? AVERROR_EOF : AVERROR(EIO);

    pkt->stream_index = 0;
    return 0;
}
 
/* Probe: a YUV4MPEG stream starts with the literal "YUV4MPEG2" magic. */
static int yuv4_probe(AVProbeData *pd)
{
    return strncmp(pd->buf, Y4M_MAGIC, sizeof(Y4M_MAGIC) - 1) == 0
           ? AVPROBE_SCORE_MAX : 0;
}
 
#if CONFIG_YUV4MPEGPIPE_DEMUXER
/* Demuxer registration for the YUV4MPEG pipe format. */
AVInputFormat ff_yuv4mpegpipe_demuxer = {
    .name        = "yuv4mpegpipe",
    .long_name   = NULL_IF_CONFIG_SMALL("YUV4MPEG pipe"),
    .read_probe  = yuv4_probe,
    .read_header = yuv4_read_header,
    .read_packet = yuv4_read_packet,
    .extensions  = "y4m",
};
#endif